gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
"""
"""
from __future__ import absolute_import, unicode_literals
from __future__ import print_function
from builtins import str
from celery import shared_task
from django.http import QueryDict
from django.db.models import Q
from isisdata.models import Citation, CRUDRule, Authority
from isisdata.filters import CitationFilter, AuthorityFilter
from isisdata.operations import filter_queryset
from django.contrib.auth.models import User
import logging, iso8601
from django.utils import timezone
from django.conf import settings
logger = logging.getLogger(__name__)
def _load_model_instance(module, cname, pk, qs=False):
    """Dynamically import model class ``cname`` from ``module`` and load row ``pk``.

    When ``qs`` is True, return a one-element queryset instead of the
    instance itself (handy when the caller wants queryset methods).
    """
    imported = __import__(module, fromlist=[cname])
    model_class = getattr(imported, cname)
    if qs:
        return model_class.objects.filter(pk=pk)
    return model_class.objects.get(pk=pk)
@shared_task
def update_instance(*args, **kwargs):
    """Set ``field`` to ``value`` on one model instance and save it.

    Accepts either the intended signature ``(module, cname, pk, field,
    value)`` or a six-tuple where a chained upstream task prepended its
    return value as the first positional argument.
    """
    if len(args) == 5:      # Called directly with intended signature.
        module, cname, pk, field, value = args
    elif len(args) == 6:    # Upstream task may have returned a value.
        _, module, cname, pk, field, value = args
    else:
        # Bugfix: previously any other arity fell through and raised an
        # opaque UnboundLocalError below; fail with a clear message instead.
        raise ValueError('update_instance expects 5 or 6 positional'
                         ' arguments, got %d' % len(args))
    obj = _load_model_instance(module, cname, pk)
    setattr(obj, field, value)
    obj.save()
@shared_task
def update_task(task, amount):
    """Advance the task's progress counter by ``amount`` and persist it."""
    task.current_value = task.current_value + amount
    task.save()
@shared_task
def update_task_status(task, status):
    """Persist a new ``status`` on the given task instance."""
    task.status = status
    task.save()
@shared_task
def bulk_update_instances(task_data, queryset, field, value):
    """
    Iteratively update objects in a queryset, using the ``save()`` method.
    This is necessary for some cases in which we need to trigger post-save
    signals and execute instance-specific code.
    """
    task_module, task_model, task_pk = task_data
    # NOTE(review): the task instance is loaded but not updated during the
    # loop — presumably progress reporting was intended; confirm.
    task = _load_model_instance(task_module, task_model, task_pk)
    for instance in queryset:
        setattr(instance, field, value)
        instance.save()
def _get_filtered_record_queryset(filter_params_raw, user_id=None, type='CITATION'):
    """Build a filtered Citation or Authority queryset from raw querystring data.

    Parameters
    ----------
    filter_params_raw : str
        URL-encoded filter parameters.
    user_id : int, optional
        When given, the queryset is first restricted to records the user
        may update (CRUDRule.UPDATE).
    type : str
        Either ``'CITATION'`` (default) or ``'AUTHORITY'``.

    Returns
    -------
    tuple
        ``(queryset, filter_params_raw)``.
    """
    # The filterset needs a mutable QueryDict.
    filter_params = QueryDict(filter_params_raw, mutable=True)
    is_authority = (type == 'AUTHORITY')
    base_qs = Authority.objects.all() if is_authority else Citation.objects.all()
    if user_id:
        # Restrict to records this user is permitted to update.
        base_qs = filter_queryset(User.objects.get(pk=user_id), base_qs,
                                  CRUDRule.UPDATE)
    filterset_class = AuthorityFilter if is_authority else CitationFilter
    return filterset_class(filter_params, queryset=base_qs).qs, filter_params_raw
@shared_task
def save_creation_to_citation(user_id, filter_params_raw, prepend_value, task_id=None, object_type='CITATION'):
    """Back-fill stored creator and creation date on the filtered records.

    For each record matching ``filter_params_raw`` (that ``user_id`` may
    update), copy ``created_by`` into the stored/native creator field and,
    when no native creation date exists, copy the FileMaker date (or the
    configured default) into the stored/native creation-date field.
    Progress and final state are recorded on the AsyncTask when ``task_id``
    is given.
    """
    from isisdata.models import AsyncTask
    queryset, _ = _get_filtered_record_queryset(filter_params_raw, user_id, type=object_type)
    # Bugfix: task_id defaults to None, but the task was fetched
    # unconditionally, raising on AsyncTask.objects.get(pk=None).
    task = AsyncTask.objects.get(pk=task_id) if task_id else None
    try:
        for obj in queryset:
            if task:
                task.current_value += 1
                task.save()
            # Store the creator on the record when it is a real User.
            creator = obj.created_by
            if isinstance(creator, User):
                if object_type == 'AUTHORITY':
                    Authority.objects.filter(pk=obj.id).update(created_by_stored=creator)
                else:
                    Citation.objects.filter(pk=obj.id).update(created_by_native=creator)
            # Store the creation date: prefer the FileMaker date, else the
            # configured default.
            if not obj.created_native:
                if obj.created_on_fm:
                    stored_date = obj.created_on_fm
                else:
                    stored_date = iso8601.parse_date(settings.CITATION_CREATION_DEFAULT_DATE)
                if object_type == 'AUTHORITY':
                    Authority.objects.filter(pk=obj.id).update(created_on_stored=stored_date)
                else:
                    Citation.objects.filter(pk=obj.id).update(created_native=stored_date)
        if task:
            task.state = 'SUCCESS'
            task.save()
    except Exception as E:
        # Bugfix: log message previously named the wrong function.
        logger.exception('save_creation_to_citation failed for %s:: %s' % (filter_params_raw, prepend_value))
        if task:
            task.value = str(E)
            task.state = 'FAILURE'
            task.save()
@shared_task
def bulk_prepend_record_history(user_id, filter_params_raw, prepend_value, task_id=None, object_type='CITATION'):
    """Prepend an attributed note to ``record_history`` on all filtered records.

    The note is stamped with the current date/time and the acting user's
    username, then prepended to every matching record in a single bulk
    ``update()``. Task state is recorded on the AsyncTask when ``task_id``
    is given.
    """
    from django.db.models import Value as V
    from django.db.models.functions import Concat
    from isisdata.models import AsyncTask
    import datetime
    user = User.objects.get(pk=user_id)
    now = datetime.datetime.now().strftime('%Y-%m-%d at %I:%M%p')
    prepend_value = 'On %s, %s wrote: %s\n\n' % (now, user.username, prepend_value)
    queryset, _ = _get_filtered_record_queryset(filter_params_raw, user_id, type=object_type)
    try:
        # Bugfix: the bulk update itself can fail; it previously ran outside
        # the try block, so a failure was never recorded on the task.
        queryset.update(record_history=Concat(V(prepend_value), 'record_history'),
                        modified_by=user_id, modified_on=timezone.now())
        if task_id:
            task = AsyncTask.objects.get(pk=task_id)
            task.state = 'SUCCESS'
            task.save()
            logger.debug('success:: %s' % str(task_id))
    except Exception as E:
        # Use the module logger (not print) consistently with the other tasks.
        logger.exception('bulk_prepend_record_history failed for %s:: %s' % (filter_params_raw, prepend_value))
        if task_id:
            task = AsyncTask.objects.get(pk=task_id)
            task.value = str(E)
            task.state = 'FAILURE'
            task.save()
@shared_task
def bulk_change_tracking_state(user_id, filter_params_raw, target_state, info,
                               notes, task_id=None, object_type='CITATION'):
    """Move all eligible filtered records into ``target_state``.

    Only records whose current tracking state is an allowed predecessor of
    ``target_state`` are updated; a Tracking (or AuthorityTracking) entry is
    created for each changed record. Task state is recorded on the AsyncTask
    when ``task_id`` is given.
    """
    from curation.tracking import TrackingWorkflow
    # Bugfix: AuthorityTracking was referenced below without being imported,
    # raising NameError for object_type == 'AUTHORITY'.
    # NOTE(review): assumes AuthorityTracking lives in isisdata.models
    # alongside Tracking — confirm.
    from isisdata.models import AsyncTask, Tracking, AuthorityTracking
    queryset, _ = _get_filtered_record_queryset(filter_params_raw, user_id, type=object_type)
    # We should have already filtered out ineligible citations, but just in
    # case....
    allowed_prior = TrackingWorkflow.allowed(target_state)
    # bugfix ISISCB-1008: __in does not match NULL, so when None is among the
    # allowed prior states we need an explicit isnull clause as well.
    q = Q(tracking_state__in=allowed_prior)
    if None in allowed_prior:
        q = q | Q(tracking_state__isnull=True)
    queryset = queryset.filter(q)
    idents = list(queryset.values_list('id', flat=True))
    try:
        if target_state != Citation.HSTM_UPLOAD:
            queryset.update(tracking_state=target_state, modified_by=user_id, modified_on=timezone.now())
        else:
            queryset.update(hstm_uploaded=Citation.IS_HSTM_UPLOADED, modified_by=user_id, modified_on=timezone.now())
        # Record one tracking entry per changed record.
        if object_type == 'AUTHORITY':
            tracking_model, fk_name = AuthorityTracking, 'authority_id'
        else:
            tracking_model, fk_name = Tracking, 'citation_id'
        for ident in idents:
            tracking_model.objects.create(type_controlled=target_state,
                                          tracking_info=info,
                                          notes=notes,
                                          modified_by_id=user_id,
                                          **{fk_name: ident})
        if task_id:
            task = AsyncTask.objects.get(pk=task_id)
            task.state = 'SUCCESS'
            task.save()
            logger.debug('success:: %s' % str(task_id))
    except Exception as E:
        # logger.exception records the traceback, unlike logger.error(E).
        logger.exception('bulk_change_tracking_state failed for %s:: %s' % (filter_params_raw, target_state))
        if task_id:
            task = AsyncTask.objects.get(pk=task_id)
            task.value = str(E)
            task.state = 'FAILURE'
            task.save()
| |
# -*- coding: utf-8 -*-
"""Elements that will constitute the parse tree of a query.
You may use these items to build a tree representing a query,
or get a tree as the result of parsing a query string.
"""
from decimal import Decimal
# Sentinel distinguishing "attribute absent" from any real attribute value.
_MARKER = object()


class Item(object):
    """Base class for all items that compose the parse tree.

    An item is a part of a request.
    """

    # /!\ Note on Item (and subclasses) __magic__ methods: /!\
    #
    # These are recursive structures, so inside the __magic__ methods we
    # call the child's dunder directly (c.__repr__()) rather than the
    # builtin helper (repr(c)). The helper adds an extra frame per nesting
    # level, which would hit Python's recursion limit twice as fast.
    # The same applies to __str__, __eq__, and any future magic method.

    _equality_attrs = []

    @property
    def children(self):
        """As base of a tree structure, an item may have children."""
        return []  # none by default

    def __repr__(self):
        inner = ", ".join(child.__repr__() for child in self.children)
        return "%s(%s)" % (self.__class__.__name__, inner)

    def __eq__(self, other):
        """A basic equality operation."""
        if self.__class__ != other.__class__:
            return False
        if not all(getattr(self, attr, _MARKER) == getattr(other, attr, _MARKER)
                   for attr in self._equality_attrs):
            return False
        return all(mine.__eq__(theirs)
                   for mine, theirs in zip(self.children, other.children))
class SearchField(Item):
    """Indicate which field the search expression operates on.

    eg: *desc* in ``desc:(this OR that)``

    :param str name: name of the field
    :param expr: the searched expression
    """
    _equality_attrs = ['name']

    def __init__(self, name, expr):
        self.name = name
        self.expr = expr

    def __str__(self):
        return "%s:%s" % (self.name, self.expr.__str__())

    def __repr__(self):
        return "SearchField(%r, %s)" % (self.name, self.expr.__repr__())

    @property
    def children(self):
        """The only child is the expression."""
        return [self.expr]
class BaseGroup(Item):
    """Base class for a parenthesised group of expressions or field values.

    :param expr: the expression inside parenthesis
    """

    def __init__(self, expr):
        self.expr = expr

    def __str__(self):
        return "(%s)" % self.expr.__str__()

    @property
    def children(self):
        """The only child is the expression."""
        return [self.expr]
class Group(BaseGroup):
    """A parenthesised group of sub expressions."""
class FieldGroup(BaseGroup):
    """A parenthesised group of values for a query on a single field."""
def group_to_fieldgroup(g):  # FIXME: no use !
    """Rewrap a :class:`Group`'s expression in a :class:`FieldGroup`."""
    return FieldGroup(g.expr)
class Range(Item):
    """A range between two bounds.

    :param low: lower bound
    :param high: higher bound
    :param bool include_low: whether the lower bound is included
    :param bool include_high: whether the higher bound is included
    """
    # Bracket choice encodes bound inclusion, as in Lucene syntax.
    LOW_CHAR = {True: '[', False: '{'}
    HIGH_CHAR = {True: ']', False: '}'}

    def __init__(self, low, high, include_low=True, include_high=True):
        self.low = low
        self.high = high
        self.include_low = include_low
        self.include_high = include_high

    @property
    def children(self):
        """Children are the lower and higher bound expressions."""
        return [self.low, self.high]

    def __str__(self):
        opening = self.LOW_CHAR[self.include_low]
        closing = self.HIGH_CHAR[self.include_high]
        return "%s%s TO %s%s" % (opening, self.low.__str__(),
                                 self.high.__str__(), closing)
class Term(Item):
    """Base class for terms.

    :param str value: the value
    """
    WILDCARD = "*"
    _equality_attrs = ['value']

    def __init__(self, value):
        self.value = value

    def is_wildcard(self):
        """:return bool: True if the value is exactly the wildcard ``*``."""
        return self.value == self.WILDCARD

    def has_wildcard(self):
        """:return bool: True if the value contains the wildcard ``*``."""
        return self.WILDCARD in self.value

    def __str__(self):
        return self.value

    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self.value)
class Word(Term):
    """A single-word term.

    :param str value: the value
    """
class Phrase(Term):
    """A phrase term: a sequence of words enclosed in double quotes.

    :param str value: the value, including the quotes. Eg. ``'"my phrase"'``
    """

    def __init__(self, value):
        super(Phrase, self).__init__(value)
        # The surrounding quotes are kept as part of the stored value.
        assert self.value.endswith('"') and self.value.startswith('"'), (
            "Phrase value must contain the quotes")
class BaseApprox(Item):
    """Base class for approximations: fuzziness and proximity."""
    _equality_attrs = ['term', 'degree']

    def __repr__(self):
        return "%s(%s, %s)" % (self.__class__.__name__,
                               self.term.__repr__(), self.degree)

    @property
    def children(self):
        return [self.term]
class Fuzzy(BaseApprox):
    """Fuzzy search on a word.

    :param Word term: the approximated term
    :param degree: the degree, converted to :py:class:`decimal.Decimal`.
    """

    def __init__(self, term, degree=None):
        self.term = term
        # Default fuzziness is 0.5, as in Lucene.
        self.degree = Decimal(0.5 if degree is None else degree).normalize()

    def __str__(self):
        return "%s~%s" % (self.term, self.degree)
class Proximity(BaseApprox):
    """Proximity search on a phrase.

    :param Phrase term: the approximated phrase
    :param degree: the degree, converted to :py:func:`int`.
    """

    def __init__(self, term, degree=None):
        self.term = term
        self.degree = int(1 if degree is None else degree)

    def __str__(self):
        suffix = "%d" % self.degree if self.degree is not None else ""
        return "%s~" % self.term + suffix
class Boost(Item):
    """A boosted value, or a group thereof.

    :param expr: the boosted expression
    :param force: boosting force, converted to :py:class:`decimal.Decimal`
    """

    def __init__(self, expr, force):
        self.expr = expr
        self.force = Decimal(force).normalize()

    @property
    def children(self):
        """The only child is the boosted expression."""
        return [self.expr]

    def __str__(self):
        return "%s^%s" % (self.expr.__str__(), self.force)
class BaseOperation(Item):
    """Parent class for binary operations (such as OR and AND) used to join
    expressions.

    :param operands: expressions to apply operation on
    """

    def __init__(self, *operands):
        self.operands = operands

    def __str__(self):
        separator = " %s " % self.op
        return separator.join(str(operand) for operand in self.operands)

    @property
    def children(self):
        """Children are the operand expressions."""
        return self.operands
class UnknownOperation(BaseOperation):
    """Unknown Boolean operator.

    .. warning::
        Represents implicit operations (ie: term:foo term:bar): at parsing
        time we cannot know for sure which operator is meant. Lucene seems
        to reuse whatever operator was used before reaching that one,
        defaulting to AND, but nothing can be decided here...

    .. seealso::
        the :py:class:`.utils.UnknownOperationResolver` to resolve those
        nodes to OR and AND
    """
    op = ''

    def __str__(self):
        # No operator text: operands are simply space-separated.
        return " ".join(str(operand) for operand in self.operands)
class OrOperation(BaseOperation):
    """An OR expression."""
    op = 'OR'
class AndOperation(BaseOperation):
    """An AND expression."""
    op = 'AND'
def create_operation(cls, a, b):
    """Create an operation of type ``cls`` joining ``a`` and ``b``.

    When either side is already an operation of the same class, its
    operands are flattened into the new node instead of nesting it.
    """
    left = list(a.operands) if isinstance(a, cls) else [a]
    right = list(b.operands) if isinstance(b, cls) else [b]
    return cls(*(left + right))
class Unary(Item):
    """Parent class for unary operations.

    :param a: the expression the operator applies on
    """

    def __init__(self, a):
        self.a = a

    def __str__(self):
        return "%s%s" % (self.op, self.a.__str__())

    @property
    def children(self):
        return [self.a]
class Plus(Unary):
    """The plus (``+``) unary operation."""
    op = "+"
class Not(Unary):
    """The NOT unary operation."""
    op = 'NOT '
class Prohibit(Unary):
    """The prohibit (``-``) unary operation."""
    op = "-"
| |
#!/usr/bin/env python
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import webapp2
import json
import base64
from datetime import datetime
from datetime import timedelta
from googleapiclient.discovery import build
from google.appengine.api import app_identity
import os
import cloudstorage as gcs
from cloudstorage import NotFoundError
import config
import random
import string
import re
def set_last_end_time(project_id, bucket_name, end_time_str, offset):
    """ Write the end_time as a string value in a JSON object in GCS.
    This file is used to remember the last end_time in case one isn't provided

    :param end_time_str: timestamp formatted '%Y-%m-%dT%H:%M:%S.%fZ'
    :param offset: seconds added to the timestamp before it is stored
    :return: the stored (offset) timestamp string
    """
    # Parse, shift by the offset, and serialize back to the same format.
    parsed = datetime.strptime(end_time_str, '%Y-%m-%dT%H:%M:%S.%fZ')
    end_time_calc_str = (parsed + timedelta(seconds=offset)).strftime(
        '%Y-%m-%dT%H:%M:%S.%fZ')
    file_name = '{}.{}'.format(project_id, config.LAST_END_TIME_FILENAME)
    logging.debug("set_last_end_time - end_time_str: {}, end_time_Calc_str: {}".format(
        end_time_str, end_time_calc_str)
    )
    payload = {"end_time": end_time_calc_str}
    write_retry_params = gcs.RetryParams(backoff_factor=1.1)
    gcs_file = gcs.open('/{}/{}'.format(bucket_name, file_name),
                        'w',
                        content_type='text/plain',
                        retry_params=write_retry_params)
    gcs_file.write(json.dumps(payload))
    gcs_file.close()
    return end_time_calc_str
def get_last_end_time(project_id, bucket_name):
    """ Get the end_time as a string value from a JSON object in GCS.
    This file is used to remember the last end_time in case one isn't provided

    Returns None when the file is missing or cannot be read/parsed.
    """
    file_name = '{}.{}'.format(project_id, config.LAST_END_TIME_FILENAME)
    logging.debug("get_last_end_time - file_name: {}".format(file_name))
    try:
        gcs_file = gcs.open('/{}/{}'.format(bucket_name, file_name))
        contents = gcs_file.read()
        logging.debug("GCS FILE CONTENTS: {}".format(contents))
        last_end_time_str = json.loads(contents)["end_time"]
        gcs_file.close()
    except NotFoundError as nfe:
        logging.error("Missing file when reading {} from GCS: {}".format(file_name, nfe))
        last_end_time_str = None
    except Exception as e:
        # Best-effort: any read/parse failure falls back to "no stored time".
        logging.error("Received error when reading {} from GCS: {}".format(file_name, e))
        last_end_time_str = None
    return last_end_time_str
def publish_metrics(msg_list):
    """ Publish a batch of messages to the configured Pub/Sub topic.
    https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics/publish
    The token and batch_id are included as attributes
    """
    if not msg_list:
        logging.debug("No pubsub messages to publish")
        return
    service = build('pubsub', 'v1', cache_discovery=True)
    topic_path = 'projects/{project_id}/topics/{topic}'.format(
        project_id=app_identity.get_application_id(),
        topic=config.PUBSUB_TOPIC
    )
    service.projects().topics().publish(
        topic=topic_path, body={"messages": msg_list}
    ).execute()
def get_message_for_publish_metric(request, metadata):
    """ Build a single message dict for
    https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics/publish
    The token and batch_id are included as attributes
    """
    encoded_data = base64.b64encode(json.dumps(request).encode('utf-8'))
    return {
        "data": encoded_data,
        "attributes": {
            "batch_id": metadata["batch_id"],
            "token": config.PUBSUB_VERIFICATION_TOKEN,
            "batch_start_time": metadata["batch_start_time"],
            "src_message_id": metadata["message_id"]
        }
    }
def get_batch_id():
    """ Generate a 32-character id (uppercase letters and digits) used to
    uniquely identify each batch.
    """
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(random.choice(alphabet) for _ in range(32))
def check_date_format(date_str):
    """ Check that ``date_str`` matches the expected timestamp format,
    e.g. ``2019-02-08T14:00:00.311635Z``.

    Returns the match object (truthy) on success, None otherwise.
    """
    # Bugfix: the previous pattern used unescaped '.' and stray '+'
    # quantifiers ('-+', 'T+', 'Z+'), so malformed strings such as
    # '2019--02-08TT14:00:00x311635ZZ' were accepted.
    pattern = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z$")
    return pattern.match(date_str)
def check_exclusions(metric):
    """ Check whether to exclude a metric based on the inclusions OR exclusions list.
    Note that this checks inclusions first.
    returns True for metrics to include
    returns False for metrics to exclude
    """
    def kind_and_value_match(entry):
        # Both metricKind and valueType must match an entry.
        return (metric['metricKind'] == entry['metricKind'] and
                metric['valueType'] == entry['valueType'])

    inclusions = config.INCLUSIONS
    if "include_all" in inclusions and inclusions["include_all"] == config.ALL:
        return True
    if 'metricKinds' in inclusions:
        for inclusion in inclusions['metricKinds']:
            if kind_and_value_match(inclusion):
                return True
    if 'metricTypes' in inclusions:
        for inclusion in inclusions['metricTypes']:
            if metric['type'].find(inclusion['metricType']) != -1:
                return True
    if 'metricTypeGroups' in inclusions:
        for inclusion in inclusions['metricTypeGroups']:
            if metric['type'].find(inclusion['metricTypeGroup']) != -1:
                logging.debug("including based on metricTypeGroups {},{}".format(metric['type'],inclusion['metricTypeGroup']))
                return True

    exclusions = config.EXCLUSIONS
    if "exclude_all" in exclusions and exclusions["exclude_all"] == config.ALL:
        return False
    if 'metricKinds' in exclusions:
        for exclusion in exclusions['metricKinds']:
            if kind_and_value_match(exclusion):
                return False
    if 'metricTypes' in exclusions:
        for exclusion in exclusions['metricTypes']:
            if metric['type'].find(exclusion['metricType']) != -1:
                return False
    if 'metricTypeGroups' in exclusions:
        for exclusion in exclusions['metricTypeGroups']:
            if metric['type'].find(exclusion['metricTypeGroup']) != -1:
                return False
    # Neither list matched: include by default.
    return True
def get_metrics(project_id, next_page_token):
    """ List the project's metricDescriptors (one page) via
    https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors/list
    """
    service = build('monitoring', 'v3', cache_discovery=True)
    project_name = 'projects/{project_id}'.format(project_id=project_id)
    descriptors = service.projects().metricDescriptors().list(
        name=project_name,
        pageSize=config.PAGE_SIZE,
        pageToken=next_page_token
    ).execute()
    logging.debug("project_id: {}, size: {}".format(
        project_id,
        len(descriptors["metricDescriptors"])
    )
    )
    return descriptors
def get_and_publish_metrics(message_to_publish, metadata):
    """ Publish the direct JSON results of each metricDescriptor as a separate
    Pub/Sub message, paging through the Monitoring API until exhausted.

    Returns a dict of counters: msgs_published, msgs_excluded and
    metrics_count_from_api.
    """
    msgs_published = 0
    msgs_excluded = 0
    metrics_count_from_api = 0
    next_page_token = ""
    project_id = message_to_publish["project_id"]
    while True:
        bq_stats_rows = []
        pubsub_batch = []
        metric_list = get_metrics(project_id, next_page_token)
        descriptors = metric_list['metricDescriptors']
        metrics_count_from_api += len(descriptors)
        for descriptor in descriptors:
            metadata["payload"] = '{}'.format(json.dumps(descriptor))
            metadata["error_msg_cnt"] = 0
            message_to_publish["metric"] = descriptor
            if check_exclusions(descriptor):
                pubsub_batch.append(get_message_for_publish_metric(
                    message_to_publish, metadata
                ))
                metadata["msg_written_cnt"] = 1
                metadata["msg_without_timeseries"] = 0
                msgs_published += 1
            else:
                msgs_excluded += 1
                metadata["msg_written_cnt"] = 0
                metadata["msg_without_timeseries"] = 1
            # build a list of stats messages to write to BigQuery
            if config.WRITE_BQ_STATS_FLAG:
                bq_stats_rows.append(build_bigquery_stats_message(
                    message_to_publish, metadata
                ))
        # Write to pubsub if there is 1 or more
        publish_metrics(pubsub_batch)
        # write the list of stats messages to BigQuery
        if config.WRITE_BQ_STATS_FLAG:
            write_to_bigquery(bq_stats_rows)
        if "nextPageToken" in metric_list:
            next_page_token = metric_list["nextPageToken"]
        else:
            break
    return {
        "msgs_published": msgs_published,
        "msgs_excluded": msgs_excluded,
        "metrics_count_from_api": metrics_count_from_api,
    }
def write_stats(stats, stats_project_id, batch_id):
    """ Write 3 custom monitoring metrics (msgs-published, msgs-excluded,
    metrics-from-api) to the Monitoring API.
    """
    logging.debug("write_stats: {}".format(json.dumps(stats)))
    service = build('monitoring', 'v3', cache_discovery=True)
    project_name = 'projects/{project_id}'.format(
        project_id=app_identity.get_application_id()
    )
    end_time_str = datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%fZ')
    # One timeSeries body is built once and re-used for all three metrics by
    # overwriting the metric type and the point value between calls.
    body = {
        "timeSeries": [
            {
                "metric": {
                    "type": "custom.googleapis.com/stackdriver-monitoring-export/msgs-published",
                    "labels": {
                        "batch_id": batch_id,
                        "metrics_project_id": stats_project_id
                    }
                },
                "resource": {
                    "type": "generic_node",
                    "labels": {
                        "project_id": app_identity.get_application_id(),
                        "location": "us-central1-a",
                        "namespace": "stackdriver-metric-export",
                        "node_id": "list-metrics"
                    }
                },
                "metricKind": "GAUGE",
                "valueType": "INT64",
                "points": [
                    {
                        "interval": {
                            "endTime": end_time_str
                        },
                        "value": {
                            "int64Value": stats["msgs_published"]
                        }
                    }
                ]
            }
        ]
    }
    metrics = service.projects().timeSeries().create(
        name=project_name,
        body=body
    ).execute()
    logging.debug("wrote a response is {}".format(json.dumps(metrics, sort_keys=True, indent=4)))
    for metric_type, point_value in (
            ("custom.googleapis.com/stackdriver-monitoring-export/msgs-excluded",
             stats["msgs_excluded"]),
            ("custom.googleapis.com/stackdriver-monitoring-export/metrics-from-api",
             stats["metrics_count_from_api"])):
        body["timeSeries"][0]["metric"]["type"] = metric_type
        body["timeSeries"][0]["points"][0]["value"]["int64Value"] = point_value
        metrics = service.projects().timeSeries().create(
            name=project_name,
            body=body
        ).execute()
        logging.debug("response is {}".format(json.dumps(metrics, sort_keys=True, indent=4)))
def build_bigquery_stats_message(metric, metadata):
    """ Assemble one row for the BigQuery stats table.

    ``metric`` is the outgoing message dict (its ["metric"]["type"] is
    recorded); ``metadata`` carries the batch bookkeeping counters.
    Returns a ``{"json": row}`` dict suitable for tabledata().insertAll.
    """
    processing_end_time_str = datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%fZ')
    row = {
        "app_name": "list_metrics",
        "batch_id": metadata["batch_id"],
        "message_id": metadata["message_id"],
        "metric_type": metric["metric"]["type"],
        "error_msg_cnt": metadata["error_msg_cnt"],
        "msg_written_cnt": metadata["msg_written_cnt"],
        "msg_without_timeseries": metadata["msg_without_timeseries"],
        "payload": metadata["payload"],
        "batch_start_time": metadata["batch_start_time"],
        "processing_end_time": processing_end_time_str
    }
    return {"json": row}
def write_to_bigquery(json_row_list):
    """ Stream rows into the BigQuery stats table using the insertAll method
    https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll

    Returns the API response, or None when there was nothing to write.
    """
    if not json_row_list:
        logging.debug("No BigQuery records to write")
        return None
    bigquery = build('bigquery', 'v2', cache_discovery=True)
    body = {
        "kind": "bigquery#tableDataInsertAllRequest",
        "skipInvalidRows": "false",
        "rows": json_row_list
    }
    response = bigquery.tabledata().insertAll(
        projectId=app_identity.get_application_id(),
        datasetId=config.BIGQUERY_DATASET,
        tableId=config.BIGQUERY_STATS_TABLE,
        body=body
    ).execute()
    bq_msgs_with_errors = 0
    if "insertErrors" in response:
        if len(response["insertErrors"]) > 0:
            logging.error("Error: {}".format(response))
            bq_msgs_with_errors = len(response["insertErrors"])
    else:
        logging.debug("By amazing luck, there are no errors, response = {}".format(response))
    logging.debug("bq_msgs_written: {}".format(bq_msgs_with_errors))
    return response
def write_input_parameters_to_bigquery(project_id, metadata, msg):
    """ Write one row to the BigQuery input parameters table using the
    googleapiclient and the streaming insertAll method
    https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll
    """
    bigquery = build('bigquery', 'v2', cache_discovery=True)
    row = {
        "start_time": msg["start_time"],
        "end_time": msg["end_time"],
        "aggregation_alignment_period": msg["aggregation_alignment_period"],
        "message_id": metadata["message_id"],
        "project_list": {
            "project_id": [
                project_id
            ]
        },
        "batch_id": metadata["batch_id"],
        "batch_start_time": metadata["batch_start_time"]
    }
    body = {
        "kind": "bigquery#tableDataInsertAllRequest",
        "skipInvalidRows": "false",
        "rows": [{"json": row}]
    }
    response = bigquery.tabledata().insertAll(
        projectId=app_identity.get_application_id(),
        datasetId=config.BIGQUERY_DATASET,
        tableId=config.BIGQUERY_PARAMS_TABLE,
        body=body
    ).execute()
    bq_msgs_with_errors = 0
    if "insertErrors" in response:
        if len(response["insertErrors"]) > 0:
            logging.error("Error: {}".format(response))
            bq_msgs_with_errors = len(response["insertErrors"])
    else:
        logging.debug("By amazing luck, there are no errors, response = {}".format(response))
    logging.debug("bq_msgs_written: {}".format(bq_msgs_with_errors))
    return response
class ReceiveMessage(webapp2.RequestHandler):
    """ Handle the Pub/Sub push messages
    """
    def post(self):
        """ Receive the Pub/Sub message via POST
        Validate the input and then process the message
        """
        logging.debug("received message")
        try:
            if not self.request.body:
                raise ValueError("No request body received")
            envelope = json.loads(self.request.body.decode('utf-8'))
            logging.debug("Raw pub/sub message: {}".format(envelope))

            if "message" not in envelope:
                raise ValueError("No message in envelope")

            # Bugfix: default these so the metadata dict below cannot hit a
            # NameError when the envelope omits messageId or publishTime.
            message_id = None
            publish_time = None
            if "messageId" in envelope["message"]:
                logging.debug("messageId: {}".format(envelope["message"]["messageId"]))
                message_id = envelope["message"]["messageId"]
            if "publishTime" in envelope["message"]:
                publish_time = envelope["message"]["publishTime"]

            if "data" not in envelope["message"]:
                raise ValueError("No data in message")
            payload = base64.b64decode(envelope["message"]["data"])
            logging.debug('payload: {} '.format(payload))
            data = json.loads(payload)
            logging.debug('data: {} '.format(data))

            # Parameters to forward in the outgoing Pub/Sub message.
            message_to_publish = {}

            # if the pubsub PUBSUB_VERIFICATION_TOKEN isn't included or doesn't match, don't continue
            if "token" not in data:
                raise ValueError("token missing from request")
            if not data["token"] == config.PUBSUB_VERIFICATION_TOKEN:
                raise ValueError("token from request doesn't match, received: {}".format(data["token"]))

            # if the project has been passed in, use that, otherwise use default project of App Engine app
            if "project_id" not in data:
                # Bugfix: was "project_id = project_id=app_identity...".
                project_id = app_identity.get_application_id()
            else:
                project_id = data["project_id"]
            message_to_publish["project_id"] = project_id

            # if the alignment_period is supplied, use that, otherwise use default
            if "aggregation_alignment_period" not in data:
                aggregation_alignment_period = config.AGGREGATION_ALIGNMENT_PERIOD
            else:
                aggregation_alignment_period = data["aggregation_alignment_period"]
            # Bugfix: the pattern previously allowed repeated trailing 's'
            # characters ("\d{1,}s+$"); exactly one is required.
            matched = re.match(r"^\d+s$", aggregation_alignment_period)
            if not matched:
                raise ValueError("aggregation_alignment_period needs to be digits followed by an 's' such as 3600s, received: {}".format(aggregation_alignment_period))
            alignment_seconds = int(aggregation_alignment_period[:-1])
            if alignment_seconds < 60:
                raise ValueError("aggregation_alignment_period needs to be more than 60s, received: {}".format(aggregation_alignment_period))
            message_to_publish["aggregation_alignment_period"] = aggregation_alignment_period

            # get the App Engine default bucket name to store a GCS file with last end_time
            bucket_name = os.environ.get('BUCKET_NAME',
                                         app_identity.get_default_gcs_bucket_name())

            # Calculate the end_time first: it must be fixed here so that all
            # metrics in the batch share the same end_time.
            if "end_time" not in data:
                end_time_str = datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%fZ')
            else:
                end_time_str = data["end_time"]
                if not check_date_format(end_time_str):
                    raise ValueError("end_time needs to be in the format 2019-02-08T14:00:00.311635Z, received: {}".format(end_time_str))
            message_to_publish["end_time"] = end_time_str

            # if the start_time isn't supplied, use the previous end_time
            sent_in_start_time_flag = False
            if "start_time" not in data:
                start_time_str = get_last_end_time(project_id, bucket_name)
                # if the file hasn't been found, then start 1 alignment period in the past
                if not start_time_str:
                    start_time_str = set_last_end_time(project_id, bucket_name, end_time_str, (alignment_seconds * -1))
                logging.debug("start_time_str: {}, end_time_str: {}".format(start_time_str, end_time_str))
            else:
                sent_in_start_time_flag = True
                start_time_str = data["start_time"]
                if not check_date_format(start_time_str):
                    raise ValueError("start_time needs to be in the format 2019-02-08T14:00:00.311635Z, received: {}".format(start_time_str))
            message_to_publish["start_time"] = start_time_str

            # Create a unique identifier for this batch
            batch_id = get_batch_id()
            logging.debug("batch_id: {}".format(batch_id))

            # Publish the messages to Pub/Sub
            logging.info("Running with input parameters - {}".format(json.dumps(message_to_publish, sort_keys=True, indent=4)))
            metadata = {
                "batch_id": batch_id,
                "message_id": message_id,
                "batch_start_time": publish_time
            }
            if config.WRITE_BQ_STATS_FLAG:
                write_input_parameters_to_bigquery(project_id, metadata, message_to_publish)

            stats = get_and_publish_metrics(message_to_publish, metadata)
            logging.debug("Stats are {}".format(json.dumps(stats)))

            # Persist the end_time for a subsequent run, but only if the
            # start_time was not sent in: a supplied start_time is an ad hoc
            # run and must not move the stored checkpoint.
            if not sent_in_start_time_flag:
                set_last_end_time(project_id, bucket_name, end_time_str, 1)

            # Write the stats to custom monitoring metrics
            if config.WRITE_MONITORING_STATS_FLAG:
                write_stats(stats, project_id, batch_id)
            self.response.write(stats)
        except KeyError as ke:
            logging.error("Key Error: {}".format(ke))
            self.response.write(ke)
        except ValueError as ve:
            logging.error("Value Error: {}".format(ve))
            self.response.write(ve)
        except Exception as e:
            logging.error("Error: {}".format(e))
            self.response.write(e)
        # NOTE(review): status 200 is returned even after errors, so Pub/Sub
        # push will not retry failed messages — preserved from the original;
        # confirm this is intended.
        self.response.status = 200
# WSGI application: routes Cloud Pub/Sub push deliveries to the handler class
# above. The /_ah/push-handlers/ prefix is the App Engine push endpoint path.
app = webapp2.WSGIApplication([
    ('/_ah/push-handlers/receive_message', ReceiveMessage)
], debug=True)
| |
"""
A parser for the SEED biochemistry modules that are available on Github
at https://github.com/ModelSEED/ModelSEEDDatabase. We have also included
them in our repo as a submodule.
We parse compounds from the compounds file in Biochemistry. Locations
are currently hardcoded because the ModelSeedDirectory does not contain
a mapping for compartments (the mapping files do not have the integers
used in the reactions file!).
"""
import copy
import os
import re
import sys
import io
import PyFBA
# Root of the local ModelSEEDDatabase checkout. Every parser function below
# reads its data files relative to this directory, so a missing or invalid
# setting aborts the import immediately with an explanatory message.
MODELSEED_DIR = ""
if 'ModelSEEDDatabase' in os.environ:
    MODELSEED_DIR = os.environ['ModelSEEDDatabase']
else:
    sys.stderr.write("Please ensure that you install the Model SEED Database somewhere, and set the environment " +
                     "variable ModelSEEDDatabase to point to that directory.\n" +
                     " See INSTALLATION.md for more information\n")
    sys.exit(-1)
# catches the case where the variable exists but is set to an empty string
if not MODELSEED_DIR:
    sys.stderr.write("The ModelSEEDDatabase environment variable is not set.\n")
    sys.stderr.write("Please install the ModelSEEDDatabase, set the variable, and try again")
    sys.exit(-1)
if not os.path.exists(MODELSEED_DIR):
    sys.stderr.write("The MODEL SEED directory: {} does not exist.\n".format(MODELSEED_DIR))
    sys.stderr.write("Please check your installation.\n")
    sys.exit(-1)
def template_reactions(modeltype='microbial'):
    """
    Load the template reactions to adjust the model. Returns a hash of some altered parameters for the model
    :param modeltype: which type of model to load e.g. GramNegative, GramPositive, Microbial
    :type modeltype: str
    :return: A hash of the new model parameters that should be used to update the reactions object
    :rtype: dict
    :raises NotImplementedError: if modeltype is not one of the known templates
    :raises IOError: if the template file is missing from the Model SEED directory
    """
    # Map each accepted model type (lower cased) to its template reactions file.
    templates = {
        'microbial': "Templates/Microbial/Reactions.tsv",
        'gramnegative': "Templates/GramNegative/Reactions.tsv",
        'gram_negative': "Templates/GramNegative/Reactions.tsv",
        'grampositive': "Templates/GramPositive/Reactions.tsv",
        'gram_positive': "Templates/GramPositive/Reactions.tsv",
        'mycobacteria': "Templates/Mycobacteria/Reactions.tsv",
        'plant': "Templates/Plant/Reactions.tsv",
    }
    try:
        inputfile = templates[modeltype.lower()]
    except KeyError:
        # The original message interpolated the (still empty) file name here,
        # which produced an unhelpful error; report the offending modeltype.
        raise NotImplementedError("Parsing data for model type " + modeltype + " has not been implemented!")
    if not os.path.exists(os.path.join(MODELSEED_DIR, inputfile)):
        raise IOError(os.path.join(MODELSEED_DIR, inputfile) +
                      " was not found. Please check your model SEED directory (" + MODELSEED_DIR + ")")
    new_enz = {}
    with open(os.path.join(MODELSEED_DIR, inputfile), 'r') as f:
        for l in f:
            if l.startswith('id'):
                # skip the header row
                continue
            p = l.strip().split("\t")
            # p[0]: reaction id, p[2]: direction, last column: "|" separated
            # identifiers stored as the enzymes set
            new_enz[p[0]] = {}
            new_enz[p[0]]['direction'] = p[2]
            new_enz[p[0]]['enzymes'] = set(p[-1].split("|"))
    return new_enz
def compounds(compounds_file=None):
    """
    Load the compounds mapping. This maps from cpd id to name (we use
    the name in our reactions, but use the cpd id when parsing the model
    seed database to avoid ambiguities).
    Optionally, you can provide a compounds file. If not, the default
    in MODELSEED_DIR/Biochemistry/compounds.master.tsv will be used.
    :param compounds_file: An optional filename of a compounds file to parse
    :type compounds_file: str
    :return: A hash of compounds with str(compound) as the key and the compound object as the value
    :rtype: dict
    """
    cpds = {}
    if not compounds_file:
        compounds_file = os.path.join(MODELSEED_DIR, 'Biochemistry/compounds.master.tsv')
    try:
        with open(compounds_file, 'r') as fin:
            for lineno, line in enumerate(fin):
                # the first line is the column header
                if lineno == 0:
                    continue
                cols = line.strip().split("\t")
                cpd = PyFBA.metabolism.Compound(cols[2], '')
                cpd.model_seed_id = cols[0]
                cpd.abbreviation = cols[1]
                cpd.formula = cols[3]
                cpd.mw = cols[4]
                key = str(cpd)
                # some compounds (like D-Glucose and Fe2+) appear more than
                # once; keep the first object and remember the extra seed ids
                if key in cpds:
                    cpds[key].alternate_seed_ids.add(cols[0])
                else:
                    cpds[key] = cpd
    except IOError as e:
        sys.exit("There was an error parsing " +
                 compounds_file + "\n" + "I/O error({0}): {1}".format(e.errno, e.strerror))
    return cpds
def location():
    """Parse or return the codes for the locations. The ModelSEEDDatabase
    uses numeric codes, and ships a compartments file, but the two do not
    match up, so the mapping is hardcoded here. Keep this function as the
    single place to rewrite if the compartments file is ever fixed.
    :return: A dict mapping location numeric IDs to string IDs
    :rtype: dict
    """
    global all_locations
    # 0 -> cytoplasmic (c), 1 -> extracellular (e), 2 -> chloroplast (h)
    all_locations = dict(zip(('0', '1', '2'), ('c', 'e', 'h')))
    return all_locations
def reactions(organism_type="", rctf='Biochemistry/reactions.master.tsv', verbose=False):
    """
    Parse the reaction information in Biochemistry/reactions.master.tsv
    One reaction ID is associated with one equation and thus many
    compounds and parts.
    If the boolean verbose is set we will print out error/debugging
    messages.
    You can supply an alternative reactions file (rctf) if you
    don't like the default.
    :param organism_type: The type of organism, eg. microbial, gram_negative, gram_positive
    :type organism_type: str
    :param rctf: The optional reaction file to provide
    :type rctf: str
    :param verbose: Print more output
    :type verbose: bool
    :return: Two components, a dict of the reactions and a dict of all the compounds used in the reactions.
    :rtype: dict, dict
    """
    locations = location()
    cpds = compounds()
    # index every compound by its primary and alternate model seed ids so the
    # ids appearing in reaction equations can be resolved to compound objects
    cpds_by_id = {}
    for c in cpds:
        cpds_by_id[cpds[c].model_seed_id] = cpds[c]
        for asi in cpds[c].alternate_seed_ids:
            cpds_by_id[asi] = cpds[c]
    all_reactions = {}
    # matches one "(quantity) compound[location]" term of an equation;
    # compiled once here instead of on every line of the reactions file
    cmpd_term = re.compile(r'\(([\d\.e-]+)\)\s+(.*?)\[(\d+)\]')
    try:
        with open(os.path.join(MODELSEED_DIR, rctf), 'r') as rxnf:
            for l in rxnf:
                if l.startswith('id'):
                    # ignore the header line
                    continue
                if l.startswith("#"):
                    # ignore any comment lines
                    continue
                pieces = l.strip().split("\t")
                if len(pieces) < 20:
                    sys.stderr.write("ERROR PARSING REACTION INFO: " + l)
                    continue
                # NOTE(review): pieces[18] is a string, so `pieces[18] == True`
                # can never be true; only the "MI:" test is effective here.
                # Kept as-is to preserve behavior — the obsolete-reaction check
                # presumably wants to compare against the column's string value.
                if pieces[18] == True or "MI:" in pieces[17]:
                    # skip any reactions that are obsolete or any reactions that
                    # are invalid due to mass imbalance, charge imbalance, or
                    # invalid compound format
                    continue
                rid = pieces[0]
                rxn = pieces[6]
                # normalize the textual null markers to real None values
                for i in range(len(pieces)):
                    if pieces[i] == "none" or pieces[i] == "null":
                        pieces[i] = None
                if pieces[14]:
                    deltaG = float(pieces[14])
                else:
                    deltaG = 0.0
                if pieces[15]:
                    deltaG_error = float(pieces[15])
                else:
                    deltaG_error = 0.0
                # we need to split the reaction, but different reactions
                # have different splits! "Not found" is the sentinel that
                # survives the loop when no separator matched.
                separator = ""
                for separator in [" <=> ", " => ", " <= ", " = ", " < ", " > ", "Not found"]:
                    if separator in rxn:
                        break
                if separator == "Not found":
                    if verbose:
                        sys.stderr.write("WARNING: Could not find a separator in " + rxn +
                                         ". This reaction was skipped. Please check it\n")
                    continue
                left, right = rxn.split(separator)
                left = left.strip()
                right = right.strip()
                # (a disabled `if False:` sanity check that rejected one-sided
                # equations used to live here; it was dead code and was removed)
                # create a new reaction object to hold all the information ...
                r = PyFBA.metabolism.Reaction(rid)
                r.deltaG = deltaG
                r.deltaG_error = deltaG_error
                if pieces[5] != '0':
                    r.is_transport = True
                all_reactions[rid] = r
                r.direction = pieces[9]
                # we have to rewrite the equation to accomodate
                # the proper locations
                newleft = []
                newright = []
                # deal with the compounds on the left side of the equation
                m = cmpd_term.findall(left)
                if m == [] and verbose:
                    sys.stderr.write("ERROR: Could not parse the compounds" + " on the left side of the reaction " +
                                     rid + ": " + rxn + "\n")
                for p in m:
                    (q, cmpd, locval) = p
                    if locval in locations:
                        loc = locations[locval]
                    else:
                        if verbose:
                            sys.stderr.write("WARNING: Could not get a location " + " for " + locval + "\n")
                        loc = locval
                    # we first look up to see whether we have the compound
                    # and then we need to create a new compound with the
                    # appropriate location
                    if cmpd in cpds_by_id:
                        nc = PyFBA.metabolism.Compound(cpds_by_id[cmpd].name, loc)
                    else:
                        if verbose:
                            sys.stderr.write("ERROR: Did not find " + cmpd + " in the compounds file.\n")
                        nc = PyFBA.metabolism.Compound(cmpd, loc)
                    ncstr = str(nc)
                    if ncstr in cpds:
                        nc = copy.copy(cpds[ncstr])
                    nc.add_reactions({rid})
                    cpds[ncstr] = nc
                    r.add_left_compounds({nc})
                    r.set_left_compound_abundance(nc, float(q))
                    newleft.append("(" + str(q) + ") " + nc.name + "[" + loc + "]")
                # deal with the right side of the equation
                m = cmpd_term.findall(right)
                if m == [] and verbose:
                    sys.stderr.write("ERROR: Could not parse the compounds on the right side of the reaction " +
                                     rid + ": " + rxn + " >>" + right + "<<\n")
                for p in m:
                    (q, cmpd, locval) = p
                    if locval in locations:
                        loc = locations[locval]
                    else:
                        if verbose:
                            sys.stderr.write("WARNING: Could not get a location " + " for " + locval + "\n")
                        loc = locval
                    # we first look up to see whether we have the compound
                    # and then we need to create a new compound with the
                    # appropriate location
                    if cmpd in cpds_by_id:
                        nc = PyFBA.metabolism.Compound(cpds_by_id[cmpd].name, loc)
                    else:
                        if verbose:
                            sys.stderr.write("ERROR: Did not find " + cmpd + " in the compounds file.\n")
                        nc = PyFBA.metabolism.Compound(cmpd, loc)
                    ncstr = str(nc)
                    if ncstr in cpds:
                        nc = copy.copy(cpds[ncstr])
                    nc.add_reactions({rid})
                    cpds[ncstr] = nc
                    r.add_right_compounds({nc})
                    r.set_right_compound_abundance(nc, float(q))
                    newright.append("(" + str(q) + ") " + nc.name + "[" + loc + "]")
                # store the rewritten, location-annotated equation
                r.equation = " + ".join(newleft) + " <=> " + " + ".join(newright)
                all_reactions[rid] = r
    except IOError as e:
        sys.exit("There was an error parsing " + rctf + "\n" + "I/O error({0}): {1}".format(e.errno, e.strerror))
    # finally, if we need to adjust the organism type based on Template reactions, we shall
    if organism_type:
        new_rcts = template_reactions(organism_type)
        for r in new_rcts:
            if r in all_reactions:
                all_reactions[r].direction = new_rcts[r]['direction']
                all_reactions[r].enzymes = new_rcts[r]['enzymes']
    return cpds, all_reactions
def complexes(cf="SOLRDump/TemplateReactions.tsv", verbose=False):
    """
    Connection between complexes and reactions. A complex can be
    involved in many reactions.
    In addition, many complexes are involved in one reaction, so we have
    a many:many relationship here
    Read the complex file and return a hash of the complexes where
    key is the complex id and the value is a set of reactions that the
    complex is involved in.
    You can provide an optional complexes file (cf) if you don't like
    the default!
    :param cf: An optional complexes file name
    :type cf: str
    :param verbose: Print more output
    :type verbose: bool
    :return: A dict of the complexes where the key is the complex id and the value is the set of reactions
    :rtype: dict
    """
    cplxes = {}
    try:
        # io.open() to enable the encoding and errors arguments when using Python2
        # io.open() will read lines as unicode objects instead of str objects
        # In Python2, unicode objects are equivalent to Python3 str objects
        with io.open(os.path.join(MODELSEED_DIR, cf), 'r', encoding='utf-8', errors='replace') as rin:
            for l in rin:
                # If using Python2, must convert unicode object to str object
                if sys.version_info.major == 2:
                    l = l.encode('utf-8', 'replace')
                if l.startswith("#") or l.startswith('id'):
                    # ignore any comment lines
                    continue
                p = l.strip().split("\t")
                # rows are expected to have at least 30 tab-separated columns
                if len(p) < 30:
                    if verbose:
                        sys.stderr.write("WARNING: Malformed line in " + cf + ": " + l + "\n")
                    continue
                # column 28 holds the complex list; empty means no complexes
                if p[28] == "":
                    continue
                # column 28 is ';' separated; p[1] is recorded as the reaction
                # id for each complex (presumably — verify against the TSV header)
                for cmplx in p[28].split(';'):
                    if cmplx not in cplxes:
                        cplxes[cmplx] = set()
                    cplxes[cmplx].add(p[1])
    except IOError as e:
        sys.stderr.write("There was an error parsing {}\n".format(os.path.join(MODELSEED_DIR, cf)))
        sys.stderr.write("I/O error({0}): {1}\n".format(e.errno, e.strerror))
        sys.exit(-1)
    return cplxes
def roles_ec(rf="SOLRDump/ComplexRoles.tsv"):
    """
    Read the roles and EC numbers and return a dict where the key is the
    role name or EC number and the value is the set of complex IDs that
    the role is involved in.
    One role or EC can be involved in many complexes.
    You can provide an alternate roles file (rf) if you don't like the
    default.
    :param rf: an alternate roles file
    :type rf: str
    :return: A dict of role name / EC number to the complex ids it is involved with
    :rtype: dict
    """
    rles_ec = {}
    # EC numbers look like four dot-separated groups of digits/dashes
    ec_pattern = re.compile(r'[\d\-]+\.[\d\-]+\.[\d\-]+\.[\d\-]+')
    try:
        with open(os.path.join(MODELSEED_DIR, rf), 'r') as fin:
            for line in fin:
                # skip comments and the header row
                if line.startswith("#") or line.startswith('complex_id'):
                    continue
                cols = line.strip().split("\t")
                # cols[5] is the role name, cols[0] the complex id
                rles_ec.setdefault(cols[5], set()).add(cols[0])
                # also index any EC number embedded anywhere in the line
                for ecno in ec_pattern.findall(line):
                    rles_ec.setdefault(ecno, set()).add(cols[0])
    except IOError as e:
        sys.exit("There was an error parsing " + rf + "\n" + "I/O error({0}): {1}".format(e.errno, e.strerror))
    return rles_ec
def roles(rf="SOLRDump/ComplexRoles.tsv"):
    """
    Read the complex/role mapping and return a dict keyed by role name
    whose values are the sets of complex IDs the role is involved in.
    One role can be involved in many complexes.
    You can provide an alternate roles file (rf) if you don't like the
    default.
    :param rf: an alternate roles file
    :type rf: str
    :return: A dict of role name to the complex ids the role is involved with
    :rtype: dict
    """
    rles = {}
    try:
        with open(os.path.join(MODELSEED_DIR, rf), 'r') as fin:
            for line in fin:
                # skip comments and the header row
                if line.startswith("#") or line.startswith('complex_id'):
                    continue
                cols = line.strip().split("\t")
                # cols[5] is the role name, cols[0] the complex id
                rles.setdefault(cols[5], set()).add(cols[0])
    except IOError as e:
        sys.exit("There was an error parsing " + rf + "\n" + "I/O error({0}): {1}".format(e.errno, e.strerror))
    return rles
def enzymes(verbose=False):
    """
    Convert each of the roles and complexes into a set of enzymes, and
    connect them to reactions. Returns just the enzyme objects.
    You probably want compounds_reactions_enzymes; this is partly here as
    a test case to make sure that enzymes and complexes play well together.
    :param verbose: Print more output
    :type verbose: bool
    :return: A dict with complex id as key and Enzyme object as value
    """
    role_to_complexes = roles()
    complex_to_reactions = complexes()
    enzs = {}
    cpds, rcts = reactions()
    # attach every role (and any EC numbers embedded in its name) to each
    # complex it participates in, creating Enzyme objects on first sight
    for rolename in role_to_complexes:
        for complexid in role_to_complexes[rolename]:
            if complexid not in complex_to_reactions:
                if verbose:
                    sys.stderr.write("WARNING: " + complexid + " is not in the complexes\n")
                continue
            if complexid not in enzs:
                enzs[complexid] = PyFBA.metabolism.Enzyme(complexid)
            enzs[complexid].add_roles({rolename})
            for ecno in re.findall(r'[\d\-]+\.[\d\-]+\.[\d\-]+\.[\d\-]+', rolename):
                enzs[complexid].add_ec(ecno)
    # link complexes to the reactions we actually parsed, in both directions
    for complexid in complex_to_reactions:
        if complexid not in enzs:
            if verbose:
                sys.stderr.write("WARNING: No roles found that are part of complex " + complexid + "\n")
            continue
        for reactid in complex_to_reactions[complexid]:
            if reactid in rcts:
                enzs[complexid].add_reaction(reactid)
                rcts[reactid].add_enzymes({complexid})
    return enzs
def compounds_reactions_enzymes(organism_type='', verbose=False):
    """
    Convert each of the roles and complexes into a set of enzymes, and
    connect them to reactions.
    We return three dicts — the compounds, the reactions, and the enzymes.
    See the individual methods for the dicts that we return!
    :param organism_type: The type of organism, eg. Microbial, Gram_positive, Gram_negative
    :type organism_type: str
    :param verbose: Print more output
    :type verbose: bool
    :return: The compounds, the reactions, and the enzymes in that order
    :rtype: dict of Compound, dict of Reaction, dict of Enzyme
    """
    role_to_complexes = roles()
    complex_to_reactions = complexes()
    cpds, rcts = reactions(organism_type, verbose=verbose)
    enzs = {}
    # attach every role (and any EC numbers embedded in its name) to each
    # complex it participates in, creating Enzyme objects on first sight
    for rolename in role_to_complexes:
        for complexid in role_to_complexes[rolename]:
            if complexid not in complex_to_reactions:
                if verbose:
                    sys.stderr.write("WARNING: " + complexid + " is not in the complexes\n")
                continue
            if complexid not in enzs:
                enzs[complexid] = PyFBA.metabolism.Enzyme(complexid)
            enzs[complexid].add_roles({rolename})
            for ecno in re.findall(r'[\d\-]+\.[\d\-]+\.[\d\-]+\.[\d\-]+', rolename):
                enzs[complexid].add_ec(ecno)
    # link complexes to the reactions we actually parsed, in both directions
    for complexid in complex_to_reactions:
        if complexid not in enzs:
            if verbose:
                sys.stderr.write("WARNING: No roles found that are part of complex " + complexid + "\n")
            continue
        for reactid in complex_to_reactions[complexid]:
            if reactid in rcts:
                enzs[complexid].add_reaction(reactid)
                rcts[reactid].add_enzymes({complexid})
    return cpds, rcts, enzs
| |
#!/usr/bin/env python
#coding:utf8
import argparse
import os
import sys
import platform
import commands
import json
import textwrap
import xml.sax
from bs4 import BeautifulSoup
def load_translated_po_to_list(filename):
    """Parse a po/pot file into a list of JSON strings.

    Each returned element encodes one entry as
    {"msgid": ..., "msgstr": ...}.
    """
    with open(filename, 'r') as fin:
        data = fin.read()
    msglist = []
    # every entry starts with 'msgid "'; the chunk before the first entry
    # contains no 'msgstr ' and is therefore skipped by the guard below
    for target in data.split("msgid \""):
        if "msgstr " not in target:
            continue
        results = target.replace("\"\n\n", "").split("\"\nmsgstr \"")
        msglist.append(json.dumps({'msgid': results[0], 'msgstr': results[1]}))
    return msglist
def translate_single_html_from_po(htmlfile, msgs):
    """Translate one HTML file in place using parsed po entries.

    msgs is a list of JSON strings ({"msgid": ..., "msgstr": ...}) as
    produced by load_translated_po_to_list(). Every text node whose
    stripped, newline-free content equals a msgid with a non-empty msgstr
    is replaced by that msgstr, then the file is rewritten prettified.
    NOTE: Python 2 only (uses the `except Exception, e` syntax).
    """
    print('===================' + htmlfile + '===================')
    if os.path.isfile(htmlfile):
        html_doc = ''
        file = open(htmlfile,"r")
        try:
            html_doc = file.read()
        except Exception,e:
            print e.message
        finally:
            file.close()
        #print html_doc
        #soup = BeautifulSoup(html_doc,'lxml')
        soup = BeautifulSoup(html_doc,'html.parser')
        soup.encode("utf-8")
        #print soup.prettify()
        '''
        for string in soup.strings:
            print len(string.strip().replace('\n', '')), string.strip().replace('\n', '').encode('utf-8')
        '''
        # walk every node of the parse tree and try each po entry in turn;
        # the first matching entry wins for a given node
        for child in soup.descendants:
            #print(type(child.string),child.string)
            isFound = False
            for msg in msgs:
                #print msg
                s = json.loads(msg)
                #print(s['msgid'], s['msgstr'])
                if child.string is not None and s['msgstr'].strip() and s['msgid'] == child.string.strip().replace('\n', ''):
                    isFound = True
                    print('Found [%s]'%child.string.strip().replace('\n', '').encode('utf-8'))
                    child.string = s['msgstr']
                    break
            #print(type(soup.prettify()), soup.prettify(formatter="html"))
        # re-serialize the (possibly modified) tree and overwrite the file
        html_doc = ''
        html_doc = soup.prettify(formatter="html")
        #print type(html_doc)
        #print html_doc
        file = open(htmlfile, 'w')
        try:
            file.write(html_doc.encode('utf-8'))
        except Exception,e:
            print e.message
        finally:
            file.close()
def translate_html_from_po(arguments):
    """Translate every target HTML file using entries from translate.po.

    Directories are walked recursively and every file found is treated as
    HTML; plain files are translated directly.
    NOTE: Python 2 only (print statement syntax).
    """
    msgs = load_translated_po_to_list("translate.po")
    all_results = []
    for target in arguments.targets:
        if os.path.isdir(target):
            walk_results = os.walk(target)
            for p,d,files in walk_results:
                for f in files:
                    fullpath = os.path.join(p,f)
                    sysstr = platform.system()
                    # intended to normalize path separators per platform
                    if(sysstr =="Windows"):
                        fullpath.replace('\/', '\\')
                    elif(sysstr == "Linux"):
                        fullpath.replace('\\', '\/')
                    else:
                        print ("Other System tasks")
                    # NOTE(review): str.replace returns a new string; the calls
                    # above discard their result, so fullpath is unchanged
                    all_results.append(fullpath)
        elif os.path.isfile(target):
            all_results.append(target)
        else:
            print "%s is a special file (socket, FIFO, device file), pass it..." % target
    for result in all_results:
        translate_single_html_from_po(result, msgs)
def translate_html_from_po_parser(subparsers):
    """Register the translate-html-from-po subcommand.
    translate-html-from-po [file1] [file2] ... [filen]
    """
    sub = subparsers.add_parser(
        'translate-html-from-po',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="""
    Translate html from po file.
    """
    )
    sub.set_defaults(func=translate_html_from_po)
    sub.add_argument('targets', nargs='*', help="Directory or HTML file")
    return sub
def format_single_html_prettify(htmlfile):
    """Re-format one HTML file in place with BeautifulSoup's prettifier.
    NOTE: Python 2 only (uses the `except Exception, e` syntax); requires
    the lxml parser to be installed.
    """
    print('===================' + htmlfile + '===================')
    if os.path.isfile(htmlfile):
        html_doc = ''
        file = open(htmlfile,"r")
        try:
            html_doc = file.read()
        except Exception,e:
            print e.message
        finally:
            file.close()
        # parse and re-serialize the document
        soup = BeautifulSoup(html_doc,'lxml')
        soup.encode("utf-8")
        html_doc = soup.prettify(formatter="html")
        #print type(html_doc)
        #print html_doc
        file = open(htmlfile, 'w')
        try:
            file.write(html_doc.encode('utf-8'))
        except Exception,e:
            print e.message
        finally:
            file.close()
def format_html_prettify(arguments):
    """Prettify every target HTML file; directories are walked recursively.
    NOTE: Python 2 only (print statement syntax).
    """
    all_results = []
    for target in arguments.targets:
        if os.path.isdir(target):
            walk_results = os.walk(target)
            for p,d,files in walk_results:
                for f in files:
                    fullpath = os.path.join(p,f)
                    sysstr = platform.system()
                    # intended to normalize path separators per platform
                    if(sysstr =="Windows"):
                        fullpath.replace('\/', '\\')
                    elif(sysstr == "Linux"):
                        fullpath.replace('\\', '\/')
                    else:
                        print ("Other System tasks")
                    # NOTE(review): str.replace returns a new string; the calls
                    # above discard their result, so fullpath is unchanged
                    all_results.append(fullpath)
        elif os.path.isfile(target):
            all_results.append(target)
        else:
            print "%s is a special file (socket, FIFO, device file), pass it..." % target
    for result in all_results:
        format_single_html_prettify(result)
def format_html_prettify_parser(subparsers):
    """Register the format-html-prettify subcommand.
    format-html-prettify [dir1] [dir2] ... [dirn]
    """
    sub = subparsers.add_parser(
        'format-html-prettify',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="""
    format html file.
    """
    )
    sub.set_defaults(func=format_html_prettify)
    sub.add_argument('targets', nargs='*', help="Directory or HTML file")
    return sub
def load_translated_po(arguments):
    """Print every entry of the given po files, labelling each one as
    translated (non-empty msgstr) or not translated."""
    msgs = []
    for pofile in arguments.pofiles:
        if os.path.isfile(pofile):
            msgs += load_translated_po_to_list(pofile)
    for msg in msgs:
        entry = json.loads(msg)
        # NOTE: both banner lines are printed once per message, and the entry
        # appears under whichever of the two sections applies to it
        print('============================Translated==============================')
        if entry['msgstr'].strip():
            # msgstr is not empty
            print(entry['msgid'], entry['msgstr'])
        print('==========================Not Translated============================')
        if not entry['msgstr'].strip():
            # msgstr is empty
            print(entry['msgid'], entry['msgstr'])
def load_translated_po_parser(subparsers):
    """Register the load-translated-po subcommand.
    load-translated-po [file1] [file2] ... [filen]
    """
    sub = subparsers.add_parser(
        'load-translated-po',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="""
    Load the translated po file and extract msgid and msgstr.
    """
    )
    sub.set_defaults(func=load_translated_po)
    sub.add_argument('pofiles', nargs='*', help="PO file")
    return sub
def update_po_from_pot(arguments):
    """Merge pot entries with existing translations from translate.po and
    append the result to translate-update.po.

    For each msgid found in the pot files, the msgstr from translate.po is
    used when a non-empty translation exists there; otherwise the pot's
    own msgstr is kept.
    """
    #os.remove('translate-update.po')
    msgs = []
    for target in arguments.potfiles:
        if os.path.isfile(target):
            msglist = load_translated_po_to_list(target)
            msgs = msgs + msglist
    pomsgs = load_translated_po_to_list("translate.po")
    for msg in msgs:
        s = json.loads(msg)
        msgstr = s['msgstr']
        # prefer an existing non-empty translation for the same msgid
        for pomsg in pomsgs:
            pos = json.loads(pomsg)
            if pos['msgstr'].strip() and s['msgid'] == pos['msgid']:
                msgstr = pos['msgstr']
                print('Found it [%s]'%s['msgid'])
                break
        # generate translate po
        # NOTE: the output file is opened in append mode once per message and
        # never truncated (the os.remove above is commented out)
        file = open('translate-update.po', 'a')
        try:
            file.write('msgid "%s"\n' % s['msgid'])
            # NOTE(review): under Python 3 this would interpolate a bytes
            # repr; this script targets Python 2
            file.write('msgstr "%s"\n' % msgstr.encode("utf-8"))
            file.write('\n')
        finally:
            file.close()
def update_po_from_pot_parser(subparsers):
    """Register the update-po-from-pot subcommand.
    update-po-from-pot [file1] [file2] ... [filen]
    """
    sub = subparsers.add_parser(
        'update-po-from-pot',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="""
    Load the translated po file and extract msgid and msgstr.
    """
    )
    sub.set_defaults(func=update_po_from_pot)
    sub.add_argument('potfiles', nargs='*', help="POT file")
    return sub
def translate_pot_to_po(arguments):
    """Append the entries of the given pot files to translate.po, dropping
    exact duplicate entries first.
    """
    #os.remove('translate.po')
    msgs = []
    for target in arguments.potfiles:
        if os.path.isfile(target):
            msglist = load_translated_po_to_list(target)
            msgs = msgs + msglist
    newmsgs = []
    # remove dup msgid
    for msg1 in msgs:
        if msg1 not in newmsgs:
            newmsgs.append(msg1)
    for msg in newmsgs:
        # generate translate po
        # NOTE: the output file is opened in append mode once per message and
        # never truncated (the os.remove above is commented out)
        file = open('translate.po', 'a')
        try:
            s = json.loads(msg)
            file.write('msgid "%s"\n' % s['msgid'])
            file.write('msgstr "%s"\n' % s['msgstr'].encode("utf-8"))
            file.write('\n')
        finally:
            file.close()
def translate_pot_to_po_parser(subparsers):
    """Register the translate-pot-to-po subcommand.
    translate-pot-to-po [file1] [file2] ... [filen]
    """
    sub = subparsers.add_parser(
        'translate-pot-to-po',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="""
    Load the translated po file and extract msgid and msgstr.
    """
    )
    sub.set_defaults(func=translate_pot_to_po)
    sub.add_argument('potfiles', nargs='*', help="POT file")
    return sub
def find_untranslated_to_pot(arguments):
    """Run `i18ndude find-untranslated` over every target ZPT file;
    directories are walked recursively.
    NOTE: Python 2 only (print statement syntax).
    """
    parser = xml.sax.make_parser(['expat'])
    # disable external validation to make it work without network access
    parser.setFeature(xml.sax.handler.feature_external_ges, False)
    parser.setFeature(xml.sax.handler.feature_external_pes, False)
    # NOTE(review): the -s/-n flags are accepted but currently have no effect
    if arguments.silent:
        pass
    elif arguments.nosummary:
        pass
    all_results = []
    for target in arguments.targets:
        if os.path.isdir(target):
            walk_results = os.walk(target)
            for p,d,files in walk_results:
                for f in files:
                    fullpath = os.path.join(p,f)
                    sysstr = platform.system()
                    # intended to normalize path separators per platform
                    if(sysstr =="Windows"):
                        fullpath.replace('\/', '\\')
                    elif(sysstr == "Linux"):
                        fullpath.replace('\\', '\/')
                    else:
                        print ("Other System tasks")
                    # NOTE(review): str.replace returns a new string; the calls
                    # above discard their result, so fullpath is unchanged
                    all_results.append(fullpath)
        elif os.path.isfile(target):
            all_results.append(target)
        else:
            print "%s is a special file (socket, FIFO, device file), pass it..." % target
    for result in all_results:
        print(result)
        # shell out to i18ndude for the actual report
        cmd = 'i18ndude find-untranslated ' + result
        print(cmd)
        os.system(cmd)
        #status, output = commands.getstatusoutput(cmd)
        #if 0 != status:
        #    print(output)
def find_untranslated_to_pot_parser(subparsers):
    """Register the find-untranslated-to-pot subcommand.
    find-untranslated-to-pot [-s|-n] [dir1] [dir2] ... [dirn]
    """
    description = """
    Provide a list of ZPT files or directorys and I will output a report of places
    where I suspect untranslated messages, and write them to untranslated.pot,
    i.e. tags for which "i18n:translate" or "i18n:attributes" are missing.
    If you provide the -s option, the report will only contain a summary
    of errors and warnings for each file (or no output if there are no
    errors or warnings). If you provide the -n option, the report will
    contain only the errors for each file.
    """
    sub = subparsers.add_parser(
        'find-untranslated-to-pot',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=description
    )
    sub.set_defaults(func=find_untranslated_to_pot)
    sub.add_argument('-s', '--silent', action='store_true', help=(
        "The report will only contain a summary of errors and warnings for "
        "each file (or no output if there are no errors or warnings)."))
    sub.add_argument('-n', '--nosummary', action='store_true', help=(
        "The report will contain only the errors for each file."))
    sub.add_argument('targets', nargs='*', help="ZPT targets")
    return sub
def main():
    """Entry point: build all subcommand parsers, then dispatch to the
    function registered for the chosen subcommand."""
    description = """
    find_untranslated_to_pot performs tasks related to i18n.
    Call find_untranslated_to_pot with one of the listed subcommands followed by
    --help to get help for that subcommand.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent(description))
    subparsers = parser.add_subparsers(title='subcommands')
    # register every subcommand with the top-level parser
    for register in (find_untranslated_to_pot_parser,
                     translate_pot_to_po_parser,
                     update_po_from_pot_parser,
                     load_translated_po_parser,
                     format_html_prettify_parser,
                     translate_html_from_po_parser):
        register(subparsers)
    arguments = parser.parse_args(sys.argv[1:])
    # a truthy return value from the subcommand signals failure
    if arguments.func(arguments):
        sys.exit(1)
# Run the CLI only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| |
from __future__ import absolute_import
from sentry.models import UserEmail, UserOption
from sentry.testutils import APITestCase
from django.core.urlresolvers import reverse
class UserNotificationFineTuningTest(APITestCase):
    def setUp(self):
        # Two organizations and two projects (both inside self.org) let the
        # tests exercise per-project and per-organization notification options.
        self.user = self.create_user(email='a@example.com')
        self.org = self.create_organization(name='Org Name', owner=self.user)
        self.org2 = self.create_organization(name='Another Org', owner=self.user)
        self.team = self.create_team(name='Team Name', organization=self.org, members=[self.user])
        self.project = self.create_project(
            organization=self.org,
            teams=[self.team],
            name='Project Name'
        )
        self.project2 = self.create_project(
            organization=self.org,
            teams=[self.team],
            name='Another Name'
        )
        # all API requests in the tests below are made as this user
        self.login_as(user=self.user)
    def test_returns_correct_defaults(self):
        # Per-project alert options come back keyed by project id.
        UserOption.objects.create(user=self.user, project=self.project, key="mail:alert", value=1)
        url = reverse(
            'sentry-api-0-user-notifications-fine-tuning', kwargs={
                'user_id': 'me',
                'notification_type': 'alerts',
            }
        )
        resp = self.client.get(url)
        assert resp.data.get(self.project.id) == 1
        # Deploy emails are stored per organization.
        UserOption.objects.create(
            user=self.user,
            organization=self.org,
            key="deploy-emails",
            value=1)
        url = reverse(
            'sentry-api-0-user-notifications-fine-tuning', kwargs={
                'user_id': 'me',
                'notification_type': 'deploy',
            }
        )
        resp = self.client.get(url)
        assert resp.data.get(self.org.id) == 1
        # Reports use a single opt-out option (organization=None) holding the
        # list of disabled org ids; a listed org reads back as 0 here.
        UserOption.objects.create(
            user=self.user,
            organization=None,
            key="reports:disabled-organizations",
            value=[
                self.org.id])
        url = reverse(
            'sentry-api-0-user-notifications-fine-tuning', kwargs={
                'user_id': 'me',
                'notification_type': 'reports',
            }
        )
        resp = self.client.get(url)
        assert resp.data.get(self.org.id) == 0
def test_invalid_notification_type(self):
url = reverse(
'sentry-api-0-user-notifications-fine-tuning', kwargs={
'user_id': 'me',
'notification_type': 'invalid',
}
)
resp = self.client.get(url)
assert resp.status_code == 404
resp = self.client.put(url)
assert resp.status_code == 404
def test_update_invalid_project(self):
url = reverse(
'sentry-api-0-user-notifications-fine-tuning', kwargs={
'user_id': 'me',
'notification_type': 'alerts',
}
)
update = {}
update['123'] = 1
resp = self.client.put(url, data=update)
assert resp.status_code == 403
def test_saves_and_returns_alerts(self):
url = reverse(
'sentry-api-0-user-notifications-fine-tuning', kwargs={
'user_id': 'me',
'notification_type': 'alerts',
}
)
update = {}
update[self.project.id] = 1
update[self.project2.id] = 2
resp = self.client.put(url, data=update)
assert resp.status_code == 204
assert UserOption.objects.get(
user=self.user,
project=self.project,
key="mail:alert").value == 1
assert UserOption.objects.get(
user=self.user,
project=self.project2,
key="mail:alert").value == 2
update = {}
update[self.project.id] = -1
# Can return to default
resp = self.client.put(url, data=update)
assert resp.status_code == 204
assert not UserOption.objects.filter(
user=self.user,
project=self.project,
key="mail:alert").exists()
assert UserOption.objects.get(
user=self.user,
project=self.project2,
key="mail:alert").value == 2
def test_saves_and_returns_workflow(self):
url = reverse(
'sentry-api-0-user-notifications-fine-tuning', kwargs={
'user_id': 'me',
'notification_type': 'workflow',
}
)
update = {}
update[self.project.id] = 1
update[self.project2.id] = 2
resp = self.client.put(url, data=update)
assert resp.status_code == 204
assert UserOption.objects.get(
user=self.user,
project=self.project,
key="workflow:notifications").value == '1'
assert UserOption.objects.get(
user=self.user,
project=self.project2,
key="workflow:notifications").value == '2'
update = {}
update[self.project.id] = -1
# Can return to default
resp = self.client.put(url, data=update)
assert resp.status_code == 204
assert not UserOption.objects.filter(
user=self.user,
project=self.project,
key="workflow:notifications")
assert UserOption.objects.get(
user=self.user,
project=self.project2,
key="workflow:notifications").value == '2'
def test_saves_and_returns_email_routing(self):
UserEmail.objects.create(user=self.user, email='alias@example.com', is_verified=True).save()
url = reverse(
'sentry-api-0-user-notifications-fine-tuning', kwargs={
'user_id': 'me',
'notification_type': 'email',
}
)
update = {}
update[self.project.id] = 'a@example.com'
update[self.project2.id] = 'alias@example.com'
resp = self.client.put(url, data=update)
assert resp.status_code == 204
assert UserOption.objects.get(
user=self.user,
project=self.project,
key="mail:email").value == 'a@example.com'
assert UserOption.objects.get(
user=self.user,
project=self.project2,
key="mail:email").value == 'alias@example.com'
def test_email_routing_emails_must_be_verified(self):
UserEmail.objects.create(
user=self.user,
email='alias@example.com',
is_verified=False).save()
url = reverse(
'sentry-api-0-user-notifications-fine-tuning', kwargs={
'user_id': 'me',
'notification_type': 'email',
}
)
update = {}
update[self.project.id] = 'alias@example.com'
resp = self.client.put(url, data=update)
assert resp.status_code == 400
def test_email_routing_emails_must_be_valid(self):
new_user = self.create_user(email="b@example.com")
UserEmail.objects.create(user=new_user, email="alias2@example.com", is_verified=True).save()
url = reverse(
'sentry-api-0-user-notifications-fine-tuning', kwargs={
'user_id': 'me',
'notification_type': 'email',
}
)
update = {}
update[self.project2.id] = 'alias2@example.com'
resp = self.client.put(url, data=update)
assert resp.status_code == 400
def test_saves_and_returns_deploy(self):
url = reverse(
'sentry-api-0-user-notifications-fine-tuning', kwargs={
'user_id': 'me',
'notification_type': 'deploy',
}
)
update = {}
update[self.org.id] = 0
resp = self.client.put(url, data=update)
assert resp.status_code == 204
assert UserOption.objects.get(
user=self.user,
organization=self.org.id,
key="deploy-emails").value == '0'
update = {}
update[self.org.id] = 1
resp = self.client.put(url, data=update)
assert UserOption.objects.get(
user=self.user,
organization=self.org,
key="deploy-emails").value == '1'
update = {}
update[self.org.id] = -1
resp = self.client.put(url, data=update)
assert not UserOption.objects.filter(
user=self.user,
organization=self.org,
key="deploy-emails").exists()
def test_saves_and_returns_weekly_reports(self):
url = reverse(
'sentry-api-0-user-notifications-fine-tuning', kwargs={
'user_id': 'me',
'notification_type': 'reports',
}
)
update = {}
update[self.org.id] = 0
update[self.org2.id] = "0"
resp = self.client.put(url, data=update)
assert resp.status_code == 204
assert set(UserOption.objects.get(
user=self.user,
key="reports:disabled-organizations").value) == set([self.org.id, self.org2.id])
update = {}
update[self.org.id] = 1
resp = self.client.put(url, data=update)
assert set(UserOption.objects.get(
user=self.user,
key="reports:disabled-organizations").value) == set([self.org2.id])
update = {}
update[self.org.id] = 0
resp = self.client.put(url, data=update)
assert set(UserOption.objects.get(
user=self.user,
key="reports:disabled-organizations").value) == set([self.org.id, self.org2.id])
def test_enable_weekly_reports_from_default_setting(self):
url = reverse(
'sentry-api-0-user-notifications-fine-tuning', kwargs={
'user_id': 'me',
'notification_type': 'reports',
}
)
update = {}
update[self.org.id] = 1
update[self.org2.id] = "1"
resp = self.client.put(url, data=update)
assert resp.status_code == 204
assert set(UserOption.objects.get(
user=self.user,
key="reports:disabled-organizations").value) == set([])
# can disable
update = {}
update[self.org.id] = 0
resp = self.client.put(url, data=update)
assert set(UserOption.objects.get(
user=self.user,
key="reports:disabled-organizations").value) == set([self.org.id])
# re-enable
update = {}
update[self.org.id] = 1
resp = self.client.put(url, data=update)
assert set(UserOption.objects.get(
user=self.user,
key="reports:disabled-organizations").value) == set([])
def test_permissions(self):
new_user = self.create_user(email='b@example.com')
new_org = self.create_organization(name='New Org')
new_team = self.create_team(name='New Team', organization=new_org, members=[new_user])
new_project = self.create_project(
organization=new_org,
teams=[new_team],
name='New Project'
)
url = reverse(
'sentry-api-0-user-notifications-fine-tuning', kwargs={
'user_id': 'me',
'notification_type': 'reports',
}
)
update = {}
update[new_org.id] = 0
resp = self.client.put(url, data=update)
assert resp.status_code == 403
assert not UserOption.objects.filter(
user=self.user,
organization=new_org,
key="reports").exists()
url = reverse(
'sentry-api-0-user-notifications-fine-tuning', kwargs={
'user_id': 'me',
'notification_type': 'alerts',
}
)
update = {}
update[new_project.id] = 1
resp = self.client.put(url, data=update)
assert resp.status_code == 403
assert not UserOption.objects.filter(
user=self.user,
project=new_project,
key="mail:alert").exists()
| |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest import mock
from unittest.mock import MagicMock
import pytest
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import _MLFLOW_AVAILABLE, MLFlowLogger
from pytorch_lightning.loggers.mlflow import MLFLOW_RUN_NAME, resolve_tags
from tests.helpers import BoringModel
def mock_mlflow_run_creation(logger, experiment_name=None, experiment_id=None, run_id=None):
    """Patch *logger*'s mlflow client so experiment/run creation is simulated.

    The client's experiment lookup, experiment creation, and run creation are
    replaced with mocks returning the supplied values. The (mutated) logger is
    returned for convenient chaining.
    """
    fake_run = MagicMock()
    fake_run.info.run_id = run_id
    mlflow_client = logger._mlflow_client
    mlflow_client.get_experiment_by_name = MagicMock(return_value=experiment_name)
    mlflow_client.create_experiment = MagicMock(return_value=experiment_id)
    mlflow_client.create_run = MagicMock(return_value=fake_run)
    return logger
@mock.patch("pytorch_lightning.loggers.mlflow.mlflow")
@mock.patch("pytorch_lightning.loggers.mlflow.MlflowClient")
def test_mlflow_logger_exists(client, mlflow, tmpdir):
    """Test launching three independent loggers with either same or different experiment name."""
    run1 = MagicMock()
    run1.info.run_id = "run-id-1"

    run2 = MagicMock()
    run2.info.run_id = "run-id-2"

    run3 = MagicMock()
    run3.info.run_id = "run-id-3"

    # simulate non-existing experiment creation
    client.return_value.get_experiment_by_name = MagicMock(return_value=None)
    client.return_value.create_experiment = MagicMock(return_value="exp-id-1")  # experiment_id
    client.return_value.create_run = MagicMock(return_value=run1)

    logger = MLFlowLogger("test", save_dir=tmpdir)
    assert logger._experiment_id is None
    assert logger._run_id is None
    _ = logger.experiment
    assert logger.experiment_id == "exp-id-1"
    assert logger.run_id == "run-id-1"
    # BUGFIX: was `assert ....asset_called_once()` — a typo that invokes an
    # auto-created mock attribute returning a truthy MagicMock, so the check
    # always passed. Use the real mock assertion (it raises on failure).
    logger.experiment.create_experiment.assert_called_once()

    client.reset_mock(return_value=True)

    # simulate existing experiment returns experiment id
    exp1 = MagicMock()
    exp1.experiment_id = "exp-id-1"
    client.return_value.get_experiment_by_name = MagicMock(return_value=exp1)
    client.return_value.create_run = MagicMock(return_value=run2)

    # same name leads to same experiment id, but different runs get recorded
    logger2 = MLFlowLogger("test", save_dir=tmpdir)
    assert logger2.experiment_id == logger.experiment_id
    assert logger2.run_id == "run-id-2"
    assert logger2.experiment.create_experiment.call_count == 0
    # BUGFIX: same `asset_called_once` typo as above.
    logger2.experiment.create_run.assert_called_once()

    client.reset_mock(return_value=True)

    # simulate a 3rd experiment with new name
    client.return_value.get_experiment_by_name = MagicMock(return_value=None)
    client.return_value.create_experiment = MagicMock(return_value="exp-id-3")
    client.return_value.create_run = MagicMock(return_value=run3)

    # logger with new experiment name causes new experiment id and new run id to be created
    logger3 = MLFlowLogger("new", save_dir=tmpdir)
    assert logger3.experiment_id == "exp-id-3" != logger.experiment_id
    assert logger3.run_id == "run-id-3"
@mock.patch("pytorch_lightning.loggers.mlflow.mlflow")
@mock.patch("pytorch_lightning.loggers.mlflow.MlflowClient")
def test_mlflow_run_name_setting(client, mlflow, tmpdir):
    """The run_name argument must be forwarded as the MLFLOW_RUN_NAME tag."""
    expected_tags = resolve_tags({MLFLOW_RUN_NAME: "run-name-1"})

    # run_name alone is appended to the tags
    logger = MLFlowLogger("test", run_name="run-name-1", save_dir=tmpdir)
    logger = mock_mlflow_run_creation(logger, experiment_id="exp-id")
    _ = logger.experiment
    client.return_value.create_run.assert_called_with(experiment_id="exp-id", tags=expected_tags)

    # an explicit MLFLOW_RUN_NAME tag is overridden by run_name
    logger = MLFlowLogger("test", run_name="run-name-1", tags={MLFLOW_RUN_NAME: "run-name-2"}, save_dir=tmpdir)
    logger = mock_mlflow_run_creation(logger, experiment_id="exp-id")
    _ = logger.experiment
    client.return_value.create_run.assert_called_with(experiment_id="exp-id", tags=expected_tags)

    # without run_name no extra tag is appended
    logger = MLFlowLogger("test", save_dir=tmpdir)
    logger = mock_mlflow_run_creation(logger, experiment_id="exp-id")
    _ = logger.experiment
    client.return_value.create_run.assert_called_with(experiment_id="exp-id", tags=resolve_tags(None))
@mock.patch("pytorch_lightning.loggers.mlflow.mlflow")
@mock.patch("pytorch_lightning.loggers.mlflow.MlflowClient")
def test_mlflow_log_dir(client, mlflow, tmpdir):
    """The trainer must place checkpoints inside the logger's save dir."""
    # Fake client: unknown experiment, then creation of experiment + run.
    fake_run = MagicMock()
    fake_run.info.run_id = "run-id"
    fake_client = client.return_value
    fake_client.get_experiment_by_name = MagicMock(return_value=None)
    fake_client.create_experiment = MagicMock(return_value="exp-id")
    fake_client.create_run = MagicMock(return_value=fake_run)

    # Default log dir is derived from experiment name/id and run id.
    logger = MLFlowLogger("test", save_dir=tmpdir)
    assert logger.save_dir == tmpdir
    assert logger.version == "run-id"
    assert logger.name == "exp-id"

    model = BoringModel()
    trainer = Trainer(default_root_dir=tmpdir, logger=logger, max_epochs=1, limit_train_batches=1, limit_val_batches=3)
    assert trainer.log_dir == logger.save_dir
    trainer.fit(model)
    assert trainer.checkpoint_callback.dirpath == (tmpdir / "exp-id" / "run-id" / "checkpoints")
    assert set(os.listdir(trainer.checkpoint_callback.dirpath)) == {"epoch=0-step=0.ckpt"}
    assert trainer.log_dir == logger.save_dir
def test_mlflow_logger_dirs_creation(tmpdir):
    """The logger must create its folders and files in the right place."""
    if not _MLFLOW_AVAILABLE:
        pytest.xfail("test for explicit file creation requires mlflow dependency to be installed.")

    assert not os.listdir(tmpdir)
    logger = MLFlowLogger("test", save_dir=tmpdir)
    assert logger.save_dir == tmpdir
    assert set(os.listdir(tmpdir)) == {".trash"}
    run_id = logger.run_id
    exp_id = logger.experiment_id

    # Repeated experiment access must not create additional folders.
    for _ in range(2):
        _ = logger.experiment
    assert set(os.listdir(tmpdir)) == {".trash", exp_id}
    assert set(os.listdir(tmpdir / exp_id)) == {run_id, "meta.yaml"}

    class CustomModel(BoringModel):
        def training_epoch_end(self, *args, **kwargs):
            super().training_epoch_end(*args, **kwargs)
            self.log("epoch", self.current_epoch)

    model = CustomModel()
    limit_batches = 5
    trainer = Trainer(
        default_root_dir=tmpdir,
        logger=logger,
        max_epochs=1,
        limit_train_batches=limit_batches,
        limit_val_batches=limit_batches,
        log_gpu_memory=True,
    )
    trainer.fit(model)
    # Metrics, params, and checkpoints all land under the run's folder.
    assert set(os.listdir(tmpdir / exp_id)) == {run_id, "meta.yaml"}
    assert "epoch" in os.listdir(tmpdir / exp_id / run_id / "metrics")
    assert set(os.listdir(tmpdir / exp_id / run_id / "params")) == model.hparams.keys()
    assert trainer.checkpoint_callback.dirpath == (tmpdir / exp_id / run_id / "checkpoints")
    assert os.listdir(trainer.checkpoint_callback.dirpath) == [f"epoch=0-step={limit_batches - 1}.ckpt"]
@mock.patch("pytorch_lightning.loggers.mlflow.mlflow")
@mock.patch("pytorch_lightning.loggers.mlflow.MlflowClient")
def test_mlflow_experiment_id_retrieved_once(client, mlflow, tmpdir):
    """Repeated experiment access must look the experiment up only once."""
    logger = MLFlowLogger("test", save_dir=tmpdir)
    for _ in range(3):
        _ = logger.experiment
    assert logger.experiment.get_experiment_by_name.call_count == 1
@mock.patch("pytorch_lightning.loggers.mlflow.mlflow")
@mock.patch("pytorch_lightning.loggers.mlflow.MlflowClient")
def test_mlflow_logger_with_unexpected_characters(client, mlflow, tmpdir):
    """Metric names containing characters MLflow rejects must trigger a warning."""
    logger = MLFlowLogger("test", save_dir=tmpdir)
    with pytest.warns(RuntimeWarning, match="special characters in metric name"):
        logger.log_metrics({"[some_metric]": 10})
@mock.patch("pytorch_lightning.loggers.mlflow.mlflow")
@mock.patch("pytorch_lightning.loggers.mlflow.MlflowClient")
def test_mlflow_logger_with_long_param_value(client, mlflow, tmpdir):
    """Over-long hyperparameter values must be discarded with a warning."""
    logger = MLFlowLogger("test", save_dir=tmpdir)
    key = "test_param"
    value = "test" * 100
    with pytest.warns(RuntimeWarning, match=f"Discard {key}={value}"):
        logger.log_hyperparams({key: value})
@mock.patch("pytorch_lightning.loggers.mlflow.time")
@mock.patch("pytorch_lightning.loggers.mlflow.mlflow")
@mock.patch("pytorch_lightning.loggers.mlflow.MlflowClient")
def test_mlflow_logger_experiment_calls(client, mlflow, time, tmpdir):
    """The logger must forward params and metrics to the mlflow client correctly."""
    time.return_value = 1

    logger = MLFlowLogger("test", save_dir=tmpdir, artifact_location="my_artifact_location")
    logger._mlflow_client.get_experiment_by_name.return_value = None

    logger.log_hyperparams({"test": "test_param"})
    logger.experiment.log_param.assert_called_once_with(logger.run_id, "test", "test_param")

    logger.log_metrics({"some_metric": 10})
    # timestamp is time() expressed in milliseconds (1 -> 1000)
    logger.experiment.log_metric.assert_called_once_with(logger.run_id, "some_metric", 10, 1000, None)

    logger._mlflow_client.create_experiment.assert_called_once_with(
        name="test", artifact_location="my_artifact_location"
    )
| |
import logging
import select
from datetime import timedelta, datetime
from dateutil.relativedelta import relativedelta
from django.db import connections, DatabaseError
from django.db import transaction
from django.db import models
from django.conf import settings
from django.utils.timezone import now
from six import string_types
from .job import Job
from .utils import get_restricted_datetime
from .exceptions import (DequeueTimeout, InvalidBetween,
InvalidInterval, InvalidQueueName)
# Process-level cache of Queue rows, keyed by queue name (see validated_queue).
_PQ_QUEUES = {}
# Default per-job timeout in seconds; overridable via Django settings.
PQ_DEFAULT_JOB_TIMEOUT = getattr(settings, 'PQ_DEFAULT_JOB_TIMEOUT', 600)
# Whether validated_queue may serve queue rows from the in-process cache.
PQ_QUEUE_CACHE = getattr(settings, 'PQ_QUEUE_CACHE', True)
logger = logging.getLogger(__name__)
def get_failed_queue(connection='default'):
    """Return a handle to the special 'failed' queue on *connection*."""
    failed = FailedQueue.create(connection=connection)
    return failed
class _EnqueueArgs(object):
    """Simple argument and keyword argument wrapper
    for enqueue and schedule queue methods
    """
    def __init__(self, *args, **kwargs):
        # Implicit form: everything in *args/**kwargs is forwarded to the job.
        self.timeout = None
        self.result_ttl = None
        self.async = True
        self.args = args
        self.kwargs = kwargs
        # Detect explicit invocations, i.e. of the form:
        #     q.enqueue(foo, args=(1, 2), kwargs={'a': 1}, timeout=30)
        if 'args' in kwargs or 'kwargs' in kwargs:
            assert args == (), 'Extra positional arguments cannot be used when using explicit args and kwargs.'  # noqa
            self.result_ttl = kwargs.pop('result_ttl', None)
            self.timeout = kwargs.pop('timeout', None)
            self.async = kwargs.pop('async', True)
            # NOTE(review): in explicit mode an omitted args/kwargs becomes
            # None rather than ()/{} as in the implicit branch — presumably
            # Job.create tolerates both; confirm before relying on it.
            self.args = kwargs.pop('args', None)
            self.kwargs = kwargs.pop('kwargs', None)
class Queue(models.Model):
connection = None
name = models.CharField(max_length=100, primary_key=True, default='default')
default_timeout = models.PositiveIntegerField(null=True, blank=True)
cleaned = models.DateTimeField(null=True, blank=True)
scheduled = models.BooleanField(default=False,
help_text="Optimisation: scheduled tasks are slower.")
lock_expires = models.DateTimeField(default=now())
serial = models.BooleanField(default=False)
idempotent = models.BooleanField(default=False)
_async = True
_saved = False
def __unicode__(self):
return self.name
@classmethod
def create(cls,
name='default', default_timeout=None,
connection='default', scheduled=False, async=True, idempotent=False):
"""Returns a Queue ready for accepting jobs"""
queue = cls(name=cls.validated_name(name))
queue.default_timeout = default_timeout or PQ_DEFAULT_JOB_TIMEOUT
queue.connection = connection
queue.scheduled = scheduled
queue.idempotent = idempotent
queue._async = async
return queue
@classmethod
def validated_name(cls, name):
"""Ensure there is no closing parenthesis"""
if not name or not isinstance(name, string_types):
raise InvalidQueueName('%s is not a valid queue name' % str(name))
name = name.strip()
if name.lower() == 'failed':
raise InvalidQueueName("'failed' is a reserved queue name")
return name
@classmethod
def validated_queue(cls, name):
q = _PQ_QUEUES.get(name) if PQ_QUEUE_CACHE else None
created = False
if not q:
q, created = cls.objects.get_or_create(name=name)
_PQ_QUEUES[name] = q
if not created and q.serial:
raise InvalidQueueName("%s is a serial queue" % name)
return q
def save_queue(self):
q = self.validated_queue(self.name)
fields = ['default_timeout', 'scheduled', 'idempotent']
dirty = [f for f in fields if q.__dict__[f] != self.__dict__[f]]
if dirty:
q.default_timeout = self.default_timeout
q.serial = self.serial
q.idempotent = self.idempotent
# a queue remains a scheduled queue if prior scheduled jobs have been
# submitted to it
q.scheduled = True if self.scheduled else q.scheduled
q.save()
_PQ_QUEUES[self.name] = q
@classmethod
def all(cls, connection='default'):
allqs = []
queues = cls.objects.using(connection).all()[:]
for q in queues:
if q.name == 'failed':
allqs.append(get_failed_queue(connection))
else:
allqs.append(q)
return allqs
@property
def count(self):
return Job.objects.using(self.connection).filter(queue_id=self.name).count()
def delete_expired_ttl(self):
"""Delete jobs from the queue which have expired"""
with transaction.commit_on_success(using=self.connection):
Job.objects.using(self.connection).filter(
origin=self.name, status=Job.FINISHED, expired_at__lte=now()).delete()
def empty(self):
"""Delete all jobs from a queue"""
Job.objects.using(self.connection).filter(queue_id=self.name).delete()
def enqueue_next(self, job):
"""Enqueue the next scheduled job relative to this one"""
if not job.repeat:
return
if isinstance(job.repeat, datetime):
if job.repeat <= now():
return
else:
repeat = job.repeat
else:
repeat = job.repeat - 1 if job.repeat > 0 else -1
timeout = job.timeout
scheduled_for = job.scheduled_for + job.interval
scheduled_for = get_restricted_datetime(scheduled_for, job.between, job.weekdays)
status = Job.SCHEDULED if scheduled_for > job.scheduled_for else Job.QUEUED
self.save_queue()
job = Job.create(job.func, job.args, job.kwargs, connection=job.connection,
result_ttl=job.result_ttl,
scheduled_for=scheduled_for,
repeat=repeat,
interval=job.interval,
between=job.between,
weekdays=job.weekdays,
status=status)
return self.enqueue_job(job, timeout=timeout)
def enqueue_call(self, func, args=None, kwargs=None,
timeout=None, result_ttl=None, async=True, at=None,
repeat=None, interval=0, between='', weekdays=None): #noqa
"""Creates a job to represent the delayed function call and enqueues
it.
It is much like `.enqueue()`, except that it takes the function's args
and kwargs as explicit arguments. Any kwargs passed to this function
contain options for PQ itself.
"""
at = get_restricted_datetime(at, between, weekdays)
# Scheduled tasks require a slower query
status = Job.SCHEDULED if at else Job.QUEUED
self.save_queue()
timeout = timeout or self.default_timeout
job = Job.create(func, args, kwargs, connection=self.connection,
result_ttl=result_ttl,
scheduled_for=at,
repeat=repeat,
interval=interval,
between=between,
weekdays=weekdays,
status=status)
return self.enqueue_job(job, timeout=timeout, async=async)
def enqueue(self, f, *args, **kwargs):
"""Creates a job to represent the delayed function call and enqueues
it.
Expects the function to call, along with the arguments and keyword
arguments.
The function argument `f` may be any of the following:
* A reference to a function
* A reference to an object's instance method
* A string, representing the location of a function (must be
meaningful to the import context of the workers)
"""
if not isinstance(f, string_types) and f.__module__ == '__main__':
raise ValueError(
'Functions from the __main__ module cannot be processed '
'by workers.')
enq = _EnqueueArgs(*args, **kwargs)
return self.enqueue_call(func=f, args=enq.args, kwargs=enq.kwargs,
timeout=enq.timeout,
result_ttl=enq.result_ttl,
async=enq.async)
def enqueue_job(self, job, timeout=None, set_meta_data=True, async=True):
"""Enqueues a job for delayed execution.
When the `timeout` argument is sent, it will overrides the default
timeout value of 180 seconds. `timeout` may either be a string or
integer.
If the `set_meta_data` argument is `True` (default), it will update
the properties `origin` and `enqueued_at`.
If Queue is instantiated with async=False, job is executed immediately.
"""
if set_meta_data:
job.origin = self.name
if timeout:
job.timeout = timeout
else:
job.timeout = PQ_DEFAULT_JOB_TIMEOUT # default
if self._async and async:
job.queue_id = self.name
job.save()
self.notify(job.id)
else:
job.perform()
job.status = Job.FINISHED
return job
def schedule(self, at, f, *args, **kwargs):
"""As per enqueue but schedule ``at`` datetime"""
if not isinstance(f, string_types) and f.__module__ == '__main__':
raise ValueError(
'Functions from the __main__ module cannot be processed '
'by workers.')
enq = _EnqueueArgs(*args, **kwargs)
return self.enqueue_call(func=f, args=enq.args, kwargs=enq.kwargs,
timeout=enq.timeout, result_ttl=enq.result_ttl,
async=enq.async,
at=at)
def schedule_call(self, at, f, args=None, kwargs=None,
timeout=None, result_ttl=None, repeat=0, interval=0,
between='', weekdays=None):
"""
As per enqueue_call but with a datetime argument ``at`` first.
``repeat`` a number of times or infinitely -1 at
``interval`` seconds. Interval also accepts a timedelta or
dateutil relativedelta instance
``between`` is a time window that the scheduled
function will be called for example:
'0:0/6:00' or '0-6' or '0.0-6.0'
``weekdays`` is a tuple or list of relativedelta weekday
instances or the same of integers ranging from 0 (MO) to 6 (SU)
"""
return self.enqueue_call(func=f, args=args, kwargs=kwargs,
timeout=timeout, result_ttl=result_ttl,
at=at, repeat=repeat, interval=interval,
between=between, weekdays=weekdays)
def dequeue(self):
"""Dequeues the front-most job from this queue.
Returns a Job instance, which can be executed or inspected.
Does not respect serial queue locks
"""
with transaction.commit_on_success(using=self.connection):
try:
job = Job.objects.using(self.connection).select_for_update().filter(
queue=self, status=Job.QUEUED,
scheduled_for__lte=now()).order_by('scheduled_for')[0]
job.queue = None
job.save()
except IndexError:
job = None
if job and job.repeat:
self.enqueue_next(job)
return job
@classmethod
def _listen_for_jobs(cls, queue_names, connection_name, timeout):
"""Get notification from postgresql channels
corresponding to queue names.
"""
conn = cls.listen(connection_name, queue_names)
while True:
for notify in conn.notifies:
if not notify.channel in queue_names:
continue
elif notify.payload == 'stop':
raise DequeueTimeout(0)
conn.notifies.remove(notify)
logger.debug('Got job notification %s on queue %s'% (
notify.payload, notify.channel))
return notify.channel
else:
r, w, e = select.select([conn], [], [], timeout)
if not (r or w or e):
raise DequeueTimeout(timeout)
logger.debug('Got data on %s' % (str(r[0])))
conn.poll()
@classmethod
def dequeue_any(cls, queues, timeout):
"""Helper method, that polls the database queues for new jobs.
The timeout parameter is interpreted as follows:
None - non-blocking (return immediately)
> 0 - maximum number of seconds to block
Returns a job instance and a queue
"""
burst = True if not timeout else False
job = None
# queues must share the same connection - enforced at worker startup
conn = queues[0].connection
queue_names = [q.name for q in queues]
q_lookup = dict(zip(queue_names, queues))
default_timeout = timeout or 0
queue_stack = queues[:]
while True:
while queue_stack:
q = queue_stack.pop(0)
if q.serial and not q.acquire_lock(timeout):
# promise to check the queue at timeout
job = None
promise = q.name
else:
job, promise, timeout = Job._get_job_or_promise(
conn, q, timeout)
if job and job.repeat:
self.enqueue_next(job)
if job:
return job, q
if burst:
return
if promise:
queue_stack.append(promise)
q = cls._listen_for_jobs(queue_names, conn, timeout)
timeout = default_timeout
queue_stack.append(q_lookup[q])
@classmethod
def listen(cls, connection_name, queue_names):
conn = connections[connection_name]
cursor = conn.cursor()
for q_name in queue_names:
sql = "LISTEN \"%s\"" % q_name
cursor.execute(sql)
cursor.close()
# Need to return django's wrapped open connection so that
# the calling method can use the same session to actually
# receive pg notify messages
return conn.connection
def notify(self, job_id):
"""Notify postgresql channel when a job is enqueued"""
cursor = connections[self.connection].cursor()
cursor.execute("SELECT pg_notify(%s, %s);", (self.name, str(job_id)))
cursor.close()
class SerialQueue(Queue):
    """A queue with a lock"""

    class Meta:
        proxy = True

    @classmethod
    def create(cls,
               name='serial', default_timeout=None,
               connection='default', scheduled=False, async=True):
        """Returns a Queue ready for accepting jobs"""
        queue = super(SerialQueue, cls).create(name,
            default_timeout, connection, scheduled, async)
        # Mark the queue serial so concurrent workers must take the lock.
        if not queue.serial:
            queue.serial=True
            queue.save()
        return queue

    @classmethod
    def validated_queue(cls, name):
        # Unlike the base class, an existing row must already be serial.
        q, created = cls.objects.get_or_create(name=name)
        if not created and not q.serial:
            raise InvalidQueueName("%s is not a serial queue" % name)
        return q

    def acquire_lock(self, timeout=0, no_wait=True):
        # Take a row lock on the queue and, when a timeout is given, extend
        # ``lock_expires`` so other workers skip the queue until then.
        # The SELECT only matches when the current lock has already expired
        # (lock_expires <= now), so DoesNotExist means "still locked".
        try:
            with transaction.commit_on_success(using=self.connection):
                SerialQueue.objects.using(
                    self.connection).select_for_update(
                        no_wait=no_wait).get(
                            name=self.name, lock_expires__lte=now())
                if timeout:
                    self.lock_expires = now() + timedelta(seconds=timeout)
                    self.save()
        except DatabaseError:
            # Another session currently holds the row lock.
            logger.debug('%s SerialQueue currently locked on update' % self.name)
            return False
        except SerialQueue.DoesNotExist:
            # lock_expires is in the future: lock still held.
            logger.debug('%s SerialQueue currently locked' % self.name)
            return False
        return True

    def release_lock(self):
        # Expire the lock immediately so the next acquire_lock succeeds.
        self.lock_expires = now()
        self.save()
class FailedQueue(Queue):
    """Proxy queue holding jobs whose execution raised an exception."""

    class Meta:
        proxy = True

    @classmethod
    def validated_name(cls, name):
        # FIX: the first argument of a classmethod was misnamed ``self``.
        # 'failed' is reserved by the base class validator, so this proxy
        # deliberately skips validation.
        return name

    @classmethod
    def create(cls, connection='default'):
        """Return (and persist) the special 'failed' queue."""
        fq = super(FailedQueue, cls).create('failed', connection=connection)
        fq.save()
        return fq

    def quarantine(self, job, exc_info):
        """Puts the given Job in quarantine (i.e. put it on the failed
        queue).

        This is different from normal job enqueueing, since certain meta data
        must not be overridden (e.g. `origin` or `enqueued_at`) and other meta
        data must be inserted (`ended_at` and `exc_info`).
        """
        job.ended_at = now()
        job.exc_info = exc_info
        return self.enqueue_job(job, timeout=job.timeout, set_meta_data=False)

    def requeue(self, job_id):
        """Requeues the job with the given job ID."""
        with transaction.commit_on_success(self.connection):
            job = Job.objects.using(self.connection).select_for_update().get(id=job_id)
            # Delete it from the failed queue (raise an error if that failed)
            job.queue = None
            job.status = Job.QUEUED
            job.exc_info = None
            job.scheduled_for = now()
            job.save()
        # Put the job back on the queue it originally came from.
        q = Queue.create(job.origin, connection=self.connection)
        q.enqueue_job(job, timeout=job.timeout)
| |
# -*- coding: utf-8 -*-
"""
Copyright (c) 2017-2017 Cisco Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import os
import sys
import subprocess
from pprint import pprint
import shlex
from subprocess import Popen, PIPE
import json
import pprint
from .trex_astf_exceptions import ASTFError
class map_driver(object):
    """Namespace holding parsed command-line options (populated externally)."""
    opts = None
def run_cmd(cmd):
    """Run *cmd* (a shell-style command string) and return (exit_code, stdout, stderr).

    stdout is captured and decoded as UTF-8.  stderr is NOT redirected, so
    the third tuple element is always None.
    """
    proc = Popen(shlex.split(cmd), stdout=PIPE)
    out, err = proc.communicate()
    rc = proc.wait()
    return (rc, out.decode("utf-8"), err)
class TsharkFileProcess(object):
    """Validates a single pcap file using the external ``tshark`` tool.

    ``file_name`` is the pcap path; ``is_tcp`` marks whether the capture is
    expected to be TCP (stored here but not consulted by the checks below).
    """
    def __init__(self, file_name,is_tcp):
        self.file_name = file_name
        self.is_tcp = is_tcp

    def is_valid_tcp (self):
        # Valid means: exactly one TCP stream AND no anomalies flagged by
        # tshark's TCP analysis.
        res=(self.is_tcp_one_flow() and self.is_tcp_file_is_clean());
        return (res);

    def is_tcp_file_is_clean (self):
        # True when tshark's read filter matches no packets, i.e. no
        # out-of-order/dup-ack/retransmission/keep-alive anomalies.
        file = self.file_name;
        cmd='tshark -r %s -R "tcp.analysis.out_of_order or tcp.analysis.duplicate_ack or tcp.analysis.retransmission or tcp.analysis.fast_retransmission or tcp.analysis.ack_lost_segment or tcp.analysis.keep_alive" ' % (file);
        (exit_code,output, err)=run_cmd (cmd)
        if exit_code==0:
            # Empty output -> filter matched nothing -> capture is clean.
            if len(output)==0:
                return True;
        return False

    def is_tcp_one_flow (self):
        # Dump the per-packet tcp.stream index and verify all packets
        # belong to stream 0.
        file = self.file_name;
        cmd='tshark -r %s -T fields -e tcp.stream ' % (file);
        (exit_code,output, err)=run_cmd (cmd)
        if exit_code==0:
            return self._proces_output_is_one_tcp_flow(output)
        return False

    def _proces_output_is_one_tcp_flow (self,output):
        # ``found``: saw at least one packet in stream 0 and none elsewhere.
        # ``cnt``: number of non-numeric lines (blank/field-less lines).
        # NOTE(review): ``last`` is reset on every iteration, so it is True
        # only when the FINAL line was non-numeric (typically the trailing
        # empty line from split) — presumably intentional; confirm against
        # real tshark output before changing.
        found=False;
        cnt=0;
        l=output.split("\n")
        for line in l:
            last=False;
            if line.isdigit() :
                if int(line)==0:
                    found=True;
                else:
                    found=False
                    break;
            else:
                cnt=cnt+1
                last=True;
        # Precedence: ``or`` binds looser than ``and`` — this evaluates as
        # ((cnt==0) or ((cnt==1) and last)) and found.
        return ((cnt==0) or (cnt==1) and (last==True)) and found;
#def main(args=None):
# for root, subdirs, files in os.walk(PCAP_DIR):
# for filename in files:
# file=os.path.join(root, filename);
# if process_file_name (file):
# print file;
profile="""
from trex_astf_lib.api import *
class Prof1():
def __init__(self):
pass
def get_profile(self):
# ip generator
ip_gen_c = ASTFIPGenDist(ip_range=["16.0.0.0", "16.0.0.255"], distribution="seq")
ip_gen_s = ASTFIPGenDist(ip_range=["48.0.0.0", "48.0.255.255"], distribution="seq")
ip_gen = ASTFIPGen(glob=ASTFIPGenGlobal(ip_offset="1.0.0.0"),
dist_client=ip_gen_c,
dist_server=ip_gen_s)
return ASTFProfile(default_ip_gen=ip_gen,
cap_list=[ASTFCapInfo(file="%s",
cps=2.776)])
def register():
return Prof1()
""";
def tshark_stream_process(output):
    """Strip tshark's 6-line follow-stream header and convert the payload."""
    payload_lines = output.split("\n")[6:]
    return convert_file_l7("\n".join(payload_lines))
def get_payload_data (file):
    """Extract the L7 payload of TCP stream 0 from a pcap.

    Returns the (text, [tx_bytes, rx_bytes]) pair produced by
    tshark_stream_process/convert_file_l7; raises ASTFError on failure.
    """
    # `-z follow,tcp,raw,0` dumps the raw hex payload of TCP stream 0.
    cmd='tshark -nr %s -q -z follow,tcp,raw,0 ' % (file);
    (exit_code,output, err)=run_cmd (cmd)
    if exit_code==0:
        return tshark_stream_process (output)
    raise ASTFError("can't get payload ")
def compare_l7_data (file_a,file_b):
    """Compare the L7 payload (and byte counters) of two pcap files.

    Returns (True, counters) when both payload text and byte counters match,
    (False, [0, 0]) otherwise.  On payload mismatch the two payload dumps are
    written to f1.txt / f2.txt for manual inspection.
    """
    (s1,c1)=get_payload_data (file_a)
    (s2,c2)=get_payload_data (file_b)
    if s1==s2:
        if c1==c2:
            return (True,c1)
        else:
            print("ERROR counters are not the same !\n");
            print(c1)
            print(c2)
            return (False,[0,0]);
    else:
        print(c1)
        print(c2)
        print("FILES not the same !\n");
        print(" FILE ## 1 - f1.txt")
        # context managers guarantee the dump files are flushed and closed
        with open("f1.txt","w+") as f:
            f.write(s1)
        print(" FILE ## 2 - f2.txt")
        with open("f2.txt","w+") as f:
            f.write(s2)
        return (False,[0,0]);
def process_sim_to_json (output):
    """Extract the JSON document the simulator prints after a 'json-start' line.

    Raises ASTFError when no 'json-start' marker (followed by a line) exists.
    """
    lines=output.split("\n");
    # renamed from `next`, which shadowed the builtin of the same name
    take_next=False;
    for l in lines:
        if take_next==True:
            return(json.loads(l));
        if l.strip()=="json-start":
            take_next=True;
    raise ASTFError("can't find json output ")
class SimCounter(object):
    """Wrapper around the simulator's JSON counter dump.

    The expected layout is j['data'][side][...] with side in
    ('client', 'server'); see get_bytes/is_any_error below.
    """
    def __init__(self, json):
        # NOTE(review): the parameter name `json` shadows the json module
        # inside this method (harmless here, but confusing).
        self.m_json =json;
    def is_any_error (self):
        """Return True when either side reports an 'err' counter section."""
        j=self.m_json;
        sides=['client','server']
        for s in sides:
            if 'err' in j['data'][s]:
                return(True)
        return(False)
    def get_error_str (self):
        """Pretty-format the 'err' sections of both sides into one string."""
        res="";
        j=self.m_json;
        sides=['client','server']
        for s in sides:
            if 'err' in j['data'][s]:
                res+=pprint.pformat( j['data'][s]['err']);
        return(res)
    def dump(self):
        # Debug helper: pretty-print the whole counter JSON.
        pprint.pprint(self.m_json);
    def compare_counter (self,c,name,val):
        """Assert that counter `name` in dict `c` equals `val` (0 if absent)."""
        read_val=c.get(name,0);
        if (read_val != val):
            raise ASTFError("counter {0} read {1} expect {2}".format(name,read_val,val));
    def _match (self,_str,val1,val2):
        # Raise with a descriptive label when two values disagree.
        if (val1 != val2):
            raise ASTFError("counter-name{0} read {1} expect {2}".format(_str,val1,val2));
    def get_bytes (self,side):
        """Validate connection-lifecycle counters for `side` and return
        its (sent_bytes, received_bytes) pair."""
        j=self.m_json;
        c=j['data'][side]['all'];
        # Exactly one connection must have been opened and closed.
        self.compare_counter(c,'tcps_connects',1)
        self.compare_counter(c,'tcps_closed',1)
        if side=='client':
            self.compare_counter(c,'tcps_connattempt',1)
        else:
            self.compare_counter(c,'tcps_accepts',1)
        return (c.get('tcps_sndbyte',0),c.get('tcps_rcvbyte',0))
    def compare (self,tx_bytes,rx_bytes):
        """Cross-check client/server byte counters against the pcap-derived
        expected (tx_bytes, rx_bytes); raises ASTFError on any mismatch."""
        (ctx,crx)=self.get_bytes ('client')
        (stx,srx)=self.get_bytes ('server')
        print("client [%d,%d]" %(ctx,crx));
        print("server [%d,%d]" %(stx,srx));
        print("expect [%d,%d]" %(tx_bytes,rx_bytes));
        self._match("client.tx==server.rx",ctx,srx);
        self._match("client.rx==server.tx",crx,stx);
        self._match("client.tx==pcap.rx",ctx,tx_bytes);
        self._match("client.rx==pcap.rx",crx,rx_bytes);
def compare_counters (output,tx_bytes,
                      rx_bytes,skip_errors):
    """Parse the simulator output's JSON counters and verify them against
    the expected byte counts extracted from the pcap."""
    # NOTE(review): local `json` shadows the json module in this function.
    json=process_sim_to_json(output)
    simc = SimCounter(json);
    if skip_errors==False:
        # --skip-counter-err suppresses failing on simulator error counters.
        if map_driver.opts.skip_counter_err==False and simc.is_any_error():
            raise ASTFError("ERROR counters has an error {0} ".format(simc.get_error_str ()));
    simc.compare (tx_bytes,rx_bytes)
def _run_sim(file):
    """Run one pcap through astf-sim and verify the generated captures.

    Writes a throwaway profile (n.py) for the pcap, runs the simulator, then
    checks that the input pcap and the generated client/server pcaps carry
    the same L7 payload, and that the simulator counters agree.
    Raises ASTFError on any mismatch or simulator failure.
    """
    ifile=file.rstrip();
    p =profile% (ifile);
    # `with` ensures n.py is flushed/closed before the simulator reads it
    # (the old code kept the handle open and bound f.write()'s return value
    # to a misleading variable named `lines`).
    with open("n.py",'w') as f:
        f.write(p)
    ofile="generated/"+os.path.split(ifile)[1]
    print("process: ", ifile)
    cmd=""
    if map_driver.opts.cmd:
        cmd=","+map_driver.opts.cmd
    cmd='./astf-sim --python3 -f n.py -o {0} -v -c="--sim-json{1}"'.format(ofile,cmd);
    print(cmd);
    (exit_code,output, err)=run_cmd (cmd)
    if map_driver.opts.verbose:
        print(output);
    if exit_code==100:
        # Exit code 100 is the simulator's "deliberately skipped" signal.
        print("SKIP3-TRex reason");
        return;
    if exit_code==0:
        file1 = ifile;
        file2 = ofile+"_c.pcap"
        file3 = ofile+"_s.pcap"
        # input vs generated client side
        (p,c)=compare_l7_data(file1,file2);
        if p!=True:
            raise ASTFError("ERROR {0} {1} are not the same".format(file1,file2));
        # generated client side vs generated server side
        (p,c)=compare_l7_data(file2,file3)
        if p!=True:
            raise ASTFError("ERROR {0} {1} are not the same".format(file2,file3));
        compare_counters(output,c[0],c[1],False)
        print ("OK",c)
    else:
        raise ASTFError("ERROR running TRex {0} are not the same".format(output));
def run_sim(file):
    """Simulate one pcap; with --skip set, errors are printed instead of raised."""
    if map_driver.opts.skip:
        try:
            _run_sim(file)
        except Exception as e:
            print(e)
    else:
        _run_sim(file)
def file_is_ok(f):
    """Return True when the pcap holds exactly one clean TCP flow."""
    checker = TsharkFileProcess(f, True)
    return checker.is_valid_tcp()
def run_one_file (one_pcap_file):
    """Validate one pcap with tshark, then simulate it (or report a skip)."""
    print("run-"+one_pcap_file);
    if not file_is_ok(one_pcap_file):
        print("SKIP1 "+one_pcap_file);
        return
    run_sim(one_pcap_file)
def load_files (files):
    """Run every pcap listed (one per line) in the text file `files`.

    Lines starting with '#' are treated as comments and skipped.  Each
    pcap path is newline-stripped before use: readlines() keeps the
    trailing '\\n', which previously reached tshark via file_is_ok() and
    made every listed capture fail validation.  Blank lines are ignored.
    """
    with open(files,'r') as f:
        lines=f.readlines()
    for obj in lines:
        if (len(obj)>0) and (obj[0]=="#"):
            print("SKIP "+obj);
            continue;
        name = obj.rstrip()
        if not name:
            continue  # ignore empty lines
        run_one_file (name)
def tshark_trunc_line (line,dir):
    """Drop the leading direction marker from server-side (dir == 1) lines."""
    return line[1:] if dir == 1 else line
def tshark_add_line (line,dir):
    """Re-attach the direction marker when emitting a server-side (dir == 1) line."""
    # NOTE(review): the prefix literal below is copied verbatim; tshark's
    # follow-stream output marks server lines with a tab — confirm whether
    # this was originally a tab character rather than a space.
    if dir==1:
        return(" "+line);
    else:
        return(line)
def convert_file_l7 (s):
    """Fold a tshark follow-stream hex dump into per-direction L7 chunks.

    Consecutive lines of the same direction are concatenated; a chunk is
    flushed (with its direction marker) whenever the direction flips.
    Returns (text, counts) where counts[0]/counts[1] are the client/server
    payload sizes in bytes (two hex characters per byte).
    """
    # renamed from `bytes`/`dir`, which shadowed builtins
    byte_counts=[0,0]
    l=s.split("\n")
    res=""
    last_dir=-1;
    cur="";
    direction=0
    for line in l:
        # set dir
        if len(line)==0:
            break;
        if line[0]=="=":
            break;              # tshark's trailing "====" summary footer
        if line[0]=='\x09':
            direction=1         # tab prefix marks the server->client side
        else:
            direction=0
        if (direction!=last_dir) and (last_dir!=-1):
            # direction changed: flush the previous side's accumulated chunk
            res+=(tshark_add_line(cur,direction^1)+"\n")
            cur="";
        lp=tshark_trunc_line(line,direction)
        # two hex chars per byte; `//` keeps the counters integral
        # (the old `/2` produced floats under Python 3)
        byte_counts[direction]+=len(lp)//2;
        cur+=lp;
        last_dir=direction
    if len(cur)>0:
        # NOTE(review): this final flush tags `cur` with direction^1, the
        # opposite of the chunk's own direction — preserved as-is since both
        # sides of every comparison go through this same function.
        res+=(tshark_add_line(cur,direction^1)+"\n")
    return(res,byte_counts);
def test_convert_l7 ():
    """Debug helper: re-run the L7 conversion on a previously dumped f1.txt."""
    # `with` closes the handle even if read() raises
    with open("f1.txt","r") as f:
        s=f.read()
    return (convert_file_l7 (s))
def run_sfr ():
    """Simulate the canned SFR (avl) pcap set."""
    sfr_pcaps = (
        "avl/delay_10_http_browsing_0.pcap",
        "avl/delay_10_http_get_0.pcap",
        "avl/delay_10_http_post_0.pcap",
        "avl/delay_10_https_0.pcap",
        "avl/delay_10_exchange_0.pcap",
        "avl/delay_10_mail_pop_0.pcap",
        "avl/delay_10_oracle_0.pcap",
        "avl/delay_10_smtp_0.pcap",
        "avl/delay_10_citrix_0.pcap",
    )
    for pcap in sfr_pcaps:
        run_sim(pcap)
def setParserOptions():
    """Build the command-line parser for the ASTF pcap simulator utility.

    Fixes two copy-pasted help strings: -d/--dev previously repeated -v's
    "Print output to screen" and --skip-counter-err repeated -k's text.
    """
    parser = argparse.ArgumentParser(prog="sim_utl.py",
                                     usage="""
Examples:
---------
./astf-sim-utl --sfr   # run SFR profile
./astf-sim-utl -f pcap-file.pcap [sim-option]  # run one pcap file
./astf-sim-utl -f pcap-list.txt [sim-option]  # run a list of pcap files
./astf-sim-utl -f pcap-list.txt --cmd="--sim-mode=28,--sim-arg=0.1" --skip-counter-err --dev
""")
    parser.add_argument('-p', '--path',
                        help="BP sim path",
                        dest='bp_sim_path',
                        default=None,
                        type=str)
    parser.add_argument("-k", "--skip",
                        help="skip in case of an error",
                        action="store_true",
                        default=False)
    parser.add_argument('-v', '--verbose',
                        action="store_true",
                        help="Print output to screen")
    parser.add_argument('-d', '--dev',
                        action="store_true",
                        help="developer mode: don't catch exceptions (full traceback)")
    parser.add_argument( "--skip-counter-err",
                        help="don't fail on simulator error counters",
                        action="store_true",
                        default=False)
    parser.add_argument("-c", "--cmd",
                        help="command to the simulator",
                        dest='cmd',
                        default=None,
                        type=str)
    group = parser.add_mutually_exclusive_group()
    group.add_argument("-f",
                       dest="input_file",
                       help="list of pcap files or one pcap file",
                       )
    group.add_argument("--sfr",
                       dest="sfr",
                       action="store_true",
                       default=False,
                       help="run sfr profile test ",
                       )
    return parser
def test1 ():
    # Convenience entry point: exercise the full SFR pcap set.
    run_sfr();
def run ():
    """Dispatch on the parsed options: SFR set, a pcap list file, or one pcap."""
    opts = map_driver.opts
    if opts.sfr:
        run_sfr ()
        return
    path = opts.input_file
    if not os.path.isfile(path):
        raise ASTFError("ERROR {0} is not a file".format(path))
    extension = os.path.splitext(path)[1]
    if extension=='.txt':
        # a .txt file is a list of pcaps, one per line
        load_files (path)
        return
    if extension in ['.pcap','.cap']:
        run_one_file (path)
        return
    raise ASTFError("ERROR {0} is not a file in the right format".format(path))
def main(args=None):
    """CLI entry point: parse arguments, stash them on map_driver, and run.

    In --dev mode exceptions propagate (full traceback); otherwise they are
    printed and the process exits with status 1.
    """
    parser = setParserOptions()
    opts = parser.parse_args() if args is None else parser.parse_args(args)
    map_driver.opts = opts
    if opts.dev:
        run()
        return
    try:
        run()
    except Exception as e:
        print(e)
        sys.exit(1)
if __name__ == '__main__':
    main()
| |
from __future__ import print_function
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
from functools import partial, wraps
from itertools import islice, takewhile, dropwhile
import operator
from pipetools.compat import map, filter, range, dict_items
from pipetools.debug import set_name, repr_args, get_name
from pipetools.decorators import data_structure_builder, regex_condition
from pipetools.decorators import pipe_util, auto_string_formatter
from pipetools.main import pipe, X, _iterable
KEY, VALUE = X[0], X[1]
@pipe_util
@auto_string_formatter
@data_structure_builder
def foreach(function):
    """
    Returns a function that takes an iterable and returns an iterator over the
    results of calling `function` on each item of the iterable.
    >>> range(5) > foreach(factorial) | list
    [1, 1, 2, 6, 24]
    """
    # Lazy: the compat `map` is an iterator on both Python 2 and 3.
    return partial(map, function)
@pipe_util
def foreach_do(function):
    """
    Like :func:`foreach` but is evaluated immediately and doesn't return
    anything.
    For the occasion that you just want to do some side-effects::
        open('addresses.txt') > foreach(geocode) | foreach_do(launch_missile)
    -- With :func:`foreach` nothing would happen (except an iterator being
    created)
    """
    def _consume(iterable):
        for element in iterable:
            function(element)
    return _consume
@pipe_util
@regex_condition
def where(condition):
    """
    Pipe-able lazy filter.
    >>> odd_range = range | where(X % 2) | list
    >>> odd_range(10)
    [1, 3, 5, 7, 9]
    """
    # regex_condition also allows passing a regex string as the predicate.
    return partial(filter, condition)
@pipe_util
@regex_condition
def where_not(condition):
    """
    Inverted :func:`where`.
    """
    # Negation by piping the condition's result through operator.not_.
    return partial(filter, pipe | condition | operator.not_)
@pipe_util
@data_structure_builder
def sort_by(function):
    """
    Sorts an incoming sequence by using the given `function` as key.
    >>> range(10) > sort_by(-X)
    [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
    Supports automatic data-structure creation::
        users > sort_by([X.last_name, X.first_name])
    There is also a shortcut for ``sort_by(X)`` called ``sort``:
    >>> [4, 5, 8, -3, 0] > sort
    [-3, 0, 4, 5, 8]
    And (as of ``0.2.3``) a shortcut for reversing the sort:
    >>> 'asdfaSfa' > sort_by(X.lower()).descending
    ['s', 'S', 'f', 'f', 'd', 'a', 'a', 'a']
    """
    f = partial(sorted, key=function)
    # `attrs` presumably surfaces as attributes on the resulting pipe-able
    # (enabling the `.descending` shortcut) — handled by pipe_util.
    f.attrs = {'descending': _descending_sort_by(function)}
    return f
@pipe_util
def _descending_sort_by(function):
    # Reverse-order variant exposed as `sort_by(...).descending`.
    return partial(sorted, key=function, reverse=True)
# Shortcut for the common case of sorting by the items themselves.
sort = sort_by(X)
@pipe_util
@auto_string_formatter
@data_structure_builder
def debug_print(function):
    """
    Prints function applied on input and returns the input.
    ::
        foo = (pipe
            | something
            | debug_print(X.get_status())
            | something_else
            | foreach(debug_print("attr is: {0.attr}"))
            | etc)
    """
    def debug(thing):
        # Side effect only — the pipeline value passes through unchanged.
        print(function(thing))
        return thing
    return debug
@pipe_util
def tee(function):
    """
    Sends a copy of the input into function - like a T junction.
    """
    def _pass_through(value):
        function(value)
        return value
    return _pass_through
@pipe_util
def as_args(function):
    """
    Applies the sequence in the input as positional arguments to `function`.
    ::
        some_lists > as_args(izip)
    """
    def _spread(sequence):
        return function(*sequence)
    return _spread
@pipe_util
def as_kwargs(function):
    """
    Applies the dictionary in the input as keyword arguments to `function`.
    """
    def _spread_keywords(mapping):
        return function(**mapping)
    return _spread_keywords
def take_first(count):
    """
    Assumes an iterable on the input, returns an iterable with first `count`
    items from the input (or possibly less, if there isn't that many).
    >>> range(9000) > where(X % 100 == 0) | take_first(5) | tuple
    (0, 100, 200, 300, 400)
    """
    label = 'take_first(%s)' % count
    return pipe | set_name(label, lambda iterable: islice(iterable, count))
def drop_first(count):
    """
    Assumes an iterable on the input, returns an iterable with identical items
    except for the first `count`.
    >>> range(10) > drop_first(5) | tuple
    (5, 6, 7, 8, 9)
    """
    def _drop_first(iterable):
        # islice with a start index lazily skips the first `count` items —
        # equivalent to, and far simpler than, the previous dropwhile +
        # throwaway-counter-generator + StopIteration-swallowing construct.
        return islice(iterable, count, None)
    return pipe | set_name('drop_first(%s)' % count, _drop_first)
def unless(exception_class_or_tuple, func, *args, **kwargs):
    """
    When `exception_class_or_tuple` occurs while executing `func`, it will
    be caught and ``None`` will be returned.
    >>> f = where(X > 10) | list | unless(IndexError, X[0])
    >>> f([5, 8, 12, 4])
    12
    >>> f([1, 2, 3])
    None
    """
    @pipe_util
    @auto_string_formatter
    @data_structure_builder
    def construct_unless(function):
        # a wrapper so we can re-use the decorators
        def _unless(*args, **kwargs):
            try:
                return function(*args, **kwargs)
            except exception_class_or_tuple:
                # swallow the listed exception(s); implicitly return None
                pass
        return _unless
    # Lazy name (a callable) so the repr is only built when actually needed.
    name = lambda: 'unless(%s, %s)' % (exception_class_or_tuple, ', '.join(
        filter(None, (get_name(func), repr_args(*args, **kwargs)))))
    return set_name(name, construct_unless(func, *args, **kwargs))
@pipe_util
@regex_condition
def select_first(condition):
    """
    Returns first item from input sequence that satisfies `condition`. Or
    ``None`` if none does.
    >>> ['py', 'pie', 'pi'] > select_first(X.startswith('pi'))
    'pie'
    As of ``0.2.1`` you can also
    :ref:`directly use regular expressions <auto-regex>` and write the above
    as:
    >>> ['py', 'pie', 'pi'] > select_first('^pi')
    'pie'
    There is also a shortcut for ``select_first(X)`` called ``first_of``:
    >>> first_of(['', None, 0, 3, 'something'])
    3
    >>> first_of([])
    None
    """
    # next() pulls the first match; unless() maps an exhausted iterator
    # (StopIteration) to None.
    return where(condition) | unless(StopIteration, next)
# Shortcut: first truthy item of a sequence (None when there is none).
first_of = select_first(X)
@pipe_util
@auto_string_formatter
@data_structure_builder
def group_by(function):
    """
    Groups input sequence by `function`.
    Returns an iterator over a sequence of tuples where the first item is a
    result of `function` and the second one a list of items matching this
    result.
    Ordering of the resulting iterator is undefined, but ordering of the items
    in the groups is preserved.
    >>> [1, 2, 3, 4, 5, 6] > group_by(X % 2) | list
    [(0, [2, 4, 6]), (1, [1, 3, 5])]
    """
    def _group_by(items):
        buckets = {}
        for element in items:
            buckets.setdefault(function(element), []).append(element)
        return dict_items(buckets)
    return _group_by
def _flatten(x):
    """Recursively yield the leaves of `x`; mappings and non-iterables
    (including strings, per `_iterable`) count as leaves."""
    if _iterable(x) and not isinstance(x, Mapping):
        for item in x:
            for leaf in _flatten(item):
                yield leaf
    else:
        yield x
def flatten(*args):
    """
    Flattens an arbitrarily deep nested iterable(s).
    >>> [[[[[[1]]], 2], range(2) > foreach(X + 3)]] > flatten | list
    [1, 2, 3, 4]
    Does not treat strings and (as of ``0.3.1``) mappings (dictionaries)
    as iterables so these are left alone.
    >>> ('hello', [{'how': 'are'}, [['you']]]) > flatten | list
    ['hello', {'how': 'are'}, 'you']
    Also turns non-iterables into iterables which is convenient when you
    are not sure about the input.
    >>> 'stuff' > flatten | list
    ['stuff']
    """
    return _flatten(args)
# Rebind as a pipe so `x > flatten | list` works; wraps() keeps the docstring.
flatten = wraps(flatten)(pipe | flatten)
def count(iterable):
    """
    Returns the number of items in `iterable`.
    """
    # Works on any iterable (no len() required); consumes iterators.
    return sum(1 for whatever in iterable)
# Rebind as a pipe so `xs > count` works; wraps() keeps the docstring.
count = wraps(count)(pipe | count)
@pipe_util
@regex_condition
def take_until(condition):
    """
    >>> [1, 4, 6, 4, 1] > take_until(X > 5) | list
    [1, 4]
    >>> [1, 4, 6, 4, 1] > take_until(X > 5).including | list
    [1, 4, 6]
    """
    # takewhile on the negated condition => stop *before* the first match.
    f = partial(takewhile, pipe | condition | operator.not_)
    # `.including` variant also yields the first matching item.
    f.attrs = {'including': take_until_including(condition)}
    return f
@pipe_util
@regex_condition
def take_until_including(condition):
    """
    >>> [1, 4, 6, 4, 1] > take_until_including(X > 5) | list
    [1, 4, 6]
    """
    def take_until_including_(iterable):
        # Yield every item and stop *after* the first one matching
        # `condition` (both branches of the old if/else yielded the item,
        # so an unconditional yield is equivalent; also fixes the
        # "interable" parameter typo).
        for item in iterable:
            yield item
            if condition(item):
                break
    return take_until_including_
| |
#!/usr/local/bin/python
from io import open
import os
from PIL import Image
import sys
# =========================
# The Icon class!
class Icon(object):
    """A small indexed-color raster image with simple drawing helpers.

    Pixels are stored row-major in `self.image` as palette indices;
    `self.palette` maps index -> (r, g, b).
    """
    def __init__(self, w, h):
        self.width = w
        self.height = h
        self.image = [0] * (w * h)
        self.chars = Font("5x6.font")
        self.palette = {0: (255, 255, 255), 1: (0, 0, 0)}
        self.fgcolor = 1
        self.bgcolor = 0
    def charset(self, fn):
        """Replace the current font with the one loaded from file `fn`."""
        self.chars = Font(fn)
    def set(self, x, y, v=1):
        """Set pixel (x, y): truthy `v` paints fgcolor, falsy paints bgcolor.

        A bgcolor of None makes falsy writes transparent (no-op).
        Out-of-range coordinates are silently ignored.
        """
        if v:
            v = self.fgcolor
        elif self.bgcolor is None:
            return
        else:
            v = self.bgcolor
        if (y < self.height and x < self.width):
            self.image[y * self.width + x] = v
    def get(self, x, y):
        """Return the palette index at (x, y); 0 when out of bounds."""
        if (y < self.height and x < self.width):
            return self.image[y * self.width + x]
        return 0
    def tog(self, x, y):
        """Toggle pixel (x, y) between fgcolor and bgcolor."""
        if (y < self.height and x < self.width):
            if self.image[y * self.width + x] == self.bgcolor:
                self.image[y * self.width + x] = self.fgcolor
            elif self.bgcolor is not None:
                self.image[y * self.width + x] = self.bgcolor
            else:
                self.image[y * self.width + x] = 0
    def saveicn(self, f):
        '''Old-style, SUN format, monochrome icon files.'''
        iX = iY = 0
        uVal = 0
        iBit = 0
        iImg = 0
        BPW = 16
        icnhdr = "/* Format_version=1, Width=%d, Height=%d, Depth=1, Valid_bits_per_item=%d\n */\n"
        f.write(icnhdr % (self.width, self.height, BPW))
        for iY in range(0, self.height):
            f.write("\t")
            # was `(self.width - 1) / BPW + 1`: true division yields a float
            # under Python 3, which makes range() raise TypeError.
            for iX in range(0, (self.width - 1) // BPW + 1):
                uVal = iBit = 0
                while ((iX * BPW + iBit < self.width) and (iBit < BPW)):
                    val = int(not not self.image[iImg])
                    uVal = uVal | val << (15 - iBit)
                    iImg = iImg + 1
                    iBit = iBit + 1
                f.write("0x%4.4X" % (uVal & 0xFFFF))
                f.write(",")
            f.write("\n")
    def invert(self, x, y, w, h):
        """Toggle every pixel in the w x h rectangle at (x, y)."""
        for iY in range(0, h):
            for iX in range(0, w):
                self.tog(x + iX, y + iY)
    def merge(self, x, y, w, h, lImage):
        """Blit a w*h bitmap packed into integer `lImage` (LSB = bottom-right)."""
        for iY in range(h - 1, -1, -1):
            for iX in range(w - 1, -1, -1):
                self.set(x + iX, y + iY, lImage & 1)
                lImage >>= 1
    def box(self, x, y, w, h):
        """Draw a rectangle outline in the foreground color."""
        for iCh in range(y, y + h):
            self.set(x, iCh)
            self.set(x + w, iCh)
        for iCh in range(x, x + w):
            self.set(iCh, y)
            self.set(iCh, y + h)
    def string(self, x, y, s):
        """Render string `s` with the current font, wrapping at the right edge."""
        xt = x
        for c in s:
            if c == '\n':
                xt = x
                y = y + self.chars.height + 1
                continue
            ch = self.chars.get(c, (0, 0))
            if xt + ch[0] > self.width:
                xt = x
                y = y + self.chars.height + 1
            self.merge(xt, y, ch[0], self.chars.height, ch[1])
            xt += ch[0] + 1
    def number(self, x, y, v):
        """Render the decimal digits of abs(v) at (x, y)."""
        t = abs(v)
        st = ''
        while t:
            st = str(t % 10) + st
            # was `t /= 10`: true division makes `t` a float under Python 3
            # and corrupts the digit extraction; floor-divide instead.
            t //= 10
        # (dropped the old unused width accumulator, which also crashed by
        # subscripting the Font object — Font only exposes .get()/.height)
        self.string(x, y, st)
    def save(self, fname, fmt=None):
        """Save via PIL; format inferred from `fname` when `fmt` is None."""
        im = self.getimage()
        im.save(fname, fmt)
    def getimage(self):
        """Build an RGBA PIL image from the pixel/palette data."""
        im = Image.new("RGBA", (self.width, self.height))
        for iY in range(0, self.height):
            for iX in range(0, self.width):
                im.putpixel((iX, iY), self.palette[self.get(iX, iY)])
        return im
    def background(self, rgb):
        """Set the background color, interning `rgb` into the palette."""
        c = self.setpalette(rgb)
        self.bgcolor = c
    def foreground(self, rgb):
        """Set the foreground color, interning `rgb` into the palette."""
        c = self.setpalette(rgb)
        self.fgcolor = c
    def setpalette(self, rgb):
        """Return the palette index of `rgb`, adding a new entry if needed."""
        c = 0
        for i in self.palette.items():
            if i[1] == rgb:
                return i[0]
            if i[0] > c:
                c = i[0]
        self.palette[c + 1] = rgb
        return c + 1
# =========================
# The Font class!
class Font(object):
    """Tiny bitmap font loaded from a text .font file found on sys.path.

    File format (one directive per line):
      'x<N>'  global glyph width,  'y<N>'  glyph height,
      '=<c>[<w>]'  start glyph for character <c> (per-glyph width <w> when
      no global width was given), tab-prefixed rows give the bitmap
      ('#' = set pixel).  Glyphs are stored as (width, bits) tuples in
      self.chars, with a lowercase alias when not explicitly defined.
    """
    def __init__(self, fname):
        sys.path.append('./bin')
        f = None
        for path in sys.path:
            try:
                fp = os.path.join(path, fname)
                f = open(fp)
            except Exception:
                continue
            break
        if f:
            # close the handle once the lines are read (was leaked before)
            with f:
                inp = f.readlines()
            self.chars = {}
            xall = x = 0
            v = 0
            c = None
            for line in inp:
                if line[0] == '\t':
                    # bitmap row: pad/trim to width x, then fold into bits
                    line = line[1:] + " " * x
                    line = line[:x]
                    for p in line:
                        v = v * 2
                        if p == '#':
                            v = v + 1
                elif line[0] == 'x':
                    x = xall = int(line[1:])
                elif line[0] == 'y':
                    self.height = int(line[1:])
                elif line[0] == '=':
                    if c:
                        # flush the previous glyph before starting a new one
                        self.chars[c] = (x, v)
                        self.chars.setdefault(c.lower(), (x, v))
                    c = line[1]
                    v = 0
                    if not xall:
                        x = int(line[2:])
            if c:
                self.chars[c] = (x, v)
                self.chars.setdefault(c.lower(), (x, v))
    def __getitem__(self, key):
        # was `__get__`, which is the descriptor protocol and does NOT make
        # the object subscriptable; font[ch] needs __getitem__.
        return self.chars[key]
    def get(self, key, defval=None):
        """Return the (width, bits) glyph for `key`, or `defval` when absent."""
        return self.chars.get(key, defval)
| |
import tkinter as tk
from tkinter import filedialog
from tkinter import *
from tkinter import TOP, E
import tkinter.messagebox
from Client import menu, repoids, global_username, SHARED_REPO_ID
class UploadPage(tk.Frame):
    """GUI page for uploading a file (with a tag and a comment) to the
    user's own, a group's, or the shared repository.

    NOTE(review): self.group_names (the group OptionMenu) is only created in
    on_show(); see the lifecycle notes on show_groupOptions/back below.
    """
    def __init__(self, frame, gui):
        # parameter: frame
        # parameter: gui
        """
        Init the frame.
        """
        tk.Frame.__init__(self, frame)
        self.client = gui.getClient()
        """
        Creates a Label to display 'Upload'.
        """
        label = tk.Label(self, text="Upload")
        label.pack(side=TOP)
        # Frame used for organization
        top = tk.Frame(self)
        self.top = top
        top.pack(side=TOP)
        groupInfoFrame = tk.Frame(self)
        groupInfoFrame.pack()
        # Frame used for organization
        bottom = tk.Frame(self)
        bottom.pack(side=TOP)
        """
        Creates a Label to display 'Filename'.
        """
        filenameText = tk.Label(top, text="Filename")
        filenameText.grid(row=0, sticky=E)
        """
        Creates a Entry to display a textbox for the client to enter the name of the file.
        """
        self.filenameInput = tk.Entry(top)
        self.filenameInput.grid(row=0, column=1)
        self.file_path = None
        """
        Creates and adds a back button.
        Takes the client back to menu page when clicked on.
        """
        # NOTE(review): the string above is stale — this is the Search
        # (file-browser) button, not the back button.
        searchButton = tk.Button(top, text="Search",
                                 command=lambda: self.searchClientFile(gui))
        searchButton.grid(row=0, column=2)
        """
        Creates a Label to display 'Tags'.
        """
        tagsText = tk.Label(top, text="Tags")
        tagsText.grid(row=1, sticky=E)
        """
        Creates a Entry to display a textbox for the client to enter the category of the file.
        """
        self.tagsInput = tk.Entry(top)
        self.tagsInput.grid(row=1, column=1)
        """
        Creates a Label to display 'Comments'.
        """
        commentsText = tk.Label(top, text="Comments")
        commentsText.grid(row=2, sticky=E)
        """
        Creates a Entry to display a textbox for the client to enter the keywords of the file.
        """
        self.commentsInput = tk.Text(top, width=25, height=3, bd=5)
        self.commentsInput.grid(row=2, column=1)
        # NOTE(review): "Repositary" is a typo in the user-facing label
        # ("Repository"); left untouched here since it is runtime text.
        repoText = tk.Label(top, text="Repositary")
        repoText.grid(row=3, column=0)
        # Group-name widgets are gridded on demand by show_groupOptions().
        self.groupNameText = tk.Label(top, text="Group Name")
        self.groupNameInput = tk.Entry(top)
        repoOptionsFrame = tk.Frame(top)
        repoOptionsFrame.grid(row=3, column=1, columnspan=1)
        # self.var holds the selected group name; self.repo_destination the
        # self/group/shared radio choice.
        self.var = StringVar()
        self.repo_destination = StringVar()
        self.repo_destination.set("self")
        selfRB = Radiobutton(repoOptionsFrame, text="Self", variable=self.repo_destination, value="self",
                             command=lambda: self.remove_groupOptions())
        groupRB = Radiobutton(repoOptionsFrame, text="Group", variable=self.repo_destination, value="group",
                              command=lambda: self.show_groupOptions())
        sharedRB = Radiobutton(repoOptionsFrame, text="Shared", variable=self.repo_destination, value="shared",
                               command=lambda: self.remove_groupOptions())
        selfRB.grid(row=0, column=0, sticky=W)
        groupRB.grid(row=0, column=1)
        sharedRB.grid(row=0, column=2)
        """
        Creates and adds a upload button.
        Takes all text the client enters and
        uploads the file with the corresponding information.
        """
        uploadButton = tk.Button(bottom, text="Upload",
                                 command=lambda: self.upload(gui,
                                                             self.filenameInput.get(),
                                                             self.tagsInput.get(),
                                                             self.commentsInput.get("1.0", END)))
        uploadButton.grid(row=0)
        """
        Creates and adds a back button.
        Takes the client back to menu page when clicked on.
        """
        backButton = tk.Button(bottom, text="Back",
                               command=lambda: self.back(gui))
        backButton.grid(row=0, column=1)
    def show_groupOptions(self):
        # Reveal the group-name label and dropdown.
        # NOTE(review): raises AttributeError if on_show() (which creates
        # self.group_names) has not run yet — confirm the page is always
        # shown via on_show before these radio callbacks can fire.
        self.groupNameText.grid(row=4, column=0)
        self.group_names.grid(row=4, column=1)
    def remove_groupOptions(self):
        # Hide the group-name label and dropdown (self/shared selected).
        self.groupNameText.grid_forget()
        self.group_names.grid_forget()
    def get_repo_id(self):
        """Map the selected radio option to a repository id.

        Returns None implicitly when 'group' is selected but the chosen
        name is not found in self.list_groups.
        """
        repo = self.repo_destination.get()
        if repo == "self":
            return repoids[0]
        elif repo == "group":
            # list_groups entries look like (group_id, group_name)
            for group_tuple in self.list_groups:
                if group_tuple[1] == self.var.get():
                    return group_tuple[0]
        elif repo == "shared":
            return SHARED_REPO_ID
    def upload(self, gui, filename, tag, comment):
        """Validate the form fields and upload the file via the client.

        len(comment) == 1 means the Text widget is empty (tk.Text.get
        always returns at least the trailing newline).
        """
        if not filename and not tag and len(comment) == 1:
            tkinter.messagebox.showinfo("Warning", "Please enter the name of a file, a tag, and a comment.")
        elif not filename:
            tkinter.messagebox.showinfo("Warning", "Please enter the name of a file or search for one on your machine.")
        elif not tag:
            tkinter.messagebox.showinfo("Warning", "Please enter a tag.")
        elif len(comment) == 1:
            tkinter.messagebox.showinfo("Warning", "Please enter a comment.")
        elif filename and tag and comment:
            repo_id = self.get_repo_id()
            # Prefer the browsed path (from searchClientFile) over the
            # typed-in filename.
            if self.file_path:
                response = gui.getClient().upload(self.file_path, [tag], comment, str(repo_id))
            else:
                response = gui.getClient().upload(filename, [tag], comment, str(repo_id))
            if not response:
                tkinter.messagebox.showinfo("Warning", "File " + filename + " was not found. The file was not uploaded")
            elif response:
                # NOTE(review): "sucessfully" is a typo in the runtime
                # message text; left untouched here.
                tkinter.messagebox.showinfo("Notice", "File " + filename + " was sucessfully uploaded.")
                # Clear the form after a successful upload.
                self.filenameInput.delete(0, 'end')
                self.tagsInput.delete(0, 'end')
                self.commentsInput.delete("1.0", END)
                self.file_path = None
    def searchClientFile(self, gui):
        """Open a file-picker and fill the filename entry with the choice."""
        gui.filename = filedialog.askopenfilename(initialdir="/", title="Select file")
        print(gui.filename)
        path = gui.filename.split("/")
        print(path[-1])
        filename = path[-1]
        self.filenameInput.delete(0, 'end')
        self.filenameInput.insert(0, filename)
        # Remember the full path; upload() prefers it over the entry text.
        self.file_path = gui.filename
    def back(self, gui):
        # parameter: gui -> The GUI that is being used.
        """
        Empties the textboxes before heading back to the starting page.
        """
        self.filenameInput.delete(0, 'end')
        self.tagsInput.delete(0, 'end')
        self.commentsInput.delete("1.0", END)
        self.repo_destination.set("self")
        try: # god forgive my sins
            # Destroy the OptionMenu; on_show() recreates it on next visit.
            self.group_names.destroy()
        except AttributeError:
            pass # ignore if group_names does not exist
        """
        Goes back to the starting page.
        """
        gui.show_frame(menu.MenuPage)
    def on_show(self):
        """Refresh the user's group list and (re)build the group dropdown."""
        self.list_groups = self.client.retrieve_groups(global_username[0])
        if self.list_groups:
            self.var.set(self.list_groups[0][1])
        else:
            self.var.set(" ")
        print(self.list_groups)
        self.group_names = tk.OptionMenu(self.top, self.var, *[tup[1] for tup in self.list_groups]
                                         if self.list_groups else " ")
| |
from dark.utils import countPrint
try:
from itertools import zip_longest
except ImportError:
# zip_longest does not exist in Python 2.7 itertools. We should be able
# to get it via from six.moves import zip_longest according to
# https://pythonhosted.org/six/index.html?highlight=zip_longest but
# that doesn't work for me.
from itertools import izip_longest as zip_longest
# From https://en.wikipedia.org/wiki/Amino_acid
#
# Alanine Ala A
# Arginine Arg R
# Asparagine Asn N
# Aspartic acid Asp D
# Cysteine Cys C
# Glutamic acid Glu E
# Glutamine Gln Q
# Glycine Gly G
# Histidine His H
# Isoleucine Ile I
# Leucine Leu L
# Lysine Lys K
# Methionine Met M
# Phenylalanine Phe F
# Proline Pro P
# Serine Ser S
# Threonine Thr T
# Tryptophan Trp W
# Tyrosine Tyr Y
# Valine Val V
# One-letter amino acid code -> full name (20 standard amino acids).
NAMES = {
    'A': 'Alanine',
    'R': 'Arginine',
    'N': 'Asparagine',
    'D': 'Aspartic acid',
    'C': 'Cysteine',
    'E': 'Glutamic acid',
    'Q': 'Glutamine',
    'G': 'Glycine',
    'H': 'Histidine',
    'I': 'Isoleucine',
    'L': 'Leucine',
    'K': 'Lysine',
    'M': 'Methionine',
    'F': 'Phenylalanine',
    'P': 'Proline',
    'S': 'Serine',
    'T': 'Threonine',
    'V': 'Valine',
    'W': 'Tryptophan',
    'Y': 'Tyrosine',
}
# Sorted one-letter codes, and the reverse (full name -> one-letter) map.
AA_LETTERS = sorted(NAMES)
NAMES_TO_ABBREV1 = dict((name, abbrev1) for abbrev1, name in NAMES.items())
# One-letter code -> three-letter abbreviation, and the reverse map.
ABBREV3 = {
    'A': 'Ala',
    'R': 'Arg',
    'N': 'Asn',
    'D': 'Asp',
    'C': 'Cys',
    'E': 'Glu',
    'Q': 'Gln',
    'G': 'Gly',
    'H': 'His',
    'I': 'Ile',
    'L': 'Leu',
    'K': 'Lys',
    'M': 'Met',
    'F': 'Phe',
    'P': 'Pro',
    'S': 'Ser',
    'T': 'Thr',
    'V': 'Val',
    'W': 'Trp',
    'Y': 'Tyr',
}
ABBREV3_TO_ABBREV1 = dict((abbrev3, abbrev1)
                          for abbrev1, abbrev3 in ABBREV3.items())
# Bit flags describing amino acid properties; combined with | in PROPERTIES.
HYDROPHOBIC = 0x0001
HYDROPHILIC = 0x0002
AROMATIC = 0x0004
SULPHUR = 0x0008
ALIPHATIC = 0x0010
HYDROXYLIC = 0x0020
TINY = 0x0040
SMALL = 0x0080
ACIDIC = 0x0100
BASIC_POSITIVE = 0x0200
NEGATIVE = 0x0400
POLAR = 0x0800
NONE = 0x1000
ALL_PROPERTIES = (
    ACIDIC, ALIPHATIC, AROMATIC, BASIC_POSITIVE, HYDROPHILIC,
    HYDROPHOBIC, HYDROXYLIC, NEGATIVE, NONE, POLAR, SMALL, SULPHUR, TINY)
# Human-readable name for each property flag.
PROPERTY_NAMES = {
    ACIDIC: 'Acidic',
    ALIPHATIC: 'Aliphatic',
    AROMATIC: 'Aromatic',
    BASIC_POSITIVE: 'Basic positive',
    HYDROPHILIC: 'Hydrophilic',
    HYDROPHOBIC: 'Hydrophobic',
    HYDROXYLIC: 'Hydroxylic',
    NEGATIVE: 'Negative',
    NONE: '<NONE>',
    POLAR: 'Polar',
    SMALL: 'Small',
    SULPHUR: 'Sulphur',
    TINY: 'Tiny',
}
# One-letter code -> OR-ed property flags.
# NOTE(review): N and Q carry the ACIDIC flag here although they are amides
# of D and E — confirm this classification is intentional.
PROPERTIES = {
    'A': HYDROPHOBIC | SMALL | TINY,
    'C': HYDROPHOBIC | SMALL | TINY | SULPHUR,
    'D': HYDROPHILIC | SMALL | POLAR | NEGATIVE,
    'E': HYDROPHILIC | NEGATIVE | ACIDIC,
    'F': HYDROPHOBIC | AROMATIC,
    'G': HYDROPHILIC | SMALL | TINY,
    'H': HYDROPHOBIC | AROMATIC | POLAR | BASIC_POSITIVE,
    'I': ALIPHATIC | HYDROPHOBIC,
    'K': HYDROPHOBIC | BASIC_POSITIVE | POLAR,
    'L': ALIPHATIC | HYDROPHOBIC,
    'M': HYDROPHOBIC | SULPHUR,
    'N': HYDROPHILIC | SMALL | POLAR | ACIDIC,
    'P': HYDROPHILIC | SMALL,
    'Q': HYDROPHILIC | POLAR | ACIDIC,
    'R': HYDROPHILIC | POLAR | BASIC_POSITIVE,
    'S': HYDROPHILIC | SMALL | POLAR | HYDROXYLIC,
    'T': HYDROPHOBIC | SMALL | HYDROXYLIC,
    'V': ALIPHATIC | HYDROPHOBIC | SMALL,
    'W': HYDROPHOBIC | AROMATIC | POLAR,
    'Y': HYDROPHOBIC | AROMATIC | POLAR,
}
# A table with which codons translate to which amino acids.
# Based on https://en.wikipedia.org/wiki/DNA_codon_table
#
# Note that the trailing commas are necessary in the AAs that only have one
# codon. If you omit them, the parentheses will not create a tuple.
CODONS = {
    'A': ('GCA', 'GCC', 'GCG', 'GCT',),
    'C': ('TGC', 'TGT',),
    'D': ('GAC', 'GAT',),
    'E': ('GAA', 'GAG',),
    'F': ('TTC', 'TTT',),
    'G': ('GGA', 'GGC', 'GGG', 'GGT',),
    'H': ('CAC', 'CAT',),
    'I': ('ATA', 'ATC', 'ATT',),
    'K': ('AAA', 'AAG',),
    'L': ('CTA', 'CTC', 'CTG', 'CTT', 'TTA', 'TTG',),
    'M': ('ATG',),
    'N': ('AAC', 'AAT',),
    'P': ('CCA', 'CCC', 'CCG', 'CCT',),
    'Q': ('CAA', 'CAG',),
    'R': ('AGA', 'AGG', 'CGA', 'CGC', 'CGG', 'CGT',),
    'S': ('AGC', 'AGT', 'TCA', 'TCC', 'TCG', 'TCT',),
    'T': ('ACA', 'ACC', 'ACG', 'ACT',),
    'V': ('GTA', 'GTC', 'GTG', 'GTT',),
    'W': ('TGG',),
    'Y': ('TAC', 'TAT',),
}
# Translation start/stop codons (DNA alphabet).
START_CODON = 'ATG'
STOP_CODONS = ('TAA', 'TAG', 'TGA',)
"""
The dictionary below contains for each amino acid the value for
each property scaled from -1 to 1.
For documentation, check https://notebooks.antigenic-cartography.org/barbara/
pages/features/aa-properties.html
"""
PROPERTY_DETAILS = {
'A': {
'aliphaticity': 0.305785123967,
'aromaticity': -0.550128534704,
'composition': -1.0,
'hydrogenation': 0.8973042362,
'hydropathy': 0.4,
'hydroxythiolation': -0.265160523187,
'iep': -0.191489361702,
'polar requirement': -0.463414634146,
'polarity': -0.20987654321,
'volume': -0.664670658683,
},
'C': {
'aliphaticity': -0.00826446280992,
'aromaticity': -0.740359897172,
'composition': 1.0,
'hydrogenation': 0.240051347882,
'hydropathy': 0.555555555556,
'hydroxythiolation': 0.785969084423,
'iep': -0.424280350438,
'polar requirement': -1.0,
'polarity': -0.851851851852,
'volume': -0.377245508982,
},
'D': {
'aliphaticity': -0.818181818182,
'aromaticity': -1.0,
'composition': 0.00363636363636,
'hydrogenation': -0.90243902439,
'hydropathy': -0.777777777778,
'hydroxythiolation': -0.348394768133,
'iep': -1.0,
'polar requirement': 1.0,
'polarity': 1.0,
'volume': -0.389221556886,
},
'E': {
'aliphaticity': -0.553719008264,
'aromaticity': -0.899742930591,
'composition': -0.330909090909,
'hydrogenation': -1.0,
'hydropathy': -0.777777777778,
'hydroxythiolation': -0.555291319857,
'iep': -0.887359198999,
'polar requirement': 0.878048780488,
'polarity': 0.827160493827,
'volume': -0.0419161676647,
},
'F': {
'aliphaticity': 0.223140495868,
'aromaticity': 0.858611825193,
'composition': -1.0,
'hydrogenation': 0.0218228498074,
'hydropathy': 0.622222222222,
'hydroxythiolation': 0.0582639714625,
'iep': -0.321652065081,
'polar requirement': -0.951219512195,
'polarity': -0.925925925926,
'volume': 0.544910179641,
},
'G': {
'aliphaticity': -1.0,
'aromaticity': -0.45501285347,
'composition': -0.461818181818,
'hydrogenation': 1.0,
'hydropathy': -0.0888888888889,
'hydroxythiolation': -0.158145065398,
'iep': -0.198998748436,
'polar requirement': -0.243902439024,
'polarity': 0.0123456790123,
'volume': -1.0,
},
'H': {
'aliphaticity': -0.256198347107,
'aromaticity': 0.555269922879,
'composition': -0.578181818182,
'hydrogenation': -0.150192554557,
'hydropathy': -0.711111111111,
'hydroxythiolation': 0.0154577883472,
'iep': 0.206508135169,
'polar requirement': -0.121951219512,
'polarity': 0.358024691358,
'volume': 0.11377245509,
},
'I': {
'aliphaticity': 0.867768595041,
'aromaticity': -0.264781491003,
'composition': -1.0,
'hydrogenation': 0.432605905006,
'hydropathy': 1.0,
'hydroxythiolation': -0.85255648038,
'iep': -0.18648310388,
'polar requirement': -0.975609756098,
'polarity': -0.925925925926,
'volume': 0.293413173653,
},
'K': {
'aliphaticity': 0.123966942149,
'aromaticity': -0.141388174807,
'composition': -0.76,
'hydrogenation': -0.142490372272,
'hydropathy': -0.866666666667,
'hydroxythiolation': -1.0,
'iep': 0.744680851064,
'polar requirement': 0.292682926829,
'polarity': 0.58024691358,
'volume': 0.389221556886,
},
'L': {
'aliphaticity': 1.0,
'aromaticity': -0.287917737789,
'composition': -1.0,
'hydrogenation': 0.381258023107,
'hydropathy': 0.844444444444,
'hydroxythiolation': -0.745541022592,
'iep': -0.196495619524,
'polar requirement': -0.975609756098,
'polarity': -1.0,
'volume': 0.293413173653,
},
'M': {
'aliphaticity': 0.537190082645,
'aromaticity': -0.372750642674,
'composition': -1.0,
'hydrogenation': -0.186136071887,
'hydropathy': 0.422222222222,
'hydroxythiolation': 0.0653983353151,
'iep': -0.256570713392,
'polar requirement': -0.878048780488,
'polarity': -0.802469135802,
'volume': 0.221556886228,
},
'N': {
'aliphaticity': 0.471074380165,
'aromaticity': -0.616966580977,
'composition': -0.0327272727273,
'hydrogenation': -0.548138639281,
'hydropathy': -0.777777777778,
'hydroxythiolation': 0.277051129608,
'iep': -0.339173967459,
'polar requirement': 0.268292682927,
'polarity': 0.654320987654,
'volume': -0.365269461078,
},
'P': {
'aliphaticity': -0.917355371901,
'aromaticity': -0.308483290488,
'composition': -0.716363636364,
'hydrogenation': 1.0,
'hydropathy': -0.355555555556,
'hydroxythiolation': -0.203329369798,
'iep': -0.116395494368,
'polar requirement': -0.560975609756,
'polarity': -0.234567901235,
'volume': -0.646706586826,
},
'Q': {
'aliphaticity': 0.652892561983,
'aromaticity': -0.439588688946,
'composition': -0.352727272727,
'hydrogenation': -0.602053915276,
'hydropathy': -0.777777777778,
'hydroxythiolation': -0.177170035672,
'iep': -0.279098873592,
'polar requirement': -0.0731707317073,
'polarity': 0.382716049383,
'volume': -0.0179640718563,
},
'R': {
'aliphaticity': -0.157024793388,
'aromaticity': -0.0642673521851,
'composition': -0.527272727273,
'hydrogenation': -0.401797175866,
'hydropathy': -1.0,
'hydroxythiolation': -0.51486325802,
'iep': 1.0,
'polar requirement': 0.0487804878049,
'polarity': 0.382716049383,
'volume': 0.449101796407,
},
'S': {
'aliphaticity': 0.256198347107,
'aromaticity': -0.660668380463,
'composition': 0.0327272727273,
'hydrogenation': 0.106546854942,
'hydropathy': -0.177777777778,
'hydroxythiolation': 1.0,
'iep': -0.271589486859,
'polar requirement': -0.341463414634,
'polarity': 0.0617283950617,
'volume': -0.652694610778,
},
'T': {
'aliphaticity': -0.123966942149,
'aromaticity': -0.80205655527,
'composition': -0.483636363636,
'hydrogenation': 0.399229781772,
'hydropathy': -0.155555555556,
'hydroxythiolation': 0.709869203329,
'iep': -0.151439299124,
'polar requirement': -0.560975609756,
'polarity': -0.0864197530864,
'volume': -0.305389221557,
},
'V': {
'aliphaticity': 0.570247933884,
'aromaticity': -0.665809768638,
'composition': -1.0,
'hydrogenation': 0.679075738126,
'hydropathy': 0.933333333333,
'hydroxythiolation': -0.621878715815,
'iep': -0.201501877347,
'polar requirement': -0.80487804878,
'polarity': -0.753086419753,
'volume': -0.0299401197605,
},
'W': {
'aliphaticity': -0.619834710744,
'aromaticity': 1.0,
'composition': -0.905454545455,
'hydrogenation': 0.0218228498074,
'hydropathy': -0.2,
'hydroxythiolation': 0.00118906064209,
'iep': -0.219023779725,
'polar requirement': -0.90243902439,
'polarity': -0.876543209877,
'volume': 1.0,
},
'Y': {
'aliphaticity': -0.454545454545,
'aromaticity': 0.712082262211,
'composition': -0.854545454545,
'hydrogenation': -0.304236200257,
'hydropathy': 0.288888888889,
'hydroxythiolation': 0.405469678954,
'iep': -0.276595744681,
'polar requirement': -0.853658536585,
'polarity': -0.679012345679,
'volume': 0.592814371257,
},
}
"""
The dictionary below contains for each amino acid the value for
each property.
"""
# Raw (unscaled) amino acid property values, keyed by 1-letter amino acid
# code. Each inner dict maps a (lowercase) property name to its value.
PROPERTY_DETAILS_RAW = {
    'A': {
        'aliphaticity': 0.239,
        'aromaticity': -0.11,
        'composition': 0.0,
        'hydrogenation': 0.33,
        'hydropathy': 1.8,
        'hydroxythiolation': -0.062,
        'iep': 6.0,
        'polar requirement': 7.0,
        'polarity': 8.1,
        'volume': 31.0,
    },
    'C': {
        'aliphaticity': 0.22,
        'aromaticity': -0.184,
        'composition': 2.75,
        'hydrogenation': 0.074,
        'hydropathy': 2.5,
        'hydroxythiolation': 0.38,
        'iep': 5.07,
        'polar requirement': 4.8,
        'polarity': 5.5,
        'volume': 55.0,
    },
    'D': {
        'aliphaticity': 0.171,
        'aromaticity': -0.285,
        'composition': 1.38,
        'hydrogenation': -0.371,
        'hydropathy': -3.5,
        'hydroxythiolation': -0.079,
        'iep': 2.77,
        'polar requirement': 13.0,
        'polarity': 13.0,
        'volume': 54.0,
    },
    'E': {
        'aliphaticity': 0.187,
        'aromaticity': -0.246,
        'composition': 0.92,
        'hydrogenation': -0.409,
        'hydropathy': -3.5,
        'hydroxythiolation': -0.184,
        'iep': 3.22,
        'polar requirement': 12.5,
        'polarity': 12.3,
        'volume': 83.0,
    },
    'F': {
        'aliphaticity': 0.234,
        'aromaticity': 0.438,
        'composition': 0.0,
        'hydrogenation': -0.011,
        'hydropathy': 2.8,
        'hydroxythiolation': 0.074,
        'iep': 5.48,
        'polar requirement': 5.0,
        'polarity': 5.4,
        'volume': 132.0,
    },
    'G': {
        'aliphaticity': 0.16,
        'aromaticity': -0.073,
        'composition': 0.74,
        'hydrogenation': 0.37,
        'hydropathy': -0.4,
        'hydroxythiolation': -0.017,
        'iep': 5.97,
        'polar requirement': 7.9,
        'polarity': 9.0,
        'volume': 3.0,
    },
    'H': {
        'aliphaticity': 0.205,
        'aromaticity': 0.32,
        'composition': 0.58,
        'hydrogenation': -0.078,
        'hydropathy': -3.2,
        'hydroxythiolation': 0.056,
        'iep': 7.59,
        'polar requirement': 8.4,
        'polarity': 10.4,
        'volume': 96.0,
    },
    'I': {
        'aliphaticity': 0.273,
        'aromaticity': 0.001,
        'composition': 0.0,
        'hydrogenation': 0.149,
        'hydropathy': 4.5,
        'hydroxythiolation': -0.309,
        'iep': 6.02,
        'polar requirement': 4.9,
        'polarity': 5.2,
        'volume': 111.0,
    },
    'K': {
        'aliphaticity': 0.228,
        'aromaticity': 0.049,
        'composition': 0.33,
        'hydrogenation': -0.075,
        'hydropathy': -3.9,
        'hydroxythiolation': -0.371,
        'iep': 9.74,
        'polar requirement': 10.1,
        'polarity': 11.3,
        'volume': 119.0,
    },
    'L': {
        'aliphaticity': 0.281,
        'aromaticity': -0.008,
        'composition': 0.0,
        'hydrogenation': 0.129,
        'hydropathy': 3.8,
        'hydroxythiolation': -0.264,
        'iep': 5.98,
        'polar requirement': 4.9,
        'polarity': 4.9,
        'volume': 111.0,
    },
    'M': {
        'aliphaticity': 0.253,
        'aromaticity': -0.041,
        'composition': 0.0,
        'hydrogenation': -0.092,
        'hydropathy': 1.9,
        'hydroxythiolation': 0.077,
        'iep': 5.74,
        'polar requirement': 5.3,
        'polarity': 5.7,
        'volume': 105.0,
    },
    'N': {
        'aliphaticity': 0.249,
        'aromaticity': -0.136,
        'composition': 1.33,
        'hydrogenation': -0.233,
        'hydropathy': -3.5,
        'hydroxythiolation': 0.166,
        'iep': 5.41,
        'polar requirement': 10.0,
        'polarity': 11.6,
        'volume': 56.0,
    },
    'P': {
        'aliphaticity': 0.165,
        'aromaticity': -0.016,
        'composition': 0.39,
        'hydrogenation': 0.37,
        'hydropathy': -1.6,
        'hydroxythiolation': -0.036,
        'iep': 6.3,
        'polar requirement': 6.6,
        'polarity': 8.0,
        'volume': 32.5,
    },
    'Q': {
        'aliphaticity': 0.26,
        'aromaticity': -0.067,
        'composition': 0.89,
        'hydrogenation': -0.254,
        'hydropathy': -3.5,
        'hydroxythiolation': -0.025,
        'iep': 5.65,
        'polar requirement': 8.6,
        'polarity': 10.5,
        'volume': 85.0,
    },
    'R': {
        'aliphaticity': 0.211,
        'aromaticity': 0.079,
        'composition': 0.65,
        'hydrogenation': -0.176,
        'hydropathy': -4.5,
        'hydroxythiolation': -0.167,
        'iep': 10.76,
        'polar requirement': 9.1,
        'polarity': 10.5,
        'volume': 124.0,
    },
    'S': {
        'aliphaticity': 0.236,
        'aromaticity': -0.153,
        'composition': 1.42,
        'hydrogenation': 0.022,
        'hydropathy': -0.8,
        'hydroxythiolation': 0.47,
        'iep': 5.68,
        'polar requirement': 7.5,
        'polarity': 9.2,
        'volume': 32.0,
    },
    'T': {
        'aliphaticity': 0.213,
        'aromaticity': -0.208,
        'composition': 0.71,
        'hydrogenation': 0.136,
        'hydropathy': -1.3,
        'hydroxythiolation': 0.348,
        'iep': 6.16,
        'polar requirement': 6.6,
        'polarity': 8.6,
        'volume': 61.0,
    },
    'V': {
        'aliphaticity': 0.255,
        'aromaticity': -0.155,
        'composition': 0.0,
        'hydrogenation': 0.245,
        'hydropathy': 4.2,
        'hydroxythiolation': -0.212,
        'iep': 5.96,
        'polar requirement': 5.6,
        'polarity': 5.9,
        'volume': 84.0,
    },
    'W': {
        'aliphaticity': 0.183,
        'aromaticity': 0.493,
        'composition': 0.13,
        'hydrogenation': -0.011,
        'hydropathy': -0.9,
        'hydroxythiolation': 0.05,
        'iep': 5.89,
        'polar requirement': 5.2,
        'polarity': 5.4,
        'volume': 170.0,
    },
    'Y': {
        'aliphaticity': 0.193,
        'aromaticity': 0.183,
        'composition': 0.2,
        'hydrogenation': -0.138,
        'hydropathy': -1.3,
        'hydroxythiolation': 0.22,
        'iep': 5.66,
        'polar requirement': 5.4,
        'polarity': 6.2,
        'volume': 136.0,
    },
}
"""
Clusters based on raw amino acid property values. See
https://notebooks.antigenic-cartography.org/barbara/pages/features/
aa-properties.html and https://notebooks.antigenic-cartography.org/barbara/
pages/features/new-tps.html
"""
# Cluster number assigned to each amino acid for each property, keyed by
# 1-letter amino acid code. Values visible here range from 1 to 5; see the
# notebook links in the string above for how the clusters were derived.
PROPERTY_CLUSTERS = {
    'A': {
        'aliphaticity': 1,
        'aromaticity': 1,
        'composition': 1,
        'hydrogenation': 1,
        'hydropathy': 3,
        'hydroxythiolation': 2,
        'iep': 2,
        'polar requirement': 2,
        'polarity': 2,
        'volume': 2,
    },
    'C': {
        'aliphaticity': 1,
        'aromaticity': 1,
        'composition': 3,
        'hydrogenation': 1,
        'hydropathy': 3,
        'hydroxythiolation': 5,
        'iep': 2,
        'polar requirement': 1,
        'polarity': 1,
        'volume': 3,
    },
    'D': {
        'aliphaticity': 1,
        'aromaticity': 1,
        'composition': 2,
        'hydrogenation': 1,
        'hydropathy': 1,
        'hydroxythiolation': 2,
        'iep': 1,
        'polar requirement': 4,
        'polarity': 4,
        'volume': 3,
    },
    'E': {
        'aliphaticity': 1,
        'aromaticity': 1,
        'composition': 1,
        'hydrogenation': 1,
        'hydropathy': 1,
        'hydroxythiolation': 1,
        'iep': 1,
        'polar requirement': 4,
        'polarity': 4,
        'volume': 4,
    },
    'F': {
        'aliphaticity': 1,
        'aromaticity': 2,
        'composition': 1,
        'hydrogenation': 1,
        'hydropathy': 3,
        'hydroxythiolation': 3,
        'iep': 2,
        'polar requirement': 1,
        'polarity': 1,
        'volume': 4,
    },
    'G': {
        'aliphaticity': 1,
        'aromaticity': 1,
        'composition': 1,
        'hydrogenation': 1,
        'hydropathy': 2,
        'hydroxythiolation': 2,
        'iep': 2,
        'polar requirement': 2,
        'polarity': 2,
        'volume': 1,
    },
    'H': {
        'aliphaticity': 1,
        'aromaticity': 2,
        'composition': 1,
        'hydrogenation': 1,
        'hydropathy': 1,
        'hydroxythiolation': 3,
        'iep': 3,
        'polar requirement': 2,
        'polarity': 3,
        'volume': 4,
    },
    'I': {
        'aliphaticity': 1,
        'aromaticity': 1,
        'composition': 1,
        'hydrogenation': 1,
        'hydropathy': 4,
        'hydroxythiolation': 1,
        'iep': 2,
        'polar requirement': 1,
        'polarity': 1,
        'volume': 4,
    },
    'K': {
        'aliphaticity': 1,
        'aromaticity': 1,
        'composition': 1,
        'hydrogenation': 1,
        'hydropathy': 1,
        'hydroxythiolation': 1,
        'iep': 3,
        'polar requirement': 3,
        'polarity': 4,
        'volume': 4,
    },
    'L': {
        'aliphaticity': 1,
        'aromaticity': 1,
        'composition': 1,
        'hydrogenation': 1,
        'hydropathy': 4,
        'hydroxythiolation': 1,
        'iep': 2,
        'polar requirement': 1,
        'polarity': 1,
        'volume': 4,
    },
    'M': {
        'aliphaticity': 1,
        'aromaticity': 1,
        'composition': 1,
        'hydrogenation': 1,
        'hydropathy': 3,
        'hydroxythiolation': 3,
        'iep': 2,
        'polar requirement': 1,
        'polarity': 1,
        'volume': 4,
    },
    'N': {
        'aliphaticity': 1,
        'aromaticity': 1,
        'composition': 2,
        'hydrogenation': 1,
        'hydropathy': 1,
        'hydroxythiolation': 4,
        'iep': 2,
        'polar requirement': 3,
        'polarity': 4,
        'volume': 3,
    },
    'P': {
        'aliphaticity': 1,
        'aromaticity': 1,
        'composition': 1,
        'hydrogenation': 1,
        'hydropathy': 2,
        'hydroxythiolation': 2,
        'iep': 2,
        'polar requirement': 2,
        'polarity': 2,
        'volume': 2,
    },
    'Q': {
        'aliphaticity': 1,
        'aromaticity': 1,
        'composition': 1,
        'hydrogenation': 1,
        'hydropathy': 1,
        'hydroxythiolation': 2,
        'iep': 2,
        'polar requirement': 2,
        'polarity': 3,
        'volume': 4,
    },
    'R': {
        'aliphaticity': 1,
        'aromaticity': 1,
        'composition': 1,
        'hydrogenation': 1,
        'hydropathy': 1,
        'hydroxythiolation': 1,
        'iep': 3,
        'polar requirement': 2,
        'polarity': 3,
        'volume': 4,
    },
    'S': {
        'aliphaticity': 1,
        'aromaticity': 1,
        'composition': 2,
        'hydrogenation': 1,
        'hydropathy': 2,
        'hydroxythiolation': 5,
        'iep': 2,
        'polar requirement': 2,
        'polarity': 2,
        'volume': 2,
    },
    'T': {
        'aliphaticity': 1,
        'aromaticity': 1,
        'composition': 1,
        'hydrogenation': 1,
        'hydropathy': 2,
        'hydroxythiolation': 5,
        'iep': 2,
        'polar requirement': 2,
        'polarity': 2,
        'volume': 3,
    },
    'V': {
        'aliphaticity': 1,
        'aromaticity': 1,
        'composition': 1,
        'hydrogenation': 1,
        'hydropathy': 4,
        'hydroxythiolation': 1,
        'iep': 2,
        'polar requirement': 1,
        'polarity': 1,
        'volume': 4,
    },
    'W': {
        'aliphaticity': 1,
        'aromaticity': 2,
        'composition': 1,
        'hydrogenation': 1,
        'hydropathy': 2,
        'hydroxythiolation': 3,
        'iep': 2,
        'polar requirement': 1,
        'polarity': 1,
        'volume': 5,
    },
    'Y': {
        'aliphaticity': 1,
        'aromaticity': 2,
        'composition': 1,
        'hydrogenation': 1,
        'hydropathy': 2,
        'hydroxythiolation': 4,
        'iep': 2,
        'polar requirement': 1,
        'polarity': 1,
        'volume': 4,
    },
}
class AminoAcid(object):
    """
    Hold information about a single amino acid.

    @param name: The full C{str} name of the amino acid.
    @param abbrev3: The 3-letter C{str} abbreviation of the amino acid,
        e.g., 'Arg'.
    @param abbrev1: The 1-letter C{str} abbreviation of the amino acid,
        e.g., 'A'.
    @param codons: A C{list} of 3-letter codons for the amino acid.
    @param properties: An C{int} logical-AND of the various properties
        (see PROPERTIES, above) of this amino acid.
    @param propertyDetails: A C{dict} mapping (lowercase) property names to
        C{float} values for this amino acid, in the form of one value of
        the PROPERTY_DETAILS C{dict} (see above).
    @param propertyClusters: A C{dict} mapping (lowercase) property names to
        C{int} cluster numbers for this amino acid, in the form of one value
        of the PROPERTY_CLUSTERS C{dict} (see above).
    """
    def __init__(self, name, abbrev3, abbrev1, codons, properties,
                 propertyDetails, propertyClusters):
        # This class is a simple immutable-by-convention record; all
        # arguments are stored as-is.
        self.name = name
        self.abbrev3 = abbrev3
        self.abbrev1 = abbrev1
        self.codons = codons
        self.properties = properties
        self.propertyDetails = propertyDetails
        self.propertyClusters = propertyClusters
def find(s):
    """
    Yield amino acids matching a name, abbreviation, or codon.

    @param s: A C{str} amino acid specifier. This may be a full name,
        a 3-letter abbreviation, a 1-letter abbreviation, or a 3-letter
        codon. Case is ignored.
    @return: A generator that yields matching L{AminoAcid} instances.
    """
    original = s
    # Normalize case: title-case the first word, lower-case the rest.
    if ' ' in s:
        first, remainder = s.split(' ', 1)
        s = first.title() + ' ' + remainder.lower()
    else:
        s = s.title()

    if s in NAMES:
        matched = s
    elif s in ABBREV3_TO_ABBREV1:
        matched = ABBREV3_TO_ABBREV1[s]
    elif s in NAMES_TO_ABBREV1:
        matched = NAMES_TO_ABBREV1[s]
    else:
        # Perhaps the original string is a 3-letter codon.
        matched = None
        target = original.upper()
        for candidate, codons in CODONS.items():
            if target in codons:
                matched = candidate
                break

    if matched:
        candidates = [matched]
    else:
        # Fall back to substring matching against the full names.
        lowered = s.lower()
        candidates = [abbrev for abbrev, fullName in NAMES.items()
                      if lowered in fullName.lower()]

    for abbrev in candidates:
        yield AminoAcid(
            NAMES[abbrev], ABBREV3[abbrev], abbrev, CODONS[abbrev],
            PROPERTIES[abbrev], PROPERTY_DETAILS[abbrev],
            PROPERTY_CLUSTERS[abbrev])
def _propertiesOrClustersForSequence(sequence, propertyNames, propertyValues,
missingAAValue):
"""
Extract amino acid property values or cluster numbers for a sequence.
@param sequence: An C{AARead} (or a subclass) instance.
@param propertyNames: An iterable of C{str} property names (each of which
must be a key of a key in the C{propertyValues} C{dict}).
@param propertyValues: A C{dict} in the form of C{PROPERTY_DETAILS} or
C{PROPERTY_CLUSTERS} (see above).
@param missingAAValue: A C{float} value to use for properties when an AA
(e.g., 'X') is not known.
@raise ValueError: If an unknown property is given in C{propertyNames}.
@return: A C{dict} keyed by (lowercase) property name, with values that are
C{list}s of the corresponding property value in C{propertyValues} in
order of sequence position.
"""
propertyNames = sorted(map(str.lower, set(propertyNames)))
# Make sure all mentioned property names exist for at least one AA.
knownProperties = set()
for names in propertyValues.values():
knownProperties.update(names)
unknown = set(propertyNames) - knownProperties
if unknown:
raise ValueError(
'Unknown propert%s: %s.' %
('y' if len(unknown) == 1 else 'ies', ', '.join(unknown)))
aas = sequence.sequence.upper()
result = {}
for propertyName in propertyNames:
result[propertyName] = values = []
append = values.append
for aa in aas:
try:
properties = propertyValues[aa]
except KeyError:
# No such AA.
append(missingAAValue)
else:
append(properties[propertyName])
return result
def propertiesForSequence(sequence, propertyNames, missingAAValue=-1.1):
    """
    Extract amino acid property values for a sequence.

    This is a thin convenience wrapper that binds the module-level
    C{PROPERTY_DETAILS} table.

    @param sequence: An C{AARead} (or a subclass) instance.
    @param propertyNames: An iterable of C{str} property names (each of which
        must be a key of a key in the C{dark.aa.PROPERTY_DETAILS} C{dict}).
    @param missingAAValue: A C{float} value to use for properties when an AA
        (e.g., 'X') is not known.
    @raise ValueError: If an unknown property is given in C{propertyNames}.
    @return: A C{dict} keyed by (lowercase) property name, with values that
        are C{list}s of the corresponding property value according to
        sequence position.
    """
    return _propertiesOrClustersForSequence(
        sequence, propertyNames, PROPERTY_DETAILS, missingAAValue)
def clustersForSequence(sequence, propertyNames, missingAAValue=0):
    """
    Extract amino acid property cluster numbers for a sequence.

    This is a thin convenience wrapper that binds the module-level
    C{PROPERTY_CLUSTERS} table.

    @param sequence: An C{AARead} (or a subclass) instance.
    @param propertyNames: An iterable of C{str} property names (each of which
        must be a key of a key in the C{dark.aa.PROPERTY_CLUSTERS} C{dict}).
    @param missingAAValue: An C{int} value to use for properties when an AA
        (e.g., 'X') is not known.
    @raise ValueError: If an unknown property is given in C{propertyNames}.
    @return: A C{dict} keyed by (lowercase) property name, with values that
        are C{list}s of the corresponding property cluster number according
        to sequence position.
    """
    return _propertiesOrClustersForSequence(
        sequence, propertyNames, PROPERTY_CLUSTERS, missingAAValue)
def matchToString(aaMatch, read1, read2, indent='', offsets=None):
    """
    Format an amino acid sequence match as a string.

    @param aaMatch: A C{dict} returned by C{compareAaReads}.
    @param read1: A C{Read} instance or an instance of one of its subclasses.
    @param read2: A C{Read} instance or an instance of one of its subclasses.
    @param indent: A C{str} to indent all returned lines with.
    @param offsets: If not C{None}, a C{set} of offsets of interest that were
        only considered when making C{match}.
    @return: A C{str} describing the match.
    """
    match = aaMatch['match']
    if offsets:
        # Only the given offsets were compared, so use their count as the
        # denominator for both reads.
        len1 = len2 = len(offsets)
    else:
        len1 = len(read1)
        len2 = len(read2)

    totalMismatches = (match['gapMismatchCount'] +
                       match['gapGapMismatchCount'] +
                       match['nonGapMismatchCount'])

    # Overall match / mismatch summary.
    lines = [
        countPrint('%sMatches' % indent, match['matchCount'], len1, len2),
        countPrint('%sMismatches' % indent, totalMismatches, len1, len2),
        countPrint('%s Not involving gaps (i.e., conflicts)' % (indent),
                   match['nonGapMismatchCount'], len1, len2),
        countPrint('%s Involving a gap in one sequence' % indent,
                   match['gapMismatchCount'], len1, len2),
        countPrint('%s Involving a gap in both sequences' % indent,
                   match['gapGapMismatchCount'], len1, len2),
    ]

    # Per-read details: id, length, gaps, and trailing extra characters.
    for read, key in zip((read1, read2), ('read1', 'read2')):
        readInfo = aaMatch[key]
        lines.append('%s  Id: %s' % (indent, read.id))
        length = len(read)
        lines.append('%s  Length: %d' % (indent, length))
        gapCount = len(readInfo['gapOffsets'])
        lines.append(countPrint('%s  Gaps' % indent, gapCount, length))
        if gapCount:
            lines.append(
                '%s  Gap locations (1-based): %s' %
                (indent,
                 ', '.join(str(offset + 1)
                           for offset in sorted(readInfo['gapOffsets']))))
        if readInfo['extraCount']:
            lines.append(countPrint('%s  Extra nucleotides at end' % indent,
                                    readInfo['extraCount'], length))

    return '\n'.join(lines)
def compareAaReads(read1, read2, gapChars='-', offsets=None):
    """
    Compare two amino acid sequences.

    @param read1: A C{Read} instance or an instance of one of its subclasses.
    @param read2: A C{Read} instance or an instance of one of its subclasses.
    @param gapChars: An object supporting __contains__ with characters that
        should be considered to be gaps.
    @param offsets: If not C{None}, a C{set} of offsets of interest. Offsets
        not in the set will not be considered.
    @return: A C{dict} with information about the match and the individual
        sequences (see below).
    """
    matchCount = 0
    gapMismatchCount = nonGapMismatchCount = gapGapMismatchCount = 0
    read1ExtraCount = read2ExtraCount = 0
    read1GapOffsets = []
    read2GapOffsets = []

    pairs = zip_longest(read1.sequence.upper(), read2.sequence.upper())
    for offset, (aa1, aa2) in enumerate(pairs):
        # Use 'is not None' so an empty offsets set can be passed.
        if offsets is not None and offset not in offsets:
            continue

        if aa1 is None:
            # read2 has an extra character at its end (it cannot be None).
            assert aa2 is not None
            read2ExtraCount += 1
            if aa2 in gapChars:
                read2GapOffsets.append(offset)
            continue

        if aa2 is None:
            # read1 has an extra character at its end.
            read1ExtraCount += 1
            if aa1 in gapChars:
                read1GapOffsets.append(offset)
            continue

        # A character is present in both sequences (either could still be
        # a gap character).
        gap1 = aa1 in gapChars
        gap2 = aa2 in gapChars
        if gap1:
            read1GapOffsets.append(offset)
        if gap2:
            read2GapOffsets.append(offset)

        if gap1 and gap2:
            # Both are gaps. This can happen (though hopefully not if the
            # sequences were pairwise aligned).
            gapGapMismatchCount += 1
        elif gap1 or gap2:
            # A gap in exactly one sequence.
            gapMismatchCount += 1
        elif aa1 == aa2:
            matchCount += 1
        else:
            nonGapMismatchCount += 1

    return {
        'match': {
            'matchCount': matchCount,
            'gapMismatchCount': gapMismatchCount,
            'gapGapMismatchCount': gapGapMismatchCount,
            'nonGapMismatchCount': nonGapMismatchCount,
        },
        'read1': {
            'extraCount': read1ExtraCount,
            'gapOffsets': read1GapOffsets,
        },
        'read2': {
            'extraCount': read2ExtraCount,
            'gapOffsets': read2GapOffsets,
        },
    }
| |
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
import requests
import os
import sys
"""
Purpose:
Download dicoms from xnat and place them into
a BIDs "like" directory structure.
using the xnat rest API to download dicoms.
see here for xnat REST API documentation: (https://wiki.xnat.org/display/XNAT16/Using+the+XNAT+REST+API)
TODO:
1) better error checking
2) add a log to write events to
3) handle conditionals better
4) find a better way to call the script instead of main()
5) add json descriptors according to BIDs format. (not available in API)
6) revise some ugly formating
7) don't copy if already completed? (or just have user be cognizant?)
8) parallelize the processing stream (e.g. get all the data first, then download)
9) Make main more modular (add more methods/possibly classes)
10) Fix error where if a subject has a alpha character in their name I can't filter the subject.
11) Add conversion script?
12) make the session re-ordering better (ording based on string instead of date/number)
"""
import requests
import os
import sys
# Public API of this module.
__all__ = ['xnat_init_session', 'xnat_query_subjects', 'xnat_query_sessions',
           'xnat_query_scans', 'xnat_query_dicoms',
           'subject_variables_dictionary']
class xnat_init_session(object):
    """
    Hold xnat connection details and establish an authenticated session
    for a particular project.

    @param username: xnat account name.
    @param password: xnat account password.
    @param project: the xnat project id; used to build the REST url root.
    """
    def __init__(self, username, password, project):
        # REST endpoint root for the given project; all other query
        # classes build their urls from this.
        self.url_base = 'https://rpacs.iibi.uiowa.edu/xnat/REST/projects/%s/' % project
        self.username = username
        self.password = password
        self.project = project

    def login(self):
        """
        Authenticate against xnat and store the session cookie.

        On success, sets self.cookie to a dict holding the JSESSIONID so
        subsequent requests can reuse the authenticated session. Note that
        self.cookie is NOT set on failure.

        @return: C{None} on success, C{1} on failure (the error-code
            convention used throughout this script).
        """
        login_query = requests.get(self.url_base,
                                   auth=(self.username, self.password))
        if login_query.ok:
            cookie_info = login_query.cookies.get('JSESSIONID')
            self.cookie = {'JSESSIONID': cookie_info}
        else:
            # Was just print('error'): include the url and HTTP status so
            # bad credentials can be distinguished from a bad endpoint.
            print('login failed for %s (HTTP status %s)'
                  % (self.url_base, login_query.status_code))
            return 1
class xnat_query_subjects(object):
    """
    Retrieve and filter the subject ids of an xnat project.

    @param cookie: dict with the JSESSIONID from xnat_init_session.login.
    @param url_base: the project REST url root.
    @param project: the xnat project id.
    """
    def __init__(self, cookie, url_base, project):
        self.cookie = cookie
        self.url_base = url_base
        self.project = project

    def get_subjects(self):
        """Populate self.subject_ids with {label: 0} for every subject."""
        subject_query = requests.get(self.url_base + 'subjects',
                                     cookies=self.cookie)
        if subject_query.ok:
            subject_json = subject_query.json()
            subject_list_dict = subject_json['ResultSet']['Result']
            self.subject_ids = {x['label']: 0 for x in subject_list_dict}

    def filter_subjects(self, subjects):
        """
        Populate self.filt_subject_ids with the requested subjects that
        xnat actually has data for.

        @param subjects: either the string "ALL" or a list of integer
            subject numbers.

        NOTE(review): int(x) raises ValueError for subject labels that
        contain alpha characters -- known limitation (TODO #10 in the
        module docstring).
        """
        # (Removed an unused 'import re' that previously lived here.)
        if subjects != "ALL":  # the subject list specifies who to download
            missing_xnat_subjects = list(
                set(subjects) - set([int(x) for x in self.subject_ids.keys()]))
            if missing_xnat_subjects:
                self.filt_subject_ids = dict.fromkeys(
                    list(set(subjects) - set(missing_xnat_subjects)))
                print('xnat does not have data for these subjects: %s'
                      % str(missing_xnat_subjects))
            else:
                self.filt_subject_ids = dict.fromkeys(subjects)
        else:
            # Use all the subjects otherwise.
            self.filt_subject_ids = dict.fromkeys(
                [int(x) for x in self.subject_ids.keys()])
class xnat_query_sessions(object):
    """get the sessions from a particular subject"""
    def __init__(self,cookie,url_base,project,subject):
        self.cookie=cookie
        self.url_base=url_base
        self.subject=subject
        self.project=project
    def get_sessions(self,session_labels=None):
        """
        Populate self.session_ids, mapping each user-facing session label
        (e.g. 'pre', 'post') to {xnat_session_label: 0}.

        Sets self.session_ids to False and returns 1 if the xnat session
        labels do not agree with the session dates (sanity check below).
        """
        import re
        session_query = requests.get(self.url_base+'subjects/%s/experiments' % (self.subject), cookies=self.cookie)
        if session_query.ok:
            session_json = session_query.json()
            session_list_dict = session_json['ResultSet']['Result']
            #sort the session list (fix issues where they are uploaded in the wrong order)
            session_list = [session['label'] for session in session_list_dict]
            date_list = [session['date'] for session in session_list_dict]
            # Sanity check: a label like '20150524_1' with the '_<digit>'
            # suffix removed should equal the session date '2015-05-24'
            # with the dashes removed.
            session_list_comp = [re.sub('_[0-9]', '', session) for session in session_list]
            date_list_comp = [session.replace('-','') for session in date_list]
            print(str(session_list_comp))
            print(str(date_list_comp))
            if session_list_comp == date_list_comp:
                print('date check passed')
            else:
                print('mismatch between label and date, exiting')
                self.session_ids = False
                return 1
            session_list.sort()
            if session_labels is not None:
                num_sessions = int(session_json['ResultSet']['totalRecords'])
                num_labels = len(session_labels)
                if num_sessions != num_labels:
                    # Fewer (or more) sessions than labels: pair them up in
                    # sorted order and assume the leading labels apply.
                    print('%s has the wrong number of sessions, expected: %s, found: %s' % (self.subject,str(num_labels),str(num_sessions)))
                    print('getting session info for available sessions (assuming they are in the correct order)')
                    self.session_ids = { sess_label : {session: 0} for sess_label,session in zip(session_labels[0:num_sessions],session_list) }
                else:
                    self.session_ids = { sess_label : {session: 0} for sess_label,session in zip(session_labels,session_list) }
            else:
                #not supported in this script
                self.session_ids = { x['label']: 0 for x in session_list_dict }
    def filter_sessions(self,sessions):
        """Keep only the requested session labels (or all, if "ALL")."""
        #updates the session_ids dictionary
        if sessions != "ALL":
            #find all session that are not a part of the list
            pop_list=list(set(self.session_ids.keys()) - set(sessions))
            for key in pop_list:
                self.session_ids.pop(key) #remove session from analysis
class xnat_query_scans(object):
    """Retrieve the scan listing for one imaging session."""

    def __init__(self, cookie, url_base, project, subject, session):
        self.cookie = cookie
        self.url_base = url_base
        self.subject = subject
        self.session = session
        self.project = project

    def get_scans(self):
        """
        Populate self.scan_ids from the xnat scan listing.

        self.scan_ids maps each scan ID (a number such as 1, 3, or 300) to
        [{series_description}, quality]:
          * series_description is used instead of the scan type so that
            scans sharing a type (e.g. DTI 64 dir versus DTI extra B0)
            remain distinguishable. It is wrapped in a single-element set;
            callers unwrap it with list(...)[0].
          * quality specifies whether the scan is usable.
        """
        response = requests.get(
            self.url_base + 'subjects/%s/experiments/%s/scans/'
            % (self.subject, self.session),
            cookies=self.cookie)
        if response.ok:
            scan_entries = response.json()['ResultSet']['Result']
            self.scan_ids = {}
            for entry in scan_entries:
                self.scan_ids[entry['ID']] = [
                    {str(entry['series_description'])}, entry['quality']]
class xnat_query_dicoms(object):
    """
    Download the dicom files for a single scan.

    @param cookie: dict with the JSESSIONID from xnat_init_session.login.
    @param url_base: the project REST url root.
    @param project: the xnat project id.
    @param subject: the subject label.
    @param session: the xnat session (experiment) label.
    @param scan: the scan ID within the session.
    """
    def __init__(self, cookie, url_base, project, subject, session, scan):
        self.cookie = cookie
        self.url_base = url_base
        # Store project for consistency with the other query classes
        # (it was previously accepted but silently dropped).
        self.project = project
        self.subject = subject
        self.session = session
        self.scan = scan

    def get_dicoms(self, out_dir):
        """
        Download the scan's dicoms as a zip and extract them flat (no
        internal directory structure) into out_dir.

        Flattening technique from:
        http://stackoverflow.com/questions/4917284/extract-files-from-zip-without-keeping-the-structure-using-python-zipfile
        """
        import zipfile
        from io import BytesIO
        import shutil
        dicom_query = requests.get(
            self.url_base
            + 'subjects/%s/experiments/%s/scans/%s/resources/DICOM/files?format=zip'
            % (self.subject, self.session, self.scan),
            cookies=self.cookie)
        if dicom_query.ok:
            # Was: the ZipFile was never closed; use context managers for
            # the archive and for each extracted member.
            with zipfile.ZipFile(BytesIO(dicom_query.content)) as dicom_zip:
                for member in dicom_zip.namelist():
                    filename = os.path.basename(member)
                    if not filename:
                        # Skip directory entries.
                        continue
                    source = dicom_zip.open(member)
                    target = open(os.path.join(out_dir, filename), "wb")
                    with source, target:
                        shutil.copyfileobj(source, target)
class subject_variables_dictionary(object):
    """
    Map subject numbers to extra BIDs label components read from a csv file.

    Each csv row is: subject number, then one or more label fragments,
    e.g. "5,pilot,01". NOTE(review): inferred from how get_bids_var is
    used in this script -- confirm against an actual input file.
    """
    def __init__(self, sub_vars):
        # sub_dict maps subject number (str, not zero-padded) to the list
        # of label fragments for that subject.
        self.sub_dict = {}
        with open(sub_vars) as sub_file:
            for line in sub_file:
                # Was line.strip('\n') (with a "mac os specific" note),
                # which left a trailing '\r' on files with Windows line
                # endings; strip both end-of-line characters.
                sub_entry = line.rstrip('\r\n').split(',')
                self.sub_dict[sub_entry[0]] = sub_entry[1:]

    def get_bids_var(self, sub_num):
        """
        Return the concatenated label fragments for a subject.

        @param sub_num: the subject number as a C{str}, not zero-padded
            (the stored keys are not zero-padded either).
        """
        return "".join(self.sub_dict[sub_num])
def parse_cmdline(args):
    """
    Parse command line arguments.

    @param args: list of argument strings, typically sys.argv[1:].
    @return: the parsed argparse.Namespace (attribute: input_json).
    """
    import argparse
    parser = argparse.ArgumentParser(
        description=(
            'download_xnat.py downloads xnat dicoms and saves them in BIDs compatible directory format'))
    # Required arguments.
    required_group = parser.add_argument_group('Required arguments')
    required_group.add_argument(
        '-i', '--input_json',
        dest='input_json', required=True,
        help='json file defining inputs for this script.')
    return parser.parse_args(args)
def parse_json(json_file):
    """
    Read and validate the json input file for this script.

    @param json_file: path to the json configuration file.
    @return: the parsed C{dict} of options, or C{1} if any mandatory key
        is missing (the error-code convention used throughout this script).
    """
    import json
    with open(json_file) as json_input:
        config = json.load(json_input)

    required = ['username', 'scan_dict', 'dcm_dir', 'sessions',
                'session_labels', 'project', 'subjects', 'scans']
    optional = ['subject_variables_csv', 'zero_pad', 'nii_dir']
    supported = required + optional
    print("total_keys: " + str(supported))

    # Unrecognized options are reported but non-fatal.
    unrecognized = list(set(config.keys()) - set(supported))
    if unrecognized:
        print('option(s) not supported: %s' % str(unrecognized))

    # Missing mandatory options are fatal.
    absent = list(set(required) - set(config.keys()))
    if absent:
        print('option(s) need to be specified in input file: %s' % str(absent))
        return 1
    return config
def run_xnat():
    import getpass
    """Command line entry point."""
    # Parse the command line and load the json configuration.
    args = parse_cmdline(sys.argv[1:])
    input_dict = parse_json(args.input_json)
    #assign variables to save space
    username = input_dict['username']
    project = input_dict['project']
    subjects = input_dict['subjects']
    session_labels = input_dict['session_labels']
    sessions = input_dict['sessions']
    scans = input_dict['scans']
    scan_dict = input_dict['scan_dict']
    dcm_dir = input_dict['dcm_dir']
    #optional entries
    sub_vars = input_dict.get('subject_variables_csv', False)
    BIDs_num_length = input_dict.get('zero_pad', False)
    nii_dir = input_dict.get('nii_dir', False) #not sure if this is needed
    #make the BIDs subject dictionary
    # NOTE(review): if sub_vars is False, os.path.isfile(False) stats file
    # descriptor 0 rather than raising -- confirm this is intentional.
    if os.path.isfile(sub_vars):
        sub_vars_dict = subject_variables_dictionary(sub_vars)
        print('sub_vars_dict')
        print(str(sub_vars_dict.sub_dict))
    #get the password
    password = getpass.getpass()
    #create my session for xnat
    xnat_session = xnat_init_session(username,password,project)
    #log in to my session
    xnat_session.login()
    #get the list of subjects
    subject_query = xnat_query_subjects(xnat_session.cookie,xnat_session.url_base,project)
    #gives the object subject_query the dictionary subject_ids
    subject_query.get_subjects()
    #gives the object subject_query the dictionary filt_subject_ids
    subject_query.filter_subjects(subjects)
    #assign subjects the filtered dictionary
    subjects = subject_query.filt_subject_ids
    #number to use to name BIDS outdir (e.g. 005 instead of 5)
    # Default: pad to the width of the longest subject number present.
    if not BIDs_num_length:
        BIDs_num_length = len(max([str(x) for x in list(subjects)],key=len))
    # Walk subjects -> sessions -> scans, downloading dicoms for each
    # usable scan whose series description appears in scan_dict.
    for subject in subjects:
        #workaround for xnat session closing
        #xnat_session.logout()
        #xnat_session.login()
        #^^potentially not necessary, test first
        session_query = xnat_query_sessions(xnat_session.cookie,xnat_session.url_base,project,subject)
        if session_labels == "None":
            print('no session labels, assuming there is only one session')
            session_labels_dummy=['dummy_session']
            session_query.get_sessions(session_labels_dummy)
        else:
            session_query.get_sessions(session_labels)
        #check to see if dictionary is empty
        if not bool(session_query.session_ids):
            #skip if there are no sessions
            continue
        #filtering the sessions
        session_query.filter_sessions(sessions)
        subject_sessions = session_query.session_ids
        #update the master subject dictionary
        subjects[subject] = subject_sessions
        for session in subject_sessions: #where session is pre, post, etc
            #getting the session folder name in xnat (e.g. 20150524)
            session_date = list(subject_sessions[session].keys())[0]
            #scan_query object
            scan_query = xnat_query_scans(xnat_session.cookie,xnat_session.url_base,project,subject,session_date)
            #makes a dictionary of scan ids
            scan_query.get_scans()
            subject_sessions[session] = scan_query.scan_ids
            for scan in scan_query.scan_ids:
                #scan names are listed as a set type for some reason...
                #making it a list type to access scan name as a string.
                scan_name=list(scan_query.scan_ids[scan][0])[0]
                scan_usable=list(scan_query.scan_ids[scan])[1]
                #check to see if you defined the scan name (equivalent to scan type in
                # the REST API in the input json file)
                if scan_name in list(scan_dict) and scan_usable == 'usable':
                    # scan_dict maps series description -> [BIDs modality
                    # directory (e.g. func), BIDs filename suffix].
                    BIDs_scan = scan_dict[scan_name][0]
                    BIDs_scan_suffix = scan_dict[scan_name][1]
                    BIDs_subject=str(subject).zfill(BIDs_num_length)
                    print('sub_vars: '+str(sub_vars))
                    if os.path.isfile(sub_vars):
                        print('made it here!')
                        # Prefix the padded number with the subject's extra
                        # label fragments from the csv.
                        BIDs_subject_info = sub_vars_dict.get_bids_var(str(subject))
                        print('BIDs_subject_info: '+BIDs_subject_info)
                        BIDs_subject = "".join([BIDs_subject_info,BIDs_subject])
                        print('BIDs_subject: '+BIDs_subject)
                    scan_name_no_spaces = scan_name.replace(" ","_")
                    # Build the BIDs-like output path (with or without a
                    # ses- level, depending on session_labels).
                    if session_labels == "None":
                        print('Downloading Dicoms[subject: %s, scan %s' % (str(subject), scan_name))
                        #sub_dir = 'sub-%s/%s/%s_%s' % (BIDs_subject, BIDs_scan, scan, scan_name_no_spaces)
                        sub_dir = 'sub-%s/%s/sub-%s_%s' % (BIDs_subject, BIDs_scan, BIDs_subject, BIDs_scan_suffix)
                    else:
                        print('Downloading Dicoms[subject: %s, session: %s, scan %s' % (str(subject), session, scan_name))
                        #sub_dir = 'sub-%s/ses-%s/%s/%s_%s' % (BIDs_subject, session, BIDs_scan, scan, scan_name_no_spaces)
                        sub_dir = 'sub-%s/ses-%s/%s/sub-%s_ses-%s_%s' % (BIDs_subject, session, BIDs_scan, BIDs_subject, session, BIDs_scan_suffix)
                    out_dir = os.path.join(dcm_dir,sub_dir)
                    print('out_dir: '+str(out_dir))
                    if not os.path.exists(out_dir):
                        os.makedirs(out_dir)
                    # Only download if the output directory is empty.
                    dicoms = os.listdir(out_dir)
                    if not dicoms:
                        dicom_query = xnat_query_dicoms(xnat_session.cookie,xnat_session.url_base,project,subject,session_date,scan)
                        dicom_query.get_dicoms(out_dir)
                    else:
                        print('dicoms exist: not downloading')
                    # Conversion option here.
                    # convert_to_nifti(nii_dir,dcm_dir,sub_dir)
#def convert_to_nifti(nii_dir,dcm_dir,sub_dir):
if __name__ == "__main__":
    # Script entry point: run the full XNAT download / BIDS-naming pipeline.
    import sys  # NOTE(review): `sys` appears unused in this block -- confirm run_xnat() does not depend on a module-level `sys` before removing
    run_xnat()
| |
# -*- coding: utf-8 -*-
"""The task-based multi-process processing engine."""
import os
import shutil
import tempfile
import redis
from plaso.lib import definitions
from plaso.multi_process import engine
from plaso.storage import factory as storage_factory
from plaso.storage.redis import redis_store
class TaskMultiProcessEngine(engine.MultiProcessEngine):
  """Task-based multi-process engine base.

  This class contains functionality to:
  * manage task storage used to store task results.
  """

  # pylint: disable=abstract-method

  def __init__(self):
    """Initializes a task-based multi-process engine."""
    super(TaskMultiProcessEngine, self).__init__()
    self._merge_task_storage_path = None
    self._processing_configuration = None
    self._processed_task_storage_path = None
    self._redis_client = None
    self._storage_file_path = None
    self._task_storage_path = None

  # TODO: remove, currently only used by psort.
  def _CheckTaskReadyForMerge(self, task_storage_format, task):
    """Checks if a task is ready for merging with this session storage.

    The size of the SQLite task storage file is determined on a best-effort
    basis: if it cannot be determined the task is reported as not ready
    instead of raising.

    Args:
      task_storage_format (str): storage format used to store task results.
      task (Task): task the storage changes are part of.

    Returns:
      bool: True if the task is ready to be merged.
    """
    if task_storage_format == definitions.STORAGE_FORMAT_SQLITE:
      processed_storage_file_path = self._GetProcessedStorageFilePath(task)

      try:
        stat_info = os.stat(processed_storage_file_path)
      except (IOError, OSError):
        return False

      task.storage_file_size = stat_info.st_size
      return True

    return False

  def _GetMergeTaskStorage(self, task_storage_format, task):
    """Retrieves a task store ready to be merged with the session store.

    Args:
      task_storage_format (str): storage format used to store task results.
      task (Task): task the storage changes are part of.

    Returns:
      StorageReader: storage reader of the task storage.

    Raises:
      IOError: if the temporary path for the task storage does not exist or
          if the temporary path for the task storage does not refer to a file.
      OSError: if the temporary path for the task storage does not exist or
          if the temporary path for the task storage does not refer to a file.
    """
    merge_storage_file_path = self._GetMergeTaskStorageFilePath(
        task_storage_format, task)

    if task_storage_format == definitions.STORAGE_FORMAT_SQLITE:
      if not self._merge_task_storage_path:
        raise IOError('Missing merge task storage path.')

      if not os.path.isfile(merge_storage_file_path):
        raise IOError('Merge task storage path is not a file.')

    task_storage_reader = (
        storage_factory.StorageFactory.CreateTaskStorageReader(
            task_storage_format, task, merge_storage_file_path))
    task_storage_reader.SetStorageProfiler(self._storage_profiler)
    return task_storage_reader

  def _GetMergeTaskStorageRedisHashName(self, task):
    """Retrieves the Redis hash name of a task store that should be merged.

    Args:
      task (Task): task the storage changes are part of.

    Returns:
      str: Redis hash name of a task store.
    """
    return '{0:s}-merge'.format(task.session_identifier)

  def _GetMergeTaskStorageFilePath(self, task_storage_format, task):
    """Retrieves the path of a task storage file in the merge directory.

    Args:
      task_storage_format (str): storage format used to store task results.
      task (Task): task the storage changes are part of.

    Returns:
      str: path of a task storage file in the merge directory or None if
          not set.
    """
    if task_storage_format == definitions.STORAGE_FORMAT_SQLITE:
      filename = '{0:s}.plaso'.format(task.identifier)
      return os.path.join(self._merge_task_storage_path, filename)

    return None

  def _GetProcessedRedisHashName(self, session_identifier):
    """Retrieves the Redis hash name of a processed task store.

    Args:
      session_identifier (str): the identifier of the session the tasks are
          part of.

    Returns:
      str: Redis hash name of a task store.
    """
    return '{0:s}-processed'.format(session_identifier)

  def _GetProcessedStorageFilePath(self, task):
    """Retrieves the path of a task storage file in the processed directory.

    Args:
      task (Task): task the storage changes are part of.

    Returns:
      str: path of a task storage file in the processed directory.
    """
    filename = '{0:s}.plaso'.format(task.identifier)
    return os.path.join(self._processed_task_storage_path, filename)

  def _GetProcessedTaskIdentifiers(
      self, task_storage_format, session_identifier):
    """Identifiers for tasks which have been processed.

    Args:
      task_storage_format (str): storage format used to store task results.
      session_identifier (str): the identifier of the session the tasks are
          part of.

    Returns:
      list[str]: task identifiers that are processed.

    Raises:
      IOError: if the temporary path for the task storage does not exist.
      OSError: if the temporary path for the task storage does not exist.
    """
    # Initialize up front so an unsupported storage format yields an empty
    # list instead of an UnboundLocalError at the return statement.
    task_identifiers = []

    if task_storage_format == definitions.STORAGE_FORMAT_REDIS:
      redis_hash_name = self._GetProcessedRedisHashName(session_identifier)

      try:
        task_identifiers = self._redis_client.hkeys(redis_hash_name)
        # Redis returns bytes; normalize to str identifiers.
        task_identifiers = [
            identifier.decode('utf-8') for identifier in task_identifiers]
      except redis.exceptions.TimeoutError:
        # If there is a timeout fetching identifiers, we assume that there are
        # no processed tasks.
        task_identifiers = []

    elif task_storage_format == definitions.STORAGE_FORMAT_SQLITE:
      if not self._processed_task_storage_path:
        raise IOError('Missing processed task storage path.')

      task_identifiers = [
          path.replace('.plaso', '')
          for path in os.listdir(self._processed_task_storage_path)]

    return task_identifiers

  def _PrepareMergeTaskStorage(self, task_storage_format, task):
    """Prepares a task storage for merging.

    Moves the task storage file from the processed directory to the merge
    directory.

    Args:
      task_storage_format (str): storage format used to store task results.
      task (Task): task the storage changes are part of.

    Raises:
      IOError: if the SQLite task storage file cannot be renamed.
      OSError: if the SQLite task storage file cannot be renamed.
    """
    if task_storage_format == definitions.STORAGE_FORMAT_REDIS:
      # TODO: use number of attribute containers instead of file size?
      task.storage_file_size = 1000

      redis_hash_name = self._GetProcessedRedisHashName(task.session_identifier)
      number_of_results = self._redis_client.hdel(
          redis_hash_name, task.identifier)
      if number_of_results == 0:
        raise IOError('Task identifier {0:s} was not processed'.format(
            task.identifier))

      redis_hash_name = self._GetMergeTaskStorageRedisHashName(task)
      # TODO: set timestamp as value.
      self._redis_client.hset(
          redis_hash_name, key=task.identifier, value=b'true')

    elif task_storage_format == definitions.STORAGE_FORMAT_SQLITE:
      merge_storage_file_path = self._GetMergeTaskStorageFilePath(
          task_storage_format, task)
      processed_storage_file_path = self._GetProcessedStorageFilePath(task)

      task.storage_file_size = os.path.getsize(processed_storage_file_path)

      try:
        os.rename(processed_storage_file_path, merge_storage_file_path)
      except OSError as exception:
        raise IOError((
            'Unable to rename task storage file: {0:s} with error: '
            '{1!s}').format(processed_storage_file_path, exception))

  def _RemoveMergeTaskStorage(self, task_storage_format, task):
    """Removes a merge task storage.

    Args:
      task_storage_format (str): storage format used to store task results.
      task (Task): task the storage changes are part of.

    Raises:
      IOError: if a SQLite task storage file cannot be removed.
      OSError: if a SQLite task storage file cannot be removed.
    """
    if task_storage_format == definitions.STORAGE_FORMAT_REDIS:
      redis_hash_pattern = '{0:s}-{1:s}-*'.format(
          task.session_identifier, task.identifier)
      for redis_hash_name in self._redis_client.keys(redis_hash_pattern):
        self._redis_client.delete(redis_hash_name)

    elif task_storage_format == definitions.STORAGE_FORMAT_SQLITE:
      merge_storage_file_path = self._GetMergeTaskStorageFilePath(
          task_storage_format, task)

      try:
        os.remove(merge_storage_file_path)
      except OSError as exception:
        raise IOError((
            'Unable to remove merge task storage file: {0:s} with error: '
            '{1!s}').format(merge_storage_file_path, exception))

  def _RemoveProcessedTaskStorage(self, task_storage_format, task):
    """Removes a processed task storage.

    Args:
      task_storage_format (str): storage format used to store task results.
      task (Task): task the storage changes are part of.

    Raises:
      IOError: if a SQLite task storage file cannot be removed.
      OSError: if a SQLite task storage file cannot be removed.
    """
    if task_storage_format == definitions.STORAGE_FORMAT_REDIS:
      redis_hash_pattern = '{0:s}-{1:s}-*'.format(
          task.session_identifier, task.identifier)
      for redis_hash_name in self._redis_client.keys(redis_hash_pattern):
        self._redis_client.delete(redis_hash_name)

    elif task_storage_format == definitions.STORAGE_FORMAT_SQLITE:
      processed_storage_file_path = self._GetProcessedStorageFilePath(task)

      try:
        os.remove(processed_storage_file_path)
      except OSError as exception:
        raise IOError((
            'Unable to remove processed task storage file: {0:s} with error: '
            '{1!s}').format(processed_storage_file_path, exception))

  def _StartTaskStorage(self, task_storage_format):
    """Starts the task storage.

    Args:
      task_storage_format (str): storage format used to store task results.

    Raises:
      IOError: if the temporary path for the SQLite task storage already
          exists.
      OSError: if the temporary path for the SQLite task storage already
          exists.
    """
    if task_storage_format == definitions.STORAGE_FORMAT_REDIS:
      url = redis_store.RedisStore.DEFAULT_REDIS_URL
      self._redis_client = redis.from_url(url=url, socket_timeout=60)
      self._redis_client.client_setname('task_engine')

    elif task_storage_format == definitions.STORAGE_FORMAT_SQLITE:
      if self._task_storage_path:
        raise IOError('SQLite task storage path already exists.')

      output_directory = os.path.dirname(self._storage_file_path)
      self._task_storage_path = tempfile.mkdtemp(dir=output_directory)

      self._merge_task_storage_path = os.path.join(
          self._task_storage_path, 'merge')
      os.mkdir(self._merge_task_storage_path)

      self._processed_task_storage_path = os.path.join(
          self._task_storage_path, 'processed')
      os.mkdir(self._processed_task_storage_path)

      self._processing_configuration.task_storage_path = self._task_storage_path

  def _StopTaskStorage(
      self, task_storage_format, session_identifier, abort=False):
    """Stops the task storage.

    The results of tasks will be lost on abort.

    Args:
      task_storage_format (str): storage format used to store task results.
      session_identifier (str): the identifier of the session the tasks are
          part of.
      abort (Optional[bool]): True to indicate the stop is issued on abort.
    """
    if task_storage_format == definitions.STORAGE_FORMAT_REDIS:
      redis_hash_pattern = '{0:s}-*'.format(session_identifier)
      for redis_hash_name in self._redis_client.keys(redis_hash_pattern):
        self._redis_client.delete(redis_hash_name)

      self._redis_client = None

    elif task_storage_format == definitions.STORAGE_FORMAT_SQLITE:
      # Guard against the paths never having been set up, in which case
      # os.path.isdir(None) would raise TypeError.
      if (self._merge_task_storage_path and
          os.path.isdir(self._merge_task_storage_path)):
        if abort:
          shutil.rmtree(self._merge_task_storage_path)
        else:
          os.rmdir(self._merge_task_storage_path)

      if (self._processed_task_storage_path and
          os.path.isdir(self._processed_task_storage_path)):
        if abort:
          shutil.rmtree(self._processed_task_storage_path)
        else:
          os.rmdir(self._processed_task_storage_path)

      if self._task_storage_path and os.path.isdir(self._task_storage_path):
        if abort:
          shutil.rmtree(self._task_storage_path)
        else:
          os.rmdir(self._task_storage_path)

      self._merge_task_storage_path = None
      self._processed_task_storage_path = None
      self._task_storage_path = None

      self._processing_configuration.task_storage_path = None
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
DLFramework is a framework to consolidate methods used throughout all
Indigo plugins with the com.fogbert.indigoPlugin.xxxx bundle identifier.
.
"""
import ast
import logging
import operator as op
import os
import platform
import sys
# import traceback
try:
import indigo
except ImportError:
pass
__author__ = "DaveL17"
__build__ = "Unused"
__copyright__ = "Copyright 2017-2020 DaveL17"
__license__ = "MIT"
__title__ = "DLFramework"
__version__ = "0.1.04"
class Fogbert(object):
    """
    Consolidated helper methods shared across the author's Indigo plugins
    (logging setup, environment reporting, traceback handling and UI list
    generators).
    """

    def __init__(self, plugin):
        """
        Store a reference to the calling plugin and install the standard
        DLFramework log-file formatter.

        :param plugin: the calling Indigo plugin instance.
        """
        self.plugin = plugin
        self.plugin.debugLog(u"Initializing DLFramework...")
        self.pluginPrefs = plugin.pluginPrefs
        log_format = '%(asctime)s.%(msecs)03d\t%(levelname)-10s\t%(name)s.%(funcName)-28s %(msg)s'
        self.plugin.plugin_file_handler.setFormatter(logging.Formatter(fmt=log_format, datefmt='%Y-%m-%d %H:%M:%S'))

    def pluginEnvironment(self):
        """
        The pluginEnvironment method prints selected information about the
        pluginEnvironment that the plugin is running in. It pulls some of this
        information from the calling plugin and some from the server
        pluginEnvironment. It uses the legacy "indigo.server.log" method to
        write to the log.
        """
        self.plugin.debugLog(u"DLFramework pluginEnvironment method called.")
        indigo.server.log(u"")
        indigo.server.log(u"{0:{1}^135}".format(" Initializing New Plugin Session ", "="))
        indigo.server.log(u"{0:<31} {1}".format("Plugin name:", self.plugin.pluginDisplayName))
        indigo.server.log(u"{0:<31} {1}".format("Plugin version:", self.plugin.pluginVersion))
        indigo.server.log(u"{0:<31} {1}".format("Plugin ID:", self.plugin.pluginId))
        indigo.server.log(u"{0:<31} {1}".format("Indigo version:", indigo.server.version))
        indigo.server.log(u"{0:<31} {1}".format("Python version:", sys.version.replace('\n', '')))
        indigo.server.log(u"{0:<31} {1}".format("Mac OS Version:", platform.mac_ver()[0]))
        indigo.server.log(u"{0:<31} {1}".format("Process ID:", os.getpid()))
        indigo.server.log(u"{0:{1}^135}".format("", "="))

    def pluginEnvironmentLogger(self):
        """
        The pluginEnvironmentLogger method prints selected information about
        the pluginEnvironment that the plugin is running in. It pulls some of
        this information from the calling plugin and some from the server
        pluginEnvironment. This method differs from the pluginEnvironment
        method in that it leverages Indigo's logging hooks using the Python
        Logger framework.
        """
        self.plugin.logger.debug(u"DLFramework pluginEnvironment method called.")
        self.plugin.logger.info(u"")
        self.plugin.logger.info(u"{0:{1}^135}".format(" Initializing New Plugin Session ", "="))
        self.plugin.logger.info(u"{0:<31} {1}".format("Plugin name:", self.plugin.pluginDisplayName))
        self.plugin.logger.info(u"{0:<31} {1}".format("Plugin version:", self.plugin.pluginVersion))
        self.plugin.logger.info(u"{0:<31} {1}".format("Plugin ID:", self.plugin.pluginId))
        self.plugin.logger.info(u"{0:<31} {1}".format("Indigo version:", indigo.server.version))
        self.plugin.logger.info(u"{0:<31} {1}".format("Python version:", sys.version.replace('\n', '')))
        self.plugin.logger.info(u"{0:<31} {1}".format("Mac OS Version:", platform.mac_ver()[0]))
        self.plugin.logger.info(u"{0:<31} {1}".format("Process ID:", os.getpid()))
        self.plugin.logger.info(u"{0:{1}^135}".format("", "="))

    # =============================================================================
    def pluginErrorHandler(self, sub_error):
        """
        Centralized handling of traceback messages

        Centralized handling of traceback messages formatted for pretty display in the
        plugin log file. If sent here, they will not be displayed in the Indigo Events
        log. Use the following syntax to send exceptions here::

            self.pluginErrorHandler(traceback.format_exc())

        -----
        :param str sub_error: traceback text, e.g. from traceback.format_exc()
        """
        sub_error = sub_error.splitlines()
        self.plugin.logger.critical(u"{0:!^80}".format(" TRACEBACK "))
        for line in sub_error:
            self.plugin.logger.critical(u"!!! {0}".format(line))
        self.plugin.logger.critical(u"!" * 80)

    def convertDebugLevel(self, debug_val):
        """
        The convertDebugLevel method is used to standardize the various implementations
        of debug level settings across plugins. Its main purpose is to convert an old
        string-based setting to account for older plugin versions. Over time, this
        method will become obsolete and should be deprecated.

        :param debug_val: legacy "High"/"Medium"/"Low" string or an int level.
        :return: int debug level (3/2/1) for legacy strings, else the input unchanged.
        """
        self.plugin.debugLog(u"DLFramework convertDebugLevel method called.")
        # If the debug value is High/Medium/Low, it is the old style. Convert it to 3/2/1
        if debug_val in ["High", "Medium", "Low"]:
            if debug_val == "High":
                return 3
            elif debug_val == "Medium":
                return 2
            else:
                return 1
        return debug_val

    def deviceList(self, dev_filter=None):
        """
        Returns a list of tuples containing Indigo devices for use in
        config dialogs (etc.)

        :return: [(ID, "Name"), (ID, "Name")]
        """
        devices_list = [('None', 'None')]
        # Plain loop rather than a side-effect list comprehension.
        for dev in indigo.devices.iter(dev_filter):
            devices_list.append((dev.id, dev.name))
        return devices_list

    def deviceListEnabled(self, dev_filter=None):
        """
        Returns a list of tuples containing Indigo devices for use in
        config dialogs (etc.) Returns enabled devices only.

        :return: [(ID, "Name"), (ID, "Name")]
        """
        devices_list = [('None', 'None')]
        for dev in indigo.devices.iter(dev_filter):
            if dev.enabled:
                devices_list.append((dev.id, dev.name))
        return devices_list

    def variableList(self):
        """
        Returns a list of tuples containing Indigo variables for use in
        config dialogs (etc.)

        :return: [(ID, "Name"), (ID, "Name")]
        """
        variable_list = [('None', 'None')]
        for var in indigo.variables:
            variable_list.append((var.id, var.name))
        return variable_list

    def deviceAndVariableList(self):
        """
        Returns a list of tuples containing Indigo devices and variables
        for use in config dialogs (etc.)

        :return: [(ID, "(D) Name"), (ID, "(V) Name")]
        """
        devices_and_variables_list = []
        for dev in indigo.devices:
            devices_and_variables_list.append((dev.id, u"(D) {0}".format(dev.name)))
        for var in indigo.variables:
            devices_and_variables_list.append((var.id, u"(V) {0}".format(var.name)))
        devices_and_variables_list.append(('-1', '%%separator%%'))
        devices_and_variables_list.append(('None', 'None'))
        return devices_and_variables_list

    def launchWebPage(self, launch_url):
        """
        The launchWebPage method is used to direct a call to the registered
        default browser and open the page referenced by the parameter 'URL'.
        """
        import webbrowser
        webbrowser.open(url=launch_url)

    def generatorStateOrValue(self, dev_id):
        """The generatorStateOrValue() method returns a list to populate the relevant
        device states or variable value to populate a menu control."""
        default = [(0, 'Pick a Device or Variable')]
        try:
            id_number = int(dev_id)
            if id_number in indigo.devices.keys():
                state_list = [(state, state) for state in indigo.devices[id_number].states if not state.endswith('.ui')]
                if ('onOffState', 'onOffState') in state_list:
                    state_list.remove(('onOffState', 'onOffState'))
                return state_list
            elif id_number in indigo.variables.keys():
                return [('value', 'Value')]
            # Fix: previously fell through and implicitly returned None when the
            # id matched neither a device nor a variable.
            return default
        except (KeyError, ValueError):
            return default

    def audit_server_version(self, min_ver):
        # =========================== Audit Indigo Version ============================
        # Stop the plugin if the Indigo server major version is below min_ver.
        ver = self.plugin.versStrToTuple(indigo.server.version)
        if ver[0] < min_ver:
            self.plugin.stopPlugin(u"This plugin requires Indigo version {0} or above.".format(min_ver), isError=True)
        self.plugin.debug(u"Indigo server version OK.")

    def audit_os_version(self, min_ver):
        # =========================== Audit Operating System Version ============================
        # Stop the plugin if the host is older than macOS 10.<min_ver>.
        ver = platform.mac_ver()[0].split('.')
        # Fix: on macOS 11+ the *major* version advanced past 10, so comparing
        # only the minor component wrongly failed (e.g. "11.0" has minor 0) and
        # a bare "11" would raise IndexError.
        major = int(ver[0])
        minor = int(ver[1]) if len(ver) > 1 else 0
        if major < 11 and minor < min_ver:
            self.plugin.stopPlugin(u"This plugin requires Mac OS version 10.{0} or above.".format(min_ver),
                                   isError=True)
        self.plugin.logger.debug(u"OS X version OK.")
class Formatter(object):
    """
    Supplies custom data-format helpers derived from the user's plugin
    preference settings.
    """

    def __init__(self, plugin):
        self.plugin = plugin
        self.pluginPrefs = plugin.pluginPrefs

    def dateFormat(self):
        """
        Translate the user's configured date display preference into a
        valid datetime() format specifier.
        """
        formats = {
            'DD-MM-YYYY': '%d-%m-%Y',
            'MM-DD-YYYY': '%m-%d-%Y',
            'YYYY-MM-DD': '%Y-%m-%d',
        }
        return formats[self.pluginPrefs['uiDateFormat']]

    def timeFormat(self):
        """
        Translate the user's configured time display preference into a
        valid datetime() format specifier.
        """
        formats = {
            'military': '%H:%M',
            'standard': '%I:%M',
            'standard_am_pm': '%I:%M %p',
        }
        return formats[self.pluginPrefs['uiTimeFormat']]
class evalExpr(object):
    """
    Safely evaluate mathematical expressions passed as strings and return a
    numerical result.

    This code is licensed under an MIT-compatible license.
    credit: jfs @ https://stackoverflow.com/a/9558001/2827397
    """

    def __init__(self, plugin):
        self.plugin = plugin
        self.pluginPrefs = plugin.pluginPrefs
        # Map AST operator node types to the corresponding arithmetic functions.
        self.operators = {
            ast.Add: op.add,
            ast.Sub: op.sub,
            ast.Mult: op.mul,
            ast.Div: op.truediv,
            ast.Pow: op.pow,
            ast.BitXor: op.xor,
            ast.USub: op.neg,
        }

    def eval_expr(self, expr):
        """Parse *expr* and evaluate the resulting expression tree."""
        parsed = ast.parse(expr, mode='eval')
        return self.eval_(parsed.body)

    def eval_(self, node):
        """Recursively evaluate one AST node; only numbers and the supported
        operators are allowed."""
        if isinstance(node, ast.Num):  # <number>
            return node.n
        if isinstance(node, ast.BinOp):  # <left> <operator> <right>
            apply_op = self.operators[type(node.op)]
            return apply_op(self.eval_(node.left), self.eval_(node.right))
        if isinstance(node, ast.UnaryOp):  # <operator> <operand> e.g., -1
            apply_op = self.operators[type(node.op)]
            return apply_op(self.eval_(node.operand))
        raise TypeError(node)
| |
import numpy as np
from ..helpers import *
import tempfile
import pytest
from hail.utils.java import FatalError, HailUserError
def assert_ndarrays(asserter, exprs_and_expecteds):
    """Evaluate every expression in a single hl.eval pass and compare each
    result to its expected value with ``asserter``."""
    exprs, expecteds = zip(*exprs_and_expecteds)
    evaled_exprs = hl.eval(hl.tuple(exprs))
    for idx, (evaled, expected) in enumerate(zip(evaled_exprs, expecteds)):
        assert asserter(evaled, expected), f"NDArray comparison {idx} failed, got: {evaled}, expected: {expected}"
def assert_ndarrays_eq(*expr_and_expected):
    """Exact-equality variant of assert_ndarrays."""
    assert_ndarrays(np.array_equal, expr_and_expected)
def assert_ndarrays_almost_eq(*expr_and_expected):
    """Approximate-equality (np.allclose) variant of assert_ndarrays."""
    assert_ndarrays(np.allclose, expr_and_expected)
def test_ndarray_ref():
    """Indexing into Hail ndarrays matches numpy semantics and propagates
    missingness; out-of-bounds indexing raises a user error."""
    scalar = 5.0
    np_scalar = np.array(scalar)
    h_scalar = hl.nd.array(scalar)
    h_np_scalar = hl.nd.array(np_scalar)
    # 0-dimensional ndarrays are indexed with the empty tuple.
    assert_evals_to(h_scalar[()], 5.0)
    assert_evals_to(h_np_scalar[()], 5.0)
    cube = [[[0, 1],
             [2, 3]],
            [[4, 5],
             [6, 7]]]
    h_cube = hl.nd.array(cube)
    h_np_cube = hl.nd.array(np.array(cube))
    missing = hl.nd.array(hl.null(hl.tarray(hl.tint32)))
    assert_all_eval_to(
        (h_cube[0, 0, 1], 1),
        (h_cube[1, 1, 0], 6),
        (h_np_cube[0, 0, 1], 1),
        (h_np_cube[1, 1, 0], 6),
        (hl.nd.array([[[[1]]]])[0, 0, 0, 0], 1),
        (hl.nd.array([[[1, 2]], [[3, 4]]])[1, 0, 0], 3),
        # Indexing a missing ndarray, or with a missing index, yields missing.
        (missing[1], None),
        (hl.nd.array([1, 2, 3])[hl.null(hl.tint32)], None),
        (h_cube[0, 0, hl.null(hl.tint32)], None)
    )
    with pytest.raises(HailUserError) as exc:
        hl.eval(hl.nd.array([1, 2, 3])[4])
    assert "Index 4 is out of bounds for axis 0 with size 3" in str(exc)
def test_ndarray_slice():
    """Slicing Hail ndarrays agrees with numpy, including negative indices,
    negative/oversized steps, out-of-bounds bounds, missing slice components,
    and error cases (zero step, out-of-range point index)."""
    np_rect_prism = np.arange(24).reshape((2, 3, 4))
    rect_prism = hl.nd.array(np_rect_prism)
    np_mat = np.arange(8).reshape((2, 4))
    mat = hl.nd.array(np_mat)
    np_flat = np.arange(20)
    flat = hl.nd.array(np_flat)
    a = [0, 1]
    an = np.array(a)
    ah = hl.nd.array(a)
    assert_ndarrays_eq(
        (rect_prism[:, :, :], np_rect_prism[:, :, :]),
        (rect_prism[:, :, 1], np_rect_prism[:, :, 1]),
        (rect_prism[0:1, 1:3, 0:2], np_rect_prism[0:1, 1:3, 0:2]),
        (rect_prism[:, :, 1:4:2], np_rect_prism[:, :, 1:4:2]),
        (rect_prism[:, 2, 1:4:2], np_rect_prism[:, 2, 1:4:2]),
        (rect_prism[0, 2, 1:4:2], np_rect_prism[0, 2, 1:4:2]),
        (rect_prism[0, :, 1:4:2] + rect_prism[:, :1, 1:4:2],
         np_rect_prism[0, :, 1:4:2] + np_rect_prism[:, :1, 1:4:2]),
        (rect_prism[0:, :, 1:4:2] + rect_prism[:, :1, 1:4:2],
         np_rect_prism[0:, :, 1:4:2] + np_rect_prism[:, :1, 1:4:2]),
        (rect_prism[0, 0, -3:-1], np_rect_prism[0, 0, -3:-1]),
        (rect_prism[-1, 0:1, 3:0:-1], np_rect_prism[-1, 0:1, 3:0:-1]),
        (flat[15:5:-1], np_flat[15:5:-1]),
        (flat[::-1], np_flat[::-1]),
        (flat[::22], np_flat[::22]),
        (flat[::-22], np_flat[::-22]),
        (flat[15:5], np_flat[15:5]),
        (flat[3:12:-1], np_flat[3:12:-1]),
        (flat[12:3:1], np_flat[12:3:1]),
        (flat[4:1:-2], np_flat[4:1:-2]),
        (flat[0:0:1], np_flat[0:0:1]),
        (flat[-4:-1:2], np_flat[-4:-1:2]),
        (mat[::-1, :], np_mat[::-1, :]),
        (mat[0, 1:4:2] + mat[:, 1:4:2], np_mat[0, 1:4:2] + np_mat[:, 1:4:2]),
        (mat[-1:4:1, 0], np_mat[-1:4:1, 0]),
        (mat[-1:4:-1, 0], np_mat[-1:4:-1, 0]),
        # out of bounds on start
        (mat[9:2:-1, 1:4], np_mat[9:2:-1, 1:4]),
        (mat[9:-1:-1, 1:4], np_mat[9:-1:-1, 1:4]),
        (mat[-5::, 0], np_mat[-5::, 0]),
        (mat[-5::-1, 0], np_mat[-5::-1, 0]),
        (mat[-5:-1:-1, 0], np_mat[-5:-1:-1, 0]),
        (mat[-5:-5:-1, 0], np_mat[-5:-5:-1, 0]),
        (mat[4::, 0], np_mat[4::, 0]),
        (mat[4:-1:, 0], np_mat[4:-1:, 0]),
        (mat[4:-1:-1, 0], np_mat[4:-1:-1, 0]),
        (mat[5::, 0], np_mat[5::, 0]),
        (mat[5::-1, 0], np_mat[5::-1, 0]),
        (mat[-5::-1, 0], np_mat[-5::-1, 0]),
        (mat[-5::1, 0], np_mat[-5::1, 0]),
        (mat[5:-1:-1, 0], np_mat[5:-1:-1, 0]),
        (mat[5:-5:-1, 0], np_mat[5:-5:-1, 0]),
        # out of bounds on stop
        (mat[0:20, 0:17], np_mat[0:20, 0:17]),
        (mat[0:20, 2:17], np_mat[0:20, 2:17]),
        (mat[:4, 0], np_mat[:4, 0]),
        (mat[:4:-1, 0], np_mat[:4:-1, 0]),
        (mat[:-5, 0], np_mat[:-5, 0]),
        (mat[:-5:-1, 0], np_mat[:-5:-1, 0]),
        (mat[0:-5, 0], np_mat[0:-5, 0]),
        (mat[0:-5:-1, 0], np_mat[0:-5:-1, 0]),
        (ah[:-3:1], an[:-3:1]),
        (ah[:-3:-1], an[:-3:-1]),
        (ah[-3::-1], an[-3::-1]),
        (ah[-3::1], an[-3::1])
    )
    # Missing slice components or indices make the whole result missing.
    assert hl.eval(flat[hl.null(hl.tint32):4:1]) is None
    assert hl.eval(flat[4:hl.null(hl.tint32)]) is None
    assert hl.eval(flat[4:10:hl.null(hl.tint32)]) is None
    assert hl.eval(rect_prism[:, :, 0:hl.null(hl.tint32):1]) is None
    assert hl.eval(rect_prism[hl.null(hl.tint32), :, :]) is None
    with pytest.raises(HailUserError, match="Slice step cannot be zero"):
        hl.eval(flat[::0])
    # Point indices (unlike slice bounds) must be in range.
    with pytest.raises(HailUserError, match="Index 3 is out of bounds for axis 0 with size 2"):
        hl.eval(mat[3, 1:3])
    with pytest.raises(HailUserError, match="Index -4 is out of bounds for axis 0 with size 2"):
        hl.eval(mat[-4, 0:3])
def test_ndarray_transposed_slice():
    """Slicing a transposed Hail ndarray matches the numpy equivalent."""
    base = [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]
    hl_mat = hl.nd.array(base)
    np_mat = np.array(base)
    hl_t = hl_mat.T
    np_t = np_mat.T
    assert_ndarrays_eq(
        (hl_mat, np_mat),
        (hl_t[0:hl_t.shape[0], 0:5], np_t[0:np_t.shape[0], 0:5])
    )
def test_ndarray_eval():
    """hl.eval of ndarray expressions produces numpy-equivalent arrays
    (values, strides, dtypes), round-trips numpy literals, handles missing
    data, and rejects ragged nested inputs."""
    data_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    mishapen_data_list1 = [[4], [1, 2, 3]]
    mishapen_data_list2 = [[[1], [2, 3]]]
    mishapen_data_list3 = [[4], [1, 2, 3], 5]
    nd_expr = hl.nd.array(data_list)
    evaled = hl.eval(nd_expr)
    np_equiv = np.array(data_list, dtype=np.int32)
    np_equiv_fortran_style = np.asfortranarray(np_equiv)
    np_equiv_extra_dimension = np_equiv.reshape((3, 1, 3))
    assert(np.array_equal(evaled, np_equiv))
    assert(evaled.strides == np_equiv.strides)
    assert hl.eval(hl.nd.array([[], []])).strides == (8, 8)
    assert np.array_equal(hl.eval(hl.nd.array([])), np.array([]))
    zero_array = np.zeros((10, 10), dtype=np.int64)
    evaled_zero_array = hl.eval(hl.literal(zero_array))
    assert np.array_equal(evaled_zero_array, zero_array)
    assert zero_array.dtype == evaled_zero_array.dtype
    # Testing correct interpretation of numpy strides
    assert np.array_equal(hl.eval(hl.literal(np_equiv_fortran_style)), np_equiv_fortran_style)
    assert np.array_equal(hl.eval(hl.literal(np_equiv_extra_dimension)), np_equiv_extra_dimension)
    # Testing from hail arrays
    assert np.array_equal(hl.eval(hl.nd.array(hl.range(6))), np.arange(6))
    assert np.array_equal(hl.eval(hl.nd.array(hl.int64(4))), np.array(4))
    # Testing from nested hail arrays
    assert np.array_equal(
        hl.eval(hl.nd.array(hl.array([hl.array(x) for x in data_list]))), np.arange(9).reshape((3, 3)) + 1)
    # Testing missing data
    assert hl.eval(hl.nd.array(hl.null(hl.tarray(hl.tint32)))) is None
    # Ragged input is rejected eagerly (Python-side ValueError) or at eval
    # time (HailUserError) depending on where the shape mismatch is detected.
    with pytest.raises(ValueError) as exc:
        hl.nd.array(mishapen_data_list1)
    assert "inner dimensions do not match" in str(exc.value)
    with pytest.raises(HailUserError) as exc:
        hl.eval(hl.nd.array(hl.array(mishapen_data_list1)))
    assert "inner dimensions do not match" in str(exc.value)
    with pytest.raises(HailUserError) as exc:
        hl.eval(hl.nd.array(hl.array(mishapen_data_list2)))
    assert "inner dimensions do not match" in str(exc.value)
    with pytest.raises(ValueError) as exc:
        hl.nd.array(mishapen_data_list3)
    assert "inner dimensions do not match" in str(exc.value)
def test_ndarray_shape():
    """The .shape of Hail ndarrays matches numpy, including broadcasting
    results and transposes; a missing ndarray has a missing shape."""
    np_e = np.array(3)
    np_row = np.array([1, 2, 3])
    np_col = np.array([[1], [2], [3]])
    np_m = np.array([[1, 2], [3, 4]])
    np_nd = np.arange(30).reshape((2, 5, 3))
    e = hl.nd.array(np_e)
    row = hl.nd.array(np_row)
    col = hl.nd.array(np_col)
    m = hl.nd.array(np_m)
    nd = hl.nd.array(np_nd)
    missing = hl.nd.array(hl.null(hl.tarray(hl.tint32)))
    assert_all_eval_to(
        (e.shape, np_e.shape),
        (row.shape, np_row.shape),
        (col.shape, np_col.shape),
        (m.shape, np_m.shape),
        (nd.shape, np_nd.shape),
        ((row + nd).shape, (np_row + np_nd).shape),
        ((row + col).shape, (np_row + np_col).shape),
        (m.transpose().shape, np_m.transpose().shape),
        (missing.shape, None)
    )
def test_ndarray_reshape():
    """ndarray.reshape matches numpy, including -1 wildcard dimensions,
    zero-length shapes, and tuple arguments; incompatible or malformed shapes
    raise, and missing ndarrays/shapes propagate missingness."""
    np_single = np.array([8])
    single = hl.nd.array([8])
    np_zero_dim = np.array(4)
    zero_dim = hl.nd.array(4)
    np_a = np.array([1, 2, 3, 4, 5, 6])
    a = hl.nd.array(np_a)
    np_cube = np.array([0, 1, 2, 3, 4, 5, 6, 7]).reshape((2, 2, 2))
    cube = hl.nd.array([0, 1, 2, 3, 4, 5, 6, 7]).reshape((2, 2, 2))
    cube_to_rect = cube.reshape((2, 4))
    np_cube_to_rect = np_cube.reshape((2, 4))
    # Reshape after transpose exercises non-contiguous layouts.
    cube_t_to_rect = cube.transpose((1, 0, 2)).reshape((2, 4))
    np_cube_t_to_rect = np_cube.transpose((1, 0, 2)).reshape((2, 4))
    np_hypercube = np.arange(3 * 5 * 7 * 9).reshape((3, 5, 7, 9))
    hypercube = hl.nd.array(np_hypercube)
    np_shape_zero = np.array([])
    shape_zero = hl.nd.array(np_shape_zero)
    assert_ndarrays_eq(
        (single.reshape(()), np_single.reshape(())),
        (zero_dim.reshape(()), np_zero_dim.reshape(())),
        (zero_dim.reshape((1,)), np_zero_dim.reshape((1,))),
        (a.reshape((6,)), np_a.reshape((6,))),
        (a.reshape((2, 3)), np_a.reshape((2, 3))),
        (a.reshape((3, 2)), np_a.reshape((3, 2))),
        (a.reshape((3, -1)), np_a.reshape((3, -1))),
        (a.reshape((-1, 2)), np_a.reshape((-1, 2))),
        (cube_to_rect, np_cube_to_rect),
        (cube_t_to_rect, np_cube_t_to_rect),
        (hypercube.reshape((5, 7, 9, 3)).reshape((7, 9, 3, 5)), np_hypercube.reshape((7, 9, 3, 5))),
        (hypercube.reshape(hl.tuple([5, 7, 9, 3])), np_hypercube.reshape((5, 7, 9, 3))),
        (shape_zero.reshape((0, 5)), np_shape_zero.reshape((0, 5))),
        (shape_zero.reshape((-1, 5)), np_shape_zero.reshape((-1, 5)))
    )
    # Missing ndarray or missing target shape yields missing.
    assert hl.eval(hl.null(hl.tndarray(hl.tfloat, 2)).reshape((4, 5))) is None
    assert hl.eval(hl.nd.array(hl.range(20)).reshape(
        hl.null(hl.ttuple(hl.tint64, hl.tint64)))) is None
    with pytest.raises(FatalError) as exc:
        hl.eval(hl.literal(np_cube).reshape((-1, -1)))
    assert "more than one -1" in str(exc)
    with pytest.raises(FatalError) as exc:
        hl.eval(hl.literal(np_cube).reshape((20,)))
    assert "requested shape is incompatible with number of elements" in str(exc)
    with pytest.raises(FatalError) as exc:
        hl.eval(a.reshape((3,)))
    assert "requested shape is incompatible with number of elements" in str(exc)
    with pytest.raises(FatalError) as exc:
        hl.eval(a.reshape(()))
    assert "requested shape is incompatible with number of elements" in str(exc)
    with pytest.raises(FatalError) as exc:
        hl.eval(hl.literal(np_cube).reshape((0, 2, 2)))
    assert "requested shape is incompatible with number of elements" in str(exc)
    with pytest.raises(FatalError) as exc:
        hl.eval(hl.literal(np_cube).reshape((2, 2, -2)))
    assert "must contain only nonnegative numbers or -1" in str(exc)
    with pytest.raises(FatalError) as exc:
        hl.eval(shape_zero.reshape((0, -1)))
    assert "Can't reshape" in str(exc)
def test_ndarray_map():
    """map over an ndarray: numerics, missingness, strings, and structs."""
    source = hl.nd.array([[2, 3, 4], [5, 6, 7]])
    negated = hl.map(lambda x: -x, source)
    constant_true = hl.map(lambda x: True, source)
    assert_ndarrays_eq(
        (negated, [[-2, -3, -4], [-5, -6, -7]]),
        (constant_true, [[True, True, True], [True, True, True]]))

    # Mapping over a missing ndarray stays missing.
    assert hl.eval(hl.null(hl.tndarray(hl.tfloat, 1)).map(lambda x: x * 2)) is None

    words = hl.nd.array(["hail", "is", "great"])
    word_lengths = words.map(lambda e: hl.len(e))
    assert np.array_equal(hl.eval(word_lengths), np.array([4, 2, 5]))

    struct_nd = hl.nd.array([hl.struct(x=5, y=True), hl.struct(x=9, y=False)])
    assert np.array_equal(hl.eval(struct_nd.map(lambda e: e.y)), np.array([True, False]))
def test_ndarray_map2():
    """Element-wise arithmetic between ndarrays, scalars, and numpy values.

    Covers +, -, *, // (with broadcasting) via assert_ndarrays_eq, true
    division via assert_ndarrays_almost_eq, and missingness propagation.
    """
    a = 2.0
    b = 3.0
    x = np.array([a, b])
    y = np.array([b, a])
    row_vec = np.array([[1, 2]])
    cube1 = np.array([[[1, 2],
                       [3, 4]],
                      [[5, 6],
                       [7, 8]]])
    cube2 = np.array([[[9, 10],
                       [11, 12]],
                      [[13, 14],
                       [15, 16]]])

    # Hail counterparts of the numpy values above.
    na = hl.nd.array(a)
    nx = hl.nd.array(x)
    ny = hl.nd.array(y)
    nrow_vec = hl.nd.array(row_vec)
    ncube1 = hl.nd.array(cube1)
    ncube2 = hl.nd.array(cube2)

    assert_ndarrays_eq(
        # with lists/numerics
        (na + b, np.array(a + b)),
        (b + na, np.array(a + b)),
        (nx + y, x + y),
        (ncube1 + cube2, cube1 + cube2),

        # Addition
        (na + na, np.array(a + a)),
        (nx + ny, x + y),
        (ncube1 + ncube2, cube1 + cube2),
        # Broadcasting
        (ncube1 + na, cube1 + a),
        (na + ncube1, a + cube1),
        (ncube1 + ny, cube1 + y),
        (ny + ncube1, y + cube1),
        (nrow_vec + ncube1, row_vec + cube1),
        (ncube1 + nrow_vec, cube1 + row_vec),

        # Subtraction
        (na - na, np.array(a - a)),
        (nx - nx, x - x),
        (ncube1 - ncube2, cube1 - cube2),
        # Broadcasting
        (ncube1 - na, cube1 - a),
        (na - ncube1, a - cube1),
        (ncube1 - ny, cube1 - y),
        (ny - ncube1, y - cube1),
        (ncube1 - nrow_vec, cube1 - row_vec),
        (nrow_vec - ncube1, row_vec - cube1),

        # Multiplication
        (na * na, np.array(a * a)),
        (nx * nx, x * x),
        (nx * na, x * a),
        (na * nx, a * x),
        (ncube1 * ncube2, cube1 * cube2),
        # Broadcasting
        (ncube1 * na, cube1 * a),
        (na * ncube1, a * cube1),
        (ncube1 * ny, cube1 * y),
        (ny * ncube1, y * cube1),
        (ncube1 * nrow_vec, cube1 * row_vec),
        (nrow_vec * ncube1, row_vec * cube1),

        # Floor div
        (na // na, np.array(a // a)),
        (nx // nx, x // x),
        (nx // na, x // a),
        (na // nx, a // x),
        (ncube1 // ncube2, cube1 // cube2),
        # Broadcasting
        (ncube1 // na, cube1 // a),
        (na // ncube1, a // cube1),
        (ncube1 // ny, cube1 // y),
        (ny // ncube1, y // cube1),
        (ncube1 // nrow_vec, cube1 // row_vec),
        (nrow_vec // ncube1, row_vec // cube1))

    # Division produces floats, so compare approximately.
    assert_ndarrays_almost_eq(
        (na / na, np.array(a / a)),
        (nx / nx, x / x),
        (nx / na, x / a),
        (na / nx, a / x),
        (ncube1 / ncube2, cube1 / cube2),
        # Broadcasting
        (ncube1 / na, cube1 / a),
        (na / ncube1, a / cube1),
        (ncube1 / ny, cube1 / y),
        (ny / ncube1, y / cube1),
        (ncube1 / nrow_vec, cube1 / row_vec),
        (nrow_vec / ncube1, row_vec / cube1))

    # Missingness tests: + with a missing operand is missing.
    missing = hl.null(hl.tndarray(hl.tfloat64, 2))
    present = hl.nd.array(np.arange(10).reshape(5, 2))

    assert hl.eval(missing + missing) is None
    assert hl.eval(missing + present) is None
    assert hl.eval(present + missing) is None
@skip_unless_spark_backend()
@run_with_cxx_compile()
def test_ndarray_to_numpy():
    """Round-trip a numpy array through hl.nd.array(...).to_numpy().

    Bug fix: the original computed ``np.array_equal`` but never asserted the
    result, so the test could not fail.
    """
    nd = np.array([[1, 2, 3], [4, 5, 6]])
    assert np.array_equal(hl.nd.array(nd).to_numpy(), nd)
@skip_unless_spark_backend()
@run_with_cxx_compile()
def test_ndarray_save():
    """Saving an ndarray to .npy preserves both dtype and contents."""
    cases = [
        np.array([[[1, 2, 3], [4, 5, 6]],
                  [[7, 8, 9], [10, 11, 12]]], dtype=np.int32),
        np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int64),
        np.array(3.0, dtype=np.float32),
        np.array([3.0], dtype=np.float64),
        np.array([True, False, True, True])
    ]

    for expected in cases:
        with tempfile.NamedTemporaryFile(suffix='.npy') as tmp:
            hl.nd.array(expected).save(tmp.name)
            reloaded = np.load(tmp.name)

            assert expected.dtype == reloaded.dtype, f'expected: {expected.dtype}, actual: {reloaded.dtype}'
            assert np.array_equal(expected, reloaded)
@skip_unless_spark_backend()
@run_with_cxx_compile()
def test_ndarray_sum():
    """Summing along each axis (and overall) matches numpy."""
    np_mat = np.array([[1, 2], [3, 4]])
    mat = hl.nd.array(np_mat)

    assert_all_eval_to(
        (mat.sum(axis=0), np_mat.sum(axis=0)),
        (mat.sum(axis=1), np_mat.sum(axis=1)),
        (mat.sum(), np_mat.sum()))
def test_ndarray_transpose():
    """Transpose of vectors, matrices and cubes, plus axis validation."""
    np_vec = np.array([1, 2, 3])
    np_mat = np.array([[1, 2, 3], [4, 5, 6]])
    np_cube = np.array([[[1, 2],
                         [3, 4]],
                        [[5, 6],
                         [7, 8]]])
    vec = hl.nd.array(np_vec)
    mat = hl.nd.array(np_mat)
    cube = hl.nd.array(np_cube)

    assert_ndarrays_eq(
        (vec.T, np_vec.T),
        # Transposing a 1-D ndarray is a no-op.
        (vec.T, np_vec),
        (mat.T, np_mat.T),
        (cube.transpose((0, 2, 1)), np_cube.transpose((0, 2, 1))),
        (cube.T, np_cube.T))

    assert hl.eval(hl.null(hl.tndarray(hl.tfloat, 1)).T) is None

    with pytest.raises(ValueError) as exc:
        vec.transpose((1,))
    assert "Invalid axis: 1" in str(exc.value)

    with pytest.raises(ValueError) as exc:
        cube.transpose((1, 1))
    assert "Expected 3 axes, got 2" in str(exc.value)

    with pytest.raises(ValueError) as exc:
        cube.transpose((1, 1, 1))
    assert "Axes cannot contain duplicates" in str(exc.value)
def test_ndarray_matmul():
    """Matrix multiplication (@) against numpy: vectors, matrices, mixed
    dtypes, N-d broadcasting, empty shapes, missingness, and error cases.

    Bug fix: the ``zero_by_four @ ones_float64`` case was a 3-tuple
    ``(expr, np_zero_by_four, np_ones_float64)`` — the ``@`` between the two
    numpy operands was missing, so the pair had no expected value.
    """
    np_v = np.array([1, 2])
    np_y = np.array([1, 1, 1])
    np_m = np.array([[1, 2], [3, 4]])
    np_m_f32 = np_m.astype(np.float32)
    np_m_f64 = np_m.astype(np.float64)
    np_r = np.array([[1, 2, 3], [4, 5, 6]])
    np_r_f32 = np_r.astype(np.float32)
    np_r_f64 = np_r.astype(np.float64)
    np_cube = np.arange(8).reshape((2, 2, 2))
    np_rect_prism = np.arange(12).reshape((3, 2, 2))
    np_broadcasted_mat = np.arange(4).reshape((1, 2, 2))
    np_six_dim_tensor = np.arange(3 * 7 * 1 * 9 * 4 * 5).reshape((3, 7, 1, 9, 4, 5))
    np_five_dim_tensor = np.arange(7 * 5 * 1 * 5 * 3).reshape((7, 5, 1, 5, 3))
    np_ones_int32 = np.ones((4, 4), dtype=np.int32)
    np_ones_float64 = np.ones((4, 4), dtype=np.float64)
    np_zero_by_four = np.array([], dtype=np.float64).reshape((0, 4))

    v = hl.nd.array(np_v)
    y = hl.nd.array(np_y)
    m = hl.nd.array(np_m)
    m_f32 = hl.nd.array(np_m_f32)
    m_f64 = hl.nd.array(np_m_f64)
    r = hl.nd.array(np_r)
    r_f32 = hl.nd.array(np_r_f32)
    r_f64 = hl.nd.array(np_r_f64)
    cube = hl.nd.array(np_cube)
    rect_prism = hl.nd.array(np_rect_prism)
    broadcasted_mat = hl.nd.array(np_broadcasted_mat)
    six_dim_tensor = hl.nd.array(np_six_dim_tensor)
    five_dim_tensor = hl.nd.array(np_five_dim_tensor)
    ones_int32 = hl.nd.array(np_ones_int32)
    ones_float64 = hl.nd.array(np_ones_float64)
    zero_by_four = hl.nd.array(np_zero_by_four)

    assert_ndarrays_eq(
        (v @ v, np_v @ np_v),
        (m @ m, np_m @ np_m),
        (m_f32 @ m_f32, np_m_f32 @ np_m_f32),
        (m_f64 @ m_f64, np_m_f64 @ np_m_f64),
        (m @ m.T, np_m @ np_m.T),
        (m_f64 @ m_f64.T, np_m_f64 @ np_m_f64.T),
        (r @ r.T, np_r @ np_r.T),
        (r_f32 @ r_f32.T, np_r_f32 @ np_r_f32.T),
        (r_f64 @ r_f64.T, np_r_f64 @ np_r_f64.T),
        (v @ m, np_v @ np_m),
        (m @ v, np_m @ np_v),
        (v @ r, np_v @ np_r),
        (r @ y, np_r @ np_y),
        (cube @ cube, np_cube @ np_cube),
        (cube @ v, np_cube @ np_v),
        (v @ cube, np_v @ np_cube),
        (cube @ m, np_cube @ np_m),
        (m @ cube, np_m @ np_cube),
        (rect_prism @ m, np_rect_prism @ np_m),
        (m @ rect_prism, np_m @ np_rect_prism),
        (m @ rect_prism.T, np_m @ np_rect_prism.T),
        (broadcasted_mat @ rect_prism, np_broadcasted_mat @ np_rect_prism),
        (six_dim_tensor @ five_dim_tensor, np_six_dim_tensor @ np_five_dim_tensor),
        # Fixed: was a 3-tuple missing the '@' between the numpy operands.
        (zero_by_four @ ones_float64, np_zero_by_four @ np_ones_float64),
        (zero_by_four.transpose() @ zero_by_four, np_zero_by_four.transpose() @ np_zero_by_four)
    )

    # Missingness propagates through @ from either operand.
    assert hl.eval(hl.null(hl.tndarray(hl.tfloat64, 2)) @
                   hl.null(hl.tndarray(hl.tfloat64, 2))) is None
    assert hl.eval(hl.null(hl.tndarray(hl.tint64, 2)) @
                   hl.nd.array(np.arange(10).reshape(5, 2))) is None
    assert hl.eval(hl.nd.array(np.arange(10).reshape(5, 2)) @
                   hl.null(hl.tndarray(hl.tint64, 2))) is None

    # Mixed int/float operands promote like numpy.
    assert np.array_equal(hl.eval(ones_int32 @ ones_float64), np_ones_int32 @ np_ones_float64)

    with pytest.raises(ValueError):
        m @ 5

    with pytest.raises(ValueError):
        m @ hl.nd.array(5)

    with pytest.raises(ValueError):
        cube @ hl.nd.array(5)

    with pytest.raises(FatalError) as exc:
        hl.eval(r @ r)
    assert "Matrix dimensions incompatible: 3 2" in str(exc)

    with pytest.raises(FatalError) as exc:
        hl.eval(hl.nd.array([1, 2]) @ hl.nd.array([1, 2, 3]))
    assert "Matrix dimensions incompatible" in str(exc)
def test_ndarray_big():
    """An ndarray built from a large range reports the expected size."""
    n = 100_000
    assert hl.eval(hl.nd.array(hl.range(n))).size == n
def test_ndarray_full():
    """hl.nd.zeros/ones/full match numpy, and dtype overrides are honored.

    Bug fix: the dtype checks were written ``assert expr.dtype, np.float32``
    — an ``assert`` with a message, which always passes because a dtype is
    truthy. They now actually compare the dtypes.
    """
    assert_ndarrays_eq(
        (hl.nd.zeros(4), np.zeros(4)),
        (hl.nd.zeros((3, 4, 5)), np.zeros((3, 4, 5))),
        (hl.nd.ones(6), np.ones(6)),
        (hl.nd.ones((6, 6, 6)), np.ones((6, 6, 6))),
        (hl.nd.full(7, 9), np.full(7, 9)),
        (hl.nd.full((3, 4, 5), 9), np.full((3, 4, 5), 9))
    )

    assert hl.eval(hl.nd.zeros((5, 5), dtype=hl.tfloat32)).dtype == np.float32
    assert hl.eval(hl.nd.ones(3, dtype=hl.tint64)).dtype == np.int64
    assert hl.eval(hl.nd.full((5, 6, 7), hl.int32(3), dtype=hl.tfloat64)).dtype == np.float64
def test_ndarray_arange():
    """hl.nd.arange agrees with np.arange; a zero step is an error."""
    arange_args = [(40,), (5, 50), (2, 47, 13)]
    assert_ndarrays_eq(
        *[(hl.nd.arange(*args), np.arange(*args)) for args in arange_args]
    )

    with pytest.raises(FatalError) as exc:
        hl.eval(hl.nd.arange(5, 20, 0))
    assert "Array range cannot have step size 0" in str(exc)
def test_ndarray_mixed():
    """Missingness propagates through chained map/reshape/transpose ops."""
    # map + reshape + transpose on a missing ndarray stays missing.
    assert hl.eval(hl.null(hl.tndarray(hl.tint64, 2)).map(
        lambda x: x * x).reshape((4, 5)).T) is None
    # A missing *shape* makes the reshape (and everything downstream) missing.
    assert hl.eval(
        (hl.nd.zeros((5, 10)).map(lambda x: x - 2) +
         hl.nd.ones((5, 10)).map(lambda x: x + 5)).reshape(hl.null(hl.ttuple(hl.tint64, hl.tint64))).T.reshape((10, 5))) is None
    # or_missing(False, ...) yields a missing ndarray; further maps keep it missing.
    assert hl.eval(hl.or_missing(False, hl.nd.array(np.arange(10)).reshape(
        (5, 2)).map(lambda x: x * 2)).map(lambda y: y * 2)) is None
def test_ndarray_show():
    """show() runs without error for 0-, 1-, 2- and 3-d ndarrays."""
    for expr in (hl.nd.array(3),
                 hl.nd.arange(6),
                 hl.nd.arange(6).reshape((2, 3)),
                 hl.nd.arange(8).reshape((2, 2, 2))):
        expr.show()
def test_ndarray_diagonal():
    """diagonal() of square and rectangular matrices; 1-d input is rejected."""
    square = hl.nd.array([[1, 2], [3, 4]])
    assert np.array_equal(hl.eval(hl.nd.diagonal(square)), np.array([1, 4]))

    wide = hl.nd.array([[1, 2, 3], [4, 5, 6]])
    assert np.array_equal(hl.eval(hl.nd.diagonal(wide)), np.array([1, 5]))

    tall = hl.nd.array([[1, 2], [3, 4], [5, 6]])
    assert np.array_equal(hl.eval(hl.nd.diagonal(tall)), np.array([1, 4]))

    with pytest.raises(AssertionError) as exc:
        hl.nd.diagonal(hl.nd.array([1, 2]))
    assert "2 dimensional" in str(exc)
def test_ndarray_qr():
    """hl.nd.qr matches np.linalg.qr in all four modes across many shapes."""
    # raw mode: compare the Householder factor matrix and tau vector.
    def assert_raw_equivalence(hl_ndarray, np_ndarray):
        ndarray_h, ndarray_tau = hl.eval(hl.nd.qr(hl_ndarray, mode="raw"))
        np_ndarray_h, np_ndarray_tau = np.linalg.qr(np_ndarray, mode="raw")

        # Can't ask for the rank of something that has a 0 in its shape.
        if 0 in np_ndarray.shape:
            assert ndarray_h.shape == np_ndarray_h.shape
            assert ndarray_tau.shape == np_ndarray_tau.shape
        else:
            # Beyond the rank the factorization is not unique, so only the
            # first `rank` columns/entries are compared.
            rank = np.linalg.matrix_rank(np_ndarray)
            assert np.allclose(ndarray_h[:, :rank], np_ndarray_h[:, :rank])
            assert np.allclose(ndarray_tau[:rank], np_ndarray_tau[:rank])

    # r mode: only the upper-triangular factor is returned.
    def assert_r_equivalence(hl_ndarray, np_ndarray):
        assert np.allclose(hl.eval(hl.nd.qr(hl_ndarray, mode="r")),
                           np.linalg.qr(np_ndarray, mode="r"))

    # reduced mode: also verifies that Q @ R reconstructs the input.
    def assert_reduced_equivalence(hl_ndarray, np_ndarray):
        q, r = hl.eval(hl.nd.qr(hl_ndarray, mode="reduced"))
        nq, nr = np.linalg.qr(np_ndarray, mode="reduced")

        # Can't ask for the rank of something that has a 0 in its shape.
        if 0 in np_ndarray.shape:
            assert q.shape == nq.shape
            assert r.shape == nr.shape
        else:
            rank = np.linalg.matrix_rank(np_ndarray)
            assert np.allclose(q[:, :rank], nq[:, :rank])
            assert np.allclose(r, nr)
            assert np.allclose(q @ r, np_ndarray)

    # complete mode: square Q; same reconstruction check as reduced.
    def assert_complete_equivalence(hl_ndarray, np_ndarray):
        q, r = hl.eval(hl.nd.qr(hl_ndarray, mode="complete"))
        nq, nr = np.linalg.qr(np_ndarray, mode="complete")

        # Can't ask for the rank of something that has a 0 in its shape.
        if 0 in np_ndarray.shape:
            assert q.shape == nq.shape
            assert r.shape == nr.shape
        else:
            rank = np.linalg.matrix_rank(np_ndarray)
            assert np.allclose(q[:, :rank], nq[:, :rank])
            assert np.allclose(r, nr)
            assert np.allclose(q @ r, np_ndarray)

    # Run all four mode checks against one input.
    def assert_same_qr(hl_ndarray, np_ndarray):
        assert_raw_equivalence(hl_ndarray, np_ndarray)
        assert_r_equivalence(hl_ndarray, np_ndarray)
        assert_reduced_equivalence(hl_ndarray, np_ndarray)
        assert_complete_equivalence(hl_ndarray, np_ndarray)

    np_identity4 = np.identity(4)
    identity4 = hl.nd.array(np_identity4)
    assert_same_qr(identity4, np_identity4)

    # Rank-deficient input (all entries equal).
    np_all3 = np.full((3, 3), 3)
    all3 = hl.nd.full((3, 3), 3)
    assert_same_qr(all3, np_all3)

    np_nine_square = np.arange(9).reshape((3, 3))
    nine_square = hl.nd.arange(9).reshape((3, 3))
    assert_same_qr(nine_square, np_nine_square)

    # Classic worked example (per the name, presumably from the Wikipedia
    # QR-decomposition article).
    np_wiki_example = np.array([[12, -51, 4],
                                [6, 167, -68],
                                [-4, 24, -41]])
    wiki_example = hl.nd.array(np_wiki_example)
    assert_same_qr(wiki_example, np_wiki_example)

    np_wide_rect = np.arange(12).reshape((3, 4))
    wide_rect = hl.nd.arange(12).reshape((3, 4))
    assert_same_qr(wide_rect, np_wide_rect)

    np_tall_rect = np.arange(12).reshape((4, 3))
    tall_rect = hl.nd.arange(12).reshape((4, 3))
    assert_same_qr(tall_rect, np_tall_rect)

    np_single_element = np.array([1]).reshape((1, 1))
    single_element = hl.nd.array([1]).reshape((1, 1))
    assert_same_qr(single_element, np_single_element)

    # Degenerate shape with zero rows.
    np_no_elements = np.array([]).reshape((0, 10))
    no_elements = hl.nd.array(np_no_elements)
    assert_same_qr(no_elements, np_no_elements)

    with pytest.raises(ValueError) as exc:
        hl.nd.qr(wiki_example, mode="invalid")
    assert "Unrecognized mode" in str(exc)

    with pytest.raises(AssertionError) as exc:
        hl.nd.qr(hl.nd.arange(6))
    assert "requires 2 dimensional" in str(exc)
def test_svd():
    """hl.nd.svd matches np.linalg.svd on squares and rank-2 rectangles.

    Cleanup: removed an unused local (``k = min(np_array.shape)``) from the
    comparison helper.
    """
    def assert_evals_to_same_svd(nd_expr, np_array, full_matrices=True, compute_uv=True):
        # Compare hail's SVD to numpy's. Beyond the matrix rank the singular
        # vectors are not uniquely determined, so U and V are only compared
        # up to the first `rank` columns/rows.
        evaled = hl.eval(hl.nd.svd(nd_expr, full_matrices, compute_uv))
        np_svd = np.linalg.svd(np_array, full_matrices, compute_uv)

        # check shapes
        for h, n in zip(evaled, np_svd):
            assert h.shape == n.shape

        rank = np.linalg.matrix_rank(np_array)

        if compute_uv:
            np.testing.assert_array_almost_equal(evaled[0][:, :rank], np_svd[0][:, :rank])
            np.testing.assert_array_almost_equal(evaled[1], np_svd[1])
            np.testing.assert_array_almost_equal(evaled[2][:rank, :], np_svd[2][:rank, :])
        else:
            np.testing.assert_array_equal(evaled, np_svd)

    np_small_square = np.arange(4).reshape((2, 2))
    small_square = hl.nd.array(np_small_square)
    np_rank_2_wide_rectangle = np.arange(12).reshape((4, 3))
    rank_2_wide_rectangle = hl.nd.array(np_rank_2_wide_rectangle)
    np_rank_2_tall_rectangle = np_rank_2_wide_rectangle.T
    rank_2_tall_rectangle = hl.nd.array(np_rank_2_tall_rectangle)

    assert_evals_to_same_svd(small_square, np_small_square)
    assert_evals_to_same_svd(small_square, np_small_square, compute_uv=False)

    assert_evals_to_same_svd(rank_2_wide_rectangle, np_rank_2_wide_rectangle)
    assert_evals_to_same_svd(rank_2_wide_rectangle, np_rank_2_wide_rectangle, full_matrices=False)

    assert_evals_to_same_svd(rank_2_tall_rectangle, np_rank_2_tall_rectangle)
    assert_evals_to_same_svd(rank_2_tall_rectangle, np_rank_2_tall_rectangle, full_matrices=False)
def test_numpy_interop():
    """Arithmetic and matmul mix numpy arrays with hail expressions freely."""
    vec_a = [2, 3]
    vec_b = [3, 5]
    row = [[2, 3]]
    col = [[3], [5]]

    # Element-wise ops with the numpy operand on either side.
    assert np.array_equal(hl.eval(np.array(vec_a) * hl.literal(3)), np.array([6, 9]))
    assert np.array_equal(hl.eval(hl.literal(3) * np.array(vec_a)), np.array([6, 9]))
    assert np.array_equal(hl.eval(np.array(vec_a) * hl.nd.array(vec_b)), np.array([6, 15]))
    assert np.array_equal(hl.eval(hl.nd.array(vec_b) * np.array(vec_a)), np.array([6, 15]))

    assert np.array_equal(hl.eval(np.array(vec_a) + hl.literal(3)), np.array([5, 6]))
    assert np.array_equal(hl.eval(hl.literal(3) + np.array(vec_a)), np.array([5, 6]))
    assert np.array_equal(hl.eval(np.array(vec_a) + hl.nd.array(vec_b)), np.array([5, 8]))
    assert np.array_equal(hl.eval(hl.nd.array(vec_b) + np.array(vec_a)), np.array([5, 8]))

    # Matrix multiplication with numpy operands on either side.
    assert np.array_equal(hl.eval(np.array(vec_a) @ hl.nd.array(vec_b)), 21)
    assert np.array_equal(hl.eval(hl.nd.array(vec_a) @ np.array(vec_b)), 21)
    assert np.array_equal(hl.eval(np.array(row) @ hl.nd.array(col)), np.array([[21]]))
    assert np.array_equal(hl.eval(hl.nd.array(row) @ np.array(col)), np.array([[21]]))
    assert np.array_equal(hl.eval(hl.nd.array(col) @ np.array(row)),
                          np.array([[6, 9], [10, 15]]))
    assert np.array_equal(hl.eval(np.array(col) @ hl.nd.array(row)),
                          np.array([[6, 9], [10, 15]]))
def test_ndarray_emitter_extract():
    """Elements of a mapped ndarray can be indexed from array expressions."""
    lookup = hl.nd.array(np.array([0, 1, 2, 1, 0])).map(
        lambda x: hl.array([3, 4, 5])[hl.int(x)])
    assert hl.eval(hl.range(5).map(lambda i: lookup[i])) == [3, 4, 5, 4, 3]
def test_ndarrays_transmute_ops():
    """ndarray ops survive a table transmute across many partitions."""
    table = hl.utils.range_table(10, n_partitions=10)
    table = table.annotate(x=hl.nd.array([table.idx]), y=hl.nd.array([table.idx]))
    table = table.transmute(xxx=table.x @ table.y)
    assert table.xxx.collect() == [i * i for i in range(10)]
def test_ndarray():
    """hl.nd.array accepts tuples and lists and matches np.array.

    Fix: the first group built ``an1`` but never compared against it; the
    assertion now checks every constructed array.
    """
    a1 = hl.eval(hl.nd.array((1, 2, 3)))
    a2 = hl.eval(hl.nd.array([1, 2, 3]))
    an1 = np.array((1, 2, 3))
    an2 = np.array([1, 2, 3])

    assert(np.array_equal(a1, an1) and np.array_equal(a1, a2) and np.array_equal(a2, an2))

    # Note: ((1), (2), (3)) is just (1, 2, 3) — the inner parens are not tuples.
    a1 = hl.eval(hl.nd.array(((1), (2), (3))))
    a2 = hl.eval(hl.nd.array(([1], [2], [3])))
    a3 = hl.eval(hl.nd.array([[1], [2], [3]]))
    an1 = np.array(((1), (2), (3)))
    an2 = np.array(([1], [2], [3]))
    an3 = np.array([[1], [2], [3]])

    assert(np.array_equal(a1, an1) and np.array_equal(a2, an2) and np.array_equal(a3, an3))

    a1 = hl.eval(hl.nd.array(((1, 2), (2, 5), (3, 8))))
    a2 = hl.eval(hl.nd.array([[1, 2], [2, 5], [3, 8]]))
    an1 = np.array(((1, 2), (2, 5), (3, 8)))
    an2 = np.array([[1, 2], [2, 5], [3, 8]])

    assert(np.array_equal(a1, an1) and np.array_equal(a2, an2))
def test_cast():
    """dtype= on hl.nd.array yields the same dtype numpy would produce."""
    type_pairs = [(hl.tfloat64, np.float64),
                  (hl.tfloat32, np.float32),
                  (hl.tint32, np.int32),
                  (hl.tint64, np.int64)]

    def check_one(values, hail_type, numpy_type):
        hail_dtype = hl.eval(hl.nd.array(values, dtype=hail_type)).dtype
        numpy_dtype = np.array(values, dtype=numpy_type).dtype
        assert(hail_dtype == numpy_dtype)

    for values in ([1, 2, 3], [1, 2, 3.], [1., 2., 3.], [[1, 2], [3, 4]]):
        for hail_type, numpy_type in type_pairs:
            check_one(values, hail_type, numpy_type)
def test_inv():
    """hl.nd.inv agrees with numpy's matrix inverse on a random matrix."""
    mat = np.random.randn(5, 5)
    expected = np.linalg.inv(mat)
    actual = hl.eval(hl.nd.inv(mat))
    assert np.allclose(actual, expected)
def test_concatenate():
    """hl.nd.concatenate matches numpy for lists, tuples, and hail arrays."""
    x = np.array([[1., 2.], [3., 4.]])
    y = np.array([[5.], [6.]])

    # Explicit axis, python list and hail array inputs.
    expected = np.concatenate([x, y], axis=1)
    assert np.array_equal(expected, hl.eval(hl.nd.concatenate([x, y], axis=1)))
    assert np.array_equal(expected, hl.eval(hl.nd.concatenate(hl.array([x, y]), axis=1)))

    x = np.array([[1], [3]])
    y = np.array([[5], [6]])

    # Default axis, python list then hail array.
    expected = np.concatenate([x, y])
    assert np.array_equal(expected, hl.eval(hl.nd.concatenate([x, y])))
    assert np.array_equal(expected, hl.eval(hl.nd.concatenate(hl.array([x, y]))))

    # Default axis, python tuple then hail array.
    expected = np.concatenate((x, y))
    assert np.array_equal(expected, hl.eval(hl.nd.concatenate((x, y))))
    assert np.array_equal(expected, hl.eval(hl.nd.concatenate(hl.array([x, y]))))
def test_vstack():
    """hl.nd.vstack matches np.vstack, both as expressions and in tables."""
    ht = hl.utils.range_table(10)

    def check_in_table(a, b):
        # vstack also works on ndarray fields inside a table row.
        rows = ht.annotate(x=hl.nd.array(a), y=hl.nd.array(b))
        rows = rows.annotate(stacked=hl.nd.vstack([rows.x, rows.y]))
        assert np.array_equal(rows.collect()[0].stacked, np.vstack([a, b]))

    for a, b in ((np.array([1, 2, 3]), np.array([2, 3, 4])),
                 (np.array([[1], [2], [3]]), np.array([[2], [3], [4]]))):
        assert(np.array_equal(hl.eval(hl.nd.vstack((a, b))), np.vstack((a, b))))
        assert(np.array_equal(hl.eval(hl.nd.vstack(hl.array([a, b]))), np.vstack((a, b))))
        check_in_table(a, b)
def test_hstack():
    """hl.nd.hstack matches np.hstack, both as expressions and in tables."""
    ht = hl.utils.range_table(10)

    def check_in_table(a, b):
        # hstack also works on ndarray fields inside a table row.
        rows = ht.annotate(x=hl.nd.array(a), y=hl.nd.array(b))
        rows = rows.annotate(stacked=hl.nd.hstack([rows.x, rows.y]))
        assert np.array_equal(rows.collect()[0].stacked, np.hstack([a, b]))

    for a, b in ((np.array([1, 2, 3]), np.array([2, 3, 4])),
                 (np.array([[1], [2], [3]]), np.array([[2], [3], [4]]))):
        assert(np.array_equal(hl.eval(hl.nd.hstack((a, b))), np.hstack((a, b))))
        assert(np.array_equal(hl.eval(hl.nd.hstack(hl.array([a, b]))), np.hstack((a, b))))
        check_in_table(a, b)
def test_eye():
    """hl.nd.eye matches np.eye for every small (rows, cols) combination."""
    for n_rows in range(13):
        for n_cols in range(13):
            assert(np.array_equal(hl.eval(hl.nd.eye(n_rows, n_cols)), np.eye(n_rows, n_cols)))
def test_identity():
    """hl.nd.identity matches np.identity for small sizes, including 0."""
    for size in range(13):
        assert(np.array_equal(hl.eval(hl.nd.identity(size)), np.identity(size)))
def test_agg_ndarray_sum():
    """hl.agg.ndarray_sum: empty aggregation, 0/1/2-d values, mixed layouts,
    and the shape-mismatch failure mode."""
    # Aggregating over zero rows yields missing, not a zero ndarray.
    no_values = hl.utils.range_table(0).annotate(x=hl.nd.arange(5))
    assert no_values.aggregate(hl.agg.ndarray_sum(no_values.x)) is None

    # 0-d ndarrays: sums idx 0..9 -> 45.
    increasing_0d = hl.utils.range_table(10)
    increasing_0d = increasing_0d.annotate(x=hl.nd.array(increasing_0d.idx))
    assert np.array_equal(increasing_0d.aggregate(hl.agg.ndarray_sum(increasing_0d.x)), np.array(45))

    just_ones_1d = hl.utils.range_table(20).annotate(x=hl.nd.ones((7,)))
    assert np.array_equal(just_ones_1d.aggregate(hl.agg.ndarray_sum(just_ones_1d.x)), np.full((7,), 20))

    just_ones_2d = hl.utils.range_table(100).annotate(x=hl.nd.ones((2, 3)))
    assert np.array_equal(just_ones_2d.aggregate(hl.agg.ndarray_sum(just_ones_2d.x)), np.full((2, 3), 100))

    # Rows alternate between a matrix and its transpose; both layouts must sum.
    transposes = hl.utils.range_table(4).annotate(x=hl.nd.arange(16).reshape((4, 4)))
    transposes = transposes.annotate(x = hl.if_else((transposes.idx % 2) == 0, transposes.x, transposes.x.T))
    np_arange_4_by_4 = np.arange(16).reshape((4, 4))
    transposes_result = (np_arange_4_by_4 * 2) + (np_arange_4_by_4.T * 2)
    assert np.array_equal(transposes.aggregate(hl.agg.ndarray_sum(transposes.x)), transposes_result)

    # Summing ndarrays whose shapes differ per row is a runtime error.
    with pytest.raises(FatalError) as exc:
        mismatched = hl.utils.range_table(5)
        mismatched = mismatched.annotate(x=hl.nd.ones((mismatched.idx,)))
        mismatched.aggregate(hl.agg.ndarray_sum(mismatched.x))
    assert "Can't sum" in str(exc)
| |
from datetime import date, datetime
import calendar
import unittest
from google.appengine.api.search import GeoPoint
from search import errors
from search import fields
from search import timezone
class Base(object):
    """Shared null/default behaviour tests, mixed into per-field TestCases.

    Subclasses set ``field_class``. Fix: deprecated ``assertEquals`` calls
    replaced with ``assertEqual`` (same behaviour; ``assertEquals`` is a
    deprecated alias scheduled for removal).
    """

    def new_field(self, field_class, **kwargs):
        """Build a field instance pre-wired with a name and document class."""
        f = field_class(**kwargs)
        f.name = 'test_field'
        f.cls_name = 'TestDocument'
        return f

    def test_to_search_value_null_no_default(self):
        f = self.new_field(self.field_class, null=True)
        self.assertEqual(f.to_search_value(None), f.none_value())

    def test_to_search_value_no_null_no_default(self):
        f = self.new_field(self.field_class, null=False)
        self.assertRaises(errors.FieldError, f.to_search_value, None)

    def test_to_search_value_null_default(self):
        # null=True wins: None maps to the none_value even with a default.
        f = self.new_field(self.field_class, default='THINGS', null=True)
        self.assertEqual(f.to_search_value(None), f.none_value())

    def test_to_search_value_no_null_default(self):
        f = self.new_field(self.field_class, default='THINGS', null=False)
        self.assertEqual(f.to_search_value(None), 'THINGS')

    def test_to_search_value_none(self):
        f = self.new_field(self.field_class)
        self.assertEqual(f.to_search_value(None), f.none_value())
class TestBaseField(Base, unittest.TestCase):
    """The shared Base tests applied to fields.Field, plus add_to_class."""
    field_class = fields.Field

    def test_contribute_to_class(self):
        class FakeDocument(object):
            pass

        field = fields.Field()
        field.add_to_class(FakeDocument, 'test_field')

        self.assertEqual(field.name, 'test_field')
        self.assertEqual(field.cls_name, 'FakeDocument')
class TestTextField(Base, unittest.TestCase):
    """Run the shared Base null/default tests against fields.TextField."""
    field_class = fields.TextField
class TestFloatField(Base, unittest.TestCase):
    """FloatField coercion, defaults, and min/max validation.

    Fix: deprecated ``assertEquals`` replaced with ``assertEqual``.
    """
    field_class = fields.FloatField

    def test_to_search_value_null_default(self):
        f = self.new_field(self.field_class, default=123.0, null=True)
        self.assertEqual(f.to_search_value(None), f.none_value())

    def test_to_search_value_null_default_2(self):
        f = self.new_field(self.field_class, default=123.0, null=True)
        self.assertEqual(f.to_search_value(987.0), 987.0)

    def test_to_search_value_no_null_default(self):
        f = self.new_field(self.field_class, default=123.0, null=False)
        self.assertEqual(f.to_search_value(None), 123.0)

    def test_to_search_value_no_null_default_2(self):
        f = self.new_field(self.field_class, default=123.0, null=False)
        self.assertEqual(f.to_search_value(987.0), 987.0)

    def test_to_search_value_floatstring(self):
        # Numeric strings are coerced to float.
        f = self.new_field(self.field_class)
        self.assertEqual(f.to_search_value('987.0'), 987.0)

    def test_to_search_value_int(self):
        f = self.new_field(self.field_class)
        self.assertEqual(f.to_search_value(987), 987.0)

    def test_max_min_limits(self):
        # Bounds are inclusive; values outside raise ValueError.
        f = self.new_field(self.field_class, minimum=2.0, maximum=4.7)
        self.assertEqual(f.to_search_value(2.0), 2.0)
        self.assertEqual(f.to_search_value(4.7), 4.7)
        self.assertEqual(f.to_search_value(None), f.none_value())
        self.assertRaises(ValueError, f.to_search_value, 4.8)
        self.assertRaises(ValueError, f.to_search_value, 1.9)
class TestIntegerField(Base, unittest.TestCase):
    """IntegerField coercion, defaults, and min/max validation.

    Fix: deprecated ``assertEquals`` replaced with ``assertEqual``.
    """
    field_class = fields.IntegerField

    def test_to_search_value_null_default(self):
        f = self.new_field(self.field_class, default=456, null=True)
        self.assertEqual(f.to_search_value(None), f.none_value())

    def test_to_search_value_null_default_2(self):
        # NOTE(review): float default on an IntegerField — the default is
        # never consumed in this test, but confirm it's intentional.
        f = self.new_field(self.field_class, default=123.0, null=True)
        self.assertEqual(f.to_search_value(987), 987)

    def test_to_search_value_no_null_default(self):
        f = self.new_field(self.field_class, default=456, null=False)
        self.assertEqual(f.to_search_value(None), 456)

    def test_to_search_value_no_null_default_2(self):
        f = self.new_field(self.field_class, default=123.0, null=False)
        self.assertEqual(f.to_search_value(987), 987)

    def test_to_search_value_intstring(self):
        # Numeric strings are coerced to int.
        f = self.new_field(self.field_class)
        self.assertEqual(f.to_search_value('987'), 987)

    def test_max_min_limits(self):
        # Bounds are inclusive; values outside raise ValueError.
        f = self.new_field(self.field_class, minimum=2, maximum=4)
        self.assertEqual(f.to_search_value(2), 2)
        self.assertEqual(f.to_search_value(4), 4)
        self.assertEqual(f.to_search_value(None), f.none_value())
        self.assertRaises(ValueError, f.to_search_value, 5)
        self.assertRaises(ValueError, f.to_search_value, 1)
class TestBooleanField(Base, unittest.TestCase):
    """BooleanField conversion to the search API's 0/1 representation.

    Fix: deprecated ``assertEquals`` replaced with ``assertEqual``.
    """
    field_class = fields.BooleanField

    def test_to_search_value_null_default(self):
        f = self.new_field(self.field_class, default=True, null=True)
        self.assertEqual(f.to_search_value(None), f.none_value())

    def test_to_search_value_null_default_2(self):
        f = self.new_field(self.field_class, default=True, null=True)
        self.assertEqual(f.to_search_value(False), 0)

    def test_to_search_value_no_null_default(self):
        f = self.new_field(self.field_class, default=True, null=False)
        self.assertEqual(f.to_search_value(None), 1)

    def test_to_search_value_no_null_default_2(self):
        f = self.new_field(self.field_class, default=False, null=False)
        self.assertEqual(f.to_search_value(None), 0)

    def test_to_search_value_no_null_default_3(self):
        # An explicit False beats the default.
        f = self.new_field(self.field_class, default=True, null=False)
        self.assertEqual(f.to_search_value(False), 0)

    def test_to_search_value_true(self):
        f = self.new_field(self.field_class)
        self.assertEqual(f.to_search_value(True), 1)

    def test_to_search_value_false(self):
        f = self.new_field(self.field_class)
        self.assertEqual(f.to_search_value(False), 0)
class TestDateField(Base, unittest.TestCase):
    """DateField parsing, defaults, and timezone-awareness errors.

    Fix: deprecated ``assertEquals`` replaced with ``assertEqual``.
    """
    field_class = fields.DateField

    def test_to_search_value_null_default(self):
        f = self.new_field(self.field_class, default=date(2012, 8, 3), null=True)
        self.assertEqual(f.to_search_value(None), f.none_value())

    def test_to_search_value_null_default_2(self):
        f = self.new_field(self.field_class, default=date(2012, 8, 3), null=True)
        self.assertEqual(f.to_search_value(date(1989, 8, 3)), date(1989, 8, 3))

    def test_to_search_value_no_null_default(self):
        f = self.new_field(self.field_class, default=date(2012, 8, 3), null=False)
        self.assertEqual(f.to_search_value(None), date(2012, 8, 3))

    def test_to_search_value_no_null_default_2(self):
        f = self.new_field(self.field_class, default=date(2012, 8, 3), null=False)
        self.assertEqual(f.to_search_value(date(1989, 8, 3)), date(1989, 8, 3))

    def test_to_search_value_date(self):
        # NOTE(review): despite the name, this passes a datetime (as does the
        # next test) — presumably DateField accepts both; confirm intent.
        f = self.new_field(self.field_class)
        self.assertEqual(
            f.to_search_value(datetime(2012, 8, 3)), datetime(2012, 8, 3))

    def test_to_search_value_datetime(self):
        f = self.new_field(self.field_class)
        self.assertEqual(
            f.to_search_value(datetime(2012, 8, 3, 23, 49)),
            datetime(2012, 8, 3, 23, 49))

    def test_to_search_value_datestring(self):
        # ISO date strings are parsed into date objects.
        f = self.new_field(self.field_class)
        self.assertEqual(f.to_search_value('2012-08-03'), date(2012, 8, 3))

    def test_to_search_value_errors(self):
        f = self.new_field(self.field_class)
        self.assertRaises(ValueError, f.to_search_value, 'some nonsense')
        self.assertRaises(TypeError, f.to_search_value, 17)

    def test_error_using_aware_datetime(self):
        # Offset-aware datetimes are rejected by DateField.
        xmas = datetime(2016, 12, 25, 0, 0, tzinfo=timezone.utc)
        field = self.new_field(fields.DateField)
        with self.assertRaisesRegexp(TypeError, r'Datetime values must be offset-naive'):
            field.to_search_value(xmas)
class TestDateTimeField(Base, unittest.TestCase):
    """DateTimeField: conversion to epoch seconds, plus awareness/range errors."""
    field_class = fields.DateTimeField

    def test_to_search_value_no_null_default(self):
        default_value = datetime(2016, 12, 25, 0, 0)
        f = self.new_field(fields.DateTimeField, null=False, default=default_value)
        # None falls back to the default, converted to a UTC timestamp.
        self.assertEqual(f.to_search_value(None), calendar.timegm(default_value.timetuple()))

    def test_error_using_aware_datetime(self):
        aware = datetime(2016, 12, 25, 0, 0, tzinfo=timezone.utc)
        f = self.new_field(fields.DateTimeField)
        with self.assertRaisesRegexp(TypeError, r'Datetime values must be offset-naive'):
            f.to_search_value(aware)

    def test_error_using_too_early_datetime(self):
        too_early = datetime.utcfromtimestamp(fields.MIN_SEARCH_API_INT - 1)
        f = self.new_field(fields.DateTimeField)
        with self.assertRaisesRegexp(ValueError, r'Datetime out of range'):
            f.to_search_value(too_early)

    def test_error_using_too_late_datetime(self):
        too_late = datetime.utcfromtimestamp(fields.MAX_SEARCH_API_INT + 1)
        f = self.new_field(fields.DateTimeField)
        with self.assertRaisesRegexp(ValueError, r'Datetime out of range'):
            f.to_search_value(too_late)
class TestTZDateTimeField(Base, unittest.TestCase):
    """TZDateTimeField: requires offset-aware datetime values."""
    field_class = fields.TZDateTimeField

    def test_to_search_value_no_null_default(self):
        default_value = datetime(2016, 12, 25, 0, 0, tzinfo=timezone.utc)
        f = self.new_field(fields.DateTimeField, null=False, default=default_value)
        # None falls back to the default, converted to a UTC timestamp.
        self.assertEqual(f.to_search_value(None), calendar.timegm(default_value.timetuple()))

    def test_error_using_naive_datetime(self):
        naive = datetime(2016, 12, 25, 0, 0, tzinfo=None)
        f = self.new_field(fields.TZDateTimeField)
        with self.assertRaisesRegexp(TypeError, r'Datetime values must be offset-aware'):
            f.to_search_value(naive)
class TestGeoField(Base, unittest.TestCase):
    """GeoField: defaults are disallowed entirely; values must be GeoPoints.

    Overrides the Base default/null tests because constructing a GeoField
    with any ``default`` is expected to raise AssertionError.
    Fix: deprecated ``assertEquals`` replaced with ``assertEqual``.
    """
    field_class = fields.GeoField

    def test_to_search_value_no_null_default(self):
        self.assertRaises(
            AssertionError,
            self.new_field,
            self.field_class,
            default=GeoPoint(latitude=3.14, longitude=3.14),
            null=False)

    def test_to_search_value_null_default(self):
        self.assertRaises(
            AssertionError,
            self.new_field,
            self.field_class,
            default='THINGS',
            null=True)

    def test_to_search_value_no_null_no_default(self):
        f = self.new_field(self.field_class, null=False)
        self.assertRaises(TypeError, f.to_search_value, None)

    def test_to_search_value_null_no_default(self):
        # Even an explicit default=None is rejected at construction time.
        self.assertRaises(
            AssertionError,
            self.new_field,
            self.field_class,
            default=None,
            null=True)

    def test_to_search_value_gp(self):
        f = self.new_field(self.field_class)
        gp = GeoPoint(latitude=4.2, longitude=4.2)
        self.assertEqual(f.to_search_value(gp), gp)

    def test_to_search_value_none(self):
        f = self.new_field(self.field_class)
        self.assertRaises(TypeError, f.to_search_value, None)

    def test_to_search_value_errors(self):
        f = self.new_field(self.field_class)
        # TODO: maybe support these at some point?
        self.assertRaises(TypeError, f.to_search_value, '3.14,3.14')
        self.assertRaises(TypeError, f.to_search_value, 3.14)
        self.assertRaises(TypeError, f.to_search_value, (3.14, 3.14,))
| |
"""Support for MQTT JSON lights."""
from contextlib import suppress
import json
import logging
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_MODE,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_FLASH,
ATTR_HS_COLOR,
ATTR_RGB_COLOR,
ATTR_RGBW_COLOR,
ATTR_RGBWW_COLOR,
ATTR_TRANSITION,
ATTR_WHITE_VALUE,
ATTR_XY_COLOR,
COLOR_MODE_COLOR_TEMP,
COLOR_MODE_HS,
COLOR_MODE_RGB,
COLOR_MODE_RGBW,
COLOR_MODE_RGBWW,
COLOR_MODE_XY,
ENTITY_ID_FORMAT,
FLASH_LONG,
FLASH_SHORT,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_EFFECT,
SUPPORT_FLASH,
SUPPORT_TRANSITION,
SUPPORT_WHITE_VALUE,
VALID_COLOR_MODES,
LightEntity,
legacy_supported_features,
valid_supported_color_modes,
)
from homeassistant.const import (
CONF_BRIGHTNESS,
CONF_COLOR_TEMP,
CONF_EFFECT,
CONF_HS,
CONF_NAME,
CONF_OPTIMISTIC,
CONF_RGB,
CONF_WHITE_VALUE,
CONF_XY,
STATE_ON,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.typing import ConfigType
import homeassistant.util.color as color_util
from .. import subscription
from ... import mqtt
from ..const import CONF_COMMAND_TOPIC, CONF_QOS, CONF_RETAIN, CONF_STATE_TOPIC
from ..debug_info import log_messages
from ..mixins import MQTT_ENTITY_COMMON_SCHEMA, MqttEntity
from .schema import MQTT_LIGHT_SCHEMA_SCHEMA
from .schema_basic import CONF_BRIGHTNESS_SCALE, MQTT_LIGHT_ATTRIBUTES_BLOCKED
# Module-level logger for this platform.
_LOGGER = logging.getLogger(__name__)
DOMAIN = "mqtt_json"
# Capability defaults: every optional feature is disabled unless the user
# turns it on in the platform configuration.
DEFAULT_BRIGHTNESS = False
DEFAULT_COLOR_MODE = False
DEFAULT_COLOR_TEMP = False
DEFAULT_EFFECT = False
# Flash durations in seconds used when a turn_on call asks for a flash.
DEFAULT_FLASH_TIME_LONG = 10
DEFAULT_FLASH_TIME_SHORT = 2
DEFAULT_NAME = "MQTT JSON Light"
DEFAULT_OPTIMISTIC = False
DEFAULT_RGB = False
DEFAULT_WHITE_VALUE = False
DEFAULT_XY = False
DEFAULT_HS = False
# Value the device treats as full brightness in its JSON payloads.
DEFAULT_BRIGHTNESS_SCALE = 255
# Configuration keys specific to the JSON schema.
CONF_COLOR_MODE = "color_mode"
CONF_SUPPORTED_COLOR_MODES = "supported_color_modes"
CONF_EFFECT_LIST = "effect_list"
CONF_FLASH_TIME_LONG = "flash_time_long"
CONF_FLASH_TIME_SHORT = "flash_time_short"
CONF_MAX_MIREDS = "max_mireds"
CONF_MIN_MIREDS = "min_mireds"
def valid_color_configuration(config):
    """Reject configurations that mix color_mode with deprecated color keys."""
    deprecated = {CONF_COLOR_TEMP, CONF_HS, CONF_RGB, CONF_WHITE_VALUE, CONF_XY}
    uses_deprecated = any(config.get(option) for option in deprecated)
    if config[CONF_COLOR_MODE] and uses_deprecated:
        raise vol.Invalid(f"color_mode must not be combined with any of {deprecated}")
    return config
# Base voluptuous schema shared by YAML platform setup and MQTT discovery.
# CONF_COLOR_MODE and CONF_SUPPORTED_COLOR_MODES are vol.Inclusive in the
# same "color_mode" group, so they must be supplied together.
_PLATFORM_SCHEMA_BASE = (
    mqtt.MQTT_RW_PLATFORM_SCHEMA.extend(
        {
            vol.Optional(CONF_BRIGHTNESS, default=DEFAULT_BRIGHTNESS): cv.boolean,
            vol.Optional(
                CONF_BRIGHTNESS_SCALE, default=DEFAULT_BRIGHTNESS_SCALE
            ): vol.All(vol.Coerce(int), vol.Range(min=1)),
            vol.Inclusive(
                CONF_COLOR_MODE, "color_mode", default=DEFAULT_COLOR_MODE
            ): cv.boolean,
            vol.Optional(CONF_COLOR_TEMP, default=DEFAULT_COLOR_TEMP): cv.boolean,
            vol.Optional(CONF_EFFECT, default=DEFAULT_EFFECT): cv.boolean,
            vol.Optional(CONF_EFFECT_LIST): vol.All(cv.ensure_list, [cv.string]),
            vol.Optional(
                CONF_FLASH_TIME_LONG, default=DEFAULT_FLASH_TIME_LONG
            ): cv.positive_int,
            vol.Optional(
                CONF_FLASH_TIME_SHORT, default=DEFAULT_FLASH_TIME_SHORT
            ): cv.positive_int,
            vol.Optional(CONF_HS, default=DEFAULT_HS): cv.boolean,
            vol.Optional(CONF_MAX_MIREDS): cv.positive_int,
            vol.Optional(CONF_MIN_MIREDS): cv.positive_int,
            vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
            vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
            vol.Optional(CONF_QOS, default=mqtt.DEFAULT_QOS): vol.All(
                vol.Coerce(int), vol.In([0, 1, 2])
            ),
            vol.Optional(CONF_RETAIN, default=mqtt.DEFAULT_RETAIN): cv.boolean,
            vol.Optional(CONF_RGB, default=DEFAULT_RGB): cv.boolean,
            vol.Optional(CONF_STATE_TOPIC): mqtt.valid_subscribe_topic,
            vol.Inclusive(CONF_SUPPORTED_COLOR_MODES, "color_mode"): vol.All(
                cv.ensure_list,
                [vol.In(VALID_COLOR_MODES)],
                vol.Unique(),
                valid_supported_color_modes,
            ),
            vol.Optional(CONF_WHITE_VALUE, default=DEFAULT_WHITE_VALUE): cv.boolean,
            vol.Optional(CONF_XY, default=DEFAULT_XY): cv.boolean,
        },
    )
    .extend(MQTT_ENTITY_COMMON_SCHEMA.schema)
    .extend(MQTT_LIGHT_SCHEMA_SCHEMA.schema)
)
# Schema for lights configured in YAML; unknown keys are rejected upstream.
PLATFORM_SCHEMA_JSON = vol.All(
    _PLATFORM_SCHEMA_BASE,
    valid_color_configuration,
)
# Schema for MQTT discovery payloads: identical, but silently drops extra
# keys so devices may advertise attributes this integration does not use.
DISCOVERY_SCHEMA_JSON = vol.All(
    _PLATFORM_SCHEMA_BASE.extend({}, extra=vol.REMOVE_EXTRA),
    valid_color_configuration,
)
async def async_setup_entity_json(
    hass, config: ConfigType, async_add_entities, config_entry, discovery_data
):
    """Set up a MQTT JSON Light."""
    entity = MqttLightJson(hass, config, config_entry, discovery_data)
    async_add_entities([entity])
class MqttLightJson(MqttEntity, LightEntity, RestoreEntity):
    """Representation of a MQTT JSON light."""
    # Template used to build the entity_id, and state attributes the generic
    # MQTT attribute topic is not allowed to override.
    _entity_id_format = ENTITY_ID_FORMAT
    _attributes_extra_blocked = MQTT_LIGHT_ATTRIBUTES_BLOCKED
    def __init__(self, hass, config, config_entry, discovery_data):
        """Initialize MQTT JSON light."""
        self._state = False  # last known on/off state
        self._supported_features = 0  # bitmask, filled by _setup_from_config
        self._topic = None  # state/command topics, filled by _setup_from_config
        self._optimistic = False
        # Cached light attributes; None until a state message (or a restored
        # state in optimistic mode) provides a value.
        self._brightness = None
        self._color_mode = None
        self._color_temp = None
        self._effect = None
        self._flash_times = None
        self._hs = None
        self._rgb = None
        self._rgbw = None
        self._rgbww = None
        self._white_value = None
        self._xy = None
        MqttEntity.__init__(self, hass, config, config_entry, discovery_data)
    @staticmethod
    def config_schema():
        """Return the config schema."""
        return DISCOVERY_SCHEMA_JSON
    def _setup_from_config(self, config):
        """(Re)Setup the entity."""
        self._topic = {
            key: config.get(key) for key in (CONF_STATE_TOPIC, CONF_COMMAND_TOPIC)
        }
        optimistic = config[CONF_OPTIMISTIC]
        # Without a state topic the light can only be tracked optimistically.
        self._optimistic = optimistic or self._topic[CONF_STATE_TOPIC] is None
        self._flash_times = {
            key: config.get(key)
            for key in (CONF_FLASH_TIME_SHORT, CONF_FLASH_TIME_LONG)
        }
        # `<bool> and FLAG` yields FLAG when the option is enabled and False
        # (which ORs in as 0) when it is not.
        self._supported_features = SUPPORT_TRANSITION | SUPPORT_FLASH
        self._supported_features |= config[CONF_EFFECT] and SUPPORT_EFFECT
        if not self._config[CONF_COLOR_MODE]:
            # Legacy (pre color_mode) capability flags.
            self._supported_features |= config[CONF_BRIGHTNESS] and SUPPORT_BRIGHTNESS
            self._supported_features |= config[CONF_COLOR_TEMP] and SUPPORT_COLOR_TEMP
            self._supported_features |= config[CONF_HS] and SUPPORT_COLOR
            self._supported_features |= config[CONF_RGB] and (
                SUPPORT_COLOR | SUPPORT_BRIGHTNESS
            )
            self._supported_features |= config[CONF_WHITE_VALUE] and SUPPORT_WHITE_VALUE
            self._supported_features |= config[CONF_XY] and SUPPORT_COLOR
    def _update_color(self, values):
        # Parse the "color"/"color_mode" members of a state payload and update
        # the cached color attributes accordingly.
        if not self._config[CONF_COLOR_MODE]:
            # Deprecated color handling: everything is reduced to hue/sat.
            # RGB, XY and HS members are each tried in turn; a missing key is
            # fine (KeyError -> pass), a malformed value aborts with a warning.
            try:
                red = int(values["color"]["r"])
                green = int(values["color"]["g"])
                blue = int(values["color"]["b"])
                self._hs = color_util.color_RGB_to_hs(red, green, blue)
            except KeyError:
                pass
            except ValueError:
                _LOGGER.warning("Invalid RGB color value received")
                return
            try:
                x_color = float(values["color"]["x"])
                y_color = float(values["color"]["y"])
                self._hs = color_util.color_xy_to_hs(x_color, y_color)
            except KeyError:
                pass
            except ValueError:
                _LOGGER.warning("Invalid XY color value received")
                return
            try:
                hue = float(values["color"]["h"])
                saturation = float(values["color"]["s"])
                self._hs = (hue, saturation)
            except KeyError:
                pass
            except ValueError:
                _LOGGER.warning("Invalid HS color value received")
                return
        else:
            # color_mode handling: the payload names the mode explicitly and
            # only that mode's members are read.
            color_mode = values["color_mode"]
            if not self._supports_color_mode(color_mode):
                _LOGGER.warning("Invalid color mode received")
                return
            try:
                if color_mode == COLOR_MODE_COLOR_TEMP:
                    self._color_temp = int(values["color_temp"])
                    self._color_mode = COLOR_MODE_COLOR_TEMP
                elif color_mode == COLOR_MODE_HS:
                    hue = float(values["color"]["h"])
                    saturation = float(values["color"]["s"])
                    self._color_mode = COLOR_MODE_HS
                    self._hs = (hue, saturation)
                elif color_mode == COLOR_MODE_RGB:
                    r = int(values["color"]["r"])  # pylint: disable=invalid-name
                    g = int(values["color"]["g"])  # pylint: disable=invalid-name
                    b = int(values["color"]["b"])  # pylint: disable=invalid-name
                    self._color_mode = COLOR_MODE_RGB
                    self._rgb = (r, g, b)
                elif color_mode == COLOR_MODE_RGBW:
                    r = int(values["color"]["r"])  # pylint: disable=invalid-name
                    g = int(values["color"]["g"])  # pylint: disable=invalid-name
                    b = int(values["color"]["b"])  # pylint: disable=invalid-name
                    w = int(values["color"]["w"])  # pylint: disable=invalid-name
                    self._color_mode = COLOR_MODE_RGBW
                    self._rgbw = (r, g, b, w)
                elif color_mode == COLOR_MODE_RGBWW:
                    r = int(values["color"]["r"])  # pylint: disable=invalid-name
                    g = int(values["color"]["g"])  # pylint: disable=invalid-name
                    b = int(values["color"]["b"])  # pylint: disable=invalid-name
                    c = int(values["color"]["c"])  # pylint: disable=invalid-name
                    w = int(values["color"]["w"])  # pylint: disable=invalid-name
                    self._color_mode = COLOR_MODE_RGBWW
                    self._rgbww = (r, g, b, c, w)
                elif color_mode == COLOR_MODE_XY:
                    x = float(values["color"]["x"])  # pylint: disable=invalid-name
                    y = float(values["color"]["y"])  # pylint: disable=invalid-name
                    self._color_mode = COLOR_MODE_XY
                    self._xy = (x, y)
            except (KeyError, ValueError):
                _LOGGER.warning("Invalid or incomplete color value received")
    async def _subscribe_topics(self):
        """(Re)Subscribe to topics."""
        last_state = await self.async_get_last_state()
        @callback
        @log_messages(self.hass, self.entity_id)
        def state_received(msg):
            """Handle new MQTT messages."""
            # NOTE(review): a payload without a "state" key raises KeyError
            # here — confirm upstream guarantees it is always present.
            values = json.loads(msg.payload)
            if values["state"] == "ON":
                self._state = True
            elif values["state"] == "OFF":
                self._state = False
            # NOTE(review): `self._supported_features and SUPPORT_COLOR` uses
            # boolean `and`, not bitwise `&`.  Because the base flags always
            # include TRANSITION|FLASH, `self._supported_features` is never 0,
            # so these feature tests are always truthy.  The same pattern
            # repeats below — confirm whether `&` was intended.
            if self._supported_features and SUPPORT_COLOR and "color" in values:
                if values["color"] is None:
                    self._hs = None
                else:
                    self._update_color(values)
            if self._config[CONF_COLOR_MODE] and "color_mode" in values:
                self._update_color(values)
            if self._supported_features and SUPPORT_BRIGHTNESS:
                # Rescale device brightness into Home Assistant's 0..255 range.
                try:
                    self._brightness = int(
                        values["brightness"]
                        / float(self._config[CONF_BRIGHTNESS_SCALE])
                        * 255
                    )
                except KeyError:
                    pass
                except (TypeError, ValueError):
                    _LOGGER.warning("Invalid brightness value received")
            if (
                self._supported_features
                and SUPPORT_COLOR_TEMP
                and not self._config[CONF_COLOR_MODE]
            ):
                # Legacy color_temp handling; with color_mode enabled the
                # temperature is parsed in _update_color instead.
                try:
                    if values["color_temp"] is None:
                        self._color_temp = None
                    else:
                        self._color_temp = int(values["color_temp"])
                except KeyError:
                    pass
                except ValueError:
                    _LOGGER.warning("Invalid color temp value received")
            if self._supported_features and SUPPORT_EFFECT:
                with suppress(KeyError):
                    self._effect = values["effect"]
            if self._supported_features and SUPPORT_WHITE_VALUE:
                try:
                    self._white_value = int(values["white_value"])
                except KeyError:
                    pass
                except ValueError:
                    _LOGGER.warning("Invalid white value received")
            self.async_write_ha_state()
        if self._topic[CONF_STATE_TOPIC] is not None:
            self._sub_state = await subscription.async_subscribe_topics(
                self.hass,
                self._sub_state,
                {
                    "state_topic": {
                        "topic": self._topic[CONF_STATE_TOPIC],
                        "msg_callback": state_received,
                        "qos": self._config[CONF_QOS],
                    }
                },
            )
        # In optimistic mode, seed the entity from the state recorded before
        # the last Home Assistant shutdown.
        if self._optimistic and last_state:
            self._state = last_state.state == STATE_ON
            last_attributes = last_state.attributes
            self._brightness = last_attributes.get(ATTR_BRIGHTNESS, self._brightness)
            self._color_mode = last_attributes.get(ATTR_COLOR_MODE, self._color_mode)
            self._color_temp = last_attributes.get(ATTR_COLOR_TEMP, self._color_temp)
            self._effect = last_attributes.get(ATTR_EFFECT, self._effect)
            self._hs = last_attributes.get(ATTR_HS_COLOR, self._hs)
            self._rgb = last_attributes.get(ATTR_RGB_COLOR, self._rgb)
            self._rgbw = last_attributes.get(ATTR_RGBW_COLOR, self._rgbw)
            self._rgbww = last_attributes.get(ATTR_RGBWW_COLOR, self._rgbww)
            self._white_value = last_attributes.get(ATTR_WHITE_VALUE, self._white_value)
            self._xy = last_attributes.get(ATTR_XY_COLOR, self._xy)
    @property
    def brightness(self):
        """Return the brightness of this light between 0..255."""
        return self._brightness
    @property
    def color_temp(self):
        """Return the color temperature in mired."""
        return self._color_temp
    @property
    def min_mireds(self):
        """Return the coldest color_temp that this light supports."""
        return self._config.get(CONF_MIN_MIREDS, super().min_mireds)
    @property
    def max_mireds(self):
        """Return the warmest color_temp that this light supports."""
        return self._config.get(CONF_MAX_MIREDS, super().max_mireds)
    @property
    def effect(self):
        """Return the current effect."""
        return self._effect
    @property
    def effect_list(self):
        """Return the list of supported effects."""
        return self._config.get(CONF_EFFECT_LIST)
    @property
    def hs_color(self):
        """Return the hs color value."""
        return self._hs
    @property
    def rgb_color(self):
        """Return the rgb color value."""
        return self._rgb
    @property
    def rgbw_color(self):
        """Return the rgbw color value."""
        return self._rgbw
    @property
    def rgbww_color(self):
        """Return the rgbww color value."""
        return self._rgbww
    @property
    def xy_color(self):
        """Return the xy color value."""
        return self._xy
    @property
    def white_value(self):
        """Return the white property."""
        return self._white_value
    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state
    @property
    def assumed_state(self):
        """Return true if we do optimistic updates."""
        return self._optimistic
    @property
    def color_mode(self):
        """Return current color mode."""
        return self._color_mode
    @property
    def supported_color_modes(self):
        """Flag supported color modes."""
        return self._config.get(CONF_SUPPORTED_COLOR_MODES)
    @property
    def supported_features(self):
        """Flag supported features."""
        return legacy_supported_features(
            self._supported_features, self._config.get(CONF_SUPPORTED_COLOR_MODES)
        )
    def _set_flash_and_transition(self, message, **kwargs):
        """Copy transition/flash service data into the outgoing message."""
        if ATTR_TRANSITION in kwargs:
            message["transition"] = kwargs[ATTR_TRANSITION]
        if ATTR_FLASH in kwargs:
            flash = kwargs.get(ATTR_FLASH)
            if flash == FLASH_LONG:
                message["flash"] = self._flash_times[CONF_FLASH_TIME_LONG]
            elif flash == FLASH_SHORT:
                message["flash"] = self._flash_times[CONF_FLASH_TIME_SHORT]
    def _scale_rgbxx(self, rgbxx, kwargs):
        """Scale an RGB/RGBW/RGBWW tuple by the requested brightness."""
        # If there's a brightness topic set, we don't want to scale the
        # RGBxx values given using the brightness.
        if self._config[CONF_BRIGHTNESS]:
            brightness = 255
        else:
            brightness = kwargs.get(ATTR_BRIGHTNESS, 255)
        return tuple(round(i / 255 * brightness) for i in rgbxx)
    def _supports_color_mode(self, color_mode):
        """Return True when color_mode is in the configured supported modes."""
        return self.supported_color_modes and color_mode in self.supported_color_modes
    async def async_turn_on(self, **kwargs):  # noqa: C901
        """Turn the device on.
        This method is a coroutine.
        """
        should_update = False
        message = {"state": "ON"}
        # Legacy color path: translate the requested hs color into whichever
        # deprecated color representations the config enables.
        if ATTR_HS_COLOR in kwargs and (
            self._config[CONF_HS] or self._config[CONF_RGB] or self._config[CONF_XY]
        ):
            hs_color = kwargs[ATTR_HS_COLOR]
            message["color"] = {}
            if self._config[CONF_RGB]:
                # If there's a brightness topic set, we don't want to scale the
                # RGB values given using the brightness.
                if self._config[CONF_BRIGHTNESS]:
                    brightness = 255
                else:
                    brightness = kwargs.get(ATTR_BRIGHTNESS, 255)
                rgb = color_util.color_hsv_to_RGB(
                    hs_color[0], hs_color[1], brightness / 255 * 100
                )
                message["color"]["r"] = rgb[0]
                message["color"]["g"] = rgb[1]
                message["color"]["b"] = rgb[2]
            if self._config[CONF_XY]:
                xy_color = color_util.color_hs_to_xy(*kwargs[ATTR_HS_COLOR])
                message["color"]["x"] = xy_color[0]
                message["color"]["y"] = xy_color[1]
            if self._config[CONF_HS]:
                message["color"]["h"] = hs_color[0]
                message["color"]["s"] = hs_color[1]
            if self._optimistic:
                self._hs = kwargs[ATTR_HS_COLOR]
                should_update = True
        # color_mode paths: each requested color kind is sent in its native
        # representation when the device declares support for it.
        if ATTR_HS_COLOR in kwargs and self._supports_color_mode(COLOR_MODE_HS):
            hs_color = kwargs[ATTR_HS_COLOR]
            message["color"] = {"h": hs_color[0], "s": hs_color[1]}
            if self._optimistic:
                self._color_mode = COLOR_MODE_HS
                self._hs = hs_color
                should_update = True
        if ATTR_RGB_COLOR in kwargs and self._supports_color_mode(COLOR_MODE_RGB):
            rgb = self._scale_rgbxx(kwargs[ATTR_RGB_COLOR], kwargs)
            message["color"] = {"r": rgb[0], "g": rgb[1], "b": rgb[2]}
            if self._optimistic:
                self._color_mode = COLOR_MODE_RGB
                self._rgb = rgb
                should_update = True
        if ATTR_RGBW_COLOR in kwargs and self._supports_color_mode(COLOR_MODE_RGBW):
            rgb = self._scale_rgbxx(kwargs[ATTR_RGBW_COLOR], kwargs)
            message["color"] = {"r": rgb[0], "g": rgb[1], "b": rgb[2], "w": rgb[3]}
            if self._optimistic:
                self._color_mode = COLOR_MODE_RGBW
                self._rgbw = rgb
                should_update = True
        if ATTR_RGBWW_COLOR in kwargs and self._supports_color_mode(COLOR_MODE_RGBWW):
            rgb = self._scale_rgbxx(kwargs[ATTR_RGBWW_COLOR], kwargs)
            message["color"] = {
                "r": rgb[0],
                "g": rgb[1],
                "b": rgb[2],
                "c": rgb[3],
                "w": rgb[4],
            }
            if self._optimistic:
                self._color_mode = COLOR_MODE_RGBWW
                self._rgbww = rgb
                should_update = True
        if ATTR_XY_COLOR in kwargs and self._supports_color_mode(COLOR_MODE_XY):
            xy = kwargs[ATTR_XY_COLOR]  # pylint: disable=invalid-name
            message["color"] = {"x": xy[0], "y": xy[1]}
            if self._optimistic:
                self._color_mode = COLOR_MODE_XY
                self._xy = xy
                should_update = True
        self._set_flash_and_transition(message, **kwargs)
        if ATTR_BRIGHTNESS in kwargs and self._config[CONF_BRIGHTNESS]:
            # Rescale 0..255 into the device's configured brightness range.
            brightness_normalized = kwargs[ATTR_BRIGHTNESS] / DEFAULT_BRIGHTNESS_SCALE
            brightness_scale = self._config[CONF_BRIGHTNESS_SCALE]
            device_brightness = min(
                round(brightness_normalized * brightness_scale), brightness_scale
            )
            # Make sure the brightness is not rounded down to 0
            device_brightness = max(device_brightness, 1)
            message["brightness"] = device_brightness
            if self._optimistic:
                self._brightness = kwargs[ATTR_BRIGHTNESS]
                should_update = True
        if ATTR_COLOR_TEMP in kwargs:
            message["color_temp"] = int(kwargs[ATTR_COLOR_TEMP])
            if self._optimistic:
                self._color_temp = kwargs[ATTR_COLOR_TEMP]
                should_update = True
        if ATTR_EFFECT in kwargs:
            message["effect"] = kwargs[ATTR_EFFECT]
            if self._optimistic:
                self._effect = kwargs[ATTR_EFFECT]
                should_update = True
        if ATTR_WHITE_VALUE in kwargs:
            message["white_value"] = int(kwargs[ATTR_WHITE_VALUE])
            if self._optimistic:
                self._white_value = kwargs[ATTR_WHITE_VALUE]
                should_update = True
        await mqtt.async_publish(
            self.hass,
            self._topic[CONF_COMMAND_TOPIC],
            json.dumps(message),
            self._config[CONF_QOS],
            self._config[CONF_RETAIN],
        )
        if self._optimistic:
            # Optimistically assume that the light has changed state.
            self._state = True
            should_update = True
        if should_update:
            self.async_write_ha_state()
    async def async_turn_off(self, **kwargs):
        """Turn the device off.
        This method is a coroutine.
        """
        message = {"state": "OFF"}
        self._set_flash_and_transition(message, **kwargs)
        await mqtt.async_publish(
            self.hass,
            self._topic[CONF_COMMAND_TOPIC],
            json.dumps(message),
            self._config[CONF_QOS],
            self._config[CONF_RETAIN],
        )
        if self._optimistic:
            # Optimistically assume that the light has changed state.
            self._state = False
            self.async_write_ha_state()
| |
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
import itertools
import numpy as np
from numpy.testing import assert_, run_module_suite
from qutip.states import basis, ket2dm
from qutip.operators import identity, qeye, sigmax, sigmay, sigmaz
from qutip.qip import (rx, ry, rz, phasegate, cnot, swap, iswap,
sqrtswap, toffoli, fredkin, gate_expand_3toN, qubit_clifford_group)
from qutip.random_objects import rand_ket, rand_herm
from qutip.tensor import tensor
class TestGates:
    """
    A test class for the QuTiP functions for generating quantum gates
    """
    def testSwapGate(self):
        """
        gates: swap gate
        """
        # swap() must exchange two random single-qubit states, and applying
        # it twice must be the identity.
        a, b = np.random.rand(), np.random.rand()
        psi1 = (a * basis(2, 0) + b * basis(2, 1)).unit()
        c, d = np.random.rand(), np.random.rand()
        psi2 = (c * basis(2, 0) + d * basis(2, 1)).unit()
        psi_in = tensor(psi1, psi2)
        psi_out = tensor(psi2, psi1)
        psi_res = swap() * psi_in
        assert_((psi_out - psi_res).norm() < 1e-12)
        psi_res = swap() * swap() * psi_in
        assert_((psi_in - psi_res).norm() < 1e-12)
    def test_clifford_group_len(self):
        """The single-qubit Clifford group has exactly 24 elements."""
        assert_(len(list(qubit_clifford_group())) == 24)
    def _prop_identity(self, U, tol=1e-6):
        """
        Returns True if and only if U is proportional to the
        identity.
        """
        if U[0, 0] != 0:
            norm_U = U / U[0, 0]
            return (qeye(U.dims[0]) - norm_U).norm() <= tol
        else:
            return False
    def case_is_clifford(self, U):
        """Return True if U conjugates every Pauli onto a Pauli."""
        paulis = (identity(2), sigmax(), sigmay(), sigmaz())
        # BUG FIX: the previous implementation overwrote the result on every
        # iteration, so only the last Pauli (sigmaz) was actually checked.
        # All four Paulis must map onto (a phase times) some Pauli.
        for P in paulis:
            U_P = U * P * U.dag()
            if not any(self._prop_identity(U_P * Q) for Q in paulis):
                return False
        return True
    def test_are_cliffords(self):
        """Every generated Clifford element must pass the Clifford check."""
        for U in qubit_clifford_group():
            assert_(self.case_is_clifford(U))
    def testExpandGate1toN(self):
        """
        gates: expand 1 to N
        """
        N = 7
        for g in [rx, ry, rz, phasegate]:
            # Use np.pi rather than the truncated literal 3.1415 so the
            # rotation angle covers the full [0, 2*pi) range.
            theta = np.random.rand() * 2 * np.pi
            a, b = np.random.rand(), np.random.rand()
            psi1 = (a * basis(2, 0) + b * basis(2, 1)).unit()
            psi2 = g(theta) * psi1
            psi_rand_list = [rand_ket(2) for k in range(N)]
            # Expanding the gate onto qubit m must act on qubit m only.
            for m in range(N):
                psi_in = tensor([psi1 if k == m else psi_rand_list[k]
                                 for k in range(N)])
                psi_out = tensor([psi2 if k == m else psi_rand_list[k]
                                  for k in range(N)])
                G = g(theta, N, m)
                psi_res = G * psi_in
                assert_((psi_out - psi_res).norm() < 1e-12)
    def testExpandGate2toNSwap(self):
        """
        gates: expand 2 to N (using swap)
        """
        a, b = np.random.rand(), np.random.rand()
        k1 = (a * basis(2, 0) + b * basis(2, 1)).unit()
        c, d = np.random.rand(), np.random.rand()
        k2 = (c * basis(2, 0) + d * basis(2, 1)).unit()
        N = 6
        kets = [rand_ket(2) for k in range(N)]
        for m in range(N):
            for n in set(range(N)) - {m}:
                psi_in = tensor([k1 if k == m else k2 if k == n else kets[k]
                                 for k in range(N)])
                psi_out = tensor([k2 if k == m else k1 if k == n else kets[k]
                                  for k in range(N)])
                targets = [m, n]
                G = swap(N, targets)
                # BUG FIX: the expected state `psi_out` used to be overwritten
                # with `G * psi_in`, which made the assertion compare the
                # result with itself and pass trivially.
                psi_res = G * psi_in
                assert_((psi_out - psi_res).norm() < 1e-12)
    def testExpandGate2toN(self):
        """
        gates: expand 2 to N (using cnot, iswap, sqrtswap)
        """
        a, b = np.random.rand(), np.random.rand()
        k1 = (a * basis(2, 0) + b * basis(2, 1)).unit()
        c, d = np.random.rand(), np.random.rand()
        k2 = (c * basis(2, 0) + d * basis(2, 1)).unit()
        psi_ref_in = tensor(k1, k2)
        N = 6
        psi_rand_list = [rand_ket(2) for k in range(N)]
        for g in [cnot, iswap, sqrtswap]:
            # Two-qubit reference: the gate applied without expansion.
            psi_ref_out = g() * psi_ref_in
            rho_ref_out = ket2dm(psi_ref_out)
            for m in range(N):
                for n in set(range(N)) - {m}:
                    psi_list = [psi_rand_list[k] for k in range(N)]
                    psi_list[m] = k1
                    psi_list[n] = k2
                    psi_in = tensor(psi_list)
                    # cnot takes (N, control, target); the others take a
                    # list of target qubits.
                    if g == cnot:
                        G = g(N, m, n)
                    else:
                        G = g(N, [m, n])
                    psi_out = G * psi_in
                    # Overlaps and the reduced state on (m, n) must match
                    # the two-qubit reference.
                    o1 = psi_out.overlap(psi_in)
                    o2 = psi_ref_out.overlap(psi_ref_in)
                    assert_(abs(o1 - o2) < 1e-12)
                    p = [0, 1] if m < n else [1, 0]
                    rho_out = psi_out.ptrace([m, n]).permute(p)
                    assert_((rho_ref_out - rho_out).norm() < 1e-12)
    def testExpandGate3toN_permutation(self):
        """
        gates: expand 3 to 3 with permuTation (using toffoli)
        """
        # BUG FIX: `controls` and `target` used to be unconditionally reset
        # to [1, 2] and 0 inside the loop, so only one permutation was ever
        # exercised despite the itertools.permutations loop.
        for _p in itertools.permutations([0, 1, 2]):
            controls, target = [_p[0], _p[1]], _p[2]
            # p maps physical qubit position -> logical role
            # (0: first control, 1: second control, 2: target).
            p = [1, 2, 3]
            p[controls[0]] = 0
            p[controls[1]] = 1
            p[target] = 2
            U = toffoli(N=3, controls=controls, target=target)
            # Project the controls onto each basis combination: the target
            # must see the identity unless both controls are |1>.
            ops = [basis(2, 0).dag(), basis(2, 0).dag(), identity(2)]
            P = tensor(ops[p[0]], ops[p[1]], ops[p[2]])
            assert_(P * U * P.dag() == identity(2))
            ops = [basis(2, 1).dag(), basis(2, 0).dag(), identity(2)]
            P = tensor(ops[p[0]], ops[p[1]], ops[p[2]])
            assert_(P * U * P.dag() == identity(2))
            ops = [basis(2, 0).dag(), basis(2, 1).dag(), identity(2)]
            P = tensor(ops[p[0]], ops[p[1]], ops[p[2]])
            assert_(P * U * P.dag() == identity(2))
            ops = [basis(2, 1).dag(), basis(2, 1).dag(), identity(2)]
            P = tensor(ops[p[0]], ops[p[1]], ops[p[2]])
            assert_(P * U * P.dag() == sigmax())
    def testExpandGate3toN(self):
        """
        gates: expand 3 to N (using toffoli, fredkin, and random 3 qubit gate)
        """
        a, b = np.random.rand(), np.random.rand()
        psi1 = (a * basis(2, 0) + b * basis(2, 1)).unit()
        c, d = np.random.rand(), np.random.rand()
        psi2 = (c * basis(2, 0) + d * basis(2, 1)).unit()
        e, f = np.random.rand(), np.random.rand()
        psi3 = (e * basis(2, 0) + f * basis(2, 1)).unit()
        N = 4
        psi_rand_list = [rand_ket(2) for k in range(N)]
        # A fixed random (separable) three-qubit operator, wrapped so it has
        # the same calling convention as toffoli/fredkin.
        _rand_gate_U = tensor([rand_herm(2, density=1) for k in range(3)])
        def _rand_3qubit_gate(N=None, controls=None, k=None):
            """Wrapper of the random gate with the same signature as toffoli."""
            if N is None:
                return _rand_gate_U
            else:
                return gate_expand_3toN(_rand_gate_U, N, controls, k)
        for g in [fredkin, toffoli, _rand_3qubit_gate]:
            psi_ref_in = tensor(psi1, psi2, psi3)
            psi_ref_out = g() * psi_ref_in
            for m in range(N):
                for n in set(range(N)) - {m}:
                    for k in set(range(N)) - {m, n}:
                        psi_list = [psi_rand_list[p] for p in range(N)]
                        psi_list[m] = psi1
                        psi_list[n] = psi2
                        psi_list[k] = psi3
                        psi_in = tensor(psi_list)
                        # fredkin takes one control and two targets; the
                        # others take two controls and one target.
                        if g == fredkin:
                            targets = [n, k]
                            G = g(N, control=m, targets=targets)
                        else:
                            controls = [m, n]
                            G = g(N, controls, k)
                        psi_out = G * psi_in
                        o1 = psi_out.overlap(psi_in)
                        o2 = psi_ref_out.overlap(psi_ref_in)
                        assert_(abs(o1 - o2) < 1e-12)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    run_module_suite()
| |
"""Regresssion tests for urllib"""
import urllib
import httplib
import unittest
from test import test_support
import os
import mimetools
import tempfile
import StringIO
def hexescape(char):
    """Return *char* percent-escaped as RFC 2396 specifies (uppercase hex)."""
    # zfill pads single-digit codes to two digits, matching the manual
    # "0%s" padding of the original implementation.
    return "%" + hex(ord(char))[2:].upper().zfill(2)
class urlopen_FileTests(unittest.TestCase):
    """Test urlopen() opening a temporary file.
    Try to test as much functionality as possible so as to cut down on reliance
    on connecting to the Net for testing.
    """
    def setUp(self):
        """Setup of a temp file to use for testing"""
        self.text = "test_urllib: %s\n" % self.__class__.__name__
        # Python 2 built-in file(); write the fixture, always closing it.
        FILE = file(test_support.TESTFN, 'wb')
        try:
            FILE.write(self.text)
        finally:
            FILE.close()
        self.pathname = test_support.TESTFN
        self.returned_obj = urllib.urlopen("file:%s" % self.pathname)
    def tearDown(self):
        """Shut down the open object"""
        self.returned_obj.close()
        os.remove(test_support.TESTFN)
    def test_interface(self):
        # Make sure object returned by urlopen() has the specified methods
        for attr in ("read", "readline", "readlines", "fileno",
                     "close", "info", "geturl", "getcode", "__iter__"):
            self.assert_(hasattr(self.returned_obj, attr),
                         "object returned by urlopen() lacks %s attribute" %
                         attr)
    def test_read(self):
        self.assertEqual(self.text, self.returned_obj.read())
    def test_readline(self):
        self.assertEqual(self.text, self.returned_obj.readline())
        # The file holds a single line, so a second readline() is at EOF.
        self.assertEqual('', self.returned_obj.readline(),
                         "calling readline() after exhausting the file did not"
                         " return an empty string")
    def test_readlines(self):
        lines_list = self.returned_obj.readlines()
        self.assertEqual(len(lines_list), 1,
                         "readlines() returned the wrong number of lines")
        self.assertEqual(lines_list[0], self.text,
                         "readlines() returned improper text")
    def test_fileno(self):
        # fileno() must expose a real OS-level descriptor for the file.
        file_num = self.returned_obj.fileno()
        self.assert_(isinstance(file_num, int),
                     "fileno() did not return an int")
        self.assertEqual(os.read(file_num, len(self.text)), self.text,
                         "Reading on the file descriptor returned by fileno() "
                         "did not return the expected text")
    def test_close(self):
        # Test close() by calling it here and then having it be called again
        # by the tearDown() method for the test
        self.returned_obj.close()
    def test_info(self):
        self.assert_(isinstance(self.returned_obj.info(), mimetools.Message))
    def test_geturl(self):
        self.assertEqual(self.returned_obj.geturl(), self.pathname)
    def test_getcode(self):
        # Local file URLs carry no HTTP status, so getcode() is None.
        self.assertEqual(self.returned_obj.getcode(), None)
    def test_iter(self):
        # Test iterator
        # Don't need to count number of iterations since test would fail the
        # instant it returned anything beyond the first line from the
        # comparison
        for line in self.returned_obj.__iter__():
            self.assertEqual(line, self.text)
class ProxyTests(unittest.TestCase):
    """Test proxy-related environment variable handling in urllib."""
    def setUp(self):
        # Records changes to env vars
        self.env = test_support.EnvironmentVarGuard()
        # Delete all proxy related env vars so the test starts clean.
        for k in os.environ.keys():
            if 'proxy' in k.lower():
                self.env.unset(k)
    def tearDown(self):
        # Restore all proxy related env vars
        self.env.__exit__()
        del self.env
    def test_getproxies_environment_keep_no_proxies(self):
        self.env.set('NO_PROXY', 'localhost')
        proxies = urllib.getproxies_environment()
        # getproxies_environment uses lowercased, truncated (no '_proxy') keys
        self.assertEquals('localhost', proxies['no'])
class urlopen_HttpTests(unittest.TestCase):
    """Test urlopen() opening a fake http connection."""
    def fakehttp(self, fakedata):
        # Replace httplib's connection class with one whose socket replays
        # `fakedata` instead of talking to the network.  unfakehttp() must be
        # called afterwards to undo the monkeypatch.
        class FakeSocket(StringIO.StringIO):
            def sendall(self, str): pass
            def makefile(self, mode, name): return self
            def read(self, amt=None):
                if self.closed: return ''
                return StringIO.StringIO.read(self, amt)
            def readline(self, length=None):
                if self.closed: return ''
                return StringIO.StringIO.readline(self, length)
        class FakeHTTPConnection(httplib.HTTPConnection):
            def connect(self):
                self.sock = FakeSocket(fakedata)
        assert httplib.HTTP._connection_class == httplib.HTTPConnection
        httplib.HTTP._connection_class = FakeHTTPConnection
    def unfakehttp(self):
        # Undo the monkeypatch installed by fakehttp().
        httplib.HTTP._connection_class = httplib.HTTPConnection
    def test_read(self):
        self.fakehttp('Hello!')
        try:
            fp = urllib.urlopen("http://python.org/")
            self.assertEqual(fp.readline(), 'Hello!')
            self.assertEqual(fp.readline(), '')
            self.assertEqual(fp.geturl(), 'http://python.org/')
            self.assertEqual(fp.getcode(), 200)
        finally:
            self.unfakehttp()
    def test_read_bogus(self):
        # urlopen() should raise IOError for many error codes.
        self.fakehttp('''HTTP/1.1 401 Authentication Required
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Type: text/html; charset=iso-8859-1
''')
        try:
            self.assertRaises(IOError, urllib.urlopen, "http://python.org/")
        finally:
            self.unfakehttp()
    def test_empty_socket(self):
        # urlopen() raises IOError if the underlying socket does not send any
        # data. (#1680230)
        self.fakehttp('')
        try:
            self.assertRaises(IOError, urllib.urlopen, 'http://something')
        finally:
            self.unfakehttp()
class urlretrieve_FileTests(unittest.TestCase):
    """Test urllib.urlretrieve() on local files"""

    def setUp(self):
        # Create a list of temporary files. Each item in the list is a file
        # name (absolute path or relative to the current working directory).
        # All files in this list will be deleted in the tearDown method. Note,
        # this only helps to makes sure temporary files get deleted, but it
        # does nothing about trying to close files that may still be open. It
        # is the responsibility of the developer to properly close files even
        # when exceptional conditions occur.
        self.tempFiles = []

        # Create a temporary file.
        self.registerFileForCleanUp(test_support.TESTFN)
        self.text = 'testing urllib.urlretrieve'
        # NOTE: file() is the Python 2 builtin open; the finally clause
        # re-closes defensively in case write() raised after a successful open.
        try:
            FILE = file(test_support.TESTFN, 'wb')
            FILE.write(self.text)
            FILE.close()
        finally:
            try: FILE.close()
            except: pass

    def tearDown(self):
        # Delete the temporary files.
        for each in self.tempFiles:
            # Best effort: a file may already be gone or never have been made.
            try: os.remove(each)
            except: pass

    def constructLocalFileUrl(self, filePath):
        # Build a file:// URL for the absolute form of filePath.
        return "file://%s" % urllib.pathname2url(os.path.abspath(filePath))

    def createNewTempFile(self, data=""):
        """Creates a new temporary file containing the specified data,
        registers the file for deletion during the test fixture tear down, and
        returns the absolute path of the file."""
        newFd, newFilePath = tempfile.mkstemp()
        try:
            self.registerFileForCleanUp(newFilePath)
            newFile = os.fdopen(newFd, "wb")
            newFile.write(data)
            newFile.close()
        finally:
            try: newFile.close()
            except: pass
        return newFilePath

    def registerFileForCleanUp(self, fileName):
        # Queue fileName for removal in tearDown().
        self.tempFiles.append(fileName)

    def test_basic(self):
        # Make sure that a local file just gets its own location returned and
        # a headers value is returned.
        result = urllib.urlretrieve("file:%s" % test_support.TESTFN)
        self.assertEqual(result[0], test_support.TESTFN)
        self.assert_(isinstance(result[1], mimetools.Message),
                     "did not get a mimetools.Message instance as second "
                     "returned value")

    def test_copy(self):
        # Test that setting the filename argument works.
        second_temp = "%s.2" % test_support.TESTFN
        self.registerFileForCleanUp(second_temp)
        result = urllib.urlretrieve(self.constructLocalFileUrl(
            test_support.TESTFN), second_temp)
        self.assertEqual(second_temp, result[0])
        self.assert_(os.path.exists(second_temp), "copy of the file was not "
                                                  "made")
        FILE = file(second_temp, 'rb')
        try:
            text = FILE.read()
            FILE.close()
        finally:
            try: FILE.close()
            except: pass
        self.assertEqual(self.text, text)

    def test_reporthook(self):
        # Make sure that the reporthook works.
        def hooktester(count, block_size, total_size, count_holder=[0]):
            # Deliberate mutable default: count_holder persists across hook
            # invocations so the call counter can be checked to increment.
            self.assert_(isinstance(count, int))
            self.assert_(isinstance(block_size, int))
            self.assert_(isinstance(total_size, int))
            self.assertEqual(count, count_holder[0])
            count_holder[0] = count_holder[0] + 1
        second_temp = "%s.2" % test_support.TESTFN
        self.registerFileForCleanUp(second_temp)
        urllib.urlretrieve(self.constructLocalFileUrl(test_support.TESTFN),
            second_temp, hooktester)

    def test_reporthook_0_bytes(self):
        # Test on zero length file. Should call reporthook only 1 time.
        report = []
        def hooktester(count, block_size, total_size, _report=report):
            _report.append((count, block_size, total_size))
        srcFileName = self.createNewTempFile()
        urllib.urlretrieve(self.constructLocalFileUrl(srcFileName),
            test_support.TESTFN, hooktester)
        self.assertEqual(len(report), 1)
        self.assertEqual(report[0][2], 0)

    def test_reporthook_5_bytes(self):
        # Test on 5 byte file. Should call reporthook only 2 times (once when
        # the "network connection" is established and once when the block is
        # read). Since the block size is 8192 bytes, only one block read is
        # required to read the entire file.
        report = []
        def hooktester(count, block_size, total_size, _report=report):
            _report.append((count, block_size, total_size))
        srcFileName = self.createNewTempFile("x" * 5)
        urllib.urlretrieve(self.constructLocalFileUrl(srcFileName),
            test_support.TESTFN, hooktester)
        self.assertEqual(len(report), 2)
        self.assertEqual(report[0][1], 8192)
        self.assertEqual(report[0][2], 5)

    def test_reporthook_8193_bytes(self):
        # Test on 8193 byte file. Should call reporthook only 3 times (once
        # when the "network connection" is established, once for the next 8192
        # bytes, and once for the last byte).
        report = []
        def hooktester(count, block_size, total_size, _report=report):
            _report.append((count, block_size, total_size))
        srcFileName = self.createNewTempFile("x" * 8193)
        urllib.urlretrieve(self.constructLocalFileUrl(srcFileName),
            test_support.TESTFN, hooktester)
        self.assertEqual(len(report), 3)
        self.assertEqual(report[0][1], 8192)
        self.assertEqual(report[0][2], 8193)
class QuotingTests(unittest.TestCase):
    """Tests for urllib.quote() and urllib.quote_plus()

    According to RFC 2396 ("Uniform Resource Identifiers), to escape a
    character you write it as '%' + <2 character US-ASCII hex value>. The Python
    code of ``'%' + hex(ord(<character>))[2:]`` escapes a character properly.
    Case does not matter on the hex letters.

    The various character sets specified are:

    Reserved characters : ";/?:@&=+$,"
        Have special meaning in URIs and must be escaped if not being used for
        their special meaning
    Data characters : letters, digits, and "-_.!~*'()"
        Unreserved and do not need to be escaped; can be, though, if desired
    Control characters : 0x00 - 0x1F, 0x7F
        Have no use in URIs so must be escaped
    space : 0x20
        Must be escaped
    Delimiters : '<>#%"'
        Must be escaped
    Unwise : "{}|\^[]`"
        Must be escaped
    """

    def test_never_quote(self):
        # Make sure quote() does not quote letters, digits, and "_,.-"
        do_not_quote = ''.join(["ABCDEFGHIJKLMNOPQRSTUVWXYZ",
                                "abcdefghijklmnopqrstuvwxyz",
                                "0123456789",
                                "_.-"])
        result = urllib.quote(do_not_quote)
        self.assertEqual(do_not_quote, result,
                         "using quote(): %s != %s" % (do_not_quote, result))
        result = urllib.quote_plus(do_not_quote)
        self.assertEqual(do_not_quote, result,
                         "using quote_plus(): %s != %s" % (do_not_quote, result))

    def test_default_safe(self):
        # Test '/' is default value for 'safe' parameter
        self.assertEqual(urllib.quote.func_defaults[0], '/')

    def test_safe(self):
        # Test setting 'safe' parameter does what it should do
        quote_by_default = "<>"
        result = urllib.quote(quote_by_default, safe=quote_by_default)
        self.assertEqual(quote_by_default, result,
                         "using quote(): %s != %s" % (quote_by_default, result))
        result = urllib.quote_plus(quote_by_default, safe=quote_by_default)
        self.assertEqual(quote_by_default, result,
                         "using quote_plus(): %s != %s" %
                         (quote_by_default, result))

    def test_default_quoting(self):
        # Make sure all characters that should be quoted are by default sans
        # space (separate test for that).
        should_quote = [chr(num) for num in range(32)]  # For 0x00 - 0x1F
        should_quote.append('<>#%"{}|\^[]`')
        should_quote.append(chr(127))  # For 0x7F
        should_quote = ''.join(should_quote)
        for char in should_quote:
            result = urllib.quote(char)
            self.assertEqual(hexescape(char), result,
                             "using quote(): %s should be escaped to %s, not %s" %
                             (char, hexescape(char), result))
            result = urllib.quote_plus(char)
            # BUG FIX: message said "escapes to"; corrected to "escaped to" so
            # both failure messages read the same way.
            self.assertEqual(hexescape(char), result,
                             "using quote_plus(): "
                             "%s should be escaped to %s, not %s" %
                             (char, hexescape(char), result))
        del should_quote
        partial_quote = "ab[]cd"
        expected = "ab%5B%5Dcd"
        result = urllib.quote(partial_quote)
        self.assertEqual(expected, result,
                         "using quote(): %s != %s" % (expected, result))
        # BUG FIX: the original asserted the quote_plus() message against the
        # quote() result without ever calling quote_plus(); exercise it.
        result = urllib.quote_plus(partial_quote)
        self.assertEqual(expected, result,
                         "using quote_plus(): %s != %s" % (expected, result))

    def test_quoting_space(self):
        # Make sure quote() and quote_plus() handle spaces as specified in
        # their unique way
        result = urllib.quote(' ')
        self.assertEqual(result, hexescape(' '),
                         "using quote(): %s != %s" % (result, hexescape(' ')))
        result = urllib.quote_plus(' ')
        self.assertEqual(result, '+',
                         "using quote_plus(): %s != +" % result)
        given = "a b cd e f"
        expect = given.replace(' ', hexescape(' '))
        result = urllib.quote(given)
        self.assertEqual(expect, result,
                         "using quote(): %s != %s" % (expect, result))
        expect = given.replace(' ', '+')
        result = urllib.quote_plus(given)
        self.assertEqual(expect, result,
                         "using quote_plus(): %s != %s" % (expect, result))

    def test_quoting_plus(self):
        # '+' is quoted by default, but may be declared safe.
        self.assertEqual(urllib.quote_plus('alpha+beta gamma'),
                         'alpha%2Bbeta+gamma')
        self.assertEqual(urllib.quote_plus('alpha+beta gamma', '+'),
                         'alpha+beta+gamma')
class UnquotingTests(unittest.TestCase):
    """Tests for unquote() and unquote_plus()

    See the doc string for quoting_Tests for details on quoting and such.
    """

    def test_unquoting(self):
        # Make sure unquoting of all ASCII values works
        escape_list = []
        for num in range(128):
            given = hexescape(chr(num))
            expect = chr(num)
            result = urllib.unquote(given)
            self.assertEqual(expect, result,
                             "using unquote(): %s != %s" % (expect, result))
            result = urllib.unquote_plus(given)
            self.assertEqual(expect, result,
                             "using unquote_plus(): %s != %s" %
                             (expect, result))
            escape_list.append(given)
        escape_string = ''.join(escape_list)
        del escape_list
        # Only '%25' should unquote to a literal '%', hence exactly one '%'.
        # BUG FIX: message said "using quote()" for an unquote() call.
        result = urllib.unquote(escape_string)
        self.assertEqual(result.count('%'), 1,
                         "using unquote(): not all characters escaped; %s" %
                         result)
        # BUG FIX: the second check duplicated unquote(); it was clearly
        # intended to cover unquote_plus() (identical result here: the
        # escape string contains no literal '+').
        result = urllib.unquote_plus(escape_string)
        self.assertEqual(result.count('%'), 1,
                         "using unquote_plus(): not all characters escaped: "
                         "%s" % result)

    def test_unquoting_badpercent(self):
        # Test unquoting on bad percent-escapes
        given = '%xab'
        expect = given
        result = urllib.unquote(given)
        self.assertEqual(expect, result, "using unquote(): %r != %r"
                         % (expect, result))
        given = '%x'
        expect = given
        result = urllib.unquote(given)
        self.assertEqual(expect, result, "using unquote(): %r != %r"
                         % (expect, result))
        given = '%'
        expect = given
        result = urllib.unquote(given)
        self.assertEqual(expect, result, "using unquote(): %r != %r"
                         % (expect, result))

    def test_unquoting_mixed_case(self):
        # Test unquoting on mixed-case hex digits in the percent-escapes
        given = '%Ab%eA'
        expect = '\xab\xea'
        result = urllib.unquote(given)
        self.assertEqual(expect, result, "using unquote(): %r != %r"
                         % (expect, result))

    def test_unquoting_parts(self):
        # Make sure unquoting works when have non-quoted characters
        # interspersed
        given = 'ab%sd' % hexescape('c')
        expect = "abcd"
        result = urllib.unquote(given)
        # BUG FIX: message said "using quote()" for an unquote() call.
        self.assertEqual(expect, result,
                         "using unquote(): %s != %s" % (expect, result))
        result = urllib.unquote_plus(given)
        self.assertEqual(expect, result,
                         "using unquote_plus(): %s != %s" % (expect, result))

    def test_unquoting_plus(self):
        # Test difference between unquote() and unquote_plus()
        given = "are+there+spaces..."
        expect = given
        result = urllib.unquote(given)
        self.assertEqual(expect, result,
                         "using unquote(): %s != %s" % (expect, result))
        expect = given.replace('+', ' ')
        result = urllib.unquote_plus(given)
        self.assertEqual(expect, result,
                         "using unquote_plus(): %s != %s" % (expect, result))

    def test_unquote_with_unicode(self):
        # Unquoting a unicode string yields unicode (bytes decoded latin-1).
        r = urllib.unquote(u'br%C3%BCckner_sapporo_20050930.doc')
        self.assertEqual(r, u'br\xc3\xbcckner_sapporo_20050930.doc')
class urlencode_Tests(unittest.TestCase):
    """Tests for urlencode()"""

    def help_inputtype(self, given, test_type):
        """Helper method for testing different input types.

        'given' must lead to only the pairs:
            * 1st, 1
            * 2nd, 2
            * 3rd, 3

        Test cannot assume anything about order. Docs make no guarantee and
        have possible dictionary input.
        """
        encoded = urllib.urlencode(given)
        # Every pair must appear somewhere; ordering is unspecified.
        for pair in ["1st=1", "2nd=2", "3rd=3"]:
            self.assert_(pair in encoded,
                         "testing %s: %s not found in %s" %
                         (test_type, pair, encoded))
        self.assertEqual(encoded.count('&'), 2,
                         "testing %s: expected 2 '&'s; got %s" %
                         (test_type, encoded.count('&')))
        first_amp = encoded.index('&')
        # An '&' separator must sit directly between two digit characters.
        self.assert_(encoded[first_amp - 1].isdigit() and
                     encoded[first_amp + 1].isdigit(),
                     "testing %s: '&' not located in proper place in %s" %
                     (test_type, encoded))
        self.assertEqual(len(encoded), (5 * 3) + 2,  # 5 chars per pair + 2 amps
                         "testing %s: "
                         "unexpected number of characters: %s != %s" %
                         (test_type, len(encoded), (5 * 3) + 2))

    def test_using_mapping(self):
        # Test passing in a mapping object as an argument.
        self.help_inputtype({"1st": '1', "2nd": '2', "3rd": '3'},
                            "using dict as input type")

    def test_using_sequence(self):
        # Test passing in a sequence of two-item sequences as an argument.
        self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')],
                            "using sequence of two-item tuples as input")

    def test_quoting(self):
        # Make sure keys and values are quoted using quote_plus()
        self.assertEqual("%s=%s" % (hexescape('&'), hexescape('=')),
                         urllib.urlencode({"&": "="}))
        self.assertEqual("key+name=A+bunch+of+pluses",
                         urllib.urlencode({"key name": "A bunch of pluses"}))

    def test_doseq(self):
        # Test that passing True for 'doseq' parameter works correctly
        payload = {'sequence': ['1', '2', '3']}
        # Without doseq the whole list is stringified and quoted wholesale...
        self.assertEqual(
            "sequence=%s" % urllib.quote_plus(str(['1', '2', '3'])),
            urllib.urlencode(payload))
        # ...with doseq=True each element becomes its own key=value pair.
        encoded = urllib.urlencode(payload, True)
        for item in payload["sequence"]:
            wanted = "sequence=%s" % item
            self.assert_(wanted in encoded,
                         "%s not found in %s" % (wanted, encoded))
        self.assertEqual(encoded.count('&'), 2,
                         "Expected 2 '&'s, got %s" % encoded.count('&'))
class Pathname_Tests(unittest.TestCase):
    """Test pathname2url() and url2pathname()"""

    def test_basic(self):
        # Make sure simple tests pass
        expected_path = os.path.join("parts", "of", "a", "path")
        expected_url = "parts/of/a/path"
        result = urllib.pathname2url(expected_path)
        self.assertEqual(expected_url, result,
                         "pathname2url() failed; %s != %s" %
                         (result, expected_url))
        result = urllib.url2pathname(expected_url)
        # BUG FIX: failure message misspelled the API as "url2pathame()";
        # spell it correctly so failures point at the right function.
        self.assertEqual(expected_path, result,
                         "url2pathname() failed; %s != %s" %
                         (result, expected_path))

    def test_quoting(self):
        # Test automatic quoting and unquoting works for pathname2url() and
        # url2pathname() respectively
        given = os.path.join("needs", "quot=ing", "here")
        expect = "needs/%s/here" % urllib.quote("quot=ing")
        result = urllib.pathname2url(given)
        self.assertEqual(expect, result,
                         "pathname2url() failed; %s != %s" %
                         (expect, result))
        expect = given
        result = urllib.url2pathname(result)
        self.assertEqual(expect, result,
                         "url2pathname() failed; %s != %s" %
                         (expect, result))
        given = os.path.join("make sure", "using_quote")
        expect = "%s/using_quote" % urllib.quote("make sure")
        result = urllib.pathname2url(given)
        self.assertEqual(expect, result,
                         "pathname2url() failed; %s != %s" %
                         (expect, result))
        given = "make+sure/using_unquote"
        expect = os.path.join("make+sure", "using_unquote")
        result = urllib.url2pathname(given)
        self.assertEqual(expect, result,
                         "url2pathname() failed; %s != %s" %
                         (expect, result))
class URLopener_Tests(unittest.TestCase):
    """Testcase to test the open method of URLopener class."""

    def test_quoted_open(self):
        # A subclass handling the fake "spam" scheme just echoes back the
        # (already-processed) URL, letting us observe open()'s quoting.
        class _SpamOpener(urllib.URLopener):
            def open_spam(self, url):
                return url

        opener = _SpamOpener()
        # A space in the path is quoted to %20.
        self.assertEqual(opener.open('spam://example/ /'),
                         '//example/%20/')
        # test the safe characters are not quoted by urlopen
        self.assertEqual(
            opener.open("spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"),
            "//c:|windows%/:=&?~#+!$,;'@()*[]|/path/")
# Just commented them out.
# Can't really tell why they keep failing in windows and sparc.
# Everywhere else they work ok, but on those machines, sometimes
# fail in one of the tests, sometimes in other. I have a linux, and
# the tests go ok.
# If anybody has one of the problematic environments, please help!
# . Facundo
#
# def server(evt):
# import socket, time
# serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# serv.settimeout(3)
# serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# serv.bind(("", 9093))
# serv.listen(5)
# try:
# conn, addr = serv.accept()
# conn.send("1 Hola mundo\n")
# cantdata = 0
# while cantdata < 13:
# data = conn.recv(13-cantdata)
# cantdata += len(data)
# time.sleep(.3)
# conn.send("2 No more lines\n")
# conn.close()
# except socket.timeout:
# pass
# finally:
# serv.close()
# evt.set()
#
# class FTPWrapperTests(unittest.TestCase):
#
# def setUp(self):
# import ftplib, time, threading
# ftplib.FTP.port = 9093
# self.evt = threading.Event()
# threading.Thread(target=server, args=(self.evt,)).start()
# time.sleep(.1)
#
# def tearDown(self):
# self.evt.wait()
#
# def testBasic(self):
# # connects
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# ftp.close()
#
# def testTimeoutNone(self):
# # global default timeout is ignored
# import socket
# self.assert_(socket.getdefaulttimeout() is None)
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutDefault(self):
# # global default timeout is used
# import socket
# self.assert_(socket.getdefaulttimeout() is None)
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutValue(self):
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [],
# timeout=30)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
def test_main():
    """Run every test case in this module with urllib's own
    DeprecationWarning silenced so the suite output stays clean."""
    import warnings

    test_cases = (
        urlopen_FileTests,
        urlopen_HttpTests,
        urlretrieve_FileTests,
        ProxyTests,
        QuotingTests,
        UnquotingTests,
        urlencode_Tests,
        Pathname_Tests,
        URLopener_Tests,
        #FTPWrapperTests,
    )
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', ".*urllib\.urlopen.*Python 3.0",
                                DeprecationWarning)
        test_support.run_unittest(*test_cases)


if __name__ == '__main__':
    test_main()
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import unittest2
import os
import json
import sys
import traceback
from mock import MagicMock, patch, mock_open
from snakebite.config import HDFSConfig
from snakebite.commandlineparser import Commands, CommandLineParser
from snakebite.namenode import Namenode
from snakebite.platformutils import get_current_username
from config_test import ConfigTest
class CommandLineParserTest(unittest2.TestCase):
    """Argument-parsing tests for the snakebite CLI front end.

    Each test feeds an argv-style list to CommandLineParser.parse() and
    inspects the resulting namespace; invalid invocations are expected to
    raise SystemExit (argparse's error behaviour).
    """

    def setUp(self):
        # Fresh parser per test; default_dir mirrors the HDFS home-directory
        # convention of /user/<current user>.
        self.parser = CommandLineParser()
        self.default_dir = os.path.join("/user", get_current_username())

    def test_general_options(self):
        parser = self.parser

        output = parser.parse('ls some_folder'.split())
        self.assertFalse(output.debug)
        self.assertFalse(output.human)
        self.assertFalse(output.json)
        self.assertEqual(output.namenode, None)
        self.assertEqual(output.port, None)

        #each option
        output = parser.parse('-D ls some_folder'.split())
        self.assertTrue(output.debug)

        output = parser.parse('--debug ls some_folder'.split())
        self.assertTrue(output.debug)

        output = parser.parse('-j ls some_folder'.split())
        self.assertTrue(output.json)

        output = parser.parse('--json ls some_folder'.split())
        self.assertTrue(output.json)

        output = parser.parse('-n namenode_fqdn ls some_folder'.split())  # what are typical values for namenodes?
        self.assertEqual(output.namenode, "namenode_fqdn")

        output = parser.parse('--namenode namenode_fqdn ls some_folder'.split())
        self.assertEqual(output.namenode, "namenode_fqdn")

        output = parser.parse('-p 1234 ls some_folder'.split())
        self.assertEqual(output.port, 1234)

        output = parser.parse('--port 1234 ls some_folder'.split())
        self.assertEqual(output.port, 1234)

        output = parser.parse('-V 4 ls some_folder'.split())
        self.assertEqual(output.version, 4)

        output = parser.parse('--version 4 ls some_folder'.split())
        self.assertEqual(output.version, 4)

        #all options
        output = parser.parse('-D -j -n namenode_fqdn -p 1234 -V 4 ls some_folder'.split())
        self.assertTrue(output.debug)
        self.assertTrue(output.json)
        self.assertEqual(output.namenode, "namenode_fqdn")
        self.assertEqual(output.port, 1234)
        self.assertEqual(output.version, 4)

        #options in illegal position
        with self.assertRaises(SystemExit):
            parser.parse('ls -D some_folder'.split())

        with self.assertRaises(SystemExit):
            parser.parse('ls some_folder -D'.split())

    def test_ls(self):
        parser = self.parser

        #no dir
        output = parser.parse('ls'.split())
        self.assertEqual(output.command, 'ls')
        self.assertEqual(output.dir, [self.default_dir])

        #one dir
        output = parser.parse('ls some_dir'.split())
        self.assertEqual(output.dir, ['some_dir'])

        #multiple dirs
        output = parser.parse('ls dir1 dir2 dir3'.split())
        self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])

        #specific commands
        output = parser.parse('ls -d -R -s -h some_dir'.split())
        self.assertTrue(output.directory)
        self.assertTrue(output.recurse)
        self.assertTrue(output.summary)
        self.assertTrue(output.human)
        self.assertEqual(output.dir, ['some_dir'])

        #multiple slashes
        output = parser.parse('ls ///dir1 //dir2 /dir3'.split())
        self.assertEqual(output.dir, ['///dir1', '//dir2', '/dir3'])

    def test_mkdir(self):
        parser = self.parser

        #no dir
        with self.assertRaises(SystemExit):
            parser.parse('mkdir'.split())

        #one dir
        output = parser.parse('mkdir some_dir'.split())
        self.assertEqual(output.command, 'mkdir')
        self.assertEqual(output.dir, ['some_dir'])

        #multiple dirs
        output = parser.parse('mkdir dir1 dir2 dir3'.split())
        self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])

    def test_mkdirp(self):
        parser = self.parser

        #no dir
        with self.assertRaises(SystemExit):
            parser.parse('mkdirp'.split())

        #one dir
        output = parser.parse('mkdirp some_dir'.split())
        self.assertEqual(output.command, 'mkdirp')
        self.assertEqual(output.dir, ['some_dir'])

        #multiple dirs
        output = parser.parse('mkdirp dir1 dir2 dir3'.split())
        self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])

    def test_chown(self):
        parser = self.parser

        #no dir and/or no owner
        with self.assertRaises(SystemExit):
            parser.parse('chown'.split())
        with self.assertRaises(SystemExit):
            parser.parse('chown owner_or_dir'.split())

        #one dir
        output = parser.parse('chown root some_dir'.split())
        self.assertEqual(output.command, 'chown')
        self.assertEqual(output.dir, ['some_dir'])
        self.assertEqual(output.single_arg, 'root')

        #multiple dirs
        output = parser.parse('chown root dir1 dir2 dir3'.split())
        self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
        self.assertEqual(output.single_arg, 'root')

        #recursive
        output = parser.parse('chown -R root some_dir'.split())
        self.assertTrue(output.recurse)

    def test_chmod(self):
        parser = self.parser

        #no dir and/or no mode
        with self.assertRaises(SystemExit):
            parser.parse('chmod'.split())
        with self.assertRaises(SystemExit):
            parser.parse('chmod mode_or_dir'.split())

        #one dir
        output = parser.parse('chmod 664 some_dir'.split())
        self.assertEqual(output.command, 'chmod')
        self.assertEqual(output.dir, ['some_dir'])
        self.assertEqual(output.single_int_arg, 664)

        #wrong type for mode argument
        with self.assertRaises(SystemExit):
            parser.parse('chmod not_an_int some_dir'.split())

        #multiple dirs
        output = parser.parse('chmod 664 dir1 dir2 dir3'.split())
        self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
        self.assertEqual(output.single_int_arg, 664)

        #recursive
        output = parser.parse('chmod -R 664 some_dir'.split())
        self.assertTrue(output.recurse)

    def test_chgrp(self):
        parser = self.parser

        #no dir and/or no group
        with self.assertRaises(SystemExit):
            parser.parse('chgrp'.split())
        with self.assertRaises(SystemExit):
            parser.parse('chgrp group_or_dir'.split())

        #one dir
        output = parser.parse('chgrp group some_dir'.split())
        self.assertEqual(output.command, 'chgrp')
        self.assertEqual(output.dir, ['some_dir'])
        self.assertEqual(output.single_arg, 'group')

        #multiple dirs
        output = parser.parse('chgrp group dir1 dir2 dir3'.split())
        self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
        self.assertEqual(output.single_arg, 'group')

        #recursive
        output = parser.parse('chgrp -R group some_dir'.split())
        self.assertTrue(output.recurse)

    def test_count(self):
        parser = self.parser

        #no dir
        output = parser.parse('count'.split())
        self.assertEqual(output.command, 'count')
        self.assertEqual(output.dir, [self.default_dir])

        #one dir
        output = parser.parse('count some_dir'.split())
        self.assertEqual(output.dir, ['some_dir'])

        #multiple dirs
        output = parser.parse('count dir1 dir2 dir3'.split())
        self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])

        # Human output
        output = parser.parse('count -h dir1 dir2 dir3'.split())
        self.assertTrue(output.human)

    def test_df(self):
        parser = self.parser

        #no dir
        output = parser.parse('df'.split())
        self.assertEqual(output.command, 'df')

        # Human output
        output = parser.parse('df -h'.split())
        self.assertEqual(output.command, 'df')
        self.assertTrue(output.human)

        # df takes no positional arguments
        with self.assertRaises(SystemExit):
            parser.parse('df some_additional_argument'.split())

    def test_du(self):
        parser = self.parser

        #no dir
        output = parser.parse('du'.split())
        self.assertEqual(output.command, 'du')
        self.assertEqual(output.dir, [self.default_dir])

        #one dir
        output = parser.parse('du some_dir'.split())
        self.assertEqual(output.dir, ['some_dir'])

        #multiple dirs
        output = parser.parse('du dir1 dir2 dir3'.split())
        self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])

        #summary
        output = parser.parse('du -s some_dir'.split())
        self.assertTrue(output.summary)

        #human
        output = parser.parse('du -h some_dir'.split())
        self.assertTrue(output.human)

    def test_mv(self):
        parser = self.parser

        #no source and/or no destination
        with self.assertRaises(SystemExit):
            parser.parse('mv'.split())
        with self.assertRaises(SystemExit):
            parser.parse('mv src_or_dest'.split())

        #one source
        output = parser.parse('mv source some_dest'.split())
        self.assertEqual(output.command, 'mv')
        self.assertEqual(output.dir, ['source'])
        self.assertEqual(output.single_arg, 'some_dest')

        #multiple sources
        output = parser.parse('mv source1 source2 source3 some_dest'.split())
        self.assertEqual(output.dir, ['source1', 'source2', 'source3'])
        self.assertEqual(output.single_arg, 'some_dest')

    def test_rm(self):
        parser = self.parser

        #no dir and/or no group
        with self.assertRaises(SystemExit):
            parser.parse('rm'.split())

        #one dir
        output = parser.parse('rm some_dir'.split())
        self.assertEqual(output.command, 'rm')
        self.assertEqual(output.dir, ['some_dir'])

        #multiple dirs
        output = parser.parse('rm dir1 dir2 dir3'.split())
        self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])

        #recursive
        output = parser.parse('rm -R some_dir'.split())
        self.assertTrue(output.recurse)

        #skiptrash
        output = parser.parse('rm -S some_dir'.split())
        self.assertTrue(output.skiptrash)

        #skiptrash
        output = parser.parse('rm --skiptrash some_dir'.split())
        self.assertTrue(output.skiptrash)

        #usetrash
        output = parser.parse('rm -T some_dir'.split())
        self.assertTrue(output.usetrash)

        #usetrash
        output = parser.parse('rm --usetrash some_dir'.split())
        self.assertTrue(output.usetrash)

        #usetrash & skiptrash
        output = parser.parse('rm --usetrash --skiptrash some_dir'.split())
        self.assertTrue(output.usetrash)
        self.assertTrue(output.skiptrash)

    def test_touchz(self):
        parser = self.parser

        #no dir and/or no group
        with self.assertRaises(SystemExit):
            parser.parse('touchz'.split())

        #one dir
        output = parser.parse('touchz some_dir'.split())
        self.assertEqual(output.command, 'touchz')
        self.assertEqual(output.dir, ['some_dir'])

        #multiple dirs
        output = parser.parse('touchz dir1 dir2 dir3'.split())
        self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])

    def test_serverdefaults(self):
        parser = self.parser

        #no arg
        output = parser.parse('serverdefaults'.split())
        self.assertEqual(output.command, 'serverdefaults')

        #too many args
        with self.assertRaises(SystemExit):
            parser.parse('serverdefaults some_additional_argument'.split())

    def test_rmdir(self):
        parser = self.parser

        #no dir and/or no group
        with self.assertRaises(SystemExit):
            parser.parse('rmdir'.split())

        #one dir
        output = parser.parse('rmdir some_dir'.split())
        self.assertEqual(output.command, 'rmdir')
        self.assertEqual(output.dir, ['some_dir'])

        #multiple dirs
        output = parser.parse('rmdir dir1 dir2 dir3'.split())
        self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])

    def test_setrep(self):
        parser = self.parser

        #no dir and/or no replication factor
        with self.assertRaises(SystemExit):
            parser.parse('setrep'.split())
        with self.assertRaises(SystemExit):
            parser.parse('setrep some_dir'.split())
        with self.assertRaises(SystemExit):
            parser.parse('setrep 3'.split())

        #one dir
        output = parser.parse('setrep 3 some_dir'.split())
        self.assertEqual(output.command, 'setrep')
        self.assertEqual(output.dir, ['some_dir'])
        self.assertEqual(output.single_int_arg, 3)

        #wrong type for mode argument
        with self.assertRaises(SystemExit):
            parser.parse('setrep not_an_int some_dir'.split())

        #multiple dirs
        output = parser.parse('setrep 3 dir1 dir2 dir3'.split())
        self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
        self.assertEqual(output.single_int_arg, 3)

        #recursive
        output = parser.parse('setrep -R 3 some_dir'.split())
        self.assertTrue(output.recurse)

    def test_usage(self):
        parser = self.parser

        #no command
        output = parser.parse('usage'.split())
        self.assertEqual(output.command, 'usage')

        #one dir
        output = parser.parse('usage some_cmd'.split())
        self.assertEqual(output.command, 'usage')
        self.assertEqual(output.arg, ['some_cmd'])

        #multiple dirs
        output = parser.parse('usage cmd1 cmd2 cmd3'.split())
        self.assertEqual(output.arg, ['cmd1', 'cmd2', 'cmd3'])

    def test_stat(self):
        parser = self.parser

        #no dir
        with self.assertRaises(SystemExit):
            parser.parse('stat'.split())

        #one dir
        output = parser.parse('stat some_dir'.split())
        self.assertEqual(output.command, 'stat')
        self.assertEqual(output.dir, ['some_dir'])

        #multiple dirs
        output = parser.parse('stat dir1 dir2 dir3'.split())
        self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])

    def test_test(self):
        parser = self.parser

        #no dir
        with self.assertRaises(SystemExit):
            parser.parse('test'.split())

        #one dir
        output = parser.parse('test some_dir'.split())
        self.assertEqual(output.command, 'test')
        self.assertEqual(output.single_arg, 'some_dir')

        #multiple dirs
        with self.assertRaises(SystemExit):
            parser.parse('test dir1 dir2 dir3'.split())

        #specific commands
        output = parser.parse('test -d -z -e some_dir'.split())
        self.assertTrue(output.directory)
        self.assertTrue(output.zero)
        self.assertTrue(output.exists)
        self.assertEqual(output.single_arg, 'some_dir')

    def test_cat(self):
        parser = self.parser

        #no path
        with self.assertRaises(SystemExit):
            parser.parse('cat'.split())

        #one path
        output = parser.parse('cat some_file'.split())
        self.assertEqual(output.command, 'cat')
        self.assertEqual(output.dir, ['some_file'])

        #multiple paths
        output = parser.parse('cat dir1 dir2 dir3'.split())
        self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])

        #specific commands
        output = parser.parse('cat -checkcrc dir1 dir2'.split())
        self.assertEqual(output.checkcrc, True)

    def test_copyFromLocal(self):
        parser = self.parser

        #no dir
        with self.assertRaises(SystemExit):
            parser.parse('copyFromLocal'.split())

        #one dir
        with self.assertRaises(SystemExit):
            parser.parse('copyFromLocal some_dir'.split())

        #two dirs
        output = parser.parse('copyFromLocal dir1 dir2'.split())
        self.assertEqual(output.dir, ['dir1'])
        self.assertEqual(output.single_arg, 'dir2')

    def test_copyToLocal(self):
        parser = self.parser

        #no dir
        with self.assertRaises(SystemExit):
            parser.parse('copyToLocal'.split())

        #one dir
        with self.assertRaises(SystemExit):
            parser.parse('copyToLocal some_dir'.split())

        #two dirs
        output = parser.parse('copyToLocal dir1 dir2'.split())
        self.assertEqual(output.dir, ['dir1'])
        self.assertEqual(output.single_arg, 'dir2')
        self.assertEqual(output.checkcrc, False)

        #specific commands
        output = parser.parse('copyToLocal -checkcrc dir1 dir2'.split())
        self.assertEqual(output.checkcrc, True)

    def test_cp(self):
        parser = self.parser

        #no dir
        with self.assertRaises(SystemExit):
            parser.parse('cp'.split())

        #one dir
        with self.assertRaises(SystemExit):
            parser.parse('cp some_dir'.split())

        #multiple dirs: last argument is the destination
        output = parser.parse('cp dir1 dir2 dir3'.split())
        self.assertEqual(output.dir, ['dir1', 'dir2'])
        self.assertEqual(output.single_arg, 'dir3')

    def test_get(self):
        parser = self.parser

        #no dir
        with self.assertRaises(SystemExit):
            parser.parse('get'.split())

        #one dir
        with self.assertRaises(SystemExit):
            parser.parse('get some_dir'.split())

        #multiple dirs: last argument is the destination
        output = parser.parse('get dir1 dir2 dir3'.split())
        self.assertEqual(output.dir, ['dir1', 'dir2'])
        self.assertEqual(output.single_arg, 'dir3')

        #specific commands
        output = parser.parse('get -checkcrc dir1 dir2'.split())
        self.assertEqual(output.checkcrc, True)

    def test_getmerge(self):
        parser = self.parser

        #no dir
        with self.assertRaises(SystemExit):
            parser.parse('getmerge'.split())

        #one dir
        with self.assertRaises(SystemExit):
            parser.parse('getmerge some_dir'.split())

        #two dirs
        output = parser.parse('getmerge dir1 dir2'.split())
        self.assertEqual(output.src_dst[0], 'dir1')
        self.assertEqual(output.src_dst[1], 'dir2')

        #multiple dirs
        with self.assertRaises(SystemExit):
            parser.parse('getmerge dir1 dir2 dir3'.split())

    # NOTE(review): test_put was already disabled in the original source;
    # kept verbatim for reference.
    # def test_put(self):
    #     parser = self.parser
    #     #no dir
    #     with self.assertRaises(SystemExit):
    #         parser.parse('put'.split())
    #     #one dir
    #     with self.assertRaises(SystemExit):
    #         parser.parse('put some_dir'.split())
    #     #multiple dirs
    #     output = parser.parse('put dir1 dir2 dir3'.split())
    #     self.assertEqual(output.dir, ['dir1', 'dir2'])
    #     self.assertEqual(output.single_arg, 'dir3')

    def test_tail(self):
        parser = self.parser

        #no dir
        with self.assertRaises(SystemExit):
            parser.parse('tail'.split())

        #one dir
        output = parser.parse('tail some_dir'.split())
        self.assertEqual(output.single_arg, 'some_dir')

        #multiple dirs
        with self.assertRaises(SystemExit):
            parser.parse('tail dir1 dir2'.split())

        #specific commands
        output = parser.parse('tail -f some_dir'.split())
        self.assertTrue(output.append)

    def test_text(self):
        parser = self.parser

        #no path
        with self.assertRaises(SystemExit):
            parser.parse('text'.split())

        #one path
        output = parser.parse('text some_file'.split())
        self.assertEqual(output.command, 'text')
        self.assertEqual(output.dir, ['some_file'])

        #multiple paths
        output = parser.parse('text dir1 dir2 dir3'.split())
        self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])

        #specific commands
        output = parser.parse('text -checkcrc dir1 dir2'.split())
        self.assertEqual(output.checkcrc, True)
class MockParseArgs(object):
    """Lightweight stand-in for the argparse namespace used by config tests.

    Attributes mirror what CommandLineParser.parse() produces: ``dir`` is a
    list of directory arguments, the rest are scalar options/flags.
    """

    def __init__(self, dir=None,
                 single_arg=None,
                 command=None,
                 namenode=None,
                 port=None,
                 usetrash=False,
                 skiptrash=False):
        # BUG FIX: the original signature used ``dir=[]`` — a single list
        # object shared by every instance, so a mutation through one mock
        # namespace leaked into every later test. Use the None sentinel and
        # allocate a fresh list per instance instead.
        self.dir = [] if dir is None else dir
        self.single_arg = single_arg
        self.command = command
        self.namenode = namenode
        self.port = port
        self.usetrash = usetrash
        self.skiptrash = skiptrash

    def __contains__(self, b):
        # Supports ``"usetrash" in args``-style membership checks against
        # the attributes set above.
        return b in self.__dict__
class CommandLineParserInternalConfigTest(unittest2.TestCase):
    """Tests for how CommandLineParser resolves namenode configuration.

    Covers precedence and validation across three sources: HDFS URIs given
    on the command line, a ~/.snakebiterc file (v1 and v2 formats), and
    external Hadoop XML config files (core-site / hdfs-site).
    """

    def setUp(self):
        # Fresh parser per test; default HDFS home dir for the current user.
        self.parser = CommandLineParser()
        self.default_dir = os.path.join("/user", get_current_username())

    def assert_namenode_spec(self, host, port, version=None):
        # The parser's parsed-args namespace must point at the expected NN.
        self.assertEqual(self.parser.args.namenode, host)
        self.assertEqual(self.parser.args.port, port)
        if version:
            self.assertEqual(self.parser.args.version, version)

    def assert_namenodes_spec(self, host, port, version=None):
        # Passes if ANY configured namenode matches (host, port[, version]).
        for namenode in self.parser.namenodes:
            try:
                self.assertEqual(namenode.host, host)
                self.assertEqual(namenode.port, port)
                if version:
                    self.assertEqual(namenode.version, version)
            except AssertionError:
                continue
            # There was no AssertError -> we found our NN
            return
        self.fail("NN not found in namenodes")

    def test_cl_config_conflicted(self):
        # Two different hosts in one invocation: ambiguous, must exit.
        self.parser.args = MockParseArgs(dir=["hdfs://foobar:50070/user/rav",
                                              "hdfs://foobar2:50070/user/rav"])
        with self.assertRaises(SystemExit):
            self.parser.read_config()
        # Same host, conflicting ports: must exit.
        self.parser.args = MockParseArgs(dir=["hdfs://foobar:50071/user/rav",
                                              "hdfs://foobar:50070/user/rav"])
        with self.assertRaises(SystemExit):
            self.parser.read_config()
        # Different host and port: must exit.
        self.parser.args = MockParseArgs(dir=["hdfs://foobar:50072/user/rav",
                                              "hdfs://foobar2:50070/user/rav"])
        with self.assertRaises(SystemExit):
            self.parser.read_config()
        # dir args agree but single_arg names another host: must exit.
        self.parser.args = MockParseArgs(dir=["hdfs://foobar:50070/user/rav",
                                              "hdfs://foobar:50070/user/rav"],
                                         single_arg="hdfs://foobar2:50070/user/rav",
                                         command="mv")
        with self.assertRaises(SystemExit):
            self.parser.read_config()

    def test_cl_config_simple(self):
        # All URIs agree on host/port: config is taken from the command line.
        self.parser.args = MockParseArgs(dir=["hdfs://foobar:50070/user/rav",
                                              "hdfs://foobar:50070/user/rav2"])
        self.parser.read_config()
        self.assert_namenode_spec("foobar", 50070)
        self.assert_namenodes_spec("foobar", 50070)
        self.parser.args = MockParseArgs(dir=["hdfs://foobar:50070/user/rav",
                                              "hdfs://foobar:50070/user/rav2"],
                                         single_arg="hdfs://foobar:50070/user/rav",
                                         command="mv")
        self.parser.read_config()
        self.assert_namenode_spec("foobar", 50070)
        self.assert_namenodes_spec("foobar", 50070)

    def test_cl_config_slash_madness_check_scheme(self):
        # Redundant slashes in the path must not confuse URI parsing.
        self.parser.args = MockParseArgs(dir=["hdfs://foobar:50070///user//rav",
                                              "hdfs://foobar:50070/user/////rav2"])
        self.parser.read_config()
        self.assert_namenode_spec("foobar", 50070)
        self.assert_namenodes_spec("foobar", 50070)
        self.parser.args = MockParseArgs(dir=["hdfs://foobar:50070/user////rav",
                                              "hdfs://foobar:50070////user/rav2"],
                                         single_arg="hdfs://foobar:50070/////user/rav",
                                         command="mv")
        self.parser.read_config()
        self.assert_namenode_spec("foobar", 50070)
        self.assert_namenodes_spec("foobar", 50070)

    def test_cl_config_slash_madness_full_check(self):
        # init() strips scheme/authority but must preserve the path verbatim.
        self.parser.args = MockParseArgs(dir=["hdfs://foobar/user////rav",
                                              "hdfs://foobar////user/rav2"],
                                         single_arg="hdfs://foobar/////user/rav",
                                         command="mv")
        self.parser.init()
        self.assert_namenode_spec("foobar", Namenode.DEFAULT_PORT)
        self.assert_namenodes_spec("foobar", Namenode.DEFAULT_PORT)
        self.assertIn("/user////rav", self.parser.args.dir)
        self.assertIn("////user/rav2", self.parser.args.dir)
        self.assertEqual(self.parser.args.single_arg, "/////user/rav")

    def test_cl_config_reduce_paths(self):
        # Full HDFS URIs are reduced to plain paths after init().
        self.parser.args = MockParseArgs(dir=["hdfs://foobar:50070/user/rav",
                                              "hdfs://foobar:50070/user/rav2"],
                                         single_arg="hdfs://foobar:50070/user/rav3",
                                         command="mv")
        self.parser.init()
        self.assert_namenode_spec("foobar", 50070)
        self.assertIn("/user/rav", self.parser.args.dir)
        self.assertIn("/user/rav2", self.parser.args.dir)
        self.assertEqual(self.parser.args.single_arg, "/user/rav3")

    def test_cl_config_test_single_arg_hdfs_paths(self):
        # Single-arg command "test": URI reduced to a plain path.
        self.parser.args = MockParseArgs(single_arg="hdfs://foobar:50070/user/rav3",
                                         command="test")
        self.parser.init()
        self.assert_namenode_spec("foobar", 50070)
        self.assertEqual(self.parser.args.single_arg, "/user/rav3")

    def test_cl_config_tail_single_arg_hdfs_paths(self):
        # Single-arg command "tail": URI reduced to a plain path.
        self.parser.args = MockParseArgs(single_arg="hdfs://foobar:50070/user/rav3",
                                         command="tail")
        self.parser.init()
        self.assert_namenode_spec("foobar", 50070)
        self.assertEqual(self.parser.args.single_arg, "/user/rav3")

    def test_cl_config_mv_single_arg_hdfs_paths(self):
        # Command "mv" with an HDFS URI in single_arg only.
        self.parser.args = MockParseArgs(single_arg="hdfs://foobar:50070/user/rav3",
                                         command="mv")
        self.parser.init()
        self.assert_namenode_spec("foobar", 50070)
        self.assertEqual(self.parser.args.single_arg, "/user/rav3")

    # Imported inside the class body so the patch target below resolves.
    import snakebite.config

    @patch.object(snakebite.config.HDFSConfig, 'get_external_config')
    @patch("snakebite.commandlineparser.CommandLineParser._read_config_snakebiterc", return_value=None)
    def test_config_no_config(self, config_mock, read_config_mock):
        # No snakebiterc, no external config, no HADOOP_HOME: the parser has
        # nowhere to find a namenode and must exit.
        hadoop_home = None
        config_mock.return_value = []
        if os.environ.get("HADOOP_HOME"):
            hadoop_home = os.environ["HADOOP_HOME"]
            del os.environ["HADOOP_HOME"]
        self.parser.args = MockParseArgs()
        with self.assertRaises(SystemExit):
            self.parser.read_config()
        if hadoop_home:
            # Restore the environment for subsequent tests.
            os.environ["HADOOP_HOME"] = hadoop_home
        self.assert_namenode_spec(None, None)

    # Fixtures for the v1 ~/.snakebiterc format (dict for one NN, list = HA).
    valid_snake_one_rc = {"namenode": "foobar", "version": 9, "port": 54310}
    valid_snake_ha_rc = [{"namenode": "foobar", "version": 9, "port": 54310},
                         {"namenode": "foobar2", "version": 9, "port": 54310}]
    invalid_snake_rc = "hdfs://foobar:54310"

    @patch("os.path.exists")
    def test_read_config_snakebiterc_one_valid(self, exists_mock):
        m = mock_open(read_data=json.dumps(self.valid_snake_one_rc))
        with patch("snakebite.commandlineparser.open", m, create=True):
            self.parser.args = MockParseArgs()
            self.parser.read_config()
            self.assert_namenodes_spec("foobar", 54310, 9)
            self.assertEquals(self.parser.args.usetrash, self.parser.configs['use_trash'])

    @patch("os.path.exists")
    def test_read_config_snakebiterc_ha_valid(self, exists_mock):
        m = mock_open(read_data=json.dumps(self.valid_snake_ha_rc))
        with patch("snakebite.commandlineparser.open", m, create=True):
            self.parser.args = MockParseArgs()
            self.parser.read_config()
            self.assert_namenodes_spec("foobar", 54310, 9)
            self.assert_namenodes_spec("foobar2", 54310, 9)
            self.assertEquals(self.parser.args.usetrash, self.parser.configs['use_trash'])

    @patch("os.path.exists")
    def test_read_config_snakebiterc_invalid(self, exists_mock):
        # A bare URI string is not a valid snakebiterc document.
        m = mock_open(read_data=json.dumps(self.invalid_snake_rc))
        with patch("snakebite.commandlineparser.open", m, create=True):
            self.parser.args = MockParseArgs()
            with self.assertRaises(SystemExit):
                self.parser.read_config()

    # v1 fixtures without an explicit port (DEFAULT_PORT should be used).
    valid_snake_noport_one_rc = {"namenode": "foobar", "version": 11}
    valid_snake_noport_ha_rc = [{"namenode": "foobar", "version": 100},
                                {"namenode": "foobar2", "version": 100}]

    @patch("os.path.exists")
    def test_read_config_snakebiterc_noport_one_valid(self, exists_mock):
        m = mock_open(read_data=json.dumps(self.valid_snake_noport_one_rc))
        with patch("snakebite.commandlineparser.open", m, create=True):
            self.parser.args = MockParseArgs()
            self.parser.read_config()
            self.assert_namenodes_spec("foobar", Namenode.DEFAULT_PORT, 11)
            self.assertEquals(self.parser.args.usetrash, self.parser.configs['use_trash'])

    @patch("os.path.exists")
    def test_read_config_snakebiterc_noport_ha_valid(self, exists_mock):
        m = mock_open(read_data=json.dumps(self.valid_snake_noport_ha_rc))
        with patch("snakebite.commandlineparser.open", m, create=True):
            self.parser.args = MockParseArgs()
            self.parser.read_config()
            self.assert_namenodes_spec("foobar", Namenode.DEFAULT_PORT, 100)
            self.assert_namenodes_spec("foobar2", Namenode.DEFAULT_PORT, 100)
            self.assertEquals(self.parser.args.usetrash, self.parser.configs['use_trash'])

    # v1 fixtures with neither port nor version (both defaults apply).
    valid_snake_noport_nov_one_rc = {"namenode": "foobar"}
    valid_snake_noport_nov_ha_rc = [{"namenode": "foobar"},
                                    {"namenode": "foobar2"}]

    @patch("os.path.exists")
    def test_read_config_snakebiterc_noport_nov_one_valid(self, exists_mock):
        m = mock_open(read_data=json.dumps(self.valid_snake_noport_nov_one_rc))
        with patch("snakebite.commandlineparser.open", m, create=True):
            self.parser.args = MockParseArgs()
            self.parser.read_config()
            self.assert_namenodes_spec("foobar", Namenode.DEFAULT_PORT, Namenode.DEFAULT_VERSION)
            self.assertEquals(self.parser.args.usetrash, self.parser.configs['use_trash'])

    @patch("os.path.exists")
    def test_read_config_snakebiterc_noport_nov_ha_valid(self, exists_mock):
        m = mock_open(read_data=json.dumps(self.valid_snake_noport_nov_ha_rc))
        with patch("snakebite.commandlineparser.open", m, create=True):
            self.parser.args = MockParseArgs()
            self.parser.read_config()
            self.assert_namenodes_spec("foobar", Namenode.DEFAULT_PORT, Namenode.DEFAULT_VERSION)
            self.assert_namenodes_spec("foobar2", Namenode.DEFAULT_PORT, Namenode.DEFAULT_VERSION)
            self.assertEquals(self.parser.args.usetrash, self.parser.configs['use_trash'])

    # Mixed HA fixture: one NN missing its port, the other missing a version.
    valid_snake_noport_mix_rc = [{"namenode": "foobar", "version": 100},
                                 {"namenode": "foobar2", "port": 66}]

    @patch("os.path.exists")
    def test_read_config_snakebiterc_noport_mix_valid(self, exists_mock):
        m = mock_open(read_data=json.dumps(self.valid_snake_noport_mix_rc))
        with patch("snakebite.commandlineparser.open", m, create=True):
            self.parser.args = MockParseArgs()
            self.parser.read_config()
            self.assert_namenodes_spec("foobar", Namenode.DEFAULT_PORT, 100)
            self.assert_namenodes_spec("foobar2", 66, Namenode.DEFAULT_VERSION)
            self.assertEquals(self.parser.args.usetrash, self.parser.configs['use_trash'])

    # Fixtures for the v2 snakebiterc format ("config_version": 2).
    valid_snake_one_rc_v2 = {
        "config_version": 2,
        "use_trash": False,
        "namenodes": [
            {"host": "foobar3", "version": 9, "port": 54310}
        ]
    }
    valid_snake_ha_rc_v2 = {
        "config_version": 2,
        "use_trash": True,
        "namenodes": [
            {"host": "foobar4", "version": 9, "port": 54310},
            {"host": "foobar5", "version": 9, "port": 54310}
        ]
    }
    invalid_snake_rc_v2 = "hdfs://foobar:54310"

    @patch("os.path.exists")
    def test_read_config_snakebiterc_one_valid_v2(self, exists_mock):
        m = mock_open(read_data=json.dumps(self.valid_snake_one_rc_v2))
        with patch("snakebite.commandlineparser.open", m, create=True):
            self.parser.args = MockParseArgs()
            self.parser.read_config()
            self.assertFalse(self.parser.args.usetrash)
            self.assert_namenodes_spec("foobar3", 54310, 9)

    @patch("os.path.exists")
    def test_read_config_snakebiterc_ha_valid_v2(self, exists_mock):
        m = mock_open(read_data=json.dumps(self.valid_snake_ha_rc_v2))
        with patch("snakebite.commandlineparser.open", m, create=True):
            self.parser.args = MockParseArgs()
            self.parser.read_config()
            self.assertTrue(self.parser.args.usetrash)
            self.assert_namenodes_spec("foobar4", 54310, 9)
            self.assert_namenodes_spec("foobar5", 54310, 9)

    @patch("os.path.exists")
    def test_read_config_snakebiterc_invalid_v2(self, exists_mock):
        m = mock_open(read_data=json.dumps(self.invalid_snake_rc_v2))
        with patch("snakebite.commandlineparser.open", m, create=True):
            self.parser.args = MockParseArgs()
            with self.assertRaises(SystemExit):
                self.parser.read_config()

    # v2 fixtures exercising default port / default version fallbacks.
    valid_snake_noport_one_rc_v2 = {
        "config_version": 2,
        "use_trash": False,
        "namenodes": [
            {"host": "foobar3", "version": 9}
        ]
    }
    valid_snake_mix_ha_rc_v2 = {
        "config_version": 2,
        "use_trash": True,
        "namenodes": [
            {"host": "foobar4", "version": 100},
            {"host": "foobar5", "port": 54310}
        ]
    }

    @patch("os.path.exists")
    def test_read_config_snakebiterc_noport_one_valid_v2(self, exists_mock):
        m = mock_open(read_data=json.dumps(self.valid_snake_noport_one_rc_v2))
        with patch("snakebite.commandlineparser.open", m, create=True):
            self.parser.args = MockParseArgs()
            self.parser.read_config()
            self.assertFalse(self.parser.args.usetrash)
            self.assert_namenodes_spec("foobar3", Namenode.DEFAULT_PORT, 9)

    @patch("os.path.exists")
    def test_read_config_snakebiterc_mix_ha_valid_v2(self, exists_mock):
        m = mock_open(read_data=json.dumps(self.valid_snake_mix_ha_rc_v2))
        with patch("snakebite.commandlineparser.open", m, create=True):
            self.parser.args = MockParseArgs()
            self.parser.read_config()
            self.assertTrue(self.parser.args.usetrash)
            self.assert_namenodes_spec("foobar4", Namenode.DEFAULT_PORT, 100)
            self.assert_namenodes_spec("foobar5", 54310, Namenode.DEFAULT_VERSION)

    # v2 fixture that also sets the effective HDFS user.
    valid_user_rc_v2 = {
        "config_version": 2,
        "use_trash": True,
        "user": "hdfs_user",
        "namenodes": [
            {"host": "foobar4", "version": 100},
            {"host": "foobar5", "port": 54310}
        ]
    }

    @patch("os.path.exists")
    def test_read_config_snakebiterc_user_valid_v2(self, exists_mock):
        m = mock_open(read_data=json.dumps(self.valid_user_rc_v2))
        with patch("snakebite.commandlineparser.open", m, create=True):
            self.parser.args = MockParseArgs()
            self.parser.read_config()
            self.parser.setup_client()
            self.assertTrue(self.parser.args.usetrash)
            self.assertEquals(self.parser.client.effective_user, "hdfs_user")
            self.assert_namenodes_spec("foobar4", Namenode.DEFAULT_PORT, 100)
            self.assert_namenodes_spec("foobar5", 54310, Namenode.DEFAULT_VERSION)

    def test_cl_default_port(self):
        # A URI without an explicit port falls back to Namenode.DEFAULT_PORT.
        self.parser.args = MockParseArgs(dir=["hdfs://foobar/user/rav"],
                                         single_arg="hdfs://foobar/user/rav",
                                         command="mv")
        self.parser.read_config()
        self.assert_namenode_spec("foobar", Namenode.DEFAULT_PORT)

    def test_cl_trash_setting_preserved_after_cl_config(self):
        # no snakebiterc
        # read config from CL
        self.parser.args = MockParseArgs(dir=["hdfs://foobar:50070/user/rav"],
                                         skiptrash=True,
                                         command="rm")
        self.parser.read_config()
        self.assert_namenode_spec("foobar", 50070)
        self.assert_namenodes_spec("foobar", 50070)
        self.assertEquals(self.parser.args.skiptrash, True)

    def _revert_hdfs_try_paths(self):
        # Make sure HDFSConfig is in vanilla state
        self.parser.configs['use_trash'] = False
        HDFSConfig.hdfs_try_paths = ConfigTest.original_hdfs_try_path
        HDFSConfig.core_try_paths = ConfigTest.original_core_try_path

    @patch("os.path.exists")
    def test_cl_trash_setting_preserved_after_snakebiterc_one_valid(self, exists_mock):
        # An explicit trash flag on the CL must survive reading a v1 rc file.
        m = mock_open(read_data=json.dumps(self.valid_snake_one_rc))
        with patch("snakebite.commandlineparser.open", m, create=True):
            self.parser.args = MockParseArgs(usetrash=True)
            self.parser.read_config()
            self.assert_namenodes_spec("foobar", 54310, 9)
            self.assertTrue(self.parser.args.usetrash)

    @patch('os.environ.get')
    def test_cl_usetrash_setting_preserved_after_external_nontrash_config(self, environ_get):
        environ_get.return_value = False
        # no snakebiterc
        # read external config (hdfs-site, core-site)
        self.parser.args = MockParseArgs(dir=["/user/rav/test"],
                                         usetrash=True,
                                         command="rm")
        try:
            HDFSConfig.core_try_paths = (ConfigTest.get_config_path('ha-core-site.xml'),)
            HDFSConfig.hdfs_try_paths = (ConfigTest.get_config_path('ha-noport-hdfs-site.xml'),)
            self.parser.init()
            self.assertTrue(self.parser.args.usetrash)
            self.assertTrue(self.parser.client.use_trash)
        finally:
            self._revert_hdfs_try_paths()

    @patch('os.environ.get')
    def test_cl_skiptrash_setting_preserved_after_external_nontrash_config(self, environ_get):
        environ_get.return_value = False
        # no snakebiterc
        # read external config (hdfs-site, core-site)
        self.parser.args = MockParseArgs(dir=["/user/rav/test"],
                                         skiptrash=True,
                                         usetrash=True,
                                         command="rm")
        try:
            HDFSConfig.core_try_paths = (ConfigTest.get_config_path('ha-core-site.xml'),)
            HDFSConfig.hdfs_try_paths = (ConfigTest.get_config_path('ha-noport-hdfs-site.xml'),)
            self.parser.init()
            self.assertTrue(self.parser.args.skiptrash)
            self.assertTrue(self.parser.args.usetrash)
            # skiptrash takes precedence over usetrash for the client.
            self.assertFalse(self.parser.client.use_trash)
        finally:
            self._revert_hdfs_try_paths()

    @patch('os.environ.get')
    def test_use_datanode_hostname(self, environ_get):
        environ_get.return_value = False
        # no snakebiterc
        # read external config (hdfs-site, core-site)
        self.parser.args = MockParseArgs()
        try:
            HDFSConfig.core_try_paths = (ConfigTest.get_config_path('ha-core-site.xml'),)
            HDFSConfig.hdfs_try_paths = (ConfigTest.get_config_path('use-datanode-hostname-hdfs-site.xml'),)
            self.parser.init()
            self.assertTrue(self.parser.client.use_datanode_hostname)
        finally:
            self._revert_hdfs_try_paths()
class CommandLineParserExecuteTest(unittest2.TestCase):
    """Tests for error propagation through CommandLineParser.execute()."""

    def test_execute_does_not_swallow_tracebacks(self):
        # Register a throwaway "boom" command in an isolated command registry
        # so the real command table is untouched.
        with patch.dict(Commands.methods, clear=True):
            # .im_func unwraps the unbound method (Python 2) so the command
            # decorator can be applied outside the class body.
            @CommandLineParser.command.im_func()
            def boom(*args, **kwargs):
                def subboom():
                    raise IndexError("Boom!")
                subboom()

            parser = CommandLineParser()
            parser.parse(["boom"])
            try:
                parser.execute()
            except IndexError:
                _, _, exc_traceback = sys.exc_info()
                # The innermost frame (subboom) must still appear in the
                # traceback, i.e. execute() re-raised without truncating it.
                self.assertIn(
                    "subboom()\n",
                    traceback.format_exc(),
                    msg="Lost some stack frames when re-raising!",
                )
            else:
                self.fail("execute() should have raised an IndexError!")
| |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Conformer utilities."""
import copy
from typing import List, Optional
from absl import logging
import numpy as np
import rdkit
from rdkit import Chem
from rdkit.Chem import AllChem
import tensorflow.compat.v2 as tf
def generate_conformers(
    molecule: Chem.rdchem.Mol,
    max_num_conformers: int,
    *,
    random_seed: int = -1,
    prune_rms_thresh: float = -1.0,
    max_iter: int = -1,
    fallback_to_random: bool = False,
) -> Chem.rdchem.Mol:
  """Generates conformers for a given molecule.

  Args:
    molecule: molecular representation of the compound.
    max_num_conformers: maximum number of conformers to generate. If pruning is
      done, the returned number of conformers is not guaranteed to match
      max_num_conformers.
    random_seed: random seed to use for conformer generation.
    prune_rms_thresh: RMSD threshold which allows to prune conformers that are
      too similar.
    max_iter: Maximum number of iterations to perform when optimising MMFF force
      field. If set to <= 0, energy optimisation is not performed.
    fallback_to_random: if conformers cannot be obtained, use random coordinates
      to initialise.

  Returns:
    Copy of a `molecule` with added hydrogens. The returned molecule contains
    force field-optimised conformers. The number of conformers is guaranteed to
    be <= max_num_conformers.
  """
  # Work on a hydrogen-augmented copy; the input molecule is never mutated.
  working_mol = Chem.AddHs(copy.deepcopy(molecule))
  working_mol = _embed_conformers(
      working_mol,
      max_num_conformers,
      random_seed,
      prune_rms_thresh,
      fallback_to_random,
      use_random=False)

  if max_iter <= 0:
    # Energy optimisation disabled.
    optimised = working_mol
  else:
    # Prefer MMFF; fall back to UFF when MMFF parameters are unavailable.
    optimised = _minimize_by_mmff(working_mol, max_iter)
    if optimised is None:
      optimised = _minimize_by_uff(working_mol, max_iter)

  # Aligns conformations in a molecule to each other using the first
  # conformation as the reference.
  AllChem.AlignMolConformers(optimised)

  # Remove hydrogens again to keep the number of atoms consistent with the
  # graph nodes.
  return Chem.RemoveHs(optimised)
def atom_to_feature_vector(
    atom: 'rdkit.Chem.rdchem.Atom',
    conformer: Optional['Chem.rdchem.Conformer'] = None,
) -> List[float]:
  """Returns the (x, y, z) position of `atom` within `conformer`.

  Args:
    atom: rdkit atom object; only its index within the molecule is used.
    conformer: rdkit conformer holding 3D coordinates for the molecule the
      atom belongs to. (The previous annotation said np.ndarray, but the only
      use below is `GetAtomPosition`, i.e. an rdkit Conformer.)

  Returns:
    [x, y, z] position of the atom from the conformer, or [nan, nan, nan]
    when no conformer is given. (The old docstring incorrectly promised -1
    values and "feature indices".)
  """
  # Explicit None check instead of truthiness: safer for objects whose
  # boolean value is undefined or ambiguous (e.g. arrays).
  if conformer is not None:
    pos = conformer.GetAtomPosition(atom.GetIdx())
    return [pos.x, pos.y, pos.z]
  return [np.nan, np.nan, np.nan]
def compute_conformer(smile: str, max_iter: int = -1) -> np.ndarray:
  """Computes per-atom conformer positions for a SMILES string.

  Args:
    smile: Smile string.
    max_iter: Maximum number of iterations to perform when optimising MMFF force
      field. If set to <= 0, energy optimisation is not performed.

  Returns:
    float32 array of shape [num_atoms, 3] with atom positions; rows are NaN
    when conformer generation failed.

  Raises:
    RuntimeError: If unable to convert smile string to RDKit mol.
  """
  mol = rdkit.Chem.MolFromSmiles(smile)
  if not mol:
    raise RuntimeError('Unable to convert smile to molecule: %s' % smile)

  # Conformer generation is best-effort: on any failure we log and fall back
  # to NaN positions rather than aborting the whole pipeline.
  failed = False
  try:
    mol = generate_conformers(
        mol,
        max_num_conformers=1,
        random_seed=45,
        prune_rms_thresh=0.01,
        max_iter=max_iter)
  except IOError as e:
    logging.exception('Failed to generate conformers for %s . IOError %s.',
                      smile, e)
    failed = True
  except ValueError:
    logging.error('Failed to generate conformers for %s . ValueError', smile)
    failed = True
  except:  # pylint: disable=bare-except
    logging.error('Failed to generate conformers for %s.', smile)
    failed = True

  conformer = None if failed else list(mol.GetConformers())[0]
  features = [atom_to_feature_vector(a, conformer) for a in mol.GetAtoms()]
  return np.array(features, dtype=np.float32)
def get_random_rotation_matrix(include_mirror_symmetry: bool) -> tf.Tensor:
  """Samples one random 3x3 rotation, optionally composed with a mirror."""
  rotation = _get_random_rotation_3d()
  if not include_mirror_symmetry:
    return rotation
  return tf.matmul(rotation, _get_random_mirror_symmetry())
def rotate(vectors: tf.Tensor, rotation_matrix: tf.Tensor) -> tf.Tensor:
  """Applies a single rotation matrix to a batch of row vectors."""
  rotated = tf.matmul(vectors, rotation_matrix)
  return rotated
def _embed_conformers(
    molecule: Chem.rdchem.Mol,
    max_num_conformers: int,
    random_seed: int,
    prune_rms_thresh: float,
    fallback_to_random: bool,
    *,
    use_random: bool = False,
) -> Chem.rdchem.Mol:
  """Embeds conformers into a copy of a molecule.

  If random coordinates are allowed, first tries non-random initialisation
  and retries once with random coordinates only if that fails.

  Args:
    molecule: molecular representation of the compound.
    max_num_conformers: maximum number of conformers to generate. If pruning is
      done, the returned number of conformers is not guaranteed to match
      max_num_conformers.
    random_seed: random seed to use for conformer generation.
    prune_rms_thresh: RMSD threshold which allows to prune conformers that are
      too similar.
    fallback_to_random: if conformers cannot be obtained, use random coordinates
      to initialise.
    use_random: use random coordinates (keyword-only). Shouldn't be set by any
      caller except this function itself, which sets it on its retry.

  Returns:
    A copy of a molecule with embedded conformers.

  Raises:
    ValueError: if conformers cannot be obtained for a given molecule.
  """
  mol = copy.deepcopy(molecule)
  # Obtains parameters for conformer generation.
  # In particular, ETKDG is experimental-torsion basic knowledge distance
  # geometry, which allows to randomly generate an initial conformation that
  # satisfies various geometric constraints such as lower and upper bounds on
  # the distances between atoms.
  params = AllChem.ETKDGv3()
  params.randomSeed = random_seed
  params.pruneRmsThresh = prune_rms_thresh
  params.numThreads = -1  # -1 lets RDKit use all available threads.
  params.useRandomCoords = use_random
  conf_ids = AllChem.EmbedMultipleConfs(mol, max_num_conformers, params)

  if not conf_ids:
    if not fallback_to_random or use_random:
      # Either fallback is disabled, or the random-coordinate retry itself
      # has already failed: give up.
      raise ValueError('Cant get conformers')
    # Single recursive retry with random initial coordinates.
    return _embed_conformers(
        mol,
        max_num_conformers,
        random_seed,
        prune_rms_thresh,
        fallback_to_random,
        use_random=True)

  return mol
def _minimize_by_mmff(
    molecule: Chem.rdchem.Mol,
    max_iter: int,
) -> Optional[Chem.rdchem.Mol]:
  """Optimises all conformers of a molecule with the MMFF force field.

  Args:
    molecule: a datastructure containing conformers.
    max_iter: number of maximum iterations to use when optimising force field.

  Returns:
    A copy of `molecule` with optimised conformers, or None when MMFF
    parameters cannot be derived for the molecule.
  """
  props = AllChem.MMFFGetMoleculeProperties(molecule)
  if props is None:
    return None
  optimised = copy.deepcopy(molecule)
  for conf_id in range(optimised.GetNumConformers()):
    force_field = AllChem.MMFFGetMoleculeForceField(
        optimised, props, confId=conf_id, ignoreInterfragInteractions=False)
    force_field.Initialize()
    # Minimises the conformer in place within `optimised`.
    force_field.Minimize(max_iter)
  return optimised
def _minimize_by_uff(
    molecule: Chem.rdchem.Mol,
    max_iter: int,
) -> Chem.rdchem.Mol:
  """Optimises all conformers of a molecule with the UFF force field.

  Args:
    molecule: a datastructure containing conformers.
    max_iter: number of maximum iterations to use when optimising force field.

  Returns:
    A copy of `molecule` with optimised conformers.
  """
  optimised = copy.deepcopy(molecule)
  for conf_id in range(optimised.GetNumConformers()):
    force_field = AllChem.UFFGetMoleculeForceField(optimised, confId=conf_id)
    force_field.Initialize()
    # Minimises the conformer in place within `optimised`.
    force_field.Minimize(max_iter)
  return optimised
def _get_symmetry_rotation_matrix(sign: tf.Tensor) -> tf.Tensor:
  """Builds the 3x3 mirror-symmetry matrix diag(sign, 1, 1)."""
  zero = tf.zeros_like(sign)
  one = tf.ones_like(sign)
  # Row-major entries; only the x-axis picks up `sign`.
  entries = [sign, zero, zero,
             zero, one, zero,
             zero, zero, one]
  return tf.reshape(tf.stack(entries, axis=-1), (3, 3))
def _quaternion_to_rotation_matrix(quaternion: tf.Tensor) -> tf.Tensor:
  """Converts a unit quaternion [w, x, y, z] into a 3x3 rotation matrix."""
  w = quaternion[0]
  x = quaternion[1]
  y = quaternion[2]
  z = quaternion[3]
  # Standard quaternion-to-rotation expansion, row by row.
  entries = [
      2 * (w * w + x * x) - 1, 2 * (x * y - w * z), 2 * (x * z + w * y),
      2 * (x * y + w * z), 2 * (w * w + y * y) - 1, 2 * (y * z - w * x),
      2 * (x * z - w * y), 2 * (y * z + w * x), 2 * (w * w + z * z) - 1,
  ]
  return tf.reshape(tf.stack(entries, axis=-1), [3, 3])
def _get_random_rotation_3d() -> tf.Tensor:
  """Samples a random rotation via a normalised Gaussian quaternion."""
  quaternion = tf.random.normal(shape=[4], dtype=tf.float32)
  quaternion = quaternion / tf.linalg.norm(
      quaternion, axis=-1, keepdims=True)
  return _quaternion_to_rotation_matrix(quaternion)
def _get_random_mirror_symmetry() -> tf.Tensor:
  """Returns a mirror matrix whose x-axis sign is flipped with prob. 1/2."""
  coin = tf.random.uniform(shape=(), minval=0, maxval=2, dtype=tf.int32)
  sign = tf.cast(2 * coin - 1, tf.float32)  # maps {0, 1} -> {-1, +1}
  return _get_symmetry_rotation_matrix(sign)
| |
# -*- coding: utf-8 -*-
"""
survey - Assessment Data Analysis Tool
For more details see the blueprint at:
http://eden.sahanafoundation.org/wiki/BluePrint/SurveyTool/ADAT
@todo: open template from the dataTables into the section tab not update
@todo: in the pages that add a link to a template make the combobox display the label not the numbers
"""
# Controller preamble: resolve the current module/function from the web2py
# request and bail out early when the survey module is disabled.
module = request.controller
resourcename = request.function

if not settings.has_module(module):
    raise HTTP(404, body="Module disabled: %s" % module)

try:
    from cStringIO import StringIO    # Faster C implementation, where available
except ImportError:
    # FIX: was a bare ``except:`` — only the missing-module case should fall
    # back to the pure-Python implementation; any other error is a real bug.
    from StringIO import StringIO

from gluon.contenttype import contenttype

from s3survey import S3AnalysisPriority, \
                     survey_question_type, \
                     survey_analysis_type, \
                     getMatrix, \
                     DEBUG, \
                     LayoutBlocks, \
                     DataMatrix, MatrixElement, \
                     S3QuestionTypeOptionWidget, \
                     survey_T
# -----------------------------------------------------------------------------
def index():
    """ Module's Home Page """

    name_nice = settings.modules[module].name_nice
    response.title = name_nice
    return dict(module_name=name_nice)
# -----------------------------------------------------------------------------
def create():
    """
    Enter a new assessment.
    - provides a simpler URL to access from mobile devices...
    """

    series_id = request.args[0]
    redirect(URL(f="newAssessment.iframe",
                 vars={"viewing": "survey_series.%s" % series_id}))
# -----------------------------------------------------------------------------
def template():
    """
    RESTful CRUD controller for survey templates.

    Configures pre/post-processing hooks for the REST request, handles the
    AJAX section-insert action, and delegates to s3_rest_controller.
    """

    # Load Model
    #table = s3db.survey_template

    def prep(r):
        # Pre-processor: adjust field writability and row styling before
        # the request is executed.
        if r.component:
            if r.component_name == "translate":
                table = s3db.survey_translate
                # FIX: identity comparison instead of ``== None``
                if r.component_id is None:
                    # list existing translations and allow the addition of a
                    # new translation
                    table.file.readable = False
                    table.file.writable = False
                else:
                    # edit the selected translation
                    table.language.writable = False
                    table.code.writable = False
                # remove CRUD generated buttons in the tabs
                s3db.configure("survey_translate",
                               deletable=False)
        else:
            table = r.table
            s3_action_buttons(r)
            # Status of Pending
            rows = db(table.status == 1).select(table.id)
            try:
                s3.actions[1]["restrict"].extend(str(row.id) for row in rows)
            except KeyError:   # the restrict key doesn't exist
                s3.actions[1]["restrict"] = [str(row.id) for row in rows]
            except IndexError: # the delete buttons doesn't exist
                pass
            # Add some highlighting to the rows
            # Status of Pending
            s3.dataTableStyleAlert = [str(row.id) for row in rows]
            # Status of closed
            rows = db(table.status == 3).select(table.id)
            s3.dataTableStyleDisabled = [str(row.id) for row in rows]
            s3.dataTableStyleWarning = [str(row.id) for row in rows]
            # Status of Master
            rows = db(table.status == 4).select(table.id)
            s3.dataTableStyleWarning.extend(str(row.id) for row in rows)
            s3db.configure("survey_template",
                           orderby = "survey_template.status",
                           create_next = URL(c="survey", f="template"),
                           update_next = URL(c="survey", f="template"),
                           )
        return True
    s3.prep = prep

    # Post-processor
    def postp(r, output):
        # Add translation download/upload action buttons after the request.
        if r.component:
            template_id = r.id
            if r.component_name == "translate":
                s3_action_buttons(r)
                s3.actions.append(dict(label=str(T("Download")),
                                       _class="action-btn",
                                       url=r.url(method = "translate_download",
                                                 component = "translate",
                                                 component_id = "[id]",
                                                 representation = "xls",
                                                 )
                                       ),
                                  )
                s3.actions.append(
                    dict(label=str(T("Upload")),
                         _class="action-btn",
                         url=URL(c=module,
                                 f="template",
                                 args=[template_id, "translate", "[id]"])
                         ),
                    )
            #elif r.component_name == "section":
            #    # Add the section select widget to the form
            #    # undefined
            #    sectionSelect = s3.survey_section_select_widget(template_id)
            #    output.update(form = sectionSelect)

            # Add a button to show what the questionnaire looks like
            #s3_action_buttons(r)
            #s3.actions = s3.actions + [
            #               dict(label=str(T("Display")),
            #                    _class="action-btn",
            #                    url=URL(c=module,
            #                            f="templateRead",
            #                            args=["[id]"])
            #                   ),
            #              ]
        return output
    s3.postp = postp

    if request.ajax:
        # AJAX "section" action: insert a new section into the template.
        post = request.post_vars
        action = post.get("action")
        template_id = post.get("parent_id")
        section_id = post.get("section_id")
        section_text = post.get("section_text")
        # FIX: identity comparison instead of ``!= None``
        if action == "section" and template_id is not None:
            id = db.survey_section.insert(name=section_text,
                                          template_id=template_id,
                                          cloned_section_id=section_id)
            if id is None:
                # FIX: parenthesised so it is valid in both Python 2 and 3
                print("Failed to insert record")
            return

    # Remove CRUD generated buttons in the tabs
    s3db.configure("survey_template",
                   listadd=False,
                   #deletable=False,
                   )

    output = s3_rest_controller(rheader=s3db.survey_template_rheader)
    return output
# -----------------------------------------------------------------------------
def templateRead():
    """
    Read-only view of a template rendered as the full questionnaire.

    The template is addressed either via ?viewing=survey_template.<id>
    (when embedded as a tab) or as the first positional request argument.
    """

    if len(get_vars) > 0:
        # "viewing" has the form "<tablename>.<record id>"
        dummy, template_id = get_vars.viewing.split(".")
    else:
        template_id = request.args[0]

    def postp(r, output):
        # Replace the default CRUD widgets with the rendered questionnaire.
        if r.interactive:
            template_id = r.id
            form = s3db.survey_buildQuestionnaireFromTemplate(template_id)
            output["items"] = None
            output["form"] = None
            output["item"] = form
            output["title"] = s3.crud_strings["survey_template"].title_question_details
        return output
    s3.postp = postp

    # remove CRUD generated buttons in the tabs
    s3db.configure("survey_template",
                   listadd=False,
                   editable=False,
                   deletable=False,
                   )

    r = s3_request("survey", "template", args=[template_id])
    output = r(method = "read", rheader=s3db.survey_template_rheader)
    return output
# -----------------------------------------------------------------------------
def templateSummary():
    """
    Analysis summary of a template across its completed responses.
    """

    # Load Model
    tablename = "survey_template"
    s3db[tablename]          # ensure the template model is loaded
    s3db.survey_complete     # ensure the completed-answers model is loaded
    crud_strings = s3.crud_strings[tablename]

    def postp(r, output):
        # Swap the default list output for the built summary form.
        if r.interactive:
            if len(get_vars) > 0:
                # "viewing" has the form "<tablename>.<record id>"
                dummy, template_id = get_vars.viewing.split(".")
            else:
                template_id = r.id
            form = s3db.survey_build_template_summary(template_id)
            output["items"] = form
            output["sortby"] = [[0, "asc"]]
            output["title"] = crud_strings.title_analysis_summary
            output["subtitle"] = crud_strings.subtitle_analysis_summary
        return output
    s3.postp = postp

    # remove CRUD generated buttons in the tabs
    s3db.configure(tablename,
                   listadd=False,
                   deletable=False,
                   )

    # NOTE(review): other controllers in this file pass
    # s3db.survey_template_rheader here; confirm whether the
    # s3.survey_template_rheader spelling below is intentional.
    output = s3_rest_controller("survey", "template",
                                method = "list",
                                rheader=s3.survey_template_rheader
                                )
    s3.actions = None
    return output
# -----------------------------------------------------------------------------
def series():
    """ RESTful CRUD controller """
    # Load Model
    table = s3db.survey_series
    s3db.survey_answerlist_dataTable_pre()
    def prep(r):
        """ Block series creation until a template exists; lock the
            template field once the series is being updated """
        if r.interactive:
            if r.method == "create":
                ttable = s3db.survey_template
                if not db(ttable.deleted == False).select(ttable.id,
                                                          limitby=(0, 1)
                                                          ):
                    session.warning = T("You need to create a template before you can create a series")
                    redirect(URL(c="survey", f="template", args=[], vars={}))
            if r.id and (r.method == "update"):
                # The template cannot be changed after creation
                table.template_id.writable = False
        return True
    s3.prep = prep
    def postp(r, output):
        # AJAX read: return just the item, without the page furniture
        if request.ajax == True and r.method == "read":
            return output["item"]
        if not r.component:
            # Set the minimum end_date to the same as the start_date
            s3.jquery_ready.append(
'''S3.start_end_date('survey_series_start_date','survey_series_end_date')''')
            s3db.survey_serieslist_dataTable_post(r)
        elif r.component_name == "complete":
            if r.method == "update":
                if r.http == "GET":
                    # Build the bespoke questionnaire form for this answer set
                    form = s3db.survey_buildQuestionnaireFromSeries(r.id,
                                                                   r.component_id)
                    output["form"] = form
                elif r.http == "POST":
                    # Save the submitted answers back into the complete record
                    if len(request.post_vars) > 0:
                        id = s3db.survey_save_answers_for_series(r.id,
                                                                 r.component_id, # Update
                                                                 request.post_vars)
                        response.confirmation = \
                            s3.crud_strings["survey_complete"].msg_record_modified
            else:
                s3db.survey_answerlist_dataTable_post(r)
        return output
    s3.postp = postp
    # Remove CRUD generated buttons in the tabs
    s3db.configure("survey_series",
                   deletable = False,
                   )
    s3db.configure("survey_complete",
                   listadd = False,
                   deletable = False,
                   )
    output = s3_rest_controller(rheader=s3db.survey_series_rheader)
    return output
# -----------------------------------------------------------------------------
def series_export_formatted():
    """
        Download a Spreadsheet which can be filled-in offline & uploaded
        @ToDo: rewrite as S3Method handler
    """
    # A series id is required; without one fall back to the series list
    try:
        series_id = request.args[0]
    except:
        output = s3_rest_controller(module, "series",
                                    rheader = s3db.survey_series_rheader)
        return output
    # Load Model
    table = s3db.survey_series
    s3db.table("survey_complete")
    vars = request.post_vars
    series = db(table.id == series_id).select(table.name,
                                              table.logo,
                                              limitby = (0, 1)
                                              ).first()
    if not series.logo:
        logo = None
    else:
        # Spreadsheet export embeds a .bmp logo; Word/RTF export uses .png
        if "Export_Spreadsheet" in vars:
            ext = "bmp"
        else:
            ext = "png"
        logo = os.path.join(request.folder,
                            "uploads",
                            "survey",
                            "logo",
                            "%s.%s" % (series.logo, ext)
                            )
        if not os.path.exists(logo) or not os.path.isfile(logo):
            logo = None
    # Get the translation dictionary
    langDict = dict()
    lang = request.post_vars.get("translationLanguage", None)
    if lang:
        if lang == "Default":
            langDict = dict()
        else:
            try:
                from gluon.languages import read_dict
                lang_fileName = "applications/%s/uploads/survey/translations/%s.py" % \
                                    (appname, lang)
                langDict = read_dict(lang_fileName)
            except:
                # Missing/unreadable translation file: fall back untranslated
                langDict = dict()
    if "Export_Spreadsheet" in vars:
        (matrix, matrixAnswers) = series_prepare_matrix(series_id,
                                                        series,
                                                        logo,
                                                        langDict,
                                                        justified = True
                                                        )
        output = series_export_spreadsheet(matrix,
                                           matrixAnswers,
                                           logo,
                                           )
        filename = "%s.xls" % series.name
        contentType = ".xls"
    elif "Export_Word" in vars:
        template = s3db.survey_getTemplateFromSeries(series_id)
        template_id = template.id
        title = "%s (%s)" % (series.name, template.name)
        title = survey_T(title, langDict)
        widgetList = s3db.survey_getAllWidgetsForTemplate(template_id)
        output = series_export_word(widgetList, langDict, title, logo)
        filename = "%s.rtf" % series.name
        contentType = ".rtf"
    else:
        # No recognised export format requested: show the series list
        output = s3_rest_controller(module, "series",
                                    rheader = s3db.survey_series_rheader)
        return output
    # Stream the generated document back as an attachment
    output.seek(0)
    response.headers["Content-Type"] = contenttype(contentType)
    response.headers["Content-disposition"] = "attachment; filename=\"%s\"" % filename
    return output.read()
# -----------------------------------------------------------------------------
def series_prepare_matrix(series_id, series, logo, langDict, justified=False):
    """
        Helper function for series_export_formatted()

        Builds the question matrix for the series; when justified=True a
        second pass aligns the widgets and a (matrix, answerMatrix) tuple
        is returned, otherwise the preliminary matrix is returned.
    """
    ######################################################################
    #
    # Get the data
    # ============
    # * The sections within the template
    # * The layout rules for each question
    ######################################################################
    # Check that the series_id has been passed in
    # NOTE(review): series_id is already a parameter (the caller validated
    # it) so re-reading request.args looks redundant; this branch would
    # also return a controller dict instead of a matrix — confirm
    try:
        series_id = request.args[0]
    except:
        output = s3_rest_controller(module, "series",
                                    rheader = s3db.survey_series_rheader)
        return output
    template = s3db.survey_getTemplateFromSeries(series_id)
    template_id = template.id
    sectionList = s3db.survey_getAllSectionsForSeries(series_id)
    title = "%s (%s)" % (series.name, template.name)
    title = survey_T(title, langDict)
    layout = []
    survey_getQstnLayoutRules = s3db.survey_getQstnLayoutRules
    for section in sectionList:
        sectionName = survey_T(section["name"], langDict)
        rules = survey_getQstnLayoutRules(template_id,
                                          section["section_id"])
        layoutRules = [sectionName, rules]
        layout.append(layoutRules)
    widgetList = s3db.survey_getAllWidgetsForTemplate(template_id)
    layoutBlocks = LayoutBlocks()
    ######################################################################
    #
    # Store the questions into a matrix based on the layout and the space
    # required for each question - for example an option question might
    # need one row for each possible option, and if this is in a layout
    # then the position needs to be recorded carefully...
    #
    ######################################################################
    preliminaryMatrix = getMatrix(title,
                                  logo,
                                  layout,
                                  widgetList,
                                  False,
                                  langDict,
                                  showSectionLabels = False,
                                  layoutBlocks = layoutBlocks
                                  )
    if not justified:
        return preliminaryMatrix
    ######################################################################
    # Align the questions so that each row takes up the same space.
    # This is done by storing resize and margin instructions with
    # each widget that is being printed
    ######################################################################
    layoutBlocks.align()
    ######################################################################
    # Now rebuild the matrix with the spacing for each widget set up so
    # that the document will be fully justified
    ######################################################################
    layoutBlocks = LayoutBlocks()
    (matrix1, matrix2) = getMatrix(title,
                                   logo,
                                   layout,
                                   widgetList,
                                   True,
                                   langDict,
                                   showSectionLabels = False,
                                   )
    return (matrix1, matrix2)
# -----------------------------------------------------------------------------
def series_export_word(widgetList, langDict, title, logo):
    """
        Export a Series in RTF Format
        @ToDo: rewrite as S3Method handler

        Returns a StringIO buffer containing the rendered RTF document.
    """
    try:
        from PyRTF import Document, \
                          Languages, \
                          Section, \
                          Image, \
                          Paragraph, \
                          ShadingPropertySet, \
                          ParagraphPropertySet, \
                          StandardColours, \
                          Colour, \
                          Table, \
                          Cell, \
                          Renderer
    except ImportError:
        # PyRTF is an optional dependency: fall back to the series list
        output = s3_rest_controller(module, "survey_series",
                                    rheader=s3db.survey_series_rheader)
        return output
    output = StringIO()
    doc = Document(default_language=Languages.EnglishUK)
    section = Section()
    ss = doc.StyleSheet
    # Grey-shaded variant of the Normal paragraph style
    ps = ss.ParagraphStyles.Normal.Copy()
    ps.SetName("NormalGrey")
    ps.SetShadingPropertySet(ShadingPropertySet(pattern=1,
                                                background=Colour("grey light", 224, 224, 224)))
    ss.ParagraphStyles.append(ps)
    # Centred variant of the Normal paragraph style
    ps = ss.ParagraphStyles.Normal.Copy()
    ps.SetName("NormalCentre")
    ps.SetParagraphPropertySet(ParagraphPropertySet(alignment=3))
    ss.ParagraphStyles.append(ps)
    doc.Sections.append(section)
    heading = Paragraph(ss.ParagraphStyles.Heading1)
    if logo:
        image = Image(logo)
        heading.append(image)
    heading.append(title)
    section.append(heading)
    # Two columns: question text / answer space
    col = [2800, 6500]
    table = Table(*col)
    AddRow = table.AddRow
    # Write the questions in their defined position order
    sortedwidgetList = sorted(widgetList.values(),
                              key=lambda widget: widget.question.posn)
    for widget in sortedwidgetList:
        line = widget.writeToRTF(ss, langDict)
        try:
            AddRow(*line)
        except:
            # A widget that fails to render is skipped (raised in DEBUG mode)
            if DEBUG:
                raise
            pass
    section.append(table)
    renderer = Renderer()
    renderer.Write(doc, output)
    return output
# -----------------------------------------------------------------------------
def series_export_spreadsheet(matrix, matrixAnswers, logo):
    """
        Now take the matrix data type and generate a spreadsheet from it

        Two sheets are produced: "Assessment" (the fill-in form, with only
        input cells unprotected) and "Metadata" (maps question codes to the
        cell addresses of their answers, read back by complete()'s importer).
        Returns a StringIO buffer containing the .xls workbook.
    """
    try:
        import xlwt
    except ImportError:
        # xlwt is an optional dependency: fall back to the series list
        response.error = T("xlwt not installed, so cannot export as a Spreadsheet")
        output = s3_rest_controller(module, "survey_series",
                                    rheader=s3db.survey_series_rheader)
        return output
    import math
    # -------------------------------------------------------------------------
    def wrapText(sheet, cell, style):
        """ Write a (possibly merged) cell with wrapped text, growing the
            row height / column width to fit the estimated line count """
        row = cell.row
        col = cell.col
        try:
            text = unicode(cell.text)
        except:
            text = cell.text
        width = 16
        # Wrap text and calculate the row width and height
        characters_in_cell = float(width-2)
        twips_per_row = 255 #default row height for 10 point font
        if cell.merged():
            try:
                sheet.write_merge(cell.row,
                                  cell.row + cell.mergeV,
                                  cell.col,
                                  cell.col + cell.mergeH,
                                  text,
                                  style
                                  )
            except Exception as msg:
                # Log and carry on: a bad merge shouldn't abort the export
                log = current.log
                log.error(msg)
                log.debug("row: %s + vert: %s, col: %s + horiz %s" % \
                          (cell.row, cell.mergeV, cell.col, cell.mergeH))
                posn = "%s,%s" % (cell.row, cell.col)
                if matrix.matrix[posn]:
                    log.debug(matrix.matrix[posn])
            rows = math.ceil((len(text) / characters_in_cell) / (1 + cell.mergeH))
        else:
            sheet.write(cell.row,
                        cell.col,
                        text,
                        style
                        )
            rows = math.ceil(len(text) / characters_in_cell)
        new_row_height = int(rows * twips_per_row)
        new_col_width = width * COL_WIDTH_MULTIPLIER
        # Only ever grow the row/column, never shrink it
        if sheet.row(row).height < new_row_height:
            sheet.row(row).height = new_row_height
        if sheet.col(col).width < new_col_width:
            sheet.col(col).width = new_col_width
    # -------------------------------------------------------------------------
    def mergeStyles(listTemplate, styleList):
        """
            Take a list of styles and return a single style object with
            all the differences from a newly created object added to the
            resultant style.
        """
        if len(styleList) == 0:
            finalStyle = xlwt.XFStyle()
        elif len(styleList) == 1:
            finalStyle = listTemplate[styleList[0]]
        else:
            zeroStyle = xlwt.XFStyle()
            finalStyle = xlwt.XFStyle()
            for i in range(0, len(styleList)):
                finalStyle = mergeObjectDiff(finalStyle,
                                             listTemplate[styleList[i]],
                                             zeroStyle)
        return finalStyle
    # -------------------------------------------------------------------------
    def mergeObjectDiff(baseObj, newObj, zeroObj):
        """
            function to copy all the elements in newObj that are different from
            the zeroObj and place them in the baseObj
        """
        elementList = newObj.__dict__
        for (element, value) in elementList.items():
            try:
                # Recurse into nested style objects (font, borders, ...)
                baseObj.__dict__[element] = mergeObjectDiff(baseObj.__dict__[element],
                                                            value,
                                                            zeroObj.__dict__[element])
            except:
                if zeroObj.__dict__[element] != value:
                    baseObj.__dict__[element] = value
        return baseObj
    COL_WIDTH_MULTIPLIER = 240
    book = xlwt.Workbook(encoding="utf-8")
    output = StringIO()
    # Cell protection: everything locked except the input cells
    protection = xlwt.Protection()
    protection.cell_locked = 1
    noProtection = xlwt.Protection()
    noProtection.cell_locked = 0
    borders = xlwt.Borders()
    borders.left = xlwt.Borders.DOTTED
    borders.right = xlwt.Borders.DOTTED
    borders.top = xlwt.Borders.DOTTED
    borders.bottom = xlwt.Borders.DOTTED
    # Thin (1) and medium (2) single-edge border variants
    borderT1 = xlwt.Borders()
    borderT1.top = xlwt.Borders.THIN
    borderT2 = xlwt.Borders()
    borderT2.top = xlwt.Borders.MEDIUM
    borderL1 = xlwt.Borders()
    borderL1.left = xlwt.Borders.THIN
    borderL2 = xlwt.Borders()
    borderL2.left = xlwt.Borders.MEDIUM
    borderR1 = xlwt.Borders()
    borderR1.right = xlwt.Borders.THIN
    borderR2 = xlwt.Borders()
    borderR2.right = xlwt.Borders.MEDIUM
    borderB1 = xlwt.Borders()
    borderB1.bottom = xlwt.Borders.THIN
    borderB2 = xlwt.Borders()
    borderB2.bottom = xlwt.Borders.MEDIUM
    alignBase = xlwt.Alignment()
    alignBase.horz = xlwt.Alignment.HORZ_LEFT
    alignBase.vert = xlwt.Alignment.VERT_TOP
    alignWrap = xlwt.Alignment()
    alignWrap.horz = xlwt.Alignment.HORZ_LEFT
    alignWrap.vert = xlwt.Alignment.VERT_TOP
    alignWrap.wrap = xlwt.Alignment.WRAP_AT_RIGHT
    shadedFill = xlwt.Pattern()
    shadedFill.pattern = xlwt.Pattern.SOLID_PATTERN
    shadedFill.pattern_fore_colour = 0x16 # 25% Grey
    shadedFill.pattern_back_colour = 0x08 # Black
    headingFill = xlwt.Pattern()
    headingFill.pattern = xlwt.Pattern.SOLID_PATTERN
    headingFill.pattern_fore_colour = 0x1F # ice_blue
    headingFill.pattern_back_colour = 0x08 # Black
    styleTitle = xlwt.XFStyle()
    styleTitle.font.height = 0x0140 # 320 twips, 16 points
    styleTitle.font.bold = True
    styleTitle.alignment = alignBase
    styleHeader = xlwt.XFStyle()
    styleHeader.font.height = 0x00F0 # 240 twips, 12 points
    styleHeader.font.bold = True
    styleHeader.alignment = alignBase
    styleSubHeader = xlwt.XFStyle()
    styleSubHeader.font.bold = True
    styleSubHeader.alignment = alignWrap
    styleSectionHeading = xlwt.XFStyle()
    styleSectionHeading.font.bold = True
    styleSectionHeading.alignment = alignWrap
    styleSectionHeading.pattern = headingFill
    styleHint = xlwt.XFStyle()
    styleHint.protection = protection
    styleHint.font.height = 160 # 160 twips, 8 points
    styleHint.font.italic = True
    styleHint.alignment = alignWrap
    styleText = xlwt.XFStyle()
    styleText.protection = protection
    styleText.alignment = alignWrap
    styleInstructions = xlwt.XFStyle()
    styleInstructions.font.height = 0x00B4 # 180 twips, 9 points
    styleInstructions.font.italic = True
    styleInstructions.protection = protection
    styleInstructions.alignment = alignWrap
    styleBox = xlwt.XFStyle()
    styleBox.borders = borders
    styleBox.protection = noProtection
    styleInput = xlwt.XFStyle()
    styleInput.borders = borders
    styleInput.protection = noProtection
    styleInput.pattern = shadedFill
    boxL1 = xlwt.XFStyle()
    boxL1.borders = borderL1
    boxL2 = xlwt.XFStyle()
    boxL2.borders = borderL2
    boxT1 = xlwt.XFStyle()
    boxT1.borders = borderT1
    boxT2 = xlwt.XFStyle()
    boxT2.borders = borderT2
    boxR1 = xlwt.XFStyle()
    boxR1.borders = borderR1
    boxR2 = xlwt.XFStyle()
    boxR2.borders = borderR2
    boxB1 = xlwt.XFStyle()
    boxB1.borders = borderB1
    boxB2 = xlwt.XFStyle()
    boxB2.borders = borderB2
    # Name -> style lookup used by mergeStyles() via each cell's styleList
    styleList = {}
    styleList["styleTitle"] = styleTitle
    styleList["styleHeader"] = styleHeader
    styleList["styleSubHeader"] = styleSubHeader
    styleList["styleSectionHeading"] = styleSectionHeading
    styleList["styleHint"] = styleHint
    styleList["styleText"] = styleText
    styleList["styleInstructions"] = styleInstructions
    styleList["styleInput"] = styleInput
    styleList["boxL1"] = boxL1
    styleList["boxL2"] = boxL2
    styleList["boxT1"] = boxT1
    styleList["boxT2"] = boxT2
    styleList["boxR1"] = boxR1
    styleList["boxR2"] = boxR2
    styleList["boxB1"] = boxB1
    styleList["boxB2"] = boxB2
    sheet1 = book.add_sheet(T("Assessment"))
    sheetA = book.add_sheet(T("Metadata"))
    maxCol = 0
    for cell in matrix.matrix.values():
        # .xls sheets are limited to 256 columns (0..255)
        if cell.col + cell.mergeH > 255:
            current.log.warning("Cell (%s,%s) - (%s,%s) ignored" % \
                (cell.col, cell.row, cell.col + cell.mergeH, cell.row + cell.mergeV))
            continue
        if cell.col + cell.mergeH > maxCol:
            maxCol = cell.col + cell.mergeH
        if cell.joined():
            continue
        style = mergeStyles(styleList, cell.styleList)
        if (style.alignment.wrap == style.alignment.WRAP_AT_RIGHT):
            # get all the styles from the joined cells
            # and merge these styles in.
            joinedStyles = matrix.joinedElementStyles(cell)
            joinedStyle =  mergeStyles(styleList, joinedStyles)
            try:
                wrapText(sheet1, cell, joinedStyle)
            except:
                pass
        else:
            if cell.merged():
                # get all the styles from the joined cells
                # and merge these styles in.
                joinedStyles = matrix.joinedElementStyles(cell)
                joinedStyle = mergeStyles(styleList, joinedStyles)
                try:
                    sheet1.write_merge(cell.row,
                                       cell.row + cell.mergeV,
                                       cell.col,
                                       cell.col + cell.mergeH,
                                       unicode(cell.text),
                                       joinedStyle
                                       )
                except Exception as msg:
                    # Log and continue: don't abort the whole export
                    log = current.log
                    log.error(msg)
                    log.debug("row: %s + vert: %s, col: %s + horiz %s" % \
                              (cell.row, cell.mergeV, cell.col, cell.mergeH))
                    posn = "%s,%s" % (cell.row, cell.col)
                    if matrix.matrix[posn]:
                        log.debug(matrix.matrix[posn])
            else:
                sheet1.write(cell.row,
                             cell.col,
                             unicode(cell.text),
                             style
                             )
    cellWidth = 480 # approximately 2 characters
    if maxCol > 255:
        maxCol = 255
    for col in range(maxCol + 1):
        sheet1.col(col).width = cellWidth
    # Metadata sheet: question codes and where their answers live
    sheetA.write(0, 0, "Question Code")
    sheetA.write(0, 1, "Response Count")
    sheetA.write(0, 2, "Values")
    sheetA.write(0, 3, "Cell Address")
    for cell in matrixAnswers.matrix.values():
        style = mergeStyles(styleList, cell.styleList)
        sheetA.write(cell.row,
                     cell.col,
                     unicode(cell.text),
                     style
                     )
    if logo != None:
        sheet1.insert_bitmap(logo, 0, 0)
    sheet1.protect = True
    sheetA.protect = True
    # Zero the width of the metadata columns so they are not visible
    for i in range(26):
        sheetA.col(i).width = 0
    sheetA.write(0,
                 26,
                 unicode(T("Please do not remove this sheet")),
                 styleHeader
                 )
    sheetA.col(26).width = 12000
    book.save(output)
    return output
# -----------------------------------------------------------------------------
def completed_chart():
    """
        Allows the user to display all the data from the selected question
        in a simple chart. If the data is numeric then a histogram will be
        drawn if it is an option type then a pie chart, although the type of
        chart drawn is managed by the analysis widget.
    """
    # All three request parameters are required
    series_id = get_vars.get("series_id")
    if not series_id:
        return "Programming Error: Series ID missing"
    question_id = get_vars.get("question_id")
    if not question_id:
        return "Programming Error: Question ID missing"
    q_type = get_vars.get("type")
    if not q_type:
        return "Programming Error: Question Type missing"
    answers = s3db.survey_getAllAnswersForQuestionInSeries(question_id,
                                                           series_id)
    # The analysis widget for this question type decides how to chart
    analysis_tool = survey_analysis_type[q_type](question_id, answers)
    qstn_name = analysis_tool.qstnWidget.question.name
    return analysis_tool.drawChart(series_id, output="png")
# -----------------------------------------------------------------------------
def section():
    """
        RESTful CRUD controller
        - unused
    """
    # Load Model
    #table = s3db.survey_section
    def prep(r):
        # Keep sections in position order and prevent deletion
        s3db.configure(r.tablename,
                       deletable = False,
                       orderby = "%s.posn" % r.tablename,
                       )
        return True
    s3.prep = prep
    # Post-processor
    def postp(r, output):
        """ Add the section select widget to the form """
        try:
            template_id = int(request.args[0])
        except:
            template_id = None
        # Undefined?
        # NOTE(review): this postp is not hooked up (the s3.postp
        # assignment below is commented out) and the widget is accessed
        # via s3 rather than s3db — confirm before re-enabling
        sectionSelect = s3.survey_section_select_widget(template_id)
        output["sectionSelect"] = sectionSelect
        return output
    #s3.postp = postp
    output = s3_rest_controller(# Undefined
                                #rheader=s3db.survey_section_rheader
                                )
    return output
# -----------------------------------------------------------------------------
def question():
    """ RESTful CRUD controller """
    def prep(r):
        # Keep the questions in their defined position order
        s3db.configure(r.tablename,
                       orderby = "%s.posn" % r.tablename,
                       )
        return True
    s3.prep = prep
    return s3_rest_controller(# Undefined
                              #rheader=s3db.survey_section_rheader
                              )
# -----------------------------------------------------------------------------
def question_list():
    """ RESTful CRUD controller """
    # Plain REST handling, no custom prep/postp needed
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def formatter():
    """ RESTful CRUD controller """
    # Plain REST handling, no custom prep/postp needed
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def question_metadata():
    """ RESTful CRUD controller """
    # Plain REST handling, no custom prep/postp needed
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def newAssessment():
    """
        RESTful CRUD controller to create a new 'complete' survey
        - although the created form is a fully custom one
    """
    # Load Model
    table = s3db.survey_complete
    s3db.table("survey_series")
    def prep(r):
        if r.interactive:
            # The series can come from ?viewing=tablename.id, ?series=id
            # or the record id of the request itself
            viewing = get_vars.get("viewing", None)
            if viewing:
                dummy, series_id = viewing.split(".")
            else:
                series_id = get_vars.get("series", None)
                if not series_id:
                    series_id = r.id
            if series_id is None:
                # The URL is bad, without a series id we're lost so list all series
                redirect(URL(c="survey", f="series", args=[], vars={}))
            if len(request.post_vars) > 0:
                # Submitted answers: insert a new complete record
                id = s3db.survey_save_answers_for_series(series_id,
                                                         None, # Insert
                                                         request.post_vars)
                response.confirmation = \
                    s3.crud_strings["survey_complete"].msg_record_created
            r.method = "create"
        return True
    s3.prep = prep
    def postp(r, output):
        if r.interactive:
            # Not sure why we need to repeat this & can't do it outside the prep/postp
            viewing = get_vars.get("viewing", None)
            if viewing:
                dummy, series_id = viewing.split(".")
            else:
                series_id = get_vars.get("series", None)
                if not series_id:
                    series_id = r.id
            if output["form"] is None:
                # The user is not authorised to create so switch to read
                redirect(URL(c="survey", f="series",
                             args=[series_id, "read"],
                             vars={}))
            # This is a bespoke form which confuses CRUD, which displays an
            # error "Invalid form (re-opened in another window?)"
            # So so long as we don't have an error in the form we can
            # delete this error.
            elif response.error and not output["form"]["error"]:
                response.error = None
            s3db.survey_answerlist_dataTable_post(r)
            # Replace the CRUD form with the bespoke questionnaire and add
            # an upload button for the offline spreadsheet import
            form = s3db.survey_buildQuestionnaireFromSeries(series_id, None)
            urlimport = URL(c=module, f="complete", args=["import"],
                            vars={"viewing":"%s.%s" % ("survey_series", series_id),
                                  "single_pass":True}
                            )
            buttons = DIV(A(T("Upload Completed Assessment Form"),
                            _href=urlimport,
                            _id="Excel-import",
                            _class="action-btn"
                            ),
                          )
            output["subtitle"] = buttons
            output["form"] = form
        return output
    s3.postp = postp
    output = s3_rest_controller(module, "complete",
                                method = "create",
                                rheader = s3db.survey_series_rheader
                                )
    return output
# -----------------------------------------------------------------------------
def complete():
    """ RESTful CRUD controller """
    # Load Model
    table = s3db.survey_complete
    stable = s3db.survey_series
    s3db.survey_answerlist_dataTable_pre()
    series_id = None
    try:
        viewing = get_vars.get("viewing", None)
        if viewing:
            dummy, series_id = viewing.split(".")
            series = db(stable.id == series_id).select(stable.name,
                                                       limitby=(0, 1)
                                                       ).first()
            if series:
                series_name = series.name
            else:
                series_name = ""
        # NB if viewing was absent, series_name is unbound here and the
        # resulting NameError is swallowed by the except below
        if series_name != "":
            csv_extra_fields = [dict(label="Series", value=series_name)]
        else:
            csv_extra_fields = []
    except:
        csv_extra_fields = []
    def postp(r, output):
        if r.method == "import":
            pass # don't want the import dataTable to be modified
        else:
            s3db.survey_answerlist_dataTable_post(r)
        return output
    s3.postp = postp
    def import_xls(uploadFile):
        """
            Import Assessment Spreadsheet

            Converts the uploaded workbook (as produced by
            series_export_spreadsheet) into a 2-line CSV buffer
            (header row + answers row) for the standard importer.
        """
        if series_id is None:
            response.error = T("Series details missing")
            return
        openFile = StringIO()
        try:
            import xlrd
            from xlwt.Utils import cell_to_rowcol2
        except ImportError:
            current.log.error("ERROR: xlrd & xlwt modules are needed for importing spreadsheets")
            return None
        workbook = xlrd.open_workbook(file_contents=uploadFile)
        try:
            sheetR = workbook.sheet_by_name("Assessment")
            sheetM = workbook.sheet_by_name("Metadata")
        except:
            session.error = T("You need to use the spreadsheet which you can download from this page")
            redirect(URL(c="survey", f="newAssessment", args=[],
                         vars={"viewing": "survey_series.%s" % series_id}))
        header = ""
        body = ""
        # Each Metadata row describes one question: its code, the number
        # of answer cells and where those cells live on the Assessment sheet
        for row in xrange(1, sheetM.nrows):
            header += ',"%s"' % sheetM.cell_value(row, 0)
            code = sheetM.cell_value(row, 0)
            # NOTE(review): other survey helpers in this file are accessed
            # via s3db; confirm this one really lives on s3
            qstn = s3.survey_getQuestionFromCode(code, series_id)
            type = qstn["type"]
            count = sheetM.cell_value(row, 1)
            if count != "":
                count = int(count)
                optionList = sheetM.cell_value(row, 2).split("|#|")
            else:
                count = 1
                optionList = None
            # Accumulator shape depends on the question type
            if type == "Location" and optionList != None:
                answerList = {}
            elif type == "MultiOption":
                answerList = []
            else:
                answerList = ""
            for col in range(count):
                cell = sheetM.cell_value(row, 3 + col)
                (rowR, colR) = cell_to_rowcol2(cell)
                try:
                    cellValue = sheetR.cell_value(rowR, colR)
                except IndexError:
                    # Cell outside the sheet's used range: treat as empty
                    cellValue = ""
                # BUG: The option list needs to work in different ways
                # depending on the question type. The question type should
                # be added to the spreadsheet to save extra db calls:
                # * Location save all the data as a hierarchy
                # * MultiOption save all selections
                # * Option save the last selection
                if cellValue != "":
                    if optionList != None:
                        if type == "Location":
                            answerList[optionList[col]]=cellValue
                        elif type == "MultiOption":
                            answerList.append(optionList[col])
                        else:
                            answerList = optionList[col]
                    else:
                        if type == "Date":
                            # Excel serial date -> ISO date string
                            try:
                                (dtYear, dtMonth, dtDay, dtHour, dtMinute, dtSecond) = \
                                         xlrd.xldate_as_tuple(cellValue,
                                                              workbook.datemode)
                                dtValue = datetime.date(dtYear, dtMonth, dtDay)
                                cellValue = dtValue.isoformat()
                            except:
                                pass
                        elif type == "Time":
                            # Excel fraction-of-a-day -> "H:M"
                            try:
                                time = cellValue
                                hour = int(time * 24)
                                minute = int((time * 24 - hour) * 60)
                                cellValue = "%s:%s" % (hour, minute)
                            except:
                                pass
                        answerList += "%s" % cellValue
            body += ',"%s"' % answerList
        openFile.write(header)
        openFile.write("\n")
        openFile.write(body)
        openFile.seek(0)
        return openFile
    s3db.configure("survey_complete",
                   listadd=False,
                   deletable=False)
    s3.xls_parser = import_xls
    output = s3_rest_controller(csv_extra_fields = csv_extra_fields)
    return output
# -----------------------------------------------------------------------------
def answer():
    """ RESTful CRUD controller """
    # Plain REST handling, no custom prep/postp needed
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def analysis():
    """
        RESTful CRUD controller
        - for Completed Answers
        - not editable (just for analysis)
    """
    # Read-only: no add, no delete
    s3db.configure("survey_complete",
                   listadd = False,
                   deletable = False,
                   )
    return s3_rest_controller(module, "complete")
# -----------------------------------------------------------------------------
def admin():
    """ Custom Page """
    # Resolve the series id from the first URL argument or from
    # ?viewing=tablename.id; falls back to None if neither parses
    series_id = None
    get_vars_new = Storage()
    try:
        series_id = int(request.args[0])
    except:
        try:
            (dummy, series_id) = get_vars["viewing"].split(".")
            # NB if this int() fails, series_id keeps the string from the
            # split above (the inner except just passes)
            series_id = int(series_id)
        except:
            pass
    if series_id:
        get_vars_new.viewing = "survey_complete.%s" % series_id
    return dict(series_id = series_id,
                vars = get_vars_new)
# END =========================================================================
| |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from tempest.api.image import base
from tempest.common import image as common_image
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions
CONF = config.CONF
def get_container_and_disk_format():
    """Select a (container_format, disk_format) pair for v1 image tests.

    In v1, if the chosen container format is one of 'ami'/'ari'/'aki',
    the disk format must be identical to it; otherwise the first
    configured disk format is used.
    """
    container_format = CONF.image.container_formats[0]
    # Non-Amazon container formats may pair with any disk format
    if container_format not in ('ami', 'ari', 'aki'):
        return container_format, CONF.image.disk_formats[0]
    # Amazon formats require disk_format == container_format; the two lists
    # in tempest.conf may be ordered differently (e.g. ami,ari,aki,bare vs
    # ari,ami,aki,vhd) so look the value up rather than relying on position.
    if container_format in CONF.image.disk_formats:
        return container_format, container_format
    msg = ("The container format and the disk format don't match. "
           "Container format: %(container)s, Disk format: %(disk)s." %
           {'container': container_format, 'disk':
            CONF.image.disk_formats})
    raise exceptions.InvalidConfiguration(msg)
class CreateRegisterImagesTest(base.BaseV1ImageTest):
    """Here we test the registration and creation of images."""
    @decorators.idempotent_id('3027f8e6-3492-4a11-8575-c3293017af4d')
    def test_register_then_upload(self):
        """Register, then upload an image"""
        properties = {'prop1': 'val1'}
        container_format, disk_format = get_container_and_disk_format()
        image = self.create_image(name='New Name',
                                  container_format=container_format,
                                  disk_format=disk_format,
                                  is_public=False,
                                  properties=properties)
        # A freshly-registered image has no data yet: status is 'queued'
        self.assertEqual('New Name', image.get('name'))
        self.assertFalse(image.get('is_public'))
        self.assertEqual('queued', image.get('status'))
        for key, val in properties.items():
            self.assertEqual(val, image.get('properties')[key])
        # Now try uploading an image file
        # (random_bytes() presumably defaults to 1024 bytes, matching the
        # size assertion below — confirm against tempest.lib.data_utils)
        image_file = six.BytesIO(data_utils.random_bytes())
        body = self.client.update_image(image['id'], data=image_file)['image']
        self.assertIn('size', body)
        self.assertEqual(1024, body.get('size'))
    @decorators.idempotent_id('69da74d9-68a9-404b-9664-ff7164ccb0f5')
    def test_register_remote_image(self):
        """Register a new remote image"""
        container_format, disk_format = get_container_and_disk_format()
        body = self.create_image(name='New Remote Image',
                                 container_format=container_format,
                                 disk_format=disk_format, is_public=False,
                                 location=CONF.image.http_image,
                                 properties={'key1': 'value1',
                                             'key2': 'value2'})
        # With a remote location the image is expected to be 'active'
        # immediately (no data upload step)
        self.assertEqual('New Remote Image', body.get('name'))
        self.assertFalse(body.get('is_public'))
        self.assertEqual('active', body.get('status'))
        properties = body.get('properties')
        self.assertEqual(properties['key1'], 'value1')
        self.assertEqual(properties['key2'], 'value2')
    @decorators.idempotent_id('6d0e13a7-515b-460c-b91f-9f4793f09816')
    def test_register_http_image(self):
        """Register a new image from an http image path url"""
        container_format, disk_format = get_container_and_disk_format()
        image = self.create_image(name='New Http Image',
                                  container_format=container_format,
                                  disk_format=disk_format, is_public=False,
                                  copy_from=CONF.image.http_image)
        self.assertEqual('New Http Image', image.get('name'))
        self.assertFalse(image.get('is_public'))
        # copy_from requires waiting until the image becomes 'active'
        waiters.wait_for_image_status(self.client, image['id'], 'active')
        self.client.show_image(image['id'])
    @decorators.idempotent_id('05b19d55-140c-40d0-b36b-fafd774d421b')
    def test_register_image_with_min_ram(self):
        """Register an image with min ram"""
        container_format, disk_format = get_container_and_disk_format()
        properties = {'prop1': 'val1'}
        body = self.create_image(name='New_image_with_min_ram',
                                 container_format=container_format,
                                 disk_format=disk_format,
                                 is_public=False,
                                 min_ram=40,
                                 properties=properties)
        self.assertEqual('New_image_with_min_ram', body.get('name'))
        self.assertFalse(body.get('is_public'))
        self.assertEqual('queued', body.get('status'))
        self.assertEqual(40, body.get('min_ram'))
        for key, val in properties.items():
            self.assertEqual(val, body.get('properties')[key])
        self.client.delete_image(body['id'])
class ListImagesTest(base.BaseV1ImageTest):
"""Here we test the listing of image information"""
    @classmethod
    def skip_checks(cls):
        # The filtering tests need at least two distinct container formats
        # and two distinct disk formats configured
        super(ListImagesTest, cls).skip_checks()
        if (len(CONF.image.container_formats) < 2 or
                len(CONF.image.disk_formats) < 2):
            skip_msg = ("%s skipped as multiple container formats "
                        "or disk formats are not available." % cls.__name__)
            raise cls.skipException(skip_msg)
    @classmethod
    def resource_setup(cls):
        super(ListImagesTest, cls).resource_setup()
        # We add a few images here to test the listing functionality of
        # the images API
        a_formats = ['ami', 'ari', 'aki']
        (cls.container_format,
         container_format_alt) = CONF.image.container_formats[:2]
        cls.disk_format, cls.disk_format_alt = CONF.image.disk_formats[:2]
        # ami/ari/aki container formats must pair with the same disk format
        # (see get_container_and_disk_format above)
        if cls.container_format in a_formats:
            cls.disk_format = cls.container_format
        if container_format_alt in a_formats:
            cls.disk_format_alt = container_format_alt
        img1 = cls._create_remote_image('one', cls.container_format,
                                        cls.disk_format)
        img2 = cls._create_remote_image('two', container_format_alt,
                                        cls.disk_format_alt)
        img3 = cls._create_remote_image('dup', cls.container_format,
                                        cls.disk_format)
        img4 = cls._create_remote_image('dup', cls.container_format,
                                        cls.disk_format)
        img5 = cls._create_standard_image('1', container_format_alt,
                                          cls.disk_format_alt, 42)
        img6 = cls._create_standard_image('2', container_format_alt,
                                          cls.disk_format_alt, 142)
        img7 = cls._create_standard_image('33', cls.container_format,
                                          cls.disk_format, 142)
        img8 = cls._create_standard_image('33', cls.container_format,
                                          cls.disk_format, 142)
        # Expected result sets used by the filtering tests below
        cls.created_set = set(cls.created_images)
        # same container format
        cls.same_container_format_set = set((img1, img3, img4, img7, img8))
        # same disk format
        cls.same_disk_format_set = set((img2, img5, img6))
        # 1x with size 42
        cls.size42_set = set((img5,))
        # 3x with size 142
        cls.size142_set = set((img6, img7, img8,))
        # dup named
        cls.dup_set = set((img3, img4))
@classmethod
def _create_remote_image(cls, name, container_format, disk_format):
"""Create a new remote image and return newly-registered image-id"""
name = 'New Remote Image %s' % name
location = CONF.image.http_image
image = cls.create_image(name=name,
container_format=container_format,
disk_format=disk_format,
is_public=False,
location=location)
return image['id']
@classmethod
def _create_standard_image(cls, name, container_format,
disk_format, size):
"""Create a new standard image and return newly-registered image-id
Note that the size of the new image is a random number between
1024 and 4096
"""
image_file = six.BytesIO(data_utils.random_bytes(size))
name = 'New Standard Image %s' % name
image = cls.create_image(name=name,
container_format=container_format,
disk_format=disk_format,
is_public=False, data=image_file)
return image['id']
@decorators.idempotent_id('246178ab-3b33-4212-9a4b-a7fe8261794d')
def test_index_no_params(self):
"""Simple test to see all fixture images returned"""
images_list = self.client.list_images()['images']
image_list = [image['id'] for image in images_list]
for image_id in self.created_images:
self.assertIn(image_id, image_list)
@decorators.idempotent_id('f1755589-63d6-4468-b098-589820eb4031')
def test_index_disk_format(self):
"""Test listing images by disk format"""
images_list = self.client.list_images(
disk_format=self.disk_format_alt)['images']
for image in images_list:
self.assertEqual(image['disk_format'], self.disk_format_alt)
result_set = set(map(lambda x: x['id'], images_list))
self.assertTrue(self.same_disk_format_set <= result_set)
self.assertFalse(self.created_set - self.same_disk_format_set <=
result_set)
@decorators.idempotent_id('2143655d-96d9-4bec-9188-8674206b4b3b')
def test_index_container_format(self):
"""Test listing images by container format"""
images_list = self.client.list_images(
container_format=self.container_format)['images']
for image in images_list:
self.assertEqual(image['container_format'], self.container_format)
result_set = set(map(lambda x: x['id'], images_list))
self.assertTrue(self.same_container_format_set <= result_set)
self.assertFalse(self.created_set - self.same_container_format_set <=
result_set)
@decorators.idempotent_id('feb32ac6-22bb-4a16-afd8-9454bb714b14')
def test_index_max_size(self):
"""Test listing images by max size"""
images_list = self.client.list_images(size_max=42)['images']
for image in images_list:
self.assertLessEqual(image['size'], 42)
result_set = set(map(lambda x: x['id'], images_list))
self.assertTrue(self.size42_set <= result_set)
self.assertFalse(self.created_set - self.size42_set <= result_set)
@decorators.idempotent_id('6ffc16d0-4cbf-4401-95c8-4ac63eac34d8')
def test_index_min_size(self):
"""Test listing images by min size"""
images_list = self.client.list_images(size_min=142)['images']
for image in images_list:
self.assertGreaterEqual(image['size'], 142)
result_set = set(map(lambda x: x['id'], images_list))
self.assertTrue(self.size142_set <= result_set)
self.assertFalse(self.size42_set <= result_set)
@decorators.idempotent_id('e5dc26d9-9aa2-48dd-bda5-748e1445da98')
def test_index_status_active_detail(self):
"""Test listing active images sorting by size in descending order"""
images_list = self.client.list_images(detail=True,
status='active',
sort_key='size',
sort_dir='desc')['images']
top_size = images_list[0]['size'] # We have non-zero sized images
for image in images_list:
size = image['size']
self.assertLessEqual(size, top_size)
top_size = size
self.assertEqual(image['status'], 'active')
@decorators.idempotent_id('097af10a-bae8-4342-bff4-edf89969ed2a')
def test_index_name(self):
"""Test listing images by its name"""
images_list = self.client.list_images(
detail=True,
name='New Remote Image dup')['images']
result_set = set(map(lambda x: x['id'], images_list))
for image in images_list:
self.assertEqual(image['name'], 'New Remote Image dup')
self.assertTrue(self.dup_set <= result_set)
self.assertFalse(self.created_set - self.dup_set <= result_set)
class UpdateImageMetaTest(base.BaseV1ImageTest):
    """Test image metadata"""

    @classmethod
    def resource_setup(cls):
        super(UpdateImageMetaTest, cls).resource_setup()
        container_format, disk_format = get_container_and_disk_format()
        cls.image_id = cls._create_standard_image('1', container_format,
                                                  disk_format, 42)

    @classmethod
    def _create_standard_image(cls, name, container_format,
                               disk_format, size):
        """Create a new standard image and return newly-registered image-id"""
        # Payload is `size` random bytes; the image is seeded with one
        # metadata pair so the tests below have a known starting state.
        image_file = six.BytesIO(data_utils.random_bytes(size))
        name = 'New Standard Image %s' % name
        image = cls.create_image(name=name,
                                 container_format=container_format,
                                 disk_format=disk_format,
                                 is_public=False, data=image_file,
                                 properties={'key1': 'value1'})
        return image['id']

    @decorators.idempotent_id('01752c1c-0275-4de3-9e5b-876e44541928')
    def test_list_image_metadata(self):
        """Test listing image metadata"""
        # All metadata key/value pairs for an image should be returned
        resp = self.client.check_image(self.image_id)
        resp_metadata = common_image.get_image_meta_from_headers(resp)
        expected = {'key1': 'value1'}
        self.assertEqual(expected, resp_metadata['properties'])

    @decorators.idempotent_id('d6d7649c-08ce-440d-9ea7-e3dda552f33c')
    def test_update_image_metadata(self):
        """Test updating image metadata"""
        # The metadata for the image should match the updated values
        req_metadata = {'key1': 'alt1', 'key2': 'value2'}
        resp = self.client.check_image(self.image_id)
        metadata = common_image.get_image_meta_from_headers(resp)
        self.assertEqual(metadata['properties'], {'key1': 'value1'})
        # Merge the new values over the existing ones and push them back
        # via image-meta headers.
        metadata['properties'].update(req_metadata)
        headers = common_image.image_meta_to_headers(
            properties=metadata['properties'])
        self.client.update_image(self.image_id, headers=headers)
        resp = self.client.check_image(self.image_id)
        resp_metadata = common_image.get_image_meta_from_headers(resp)
        self.assertEqual(req_metadata, resp_metadata['properties'])
| |
"""
sentry.web.frontend.accounts
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import six
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import login as login_user, authenticate
from django.core.context_processors import csrf
from django.core.urlresolvers import reverse
from django.db import IntegrityError, transaction
from django.http import HttpResponseRedirect, Http404
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.utils import timezone
from django.utils.translation import ugettext as _
from social_auth.backends import get_backend
from social_auth.models import UserSocialAuth
from sudo.decorators import sudo_required
from sentry.models import (
UserEmail, LostPasswordHash, Project, UserOption, Authenticator
)
from sentry.signals import email_verified
from sentry.web.decorators import login_required, signed_auth_required
from sentry.web.forms.accounts import (
AccountSettingsForm, AppearanceSettingsForm,
RecoverPasswordForm, ChangePasswordRecoverForm,
EmailForm
)
from sentry.web.helpers import render_to_response
from sentry.utils import auth
def send_password_recovery_mail(user):
    """Ensure a usable LostPasswordHash for ``user`` and send the recovery mail.

    An existing, still-valid hash is reused; otherwise the timestamp and hash
    are refreshed before sending. Returns the LostPasswordHash row.
    """
    recovery, _ = LostPasswordHash.objects.get_or_create(user=user)
    if not recovery.is_valid():
        # Stale (or freshly created) entry: refresh before mailing it out.
        recovery.date_added = timezone.now()
        recovery.set_hash()
        recovery.save()
    recovery.send_recover_mail()
    return recovery
@login_required
def login_redirect(request):
    """Redirect an authenticated user to their post-login destination."""
    return HttpResponseRedirect(auth.get_login_redirect(request))
def expired(request, user):
    """Handle an expired recovery link: resend the mail, show the expired page."""
    sent = send_password_recovery_mail(user)
    context = {'email': sent.user.email}
    return render_to_response('sentry/account/recover/expired.html', context, request)
def recover(request):
    """Render the password-recovery form; on valid POST send the mail."""
    form = RecoverPasswordForm(request.POST or None)
    if not form.is_valid():
        # GET, or POST with errors: redisplay the form.
        return render_to_response('sentry/account/recover/index.html',
                                  {'form': form}, request)
    sent = send_password_recovery_mail(form.cleaned_data['user'])
    return render_to_response('sentry/account/recover/sent.html', {
        'email': sent.user.email,
    }, request)
def recover_confirm(request, user_id, hash):
    """Second step of password recovery.

    Validates the emailed hash; on a valid POST sets the new password,
    re-authenticates (Django requires a backend to be attached before
    ``login``), logs the user in and consumes the hash.
    """
    try:
        password_hash = LostPasswordHash.objects.get(user=user_id, hash=hash)
        if not password_hash.is_valid():
            # Expired hash: remove it and fall through to the failure page.
            password_hash.delete()
            raise LostPasswordHash.DoesNotExist
        user = password_hash.user
    except LostPasswordHash.DoesNotExist:
        context = {}
        tpl = 'sentry/account/recover/failure.html'
    else:
        tpl = 'sentry/account/recover/confirm.html'
        if request.method == 'POST':
            form = ChangePasswordRecoverForm(request.POST)
            if form.is_valid():
                user.set_password(form.cleaned_data['password'])
                user.save()
                # Ugly way of doing this, but Django requires the backend be set
                user = authenticate(
                    username=user.username,
                    password=form.cleaned_data['password'],
                )
                login_user(request, user)
                # One-shot link: consume the hash on success.
                password_hash.delete()
                return login_redirect(request)
        else:
            form = ChangePasswordRecoverForm()
        context = {
            'form': form,
        }
    return render_to_response(tpl, context, request)
@login_required
def start_confirm_email(request):
    """Send verification mail for any unverified addresses, then redirect."""
    user = request.user
    if user.has_unverified_emails():
        user.send_confirm_emails()
        pending = [e.email for e in user.get_unverified_emails()]
        msg = _('A verification email has been sent to %s.') % ', '.join(pending)
    else:
        msg = _('Your email (%s) has already been verified.') % user.email
    messages.add_message(request, messages.SUCCESS, msg)
    return HttpResponseRedirect(reverse('sentry-account-settings-emails'))
def confirm_email(request, user_id, hash):
    """Confirm an email address from an emailed verification link."""
    msg = _('Thanks for confirming your email')
    level = messages.SUCCESS
    try:
        email = UserEmail.objects.get(user=user_id, validation_hash=hash)
        if not email.hash_is_valid():
            raise UserEmail.DoesNotExist
    except UserEmail.DoesNotExist:
        # NOTE(review): a logged-in user with no unverified emails who hits a
        # stale link still gets the success message/level set above —
        # presumably intentional (the address was already confirmed), but
        # worth confirming.
        if request.user.is_anonymous() or request.user.has_unverified_emails():
            msg = _('There was an error confirming your email. Please try again or '
                    'visit your Account Settings to resend the verification email.')
            level = messages.ERROR
    else:
        email.is_verified = True
        email.validation_hash = ''
        email.save()
        email_verified.send(email=email.email, sender=email)
    messages.add_message(request, level, msg)
    return HttpResponseRedirect(reverse('sentry-account-settings-emails'))
@csrf_protect
@never_cache
@login_required
@transaction.atomic
def account_settings(request):
    """Render/handle the main account settings form (email, username, name).

    When the primary email changes, the old UserEmail row is deleted, a new
    unverified row is created for the new address, and confirmation mail is
    sent.
    """
    user = request.user
    form = AccountSettingsForm(
        user, request.POST or None,
        initial={
            'email': UserEmail.get_primary_email(user).email,
            'username': user.username,
            'name': user.name,
        },
    )
    if form.is_valid():
        old_email = user.email
        form.save()
        # remove previously valid email address
        # TODO(dcramer): we should maintain validation here when we support
        # multiple email addresses
        if request.user.email != old_email:
            UserEmail.objects.filter(user=user, email=old_email).delete()
            try:
                # Nested atomic block so an IntegrityError (row already
                # exists) doesn't poison the outer transaction.
                with transaction.atomic():
                    user_email = UserEmail.objects.create(
                        user=user,
                        email=user.email,
                    )
            except IntegrityError:
                pass
            else:
                user_email.set_hash()
                user_email.save()
            user.send_confirm_emails()
        messages.add_message(
            request, messages.SUCCESS, 'Your settings were saved.')
        return HttpResponseRedirect(request.path)
    context = csrf(request)
    context.update({
        'form': form,
        'page': 'settings',
        'has_2fa': Authenticator.objects.user_has_2fa(request.user),
        'AUTH_PROVIDERS': auth.get_auth_providers(),
    })
    return render_to_response('sentry/account/settings.html', context, request)
@csrf_protect
@never_cache
@login_required
@sudo_required
@transaction.atomic
def twofactor_settings(request):
    """Render the 2FA security page; POSTing 'back' returns to settings."""
    interfaces = Authenticator.objects.all_interfaces_for_user(
        request.user, return_missing=True)
    if request.method == 'POST' and 'back' in request.POST:
        return HttpResponseRedirect(reverse('sentry-account-settings'))
    context = csrf(request)
    context['page'] = 'security'
    # Backup interfaces don't count toward "has 2FA enabled".
    context['has_2fa'] = any(
        x.is_enrolled and not x.is_backup_interface for x in interfaces)
    context['interfaces'] = interfaces
    return render_to_response('sentry/account/twofactor.html', context, request)
@csrf_protect
@never_cache
@login_required
@transaction.atomic
def avatar_settings(request):
    """Render the avatar settings page."""
    context = csrf(request)
    context['page'] = 'avatar'
    context['AUTH_PROVIDERS'] = auth.get_auth_providers()
    return render_to_response('sentry/account/avatar.html', context, request)
@csrf_protect
@never_cache
@login_required
@transaction.atomic
def appearance_settings(request):
    """Render/handle the appearance preferences form.

    Preferences (language, stacktrace order, timezone, clock format) are
    stored as project-less UserOptions.
    """
    from django.conf import settings
    options = UserOption.objects.get_all_values(user=request.user, project=None)
    initial = {
        'language': options.get('language') or request.LANGUAGE_CODE,
        'stacktrace_order': int(options.get('stacktrace_order', -1) or -1),
        'timezone': options.get('timezone') or settings.SENTRY_DEFAULT_TIME_ZONE,
        'clock_24_hours': options.get('clock_24_hours') or False,
    }
    form = AppearanceSettingsForm(request.user, request.POST or None,
                                  initial=initial)
    if form.is_valid():
        form.save()
        messages.add_message(request, messages.SUCCESS, 'Your settings were saved.')
        return HttpResponseRedirect(request.path)
    context = csrf(request)
    context.update({
        'form': form,
        'page': 'appearance',
        'AUTH_PROVIDERS': auth.get_auth_providers(),
    })
    return render_to_response('sentry/account/appearance.html', context, request)
@csrf_protect
@never_cache
@signed_auth_required
@transaction.atomic
def email_unsubscribe_project(request, project_id):
    """Opt the signed-link user out of mail alerts for one project."""
    # For now we only support getting here from the signed link.
    if not request.user_from_signed_request:
        raise Http404()
    project = Project.objects.filter(pk=project_id).first()
    if project is None:
        raise Http404()
    if request.method == 'POST':
        # 'cancel' leaves the option untouched; anything else disables alerts.
        if 'cancel' not in request.POST:
            UserOption.objects.set_value(
                request.user, project, 'mail:alert', 0)
        return HttpResponseRedirect(auth.get_login_url())
    context = csrf(request)
    context['project'] = project
    return render_to_response('sentry/account/email_unsubscribe_project.html',
                              context, request)
@csrf_protect
@never_cache
@login_required
def list_identities(request):
    """Show the social-auth identities linked to the current user."""
    identities = list(UserSocialAuth.objects.filter(user=request.user))
    context = csrf(request)
    context['identity_list'] = identities
    context['page'] = 'identities'
    context['AUTH_PROVIDERS'] = auth.get_auth_providers()
    return render_to_response('sentry/account/identities.html', context, request)
@csrf_protect
@never_cache
@login_required
def disconnect_identity(request, identity_id):
    """Disconnect one social-auth identity from the current user (POST only)."""
    if request.method != 'POST':
        raise NotImplementedError
    try:
        # NOTE(review): this local `auth` shadows the module-level
        # sentry.utils.auth import for the rest of the function.
        auth = UserSocialAuth.objects.get(id=identity_id)
    except UserSocialAuth.DoesNotExist:
        return HttpResponseRedirect(reverse('sentry-account-settings-identities'))
    backend = get_backend(auth.provider, request, '/')
    if backend is None:
        raise Exception('Backend was not found for request: {}'.format(auth.provider))
    # stop this from bubbling up errors to social-auth's middleware
    # XXX(dcramer): IM SO MAD ABOUT THIS
    try:
        backend.disconnect(request.user, identity_id)
    except Exception as exc:
        import sys
        # Re-raise as a plain Exception with the original traceback so the
        # social-auth middleware doesn't special-case the original type.
        exc_tb = sys.exc_info()[2]
        six.reraise(Exception, exc, exc_tb)
        del exc_tb
    # XXX(dcramer): we experienced an issue where the identity still existed,
    # and given that this is a cheap query, lets error hard in that case
    assert not UserSocialAuth.objects.filter(
        user=request.user,
        id=identity_id,
    ).exists()
    backend_name = backend.AUTH_BACKEND.name
    messages.add_message(
        request, messages.SUCCESS,
        'Your {} identity has been disconnected.'.format(
            settings.AUTH_PROVIDER_LABELS.get(backend_name, backend_name),
        )
    )
    return HttpResponseRedirect(reverse('sentry-account-settings-identities'))
@csrf_protect
@never_cache
@login_required
def show_emails(request):
    """Render/handle the email management page.

    Supports removing an address ('remove' in POST), changing the primary
    email (old row deleted, new unverified row created), and adding an
    alternative address. Confirmation mail is sent for new addresses.
    """
    user = request.user
    primary_email = UserEmail.get_primary_email(user)
    alt_emails = user.emails.all().exclude(email=primary_email.email)
    email_form = EmailForm(user, request.POST or None,
                           initial={
                               'primary_email': primary_email.email,
                           },
                           )
    if 'remove' in request.POST:
        email = request.POST.get('email')
        del_email = UserEmail.objects.filter(user=user, email=email)
        del_email.delete()
        return HttpResponseRedirect(request.path)
    if email_form.is_valid():
        old_email = user.email
        email_form.save()
        if user.email != old_email:
            # Primary email changed: migrate any UserOptions that stored the
            # old address, then swap the UserEmail rows.
            useroptions = UserOption.objects.filter(user=user, value=old_email)
            for option in useroptions:
                option.value = user.email
                option.save()
            UserEmail.objects.filter(user=user, email=old_email).delete()
            try:
                # Nested atomic block so an IntegrityError doesn't poison the
                # surrounding transaction.
                with transaction.atomic():
                    user_email = UserEmail.objects.create(
                        user=user,
                        email=user.email,
                    )
            except IntegrityError:
                pass
            else:
                user_email.set_hash()
                user_email.save()
            user.send_confirm_emails()
        alternative_email = email_form.cleaned_data['alt_email']
        # check if this alternative email already exists for user
        if alternative_email and not UserEmail.objects.filter(user=user, email=alternative_email):
            # create alternative email for user
            try:
                with transaction.atomic():
                    new_email = UserEmail.objects.create(
                        user=user,
                        email=alternative_email
                    )
            except IntegrityError:
                pass
            else:
                new_email.set_hash()
                new_email.save()
                # send confirmation emails to any non verified emails
                user.send_confirm_emails()
        messages.add_message(
            request, messages.SUCCESS, 'Your settings were saved.')
        return HttpResponseRedirect(request.path)
    context = csrf(request)
    context.update({
        'email_form': email_form,
        'primary_email': primary_email,
        'alt_emails': alt_emails,
        'page': 'emails',
        'AUTH_PROVIDERS': auth.get_auth_providers(),
    })
    return render_to_response('sentry/account/emails.html', context, request)
| |
import subprocess
import json
import csv
import shutil
import sys
import os
import argparse
"""
This script collects CodeQL queries that are part of code scanning query packs
and prints CSV data to stdout that describes which packs contain which queries.
Errors are printed to stderr. This script requires that 'git' and 'codeql' commands
are on the PATH. It'll try to automatically set the CodeQL search path correctly,
as long as you run the script from one of the following locations:
- anywhere from within a clone of the CodeQL Git repo
- from the parent directory of a clone of the CodeQL Git repo (assuming 'codeql'
and 'codeql-go' directories both exist)
"""
# Command-line interface: the only flag controls whether a missing query
# pack is fatal or merely skipped with a warning.
parser = argparse.ArgumentParser(__name__)
parser.add_argument(
    "--ignore-missing-query-packs",
    action="store_true",
    help="Don't fail if a query pack can't be found",
)
arguments = parser.parse_args()
# argparse converts dashes to underscores; sanity-check the attribute exists.
assert hasattr(arguments, "ignore_missing_query_packs")
# Define which languages and query packs to consider
languages = [ "cpp", "csharp", "go", "java", "javascript", "python", "ruby"]
packs = [ "code-scanning", "security-and-quality", "security-extended" ]
class CodeQL:
    """Context manager wrapping a long-running ``codeql execute cli-server``
    process, so many CLI commands can be issued without per-command startup
    cost. Commands are exchanged over stdin/stdout as NUL-terminated JSON.
    """

    def __init__(self):
        pass

    def __enter__(self):
        self.proc = subprocess.Popen(['codeql', 'execute', 'cli-server'],
                                     executable=shutil.which('codeql'),
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE,
                                     stderr=sys.stderr,
                                     env=os.environ.copy(),
                                     )
        return self

    def __exit__(self, type, value, tb):
        # Ask the server to shut down gracefully; kill it if it doesn't
        # comply within 5 seconds so we never leak the child process.
        self.proc.stdin.write(b'["shutdown"]\0')
        self.proc.stdin.close()
        try:
            self.proc.wait(5)
        except subprocess.TimeoutExpired:  # was a bare except: too broad
            self.proc.kill()

    def command(self, args):
        """Send one command (a list of CLI argument strings) to the server
        and return its NUL-terminated output as a string.
        """
        data = json.dumps(args)
        data_bytes = data.encode('utf-8')
        self.proc.stdin.write(data_bytes)
        self.proc.stdin.write(b'\0')
        self.proc.stdin.flush()
        res = b''
        while True:
            b = self.proc.stdout.read(1)
            if b == b'\0':
                return res.decode('utf-8')
            if not b:
                # EOF before the terminator: the server died. The original
                # code would spin forever here.
                raise RuntimeError('codeql cli-server terminated unexpectedly')
            res += b
def prefix_repo_nwo(filename):
    """
    Replaces an absolute path prefix with a GitHub repository name with owner (NWO).
    This function relies on `git` being available.
    For example:
        /home/alice/git/ql/java/ql/src/MyQuery.ql
    becomes:
        github/codeql/java/ql/src/MyQuery.ql
    If we can't detect a known NWO (e.g. github/codeql, github/codeql-go), the
    path will be truncated to the root of the git repo:
        ql/java/ql/src/MyQuery.ql
    If the filename is not part of a Git repo, the return value is the
    same as the input value: the whole path.
    """
    dirname = os.path.dirname(filename)
    try:
        git_toplevel_dir_subp = subprocess_run(["git", "-C", dirname, "rev-parse", "--show-toplevel"])
    except (OSError, subprocess.CalledProcessError):
        # Was a bare `except:`, which also swallowed KeyboardInterrupt etc.
        # Not a Git repo (or git failed for this path): return unchanged.
        return filename
    git_toplevel_dir = git_toplevel_dir_subp.stdout.strip()
    # Detect 'github/codeql' and 'github/codeql-go' repositories by checking the remote (it's a bit
    # of a hack but will work in most cases, as long as the remotes have 'codeql' and 'codeql-go'
    # in the URL
    git_remotes = subprocess_run(["git", "-C", dirname, "remote", "-v"]).stdout.strip()
    if "codeql-go" in git_remotes:
        prefix = "github/codeql-go"
    elif "codeql" in git_remotes:
        prefix = "github/codeql"
    else:
        prefix = os.path.basename(git_toplevel_dir)
    return os.path.join(prefix, filename[len(git_toplevel_dir)+1:])
def single_spaces(input):
    """
    Workaround for https://github.com/github/codeql-coreql-team/issues/470 which causes
    some metadata strings to contain newlines and spaces without a good reason.

    Splitting on arbitrary whitespace and re-joining collapses every run of
    whitespace (including newlines) into a single space.
    """
    tokens = input.split()
    return " ".join(tokens)
def get_query_metadata(key, metadata, queryfile):
    """Returns query metadata or prints a warning to stderr if a particular piece of metadata is not available."""
    try:
        return single_spaces(metadata[key])
    except KeyError:
        query_id = metadata.get('id', 'unknown')
        print("Warning: no '%s' metadata for query with ID '%s' (%s)" % (key, query_id, queryfile), file=sys.stderr)
        return ""
def subprocess_run(cmd):
    """Runs a command through subprocess.run, with a few tweaks. Raises an Exception if exit code != 0."""
    return subprocess.run(
        cmd,
        check=True,
        capture_output=True,
        text=True,
        env=os.environ.copy(),
    )
# --- Script entry: verify prerequisites, then emit one CSV row per query ---
try:  # Check for `git` on path
    subprocess_run(["git", "--version"])
except Exception as e:
    print("Error: couldn't invoke 'git'. Is it on the path? Aborting.", file=sys.stderr)
    raise e

with CodeQL() as codeql:
    try:  # Check for `codeql` on path
        codeql.command(["--version"])
    except Exception as e:
        print("Error: couldn't invoke CodeQL CLI 'codeql'. Is it on the path? Aborting.", file=sys.stderr)
        raise e
    # Define CodeQL search path so it'll find the CodeQL repositories:
    # - anywhere in the current Git clone (including current working directory)
    # - the 'codeql' subdirectory of the cwd
    #
    # (and assumes the codeql-go repo is in a similar location)
    codeql_search_path = "./codeql:./codeql-go:."  # will be extended further down
    # Extend CodeQL search path by detecting root of the current Git repo (if any). This means that you
    # can run this script from any location within the CodeQL git repository.
    try:
        git_toplevel_dir = subprocess_run(["git", "rev-parse", "--show-toplevel"])
        # Current working directory is in a Git repo. Add it to the search path, just in case it's the CodeQL repo
        git_toplevel_dir = git_toplevel_dir.stdout.strip()
        codeql_search_path += ":" + git_toplevel_dir + ":" + git_toplevel_dir + "/../codeql-go"
    except:
        # git rev-parse --show-toplevel exited with non-zero exit code. We're not in a Git repo
        pass
    # Create CSV writer and write CSV header to stdout
    csvwriter = csv.writer(sys.stdout)
    csvwriter.writerow([
        "Query filename", "Suite", "Query name", "Query ID",
        "Kind", "Severity", "Precision", "Tags"
    ])
    # Iterate over all languages and packs, and resolve which queries are part of those packs
    for lang in languages:
        for pack in packs:
            # Get absolute paths to queries in this pack by using 'codeql resolve queries'
            try:
                queries_subp = codeql.command(["resolve", "queries", "--search-path", codeql_search_path, "%s-%s.qls" % (lang, pack)])
            except Exception as e:
                # Resolving queries might go wrong if the github/codeql and github/codeql-go repositories are not
                # on the search path.
                level = "Warning" if arguments.ignore_missing_query_packs else "Error"
                print(
                    "%s: couldn't find query pack '%s' for language '%s'. Do you have the right repositories in the right places (search path: '%s')?" % (level, pack, lang, codeql_search_path),
                    file=sys.stderr
                )
                if arguments.ignore_missing_query_packs:
                    continue
                else:
                    sys.exit("You can use '--ignore-missing-query-packs' to ignore this error")
            # Investigate metadata for every query by using 'codeql resolve metadata'
            for queryfile in queries_subp.strip().split("\n"):
                query_metadata_json = codeql.command(["resolve", "metadata", queryfile]).strip()
                # Turn an absolute path to a query file into an nwo-prefixed path (e.g. github/codeql/java/ql/src/....)
                queryfile_nwo = prefix_repo_nwo(queryfile)
                meta = json.loads(query_metadata_json)
                # Python's CSV writer will automatically quote fields if necessary
                csvwriter.writerow([
                    queryfile_nwo, pack,
                    get_query_metadata('name', meta, queryfile_nwo),
                    get_query_metadata('id', meta, queryfile_nwo),
                    get_query_metadata('kind', meta, queryfile_nwo),
                    get_query_metadata('problem.severity', meta, queryfile_nwo),
                    get_query_metadata('precision', meta, queryfile_nwo),
                    get_query_metadata('tags', meta, queryfile_nwo)
                ])
| |
#! /usr/bin/env python3
"""
Utility for building a map using installed Source SDK tools.
Call with -h or --help to see usage information.
Examples:
# Creates/installs/runs .bsp in same dir
python buildbsp.py --game tf2 mymap.vmf
# Creates/installs .bsp but does not run
python buildbsp.py --game css --no-run mymap.vmf
# Only create .bsp, and use fast config
python buildbsp.py --game tf2 --no-run --no-install --fast mymap.vmf
"""
import argparse
import sys
import os
import subprocess
import webbrowser
import urllib.parse
import shutil
class Game:
    """Describes one Steam game's install layout and map-compile toolchain."""

    def __init__(self, id, dir, common, uses_sdk):
        self.id = id  # Numeric Steam catalog ID number
        self.dir = dir  # Path to inner game directory (containing gameinfo.txt)
        self.common = common  # Game lives under "common" rather than "<username>"
        self.uses_sdk = uses_sdk  # False if game ships with its own map compilers

    def get_game_dir(self, username=False):
        """Returns joined game directory path relative to Steamapps"""
        if not self.common and not username:
            raise RuntimeError("Can't determine this game's directory without username")
        if self.common:
            subdir = "common"
        else:
            # BUG FIX: use the caller-supplied account name; the original
            # assigned the literal string "username", so every non-common
            # game resolved to Steamapps/username/... regardless of account.
            subdir = username
        subsubdir = self.dir
        # Windows installs use lowercased game directory names.
        if WIN32 or CYGWIN:
            subsubdir = subsubdir.lower()
        return os.path.join(subdir, subsubdir)
# Platform detection flags, used to pick path conventions and tool launchers.
WIN32 = sys.platform.startswith('win32')
CYGWIN = sys.platform.startswith('cygwin')
LINUX = sys.platform.startswith('linux')
DARWIN = False  # Not supported yet
# Supported games, keyed by the short name accepted by the --game option.
GAMES = {
    'tf2': Game(440, os.path.join("Team Fortress 2", "tf"), True, False),
    'css': Game(240, os.path.join("Counter-Strike Source", "cstrike"), False, False),
    'hl2': Game(220, os.path.join("Half-Life 2", "hl2"), False, True),
    'hl2mp': Game(320, os.path.join("Half-Life 2 Deathmatch", "hl2mp"), False, False),
    'gm': Game(4000, os.path.join("GarrysMod", "garrysmod"), False, True),
}
def _make_arg_parser():
    """Build the command-line parser for the map build tool."""
    ap = argparse.ArgumentParser(description='Build, install, and test a VMF map.')
    ap.add_argument('map')
    ap.add_argument('-g', '--game', default='tf2', choices=GAMES.keys(),
                    help="selects which game to use")
    ap.add_argument('--no-run', action="store_true",
                    help="don't run the game after building/installing")
    ap.add_argument('--no-install', action="store_true",
                    help="don't install (or run) the map after building")
    ap.add_argument('-f', '--fast', action="store_true",
                    help="enable fast compile options")
    ap.add_argument('--hdr', action="store_true",
                    help="enable full HDR compile")
    ap.add_argument('--final', action="store_true",
                    help="use with --hdr for slow high-quality HDR compile")
    ap.add_argument('--steam-windows-path',
                    help="path to your (Windows) Steam folder (for games not dependent on SDK)")
    ap.add_argument('--username',
                    help="your Steam username (needed for some games)")
    return ap
def main():
parser = _make_arg_parser()
args = parser.parse_args()
game = GAMES[args.game]
username = args.username # May be None
vmf_file = os.path.abspath(args.map)
path, filename = os.path.split(vmf_file)
mapname = filename[:-4]
mappath = os.path.join(path, mapname)
bsp_file = os.path.join(path, mapname + ".bsp")
sourcesdk = None
winsteam = args.steam_windows_path
if not winsteam:
winsteam = os.getenv('winsteam')
# We need to find out where the SteamApps directory is.
if winsteam:
steamapps = os.path.join(winsteam, "Steamapps")
if not os.path.isdir(steamapps): # Try lowercase
steamapps = os.path.join(winsteam, "steamapps")
if not os.path.isdir(steamapps):
raise Exception(
"The provided Steam directory does not contain a Steamapps directory: %s" %
os.path.abspath(winsteam)
)
elif WIN32 or CYGWIN:
sourcesdk = os.getenv('sourcesdk')
if CYGWIN:
def cygwin2dos(path):
return subprocess.check_output(["cygpath", '-w', '%s' % path], universal_newlines=True).strip()
sourcesdk = subprocess.check_output(["cygpath", sourcesdk], universal_newlines=True).strip()
sourcesdk = os.path.abspath(sourcesdk)
steamapps = os.path.dirname(os.path.dirname(sourcesdk))
if not os.path.isdir(steamapps):
raise Exception("Steamapps directory could not be found. Please specify using --steam-windows-path or see --help.")
if not username:
username = os.path.basename(os.path.dirname(sourcesdk))
else:
raise Exception("Unable to determine where your (Windows) Steam installation is located. See --help.")
steamapps = os.path.abspath(steamapps)
# Prepare some useful paths
gamedir = os.path.join(steamapps, game.get_game_dir(username))
mapsdir = os.path.join(gamedir, "maps")
# Get path to correct bin tools directory (game or SDK)
if game.uses_sdk:
if not sourcesdk:
# Try finding SDK within Steamapps
# TODO
raise Exception("Sorry, SDK games aren't implemented right now unless you're on Windows.")
toolsdir = os.path.join(sourcesdk, "bin", "orangebox", "bin")
else:
toolsdir = os.path.abspath(os.path.join(gamedir, "..", "bin"))
# Make sure gamedir path seems legit
if not os.path.isfile(os.path.join(gamedir, "gameinfo.txt")):
raise Exception("Game directory does not contain a gameinfo.txt: %s" % gamedir)
if WIN32 or CYGWIN:
# Convert some paths if using Cygwin
if CYGWIN:
gamedir = cygwin2dos(gamedir)
mappath = cygwin2dos(mappath)
# Change working directory first because VBSP is dumb
os.chdir(os.path.join(sourcesdk, 'bin', 'orangebox'))
# Run the SDK tools
vbsp_exe = os.path.join(toolsdir, "vbsp.exe")
code = subprocess.call([vbsp_exe, '-game', gamedir, mappath])
print("VBSP finished with status %s." % code)
if code == 1:
print("Looks like SteamService isn't working. Try reopening Steam.")
exit(code)
elif code == -11:
print("Looks like you might have gotten the 'material not found' " +
"error messages. Try signing into Steam, or restarting it " +
"and signing in.")
exit(code)
elif code != 0:
print("Looks like VBSP crashed, but I'm not sure why.")
exit(code)
vvis_exe = os.path.join(toolsdir, "vvis.exe")
opts = [vvis_exe]
if args.fast:
opts.append('-fast')
opts.extend(['-game', gamedir, mappath])
subprocess.call(opts)
vrad_exe = os.path.join(toolsdir, "vrad.exe")
opts = [vrad_exe]
if args.fast:
opts.extend(['-bounce', '2', '-noextra'])
if args.hdr:
opts.append('-both')
if args.hdr and args.final:
opts.append('-final')
opts.extend(['-game', gamedir, mappath])
subprocess.call(opts)
# Install the map to the game's map directory (unless --no-install)
if not args.no_install:
print("Copying map %s to %s" % (mapname, mapsdir))
shutil.copy(bsp_file, mapsdir)
else:
print("Not installing map")
# Launch the game (unless --no-run or --no-install)
if not args.no_run and not args.no_install:
params = urllib.parse.quote("-dev -console -allowdebug +map %s" % mapname)
run_url = "steam://run/%d//%s" % (game['id'], params)
print(run_url)
webbrowser.open(run_url)
if cygwin:
print("\nYou're running cygwin, so I can't launch the game for you.")
print("Double-click the URL above, right-click, and click 'Open'.")
print("Or paste the URL above into the Windows 'Run...' dialog.")
print("Or, just run 'map %s' in the in-game console." % mapname)
else:
print("Not launching game")
elif LINUX:
# Environment to use with wine calls
env = os.environ.copy()
env['WINEPREFIX'] = os.path.expanduser("~/.winesteam")
# Define path-converting helper function
def unix2wine(path):
return subprocess.check_output(["winepath", '-w', '%s' % path], env=env).strip()
# Wine-ify some of our paths
gamedir = unix2wine(gamedir)
mappath = unix2wine(mappath)
# Tell wine to look for DLLs here
#env['WINEDLLPATH'] = os.path.join(sourcesdk, "bin")
#print("WINEDLLPATH is as follows: ", env['WINEDLLPATH'])
# Use native maps directory instead of the Wine installation's
mapsdir = os.path.join('~', '.steam', 'steam', 'SteamApps', game.get_game_dir(username), "maps")
mapsdir = os.path.expanduser(mapsdir)
# Change working directory first because VBSP is dumb
#os.chdir(os.path.join(sourcesdk, 'bin', 'orangebox'))
print("Using -game dir: %s" % gamedir)
# We now need to set the VPROJECT env variable
env['VPROJECT'] = gamedir
# Run the SDK tools
vbsp_exe = os.path.join(toolsdir, "vbsp.exe")
code = subprocess.call(['wine', vbsp_exe, '-game', gamedir, mappath], env=env)
print("VBSP finished with status %s." % code)
# Handle various exit status codes VBPS may have returned
if code == 1:
print("\nLooks like VBSP crashed, possibly due to invalid geometry in the map. Check the output above.")
print("\It could also be related to SteamService isn't working. Try re(launching) wine's Steam:")
steambin = os.path.join(os.path.dirname(steamapps), 'steam.exe')
print('\nWINEPREFIX="%s" wine "%s" -no-dwrite' % (env['WINEPREFIX'], steambin))
exit(code)
elif code == -11:
print("\nLooks like you might have gotten the 'material not found' " +
"error messages. Try signing into Steam, or restarting it " +
"and signing in.")
exit(code)
elif code != 0:
print("\nLooks like VBSP crashed, but I'm not sure why.")
exit(code)
vvis_exe = os.path.join(toolsdir, "vvis.exe")
opts = ['wine', vvis_exe]
if args.fast:
opts.append('-fast')
opts.extend(['-game', gamedir, mappath])
code = subprocess.call(opts, env=env)
if code != 0:
print("\nLooks like VVIS crashed, but I'm not sure why.")
exit(code)
vrad_exe = os.path.join(toolsdir, "vrad.exe")
opts = ['wine', vrad_exe]
if args.fast:
opts.extend(['-bounce', '2', '-noextra'])
if args.hdr:
opts.append('-both')
if args.hdr and args.final:
opts.append('-final')
opts.extend(['-game', gamedir, mappath])
code = subprocess.call(opts, env=env)
if code != 0:
print("\nLooks like VRAD crashed, but I'm not sure why.")
exit(code)
# Install the map to the game's map directory (unless --no-install)
if not args.no_install:
shutil.copy(bsp_file, mapsdir)
else:
print("Not installing map")
# Launch the game (unless --no-run or --no-install)
if not args.no_run and not args.no_install:
params = urllib.parse.quote("-dev -console -allowdebug +map %s" % mapname)
run_url = "steam://run/%d//%s" % (game.id, params)
print(run_url)
webbrowser.open(run_url)
else:
print("Not launching game")
else:
raise OSError('Your OS is not supported yet!')
# Standard script entry-point guard: run main() only when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    main()
| |
from abc import ABCMeta, abstractmethod
from collections import Counter
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.utils.translation import ugettext as _
from corehq.apps.user_importer.helpers import spec_value_to_boolean_or_none
from corehq.apps.users.dbaccessors import get_existing_usernames
from dimagi.utils.chunked import chunked
from dimagi.utils.parsing import string_to_boolean
from corehq.apps.domain.forms import clean_password
from corehq.apps.enterprise.models import EnterprisePermissions
from corehq.apps.user_importer.exceptions import UserUploadError
from corehq.apps.users.forms import get_mobile_worker_max_username_length
from corehq.apps.users.util import normalize_username, raw_username
from corehq.util.workbook_json.excel import (
StringTypeRequiredError,
enforce_string_type,
)
def get_user_import_validators(domain_obj, all_specs, is_web_user_import, allowed_groups=None, allowed_roles=None,
                               allowed_profiles=None, upload_domain=None):
    """Assemble the list of row validators for a user import.

    Web-user and mobile-worker imports share a common core of checks;
    each import type then appends its own specific validators.

    :param domain_obj: the Domain object the users are imported into
    :param all_specs: every row spec in the upload (for duplicate detection)
    :param is_web_user_import: True for web users, False for mobile workers
    :param allowed_groups: group names rows may reference
    :param allowed_roles: role names rows may reference
    :param allowed_profiles: profile names rows may reference
    :param upload_domain: the domain the upload was initiated from
    :returns: list of ImportValidator instances to run on each row
    """
    domain = domain_obj.name
    validate_passwords = domain_obj.strong_mobile_passwords
    noop = NoopValidator(domain)
    common = [
        UsernameTypeValidator(domain),
        DuplicateValidator(domain, 'username', all_specs),
        UsernameLengthValidator(domain),
        CustomDataValidator(domain),
        EmailValidator(domain, 'email'),
        RoleValidator(domain, allowed_roles),
        ExistingUserValidator(domain, all_specs),
        TargetDomainValidator(upload_domain),
    ]
    if is_web_user_import:
        web_only = [
            RequiredWebFieldsValidator(domain),
            DuplicateValidator(domain, 'email', all_specs),
            # web-user usernames are email addresses
            EmailValidator(domain, 'username'),
        ]
        return common + web_only
    mobile_only = [
        UsernameValidator(domain),
        BooleanColumnValidator(domain, 'is_active'),
        BooleanColumnValidator(domain, 'is_account_confirmed'),
        BooleanColumnValidator(domain, 'send_confirmation_email'),
        RequiredFieldsValidator(domain),
        DuplicateValidator(domain, 'user_id', all_specs),
        # Password checks only apply when the domain enforces strong passwords.
        DuplicateValidator(domain, 'password', all_specs, is_password) if validate_passwords else noop,
        NewUserPasswordValidator(domain),
        PasswordValidator(domain) if validate_passwords else noop,
        GroupValidator(domain, allowed_groups),
        ProfileValidator(domain, allowed_profiles),
    ]
    return common + mobile_only
class ImportValidator(metaclass=ABCMeta):
    """Base class for a single-row import check.

    Subclasses implement :meth:`validate_spec`, returning an error message
    string when the row is invalid and a falsy value otherwise. Calling the
    validator raises ``UserUploadError`` when a message is returned.
    """
    # Default message; subclasses override (or compute it via a property).
    error_message = None

    def __init__(self, domain):
        self.domain = domain

    def __call__(self, spec):
        message = self.validate_spec(spec)
        if message:
            raise UserUploadError(message)

    @abstractmethod
    def validate_spec(self, spec):
        """Return an error message for an invalid spec, else None."""
        raise NotImplementedError
class NoopValidator(ImportValidator):
    """Placeholder validator that accepts every spec unconditionally."""

    def validate_spec(self, spec):
        # Returning None signals "no error".
        return None
class UsernameValidator(ImportValidator):
    """Reject usernames that fail mobile-worker username normalization."""
    error_message = _('username cannot contain spaces or symbols')

    def validate_spec(self, spec):
        raw = spec.get('username')
        if raw:
            try:
                normalize_username(str(raw), self.domain)
            except TypeError:
                # Non-normalizable types are reported by UsernameTypeValidator.
                pass
            except ValidationError:
                return self.error_message
class BooleanColumnValidator(ImportValidator):
    """Ensure a given spreadsheet column parses as a boolean."""
    _error_message = _("'{column_id}' column can only contain 'true' or 'false'")

    def __init__(self, domain, column_id):
        self.column_id = column_id
        super().__init__(domain)

    def validate_spec(self, spec):
        value = spec.get(self.column_id)
        # Only non-empty strings are checked; other types pass through.
        if not isinstance(value, str) or not value:
            return None
        try:
            string_to_boolean(value)
        except ValueError:
            return self.error_message

    @property
    def error_message(self):
        return self._error_message.format(column_id=self.column_id)
class RequiredFieldsValidator(ImportValidator):
    """Each mobile-worker row must identify the user by username or user_id."""
    error_message = _("One of 'username' or 'user_id' is required")

    def validate_spec(self, spec):
        has_identifier = spec.get('user_id') or spec.get('username')
        if not has_identifier:
            return self.error_message
class RequiredWebFieldsValidator(ImportValidator):
    """Web-user rows must always carry both a username and a role."""
    error_message = _("Upload of web users requires 'username' and 'role' for each user")

    def validate_spec(self, spec):
        has_username = bool(spec.get('username'))
        has_role = bool(spec.get('role'))
        if not (has_username and has_role):
            return self.error_message
class DuplicateValidator(ImportValidator):
    """Flag rows whose value in ``field`` appears more than once in the upload."""
    _error_message = _("'{field}' values must be unique")

    def __init__(self, domain, field, all_specs, check_function=None):
        """
        :param field: spec key whose values must be unique across the upload
        :param all_specs: every row spec, scanned up front for duplicates
        :param check_function: optional predicate; values it rejects are skipped
        """
        super().__init__(domain)
        self.field = field
        self.check_function = check_function
        self.duplicates = find_duplicates(all_specs, field)

    @property
    def error_message(self):
        return self._error_message.format(field=self.field)

    def validate_spec(self, row_spec):
        value = row_spec.get(self.field)
        if not value:
            return None
        if self.check_function and not self.check_function(value):
            return None
        return self.error_message if value in self.duplicates else None
def find_duplicates(specs, field):
    """Return the set of ``field`` values that occur in more than one spec.

    Specs missing the field contribute ``None``, so ``None`` is reported
    when two or more specs lack the field.
    """
    occurrences = Counter(spec.get(field) for spec in specs)
    return {value for value, count in occurrences.items() if count > 1}
class UsernameLengthValidator(ImportValidator):
    """Reject usernames longer than the domain's mobile-worker limit."""
    _error_message = _("username cannot contain greater than {length} characters")

    def __init__(self, domain, max_length=None):
        super().__init__(domain)
        # Falsy max_length falls back to the per-domain limit.
        self.max_username_length = max_length or get_mobile_worker_max_username_length(self.domain)

    @property
    def error_message(self):
        return self._error_message.format(length=self.max_username_length)

    def validate_spec(self, spec):
        username = spec.get('username')
        if not username:
            return None
        # Only the bare username (before any @domain suffix) counts.
        if len(raw_username(str(username))) > self.max_username_length:
            return self.error_message
class UsernameTypeValidator(ImportValidator):
    """Usernames must arrive as text cells, not numeric ones."""
    error_message = _("Username must be Text")

    def validate_spec(self, spec):
        username = spec.get('username')
        if username:
            try:
                enforce_string_type(username)
            except StringTypeRequiredError:
                return self.error_message
class NewUserPasswordValidator(ImportValidator):
    """Rows that create a brand-new user must supply a password."""
    error_message = _("New users must have a password set.")

    def validate_spec(self, spec):
        user_id = spec.get('user_id')
        password = spec.get('password')
        is_account_confirmed = spec_value_to_boolean_or_none(spec, 'is_account_confirmed')
        web_user = spec.get('web_user')
        # A password is NOT required for: existing users (user_id present),
        # web users, rows with a real password, or rows explicitly marked
        # is_account_confirmed=False (None is the default and does not exempt).
        exempt = (
            bool(user_id)
            or bool(web_user)
            or is_password(password)
            or is_account_confirmed is False
        )
        if not exempt:
            return self.error_message
class PasswordValidator(ImportValidator):
    """Run password-strength checks on real (non-masked) passwords."""

    def validate_spec(self, spec):
        password = spec.get('password')
        # Masked/empty values (e.g. '***' from a re-export) are not checked.
        if not is_password(password):
            return None
        try:
            clean_password(password)
        except ValidationError as e:
            return e.message
class CustomDataValidator(ImportValidator):
    """Validate a row's custom user data against the domain's field definitions."""

    def __init__(self, domain):
        super().__init__(domain)
        # Imported here rather than at module level — presumably to avoid a
        # circular import at load time; confirm before moving it.
        from corehq.apps.users.views.mobile.custom_data_fields import UserFieldsView
        self.custom_data_validator = UserFieldsView.get_validator(domain)

    def validate_spec(self, spec):
        custom_data = spec.get('data')
        if not custom_data:
            return None
        return self.custom_data_validator(custom_data)
class EmailValidator(ImportValidator):
    """Check that the given column, when present, holds a valid email address."""
    error_message = _("User has an invalid email address for their {}")

    def __init__(self, domain, column_id):
        super().__init__(domain)
        self.column_id = column_id

    def validate_spec(self, spec):
        address = spec.get(self.column_id)
        if not address:
            return None
        try:
            validate_email(address)
        except ValidationError:
            return self.error_message.format(self.column_id)
class RoleValidator(ImportValidator):
    """Check that the row's role is one of the roles allowed for this upload.

    :param allowed_roles: role names rows may reference; ``None`` means no
        roles are allowed, so any role in a row is reported as unknown.
    """
    error_message = _("Role '{}' does not exist")

    def __init__(self, domain, allowed_roles=None):
        super().__init__(domain)
        self.allowed_roles = allowed_roles

    def validate_spec(self, spec):
        role = spec.get('role')
        # Guard against allowed_roles=None (the default): a bare membership
        # test against None raises TypeError instead of a validation error.
        if role and role not in (self.allowed_roles or ()):
            return self.error_message.format(role)
class ProfileValidator(ImportValidator):
    """Check that the row's user_profile is one allowed for this upload.

    :param allowed_profiles: profile names rows may reference; ``None`` means
        no profiles are allowed, so any profile in a row is reported unknown.
    """
    error_message = _("Profile '{}' does not exist")

    def __init__(self, domain, allowed_profiles=None):
        super().__init__(domain)
        self.allowed_profiles = allowed_profiles

    def validate_spec(self, spec):
        profile = spec.get('user_profile')
        # Guard against allowed_profiles=None (the default): membership in
        # None would raise TypeError instead of returning a validation error.
        if profile and profile not in (self.allowed_profiles or ()):
            return self.error_message.format(profile)
class GroupValidator(ImportValidator):
    """Check that every group named in the row is known to this upload.

    :param allowed_groups: group names rows may reference; ``None`` means no
        groups are allowed, so any group in a row is reported as unknown.
    """
    error_message = _("Group '{}' does not exist (try adding it to your spreadsheet)")

    def __init__(self, domain, allowed_groups=None):
        super().__init__(domain)
        self.allowed_groups = allowed_groups

    def validate_spec(self, spec):
        group_names = list(map(str, spec.get('group') or []))
        # Guard against allowed_groups=None (the default): membership in None
        # would raise TypeError instead of returning a validation error.
        allowed = self.allowed_groups or ()
        for group_name in group_names:
            if group_name not in allowed:
                return self.error_message.format(group_name)
def is_password(password):
    """Return True if ``password`` is a real value rather than a mask.

    Re-exported uploads render existing passwords as runs of ``*``; such
    masked values — and empty/None values — do not count as passwords.
    """
    if not password:
        return False
    return any(ch != "*" for ch in str(password))
class ExistingUserValidator(ImportValidator):
    """Reject rows that would create a username that already exists.

    Rows carrying a ``user_id`` update an existing user, so only rows
    *without* one are checked against the database.
    """
    error_message = _("The username already belongs to a user. Specify an ID to update the user.")

    # NOTE(review): the parameter name 'all_sepcs' and the method name
    # 'get_exising_users' are typos, kept as-is for interface compatibility.
    def __init__(self, domain, all_sepcs):
        super().__init__(domain)
        self.all_specs = all_sepcs
        self.existing_usernames = self.get_exising_users()

    def get_exising_users(self):
        candidates = set()
        for row in self.all_specs:
            username = row.get('username')
            if row.get('user_id') or not username:
                continue
            try:
                candidates.add(normalize_username(username, self.domain))
            except ValidationError:
                # Malformed usernames are reported by other validators.
                pass
        existing = set()
        # Query in batches so each database lookup stays bounded.
        for batch in chunked(candidates, 500):
            existing.update(get_existing_usernames(batch))
        return existing

    def validate_spec(self, spec):
        try:
            username = normalize_username(spec.get('username'), self.domain)
        except ValidationError:
            return None
        if username in self.existing_usernames:
            return self.error_message
class TargetDomainValidator(ImportValidator):
    """A row targeting another domain must be covered by enterprise permissions."""
    error_message = _("Target domain {} does not use enterprise permissions of {}")

    def validate_spec(self, spec):
        target_domain = spec.get('domain')
        # Rows without a domain, or targeting the upload domain itself, pass.
        if not target_domain or target_domain == self.domain:
            return None
        if target_domain not in EnterprisePermissions.get_domains(self.domain):
            return self.error_message.format(target_domain, self.domain)
| |
# -*- coding: utf-8 -*-
from __future__ import with_statement
from cms.tests.menu_page_viewperm import ViewPermissionTests
from django.contrib.auth.models import User
class ViewPermissionComplexMenuStaffNodeTests(ViewPermissionTests):
    """
    Test CMS_PUBLIC_FOR=staff group access and menu nodes rendering
    """
    # With CMS_PUBLIC_FOR='staff', unrestricted pages are visible only to
    # staff users; per-group view restrictions then widen/narrow access.
    settings_overrides = {
        'CMS_PERMISSION': True,
        'CMS_PUBLIC_FOR': 'staff',
    }
    def test_public_pages_anonymous_norestrictions(self):
        """
        All pages are INVISIBLE to an anonymous user
        """
        all_pages = self._setup_tree_pages()
        granted = []
        self.assertGrantedVisibility(all_pages, granted)
    def test_public_menu_anonymous_user(self):
        """
        Anonymous sees nothing, as he is no staff
        """
        self._setup_user_groups()
        all_pages = self._setup_tree_pages()
        self._setup_view_restrictions()
        granted = []
        self.assertGrantedVisibility(all_pages, granted)
    def test_node_staff_access_page_and_children_group_1(self):
        """
        simulate behaviour of group b member
        group_b_ACCESS_PAGE_AND_CHILDREN to page_b
        staff user
        """
        self._setup_user_groups()
        all_pages = self._setup_tree_pages()
        self._setup_view_restrictions()
        granted = ['page_a',
            'page_b',
            'page_b_a',
            'page_b_b',
            'page_b_c',
            'page_b_d',
            'page_c',
            'page_d_a',
            'page_d_b',
            'page_d_c',
            'page_d_d',
            ]
        self.assertGrantedVisibility(all_pages, granted, username='user_1')
        # user 1 is member of group_b_access_page_and_children
        user = User.objects.get(username='user_1')
        urls = self.get_url_dict(all_pages)
        # call /
        self.assertViewAllowed(urls["/en/page_b/"], user)
        self.assertViewAllowed(urls["/en/page_b/page_b_a/"], user)
        self.assertViewAllowed(urls["/en/page_b/page_b_b/"], user)
        # grandchild of page_b is outside PAGE_AND_CHILDREN scope
        self.assertViewNotAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
        self.assertViewAllowed(urls["/en/page_c/"], user)
        self.assertViewAllowed(urls["/en/page_d/page_d_a/"], user)
    def test_node_staff_access_page_and_children_group_1_no_staff(self):
        """
        simulate behaviour of group b member
        group_b_ACCESS_PAGE_AND_CHILDREN to page_b
        no staff user
        """
        self._setup_user_groups()
        all_pages = self._setup_tree_pages()
        self._setup_view_restrictions()
        granted = [
            'page_b',
            'page_b_a',
            'page_b_b',
            'page_b_c',
            'page_b_d',
            ]
        self.assertGrantedVisibility(all_pages, granted, username='user_1_nostaff')
        user = User.objects.get(username='user_1_nostaff')
        urls = self.get_url_dict(all_pages)
        self.assertViewAllowed(urls["/en/page_b/page_b_a/"], user)
        self.assertViewAllowed(urls["/en/page_b/page_b_b/"], user)
        self.assertViewNotAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
        self.assertViewAllowed(urls["/en/page_b/page_b_c/"], user)
        self.assertViewAllowed(urls["/en/page_b/page_b_d/"], user)
        self.assertViewNotAllowed(urls["/en/page_c/"], user)
        self.assertViewNotAllowed(urls["/en/page_d/"], user)
        self.assertViewNotAllowed(urls["/en/page_d/page_d_a/"], user)
    def test_node_staff_access_children_group_2(self):
        """
        simulate behaviour of group 2 member
        GROUPNAME_2 = 'group_b_b_ACCESS_CHILDREN'
        to page_b_b and user is staff
        """
        self._setup_user_groups()
        all_pages = self._setup_tree_pages()
        self._setup_view_restrictions()
        granted = ['page_a',
            'page_b_b_a',
            'page_b_b_b',
            'page_b_b_c',
            'page_c',
            'page_d_a',
            'page_d_b',
            'page_d_c',
            'page_d_d',
            ]
        self.assertGrantedVisibility(all_pages, granted, username='user_2')
        user = User.objects.get(username='user_2')
        urls = self.get_url_dict(all_pages)
        self.assertViewNotAllowed(urls['/en/page_b/'], user)
        self.assertViewNotAllowed(urls['/en/page_b/page_b_b/'], user)
        self.assertViewAllowed(urls['/en/page_b/page_b_b/page_b_b_a/'], user)
        self.assertViewAllowed(urls['/en/page_b/page_b_b/page_b_b_b/'], user)
        # ACCESS_CHILDREN does not reach grandchildren of page_b_b
        self.assertViewNotAllowed(urls['/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/'], user)
        self.assertViewNotAllowed(urls['/en/page_d/'], user)
        self.assertViewAllowed(urls['/en/page_d/page_d_a/'], user)
    #
    def test_node_staff_access_children_group_2_nostaff(self):
        """
        simulate behaviour of group 2 member
        GROUPNAME_2 = 'group_b_b_ACCESS_CHILDREN'
        to page_b_b and user is no staff
        """
        self._setup_user_groups()
        all_pages = self._setup_tree_pages()
        self._setup_view_restrictions()
        granted = ['page_b_b_a',
            'page_b_b_b',
            'page_b_b_c',
            ]
        self.assertGrantedVisibility(all_pages, granted, username='user_2_nostaff')
        user = User.objects.get(username='user_2_nostaff')
        urls = self.get_url_dict(all_pages)
        # member of group that has access to this page
        self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
        self.assertViewNotAllowed(urls["/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/"], user)
        self.assertViewNotAllowed(urls["/en/page_b/"], user)
        self.assertViewNotAllowed(urls["/en/page_b/page_b_a/"], user)
        self.assertViewNotAllowed(urls["/en/page_b/page_b_b/"], user)
        self.assertViewNotAllowed(urls["/en/page_c/"], user)
        self.assertViewNotAllowed(urls["/en/page_d/"], user)
        self.assertViewNotAllowed(urls["/en/page_d/page_d_a/"], user)
    def test_node_staff_access_page_and_descendants_group_3(self):
        """
        simulate behaviour of group 3 member
        group_b_ACCESS_PAGE_AND_DESCENDANTS to page_b
        and user is staff
        """
        self._setup_user_groups()
        all_pages = self._setup_tree_pages()
        self._setup_view_restrictions()
        granted = ['page_a',
            'page_b',
            'page_b_a',
            'page_b_b',
            'page_b_b_a',
            'page_b_b_a_a',
            'page_b_b_b',
            'page_b_b_c',
            'page_b_c',
            'page_b_d',
            'page_b_d_a',
            'page_b_d_b',
            'page_b_d_c',
            'page_c',
            'page_d_a',
            'page_d_b',
            'page_d_c',
            'page_d_d',
            ]
        self.assertGrantedVisibility(all_pages, granted, username='user_3')
        user = User.objects.get(username='user_3')
        urls = self.get_url_dict(all_pages)
        url = self.get_pages_root()
        self.assertViewAllowed(urls[url], user)
        self.assertViewAllowed(urls["/en/page_b/"], user)
        self.assertViewAllowed(urls["/en/page_b/page_b_a/"], user)
        self.assertViewAllowed(urls["/en/page_b/page_b_b/"], user)
        self.assertViewAllowed(urls["/en/page_b/page_b_c/"], user)
        self.assertViewAllowed(urls["/en/page_b/page_b_d/"], user)
        self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
        self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/"], user)
        self.assertViewAllowed(urls["/en/page_c/"], user)
        self.assertViewNotAllowed(urls["/en/page_d/"], user)
        self.assertViewAllowed(urls["/en/page_d/page_d_a/"], user)
        self.assertViewAllowed(urls["/en/page_d/page_d_b/"], user)
        self.assertViewAllowed(urls["/en/page_d/page_d_c/"], user)
    def test_node_staff_access_page_and_descendants_group_3_nostaff(self):
        """
        simulate behaviour of group 3 member
        group_b_ACCESS_PAGE_AND_DESCENDANTS to page_b
        user is not staff
        """
        self._setup_user_groups()
        all_pages = self._setup_tree_pages()
        self._setup_view_restrictions()
        granted = ['page_b',
            'page_b_a',
            'page_b_b',
            'page_b_b_a',
            'page_b_b_a_a',
            'page_b_b_b',
            'page_b_b_c',
            'page_b_c',
            'page_b_d',
            'page_b_d_a',
            'page_b_d_b',
            'page_b_d_c',
            ]
        self.assertGrantedVisibility(all_pages, granted, username='user_3_nostaff')
        user = User.objects.get(username='user_3_nostaff')
        urls = self.get_url_dict(all_pages)
        # call /
        url = self.get_pages_root()
        self.assertViewNotAllowed(urls[url], user)
        self.assertViewAllowed(urls["/en/page_b/"], user)
        self.assertViewAllowed(urls["/en/page_b/page_b_a/"], user)
        self.assertViewAllowed(urls["/en/page_b/page_b_b/"], user)
        self.assertViewAllowed(urls["/en/page_b/page_b_c/"], user)
        self.assertViewAllowed(urls["/en/page_b/page_b_d/"], user)
        self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
        self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/"], user)
        self.assertViewNotAllowed(urls["/en/page_c/"], user)
        self.assertViewNotAllowed(urls["/en/page_d/"], user)
        self.assertViewNotAllowed(urls["/en/page_d/page_d_a/"], user)
        self.assertViewNotAllowed(urls["/en/page_d/page_d_b/"], user)
        self.assertViewNotAllowed(urls["/en/page_d/page_d_c/"], user)
    def test_node_staff_access_descendants_group_4(self):
        """
        simulate behaviour of group 4 member
        group_b_b_ACCESS_DESCENDANTS to page_b_b
        user is staff
        """
        self._setup_user_groups()
        all_pages = self._setup_tree_pages()
        self._setup_view_restrictions()
        granted = ['page_a',
            'page_b_b_a',
            'page_b_b_a_a',
            'page_b_b_b',
            'page_b_b_c',
            'page_c',
            'page_d_a',
            'page_d_b',
            'page_d_c',
            'page_d_d',
            ]
        self.assertGrantedVisibility(all_pages, granted, username='user_4')
        user = User.objects.get(username='user_4')
        urls = self.get_url_dict(all_pages)
        # call /
        url = self.get_pages_root()
        self.assertViewAllowed(urls[url], user)
        self.assertViewNotAllowed(urls["/en/page_b/"], user)
        self.assertViewNotAllowed(urls["/en/page_b/page_b_a/"], user)
        self.assertViewNotAllowed(urls["/en/page_b/page_b_b/"], user)
        self.assertViewNotAllowed(urls["/en/page_b/page_b_c/"], user)
        self.assertViewNotAllowed(urls["/en/page_b/page_b_d/"], user)
        # not a direct child
        self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
        self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/"], user)
        self.assertViewAllowed(urls["/en/page_c/"], user)
        self.assertViewNotAllowed(urls["/en/page_d/"], user)
        self.assertViewAllowed(urls["/en/page_d/page_d_a/"], user)
        self.assertViewAllowed(urls["/en/page_d/page_d_b/"], user)
        self.assertViewAllowed(urls["/en/page_d/page_d_c/"], user)
        self.assertViewAllowed(urls["/en/page_d/page_d_d/"], user)
    def test_node_staff_access_descendants_group_4_nostaff(self):
        """
        simulate behaviour of group 4 member
        group_b_b_ACCESS_DESCENDANTS to page_b_b
        user is no staff
        """
        self._setup_user_groups()
        all_pages = self._setup_tree_pages()
        self._setup_view_restrictions()
        granted = [
            'page_b_b_a',
            'page_b_b_a_a',
            'page_b_b_b',
            'page_b_b_c',
            ]
        self.assertGrantedVisibility(all_pages, granted, username='user_4_nostaff')
        user = User.objects.get(username='user_4_nostaff')
        urls = self.get_url_dict(all_pages)
        url = self.get_pages_root()
        self.assertViewNotAllowed(urls[url], user)
        self.assertViewNotAllowed(urls["/en/page_b/"], user)
        self.assertViewNotAllowed(urls["/en/page_b/page_b_a/"], user)
        self.assertViewNotAllowed(urls["/en/page_b/page_b_b/"], user)
        self.assertViewNotAllowed(urls["/en/page_b/page_b_c/"], user)
        self.assertViewNotAllowed(urls["/en/page_b/page_b_d/"], user)
        self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
        self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/"], user)
        self.assertViewNotAllowed(urls["/en/page_c/"], user)
        self.assertViewNotAllowed(urls["/en/page_d/"], user)
        self.assertViewNotAllowed(urls["/en/page_d/page_d_a/"], user)
        self.assertViewNotAllowed(urls["/en/page_d/page_d_b/"], user)
        self.assertViewNotAllowed(urls["/en/page_d/page_d_c/"], user)
        self.assertViewNotAllowed(urls["/en/page_d/page_d_d/"], user)
    def test_node_staff_access_page_group_5(self):
        """
        simulate behaviour of group b member
        group_d_ACCESS_PAGE to page_d
        user is staff
        """
        self._setup_user_groups()
        all_pages = self._setup_tree_pages()
        self._setup_view_restrictions()
        granted = ['page_a',
            'page_c',
            'page_d',
            'page_d_a',
            'page_d_b',
            'page_d_c',
            'page_d_d',
            ]
        self.assertGrantedVisibility(all_pages, granted, username='user_5')
        user = User.objects.get(username='user_5')
        urls = self.get_url_dict(all_pages)
        url = self.get_pages_root()
        self.assertViewAllowed(urls[url], user)
        self.assertViewNotAllowed(urls["/en/page_b/"], user)
        self.assertViewNotAllowed(urls["/en/page_b/page_b_a/"], user)
        self.assertViewNotAllowed(urls["/en/page_b/page_b_c/"], user)
        self.assertViewNotAllowed(urls["/en/page_b/page_b_d/"], user)
        self.assertViewNotAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
        self.assertViewNotAllowed(urls["/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/"], user)
        self.assertViewAllowed(urls["/en/page_c/"], user)
        self.assertViewAllowed(urls["/en/page_d/"], user)
        self.assertViewAllowed(urls["/en/page_d/page_d_a/"], user)
    def test_node_staff_access_page_group_5_nostaff(self):
        """
        simulate behaviour of group b member
        group_d_ACCESS_PAGE to page_d
        nostaff user
        """
        self._setup_user_groups()
        all_pages = self._setup_tree_pages()
        self._setup_view_restrictions()
        granted = ['page_d',]
        self.assertGrantedVisibility(all_pages, granted, username='user_5_nostaff')
        user = User.objects.get(username='user_5_nostaff')
        urls = self.get_url_dict(all_pages)
        url = self.get_pages_root()
        self.assertViewNotAllowed(urls[url], user)
        self.assertViewAllowed(urls["/en/page_d/"], user)
        self.assertViewNotAllowed(urls["/en/page_b/"], user)
        self.assertViewNotAllowed(urls["/en/page_b/page_b_a/"], user)
        self.assertViewNotAllowed(urls["/en/page_b/page_b_b/"], user)
        self.assertViewNotAllowed(urls["/en/page_b/page_b_c/"], user)
        self.assertViewNotAllowed(urls["/en/page_b/page_b_d/"], user)
        self.assertViewNotAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
        self.assertViewNotAllowed(urls["/en/page_c/"], user)
        # NOTE(review): duplicate of the "/en/page_d/" assertion above — harmless
        self.assertViewAllowed(urls["/en/page_d/"], user)
        self.assertViewNotAllowed(urls["/en/page_d/page_d_a/"], user)
        self.assertViewNotAllowed(urls["/en/page_d/page_d_b/"], user)
        self.assertViewNotAllowed(urls["/en/page_d/page_d_c/"], user)
        self.assertViewNotAllowed(urls["/en/page_d/page_d_d/"], user)
| |
from direct.distributed.DistributedNodeAI import DistributedNodeAI
from direct.distributed.ClockDelta import *
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from direct.fsm import StateData
from direct.distributed.ClockDelta import *
from direct.interval.IntervalGlobal import *
class DistributedFindFourAI(DistributedNodeAI):
def __init__(self, air, parent, name, x, y, z, h, p, r):
DistributedNodeAI.__init__(self, air)
self.name = name
self.air = air
self.setPos(x, y, z)
self.setHpr(h, p, r)
self.myPos = (x, y, z)
self.myHpr = (h, p, r)
self.board = [
[
0,
0,
0,
0,
0,
0,
0],
[
0,
0,
0,
0,
0,
0,
0],
[
0,
0,
0,
0,
0,
0,
0],
[
0,
0,
0,
0,
0,
0,
0],
[
0,
0,
0,
0,
0,
0,
0],
[
0,
0,
0,
0,
0,
0,
0]]
self._parent = self.air.doId2do[parent]
self.parentDo = parent
self.wantStart = []
self.playersPlaying = []
self.playersSitting = 0
self.playersTurn = 1
self.movesMade = 0
self.playerNum = 1
self.winDirection = None
self.playersGamePos = [
None,
None]
self.wantTimer = True
self.timerEnd = 0
self.turnEnd = 0
self.playersObserving = []
self.winLaffPoints = 20
self.movesRequiredToWin = 10
self.zoneId = self.air.allocateZone()
self.generateOtpObject(air.districtId, self.zoneId, optionalFields = [
'setX',
'setY',
'setZ',
'setH',
'setP',
'setR'])
self._parent.setCheckersZoneId(self.zoneId)
self.timerStart = None
self.fsm = ClassicFSM.ClassicFSM('Checkers', [
State.State('waitingToBegin', self.enterWaitingToBegin, self.exitWaitingToBegin, [
'playing']),
State.State('playing', self.enterPlaying, self.exitPlaying, [
'gameOver']),
State.State('gameOver', self.enterGameOver, self.exitGameOver, [
'waitingToBegin'])], 'waitingToBegin', 'waitingToBegin')
self.fsm.enterInitialState()
def announceGenerate(self):
self._parent.setGameDoId(self.doId)
def getTableDoId(self):
return self.parentDo
def delete(self):
self.fsm.requestFinalState()
self._parent = None
self.parentDo = None
del self.board
del self.fsm
DistributedNodeAI.delete(self)
def informGameOfPlayer(self):
self.playersSitting += 1
if self.playersSitting < 2:
self.timerEnd = 0
elif self.playersSitting == 2:
self.timerEnd = globalClock.getRealTime() + 20
self._parent.isAccepting = False
self._parent.sendUpdate('setIsPlaying', [
1])
elif self.playersSitting > 2:
pass
self.sendUpdate('setTimer', [
globalClockDelta.localToNetworkTime(self.timerEnd)])
def informGameOfPlayerLeave(self):
self.playersSitting -= 1
if self.playersSitting < 2 and self.fsm.getCurrentState().getName() == 'waitingToBegin':
self.timerEnd = 0
self._parent.isAccepting = True
self._parent.sendUpdate('setIsPlaying', [
0])
if self.playersSitting > 2 and self.fsm.getCurrentState().getName() == 'waitingToBegin':
pass
1
self.timerEnd = 0
if self.timerEnd != 0:
self.sendUpdate('setTimer', [
globalClockDelta.localToNetworkTime(self.timerEnd)])
else:
self.sendUpdate('setTimer', [
0])
def setGameCountdownTime(self):
self.timerEnd = globalClock.getRealTime() + 10
def setTurnCountdownTime(self):
self.turnEnd = globalClock.getRealTime() + 25
def getTimer(self):
if self.timerEnd != 0:
return 0
else:
return 0
def getTurnTimer(self):
return globalClockDelta.localToNetworkTime(self.turnEnd)
def requestTimer(self):
avId = self.air.getAvatarIdFromSender()
self.sendUpdateToAvatarId(avId, 'setTimer', [
globalClockDelta.localToNetworkTime(self.timerEnd)])
def handlePlayerExit(self, avId):
if avId in self.wantStart:
self.wantStart.remove(avId)
if self.fsm.getCurrentState().getName() == 'playing':
gamePos = self.playersGamePos.index(avId)
self.playersGamePos[gamePos] = None
self.fsm.request('gameOver')
def handleEmptyGame(self):
self.movesMade = 0
self.playersPlaying = []
self.playersTurn = 1
self.playerNum = 1
self.fsm.request('waitingToBegin')
self._parent.isAccepting = True
def requestWin(self, pieceNum):
avId = self.air.getAvatarIdFromSender()
playerNum = self.playersGamePos.index(avId) + 1
x = pieceNum[0]
y = pieceNum[1]
if self.checkWin(x, y, playerNum) == True:
self.sendUpdate('announceWinnerPosition', [
x,
y,
self.winDirection,
playerNum])
winnersSequence = Sequence(Wait(5.0), Func(self.fsm.request, 'gameOver'), Func(self._parent.announceWinner, 'Find Four', avId))
winnersSequence.start()
else:
self.sendUpdateToAvatarId(avId, 'illegalMove', [])
def distributeLaffPoints(self):
for x in self._parent.seats:
if x != None:
av = self.air.doId2do.get(x)
av.toonUp(self.winLaffPoints)
continue
def enterWaitingToBegin(self):
self.setGameCountdownTime()
self._parent.isAccepting = True
def exitWaitingToBegin(self):
self.turnEnd = 0
def enterPlaying(self):
self._parent.isAccepting = False
for x in self.playersGamePos:
if x != None:
self.playersTurn = self.playersGamePos.index(x)
self.d_sendTurn(self.playersTurn + 1)
break
continue
self.setTurnCountdownTime()
self.sendUpdate('setTurnTimer', [
globalClockDelta.localToNetworkTime(self.turnEnd)])
def exitPlaying(self):
pass
def enterGameOver(self):
self.timerEnd = 0
isAccepting = True
self._parent.handleGameOver()
self.playersObserving = []
self.playersTurn = 1
self.playerNum = 1
self.playersPlaying = []
self.movesMade = 0
self.playersGamePos = [
None,
None]
self._parent.isAccepting = True
self.fsm.request('waitingToBegin')
def exitGameOver(self):
pass
def requestBegin(self):
avId = self.air.getAvatarIdFromSender()
if avId not in self.wantStart:
self.wantStart.append(avId)
numPlayers = 0
for x in self._parent.seats:
if x != None:
numPlayers = numPlayers + 1
continue
if len(self.wantStart) == numPlayers and numPlayers >= 2:
self.d_gameStart(avId)
self._parent.sendIsPlaying()
def d_gameStart(self, avId):
for x in self.playersObserving:
self.sendUpdateToAvatarId(x, 'gameStart', [
255])
zz = 0
numPlayers = 0
for x in self._parent.seats:
if x != None:
numPlayers += 1
self.playersPlaying.append(x)
continue
if numPlayers == 2:
player1 = self.playersPlaying[0]
self.sendUpdateToAvatarId(player1, 'gameStart', [
1])
self.playersGamePos[0] = player1
player2 = self.playersPlaying[1]
self.sendUpdateToAvatarId(player2, 'gameStart', [
2])
self.playersGamePos[1] = player2
self.wantStart = []
self.fsm.request('playing')
self._parent.getTableState()
def d_sendTurn(self, playersTurn):
self.sendUpdate('sendTurn', [
playersTurn])
def advancePlayerTurn(self):
if self.playersTurn == 0:
self.playersTurn = 1
self.playerNum = 2
else:
self.playerNum = 1
self.playersTurn = 0
def requestMove(self, moveColumn):
avId = self.air.getAvatarIdFromSender()
turn = self.playersTurn
if avId in self.playersGamePos:
if self.playersGamePos.index(avId) != self.playersTurn:
pass
if self.board[0][moveColumn] != 0:
self.sendUpdateToAvatarId(avId, 'illegalMove', [])
for x in range(6):
if self.board[x][moveColumn] == 0:
movePos = x
continue
self.board[movePos][moveColumn] = self.playersTurn + 1
if self.checkForTie() == True:
self.sendUpdate('setGameState', [
self.board,
moveColumn,
movePos,
turn])
self.sendUpdate('tie', [])
winnersSequence = Sequence(Wait(8.0), Func(self.fsm.request, 'gameOver'))
winnersSequence.start()
return None
self.movesMade += 1
self.advancePlayerTurn()
self.setTurnCountdownTime()
self.sendUpdate('setTurnTimer', [
globalClockDelta.localToNetworkTime(self.turnEnd)])
self.d_sendTurn(self.playersTurn + 1)
self.sendUpdate('setGameState', [
self.board,
moveColumn,
movePos,
turn])
def checkForTie(self):
for x in range(7):
if self.board[0][x] == 0:
return False
continue
return True
def getState(self):
return self.fsm.getCurrentState().getName()
def getName(self):
return self.name
def getGameState(self):
return [
self.board,
0,
0,
0]
    def clearBoard(self):
        """Reset every square of the board to the empty state (0)."""
        # NOTE(review): everywhere else in this chunk ``self.board`` is a
        # plain list of row lists, which has no ``squareList`` attribute.
        # This method appears to expect a board *object* (as in the checkers
        # games) -- verify which representation is live when this runs.
        for x in self.board.squareList:
            x.setState(0)
def getPosHpr(self):
return self.posHpr
def tempSetBoardState(self):
self.board = [
[
0,
0,
0,
0,
0,
0,
0],
[
1,
2,
1,
2,
2,
2,
1],
[
2,
2,
1,
2,
1,
2,
1],
[
2,
1,
1,
2,
2,
1,
2],
[
1,
2,
2,
1,
2,
1,
1],
[
1,
2,
1,
2,
1,
2,
1]]
self.sendUpdate('setGameState', [
self.board,
0,
0,
1])
def checkWin(self, rVal, cVal, playerNum):
if self.checkHorizontal(rVal, cVal, playerNum) == True:
self.winDirection = 0
return True
elif self.checkVertical(rVal, cVal, playerNum) == True:
self.winDirection = 1
return True
elif self.checkDiagonal(rVal, cVal, playerNum) == True:
self.winDirection = 2
return True
else:
self.winDirection = None
return False
def checkHorizontal(self, rVal, cVal, playerNum):
if cVal == 3:
for x in range(1, 4):
if self.board[rVal][cVal - x] != playerNum:
break
if self.board[rVal][cVal - x] == playerNum and x == 3:
return True
continue
for x in range(1, 4):
if self.board[rVal][cVal + x] != playerNum:
break
if self.board[rVal][cVal + x] == playerNum and x == 3:
return True
continue
return False
elif cVal == 2:
for x in range(1, 4):
if self.board[rVal][cVal + x] != playerNum:
break
if self.board[rVal][cVal + x] == playerNum and x == 3:
return True
continue
return False
elif cVal == 4:
for x in range(1, 4):
if self.board[rVal][cVal - x] != playerNum:
break
if self.board[rVal][cVal - x] == playerNum and x == 3:
return True
continue
return False
else:
return False
def checkVertical(self, rVal, cVal, playerNum):
if rVal == 2:
for x in range(1, 4):
if self.board[rVal + x][cVal] != playerNum:
break
if self.board[rVal + x][cVal] == playerNum and x == 3:
return True
continue
return False
elif rVal == 3:
for x in range(1, 4):
if self.board[rVal - x][cVal] != playerNum:
break
if self.board[rVal - x][cVal] == playerNum and x == 3:
return True
continue
return False
else:
return False
def checkDiagonal(self, rVal, cVal, playerNum):
if cVal <= 2:
if rVal == 2:
for x in range(1, 4):
if self.board[rVal + x][cVal + x] != playerNum:
break
if self.board[rVal + x][cVal + x] == playerNum and x == 3:
return True
continue
return False
elif rVal == 3:
for x in range(1, 4):
if self.board[rVal - x][cVal + x] != playerNum:
break
if self.board[rVal - x][cVal + x] == playerNum and x == 3:
return True
continue
return False
elif cVal >= 4:
if rVal == 2:
for x in range(1, 4):
if self.board[rVal + x][cVal - x] != playerNum:
break
if self.board[rVal + x][cVal - x] == playerNum and x == 3:
return True
continue
return False
elif rVal == 3:
for x in range(1, 4):
if self.board[rVal - x][cVal - x] != playerNum:
break
if self.board[rVal - x][cVal - x] == playerNum and x == 3:
return True
continue
return False
elif rVal == 3 and rVal == 4 or rVal == 5:
for x in range(1, 4):
if self.board[rVal - x][cVal - x] != playerNum:
break
if self.board[rVal - x][cVal - x] == playerNum and x == 3:
return True
continue
for x in range(1, 4):
if self.board[rVal + x][cVal - x] != playerNum:
break
if self.board[rVal + x][cVal - x] == playerNum and x == 3:
return True
continue
return False
elif rVal == 0 and rVal == 1 or rVal == 2:
for x in range(1, 4):
if self.board[rVal + x][cVal - x] != playerNum:
break
if self.board[rVal + x][cVal - x] == playerNum and x == 3:
return True
continue
for x in range(1, 4):
if self.board[rVal + x][cVal + x] != playerNum:
break
if self.board[rVal + x][cVal + x] == playerNum and x == 3:
return True
continue
return False
return False
| |
"""
SQLite3 backend for the sqlite3 module in the standard library.
"""
import decimal
import re
import warnings
from sqlite3 import dbapi2 as Database
import pytz
from django.core.exceptions import ImproperlyConfigured
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.utils import timezone
from django.utils.dateparse import (
parse_date, parse_datetime, parse_duration, parse_time,
)
from django.utils.encoding import force_text
from .client import DatabaseClient # isort:skip
from .creation import DatabaseCreation # isort:skip
from .features import DatabaseFeatures # isort:skip
from .introspection import DatabaseIntrospection # isort:skip
from .operations import DatabaseOperations # isort:skip
from .schema import DatabaseSchemaEditor # isort:skip
def decoder(conv_func):
    """
    Wrap *conv_func* so it accepts the bytestrings sqlite3 hands back:
    the bytes are decoded to str before the real conversion runs.
    """
    def convert(value):
        return conv_func(value.decode())
    return convert
# Converters map declared SQLite column types back to Python values on
# read; the adapter serializes Decimal to a storable form on write.
Database.register_converter("bool", decoder(lambda s: s == '1'))
Database.register_converter("time", decoder(parse_time))
Database.register_converter("date", decoder(parse_date))
Database.register_converter("datetime", decoder(parse_datetime))
Database.register_converter("timestamp", decoder(parse_datetime))
Database.register_converter("TIMESTAMP", decoder(parse_datetime))
Database.register_converter("decimal", decoder(backend_utils.typecast_decimal))
Database.register_adapter(decimal.Decimal, backend_utils.rev_typecast_decimal)
class DatabaseWrapper(BaseDatabaseWrapper):
    """SQLite backend wrapper: field-type mapping, lookup operators,
    connection setup and transaction handling for Django's sqlite3 backend.
    """
    vendor = 'sqlite'
    # SQLite doesn't actually support most of these types, but it "does the right
    # thing" given more verbose field definitions, so leave them as is so that
    # schema inspection is more useful.
    data_types = {
        'AutoField': 'integer',
        'BigAutoField': 'integer',
        'BinaryField': 'BLOB',
        'BooleanField': 'bool',
        'CharField': 'varchar(%(max_length)s)',
        'DateField': 'date',
        'DateTimeField': 'datetime',
        'DecimalField': 'decimal',
        'DurationField': 'bigint',
        'FileField': 'varchar(%(max_length)s)',
        'FilePathField': 'varchar(%(max_length)s)',
        'FloatField': 'real',
        'IntegerField': 'integer',
        'BigIntegerField': 'bigint',
        'IPAddressField': 'char(15)',
        'GenericIPAddressField': 'char(39)',
        'NullBooleanField': 'bool',
        'OneToOneField': 'integer',
        'PositiveIntegerField': 'integer unsigned',
        'PositiveSmallIntegerField': 'smallint unsigned',
        'SlugField': 'varchar(%(max_length)s)',
        'SmallIntegerField': 'smallint',
        'TextField': 'text',
        'TimeField': 'time',
        'UUIDField': 'char(32)',
    }
    data_types_suffix = {
        'AutoField': 'AUTOINCREMENT',
        'BigAutoField': 'AUTOINCREMENT',
    }
    # SQLite requires LIKE statements to include an ESCAPE clause if the value
    # being escaped has a percent or underscore in it.
    # See http://www.sqlite.org/lang_expr.html for an explanation.
    operators = {
        'exact': '= %s',
        'iexact': "LIKE %s ESCAPE '\\'",
        'contains': "LIKE %s ESCAPE '\\'",
        'icontains': "LIKE %s ESCAPE '\\'",
        'regex': 'REGEXP %s',
        'iregex': "REGEXP '(?i)' || %s",
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': "LIKE %s ESCAPE '\\'",
        'endswith': "LIKE %s ESCAPE '\\'",
        'istartswith': "LIKE %s ESCAPE '\\'",
        'iendswith': "LIKE %s ESCAPE '\\'",
    }
    # The patterns below are used to generate SQL pattern lookup clauses when
    # the right-hand side of the lookup isn't a raw string (it might be an expression
    # or the result of a bilateral transformation).
    # In those cases, special characters for LIKE operators (e.g. \, *, _) should be
    # escaped on database side.
    #
    # Note: we use str.format() here for readability as '%' is used as a wildcard for
    # the LIKE operator.
    pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
    pattern_ops = {
        'contains': r"LIKE '%%' || {} || '%%' ESCAPE '\'",
        'icontains': r"LIKE '%%' || UPPER({}) || '%%' ESCAPE '\'",
        'startswith': r"LIKE {} || '%%' ESCAPE '\'",
        'istartswith': r"LIKE UPPER({}) || '%%' ESCAPE '\'",
        'endswith': r"LIKE '%%' || {} ESCAPE '\'",
        'iendswith': r"LIKE '%%' || UPPER({}) ESCAPE '\'",
    }
    Database = Database
    SchemaEditorClass = DatabaseSchemaEditor
    # Classes instantiated in __init__().
    client_class = DatabaseClient
    creation_class = DatabaseCreation
    features_class = DatabaseFeatures
    introspection_class = DatabaseIntrospection
    ops_class = DatabaseOperations
    def get_connection_params(self):
        """Build the kwargs passed to ``sqlite3.connect()`` from settings."""
        settings_dict = self.settings_dict
        if not settings_dict['NAME']:
            raise ImproperlyConfigured(
                "settings.DATABASES is improperly configured. "
                "Please supply the NAME value.")
        kwargs = {
            'database': settings_dict['NAME'],
            # Enable the registered type converters (declared column types
            # and "colname [type]" column aliases).
            'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
        }
        kwargs.update(settings_dict['OPTIONS'])
        # Always allow the underlying SQLite connection to be shareable
        # between multiple threads. The safe-guarding will be handled at a
        # higher level by the `BaseDatabaseWrapper.allow_thread_sharing`
        # property. This is necessary as the shareability is disabled by
        # default in pysqlite and it cannot be changed once a connection is
        # opened.
        if 'check_same_thread' in kwargs and kwargs['check_same_thread']:
            warnings.warn(
                'The `check_same_thread` option was provided and set to '
                'True. It will be overridden with False. Use the '
                '`DatabaseWrapper.allow_thread_sharing` property instead '
                'for controlling thread shareability.',
                RuntimeWarning
            )
        kwargs.update({'check_same_thread': False})
        if self.features.can_share_in_memory_db:
            kwargs.update({'uri': True})
        return kwargs
    def get_new_connection(self, conn_params):
        """Open a connection and register the custom SQL functions Django's
        SQL generation relies on (date/time math, REGEXP, POWER)."""
        conn = Database.connect(**conn_params)
        conn.create_function("django_date_extract", 2, _sqlite_date_extract)
        conn.create_function("django_date_trunc", 2, _sqlite_date_trunc)
        conn.create_function("django_datetime_cast_date", 2, _sqlite_datetime_cast_date)
        conn.create_function("django_datetime_cast_time", 2, _sqlite_datetime_cast_time)
        conn.create_function("django_datetime_extract", 3, _sqlite_datetime_extract)
        conn.create_function("django_datetime_trunc", 3, _sqlite_datetime_trunc)
        conn.create_function("django_time_extract", 2, _sqlite_time_extract)
        conn.create_function("django_time_trunc", 2, _sqlite_time_trunc)
        conn.create_function("django_time_diff", 2, _sqlite_time_diff)
        conn.create_function("django_timestamp_diff", 2, _sqlite_timestamp_diff)
        conn.create_function("regexp", 2, _sqlite_regexp)
        conn.create_function("django_format_dtdelta", 3, _sqlite_format_dtdelta)
        conn.create_function("django_power", 2, _sqlite_power)
        return conn
    def init_connection_state(self):
        """SQLite needs no per-connection initialization."""
        pass
    def create_cursor(self, name=None):
        """Return a cursor that rewrites '%s' placeholders to '?'."""
        return self.connection.cursor(factory=SQLiteCursorWrapper)
    def close(self):
        self.validate_thread_sharing()
        # If database is in memory, closing the connection destroys the
        # database. To prevent accidental data loss, ignore close requests on
        # an in-memory db.
        if not self.is_in_memory_db():
            BaseDatabaseWrapper.close(self)
    def _savepoint_allowed(self):
        # Two conditions are required here:
        # - A sufficiently recent version of SQLite to support savepoints,
        # - Being in a transaction, which can only happen inside 'atomic'.
        # When 'isolation_level' is not None, sqlite3 commits before each
        # savepoint; it's a bug. When it is None, savepoints don't make sense
        # because autocommit is enabled. The only exception is inside 'atomic'
        # blocks. To work around that bug, on SQLite, 'atomic' starts a
        # transaction explicitly rather than simply disable autocommit.
        return self.features.uses_savepoints and self.in_atomic_block
    def _set_autocommit(self, autocommit):
        """Toggle autocommit by adjusting sqlite3's isolation_level."""
        if autocommit:
            level = None
        else:
            # sqlite3's internal default is ''. It's different from None.
            # See Modules/_sqlite/connection.c.
            level = ''
        # 'isolation_level' is a misleading API.
        # SQLite always runs at the SERIALIZABLE isolation level.
        with self.wrap_database_errors:
            self.connection.isolation_level = level
    def check_constraints(self, table_names=None):
        """
        Check each table name in `table_names` for rows with invalid foreign
        key references. This method is intended to be used in conjunction with
        `disable_constraint_checking()` and `enable_constraint_checking()`, to
        determine if rows with invalid references were entered while constraint
        checks were off.
        Raise an IntegrityError on the first invalid foreign key reference
        encountered (if any) and provide detailed information about the
        invalid reference in the error message.
        Backends can override this method if they can more directly apply
        constraint checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE")
        """
        cursor = self.cursor()
        if table_names is None:
            table_names = self.introspection.table_names(cursor)
        for table_name in table_names:
            primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
            if not primary_key_column_name:
                continue
            key_columns = self.introspection.get_key_columns(cursor, table_name)
            for column_name, referenced_table_name, referenced_column_name in key_columns:
                # A row is invalid when its FK column is non-NULL but the
                # LEFT JOIN finds no matching referenced row.
                cursor.execute(
                    """
                    SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
                    LEFT JOIN `%s` as REFERRED
                    ON (REFERRING.`%s` = REFERRED.`%s`)
                    WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL
                    """
                    % (
                        primary_key_column_name, column_name, table_name,
                        referenced_table_name, column_name, referenced_column_name,
                        column_name, referenced_column_name,
                    )
                )
                for bad_row in cursor.fetchall():
                    raise utils.IntegrityError(
                        "The row in table '%s' with primary key '%s' has an "
                        "invalid foreign key: %s.%s contains a value '%s' that "
                        "does not have a corresponding value in %s.%s." % (
                            table_name, bad_row[0], table_name, column_name,
                            bad_row[1], referenced_table_name, referenced_column_name,
                        )
                    )
    def is_usable(self):
        """A local SQLite connection cannot go stale; always usable."""
        return True
    def _start_transaction_under_autocommit(self):
        """
        Start a transaction explicitly in autocommit mode.
        Staying in autocommit mode works around a bug of sqlite3 that breaks
        savepoints when autocommit is disabled.
        """
        self.cursor().execute("BEGIN")
    def is_in_memory_db(self):
        """Return True when this connection targets an in-memory database."""
        return self.creation.is_in_memory_db(self.settings_dict['NAME'])
# Matches '%s' placeholders that are not escaped as '%%s' (negative lookbehind).
FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s')
class SQLiteCursorWrapper(Database.Cursor):
    """
    Django uses "format" style placeholders, but pysqlite2 uses "qmark" style.
    This fixes it -- but note that if you want to use a literal "%s" in a query,
    you'll need to use "%%s".
    """
    def execute(self, query, params=None):
        # A parameterless query may legitimately contain '%' (e.g. LIKE
        # patterns), so only rewrite placeholders when params are given.
        if params is None:
            return Database.Cursor.execute(self, query)
        return Database.Cursor.execute(self, self.convert_query(query), params)
    def executemany(self, query, param_list):
        return Database.Cursor.executemany(self, self.convert_query(query), param_list)
    def convert_query(self, query):
        # Swap unescaped '%s' -> '?', then collapse the '%%' escape to '%'.
        converted = FORMAT_QMARK_REGEX.sub('?', query)
        return converted.replace('%%', '%')
def _sqlite_date_extract(lookup_type, dt):
    """Custom SQLite function: extract *lookup_type* from a date string."""
    if dt is None:
        return None
    try:
        dt = backend_utils.typecast_timestamp(dt)
    except (TypeError, ValueError):
        return None
    if lookup_type == 'week_day':
        # Django's week_day runs 1 (Sunday) through 7 (Saturday).
        return dt.isoweekday() % 7 + 1
    if lookup_type == 'week':
        return dt.isocalendar()[1]
    return getattr(dt, lookup_type)
def _sqlite_date_trunc(lookup_type, dt):
    """Custom SQLite function: truncate a date string to year/month/day.

    Returns None for unparsable input or an unknown lookup type.
    """
    try:
        dt = backend_utils.typecast_timestamp(dt)
    except (TypeError, ValueError):
        return None
    truncated = {
        'year': "%i-01-01" % (dt.year,),
        'month': "%i-%02i-01" % (dt.year, dt.month),
        'day': "%i-%02i-%02i" % (dt.year, dt.month, dt.day),
    }
    return truncated.get(lookup_type)
def _sqlite_time_trunc(lookup_type, dt):
    """Custom SQLite function: truncate a time string to hour/minute/second.

    Returns None for unparsable input or an unknown lookup type.
    """
    try:
        dt = backend_utils.typecast_time(dt)
    except (TypeError, ValueError):
        return None
    truncated = {
        'hour': "%02i:00:00" % (dt.hour,),
        'minute': "%02i:%02i:00" % (dt.hour, dt.minute),
        'second': "%02i:%02i:%02i" % (dt.hour, dt.minute, dt.second),
    }
    return truncated.get(lookup_type)
def _sqlite_datetime_parse(dt, tzname):
    """Parse a datetime string and, when *tzname* is given, convert the
    result into that timezone.  Returns None on bad input."""
    if dt is None:
        return None
    try:
        parsed = backend_utils.typecast_timestamp(dt)
    except (TypeError, ValueError):
        return None
    if tzname is None:
        return parsed
    return timezone.localtime(parsed, pytz.timezone(tzname))
def _sqlite_datetime_cast_date(dt, tzname):
    """Custom SQLite function: cast a datetime to an ISO date string."""
    parsed = _sqlite_datetime_parse(dt, tzname)
    return None if parsed is None else parsed.date().isoformat()
def _sqlite_datetime_cast_time(dt, tzname):
    """Custom SQLite function: cast a datetime to an ISO time string."""
    parsed = _sqlite_datetime_parse(dt, tzname)
    return None if parsed is None else parsed.time().isoformat()
def _sqlite_datetime_extract(lookup_type, dt, tzname):
    """Custom SQLite function: extract a component from a (possibly
    timezone-converted) datetime string."""
    dt = _sqlite_datetime_parse(dt, tzname)
    if dt is None:
        return None
    if lookup_type == 'week_day':
        # Django's week_day runs 1 (Sunday) through 7 (Saturday).
        return dt.isoweekday() % 7 + 1
    if lookup_type == 'week':
        return dt.isocalendar()[1]
    return getattr(dt, lookup_type)
def _sqlite_datetime_trunc(lookup_type, dt, tzname):
    """Custom SQLite function: truncate a datetime string to the given
    precision (year/month/day/hour/minute/second).

    Returns None for unparsable input or an unknown lookup type.
    """
    dt = _sqlite_datetime_parse(dt, tzname)
    if dt is None:
        return None
    truncated = {
        'year': "%i-01-01 00:00:00" % (dt.year,),
        'month': "%i-%02i-01 00:00:00" % (dt.year, dt.month),
        'day': "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day),
        'hour': "%i-%02i-%02i %02i:00:00" % (dt.year, dt.month, dt.day, dt.hour),
        'minute': "%i-%02i-%02i %02i:%02i:00" % (dt.year, dt.month, dt.day, dt.hour, dt.minute),
        'second': "%i-%02i-%02i %02i:%02i:%02i" % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second),
    }
    return truncated.get(lookup_type)
def _sqlite_time_extract(lookup_type, dt):
    """Custom SQLite function: extract an attribute from a time string."""
    if dt is None:
        return None
    try:
        parsed = backend_utils.typecast_time(dt)
    except (TypeError, ValueError):
        return None
    return getattr(parsed, lookup_type)
def _sqlite_format_dtdelta(conn, lhs, rhs):
    """
    LHS and RHS can be either:
    - An integer number of microseconds
    - A string representing a timedelta object
    - A string representing a datetime
    """
    try:
        if isinstance(lhs, int):
            # Microseconds -> fractional-seconds string so parse_duration
            # below can handle it.
            lhs = str(decimal.Decimal(lhs) / decimal.Decimal(1000000))
        real_lhs = parse_duration(lhs)
        if real_lhs is None:
            # Not a duration string: treat it as a datetime instead.
            real_lhs = backend_utils.typecast_timestamp(lhs)
        if isinstance(rhs, int):
            rhs = str(decimal.Decimal(rhs) / decimal.Decimal(1000000))
        real_rhs = parse_duration(rhs)
        if real_rhs is None:
            real_rhs = backend_utils.typecast_timestamp(rhs)
        # Despite the name, 'conn' is the SQL operator string ('+'/'-'),
        # not a connection object.
        if conn.strip() == '+':
            out = real_lhs + real_rhs
        else:
            out = real_lhs - real_rhs
    except (ValueError, TypeError):
        return None
    # typecast_timestamp returns a date or a datetime without timezone.
    # It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]"
    return str(out)
def _sqlite_time_diff(lhs, rhs):
    """Custom SQLite function: difference of two times, in microseconds."""
    def as_microseconds(value):
        t = backend_utils.typecast_time(value)
        return ((t.hour * 60 + t.minute) * 60 + t.second) * 1000000 + t.microsecond
    return as_microseconds(lhs) - as_microseconds(rhs)
def _sqlite_timestamp_diff(lhs, rhs):
    """Custom SQLite function: difference of two timestamps, in microseconds."""
    delta = backend_utils.typecast_timestamp(lhs) - backend_utils.typecast_timestamp(rhs)
    return delta.total_seconds() * 1000000
def _sqlite_regexp(re_pattern, re_string):
    """Custom SQLite REGEXP implementation; a NULL string never matches."""
    if re_string is None:
        return False
    return bool(re.search(re_pattern, force_text(re_string)))
def _sqlite_power(x, y):
return x ** y
| |
"""Image utilities
Some general image utilities using PIL.
"""
import Queue
import collections
import io
import mimetypes
import os
import struct
import threading
from gi.repository import (
GLib,
GObject,
GdkPixbuf,
Gtk,
Gdk,
)
from PIL import Image, ImageFilter
mimetypes.init()
# Generating a drop shadow is an expensive operation. Keep a cache
# of already generated drop shadows so they can be reutilized
_drop_shadows_cache = {}
# Icon-theme lookups are repeated constantly; resolve the theme once and
# memoize icon paths per guessed mimetype.
_icon_theme = Gtk.IconTheme.get_default()
_icon_filename_cache = {}
def get_icon_filename(choose_list, size):
    """Get a theme icon filename.

    :param list choose_list: icon names to choose from; the first icon the
        theme resolves wins.
    :param int size: icon size, passed to
        :class:`Gtk.IconTheme.choose_icon`
    :return: the path to the icon, or None when nothing matched
    :rtype: str
    """
    icon = _icon_theme.choose_icon(
        choose_list, size, Gtk.IconLookupFlags.NO_SVG)
    if not icon:
        return icon
    return icon.get_filename()
def get_icon_for_file(filename, size):
    """Get icon for filename mimetype.

    Guess the file's mimetype and return the path of a theme icon
    representing it; results are memoized per mimetype.

    :param str filename: path of the file to be analyzed
    :param int size: size of the icon, to be passed to
        :class:`Gtk.IconTheme.choose_icon`
    :return: the path to the icon
    :rtype: str
    """
    if os.path.isdir(filename):
        # mimetypes.guess_type doesn't work for folders
        guessed_mime = 'folder/folder'
    else:
        # Fallback to unknown if mimetypes wasn't able to guess it
        guessed_mime = mimetypes.guess_type(filename)[0] or 'unknown/unknown'
    if guessed_mime in _icon_filename_cache:
        return _icon_filename_cache[guessed_mime]
    mimetype, details = guessed_mime.split('/')
    # FIXME: guess_type mimetype is formatted differently from what
    # Gtk.IconTheme expects. We are trying to improve matching here.
    # Is there a better way for doing this?
    candidates = ['%s-%s' % (mimetype, details), details, mimetype]
    if mimetype == 'application':
        candidates.append('application-x-%s' % (details, ))
    candidates.append('%s-x-generic' % (mimetype, ))
    candidates.append('unknown')
    icon_filename = get_icon_filename(candidates, size)
    _icon_filename_cache[guessed_mime] = icon_filename
    return icon_filename
def image2pixbuf(image):
    """Convert a PIL image to a pixbuf.

    The image is serialized to PNG in memory and fed to a pixbuf loader.

    :param image: the image to convert
    :type image: `PIL.Image`
    :returns: the newly created pixbuf
    :rtype: `GdkPixbuf.Pixbuf`
    """
    buff = io.BytesIO()
    try:
        image.save(buff, 'png')
        loader = GdkPixbuf.PixbufLoader.new_with_type('png')
        loader.write(buff.getvalue())
        pixbuf = loader.get_pixbuf()
        loader.close()
    finally:
        buff.close()
    return pixbuf
def add_border(image, border_size=5,
               background_color=(0xff, 0xff, 0xff, 0xff)):
    """Add a border on the image.

    :param image: the image to add the border
    :type image: `PIL.Image`
    :param int border_size: the size of the border
    :param tuple background_color: the color of the border as a
        tuple containing (r, g, b, a) information
    :returns: the new image with the border
    :rtype: `PIL.Image`
    """
    width = image.size[0] + border_size * 2
    height = image.size[1] + border_size * 2
    try:
        # Image.convert returns a *new* image -- the old code discarded the
        # result and split the original, so non-RGBA inputs (P/LA/...) never
        # yielded an alpha mask and their transparency was lost on paste.
        image_parts = image.convert("RGBA").split()
        mask = image_parts[3] if len(image_parts) == 4 else None
    except IOError:
        mask = None
    border = Image.new("RGBA", (width, height), background_color)
    border.paste(image, (border_size, border_size), mask=mask)
    return border
def add_drop_shadow(image, iterations=3, border_size=2, offset=(2, 2),
                    shadow_color=(0x00, 0x00, 0x00, 0xff)):
    """Add a drop shadow to the image.
    Based on this receipe::
        http://en.wikibooks.org/wiki/Python_Imaging_Library/Drop_Shadows
    :param image: the image to add the drop shadow
    :type image: `PIL.Image`
    :param int iterations: number of times to apply the blur filter
    :param int border_size: the size of the border to add to leave
        space for the shadow
    :param tuple offset: the offset of the shadow as (x, y)
    :param tuple shadow_color: the color of the shadow as a
        tuple containing (r, g, b, a) information
    :returns: the new image with the drop shadow
    :rtype: `PIL.Image`
    """
    width = image.size[0] + abs(offset[0]) + 2 * border_size
    height = image.size[1] + abs(offset[1]) + 2 * border_size
    # Blurring below is expensive, so reuse a previously generated shadow
    # for identical geometry/color when possible (see _drop_shadows_cache).
    key = (width, height, iterations, border_size, offset, shadow_color)
    existing_shadow = _drop_shadows_cache.get(key)
    if existing_shadow:
        shadow = existing_shadow.copy()
    else:
        shadow = Image.new('RGBA', (width, height),
                           (0xff, 0xff, 0xff, 0x00))
        # Place the shadow, with the required offset
        # if < 0, push the rest of the image right
        shadow_lft = border_size + max(offset[0], 0)
        # if < 0, push the rest of the image down
        shadow_top = border_size + max(offset[1], 0)
        shadow.paste(shadow_color,
                     [shadow_lft, shadow_top,
                      shadow_lft + image.size[0],
                      shadow_top + image.size[1]])
        # Apply the BLUR filter repeatedly
        for i in range(iterations):
            shadow = shadow.filter(ImageFilter.BLUR)
        _drop_shadows_cache[key] = shadow.copy()
    # Paste the original image on top of the shadow
    # if the shadow offset was < 0, push right
    img_lft = border_size - min(offset[0], 0)
    # if the shadow offset was < 0, push down
    img_top = border_size - min(offset[1], 0)
    shadow.paste(image, (img_lft, img_top))
    return shadow
class ImageCacheManager(GObject.GObject):
    """Helper to cache image transformations.
    Image transformations can be expensive and datagrid views will
    ask for them a lot. This will help by:
        * Caching the mru images so the pixbuf is ready to be used,
          without having to load and transform it again
        * Do the transformations on another thread so larger images
          transformation will not disturb the main one.
    """
    # Emitted (via GObject.idle_add from the worker thread) whenever a
    # background transformation finished and the cache was updated.
    __gsignals__ = {
        'image-loaded': (GObject.SignalFlags.RUN_LAST, None, ()),
    }
    _instance = None
    # Upper bound on cached pixbufs (also the maxlen of the MRU deque).
    MAX_CACHE_SIZE = 200
    IMAGE_BORDER_SIZE = 6
    IMAGE_SHADOW_SIZE = 6
    IMAGE_SHADOW_OFFSET = 2
    def __init__(self):
        """Initialize the image cache manager object."""
        super(ImageCacheManager, self).__init__()
        # Guards _cache/_mru/_waiting, which are shared with the worker.
        self._lock = threading.Lock()
        self._cache = {}          # params tuple -> ready pixbuf
        self._placeholders = {}   # fallback-icon key -> placeholder pixbuf
        self._mru = collections.deque([], self.MAX_CACHE_SIZE)
        self._waiting = set()     # params already queued for the worker
        # We are using a LifoQueue instead of a Queue to load the most recently
        # used image. For example, when scrolling the treeview, you will want
        # the visible rows to be loaded before the ones that were put in the
        # queue during the process.
        self._queue = Queue.LifoQueue()
        self._task = threading.Thread(target=self._transform_task)
        self._task.daemon = True
        self._task.start()
    ###
    # Public
    ###
    @classmethod
    def get_default(cls):
        """Get the singleton default cache manager.
        :return: the cache manager
        :rtype: :class:`ImageCacheManager`
        """
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance
    def get_image(self, path, size=24, fill_image=True, draw_border=False,
                  draft=False, load_on_thread=False):
        """Render path into a pixbuf.
        :param str path: the image path or `None` to use a fallback image
        :param int size: the size to resize the image. It will be resized
            to fit a square of (size, size)
        :param bool fill_image: if we should fill the image with a transparent
            background to make a smaller image be at least a square of
            (size, size), with the real image at the center.
        :param bool draw_border: if we should add a border on the image
        :param bool draft: if we should load the image as a draft. This
            trades a little quality for a much higher performance.
        :param bool load_on_thread: if we should load the image on another
            thread. This will make a placeholder be returned the first
            time this method is called.
        :returns: the resized pixbuf
        :rtype: :class:`GdkPixbuf.Pixbuf`
        """
        params = (path, size, fill_image, draw_border, draft)
        with self._lock:
            # We want this params to be the last element on the deque (meaning
            # it will be the most recently used item). Since we will append it
            # bellow, if it were already in the deque, make sure to remove it
            if params in self._mru:
                self._mru.remove(params)
            # When self._mru reaches its maxlen, the least recently used
            # element (the position 0 in case of an append) will be removed
            self._mru.append(params)
            pixbuf = self._cache.get(params, None)
        # The pixbuf is on cache
        if pixbuf is not None:
            return pixbuf
        # The pixbuf is not on cache, but we don't want to
        # load it on a thread
        if not load_on_thread:
            pixbuf = self._transform_image(*params)
            # If no pixbuf, let the fallback image be returned
            if pixbuf:
                self._cache_pixbuf(params, pixbuf)
                return pixbuf
        elif params not in self._waiting:
            self._waiting.add(params)
            self._queue.put(params)
        # Size will always be rounded to the next value. After 48, the
        # next is 256 and we don't want something that big here.
        fallback_size = min(size, 48)
        fallback = get_icon_for_file(path or '', fallback_size)
        placeholder_key = (fallback, ) + tuple(params[1:])
        placeholder = self._placeholders.get(placeholder_key, None)
        if placeholder is None:
            # If the image is damaged for some reason, use fallback for
            # its mimetype. Maybe the image is not really an image
            # (it could be a video, a plain text file, etc)
            placeholder = self._transform_image(
                fallback, fallback_size, *params[2:])
            self._placeholders[placeholder_key] = placeholder
        # Make the placeholder the initial value for the image. If the
        # loading fails, it will be used as the pixbuf for the image.
        self._cache[params] = placeholder
        return placeholder
    ###
    # Private
    ###
    def _cache_pixbuf(self, params, pixbuf):
        """Cache the pixbuf.
        Cache the pixbuf generated by the given params.
        This will also free any item that is not needed
        anymore (the least recently used items after the
        cache > :attr:`.MAX_CACHE_SIZE`) from the cache.
        :param tuple params: the params used to do the image
            transformation. Will be used as the key for the cache dict
        :param pixbuf: the pixbuf to be cached.
        :type pixbuf: :class:`GdkPixbuf.Pixbuf`
        """
        self._cache[params] = pixbuf
        self._waiting.discard(params)
        # Free anything that is not needed anymore from the memory
        for params in set(self._cache) - set(self._mru):
            del self._cache[params]
    def _transform_task(self):
        """Task responsible for doing image transformations.
        This will run on another thread, checking the queue for any
        new images, transforming and caching them after.
        After loading any image here, 'image-loaded' signal
        will be emitted.
        """
        while True:
            params = self._queue.get()
            # It probably isn't needed anymore
            if params not in self._mru:
                continue
            pixbuf = self._transform_image(*params)
            if pixbuf is None:
                continue
            with self._lock:
                self._cache_pixbuf(params, pixbuf)
            # Emit on the main loop, not from this worker thread.
            GObject.idle_add(self.emit, 'image-loaded')
    def _transform_image(self, path, size, fill_image, draw_border, draft):
        """Render path into a pixbuf.
        :param str path: the image path or `None` to use a fallback image
        :param int size: the size to resize the image. It will be resized
            to fit a square of (size, size)
        :param bool fill_image: if we should fill the image with a transparent
            background to make a smaller image be at least a square of
            (size, size), with the real image at the center.
        :param bool draw_border: if we should add a border on the image
        :param bool draft: if we should load the image as a draft. This
            trades a little quality for a much higher performance.
        :returns: the resized pixbuf
        :rtype: :class:`GdkPixbuf.Pixbuf`
        """
        path = path or ''
        image = self._open_image(path, size, draft)
        if image is None:
            return None
        if draw_border:
            image = add_border(image, border_size=self.IMAGE_BORDER_SIZE)
            image = add_drop_shadow(
                image, border_size=self.IMAGE_SHADOW_SIZE,
                offset=(self.IMAGE_SHADOW_OFFSET, self.IMAGE_SHADOW_OFFSET))
            # Account for the extra pixels the border/shadow added.
            size += self.IMAGE_BORDER_SIZE * 2
            size += self.IMAGE_SHADOW_SIZE * 2
            size += self.IMAGE_SHADOW_OFFSET
        else:
            # FIXME: There's a bug on PIL where image.thumbnail modifications
            # will be lost for some images when saving it the way we do on
            # image2pixbuf (even image.copy().size != image.size when it was
            # resized). Adding a border of size 0 will make it at least be
            # pasted to a new image (which didn't have its thumbnail method
            # called), working around this issue.
            image = add_border(image, 0)
        pixbuf = image2pixbuf(image)
        width = pixbuf.get_width()
        height = pixbuf.get_height()
        if not fill_image:
            return pixbuf
        # Make sure the image is on the center of the image_max_size
        square_pic = GdkPixbuf.Pixbuf.new(
            GdkPixbuf.Colorspace.RGB, True, pixbuf.get_bits_per_sample(),
            size, size)
        # Fill with transparent white
        square_pic.fill(0xffffff00)
        # NOTE(review): '/' yields a float on Python 3; this module targets
        # Python 2 (see the ``import Queue`` at the top), where it's int.
        dest_x = (size - width) / 2
        dest_y = (size - height) / 2
        pixbuf.copy_area(0, 0, width, height, square_pic, dest_x, dest_y)
        return square_pic
    def _open_image(self, path, size, draft):
        """Open the image on the given path.
        :param str path: the image path
        :param int size: the size to resize the image. It will be resized
            to fit a square of (size, size)
        :param bool draft: if we should load the image as a draft. This
            trades a little quality for a much higher performance.
        :returns: the opened image
        :rtype: :class:`PIL.Image`
        """
        # When trying to open the brokensuit images
        # (https://code.google.com/p/javapng/wiki/BrokenSuite), PIL failed to
        # open 27 of them, while Pixbuf failed to open 32. But trying PIL first
        # and Pixbuf if it failed reduced that number to 20.
        # In general, most of the images (specially if they are not broken,
        # which is something more uncommon) will be opened directly by PIL.
        try:
            image = Image.open(path)
            if draft:
                image.draft('P', (size, size))
            image.load()
        except (IOError, SyntaxError, OverflowError, struct.error) as e:
            try:
                pixbuf = GdkPixbuf.Pixbuf.new_from_file(path)
            except GLib.GError:
                return None
            else:
                # NOTE(review): Image.fromstring is the legacy PIL API
                # (removed in modern Pillow, which uses frombytes).
                image = Image.fromstring(
                    "RGB", (pixbuf.get_width(), pixbuf.get_height()),
                    pixbuf.get_pixels())
        image.thumbnail((size, size), Image.BICUBIC)
        return image
| |
import codecs
import hashlib
import json
import os
import re
import tempfile
import time
from ..constants import SETTINGS_FILE, SYNTAX_FILE
from ..http import CurlRequestThread
from ..http import HttpClientRequestThread
from ..message import Request
from ..overrideable import OverrideableSettings
from ..parse import RequestParser
from ..util import get_end_of_line_character
from ..util import normalize_line_endings
import sublime
import sublime_plugin
try:
from urllib.parse import parse_qs
from urllib.parse import urljoin
from urllib.parse import urlparse
except ImportError:
# Python 2
from urlparse import parse_qs
from urlparse import urlparse
from urlparse import urljoin
# Hard limits to keep redirect-following and group creation bounded.
MAX_REDIRECTS = 10
MAX_GROUPS = 10
# Matches "@ name: value" override lines in the request's header section.
# Raw string: the original non-raw literal relied on "\s" not being a
# recognized string escape, which raises DeprecationWarning (and will
# eventually be an error) on modern Python. The pattern is unchanged.
RE_OVERRIDE = r"""^\s*@\s*([^\:]*)\s*:\s*(.*)$"""
def _normalize_command(command):
# Return a well formed dictionary for a request or response command
valid = False
# Find the string class. (str for py3, basestring for py2)
string_class = str
try:
# If Python 2, use basestring instead of str
#noinspection PyStatementEffect
basestring
string_class = basestring
except NameError:
pass
if isinstance(command, string_class):
command = {"name": command}
valid = True
elif isinstance(command, dict):
if "name" in command:
valid = True
# Skip here if invalid.
if not valid:
print("Skipping invalid command.")
print("Each command must be a string or a dict with a 'name'")
print(command)
return None
# Ensure each command has all needed fields.
if not "args" in command:
command["args"] = None
return command
class ResterHttpRequestCommand(sublime_plugin.WindowCommand):
    """Window command that reads an HTTP request from the active view,
    performs it on a background thread, and presents the response.

    The request text may contain @-style setting overrides, "@name = value"
    variable definitions with {{name}} substitutions, and is optionally
    pre-processed by configurable "request_commands" before parsing.
    """

    def __init__(self, *args, **kwargs):
        sublime_plugin.WindowCommand.__init__(self, *args, **kwargs)
        # Encoding of the request view; falls back to UTF-8 when unknown.
        self.encoding = "UTF-8"
        self.eol = "\n"
        self.request_view = None
        self.response_view = None
        self.settings = None
        # Hash identifying the current run of the command; used to detect
        # and ignore stale status-bar animation callbacks.
        self._command_hash = None
        self._completed_message = "Done."
        self._redirect_count = 0
        self._requesting = False
        self._request_view_group = None
        self._request_view_index = None

    def run(self, pos=None):
        """Entry point: read the request at *pos* (or the current
        selection) from the active view and start an HTTP request."""
        # Store references.
        self.request_view = self.window.active_view()
        self._request_view_group, self._request_view_index = \
            self.window.get_view_index(self.request_view)
        self.response_view = None
        self.eol = get_end_of_line_character(self.request_view)
        self.settings = self._get_settings()
        self._completed_message = "Done."
        self._redirect_count = 0
        self._requesting = False
        # Determine the encoding of the editor starting the request.
        # Sublime returns "Undefined" for views that are not yet saved.
        self.encoding = self.request_view.encoding()
        if not self.encoding or self.encoding == "Undefined":
            self.encoding = "UTF-8"
        # Store the text before any request commands are applied.
        originalText = self._get_selection(pos)
        # Perform commands on the request buffer.
        # Store the number of changes made so we can undo them.
        try:
            changes = self.request_view.change_count()
            self._run_request_commands()
            changes = self.request_view.change_count() - changes
        except AttributeError:
            # ST2 does not have a change_count() method.
            # It does allow creating an Edit on the fly though.
            edit = self.request_view.begin_edit()
            self._run_request_commands()
            self.request_view.end_edit(edit)
            changes = 1
        # Read the selected text.
        text = self._get_selection(pos)
        # Undo the request commands to return to the starting state.
        if text != originalText:
            for i in range(changes):
                self.request_view.run_command("undo")

        def replace(m):
            # Substitute a {{name}} placeholder with its variable value
            # (empty string when the variable is undefined).
            return variables.get(m.group(1), '')
        view = self.request_view
        extractions = []
        # Collect "@name = value" variable definitions from the buffer.
        # find_all's format string expands each match to "\1\2=\3"; a
        # leading "#" marks a commented-out definition, skipped below.
        view.find_all(r'(?:(#)\s*)?@([_a-zA-Z][_a-zA-Z0-9]*)\s*=\s*(.*)', 0, r'\1\2=\3', extractions)
        variables = {}
        for var in extractions:
            var, _, val = var.partition('=')
            if var[0] != '#':
                variables[var] = val.strip()
        # Also collect definitions from the original (pre-command) text;
        # these take precedence over ones found in the buffer.
        for var in re.findall(r'(?:(#)\s*)?@([_a-zA-Z][_a-zA-Z0-9]*)\s*=\s*(.*)', originalText):
            if var[0] != '#':
                var, val = var[1], var[2]
                variables[var] = val.strip()
        # Apply {{name}} substitutions to the request text.
        text = re.sub(r'\{\{\s*([_a-zA-Z][_a-zA-Z0-9]*)\s*\}\}', replace, text)
        # Build a message.Request from the text.
        request_parser = RequestParser(self.settings, self.eol)
        request = request_parser.get_request(text)
        # Set the state to requesting.
        self._requesting = True
        # Create a new hash for this specific run of the command.
        command_hash = hashlib.sha1()
        command_hash.update(str(time.time()).encode("ascii"))
        command_hash = command_hash.hexdigest()
        self._command_hash = command_hash
        self.check_if_requesting(command_hash)
        # Make the request.
        self._start_request(request)

    def check_if_requesting(self, command_hash, i=0, direction=1):
        """Animate a bouncing status-bar indicator while the request is
        in flight; show the completion message once it finishes."""
        # Ignore if the command hash does not match.
        # That indicates the callback is stale.
        if self._command_hash != command_hash:
            return
        # Show an animation until the command is complete.
        if self._requesting:
            # This animates a little activity indicator in the status area.
            before = i % 8
            after = 7 - before
            if not after:
                direction = -1
            if not before:
                direction = 1
            i += direction
            message = "RESTer [%s=%s]" % (" " * before, " " * after)
            self.request_view.set_status("rester", message)
            fn = lambda: self.check_if_requesting(command_hash, i, direction)
            sublime.set_timeout(fn, 100)
        else:
            if not self._completed_message:
                self._completed_message = "Done."
            self.request_view.set_status("rester", self._completed_message)

    def handle_response_view(self, filepath, title, body_only):
        """Finalize the response view once it is loaded: name it, select
        the body, run response commands, and manage group focus."""
        if self.response_view.is_loading():
            # Poll until Sublime finishes loading the file into the view.
            fn = lambda: self.handle_response_view(filepath, title,
                                                   body_only)
            sublime.set_timeout(fn, 100)
        else:
            view = self.response_view
            view.set_scratch(self.settings.get("response_scratch", True))
            view.set_name(title)
            # Delete the temp file.
            os.remove(filepath)
            # Select the body.
            selection = None
            if body_only:
                selection = sublime.Region(0, view.size())
            else:
                # The body starts after the first blank line (two EOLs).
                eol = get_end_of_line_character(view)
                headers = view.find(eol * 2, 0)
                if headers:
                    selection = sublime.Region(headers.b, view.size())
            if selection:
                view.sel().clear()
                view.sel().add(selection)
            # Run response commands and finish.
            self._run_response_commands()
            self._complete("Request complete. " + title)
            # Close all views in the response group other than the current
            # response view.
            if (not self.settings.get("response_group", None) is None) \
                    and self.settings.get("response_group_clean", False):
                views = self.window.views_in_group(self.window.active_group())
                for other_view in views:
                    if other_view.id() != view.id():
                        self.window.focus_view(other_view)
                        self.window.run_command("close_file")
            # Set the focus back to the request group and view.
            if self.settings.get("request_focus", False):
                self.window.focus_group(self._request_view_group)
                self.window.focus_view(self.request_view)

    def handle_thread(self, thread):
        """Poll the worker thread and dispatch on its outcome."""
        if thread.is_alive():
            # Working...
            sublime.set_timeout(lambda: self.handle_thread(thread), 100)
        elif thread.success:
            # Success.
            self._complete_thread(thread)
        else:
            # Failed.
            if thread.message:
                self._complete(thread.message)
            else:
                self._complete("Unable to make request.")

    def _complete(self, message):
        # End the command and display a message.
        self._requesting = False
        self._completed_message = message

    def _complete_thread(self, thread):
        """Process a successful response: log it, follow redirects, and
        write it to a temp file opened in a (possibly new) view/group."""
        response = thread.response
        status_line = response.status_line
        # Output the response to the console.
        output_headers = self.settings.get("output_response_headers", True)
        output_body = self.settings.get("output_response_body", True) and \
            response.body
        if output_headers or output_body:
            if thread.elapsed:
                print("\nResponse time:", thread.elapsed)
            print("\n[Response]")
            if output_headers:
                print(status_line)
                print("\n".join(response.header_lines))
            if output_headers and output_body:
                print("")
            if output_body:
                try:
                    print(response.body)
                except UnicodeEncodeError:
                    # Python 2
                    print(response.body.encode("UTF8"))
        # Redirect.
        follow = self.settings.get("follow_redirects", True)
        follow_codes = self.settings.get("follow_redirect_status_codes", [])
        if follow and response.status in follow_codes:
            self._follow_redirect(response, thread.request)
            return
        # Stop now if the user does not want a response buffer.
        if not self.settings.get("response_buffer", True):
            self._complete("Request complete. " + status_line)
            return
        # Open a temporary file to write the response to.
        # (Note: Using codecs to support Python 2.6)
        tmpfile = tempfile.NamedTemporaryFile("w", delete=False)
        filename = tmpfile.name
        tmpfile.close()
        tmpfile = codecs.open(filename, "w", encoding="UTF8")
        # Body only, but only on success.
        success = 200 <= thread.response.status <= 299
        if success and self.settings.get("body_only", False):
            if response.body:
                tmpfile.write(response.body)
            body_only = True
        # Status line and headers.
        else:
            tmpfile.write(response.status_line)
            tmpfile.write("\n")
            for header in response.header_lines:
                tmpfile.write(header)
                tmpfile.write("\n")
            if response.body:
                tmpfile.write("\n")
                tmpfile.write(response.body)
            body_only = False
        # An empty body means there is nothing to select as "body only".
        if not response.body:
            body_only = False
        # Close the file.
        tmpfile.close()
        filepath = tmpfile.name
        # Open the file in a new view.
        title = status_line
        if thread.elapsed:
            title += " (%.4f sec.)" % thread.elapsed
        self.response_view = self.window.open_file(filepath)
        self.response_view.set_syntax_file(SYNTAX_FILE)
        # Create, if needed, a group specific for responses and move the
        # response view to that group.
        response_group = self.settings.get("response_group", None)
        if response_group is not None:
            response_group = min(response_group, MAX_GROUPS)
            while self.window.num_groups() < response_group + 1:
                self.window.run_command("new_pane")
            self.window.set_view_index(self.response_view, response_group, 0)
            if not self.settings.get("request_focus", False):
                # Set the focus to the response group.
                self.window.focus_group(response_group)
        self.handle_response_view(tmpfile.name, title, body_only)

    def _get_selection(self, pos=None):
        # Return a string of the selected text or the entire buffer.
        # if there are multiple selections, concatenate them.
        # When a position is known (explicit *pos* or a single empty
        # cursor), return the "###"-delimited section containing it.
        view = self.request_view
        if pos is None:
            sels = view.sel()
            if len(sels) == 1 and sels[0].empty():
                pos = sels[0].a
        if pos is not None:
            selection = view.substr(sublime.Region(0, view.size()))
            begin = selection.rfind('\n###', 0, pos)
            end = selection.find('\n###', pos)
            if begin != -1 and end != -1:
                selection = selection[begin:end]
            elif begin != -1:
                selection = selection[begin:]
            elif end != -1:
                selection = selection[:end]
        else:
            selection = ""
            for sel in sels:
                selection += view.substr(sel)
        return selection

    def _get_settings(self):
        # Return a setting-like object that combines the user's settings with
        # overrides from the current request.
        # Scan the request for overrides.
        text = self._get_selection().lstrip()
        text = normalize_line_endings(text, self.eol)
        # Only the header section (before the first blank line) is scanned.
        headers = text.split(self.eol * 2, 1)[0]
        # Build a dictionary of the overrides.
        overrides = {}
        for (name, value) in re.findall(RE_OVERRIDE, headers, re.MULTILINE):
            try:
                overrides[name] = json.loads(value)
            except ValueError:
                # If unable to parse as JSON, assume it's an un-quoted string.
                overrides[name] = value
        # Return an OverrideableSettings object.
        return OverrideableSettings(
            settings=sublime.load_settings(SETTINGS_FILE),
            overrides=overrides)

    def _follow_redirect(self, response, request):
        """Build and start a new request for the response's Location
        header, up to MAX_REDIRECTS times."""
        # Stop now in the event of an infinite loop.
        if self._redirect_count > MAX_REDIRECTS:
            self._complete("Maximum redirects reached.")
            return
        # Read the location header and start a new request.
        location = response.get_header("Location")
        # Stop now if no location header.
        if not location:
            self._complete("Unable to redirect. No Location header found.")
            return
        # Create a new request instance.
        redirect = Request()
        # Use GET unless the original request was HEAD.
        if request.method == "HEAD":
            redirect.method = "HEAD"
        # Parse the Location URI
        uri = urlparse(location)
        if uri.netloc:
            # If there is a netloc, it's an absolute path.
            redirect.host = uri.netloc
            if uri.scheme:
                redirect.protocol = uri.scheme
            if uri.path:
                redirect.path = uri.path
        elif uri.path:
            # If no netloc, but there is a path, resolve from last.
            redirect.host = request.host
            redirect.path = urljoin(request.path, uri.path)
        # Always add the query.
        # NOTE(review): parse_qs returns a dict; this += presumes
        # Request.query supports in-place concatenation with it --
        # confirm against the message.Request implementation.
        if uri.query:
            redirect.query += parse_qs(uri.query)
        print("\n[...redirecting...]")
        self._redirect_count += 1
        self._start_request(redirect)
        return

    def _run_request_commands(self):
        # Process the request buffer to prepare the contents for the request.
        view = self.request_view
        commands = self.settings.get("request_commands", [])
        for command in commands:
            command = _normalize_command(command)
            if command:
                view.run_command(command["name"], command["args"])

    def _run_response_commands(self):
        # Apply the configured post-processing commands to the response view.
        view = self.response_view
        commands = self.settings.get("response_commands", [])
        for command in commands:
            command = _normalize_command(command)
            if command:
                view.run_command(command["name"], command["args"])

    def _start_request(self, request):
        # Create, start, and handle a thread for the selection.
        if self.settings.get("output_request", True):
            print("\n[Request]")
            print(request.request_line)
            print("Host: %s" % request.host)
            for header in request.header_lines:
                print(header)
            if request.body:
                print("")
                try:
                    print(request.body)
                except UnicodeEncodeError:
                    # Python 2
                    print(request.body.encode("UTF8"))
        # Pick the transport implementation from settings.
        client = self.settings.get("http_client", "python")
        if client == "python":
            thread_class = HttpClientRequestThread
        elif client == "curl":
            thread_class = CurlRequestThread
        else:
            message = "Invalid request_client. "
            message += "Must be 'python' or 'curl'. Found " + client
            self._complete(message)
            return
        thread = thread_class(request, self.settings, encoding=self.encoding)
        thread.start()
        self.handle_thread(thread)
class ResterHttpResponseCloseEvent(sublime_plugin.ViewEventListener):
    """Collapses the response pane when its last response view closes."""

    @classmethod
    def is_applicable(cls, settings):
        # Only attach to views using the RESTer response syntax.
        return settings.get('syntax') == SYNTAX_FILE

    @classmethod
    def applies_to_primary_view_only(cls):
        return True

    def on_pre_close(self):
        """If this is the sole view in the response group, close the pane."""
        prefs = sublime.load_settings(SETTINGS_FILE)
        group = prefs.get("response_group", None)
        if group is None:
            return
        group = min(group, MAX_GROUPS)
        window = self.view.window()
        group_views = window.views_in_group(group)
        if len(group_views) != 1 or self.view != group_views[0]:
            return
        window.focus_group(0)
        # Defer the pane close until after this close event finishes.
        sublime.set_timeout(lambda: window.run_command("close_pane"), 0)
| |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.appengine.api import datastore_errors
from model import *
from photo import create_photo, PhotoError
from utils import *
from detect_spam import SpamDetector
import extend
import reveal
import subscribe
from django.utils.translation import ugettext as _
from urlparse import urlparse
# How many days may remain before we warn about imminent expiration.
# Make this at least 1.
EXPIRY_WARNING_THRESHOLD = 7
def get_profile_pages(profile_urls, handler):
    """Build a list of profile page dicts, one per line of profile_urls.

    Each entry has at least 'name' and 'url'. A URL matching a configured
    profile website's url_regexp gets that website's metadata (via
    add_profile_icon_url); otherwise the URL's hostname is used as the name.
    """
    pages = []
    websites = handler.config.profile_websites or []
    for url in profile_urls.splitlines():
        page = None
        for website in websites:
            if ('url_regexp' in website and
                re.match(website['url_regexp'], url)):
                page = add_profile_icon_url(website, handler)
                page['url'] = url
                break
        if page is None:
            # Use the hostname as the website name by default.
            page = {'name': urlparse(url).hostname, 'url': url}
        pages.append(page)
    return pages
class Handler(BaseHandler):
    """Person record view page: GET renders the record and its notes;
    POST adds a new note (with spam screening) to the record."""

    def get(self):
        """Render the view page for the person given by the id parameter."""
        # Check the request parameters.
        if not self.params.id:
            return self.error(404, 'No person id was specified.')
        try:
            person = Person.get(self.repo, self.params.id)
        # NOTE(review): Person.get presumably raises ValueError for a
        # malformed record id -- confirm against the model module.
        except ValueError:
            return self.error(404,
                _("This person's entry does not exist or has been deleted."))
        if not person:
            return self.error(404,
                _("This person's entry does not exist or has been deleted."))
        standalone = self.request.get('standalone')
        # Check if private info should be revealed.
        content_id = 'view:' + self.params.id
        reveal_url = reveal.make_reveal_url(self, content_id)
        show_private_info = reveal.verify(content_id, self.params.signature)
        # Compute the local times for the date fields on the person.
        person.source_date_local = self.to_local_time(person.source_date)
        person.expiry_date_local = self.to_local_time(
            person.get_effective_expiry_date())
        # Get the notes and duplicate links.
        try:
            notes = person.get_notes()
        except datastore_errors.NeedIndexError:
            # Degrade gracefully when the datastore index is missing.
            notes = []
        person.sex_text = get_person_sex_text(person)
        # Attach display text and per-note action URLs.
        for note in notes:
            note.status_text = get_note_status_text(note)
            note.linked_person_url = \
                self.get_url('/view', id=note.linked_person_record_id)
            note.flag_spam_url = \
                self.get_url('/flag_note', id=note.note_record_id,
                             hide=(not note.hidden) and 'yes' or 'no',
                             signature=self.params.signature)
            note.source_date_local = self.to_local_time(note.source_date)
        try:
            linked_persons = person.get_all_linked_persons()
        except datastore_errors.NeedIndexError:
            linked_persons = []
        linked_person_info = [
            dict(id=p.record_id,
                 name=p.primary_full_name,
                 view_url=self.get_url('/view', id=p.record_id))
            for p in linked_persons]
        # Render the page.
        dupe_notes_url = self.get_url(
            '/view', id=self.params.id, dupe_notes='yes')
        results_url = self.get_url(
            '/results',
            role=self.params.role,
            query=self.params.query,
            given_name=self.params.given_name,
            family_name=self.params.family_name)
        feed_url = self.get_url(
            '/feeds/note',
            person_record_id=self.params.id,
            repo=self.repo)
        subscribe_url = self.get_url('/subscribe', id=self.params.id)
        delete_url = self.get_url('/delete', id=self.params.id)
        disable_notes_url = self.get_url('/disable_notes', id=self.params.id)
        enable_notes_url = self.get_url('/enable_notes', id=self.params.id)
        extend_url = None
        extension_days = 0
        expiration_days = None
        expiry_date = person.get_effective_expiry_date()
        # Expiry extension only applies to original (non-clone) records.
        if expiry_date and not person.is_clone():
            expiration_delta = expiry_date - get_utcnow()
            extend_url = self.get_url('/extend', id=self.params.id)
            extension_days = extend.get_extension_days(self)
            if expiration_delta.days < EXPIRY_WARNING_THRESHOLD:
                # round 0 up to 1, to make the msg read better.
                expiration_days = expiration_delta.days + 1
        if person.is_clone():
            person.provider_name = person.get_original_domain()
        sanitize_urls(person)
        for note in notes:
            sanitize_urls(note)
        if person.profile_urls:
            person.profile_pages = get_profile_pages(person.profile_urls, self)
        self.render('view.html',
                    person=person,
                    notes=notes,
                    linked_person_info=linked_person_info,
                    standalone=standalone,
                    onload_function='view_page_loaded()',
                    show_private_info=show_private_info,
                    admin=users.is_current_user_admin(),
                    dupe_notes_url=dupe_notes_url,
                    results_url=results_url,
                    reveal_url=reveal_url,
                    feed_url=feed_url,
                    subscribe_url=subscribe_url,
                    delete_url=delete_url,
                    disable_notes_url=disable_notes_url,
                    enable_notes_url=enable_notes_url,
                    extend_url=extend_url,
                    extension_days=extension_days,
                    expiration_days=expiration_days)

    def post(self):
        """Validate and store a new note on the person record, routing
        suspected spam through a confirmation flow."""
        if not self.params.text:
            return self.error(
                200, _('Message is required. Please go back and try again.'))
        if not self.params.author_name:
            return self.error(
                200, _('Your name is required in the "About you" section. '
                       'Please go back and try again.'))
        if (self.params.status == 'is_note_author' and
            not self.params.author_made_contact):
            return self.error(
                200, _('Please check that you have been in contact with '
                       'the person after the earthquake, or change the '
                       '"Status of this person" field.'))
        if (self.params.status == 'believed_dead' and
            not self.config.allow_believed_dead_via_ui):
            return self.error(
                200, _('Not authorized to post notes with the status '
                       '"believed_dead".'))
        person = Person.get(self.repo, self.params.id)
        if person.notes_disabled:
            return self.error(
                200, _('The author has disabled status updates '
                       'on this record.'))
        # If a photo was uploaded, create and store a new Photo entry and get
        # the URL where it's served; otherwise, use the note_photo_url provided.
        photo, photo_url = (None, self.params.note_photo_url)
        if self.params.note_photo is not None:
            try:
                photo, photo_url = create_photo(self.params.note_photo, self)
            except PhotoError, e:
                return self.error(400, e.message)
            photo.put()
        # Score the note text against the configured bad-word list.
        spam_detector = SpamDetector(self.config.bad_words)
        spam_score = spam_detector.estimate_spam_score(self.params.text)
        if (spam_score > 0):
            note = NoteWithBadWords.create_original(
                self.repo,
                entry_date=get_utcnow(),
                person_record_id=self.params.id,
                author_name=self.params.author_name,
                author_email=self.params.author_email,
                author_phone=self.params.author_phone,
                source_date=get_utcnow(),
                author_made_contact=bool(self.params.author_made_contact),
                status=self.params.status,
                email_of_found_person=self.params.email_of_found_person,
                phone_of_found_person=self.params.phone_of_found_person,
                last_known_location=self.params.last_known_location,
                text=self.params.text,
                photo=photo,
                photo_url=photo_url,
                spam_score=spam_score,
                confirmed=False)
            # Write the new NoteWithBadWords to the datastore
            db.put(note)
            UserActionLog.put_new('add', note, copy_properties=False)
            # When the note is detected as spam, we do not update person record
            # or log action. We ask the note author for confirmation first.
            return self.redirect('/post_flagged_note', id=note.get_record_id(),
                                 author_email=note.author_email,
                                 repo=self.repo)
        else:
            note = Note.create_original(
                self.repo,
                entry_date=get_utcnow(),
                person_record_id=self.params.id,
                author_name=self.params.author_name,
                author_email=self.params.author_email,
                author_phone=self.params.author_phone,
                source_date=get_utcnow(),
                author_made_contact=bool(self.params.author_made_contact),
                status=self.params.status,
                email_of_found_person=self.params.email_of_found_person,
                phone_of_found_person=self.params.phone_of_found_person,
                last_known_location=self.params.last_known_location,
                text=self.params.text,
                photo=photo,
                photo_url=photo_url)
            # Write the new regular Note to the datastore
            db.put(note)
            UserActionLog.put_new('add', note, copy_properties=False)
        # Specially log 'believed_dead'.
        if note.status == 'believed_dead':
            UserActionLog.put_new(
                'mark_dead', note, person.primary_full_name,
                self.request.remote_addr)
        # Specially log a switch to an alive status.
        if (note.status in ['believed_alive', 'is_note_author'] and
            person.latest_status not in ['believed_alive', 'is_note_author']):
            UserActionLog.put_new('mark_alive', note, person.primary_full_name)
        # Update the Person based on the Note.
        if person:
            person.update_from_note(note)
            # Send notification to all people
            # who subscribed to updates on this person
            subscribe.send_notifications(self, person, [note])
            # write the updated person record to datastore
            db.put(person)
        # If user wants to subscribe to updates, redirect to the subscribe page
        if self.params.subscribe:
            return self.redirect('/subscribe', id=person.record_id,
                                 subscribe_email=self.params.author_email)
        # Redirect to this page so the browser's back button works properly.
        self.redirect('/view', id=self.params.id, query=self.params.query)
| |
from itertools import islice, cycle
from mock import Mock
import struct
from threading import Thread
import unittest
from cassandra import ConsistencyLevel
from cassandra.cluster import Cluster
from cassandra.metadata import Metadata
from cassandra.policies import (RoundRobinPolicy, DCAwareRoundRobinPolicy,
TokenAwarePolicy, SimpleConvictionPolicy,
HostDistance, ExponentialReconnectionPolicy,
RetryPolicy, WriteType,
DowngradingConsistencyRetryPolicy)
from cassandra.pool import Host
from cassandra.query import Query
class TestRoundRobinPolicy(unittest.TestCase):
    """Tests for RoundRobinPolicy host rotation."""

    def test_basic(self):
        """Every host appears in a query plan."""
        hosts = [0, 1, 2, 3]
        policy = RoundRobinPolicy()
        policy.populate(None, hosts)
        qplan = list(policy.make_query_plan())
        self.assertEqual(sorted(qplan), hosts)

    def test_multiple_query_plans(self):
        """Repeated query plans keep covering every host."""
        hosts = [0, 1, 2, 3]
        policy = RoundRobinPolicy()
        policy.populate(None, hosts)
        # range() instead of xrange() so the test also runs on Python 3.
        for i in range(20):
            qplan = list(policy.make_query_plan())
            self.assertEqual(sorted(qplan), hosts)

    def test_single_host(self):
        """A single host is returned as-is."""
        policy = RoundRobinPolicy()
        policy.populate(None, [0])
        qplan = list(policy.make_query_plan())
        self.assertEqual(qplan, [0])

    def test_status_updates(self):
        """on_down/on_remove drop hosts; on_up/on_add introduce them."""
        hosts = [0, 1, 2, 3]
        policy = RoundRobinPolicy()
        policy.populate(None, hosts)
        policy.on_down(0)
        policy.on_remove(1)
        policy.on_up(4)
        policy.on_add(5)
        qplan = list(policy.make_query_plan())
        self.assertEqual(sorted(qplan), [2, 3, 4, 5])

    def test_thread_safety(self):
        """Concurrent make_query_plan() calls each see every host."""
        # Materialize the list: on Python 3 a bare range() object would
        # never compare equal to the sorted list of hosts.
        hosts = list(range(100))
        policy = RoundRobinPolicy()
        policy.populate(None, hosts)

        def check_query_plan():
            for i in range(100):
                qplan = list(policy.make_query_plan())
                self.assertEqual(sorted(qplan), hosts)

        threads = [Thread(target=check_query_plan) for i in range(4)]
        # Use explicit loops for the side effects: map() is lazy on
        # Python 3, so map(lambda t: t.start(), threads) would never
        # actually start (or join) the threads.
        for t in threads:
            t.start()
        for t in threads:
            t.join()
class TestDCAwareRoundRobinPolicy(unittest.TestCase):
    """Tests for DCAwareRoundRobinPolicy host selection and distances."""

    def test_no_remote(self):
        """With only local-dc hosts, the plan covers all of them."""
        hosts = []
        for i in range(4):
            h = Host(i, SimpleConvictionPolicy)
            h.set_location_info("dc1", "rack1")
            hosts.append(h)
        policy = DCAwareRoundRobinPolicy("dc1")
        policy.populate(None, hosts)
        qplan = list(policy.make_query_plan())
        self.assertEqual(sorted(qplan), sorted(hosts))

    def test_with_remotes(self):
        """Local hosts come first; remotes are capped by
        used_hosts_per_remote_dc."""
        hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)]
        for h in hosts[:2]:
            h.set_location_info("dc1", "rack1")
        for h in hosts[2:]:
            h.set_location_info("dc2", "rack1")
        local_hosts = set(h for h in hosts if h.datacenter == "dc1")
        remote_hosts = set(h for h in hosts if h.datacenter != "dc1")
        # allow all of the remote hosts to be used
        policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=2)
        policy.populate(None, hosts)
        qplan = list(policy.make_query_plan())
        self.assertEqual(set(qplan[:2]), local_hosts)
        self.assertEqual(set(qplan[2:]), remote_hosts)
        # allow only one of the remote hosts to be used
        policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
        policy.populate(None, hosts)
        qplan = list(policy.make_query_plan())
        self.assertEqual(set(qplan[:2]), local_hosts)
        used_remotes = set(qplan[2:])
        self.assertEqual(1, len(used_remotes))
        self.assertIn(qplan[2], remote_hosts)
        # allow no remote hosts to be used
        policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=0)
        policy.populate(None, hosts)
        qplan = list(policy.make_query_plan())
        self.assertEqual(2, len(qplan))
        self.assertEqual(local_hosts, set(qplan))

    def test_get_distance(self):
        """distance() is LOCAL, REMOTE, or IGNORED depending on the dc
        and the used_hosts_per_remote_dc limit."""
        policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=0)
        host = Host("ip1", SimpleConvictionPolicy)
        host.set_location_info("dc1", "rack1")
        policy.populate(None, [host])
        self.assertEqual(policy.distance(host), HostDistance.LOCAL)
        # used_hosts_per_remote_dc is set to 0, so ignore it
        remote_host = Host("ip2", SimpleConvictionPolicy)
        remote_host.set_location_info("dc2", "rack1")
        self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)
        # dc2 isn't registered in the policy's live_hosts dict
        policy.used_hosts_per_remote_dc = 1
        self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)
        # make sure the policy has both dcs registered
        policy.populate(None, [host, remote_host])
        self.assertEqual(policy.distance(remote_host), HostDistance.REMOTE)
        # since used_hosts_per_remote_dc is set to 1, only the first
        # remote host in dc2 will be REMOTE, the rest are IGNORED
        second_remote_host = Host("ip3", SimpleConvictionPolicy)
        second_remote_host.set_location_info("dc2", "rack1")
        policy.populate(None, [host, remote_host, second_remote_host])
        distances = set([policy.distance(remote_host), policy.distance(second_remote_host)])
        self.assertEqual(distances, set([HostDistance.REMOTE, HostDistance.IGNORED]))

    def test_status_updates(self):
        """Up/down/add/remove events apply across local and remote dcs."""
        hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)]
        for h in hosts[:2]:
            h.set_location_info("dc1", "rack1")
        for h in hosts[2:]:
            h.set_location_info("dc2", "rack1")
        policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
        policy.populate(None, hosts)
        policy.on_down(hosts[0])
        policy.on_remove(hosts[2])
        new_local_host = Host(4, SimpleConvictionPolicy)
        new_local_host.set_location_info("dc1", "rack1")
        policy.on_up(new_local_host)
        new_remote_host = Host(5, SimpleConvictionPolicy)
        new_remote_host.set_location_info("dc9000", "rack1")
        policy.on_add(new_remote_host)
        # we now have two local hosts and two remote hosts in separate dcs
        qplan = list(policy.make_query_plan())
        self.assertEqual(set(qplan[:2]), set([hosts[1], new_local_host]))
        self.assertEqual(set(qplan[2:]), set([hosts[3], new_remote_host]))
        # since we have hosts in dc9000, the distance shouldn't be IGNORED
        self.assertEqual(policy.distance(new_remote_host), HostDistance.REMOTE)
class TokenAwarePolicyTest(unittest.TestCase):
    """Tests for TokenAwarePolicy wrapping child load-balancing policies.

    Uses assertEqual throughout: the assertEquals alias used originally
    is deprecated and was removed in Python 3.12.
    """

    def test_wrap_round_robin(self):
        """Replicas for the routing key come first, then the remaining
        hosts from the wrapped RoundRobinPolicy."""
        cluster = Mock(spec=Cluster)
        cluster.metadata = Mock(spec=Metadata)
        hosts = [Host(str(i), SimpleConvictionPolicy) for i in range(4)]

        def get_replicas(packed_key):
            # Interpret the packed routing key as an index and return the
            # two hosts starting there, wrapping around the host list.
            index = struct.unpack('>i', packed_key)[0]
            return list(islice(cycle(hosts), index, index + 2))

        cluster.metadata.get_replicas.side_effect = get_replicas
        policy = TokenAwarePolicy(RoundRobinPolicy())
        policy.populate(cluster, hosts)
        for i in range(4):
            query = Query(routing_key=struct.pack('>i', i))
            qplan = list(policy.make_query_plan(query))
            replicas = get_replicas(struct.pack('>i', i))
            other = set(h for h in hosts if h not in replicas)
            self.assertEqual(replicas, qplan[:2])
            self.assertEqual(other, set(qplan[2:]))

    def test_wrap_dc_aware(self):
        """With a DCAware child policy: local replica first, then local
        non-replicas, then at most used_hosts_per_remote_dc remotes."""
        cluster = Mock(spec=Cluster)
        cluster.metadata = Mock(spec=Metadata)
        hosts = [Host(str(i), SimpleConvictionPolicy) for i in range(4)]
        for h in hosts[:2]:
            h.set_location_info("dc1", "rack1")
        for h in hosts[2:]:
            h.set_location_info("dc2", "rack1")

        def get_replicas(packed_key):
            index = struct.unpack('>i', packed_key)[0]
            # return one node from each DC
            if index % 2 == 0:
                return [hosts[0], hosts[2]]
            else:
                return [hosts[1], hosts[3]]

        cluster.metadata.get_replicas.side_effect = get_replicas
        policy = TokenAwarePolicy(DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1))
        policy.populate(cluster, hosts)
        for i in range(4):
            query = Query(routing_key=struct.pack('>i', i))
            qplan = list(policy.make_query_plan(query))
            replicas = get_replicas(struct.pack('>i', i))
            # first should be the only local replica
            self.assertIn(qplan[0], replicas)
            self.assertEqual(qplan[0].datacenter, "dc1")
            # then the local non-replica
            self.assertNotIn(qplan[1], replicas)
            self.assertEqual(qplan[1].datacenter, "dc1")
            # then one of the remotes (used_hosts_per_remote_dc is 1, so we
            # shouldn't see two remotes)
            self.assertEqual(qplan[2].datacenter, "dc2")
            self.assertEqual(3, len(qplan))
class ExponentialReconnectionPolicyTest(unittest.TestCase):
    """Tests for ExponentialReconnectionPolicy delay schedules."""

    def test_bad_vals(self):
        """Invalid (base_delay, max_delay) pairs raise ValueError."""
        for base, maximum in ((-1, 0), (0, -1), (9000, 1)):
            self.assertRaises(
                ValueError, ExponentialReconnectionPolicy, base, maximum)

    def test_schedule(self):
        """Delays start at base_delay, double each step, and cap at
        max_delay; the schedule has 64 entries."""
        policy = ExponentialReconnectionPolicy(base_delay=2, max_delay=100)
        schedule = list(policy.new_schedule())
        self.assertEqual(len(schedule), 64)
        previous = None
        for index, delay in enumerate(schedule):
            if index == 0:
                self.assertEqual(delay, 2)
            elif index < 6:
                self.assertEqual(delay, previous * 2)
            else:
                self.assertEqual(delay, 100)
            previous = delay
class RetryPolicyTest(unittest.TestCase):
    """Tests for the default RetryPolicy decisions."""

    def test_read_timeout(self):
        """Read timeouts retry only on the first attempt, and only when
        enough replicas responded but no data was retrieved."""
        policy = RetryPolicy()

        # A second (or later) attempt is always rethrown.
        decision, level = policy.on_read_timeout(
            query=None, consistency="ONE", required_responses=1,
            received_responses=2, data_retrieved=True, retry_num=1)
        self.assertEqual(decision, RetryPolicy.RETHROW)

        # Too few responses received: rethrow.
        decision, level = policy.on_read_timeout(
            query=None, consistency="ONE", required_responses=2,
            received_responses=1, data_retrieved=True, retry_num=0)
        self.assertEqual(decision, RetryPolicy.RETHROW)

        # Enough responses and a data response arrived: rethrow.
        decision, level = policy.on_read_timeout(
            query=None, consistency="ONE", required_responses=2,
            received_responses=2, data_retrieved=True, retry_num=0)
        self.assertEqual(decision, RetryPolicy.RETHROW)

        # Enough responses but no data: retry at the same consistency.
        decision, level = policy.on_read_timeout(
            query=None, consistency="ONE", required_responses=2,
            received_responses=2, data_retrieved=False, retry_num=0)
        self.assertEqual(decision, RetryPolicy.RETRY)
        self.assertEqual(level, "ONE")

    def test_write_timeout(self):
        """Write timeouts retry only BATCH_LOG writes on the first attempt."""
        policy = RetryPolicy()

        # A second (or later) attempt is always rethrown.
        decision, level = policy.on_write_timeout(
            query=None, consistency="ONE", write_type=WriteType.SIMPLE,
            required_responses=1, received_responses=2, retry_num=1)
        self.assertEqual(decision, RetryPolicy.RETHROW)

        # Non-batch-log writes are not retried.
        decision, level = policy.on_write_timeout(
            query=None, consistency="ONE", write_type=WriteType.SIMPLE,
            required_responses=1, received_responses=2, retry_num=0)
        self.assertEqual(decision, RetryPolicy.RETHROW)

        # BATCH_LOG writes are retried regardless of responses received.
        decision, level = policy.on_write_timeout(
            query=None, consistency="ONE", write_type=WriteType.BATCH_LOG,
            required_responses=10000, received_responses=1, retry_num=0)
        self.assertEqual(decision, RetryPolicy.RETRY)
        self.assertEqual(level, "ONE")
class DowngradingConsistencyRetryPolicyTest(unittest.TestCase):
    """Exercise DowngradingConsistencyRetryPolicy, which may retry at a
    consistency level lowered to match the number of live responses."""
    def test_read_timeout(self):
        """Read timeouts: downgrade and retry when enough replicas answered."""
        policy = DowngradingConsistencyRetryPolicy()
        # if this is the second or greater attempt, rethrow
        retry, consistency = policy.on_read_timeout(
            query=None, consistency="ONE", required_responses=1, received_responses=2,
            data_retrieved=True, retry_num=1)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        # if we didn't get enough responses, retry at a lower consistency
        retry, consistency = policy.on_read_timeout(
            query=None, consistency="ONE", required_responses=3, received_responses=2,
            data_retrieved=True, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETRY)
        self.assertEqual(consistency, ConsistencyLevel.TWO)
        # retry consistency level goes down based on the # of recv'd responses
        retry, consistency = policy.on_read_timeout(
            query=None, consistency="ONE", required_responses=3, received_responses=1,
            data_retrieved=True, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETRY)
        self.assertEqual(consistency, ConsistencyLevel.ONE)
        # if we got no responses, rethrow
        retry, consistency = policy.on_read_timeout(
            query=None, consistency="ONE", required_responses=3, received_responses=0,
            data_retrieved=True, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        # if we got enough response but no data, retry
        retry, consistency = policy.on_read_timeout(
            query=None, consistency="ONE", required_responses=3, received_responses=3,
            data_retrieved=False, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETRY)
        # if we got enough responses, but also got a data response, rethrow
        retry, consistency = policy.on_read_timeout(
            query=None, consistency="ONE", required_responses=2, received_responses=2,
            data_retrieved=True, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
    def test_write_timeout(self):
        """Write timeouts: the decision depends on the write type."""
        policy = DowngradingConsistencyRetryPolicy()
        # if this is the second or greater attempt, rethrow
        retry, consistency = policy.on_write_timeout(
            query=None, consistency="ONE", write_type=WriteType.SIMPLE,
            required_responses=1, received_responses=2, retry_num=1)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        # ignore failures on these types of writes
        for write_type in (WriteType.SIMPLE, WriteType.BATCH, WriteType.COUNTER):
            retry, consistency = policy.on_write_timeout(
                query=None, consistency="ONE", write_type=write_type,
                required_responses=1, received_responses=2, retry_num=0)
            self.assertEqual(retry, RetryPolicy.IGNORE)
        # downgrade consistency level on unlogged batch writes
        retry, consistency = policy.on_write_timeout(
            query=None, consistency="ONE", write_type=WriteType.UNLOGGED_BATCH,
            required_responses=3, received_responses=1, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETRY)
        self.assertEqual(consistency, ConsistencyLevel.ONE)
        # retry batch log writes at the same consistency level
        retry, consistency = policy.on_write_timeout(
            query=None, consistency="ONE", write_type=WriteType.BATCH_LOG,
            required_responses=3, received_responses=1, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETRY)
        self.assertEqual(consistency, "ONE")
    def test_unavailable(self):
        """Unavailable errors: retry once at a downgraded consistency."""
        policy = DowngradingConsistencyRetryPolicy()
        # if this is the second or greater attempt, rethrow
        retry, consistency = policy.on_unavailable(
            query=None, consistency="ONE", required_replicas=3, alive_replicas=1, retry_num=1)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        # downgrade consistency on unavailable exceptions
        retry, consistency = policy.on_unavailable(
            query=None, consistency="ONE", required_replicas=3, alive_replicas=1, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETRY)
        self.assertEqual(consistency, ConsistencyLevel.ONE)
| |
#!/usr/bin/env python
import argparse
import csv
import os
import shlex
import shutil
import subprocess
import sys
import numpy
parser = argparse.ArgumentParser(description='''
Part II: Conducting the alignments to the psuedogenomes. Before\
doing this step you will require 1) a bamfile of the unique\
alignments with index 2) a fastq file of the reads mapping to\
more than one location. These files can be obtained using the\
following bowtie options [EXAMPLE: bowtie -S -m 1\
--max multimap.fastq mm9 mate1_reads.fastq] Once you have the\
unique alignment bamfile and the reads mapping to more than one\
location in a fastq file you can run this step. EXAMPLE: python\
master_output.py\
/users/nneretti/data/annotation/hg19/hg19_repeatmasker.txt\
/users/nneretti/datasets/repeatmapping/POL3/Pol3_human/
HeLa_InputChIPseq_Rep1 HeLa_InputChIPseq_Rep1\
/users/nneretti/data/annotation/hg19/setup_folder\
HeLa_InputChIPseq_Rep1_multimap.fastq\
HeLa_InputChIPseq_Rep1.bam''')
parser.add_argument('--version', action='version', version='%(prog)s 0.1')
parser.add_argument('annotation_file', action='store',
metavar='annotation_file',
help='List RepeatMasker.org annotation file for your\
organism. The file may be downloaded from the\
RepeatMasker.org website. Example:\
/data/annotation/hg19/hg19_repeatmasker.txt')
parser.add_argument('outputfolder', action='store', metavar='outputfolder',
help='List folder to contain results.\
Example: /outputfolder')
parser.add_argument('outputprefix', action='store', metavar='outputprefix',
help='Enter prefix name for data.\
Example: HeLa_InputChIPseq_Rep1')
parser.add_argument('setup_folder', action='store', metavar='setup_folder',
help='List folder that contains the repeat element\
pseudogenomes.\
Example: /data/annotation/hg19/setup_folder')
parser.add_argument('fastqfile', action='store', metavar='fastqfile',
help='Enter file for the fastq reads that map to multiple\
locations. Example: /data/multimap.fastq')
parser.add_argument('alignment_bam', action='store', metavar='alignment_bam',
help='Enter bamfile output for reads that map uniquely.\
Example /bamfiles/old.bam')
parser.add_argument('--pairedend', action='store', dest='pairedend',
default='FALSE',
help='Designate this option for paired-end sequencing.\
Default FALSE change to TRUE')
parser.add_argument('--collapserepeat', action='store', dest='collapserepeat',
metavar='collapserepeat', default='Simple_repeat',
help='Designate this option to generate a collapsed repeat\
type. Uncollapsed output is generated in addition to\
collapsed repeat type. Simple_repeat is default to\
simplify downstream analysis. You can change the\
default to another repeat name to collapse a\
seperate specific repeat instead or if the name of\
Simple_repeat is different for your organism.\
Default Simple_repeat')
parser.add_argument('--fastqfile2', action='store', dest='fastqfile2',
metavar='fastqfile2', default='none',
help='Enter fastqfile2 when using paired-end option.\
Default none')
parser.add_argument('--cpus', action='store', dest='cpus', metavar='cpus',
default="1", type=int,
help='Enter available cpus per node. The more cpus the\
faster RepEnrich performs. RepEnrich is designed to\
only work on one node. Default: "1"')
parser.add_argument('--allcountmethod', action='store', dest='allcountmethod',
metavar='allcountmethod', default="FALSE",
help='By default the pipeline only outputs the fraction\
count method. Consdidered to be the best way to\
count multimapped reads. Changing this option will\
include the unique count method, a conservative\
count, and the total count method, a liberal\
counting strategy. Our evaluation of simulated data\
indicated fraction counting is best.\
Default = FALSE, change to TRUE')
parser.add_argument('--is_bed', action='store', dest='is_bed',
metavar='is_bed', default='FALSE',
help='Is the annotation file a bed file.\
This is also a compatible format. The file needs to\
be a tab seperated bed with optional fields.\
Ex. format: chr\tstart\tend\tName_element\tclass\
\tfamily. The class and family should identical to\
name_element if not applicable.\
Default FALSE change to TRUE')
args = parser.parse_args()
# parameters
annotation_file = args.annotation_file
outputfolder = args.outputfolder
outputfile_prefix = args.outputprefix
setup_folder = args.setup_folder
repeat_bed = setup_folder + os.path.sep + 'repnames.bed'
unique_mapper_bam = args.alignment_bam
fastqfile_1 = args.fastqfile
fastqfile_2 = args.fastqfile2
cpus = args.cpus
b_opt = "-k1 -p " + str(1) + " --quiet"
simple_repeat = args.collapserepeat
paired_end = args.pairedend
allcountmethod = args.allcountmethod
is_bed = args.is_bed
##############################################################################
# check that the programs we need (BEDTools' coverageBed and bowtie) are
# available on the PATH before doing any real work
try:
    # Use a single context-managed devnull handle for all probe calls;
    # the original opened four separate handles and never closed them.
    with open(os.devnull, 'wb') as devnull:
        subprocess.call(shlex.split("coverageBed -h"),
                        stdout=devnull, stderr=devnull)
        subprocess.call(shlex.split("bowtie --version"),
                        stdout=devnull, stderr=devnull)
except OSError:
    # subprocess raises OSError when the executable cannot be found
    print("Error: Bowtie or BEDTools not loaded")
    raise
##############################################################################
# define a csv reader that reads space-delimited files
print('Preparing for analysis using RepEnrich...')
# annotation fields can be very long; lift the csv module's field size cap
csv.field_size_limit(sys.maxsize)
def import_text(filename, separator):
    """Yield the non-empty rows of *filename* split on *separator*.

    Rows are parsed with the csv module; ``skipinitialspace=True``
    collapses runs of the separator, which matters for the
    space-delimited RepeatMasker annotation format. Empty lines are
    skipped.
    """
    # Open through a context manager so the handle is closed when the
    # generator is exhausted or garbage-collected (the original version
    # leaked the file object).
    with open(filename) as fhandle:
        for line in csv.reader(fhandle, delimiter=separator,
                               skipinitialspace=True):
            if line:
                yield line
##############################################################################
# build dictionaries to convert repclass and rep families'
# Repeat names are sanitised ("(", ")", "/" -> "_") so they can double as
# file names for the per-repeat pseudogenome files used below.
if is_bed == "FALSE":
    repeatclass = {}
    repeatfamily = {}
    fin = import_text(annotation_file, ' ')
    x = 0
    for line in fin:
        # skip the first three lines (RepeatMasker .txt header)
        if x > 2:
            # column 10 holds "class/family" (family optional); column 9
            # holds the repeat name -- TODO confirm against the exact
            # RepeatMasker file version in use
            classfamily = []
            classfamily = line[10].split(os.path.sep)
            line9 = line[9].replace("(", "_").replace(
                ")", "_").replace("/", "_")
            repeatclass[line9] = classfamily[0]
            if len(classfamily) == 2:
                repeatfamily[line9] = classfamily[1]
            else:
                # no family given: fall back to the class name
                repeatfamily[line9] = classfamily[0]
        x += 1
if is_bed == "TRUE":
    repeatclass = {}
    repeatfamily = {}
    fin = open(annotation_file, 'r')
    for line in fin:
        line = line.strip('\n')
        line = line.split('\t')
        # BED flavour: name, class, family in columns 3, 4 and 5
        theclass = line[4]
        thefamily = line[5]
        line3 = line[3].replace("(", "_").replace(")", "_").replace("/", "_")
        repeatclass[line3] = theclass
        repeatfamily[line3] = thefamily
    fin.close()
##############################################################################
# build list of repeats initializing dictionaries for downstream analysis'
# repgenomes_key.txt maps each repeat name to an integer index; keep both
# directions of the mapping and zero-initialise every count table.
fin = import_text(setup_folder + os.path.sep + 'repgenomes_key.txt', '\t')
repeat_key = {}
rev_repeat_key = {}
repeat_list = []
reptotalcounts = {}
classfractionalcounts = {}
familyfractionalcounts = {}
classtotalcounts = {}
familytotalcounts = {}
reptotalcounts_simple = {}
fractionalcounts = {}
# NOTE(review): 'i' is assigned but never used below
i = 0
for line in fin:
    reptotalcounts[line[0]] = 0
    fractionalcounts[line[0]] = 0
    if line[0] in repeatclass:
        classtotalcounts[repeatclass[line[0]]] = 0
        classfractionalcounts[repeatclass[line[0]]] = 0
    if line[0] in repeatfamily:
        familytotalcounts[repeatfamily[line[0]]] = 0
        familyfractionalcounts[repeatfamily[line[0]]] = 0
    if line[0] in repeatfamily:
        # repeats of the "collapse" family are pooled under a single key
        if repeatfamily[line[0]] == simple_repeat:
            reptotalcounts_simple[simple_repeat] = 0
        else:
            reptotalcounts_simple[line[0]] = 0
    repeat_list.append(line[0])
    repeat_key[line[0]] = int(line[1])
    rev_repeat_key[int(line[1])] = line[0]
# closes the import_text generator (and with it the underlying file)
fin.close()
##############################################################################
# map the repeats to the psuedogenomes:
if not os.path.exists(outputfolder):
    os.mkdir(outputfolder)
##############################################################################
# Conduct the regions sorting
print('Conducting region sorting on unique mapping reads....')
fileout = outputfolder + os.path.sep + outputfile_prefix + '_regionsorter.txt'
with open(fileout, 'w') as stdout:
    # coverageBed reports, per repnames.bed interval, how many unique-mapper
    # reads overlap it; those per-interval counts are summed below.
    command = shlex.split("coverageBed -abam " + unique_mapper_bam + " -b " +
                          setup_folder + os.path.sep + 'repnames.bed')
    p = subprocess.Popen(command, stdout=stdout)
    p.communicate()
counts = {}
sumofrepeatreads = 0
# The original leaked this handle; a with-block closes it deterministically.
with open(fileout, 'r') as filein:
    for line in filein:
        line = line.split('\t')
        # column 3 is the repeat name, column 4 the overlapping read count
        if not str(repeat_key[line[3]]) in counts:
            counts[str(repeat_key[line[3]])] = 0
        counts[str(repeat_key[line[3]])] += int(line[4])
        sumofrepeatreads += int(line[4])
# bug fix: the original message lacked a space ("...123unique reads")
print('Identified ' + str(sumofrepeatreads) +
      ' unique reads that mapped to repeats.')
##############################################################################
if paired_end == 'TRUE':
    if not os.path.exists(outputfolder + os.path.sep + 'pair1_bowtie'):
        os.mkdir(outputfolder + os.path.sep + 'pair1_bowtie')
    if not os.path.exists(outputfolder + os.path.sep + 'pair2_bowtie'):
        os.mkdir(outputfolder + os.path.sep + 'pair2_bowtie')
    folder_pair1 = outputfolder + os.path.sep + 'pair1_bowtie'
    folder_pair2 = outputfolder + os.path.sep + 'pair2_bowtie'
##############################################################################
    print("Processing repeat psuedogenomes...")
    ps = []
    psb = []
    ticker = 0
    for metagenome in repeat_list:
        metagenomepath = setup_folder + os.path.sep + metagenome
        file1 = folder_pair1 + os.path.sep + metagenome + '.bowtie'
        file2 = folder_pair2 + os.path.sep + metagenome + '.bowtie'
        # The handle may be closed right after Popen: the child keeps its
        # own inherited copy of the file descriptor.
        with open(file1, 'w') as stdout:
            command = shlex.split("bowtie " + b_opt + " " +
                                  metagenomepath + " " + fastqfile_1)
            p = subprocess.Popen(command, stdout=stdout)
        with open(file2, 'w') as stdout:
            command = shlex.split("bowtie " + b_opt + " " +
                                  metagenomepath + " " + fastqfile_2)
            pp = subprocess.Popen(command, stdout=stdout)
        ps.append(p)
        ticker += 1
        psb.append(pp)
        ticker += 1
        # bug fix: ticker advances by 2 per metagenome, so the original
        # "== cpus" test never fired for odd cpus values (including the
        # default of 1) and bowtie processes accumulated without bound.
        if ticker >= cpus:
            for p in ps:
                p.communicate()
            for p in psb:
                p.communicate()
            ticker = 0
            psb = []
            ps = []
    # bug fix: wait for leftover processes from BOTH mates before their
    # output files are read below (the original only drained ps, so
    # pair-2 alignments could still be in flight).
    for p in ps:
        p.communicate()
    for p in psb:
        p.communicate()
##############################################################################
    # combine the output from both read pairs:
    print('sorting and combining the output for both read pairs...')
    if not os.path.exists(outputfolder + os.path.sep + 'sorted_bowtie'):
        os.mkdir(outputfolder + os.path.sep + 'sorted_bowtie')
    sorted_bowtie = outputfolder + os.path.sep + 'sorted_bowtie'
    for metagenome in repeat_list:
        file1 = folder_pair1 + os.path.sep + metagenome + '.bowtie'
        file2 = folder_pair2 + os.path.sep + metagenome + '.bowtie'
        fileout = sorted_bowtie + os.path.sep + metagenome + '.bowtie'
        # cat both mates | take the read id | strip any "/1"/"/2" suffix
        # | sort | uniq, so each read id appears at most once per repeat
        with open(fileout, 'w') as stdout:
            p1 = subprocess.Popen(['cat', file1, file2],
                                  stdout=subprocess.PIPE)
            p2 = subprocess.Popen(['cut', '-f1', "-d "], stdin=p1.stdout,
                                  stdout=subprocess.PIPE)
            p3 = subprocess.Popen(['cut', '-f1', "-d/"], stdin=p2.stdout,
                                  stdout=subprocess.PIPE)
            p4 = subprocess.Popen(['sort'], stdin=p3.stdout,
                                  stdout=subprocess.PIPE)
            p5 = subprocess.Popen(['uniq'], stdin=p4.stdout, stdout=stdout)
            p5.communicate()
    print('completed ...')
##############################################################################
if paired_end == 'FALSE':
    if not os.path.exists(outputfolder + os.path.sep + 'pair1_bowtie'):
        os.mkdir(outputfolder + os.path.sep + 'pair1_bowtie')
    folder_pair1 = outputfolder + os.path.sep + 'pair1_bowtie'
##############################################################################
    ps = []
    ticker = 0
    print("Processing repeat psuedogenomes...")
    for metagenome in repeat_list:
        metagenomepath = setup_folder + os.path.sep + metagenome
        file1 = folder_pair1 + os.path.sep + metagenome + '.bowtie'
        # The handle may be closed right after Popen: the child keeps its
        # own inherited copy of the file descriptor.
        with open(file1, 'w') as stdout:
            command = shlex.split("bowtie " + b_opt + " " +
                                  metagenomepath + " " + fastqfile_1)
            p = subprocess.Popen(command, stdout=stdout)
        ps.append(p)
        ticker += 1
        # throttle: keep at most 'cpus' bowtie processes in flight
        # (>= rather than == for robustness, matching the paired branch)
        if ticker >= cpus:
            for p in ps:
                p.communicate()
            ticker = 0
            ps = []
    # wait for any leftover bowtie processes before reading their output
    # (the stale "stdout.close()" calls of the original were dropped: the
    # with-blocks above already closed those handles)
    for p in ps:
        p.communicate()
##############################################################################
    # combine the output from both read pairs:
    print('Sorting and combining the output for both read pairs....')
    if not os.path.exists(outputfolder + os.path.sep + 'sorted_bowtie'):
        os.mkdir(outputfolder + os.path.sep + 'sorted_bowtie')
    sorted_bowtie = outputfolder + os.path.sep + 'sorted_bowtie'
    for metagenome in repeat_list:
        file1 = folder_pair1 + os.path.sep + metagenome + '.bowtie'
        fileout = sorted_bowtie + os.path.sep + metagenome + '.bowtie'
        # read id | strip any "/1" suffix | sort | uniq -> one id per line
        with open(fileout, 'w') as stdout:
            p1 = subprocess.Popen(['cat', file1], stdout=subprocess.PIPE)
            p2 = subprocess.Popen(['cut', '-f1'], stdin=p1.stdout,
                                  stdout=subprocess.PIPE)
            p3 = subprocess.Popen(['cut', '-f1', "-d/"], stdin=p2.stdout,
                                  stdout=subprocess.PIPE)
            p4 = subprocess.Popen(['sort'], stdin=p3.stdout,
                                  stdout=subprocess.PIPE)
            p5 = subprocess.Popen(['uniq'], stdin=p4.stdout, stdout=stdout)
            p5.communicate()
    print('completed ...')
##############################################################################
# build a file of repeat keys for all reads
print('Writing and processing intermediate files...')
sorted_bowtie = outputfolder + os.path.sep + 'sorted_bowtie'
readid = {}
sumofrepeatreads = 0
# first pass: register every multimapping read id that hit any pseudogenome
for rep in repeat_list:
    for data in import_text(sorted_bowtie + os.path.sep +
                            rep + '.bowtie', '\t'):
        readid[data[0]] = ''
# second pass: for each read, concatenate the integer keys of every repeat
# it aligned to (e.g. "3,17,"); this combination is the counting unit below
for rep in repeat_list:
    for data in import_text(sorted_bowtie + os.path.sep
                            + rep + '.bowtie', '\t'):
        readid[data[0]] += str(repeat_key[rep]) + str(',')
# tally how many reads share each repeat-combination signature; 'counts'
# already holds the unique-mapper totals keyed by single repeat index
for subfamilies in readid.values():
    if subfamilies not in counts:
        counts[subfamilies] = 0
    counts[subfamilies] += 1
    sumofrepeatreads += 1
# free the (potentially huge) per-read table
del readid
print('Identified ' + str(sumofrepeatreads) +
      ' reads that mapped to repeats for unique and multimappers.')
##############################################################################
print("Conducting final calculations...")


def convert(x):
    """Translate a comma-separated string of repeat indices (e.g. "3,17,")
    into a string of repeat names joined by os.path.sep (e.g. "/Alu/L1").

    The result is stored in the module-level ``repname`` global, which the
    loops below read after each call; it is also returned so new callers
    do not have to rely on the global. (The old docstring claimed the
    names were separated by a backslash; the separator is os.path.sep.)
    """
    x = x.strip(',')
    x = x.split(',')
    global repname
    repname = ""
    for i in x:
        repname = repname + os.path.sep + rev_repeat_key[int(i)]
    return repname
# building the total counts for repeat element enrichment...
# each signature "i,j,..." contributes its full read count to EVERY repeat
# listed in it (reads are multi-counted here -- the liberal measure)
for x in counts.keys():
    count = counts[x]
    x = x.strip(',')
    x = x.split(',')
    for i in x:
        reptotalcounts[rev_repeat_key[int(i)]] += int(count)
# building the fractional counts for repeat element enrichment...
# here each read is split evenly across the repeats it aligned to
# (count / number-of-repeats), so the totals sum to the read count
for x in counts.keys():
    count = counts[x]
    x = x.strip(',')
    x = x.split(',')
    splits = len(x)
    for i in x:
        fractionalcounts[rev_repeat_key[int(i)]] += float(
            numpy.divide(float(count), float(splits)))
# building categorized table of repeat element enrichment...
repcounts = {}
repcounts['other'] = 0
for key in counts.keys():
    # convert() leaves the "/name1/name2" form in the global 'repname'
    convert(key)
    repcounts[repname] = counts[key]
# building the total counts for class enrichment...
for key in reptotalcounts.keys():
    classtotalcounts[repeatclass[key]] += reptotalcounts[key]
# building total counts for family enrichment...
for key in reptotalcounts.keys():
    familytotalcounts[repeatfamily[key]] += reptotalcounts[key]
# building unique counts table'
# NOTE(review): the literal "/" assumes os.path.sep == '/' to match the
# keys produced by convert(); this would break on Windows -- confirm
repcounts2 = {}
for rep in repeat_list:
    if "/" + rep in repcounts:
        repcounts2[rep] = repcounts["/" + rep]
    else:
        repcounts2[rep] = 0
# building the fractionalcounts counts for class enrichment...
for key in fractionalcounts.keys():
    classfractionalcounts[repeatclass[key]] += fractionalcounts[key]
# building fractional counts for family enrichment...
for key in fractionalcounts.keys():
    familyfractionalcounts[repeatfamily[key]] += fractionalcounts[key]
##############################################################################
print('Writing final output and removing intermediate files...')
# print output to file of the categorized counts and total overlapping counts:
# With --allcountmethod TRUE seven tables are written (total, class-total,
# family-total, unique, class-fraction, family-fraction, fraction);
# otherwise only the three fraction tables are produced.
if allcountmethod == "TRUE":
    fout1 = open(outputfolder + os.path.sep + outputfile_prefix
                 + '_total_counts.txt', 'w')
    for key in reptotalcounts.keys():
        fout1.write(str(key) + '\t' + repeatclass[key] + '\t' +
                    repeatfamily[key] + '\t' + str(reptotalcounts[key])
                    + '\n')
    fout2 = open(outputfolder + os.path.sep + outputfile_prefix
                 + '_class_total_counts.txt', 'w')
    for key in classtotalcounts.keys():
        fout2.write(str(key) + '\t' + str(classtotalcounts[key]) + '\n')
    fout3 = open(outputfolder + os.path.sep + outputfile_prefix
                 + '_family_total_counts.txt', 'w')
    for key in familytotalcounts.keys():
        fout3.write(str(key) + '\t' + str(familytotalcounts[key]) + '\n')
    fout4 = open(outputfolder + os.path.sep + outputfile_prefix +
                 '_unique_counts.txt', 'w')
    for key in repcounts2.keys():
        fout4.write(str(key) + '\t' + repeatclass[key] + '\t' +
                    repeatfamily[key] + '\t' + str(repcounts2[key]) + '\n')
    fout5 = open(outputfolder + os.path.sep + outputfile_prefix
                 + '_class_fraction_counts.txt', 'w')
    for key in classfractionalcounts.keys():
        fout5.write(str(key) + '\t' + str(classfractionalcounts[key]) + '\n')
    fout6 = open(outputfolder + os.path.sep + outputfile_prefix +
                 '_family_fraction_counts.txt', 'w')
    for key in familyfractionalcounts.keys():
        fout6.write(str(key) + '\t' + str(familyfractionalcounts[key]) + '\n')
    fout7 = open(outputfolder + os.path.sep + outputfile_prefix
                 + '_fraction_counts.txt', 'w')
    # per-repeat fractional counts are truncated to integers in this report
    for key in fractionalcounts.keys():
        fout7.write(str(key) + '\t' + repeatclass[key] + '\t' +
                    repeatfamily[key] + '\t' + str(int(fractionalcounts[key]))
                    + '\n')
    fout1.close()
    fout2.close()
    fout3.close()
    fout4.close()
    fout5.close()
    fout6.close()
    fout7.close()
else:
    fout1 = open(outputfolder + os.path.sep + outputfile_prefix +
                 '_class_fraction_counts.txt', 'w')
    for key in classfractionalcounts.keys():
        fout1.write(str(key) + '\t' + str(classfractionalcounts[key]) + '\n')
    fout2 = open(outputfolder + os.path.sep + outputfile_prefix +
                 '_family_fraction_counts.txt', 'w')
    for key in familyfractionalcounts.keys():
        fout2.write(str(key) + '\t' + str(familyfractionalcounts[key]) + '\n')
    fout3 = open(outputfolder + os.path.sep + outputfile_prefix +
                 '_fraction_counts.txt', 'w')
    # per-repeat fractional counts are truncated to integers in this report
    for key in fractionalcounts.keys():
        fout3.write(str(key) + '\t' + repeatclass[key] + '\t' +
                    repeatfamily[key] + '\t' + str(int(fractionalcounts[key]))
                    + '\n')
    fout1.close()
    fout2.close()
    fout3.close()
##############################################################################
# Remove Large intermediate files
regionsorter_file = (outputfolder + os.path.sep + outputfile_prefix +
                     '_regionsorter.txt')
if os.path.exists(regionsorter_file):
    os.remove(regionsorter_file)
# the three bowtie work folders are removed in the same order as before
for subfolder in ('pair1_bowtie', 'pair2_bowtie', 'sorted_bowtie'):
    bowtie_folder = outputfolder + os.path.sep + subfolder
    if os.path.exists(bowtie_folder):
        shutil.rmtree(bowtie_folder)
print("... Done")
| |
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from os.path import join, basename, splitext
from workspace_tools.toolchains import mbedToolchain
from workspace_tools.settings import GCC_ARM_PATH, GCC_CR_PATH, GCC_CS_PATH, CW_EWL_PATH, CW_GCC_PATH
from workspace_tools.settings import GOANNA_PATH
from workspace_tools.hooks import hook_tool
class GCC(mbedToolchain):
    """Base class for the GCC-flavoured mbed toolchains.

    Subclasses differ mainly in installation path and extra flags; this
    class builds the common command lines and parses GCC diagnostics.
    """
    LINKER_EXT = '.ld'
    LIBRARY_EXT = '.a'
    STD_LIB_NAME = "lib%s.a"
    # When True, the libraries are listed twice on the link line to satisfy
    # the mbed-library <-> C-library circular dependency (see link()).
    CIRCULAR_DEPENDENCIES = True
    # Matches the "line:[column:] severity: message" part of GCC output.
    DIAGNOSTIC_PATTERN = re.compile('((?P<line>\d+):)(\d+:)? (?P<severity>warning|error): (?P<message>.+)')
    def __init__(self, target, options=None, notify=None, macros=None, silent=False, tool_path=""):
        """Build the assembler/compiler/linker command lines for *target*;
        *tool_path* points at the GCC installation to use."""
        mbedToolchain.__init__(self, target, options, notify, macros, silent)
        # Map the mbed core name onto GCC's -mcpu spelling.
        if target.core == "Cortex-M0+":
            cpu = "cortex-m0plus"
        elif target.core == "Cortex-M4F":
            cpu = "cortex-m4"
        else:
            cpu = target.core.lower()
        self.cpu = ["-mcpu=%s" % cpu]
        if target.core.startswith("Cortex"):
            self.cpu.append("-mthumb")
        if target.core == "Cortex-M4F":
            self.cpu.append("-mfpu=fpv4-sp-d16")
            self.cpu.append("-mfloat-abi=softfp")
        if target.core == "Cortex-A9":
            self.cpu.append("-mthumb-interwork")
            self.cpu.append("-marm")
            self.cpu.append("-march=armv7-a")
            self.cpu.append("-mfpu=vfpv3-d16")
            self.cpu.append("-mfloat-abi=hard")
            self.cpu.append("-mno-unaligned-access")
        # Note: We are using "-O2" instead of "-Os" to avoid this known GCC bug:
        # http://gcc.gnu.org/bugzilla/show_bug.cgi?id=46762
        common_flags = ["-c", "-Wall", "-Wextra",
            "-Wno-unused-parameter", "-Wno-missing-field-initializers",
            "-fmessage-length=0", "-fno-exceptions", "-fno-builtin",
            "-ffunction-sections", "-fdata-sections",
            "-MMD", "-fno-delete-null-pointer-checks", "-fomit-frame-pointer"
            ] + self.cpu
        if "save-asm" in self.options:
            common_flags.append("-save-temps")
        if "debug-info" in self.options:
            common_flags.append("-g")
            common_flags.append("-O0")
        else:
            common_flags.append("-O2")
        main_cc = join(tool_path, "arm-none-eabi-gcc")
        main_cppc = join(tool_path, "arm-none-eabi-g++")
        self.asm = [main_cc, "-x", "assembler-with-cpp"] + common_flags
        if not "analyze" in self.options:
            self.cc = [main_cc, "-std=gnu99"] + common_flags
            self.cppc =[main_cppc, "-std=gnu++98", "-fno-rtti"] + common_flags
        else:
            # "analyze" option: route compilation through the Goanna
            # static-analysis wrappers instead of calling GCC directly.
            self.cc = [join(GOANNA_PATH, "goannacc"), "--with-cc=" + main_cc.replace('\\', '/'), "-std=gnu99", "--dialect=gnu", '--output-format="%s"' % self.GOANNA_FORMAT] + common_flags
            self.cppc= [join(GOANNA_PATH, "goannac++"), "--with-cxx=" + main_cppc.replace('\\', '/'), "-std=gnu++98", "-fno-rtti", "--dialect=gnu", '--output-format="%s"' % self.GOANNA_FORMAT] + common_flags
        self.ld = [join(tool_path, "arm-none-eabi-gcc"), "-Wl,--gc-sections", "-Wl,--wrap,main"] + self.cpu
        self.sys_libs = ["stdc++", "supc++", "m", "c", "gcc"]
        self.ar = join(tool_path, "arm-none-eabi-ar")
        self.elf2bin = join(tool_path, "arm-none-eabi-objcopy")
    def assemble(self, source, object, includes):
        """Return the assembler command line(s) for one source file."""
        return [self.hook.get_cmdline_assembler(self.asm + ['-D%s' % s for s in self.get_symbols() + self.macros] + ["-I%s" % i for i in includes] + ["-o", object, source])]
    def parse_dependencies(self, dep_path):
        """Parse a GCC -MMD .d file and return the list of dependency paths."""
        dependencies = []
        for line in open(dep_path).readlines()[1:]:
            file = line.replace('\\\n', '').strip()
            if file:
                # GCC might list more than one dependency on a single line, in this case
                # the dependencies are separated by a space. However, a space might also
                # indicate an actual space character in a dependency path, but in this case
                # the space character is prefixed by a backslash.
                # Temporary replace all '\ ' with a special char that is not used (\a in this
                # case) to keep them from being interpreted by 'split' (they will be converted
                # back later to a space char)
                file = file.replace('\\ ', '\a')
                if file.find(" ") == -1:
                    dependencies.append(file.replace('\a', ' '))
                else:
                    dependencies = dependencies + [f.replace('\a', ' ') for f in file.split(" ")]
        return dependencies
    def parse_output(self, output):
        """Scan compiler output and forward diagnostics via cc_info().

        GCC reports a diagnostic across two lines: the first names the
        file ("filepath: ..."), the second carries line/severity/message.
        The WHERE/WHAT state machine below pairs them up.
        """
        # The warning/error notification is multiline
        WHERE, WHAT = 0, 1
        state, file, message = WHERE, None, None
        for line in output.splitlines():
            # Goanna diagnostics are single-line; handle them directly.
            match = self.goanna_parse_line(line)
            if match is not None:
                self.cc_info(
                    match.group('severity').lower(),
                    match.group('file'),
                    match.group('line'),
                    match.group('message'),
                    target_name=self.target.name,
                    toolchain_name=self.name
                )
                continue
            # Each line should start with the file information: "filepath: ..."
            # i should point past the file path; start the search at index 2
            # to avoid matching the drive-letter colon in Windows (C:\)
            i = line.find(':', 2)
            if i == -1: continue
            if state == WHERE:
                file = line[:i]
                message = line[i+1:].strip() + ' '
                state = WHAT
            elif state == WHAT:
                match = GCC.DIAGNOSTIC_PATTERN.match(line[i+1:])
                if match is None:
                    # not a diagnostic continuation; look for a new file line
                    state = WHERE
                    continue
                self.cc_info(
                    match.group('severity'),
                    file, match.group('line'),
                    message + match.group('message')
                )
    def archive(self, objects, lib_path):
        """Create/replace the static library *lib_path* from *objects*."""
        self.default_cmd([self.ar, "rcs", lib_path] + objects)
    def link(self, output, objects, libraries, lib_dirs, mem_map):
        """Link *objects* into *output* using linker script *mem_map*."""
        libs = []
        for l in libraries:
            # strip the "lib" prefix and extension to get the -l name
            name, _ = splitext(basename(l))
            libs.append("-l%s" % name[3:])
        libs.extend(["-l%s" % l for l in self.sys_libs])
        # NOTE: There is a circular dependency between the mbed library and the clib
        # We could define a set of week symbols to satisfy the clib dependencies in "sys.o",
        # but if an application uses only clib symbols and not mbed symbols, then the final
        # image is not correctly retargeted
        if self.CIRCULAR_DEPENDENCIES:
            libs.extend(libs)
        self.default_cmd(self.hook.get_cmdline_linker(self.ld + ["-T%s" % mem_map, "-o", output] +
            objects + ["-L%s" % L for L in lib_dirs] + libs))
    @hook_tool
    def binary(self, resources, elf, bin):
        """Convert the linked ELF image to a raw binary with objcopy."""
        self.default_cmd(self.hook.get_cmdline_binary([self.elf2bin, "-O", "binary", elf, bin]))
class GCC_ARM(GCC):
    """GNU Tools for ARM Embedded flavour of the GCC toolchain."""

    # Targets whose printf/scanf need the float-capable newlib-nano stubs.
    _FLOAT_IO_TARGETS = ("LPC1768", "LPC4088", "LPC4088_DM", "LPC4330",
                         "UBLOX_C027", "LPC2368")

    def __init__(self, target, options=None, notify=None, macros=None, silent=False):
        GCC.__init__(self, target, options, notify, macros, silent, GCC_ARM_PATH)
        # Use latest gcc nanolib
        self.ld.append("--specs=nano.specs")
        if target.name in self._FLOAT_IO_TARGETS:
            self.ld.extend(["-u _printf_float", "-u _scanf_float"])
        elif target.name == "RZ_A1H":
            self.ld.extend(["-u_printf_float", "-u_scanf_float"])
        self.sys_libs.append("nosys")
class GCC_CR(GCC):
    """Code Red (LPCXpresso) flavour of the GCC toolchain."""

    def __init__(self, target, options=None, notify=None, macros=None, silent=False):
        GCC.__init__(self, target, options, notify, macros, silent, GCC_CR_PATH)
        # Code Red builds expect these preprocessor symbols in C and C++.
        for define in ("-D__NEWLIB__", "-D__CODE_RED",
                       "-D__USE_CMSIS", "-DCPP_USE_HEAP"):
            self.cc.append(define)
            self.cppc.append(define)
        # Use latest gcc nanolib
        self.ld.append("--specs=nano.specs")
        float_io_targets = ("LPC1768", "LPC4088", "LPC4088_DM", "LPC4330",
                            "UBLOX_C027", "LPC2368")
        if target.name in float_io_targets:
            self.ld.extend(["-u _printf_float", "-u _scanf_float"])
        self.ld.append("-nostdlib")
class GCC_CS(GCC):
    """CodeSourcery flavour of the GCC toolchain: stock GCC behaviour,
    only the installation path differs."""
    def __init__(self, target, options=None, notify=None, macros=None, silent=False):
        GCC.__init__(self, target, options, notify, macros, silent, GCC_CS_PATH)
class GCC_CW(GCC):
    """Base for the CodeWarrior-bundled GCC toolchains."""
    # Maps a target core onto the EWL library subdirectory used by the
    # GCC_CW_EWL linker setup.
    ARCH_LIB = {
        "Cortex-M0+": "armv6-m",
    }
    def __init__(self, target, options=None, notify=None, macros=None, silent=False):
        GCC.__init__(self, target, options, notify, macros, silent, CW_GCC_PATH)
class GCC_CW_EWL(GCC_CW):
    """CodeWarrior GCC using Freescale's EWL C/C++ standard library."""
    def __init__(self, target, options=None, notify=None, macros=None, silent=False):
        GCC_CW.__init__(self, target, options, notify, macros, silent)
        # Compiler
        # -nostdinc replaces the GNU headers with the EWL ones; the
        # .prefix files are force-included into every translation unit.
        common = [
            '-mfloat-abi=soft',
            '-nostdinc', '-I%s' % join(CW_EWL_PATH, "EWL_C", "include"),
        ]
        self.cc += common + [
            '-include', join(CW_EWL_PATH, "EWL_C", "include", 'lib_c99.prefix')
        ]
        self.cppc += common + [
            '-nostdinc++', '-I%s' % join(CW_EWL_PATH, "EWL_C++", "include"),
            '-include', join(CW_EWL_PATH, "EWL_C++", "include", 'lib_ewl_c++.prefix')
        ]
        # Linker
        # EWL supplies its own runtime, so drop the GNU system libraries
        # and the double-listing workaround used for newlib.
        self.sys_libs = []
        self.CIRCULAR_DEPENDENCIES = False
        self.ld = [join(CW_GCC_PATH, "arm-none-eabi-g++"),
                   "-Xlinker --gc-sections",
                   "-L%s" % join(CW_EWL_PATH, "lib", GCC_CW.ARCH_LIB[target.core]),
                   "-n", "-specs=ewl_c++.specs", "-mfloat-abi=soft",
                   "-Xlinker --undefined=__pformatter_", "-Xlinker --defsym=__pformatter=__pformatter_",
                   "-Xlinker --undefined=__sformatter", "-Xlinker --defsym=__sformatter=__sformatter",
                   ] + self.cpu
class GCC_CW_NEWLIB(GCC_CW):
    """CodeWarrior GCC using the stock newlib C library (no EWL tweaks)."""
    def __init__(self, target, options=None, notify=None, macros=None, silent=False):
        GCC_CW.__init__(self, target, options, notify, macros, silent)
| |
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.utils import six
from djblets.testing.decorators import add_fixtures
from djblets.webapi.errors import PERMISSION_DENIED
from reviewboard.webapi.resources import resources
from reviewboard.webapi.errors import INVALID_USER
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import (user_item_mimetype,
user_list_mimetype)
from reviewboard.webapi.tests.mixins import BasicTestsMetaclass
from reviewboard.webapi.tests.urls import (get_review_group_user_item_url,
get_review_group_user_list_url)
@six.add_metaclass(BasicTestsMetaclass)
class ResourceListTests(BaseWebAPITestCase):
    """Testing the ReviewGroupUserResource list API tests."""

    # BasicTestsMetaclass generates the standard GET/POST test cases from the
    # attributes and setup_basic_* hooks below; only behaviors not covered by
    # the generated tests are written out explicitly here.
    fixtures = ['test_users']
    sample_api_url = 'groups/<name>/users/'
    resource = resources.review_group_user
    basic_post_use_admin = True

    def compare_item(self, item_rsp, user):
        # Field-by-field comparison used by the metaclass-generated tests.
        self.assertEqual(item_rsp['id'], user.pk)
        self.assertEqual(item_rsp['username'], user.username)
        self.assertEqual(item_rsp['first_name'], user.first_name)
        self.assertEqual(item_rsp['last_name'], user.last_name)

    #
    # HTTP GET tests
    #

    def setup_basic_get_test(self, user, with_local_site, local_site_name,
                             populate_items):
        group = self.create_review_group(with_local_site=with_local_site)

        if populate_items:
            items = [
                User.objects.get(username='doc'),
                User.objects.get(username='grumpy'),
            ]
            # NOTE(review): direct many-to-many assignment; Django >= 2.0
            # requires group.users.set(items) -- fine for the Django version
            # this codebase targets, but worth confirming on upgrade.
            group.users = items
        else:
            items = []

        return (get_review_group_user_list_url(group.name, local_site_name),
                user_list_mimetype,
                items)

    def test_get_with_no_access(self):
        """Testing the GET groups/<name>/users/ API
        without access to invite-only group
        """
        group = self.create_review_group(name='priv-group', invite_only=True)

        # An anonymous/non-member request against an invite-only group must
        # be rejected with a PERMISSION_DENIED payload.
        rsp = self.api_get(get_review_group_user_list_url(group.name),
                           expected_status=403)
        self.assertEqual(rsp['stat'], 'fail')
        self.assertEqual(rsp['err']['code'], PERMISSION_DENIED.code)

    #
    # HTTP POST tests
    #

    def setup_basic_post_test(self, user, with_local_site, local_site_name,
                              post_valid_data):
        group = self.create_review_group(with_local_site=with_local_site)

        if post_valid_data:
            post_data = {
                'username': 'doc',
            }
        else:
            post_data = {}

        return (get_review_group_user_list_url(group.name, local_site_name),
                user_item_mimetype,
                post_data,
                [group])

    def check_post_result(self, user, rsp, group):
        # After a successful POST, exactly one user ('doc') is in the group.
        users = list(group.users.all())
        self.assertEqual(len(users), 1)
        self.assertEqual(users[0].username, 'doc')
        self.compare_item(rsp['user'], users[0])

    def test_post_with_no_access(self, local_site=None):
        """Testing the POST groups/<name>/users/ API with Permission Denied"""
        group = self.create_review_group()
        user = User.objects.get(pk=1)

        rsp = self.api_post(
            get_review_group_user_list_url(group.name, local_site),
            {'username': user.username},
            expected_status=403)
        self.assertEqual(rsp['stat'], 'fail')

    def test_post_with_invalid_user(self):
        """Testing the POST groups/<name>/users/ API with invalid user"""
        self._login_user(admin=True)
        group = self.create_review_group()

        # Posting an unknown username must fail with INVALID_USER and leave
        # the group membership untouched.
        rsp = self.api_post(
            get_review_group_user_list_url(group.name),
            {'username': 'grabl'},
            expected_status=400)
        self.assertEqual(rsp['stat'], 'fail')
        self.assertEqual(rsp['err']['code'], INVALID_USER.code)
        self.assertEqual(group.users.count(), 0)

    def test_post_with_self(self):
        """Testing the POST groups/<name>/users/ API
        with the requesting user
        """
        group = self.create_review_group()
        self.assertFalse(self.user.is_superuser)

        # A non-admin user may add themselves to a public group.
        rsp = self.api_post(
            get_review_group_user_list_url(group.name),
            {'username': self.user.username},
            expected_mimetype=user_item_mimetype)
        self.assertEqual(rsp['stat'], 'ok')
        self.assertEqual(group.users.count(), 1)

    def test_post_with_self_and_private_group(self):
        """Testing the POST groups/<name>/users/ API
        with the requesting user and private group
        """
        group = self.create_review_group(invite_only=True)
        self.assertFalse(group.is_accessible_by(self.user))

        # Self-joining an invite-only group is forbidden.
        rsp = self.api_post(
            get_review_group_user_list_url(group.name),
            {'username': self.user.username},
            expected_status=403)
        self.assertEqual(rsp['stat'], 'fail')
        self.assertEqual(group.users.count(), 0)

    @add_fixtures(['test_site'])
    def test_post_with_self_and_site(self):
        """Testing the POST groups/<name>/users/ API
        with the requesting user on a local site
        """
        self.assertFalse(self.user.is_superuser)
        # Membership in the local site is required before joining its groups.
        local_site = self.get_local_site(name=self.local_site_name)
        local_site.users.add(self.user)
        group = self.create_review_group(with_local_site=True)
        self.assertEqual(group.users.count(), 0)

        rsp = self.api_post(
            get_review_group_user_list_url(group.name, self.local_site_name),
            {'username': self.user.username},
            expected_mimetype=user_item_mimetype)
        self.assertEqual(rsp['stat'], 'ok')
        self.assertEqual(group.users.count(), 1)

    @add_fixtures(['test_site'])
    def test_post_with_self_and_unjoined_site(self):
        """Testing the POST groups/<name>/users/ API
        with the requesting user on an unjoined local site
        """
        self.assertFalse(self.user.is_superuser)
        group = self.create_review_group(with_local_site=True)
        self.assertEqual(group.users.count(), 0)

        # The user never joined the local site, so the POST must be denied.
        rsp = self.api_post(
            get_review_group_user_list_url(group.name, self.local_site_name),
            {'username': self.user.username},
            expected_status=403)
        self.assertEqual(rsp['stat'], 'fail')
        self.assertEqual(group.users.count(), 0)
@six.add_metaclass(BasicTestsMetaclass)
class ResourceItemTests(BaseWebAPITestCase):
    """Testing the ReviewGroupUserResource item API tests."""

    # BasicTestsMetaclass generates the standard DELETE/GET test cases from
    # the attributes and setup_basic_* hooks below.
    fixtures = ['test_users']
    sample_api_url = 'groups/<name>/users/<username>/'
    resource = resources.review_group_user
    basic_delete_use_admin = True
    basic_put_use_admin = True

    def setup_http_not_allowed_item_test(self, user):
        # NOTE(review): returns the *list* URL for the not-allowed check;
        # presumably intentional -- verify against the metaclass contract.
        return get_review_group_user_list_url('my-group')

    def compare_item(self, item_rsp, user):
        # Field-by-field comparison used by the metaclass-generated tests.
        self.assertEqual(item_rsp['id'], user.pk)
        self.assertEqual(item_rsp['username'], user.username)
        self.assertEqual(item_rsp['first_name'], user.first_name)
        self.assertEqual(item_rsp['last_name'], user.last_name)

    #
    # HTTP DELETE tests
    #

    def setup_basic_delete_test(self, user, with_local_site, local_site_name):
        group = self.create_review_group(with_local_site=with_local_site)
        doc = User.objects.get(username='doc')
        group.users.add(doc)

        return (get_review_group_user_item_url(group.name, doc.username,
                                               local_site_name),
                [group, doc])

    def check_delete_result(self, user, group, doc):
        # Deletion removes the membership, not the user record.
        self.assertNotIn(doc, group.users.all())

    def test_delete_with_self(self):
        """Testing the DELETE groups/<name>/users/<username>/ API
        with the requesting user
        """
        group = self.create_review_group()
        group.users.add(self.user)

        # A non-admin user may remove themselves from a group.
        self.assertFalse(self.user.is_superuser)
        self.api_delete(
            get_review_group_user_item_url(group.name, self.user.username))
        self.assertEqual(group.users.count(), 0)

    @add_fixtures(['test_site'])
    def test_delete_with_self_with_site(self):
        """Testing the DELETE groups/<name>/users/<username>/ API
        with the requesting user on local site
        """
        self.assertFalse(self.user.is_superuser)
        local_site = self.get_local_site(name=self.local_site_name)
        local_site.users.add(self.user)
        group = self.create_review_group(with_local_site=True)
        group.users.add(self.user)
        self.assertEqual(group.users.count(), 1)

        self.api_delete(
            get_review_group_user_item_url(group.name, self.user.username,
                                           self.local_site_name))
        self.assertEqual(group.users.count(), 0)

    #
    # HTTP GET tests
    #

    def setup_basic_get_test(self, user, with_local_site, local_site_name):
        group = self.create_review_group(with_local_site=with_local_site)
        doc = User.objects.get(username='doc')
        group.users.add(doc)

        return (get_review_group_user_item_url(group.name, doc.username,
                                               local_site_name),
                user_item_mimetype,
                doc)
| |
#!/usr/bin/env python
"""
"**Pycco**" is a Python port of [Docco](http://jashkenas.github.com/docco/):
the original quick-and-dirty, hundred-line-long, literate-programming-style
documentation generator. It produces HTML that displays your comments
alongside your code. Comments are passed through
[Markdown](http://daringfireball.net/projects/markdown/syntax) and
[SmartyPants](http://daringfireball.net/projects/smartypants), while code is
passed through [Pygments](http://pygments.org/) for syntax highlighting.
This page is the result of running Pycco against its own source file.
If you install Pycco, you can run it from the command-line:
pycco src/*.py
This will generate linked HTML documentation for the named source files,
saving it into a `docs` folder by default.
The [source for Pycco](https://github.com/fitzgen/pycco) is available on GitHub,
and released under the MIT license.
To install Pycco, simply
pip install pycco
Or, to install the latest source
git clone git://github.com/fitzgen/pycco.git
cd pycco
python setup.py install
"""
# === Main Documentation Generation Functions ===
def generate_documentation(source, outdir=None, preserve_paths=True,
                           language=None):
    """
    Generate the documentation for a source file by reading it in, splitting it
    up into comment/code sections, highlighting them for the appropriate
    language, and merging them into an HTML template.

    Returns the rendered HTML (UTF-8 encoded, see `generate_html`).
    Raises TypeError if `outdir` is not supplied.
    """
    if not outdir:
        raise TypeError("Missing the required 'outdir' keyword argument.")
    # Use a context manager so the file handle is closed promptly; the
    # original `open(source, "r").read()` leaked the handle until GC.
    with open(source, "r") as f:
        code = f.read()
    language = get_language(source, code, language=language)
    sections = parse(source, code, language)
    highlight(source, sections, language, preserve_paths=preserve_paths, outdir=outdir)
    return generate_html(source, sections, preserve_paths=preserve_paths, outdir=outdir)
def parse(source, code, language):
    """
    Given a string of source code, parse out each comment and the code that
    follows it, and create an individual **section** for it.
    Sections take the form:

        { "docs_text": ...,
          "docs_html": ...,
          "code_text": ...,
          "code_html": ...,
          "num": ...
        }

    (docs_html/code_html/num are filled in later by `highlight`.)
    """
    lines = code.split("\n")
    sections = []
    # has_code doubles as a truthiness flag ("" -> falsy) and is flipped to
    # True once any code line has been seen in the current section.
    has_code = docs_text = code_text = ""

    # Drop a shebang line so it does not end up in the rendered docs.
    if lines[0].startswith("#!"):
        lines.pop(0)

    # For Python, also drop a PEP 263 coding declaration in the first two lines.
    if language["name"] == "python":
        for linenum, line in enumerate(lines[:2]):
            if re.search(r'coding[:=]\s*([-\w.]+)', lines[linenum]):
                lines.pop(linenum)
                break

    def save(docs, code):
        # Append a section, skipping completely empty ones.
        if docs or code:
            sections.append({
                "docs_text": docs,
                "code_text": code
            })

    # Setup the variables to get ready to check for multiline comments
    multi_line = False
    multi_line_delimiters = [language.get("multistart"), language.get("multiend")]

    for line in lines:
        # Only go into multiline comments section when one of the delimiters is
        # found to be at the start of a line
        if all(multi_line_delimiters) and any([line.lstrip().startswith(delim) or line.rstrip().endswith(delim) for delim in multi_line_delimiters]):
            # Toggle the in-multiline-comment state.
            if not multi_line:
                multi_line = True
            else:
                multi_line = False

            # A one-line "/* ... */"-style comment opens and closes on the
            # same line: undo the toggle.
            if (multi_line
                    and line.strip().endswith(language.get("multiend"))
                    and len(line.strip()) > len(language.get("multiend"))):
                multi_line = False

            # Get rid of the delimiters so that they aren't in the final docs
            line = line.replace(language["multistart"], '')
            line = line.replace(language["multiend"], '')
            docs_text += line.strip() + '\n'
            # Remember the comment's indentation so continuation lines can be
            # de-indented by the same amount below.
            indent_level = re.match("\s*", line).group(0)

            if has_code and docs_text.strip():
                # Code preceded this comment: close off that section first.
                save(docs_text, code_text[:-1])
                code_text = code_text.split('\n')[-1]
                has_code = docs_text = ''
        elif multi_line:
            # Inside a multiline comment: strip the opening line's indentation
            # when present, otherwise keep the line as-is.
            if re.match(r' {%d}' % len(indent_level), line):
                docs_text += line[len(indent_level):] + '\n'
            else:
                docs_text += line + '\n'
        elif re.match(language["comment_matcher"], line):
            # Single-line comment: starts a new docs run; flush pending code.
            if has_code:
                save(docs_text, code_text)
                has_code = docs_text = code_text = ''
            docs_text += re.sub(language["comment_matcher"], "", line) + "\n"
        else:
            # A code line. A new class/def/@decorator starts a fresh section,
            # except that decorators stay attached to what follows them.
            if code_text and any([line.lstrip().startswith(x) for x in ['class ', 'def ', '@']]):
                if not code_text.lstrip().startswith("@"):
                    save(docs_text, code_text)
                    code_text = has_code = docs_text = ''

            has_code = True
            code_text += line + '\n'

    # Flush the trailing section.
    save(docs_text, code_text)
    return sections
# === Preprocessing the comments ===
def preprocess(comment, section_nr, preserve_paths=True, outdir=None):
    """
    Add cross-references before having the text processed by markdown. It's
    possible to reference another file, like this : `[[main.py]]` which renders
    [[main.py]]. You can also reference a specific section of another file, like
    this: `[[main.py#highlighting-the-source-code]]` which renders as
    [[main.py#highlighting-the-source-code]]. Sections have to be manually
    declared; they are written on a single line, and surrounded by equals signs:
    `=== like this ===`
    """
    if not outdir:
        raise TypeError("Missing the required 'outdir' keyword argument.")

    def sanitize_section_name(name):
        # "Like This" -> "like-this"; used as the HTML anchor id.
        return "-".join(name.lower().strip().split(" "))

    def replace_crossref(match):
        # Check if the match contains an anchor
        if '#' in match.group(1):
            name, anchor = match.group(1).split('#')
            return " [%s](%s#%s)" % (name,
                                     path.basename(destination(name,
                                                               preserve_paths=preserve_paths,
                                                               outdir=outdir)),
                                     anchor)
        else:
            return " [%s](%s)" % (match.group(1),
                                  path.basename(destination(match.group(1),
                                                            preserve_paths=preserve_paths,
                                                            outdir=outdir)))

    def replace_section_name(match):
        # "=== Title ===" -> markdown heading wrapping an anchored span; one
        # '#' per '=' in the opening run sets the heading level.
        return '%(lvl)s <span id="%(id)s" href="%(id)s">%(name)s</span>' % {
            "lvl": re.sub('=', '#', match.group(1)),
            "id": sanitize_section_name(match.group(2)),
            "name": match.group(2)
        }

    # Section declarations first, then [[...]] cross-references. The leading
    # [^`] in the crossref pattern skips references inside inline code.
    comment = re.sub('^([=]+)([^=]+)[=]*\s*$', replace_section_name, comment)
    comment = re.sub('[^`]\[\[(.+?)\]\]', replace_crossref, comment)

    return comment
# === Highlighting the source code ===
def highlight(source, sections, language, preserve_paths=True, outdir=None):
    """
    Highlights a single chunk of code using the **Pygments** module, and runs
    the text of its corresponding comment through **Markdown**.

    We process the entire file in a single call to Pygments by inserting little
    marker comments between each section and then splitting the result string
    wherever our markers occur.

    Mutates each section in place, adding "code_html", "docs_html" and "num".
    """
    if not outdir:
        raise TypeError("Missing the required 'outdir' keyword argument.")

    output = pygments.highlight(language["divider_text"].join(section["code_text"].rstrip() for section in sections),
                                language["lexer"],
                                formatters.get_formatter_by_name("html"))

    # Strip the single wrapper <div>/<pre> Pygments adds; it is re-added
    # per-section below.
    output = output.replace(highlight_start, "").replace(highlight_end, "")
    fragments = re.split(language["divider_html"], output)
    for i, section in enumerate(sections):
        # shift() returns "" once fragments run out, so every section still
        # gets a (possibly empty) highlighted block.
        section["code_html"] = highlight_start + shift(fragments, "") + highlight_end
        try:
            # Python 2: coerce the docs to unicode before markdown sees them.
            docs_text = unicode(section["docs_text"])
        except UnicodeError:
            docs_text = unicode(section["docs_text"].decode('utf-8'))
        section["docs_html"] = markdown(preprocess(docs_text,
                                                   i,
                                                   preserve_paths=preserve_paths,
                                                   outdir=outdir))
        section["num"] = i
# === HTML Code generation ===
def generate_html(source, sections, preserve_paths=True, outdir=None):
    """
    Once all of the code is finished highlighting, we can generate the HTML file
    and write out the documentation. Pass the completed sections into the
    template found in `resources/pycco.html`.

    Pystache will attempt to recursively render context variables, so we must
    replace any occurrences of `{{`, which is valid in some languages, with a
    "unique enough" identifier before rendering, and then post-process the
    rendered template and change the identifier back to `{{`.

    Returns the rendered page as UTF-8 encoded bytes.
    """
    if not outdir:
        raise TypeError("Missing the required 'outdir' keyword argument")
    title = path.basename(source)
    dest = destination(source, preserve_paths=preserve_paths, outdir=outdir)
    # The stylesheet link must be relative to the generated page's directory.
    csspath = path.relpath(path.join(outdir, "pycco.css"), path.split(dest)[0])

    for sect in sections:
        sect["code_html"] = re.sub(r"\{\{", r"__DOUBLE_OPEN_STACHE__", sect["code_html"])

    rendered = pycco_template({
        "title": title,
        "stylesheet": csspath,
        "sections": sections,
        "source": source,
        "path": path,
        "destination": destination
    })

    # Restore the literal `{{` sequences in the final output.
    return re.sub(r"__DOUBLE_OPEN_STACHE__", "{{", rendered).encode("utf-8")
# === Helpers & Setup ===
# This module contains all of our static resources.
import pycco_resources
# Import our external dependencies.
import optparse
import os
import pygments
import pystache
import re
import sys
import time
from markdown import markdown
from os import path
from pygments import lexers, formatters
# A list of the languages that Pycco supports, mapping the file extension to
# the name of the Pygments lexer and the symbol that indicates a comment. To
# add another language to Pycco's repertoire, add it here.
# "multistart"/"multiend" are the optional block-comment delimiters.
languages = {
    ".coffee": {"name": "coffee-script", "symbol": "#",
                "multistart": '###', "multiend": '###'},
    ".pl": {"name": "perl", "symbol": "#"},
    ".sql": {"name": "sql", "symbol": "--"},
    ".c": {"name": "c", "symbol": "//"},
    ".cpp": {"name": "cpp", "symbol": "//"},
    ".js": {"name": "javascript", "symbol": "//",
            "multistart": "/*", "multiend": "*/"},
    ".rb": {"name": "ruby", "symbol": "#",
            "multistart": "=begin", "multiend": "=end"},
    ".py": {"name": "python", "symbol": "#",
            "multistart": '"""', "multiend": '"""'},
    ".scm": {"name": "scheme", "symbol": ";;",
             "multistart": "#|", "multiend": "|#"},
    ".lua": {"name": "lua", "symbol": "--",
             "multistart": "--[[", "multiend": "--]]"},
    ".erl": {"name": "erlang", "symbol": "%%"},
    ".tcl": {"name": "tcl", "symbol": "#"},
    ".hs": {"name": "haskell", "symbol": "--",
            "multistart": "{-", "multiend": "-}"},
}

# Build out the appropriate matchers and delimiters for each language.
for ext, l in languages.items():
    # Does the line begin with a comment?
    l["comment_matcher"] = re.compile(r"^\s*" + l["symbol"] + "\s?")

    # The dividing token we feed into Pygments, to delimit the boundaries between
    # sections.
    l["divider_text"] = "\n" + l["symbol"] + "DIVIDER\n"

    # The mirror of `divider_text` that we expect Pygments to return. We can split
    # on this to recover the original sections.
    l["divider_html"] = re.compile(r'\n*<span class="c[1]?">' + l["symbol"] + 'DIVIDER</span>\n*')

    # Get the Pygments Lexer for this language.
    l["lexer"] = lexers.get_lexer_by_name(l["name"])
def get_language(source, code, language=None):
    """Get the current language we're documenting, based on the extension.

    A forced `language` name wins; otherwise the file extension is looked up,
    and as a last resort Pygments guesses a lexer from the code itself.
    Raises ValueError when no supported language can be determined.
    """
    if language is not None:
        # Caller forced a language by name: it must be one we support.
        for entry in languages.values():
            if entry["name"] == language:
                return entry
        raise ValueError("Unknown forced language: " + language)

    # Prefer the file extension when we recognise it.
    ext_match = re.match(r'.*(\..+)', os.path.basename(source))
    if ext_match and ext_match.group(1) in languages:
        return languages[ext_match.group(1)]

    # Fall back to Pygments' content-based guess.
    guessed = lexers.guess_lexer(code).name.lower()
    for entry in languages.values():
        if entry["name"] == guessed:
            return entry
    raise ValueError("Can't figure out the language!")
def destination(filepath, preserve_paths=True, outdir=None):
    """
    Compute the destination HTML path for an input source file path. If the
    source is `lib/example.py`, the HTML will be at `docs/example.html`
    (or `docs/lib/example.html` when `preserve_paths` is set).

    Raises TypeError if `outdir` is not supplied.
    """
    dirname, filename = path.split(filepath)
    if not outdir:
        raise TypeError("Missing the required 'outdir' keyword argument.")
    # Strip the final extension ("example.py" -> "example"). re.sub returns
    # the name unchanged when there is no extension, so the previous
    # try/except ValueError around this call was dead code and was removed.
    name = re.sub(r"\.[^.]*$", "", filename)
    if preserve_paths:
        name = path.join(dirname, name)
    return path.join(outdir, "%s.html" % name)
def shift(items, default):
    """
    Shift items off the front of `items` until it is empty, then return
    `default`.

    The parameter was renamed from `list`, which shadowed the builtin; all
    in-module callers pass it positionally.
    """
    try:
        return items.pop(0)
    except IndexError:
        return default
def ensure_directory(directory):
    """Ensure that the destination directory exists."""
    # EAFP: attempt the creation and tolerate "already exists" instead of the
    # previous check-then-create, which raced with concurrent creators.
    try:
        os.makedirs(directory)
    except OSError:
        # Re-raise real failures (permissions, a file in the way); an
        # already-existing directory is fine.
        if not os.path.isdir(directory):
            raise
def template(source):
    # Compile a pystache template string into a callable taking a context dict.
    return lambda context: pystache.render(source, context)

# Create the template that we will use to generate the Pycco HTML page.
pycco_template = template(pycco_resources.html)

# The CSS styles we'd like to apply to the documentation.
pycco_styles = pycco_resources.css

# The start of each Pygments highlight block.
highlight_start = "<div class=\"highlight\"><pre>"

# The end of each Pygments highlight block.
highlight_end = "</pre></div>"
def process(sources, preserve_paths=True, outdir=None, language=None):
    """For each source file passed as argument, generate the documentation.

    Raises TypeError if `outdir` is not supplied.
    """
    if not outdir:
        raise TypeError("Missing the required 'outdir' keyword argument.")
    # Make a copy of sources given on the command line. `main()` needs the
    # original list when monitoring for changed files.
    sources = sorted(sources)

    # Proceed to generating the documentation.
    if sources:
        ensure_directory(outdir)
        css = open(path.join(outdir, "pycco.css"), "w")
        css.write(pycco_styles)
        css.close()

        def next_file():
            # Pop the next source, render it, then recurse until none remain.
            s = sources.pop(0)
            dest = destination(s, preserve_paths=preserve_paths, outdir=outdir)

            try:
                os.makedirs(path.split(dest)[0])
            except OSError:
                # Destination directory already exists.
                pass

            with open(dest, "w") as f:
                f.write(generate_documentation(s, preserve_paths=preserve_paths, outdir=outdir,
                                               language=language))

            # Python 2 print statement (this module predates Python 3).
            print "pycco = %s -> %s" % (s, dest)

            if sources:
                next_file()
        next_file()

# Public API of this module.
__all__ = ("process", "generate_documentation")
def monitor(sources, opts):
    """Monitor each source file and re-generate documentation on change.

    Blocks until interrupted with Ctrl-C. Requires the third-party watchdog
    package (availability is checked by `main()` before calling this).
    """
    # The watchdog modules are imported in `main()` but we need to re-import
    # here to bring them into the local namespace.
    import watchdog.events
    import watchdog.observers

    # Watchdog operates on absolute paths, so map those to original paths
    # as specified on the command line.
    absolute_sources = dict((os.path.abspath(source), source)
                            for source in sources)

    class RegenerateHandler(watchdog.events.FileSystemEventHandler):
        """A handler for recompiling files which triggered watchdog events"""

        def on_modified(self, event):
            """Regenerate documentation for a file which triggered an event"""
            # Re-generate documentation from a source file if it was listed on
            # the command line. Watchdog monitors whole directories, so other
            # files may cause notifications as well.
            if event.src_path in absolute_sources:
                process([absolute_sources[event.src_path]],
                        outdir=opts.outdir,
                        preserve_paths=opts.paths)

    # Set up an observer which monitors all directories for files given on
    # the command line and notifies the handler defined above.
    event_handler = RegenerateHandler()
    observer = watchdog.observers.Observer()
    directories = set(os.path.split(source)[0] for source in sources)
    for directory in directories:
        observer.schedule(event_handler, path=directory)

    # Run the file change monitoring loop until the user hits Ctrl-C.
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
def main():
    """Hook spot for the console script."""
    # NOTE: optparse is deprecated in modern Python in favour of argparse;
    # kept here because this module targets Python 2.
    parser = optparse.OptionParser()
    parser.add_option('-p', '--paths', action='store_true',
                      help='Preserve path structure of original files')
    parser.add_option('-d', '--directory', action='store', type='string',
                      dest='outdir', default='docs',
                      help='The output directory that the rendered files should go to.')
    parser.add_option('-w', '--watch', action='store_true',
                      help='Watch original files and re-generate documentation on changes')
    parser.add_option('-l', '--force-language', action='store', type='string',
                      dest='language', default=None,
                      help='Force the language for the given files')
    opts, sources = parser.parse_args()

    process(sources, outdir=opts.outdir, preserve_paths=opts.paths,
            language=opts.language)

    # If the -w / --watch option was present, monitor the source directories
    # for changes and re-generate documentation for source files whenever they
    # are modified.
    if opts.watch:
        try:
            import watchdog.events
            import watchdog.observers
        except ImportError:
            sys.exit('The -w/--watch option requires the watchdog package.')

        monitor(sources, opts)
# Run the script only when executed directly, not when imported.
if __name__ == "__main__":
    main()
| |
#!/usr/bin/python
#
# DHT11 Sensor Library - Temperature and Humidity
#
# Jason A. Cox, @jasonacox
# https://github.com/jasonacox/SentryPI
import time
import RPi.GPIO as GPIO
class DHT11Result:
    """DHT11 sensor result returned by DHT11.read() method."""

    # Error codes reported through ``error_code``.
    ERR_NO_ERROR = 0
    ERR_MISSING_DATA = 1
    ERR_CRC = 2

    # Class-level defaults; each instance overwrites these in __init__.
    error_code = ERR_NO_ERROR
    temperature = -1
    humidity = -1

    def __init__(self, error_code, temperature, humidity):
        # Capture the outcome of one sensor read cycle.
        self.error_code = error_code
        self.temperature = temperature
        self.humidity = humidity

    def is_valid(self):
        # A reading is usable only when no transfer or CRC error occurred.
        return self.error_code == DHT11Result.ERR_NO_ERROR
class DHT11:
    """DHT11 sensor reader class for Raspberry."""

    # GPIO pin the sensor's data line is wired to (class-level default).
    __pin = 0

    def __init__(self, pin):
        self.__pin = pin

    def read(self):
        """Run one read cycle against the sensor and return a DHT11Result."""
        GPIO.setup(self.__pin, GPIO.OUT)

        # Wake the sensor: hold the line high, then pull it low to request a
        # transfer, then hand the line back as an input with pull-up enabled.
        self.__send_and_sleep(GPIO.HIGH, 0.05)
        self.__send_and_sleep(GPIO.LOW, 0.02)
        GPIO.setup(self.__pin, GPIO.IN, GPIO.PUD_UP)

        # Sample the line, then measure how long every "high" period lasted.
        data = self.__collect_input()
        pull_up_lengths = self.__parse_data_pull_up_lengths(data)

        # A complete frame is exactly 40 bits (4 data bytes + 1 checksum byte).
        if len(pull_up_lengths) != 40:
            return DHT11Result(DHT11Result.ERR_MISSING_DATA, 0, 0)

        # Turn pulse lengths into bits, then pack the bits into bytes.
        bits = self.__calculate_bits(pull_up_lengths)
        the_bytes = self.__bits_to_bytes(bits)

        # Validate the transfer against the sensor-supplied checksum byte.
        checksum = self.__calculate_checksum(the_bytes)
        if the_bytes[4] != checksum:
            return DHT11Result(DHT11Result.ERR_CRC, 0, 0)

        # Byte 2 is reported as temperature and byte 0 as humidity.
        return DHT11Result(DHT11Result.ERR_NO_ERROR, the_bytes[2],
                           the_bytes[0])

    def __send_and_sleep(self, output, sleep):
        # Drive the pin to `output` and hold the level for `sleep` seconds.
        GPIO.output(self.__pin, output)
        time.sleep(sleep)

    def __collect_input(self):
        # Sample the line until it stops changing for `max_unchanged_count`
        # consecutive reads, which marks the end of the transmission.
        max_unchanged_count = 100
        unchanged_count = 0
        last = -1
        data = []
        while True:
            current = GPIO.input(self.__pin)
            data.append(current)
            if last == current:
                unchanged_count += 1
                if unchanged_count > max_unchanged_count:
                    break
            else:
                unchanged_count = 0
                last = current
        return data

    def __parse_data_pull_up_lengths(self, data):
        # State machine over the sampled levels; records the duration of each
        # data "pull up" period (the duration encodes the bit value).
        STATE_INIT_PULL_DOWN = 1
        STATE_INIT_PULL_UP = 2
        STATE_DATA_FIRST_PULL_DOWN = 3
        STATE_DATA_PULL_UP = 4
        STATE_DATA_PULL_DOWN = 5

        state = STATE_INIT_PULL_DOWN
        lengths = []        # durations of the data pull-up periods
        current_length = 0  # duration of the period in progress

        for current in data:
            current_length += 1

            if state == STATE_INIT_PULL_DOWN:
                if current == GPIO.LOW:
                    # Initial pull down seen.
                    state = STATE_INIT_PULL_UP
            elif state == STATE_INIT_PULL_UP:
                if current == GPIO.HIGH:
                    # Initial pull up seen.
                    state = STATE_DATA_FIRST_PULL_DOWN
            elif state == STATE_DATA_FIRST_PULL_DOWN:
                if current == GPIO.LOW:
                    # The next high period will be the first data bit.
                    state = STATE_DATA_PULL_UP
            elif state == STATE_DATA_PULL_UP:
                if current == GPIO.HIGH:
                    # A data bit started; its duration decides 0 vs 1.
                    current_length = 0
                    state = STATE_DATA_PULL_DOWN
            elif state == STATE_DATA_PULL_DOWN:
                if current == GPIO.LOW:
                    # Bit ended; record how long the high period lasted.
                    lengths.append(current_length)
                    state = STATE_DATA_PULL_UP

        return lengths

    def __calculate_bits(self, pull_up_lengths):
        # Threshold halfway between the shortest and longest observed period,
        # with the same bounds as before (1000 above, 0 below): periods longer
        # than halfway are 1-bits, shorter ones are 0-bits.
        shortest_pull_up = min([1000] + pull_up_lengths)
        longest_pull_up = max([0] + pull_up_lengths)
        halfway = shortest_pull_up + (longest_pull_up - shortest_pull_up) / 2

        return [length > halfway for length in pull_up_lengths]

    def __bits_to_bytes(self, bits):
        # Pack the bit stream MSB-first into bytes, emitting one byte per
        # eight bits.
        the_bytes = []
        byte = 0
        for i, bit in enumerate(bits):
            byte = (byte << 1) | (1 if bit else 0)
            if (i + 1) % 8 == 0:
                the_bytes.append(byte)
                byte = 0
        return the_bytes

    def __calculate_checksum(self, the_bytes):
        # Sum of the four data bytes, truncated to one byte. Note `+` binds
        # tighter than `&` in Python, so the original unparenthesised form
        # computed the same value; the parentheses just make that explicit.
        return (the_bytes[0] + the_bytes[1] + the_bytes[2] + the_bytes[3]) & 255
| |
import xml.dom.minidom
import logging
import nltk.tag
import nltk.tokenize
from ternip.timex import add_timex_ids
LOGGER = logging.getLogger(__name__)
class XmlDocument(object):
"""
An abstract base class which all XML types can inherit from. This implements
almost everything, apart from the conversion of timex objects to and from
timex tags in the XML. This is done by child classes
"""
@staticmethod
def _add_words_to_node_from_sents(doc, node, sents, tok_offsets=None):
"""
Uses the given node and adds an XML form of sents to it. The node
passed in should have no children (be an empty element)
"""
# Just add text here, then leave it up to reconcile to add all other
# tags
s_offset = 0
for i in range(len(sents)):
for j in range(len(sents[i])):
(tok, pos, ts) = sents[i][j]
# Do we know what token offsets are in order to reinstate them?
if tok_offsets is not None:
# Add whitespace between tokens if needed
while s_offset < tok_offsets[i][j]:
node.appendChild(doc.createTextNode(' '))
s_offset += 1
# Add the text
node.appendChild(doc.createTextNode(tok))
# If we're not using token offsets, assume a single space is
# what's used, except if this is the last element.
if tok_offsets is None:
if not (i == len(sents) - 1 and j == len(sents[i]) - 1):
node.appendChild(doc.createTextNode(' '))
else:
# Increase our current sentence offset
s_offset += len(tok)
node.normalize()
return node
    @staticmethod
    def create(sents, tok_offsets=None, add_S=False, add_LEX=False, pos_attr=False):
        """
        This is an abstract function for building XML documents from the
        internal representation only. You are not guaranteed to get out of
        get_sents what you put in here. Sentences and words will be retokenised
        and retagged unless you explicitly add S and LEX tags and the POS
        attribute to the document using the optional arguments.

        sents is the [[(word, pos, timexes), ...], ...] format.

        tok_offsets is used to correctly reinsert whitespace lost in
        tokenisation. It's in the format of a list of lists of integers, where
        each integer is the offset from the start of the sentence of that token.
        If set to None (the default), then a single space is assumed between
        all tokens.

        If add_S is set to something other than false, then the tags to indicate
        sentence boundaries are added, with the name of the tag being the value
        of add_S

        add_LEX is similar, but for token boundaries

        pos_attr is similar but refers to the name of the attribute on the LEX
        (or whatever) tag that holds the POS tag.
        """
        # Abstract: concrete XML document types must override this.
        raise NotImplementedError
    def __init__(self, file, nodename=None, has_S=False, has_LEX=False, pos_attr=False):
        """
        Passes in an XML document (as one consecutive string) which is used
        as the basis for this object.

        Alternatively, you can pass in an xml.dom.Document class which means
        that it's not parsed. This is used by the static create function.

        Node name is the name of the "body" of this document to be considered.
        If set to None (it's default), then the root node is considered to be
        the document body.

        has_S means that the document uses XML tags to mark sentence boundaries.
        This defaults to False, but if your XML document does, you should set it
        to the name of your sentence boundary tag (normally 'S').

        has_LEX is similar to has_S, but for token boundaries. Again, set this
        to your tag for token boundaries (not as common, but sometimes it's
        'lex')

        pos_attr is the name of the attribute on your LEX (or whatever) tags
        that indicates the POS tag for that token.

        The tagger needs tokenised sentences and tokenised and POS tagged tokens
        in order to be able to tag. If the input does not supply this data, the
        NLTK is used to fill the blanks. If this input is supplied, it is
        blindly accepted as reasonably sensible. If there are tokens which are
        not annotated (for whatever reason), then alignment between XML nodes
        and the results of the tagging may fail and give undesirable results.
        Similarly, if tokens are embedded inside other tokens, this will also
        error in likely undesirable way, and such a tagging is likely erroneous.
        """
        if isinstance(file, xml.dom.minidom.Document):
            # Already-parsed document (used by the static create function).
            self._xml_doc = file
        else:
            self._xml_doc = xml.dom.minidom.parseString(file)

        if nodename is None:
            self._xml_body = self._xml_doc.documentElement
        else:
            # The named body node must exist exactly once.
            tags = self._xml_doc.getElementsByTagName(nodename)
            if len(tags) != 1:
                # NOTE(review): BadNodeNameError is not defined or imported in
                # this chunk -- presumably defined elsewhere in the module;
                # verify, otherwise this raises NameError instead.
                raise BadNodeNameError()
            self._xml_body = tags[0]

        self._has_S = has_S
        self._has_LEX = has_LEX
        self._pos_attr = pos_attr
def _strip_tags(self, doc, tagname, node):
    """
    Recursively remove a tag from this node.

    Every element named ``tagname`` under ``node`` is replaced by its
    own children (the tag is unwrapped, its content preserved).
    Returns the list of nodes that should replace ``node`` itself in
    the parent: the node's children if the node is a ``tagname``
    element, otherwise ``[node]``.
    """
    # Recursive step - depth-first search
    for child in list(node.childNodes):
        # Get the list of nodes which replace this one (if any)
        rep = self._strip_tags(doc, tagname, child)
        if len(rep) == 1:
            # If it's a single node that's taking the place of this one (e.g.,
            # if there was no change, or a timex tag that only had some text
            # inside it), but only if the node's changed
            if rep[0] is not child:
                node.replaceChild(rep[0], child)
                node.normalize()
        else:
            # There were multiple child nodes, need to insert all of them
            # where in the same location, in order, where their parent
            # node was. Unfortunately replaceChild can't do replacement
            # of a node with multiple nodes.
            before = child.nextSibling
            node.removeChild(child)
            for new_node in rep:
                node.insertBefore(new_node, before)
            node.normalize()
    # Base step: report what should stand in for this node in its parent.
    if node.nodeType == node.ELEMENT_NODE and node.tagName == tagname:
        return [child for child in node.childNodes]
    else:
        return [node]
def strip_tag(self, tagname):
    """
    Remove every ``tagname`` element from the document body, splicing
    each removed tag's children into its place.
    """
    self._strip_tags(self._xml_doc, tagname, self._xml_body)
def strip_timexes(self):
    """
    Strip all timex tags from this document (their text content is
    kept). Useful when evaluating the software: the gold standard can
    be fed in directly and the output compared afterwards.
    """
    self._strip_tags(self._xml_doc, self._timex_tag_name, self._xml_body)
def _get_text_recurse(self, element, until=None):
    """
    Depth-first collection of the text content under ``element``,
    stopping as soon as the node ``until`` is reached.

    Returns ``(keep_going, text)`` where ``keep_going`` is False once
    ``until`` has been encountered.
    """
    if element == until:
        # Reached the stop node: collect nothing more.
        return (False, "")
    if element.nodeType == element.TEXT_NODE:
        # Leaf text node: its data is the contribution, no recursion.
        return (True, element.data)
    # Recursive step: concatenate the children's text in order, bailing
    # out as soon as one of them reports the stop node.
    pieces = []
    for child in element.childNodes:
        (keep_going, fragment) = self._get_text_recurse(child, until)
        pieces.append(fragment)
        if not keep_going:
            return (False, "".join(pieces))
    return (True, "".join(pieces))
def _get_text(self, element, until=None):
    """
    Concatenated text content of ``element``, up until the node
    ``until`` is reached (the whole subtree when ``until`` is None).
    """
    (_, text) = self._get_text_recurse(element, until)
    return text
def _can_align_node_sent(self, node, sent):
    """
    Check whether the tokenised sentence ``sent`` can be aligned with
    the text under ``node``.

    Each token's first character is searched for in order; on a hit the
    cursor advances by the whole token length (tokens are assumed
    contiguous in the text).

    Returns ``(aligned, i, texti)``: ``aligned`` is True when every
    token was found; ``i`` is the index of the failing token (or of the
    last token on success); ``texti`` is the text offset reached.
    """
    text = self._get_text(node)
    texti = 0
    # BUG FIX: 'i' was previously only bound inside the loop, so an
    # empty 'sent' raised UnboundLocalError at the final return. An
    # empty sentence trivially aligns.
    i = 0
    for i in range(len(sent)):
        offset = text.find(sent[i][0][0], texti)
        if offset == -1:
            # This token can't be aligned, so we say we can't align, but do
            # say how many tokens were successfully aligned
            return (False, i, texti)
        else:
            texti = offset + len(sent[i][0])
    return (True, i, texti)
def _split_text_for_S(self, node, sents, s_name, align_point):
    """
    Given a text node, split it up into sentences and insert the
    resulting ``s_name`` elements at the text node's position in the
    parent. ``align_point`` is the offset in ``node.data`` where the
    first sentence ends. Returns the sentences that could not be
    placed inside this text node.
    """
    # Don't include leading whitespace in the tag
    s_start = node.data.find(sents[0][0][0])
    if s_start > 0:
        node.parentNode.insertBefore(self._xml_doc.createTextNode(node.data[:s_start]), node)
    # Create an S tag containing the matched part
    s_tag = self._xml_doc.createElement(s_name)
    s_tag.appendChild(self._xml_doc.createTextNode(node.data[s_start:align_point]))
    # Insert this where this match tag is
    node.parentNode.insertBefore(s_tag, node)
    # If there's still some text left, then create a new text node with
    # what was left, and then insert that where this text node was, and
    # recurse on it to tag any more sentences, if there are any
    if align_point < len(node.data):
        new_child = self._xml_doc.createTextNode(node.data[align_point:])
        node.parentNode.replaceChild(new_child, node)
        if len(sents) > 1:
            (can_align, tok_aligned, text_aligned) = self._can_align_node_sent(new_child, sents[1])
            if can_align:
                # The next sentence also starts in this text node.
                return self._split_text_for_S(new_child, sents[1:], s_name, text_aligned)
            else:
                return sents[1:]
        else:
            return []
    else:
        # The whole text node was consumed by this sentence.
        node.parentNode.removeChild(node)
        return sents[1:]
def _handle_adding_S_tag(self, node, sent, sents, s_tag, s_name):
    """
    Consider one child ``node`` while wrapping sentences in ``s_name``
    tags. ``sent`` is the (possibly partially consumed) current
    sentence, ``sents`` the remaining sentences, and ``s_tag`` the
    currently open sentence element (or None).

    Returns the updated ``(sent, sents, s_tag)`` triple. The exact
    statement order here matters: DOM nodes are moved/replaced as a
    side effect of each branch.
    """
    # If this node contains the entirety of this sentence, and isn't a
    # text node, then recurse on it to break it down
    (can_align, tok_aligned, text_aligned) = self._can_align_node_sent(node, sent)
    if can_align and node.nodeType != node.TEXT_NODE:
        if len(sent) == len(sents[0]):
            # Current sent isn't a partial match, continue as per usual
            sents = self._add_S_tags(node, sents, s_name)
            if len(sents) > 0:
                sent = list(sents[0])
            else:
                return (sent, [], s_tag)
        else:
            # Add, because if this is a partial match but found a full
            # node, it contains the rest of the sentence. Or it's a tag
            # which spans sentence boundaries. The latter is bad.
            s_tag.appendChild(node)
    elif can_align and node.nodeType == node.TEXT_NODE:
        # If this text node does contain the full sentence so far, then
        # break up that text node and add the text between <s> tags as
        # appropriate
        if len(sent) == len(sents[0]):
            sents = self._split_text_for_S(node, sents, s_name, text_aligned)
            if len(sents) > 0:
                sent = list(sents[0])
            else:
                return (sent, [], s_tag)
        else:
            # If we've matched part of a sentence so far, and this
            # text node finishes it off, then break up the text node and
            # add the first bit of it to this node. Then recurse on the
            # rest of it with the remaining sentences
            s_tag.appendChild(self._xml_doc.createTextNode(node.data[:text_aligned]))
            new_child = self._xml_doc.createTextNode(node.data[text_aligned:])
            node.parentNode.replaceChild(new_child, node)
            (can_align, tok_aligned, text_aligned) = self._can_align_node_sent(new_child, sents[1])
            if len(sents) > 1:
                sent = list(sents[1])
                sents = sents[1:]
                if can_align:
                    sents = self._split_text_for_S(new_child, sents, s_name, text_aligned)
                    if len(sents) > 0:
                        sent = list(sents[0])
                    else:
                        return (sent, [], s_tag)
                else:
                    (sent, sents, s_tag) = self._handle_adding_S_tag(new_child, sent, sents, s_tag, s_name)
    else:
        # What we have didn't match the whole sentence, so just add the
        # entire node and then update how little we have left.
        # If this is the first incomplete match we've found (that is,
        # our partial sentence is the same as the full one), then this
        # is a new sentence
        if len(sent) == len(sents[0]):
            s_tag = self._xml_doc.createElement(s_name)
            node.parentNode.insertBefore(s_tag, node)
            if node.nodeType == node.TEXT_NODE:
                # Keep any leading whitespace outside the new S tag.
                s_start = node.data.find(sent[0][0])
                if s_start > 0:
                    s_tag.parentNode.insertBefore(self._xml_doc.createTextNode(node.data[:s_start]), s_tag)
                new_node = self._xml_doc.createTextNode(node.data[s_start:])
                node.parentNode.replaceChild(new_node, node)
                node = new_node
        s_tag.appendChild(node)
        # update our sentence to a partial match
        sent = sent[tok_aligned:]
        return (sent, sents, s_tag)
    return (sent, sents, s_tag)
def _add_S_tags(self, node, sents, s_name):
    """
    Given a node and some sentences, wrap the sentences in elements
    named ``s_name`` so the tags mark sentence boundaries. Returns the
    sentences which could not be placed within this node.
    """
    # Nothing to do for an empty sentence list.
    if not sents:
        return []
    current = list(sents[0])
    open_tag = None
    # Walk a snapshot of the children; the handler mutates the DOM.
    for child in list(node.childNodes):
        (current, sents, open_tag) = self._handle_adding_S_tag(
            child, current, sents, open_tag, s_name)
    return sents
def _add_LEX_tags(self, node, sent, LEX_name):
    """
    Given a node and a sentence, enclose the tokens in that sentence
    with tags called ``LEX_name`` to mark token boundaries. Returns
    the tokens that could not be placed under this node.
    """
    if len(sent) > 0:
        # Drill down until we reach a text node, then align tokens so far in
        # that text node.
        if node.nodeType == node.TEXT_NODE:
            tok = sent[0][0]
            text = self._get_text(node)
            # Align on the token's first character only.
            start = text.find(tok[0])
            # Include any whitespace
            if start == -1:
                # Could not align in this node, so continue
                return sent
            elif start > 0:
                # Keep leading whitespace outside the LEX tag.
                before = self._xml_doc.createTextNode(text[:start])
                node.parentNode.insertBefore(before, node)
            # Now create the LEX tag
            lex_tag = self._xml_doc.createElement(LEX_name)
            lex_tag.appendChild(self._xml_doc.createTextNode(text[start:start + len(tok)]))
            node.parentNode.insertBefore(lex_tag, node)
            # Replace the text node with the list tail
            new_text = self._xml_doc.createTextNode(text[start + len(tok):])
            node.parentNode.replaceChild(new_text, node)
            # Continue adding for the rest of this LEX node
            sent = sent[1:]
            return self._add_LEX_tags(new_text, sent, LEX_name)
        else:
            # Non-text node: recurse into children, consuming tokens.
            for child in list(node.childNodes):
                sent = self._add_LEX_tags(child, sent, LEX_name)
    return sent
def _get_token_extent(self, node, sent):
    """
    Count how many leading tokens of ``sent`` can be aligned, in order,
    with the text contained in ``node``.
    """
    if node.nodeType == node.TEXT_NODE:
        # Align directly against this node's character data.
        aligned = 0
        cursor = 0
        data = node.data
        for (tok, pos, ts) in sent:
            found_at = data.find(tok[0], cursor)
            if found_at < 0:
                return aligned
            aligned += 1
            cursor = found_at + len(tok)
        return aligned
    # Non-text node: sum the extents of the children, consuming the
    # already-aligned prefix of the sentence as we go.
    aligned = 0
    for child in node.childNodes:
        extent = self._get_token_extent(child, sent)
        sent = sent[extent:]
        aligned += extent
    return aligned
def _add_timex_child(self, timex, sent, node, start, end):
    """
    Insert the element for ``timex`` under ``node``, where the timex
    covers tokens ``sent[start:end]``. Text nodes that straddle the
    timex's start or end boundary are split; fully-covered children are
    re-parented inside the new timex element. Raises TokeniseError if
    tokens cannot be re-aligned, NestingError if tagging would produce
    invalid XML nesting.
    """
    i = 0
    timex_tag = None
    for child in list(node.childNodes):
        e = self._get_token_extent(child, sent[i:])
        # Does the timex start inside this child?
        if (i + e) >= start and i <= start and e > 0:
            if child.nodeType == node.TEXT_NODE:
                # get length of bit before TIMEX
                texti = 0
                for (tok, pos, ts) in sent[i:start]:
                    offset = child.data.find(tok[0], texti)
                    if offset == -1:
                        raise TokeniseError('INTERNAL ERROR: Could not align timex start')
                    texti = offset + len(tok)
                # Now whitespace before first token
                texti = child.data.find(sent[start][0][0], texti)
                if texti == -1:
                    # The start of the TIMEX isn't in this text node
                    texti = len(child.data)
                timex_tag = self._xml_doc.createElement(self._timex_tag_name)
                self._annotate_node_from_timex(timex, timex_tag)
                # Found our split point, so now create two nodes
                before_text = self._xml_doc.createTextNode(child.data[:texti])
                new_child = self._xml_doc.createTextNode(child.data[texti:])
                node.insertBefore(before_text, child)
                node.insertBefore(timex_tag, child)
                node.replaceChild(new_child, child)
                child = new_child
                # Re-count: tokens before the split now sit outside.
                i += self._get_token_extent(before_text, sent[i:])
                e = self._get_token_extent(child, sent[i:])
        # This node is completely covered by this TIMEX, so include it
        # inside the TIMEX, unless the timex is non consuming
        if (i + e) <= end and i >= start and not timex.non_consuming and (
                e > 0 or (i > start and (i + e) < end)) and (child.nodeType != node.TEXT_NODE or (i + e) < end):
            if timex_tag is None:
                timex_tag = self._xml_doc.createElement(self._timex_tag_name)
                self._annotate_node_from_timex(timex, timex_tag)
                node.insertBefore(timex_tag, child)
            timex_tag.appendChild(child)
        if ((i + e) > end and i < end and not timex.non_consuming) or \
                ((i + e) == end and i >= start and not timex.non_consuming and e > 0 and child.nodeType == node.TEXT_NODE):
            # This crosses the end boundary, so if our TIMEX consumes text
            # then split the node in half (if it's a text node)
            if child.nodeType == node.TEXT_NODE:
                texti = 0
                for (tok, pos, ts) in sent[i:end]:
                    offset = child.data.find(tok[0], texti)
                    if offset == -1:
                        raise TokeniseError('INTERNAL ERROR: Could not align timex end ' + tok + ' ' + child.data)
                    texti = offset + len(tok)
                # Found our split point, so now create two nodes
                new_child = self._xml_doc.createTextNode(child.data[texti:])
                timex_tag.appendChild(self._xml_doc.createTextNode(child.data[:texti]))
                node.replaceChild(new_child, child)
            else:
                raise NestingError('Can not tag TIMEX (' + str(timex) + ') without causing invalid XML nesting')
        i += e
def _add_timex(self, timex, sent, s_node):
    """
    Add ``timex`` to the DOM under the sentence node ``s_node``, where
    ``sent`` is the sentence as (token, pos, timex-set) triples.
    Recurses into the smallest child that fully contains the timex;
    otherwise tags at this level via _add_timex_child.
    """
    # Find start:end indices for this TIMEX
    start = 0
    end = 0
    t_reached = False
    for (tok, pos, ts) in sent:
        if timex not in ts and not t_reached:
            # Still before the annotated span.
            start += 1
            end += 1
        if timex in ts:
            t_reached = True
            end += 1
    start_extent = 0
    for child in list(s_node.childNodes):
        extent = self._get_token_extent(child, sent[start_extent:])
        end_extent = start_extent + extent
        if start_extent <= start and end_extent >= end:
            # This child can completely contain the TIMEX, so recurse on it
            # unless it's a text node
            if child.nodeType == child.TEXT_NODE:
                self._add_timex_child(timex, sent, s_node, start, end)
                break
            else:
                self._add_timex(timex, sent[start_extent:end_extent], child)
                break
        elif start_extent < start and end_extent < end - 1 and end_extent >= start:
            # This child contains the start of the TIMEX, but can't
            # completely hold it, which must mean the parent node is the
            # highest node which contains the TIMEX
            self._add_timex_child(timex, sent, s_node, start, end)
            break
        start_extent = end_extent
def reconcile(self, sents, add_S=False, add_LEX=False, pos_attr=False):
    """
    Reconciles this document against the new internal representation.

    If add_S is set to anything other than False, tags are added to
    indicate the sentence boundaries, with the tag names being the
    value of add_S. add_LEX is the same, but for marking token
    boundaries, and pos_attr is the name of the attribute which holds
    the POS tag for that token. This is mainly useful for transforming
    the TERN documents into something that GUTime can parse.

    If your document already contains S and LEX tags, and add_S/add_LEX
    is set to add them, old S/LEX tags will be stripped first. If
    pos_attr is set and the attribute name differs from the old POS
    attribute name on the lex tag, then the old attribute will be
    removed.

    Sentence/token boundaries will not be altered in the final document
    unless add_S/add_LEX is set. If you have changed the token
    boundaries in the internal representation from the original form,
    but are not then adding them back in, reconciliation may give
    undefined results.

    There are some inputs which would output invalid XML. For example,
    if this document has elements which span multiple sentences, but
    not whole parts of them, then you will be unable to add XML tags
    and get valid XML, so failure will occur in unexpected ways.

    If you are adding LEX tags, and your XML document contains tags
    internal to tokens, then reconciliation will fail, as it expects
    tokens to be in a continuous piece of whitespace.
    """
    # First, add S tags if need be.
    if add_S:
        # First, strip any old ones
        if self._has_S:
            self._strip_tags(self._xml_doc, self._has_S, self._xml_body)
        # Then add the new ones
        leftover = self._add_S_tags(self._xml_body, sents, add_S)
        # NOTE(review): '> 1' tolerates exactly one unplaced sentence
        # without raising -- looks like it may have been meant as
        # '> 0'; confirm before changing.
        if len(leftover) > 1:
            raise NestingError('Unable to add all S tags, possibly due to bad tag nesting' + str(leftover))
        # Update what we consider to be our S tags
        self._has_S = add_S
    # Now, get a list of the S nodes, which are used to reconcile individual
    # tokens
    if self._has_S:
        s_nodes = self._xml_body.getElementsByTagName(self._has_S)
    else:
        # There are no S tokens in the text. So, going forward, only
        # consider there being one sentence, which belongs to the root node
        s_nodes = [self._xml_body]
        new_sent = []
        for sent in sents:
            for part in sent:
                new_sent.append(part)
        sents = [new_sent]
    # Now, add LEX tags if need be
    if add_LEX:
        # First, strip any old ones
        if self._has_LEX:
            self._strip_tags(self._xml_doc, self._has_LEX, self._xml_body)
        # Now add those LEX tokens
        for i in range(len(sents)):
            self._add_LEX_tags(s_nodes[i], sents[i], add_LEX)
        # Update what we consider to be our LEX tags
        self._has_LEX = add_LEX
    # Now, add the POS attribute
    if pos_attr and self._has_LEX:
        # Get each LEX tag and add the attribute
        for i in range(len(sents)):
            lex_tags = s_nodes[i].getElementsByTagName(self._has_LEX)
            for j in range(len(sents[i])):
                # Strip the existing attribute if need be
                try:
                    lex_tags[j].removeAttribute(self._pos_attr)
                except xml.dom.NotFoundErr:
                    pass
                # Now set the new POS attr
                lex_tags[j].setAttribute(pos_attr, sents[i][j][1])
        # Update what we think is the pos attr
        self._pos_attr = pos_attr
    # Strip old TIMEXes to avoid duplicates
    self.strip_timexes()
    # For XML documents, TIMEXes need unique IDs
    all_ts = set()
    for sent in sents:
        for (tok, pos, ts) in sent:
            for t in ts:
                all_ts.add(t)
    # presumably a module-level helper assigning fresh ids -- not
    # visible in this chunk.
    add_timex_ids(all_ts)
    # Now iterate over each sentence
    for i in range(len(sents)):
        # Get all timexes in this sentence
        timexes = set()
        for (word, pos, ts) in sents[i]:
            for t in ts:
                timexes.add(t)
        # Now, for each timex, add it to the sentence
        for timex in timexes:
            try:
                self._add_timex(timex, sents[i], s_nodes[i])
            except NestingError as e:
                # Best-effort: log and keep going with other timexes.
                LOGGER.exception("Error whilst attempting to add TIMEX")
def _nodes_to_sents(self, node, done_sents, nondone_sents, senti):
    """
    Given a node (which spans multiple sentences), a list of sentences
    which have nodes assigned, and those which don't currently have
    nodes assigned, assign nodes (splitting text nodes where needed) to
    sentences. ``nondone_sents`` holds (sentence-text, node-list)
    pairs; ``senti`` is the character offset already consumed of the
    current sentence. Returns the updated
    (done_sents, nondone_sents, senti).
    """
    # Get next not done sent
    (sent, snodes) = nondone_sents[0]
    # Align start of node with where we care about
    text = self._get_text(node)
    text = text[text.find(sent[senti]):]
    if len(text) > len(sent) - senti and node.nodeType != node.TEXT_NODE:
        # This node is longer than what's remaining in our sentence, so
        # try and find a small enough piece.
        for child in node.childNodes:
            (done_sents, nondone_sents, senti) = self._nodes_to_sents(child, done_sents, nondone_sents, senti)
    elif len(text) > len(sent) - senti and node.nodeType == node.TEXT_NODE:
        # It's a text node! Append the relevant part of this text node to
        # this sent
        snodes.append(self._xml_doc.createTextNode(text[:len(sent) - senti]))
        # Mark this sentence as done, yay!
        done_sents.append(nondone_sents[0])
        nondone_sents = nondone_sents[1:]
        # Now recurse on the next text node
        (done_sents, nondone_sents, senti) = self._nodes_to_sents(
            self._xml_doc.createTextNode(text[len(sent) - senti:]), done_sents, nondone_sents, 0)
    else:
        # This node is shorter or the same length as what's left in this
        # sentence! So we can just add this node
        snodes.append(node)
        nondone_sents[0] = (sent, snodes)
        senti += len(text)
        # Now, if that sentence is complete, then move it from nondone into
        # done
        if senti == len(sent):
            done_sents.append(nondone_sents[0])
            nondone_sents = nondone_sents[1:]
            senti = 0
    return (done_sents, nondone_sents, senti)
def _timex_node_token_align(self, text, sent, tokeni):
    """
    Given a tokenised sentence and some text, with a starting token
    offset, figure out the index of the token just after the last
    token that falls inside this block of text.
    """
    consumed = 0
    for (token, _pos, _timexes) in sent[tokeni:]:
        # Locate the token's first character in the remaining text.
        rel = text[consumed:].find(token[0])
        if rel == -1:
            # Can't align with what's left: the next token must lie
            # beyond this text block.
            break
        # Step past this token (plus any skipped whitespace).
        consumed += rel + len(token)
        tokeni += 1
    return tokeni
def get_sents(self):
    """
    Returns a representation of this document in the
    [[(word, pos, timexes), ...], ...] format.

    Sentence and token boundaries come from the document's S/LEX
    markup when present, otherwise from NLTK tokenisation; POS tags
    come from the LEX pos attribute when present, otherwise from NLTK.

    If there are any TIMEXes in the input document that cross sentence
    boundaries (and the input is not already broken up into sentences
    with the S tag), then those TIMEXes are disregarded.
    """
    # Collect all TIMEXs so we can later find those outside of a sentence
    all_timex_nodes = set()
    all_timexes_by_id = dict()
    all_timexes = []
    # Is this pre-tokenised into sentences?
    if self._has_S:
        # easy
        sents = [(self._get_text(sent), sent) for sent in self._xml_body.getElementsByTagName(self._has_S)]
    else:
        # Get the text, sentence tokenise it and then assign the content
        # nodes of a sentence to that sentence. This is used for identifying
        # LEX tags, if any, and TIMEX tags, if any, later.
        (nodesents, ndsents, i) = self._nodes_to_sents(self._xml_body, [],
            [(sent, []) for sent in nltk.tokenize.sent_tokenize(self._get_text(self._xml_body))], 0)
        if len(ndsents) > 0:
            raise TokeniseError('INTERNAL ERROR: there appears to be sentences not assigned to nodes')
        # Combine contents under a 'virtual' S tag
        sents = []
        for (sent, nodes) in nodesents:
            s_node = self._xml_doc.createElement('s')
            sents.append((sent, s_node))
            for node in nodes:
                # Mark any TIMEX nodes as found before the deep copy
                if node.nodeType == node.ELEMENT_NODE or node.nodeType == node.DOCUMENT_NODE:
                    for timex_tag in node.getElementsByTagName(self._timex_tag_name):
                        all_timex_nodes.add(timex_tag)
                if node.nodeType == node.ELEMENT_NODE:
                    if node.tagName == self._timex_tag_name:
                        all_timex_nodes.add(node)
                # Clone the node to avoid destroying our original document
                # and add it to our virtual S node
                s_node.appendChild(node.cloneNode(True))
    # Is this pre-tokenised into tokens?
    if self._has_LEX:
        # Go through each node, and find the LEX tags in there
        tsents = []
        for (sent, s_node) in sents:
            toks = []
            for node in s_node.childNodes:
                if node.nodeType == node.ELEMENT_NODE and node.tagName == self._has_LEX:
                    # If this is a LEX tag
                    toks.append((self._get_text(node), node))
                elif node.nodeType == node.ELEMENT_NODE or node.nodeType == node.DOCUMENT_NODE:
                    # get any lex tags which are children of this node
                    # and add them
                    for lex in node.getElementsByTagName(self._has_LEX):
                        toks.append((self._get_text(lex), lex))
            tsents.append((toks, s_node))
    else:
        # Don't need to keep nodes this time, so this is easier than
        # sentence tokenisation
        tsents = [([(tok, None) for tok in nltk.tokenize.word_tokenize(sent)], nodes) for (sent, nodes) in sents]
    # Right, now POS tag. If POS is an attribute on the LEX tag, then just
    # use that
    if self._has_LEX and self._pos_attr:
        psents = [([(tok, tag.getAttribute(self._pos_attr)) for (tok, tag) in sent], nodes) for (sent, nodes) in
                  tsents]
    else:
        # use the NLTK
        psents = [([t for t in nltk.tag.pos_tag([s for (s, a) in sent])], nodes) for (sent, nodes) in tsents]
    # Now do timexes - first get all timex tags in a sent
    txsents = []
    for (sent, s_node) in psents:
        txsent = [(t, pos, set()) for (t, pos) in sent]
        # Get all timexes in this sentence
        timex_nodes = s_node.getElementsByTagName(self._timex_tag_name)
        # Now, for each timex tag, create a timex object to
        # represent it
        for timex_node in timex_nodes:
            all_timex_nodes.add(timex_node)
            timex = self._timex_from_node(timex_node)
            # Record a reference to it for resolution of attributes which
            # refer to other references later
            all_timexes_by_id[timex.id] = timex
            all_timexes.append(timex)
            # Now figure out the extent of it
            timex_body = self._get_text(timex_node)
            timex_before = self._get_text(s_node, timex_node)
            # Go through each part of the before text and find the
            # first token in the body of the timex
            tokeni = self._timex_node_token_align(timex_before, txsent, 0)
            # Now we have the start token, find the end token from
            # the body of the timex
            tokenj = self._timex_node_token_align(timex_body, txsent, tokeni)
            # Handle non-consuming TIMEXes
            if tokeni == tokenj:
                timex.non_consuming = True
                txsent[tokeni][2].add(timex)
            else:
                # Okay, now add this timex to the relevant tokens
                for (tok, pos, timexes) in txsent[tokeni:tokenj]:
                    timexes.add(timex)
        txsents.append(txsent)
    # Now get all TIMEX tags which are not inside <s> tags (and assume
    # they're non-consuming)
    for timex_node in self._xml_body.getElementsByTagName(self._timex_tag_name):
        if timex_node not in all_timex_nodes:
            # Found a TIMEX that has not been seen before
            all_timex_nodes.add(timex_node)
            timex = self._timex_from_node(timex_node)
            all_timexes_by_id[timex.id] = timex
            all_timexes.append(timex)
            # Assume it's non-consuming
            timex.non_consuming = True
            # And just add it at the front
            txsents[0][0][2].add(timex)
    # Now resolve any dangling references
    for timex in all_timexes:
        if timex.begin_timex != None:
            timex.begin_timex = all_timexes_by_id[timex.begin_timex]
        if timex.end_timex != None:
            timex.end_timex = all_timexes_by_id[timex.end_timex]
        if timex.context != None:
            timex.context = all_timexes_by_id[timex.context]
    return txsents
def __str__(self):
    """Serialise this document back to an XML string."""
    return self._xml_doc.toxml()
def get_dct_sents(self):
    """
    Returns the creation time sents for this document.

    This generic document class carries no DCT markup, so the result
    is always an empty list.
    """
    return []
def reconcile_dct(self, dct, add_S=False, add_LEX=False, pos_attr=False):
    """
    Adds a TIMEX to the DCT tag and return the DCT.

    No-op in this generic document class: nothing is written back and
    None is returned. Subclasses with a real DCT tag presumably
    override this -- TODO confirm against callers.
    """
    pass
class TokeniseError(Exception):
    """Raised when tokenised sentences cannot be aligned with the XML text."""

    def __init__(self, s):
        # FIX: also hand the message to Exception so that e.args and
        # repr(e) carry it (previously args was always empty).
        Exception.__init__(self, s)
        self._s = s

    def __str__(self):
        return str(self._s)
class NestingError(Exception):
    """Raised when adding a tag would produce invalid XML nesting."""

    def __init__(self, s):
        # FIX: also hand the message to Exception so that e.args and
        # repr(e) carry it (previously args was always empty).
        Exception.__init__(self, s)
        self._s = s

    def __str__(self):
        return str(self._s)
class BadNodeNameError(Exception):
    """Raised when the requested body tag is absent or ambiguous."""

    def __str__(self):
        return ("The specified tag name does not exist"
                " exactly once in the document")
| |
import rope.base.pynames
from rope.base import ast, utils
from rope.refactor.importutils import importinfo
from rope.refactor.importutils import actions
class ModuleImports(object):
    """Model of one module's global import statements.

    Wraps a rope ``pymodule`` and offers operations over its imports:
    removing unused or duplicate imports, adding imports, sorting and
    grouping them, expanding star imports, and regenerating the source.

    NOTE(review): this code uses Python 2 only constructs (``cmp`` and
    cmp-style ``sorted(seq, cmp_func)``) -- it will not run on
    Python 3 as-is.
    """

    def __init__(self, pycore, pymodule, import_filter=None):
        self.pycore = pycore
        self.pymodule = pymodule
        # Blank lines kept between the import block and the body.
        self.separating_lines = 0
        # Optional predicate; imports it rejects become read-only.
        self.filter = import_filter

    @property
    @utils.saveit
    def imports(self):
        """Global import statements of the module (computed once, cached)."""
        finder = _GlobalImportFinder(self.pymodule, self.pycore)
        result = finder.find_import_statements()
        self.separating_lines = finder.get_separating_line_count()
        if self.filter is not None:
            for import_stmt in result:
                if not self.filter(import_stmt):
                    # Filtered-out imports are kept but never modified.
                    import_stmt.readonly = True
        return result

    def _get_unbound_names(self, defined_pyobject):
        # Walk the module AST collecting names that are used but not
        # bound inside defined_pyobject.
        visitor = _GlobalUnboundNameFinder(self.pymodule, defined_pyobject)
        ast.walk(self.pymodule.get_ast(), visitor)
        return visitor.unbound

    def remove_unused_imports(self):
        """Empty out every import that binds no used name."""
        can_select = _OneTimeSelector(self._get_unbound_names(self.pymodule))
        visitor = actions.RemovingVisitor(
            self.pycore, self._current_folder(), can_select)
        for import_statement in self.imports:
            import_statement.accept(visitor)

    def get_used_imports(self, defined_pyobject):
        """Return the non-empty imports actually used by defined_pyobject."""
        result = []
        can_select = _OneTimeSelector(self._get_unbound_names(defined_pyobject))
        visitor = actions.FilteringVisitor(
            self.pycore, self._current_folder(), can_select)
        for import_statement in self.imports:
            new_import = import_statement.accept(visitor)
            if new_import is not None and not new_import.is_empty():
                result.append(new_import)
        return result

    def get_changed_source(self):
        """Return the module source with the edited imports written back."""
        imports = self.imports
        after_removing = self._remove_imports(imports)
        imports = [stmt for stmt in imports
                   if not stmt.import_info.is_empty()]
        first_non_blank = self._first_non_blank_line(after_removing, 0)
        first_import = self._first_import_line() - 1
        result = []
        # Writing module docs
        result.extend(after_removing[first_non_blank:first_import])
        # Writing imports
        # NOTE(review): cmp-style sorted() -- Python 2 only.
        sorted_imports = sorted(imports, self._compare_import_locations)
        for stmt in sorted_imports:
            # NOTE(review): 'start' is computed but never used here.
            start = self._get_import_location(stmt)
            if stmt != sorted_imports[0]:
                result.append('\n' * stmt.blank_lines)
            result.append(stmt.get_import_statement() + '\n')
        if sorted_imports and first_non_blank < len(after_removing):
            result.append('\n' * self.separating_lines)
        # Writing the body
        first_after_imports = self._first_non_blank_line(after_removing,
                                                         first_import)
        result.extend(after_removing[first_after_imports:])
        return ''.join(result)

    def _get_import_location(self, stmt):
        # Prefer the new location when the statement has been moved.
        start = stmt.get_new_start()
        if start is None:
            start = stmt.get_old_location()[0]
        return start

    def _compare_import_locations(self, stmt1, stmt2):
        # cmp-style comparator ordering statements by their location.
        def get_location(stmt):
            if stmt.get_new_start() is not None:
                return stmt.get_new_start()
            else:
                return stmt.get_old_location()[0]
        return cmp(get_location(stmt1), get_location(stmt2))

    def _remove_imports(self, imports):
        # Blank out the lines the imports occupy, keeping the line
        # numbering stable by substituting empty strings.
        lines = self.pymodule.source_code.splitlines(True)
        after_removing = []
        last_index = 0
        for stmt in imports:
            start, end = stmt.get_old_location()
            after_removing.extend(lines[last_index:start - 1])
            last_index = end - 1
            for i in range(start, end):
                after_removing.append('')
        after_removing.extend(lines[last_index:])
        return after_removing

    def _first_non_blank_line(self, lines, lineno):
        # Index of the first non-blank line at or after lineno.
        result = lineno
        for line in lines[lineno:]:
            if line.strip() == '':
                result += 1
            else:
                break
        return result

    def add_import(self, import_info):
        """Merge import_info into an existing statement or append a new one."""
        visitor = actions.AddingVisitor(self.pycore, [import_info])
        for import_statement in self.imports:
            if import_statement.accept(visitor):
                break
        else:
            # No existing statement absorbed it; append a fresh one.
            lineno = self._get_new_import_lineno()
            blanks = self._get_new_import_blanks()
            self.imports.append(importinfo.ImportStatement(
                import_info, lineno, lineno,
                blank_lines=blanks))

    def _get_new_import_blanks(self):
        # New imports get no leading blank lines.
        return 0

    def _get_new_import_lineno(self):
        # Place a new import directly after the last existing one.
        if self.imports:
            return self.imports[-1].end_line
        return 1

    def filter_names(self, can_select):
        """Remove imported names rejected by the can_select predicate."""
        visitor = actions.RemovingVisitor(
            self.pycore, self._current_folder(), can_select)
        for import_statement in self.imports:
            import_statement.accept(visitor)

    def expand_stars(self):
        """Replace star imports with the names actually used."""
        can_select = _OneTimeSelector(self._get_unbound_names(self.pymodule))
        visitor = actions.ExpandStarsVisitor(
            self.pycore, self._current_folder(), can_select)
        for import_statement in self.imports:
            import_statement.accept(visitor)

    def remove_duplicates(self):
        """Empty out statements that duplicate an earlier import."""
        added_imports = []
        for import_stmt in self.imports:
            visitor = actions.AddingVisitor(self.pycore,
                                            [import_stmt.import_info])
            for added_import in added_imports:
                if added_import.accept(visitor):
                    # An earlier import already covers this one.
                    import_stmt.empty_import()
            else:
                # NOTE(review): for/else -- runs whenever the inner loop
                # finishes without break, which is always here (there is
                # no break). Looks suspicious; kept as-is.
                added_imports.append(import_stmt)

    def get_relative_to_absolute_list(self):
        """Collect the relative imports to be rewritten as absolute."""
        visitor = rope.refactor.importutils.actions.RelativeToAbsoluteVisitor(
            self.pycore, self._current_folder())
        for import_stmt in self.imports:
            if not import_stmt.readonly:
                import_stmt.accept(visitor)
        return visitor.to_be_absolute

    def get_self_import_fix_and_rename_list(self):
        """Collect fixes and renames for imports of the module itself."""
        visitor = rope.refactor.importutils.actions.SelfImportVisitor(
            self.pycore, self._current_folder(), self.pymodule.get_resource())
        for import_stmt in self.imports:
            if not import_stmt.readonly:
                import_stmt.accept(visitor)
        return visitor.to_be_fixed, visitor.to_be_renamed

    def _current_folder(self):
        # Folder containing this module's resource.
        return self.pymodule.get_resource().parent

    def sort_imports(self):
        """Group imports (future, standard, third-party, project) and sort."""
        # IDEA: Sort from import list
        visitor = actions.SortingVisitor(self.pycore, self._current_folder())
        for import_statement in self.imports:
            import_statement.accept(visitor)
        # NOTE(review): cmp-style sorted() -- Python 2 only.
        in_projects = sorted(visitor.in_project, self._compare_imports)
        third_party = sorted(visitor.third_party, self._compare_imports)
        standards = sorted(visitor.standard, self._compare_imports)
        future = sorted(visitor.future, self._compare_imports)
        # NOTE(review): 'blank_lines' is assigned but never used.
        blank_lines = 0
        last_index = self._first_import_line()
        last_index = self._move_imports(future, last_index, 0)
        last_index = self._move_imports(standards, last_index, 1)
        last_index = self._move_imports(third_party, last_index, 1)
        last_index = self._move_imports(in_projects, last_index, 1)
        self.separating_lines = 2

    def _first_import_line(self):
        # First line imports may occupy: just after the module
        # docstring, backing up over any blank lines.
        nodes = self.pymodule.get_ast().body
        lineno = 0
        if self.pymodule.get_doc() is not None:
            lineno = 1
        if len(nodes) > lineno:
            lineno = self.pymodule.logical_lines.logical_line_in(
                nodes[lineno].lineno)[0]
        else:
            lineno = self.pymodule.lines.length()
        while lineno > 1:
            line = self.pymodule.lines.get_line(lineno - 1)
            if line.strip() == '':
                lineno -= 1
            else:
                break
        return lineno

    def _compare_imports(self, stmt1, stmt2):
        # cmp-style: plain imports before from-imports, then lexical.
        str1 = stmt1.get_import_statement()
        str2 = stmt2.get_import_statement()
        if str1.startswith('from ') and not str2.startswith('from '):
            return 1
        if not str1.startswith('from ') and str2.startswith('from '):
            return -1
        return cmp(str1, str2)

    def _move_imports(self, imports, index, blank_lines):
        # Move a group onto consecutive lines starting at index; only
        # the first statement carries the group's leading blank lines.
        if imports:
            imports[0].move(index, blank_lines)
            index += 1
            if len(imports) > 1:
                for stmt in imports[1:]:
                    stmt.move(index)
                    index += 1
        return index

    def handle_long_imports(self, maxdots, maxlength):
        """Shorten imports exceeding the dot-count or length limits."""
        visitor = actions.LongImportVisitor(
            self._current_folder(), self.pycore, maxdots, maxlength)
        for import_statement in self.imports:
            if not import_statement.readonly:
                import_statement.accept(visitor)
        for import_info in visitor.new_imports:
            self.add_import(import_info)
        return visitor.to_be_renamed

    def remove_pyname(self, pyname):
        """Removes pyname when imported in ``from mod import x``"""
        visitor = actions.RemovePyNameVisitor(self.pycore, self.pymodule,
                                              pyname, self._current_folder())
        for import_stmt in self.imports:
            import_stmt.accept(visitor)
class _OneTimeSelector(object):
    """Callable predicate that accepts a wanted dotted name at most once.

    A primary is accepted when some dotted prefix of it is in ``names``
    and has not been consumed yet; on acceptance all of its prefixes
    are marked consumed.
    """

    def __init__(self, names):
        self.names = names
        self.selected_names = set()

    def __call__(self, imported_primary):
        if not self._can_name_be_added(imported_primary):
            return False
        # Consume every prefix so the same name is not selected twice.
        for prefix in self._get_dotted_tokens(imported_primary):
            self.selected_names.add(prefix)
        return True

    def _get_dotted_tokens(self, imported_primary):
        # Lazily yield 'a', 'a.b', 'a.b.c', ... for 'a.b.c'.
        parts = imported_primary.split('.')
        return ('.'.join(parts[:k]) for k in range(1, len(parts) + 1))

    def _can_name_be_added(self, imported_primary):
        return any(prefix in self.names and prefix not in self.selected_names
                   for prefix in self._get_dotted_tokens(imported_primary))
class _UnboundNameFinder(object):
    """AST visitor base class recording names that are used but unbound.

    Subclasses implement ``_get_root`` (the root finder holding global
    state), ``is_bound`` and ``add_unbound``.
    """

    def __init__(self, pyobject):
        self.pyobject = pyobject

    def _visit_child_scope(self, node):
        # Resolve the scope this child node opens and walk its children
        # with a scope-local finder chained to this one.
        pyobject = self.pyobject.get_module().get_scope().\
            get_inner_scope_for_line(node.lineno).pyobject
        visitor = _LocalUnboundNameFinder(pyobject, self)
        for child in ast.get_child_nodes(node):
            ast.walk(child, visitor)

    def _FunctionDef(self, node):
        # Function bodies open a new scope.
        self._visit_child_scope(node)

    def _ClassDef(self, node):
        # Class bodies open a new scope.
        self._visit_child_scope(node)

    def _Name(self, node):
        # Plain name: record it if interesting and not bound anywhere
        # up the scope chain.
        if self._get_root()._is_node_interesting(node) and \
           not self.is_bound(node.id):
            self.add_unbound(node.id)

    def _Attribute(self, node):
        # Collapse an attribute chain (a.b.c) into one dotted primary.
        result = []
        while isinstance(node, ast.Attribute):
            result.append(node.attr)
            node = node.value
        if isinstance(node, ast.Name):
            result.append(node.id)
            primary = '.'.join(reversed(result))
            if self._get_root()._is_node_interesting(node) and \
               not self.is_bound(primary):
                self.add_unbound(primary)
        else:
            # The base is an arbitrary expression; keep walking it.
            ast.walk(node, self)

    def _get_root(self):
        # Subclass hook: return the root finder.
        pass

    def is_bound(self, name, propagated=False):
        # Subclass hook: is this name bound in scope?
        pass

    def add_unbound(self, name):
        # Subclass hook: record an unbound name.
        pass
class _GlobalUnboundNameFinder(_UnboundNameFinder):
    """Collect unbound names used inside one scope of a module.

    Only names defined structurally in the module (not merely imported
    ones) count as bound; everything else referenced inside the wanted
    scope's line range ends up in ``self.unbound``.
    """

    def __init__(self, pymodule, wanted_pyobject):
        super(_GlobalUnboundNameFinder, self).__init__(pymodule)
        self.unbound = set()
        self.names = set()
        imported_kinds = (rope.base.pynames.ImportedName,
                          rope.base.pynames.ImportedModule)
        for name, pyname in pymodule._get_structural_attributes().items():
            if not isinstance(pyname, imported_kinds):
                self.names.add(name)
        wanted_scope = wanted_pyobject.get_scope()
        self.start = wanted_scope.get_start()
        self.end = wanted_scope.get_end() + 1

    def _get_root(self):
        return self

    def is_bound(self, primary, propagated=False):
        # At module level only the first dotted component matters.
        return primary.split('.')[0] in self.names

    def add_unbound(self, name):
        # Record every dotted prefix: 'a.b.c' -> 'a', 'a.b', 'a.b.c'.
        parts = name.split('.')
        self.unbound.update('.'.join(parts[:i + 1])
                            for i in range(len(parts)))

    def _is_node_interesting(self, node):
        return self.start <= node.lineno < self.end
class _LocalUnboundNameFinder(_UnboundNameFinder):
    """Unbound-name finder for a nested scope; defers to a parent finder."""

    def __init__(self, pyobject, parent):
        super(_LocalUnboundNameFinder, self).__init__(pyobject)
        self.parent = parent

    def _get_root(self):
        return self.parent._get_root()

    def is_bound(self, primary, propagated=False):
        scope = self.pyobject.get_scope()
        if propagated:
            visible = scope.get_propagated_names()
        else:
            visible = scope.get_names()
        name = primary.split('.')[0]
        # Bound here, or bound anywhere up the enclosing-scope chain
        # (parents are consulted with propagated names only).
        return name in visible or self.parent.is_bound(name, propagated=True)

    def add_unbound(self, name):
        self.parent.add_unbound(name)
class _GlobalImportFinder(object):
def __init__(self, pymodule, pycore):
self.current_folder = None
if pymodule.get_resource():
self.current_folder = pymodule.get_resource().parent
self.pymodule = pymodule
self.pycore = pycore
self.imports = []
self.pymodule = pymodule
self.lines = self.pymodule.lines
def visit_import(self, node, end_line):
start_line = node.lineno
import_statement = importinfo.ImportStatement(
importinfo.NormalImport(self._get_names(node.names)),
start_line, end_line, self._get_text(start_line, end_line),
blank_lines=self._count_empty_lines_before(start_line))
self.imports.append(import_statement)
def _count_empty_lines_before(self, lineno):
result = 0
for current in range(lineno - 1, 0, -1):
line = self.lines.get_line(current)
if line.strip() == '':
result += 1
else:
break
return result
def _count_empty_lines_after(self, lineno):
result = 0
for current in range(lineno + 1, self.lines.length()):
line = self.lines.get_line(current)
if line.strip() == '':
result += 1
else:
break
return result
def get_separating_line_count(self):
if not self.imports:
return 0
return self._count_empty_lines_after(self.imports[-1].end_line - 1)
def _get_text(self, start_line, end_line):
result = []
for index in range(start_line, end_line):
result.append(self.lines.get_line(index))
return '\n'.join(result)
def visit_from(self, node, end_line):
level = 0
if node.level:
level = node.level
import_info = importinfo.FromImport(
node.module, level, self._get_names(node.names))
start_line = node.lineno
self.imports.append(importinfo.ImportStatement(
import_info, node.lineno, end_line,
self._get_text(start_line, end_line),
blank_lines=self._count_empty_lines_before(start_line)))
def _get_names(self, alias_names):
result = []
for alias in alias_names:
result.append((alias.name, alias.asname))
return result
def find_import_statements(self):
nodes = self.pymodule.get_ast().body
for index, node in enumerate(nodes):
if isinstance(node, (ast.Import, ast.ImportFrom)):
lines = self.pymodule.logical_lines
end_line = lines.logical_line_in(node.lineno)[1] + 1
if isinstance(node, ast.Import):
self.visit_import(node, end_line)
if isinstance(node, ast.ImportFrom):
self.visit_from(node, end_line)
return self.imports
| |
import itertools
import logging
import os
import random
import shutil
import subprocess
from collections import defaultdict
from typing import Any, Dict, List, Optional, Tuple, Set, Iterator

import requests
import ujson
from django.conf import settings
from django.utils.timezone import now as timezone_now
from django.forms.models import model_to_dict

from zerver.models import Reaction, RealmEmoji, UserProfile, Recipient, \
    CustomProfileField, CustomProfileFieldValue
from zerver.data_import.slack_message_conversion import convert_to_zulip_markdown, \
    get_user_full_name
from zerver.data_import.import_util import ZerverFieldsT, build_zerver_realm, \
    build_avatar, build_subscription, build_recipient, build_usermessages, \
    build_defaultstream, build_attachment, process_avatars, process_uploads, \
    process_emojis, build_realm, build_stream, build_huddle, build_message, \
    create_converted_data_files, make_subscriber_map
from zerver.data_import.sequencer import NEXT_ID
from zerver.lib.upload import random_name, sanitize_name
from zerver.lib.export import MESSAGE_BATCH_CHUNK_SIZE
from zerver.lib.emoji import NAME_TO_CODEPOINT_PATH
# stubs
# Type aliases for the intermediate lookup tables built while converting
# a Slack export into Zulip's import format (see the conversion
# functions' docstrings below for where each map is produced).
AddedUsersT = Dict[str, int]  # Slack user id -> Zulip user id
AddedChannelsT = Dict[str, Tuple[str, int]]  # channel name -> (Slack channel id, Zulip stream id)
AddedMPIMsT = Dict[str, Tuple[str, int]]  # MPIM name -> (Slack MPIM id, Zulip huddle id)
AddedRecipientsT = Dict[str, int]  # channel/MPIM name -> Zulip recipient id
def rm_tree(path: str) -> None:
    """Delete the directory tree at ``path``; a no-op if it does not exist."""
    if not os.path.exists(path):
        return
    shutil.rmtree(path)
def slack_workspace_to_realm(domain_name: str, realm_id: int, user_list: List[ZerverFieldsT],
                             realm_subdomain: str, slack_data_dir: str,
                             custom_emoji_list: ZerverFieldsT) -> Tuple[ZerverFieldsT, AddedUsersT,
                                                                        AddedRecipientsT,
                                                                        AddedChannelsT,
                                                                        AddedMPIMsT,
                                                                        List[ZerverFieldsT],
                                                                        ZerverFieldsT]:
    """Convert one Slack workspace into Zulip's realm import format.

    Returns:
    1. realm, the converted Realm data
    2. added_users, mapping Slack user id -> Zulip user id
    3. added_recipient, mapping channel name -> Zulip recipient_id
    4. added_channels, mapping channel name -> (Slack channel id, Zulip stream_id)
    5. added_mpims, mapping MPIM name -> (Slack MPIM id, Zulip huddle_id)
    6. avatars, the avatar records destined for records.json
    7. emoji_url_map, mapping emoji name -> its Slack URL
    """
    NOW = float(timezone_now().timestamp())

    zerver_realm = build_zerver_realm(realm_id, realm_subdomain, NOW, 'Slack')  # type: List[ZerverFieldsT]
    realm = build_realm(zerver_realm, realm_id, domain_name)

    zerver_userprofile, avatars, added_users, zerver_customprofilefield, \
        zerver_customprofilefield_value = users_to_zerver_userprofile(
            slack_data_dir, user_list, realm_id, int(NOW), domain_name)

    # Unpack the channel-conversion results by name rather than by index.
    (zerver_defaultstream, zerver_stream, zerver_huddle, added_channels,
     added_mpims, zerver_subscription, zerver_recipient,
     added_recipient) = channels_to_zerver_stream(slack_data_dir, realm_id,
                                                  added_users, zerver_userprofile)

    zerver_realmemoji, emoji_url_map = build_realmemoji(custom_emoji_list, realm_id)

    realm['zerver_realmemoji'] = zerver_realmemoji
    # See https://zulipchat.com/help/set-default-streams-for-new-users
    # for documentation on zerver_defaultstream
    realm['zerver_userprofile'] = zerver_userprofile
    # Custom profile fields
    realm['zerver_customprofilefield'] = zerver_customprofilefield
    realm['zerver_customprofilefieldvalue'] = zerver_customprofilefield_value
    realm['zerver_defaultstream'] = zerver_defaultstream
    realm['zerver_stream'] = zerver_stream
    realm['zerver_huddle'] = zerver_huddle
    realm['zerver_subscription'] = zerver_subscription
    realm['zerver_recipient'] = zerver_recipient

    return realm, added_users, added_recipient, added_channels, added_mpims, avatars, emoji_url_map
def build_realmemoji(custom_emoji_list: ZerverFieldsT,
                     realm_id: int) -> Tuple[List[ZerverFieldsT],
                                             ZerverFieldsT]:
    """Convert Slack custom emoji into zerver_realmemoji rows.

    Returns the realm-emoji rows and a map from emoji name to its Slack
    download URL.
    """
    zerver_realmemoji = []
    emoji_url_map = {}
    emoji_id = 0
    for emoji_name, url in custom_emoji_list.items():
        # Some of the emoji reported by the Slack API have invalid
        # links; only keep those hosted on Slack's emoji CDN.
        if 'emoji.slack-edge.com' not in url:
            continue
        realmemoji = RealmEmoji(
            name=emoji_name,
            id=emoji_id,
            file_name=os.path.basename(url),
            deactivated=False)

        realmemoji_dict = model_to_dict(realmemoji, exclude=['realm', 'author'])
        realmemoji_dict['author'] = None
        realmemoji_dict['realm'] = realm_id

        emoji_url_map[emoji_name] = url
        zerver_realmemoji.append(realmemoji_dict)
        emoji_id += 1
    return zerver_realmemoji, emoji_url_map
def users_to_zerver_userprofile(slack_data_dir: str, users: List[ZerverFieldsT], realm_id: int,
                                timestamp: Any, domain_name: str) -> Tuple[List[ZerverFieldsT],
                                                                           List[ZerverFieldsT],
                                                                           AddedUsersT,
                                                                           List[ZerverFieldsT],
                                                                           List[ZerverFieldsT]]:
    """
    Returns:
    1. zerver_userprofile, which is a list of user profiles
    2. avatar_list, which is a list mapping avatars to zulip avatar records.json
    3. added_users, which is a dictionary to map from slack user id to zulip
       user id
    4. zerver_customprofilefield, which is a list of all custom profile fields
    5. zerver_customprofilefield_values, which is a list of user profile fields
    """
    logging.info('######### IMPORTING USERS STARTED #########\n')
    zerver_userprofile = []
    zerver_customprofilefield = []  # type: List[ZerverFieldsT]
    zerver_customprofilefield_values = []  # type: List[ZerverFieldsT]
    avatar_list = []  # type: List[ZerverFieldsT]
    added_users = {}

    # The user data we get from the slack api does not contain custom profile data
    # Hence we get it from the slack zip file
    slack_data_file_user_list = get_data_file(slack_data_dir + '/users.json')

    # To map user id with the custom profile fields of the corresponding user
    slack_user_custom_field_map = {}  # type: ZerverFieldsT
    # To store custom fields corresponding to their ids
    custom_field_map = {}  # type: ZerverFieldsT

    for user in slack_data_file_user_list:
        process_slack_custom_fields(user, slack_user_custom_field_map)

    # We have only one primary owner in slack, see link
    # https://get.slack.help/hc/en-us/articles/201912948-Owners-and-Administrators
    # This is to import the primary owner first from all the users
    # Zulip user id 0 is reserved for the primary owner; all other users
    # are numbered from 1 upward via user_id_count.
    user_id_count = custom_field_id_count = customprofilefield_id = 0
    primary_owner_id = user_id_count
    user_id_count += 1

    for user in users:
        slack_user_id = user['id']

        if user.get('is_primary_owner', False):
            user_id = primary_owner_id
        else:
            user_id = user_id_count

        # email
        email = get_user_email(user, domain_name)

        # avatar
        # ref: https://chat.zulip.org/help/set-your-profile-picture
        avatar_url = build_avatar_url(slack_user_id, user['team_id'],
                                      user['profile']['avatar_hash'])
        build_avatar(user_id, realm_id, email, avatar_url, timestamp, avatar_list)

        # check if user is the admin
        realm_admin = get_admin(user)

        # timezone
        timezone = get_user_timezone(user)

        # Check for custom profile fields
        if slack_user_id in slack_user_custom_field_map:
            # For processing the fields
            custom_field_map, customprofilefield_id = build_customprofile_field(
                zerver_customprofilefield, slack_user_custom_field_map[slack_user_id],
                customprofilefield_id, realm_id, custom_field_map)
            # Store the custom field values for the corresponding user
            custom_field_id_count = build_customprofilefields_values(
                custom_field_map, slack_user_custom_field_map[slack_user_id], user_id,
                custom_field_id_count, zerver_customprofilefield_values)

        userprofile = UserProfile(
            full_name=get_user_full_name(user),
            short_name=user['name'],
            is_active=not user['deleted'],
            id=user_id,
            email=email,
            delivery_email=email,
            avatar_source='U',
            is_bot=user.get('is_bot', False),
            pointer=-1,
            is_realm_admin=realm_admin,
            bot_type=1 if user.get('is_bot', False) else None,
            date_joined=timestamp,
            timezone=timezone,
            last_login=timestamp)
        userprofile_dict = model_to_dict(userprofile)
        # Set realm id separately as the corresponding realm is not yet a Realm model instance
        userprofile_dict['realm'] = realm_id

        zerver_userprofile.append(userprofile_dict)
        added_users[slack_user_id] = user_id
        # The primary owner reuses the reserved id 0, so the running
        # counter is only advanced for everyone else.
        if not user.get('is_primary_owner', False):
            user_id_count += 1

        logging.info(u"{} -> {}".format(user['name'], userprofile_dict['email']))

    process_customprofilefields(zerver_customprofilefield, zerver_customprofilefield_values)
    logging.info('######### IMPORTING USERS FINISHED #########\n')
    return zerver_userprofile, avatar_list, added_users, zerver_customprofilefield, \
        zerver_customprofilefield_values
def build_customprofile_field(customprofile_field: List[ZerverFieldsT], fields: ZerverFieldsT,
                              customprofilefield_id: int, realm_id: int,
                              custom_field_map: ZerverFieldsT) -> Tuple[ZerverFieldsT, int]:
    """Register any not-yet-seen Slack profile fields as Zulip custom fields.

    Slack only exposes hash keys for its custom fields (see
    https://api.slack.com/methods/users.profile.set), so generated
    fields get a placeholder name.  Returns the updated
    ``custom_field_map`` and the next free custom-profile-field id.
    """
    slack_custom_fields = ['phone', 'skype']
    for field, value in fields.items():
        if field in custom_field_map:
            continue
        if field in slack_custom_fields:
            field_name = field
        else:
            field_name = "slack custom field %s" % (str(customprofilefield_id + 1),)
        customprofilefield = CustomProfileField(
            id=customprofilefield_id,
            name=field_name,
            field_type=1  # For now this is defaulted to 'SHORT_TEXT'
                          # Processing is done in the function 'process_customprofilefields'
        )

        customprofilefield_dict = model_to_dict(customprofilefield,
                                                exclude=['realm'])
        customprofilefield_dict['realm'] = realm_id

        custom_field_map[field] = customprofilefield_id
        customprofilefield_id += 1
        customprofile_field.append(customprofilefield_dict)
    return custom_field_map, customprofilefield_id
def process_slack_custom_fields(user: ZerverFieldsT,
                                slack_user_custom_field_map: ZerverFieldsT) -> None:
    """Record ``user``'s custom profile fields, keyed by Slack user id.

    Slack stores ``phone`` and ``skype`` as plain profile attributes, so
    they are folded into the same ``{'value': ...}`` shape as the rest.
    """
    profile = user['profile']
    custom_fields = profile.get('fields')
    if not custom_fields:
        custom_fields = {}
    # When the profile has a 'fields' dict we deliberately share (not
    # copy) it with the map, so the extra entries below land in both.
    slack_user_custom_field_map[user['id']] = custom_fields

    for builtin_field in ('phone', 'skype'):
        if builtin_field in profile:
            custom_fields[builtin_field] = {'value': profile[builtin_field]}
def build_customprofilefields_values(custom_field_map: ZerverFieldsT, fields: ZerverFieldsT,
                                     user_id: int, custom_field_id: int,
                                     custom_field_values: List[ZerverFieldsT]) -> int:
    """Append one custom-field-value row per non-empty field of one user.

    Returns the next free custom-field-value id.
    """
    next_id = custom_field_id
    for field_key, field_value in fields.items():
        raw_value = field_value['value']
        if raw_value == "":
            # Fields with an empty value get no row at all.
            continue
        row = model_to_dict(
            CustomProfileFieldValue(id=next_id, value=raw_value),
            exclude=['user_profile', 'field'])
        row['user_profile'] = user_id
        row['field'] = custom_field_map[field_key]

        custom_field_values.append(row)
        next_id += 1
    return next_id
def process_customprofilefields(customprofilefield: List[ZerverFieldsT],
                                customprofilefield_value: List[ZerverFieldsT]) -> None:
    """Upgrade a field's type when any of its values exceeds 50 characters."""
    for field in customprofilefield:
        needs_long_text = any(
            value['field'] == field['id'] and len(value['value']) > 50
            for value in customprofilefield_value)
        if needs_long_text:
            field['field_type'] = 2  # corresponding to Long text
def get_user_email(user: ZerverFieldsT, domain_name: str) -> str:
    """Derive an email address for a Slack user.

    Real users have one in their profile; bots and slackbot get a
    synthesized ``...-bot@domain`` address.  Raises AssertionError when
    no address can be determined.
    """
    profile = user['profile']
    if 'email' in profile:
        return profile['email']
    if 'bot_id' in profile:
        if 'real_name_normalized' in profile:
            bot_name = profile['real_name_normalized']
        elif 'first_name' in profile:
            bot_name = profile['first_name']
        else:
            raise AssertionError("Could not identify bot type")
        return bot_name.replace("Bot", "").replace(" ", "") + "-bot@%s" % (domain_name,)
    if get_user_full_name(user).lower() == "slackbot":
        return "imported-slackbot-bot@%s" % (domain_name,)
    raise AssertionError("Could not find email address for Slack user %s" % (user,))
def build_avatar_url(slack_user_id: str, team_id: str, avatar_hash: str) -> str:
    """Compose the Slack CDN URL of a user's avatar image."""
    return "https://ca.slack-edge.com/%s-%s-%s" % (team_id, slack_user_id,
                                                   avatar_hash)
def get_admin(user: ZerverFieldsT) -> bool:
    """Whether this Slack user should become a Zulip realm administrator.

    Slack admins, owners and the primary owner all map to admin.
    """
    return any(user.get(flag, False)
               for flag in ('is_admin', 'is_owner', 'is_primary_owner'))
def get_user_timezone(user: ZerverFieldsT) -> str:
    """Return the user's Slack timezone, falling back to a sane default.

    Anything that is missing, None, or not an Olson-style 'Area/City'
    name gets the default.
    """
    fallback = "America/New_York"
    tz = user.get("tz")
    if tz and '/' in tz:
        return tz
    return fallback
def channels_to_zerver_stream(slack_data_dir: str, realm_id: int, added_users: AddedUsersT,
                              zerver_userprofile: List[ZerverFieldsT]) -> Tuple[List[ZerverFieldsT],
                                                                                List[ZerverFieldsT],
                                                                                List[ZerverFieldsT],
                                                                                AddedChannelsT,
                                                                                AddedMPIMsT,
                                                                                List[ZerverFieldsT],
                                                                                List[ZerverFieldsT],
                                                                                AddedRecipientsT]:
    """
    Returns:
    1. zerver_defaultstream, which is a list of the default streams
    2. zerver_stream, which is a list of all streams
    3. zerver_huddle, which is a list of all huddles
    4. added_channels, which is a dictionary to map from channel name to channel id, zulip stream_id
    5. added_mpims, which is a dictionary to map from MPIM(multiparty IM) name to MPIM id, zulip huddle_id
    6. zerver_subscription, which is a list of the subscriptions
    7. zerver_recipient, which is a list of the recipients
    8. added_recipient, which is a dictionary to map from channel name to zulip recipient_id
    """
    logging.info('######### IMPORTING CHANNELS STARTED #########\n')

    added_channels = {}
    added_mpims = {}
    added_recipient = {}

    zerver_stream = []
    zerver_huddle = []
    zerver_subscription = []  # type: List[ZerverFieldsT]
    zerver_recipient = []
    zerver_defaultstream = []

    # Shared id counters, advanced via ``nonlocal`` by the nested helpers
    # below so streams, huddles, recipients and subscriptions each get
    # unique sequential ids across all channel kinds.
    subscription_id_count = recipient_id_count = 0
    stream_id_count = defaultstream_id = 0
    huddle_id_count = 0

    def process_channels(channels: List[Dict[str, Any]], invite_only: bool=False) -> None:
        # Convert public or private Slack channels into Zulip streams,
        # together with their recipient and subscription rows.
        nonlocal stream_id_count
        nonlocal recipient_id_count
        nonlocal defaultstream_id
        nonlocal subscription_id_count

        for channel in channels:
            # slack_channel_id = channel['id']

            # map Slack's topic and purpose content into Zulip's stream description.
            # WARN This mapping is lossy since the topic.creator, topic.last_set,
            # purpose.creator, purpose.last_set fields are not preserved.
            description = channel["purpose"]["value"]
            stream_id = stream_id_count
            recipient_id = recipient_id_count

            # construct the stream object and append it to zerver_stream
            stream = build_stream(float(channel["created"]), realm_id, channel["name"],
                                  description, stream_id, channel["is_archived"], invite_only)
            zerver_stream.append(stream)

            # construct defaultstream object
            # slack has the default channel 'general' and 'random'
            # where every user is subscribed
            default_channels = ['general', 'random']  # Slack specific
            if channel['name'] in default_channels:
                defaultstream = build_defaultstream(realm_id, stream_id,
                                                    defaultstream_id)
                zerver_defaultstream.append(defaultstream)
                defaultstream_id += 1
            added_channels[stream['name']] = (channel['id'], stream_id)

            recipient = build_recipient(stream_id, recipient_id, Recipient.STREAM)
            zerver_recipient.append(recipient)
            added_recipient[stream['name']] = recipient_id
            # TODO add recipients for private message and huddles

            # construct the subscription object and append it to zerver_subscription
            subscription_id_count = get_subscription(channel['members'], zerver_subscription,
                                                     recipient_id, added_users,
                                                     subscription_id_count)
            # TODO add zerver_subscription which correspond to
            # huddles type recipient
            # For huddles:
            # sub['recipient']=recipient['id'] where recipient['type_id']=added_users[member]

            stream_id_count += 1
            recipient_id_count += 1
            logging.info(u"{} -> created".format(channel['name']))

            # TODO map Slack's pins to Zulip's stars
            # There is the security model that Slack's pins are known to the team owner
            # as evident from where it is stored at (channels)
            # "pins": [
            #     {
            #      "id": "1444755381.000003",
            #      "type": "C",
            #      "user": "U061A5N1G",
            #      "owner": "U061A5N1G",
            #      "created": "1444755463"
            #     }
            # ],

    public_channels = get_data_file(slack_data_dir + '/channels.json')
    process_channels(public_channels)

    try:
        private_channels = get_data_file(slack_data_dir + '/groups.json')
    except FileNotFoundError:
        # Exports without private channels have no groups.json at all.
        private_channels = []
    process_channels(private_channels, True)

    # mpim is the Slack equivalent of huddle.
    def process_mpims(mpims: List[Dict[str, Any]]) -> None:
        # Convert Slack multiparty IMs into Zulip huddles.
        nonlocal huddle_id_count
        nonlocal recipient_id_count
        nonlocal subscription_id_count

        for mpim in mpims:
            huddle = build_huddle(huddle_id_count)
            zerver_huddle.append(huddle)

            added_mpims[mpim['name']] = (mpim['id'], huddle_id_count)

            recipient = build_recipient(huddle_id_count, recipient_id_count, Recipient.HUDDLE)
            zerver_recipient.append(recipient)
            added_recipient[mpim['name']] = recipient_id_count

            subscription_id_count = get_subscription(mpim['members'], zerver_subscription,
                                                     recipient_id_count, added_users,
                                                     subscription_id_count)

            huddle_id_count += 1
            recipient_id_count += 1
            logging.info(u"{} -> created".format(mpim['name']))

    try:
        mpims = get_data_file(slack_data_dir + '/mpims.json')
    except FileNotFoundError:
        # Older exports don't include mpims.json.
        mpims = []
    process_mpims(mpims)

    for user in zerver_userprofile:
        # this maps the recipients and subscriptions
        # related to private messages
        recipient = build_recipient(user['id'], recipient_id_count, Recipient.PERSONAL)
        sub = build_subscription(recipient_id_count, user['id'], subscription_id_count)

        zerver_recipient.append(recipient)
        zerver_subscription.append(sub)

        subscription_id_count += 1
        recipient_id_count += 1

    logging.info('######### IMPORTING STREAMS FINISHED #########\n')
    return zerver_defaultstream, zerver_stream, zerver_huddle, added_channels, added_mpims, \
        zerver_subscription, zerver_recipient, added_recipient
def get_subscription(channel_members: List[str], zerver_subscription: List[ZerverFieldsT],
                     recipient_id: int, added_users: AddedUsersT,
                     subscription_id: int) -> int:
    """Append one subscription row per channel member.

    Returns the next free subscription id.
    """
    for slack_member_id in channel_members:
        zulip_user_id = added_users[slack_member_id]
        # The recipient corresponds to a stream for stream-readable messages.
        zerver_subscription.append(
            build_subscription(recipient_id, zulip_user_id, subscription_id))
        subscription_id += 1
    return subscription_id
def process_long_term_idle_users(slack_data_dir: str, users: List[ZerverFieldsT],
                                 added_users: AddedUsersT, added_channels: AddedChannelsT,
                                 added_mpims: AddedChannelsT,
                                 zerver_userprofile: List[ZerverFieldsT]) -> Set[int]:
    """Algorithmically, we treat users who have sent at least 10 messages
    or have sent a message within the last 60 days as active.
    Everyone else is treated as long-term idle, which means they will
    have a slightly slower first page load when coming back to
    Zulip.

    Returns the set of Zulip user ids marked long-term idle; the
    matching rows of ``zerver_userprofile`` are also updated in place.
    """
    all_messages = get_messages_iterator(slack_data_dir, added_channels, added_mpims)

    sender_counts = defaultdict(int)  # type: Dict[str, int]
    recent_senders = set()  # type: Set[str]
    NOW = float(timezone_now().timestamp())
    # Slack 'ts' values are UNIX timestamps in seconds, so the recency
    # window is 60 days expressed in seconds.  (The previous code
    # compared against a bare 60, i.e. 60 seconds, which contradicted
    # the documented policy above and marked nearly everyone idle.)
    RECENCY_LIMIT_SECONDS = 60 * 24 * 60 * 60
    for message in all_messages:
        timestamp = float(message['ts'])
        slack_user_id = get_message_sending_user(message)
        if not slack_user_id:
            # Ignore messages without user names
            continue

        if slack_user_id in recent_senders:
            continue

        if NOW - timestamp < RECENCY_LIMIT_SECONDS:
            recent_senders.add(slack_user_id)

        sender_counts[slack_user_id] += 1
    for (slack_sender_id, count) in sender_counts.items():
        if count > 10:
            recent_senders.add(slack_sender_id)

    long_term_idle = set()
    for slack_user in users:
        if slack_user["id"] in recent_senders:
            continue
        zulip_user_id = added_users[slack_user['id']]
        long_term_idle.add(zulip_user_id)

    # Record long-term idle status in zerver_userprofile
    for user_profile_row in zerver_userprofile:
        if user_profile_row['id'] in long_term_idle:
            user_profile_row['long_term_idle'] = True
            # Setting last_active_message_id to 1 means the user, if
            # imported, will get the full message history for the
            # streams they were on.
            user_profile_row['last_active_message_id'] = 1

    return long_term_idle
def convert_slack_workspace_messages(slack_data_dir: str, users: List[ZerverFieldsT], realm_id: int,
                                     added_users: AddedUsersT, added_recipient: AddedRecipientsT,
                                     added_channels: AddedChannelsT,
                                     added_mpims: AddedMPIMsT,
                                     realm: ZerverFieldsT,
                                     zerver_userprofile: List[ZerverFieldsT],
                                     zerver_realmemoji: List[ZerverFieldsT], domain_name: str,
                                     output_dir: str,
                                     chunk_size: int=MESSAGE_BATCH_CHUNK_SIZE) -> Tuple[List[ZerverFieldsT],
                                                                                        List[ZerverFieldsT],
                                                                                        List[ZerverFieldsT]]:
    """Convert all Slack messages and write them out in batched files.

    Returns:
    1. reactions, which is a list of the reactions
    2. uploads, which is a list of uploads to be mapped in uploads records.json
    3. attachment, which is a list of the attachments
    """
    long_term_idle = process_long_term_idle_users(slack_data_dir, users, added_users,
                                                 added_channels, added_mpims, zerver_userprofile)

    # Now, we actually import the messages.
    all_messages = get_messages_iterator(slack_data_dir, added_channels, added_mpims)
    logging.info('######### IMPORTING MESSAGES STARTED #########\n')

    total_reactions = []  # type: List[ZerverFieldsT]
    total_attachments = []  # type: List[ZerverFieldsT]
    total_uploads = []  # type: List[ZerverFieldsT]

    # The messages are stored in batches of at most chunk_size, so
    # arbitrarily large imports never hold every message in memory.
    dump_file_id = 1

    subscriber_map = make_subscriber_map(
        zerver_subscription=realm['zerver_subscription'],
    )

    while True:
        # islice pulls at most chunk_size messages off the shared
        # iterator without materializing the rest of the history.
        message_data = list(itertools.islice(all_messages, chunk_size))
        if not message_data:
            break

        zerver_message, zerver_usermessage, attachment, uploads, reactions = \
            channel_message_to_zerver_message(
                realm_id, users, added_users, added_recipient, message_data,
                zerver_realmemoji, subscriber_map, added_channels,
                domain_name, long_term_idle)

        message_json = dict(
            zerver_message=zerver_message,
            zerver_usermessage=zerver_usermessage)

        message_file = "/messages-%06d.json" % (dump_file_id,)
        logging.info("Writing Messages to %s\n" % (output_dir + message_file,))
        create_converted_data_files(message_json, output_dir, message_file)

        total_reactions += reactions
        total_attachments += attachment
        total_uploads += uploads
        dump_file_id += 1

    logging.info('######### IMPORTING MESSAGES FINISHED #########\n')
    return total_reactions, total_uploads, total_attachments
def get_messages_iterator(slack_data_dir: str, added_channels: AddedChannelsT,
                          added_mpims: AddedMPIMsT) -> Iterator[ZerverFieldsT]:
    """Yield every message across all Slack channels, ordered by timestamp.

    Messages are streamed one day's worth of files at a time instead of
    being loaded all at once, since large imports can otherwise be OOM
    killed.
    """
    dir_names = list(added_channels.keys()) + list(added_mpims.keys())
    # Group each per-day json file name with every channel directory
    # that contains a file of that name.
    all_json_names = defaultdict(list)  # type: Dict[str, List[str]]
    for dir_name in dir_names:
        dir_path = os.path.join(slack_data_dir, dir_name)
        for json_name in os.listdir(dir_path):
            all_json_names[json_name].append(dir_path)

    # The file names are dates, so sorting them walks history in order.
    for json_name in sorted(all_json_names.keys()):
        messages_for_one_day = []  # type: List[ZerverFieldsT]
        for dir_path in all_json_names[json_name]:
            message_dir = os.path.join(dir_path, json_name)
            messages = get_data_file(message_dir)
            dir_name = os.path.basename(dir_path)
            for message in messages:
                # Tag every message with the channel/MPIM it came from.
                if dir_name in added_channels:
                    message['channel_name'] = dir_name
                if dir_name in added_mpims:
                    message['mpim_name'] = dir_name
            messages_for_one_day += messages

        # Within a single day, emit messages in timestamp order.
        for message in sorted(messages_for_one_day, key=lambda m: m['ts']):
            yield message
def channel_message_to_zerver_message(realm_id: int,
                                      users: List[ZerverFieldsT],
                                      added_users: AddedUsersT,
                                      added_recipient: AddedRecipientsT,
                                      all_messages: List[ZerverFieldsT],
                                      zerver_realmemoji: List[ZerverFieldsT],
                                      subscriber_map: Dict[int, Set[int]],
                                      added_channels: AddedChannelsT,
                                      domain_name: str,
                                      long_term_idle: Set[int]) -> Tuple[List[ZerverFieldsT],
                                                                         List[ZerverFieldsT],
                                                                         List[ZerverFieldsT],
                                                                         List[ZerverFieldsT],
                                                                         List[ZerverFieldsT]]:
    """
    Returns:
    1. zerver_message, which is a list of the messages
    2. zerver_usermessage, which is a list of the usermessages
    3. zerver_attachment, which is a list of the attachments
    4. uploads_list, which is a list of uploads to be mapped in uploads records.json
    5. reaction_list, which is a list of all user reactions
    """
    zerver_message = []
    zerver_usermessage = []  # type: List[ZerverFieldsT]
    uploads_list = []  # type: List[ZerverFieldsT]
    zerver_attachment = []  # type: List[ZerverFieldsT]
    reaction_list = []  # type: List[ZerverFieldsT]

    # For unicode emoji
    with open(NAME_TO_CODEPOINT_PATH) as fp:
        name_to_codepoint = ujson.load(fp)

    total_user_messages = 0
    total_skipped_user_messages = 0
    for message in all_messages:
        user = get_message_sending_user(message)
        if not user:
            # Ignore messages without user names
            # These are Sometimes produced by slack
            continue

        subtype = message.get('subtype', False)
        if subtype in [
                # Zulip doesn't have a pinned_item concept
                "pinned_item",
                "unpinned_item",
                # Slack's channel join/leave notices are spammy
                "channel_join",
                "channel_leave",
                "channel_name"
        ]:
            continue

        try:
            content, mentioned_user_ids, has_link = convert_to_zulip_markdown(
                message['text'], users, added_channels, added_users)
        except Exception:
            print("Slack message unexpectedly missing text representation:")
            print(ujson.dumps(message, indent=4))
            continue
        rendered_content = None

        # NOTE(review): get_messages_iterator tags each message with
        # 'channel_name' or 'mpim_name'; if neither key were present,
        # is_private/recipient_id below would be left unbound — confirm
        # the iterator is the only producer of all_messages.
        if "channel_name" in message:
            is_private = False
            recipient_id = added_recipient[message['channel_name']]
        elif "mpim_name" in message:
            is_private = True
            recipient_id = added_recipient[message['mpim_name']]

        message_id = NEXT_ID('message')

        # Process message reactions
        if 'reactions' in message.keys():
            build_reactions(reaction_list, message['reactions'], added_users,
                            message_id, name_to_codepoint,
                            zerver_realmemoji)

        # Process different subtypes of slack messages

        # Subtypes which have only the action in the message should
        # be rendered with '/me' in the content initially
        # For example "sh_room_created" has the message 'started a call'
        # which should be displayed as '/me started a call'
        if subtype in ["bot_add", "sh_room_created", "me_message"]:
            content = '/me %s' % (content,)
        if subtype == 'file_comment':
            # The file_comment message type only indicates the
            # responsible user in a subfield.
            message['user'] = message['comment']['user']

        file_info = process_message_files(
            message=message,
            domain_name=domain_name,
            realm_id=realm_id,
            message_id=message_id,
            user=user,
            users=users,
            added_users=added_users,
            zerver_attachment=zerver_attachment,
            uploads_list=uploads_list,
        )

        content += file_info['content']
        has_link = has_link or file_info['has_link']
        has_attachment = file_info['has_attachment']
        has_image = file_info['has_image']

        # construct message
        topic_name = 'imported from slack'

        zulip_message = build_message(topic_name, float(message['ts']), message_id, content,
                                      rendered_content, added_users[user], recipient_id,
                                      has_image, has_link, has_attachment)
        zerver_message.append(zulip_message)

        # construct usermessages
        (num_created, num_skipped) = build_usermessages(
            zerver_usermessage=zerver_usermessage,
            subscriber_map=subscriber_map,
            recipient_id=recipient_id,
            mentioned_user_ids=mentioned_user_ids,
            message_id=message_id,
            is_private=is_private,
            long_term_idle=long_term_idle,
        )
        total_user_messages += num_created
        total_skipped_user_messages += num_skipped

    logging.debug("Created %s UserMessages; deferred %s due to long-term idle" % (
        total_user_messages, total_skipped_user_messages))
    return zerver_message, zerver_usermessage, zerver_attachment, uploads_list, \
        reaction_list
def process_message_files(message: ZerverFieldsT,
                          domain_name: str,
                          realm_id: int,
                          message_id: int,
                          user: str,
                          users: List[ZerverFieldsT],
                          added_users: AddedUsersT,
                          zerver_attachment: List[ZerverFieldsT],
                          uploads_list: List[ZerverFieldsT]) -> Dict[str, Any]:
    """Convert the file uploads attached to a Slack message.

    Slack-hosted files ('files.slack.com' URLs) are registered as Zulip
    uploads/attachments (mutating ``uploads_list`` and ``zerver_attachment``);
    externally hosted files (e.g. Google Drive) are only linked.

    Returns a dict with:
      content        -- markdown link lines to append to the message body
      has_attachment -- True if any Slack-hosted file was found
      has_image      -- True if any Slack-hosted file had an image mimetype
      has_link       -- True if any file (hosted or external) was linked
    """
    has_attachment = False
    has_image = False
    has_link = False
    files = message.get('files', [])
    subtype = message.get('subtype')
    if subtype == 'file_share':
        # In Slack messages, uploads can either have the subtype as 'file_share' or
        # have the upload information in 'files' keyword
        files = [message['file']]
    markdown_links = []
    for fileinfo in files:
        if fileinfo.get('mode', '') in ['tombstone', 'hidden_by_limit']:
            # Slack sometimes includes tombstone mode files with no
            # real data on the actual file (presumably in cases where
            # the file was deleted). hidden_by_limit mode is for files
            # that are hidden because of 10k cap in free plan.
            continue
        url = fileinfo['url_private']
        if 'files.slack.com' in url:
            # For attachments with slack download link
            has_attachment = True
            has_link = True
            # Fixed: accumulate the flag across files instead of overwriting
            # it, so a non-image file later in the list no longer clobbers
            # the flag set by an earlier image upload.
            has_image = has_image or 'image' in fileinfo['mimetype']
            # NOTE(review): assumes the uploading user is present in `users`;
            # an unknown user id would raise IndexError here — TODO confirm
            # upstream guarantees this.
            file_user = [iterate_user for iterate_user in users if message['user'] == iterate_user['id']]
            file_user_email = get_user_email(file_user[0], domain_name)
            s3_path, content_for_link = get_attachment_path_and_content(fileinfo, realm_id)
            markdown_links.append(content_for_link)
            # construct attachments
            build_uploads(added_users[user], realm_id, file_user_email, fileinfo, s3_path,
                          uploads_list)
            build_attachment(realm_id, {message_id}, added_users[user],
                             fileinfo, s3_path, zerver_attachment)
        else:
            # For attachments with link not from slack
            # Example: Google drive integration
            has_link = True
            if 'title' in fileinfo:
                file_name = fileinfo['title']
            else:
                file_name = fileinfo['name']
            markdown_links.append('[%s](%s)' % (file_name, fileinfo['url_private']))
    content = '\n'.join(markdown_links)
    return dict(
        content=content,
        has_attachment=has_attachment,
        has_image=has_image,
        has_link=has_link,
    )
def get_attachment_path_and_content(fileinfo: ZerverFieldsT, realm_id: int) -> Tuple[str,
                                                                                     str]:
    """Build the S3-style storage path and the markdown link for one upload.

    Should be kept in sync with its equivalent in zerver/lib/uploads in the
    function 'upload_message_file'.
    """
    path_components = [
        str(realm_id),
        # Special placeholder which should be kept in sync with 'exports.py'
        # function 'import_message_data'
        'SlackImportAttachment',
        format(random.randint(0, 255), 'x'),
        random_name(18),
        sanitize_name(fileinfo['name']),
    ]
    s3_path = "/".join(path_components)
    attachment_path = '/user_uploads/%s' % (s3_path,)
    content = '[%s](%s)' % (fileinfo['title'], attachment_path)
    return s3_path, content
def build_reactions(reaction_list: List[ZerverFieldsT], reactions: List[ZerverFieldsT],
                    added_users: AddedUsersT, message_id: int,
                    name_to_codepoint: ZerverFieldsT,
                    zerver_realmemoji: List[ZerverFieldsT]) -> None:
    """Append Zulip reaction rows for every Slack reaction on one message.

    Emoji are resolved first against the unicode emoji table
    (``name_to_codepoint``), then against the realm's custom emoji;
    unresolvable names are silently dropped.
    """
    realm_emoji_by_name = {entry['name']: entry['id'] for entry in zerver_realmemoji}
    # For the unicode emoji codes, we use equivalent of
    # function 'emoji_name_to_emoji_code' in 'zerver/lib/emoji' here
    for slack_reaction in reactions:
        emoji_name = slack_reaction['name']
        if emoji_name in name_to_codepoint:
            # Unicode emoji
            emoji_code = name_to_codepoint[emoji_name]
            reaction_type = Reaction.UNICODE_EMOJI
        elif emoji_name in realm_emoji_by_name:
            # Realm (custom) emoji
            emoji_code = realm_emoji_by_name[emoji_name]
            reaction_type = Reaction.REALM_EMOJI
        else:
            continue
        for reacting_user in slack_reaction['users']:
            reaction = Reaction(
                id=NEXT_ID('reaction'),
                emoji_code=emoji_code,
                emoji_name=emoji_name,
                reaction_type=reaction_type)
            reaction_row = model_to_dict(reaction,
                                         exclude=['message', 'user_profile'])
            reaction_row['message'] = message_id
            reaction_row['user_profile'] = added_users[reacting_user]
            reaction_list.append(reaction_row)
def build_uploads(user_id: int, realm_id: int, email: str, fileinfo: ZerverFieldsT, s3_path: str,
                  uploads_list: List[ZerverFieldsT]) -> None:
    """Append one upload record (for later download/import) to *uploads_list*."""
    uploads_list.append({
        # Save slack's url here, which is used later while processing
        'path': fileinfo['url_private'],
        'realm_id': realm_id,
        'content_type': None,
        'user_profile_id': user_id,
        'last_modified': fileinfo['timestamp'],
        'user_profile_email': email,
        's3_path': s3_path,
        'size': fileinfo['size'],
    })
def get_message_sending_user(message: ZerverFieldsT) -> Optional[str]:
    """Return the Slack user id that sent *message*, or None if unknown.

    Falls back to the uploader recorded on the message's file payload when
    the message itself carries no 'user' key.
    """
    try:
        return message['user']
    except KeyError:
        pass
    file_payload = message.get('file')
    if file_payload:
        return file_payload.get('user')
    return None
def do_convert_data(slack_zip_file: str, output_dir: str, token: str, threads: int=6) -> None:
    """Top-level driver: convert a Slack export zip into a Zulip data dump.

    Unzips the export, fetches user/emoji data from the Slack API, converts
    the workspace and its messages, downloads emoji/avatars/uploads with
    *threads* workers, writes the JSON record files into *output_dir*, and
    finally tars the result as ``<output_dir>.tar.gz``.

    Raises Exception if *output_dir* is not empty or the token is invalid.
    """
    # Subdomain is set by the user while running the import command
    realm_subdomain = ""
    realm_id = 0
    domain_name = settings.EXTERNAL_HOST
    # Extraction directory is the zip path with the '.zip' suffix stripped.
    slack_data_dir = slack_zip_file.replace('.zip', '')
    if not os.path.exists(slack_data_dir):
        os.makedirs(slack_data_dir)
    os.makedirs(output_dir, exist_ok=True)
    # output directory should be empty initially
    if os.listdir(output_dir):
        raise Exception('Output directory should be empty!')
    subprocess.check_call(['unzip', '-q', slack_zip_file, '-d', slack_data_dir])
    # with zipfile.ZipFile(slack_zip_file, 'r') as zip_ref:
    #     zip_ref.extractall(slack_data_dir)
    # We get the user data from the legacy token method of slack api, which is deprecated
    # but we use it as the user email data is provided only in this method
    user_list = get_slack_api_data(token, "https://slack.com/api/users.list", "members")
    # Get custom emoji from slack api
    custom_emoji_list = get_slack_api_data(token, "https://slack.com/api/emoji.list", "emoji")
    realm, added_users, added_recipient, added_channels, added_mpims, avatar_list, \
        emoji_url_map = slack_workspace_to_realm(domain_name, realm_id, user_list,
                                                 realm_subdomain,
                                                 slack_data_dir, custom_emoji_list)
    reactions, uploads_list, zerver_attachment = convert_slack_workspace_messages(
        slack_data_dir, user_list, realm_id, added_users, added_recipient, added_channels, added_mpims,
        realm, realm['zerver_userprofile'], realm['zerver_realmemoji'], domain_name, output_dir)
    # Move zerver_reactions to realm.json file
    realm['zerver_reaction'] = reactions
    # Download/copy emoji, avatars and uploads into the export layout.
    emoji_folder = os.path.join(output_dir, 'emoji')
    os.makedirs(emoji_folder, exist_ok=True)
    emoji_records = process_emojis(realm['zerver_realmemoji'], emoji_folder, emoji_url_map, threads)
    avatar_folder = os.path.join(output_dir, 'avatars')
    avatar_realm_folder = os.path.join(avatar_folder, str(realm_id))
    os.makedirs(avatar_realm_folder, exist_ok=True)
    avatar_records = process_avatars(avatar_list, avatar_folder, realm_id, threads, size_url_suffix='-512')
    uploads_folder = os.path.join(output_dir, 'uploads')
    os.makedirs(os.path.join(uploads_folder, str(realm_id)), exist_ok=True)
    uploads_records = process_uploads(uploads_list, uploads_folder, threads)
    attachment = {"zerver_attachment": zerver_attachment}
    # IO realm.json
    create_converted_data_files(realm, output_dir, '/realm.json')
    # IO emoji records
    create_converted_data_files(emoji_records, output_dir, '/emoji/records.json')
    # IO avatar records
    create_converted_data_files(avatar_records, output_dir, '/avatars/records.json')
    # IO uploads records
    create_converted_data_files(uploads_records, output_dir, '/uploads/records.json')
    # IO attachments records
    create_converted_data_files(attachment, output_dir, '/attachment.json')
    # remove slack dir
    rm_tree(slack_data_dir)
    subprocess.check_call(["tar", "-czf", output_dir + '.tar.gz', output_dir, '-P'])
    logging.info('######### DATA CONVERSION FINISHED #########\n')
    logging.info("Zulip data dump created at %s" % (output_dir,))
def get_data_file(path: str) -> Any:
    """Load and return the parsed JSON content of the file at *path*."""
    with open(path, "r") as fp:
        return ujson.load(fp)
def get_slack_api_data(token: str, slack_api_url: str, get_param: str) -> Any:
    """Fetch *get_param* from a Slack API endpoint using a legacy token.

    Raises Exception when the token is rejected (Slack returns an 'error'
    key) or the HTTP request fails outright.
    """
    response = requests.get('%s?token=%s' % (slack_api_url, token))
    if response.status_code == requests.codes.ok:
        # Parse the body once; the original re-parsed it on every access,
        # which deserialized the (potentially large) payload up to twice.
        result = response.json()
        if 'error' in result:
            raise Exception('Enter a valid token!')
        return result[get_param]
    raise Exception('Something went wrong. Please try again!')
| |
# Copyright (c) 2008, Aldo Cortesi. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import array
import contextlib
import inspect
import traceback
import warnings
import xcffib.xproto
from xcffib.xproto import EventMask, SetMode, StackMode
from libqtile import hook, utils
from libqtile.command_object import CommandError, CommandObject
from libqtile.log_utils import logger
# ICCCM Constants
# XParseGeometry()-style result flags.
NoValue = 0x0000
XValue = 0x0001
YValue = 0x0002
WidthValue = 0x0004
HeightValue = 0x0008
AllValues = 0x000F
XNegative = 0x0010
YNegative = 0x0020
# WM_NORMAL_HINTS (size hints) flag bits.
USPosition = (1 << 0)
USSize = (1 << 1)
PPosition = (1 << 2)
PSize = (1 << 3)
PMinSize = (1 << 4)
PMaxSize = (1 << 5)
PResizeInc = (1 << 6)
PAspect = (1 << 7)
PBaseSize = (1 << 8)
PWinGravity = (1 << 9)
PAllHints = (PPosition | PSize | PMinSize | PMaxSize | PResizeInc | PAspect)
# WM_HINTS flag bits.
InputHint = (1 << 0)
StateHint = (1 << 1)
IconPixmapHint = (1 << 2)
IconWindowHint = (1 << 3)
IconPositionHint = (1 << 4)
IconMaskHint = (1 << 5)
WindowGroupHint = (1 << 6)
MessageHint = (1 << 7)
UrgencyHint = (1 << 8)
AllHints = (InputHint | StateHint | IconPixmapHint | IconWindowHint |
            IconPositionHint | IconMaskHint | WindowGroupHint | MessageHint |
            UrgencyHint)
# WM_STATE values.
WithdrawnState = 0
DontCareState = 0
NormalState = 1
ZoomState = 2
IconicState = 3
InactiveState = 4
# XRectInRegion() return values.
RectangleOut = 0
RectangleIn = 1
RectanglePart = 2
# XVisualInfo mask bits.
VisualNoMask = 0x0
VisualIDMask = 0x1
VisualScreenMask = 0x2
VisualDepthMask = 0x4
VisualClassMask = 0x8
VisualRedMaskMask = 0x10
VisualGreenMaskMask = 0x20
VisualBlueMaskMask = 0x40
VisualColormapSizeMask = 0x80
VisualBitsPerRGBMask = 0x100
VisualAllMask = 0x1FF
ReleaseByFreeingColormap = 1
# Bitmap-read status codes.
BitmapSuccess = 0
BitmapOpenFailed = 1
BitmapFileInvalid = 2
BitmapNoMemory = 3
# Context-manager status codes.
XCSUCCESS = 0
XCNOMEM = 1
XCNOENT = 2
# float states (values of _Window._float_state)
NOT_FLOATING = 1  # not floating
FLOATING = 2
MAXIMIZED = 3
FULLSCREEN = 4
TOP = 5
MINIMIZED = 6
# _NET_WM_STATE client-message actions (EWMH).
_NET_WM_STATE_REMOVE = 0
_NET_WM_STATE_ADD = 1
_NET_WM_STATE_TOGGLE = 2
def _geometry_getter(attr):
def get_attr(self):
if getattr(self, "_" + attr) is None:
g = self.window.get_geometry()
# trigger the geometry setter on all these
self.x = g.x
self.y = g.y
self.width = g.width
self.height = g.height
return getattr(self, "_" + attr)
return get_attr
def _geometry_setter(attr):
def f(self, value):
if not isinstance(value, int):
frame = inspect.currentframe()
stack_trace = traceback.format_stack(frame)
logger.error("!!!! setting %s to a non-int %s; please report this!", attr, value)
logger.error(''.join(stack_trace[:-1]))
value = int(value)
setattr(self, "_" + attr, value)
return f
def _float_getter(attr):
def getter(self):
if self._float_info[attr] is not None:
return self._float_info[attr]
# we don't care so much about width or height, if not set, default to the window width/height
if attr in ('width', 'height'):
return getattr(self, attr)
raise AttributeError("Floating not yet configured yet")
return getter
def _float_setter(attr):
def setter(self, value):
self._float_info[attr] = value
return setter
class _Window(CommandObject):
    """Base wrapper around an X11 window handle.

    Caches geometry, ICCCM hints and float state locally, and provides the
    X protocol plumbing (place, focus, kill, hide/unhide, event masks)
    shared by Internal, Static and Window.
    """
    _window_mask = 0  # override in child class
    def __init__(self, window, qtile):
        self.window, self.qtile = window, qtile
        self.hidden = True
        self.group = None
        self.icons = {}
        window.set_attribute(eventmask=self._window_mask)
        # Floating geometry (x/y/width/height); None means "not yet known".
        self._float_info = {
            'x': None,
            'y': None,
            'width': None,
            'height': None,
        }
        try:
            g = self.window.get_geometry()
            self._x = g.x
            self._y = g.y
            self._width = g.width
            self._height = g.height
            self._float_info['width'] = g.width
            self._float_info['height'] = g.height
        except xcffib.xproto.DrawableError:
            # Whoops, we were too early, so let's ignore it for now and get the
            # values on demand.
            self._x = None
            self._y = None
            self._width = None
            self._height = None
        self.borderwidth = 0
        self.bordercolor = None
        self.name = "<no name>"
        self.strut = None
        # Goes through the `state` property setter, which writes WM_STATE.
        self.state = NormalState
        self.window_type = "normal"
        self._float_state = NOT_FLOATING
        self._demands_attention = False
        # Local copy of WM_HINTS / WM_NORMAL_HINTS, kept fresh by update_hints().
        self.hints = {
            'input': True,
            'icon_pixmap': None,
            'icon_window': None,
            'icon_x': 0,
            'icon_y': 0,
            'icon_mask': 0,
            'window_group': None,
            'urgent': False,
            # normal or size hints
            'width_inc': None,
            'height_inc': None,
            'base_width': 0,
            'base_height': 0,
        }
        self.update_hints()
    # Lazily cached geometry; a None backing slot triggers a server round-trip.
    x = property(fset=_geometry_setter("x"), fget=_geometry_getter("x"))
    y = property(fset=_geometry_setter("y"), fget=_geometry_getter("y"))
    @property
    def width(self):
        return _geometry_getter("width")(self)
    @width.setter
    def width(self, value):
        _geometry_setter("width")(self, value)
    @property
    def height(self):
        return _geometry_getter("height")(self)
    @height.setter
    def height(self, value):
        _geometry_setter("height")(self, value)
    float_x = property(
        fset=_float_setter("x"),
        fget=_float_getter("x")
    )
    float_y = property(
        fset=_float_setter("y"),
        fget=_float_getter("y")
    )
    float_width = property(
        fset=_float_setter("width"),
        fget=_float_getter("width")
    )
    float_height = property(
        fset=_float_setter("height"),
        fget=_float_getter("height")
    )
    @property
    def has_focus(self):
        return self == self.qtile.current_window
    def update_name(self):
        """Refresh self.name from the server and fire client_name_updated."""
        try:
            self.name = self.window.get_name()
        except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
            return
        hook.fire('client_name_updated', self)
    def update_hints(self):
        """Update the local copy of the window's WM_HINTS and WM_NORMAL_HINTS.

        See http://tronche.com/gui/x/icccm/sec-4.html#WM_HINTS
        Fires client_urgent_hint_changed when the urgency flag flips, and
        re-layouts the group since size hints may have changed.
        """
        try:
            h = self.window.get_wm_hints()
            normh = self.window.get_wm_normal_hints()
        except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
            return
        # FIXME
        # h values
        # {
        #    'icon_pixmap': 4194337,
        #    'icon_window': 0,
        #    'icon_mask': 4194340,
        #    'icon_y': 0,
        #    'input': 1,
        #    'icon_x': 0,
        #    'window_group': 4194305
        #    'initial_state': 1,
        #    'flags': set(['StateHint',
        #                  'IconMaskHint',
        #                  'WindowGroupHint',
        #                  'InputHint',
        #                  'UrgencyHint',
        #                  'IconPixmapHint']),
        # }
        if normh:
            normh.pop('flags')
            normh['min_width'] = max(0, normh.get('min_width', 0))
            normh['min_height'] = max(0, normh.get('min_height', 0))
            if not normh['base_width'] and \
                    normh['min_width'] and \
                    normh['width_inc']:
                # seems xcffib does ignore base width :(
                normh['base_width'] = (
                    normh['min_width'] % normh['width_inc']
                )
            if not normh['base_height'] and \
                    normh['min_height'] and \
                    normh['height_inc']:
                # seems xcffib does ignore base height :(
                normh['base_height'] = (
                    normh['min_height'] % normh['height_inc']
                )
            self.hints.update(normh)
        if h and 'UrgencyHint' in h['flags']:
            if self.qtile.current_window != self:
                self.hints['urgent'] = True
                hook.fire('client_urgent_hint_changed', self)
        elif self.urgent:
            self.hints['urgent'] = False
            hook.fire('client_urgent_hint_changed', self)
        if getattr(self, 'group', None):
            self.group.layout_all()
        return
    def update_state(self):
        """Re-read _NET_WM_STATE and apply the triggered properties locally."""
        triggered = ['urgent']
        if self.qtile.config.auto_fullscreen:
            triggered.append('fullscreen')
        state = self.window.get_net_wm_state()
        logger.debug('_NET_WM_STATE: %s', state)
        for s in triggered:
            setattr(self, s, (s in state))
    @property
    def urgent(self):
        return self.hints['urgent'] or self._demands_attention
    @urgent.setter
    def urgent(self, val):
        self._demands_attention = val
        # TODO unset window hint as well?
        if not val:
            self.hints['urgent'] = False
    def info(self):
        """Return a dict describing this window's name, geometry and state."""
        if self.group:
            group = self.group.name
        else:
            group = None
        return dict(
            name=self.name,
            x=self.x,
            y=self.y,
            width=self.width,
            height=self.height,
            group=group,
            id=self.window.wid,
            floating=self._float_state != NOT_FLOATING,
            float_info=self._float_info,
            maximized=self._float_state == MAXIMIZED,
            minimized=self._float_state == MINIMIZED,
            fullscreen=self._float_state == FULLSCREEN
        )
    @property
    def state(self):
        # First element of the WM_STATE property (e.g. NormalState).
        return self.window.get_wm_state()[0]
    @state.setter
    def state(self, val):
        if val in (WithdrawnState, NormalState, IconicState):
            self.window.set_property('WM_STATE', [val, 0])
    def set_opacity(self, opacity):
        """Set _NET_WM_WINDOW_OPACITY; out-of-range values are ignored."""
        if 0.0 <= opacity <= 1.0:
            real_opacity = int(opacity * 0xffffffff)
            self.window.set_property('_NET_WM_WINDOW_OPACITY', real_opacity)
        else:
            return
    def get_opacity(self):
        """Return the window's opacity as a float in [0, 1] (default 1.0)."""
        opacity = self.window.get_property(
            "_NET_WM_WINDOW_OPACITY", unpack=int
        )
        if not opacity:
            return 1.0
        else:
            value = opacity[0]
            # 2 decimal places
            as_float = round(value / 0xffffffff, 2)
            return as_float
    opacity = property(get_opacity, set_opacity)
    def kill(self):
        """Ask the client to close (WM_DELETE_WINDOW) or kill it outright."""
        if "WM_DELETE_WINDOW" in self.window.get_wm_protocols():
            data = [
                self.qtile.conn.atoms["WM_DELETE_WINDOW"],
                xcffib.xproto.Time.CurrentTime,
                0,
                0,
                0
            ]
            u = xcffib.xproto.ClientMessageData.synthetic(data, "I" * 5)
            e = xcffib.xproto.ClientMessageEvent.synthetic(
                format=32,
                window=self.window.wid,
                type=self.qtile.conn.atoms["WM_PROTOCOLS"],
                data=u
            )
            self.window.send_event(e)
        else:
            self.window.kill_client()
        self.qtile.conn.flush()
    def hide(self):
        """Unmap the window without generating an UnmapNotify for ourselves."""
        # We don't want to get the UnmapNotify for this unmap
        with self.disable_mask(xcffib.xproto.EventMask.StructureNotify):
            self.window.unmap()
        self.hidden = True
    def unhide(self):
        """Map the window again and mark it as normal/visible."""
        self.window.map()
        self.state = NormalState
        self.hidden = False
    @contextlib.contextmanager
    def disable_mask(self, mask):
        """Context manager: temporarily remove *mask* from the event mask."""
        self._disable_mask(mask)
        yield
        self._reset_mask()
    def _disable_mask(self, mask):
        self.window.set_attribute(
            eventmask=self._window_mask & (~mask)
        )
    def _reset_mask(self):
        self.window.set_attribute(
            eventmask=self._window_mask
        )
    def place(self, x, y, width, height, borderwidth, bordercolor,
              above=False, force=False, margin=None):
        """Places the window at the specified location with the given size.

        If force is false, then it tries to obey hints.
        """
        # TODO: self.x/y/height/width are updated BEFORE
        # place is called, so there's no way to know if only
        # the position is changed, so we are sending
        # the ConfigureNotify every time place is called
        #
        # # if position change and size don't
        # # send a configure notify. See ICCCM 4.2.3
        # send_notify = False
        # if (self.x != x or self.y != y) and \
        #    (self.width == width and self.height == height):
        #       send_notify = True
        # #for now, we just:
        send_notify = True
        # Adjust the placement to account for layout margins, if there are any.
        if margin is not None:
            x += margin
            y += margin
            width -= margin * 2
            height -= margin * 2
        # save x and y float offset (relative to the screen origin)
        if self.group is not None and self.group.screen is not None:
            self.float_x = x - self.group.screen.x
            self.float_y = y - self.group.screen.y
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.borderwidth = borderwidth
        self.bordercolor = bordercolor
        kwarg = dict(
            x=x,
            y=y,
            width=width,
            height=height,
            borderwidth=borderwidth,
        )
        if above:
            kwarg['stackmode'] = StackMode.Above
        self.window.configure(**kwarg)
        if send_notify:
            self.send_configure_notify(x, y, width, height)
        if bordercolor is not None:
            self.window.set_attribute(borderpixel=bordercolor)
    def send_configure_notify(self, x, y, width, height):
        """Send a synthetic ConfigureNotify"""
        window = self.window.wid
        above_sibling = False
        override_redirect = False
        event = xcffib.xproto.ConfigureNotifyEvent.synthetic(
            event=window,
            window=window,
            above_sibling=above_sibling,
            x=x,
            y=y,
            width=width,
            height=height,
            border_width=self.borderwidth,
            override_redirect=override_redirect
        )
        self.window.send_event(event, mask=EventMask.StructureNotify)
    def can_steal_focus(self):
        """Return False for notification windows, which must not take focus."""
        return self.window.get_wm_type() != 'notification'
    def focus(self, warp):
        """Give this window the input focus, optionally warping the pointer.

        Clears the urgency state and fires the client_focus hook.
        """
        # Workaround for misbehaving java applications (actually it might be
        # qtile who misbehaves by not implementing some X11 protocol correctly)
        #
        # See this xmonad issue for more information on the problem:
        # http://code.google.com/p/xmonad/issues/detail?id=177
        #
        # 'sun-awt-X11-XFramePeer' is a main window of a java application.
        # Only send WM_TAKE_FOCUS not FocusIn
        # 'sun-awt-X11-XDialogPeer' is a dialog of a java application. Do not
        # send any event.
        cls = self.window.get_wm_class() or ''
        is_java_main = 'sun-awt-X11-XFramePeer' in cls
        is_java_dialog = 'sun-awt-X11-XDialogPeer' in cls
        is_java = is_java_main or is_java_dialog
        if not self.hidden:
            # Never send TAKE_FOCUS on java *dialogs*
            if not is_java_dialog and \
                    "WM_TAKE_FOCUS" in self.window.get_wm_protocols():
                data = [
                    self.qtile.conn.atoms["WM_TAKE_FOCUS"],
                    # The timestamp here must be a valid timestamp, not CurrentTime.
                    #
                    # see https://tronche.com/gui/x/icccm/sec-4.html#s-4.1.7
                    # > Windows with the atom WM_TAKE_FOCUS in their WM_PROTOCOLS
                    # > property may receive a ClientMessage event from the
                    # > window manager (as described in section 4.2.8) with
                    # > WM_TAKE_FOCUS in its data[0] field and a valid timestamp
                    # > (i.e. not *CurrentTime* ) in its data[1] field.
                    self.qtile.core.get_valid_timestamp(),
                    0,
                    0,
                    0
                ]
                u = xcffib.xproto.ClientMessageData.synthetic(data, "I" * 5)
                e = xcffib.xproto.ClientMessageEvent.synthetic(
                    format=32,
                    window=self.window.wid,
                    type=self.qtile.conn.atoms["WM_PROTOCOLS"],
                    data=u
                )
                self.window.send_event(e)
            # Never send FocusIn to java windows
            if not is_java and self.hints['input']:
                self.window.set_input_focus()
            try:
                if warp and self.qtile.config.cursor_warp:
                    self.window.warp_pointer(self.width // 2, self.height // 2)
            except AttributeError:
                pass
            if self.urgent:
                self.urgent = False
                # Also drop the EWMH demands-attention flag from the property.
                atom = self.qtile.conn.atoms["_NET_WM_STATE_DEMANDS_ATTENTION"]
                state = list(self.window.get_property('_NET_WM_STATE', 'ATOM', unpack=int))
                if atom in state:
                    state.remove(atom)
                    self.window.set_property('_NET_WM_STATE', state)
            self.qtile.root.set_property("_NET_ACTIVE_WINDOW", self.window.wid)
            hook.fire("client_focus", self)
    def _items(self, name):
        # CommandObject interface: windows expose no child items.
        return None
    def _select(self, name, sel):
        # CommandObject interface: windows expose no child items.
        return None
    def cmd_focus(self, warp=None):
        """Focuses the window."""
        if warp is None:
            warp = self.qtile.config.cursor_warp
        self.focus(warp=warp)
    def cmd_info(self):
        """Returns a dictionary of info for this object"""
        return self.info()
    def cmd_inspect(self):
        """Tells you more than you ever wanted to know about a window"""
        a = self.window.get_attributes()
        attrs = {
            "backing_store": a.backing_store,
            "visual": a.visual,
            "class": a._class,
            "bit_gravity": a.bit_gravity,
            "win_gravity": a.win_gravity,
            "backing_planes": a.backing_planes,
            "backing_pixel": a.backing_pixel,
            "save_under": a.save_under,
            "map_is_installed": a.map_is_installed,
            "map_state": a.map_state,
            "override_redirect": a.override_redirect,
            # "colormap": a.colormap,
            "all_event_masks": a.all_event_masks,
            "your_event_mask": a.your_event_mask,
            "do_not_propagate_mask": a.do_not_propagate_mask
        }
        props = self.window.list_properties()
        normalhints = self.window.get_wm_normal_hints()
        hints = self.window.get_wm_hints()
        protocols = []
        for i in self.window.get_wm_protocols():
            protocols.append(i)
        state = self.window.get_wm_state()
        return dict(
            attributes=attrs,
            properties=props,
            name=self.window.get_name(),
            wm_class=self.window.get_wm_class(),
            wm_window_role=self.window.get_wm_window_role(),
            wm_type=self.window.get_wm_type(),
            wm_transient_for=self.window.get_wm_transient_for(),
            protocols=protocols,
            wm_icon_name=self.window.get_wm_icon_name(),
            wm_client_machine=self.window.get_wm_client_machine(),
            normalhints=normalhints,
            hints=hints,
            state=state,
            float_info=self._float_info
        )
class Internal(_Window):
    """An internal window, that should not be managed by qtile"""
    # Internal windows are qtile's own surfaces, so we additionally listen
    # for exposure and input events.
    _window_mask = EventMask.StructureNotify | \
                   EventMask.PropertyChange | \
                   EventMask.EnterWindow | \
                   EventMask.FocusChange | \
                   EventMask.Exposure | \
                   EventMask.ButtonPress | \
                   EventMask.ButtonRelease | \
                   EventMask.KeyPress
    @classmethod
    def create(cls, qtile, x, y, width, height, opacity=1.0):
        """Create, tag (QTILE_INTERNAL) and place a new internal window."""
        win = qtile.conn.create_window(x, y, width, height)
        win.set_property("QTILE_INTERNAL", 1)
        i = Internal(win, qtile)
        i.place(x, y, width, height, 0, None)
        i.opacity = opacity
        return i
    def __repr__(self):
        return "Internal(%r, %s)" % (self.name, self.window.wid)
    def kill(self):
        # Our own window: destroy it directly, no WM_DELETE_WINDOW handshake.
        self.qtile.conn.conn.core.DestroyWindow(self.window.wid)
    def cmd_kill(self):
        self.kill()
class Static(_Window):
    """A static window, permanently attached to a screen rather than a group"""
    _window_mask = EventMask.StructureNotify | \
                   EventMask.PropertyChange | \
                   EventMask.EnterWindow | \
                   EventMask.FocusChange | \
                   EventMask.Exposure
    def __init__(self, win, qtile, screen,
                 x=None, y=None, width=None, height=None):
        _Window.__init__(self, win, qtile)
        self.update_name()
        # conf_* record which values were pinned by configuration; None means
        # the client may configure that dimension itself (see
        # handle_ConfigureRequest).
        self.conf_x = x
        self.conf_y = y
        self.conf_width = width
        self.conf_height = height
        self.x = x or 0
        self.y = y or 0
        self.width = width or 0
        self.height = height or 0
        self.screen = screen
        if None not in (x, y, width, height):
            self.place(x, y, width, height, 0, 0)
        self.update_strut()
    def handle_ConfigureRequest(self, e):  # noqa: N802
        """Honour client configure requests for any non-pinned dimension."""
        cw = xcffib.xproto.ConfigWindow
        if self.conf_x is None and e.value_mask & cw.X:
            self.x = e.x
        if self.conf_y is None and e.value_mask & cw.Y:
            self.y = e.y
        if self.conf_width is None and e.value_mask & cw.Width:
            self.width = e.width
        if self.conf_height is None and e.value_mask & cw.Height:
            self.height = e.height
        self.place(
            self.screen.x + self.x,
            self.screen.y + self.y,
            self.width,
            self.height,
            self.borderwidth,
            self.bordercolor
        )
        return False
    def update_strut(self):
        """Re-read the window's strut properties and update screen gaps."""
        strut = self.window.get_property(
            "_NET_WM_STRUT_PARTIAL",
            unpack=int
        )
        # Fall back to the older _NET_WM_STRUT, then to no reservation.
        strut = strut or self.window.get_property(
            "_NET_WM_STRUT",
            unpack=int
        )
        strut = strut or (0, 0, 0, 0)
        self.qtile.update_gaps(strut, self.strut)
        self.strut = strut
    def handle_PropertyNotify(self, e):  # noqa: N802
        """React to strut property changes by recomputing gaps."""
        name = self.qtile.conn.atoms.get_name(e.atom)
        if name in ("_NET_WM_STRUT_PARTIAL", "_NET_WM_STRUT"):
            self.update_strut()
    def __repr__(self):
        return "Static(%r)" % self.name
class Window(_Window):
_window_mask = EventMask.StructureNotify | \
EventMask.PropertyChange | \
EventMask.EnterWindow | \
EventMask.FocusChange
# Set when this object is being retired.
defunct = False
def __init__(self, window, qtile):
_Window.__init__(self, window, qtile)
self._group = None
self.update_name()
# add to group by position according to _NET_WM_DESKTOP property
group = None
index = window.get_wm_desktop()
if index is not None and index < len(qtile.groups):
group = qtile.groups[index]
elif index is None:
transient_for = window.get_wm_transient_for()
win = qtile.windows_map.get(transient_for)
if win is not None:
group = win._group
if group is not None:
group.add(self)
self._group = group
if group != qtile.current_screen.group:
self.hide()
# add window to the save-set, so it gets mapped when qtile dies
qtile.conn.conn.core.ChangeSaveSet(SetMode.Insert, self.window.wid)
self.update_wm_net_icon()
@property
def group(self):
return self._group
@group.setter
def group(self, group):
if group:
try:
self.window.set_property(
"_NET_WM_DESKTOP",
self.qtile.groups.index(group)
)
except xcffib.xproto.WindowError:
logger.exception("whoops, got error setting _NET_WM_DESKTOP, too early?")
self._group = group
@property
def edges(self):
return (self.x, self.y, self.x + self.width, self.y + self.height)
@property
def floating(self):
return self._float_state != NOT_FLOATING
@floating.setter
def floating(self, do_float):
if do_float and self._float_state == NOT_FLOATING:
if self.group and self.group.screen:
screen = self.group.screen
self._enablefloating(
screen.x + self.float_x, screen.y + self.float_y, self.float_width, self.float_height
)
else:
# if we are setting floating early, e.g. from a hook, we don't have a screen yet
self._float_state = FLOATING
elif (not do_float) and self._float_state != NOT_FLOATING:
if self._float_state == FLOATING:
# store last size
self.float_width = self.width
self.float_height = self.height
self._float_state = NOT_FLOATING
self.group.mark_floating(self, False)
hook.fire('float_change')
def toggle_floating(self):
self.floating = not self.floating
def togglefloating(self):
warnings.warn("togglefloating is deprecated, use toggle_floating", DeprecationWarning)
self.toggle_floating()
def enablefloating(self):
warnings.warn("enablefloating is deprecated, use floating=True", DeprecationWarning)
self.floating = True
def disablefloating(self):
warnings.warn("disablefloating is deprecated, use floating=False", DeprecationWarning)
self.floating = False
@property
def fullscreen(self):
return self._float_state == FULLSCREEN
@fullscreen.setter
def fullscreen(self, do_full):
atom = set([self.qtile.conn.atoms["_NET_WM_STATE_FULLSCREEN"]])
prev_state = set(self.window.get_property('_NET_WM_STATE', 'ATOM', unpack=int))
def set_state(old_state, new_state):
if new_state != old_state:
self.window.set_property('_NET_WM_STATE', list(new_state))
if do_full:
screen = self.group.screen or \
self.qtile.find_closest_screen(self.x, self.y)
self._enablefloating(
screen.x,
screen.y,
screen.width,
screen.height,
new_float_state=FULLSCREEN
)
set_state(prev_state, prev_state | atom)
return
if self._float_state == FULLSCREEN:
# The order of calling set_state() and then
# setting self.floating = False is important
set_state(prev_state, prev_state - atom)
self.floating = False
return
def toggle_fullscreen(self):
self.fullscreen = not self.fullscreen
def togglefullscreen(self):
warnings.warn("togglefullscreen is deprecated, use toggle_fullscreen", DeprecationWarning)
self.toggle_fullscreen()
@property
def maximized(self):
return self._float_state == MAXIMIZED
@maximized.setter
def maximized(self, do_maximize):
if do_maximize:
screen = self.group.screen or \
self.qtile.find_closest_screen(self.x, self.y)
self._enablefloating(
screen.dx,
screen.dy,
screen.dwidth,
screen.dheight,
new_float_state=MAXIMIZED
)
else:
if self._float_state == MAXIMIZED:
self.floating = False
def enablemaximize(self, state=MAXIMIZED):
warnings.warn("enablemaximize is deprecated, use maximized=True", DeprecationWarning)
self.maximized = True
def toggle_maximize(self, state=MAXIMIZED):
self.maximized = not self.maximized
def togglemaximize(self):
warnings.warn("togglemaximize is deprecated, use toggle_maximize", DeprecationWarning)
self.toggle_maximize()
@property
def minimized(self):
return self._float_state == MINIMIZED
@minimized.setter
def minimized(self, do_minimize):
if do_minimize:
if self._float_state != MINIMIZED:
self._enablefloating(new_float_state=MINIMIZED)
else:
if self._float_state == MINIMIZED:
self.floating = False
def enableminimize(self):
warnings.warn("enableminimized is deprecated, use minimized=True", DeprecationWarning)
self.minimized = True
def toggle_minimize(self):
self.minimized = not self.minimized
def toggleminimize(self):
warnings.warn("toggleminimize is deprecated, use toggle_minimize", DeprecationWarning)
self.toggle_minimize()
def static(self, screen, x=None, y=None, width=None, height=None):
"""Makes this window a static window, attached to a Screen
If any of the arguments are left unspecified, the values given by the
window itself are used instead. So, for a window that's aware of its
appropriate size and location (like dzen), you don't have to specify
anything.
"""
self.defunct = True
screen = self.qtile.screens[screen]
if self.group:
self.group.remove(self)
s = Static(self.window, self.qtile, screen, x, y, width, height)
self.qtile.windows_map[self.window.wid] = s
hook.fire("client_managed", s)
return s
def tweak_float(self, x=None, y=None, dx=0, dy=0,
w=None, h=None, dw=0, dh=0):
if x is not None:
self.x = x
self.x += dx
if y is not None:
self.y = y
self.y += dy
if w is not None:
self.width = w
self.width += dw
if h is not None:
self.height = h
self.height += dh
if self.height < 0:
self.height = 0
if self.width < 0:
self.width = 0
screen = self.qtile.find_closest_screen(self.x, self.y)
if self.group and screen is not None and screen != self.group.screen:
self.group.remove(self, force=True)
screen.group.add(self, force=True)
self.qtile.focus_screen(screen.index)
self._reconfigure_floating()
def getsize(self):
return (self.width, self.height)
def getposition(self):
return (self.x, self.y)
    def _reconfigure_floating(self, new_float_state=FLOATING):
        """Apply geometry and state for a floating window.

        MINIMIZED windows are simply hidden; any other float state has
        its size snapped to the client's WM_NORMAL_HINTS increments and
        is placed above other windows.
        """
        if new_float_state == MINIMIZED:
            self.state = IconicState
            self.hide()
        else:
            # Respect the client's minimum-size hints.
            width = max(self.width, self.hints.get('min_width', 0))
            height = max(self.height, self.hints.get('min_height', 0))
            # Snap the size down to a multiple of the resize increment;
            # when fullscreen, recentre by half the trimmed remainder.
            if self.hints['base_width'] and self.hints['width_inc']:
                width_adjustment = (width - self.hints['base_width']) % self.hints['width_inc']
                width -= width_adjustment
                if new_float_state == FULLSCREEN:
                    self.x += int(width_adjustment / 2)
            if self.hints['base_height'] and self.hints['height_inc']:
                height_adjustment = (height - self.hints['base_height']) % self.hints['height_inc']
                height -= height_adjustment
                if new_float_state == FULLSCREEN:
                    self.y += int(height_adjustment / 2)
            self.place(
                self.x, self.y,
                width, height,
                self.borderwidth,
                self.bordercolor,
                above=True,
            )
        if self._float_state != new_float_state:
            self._float_state = new_float_state
            if self.group:  # may be None if this is called from a hook
                self.group.mark_floating(self, True)
            hook.fire('float_change')
    def _enablefloating(self, x=None, y=None, w=None, h=None,
                        new_float_state=FLOATING):
        """Float the window with the given geometry.

        For MINIMIZED the geometry is left untouched, since the window
        is about to be hidden anyway.
        """
        if new_float_state != MINIMIZED:
            self.x = x
            self.y = y
            self.width = w
            self.height = h
        self._reconfigure_floating(new_float_state=new_float_state)
    def togroup(self, group_name=None, *, switch_group=False):
        """Move window to a specified group
        Also switch to that group if switch_group is True.

        Raises CommandError when the named group does not exist.
        """
        if group_name is None:
            group = self.qtile.current_group
        else:
            group = self.qtile.groups_map.get(group_name)
            if group is None:
                raise CommandError("No such group: %s" % group_name)
        if self.group is not group:
            self.hide()
            if self.group:
                if self.group.screen:
                    # for floats remove window offset
                    self.x -= self.group.screen.x
                self.group.remove(self)
            # Re-apply the destination screen's offset for floats.
            if group.screen and self.x < group.screen.x:
                self.x += group.screen.x
            group.add(self)
        if switch_group:
            group.cmd_toscreen()
def toscreen(self, index=None):
"""Move window to a specified screen, or the current screen."""
if index is None:
screen = self.qtile.current_screen
else:
try:
screen = self.qtile.screens[index]
except IndexError:
raise CommandError('No such screen: %d' % index)
self.togroup(screen.group.name)
    def match(self, wname=None, wmclass=None, role=None):
        """Match window against given attributes.
        Parameters
        ==========
        wname :
            matches against the window name or title, that is, either
            ``_NET_WM_VISIBLE_NAME``, ``_NET_WM_NAME``, ``WM_NAME``.
        wmclass :
            matches against any of the two values in the ``WM_CLASS`` property
        role :
            matches against the ``WM_WINDOW_ROLE`` property
        """
        if not (wname or wmclass or role):
            raise TypeError(
                "Either a name, a wmclass or a role must be specified"
            )
        if wname and wname == self.name:
            return True
        try:
            cliclass = self.window.get_wm_class()
            if wmclass and cliclass and wmclass in cliclass:
                return True
            clirole = self.window.get_wm_window_role()
            if role and clirole and role == clirole:
                return True
        except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
            # The window may have been destroyed mid-query; treat as no match.
            return False
        return False
    def handle_EnterNotify(self, e):  # noqa: N802
        """Handle the pointer entering the window (follow-mouse focus)."""
        hook.fire("client_mouse_enter", self)
        if self.qtile.config.follow_mouse_focus and \
                self.group.current_window != self:
            self.group.focus(self, False)
        # Also follow focus across screens when the window's group is
        # displayed on a screen other than the current one.
        if self.group.screen and \
                self.qtile.current_screen != self.group.screen and \
                self.qtile.config.follow_mouse_focus:
            self.qtile.focus_screen(self.group.screen.index, False)
        return True
    def handle_ConfigureRequest(self, e):  # noqa: N802
        """Handle a client's ConfigureRequest.

        Tiled windows keep their managed geometry; floating windows get
        the geometry fields the request actually set (per value_mask).
        """
        if self.qtile._drag and self.qtile.current_window == self:
            # ignore requests while user is dragging window
            return
        if getattr(self, 'floating', False):
            # only obey resize for floating windows
            cw = xcffib.xproto.ConfigWindow
            width = e.width if e.value_mask & cw.Width else self.width
            height = e.height if e.value_mask & cw.Height else self.height
            x = e.x if e.value_mask & cw.X else self.x
            y = e.y if e.value_mask & cw.Y else self.y
        else:
            width, height, x, y = self.width, self.height, self.x, self.y
        if self.group and self.group.screen:
            self.place(
                x, y,
                width, height,
                self.borderwidth, self.bordercolor,
            )
        self.update_state()
        return False
    def update_wm_net_icon(self):
        """Set a dict with the icons of the window"""
        icon = self.window.get_property('_NET_WM_ICON', 'CARDINAL')
        if not icon:
            return
        # Property value arrives as raw bytes; turn it into a byte list.
        icon = list(map(ord, icon.value))
        icons = {}
        while True:
            if not icon:
                break
            # Each icon starts with two 32-bit cardinals (width, height);
            # only the low byte of each is read here.
            # NOTE(review): assumes icon dimensions < 256 px — confirm.
            size = icon[:8]
            if len(size) != 8 or not size[0] or not size[4]:
                break
            icon = icon[8:]
            width = size[0]
            height = size[4]
            next_pix = width * height * 4
            data = icon[:next_pix]
            arr = array.array("B", data)
            # Premultiply RGB channels by the alpha channel.
            for i in range(0, len(arr), 4):
                mult = arr[i + 3] / 255.
                arr[i + 0] = int(arr[i + 0] * mult)
                arr[i + 1] = int(arr[i + 1] * mult)
                arr[i + 2] = int(arr[i + 2] * mult)
            icon = icon[next_pix:]
            icons["%sx%s" % (width, height)] = arr
        self.icons = icons
        hook.fire("net_wm_icon_change", self)
    def handle_ClientMessage(self, event):  # noqa: N802
        """Handle EWMH client messages (_NET_WM_STATE, _NET_ACTIVE_WINDOW)."""
        atoms = self.qtile.conn.atoms
        opcode = event.type
        data = event.data
        if atoms["_NET_WM_STATE"] == opcode:
            prev_state = self.window.get_property(
                '_NET_WM_STATE',
                'ATOM',
                unpack=int
            )
            current_state = set(prev_state)
            # data32[0] is the action; data32[1] and data32[2] are up to
            # two state atoms the action applies to.
            action = data.data32[0]
            for prop in (data.data32[1], data.data32[2]):
                if not prop:
                    # skip 0
                    continue
                if action == _NET_WM_STATE_REMOVE:
                    current_state.discard(prop)
                elif action == _NET_WM_STATE_ADD:
                    current_state.add(prop)
                elif action == _NET_WM_STATE_TOGGLE:
                    current_state ^= set([prop])  # toggle :D
            self.window.set_property('_NET_WM_STATE', list(current_state))
        elif atoms["_NET_ACTIVE_WINDOW"] == opcode:
            source = data.data32[0]
            if source == 2:  # XCB_EWMH_CLIENT_SOURCE_TYPE_NORMAL
                logger.info("Focusing window by pager")
                self.qtile.current_screen.set_group(self.group)
                self.group.focus(self)
            else:  # XCB_EWMH_CLIENT_SOURCE_TYPE_OTHER
                # Behavior is configurable: focus, smart, urgent or ignore.
                focus_behavior = self.qtile.config.focus_on_window_activation
                if focus_behavior == "focus":
                    logger.info("Focusing window")
                    self.qtile.current_screen.set_group(self.group)
                    self.group.focus(self)
                elif focus_behavior == "smart" and self.group.screen and self.group.screen == self.qtile.current_screen:
                    logger.info("Focusing window")
                    self.qtile.current_screen.set_group(self.group)
                    self.group.focus(self)
                elif focus_behavior == "urgent" or (focus_behavior == "smart" and not self.group.screen):
                    logger.info("Setting urgent flag for window")
                    self.urgent = True
                else:
                    logger.info("Ignoring focus request")
def handle_PropertyNotify(self, e): # noqa: N802
name = self.qtile.conn.atoms.get_name(e.atom)
logger.debug("PropertyNotifyEvent: %s", name)
if name == "WM_TRANSIENT_FOR":
pass
elif name == "WM_HINTS":
self.update_hints()
elif name == "WM_NORMAL_HINTS":
self.update_hints()
elif name == "WM_NAME":
self.update_name()
elif name == "_NET_WM_NAME":
self.update_name()
elif name == "_NET_WM_VISIBLE_NAME":
self.update_name()
elif name == "WM_ICON_NAME":
pass
elif name == "_NET_WM_ICON_NAME":
pass
elif name == "_NET_WM_ICON":
self.update_wm_net_icon()
elif name == "ZOOM":
pass
elif name == "_NET_WM_WINDOW_OPACITY":
pass
elif name == "WM_STATE":
pass
elif name == "_NET_WM_STATE":
self.update_state()
elif name == "WM_PROTOCOLS":
pass
elif name == "_NET_WM_DESKTOP":
# Some windows set the state(fullscreen) when starts,
# update_state is here because the group and the screen
# are set when the property is emitted
# self.update_state()
self.update_state()
elif name == "_NET_WM_USER_TIME":
if not self.qtile.config.follow_mouse_focus and \
self.group.current_window != self:
self.group.focus(self, False)
else:
logger.info("Unknown window property: %s", name)
return False
def _items(self, name):
if name == "group":
return (True, None)
elif name == "layout":
return (True, list(range(len(self.group.layouts))))
elif name == "screen":
return (True, None)
    def _select(self, name, sel):
        """Select a child object for qtile's command graph navigation."""
        if name == "group":
            return self.group
        elif name == "layout":
            if sel is None:
                return self.group.layout
            else:
                # lget: list access tolerant of out-of-range selectors.
                return utils.lget(self.group.layouts, sel)
        elif name == "screen":
            return self.group.screen
    def __repr__(self):
        return "Window(%r)" % self.name
    def cmd_static(self, screen, x, y, width, height):
        """Make this window static (see :meth:`static`)."""
        self.static(screen, x, y, width, height)
    def cmd_kill(self):
        """Kill this window
        Try to do this politely if the client support
        this, otherwise be brutal.
        """
        self.kill()
    def cmd_togroup(self, groupName=None, *, switch_group=False):  # noqa: 803
        """Move window to a specified group.
        If groupName is not specified, we assume the current group.
        If switch_group is True, also switch to that group.
        Examples
        ========
        Move window to current group::
            togroup()
        Move window to group "a"::
            togroup("a")
        Move window to group "a", and switch to group "a"::
            togroup("a", switch_group=True)
        """
        self.togroup(groupName, switch_group=switch_group)
    def cmd_toscreen(self, index=None):
        """Move window to a specified screen.
        If index is not specified, we assume the current screen
        Examples
        ========
        Move window to current screen::
            toscreen()
        Move window to screen 0::
            toscreen(0)
        """
        self.toscreen(index)
    def cmd_move_floating(self, dx, dy):
        """Move window by dx and dy"""
        self.tweak_float(dx=dx, dy=dy)
    def cmd_resize_floating(self, dw, dh):
        """Add dw and dh to size of window"""
        self.tweak_float(dw=dw, dh=dh)
    def cmd_set_position_floating(self, x, y):
        """Move window to x and y"""
        self.tweak_float(x=x, y=y)
    def cmd_set_size_floating(self, w, h):
        """Set window dimensions to w and h"""
        self.tweak_float(w=w, h=h)
    def cmd_get_position(self):
        """Return the (x, y) position of the window."""
        return self.getposition()
    def cmd_get_size(self):
        """Return the (width, height) of the window."""
        return self.getsize()
    def cmd_toggle_floating(self):
        """Toggle the floating state of the window."""
        self.toggle_floating()
    def cmd_enable_floating(self):
        """Float the window."""
        self.floating = True
    def cmd_disable_floating(self):
        """Tile the window."""
        self.floating = False
    def cmd_toggle_maximize(self):
        """Toggle the maximized state of the window."""
        self.toggle_maximize()
def cmd_enable_maximize(self):
self.maximize = True
def cmd_disable_maximize(self):
self.maximize = False
    def cmd_toggle_fullscreen(self):
        """Toggle the fullscreen state of the window."""
        self.toggle_fullscreen()
    def cmd_enable_fullscreen(self):
        """Make the window fullscreen."""
        self.fullscreen = True
    def cmd_disable_fullscreen(self):
        """Leave fullscreen."""
        self.fullscreen = False
    def cmd_toggle_minimize(self):
        """Toggle the minimized state of the window."""
        self.toggle_minimize()
def cmd_enable_minimize(self):
self.minimize = True
def cmd_disable_minimize(self):
self.minimize = False
    def cmd_bring_to_front(self):
        """Raise this window above all others."""
        if self.floating:
            self.window.configure(stackmode=StackMode.Above)
        else:
            self._reconfigure_floating()  # automatically above
    def cmd_match(self, *args, **kwargs):
        """Match the window against attributes (see :meth:`match`)."""
        return self.match(*args, **kwargs)
def cmd_opacity(self, opacity):
if opacity < .1:
self.opacity = .1
elif opacity > 1:
self.opacity = 1
else:
self.opacity = opacity
def cmd_down_opacity(self):
if self.opacity > .2:
# don't go completely clear
self.opacity -= .1
else:
self.opacity = .1
def cmd_up_opacity(self):
if self.opacity < .9:
self.opacity += .1
else:
self.opacity = 1
def _is_in_window(self, x, y, window):
return (window.edges[0] <= x <= window.edges[2] and
window.edges[1] <= y <= window.edges[3])
    def cmd_set_position(self, dx, dy):
        """Reposition the window.

        Floating windows are moved by (dx, dy); tiled windows are swapped
        with the tiled window currently under the mouse pointer.
        """
        if self.floating:
            self.tweak_float(dx, dy)
            return
        for window in self.group.windows:
            if window == self or window.floating:
                continue
            curx, cury = self.qtile.get_mouse_position()
            if self._is_in_window(curx, cury, window):
                # Swap the two clients in the layout and keep focus on
                # the window that was dropped.
                clients = self.group.layout.clients
                index1 = clients.index(self)
                index2 = clients.index(window)
                clients[index1], clients[index2] = clients[index2], clients[index1]
                self.group.layout.focused = index2
                self.group.layout_all()
                break
| |
#!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
# Directory containing this script; the package name is derived from it.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
# Parsed command-line options; populated by main().
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
# Remote content/package directories on the device; filled in by main().
SRC_DIR = ""
PKG_SRC_DIR = ""
def doCMD(cmd):
    """Run *cmd* in a shell, echoing output line by line.

    Returns a (return_code, output_lines) tuple.
    """
    # Do not need handle timeout in this short script, let tool do it
    print "-->> \"%s\"" % cmd
    output = []
    cmd_return_code = 1
    cmd_proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
    while True:
        output_line = cmd_proc.stdout.readline().strip("\r\n")
        cmd_return_code = cmd_proc.poll()
        # poll() is None until the process exits; keep draining until the
        # stream is empty and the process has finished.
        if output_line == '' and cmd_return_code is not None:
            break
        sys.stdout.write("%s\n" % output_line)
        sys.stdout.flush()
        output.append(output_line)
    return (cmd_return_code, output)
def updateCMD(cmd=None):
    """Wrap pkgcmd invocations so they run as PARAMETERS.user with the
    session-bus environment (XW_ENV, set in main()) exported."""
    if "pkgcmd" in cmd:
        cmd = "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
    return cmd
def getUSERID():
    """Query the numeric uid of PARAMETERS.user on the target device."""
    if PARAMETERS.mode == "SDB":
        cmd = "sdb -s %s shell id -u %s" % (
            PARAMETERS.device, PARAMETERS.user)
    else:
        cmd = "ssh %s \"id -u %s\"" % (
            PARAMETERS.device, PARAMETERS.user)
    return doCMD(cmd)
def getPKGID(pkg_name=None):
    """Return the installed package id for *pkg_name*, or None.

    Parses the output of `pkgcmd -l` run on the device.
    """
    if PARAMETERS.mode == "SDB":
        cmd = "sdb -s %s shell %s" % (
            PARAMETERS.device, updateCMD('pkgcmd -l'))
    else:
        cmd = "ssh %s \"%s\"" % (
            PARAMETERS.device, updateCMD('pkgcmd -l'))
    (return_code, output) = doCMD(cmd)
    if return_code != 0:
        return None
    test_pkg_id = None
    for line in output:
        # Listing lines contain "[<pkg_name>]" and "pkgid [<id>]" tokens.
        if line.find("[" + pkg_name + "]") != -1:
            pkgidIndex = line.split().index("pkgid")
            test_pkg_id = line.split()[pkgidIndex + 1].strip("[]")
            break
    return test_pkg_id
def doRemoteCMD(cmd=None):
    """Run *cmd* on the target device via sdb shell or ssh."""
    if PARAMETERS.mode == "SDB":
        cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
    else:
        cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
    return doCMD(cmd)
def doRemoteCopy(src=None, dest=None):
    """Copy *src* onto the device at *dest*; return True on success.

    Bug fix: the return value was inverted (True on a non-zero exit
    code), which made the `if not doRemoteCopy(...)` checks in
    instPKGs() flag every successful copy as a failure and accept every
    failed one.
    """
    if PARAMETERS.mode == "SDB":
        cmd_prefix = "sdb -s %s push" % PARAMETERS.device
        cmd = "%s %s %s" % (cmd_prefix, src, dest)
    else:
        cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
    (return_code, output) = doCMD(cmd)
    # Flush the copy to storage before reporting the result.
    doRemoteCMD("sync")
    return return_code == 0
def uninstPKGs():
    """Uninstall every .wgt package under SCRIPT_DIR and remove the
    deployed files; return True when every step succeeded."""
    action_status = True
    for root, dirs, files in os.walk(SCRIPT_DIR):
        # mediasrc holds media fixtures, not packages.
        if root.endswith("mediasrc"):
            continue
        for file in files:
            if file.endswith(".wgt"):
                pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
                if not pkg_id:
                    action_status = False
                    continue
                (return_code, output) = doRemoteCMD(
                    "pkgcmd -u -t wgt -q -n %s" % pkg_id)
                # pkgcmd reports errors in its output, not its exit code.
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        break
    (return_code, output) = doRemoteCMD(
        "rm -rf %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    return action_status
def instPKGs():
    """Deploy the test suite's files to PKG_SRC_DIR on the device;
    return True when every copy succeeded.

    A falsy return from doRemoteCopy() is treated as failure.
    """
    action_status = True
    (return_code, output) = doRemoteCMD(
        "mkdir -p %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    for root, dirs, files in os.walk(SCRIPT_DIR):
        # mediasrc holds media fixtures, not packages.
        if root.endswith("mediasrc"):
            continue
    # for file in files:
    #     if file.endswith("%s.wgt" % PKG_NAME):
    #         if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
    #             action_status = False
    #         (return_code, output) = doRemoteCMD(
    #             "pkgcmd -i -t wgt -q -p %s/%s" % (SRC_DIR, file))
    #         doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
    #         for line in output:
    #             if "Failure" in line:
    #                 action_status = False
    #                 break
    # Do some special copy/delete... steps
    '''
    (return_code, output) = doRemoteCMD(
        "mkdir -p %s/tests" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    '''
    if not doRemoteCopy("specname/tests", "%s/tests" % PKG_SRC_DIR):
        action_status = False
    # Copy everything except this installer script itself.
    for item in glob.glob("%s/*" % SCRIPT_DIR):
        if item.endswith("inst.py"):
            continue
        else:
            item_name = os.path.basename(item)
            if not doRemoteCopy(item, "%s/%s" % (PKG_SRC_DIR, item_name)):
                action_status = False
    return action_status
def main():
    """Parse options, resolve the target device and user, then install
    or uninstall the package set."""
    try:
        usage = "usage: inst.py -i"
        opts_parser = OptionParser(usage=usage)
        opts_parser.add_option(
            "-m", dest="mode", action="store", help="Specify mode")
        opts_parser.add_option(
            "-s", dest="device", action="store", help="Specify device")
        opts_parser.add_option(
            "-i", dest="binstpkg", action="store_true", help="Install package")
        opts_parser.add_option(
            "-u", dest="buninstpkg", action="store_true", help="Uninstall package")
        opts_parser.add_option(
            "-a", dest="user", action="store", help="User name")
        global PARAMETERS
        (PARAMETERS, args) = opts_parser.parse_args()
    except Exception as e:
        print "Got wrong option: %s, exit ..." % e
        sys.exit(1)
    if not PARAMETERS.user:
        PARAMETERS.user = "app"
    global SRC_DIR, PKG_SRC_DIR
    SRC_DIR = "/home/%s/content" % PARAMETERS.user
    PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
    if not PARAMETERS.mode:
        PARAMETERS.mode = "SDB"
    if PARAMETERS.mode == "SDB":
        # Auto-pick the first attached sdb device when none was given.
        if not PARAMETERS.device:
            (return_code, output) = doCMD("sdb devices")
            for line in output:
                if str.find(line, "\tdevice") != -1:
                    PARAMETERS.device = line.split("\t")[0]
                    break
    else:
        PARAMETERS.mode = "SSH"
    if not PARAMETERS.device:
        print "No device provided"
        sys.exit(1)
    # Resolve the user's uid so XW_ENV points at their session bus.
    user_info = getUSERID()
    re_code = user_info[0]
    if re_code == 0:
        global XW_ENV
        userid = user_info[1][0]
        XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket" % str(
            userid)
    else:
        print "[Error] cmd commands error : %s" % str(user_info[1])
        sys.exit(1)
    if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
        print "-i and -u are conflict"
        sys.exit(1)
    if PARAMETERS.buninstpkg:
        if not uninstPKGs():
            sys.exit(1)
    else:
        if not instPKGs():
            sys.exit(1)
# Script entry point: exit 0 when main() completes (main() exits
# non-zero itself on any failure).
if __name__ == "__main__":
    main()
    sys.exit(0)
| |
"""Helpers for components that manage entities."""
import asyncio
from datetime import timedelta
from itertools import chain
import logging
from homeassistant import config as conf_util
from homeassistant.setup import async_prepare_setup_platform
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_SCAN_INTERVAL,
CONF_ENTITY_NAMESPACE,
ENTITY_MATCH_ALL,
)
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_per_platform, discovery
from homeassistant.helpers.config_validation import ENTITY_SERVICE_SCHEMA
from homeassistant.helpers.service import async_extract_entity_ids
from homeassistant.loader import bind_hass, async_get_integration
from homeassistant.util import slugify
from .entity_platform import EntityPlatform
# mypy: allow-untyped-defs, no-check-untyped-defs
# Fallback polling interval for platforms that define no SCAN_INTERVAL.
DEFAULT_SCAN_INTERVAL = timedelta(seconds=15)
# hass.data key mapping each domain to its EntityComponent instance.
DATA_INSTANCES = "entity_components"
@bind_hass
async def async_update_entity(hass, entity_id):
    """Trigger an update for an entity."""
    # The domain ("light" in "light.kitchen") selects the component.
    domain = entity_id.split(".", 1)[0]
    entity_comp = hass.data.get(DATA_INSTANCES, {}).get(domain)
    if entity_comp is None:
        logging.getLogger(__name__).warning(
            "Forced update failed. Component for %s not loaded.", entity_id
        )
        return
    entity = entity_comp.get_entity(entity_id)
    if entity is None:
        logging.getLogger(__name__).warning(
            "Forced update failed. Entity %s not found.", entity_id
        )
        return
    # NOTE(review): positional True presumably forces a refresh from the
    # source before the state write — confirm against Entity API.
    await entity.async_update_ha_state(True)
class EntityComponent:
    """The EntityComponent manages platforms that manages entities.
    This class has the following responsibilities:
    - Process the configuration and set up a platform based component.
    - Manage the platforms and their entities.
    - Help extract the entities from a service call.
    - Maintain a group that tracks all platform entities.
    - Listen for discovery events for platforms related to the domain.
    """
    def __init__(
        self, logger, domain, hass, scan_interval=DEFAULT_SCAN_INTERVAL, group_name=None
    ):
        """Initialize an entity component."""
        self.logger = logger
        self.hass = hass
        self.domain = domain
        self.scan_interval = scan_interval
        self.group_name = group_name
        self.config = None
        # Platforms are keyed by domain (the default platform), config
        # entry id, or a (type, scan_interval, namespace) tuple.
        self._platforms = {domain: self._async_init_entity_platform(domain, None)}
        self.async_add_entities = self._platforms[domain].async_add_entities
        self.add_entities = self._platforms[domain].add_entities
        # Register ourselves so async_update_entity() can find us.
        hass.data.setdefault(DATA_INSTANCES, {})[domain] = self
    @property
    def entities(self):
        """Return an iterable that returns all entities."""
        return chain.from_iterable(
            platform.entities.values() for platform in self._platforms.values()
        )
    def get_entity(self, entity_id):
        """Get an entity."""
        for platform in self._platforms.values():
            entity = platform.entities.get(entity_id)
            if entity is not None:
                return entity
        return None
    def setup(self, config):
        """Set up a full entity component.
        This doesn't block the executor to protect from deadlocks.
        """
        self.hass.add_job(self.async_setup(config))
    async def async_setup(self, config):
        """Set up a full entity component.
        Loads the platforms from the config and will listen for supported
        discovered platforms.
        This method must be run in the event loop.
        """
        self.config = config
        # Look in config for Domain, Domain 2, Domain 3 etc and load them
        tasks = []
        for p_type, p_config in config_per_platform(config, self.domain):
            tasks.append(self.async_setup_platform(p_type, p_config))
        if tasks:
            await asyncio.wait(tasks)
        # Generic discovery listener for loading platform dynamically
        # Refer to: homeassistant.components.discovery.load_platform()
        async def component_platform_discovered(platform, info):
            """Handle the loading of a platform."""
            await self.async_setup_platform(platform, {}, info)
        discovery.async_listen_platform(
            self.hass, self.domain, component_platform_discovered
        )
    async def async_setup_entry(self, config_entry):
        """Set up a config entry.

        Returns False when the platform cannot be prepared; raises
        ValueError when the entry was already set up.
        """
        platform_type = config_entry.domain
        platform = await async_prepare_setup_platform(
            self.hass,
            # In future PR we should make hass_config part of the constructor
            # params.
            self.config or {},
            self.domain,
            platform_type,
        )
        if platform is None:
            return False
        key = config_entry.entry_id
        if key in self._platforms:
            raise ValueError("Config entry has already been setup!")
        self._platforms[key] = self._async_init_entity_platform(
            platform_type,
            platform,
            scan_interval=getattr(platform, "SCAN_INTERVAL", None),
        )
        return await self._platforms[key].async_setup_entry(config_entry)
    async def async_unload_entry(self, config_entry):
        """Unload a config entry."""
        key = config_entry.entry_id
        platform = self._platforms.pop(key, None)
        if platform is None:
            raise ValueError("Config entry was never loaded!")
        await platform.async_reset()
        return True
    async def async_extract_from_service(self, service, expand_group=True):
        """Extract all known and available entities from a service call.
        Will return all entities if no entities specified in call.
        Will return an empty list if entities specified but unknown.
        This method must be run in the event loop.
        """
        data_ent_id = service.data.get(ATTR_ENTITY_ID)
        if data_ent_id in (None, ENTITY_MATCH_ALL):
            if data_ent_id is None:
                # Targeting everything implicitly is deprecated; callers
                # should pass ENTITY_MATCH_ALL explicitly.
                self.logger.warning(
                    "Not passing an entity ID to a service to target all "
                    "entities is deprecated. Update your call to %s.%s to be "
                    "instead: entity_id: %s",
                    service.domain,
                    service.service,
                    ENTITY_MATCH_ALL,
                )
            return [entity for entity in self.entities if entity.available]
        entity_ids = await async_extract_entity_ids(self.hass, service, expand_group)
        return [
            entity
            for entity in self.entities
            if entity.available and entity.entity_id in entity_ids
        ]
    @callback
    def async_register_entity_service(self, name, schema, func, required_features=None):
        """Register an entity service."""
        if isinstance(schema, dict):
            schema = ENTITY_SERVICE_SCHEMA.extend(schema)
        async def handle_service(call):
            """Handle the service."""
            service_name = f"{self.domain}.{name}"
            await self.hass.helpers.service.entity_service_call(
                self._platforms.values(), func, call, service_name, required_features
            )
        self.hass.services.async_register(self.domain, name, handle_service, schema)
    async def async_setup_platform(
        self, platform_type, platform_config, discovery_info=None
    ):
        """Set up a platform for this component."""
        if self.config is None:
            raise RuntimeError("async_setup needs to be called first")
        platform = await async_prepare_setup_platform(
            self.hass, self.config, self.domain, platform_type
        )
        if platform is None:
            return
        # Use config scan interval, fallback to platform if none set
        scan_interval = platform_config.get(
            CONF_SCAN_INTERVAL, getattr(platform, "SCAN_INTERVAL", None)
        )
        entity_namespace = platform_config.get(CONF_ENTITY_NAMESPACE)
        # One EntityPlatform per (type, interval, namespace) combination.
        key = (platform_type, scan_interval, entity_namespace)
        if key not in self._platforms:
            self._platforms[key] = self._async_init_entity_platform(
                platform_type, platform, scan_interval, entity_namespace
            )
        await self._platforms[key].async_setup(platform_config, discovery_info)
    @callback
    def _async_update_group(self):
        """Set up and/or update component group.
        This method must be run in the event loop.
        """
        if self.group_name is None:
            return
        ids = [
            entity.entity_id
            for entity in sorted(
                self.entities, key=lambda entity: entity.name or entity.entity_id
            )
        ]
        self.hass.async_create_task(
            self.hass.services.async_call(
                "group",
                "set",
                dict(
                    object_id=slugify(self.group_name),
                    name=self.group_name,
                    visible=False,
                    entities=ids,
                ),
            )
        )
    async def _async_reset(self):
        """Remove entities and reset the entity component to initial values.
        This method must be run in the event loop.
        """
        tasks = [platform.async_reset() for platform in self._platforms.values()]
        if tasks:
            await asyncio.wait(tasks)
        # Keep only the default (domain) platform.
        self._platforms = {self.domain: self._platforms[self.domain]}
        self.config = None
        if self.group_name is not None:
            await self.hass.services.async_call(
                "group", "remove", dict(object_id=slugify(self.group_name))
            )
    async def async_remove_entity(self, entity_id):
        """Remove an entity managed by one of the platforms."""
        for platform in self._platforms.values():
            if entity_id in platform.entities:
                await platform.async_remove_entity(entity_id)
    async def async_prepare_reload(self):
        """Prepare reloading this entity component.
        This method must be run in the event loop.

        Returns the processed config, or None when loading/validation
        failed (errors are logged, not raised).
        """
        try:
            conf = await conf_util.async_hass_config_yaml(self.hass)
        except HomeAssistantError as err:
            self.logger.error(err)
            return None
        integration = await async_get_integration(self.hass, self.domain)
        conf = await conf_util.async_process_component_config(
            self.hass, conf, integration
        )
        if conf is None:
            return None
        await self._async_reset()
        return conf
    def _async_init_entity_platform(
        self, platform_type, platform, scan_interval=None, entity_namespace=None
    ):
        """Initialize an entity platform."""
        if scan_interval is None:
            scan_interval = self.scan_interval
        return EntityPlatform(
            hass=self.hass,
            logger=self.logger,
            domain=self.domain,
            platform_name=platform_type,
            platform=platform,
            scan_interval=scan_interval,
            entity_namespace=entity_namespace,
            async_entities_added_callback=self._async_update_group,
        )
| |
try:
import builtins
builtin_module = builtins
except ImportError:
import __builtin__
builtin_module = __builtin__
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
try:
import unittest.mock as mock
except ImportError:
import mock
import pytest
import shlex
import re
import os
from bridgy.command import Sshfs
from bridgy.inventory import Instance
from bridgy.error import BadInstanceError, BadConfigError, MissingBastionHost, BadRemoteDir
from bridgy.config import Config
# Sample /etc/mtab contents used by test_sshfs_mounts(); the last two
# entries are fuse.sshfs mounts, only one of which lives under the
# bridgy mount root (~/.bridgy/mounts).
MTAB = u"""\
ysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
udev /dev devtmpfs rw,nosuid,relatime,size=16359216k,nr_inodes=4089804,mode=755 0 0
devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
tmpfs /run tmpfs rw,nosuid,noexec,relatime,size=3276836k,mode=755 0 0
tmpfs /dev/shm tmpfs rw,nosuid,nodev 0 0
tmpfs /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k 0 0
tmpfs /sys/fs/cgroup tmpfs ro,nosuid,nodev,noexec,mode=755 0 0
cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/lib/systemd/systemd-cgroups-agent,name=systemd 0 0
pstore /sys/fs/pstore pstore rw,nosuid,nodev,noexec,relatime 0 0
efivarfs /sys/firmware/efi/efivars efivarfs rw,nosuid,nodev,noexec,relatime 0 0
tmpfs /run/user/1000 tmpfs rw,nosuid,nodev,relatime,size=3276836k,mode=700,uid=1000,gid=1000 0 0
gvfsd-fuse /run/user/1000/gvfs fuse.gvfsd-fuse rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
ubuntu@devbox:/tmp /home/dummy/.bridgy/mounts/awesomebox@devbox fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
ubuntu@devbox:/tmp /home/dummy/someotherdir/awesomebox@devbox fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0"""
# Shared fixture instance for every test.
instance = Instance('name', 'address.com')
# Pattern used to normalize tokens before comparison.
# NOTE(review): despite the name, \W matches non-word chars, not just whitespace.
whitespace_pattern = re.compile(r'\W+')
def assert_command_results(result1, result2):
    """Assert two shell command strings are token-for-token equivalent,
    normalizing runs of non-word characters inside each token."""
    tokens_a = shlex.split(result1)
    tokens_b = shlex.split(result2)
    assert len(tokens_a) == len(tokens_b)
    for token_a, token_b in zip(tokens_a, tokens_b):
        normalized_a = whitespace_pattern.sub(' ', token_a)
        normalized_b = whitespace_pattern.sub(' ', token_b)
        assert normalized_a == normalized_b
### Mounting / Unmounting ######################################################
def test_sshfs_mount_remotedir_missing():
    """mount() without a remotedir must raise BadRemoteDir."""
    config = Config({})
    sshObj = Sshfs(config, instance)
    with pytest.raises(BadRemoteDir):
        sshObj.mount()
def test_sshfs_mount_remotedir_dne(mocker):
    """A successful mount (os.system -> 0) creates the missing mount
    point and does not clean it up afterwards."""
    mock_ls = mocker.patch.object(os, 'listdir')
    mock_rmdir = mocker.patch('os.rmdir', side_effect=lambda x: True)
    mock_system = mocker.patch('os.system', side_effect=lambda x: 0)
    mock_mkdir = mocker.patch('os.mkdir', side_effect=lambda x: True)
    mock_exists = mocker.patch('os.path.exists', side_effect=lambda x: False)
    mock_ls.return_value = ['/home/dummy/.bridgy/mounts/baddir', '/home/dummy/.bridgy/mounts/awesomebox@devbox']
    config = Config({})
    sshObj = Sshfs(config, instance, remotedir='/tmp/test')
    sshObj.mount()
    assert mock_exists.called
    assert mock_mkdir.called
    assert mock_system.called
    # Success path must not remove the freshly created mount point.
    assert not mock_rmdir.called
def test_sshfs_mount_failed(mocker):
    """A failed mount (os.system -> 1) removes the mount point again."""
    mock_ls = mocker.patch.object(os, 'listdir')
    mock_rmdir = mocker.patch('os.rmdir', side_effect=lambda x: True)
    mock_system = mocker.patch('os.system', side_effect=lambda x: 1)
    mock_mkdir = mocker.patch('os.mkdir', side_effect=lambda x: True)
    mock_exists = mocker.patch('os.path.exists', side_effect=lambda x: False)
    mock_ls.return_value = ['/home/dummy/.bridgy/mounts/baddir', '/home/dummy/.bridgy/mounts/awesomebox@devbox']
    config = Config({})
    sshObj = Sshfs(config, instance, remotedir='/tmp/test')
    sshObj.mount()
    assert mock_exists.called
    assert mock_mkdir.called
    assert mock_system.called
    # Failure path cleans up the mount point it created.
    assert mock_rmdir.called
def test_sshfs_mounts(mocker):
    """mounts() reads mtab and reports only sshfs mounts under the
    bridgy mount root (one of the two sshfs entries in MTAB)."""
    mock_ls = mocker.patch.object(os, 'listdir')
    mock_open = mocker.patch.object(builtin_module, 'open')
    mock_open.return_value = StringIO(MTAB)
    mock_ls.return_value = ['/home/dummy/.bridgy/mounts/baddir', '/home/dummy/.bridgy/mounts/awesomebox@devbox']
    filename = '/etc/mtab'  # NOTE(review): unused; kept for documentation value
    mounts_root_dir = '/home/dummy/.bridgy/mounts'
    owned_mount = os.path.join(mounts_root_dir, 'awesomebox@devbox')
    result = Sshfs.mounts(mounts_root_dir)
    assert len(result) == 1
    assert owned_mount in result
def test_sshfs_unmount_go_case(mocker):
    """Successful unmount removes the mount dir and reports success."""
    mock_rmdir = mocker.patch.object(os, 'rmdir')
    mock_system = mocker.patch.object(os, 'system')
    mock_exists = mocker.patch.object(os.path, 'exists')
    mock_exists.return_value = True
    mock_system.return_value = 0
    mock_rmdir.return_value = True
    config = Config({})
    sshfsObj = Sshfs(config, instance, remotedir='/tmp/test')
    success = sshfsObj.unmount()
    assert mock_rmdir.call_count == 1
    assert success == True
def test_sshfs_unmount_mountpoint_dne(mocker):
    """Unmounting a non-existent mount point fails without rmdir."""
    mock_rmdir = mocker.patch.object(os, 'rmdir')
    mock_system = mocker.patch.object(os, 'system')
    mock_exists = mocker.patch.object(os.path, 'exists')
    mock_exists.return_value = False
    mock_system.return_value = 0
    mock_rmdir.return_value = True
    config = Config({})
    sshfsObj = Sshfs(config, instance, remotedir='/tmp/test')
    success = sshfsObj.unmount()
    assert mock_rmdir.call_count == 0
    assert success == False
def test_sshfs_unmount_fuse_failure(mocker):
    """A failing fusermount (os.system -> 1) fails without rmdir."""
    mock_rmdir = mocker.patch.object(os, 'rmdir')
    mock_system = mocker.patch.object(os, 'system')
    mock_exists = mocker.patch.object(os.path, 'exists')
    mock_exists.return_value = True
    mock_system.return_value = 1
    mock_rmdir.return_value = True
    config = Config({})
    sshfsObj = Sshfs(config, instance, remotedir='/tmp/test')
    success = sshfsObj.unmount()
    assert mock_rmdir.call_count == 0
    assert success == False
### Command Formatting #########################################################
def test_sshfs_command_go_case():
config = Config({
'ssh': {}
})
remotedir = '/tmp'
sshObj = Sshfs(config, instance, remotedir)
mount_arg = '%s/%s@%s'%(config.mount_root_dir, instance.name, instance.address)
assert_command_results(sshObj.command, 'sshfs address.com:/tmp %s' % mount_arg)
def test_sshfs_command_go_case_no_options():
config = Config({})
remotedir = '/tmp'
sshObj = Sshfs(config, instance, remotedir)
mount_arg = '%s/%s@%s'%(config.mount_root_dir, instance.name, instance.address)
assert_command_results(sshObj.command, 'sshfs address.com:/tmp %s' % mount_arg)
def test_sshfs_command_user():
    """A configured ssh user is prepended to the remote host."""
    config = Config({'ssh': {'user': 'username'}})
    sshfs = Sshfs(config, instance, '/tmp')
    mountpoint = '{}/{}@{}'.format(
        config.mount_root_dir, instance.name, instance.address)
    assert_command_results(
        sshfs.command,
        'sshfs username@address.com:/tmp {}'.format(mountpoint))
def test_sshfs_options():
    """sshfs-section options are used; plain ssh options are not."""
    config = Config({
        'ssh': {'user': 'username', 'options': '-o ForwardAgent=yes'},
        'sshfs': {'options': '-C -o ServerAliveInterval=255'},
    })
    sshfs = Sshfs(config, instance, '/tmp')
    mountpoint = '{}/{}@{}'.format(
        config.mount_root_dir, instance.name, instance.address)
    expected = ('sshfs -C -o ServerAliveInterval=255 '
                'username@address.com:/tmp {}'.format(mountpoint))
    assert_command_results(sshfs.command, expected)
def test_sshfs_command_no_user():
    """Without an ssh user the remote is addressed by host alone."""
    config = Config({'sshfs': {'options': '-C -o ServerAliveInterval=255'}})
    sshfs = Sshfs(config, instance, '/tmp')
    mountpoint = '{}/{}@{}'.format(
        config.mount_root_dir, instance.name, instance.address)
    expected = ('sshfs -C -o ServerAliveInterval=255 '
                'address.com:/tmp {}'.format(mountpoint))
    assert_command_results(sshfs.command, expected)
def test_sshfs_command_bastion_options():
    """Bastion options are embedded in the generated ProxyCommand."""
    config = Config({
        'bastion': {
            'address': 'bastion.com',
            'options': '-C -o ServerAliveInterval=255',
        },
    })
    sshfs = Sshfs(config, instance, '/tmp')
    mountpoint = '{}/{}@{}'.format(
        config.mount_root_dir, instance.name, instance.address)
    expected = ("sshfs -o ProxyCommand='ssh -C -o ServerAliveInterval=255 "
                "-W %h:%p bastion.com' address.com:/tmp {}".format(mountpoint))
    assert_command_results(sshfs.command, expected)
def test_sshfs_command_bastion_user():
    """A bastion user is prepended to the bastion host in the ProxyCommand."""
    config = Config({
        'bastion': {
            'address': 'bastion.com',
            'user': 'bastionuser',
        },
    })
    sshfs = Sshfs(config, instance, '/tmp')
    mountpoint = '{}/{}@{}'.format(
        config.mount_root_dir, instance.name, instance.address)
    expected = ("sshfs -o ProxyCommand='ssh -W %h:%p "
                "bastionuser@bastion.com' address.com:/tmp {}".format(
                    mountpoint))
    assert_command_results(sshfs.command, expected)
def test_sshfs_command_mountoptions():
    """Mount options from the sshfs section are passed through verbatim."""
    opts = ('-o auto_cache,reconnect,defer_permissions,'
            'noappledouble,nolocalcaches,no_readahead')
    config = Config({'sshfs': {'options': opts}})
    sshfs = Sshfs(config, instance, '/tmp')
    mountpoint = '{}/{}@{}'.format(
        config.mount_root_dir, instance.name, instance.address)
    assert_command_results(
        sshfs.command,
        'sshfs {} address.com:/tmp {}'.format(
            config.dig('sshfs', 'options'), mountpoint))
def test_sshfs_command_bastion_missing_address():
    """A bastion section without an address raises MissingBastionHost."""
    config = Config({'bastion': {}})
    with pytest.raises(MissingBastionHost):
        sshfs = Sshfs(config, instance, '/tmp')
        sshfs.command
def test_sshfs_command_null_instance():
    """Building a command with a None instance raises BadInstanceError."""
    config = Config({})
    # Removed an unused `remotedir` local: this case deliberately constructs
    # Sshfs without a remote directory.
    with pytest.raises(BadInstanceError):
        sshfs = Sshfs(config, None)
        sshfs.command
def test_sshfs_command_null_config():
    """Building a command with a None config raises BadConfigError."""
    with pytest.raises(BadConfigError):
        sshfs = Sshfs(None, instance, '/tmp')
        sshfs.command
| |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for rietveld.py."""
import logging
import os
import ssl
import sys
import time
import traceback
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from testing_support.patches_data import GIT, RAW
from testing_support import auto_stub
import patch
import rietveld
def _api(files):
  """Serializes *files* the way the rietveld JSON API returns them."""
  payload = {'files': files}
  return rietveld.json.dumps(payload)
def _file(
status, is_binary=False, num_chunks=1, chunk_id=789, property_changes=''):
"""Mock a file in a rietveld api request."""
return {
'status': status,
'is_binary': is_binary,
'num_chunks': num_chunks,
'id': chunk_id,
'property_changes': property_changes,
}
class BaseFixture(unittest.TestCase):
  """Shared fixture that replaces the Rietveld network layer with a queue.

  Each test loads self.requests with (url, canned_response) pairs;
  _rietveld_send pops them in order, and tearDown asserts every queued
  request was consumed.
  """
  # Override.
  TESTED_CLASS = Exception
  def setUp(self):
    # Instantiate the class under test and stub out its private _send.
    super(BaseFixture, self).setUp()
    # Access to a protected member XX of a client class
    # pylint: disable=W0212
    self.rietveld = self.TESTED_CLASS('url', None, 'email')
    self.rietveld._send = self._rietveld_send
    self.requests = []
  def tearDown(self):
    # A leftover entry means the test queued a request that was never made.
    self.assertEqual([], self.requests)
    super(BaseFixture, self).tearDown()
  def _rietveld_send(self, url, *args, **kwargs):
    """Fake transport: matches *url* against the next queued request."""
    self.assertTrue(self.requests, url)
    request = self.requests.pop(0)
    self.assertEqual(2, len(request))
    self.assertEqual(url, request[0])
    return request[1]
  def _check_patch(self,
                   p,
                   filename,
                   diff,
                   source_filename=None,
                   is_binary=False,
                   is_delete=False,
                   is_git_diff=False,
                   is_new=False,
                   patchlevel=0,
                   svn_properties=None):
    """Asserts all attributes of one parsed patch object.

    hasattr() guards exist because different patch classes expose
    different attribute sets (e.g. only git diffs carry patchlevel).
    """
    svn_properties = svn_properties or []
    self.assertEqual(p.filename, filename)
    self.assertEqual(p.source_filename, source_filename)
    self.assertEqual(p.is_binary, is_binary)
    self.assertEqual(p.is_delete, is_delete)
    if hasattr(p, 'is_git_diff'):
      self.assertEqual(p.is_git_diff, is_git_diff)
    self.assertEqual(p.is_new, is_new)
    if hasattr(p, 'patchlevel'):
      self.assertEqual(p.patchlevel, patchlevel)
    if diff:
      self.assertEqual(p.get(True), diff)
    if hasattr(p, 'svn_properties'):
      self.assertEqual(p.svn_properties, svn_properties)
class RietveldTest(BaseFixture):
  """Exercises Rietveld.get_patch()/search() against canned HTTP replies.

  Fixed: replaced the Python-2-only ``except X, e:`` comma syntax with
  ``except X as e:`` (PEP 3110; valid on Python 2.6+ and 3.x) in
  test_invalid_status, test_m_plus_unknown_prop and test_bad_svn_properties.
  """
  TESTED_CLASS = rietveld.Rietveld
  def test_get_patch_empty(self):
    self.requests = [('/api/123/456', '{}')]
    patches = self.rietveld.get_patch(123, 456)
    self.assertTrue(isinstance(patches, patch.PatchSet))
    self.assertEqual([], patches.patches)
  def test_get_patch_no_status(self):
    # A file entry with no status is treated from the diff content (delete).
    self.requests = [
        ( '/api/123/456',
          _api(
            {
              'tools/clang_check/README.chromium': {
                'status': None,
                'id': 789,
              }})),
        ('/download/issue123_456_789.diff', RAW.DELETE),
    ]
    patches = self.rietveld.get_patch(123, 456)
    self.assertEqual(1, len(patches.patches))
    self._check_patch(
        patches.patches[0],
        'tools/clang_check/README.chromium',
        RAW.DELETE,
        is_delete=True)
  def test_get_patch_2_files(self):
    self.requests = [
        ('/api/123/456',
         _api({'foo': _file('A'), 'file_a': _file('M', chunk_id=790)})),
        ('/download/issue123_456_789.diff', RAW.NEW),
        ('/download/issue123_456_790.diff', RAW.NEW_NOT_NULL),
    ]
    patches = self.rietveld.get_patch(123, 456)
    self.assertEqual(2, len(patches.patches))
    self._check_patch(
        patches.patches[0], 'file_a', RAW.NEW_NOT_NULL, is_new=True)
    self._check_patch(patches.patches[1], 'foo', RAW.NEW, is_new=True)
  def test_get_patch_add(self):
    self.requests = [
        ('/api/123/456', _api({'foo': _file('A')})),
        ('/download/issue123_456_789.diff', RAW.NEW),
    ]
    patches = self.rietveld.get_patch(123, 456)
    self.assertEqual(1, len(patches.patches))
    self._check_patch(patches.patches[0], 'foo', RAW.NEW, is_new=True)
  def test_invalid_status(self):
    self.requests = [
        ('/api/123/456', _api({'file_a': _file('B')})),
    ]
    try:
      self.rietveld.get_patch(123, 456)
      self.fail()
    except patch.UnsupportedPatchFormat as e:
      self.assertEqual('file_a', e.filename)
  def test_add_plus_merge(self):
    # svn:mergeinfo is dropped.
    properties = (
        '\nAdded: svn:mergeinfo\n'
        '   Merged /branches/funky/file_b:r69-2775\n')
    self.requests = [
        ('/api/123/456',
         _api({'pp': _file('A+', property_changes=properties)})),
        ('/download/issue123_456_789.diff', GIT.COPY),
    ]
    patches = self.rietveld.get_patch(123, 456)
    self.assertEqual(1, len(patches.patches))
    self._check_patch(
        patches.patches[0],
        'pp',
        GIT.COPY,
        is_git_diff=True,
        is_new=True,
        patchlevel=1,
        source_filename='PRESUBMIT.py')
  def test_add_plus_eol_style(self):
    properties = '\nAdded: svn:eol-style\n   + LF\n'
    self.requests = [
        ('/api/123/456',
         _api({'pp': _file('A+', property_changes=properties)})),
        ('/download/issue123_456_789.diff', GIT.COPY),
    ]
    patches = self.rietveld.get_patch(123, 456)
    self.assertEqual(1, len(patches.patches))
    self._check_patch(
        patches.patches[0],
        'pp',
        GIT.COPY,
        is_git_diff=True,
        is_new=True,
        patchlevel=1,
        source_filename='PRESUBMIT.py',
        svn_properties=[('svn:eol-style', 'LF')])
  def test_add_empty(self):
    self.requests = [
        ('/api/123/456', _api({'__init__.py': _file('A ', num_chunks=0)})),
        ('/download/issue123_456_789.diff', RAW.CRAP_ONLY),
    ]
    patches = self.rietveld.get_patch(123, 456)
    self.assertEqual(1, len(patches.patches))
    self._check_patch(
        patches.patches[0],
        '__init__.py',
        RAW.CRAP_ONLY,
        is_new=True)
  def test_delete(self):
    name = 'tools/clang_check/README.chromium'
    self.requests = [
        ('/api/123/456', _api({name: _file('D')})),
        ('/download/issue123_456_789.diff', RAW.DELETE),
    ]
    patches = self.rietveld.get_patch(123, 456)
    self.assertEqual(1, len(patches.patches))
    self._check_patch(patches.patches[0], name, RAW.DELETE, is_delete=True)
  def test_delete_empty(self):
    name = 'tests/__init__.py'
    self.requests = [
        ('/api/123/456', _api({name: _file('D')})),
        ('/download/issue123_456_789.diff', GIT.DELETE_EMPTY),
    ]
    patches = self.rietveld.get_patch(123, 456)
    self.assertEqual(1, len(patches.patches))
    self._check_patch(
        patches.patches[0],
        name,
        GIT.DELETE_EMPTY,
        is_delete=True,
        is_git_diff=True,
        patchlevel=1)
  def test_m_plus(self):
    properties = '\nAdded: svn:eol-style\n   + LF\n'
    self.requests = [
        ('/api/123/456',
         _api({'chrome/file.cc': _file('M+', property_changes=properties)})),
        ('/download/issue123_456_789.diff', RAW.PATCH),
    ]
    patches = self.rietveld.get_patch(123, 456)
    self.assertEqual(1, len(patches.patches))
    self._check_patch(
        patches.patches[0],
        'chrome/file.cc',
        RAW.PATCH,
        svn_properties=[('svn:eol-style', 'LF')])
  def test_m_plus_unknown_prop(self):
    properties = '\nAdded: svn:foobar\n   + stuff\n'
    self.requests = [
        ('/api/123/456',
         _api({'file_a': _file('M+', property_changes=properties)})),
    ]
    try:
      self.rietveld.get_patch(123, 456)
      self.fail()
    except patch.UnsupportedPatchFormat as e:
      self.assertEqual('file_a', e.filename)
  def test_get_patch_moved(self):
    self.requests = [
        ('/api/123/456', _api({'file_b': _file('A+')})),
        ('/download/issue123_456_789.diff', RAW.MINIMAL_RENAME),
    ]
    patches = self.rietveld.get_patch(123, 456)
    self.assertEqual(1, len(patches.patches))
    self._check_patch(
        patches.patches[0],
        'file_b',
        RAW.MINIMAL_RENAME,
        source_filename='file_a',
        is_new=True)
  def test_svn_properties(self):
    # Line too long (N/80)
    # pylint: disable=C0301
    # To test one of these, run something like
    # import json, pprint, urllib
    # url = 'http://codereview.chromium.org/api/202046/1'
    # pprint.pprint(json.load(urllib.urlopen(url))['files'])
    # svn:mergeinfo across branches:
    # http://codereview.chromium.org/202046/diff/1/third_party/libxml/xmlcatalog_dummy.cc
    self.assertEqual(
        [('svn:eol-style', 'LF')],
        rietveld.Rietveld.parse_svn_properties(
            u'\nAdded: svn:eol-style\n   + LF\n', 'foo'))
    # svn:eol-style property that is lost in the diff
    # http://codereview.chromium.org/202046/diff/1/third_party/libxml/xmllint_dummy.cc
    self.assertEqual(
        [],
        rietveld.Rietveld.parse_svn_properties(
            u'\nAdded: svn:mergeinfo\n'
            '   Merged /branches/chrome_webkit_merge_branch/third_party/'
            'libxml/xmldummy_mac.cc:r69-2775\n',
            'foo'))
    self.assertEqual(
        [],
        rietveld.Rietveld.parse_svn_properties(u'', 'foo'))
    # http://codereview.chromium.org/api/7834045/15001
    self.assertEqual(
        [('svn:executable', '*'), ('svn:eol-style', 'LF')],
        rietveld.Rietveld.parse_svn_properties(
            '\n'
            'Added: svn:executable\n'
            '   + *\n'
            'Added: svn:eol-style\n'
            '   + LF\n',
            'foo'))
    # http://codereview.chromium.org/api/9139006/7001
    self.assertEqual(
        [('svn:mime-type', 'image/png')],
        rietveld.Rietveld.parse_svn_properties(
            '\n'
            'Added: svn:mime-type\n'
            '   + image/png\n',
            'foo'))
  def test_bad_svn_properties(self):
    try:
      rietveld.Rietveld.parse_svn_properties(u'\n', 'foo')
      self.fail()
    except rietveld.patch.UnsupportedPatchFormat as e:
      self.assertEqual('foo', e.filename)
    # TODO(maruel): Change with no diff, only svn property change:
    # http://codereview.chromium.org/6462019/
  def test_search_all_empty(self):
    url = (
        '/search?format=json'
        '&base=base'
        '&created_after=2010-01-02'
        '&created_before=2010-01-01'
        '&modified_after=2010-02-02'
        '&modified_before=2010-02-01'
        '&owner=owner%40example.com'
        '&reviewer=reviewer%40example.com'
        '&closed=2'
        '&commit=2'
        '&private=2'
        '&keys_only=True'
        '&with_messages=True'
        '&limit=23')
    self.requests = [
        (url, '{}'),
    ]
    results = list(self.rietveld.search(
        'owner@example.com',
        'reviewer@example.com',
        'base',
        True,
        True,
        True,
        '2010-01-01',
        '2010-01-02',
        '2010-02-01',
        '2010-02-02',
        23,
        True,
        True,
        ))
    self.assertEqual([], results)
  def test_results_cursor(self):
    # Verify cursor iteration is transparent.
    self.requests = [
        ('/search?format=json&base=base',
         rietveld.json.dumps({
           'cursor': 'MY_CURSOR',
           'results': [{'foo': 'bar'}, {'foo': 'baz'}],
         })),
        ('/search?format=json&base=base&cursor=MY_CURSOR',
         rietveld.json.dumps({
           'cursor': 'NEXT',
           'results': [{'foo': 'prout'}],
         })),
        ('/search?format=json&base=base&cursor=NEXT',
         rietveld.json.dumps({
           'cursor': 'VOID',
           'results': [],
         })),
    ]
    expected = [
        {'foo': 'bar'},
        {'foo': 'baz'},
        {'foo': 'prout'},
    ]
    for i in self.rietveld.search(base='base'):
      self.assertEqual(expected.pop(0), i)
    self.assertEqual([], expected)
class CachingRietveldTest(BaseFixture):
  """Verifies the caching layer: each endpoint is queried exactly once."""
  TESTED_CLASS = rietveld.CachingRietveld
  def test_get_description(self):
    # One queued reply; the second call must be served from the cache.
    self.requests = [('/1/description', 'Blah blah blah')]
    for _ in range(2):
      self.assertEqual('Blah blah blah', self.rietveld.get_description(1))
  def test_get_issue_properties(self):
    payload = {'description': 'wow\r\nno CR!', 'messages': 'foo'}
    self.requests = [('/api/1?messages=true', rietveld.json.dumps(payload))]
    without_msgs = {u'description': u'wow\nno CR!'}
    with_msgs = {u'description': u'wow\nno CR!', u'messages': u'foo'}
    # Both variants are answered from the single messages=true fetch.
    self.assertEqual(
        without_msgs, self.rietveld.get_issue_properties(1, False))
    self.assertEqual(with_msgs, self.rietveld.get_issue_properties(1, True))
  def test_get_patchset_properties(self):
    self.requests = [('/api/1/2', '{}')]
    for _ in range(2):
      self.assertEqual({}, self.rietveld.get_patchset_properties(1, 2))
class ProbeException(Exception):
  """Carries a deep-probed value out of a mocked call."""
  # Default until a value is captured.
  value = None
  def __init__(self, value):
    super(ProbeException, self).__init__()
    self.value = value
def MockSend(request_path, payload=None,
             content_type="application/octet-stream", timeout=None,
             extra_headers=None, **kwargs):
  """Stand-in for upload.py's Send(); raises to expose the timeout value."""
  raise ProbeException(timeout)
def MockSendTimeout(request_path, payload=None,
                    content_type="application/octet-stream", timeout=None,
                    extra_headers=None, **kwargs):
  """Stand-in for upload.py's Send() that simulates a read timeout."""
  raise ssl.SSLError('The read operation timed out')
class DefaultTimeoutTest(auto_stub.TestCase):
  """Checks HTTP calls always carry a timeout and back off on SSL timeouts."""
  TESTED_CLASS = rietveld.Rietveld
  def setUp(self):
    super(DefaultTimeoutTest, self).setUp()
    self.rietveld = self.TESTED_CLASS('url', None, 'email')
    self.mock(self.rietveld.rpc_server, 'Send', MockSend)
    self.sleep_time = 0
  def _assert_probed(self, probe):
    # The probe carries the timeout value Send() was invoked with.
    msg = 'Rietveld timeout was not set: %s' % traceback.format_exc()
    self.assertIsNotNone(probe.exception.value, msg)
  def test_timeout_get(self):
    with self.assertRaises(ProbeException) as probe:
      self.rietveld.get('/api/1234')
    self._assert_probed(probe)
  def test_timeout_post(self):
    with self.assertRaises(ProbeException) as probe:
      self.rietveld.post('/api/1234', [('key', 'data')])
    self._assert_probed(probe)
  def MockSleep(self, t):
    # Records the requested sleep instead of actually sleeping.
    self.sleep_time = t
  def test_ssl_timeout_post(self):
    self.mock(self.rietveld.rpc_server, 'Send', MockSendTimeout)
    self.mock(time, 'sleep', self.MockSleep)
    self.sleep_time = 0
    # The SSL timeout must propagate, and a retry back-off must be slept.
    with self.assertRaises(ssl.SSLError):
      self.rietveld.post('/api/1234', [('key', 'data')])
    self.assertNotEqual(self.sleep_time, 0)
if __name__ == '__main__':
  # Each -v flag raises verbosity: ERROR (default) -> INFO -> DEBUG.
  _LEVELS = (logging.ERROR, logging.INFO, logging.DEBUG)
  logging.basicConfig(level=_LEVELS[min(2, sys.argv.count('-v'))])
  unittest.main()
| |
# -*- coding: utf-8 -*-
"""
sentry_openproject.plugin
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2016 by HBEE,
2017 by Versada, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, unicode_literals
import urlparse
import six
from rest_framework.response import Response
from sentry.exceptions import PluginError
from sentry.plugins.bases.issue2 import IssueGroupActionEndpoint, IssuePlugin2
from sentry.utils.http import absolute_uri
from .client import OpenProjectClient
from .exceptions import ApiError, ApiUnauthorized
from .utils import get_secret_field_config
# Canned user-facing messages returned when talking to OpenProject fails
# (mapped from exceptions in OpenProjectPlugin.message_from_error).
ERR_INTERNAL = (
    'An internal error occurred within the OpenProject plugin'
)
# Shown on ApiUnauthorized: bad token or insufficient permissions.
ERR_UNAUTHORIZED = (
    'Unauthorized: either your API token is invalid or you do not have access'
)
# Shown on HTTP 404 from the OpenProject API.
ERR_404 = (
    'OpenProject returned a 404 Not Found error. If such project exists, '
    'ensure the API token user has access to it.'
)
class OpenProjectPlugin(IssuePlugin2):
    """Sentry issue plugin that creates and links OpenProject work packages.

    Configuration per project: the OpenProject base URL, an API key and a
    project slug (see get_configure_plugin_fields).
    """
    title = 'OpenProject'
    slug = 'openproject'
    description = 'Integrate OpenProject issue tracking by projects'
    conf_title = title
    conf_key = 'openproject'
    author = 'Versada'
    author_url = 'https://github.com/versada/sentry-openproject'
    version = '0.2.2'
    resource_links = [
        (
            'Bug Tracker',
            'https://github.com/versada/sentry-openproject/issues',
        ),
        ('Source', 'https://github.com/versada/sentry-openproject'),
    ]
    def get_group_urls(self):
        # Adds the autocomplete endpoint used by the "link issue" form.
        return super(OpenProjectPlugin, self).get_group_urls() + [
            (r'^autocomplete', IssueGroupActionEndpoint.as_view(
                view_method_name='view_autocomplete',
                plugin=self,
            )),
        ]
    def is_configured(self, request, project, **kwargs):
        """Returns True when url, apikey and project_slug are all set."""
        return all(
            (self.get_option(k, project)
             for k in ('url', 'apikey', 'project_slug'))
        )
    def get_group_description(self, request, group, event):
        '''Override of the function in parent class.
        Wraps exception body with <pre> tag to allow it to be parsed
        by Textile.
        '''
        output = [
            absolute_uri(group.get_absolute_url()),
        ]
        body = self.get_group_body(request, group, event)
        if body:
            output.extend([
                '',
                '<pre>',
                body,
                '</pre>',
            ])
        return '\n'.join(output)
    def get_new_issue_fields(self, request, group, event, **kwargs):
        """Form fields for the "create work package" dialog.

        Prepends the (read-only) project slug and appends type/assignee
        selects populated from the OpenProject API.
        """
        fields = super(OpenProjectPlugin, self).get_new_issue_fields(
            request, group, event, **kwargs)
        allowed_types = self.get_allowed_types(request, group)
        return [{
            'name': 'project_slug',
            'label': 'OpenProject Project Slug',
            'default': self.get_option('project_slug', group.project),
            'type': 'text',
            'readonly': True,
        }] + fields + [
            {
                'name': 'type',
                'label': 'Work Package Type',
                # First allowed type is preselected.
                'default': allowed_types[0][0],
                'type': 'select',
                'required': True,
                'choices': allowed_types,
            },
            {
                'name': 'assignee',
                'label': 'Assignee',
                'default': '',
                'type': 'select',
                'required': False,
                'choices': self.get_allowed_assignees(request, group),
            },
        ]
    def get_link_existing_issue_fields(self, request, group, event, **kwargs):
        """Form fields for the "link existing work package" dialog."""
        return [{
            'name': 'issue_id',
            'label': 'Work Package',
            'default': '',
            'type': 'select',
            # Served by view_autocomplete below.
            'has_autocomplete': True,
        }, {
            'name': 'comment',
            'label': 'Comment',
            'default': absolute_uri(group.get_absolute_url()),
            'type': 'textarea',
            'help': ('Leave blank if you don\'t want to '
                     'add a comment to the OpenProject work package.'),
            'required': False,
        }]
    def get_issue_label(self, group, issue_id, **kwargs):
        """Short label shown in the Sentry UI for a linked work package."""
        return 'WP#%s' % issue_id
    def get_issue_url(self, group, issue_id, **kwargs):
        """Absolute URL of the work package on the configured instance."""
        url = self.get_option('url', group.project)
        return urlparse.urljoin(url, '/work_packages/{}'.format(issue_id))
    def get_client(self, project, user):
        """Builds an OpenProjectClient from project options.

        Raises:
            PluginError: if url or apikey is missing.
        """
        url = self.get_option('url', project)
        apikey = self.get_option('apikey', project)
        if not all((url, apikey)):
            raise PluginError('OpenProject plugin not correctly configured!')
        return OpenProjectClient(url, apikey)
    def create_issue(self, request, group, form_data, **kwargs):
        """Creates a work package and returns its OpenProject id."""
        client = self.get_client(group.project, request.user)
        try:
            response = client.create_work_package(
                self.get_option('project_slug', group.project),
                form_data['title'],
                form_data['type'],
                description=form_data.get('description'),
                assignee_id=form_data.get('assignee'),
            )
        except Exception as e:
            # Re-raised as PluginError with a user-readable message.
            self.raise_error(e)
        return response['id']
    def link_issue(self, request, group, form_data, **kwargs):
        """Links an existing work package, optionally adding a comment."""
        client = self.get_client(group.project, request.user)
        try:
            issue = client.get_work_package(form_data['issue_id'])
        except Exception as e:
            self.raise_error(e)
        comment = form_data.get('comment')
        if comment:
            try:
                client.create_comment(issue['id'], comment)
            except Exception as e:
                self.raise_error(e)
        return {
            'title': issue['subject'],
        }
    def get_configure_plugin_fields(self, request, project, **kwargs):
        """Project-level configuration form: url, apikey, project slug."""
        apikey = self.get_option('apikey', project)
        # The API key is rendered as a masked/secret field.
        apikey_field = get_secret_field_config(
            apikey,
            'This is API key of an OpenProject user, who will be the author '
            'of the issue on OpenProject.',
            include_prefix=True,
        )
        apikey_field.update({
            'name': 'apikey',
            'label': 'OpenProject API key',
            'placeholder': 'e.g. 0123456789abcdef0123456789abcdef01234567',
        })
        return [
            {
                'name': 'url',
                'label': 'OpenProject URL',
                'default': self.get_option('url', project),
                'type': 'text',
                'placeholder': 'e.g. https://openproject.example.com',
                'help': 'The URL to your OpenProject instance.',
            },
            apikey_field,
            {
                'name': 'project_slug',
                'label': 'Project Slug',
                'default': self.get_option('project_slug', project),
                'type': 'text',
                'placeholder': 'e.g. example-project',
                'help': ('This should be the project slug of this project on '
                         'OpenProject.'),
            },
        ]
    def get_allowed_assignees(self, request, group):
        """(id, name) choices of assignable users, plus an Unassigned entry."""
        client = self.get_client(group.project, request.user)
        try:
            response = client.list_assignees(
                project_id=self.get_option('project_slug', group.project),
            )
        except Exception as e:
            self.raise_error(e)
        users = tuple(
            (u['id'], u['name'])
            for u in response.get('_embedded', {}).get('elements', [])
        )
        return (('', 'Unassigned'),) + users
    def get_allowed_types(self, request, group):
        """(id, name) choices of work package types for the project."""
        client = self.get_client(group.project, request.user)
        try:
            response = client.list_project_types(
                project_id=self.get_option('project_slug', group.project),
            )
        except Exception as e:
            self.raise_error(e)
        return tuple(
            (u['id'], u['name'])
            for u in response.get('_embedded', {}).get('elements', [])
        )
    def message_from_error(self, exc):
        """Maps an exception to one of the user-facing ERR_* messages."""
        if isinstance(exc, ApiUnauthorized):
            return ERR_UNAUTHORIZED
        elif isinstance(exc, ApiError):
            if exc.code == 404:
                return ERR_404
            return ('Error Communicating with OpenProject (HTTP %s): %s' % (
                exc.code,
                exc.json.get('message', 'unknown error')
                if exc.json else 'unknown error',
            ))
        else:
            return ERR_INTERNAL
    def raise_error(self, exc):
        """Normalizes any exception into PluginError; logs unexpected ones."""
        if isinstance(exc, ApiError):
            raise PluginError(self.message_from_error(exc))
        elif isinstance(exc, PluginError):
            raise
        else:
            self.logger.exception(six.text_type(exc))
            raise PluginError(self.message_from_error(exc))
    def view_autocomplete(self, request, group, **kwargs):
        """Endpoint backing the work-package autocomplete select."""
        field = request.GET.get('autocomplete_field')
        query = request.GET.get('autocomplete_query')
        # Only the issue_id field is autocompleted, and only with a query.
        if not field == 'issue_id' or not query:
            return Response({'issue_id': []})
        project_slug = self.get_option('project_slug', group.project)
        client = self.get_client(group.project, request.user)
        try:
            response = client.search_work_packages(
                project_slug, query.encode('utf-8')
            )
        except Exception as e:
            return Response({
                'error_type': 'validation',
                'errors': [{'__all__': self.message_from_error(e)}]
            }, status=400)
        issues = [{
            'text': '(#{id}) {subject}'.format(**i),
            'id': i['id'],
        } for i in response.get('_embedded', {}).get('elements', [])]
        return Response({field: issues})
| |
import copy
from threading import Event, RLock, Thread
from pyflipdot.display import Driver, SegmentedDriverMixin, TextDriverMixin
from pyflipdot.lawo import at91PIO, fonts
# Pre-seeded serial frame for one panel refresh: LawoDisplay.send() copies
# this and ORs pixel bits into it. The fixed 2s (first half) and 1s (second
# half) sit at regular positions between the pixel byte runs.
# NOTE(review): the exact meaning of these marker bits is hardware-defined
# (inferred only from send()'s offsets) — confirm against the Lawo panel
# protocol documentation before changing.
DATATEMPLATE = [0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2, 0, 0,
                0, 0, 0, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0,
                0, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 2,
                0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 2, 0, 0,
                0, 0, 2, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0,
                2, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0,
                0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0,
                0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
                1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0,
                0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0,
                0, 1]
def col_to_b6(map):
    """Split a column bitmap into the two 6-pixel bottom-half lanes.

    Each entry is the raw masked value (0 or the bit's weight), not a 0/1
    flag; send() only tests entries for truthiness. NOTE(review): the bit
    order encodes the panel wiring — confirm before changing.
    (Parameter name `map` shadows the builtin but is kept for interface
    compatibility.)
    """
    lane2 = [map & (1 << bit) for bit in (9, 10, 4, 2, 3, 8)]
    lane1 = [map & (1 << bit) for bit in (12, 7, 1, 0, 6, 5)]
    return [lane2, lane1]
def col_to_b7(map):
    """Split a column bitmap into the two 7-pixel bottom-half lanes.

    Same raw-masked-value convention as col_to_b6; used for odd columns.
    """
    lane2 = [map & (1 << bit) for bit in (16, 9, 10, 4, 2, 3, 8)]
    lane1 = [map & (1 << bit) for bit in (13, 12, 7, 1, 0, 6, 5)]
    return [lane2, lane1]
def col_to_t6(map):
    """Split a column bitmap into the two 6-pixel top-half lanes.

    Same raw-masked-value convention as col_to_b6; used for odd columns of
    the top half.
    """
    lane2 = [map & (1 << bit) for bit in (14, 15, 20, 24, 21, 22)]
    lane1 = [map & (1 << bit) for bit in (11, 18, 17, 23, 25, 19)]
    return [lane2, lane1]
def col_to_t7(map):
    """Split a column bitmap into the two 7-pixel top-half lanes.

    Same raw-masked-value convention as col_to_b6; used for even columns of
    the top half.
    """
    lane2 = [map & (1 << bit) for bit in (14, 15, 20, 24, 21, 22, 16)]
    lane1 = [map & (1 << bit) for bit in (11, 18, 17, 23, 25, 19, 13)]
    return [lane2, lane1]
class LawoDisplay(Driver, SegmentedDriverMixin, TextDriverMixin):
    """Text driver for a Lawo flipdot panel driven over AT91 PIO soft-SPI."""
    def __init__(self, force_redraw=False):
        """Sets up state and the background draw thread (started by init())."""
        self.pio = None
        self.pio2 = None
        self._run = True
        self._draw_event = Event()
        self._data_lock = RLock()
        self._data = None
        self._draw_thread = Thread(target=self.display_thread)
        self._draw_thread.daemon = True
        self._font = None
        self._force_redraw = force_redraw
        self._last_text = None
        self._last_data = None
    def init(self):
        """Configures PWM/GPIO, loads the font and starts the draw thread.

        NOTE(review): the pin numbers (64, 23, 22) and the PWM period are
        board-specific — confirm against the hardware schematic.
        """
        at91PIO.enable_pwm(0, 2500000, 1250000)
        at91PIO.export(64)
        at91PIO.set_direction(64, 'out')
        at91PIO.export(23)
        at91PIO.set_direction(23, 'out')
        at91PIO.export(22)
        at91PIO.set_direction(22, 'out')
        at91PIO.set_value(64, 0)
        at91PIO.set_value(22, 0)
        at91PIO.set_value(23, 0)
        # Soft-SPI on port A; port C carries the strobe used by send().
        self.pio = at91PIO.AT91PIO(at91PIO.PIOA)
        self.pio.init_spi(1 << 23, 1 << 22)
        self.pio2 = at91PIO.AT91PIO(at91PIO.PIOC)
        self._font = fonts.load_font('default')
        self._draw_thread.start()
    def put_text(self, data, text, offset=0):
        """Renders *text* into the list of column bitmaps *data*.

        Glyphs missing from the font are skipped; one blank column is left
        between characters. NOTE(review): this returns the original *offset*,
        not the final column (curr_col) — looks unintended, but the only
        caller here ignores the return value.
        """
        curr_col = offset
        for char in text:
            if not self._font.char_defined(ord(char)):
                print("char not found", char)
                continue
            for col in self._font.get_char(ord(char)):
                data[curr_col] |= col
                curr_col += 1
            # Inter-character spacing column.
            curr_col += 1
        return offset
    def display_thread(self):
        """Background loop: renders the current text and sends it if changed.

        NOTE(review): _draw_event is never cleared after set_text() fires it,
        so from then on this loop spins continuously, relying on the
        data-changed check to avoid redundant sends — confirm intended.
        """
        while self._run:
            self._draw_event.wait()
            # 96 columns per frame.
            data = [0]*96
            with self._data_lock:
                if self._data is None:
                    continue
                self.put_text(data, self._data)
            if data != self._last_data:
                self.send(data)
                self._last_data = data
    def set_text(self, text):
        """Sets the text to display and wakes the draw thread."""
        with self._data_lock:
            self._data = text
        self._draw_event.set()
    def send(self, display_data):
        """Packs 96 column bitmaps into the serial frame and shifts it out.

        Works on a copy of DATATEMPLATE: for each of the 8 column groups the
        first pass packs the bottom half (alternating 6/7-pixel columns), the
        second packs the top half in reverse column order. NOTE(review): the
        offset arithmetic and the skip at bit_offset == 40 mirror the panel's
        segment layout — confirm against hardware docs before modifying.
        """
        data = copy.copy(DATATEMPLATE)
        for b in range(0, 8):
            col_offset = b * 12
            offset_bottom_2 = b * 11
            offset_top_2 = (15 - b) * 11
            offset_bottom_1 = b * 11 + 176
            offset_top_1 = (15 - b) * 11 + 176
            bit_offset = 0
            for col in range(col_offset, col_offset + 12):
                coldata = display_data[col]
                # Even/odd columns use the 6- and 7-pixel mappings.
                if col % 2 == 0:
                    lcd_data = col_to_b6(coldata)
                    length = 6
                else:
                    lcd_data = col_to_b7(coldata)
                    length = 7
                # print(length, lcd_data)
                for bit in range(length):
                    if lcd_data[0][bit]:
                        data[offset_bottom_2 + bit_offset // 8] |= 1 << (7 - (bit_offset - (bit_offset // 8) * 8))
                    if lcd_data[1][bit]:
                        data[offset_bottom_1 + bit_offset // 8] |= 1 << (7 - (bit_offset - (bit_offset // 8) * 8))
                    bit_offset += 1
                    if bit_offset == 40:
                        bit_offset += 8
            bit_offset = 0
            for col in range(col_offset + 11, col_offset - 1, -1):
                coldata = display_data[col]
                if col % 2 == 0:
                    lcd_data = col_to_t7(coldata)
                    length = 7
                else:
                    lcd_data = col_to_t6(coldata)
                    length = 6
                for bit in range(length):
                    if lcd_data[0][bit]:
                        data[offset_top_2 + bit_offset // 8] |= 1 << (7 - (bit_offset - (bit_offset // 8) * 8))
                    if lcd_data[1][bit]:
                        data[offset_top_1 + bit_offset // 8] |= 1 << (7 - (bit_offset - (bit_offset // 8) * 8))
                    bit_offset += 1
                    if bit_offset == 40:
                        bit_offset += 8
        # Shift the two frame halves out over soft-SPI, strobing each byte.
        with self.pio as pio, self.pio2 as pio2:
            for b in data[0:176]:
                pio.soft_spi_write(b)
                pio2.strobe(1)
            for b in data[176:]:
                pio.soft_spi_write(b)
                pio2.strobe(1)
| |
#!/usr/bin/env vpython
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run a test.
Sample usage:
./run.py \
-a src/xcodebuild/Release-iphoneos/base_unittests.app \
-o /tmp/out \
-p iPhone 5s \
-v 9.3 \
-b 9b46
Installs base_unittests.app in an iPhone 5s simulator running iOS 9.3 under
Xcode build version 9b46, runs it, and captures all test data in /tmp/out.
"""
import argparse
import json
import logging
import os
import subprocess
import sys
import traceback
import shard_util
import test_runner
import variations_runner
import wpr_runner
import xcodebuild_runner
import xcode_util as xcode
class Runner():
"""
Object to encapsulate iOS test runner execution coordination. Parses
arguments and invokes underlying test runners accordingly.
"""
def __init__(self, args=None):
"""
args = argparse Namespace object.
test_args = string list of args.
"""
self.args = argparse.Namespace()
self.test_args = []
self.should_move_xcode_runtime_to_cache = True
if args:
self.parse_args(args)
  def install_xcode(self):
    """Installs the requested Xcode build version.

    Returns:
      (bool, bool)
        First bool: True if installation was successful. False otherwise.
        Second bool: True if Xcode is legacy package. False if it's new.
    """
    try:
      if not self.args.mac_toolchain_cmd:
        raise test_runner.MacToolchainNotFoundError(self.args.mac_toolchain_cmd)
      # Guard against incorrect install paths. On swarming, this path
      # should be a requested named cache, and it must exist.
      if not os.path.exists(self.args.xcode_path):
        raise test_runner.XcodePathNotFoundError(self.args.xcode_path)
      runtime_cache_folder = None
      # Runner script only utilizes runtime cache when it's a simulator task.
      if self.args.version:
        runtime_cache_folder = xcode.construct_runtime_cache_folder(
            self.args.runtime_cache_prefix, self.args.version)
        if not os.path.exists(runtime_cache_folder):
          # Depending on infra project, runtime named cache might not be
          # deployed. Create the dir if it doesn't exist since xcode_util
          # assumes it exists.
          # TODO(crbug.com/1191260): Raise error instead of creating dirs after
          # runtime named cache is deployed everywhere.
          os.makedirs(runtime_cache_folder)
      # xcode.install() installs the Xcode & iOS runtime, and returns a bool
      # indicating if the Xcode version in CIPD is a legacy Xcode package (which
      # includes iOS runtimes).
      is_legacy_xcode = xcode.install(
          self.args.mac_toolchain_cmd,
          self.args.xcode_build_version,
          self.args.xcode_path,
          runtime_cache_folder=runtime_cache_folder,
          ios_version=self.args.version)
      # Make the installed Xcode the active one (presumably wraps
      # xcode-select; confirm in xcode_util).
      xcode.select(self.args.xcode_path)
    except subprocess.CalledProcessError as e:
      # Flush buffers to ensure correct output ordering.
      sys.stdout.flush()
      sys.stderr.write('Xcode build version %s failed to install: %s\n' %
                       (self.args.xcode_build_version, e))
      sys.stderr.flush()
      # Installation failure is reported via the return value, not raised.
      return (False, False)
    else:
      return (True, is_legacy_xcode)
  def resolve_test_cases(self):
    """Forms |self.args.test_cases| considering swarming shard and cmd inputs.

    Note:
      - Xcode intallation is required before invoking this method since it
      requires otool to parse test names from compiled targets.
      - It's validated in |parse_args| that test filters won't work in sharding
      environment.
    """
    args_json = json.loads(self.args.args_json)
    # GTEST_SHARD_INDEX and GTEST_TOTAL_SHARDS are additional test environment
    # variables, set by Swarming, that are only set for a swarming task
    # shard count is > 1.
    #
    # For a given test on a given run, otool should return the same total
    # counts and thus, should generate the same sublists. With the shard
    # index, each shard would then know the exact test case to run.
    gtest_shard_index = shard_util.shard_index()
    gtest_total_shards = shard_util.total_shards()
    if gtest_total_shards > 1:
      self.args.test_cases = shard_util.shard_test_cases(
          self.args, gtest_shard_index, gtest_total_shards)
    else:
      # Unsharded: start from any explicitly passed cases and append both
      # gtest-style (':') and isolated-script-style ('::') filter entries.
      self.args.test_cases = self.args.test_cases or []
      if self.args.gtest_filter:
        self.args.test_cases.extend(self.args.gtest_filter.split(':'))
      if self.args.isolated_script_test_filter:
        self.args.test_cases.extend(
            self.args.isolated_script_test_filter.split('::'))
    # Cases from --args-json are appended in both the sharded and unsharded
    # paths.
    self.args.test_cases.extend(args_json.get('test_cases', []))
def run(self, args):
  """Main coordinating function.

  Parses |args|, installs/selects Xcode, resolves the test cases, picks the
  appropriate test-runner class, launches it, and always writes the summary
  and result JSON files to |out_dir| before returning.

  Returns:
    0 if the launched tests passed, 1 if they failed, 2 if the tests could
    not be launched at all, 3 for device-related errors.
  """
  self.parse_args(args)

  # This logic is run by default before the otool command is invoked such that
  # otool has the correct Xcode selected for command line dev tools.
  install_success, is_legacy_xcode = self.install_xcode()
  if not install_success:
    raise test_runner.XcodeVersionNotFoundError(self.args.xcode_build_version)
  # Requires Xcode (otool) to be installed, hence called after install_xcode.
  self.resolve_test_cases()

  summary = {}
  tr = None

  if not os.path.exists(self.args.out_dir):
    os.makedirs(self.args.out_dir)

  try:
    # Select the runner implementation based on which mutually-exclusive
    # mode flags/paths were supplied on the command line.
    if self.args.xcode_parallelization:
      tr = xcodebuild_runner.SimulatorParallelTestRunner(
          self.args.app,
          self.args.host_app,
          self.args.iossim,
          self.args.version,
          self.args.platform,
          out_dir=self.args.out_dir,
          release=self.args.release,
          repeat_count=self.args.repeat,
          retries=self.args.retries,
          shards=self.args.shards,
          test_cases=self.args.test_cases,
          test_args=self.test_args,
          use_clang_coverage=self.args.use_clang_coverage,
          env_vars=self.args.env_var)
    elif self.args.variations_seed_path != 'NO_PATH':
      tr = variations_runner.VariationsSimulatorParallelTestRunner(
          self.args.app,
          self.args.host_app,
          self.args.iossim,
          self.args.version,
          self.args.platform,
          self.args.out_dir,
          self.args.variations_seed_path,
          release=self.args.release,
          test_cases=self.args.test_cases,
          test_args=self.test_args,
          env_vars=self.args.env_var)
    elif self.args.replay_path != 'NO_PATH':
      tr = wpr_runner.WprProxySimulatorTestRunner(
          self.args.app,
          self.args.host_app,
          self.args.iossim,
          self.args.replay_path,
          self.args.platform,
          self.args.version,
          self.args.wpr_tools_path,
          self.args.out_dir,
          env_vars=self.args.env_var,
          retries=self.args.retries,
          shards=self.args.shards,
          test_args=self.test_args,
          test_cases=self.args.test_cases,
          xctest=self.args.xctest,
      )
    elif self.args.iossim and self.args.platform and self.args.version:
      tr = test_runner.SimulatorTestRunner(
          self.args.app,
          self.args.iossim,
          self.args.platform,
          self.args.version,
          self.args.out_dir,
          env_vars=self.args.env_var,
          repeat_count=self.args.repeat,
          retries=self.args.retries,
          shards=self.args.shards,
          test_args=self.test_args,
          test_cases=self.args.test_cases,
          use_clang_coverage=self.args.use_clang_coverage,
          wpr_tools_path=self.args.wpr_tools_path,
          xctest=self.args.xctest,
      )
    elif self.args.xcodebuild_device_runner and self.args.xctest:
      tr = xcodebuild_runner.DeviceXcodeTestRunner(
          app_path=self.args.app,
          host_app_path=self.args.host_app,
          out_dir=self.args.out_dir,
          release=self.args.release,
          repeat_count=self.args.repeat,
          retries=self.args.retries,
          test_cases=self.args.test_cases,
          test_args=self.test_args,
          env_vars=self.args.env_var)
    else:
      # Fallback: plain on-device run.
      tr = test_runner.DeviceTestRunner(
          self.args.app,
          self.args.out_dir,
          env_vars=self.args.env_var,
          repeat_count=self.args.repeat,
          restart=self.args.restart,
          retries=self.args.retries,
          test_args=self.test_args,
          test_cases=self.args.test_cases,
          xctest=self.args.xctest,
      )

    logging.info("Using test runner %s" % type(tr).__name__)
    return 0 if tr.launch() else 1
  except test_runner.DeviceError as e:
    sys.stderr.write(traceback.format_exc())
    summary['step_text'] = '%s%s' % (e.__class__.__name__,
                                     ': %s' % e.args[0] if e.args else '')
    # Swarming infra marks device status unavailable for any device related
    # issue using this return code.
    return 3
  except test_runner.SimulatorNotFoundError as e:
    # This means there's probably some issue in simulator runtime so we don't
    # want to cache it anymore (when it's in new Xcode format).
    self.should_move_xcode_runtime_to_cache = False
    sys.stderr.write(traceback.format_exc())
    summary['step_text'] = '%s%s' % (e.__class__.__name__,
                                     ': %s' % e.args[0] if e.args else '')
    return 2
  except test_runner.TestRunnerError as e:
    sys.stderr.write(traceback.format_exc())
    summary['step_text'] = '%s%s' % (e.__class__.__name__,
                                     ': %s' % e.args[0] if e.args else '')
    # test_runner.Launch returns 0 on success, 1 on failure, so return 2
    # on exception to distinguish between a test failure, and a failure
    # to launch the test at all.
    return 2
  finally:
    # Always emit the summary/result files, whether or not a runner ran.
    if tr:
      summary['logs'] = tr.logs

    with open(os.path.join(self.args.out_dir, 'summary.json'), 'w') as f:
      json.dump(summary, f)
    if tr:
      with open(os.path.join(self.args.out_dir, 'full_results.json'),
                'w') as f:
        json.dump(tr.test_results, f)

      # The value of test-launcher-summary-output is set by the recipe
      # and passed here via swarming.py. This argument defaults to
      # ${ISOLATED_OUTDIR}/output.json. out-dir is set to ${ISOLATED_OUTDIR}
      # TODO(crbug.com/1031338) - the content of this output.json will
      # work with Chromium recipe because we use the noop_merge merge script,
      # but will require structural changes to support the default gtest
      # merge script (ref: //testing/merge_scripts/standard_gtest_merge.py)
      output_json_path = (
          self.args.test_launcher_summary_output or
          os.path.join(self.args.out_dir, 'output.json'))
      with open(output_json_path, 'w') as f:
        json.dump(tr.test_results, f)

    # Move the iOS runtime back to cache dir if the Xcode package is not
    # legacy (i.e. Xcode program & runtimes are in different CIPD packages.)
    # and it's a simulator task.
    if not is_legacy_xcode and self.args.version:
      if self.should_move_xcode_runtime_to_cache:
        runtime_cache_folder = xcode.construct_runtime_cache_folder(
            self.args.runtime_cache_prefix, self.args.version)
        xcode.move_runtime(runtime_cache_folder, self.args.xcode_path, False)
      else:
        # Runtime flagged as bad (e.g. SimulatorNotFoundError above):
        # drop it instead of re-caching.
        xcode.remove_runtimes(self.args.xcode_path)

    test_runner.defaults_delete('com.apple.CoreSimulator',
                                'FramebufferServerRendererPolicy')
def parse_args(self, args):
  """Parse the args into args and test_args.

  Populates |self.args| (recognized options, merged with --args-json
  overrides) and |self.test_args| (everything argparse did not recognize,
  passed through to the test itself).

  Note: test_cases related arguments are handled in |resolve_test_cases|
  instead of this function.
  """
  parser = argparse.ArgumentParser()

  parser.add_argument(
      '-x',
      '--xcode-parallelization',
      help='Run tests using xcodebuild\'s parallelization.',
      action='store_true',
  )
  parser.add_argument(
      '-a',
      '--app',
      help='Compiled .app to run for EG1, Compiled -Runner.app for EG2',
      metavar='app',
  )
  parser.add_argument(
      '-b',
      '--xcode-build-version',
      help='Xcode build version to install.',
      required=True,
      metavar='build_id',
  )
  parser.add_argument(
      '-e',
      '--env-var',
      action='append',
      help='Environment variable to pass to the test itself.',
      metavar='ENV=val',
  )
  parser.add_argument(
      '--gtest_filter',
      help='List of test names to run. Expected to be in GTest filter format,'
      'which should be a colon delimited list. Note: Specifying test cases '
      'is not supported in multiple swarming shards environment. Will be '
      'merged with tests specified in --test-cases, --args-json and '
      '--isolated-script-test-filter.',
      metavar='gtest_filter',
  )
  parser.add_argument(
      '--isolated-script-test-filter',
      help='A double-colon-separated ("::") list of test names to run. '
      'Note: Specifying test cases is not supported in multiple swarming '
      'shards environment. Will be merged with tests specified in '
      '--test-cases, --args-json and --gtest_filter.',
      metavar='isolated_test_filter',
  )
  parser.add_argument(
      '--gtest_repeat',
      '--isolated-script-test-repeat',
      help='Number of times to repeat each test case.',
      metavar='repeat',
      dest='repeat',
      type=int,
  )
  parser.add_argument(
      '--host-app',
      help='Compiled host .app to run.',
      default='NO_PATH',
      metavar='host_app',
  )
  parser.add_argument(
      '-i',
      '--iossim',
      help='Compiled iossim to run the app on.',
      metavar='iossim',
  )
  parser.add_argument(
      '-j',
      '--args-json',
      default='{}',
      help=
      'Specify "env_var": [...] and "test_args": [...] using a JSON dict.',
      metavar='{}',
  )
  parser.add_argument(
      '--mac-toolchain-cmd',
      help='Command to run mac_toolchain tool. Default: %(default)s.',
      default='mac_toolchain',
      metavar='mac_toolchain',
  )
  parser.add_argument(
      '-o',
      '--out-dir',
      help='Directory to store all test data in.',
      metavar='dir',
      required=True,
  )
  parser.add_argument(
      '-p',
      '--platform',
      help='Platform to simulate.',
      metavar='sim',
  )
  #TODO(crbug.com/1056887): Implement this arg in infra.
  parser.add_argument(
      '--release',
      help='Indicates if this is a release build.',
      action='store_true',
  )
  parser.add_argument(
      '--replay-path',
      help=('Path to a directory containing WPR replay and recipe files, for '
            'use with WprProxySimulatorTestRunner to replay a test suite '
            'against multiple saved website interactions. '
            'Default: %(default)s'),
      default='NO_PATH',
      metavar='replay-path',
  )
  parser.add_argument(
      '--restart',
      action='store_true',
      help=argparse.SUPPRESS,
  )
  parser.add_argument(
      '-r',
      '--retries',
      help='Number of times to retry failed test cases.',
      metavar='n',
      type=int,
  )
  parser.add_argument(
      '--runtime-cache-prefix',
      metavar='PATH',
      help=(
          'Path prefix for runtime cache folder. The prefix will be appended '
          'with iOS version to construct the path. iOS simulator will be '
          'installed to the path and further copied into Xcode. Default: '
          '%(default)s. WARNING: this folder will be overwritten! This '
          'folder is intended to be a cached CIPD installation.'),
      default='Runtime-ios-',
  )
  parser.add_argument(
      '-s',
      '--shards',
      help='Number of shards to split test cases.',
      metavar='n',
      type=int,
  )
  parser.add_argument(
      '-t',
      '--test-cases',
      action='append',
      help=('Tests that should be included in the test run. All other tests '
            'will be excluded from this run. If unspecified, run all tests. '
            'Note: Specifying test cases is not supported in multiple '
            'swarming shards environment. Will be merged with tests '
            'specified in --gtest_filter and --args-json.'),
      metavar='testcase',
  )
  parser.add_argument(
      '--use-clang-coverage',
      help='Enable code coverage related steps in test runner scripts.',
      action='store_true',
  )
  parser.add_argument(
      '--use-trusted-cert',
      action='store_true',
      help=('Whether to install a cert to the simulator to allow for local '
            'HTTPS testing.'),
  )
  parser.add_argument(
      '-v',
      '--version',
      help='Version of iOS the simulator should run.',
      metavar='ver',
  )
  parser.add_argument(
      '--variations-seed-path',
      help=('Path to a JSON file with variations seed used in variations '
            'smoke testing. Default: %(default)s'),
      default='NO_PATH',
      metavar='variations-seed-path',
  )
  parser.add_argument(
      '--wpr-tools-path',
      help=(
          'Location of WPR test tools (should be preinstalled, e.g. as part '
          'of a swarming task requirement). Default: %(default)s.'),
      default='NO_PATH',
      metavar='wpr-tools-path',
  )
  parser.add_argument(
      '--xcode-path',
      metavar='PATH',
      help=('Path to <Xcode>.app folder where contents of the app will be '
            'installed. Default: %(default)s. WARNING: this folder will be '
            'overwritten! This folder is intended to be a cached CIPD '
            'installation.'),
      default='Xcode.app',
  )
  parser.add_argument(
      '--xcodebuild-device-runner',
      help='Run tests using xcodebuild\'s on real device.',
      action='store_true',
  )
  parser.add_argument(
      '--xctest',
      action='store_true',
      help='Whether or not the given app should be run as an XCTest.',
  )
  parser.add_argument(
      '--test-launcher-summary-output',
      default=None,
      help='Full path to output.json file. output.json is consumed by both '
      'collect_task.py and merge scripts.')

  # Nested helpers close over |parser| and (for load_from_json) the
  # |test_args| list created by parse_known_args below.
  def load_from_json(args):
    """Loads and sets arguments from args_json.

    Note: |test_cases| in --args-json is handled in
    |Runner.resolve_test_cases()| instead of this function.
    """
    args_json = json.loads(args.args_json)
    args.env_var = args.env_var or []
    args.env_var.extend(args_json.get('env_var', []))
    args.restart = args_json.get('restart', args.restart)
    args.xctest = args_json.get('xctest', args.xctest)
    args.xcode_parallelization = args_json.get('xcode_parallelization',
                                               args.xcode_parallelization)
    args.xcodebuild_device_runner = (
        args_json.get('xcodebuild_device_runner',
                      args.xcodebuild_device_runner))
    args.shards = args_json.get('shards', args.shards)
    # Extends the closed-over test_args list (defined below) in place.
    test_args.extend(args_json.get('test_args', []))

  def validate(args):
    """
    Runs argument validation
    """
    if (not (args.xcode_parallelization or args.xcodebuild_device_runner) and
        (args.iossim or args.platform or args.version)):
      # If any of --iossim, --platform, or --version
      # are specified then they must all be specified.
      if not (args.iossim and args.platform and args.version):
        parser.error('must specify all or none of '
                     '-i/--iossim, -p/--platform, -v/--version')

    if args.xcode_parallelization and not (args.platform and args.version):
      parser.error('--xcode-parallelization also requires '
                   'both -p/--platform and -v/--version')

    args_json = json.loads(args.args_json)
    if (args.gtest_filter or args.test_cases or
        args_json.get('test_cases')) and shard_util.total_shards() > 1:
      parser.error(
          'Specifying test cases is not supported in multiple swarming '
          'shards environment.')

  # Unrecognized options are collected into test_args and forwarded to the
  # test binary itself.
  args, test_args = parser.parse_known_args(args)
  validate(args)
  load_from_json(args)

  # TODO(crbug.com/1056820): |app| won't contain "Debug" or "Release" after
  # recipe migrations.
  args.release = args.release or (args.app and "Release" in args.app)

  self.args = args
  self.test_args = test_args
def main(args):
  """Script entry point: configure logging, then delegate to Runner.run.

  Returns the exit code produced by Runner.run (0 pass, non-zero failure).
  """
  logging.basicConfig(
      datefmt='%I:%M:%S',
      format='[%(asctime)s:%(levelname)s] %(message)s',
      level=logging.DEBUG)

  # Clear any stale simulator framebuffer renderer policy before running.
  test_runner.defaults_delete('com.apple.CoreSimulator',
                              'FramebufferServerRendererPolicy')

  test_coordinator = Runner()
  logging.debug("Arg values passed for this run: %s" % args)
  return test_coordinator.run(args)
# Entry point when executed directly: exit with main()'s return code,
# forwarding the CLI arguments (minus the program name).
if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
| |
"""Shape Widgets
=======================
Defines the GUI components used with :mod:`ceed.shape`.
"""
import math
from typing import Type, List, Tuple, Dict, Optional, Union
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from kivy.properties import BooleanProperty, NumericProperty, StringProperty, \
ObjectProperty, DictProperty, ListProperty
from kivy.core.window import Window
from kivy.uix.widget import Widget
from kivy.factory import Factory
from kivy.clock import Clock
from kivy.app import App
from kivy_garden.drag_n_drop import DraggableLayoutBehavior
from ceed.graphics import WidgetList, ShowMoreSelection, ShowMoreBehavior
from ceed.shape import CeedPaintCanvasBehavior, CeedShapeGroup, CeedShape
# Public API of this module: the names exported via ``from ... import *``.
__all__ = (
    'CeedPainter', 'ShapeGroupList', 'WidgetShapeGroup', 'ShapeGroupItem',
    'ShapeList', 'WidgetShape')
class CeedPainter(CeedPaintCanvasBehavior, Widget):
    """The shapes controller used in the GUI. It is the
    canvas widget upon which the shapes are drawn.

    Every shape/group operation inherited from
    :class:`~ceed.shape.CeedPaintCanvasBehavior` is extended here so the
    corresponding GUI widget lists stay in sync with the shapes and groups.
    """

    show_label = BooleanProperty(False)
    '''If True, a label showing the current mouse position is displayed.
    '''

    pos_label = None
    '''The label instance that shows the mouse position.
    '''

    shape_widgets_list: 'ShapeList' = None
    """The :class:`ShapeList` that contains the shape widgets.
    """

    def __init__(self, **kwargs):
        super(CeedPainter, self).__init__(**kwargs)
        # Label reused for displaying the mouse position (see on_show_label).
        self.pos_label = Factory.XYSizedLabel()

    @property
    def selected_groups(self) -> List[CeedShapeGroup]:
        """Returns the list of :class:`CeedShapeGroup` that are currently
        selected in the GUI.
        """
        app = App.get_running_app()
        return [widget.group
                for widget in app.shape_groups_container.selected_nodes]

    def create_shape_with_touch(self, touch):
        """Extends the base method to also add the new shape's graphics to
        this canvas before returning it.
        """
        shape = super(CeedPainter, self).create_shape_with_touch(touch)
        if shape is not None:
            shape.add_shape_to_canvas(self)
        return shape

    def reorder_shape(self, shape, before_shape=None):
        """Extends the base method to re-insert the shape's widget so the
        widget list order matches the new shape order.
        """
        super(CeedPainter, self).reorder_shape(shape, before_shape=before_shape)
        self.shape_widgets_list.remove_widget(shape.widget)
        if before_shape is None:
            self.shape_widgets_list.add_widget(shape.widget)
        else:
            # Re-insert relative to before_shape's widget in the children
            # list (off by one since it is inserted next to it).
            i = self.shape_widgets_list.children.index(before_shape.widget)
            self.shape_widgets_list.add_widget(shape.widget, index=i + 1)

    def add_shape(self, shape):
        """Extends the base method: on success, draws the shape on the
        canvas, creates/shows its :class:`WidgetShape`, and notifies the app
        of changes via the shape's ``on_update`` event.
        """
        if super(CeedPainter, self).add_shape(shape):
            shape.add_shape_to_canvas(self)
            widget = shape.widget = WidgetShape(painter=self, shape=shape)
            widget.show_widget()
            shape.fbind('on_update', App.get_running_app().changed_callback)
            return True
        return False

    def remove_shape(self, shape):
        """Extends the base method: on success, removes the shape's graphics
        and widget and unbinds the change notification.
        """
        if super(CeedPainter, self).remove_shape(shape):
            shape.remove_shape_from_canvas()
            shape.widget.hide_widget()
            shape.widget = None
            shape.funbind('on_update', App.get_running_app().changed_callback)
            return True
        return False

    def add_group(self, group=None):
        """Extends the base method to create/show the group's
        :class:`WidgetShapeGroup` and bind the app change notification.
        """
        group = super(CeedPainter, self).add_group(group)
        widget = group.widget = WidgetShapeGroup(group=group)
        widget.show_widget()
        group.fbind('on_changed', App.get_running_app().changed_callback)
        return group

    def remove_group(self, group):
        """Extends the base method: on success, hides the group's widget and
        unbinds the change notification.
        """
        if super(CeedPainter, self).remove_group(group):
            group.widget.hide_widget()
            group.widget = None
            group.funbind('on_changed', App.get_running_app().changed_callback)
            return True
        return False

    def on_show_label(self, *largs):
        """Shows/hides the :attr:`pos_label` label depending on the value
        of :attr:`show_label` (auto-invoked by Kivy when the property
        changes). Also propagates the state to every shape's widget label.
        """
        state = self.show_label
        for shape in self.shapes:
            shape.widget.show_label = state

        label = self.pos_label
        if state:
            Window.add_widget(label)
            Window.fbind('mouse_pos', self._update_mouse_pos)
            self._update_mouse_pos(None, Window.mouse_pos)
        else:
            Window.remove_widget(label)
            Window.funbind('mouse_pos', self._update_mouse_pos)

    def _update_mouse_pos(self, instance, pos):
        """Updates :attr:`pos_label` with the mouse position when the mouse
        is over the painter; clears the text otherwise.
        """
        x, y = map(int, self.to_widget(*pos))
        if self.collide_point(x, y):
            self.pos_label.pos = pos
            self.pos_label.text = '{}, {}'.format(x, y)
        else:
            self.pos_label.text = ''

    def add_enclosing_polygon(self):
        """Adds a polygon shape named ``'enclosed'`` that encloses the whole
        drawing area.
        """
        w, h = self.size
        self.create_add_shape(
            'polygon', points=[0, 0, w, 0, w, h, 0, h], name='enclosed')

    def select_shape(self, shape):
        """Extends the base method to also select the shape's widget in the
        GUI shapes container.
        """
        if super(CeedPainter, self).select_shape(shape):
            if shape.widget is not None:
                App.get_running_app().shapes_container.select_node(
                    shape.widget)
            return True
        return False

    def deselect_shape(self, shape):
        """Extends the base method to also deselect the shape's widget in the
        GUI shapes container.
        """
        if super(CeedPainter, self).deselect_shape(shape):
            if shape.widget is not None:
                App.get_running_app().shapes_container.deselect_node(
                    shape.widget)
            return True
        return False
class ShapeGroupDraggableLayoutBehavior(DraggableLayoutBehavior):
    """Drop-target layout inside a group widget that displays the shapes of
    the group; shapes dragged onto it are added to that group.
    """

    group_widget: 'WidgetShapeGroup' = ObjectProperty(None)
    '''The :class:`WidgetShapeGroup` whose group receives dropped shapes.
    '''

    def handle_drag_release(self, index, drag_widget):
        """Adds the dropped shape (and, if it was selected, all selected
        shapes) to this layout's group.
        """
        dropped = drag_widget.obj_dragged
        target_group = self.group_widget.group
        target_group.add_shape(dropped.shape)
        if dropped.selected:
            factory = App.get_running_app().shape_factory
            factory.add_selected_shapes_to_group(target_group)
class ShapeGroupList(
        DraggableLayoutBehavior, ShowMoreSelection, WidgetList, BoxLayout):
    """Container widget listing every shape group; dropping a shape or group
    onto it creates a brand new group.
    """

    def add_selected_shapes(self):
        """Adds the painter's currently selected shapes to the most recently
        selected group, or to a brand new group when none is selected.
        """
        target = None
        if self.selected_nodes:
            target = self.selected_nodes[-1].group
        App.get_running_app().shape_factory.add_selected_shapes_to_group(
            target)

    def handle_drag_release(self, index, drag_widget):
        """Creates a new group populated from whatever was dropped: a single
        shape (plus all selected shapes if it was selected) or the contents
        of an existing group.
        """
        factory = App.get_running_app().shape_factory
        new_group = factory.add_group()
        dropped = drag_widget.obj_dragged

        if drag_widget.drag_cls == 'shape':
            new_group.add_shape(dropped.shape)
            if dropped.selected:
                factory.add_selected_shapes_to_group(new_group)
        else:
            for shape in dropped.group.shapes:
                new_group.add_shape(shape)

        # Expand the new group so its contents are visible.
        new_group.widget.expand_widget.state = 'down'
class WidgetShapeGroup(ShowMoreBehavior, BoxLayout):
    """GUI widget representing one :class:`ceed.shape.CeedShapeGroup`
    instance in the groups list.
    """

    selected = BooleanProperty(False)
    '''If the group is :attr:`~ceed.shape.CeedShapeGroup.selected`.
    '''

    group: CeedShapeGroup = ObjectProperty(None, rebind=True)
    '''The :class:`~ceed.shape.CeedShapeGroup` this widget represents.
    '''

    expand_widget = None
    """The ExpandWidget that, when hit, will show the list of shapes of the
    group.
    """

    @property
    def name(self):
        """The :attr:`ceed.shape.CeedShapeGroup.name` of the group.
        """
        return self.group.name

    def show_widget(self):
        """Displays this widget group in the GUI.
        """
        container = App.get_running_app().shape_groups_container
        container.add_widget(self)

    def hide_widget(self):
        """Deselects and removes this widget group from the GUI.
        """
        App.get_running_app().shape_groups_container.deselect_node(self)
        App.get_running_app().shape_groups_container.remove_widget(self)

    @property
    def shape_widgets(self) -> List['ShapeGroupItem']:
        """The :class:`ShapeGroupItem` instances representing the shapes in
        this group (every child of ``more`` except the last).
        """
        return self.more.children[:-1]

    def add_shape(self, shape):
        """Creates and displays a :class:`ShapeGroupItem` widget for the
        given :class:`ceed.shape.CeedShape` under this group's widget.
        """
        item = ShapeGroupItem(shape=shape, group=self)
        self.more.add_widget(item)

    def remove_shape(self, shape):
        """Removes the :class:`ShapeGroupItem` associated with the given
        :class:`ceed.shape.CeedShape`, if any, from this group's widget.
        """
        match = next(
            (w for w in self.shape_widgets if w.shape is shape), None)
        if match is not None:
            self.more.remove_widget(match)
class ShapeGroupItem(BoxLayout):
    """Row widget shown inside a :class:`WidgetShapeGroup` for one shape
    belonging to that group.
    """

    shape: CeedShape = ObjectProperty(None, rebind=True)
    '''The :class:`~ceed.shape.CeedShape` with which this widget is associated.
    '''

    group: CeedShapeGroup = ObjectProperty(None)
    '''The :class:`~ceed.shape.CeedShapeGroup` to which this shape belongs.
    '''

    @property
    def name(self):
        """The :attr:`~ceed.shape.CeedShape.name` of the shape.
        """
        return self.shape.name
class ShapeList(DraggableLayoutBehavior, ShowMoreSelection, WidgetList,
                BoxLayout):
    """Container widget listing every shape; dropping a shape onto it
    duplicates that shape (or all selected shapes).
    """

    def select_node(self, node):
        """Extends the base selection to also select the underlying shape in
        the shape factory.
        """
        if not super(ShapeList, self).select_node(node):
            return False
        App.get_running_app().shape_factory.select_shape(node.shape)
        return True

    def deselect_node(self, node):
        """Extends the base deselection to also deselect the underlying
        shape in the shape factory.
        """
        if not super(ShapeList, self).deselect_node(node):
            return False
        App.get_running_app().shape_factory.deselect_shape(node.shape)
        return True

    def handle_drag_release(self, index, drag_widget):
        """Duplicates the dropped shape, or every selected shape when the
        dropped one was selected.
        """
        factory = App.get_running_app().shape_factory
        dropped = drag_widget.obj_dragged
        if dropped.selected:
            factory.duplicate_selected_shapes()
        else:
            factory.duplicate_shape(dropped.shape)
class WidgetShape(ShowMoreBehavior, BoxLayout):
    """The widget displayed for and associated with a
    :class:`~ceed.shape.CeedShape` instance.

    Keeps the displayed centroid/area/name in sync with the shape through a
    Clock trigger bound to the shape's ``on_update`` event.
    """

    painter: CeedPainter = ObjectProperty(None, rebind=True)
    '''The :class:`CeedPainter` this shape belongs to.
    '''

    shape: CeedShape = ObjectProperty(None, rebind=True)
    '''The :class:`~ceed.shape.CeedShape` instance associated with the widget.
    '''

    label = None
    '''The label widget that displays the name of the shape in the center
    of the shape, in the drawing area, when enabled.
    '''

    show_label = BooleanProperty(False)
    '''Whether :attr:`label` is currently displayed.
    '''

    centroid_x = NumericProperty(0)
    '''The x center of the shape (e.g. the x-center of the polygon).
    '''

    centroid_y = NumericProperty(0)
    '''The y center of the shape (e.g. the y-center of the polygon).
    '''

    area = NumericProperty(0)
    '''The enclosed area of the shape (e.g. the area of the polygon).
    '''

    selected = BooleanProperty(False)
    '''Whether the shape is :attr:`kivy_garden.painter.PaintShape.selected`.
    '''

    # Callable that schedules _shape_update on the next Clock frame; set in
    # __init__ and bound/unbound in show_widget/hide_widget.
    _shape_update_trigger = None

    def __init__(self, **kwargs):
        super(WidgetShape, self).__init__(**kwargs)
        self.show_label = self.painter.show_label
        self.label = Label()
        trigger = Clock.create_trigger(self._shape_update, 0)
        # Wrapped so the bound callback always returns False (presumably so
        # the event dispatch never sees a truthy return -- TODO confirm).
        self._shape_update_trigger = lambda *largs: trigger() and False

    @property
    def name(self):
        """The :attr:`kivy_garden.painter.PaintShape.name` of the shape.
        """
        return self.shape.name

    def show_widget(self, index=None):
        """Displays this widget in the list of shape widgets at the given
        index. The index is in the same order as the shapes, i.e. zero is shape
        zero etc.

        Also binds all the callbacks that keep the widget in sync with the
        shape, and does an initial sync.
        """
        if index is None:
            App.get_running_app().shapes_container.add_widget(self)
        else:
            # Kivy children are in reverse order relative to the shape list,
            # hence the len(children) - index conversion.
            App.get_running_app().shapes_container.add_widget(
                self, index=len(
                    App.get_running_app().shapes_container.children) - index)

        self.fbind('show_label', self._show_label)
        self.shape.fbind('name', self._label_text)

        f = self._shape_update_trigger
        self.shape.fbind('on_update', f)
        self.label.fbind('size', f)
        f()
        self._label_text()
        self._show_label()

    def hide_widget(self):
        """Hides this widget from the list of shape widgets and unbinds all
        the callbacks bound in :meth:`show_widget`.
        """
        self.shape.funbind('on_update', self._shape_update_trigger)
        App.get_running_app().shapes_container.remove_widget(self)

        label = self.label
        label.funbind('size', self._shape_update_trigger)
        self.funbind('show_label', self._show_label)
        self.shape.funbind('name', self._label_text)
        self._show_label(force_hide=True)

    def _show_label(self, *largs, force_hide=False):
        """Displays/hides the label in the shapes center containing the name of
        shape.

        :param force_hide: when True, hides the label regardless of
            :attr:`show_label` (used when the widget itself is hidden).
        """
        if self.show_label and not force_hide:
            if self.label.parent is not None:  # already showing
                return
            self.painter.add_widget(self.label)
            self._shape_update_trigger()
            self._label_text()
        elif self.label.parent is not None:
            self.painter.remove_widget(self.label)

    def _label_text(self, *largs):
        """Updates the :attr:`label` with the current name of the shape.
        """
        if self.show_label:
            self.label.text = self.shape.name

    def _show_more(self, *largs):
        # Refresh the displayed centroid/area when the details are expanded.
        super(WidgetShape, self)._show_more(*largs)
        if self.show_more:
            self._shape_update_trigger()

    def _shape_update(self, *largs):
        """Update the centroids and area when the shape is changed.

        No-op while the shape is still being drawn (not finished).
        """
        if not self.shape.finished:
            return
        self.centroid_x, self.centroid_y = tuple(
            map(round, self.shape.centroid))
        self.area = round(self.shape.area)
        if self.show_label:
            self.label.center = self.shape.centroid

    def _update_centroid(self, x=None, y=None):
        """Sets the centroid from the GUI.

        Translates the shape by the delta between the requested and current
        (rounded) centroid; either coordinate may be omitted.
        """
        x1, y1 = map(round, self.shape.centroid)
        dx = 0 if x is None else x - x1
        dy = 0 if y is None else y - y1
        if dx or dy:
            self.shape.translate(dpos=(dx, dy))

    def _update_area(self, area):
        """Sets the area from the GUI.

        Skips the update when the requested area is (float-)equal to the
        current one to avoid needless rescaling.
        """
        if not math.isclose(area, self.area):
            self.shape.set_area(area)
# Register with the Kivy factory so the class can be instantiated by name
# from kv language rules (make it available from kv).
Factory.register('ShapeGroupDraggableLayoutBehavior',
                 cls=ShapeGroupDraggableLayoutBehavior)
| |
#!/usr/bin/python3
"""This script generates a family file from a given URL.
This script must be invoked with the pwb wrapper script/code entry point.
Usage::
pwb generate_family_file.py [<url>] [<name>] [<dointerwiki>] [<verify>]
Parameters are optional. They must be given consecutively but may be
omitted if there is no successor parameter. The parameters are::
<url>: an url from where the family settings are loaded
<name>: the family name without "_family.py" tail.
<dointerwiki>: predefined answer (y|n) to add multiple site codes
    <verify>: predefined answer (y|n) to disable certificate validation
Example::
pwb generate_family_file.py https://www.mywiki.bogus/wiki/Main_Page mywiki
This will create the file mywiki_family.py in families folder of your
base directory.
.. versionchanged:: 7.0
moved to pywikibot.scripts folder; create family files in families
folder of your base directory instead of pywikibot/families.
"""
#
# (C) Pywikibot team, 2010-2022
#
# Distributed under the terms of the MIT license
#
import codecs
import os
import string
import sys
from typing import Optional
from urllib.parse import urlparse
# see pywikibot.family.py
# Legal characters for Family name and Family langs keys
NAME_CHARACTERS = string.ascii_letters + string.digits
# nds_nl code alias requires "_"
# dash must be the last char to be reused as regex in update_linktrails
CODE_CHARACTERS = string.ascii_lowercase + string.digits + '_-'
class FamilyFileGenerator:

    """Family file creator object.

    Collects the wiki's settings (interwiki sites, script paths, protocols)
    from a live MediaWiki installation and writes a pywikibot family file
    into the ``families`` folder of the base directory.
    """

    def __init__(self,
                 url: Optional[str] = None,
                 name: Optional[str] = None,
                 dointerwiki: Optional[str] = None,
                 verify: Optional[str] = None) -> None:
        """
        Parameters are optional. If not given the script asks for the values.

        :param url: an url from where the family settings are loaded
        :param name: the family name without "_family.py" tail.
        :param dointerwiki: Predefined answer to add multiple site
            codes. Pass `Y` or `y` for yes `N` or `n` for no and
            `E` or `e` if you want to edit the collection of sites.
        :param verify: If a certificate verification fails, you may
            pass `Y` or `y` to disable certificate validation `N` or `n`
            to keep it enabled.
        """
        from pywikibot.scripts import _import_with_no_user_config

        # from pywikibot.site_detect import MWSite and
        # from pywikibot.config import base_dir
        # when required but disable user-config checks
        # so the family can be created first,
        # and then used when generating the user-config
        self.Wiki = _import_with_no_user_config(
            'pywikibot.site_detect').site_detect.MWSite
        self.base_dir = _import_with_no_user_config(
            'pywikibot.config').config.base_dir

        self.base_url = url
        self.name = name
        self.dointerwiki = dointerwiki
        self.verify = verify

        self.wikis = {}  # {'https://wiki/$1': Wiki('https://wiki/$1'), ...}
        self.langs = []  # [Wiki('https://wiki/$1'), ...]

    def get_params(self) -> bool:  # pragma: no cover
        """Ask for parameters if necessary; return False to abort."""
        if self.base_url is None:
            self.base_url = input('Please insert URL to wiki: ')
            if not self.base_url:
                return False

        if self.name is None:
            self.name = input('Please insert a short name (eg: freeciv): ')
            if not self.name:
                return False

        if any(x not in NAME_CHARACTERS for x in self.name):
            print('ERROR: Name of family "{}" must be ASCII letters and '
                  'digits [a-zA-Z0-9]'.format(self.name))
            return False

        return True

    def get_wiki(self):
        """Get wiki from base_url.

        :return: a ``(site, verify)`` tuple, or ``(None, None)`` when the
            site could not be reached with or without SSL verification.
        """
        import pywikibot
        from pywikibot.exceptions import FatalServerError
        print('Generating family file from ' + self.base_url)
        # First try with certificate verification; on SSL failure optionally
        # retry once with verification disabled.
        for verify in (True, False):
            try:
                w = self.Wiki(self.base_url, verify=verify)
            except FatalServerError:  # pragma: no cover
                print('ERROR: '
                      + pywikibot.comms.http.SSL_CERT_VERIFY_FAILED_MSG)
                pywikibot.exception()
                if not pywikibot.bot.input_yn(
                        'Retry with disabled ssl certificate validation',
                        default=self.verify, automatic_quit=False,
                        force=self.verify is not None):
                    break
            else:
                return w, verify
        return None, None

    def run(self) -> None:
        """Main method, generate family file."""
        if not self.get_params():
            return
        w, verify = self.get_wiki()
        if w is None:
            return
        self.wikis[w.lang] = w

        print('\n=================================='
              '\nAPI url: {w.api}'
              '\nMediaWiki version: {w.version}'
              '\n==================================\n'.format(w=w))

        self.getlangs(w)
        self.getapis()
        self.writefile(verify)

    def getlangs(self, w) -> None:
        """Determine site code of a family.

        Fills :attr:`langs` from the wiki's interwiki map and, depending on
        :attr:`dointerwiki` (or an interactive answer), keeps all, none, or
        a user-edited selection of the other sites.
        """
        print('Determining other sites...', end='')
        try:
            self.langs = w.langs
            print(' '.join(sorted(wiki['prefix'] for wiki in self.langs)))
        except Exception as e:  # pragma: no cover
            self.langs = []
            print(e, '; continuing...')

        # Make sure the originating wiki itself is in the list.
        if not any(lang['url'] == w.iwpath for lang in self.langs):
            if w.private_wiki:
                w.lang = self.name
            self.langs.append({'language': w.lang,
                               'local': '',
                               'prefix': w.lang,
                               'url': w.iwpath})

        code_len = len(self.langs)
        if code_len > 1:
            if self.dointerwiki is None:
                makeiw = input(
                    '\nThere are {} sites available.'
                    '\nDo you want to generate interwiki links? '
                    'This might take a long time. ([y]es/[N]o/[e]dit)'
                    .format(code_len)).lower()
            else:
                makeiw = self.dointerwiki

            if makeiw == 'n':
                # Keep only the originating wiki.
                self.langs = [wiki for wiki in self.langs
                              if wiki['url'] == w.iwpath]
            elif makeiw == 'e':
                for wiki in self.langs:
                    print(wiki['prefix'], wiki['url'])
                do_langs = input('Which sites do you want: ')
                self.langs = [wiki for wiki in self.langs
                              if wiki['prefix'] in do_langs
                              or wiki['url'] == w.iwpath]

        for wiki in self.langs:
            assert all(x in CODE_CHARACTERS for x in wiki['prefix']), \
                'Family {} code {} must be ASCII lowercase ' \
                'letters and digits [a-z0-9] or underscore/dash [_-]' \
                .format(self.name, wiki['prefix'])

    def getapis(self) -> None:
        """Load other site pages, filling :attr:`wikis` keyed by prefix."""
        print('Loading wikis... ')
        for lang in self.langs:
            key = lang['prefix']
            print('  * {}... '.format(key), end='')
            if key not in self.wikis:
                try:
                    self.wikis[key] = self.Wiki(lang['url'])
                    print('downloaded')
                except Exception as e:  # pragma: no cover
                    print(e)
            else:
                print('in cache')

    def writefile(self, verify) -> None:
        """Write the family file.

        :param verify: whether SSL verification was used; when falsy, a
            ``verify_SSL_certificate`` override returning False is appended.
        """
        fn = os.path.join(self.base_dir, 'families',
                          '{}_family.py'.format(self.name))
        print('Writing %s... ' % fn)

        # Check for an existing family file. Using os.path.exists instead of
        # an open() probe avoids leaking an unclosed file handle.
        if os.path.exists(fn):
            if input('{} already exists. Overwrite? (y/n)'
                     .format(fn)).lower() == 'n':
                print('Terminating.')
                sys.exit(1)

        code_hostname_pairs = '\n        '.join(
            "'{code}': '{hostname}',".format(
                code=k, hostname=urlparse(w.server).netloc
            ) for k, w in self.wikis.items())

        code_path_pairs = '\n            '.join(
            "'{code}': '{path}',".format(code=k, path=w.scriptpath)
            for k, w in self.wikis.items())

        code_protocol_pairs = '\n            '.join(
            "'{code}': '{protocol}',".format(
                code=k, protocol=urlparse(w.server).scheme
            ) for k, w in self.wikis.items())

        content = family_template % {
            'url': self.base_url, 'name': self.name,
            'code_hostname_pairs': code_hostname_pairs,
            'code_path_pairs': code_path_pairs,
            'code_protocol_pairs': code_protocol_pairs}

        if not verify:
            # assuming this is the same for all codes
            content += """

    def verify_SSL_certificate(self, code: str) -> bool:
        return False
"""
        os.makedirs(os.path.dirname(fn), exist_ok=True)
        with codecs.open(fn, 'w', 'utf-8') as fh:
            fh.write(content)
family_template = """\
\"\"\"
This family file was auto-generated by generate_family_file.py script.
Configuration parameters:
url = %(url)s
name = %(name)s
Please do not commit this to the Git repository!
\"\"\"
from pywikibot import family
class Family(family.Family): # noqa: D101
name = '%(name)s'
langs = {
%(code_hostname_pairs)s
}
def scriptpath(self, code):
return {
%(code_path_pairs)s
}[code]
def protocol(self, code):
return {
%(code_protocol_pairs)s
}[code]
"""
def main() -> None:
    """Process command line arguments and generate a family file."""
    args = sys.argv[1:]
    if args and args[0] == '-help':
        # Asked for help: show the module docstring and do nothing else.
        print(__doc__)
        return
    FamilyFileGenerator(*args).run()
# Script entry point: only run when executed directly, not when imported.
if __name__ == '__main__':
    main()
| |
import pytest
from math import isclose, ceil
import numpy as np
import pathlib
from pytest_dependency import depends
import ceed
from .examples.shapes import CircleShapeP1, CircleShapeP2
from .examples import assert_image_same, create_test_image
from .examples.experiment import create_basic_experiment, run_experiment, \
set_serializer_even_count_bits, wait_experiment_done, measure_fps
from ceed.tests.ceed_app import CeedTestApp
from ceed.tests.test_stages import get_stage_time_intensity
from ceed.analysis.merge_data import CeedMCSDataMerger
from ceed.tests.test_app.examples.stages import ParaAllStage
from ceed.stage import last_experiment_stage_name
# Background images used/recorded by the experiments; populated once per test
# module by the autouse ``init_written_data`` fixture below.
stored_images = []
# ``b`` (intensity offset) values of the two LinearFunc ramps, one pair per
# experiment run.
stored_b_values = [(.0, .2), (.1, .3)]
stored_shape_names = CircleShapeP1.name, CircleShapeP2.name
# r, g is active, b is inactive
stored_colors = [((0, 1, ), (2,)), ] * 2
stored_stage_name = ParaAllStage.name
# Pre-generated sample data files shipped with the ceed examples.
data_root = pathlib.Path(ceed.__file__).parent.joinpath('examples', 'data')
existing_experiment_filename_v1_0_0_dev0 = str(
    data_root.joinpath('ceed_data_v1.0.0.dev0.h5'))
existing_template_filename_v1_0_0_dev0 = str(
    data_root.joinpath('ceed_template_v1.0.0.dev0.yml'))
existing_merged_experiment_filename_v1_0_0_dev0 = str(
    data_root.joinpath('ceed_mcs_data_merged_v1.0.0.dev0.h5'))
mcs_filename_v1_0_0_dev0 = str(data_root.joinpath('mcs_data_v1.0.0.dev0.h5'))
existing_experiment_filename_v1_0_0_dev1 = str(
    data_root.joinpath('ceed_data_v1.0.0.dev1.h5'))
existing_template_filename_v1_0_0_dev1 = str(
    data_root.joinpath('ceed_template_v1.0.0.dev1.yml'))
existing_merged_experiment_filename_v1_0_0_dev1 = str(
    data_root.joinpath('ceed_mcs_data_merged_v1.0.0.dev1.h5'))
mcs_filename_v1_0_0_dev1 = str(data_root.joinpath('mcs_data_v1.0.0.dev1.h5'))
# Mark every test in this module as a ceed app test.
pytestmark = pytest.mark.ceed_app
def verify_experiment(values, n, first):
    """Assert the recorded shape intensities match the expected ramp.

    ``first`` selects which of the two stored ``b`` offset pairs the
    experiment used. Raises AssertionError on any mismatch.
    """
    assert n == 240
    offsets = stored_b_values[0] if first else stored_b_values[1]
    for name, b, (active, inactive) in zip(
            stored_shape_names, offsets, stored_colors):
        d = values[name]
        for i in range(240):
            # alpha channel is always fully opaque
            assert d[i][3] == 1
            for j in inactive:
                assert d[i][j] == 0
            # ramp repeats every 30 frames; 2.4 = .6 * 120 / 30
            expected = .6 * (i % 30) / 30 + b
            for j in active:
                assert isclose(float(d[i][j]), expected, abs_tol=.001)
def exp_source(filename):
    """Classify a data-file path by its basename.

    Returns ``(existing, merged)``: ``existing`` is True for the
    repository's pre-generated files (name contains neither 'internal' nor
    'external'); ``merged`` is True when the name contains 'merged'.
    """
    name = pathlib.Path(filename).name
    existing = 'internal' not in name and 'external' not in name
    return existing, 'merged' in name
@pytest.fixture(scope="module", autouse=True)
def init_written_data():
    """Populate the module-level ``stored_images`` once per test module."""
    global stored_images
    sizes = [(500, 500), (250, 500), (500, 250), (250, 200)]
    stored_images = [create_test_image(w, h) for w, h in sizes]
async def run_data_experiment(stage_app: CeedTestApp):
    """Run two experiments with a 2-shape stage and linear intensity ramps.

    Each experiment uses one of the ``stored_b_values`` offset pairs and one
    of the ``stored_images`` as background; the remaining stored images are
    appended to the data file afterwards.
    """
    from ..test_stages import create_2_shape_stage
    from ceed.function.plugin import LinearFunc
    root, s1, s2, shape1, shape2 = create_2_shape_stage(
        stage_app.app.stage_factory, show_in_gui=True, app=stage_app)
    await stage_app.wait_clock_frames(2)
    # 30 frames
    f1 = LinearFunc(
        function_factory=stage_app.app.function_factory, duration=.25, loop=8,
        m=2.4
    )
    f2 = LinearFunc(
        function_factory=stage_app.app.function_factory, duration=.25, loop=8,
        m=2.4
    )
    s1.stage.add_func(f1)
    s2.stage.add_func(f2)
    await stage_app.wait_clock_frames(2)
    stage_app.app.view_controller.frame_rate = 120
    # count frames
    stage_app.app.view_controller.use_software_frame_rate = False
    stage_app.app.view_controller.pad_to_stage_handshake = True
    # one full experiment per (image, offsets) pair
    for image, (b1, b2) in zip(stored_images, stored_b_values):
        f1.b = b1
        f2.b = b2
        # set background image
        stage_app.app.central_display.update_img(image)
        stage_app.app.player.last_image = image
        await stage_app.wait_clock_frames(2)
        stage_app.app.view_controller.request_stage_start(root.name)
        await wait_experiment_done(stage_app, timeout=10 * 60)
    # remaining images are stored as standalone images in the file
    for i, image in enumerate(stored_images[2:]):
        stage_app.app.ceed_data.add_image_to_file(image, f'image {i}')
    await stage_app.wait_clock_frames(2)
async def test_function_plugin_source_in_data_file(
        stage_app: CeedTestApp, tmp_path):
    """Function-plugin source dumped from a data file matches the on-disk
    plugin source byte for byte."""
    import ceed.function.plugin
    src_contents = pathlib.Path(ceed.function.plugin.__file__).read_bytes()
    stage = await create_basic_experiment(stage_app)
    f = await run_experiment(
        stage_app, stage.name, tmp_path, num_clock_frames=10)
    target_root = tmp_path / 'test_dump_target'
    target_root.mkdir()
    with f:
        f.dump_plugin_sources('function', target_root)
    plugin = target_root / 'ceed.function.plugin' / '__init__.py'
    dumped_contents = plugin.read_bytes()
    assert dumped_contents == src_contents
async def test_stage_plugin_source_in_data_file(
        stage_app: CeedTestApp, tmp_path):
    """Stage-plugin source dumped from a data file matches the on-disk
    plugin source byte for byte."""
    import ceed.stage.plugin
    src_contents = pathlib.Path(ceed.stage.plugin.__file__).read_bytes()
    stage = await create_basic_experiment(stage_app)
    f = await run_experiment(
        stage_app, stage.name, tmp_path, num_clock_frames=10)
    target_root = tmp_path / 'test_dump_target'
    target_root.mkdir()
    with f:
        f.dump_plugin_sources('stage', target_root)
    plugin = target_root / 'ceed.stage.plugin' / '__init__.py'
    dumped_contents = plugin.read_bytes()
    assert dumped_contents == src_contents
@pytest.fixture(scope='module')
def internal_experiment_filename(tmp_path_factory):
    """Module-scoped h5 path for the internally-run experiment.

    All tests depending on this, also depend on
    test_create_internal_experiment.
    """
    root = tmp_path_factory.mktemp('experiment')
    return str(root / 'new_experiment_internal.h5')
@pytest.fixture(scope='module')
def external_experiment_filename(tmp_path_factory):
    """Module-scoped h5 path for the externally-run experiment.

    All tests depending on this, also depend on
    test_create_external_experiment.
    """
    root = tmp_path_factory.mktemp('experiment')
    return str(root / 'new_experiment_external.h5')
@pytest.fixture(scope='module')
def re_merge_experiment_filename_v1_0_0_0_dev0(tmp_path_factory):
    """(output, ceed, mcs) filename triple for re-merging the dev0 data.

    All tests depending on this, also depend on
    test_create_merge_experiment.
    """
    out_dir = tmp_path_factory.mktemp('experiment')
    merged = str(out_dir / 'new_experiment_merged_v1_0_0_dev0.h5')
    return (merged, existing_experiment_filename_v1_0_0_dev0,
            mcs_filename_v1_0_0_dev0)
@pytest.fixture(scope='module')
def re_merge_experiment_filename_v1_0_0_0_dev1(tmp_path_factory):
    """(output, ceed, mcs) filename triple for re-merging the dev1 data.

    All tests depending on this, also depend on
    test_create_merge_experiment.
    """
    out_dir = tmp_path_factory.mktemp('experiment')
    merged = str(out_dir / 'new_experiment_merged_v1_0_0_dev1.h5')
    return (merged, existing_experiment_filename_v1_0_0_dev1,
            mcs_filename_v1_0_0_dev1)
@pytest.fixture(params=[
    'internal', 'external', 're_merged_v1_0_0_dev0',
    'existing_merged_v1_0_0_dev0', 'existing_unmerged_v1_0_0_dev0',
    're_merged_v1_0_0_dev1', 'existing_merged_v1_0_0_dev1',
    'existing_unmerged_v1_0_0_dev1'])
def experiment_ceed_filename(
        request, internal_experiment_filename, external_experiment_filename,
        re_merge_experiment_filename_v1_0_0_0_dev0,
        re_merge_experiment_filename_v1_0_0_0_dev1):
    """Parametrized ceed data-file path covering freshly generated, re-merged
    and pre-existing sample files.

    Freshly generated/merged sources register a pytest-dependency on the test
    that creates them; pre-existing sample files are skipped when absent.
    """
    src = request.param
    if src == 'internal':
        depends(request, ['internal_experiment'])
        return internal_experiment_filename
    if src == 'external':
        depends(request, ['external_experiment'])
        return external_experiment_filename
    if src == 're_merged_v1_0_0_dev0':
        depends(request, ['merge_experiment'])
        return re_merge_experiment_filename_v1_0_0_0_dev0[0]
    if src == 're_merged_v1_0_0_dev1':
        depends(request, ['merge_experiment'])
        return re_merge_experiment_filename_v1_0_0_0_dev1[0]
    if src == 'existing_merged_v1_0_0_dev0':
        if not pathlib.Path(
                existing_merged_experiment_filename_v1_0_0_dev0).exists():
            pytest.skip(
                f'"{existing_merged_experiment_filename_v1_0_0_dev0}" '
                f'does not exist')
        return existing_merged_experiment_filename_v1_0_0_dev0
    if src == 'existing_merged_v1_0_0_dev1':
        if not pathlib.Path(
                existing_merged_experiment_filename_v1_0_0_dev1).exists():
            pytest.skip(
                f'"{existing_merged_experiment_filename_v1_0_0_dev1}" '
                f'does not exist')
        return existing_merged_experiment_filename_v1_0_0_dev1
    if src == 'existing_unmerged_v1_0_0_dev0':
        if not pathlib.Path(existing_experiment_filename_v1_0_0_dev0).exists():
            pytest.skip(
                f'"{existing_experiment_filename_v1_0_0_dev0}" does not exist')
        return existing_experiment_filename_v1_0_0_dev0
    # fall-through: 'existing_unmerged_v1_0_0_dev1'
    if not pathlib.Path(existing_experiment_filename_v1_0_0_dev1).exists():
        pytest.skip(
            f'"{existing_experiment_filename_v1_0_0_dev1}" does not exist')
    return existing_experiment_filename_v1_0_0_dev1
@pytest.fixture(params=[
    're_merged_v1_0_0_dev0', 'existing_merged_v1_0_0_dev0',
    're_merged_v1_0_0_dev1', 'existing_merged_v1_0_0_dev1'])
def merged_filename(
        request, re_merge_experiment_filename_v1_0_0_0_dev0,
        re_merge_experiment_filename_v1_0_0_0_dev1):
    """Parametrized path to a ceed+MCS merged data file (re-merged in this
    session or shipped with the examples).

    Re-merged sources register a dependency on test_create_merge_experiment;
    shipped files are skipped when absent.
    """
    src = request.param
    if src == 're_merged_v1_0_0_dev0':
        depends(request, ['merge_experiment'])
        return re_merge_experiment_filename_v1_0_0_0_dev0[0]
    if src == 're_merged_v1_0_0_dev1':
        depends(request, ['merge_experiment'])
        return re_merge_experiment_filename_v1_0_0_0_dev1[0]
    if src == 'existing_merged_v1_0_0_dev0':
        if not pathlib.Path(
                existing_merged_experiment_filename_v1_0_0_dev0).exists():
            pytest.skip(
                f'"{existing_merged_experiment_filename_v1_0_0_dev0}" '
                f'does not exist')
        return existing_merged_experiment_filename_v1_0_0_dev0
    # fall-through: 'existing_merged_v1_0_0_dev1'
    if not pathlib.Path(
            existing_merged_experiment_filename_v1_0_0_dev1).exists():
        pytest.skip(
            f'"{existing_merged_experiment_filename_v1_0_0_dev1}" '
            f'does not exist')
    return existing_merged_experiment_filename_v1_0_0_dev1
@pytest.mark.dependency(name='internal_experiment')
async def test_create_internal_experiment(
        stage_app: CeedTestApp, internal_experiment_filename):
    """Run the data experiment in-process and save the h5 file plus two yml
    configs (stages-only and full app)."""
    filename = internal_experiment_filename
    await run_data_experiment(stage_app)
    stage_app.app.ceed_data.save(filename=filename)
    # strip the '.h5' suffix to derive the yml filenames
    base = filename[:-3]
    stage_app.app.ceed_data.write_yaml_config(base + '.yml', stages_only=True)
    stage_app.app.ceed_data.write_yaml_config(
        base + 'app.yml', stages_only=False)
@pytest.mark.dependency(name='external_experiment')
async def test_create_external_experiment(
        stage_app: CeedTestApp, external_experiment_filename):
    """Run the data experiment in the external viewer process and save the
    resulting h5 file."""
    filename = external_experiment_filename
    stage_app.app.view_controller.start_process()
    await stage_app.wait_clock_frames(2)
    await run_data_experiment(stage_app)
    stage_app.app.view_controller.stop_process()
    await stage_app.wait_clock_frames(2)
    stage_app.app.ceed_data.save(filename=filename)
@pytest.mark.dependency(name='merge_experiment')
@pytest.mark.parametrize(
    'triplet', ['re_merged_v1_0_0_dev0', 're_merged_v1_0_0_dev1'])
def test_create_merge_experiment(
        triplet, re_merge_experiment_filename_v1_0_0_0_dev0,
        re_merge_experiment_filename_v1_0_0_0_dev1):
    """Merge the shipped ceed and MCS sample files into a new merged h5 file,
    skipping when the sample inputs are unavailable."""
    if triplet == 're_merged_v1_0_0_dev0':
        filename, ceed_filename, mcs_filename = \
            re_merge_experiment_filename_v1_0_0_0_dev0
    else:
        filename, ceed_filename, mcs_filename = \
            re_merge_experiment_filename_v1_0_0_0_dev1
    if not pathlib.Path(ceed_filename).exists():
        pytest.skip(f'"{ceed_filename}" does not exist')
    if not pathlib.Path(mcs_filename).exists():
        pytest.skip(f'"{mcs_filename}" does not exist')
    merger = CeedMCSDataMerger(
        ceed_filename=ceed_filename, mcs_filename=mcs_filename)
    experiments = merger.get_experiment_numbers()
    assert experiments == ['0', '1'] or experiments == ['0', '1', '2', '3']
    merger.read_mcs_data()
    merger.read_ceed_data()
    merger.parse_mcs_data()
    # compute the ceed->MCS sample alignment for every experiment, then merge
    alignment = {}
    for experiment in experiments:
        merger.read_ceed_experiment_data(experiment)
        merger.parse_ceed_experiment_data()
        alignment[experiment] = merger.get_alignment()
    merger.merge_data(filename, alignment, notes='app notes')
@pytest.mark.dependency()
def test_saved_metadata(experiment_ceed_filename):
    """Verify file/app/experiment metadata before and after loading MCS data
    and each experiment, for merged and unmerged files."""
    from ceed.analysis import CeedDataReader
    existing_exp, merged_exp = exp_source(experiment_ceed_filename)
    with CeedDataReader(experiment_ceed_filename) as f:
        experiments = f.experiments_in_file
        def verify_app_props():
            # app-level properties that must hold regardless of what (if
            # anything) has been loaded from the file
            assert f.filename == experiment_ceed_filename
            assert experiments == ['0', '1'] or \
                experiments == ['0', '1', '2', '3']
            assert f.num_images_in_file == 2
            if merged_exp:
                assert f.app_notes == 'app notes'
            else:
                assert not f.app_notes
            for name in {
                    'view_controller', 'data_serializer', 'function_factory',
                    'shape_factory', 'stage_factory'}:
                assert f.app_config[name] is not None
        verify_app_props()
        # nothing loaded yet: all per-experiment/MCS properties are empty
        assert f.view_controller is None
        assert f.data_serializer is None
        assert f.function_factory is None
        assert f.stage_factory is None
        assert f.shape_factory is None
        assert f.loaded_experiment is None
        assert f.experiment_cam_image is None
        assert not f.experiment_stage_name
        assert not f.experiment_notes
        assert not f.external_function_plugin_package
        assert not f.external_stage_plugin_package
        assert not f.electrodes_data
        assert not f.electrodes_metadata
        assert f.electrode_dig_data is None
        assert f.electrode_intensity_alignment is None
        assert f.electrode_intensity_alignment_gpu_rate is None
        mcs_props = [
            'electrodes_data', 'electrodes_metadata', 'electrode_dig_data']
        # only merged files contain MCS data; unmerged raise on load
        if merged_exp:
            f.load_mcs_data()
        else:
            with pytest.raises(TypeError):
                f.load_mcs_data()
        mcs_values = [getattr(f, name) for name in mcs_props]
        if merged_exp:
            for name in mcs_props:
                assert getattr(f, name) is not None and len(getattr(f, name))
        else:
            assert not f.electrodes_data
            assert not f.electrodes_metadata
            assert f.electrode_dig_data is None
            assert f.electrode_intensity_alignment is None
            assert f.electrode_intensity_alignment_gpu_rate is None
        alignment = f.electrode_intensity_alignment
        alignment_gpu = f.electrode_intensity_alignment_gpu_rate
        exp_props = [
            'view_controller', 'data_serializer', 'function_factory',
            'stage_factory', 'shape_factory']
        exp_values = [getattr(f, name) for name in exp_props]
        for exp in experiments:
            f.load_experiment(exp)
            verify_app_props()
            # MCS-wide data must not be replaced by loading an experiment
            for name, value in zip(mcs_props, mcs_values):
                assert getattr(f, name) is value
            if merged_exp:
                # alignment arrays are per-experiment and must be fresh objects
                assert f.electrode_intensity_alignment is not None
                assert f.electrode_intensity_alignment_gpu_rate is not None
                assert len(f.electrode_intensity_alignment)
                assert len(f.electrode_intensity_alignment_gpu_rate)
                assert len(f.electrode_intensity_alignment_gpu_rate) >= \
                    len(f.electrode_intensity_alignment)
                assert f.electrode_intensity_alignment is not alignment
                assert f.electrode_intensity_alignment_gpu_rate is not \
                    alignment_gpu
                alignment = f.electrode_intensity_alignment
                alignment_gpu = f.electrode_intensity_alignment_gpu_rate
            else:
                assert f.electrode_intensity_alignment is None
                assert f.electrode_intensity_alignment_gpu_rate is None
            assert f.loaded_experiment == str(exp)
            assert f.experiment_cam_image is not None
            assert f.view_controller is not None
            assert f.data_serializer is not None
            assert f.function_factory is not None
            assert f.stage_factory is not None
            assert f.shape_factory is not None
            # it should change for each exp
            for name, value in zip(exp_props, exp_values):
                assert getattr(f, name) is not value
            exp_values = [getattr(f, name) for name in exp_props]
            assert f.experiment_stage_name
            assert not f.experiment_notes
            assert not f.external_function_plugin_package
            assert not f.external_stage_plugin_package
@pytest.mark.dependency()
def test_saved_data(experiment_ceed_filename):
    """Verify the recorded shape intensity arrays, frame counters and camera
    image for both experiments in the file."""
    from ceed.analysis import CeedDataReader
    existing_exp, merged_exp = exp_source(experiment_ceed_filename)
    shape1, shape2 = stored_shape_names
    with CeedDataReader(experiment_ceed_filename) as f:
        assert f.led_state is None
        for exp, image, (b1, b2) in zip((0, 1), stored_images, stored_b_values):
            f.load_experiment(exp)
            d1 = f.shapes_intensity[shape1]
            d2 = f.shapes_intensity[shape2]
            # 240 frames, rgba per frame
            assert d1.shape == (240, 4)
            assert d2.shape == (240, 4)
            assert len(
                np.asarray(f._block.data_arrays['frame_time_counter'])) == 240
            assert len(np.asarray(f._block.data_arrays['frame_bits'])) == 240
            counter = np.asarray(f._block.data_arrays['frame_counter'])
            assert len(counter) == 240
            assert np.all(counter == np.arange(1, 241))
            render = np.asarray(f._block.data_arrays['frame_time_counter'])
            assert len(render) == 240
            assert np.all(render == np.arange(1, 241))
            assert len(np.asarray(f._block.data_arrays['frame_time'])) == 240
            assert f.shapes_intensity_rendered[shape1].shape[0] <= d1.shape[0]
            if f.electrode_intensity_alignment is not None:
                assert f.shapes_intensity_rendered[shape1].shape[0] <= \
                    f.shapes_intensity_rendered_gpu_rate[shape1].shape[0]
            assert f.shapes_intensity_rendered[shape1].shape[1] == d1.shape[1]
            if f.electrode_intensity_alignment is not None:
                assert f.shapes_intensity_rendered[shape1].shape[1] == \
                    f.shapes_intensity_rendered_gpu_rate[shape1].shape[1]
            for i in range(240):
                for d, b, (active, inactive) in zip(
                        (d1, d2), (b1, b2), stored_colors):
                    assert d[i, 3] == 1
                    for j in inactive:
                        assert d[i, j] == 0
                    # 2.4 = .6 * 120 / 30
                    val = .6 * (i % 30) / 30 + b
                    for j in active:
                        matched = isclose(float(d[i, j]), val, abs_tol=.001)
                        # original ceed sometimes treated last sample as first
                        # because of float point issues
                        if existing_exp and not (i % 30):
                            assert matched or \
                                isclose(float(d[i, j]), b + .6, abs_tol=.001)
                        else:
                            assert matched
            assert_image_same(
                image, f.experiment_cam_image, exact=not existing_exp)
        assert f.led_state.tolist() == [(0, 1, 1, 1)]
@pytest.mark.dependency()
@pytest.mark.parametrize('src', ['internal', 'external'])
def test_event_data(
        src, internal_experiment_filename, external_experiment_filename):
    """Verify logged stage/function loop events and ceed_id assignments for
    internally and externally generated experiment files."""
    from ceed.analysis import CeedDataReader
    fname = internal_experiment_filename
    if src == 'external':
        fname = external_experiment_filename
    # build the expected event list: start events for all 5 ceed ids, then
    # per-loop end/start pairs for the two functions (ids 2 and 4), then the
    # final end_loop/end events
    loops = [
        [0, i, 'start' + s, [0, 0]]
        for i in (0, 1, 3, 2, 4) for s in ('', '_loop')
    ]
    for i in range(1, 8):
        for s in (2, 4):
            loops.append([i * 30, s, 'end_loop', [i - 1, ] * 2])
            loops.append([i * 30, s, 'start_loop', [i, ] * 2])
    for i in (2, 1, 4, 3, 0):
        loops.append([8 * 30, i, 'end_loop', [7 if i in (2, 4) else 0, ] * 2])
        loops.append([8 * 30, i, 'end', [7 if i in (2, 4) else 0, ] * 2])
    with CeedDataReader(fname) as f:
        for exp in (0, 1):
            f.load_experiment(exp)
            # drop the trailing element of each event and of its loop list
            events = [d[:-1] + [d[-1][:-1], ] for d in f.event_data]
            assert events == loops
            stage = f.stage_factory.stage_names[last_experiment_stage_name]
            assert stage.ceed_id == 0
            assert stage.stages[0].ceed_id == 1
            assert stage.stages[0].functions[0].ceed_id == 2
            assert stage.stages[1].ceed_id == 3
            assert stage.stages[1].functions[0].ceed_id == 4
@pytest.mark.dependency()
def test_saved_image(experiment_ceed_filename):
    """The two standalone images stored in the file match the originals, both
    before and after loading an experiment."""
    from ceed.analysis import CeedDataReader
    existing_exp, merged_exp = exp_source(experiment_ceed_filename)
    with CeedDataReader(experiment_ceed_filename) as f:
        for i in range(2):
            image, notes, _ = f.get_image_from_file(i)
            assert f'image {i}' == notes
            assert_image_same(
                image, stored_images[2 + i], exact=not existing_exp)
        # images must remain accessible after loading experiments too
        for exp in (0, 1):
            f.load_experiment(exp)
            for i in range(2):
                image, notes, _ = f.get_image_from_file(i)
                assert f'image {i}' == notes
                assert_image_same(
                    image, stored_images[2 + i], exact=not existing_exp)
@pytest.mark.dependency()
def test_replay_experiment_data(experiment_ceed_filename):
    """Replaying the stored stage config reproduces the recorded intensity
    ramps, for the app config and for each experiment's own config."""
    from ceed.analysis import CeedDataReader
    shape1, shape2 = stored_shape_names
    b1, b2 = stored_b_values[1]
    def verify_values():
        # closure: reads the *current* values/n/b1/b2 bindings, which the
        # loop below rebinds before each call
        assert n == 240
        for i in range(240):
            for name, b, (active, inactive) in zip(
                    (shape1, shape2), (b1, b2), stored_colors):
                d = values[name]
                assert d[i][3] == 1
                for j in inactive:
                    assert d[i][j] == 0
                # 2.4 = .6 * 120 / 30
                val = .6 * (i % 30) / 30 + b
                for j in active:
                    assert isclose(float(d[i][j]), val, abs_tol=.001)
    with CeedDataReader(experiment_ceed_filename) as f:
        values, n, _ = get_stage_time_intensity(
            f.app_config['stage_factory'], stored_stage_name, 120)
        verify_values()
        for exp, image, (b1, b2) in zip((0, 1), stored_images, stored_b_values):
            f.load_experiment(exp)
            values, n, _ = get_stage_time_intensity(
                f.stage_factory, f.experiment_stage_name, 120)
            verify_values()
@pytest.mark.dependency()
def test_mcs_data(merged_filename):
    """Verify electrode data and ceed->MCS alignment in a merged file,
    including the per-frame sample spacing for each video mode."""
    from ceed.analysis import CeedDataReader
    shape1, shape2 = stored_shape_names
    with CeedDataReader(merged_filename) as f:
        dev0 = 'dev0' in f.ceed_version
        f.load_mcs_data()
        assert f.electrodes_data
        assert f.electrodes_data.keys() == f.electrodes_metadata.keys()
        assert f.electrode_dig_data is not None
        n = len(f.electrodes_data[list(f.electrodes_data.keys())[0]])
        assert f.electrode_dig_data.shape == (n, )
        last_end_sample = 0
        for exp in f.experiments_in_file:
            # experiments 2/3 were recorded in QUAD4X/QUAD12X mode
            n_sub_frames = 1
            if exp == '2':
                n_sub_frames = 4
            elif exp == '3':
                n_sub_frames = 12
            f.load_experiment(exp)
            assert f.electrode_intensity_alignment is not None
            assert f.electrode_intensity_alignment_gpu_rate is not None
            n = f.shapes_intensity[shape1].shape[0]
            assert n == 240 * n_sub_frames
            # experiments must be aligned in increasing sample order
            assert f.electrode_intensity_alignment[0] > last_end_sample
            last_end_sample = f.electrode_intensity_alignment[-1]
            n_align = f.electrode_intensity_alignment.shape[0]
            samples_per_frames = f.electrode_intensity_alignment[1:] - \
                f.electrode_intensity_alignment[:-1]
            n_samples_min = np.min(samples_per_frames)
            n_samples_max = np.max(samples_per_frames)
            if dev0:
                assert n_align == 240 or n_align == 239
                # we used 1khz, and no quad mode to generate data
                bot = 1000 // 120
                # in case of missed frame
                top = ceil(2 * 1000 / 120)
            else:
                assert n_align == 240 * n_sub_frames
                # sampled at 5khz
                bot = 5000 // (120 * n_sub_frames)
                # in case of missed frame
                top = ceil(2 * 5000 / (120 * n_sub_frames))
            assert bot <= n_samples_min <= n_samples_max <= top
def test_create_movie(tmp_path):
    """Generate a short movie from the shipped merged dev1 sample file and
    check the output file is created."""
    from ceed.analysis import CeedDataReader
    if not pathlib.Path(
            existing_merged_experiment_filename_v1_0_0_dev1).exists():
        pytest.skip(f'"{existing_merged_experiment_filename_v1_0_0_dev1}" '
                    f'does not exist')
    with CeedDataReader(existing_merged_experiment_filename_v1_0_0_dev1) as f:
        f.load_mcs_data()
        f.load_experiment(0)
        paint_funcs = [
            f.paint_background_image(
                f.experiment_cam_image,
                transform_matrix=f.view_controller.cam_transform),
            f.show_mea_outline(f.view_controller.mea_transform),
            # this function shows the electrode voltage data
            f.paint_electrodes_data_callbacks(
                f.get_electrode_names(), draw_pos_hint=(1, 0),
                volt_axis=50)
        ]
        filename = tmp_path / 'movie.mp4'
        assert not filename.exists()
        f.generate_movie(
            str(filename),
            end=.1,
            lum=1,
            canvas_size_hint=(2, 1),
            # show the data at the normal speed
            speed=1.,
            paint_funcs=paint_funcs
        )
        assert filename.exists()
@pytest.mark.parametrize('stages_only', [True, False])
@pytest.mark.parametrize('suffix', ['app.yml', '.yml'])
@pytest.mark.dependency(depends=['internal_experiment'])
async def test_import_yml_stages(
        stage_app: CeedTestApp, internal_experiment_filename, suffix,
        stages_only):
    """Importing the yml configs written by the internal experiment restores
    the stages; the stages-only yml lacks full-app keys."""
    filename = internal_experiment_filename[:-3] + suffix
    if not stages_only and suffix == '.yml':
        # the stages-only yml has no app-level config to read
        with pytest.raises(KeyError):
            stage_app.app.ceed_data.read_yaml_config(filename)
        return
    stage_app.app.ceed_data.read_yaml_config(filename, stages_only=stages_only)
    await stage_app.wait_clock_frames(2)
    values, n, _ = get_stage_time_intensity(
        stage_app.app.stage_factory, stored_stage_name, 120)
    verify_experiment(values, n, False)
@pytest.mark.parametrize('existing_template_filename', [
    existing_template_filename_v1_0_0_dev0,
    existing_template_filename_v1_0_0_dev1])
async def test_import_yml_existing(
        stage_app: CeedTestApp, existing_template_filename):
    """Importing the shipped yml templates reproduces the first experiment's
    stage behavior; skipped when the template is absent."""
    if not pathlib.Path(existing_template_filename).exists():
        pytest.skip(f'"{existing_template_filename}" does not exist')
    stage_app.app.ceed_data.read_yaml_config(
        existing_template_filename, stages_only=True)
    await stage_app.wait_clock_frames(2)
    values, n, _ = get_stage_time_intensity(
        stage_app.app.stage_factory, stored_stage_name, 120)
    verify_experiment(values, n, True)
@pytest.mark.parametrize('stages_only', [True, False])
@pytest.mark.parametrize('src', [
    'internal', 'existing_v1_0_0_dev0', 'existing_v1_0_0_dev1'])
async def test_import_h5_stages(
        request, stage_app: CeedTestApp, internal_experiment_filename,
        src, stages_only):
    """Importing stages from an h5 data file (freshly generated or shipped)
    reproduces the second experiment's stage behavior."""
    if src == 'internal':
        depends(request, ['internal_experiment'])
        filename = internal_experiment_filename
    elif src == 'existing_v1_0_0_dev0':
        if not pathlib.Path(existing_experiment_filename_v1_0_0_dev0).exists():
            pytest.skip(
                f'"{existing_experiment_filename_v1_0_0_dev0}" does not exist')
        filename = existing_experiment_filename_v1_0_0_dev0
    else:
        if not pathlib.Path(existing_experiment_filename_v1_0_0_dev1).exists():
            pytest.skip(
                f'"{existing_experiment_filename_v1_0_0_dev1}" does not exist')
        filename = existing_experiment_filename_v1_0_0_dev1
    stage_app.app.ceed_data.import_file(filename, stages_only=stages_only)
    await stage_app.wait_clock_frames(2)
    values, n, _ = get_stage_time_intensity(
        stage_app.app.stage_factory, stored_stage_name, 120)
    verify_experiment(values, n, False)
@pytest.mark.ceed_single_pixel
@pytest.mark.parametrize('video_mode', ['RGB', 'QUAD4X', 'QUAD12X'])
@pytest.mark.parametrize(
    'flip,skip', [(True, False), (False, True), (False, False)])
async def test_serializer_corner_pixel(
        stage_app: CeedTestApp, flip, skip, video_mode):
    """On every frame, check the corner-pixel value against the predicted
    serializer counter/short-count/clock bits."""
    # for can't use stage_app because that zooms out leading to pixel being too
    # small to see, seemingly
    from kivy.clock import Clock
    from ceed.function.plugin import ConstFunc
    from ..test_stages import create_2_shape_stage
    n_sub_frames = 1
    if video_mode == 'QUAD4X':
        n_sub_frames = 4
    elif video_mode == 'QUAD12X':
        n_sub_frames = 12
    config, num_handshake_ticks, counter, short_values, clock_values = \
        set_serializer_even_count_bits(
            stage_app.app.data_serializer, n_sub_frames)
    # identity mapping: projector bit i goes to acquisition bit i
    stage_app.app.data_serializer.projector_to_aquisition_map = {
        i: i for i in range(16)}
    root, s1, s2, shape1, shape2 = create_2_shape_stage(
        stage_app.app.stage_factory, show_in_gui=True, app=stage_app)
    s1.stage.add_func(ConstFunc(
        function_factory=stage_app.app.function_factory, duration=20))
    await stage_app.wait_clock_frames(2)
    fps = await measure_fps(stage_app) + 10
    stage_app.app.view_controller.frame_rate = fps
    stage_app.app.view_controller.use_software_frame_rate = False
    stage_app.app.view_controller.skip_estimated_missed_frames = skip
    stage_app.app.view_controller.pad_to_stage_handshake = True
    stage_app.app.view_controller.flip_projector = flip
    stage_app.app.view_controller.output_count = True
    stage_app.app.view_controller.video_mode = video_mode
    assert stage_app.app.view_controller.do_quad_mode == (video_mode != 'RGB')
    assert stage_app.app.view_controller.effective_frame_rate == \
        fps * n_sub_frames
    frame = 0
    expected_values = list(zip(counter, short_values, clock_values))
    # bitmask covering the clock bit and all short-count bits
    clock_or_short = 1 << stage_app.app.data_serializer.clock_idx
    for i in stage_app.app.data_serializer.short_count_indices:
        clock_or_short |= 1 << i
    def verify_serializer(*largs):
        nonlocal frame
        # wait to start
        if not stage_app.app.view_controller.count:
            return
        # stop when we exhausted predicted frames
        if frame >= len(counter):
            stage_app.app.view_controller.request_stage_end()
            return
        (r, g, b, a), = stage_app.get_widget_pos_pixel(
            stage_app.app.shape_factory, [(0, 1079)])
        value = r | g << 8 | b << 16
        count, short, clock = expected_values[frame]
        print(frame, f'{value:010b}, {count:010b}, {short:010b}, {clock:08b}')
        if skip:
            # only count may be different if frames are skipped. Short and clock
            # are the same even if frames are dropped because corner pixel
            # values are not skipped
            assert value & clock_or_short == short | clock
        else:
            assert value == count | short | clock
            assert not stage_app.app.view_controller._n_missed_frames
        frame += 1
    stage_app.app.view_controller.request_stage_start(
        root.name, experiment_uuid=config)
    event = Clock.create_trigger(verify_serializer, timeout=0, interval=True)
    event()
    await wait_experiment_done(stage_app)
    assert frame == len(counter)
@pytest.mark.parametrize('video_mode', ['RGB', 'QUAD4X', 'QUAD12X'])
@pytest.mark.parametrize('skip', [True, False])
async def test_serializer_saved_data(
        stage_app: CeedTestApp, tmp_path, video_mode, skip):
    """Run an experiment, save it, re-parse the saved serializer data and
    verify counters, clock/short-count bits and the handshake message."""
    from kivy.clock import Clock
    from ceed.function.plugin import ConstFunc
    from ..test_stages import create_2_shape_stage
    n_sub_frames = 1
    if video_mode == 'QUAD4X':
        n_sub_frames = 4
    elif video_mode == 'QUAD12X':
        n_sub_frames = 12
    config, num_handshake_ticks, counter, short_values, clock_values = \
        set_serializer_even_count_bits(
            stage_app.app.data_serializer, n_sub_frames)
    # identity mapping: projector bit i goes to acquisition bit i
    stage_app.app.data_serializer.projector_to_aquisition_map = {
        i: i for i in range(16)}
    expected_values = list(zip(counter, short_values, clock_values))
    root, s1, s2, shape1, shape2 = create_2_shape_stage(
        stage_app.app.stage_factory, show_in_gui=True, app=stage_app)
    s1.stage.add_func(ConstFunc(
        function_factory=stage_app.app.function_factory, duration=4))
    await stage_app.wait_clock_frames(2)
    fps = await measure_fps(stage_app) + 10
    stage_app.app.view_controller.frame_rate = fps
    stage_app.app.view_controller.skip_estimated_missed_frames = skip
    stage_app.app.view_controller.use_software_frame_rate = False
    stage_app.app.view_controller.pad_to_stage_handshake = True
    stage_app.app.view_controller.output_count = True
    stage_app.app.view_controller.video_mode = video_mode
    flip_counter = []
    skip_counter = []
    def verify_serializer(*largs):
        # record the frame counter (one entry per sub-frame) and the number
        # of frames the controller estimated as missed, every clock tick
        count_val = stage_app.app.view_controller.count
        if not count_val or not stage_app.app.view_controller.stage_active:
            return
        for i in range(n_sub_frames - 1, -1, -1):
            flip_counter.append(count_val - i)
        skip_counter.append(stage_app.app.view_controller._n_missed_frames)
    event = Clock.create_trigger(verify_serializer, timeout=0, interval=True)
    event()
    stage_app.app.view_controller.request_stage_start(
        root.name, experiment_uuid=config)
    await wait_experiment_done(stage_app)
    event.cancel()
    filename = str(tmp_path / 'serializer_data.h5')
    stage_app.app.ceed_data.save(filename=filename)
    merger = CeedMCSDataMerger(ceed_filename=filename, mcs_filename='')
    merger.read_ceed_data()
    merger.read_ceed_experiment_data('0')
    merger.parse_ceed_experiment_data()
    # logged data is one per frame (where e.g. in 12x each is still a frame)
    # when skipping, this data doesn't include skipped frames so we don't have
    # to filter them out here
    raw_data = merger.ceed_data_container.data
    clock_data = merger.ceed_data_container.clock_data
    short_count_data = merger.ceed_data_container.short_count_data
    short_count_max = 2 ** len(
        stage_app.app.data_serializer.short_count_indices)
    # bitmask covering the clock bit and all short-count bits
    clock_or_short = 1 << stage_app.app.data_serializer.clock_idx
    for i in stage_app.app.data_serializer.short_count_indices:
        clock_or_short |= 1 << i
    # clock and short count are one-per group of n_sub_frames
    for i, (raw_s, short_s, clock_s) in enumerate(
            zip(raw_data, short_count_data, clock_data)):
        root_frame_i = i // n_sub_frames
        if root_frame_i < len(expected_values):
            count, short, clock = expected_values[root_frame_i]
            if skip:
                # count may be different if frames are skipped
                assert raw_s & clock_or_short == short | clock
            else:
                assert raw_s == count | short | clock
        if root_frame_i % 2:
            assert not clock_s
        else:
            assert clock_s == 1
        assert short_s == root_frame_i % short_count_max
    # counter is one per frame, including sub frames
    n_skipped = sum(skip_counter) * n_sub_frames
    frame_counter = merger.ceed_data['frame_counter']
    if skip:
        assert len(frame_counter) > len(merger.ceed_data_container.counter)
        # last frame could have been indicated to be skipped, but stage ended
        assert len(flip_counter) + n_skipped \
            >= len(merger.ceed_data_container.counter)
    else:
        assert len(frame_counter) == len(merger.ceed_data_container.counter)
        assert np.all(merger.ceed_data_container.counter == np.arange(
            1, 1 + len(raw_data)))
        assert not n_skipped
        assert np.all(
            merger.ceed_data_container.counter == np.asarray(flip_counter))
    # even when skipping frames, we should have sent enough frames to not cut
    # off handshake (ideally)
    n_bytes_per_int = stage_app.app.data_serializer.counter_bit_width // 8
    config += b'\0' * (n_bytes_per_int - len(config) % n_bytes_per_int)
    if skip:
        # can't assume message was sent full in case of dropped frames
        if merger.ceed_data_container.handshake_data:
            assert merger.ceed_data_container.expected_handshake_len \
                == len(config)
        assert 0 <= merger.ceed_data_container.expected_handshake_len <= 50
        assert config.startswith(merger.ceed_data_container.handshake_data)
    else:
        assert merger.ceed_data_container.expected_handshake_len == len(config)
        assert merger.ceed_data_container.handshake_data == config
| |
import os
import random
import time
from io import BytesIO
from tempfile import mkdtemp
from shutil import rmtree
from unittest import mock
from urllib.parse import urlparse
from twisted.trial import unittest
from twisted.internet import defer
from scrapy.pipelines.files import FilesPipeline, FSFilesStore, S3FilesStore, GCSFilesStore, FTPFilesStore
from scrapy.item import Item, Field
from scrapy.http import Request, Response
from scrapy.settings import Settings
from scrapy.utils.test import assert_aws_environ, get_s3_content_and_delete
from scrapy.utils.test import assert_gcs_environ, get_gcs_content_and_delete
from scrapy.utils.test import get_ftp_content_and_delete
from scrapy.utils.boto import is_botocore
def _mocked_download_func(request, info):
response = request.meta.get('response')
return response() if callable(response) else response
class FilesPipelineTestCase(unittest.TestCase):
    """Exercise FilesPipeline against a filesystem store in a temp dir.

    Downloads are mocked via ``_mocked_download_func`` so no network I/O
    happens; each test gets a fresh store directory.
    """
    def setUp(self):
        # Fresh FILES_STORE directory per test; removed again in tearDown.
        self.tempdir = mkdtemp()
        self.pipeline = FilesPipeline.from_settings(Settings({'FILES_STORE': self.tempdir}))
        self.pipeline.download_func = _mocked_download_func
        self.pipeline.open_spider(None)
    def tearDown(self):
        rmtree(self.tempdir)
    def test_file_path(self):
        # file_path() must derive a stable SHA1-based path from the request
        # URL, preserving a recognised extension when present.
        file_path = self.pipeline.file_path
        self.assertEqual(file_path(Request("https://dev.mydeco.com/mydeco.pdf")),
                         'full/c9b564df929f4bc635bdd19fde4f3d4847c757c5.pdf')
        self.assertEqual(file_path(Request("http://www.maddiebrown.co.uk///catalogue-items//image_54642_12175_95307.txt")),
                         'full/4ce274dd83db0368bafd7e406f382ae088e39219.txt')
        self.assertEqual(file_path(Request("https://dev.mydeco.com/two/dirs/with%20spaces%2Bsigns.doc")),
                         'full/94ccc495a17b9ac5d40e3eabf3afcb8c2c9b9e1a.doc')
        self.assertEqual(file_path(Request("http://www.dfsonline.co.uk/get_prod_image.php?img=status_0907_mdm.jpg")),
                         'full/4507be485f38b0da8a0be9eb2e1dfab8a19223f2.jpg')
        # URLs with no usable extension yield a bare hash (no suffix).
        self.assertEqual(file_path(Request("http://www.dorma.co.uk/images/product_details/2532/")),
                         'full/97ee6f8a46cbbb418ea91502fd24176865cf39b2')
        self.assertEqual(file_path(Request("http://www.dorma.co.uk/images/product_details/2532")),
                         'full/244e0dd7d96a3b7b01f54eded250c9e272577aa1')
        # response/info arguments must not change the computed path.
        self.assertEqual(file_path(Request("http://www.dorma.co.uk/images/product_details/2532"),
                                   response=Response("http://www.dorma.co.uk/images/product_details/2532"),
                                   info=object()),
                         'full/244e0dd7d96a3b7b01f54eded250c9e272577aa1')
        # Unrecognised multi-dot suffixes are dropped rather than kept.
        self.assertEqual(file_path(Request("http://www.dfsonline.co.uk/get_prod_image.php?img=status_0907_mdm.jpg.bohaha")),
                         'full/76c00cef2ef669ae65052661f68d451162829507')
        # data: URIs get an extension derived from the declared media type.
        self.assertEqual(file_path(Request("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAR0AAACxCAMAAADOHZloAAACClBMVEX/\
//+F0tzCwMK76ZKQ21AMqr7oAAC96JvD5aWM2kvZ78J0N7fmAAC46Y4Ap7y")),
                         'full/178059cbeba2e34120a67f2dc1afc3ecc09b61cb.png')
    def test_fs_store(self):
        # The default store for a plain directory URI is FSFilesStore and it
        # maps keys onto OS-native paths below the base directory.
        assert isinstance(self.pipeline.store, FSFilesStore)
        self.assertEqual(self.pipeline.store.basedir, self.tempdir)
        path = 'some/image/key.jpg'
        fullpath = os.path.join(self.tempdir, 'some', 'image', 'key.jpg')
        self.assertEqual(self.pipeline.store._get_filesystem_path(path), fullpath)
    @defer.inlineCallbacks
    def test_file_not_expired(self):
        # A freshly modified stored file must be reused: the cached checksum
        # 'abc' is returned instead of re-downloading.
        item_url = "http://example.com/file.pdf"
        item = _create_item_with_files(item_url)
        patchers = [
            mock.patch.object(FilesPipeline, 'inc_stats', return_value=True),
            mock.patch.object(FSFilesStore, 'stat_file', return_value={
                'checksum': 'abc', 'last_modified': time.time()}),
            mock.patch.object(FilesPipeline, 'get_media_requests',
                              return_value=[_prepare_request_object(item_url)])
        ]
        for p in patchers:
            p.start()
        result = yield self.pipeline.process_item(item, None)
        self.assertEqual(result['files'][0]['checksum'], 'abc')
        for p in patchers:
            p.stop()
    @defer.inlineCallbacks
    def test_file_expired(self):
        # A file older than twice the expiry window must be re-downloaded,
        # so the resulting checksum differs from the stale cached one.
        item_url = "http://example.com/file2.pdf"
        item = _create_item_with_files(item_url)
        patchers = [
            mock.patch.object(FSFilesStore, 'stat_file', return_value={
                'checksum': 'abc',
                'last_modified': time.time() - (self.pipeline.expires * 60 * 60 * 24 * 2)}),
            mock.patch.object(FilesPipeline, 'get_media_requests',
                              return_value=[_prepare_request_object(item_url)]),
            mock.patch.object(FilesPipeline, 'inc_stats', return_value=True)
        ]
        for p in patchers:
            p.start()
        result = yield self.pipeline.process_item(item, None)
        self.assertNotEqual(result['files'][0]['checksum'], 'abc')
        for p in patchers:
            p.stop()
class FilesPipelineTestCaseFields(unittest.TestCase):
    """Verify which item fields the pipeline reads URLs from / writes results to."""
    def test_item_fields_default(self):
        # Default field names: read 'file_urls', write 'files'.
        # Both Item subclasses and plain dicts must be supported.
        class TestItem(Item):
            name = Field()
            file_urls = Field()
            files = Field()
        for cls in TestItem, dict:
            url = 'http://www.example.com/files/1.txt'
            item = cls({'name': 'item1', 'file_urls': [url]})
            pipeline = FilesPipeline.from_settings(Settings({'FILES_STORE': 's3://example/files/'}))
            requests = list(pipeline.get_media_requests(item, None))
            self.assertEqual(requests[0].url, url)
            results = [(True, {'url': url})]
            pipeline.item_completed(results, item, None)
            self.assertEqual(item['files'], [results[0][1]])
    def test_item_fields_override_settings(self):
        # FILES_URLS_FIELD / FILES_RESULT_FIELD settings must redirect both
        # the input URLs field and the output results field.
        class TestItem(Item):
            name = Field()
            files = Field()
            stored_file = Field()
        for cls in TestItem, dict:
            url = 'http://www.example.com/files/1.txt'
            item = cls({'name': 'item1', 'files': [url]})
            pipeline = FilesPipeline.from_settings(Settings({
                'FILES_STORE': 's3://example/files/',
                'FILES_URLS_FIELD': 'files',
                'FILES_RESULT_FIELD': 'stored_file'
            }))
            requests = list(pipeline.get_media_requests(item, None))
            self.assertEqual(requests[0].url, url)
            results = [(True, {'url': url})]
            pipeline.item_completed(results, item, None)
            self.assertEqual(item['stored_file'], [results[0][1]])
class FilesPipelineTestCaseCustomSettings(unittest.TestCase):
    """Check precedence of settings vs. class attributes for FilesPipeline.

    Each triple in ``file_cls_attr_settings_map`` is
    (class attribute, settings key, instance attribute).
    """
    default_cls_settings = {
        "EXPIRES": 90,
        "FILES_URLS_FIELD": "file_urls",
        "FILES_RESULT_FIELD": "files"
    }
    file_cls_attr_settings_map = {
        ("EXPIRES", "FILES_EXPIRES", "expires"),
        ("FILES_URLS_FIELD", "FILES_URLS_FIELD", "files_urls_field"),
        ("FILES_RESULT_FIELD", "FILES_RESULT_FIELD", "files_result_field")
    }
    def setUp(self):
        self.tempdir = mkdtemp()
    def tearDown(self):
        rmtree(self.tempdir)
    def _generate_fake_settings(self, prefix=None):
        """Build a random settings dict; with *prefix*, prefix every key
        except FILES_STORE (which scrapy always reads unprefixed)."""
        def random_string():
            # Bug fix: randint is inclusive on both ends, so the original
            # randint(97, 123) could emit chr(123) == '{'; 97..122 is 'a'..'z'.
            return "".join([chr(random.randint(97, 122)) for _ in range(10)])
        settings = {
            "FILES_EXPIRES": random.randint(100, 1000),
            "FILES_URLS_FIELD": random_string(),
            "FILES_RESULT_FIELD": random_string(),
            "FILES_STORE": self.tempdir
        }
        if not prefix:
            return settings
        return {prefix.upper() + "_" + k if k != "FILES_STORE" else k: v for k, v in settings.items()}
    def _generate_fake_pipeline(self):
        """Return a FilesPipeline subclass overriding every class attribute."""
        class UserDefinedFilePipeline(FilesPipeline):
            EXPIRES = 1001
            FILES_URLS_FIELD = "alfa"
            FILES_RESULT_FIELD = "beta"
        return UserDefinedFilePipeline
    def test_different_settings_for_different_instances(self):
        """
        If there are different instances with different settings they should keep
        different settings.
        """
        custom_settings = self._generate_fake_settings()
        another_pipeline = FilesPipeline.from_settings(Settings(custom_settings))
        one_pipeline = FilesPipeline(self.tempdir)
        for pipe_attr, settings_attr, pipe_ins_attr in self.file_cls_attr_settings_map:
            default_value = self.default_cls_settings[pipe_attr]
            self.assertEqual(getattr(one_pipeline, pipe_attr), default_value)
            custom_value = custom_settings[settings_attr]
            self.assertNotEqual(default_value, custom_value)
            self.assertEqual(getattr(another_pipeline, pipe_ins_attr), custom_value)
    def test_subclass_attributes_preserved_if_no_settings(self):
        """
        If subclasses override class attributes and there are no special settings those values should be kept.
        """
        pipe_cls = self._generate_fake_pipeline()
        pipe = pipe_cls.from_settings(Settings({"FILES_STORE": self.tempdir}))
        for pipe_attr, settings_attr, pipe_ins_attr in self.file_cls_attr_settings_map:
            custom_value = getattr(pipe, pipe_ins_attr)
            self.assertNotEqual(custom_value, self.default_cls_settings[pipe_attr])
            self.assertEqual(getattr(pipe, pipe_ins_attr), getattr(pipe, pipe_attr))
    def test_subclass_attrs_preserved_custom_settings(self):
        """
        If file settings are defined but they are not defined for subclass
        settings should be preserved.
        """
        pipeline_cls = self._generate_fake_pipeline()
        settings = self._generate_fake_settings()
        pipeline = pipeline_cls.from_settings(Settings(settings))
        for pipe_attr, settings_attr, pipe_ins_attr in self.file_cls_attr_settings_map:
            value = getattr(pipeline, pipe_ins_attr)
            setting_value = settings.get(settings_attr)
            self.assertNotEqual(value, self.default_cls_settings[pipe_attr])
            self.assertEqual(value, setting_value)
    def test_no_custom_settings_for_subclasses(self):
        """
        If there are no settings for subclass and no subclass attributes, pipeline should use
        attributes of base class.
        """
        class UserDefinedFilesPipeline(FilesPipeline):
            pass
        user_pipeline = UserDefinedFilesPipeline.from_settings(Settings({"FILES_STORE": self.tempdir}))
        for pipe_attr, settings_attr, pipe_ins_attr in self.file_cls_attr_settings_map:
            # Values from settings for custom pipeline should be set on pipeline instance.
            custom_value = self.default_cls_settings.get(pipe_attr.upper())
            self.assertEqual(getattr(user_pipeline, pipe_ins_attr), custom_value)
    def test_custom_settings_for_subclasses(self):
        """
        If there are custom settings for subclass and NO class attributes, pipeline should use custom
        settings.
        """
        class UserDefinedFilesPipeline(FilesPipeline):
            pass
        prefix = UserDefinedFilesPipeline.__name__.upper()
        settings = self._generate_fake_settings(prefix=prefix)
        user_pipeline = UserDefinedFilesPipeline.from_settings(Settings(settings))
        for pipe_attr, settings_attr, pipe_inst_attr in self.file_cls_attr_settings_map:
            # Values from settings for custom pipeline should be set on pipeline instance.
            custom_value = settings.get(prefix + "_" + settings_attr)
            self.assertNotEqual(custom_value, self.default_cls_settings[pipe_attr])
            self.assertEqual(getattr(user_pipeline, pipe_inst_attr), custom_value)
    def test_custom_settings_and_class_attrs_for_subclasses(self):
        """
        If there are custom settings for subclass AND class attributes
        setting keys are preferred and override attributes.
        """
        pipeline_cls = self._generate_fake_pipeline()
        prefix = pipeline_cls.__name__.upper()
        settings = self._generate_fake_settings(prefix=prefix)
        user_pipeline = pipeline_cls.from_settings(Settings(settings))
        for pipe_cls_attr, settings_attr, pipe_inst_attr in self.file_cls_attr_settings_map:
            custom_value = settings.get(prefix + "_" + settings_attr)
            self.assertNotEqual(custom_value, self.default_cls_settings[pipe_cls_attr])
            self.assertEqual(getattr(user_pipeline, pipe_inst_attr), custom_value)
    def test_cls_attrs_with_DEFAULT_prefix(self):
        # DEFAULT_-prefixed class attributes are an alternative override path.
        class UserDefinedFilesPipeline(FilesPipeline):
            DEFAULT_FILES_RESULT_FIELD = "this"
            DEFAULT_FILES_URLS_FIELD = "that"
        pipeline = UserDefinedFilesPipeline.from_settings(Settings({"FILES_STORE": self.tempdir}))
        self.assertEqual(pipeline.files_result_field, "this")
        self.assertEqual(pipeline.files_urls_field, "that")
    def test_user_defined_subclass_default_key_names(self):
        """Test situation when user defines subclass of FilesPipeline,
        but uses attribute names for default pipeline (without prefixing
        them with pipeline class name).
        """
        settings = self._generate_fake_settings()
        class UserPipe(FilesPipeline):
            pass
        pipeline_cls = UserPipe.from_settings(Settings(settings))
        for pipe_attr, settings_attr, pipe_inst_attr in self.file_cls_attr_settings_map:
            expected_value = settings.get(settings_attr)
            self.assertEqual(getattr(pipeline_cls, pipe_inst_attr),
                             expected_value)
class TestS3FilesStore(unittest.TestCase):
    """Round-trip a file through S3FilesStore against a real bucket.

    Skipped unless AWS credentials and S3_TEST_FILE_URI are configured.
    """
    @defer.inlineCallbacks
    def test_persist(self):
        assert_aws_environ()
        uri = os.environ.get('S3_TEST_FILE_URI')
        if not uri:
            raise unittest.SkipTest("No S3 URI available for testing")
        data = b"TestS3FilesStore: \xe2\x98\x83"
        buf = BytesIO(data)
        meta = {'foo': 'bar'}
        path = ''
        store = S3FilesStore(uri)
        yield store.persist_file(
            path, buf, info=None, meta=meta,
            headers={'Content-Type': 'image/png'})
        s = yield store.stat_file(path, info=None)
        self.assertIn('last_modified', s)
        self.assertIn('checksum', s)
        # MD5 of the fixed payload above.
        self.assertEqual(s['checksum'], '3187896a9657a28163abb31667df64c8')
        u = urlparse(uri)
        content, key = get_s3_content_and_delete(
            u.hostname, u.path[1:], with_key=True)
        self.assertEqual(content, data)
        # botocore and boto expose the stored key metadata differently.
        if is_botocore():
            self.assertEqual(key['Metadata'], {'foo': 'bar'})
            self.assertEqual(
                key['CacheControl'], S3FilesStore.HEADERS['Cache-Control'])
            self.assertEqual(key['ContentType'], 'image/png')
        else:
            self.assertEqual(key.metadata, {'foo': 'bar'})
            self.assertEqual(
                key.cache_control, S3FilesStore.HEADERS['Cache-Control'])
            self.assertEqual(key.content_type, 'image/png')
class TestGCSFilesStore(unittest.TestCase):
    """Round-trip a file through GCSFilesStore against a real GCS bucket.

    Skipped unless GCS credentials and GCS_TEST_FILE_URI are configured.
    """
    @defer.inlineCallbacks
    def test_persist(self):
        assert_gcs_environ()
        uri = os.environ.get('GCS_TEST_FILE_URI')
        if not uri:
            raise unittest.SkipTest("No GCS URI available for testing")
        data = b"TestGCSFilesStore: \xe2\x98\x83"
        buf = BytesIO(data)
        meta = {'foo': 'bar'}
        path = 'full/filename'
        store = GCSFilesStore(uri)
        # authenticatedRead should surface as a READER grant for
        # allAuthenticatedUsers on the blob's ACL.
        store.POLICY = 'authenticatedRead'
        expected_policy = {'role': 'READER', 'entity': 'allAuthenticatedUsers'}
        yield store.persist_file(path, buf, info=None, meta=meta, headers=None)
        s = yield store.stat_file(path, info=None)
        self.assertIn('last_modified', s)
        self.assertIn('checksum', s)
        # Base64-encoded MD5 of the fixed payload above.
        self.assertEqual(s['checksum'], 'zc2oVgXkbQr2EQdSdw3OPA==')
        u = urlparse(uri)
        content, acl, blob = get_gcs_content_and_delete(u.hostname, u.path[1:]+path)
        self.assertEqual(content, data)
        self.assertEqual(blob.metadata, {'foo': 'bar'})
        self.assertEqual(blob.cache_control, GCSFilesStore.CACHE_CONTROL)
        self.assertEqual(blob.content_type, 'application/octet-stream')
        self.assertIn(expected_policy, acl)
class TestFTPFileStore(unittest.TestCase):
    """Round-trip a file through FTPFilesStore against a real FTP server.

    Skipped unless FTP_TEST_FILE_URI is configured.
    """
    @defer.inlineCallbacks
    def test_persist(self):
        uri = os.environ.get('FTP_TEST_FILE_URI')
        if not uri:
            raise unittest.SkipTest("No FTP URI available for testing")
        data = b"TestFTPFilesStore: \xe2\x98\x83"
        buf = BytesIO(data)
        meta = {'foo': 'bar'}
        path = 'full/filename'
        store = FTPFilesStore(uri)
        # Before persisting, stat_file must report "not stored" as {}.
        empty_dict = yield store.stat_file(path, info=None)
        self.assertEqual(empty_dict, {})
        yield store.persist_file(path, buf, info=None, meta=meta, headers=None)
        stat = yield store.stat_file(path, info=None)
        self.assertIn('last_modified', stat)
        self.assertIn('checksum', stat)
        # MD5 of the fixed payload above.
        self.assertEqual(stat['checksum'], 'd113d66b2ec7258724a268bd88eef6b6')
        path = '%s/%s' % (store.basedir, path)
        content = get_ftp_content_and_delete(
            path, store.host, store.port,
            store.username, store.password, store.USE_ACTIVE_MODE)
        self.assertEqual(data.decode(), content)
class ItemWithFiles(Item):
    # Minimal item carrying the default field names FilesPipeline expects.
    file_urls = Field()
    files = Field()
def _create_item_with_files(*files):
    """Return an :class:`ItemWithFiles` whose ``file_urls`` holds *files*.

    The positional-args tuple is stored as-is, matching how the pipeline
    iterates the URLs field.
    """
    populated = ItemWithFiles()
    populated['file_urls'] = files
    return populated
def _prepare_request_object(item_url):
    """Build a Request for *item_url* carrying its canned 200 Response.

    The response is stashed in ``meta['response']`` where
    ``_mocked_download_func`` retrieves it.
    """
    canned_response = Response(item_url, status=200, body=b'data')
    return Request(item_url, meta={'response': canned_response})
if __name__ == "__main__":
    # Allow running this test module directly with the stdlib runner.
    unittest.main()
| |
"""
Implementation of the overall simulation process, divided into steps.
The simulation is split into phases or steps, and the sequence and actions of
each step are implemented in the phases module. Each step is implemented as a
class with a ``do`` method where the processing for that step takes place. They
all inherit from a common interface.
Phases:
CoastalPlanning
Hearing
GovernmentDecision
Fishing
Building
Learning
"""
import vote
import priority
import entities
import plan
import numpy
class Round(object):
    """Drives the simulation's phase sequence, one ``next()`` call per step.

    The phase chain is built back-to-front so each phase can reference its
    successor; GOVDECISION's successor depends on the decision made
    (approve -> fishing, review -> replanning).
    """
    def __init__(self, info):
        self.info = info
        # Linear tail of the round: FISHING1 -> BUILDING -> FISHING2 -> LEARNING.
        LEARNING    = Learning(info, None, "LEARNING")
        FISHING2    = Fishing(info, LEARNING, "FISHING2")
        BUILDING    = Building(info, FISHING2, "BUILDING")
        FISHING1    = Fishing(info, BUILDING, "FISHING1")
        GOVDECISION = GovernmentDecision(info, None, "GOVDECISION")
        HEARING     = Hearing(info, GOVDECISION, "HEARING")
        COASTPLAN   = CoastalPlanning(info, HEARING, "COASTPLAN")
        # Decision branch: approval proceeds to fishing, review loops back
        # to coastal planning.
        GOVDECISION.set_next_table({
            plan.Decision.APPROVE: FISHING1,
            plan.Decision.REVIEW:  COASTPLAN
        })
        self._current_step = self._start = COASTPLAN
        self._round_counter = 0
        self._step_counter = 0
    def next(self):
        """Execute the current step, log its statistics and advance.

        When the chain runs out (successor is None) a new round begins.
        Returns the StepResult produced by the executed step.
        """
        self._step_counter += 1
        result = self._current_step.action(
            self._round_counter,
            self._step_counter
        )
        self.info.logger.add_phase_statistics(
            self._round_counter,
            result.data.get("statistics", {})
        )
        self._current_step = self._current_step.next()
        if self._current_step is None:
            self.new_round()
        return result
    def new_round(self):
        """Write round-level logs, reset counters/agents, restart the chain."""
        self.info.logger.write_round_statistics(
            self._round_counter,
            self.info.map,
            self.info.directory.get_agents(type = entities.Aquaculture)
        )
        self.info.logger.write_round(self._round_counter)
        self._step_counter = 0
        self._round_counter += 1
        self._current_step = self._start
        # reset per-round agent state
        for a in self.info.directory.get_agents(): a.round_reset()
    def current(self):
        # Name of the step that will run on the next call to next().
        return self._current_step.name
    def rounds(self):
        # Number of completed rounds.
        return self._round_counter
class StepResult(object):
    """Value object summarizing what happened during one simulation step.

    NOTE(review): the classmethod ``cells_changed`` shares its name with the
    instance attribute of the same name; instance attributes shadow the
    classmethod on instances, so attribute access still works.
    """
    def __init__(self, phase, messages_sent, cells_changed, world_map, data,
                 round_number, votes):
        self.phase = phase
        self.messages_sent = messages_sent
        self.cells_changed = cells_changed
        self.world_map = world_map
        self.data = data
        self.round_number = round_number
        self.votes = votes

    @classmethod
    def cells_changed(cls, phase, cells_changed, world_map, data, round):
        """Result for a step that altered map cells but sent no messages
        and cast no votes."""
        return cls(phase, [], cells_changed, world_map, data, round, {})

    @classmethod
    def no_cells_changed(cls, phase, world_map, data, round):
        """Result for a step with no cell changes, messages or votes."""
        return cls(phase, [], [], world_map, data, round, {})

    @classmethod
    def votes_cast(cls, phase, world_map, data, round, votes):
        """Result for a step whose only outcome is a set of cast votes."""
        return cls(phase, [], [], world_map, data, round, votes)
## Abstract Step classes ##
class Step(object):
    """Abstract base class for a single simulation phase.

    Subclasses override ``do(round, step)`` and return a StepResult.
    ``next`` here intentionally shadows the builtin to keep the public
    signature unchanged for existing callers.
    """
    def __init__(self, info, next, name):
        self.info = info
        self._next = next
        self.name = name
    def do(self, round, step):
        """Perform this phase's processing; must be overridden.

        Bug fix: the original raised ``NotImplementedException``, a name
        that does not exist and would have surfaced as a NameError.
        """
        raise NotImplementedError()
    def next(self):
        """Return the step to execute after this one (may be None)."""
        return self._next
    def action(self, round, step):
        """Run ``do`` while recording messages sent through the directory,
        then attach the recorded messages to the result."""
        self.info.directory.start_recording()
        result = self.do(round, step)
        result.messages = self.info.directory.stop_recording()
        return result
class DecisionStep(Step):
    """A Step whose successor depends on a decision made during ``do``.

    ``do`` must return a ``(StepResult, decision)`` pair; the decision is
    looked up in the next-table to pick the following step.
    """
    def __init__(self, info, next_table, name):
        Step.__init__(self, info, None, name)
        self.set_next_table(next_table)
        self._decision_value = None
    def set_next_table(self, next_table):
        """Install the mapping from decision values to successor steps."""
        self._next_table = next_table
    def action(self, round, step):
        """Run ``do``, record its decision, attach recorded messages."""
        directory = self.info.directory
        directory.start_recording()
        outcome, decision = self.do(round, step)
        self.decide(decision)
        outcome.messages = directory.stop_recording()
        return outcome
    def decide(self, value):
        """Remember the decision that selects the next step."""
        self._decision_value = value
    def next(self):
        """Look up the successor associated with the recorded decision."""
        return self._next_table[self._decision_value]
## Concrete Step Implementations ##
class CoastalPlanning(Step):
    """Phase where the municipality drafts a coastal plan, taking the
    government's approved complaints into account."""
    def do(self, round, step):
        data = {"statistics": {}}
        # Log each agent's vote/fitness relation before planning starts.
        for a in self.info.directory.get_agents():
            self.info.logger.vote_fitness_relation(round, a)
        municipality = self.info.directory.get_municipality()
        plan = municipality.coastal_planning(
            self.info.map,
            self.info.directory.get_government().get_approved_complaints()
        )
        data["statistics"]["planned aquaculture sites"] = {
            "mode": "set",
            "value": float(len(plan.aquaculture_sites()))
        }
        return StepResult.no_cells_changed(self, self.info.map, data, round)
class Hearing(Step):
    """Phase where voting agents review the plan and may file complaints."""
    def do(self, round, step):
        data = {"statistics": {}}
        self.info.directory.get_government().new_vote_round()
        votes = {}
        # Collect each voting agent's votes and log its complaint count.
        for agent in self.info.directory.get_voting_agents():
            votes[agent] = agent.hearing(
                self.info.map
            )
            self.info.logger.add_vote(round, agent,
                len([v for v in votes[agent] if v.is_complaint()]))
        # Aggregated statistic, accumulated across steps ("add" mode).
        data["statistics"]["average number of complaints"] = {
            "mode": "add",
            "value": numpy.mean(
                [len([v for v in votes[a] if v.is_complaint()]) for a in votes]
            )
        }
        # Same average, but recorded per-step ("set" mode) and not plotted.
        data["statistics"]["average number of complaints step %d" % step] = {
            "mode": "set",
            "value": numpy.mean(
                [len([v for v in votes[a] if v.is_complaint()]) for a in votes]
            ),
            "plot": False
        }
        return StepResult.votes_cast(self, self.info.map, data,
            round, votes)
class GovernmentDecision(DecisionStep):
    """Phase where the government approves the plan or sends it back for
    review; the decision selects the next phase via the next-table."""
    def do(self, round, step):
        decision = self.info.directory.get_government().voting_decision()
        result = StepResult.no_cells_changed(self, self.info.map, {}, round)
        return (result, decision)
class Fishing(Step):
    """Phase where fishermen relocate and all working agents earn income."""
    def do(self, round, step):
        data = {"statistics": {}}
        # Agents do profit activities.
        # (Removed an unused ``government`` local that was fetched but
        # never referenced.)
        working_agents = \
            self.info.directory.get_agents(type = entities.Fisherman) + \
            self.info.directory.get_agents(type = entities.Aquaculture)
        fishermen = self.info.directory.get_agents(type = entities.Fisherman)
        affected_cells = []
        # Fishermen may move to a better spot; both old and new cells are
        # reported as changed.
        for f in fishermen:
            old_home = f.home
            f.find_fishing_spot(self.info.map)
            if old_home != f.home:
                affected_cells.extend([f.home, old_home])
        for a in working_agents:
            a.work()
        agent_type_labels = [
            (entities.Fisherman, "fisherman")
        ]
        # Record average capital per tracked agent type (guard against
        # empty agent lists to avoid numpy.mean on []).
        for t, l in agent_type_labels:
            agents = self.info.directory.get_agents(type = t)
            if len(agents) > 0:
                data["statistics"]["average %s capital" % l] = {
                    "mode": "set",
                    "value":
                        numpy.mean(
                            [a.capital for a in agents]
                        )
                }
        # (Local) Aquaculture companies pay some of their revenue to locals
        # through taxation
        for a in self.info.directory.get_agents(type = entities.Aquaculture):
            a.pay_taxes()
        return StepResult.cells_changed(self, affected_cells, self.info.map,
            data, round)
class Building(Step):
    """Phase where licensed aquaculture sites are placed on the map."""
    def do(self, round, step):
        data = {"statistics": {}}
        government = self.info.directory.get_government()
        municipality = self.info.directory.get_municipality()
        licenses = government.distribute_licenses()
        spawner = self.info.aquaculture_spawner
        plan = municipality.get_plan()
        affected_cells = []
        # One attempted placement per license; choose_cell may return None
        # when no suitable cell remains, in which case the license is unused.
        for license in licenses:
            location = spawner.choose_cell(plan)
            if location is not None:  # idiom fix: was ``not location is None``
                agent = spawner.create(
                    self.info.agent_factory,
                    location
                )
                affected_cells.append(location)
                affected_cells.extend(self.info.map.build_aquaculture(
                    agent,
                    location
                ))
        cells = self.info.map.get_all_cells()
        data["statistics"]["total fish quantity"] = {
            "value": float(sum(cell.get_fish_quantity() for cell in cells))
        }
        data["statistics"]["number of aquacultures"] = {
            "value": float(len(
                self.info.directory.get_agents(type = entities.Aquaculture)
            ))
        }
        data["statistics"]["unblocked cells"] = {
            "plot": False,
            "value": float(sum(1 for e in cells if not e.is_blocked()))
        }
        return StepResult.cells_changed(self, affected_cells, self.info.map,
            data, round)
class Learning(Step):
    """Phase where every agent's fitness is computed and the configured
    learning mechanisms update agent behavior from those fitnesses."""
    def do(self, round, step):
        data_dict = {"average fitness": {}, "statistics": {}}
        dir = self.info.directory
        all_agents = dir.get_agents()
        fishermen = dir.get_agents(type = entities.Fisherman)
        # "Community" excludes tourists (deliberately commented out below).
        community_members = \
            dir.get_agents(type = entities.Fisherman) + \
            dir.get_agents(type = entities.Aquaculture) + \
            dir.get_agents(type = entities.Civilian)# + \
            #dir.get_agents(type = entities.Tourist)
        market = self.info.market
        world_map = self.info.map
        aquaculture_agents = dir.get_agents(type = entities.Aquaculture)
        # Fitness = each agent's satisfaction with its priorities given the
        # current influences (market, community, map, aquacultures).
        fitnesses = {
            agent: agent.get_priorities_satisfaction(
                priority.Influences(
                    agent, all_agents, market, community_members, fishermen,
                    world_map, aquaculture_agents
                )
            ) for agent in self.info.directory.get_agents()
        }
        # record average fitness
        for t, l in [(entities.Fisherman, "fisherman")]:
            data_dict["average fitness"][l] = numpy.mean(
                [fitnesses[a] for a in fitnesses if a.__class__ == t]
            )
        # average of all fitnesses
        for t, l in [(entities.Fisherman, "fisherman")]:
            data_dict["statistics"][u"average %s fitness" % l] = { # f: 20^f / 20
                "value": numpy.mean(
                    [fitnesses[a] for a in fitnesses if a.__class__ == t]
                )
            }
        # log fitness
        for agent in fitnesses:
            self.info.logger.add_fitness(round, agent, fitnesses[agent])
        # Each learning mechanism updates its group of agents.
        for group in self.info.learning_mechanisms:
            self.info.learning_mechanisms[group].learn(fitnesses)
        return StepResult.no_cells_changed(self, self.info.map, data_dict,
            round)
| |
#
# This file is part of pySMT.
#
# Copyright 2014 Andrea Micheli and Marco Gario
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import atexit
from warnings import warn
from six.moves import xrange
from pysmt.exceptions import SolverAPINotFound
try:
import yicespy
except ImportError:
raise SolverAPINotFound
from pysmt.solvers.eager import EagerModel
from pysmt.solvers.solver import Solver, Converter, SolverOptions
from pysmt.solvers.smtlib import SmtLibBasicSolver, SmtLibIgnoreMixin
from pysmt.walkers import DagWalker
from pysmt.exceptions import SolverReturnedUnknownResultError
from pysmt.exceptions import (InternalSolverError, NonLinearError,
PysmtValueError, PysmtTypeError)
from pysmt.decorators import clear_pending_pop, catch_conversion_error
from pysmt.constants import Fraction, is_pysmt_integer
import pysmt.logics
# Initialization
def init():
    """Initialize the yices library exactly once per process.

    The "already initialized" flag is stored as an attribute on the
    function object itself.
    """
    if not getattr(init, 'initialized', False):
        yicespy.yices_init()
        init.initialized = True
def reset_yices():
    """Reset the global yices state, then re-run one-shot initialization."""
    yicespy.yices_reset()
    init()
@atexit.register
def cleanup():
    # Release yices' global resources at interpreter shutdown.
    yicespy.yices_exit()
# Yices constants
# Numeric values mirror yices' smt_status_t returned by yices_check_context.
STATUS_UNKNOWN = 2
STATUS_SAT = 3
STATUS_UNSAT = 4
def yices_logic(pysmt_logic):
    """Return a Yices String representing the given pySMT logic."""
    name = str(pysmt_logic)
    # Yices spells the pure-boolean logic "NONE" rather than "QF_BOOL".
    return "NONE" if name == "QF_BOOL" else name
class YicesOptions(SolverOptions):
    """Solver options adapter translating pySMT options into yices config
    settings and search parameters."""
    def __init__(self, **base_options):
        SolverOptions.__init__(self, **base_options)
        # TODO: Yices Supports UnsatCore extraction
        # but we did not wrapped it yet.
        if self.unsat_cores_mode is not None:
            raise PysmtValueError("'unsat_cores_mode' option not supported.")
    @staticmethod
    def _set_option(cfg, name, value):
        # Set a config-time option; unknown names are tolerated because
        # they may be search parameters applied later (see set_params).
        rv = yicespy.yices_set_config(cfg, name, value)
        if rv != 0:
            # This might be a parameter to be set later (see set_params)
            # We raise the exception only if the parameter exists but the value
            # provided to the parameter is invalid.
            err = yicespy.yices_error_code()
            if err == yicespy.CTX_INVALID_PARAMETER_VALUE:
                raise PysmtValueError("Error setting the option "
                                      "'%s=%s'" % (name,value))
    def __call__(self, solver):
        """Apply config-time options to *solver*'s yices_config."""
        if self.generate_models:
            # Yices always generates models
            pass
        if self.incremental:
            self._set_option(solver.yices_config, "mode", "push-pop")
        else:
            self._set_option(solver.yices_config, "mode", "one-shot")
        if self.random_seed is not None:
            self._set_option(solver.yices_config,
                             "random-seed", str(self.random_seed))
        for k,v in self.solver_options.items():
            self._set_option(solver.yices_config, str(k), str(v))
    def set_params(self, solver):
        """Set Search Parameters.

        Yices makes a distinction between configuration and search
        parameters. The first are fixed for the lifetime of a
        context, while the latter can be different for every call to
        check_context.

        A list of available parameters is available at:
        http://yices.csl.sri.com/doc/parameters.html
        """
        params = yicespy.yices_new_param_record()
        yicespy.yices_default_params_for_context(solver.yices, params)
        for k,v in self.solver_options.items():
            rv = yicespy.yices_set_param(params, k, v)
            if rv != 0:
                raise PysmtValueError("Error setting the option '%s=%s'" % (k,v))
        solver.yices_params = params
class YicesSolver(Solver, SmtLibBasicSolver, SmtLibIgnoreMixin):
    """pySMT solver backed by the yices 2 API (via yicespy).

    Supports the quantifier-free, linear, non-array pySMT logics.
    """
    LOGICS = pysmt.logics.PYSMT_QF_LOGICS - pysmt.logics.ARRAYS_LOGICS -\
             set(l for l in pysmt.logics.PYSMT_QF_LOGICS if not l.theory.linear)
    OptionsClass = YicesOptions
    def __init__(self, environment, logic, **options):
        Solver.__init__(self,
                        environment=environment,
                        logic=logic,
                        **options)
        self.declarations = set()
        # Config must exist before options are applied; it is freed right
        # after the context is created from it.
        self.yices_config = yicespy.yices_new_config()
        if yicespy.yices_default_config_for_logic(self.yices_config,
                                                  yices_logic(logic)) != 0:
            warn("Error setting config for logic %s" % logic)
        self.options(self)
        self.yices = yicespy.yices_new_context(self.yices_config)
        self.options.set_params(self)
        yicespy.yices_free_config(self.yices_config)
        self.converter = YicesConverter(environment)
        self.mgr = environment.formula_manager
        self.model = None
        # Counts push() calls rejected because the context was UNSAT; see push().
        self.failed_pushes = 0
        return
    @clear_pending_pop
    def reset_assertions(self):
        """Discard all assertions from the yices context."""
        yicespy.yices_reset_context(self.yices)
    @clear_pending_pop
    def declare_variable(self, var):
        raise NotImplementedError
    @clear_pending_pop
    def add_assertion(self, formula, named=None):
        """Assert *formula* in the context; ``named`` is accepted for API
        compatibility but ignored."""
        self._assert_is_boolean(formula)
        term = self.converter.convert(formula)
        code = yicespy.yices_assert_formula(self.yices, term)
        if code != 0:
            msg = yicespy.yices_error_string()
            # Non-linear arithmetic is reported as a dedicated pySMT error.
            if code == -1 and "non-linear arithmetic" in msg:
                raise NonLinearError(formula)
            raise InternalSolverError("Yices returned non-zero code upon assert"\
                                      ": %s (code: %s)" % \
                                      (msg, code))
    def get_model(self):
        """Return an EagerModel with a value for every compatible symbol."""
        assignment = {}
        # MG: This iteration is probelmatic, since it assumes that all
        # defined symbols have a type that is compatible with this
        # solver. In this case, the problem occurs with Arrays and
        # Strings that are not supported.
        for s in self.environment.formula_manager.get_all_symbols():
            if s.is_term():
                if s.symbol_type().is_array_type(): continue
                v = self.get_value(s)
                assignment[s] = v
        return EagerModel(assignment=assignment, environment=self.environment)
    @clear_pending_pop
    def solve(self, assumptions=None):
        """Check satisfiability; assumptions are asserted on a temporary
        push level that is popped automatically (pending_pop)."""
        if assumptions is not None:
            self.push()
            self.add_assertion(self.mgr.And(assumptions))
            self.pending_pop = True
        out = yicespy.yices_check_context(self.yices, self.yices_params)
        # Any previous model is invalidated by a new check.
        if self.model is not None:
            yicespy.yices_free_model(self.model)
            self.model = None
        assert out in [STATUS_SAT, STATUS_UNSAT, STATUS_UNKNOWN]
        if out == STATUS_UNKNOWN:
            raise SolverReturnedUnknownResultError()
        elif out == STATUS_SAT:
            self.model = yicespy.yices_get_model(self.yices, 1)
            return True
        else:
            return False
    @clear_pending_pop
    def all_sat(self, important, callback):
        raise NotImplementedError
    @clear_pending_pop
    def push(self, levels=1):
        for _ in xrange(levels):
            c = yicespy.yices_push(self.yices)
            if c != 0:
                # 4 is STATUS_UNSAT
                if yicespy.yices_context_status(self.yices) == 4:
                    # Yices fails to push if the context is in UNSAT state
                    # (It makes no sense to conjoin formulae to False)
                    # PySMT allows this and we support it by counting the
                    # spurious push calls
                    self.failed_pushes += 1
                else:
                    raise InternalSolverError("Error in push: %s" % \
                                              yicespy.yices_error_string())
    @clear_pending_pop
    def pop(self, levels=1):
        for _ in xrange(levels):
            # Balance any spurious pushes counted in push() before popping
            # the real yices stack.
            if self.failed_pushes > 0:
                self.failed_pushes -= 1
            else:
                c = yicespy.yices_pop(self.yices)
                if c != 0:
                    raise InternalSolverError("Error in pop: %s" % \
                                              yicespy.yices_error_string())
    def print_model(self, name_filter=None):
        """Print symbol/value pairs for declared symbols.

        NOTE(review): symbols whose name matches *name_filter* are
        excluded, not selected — preserved as-is; confirm intent.
        """
        for var in self.declarations:
            if name_filter is None or not var.symbol_name().startswith(name_filter):
                # Bug fix: the original passed the tuple as a second
                # argument to print() instead of %-formatting the string.
                print("%s = %s" % (var.symbol_name(), self.get_value(var)))
    def _check_error(self, res):
        """Raise InternalSolverError if a yices call returned non-zero."""
        if res != 0:
            err = yicespy.yices_error_string()
            raise InternalSolverError("Yices returned an error: " + err)
    def get_value(self, item):
        """Evaluate *item* in the current model and return a pySMT constant."""
        self._assert_no_function_type(item)
        titem = self.converter.convert(item)
        ty = self.environment.stc.get_type(item)
        if ty.is_bool_type():
            status, res = yicespy.yices_get_bool_value(self.model, titem)
            self._check_error(status)
            return self.mgr.Bool(bool(res))
        elif ty.is_int_type():
            res = yicespy.yices_get_integer_value(self.model, titem)
            return self.mgr.Int(res)
        elif ty.is_real_type():
            status, val = yicespy.yices_get_rational_value(self.model, titem)
            self._check_error(status)
            return self.mgr.Real(Fraction(val))
        elif ty.is_bv_type():
            status, res = yicespy.yices_get_bv_value(self.model, titem, ty.width)
            self._check_error(status)
            # yices returns bits LSB-first; reverse for the #b literal.
            str_val = "".join(str(x) for x in reversed(res))
            return self.mgr.BV("#b" + str_val)
        else:
            raise NotImplementedError()
    def _exit(self):
        # Free the context and its parameter record on solver shutdown.
        yicespy.yices_free_context(self.yices)
        yicespy.yices_free_param_record(self.yices_params)
class YicesConverter(Converter, DagWalker):
    def __init__(self, environment):
        """Set up the DAG walker and the symbol/declaration caches."""
        DagWalker.__init__(self, environment)
        self.backconversion = {}
        self.mgr = environment.formula_manager
        self._get_type = environment.stc.get_type
        # Maps a Symbol into the corresponding internal yices instance
        self.symbol_to_decl = {}
        # Maps an internal yices instance into the corresponding symbol
        self.decl_to_symbol = {}
    @catch_conversion_error
    def convert(self, formula):
        """Convert a pySMT formula into a yices term (memoized DAG walk)."""
        return self.walk(formula)
def _check_term_result(self, res):
if res == -1:
err = yicespy.yices_error_string()
raise InternalSolverError("Yices returned an error: " + err)
def walk_and(self, formula, args, **kwargs):
res = yicespy.yices_and(len(args), args)
self._check_term_result(res)
return res
def walk_or(self, formula, args, **kwargs):
res = yicespy.yices_or(len(args), args)
self._check_term_result(res)
return res
def walk_not(self, formula, args, **kwargs):
res = yicespy.yices_not(args[0])
self._check_term_result(res)
return res
def walk_symbol(self, formula, **kwargs):
symbol_type = formula.symbol_type()
var_type = self._type_to_yices(symbol_type)
term = yicespy.yices_new_uninterpreted_term(var_type)
yicespy.yices_set_term_name(term, formula.symbol_name())
self._check_term_result(term)
return term
def _bound_symbol(self, var):
symbol_type = var.symbol_type()
var_type = self._type_to_yices(symbol_type)
term = yicespy.yices_new_variable(var_type)
yicespy.yices_set_term_name(term, var.symbol_name())
return term
def walk_iff(self, formula, args, **kwargs):
res = yicespy.yices_iff(args[0], args[1])
self._check_term_result(res)
return res
def walk_implies(self, formula, args, **kwargs):
res = yicespy.yices_implies(args[0], args[1])
self._check_term_result(res)
return res
def walk_le(self, formula, args, **kwargs):
res = yicespy.yices_arith_leq_atom(args[0], args[1])
self._check_term_result(res)
return res
def walk_lt(self, formula, args, **kwargs):
res = yicespy.yices_arith_lt_atom(args[0], args[1])
self._check_term_result(res)
return res
def walk_ite(self, formula, args, **kwargs):
i, t, e = args
res = yicespy.yices_ite(i, t, e)
self._check_term_result(res)
return res
def walk_real_constant(self, formula, **kwargs):
frac = formula.constant_value()
n,d = frac.numerator, frac.denominator
rep = str(n) + "/" + str(d)
res = yicespy.yices_parse_rational(rep)
self._check_term_result(res)
return res
def walk_int_constant(self, formula, **kwargs):
assert is_pysmt_integer(formula.constant_value())
rep = str(formula.constant_value())
res = yicespy.yices_parse_rational(rep)
self._check_term_result(res)
return res
def walk_bool_constant(self, formula, **kwargs):
if formula.constant_value():
return yicespy.yices_true()
else:
return yicespy.yices_false()
def walk_exists(self, formula, args, **kwargs):
(bound_formula, var_list) = \
self._rename_bound_variables(args[0], formula.quantifier_vars())
res = yicespy.yices_exists(len(var_list), var_list, bound_formula)
self._check_term_result(res)
return res
def walk_forall(self, formula, args, **kwargs):
(bound_formula, var_list) = \
self._rename_bound_variables(args[0], formula.quantifier_vars())
res = yicespy.yices_forall(len(var_list), var_list, bound_formula)
self._check_term_result(res)
return res
def _rename_bound_variables(self, formula, variables):
"""Bounds the variables in formula.
Returns a tuple (new_formula, new_var_list) in which the old
variables have been replaced by the new variables in the list.
"""
new_vars = [self._bound_symbol(x) for x in variables]
old_vars = [self.walk_symbol(x, []) for x in variables]
new_formula = yicespy.yices_subst_term(len(variables), new_vars,
old_vars, formula)
return (new_formula, new_vars)
def walk_plus(self, formula, args, **kwargs):
res = yicespy.yices_sum(len(args), args)
self._check_term_result(res)
return res
def walk_minus(self, formula, args, **kwargs):
res = yicespy.yices_sub(args[0], args[1])
self._check_term_result(res)
return res
def walk_equals(self, formula, args, **kwargs):
tp = self._get_type(formula.arg(0))
res = None
if tp.is_bv_type():
res = yicespy.yices_bveq_atom(args[0], args[1])
else:
assert tp.is_int_type() or tp.is_real_type()
res = yicespy.yices_arith_eq_atom(args[0], args[1])
self._check_term_result(res)
return res
def walk_times(self, formula, args, **kwargs):
res = args[0]
for x in args[1:]:
res = yicespy.yices_mul(res, x)
self._check_term_result(res)
return res
def walk_toreal(self, formula, args, **kwargs):
return args[0]
def walk_function(self, formula, args, **kwargs):
name = formula.function_name()
if name not in self.symbol_to_decl:
self.declare_variable(name)
decl = self.symbol_to_decl[name]
res = yicespy.yices_application(decl, len(args), args)
self._check_term_result(res)
return res
def walk_bv_constant(self, formula, **kwargs):
width = formula.bv_width()
res = None
if width <= 64:
# we can use the numberical representation
value = formula.constant_value()
res = yicespy.yices_bvconst_uint64(width, value)
else:
# we must resort to strings to communicate the result to yices
res = yicespy.yices_parse_bvbin(formula.bv_bin_str())
self._check_term_result(res)
return res
def walk_bv_ult(self, formula, args, **kwargs):
res = yicespy.yices_bvlt_atom(args[0], args[1])
self._check_term_result(res)
return res
def walk_bv_ule(self, formula, args, **kwargs):
res = yicespy.yices_bvle_atom(args[0], args[1])
self._check_term_result(res)
return res
def walk_bv_concat(self, formula, args, **kwargs):
res = yicespy.yices_bvconcat2(args[0], args[1])
self._check_term_result(res)
return res
def walk_bv_extract(self, formula, args, **kwargs):
res = yicespy.yices_bvextract(args[0],
formula.bv_extract_start(),
formula.bv_extract_end())
self._check_term_result(res)
return res
def walk_bv_or(self, formula, args, **kwargs):
res = yicespy.yices_bvor2(args[0], args[1])
self._check_term_result(res)
return res
def walk_bv_not(self, formula, args, **kwargs):
res = yicespy.yices_bvnot(args[0])
self._check_term_result(res)
return res
def walk_bv_and(self, formula, args, **kwargs):
res = yicespy.yices_bvand2(args[0], args[1])
self._check_term_result(res)
return res
def walk_bv_xor(self, formula, args, **kwargs):
res = yicespy.yices_bvxor2(args[0], args[1])
self._check_term_result(res)
return res
def walk_bv_add(self, formula, args, **kwargs):
res = yicespy.yices_bvadd(args[0], args[1])
self._check_term_result(res)
return res
def walk_bv_sub(self, formula, args, **kwargs):
res = yicespy.yices_bvsub(args[0], args[1])
self._check_term_result(res)
return res
def walk_bv_neg(self, formula, args, **kwargs):
res = yicespy.yices_bvneg(args[0])
self._check_term_result(res)
return res
def walk_bv_mul(self, formula, args, **kwargs):
res = yicespy.yices_bvmul(args[0], args[1])
self._check_term_result(res)
return res
def walk_bv_udiv(self, formula, args, **kwargs):
res = yicespy.yices_bvdiv(args[0], args[1])
self._check_term_result(res)
return res
def walk_bv_urem(self, formula, args, **kwargs):
res = yicespy.yices_bvrem(args[0], args[1])
self._check_term_result(res)
return res
def walk_bv_lshl(self, formula, args, **kwargs):
res = yicespy.yices_bvshl(args[0], args[1])
self._check_term_result(res)
return res
def walk_bv_lshr(self, formula, args, **kwargs):
res = yicespy.yices_bvlshr(args[0], args[1])
self._check_term_result(res)
return res
def walk_bv_rol(self, formula, args, **kwargs):
res = yicespy.yices_rotate_left(args[0], formula.bv_rotation_step())
self._check_term_result(res)
return res
def walk_bv_ror(self, formula, args, **kwargs):
res = yicespy.yices_rotate_right(args[0], formula.bv_rotation_step())
self._check_term_result(res)
return res
def walk_bv_zext(self, formula, args, **kwargs):
res = yicespy.yices_zero_extend(args[0], formula.bv_extend_step())
self._check_term_result(res)
return res
def walk_bv_sext (self, formula, args, **kwargs):
res = yicespy.yices_sign_extend(args[0], formula.bv_extend_step())
self._check_term_result(res)
return res
def walk_bv_slt(self, formula, args, **kwargs):
res = yicespy.yices_bvslt_atom(args[0], args[1])
self._check_term_result(res)
return res
def walk_bv_sle (self, formula, args, **kwargs):
res = yicespy.yices_bvsle_atom(args[0], args[1])
self._check_term_result(res)
return res
def walk_bv_comp (self, formula, args, **kwargs):
a,b = args
eq = yicespy.yices_bveq_atom(a, b)
self._check_term_result(eq)
one = yicespy.yices_bvconst_int32(1, 1)
zero = yicespy.yices_bvconst_int32(1, 0)
res = yicespy.yices_ite(eq, one, zero)
self._check_term_result(res)
return res
def walk_bv_sdiv (self, formula, args, **kwargs):
res = yicespy.yices_bvsdiv(args[0], args[1])
self._check_term_result(res)
return res
def walk_bv_srem (self, formula, args, **kwargs):
res = yicespy.yices_bvsrem(args[0], args[1])
self._check_term_result(res)
return res
def walk_bv_ashr (self, formula, args, **kwargs):
res = yicespy.yices_bvashr(args[0], args[1])
self._check_term_result(res)
return res
def _type_to_yices(self, tp):
if tp.is_bool_type():
return yicespy.yices_bool_type()
elif tp.is_real_type():
return yicespy.yices_real_type()
elif tp.is_int_type():
return yicespy.yices_int_type()
elif tp.is_function_type():
stps = [self._type_to_yices(x) for x in tp.param_types]
rtp = self._type_to_yices(tp.return_type)
#arr = (yicespy.type_t * len(stps))(*stps)
return yicespy.yices_function_type(len(stps),
stps,
rtp)
elif tp.is_bv_type():
return yicespy.yices_bv_type(tp.width)
else:
raise NotImplementedError(tp)
def declare_variable(self, var):
if not var.is_symbol():
raise PysmtTypeError("Trying to declare as a variable something "
"that is not a symbol: %s" % var)
if var.symbol_name() not in self.symbol_to_decl:
tp = self._type_to_yices(var.symbol_type())
decl = yicespy.yices_new_uninterpreted_term(tp)
yicespy.yices_set_term_name(decl, var.symbol_name())
self.symbol_to_decl[var] = decl
self.decl_to_symbol[decl] = var
| |
#!/usr/bin/env python
"""This module has tests for the pvl lang functions."""
# Copyright 2019, Ross A. Beyer (rbeyer@seti.org)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from pvl.grammar import PVLGrammar
from pvl.decoder import PVLDecoder
from pvl.token import Token
class TestToken(unittest.TestCase):
    """Unit tests for pvl.token.Token classification and str-like helpers."""

    def test_init(self):
        s = "token"
        self.assertEqual(s, Token(s))
        self.assertEqual(s, Token(s, grammar=PVLGrammar()))
        self.assertEqual(s, Token(s, decoder=PVLDecoder()))
        # Wrong grammar/decoder types must be rejected.
        self.assertRaises(TypeError, Token, s, grammar="not a grammar")
        self.assertRaises(
            TypeError, Token, s, grammar=PVLGrammar(), decoder="not a decoder"
        )

    def test_is_comment(self):
        c = Token("/* comment */")
        self.assertTrue(c.is_comment())
        n = Token("not comment */")
        self.assertFalse(n.is_comment())

    def test_is_begin_aggregation(self):
        # Aggregation keywords are matched case-insensitively.
        for s in ("BEGIN_GROUP", "Begin_Group", "ObJeCt"):
            with self.subTest(string=s):
                t = Token(s)
                self.assertTrue(t.is_begin_aggregation())
        b = Token("END_GROUP")
        self.assertFalse(b.is_begin_aggregation())

    def test_is_end_statement(self):
        t = Token("END")
        self.assertTrue(t.is_end_statement())
        t = Token("Start")
        self.assertFalse(t.is_end_statement())

    def test_is_datetime(self):
        for s in (
            "2001-027T23:45",
            "2001-01-01T01:34Z",
            "01:42:57Z",
            "23:45",
            "01:42:57",
            "12:34:56.789",
            "2001-01-01",
            "2001-027",
        ):
            with self.subTest(string=s):
                t = Token(s)
                self.assertTrue(t.is_datetime())
        for s in ("3:450", "frank"):
            with self.subTest(string=s):
                t = Token(s)
                self.assertFalse(t.is_datetime())

    def test_is_parameter_name(self):
        for s in ("Hello", "ProductId"):
            with self.subTest(string=s):
                t = Token(s)
                self.assertTrue(t.is_parameter_name())
        # Reserved words, comments and dates are not parameter names.
        for s in ("Group", "/*comment*/", "2001-027"):
            with self.subTest(string=s):
                t = Token(s)
                self.assertFalse(t.is_parameter_name())

    def test_is_decimal(self):
        for s in (
            "125",
            "+211109",
            "-79",  # Integers
            "69.35",
            "+12456.345",
            "-0.23456",
            ".05",
            "-7.",  # Floating
            "-2.345678E12",
            "1.567E-10",
            "+4.99E+3",  # Exponential
        ):
            with self.subTest(string=s):
                t = Token(s)
                self.assertTrue(t.is_decimal())
        for s in ("2#0101#", "frank"):
            with self.subTest(string=s):
                t = Token(s)
                self.assertFalse(t.is_decimal())

    def test_is_binary(self):
        for s in ("2#0101#", "+2#0101#", "-2#0101#"):
            with self.subTest(string=s):
                t = Token(s)
                # self.assertTrue(t.is_binary())
                self.assertTrue(t.is_non_decimal())
        # for s in ('+211109', 'echo', '+8#0156#'):
        for s in ("+211109", "echo"):
            with self.subTest(string=s):
                t = Token(s)
                self.assertFalse(t.is_non_decimal())

    def test_is_octal(self):
        for s in ("8#0107#", "+8#0156#", "-8#0134#"):
            with self.subTest(string=s):
                t = Token(s)
                # self.assertTrue(t.is_octal())
                self.assertTrue(t.is_non_decimal())
        # for s in ('+211109', 'echo', '2#0101#'):
        #     with self.subTest(string=s):
        #         t = Token(s)
        #         self.assertFalse(t.is_octal())

    def test_is_hex(self):
        for s in ("16#100A#", "+16#23Bc#", "-16#98ef#"):
            with self.subTest(string=s):
                t = Token(s)
                # self.assertTrue(t.is_hex())
                self.assertTrue(t.is_non_decimal())
        # for s in ('+211109', 'echo', '2#0101#', '8#0107#'):
        #     with self.subTest(string=s):
        #         t = Token(s)
        #         self.assertFalse(t.is_hex())

    def test_isnumeric(self):
        for s in (
            "125",
            "+211109",
            "-79",  # Integers
            "69.35",
            "+12456.345",
            "-0.23456",
            ".05",
            "-7.",  # Floating
            "-2.345678E12",
            "1.567E-10",
            "+4.99E+3",  # Exponential
            "2#0101#",
            "+2#0101#",
            "-2#0101#",  # Binary
            "8#0107#",
            "+8#0156#",
            "-8#0134#",  # Octal
            "16#100A#",
            "+16#23Bc#",
            "-16#98ef#",  # Hex
        ):
            with self.subTest(string=s):
                t = Token(s)
                self.assertTrue(t.isnumeric())
        for s in ("frank", "#", "-apple"):
            with self.subTest(string=s):
                t = Token(s)
                self.assertFalse(t.isnumeric())

    def test_is_space(self):
        for s in (" ", "\t\n"):
            with self.subTest(string=s):
                t = Token(s)
                self.assertTrue(t.is_space())
                self.assertTrue(t.isspace())
        # The empty string is not considered space.
        for s in ("not space", ""):
            with self.subTest(string=s):
                t = Token(s)
                self.assertFalse(t.is_space())

    def test_is_WSC(self):
        # WSC = whitespace and/or comments, with nothing else mixed in.
        for s in (" /*com*/ ", "/*c1*/\n/*c2*/", " "):
            with self.subTest(string=s):
                t = Token(s)
                self.assertTrue(t.is_WSC())
        for s in (" /*com*/ not comment", " surrounding "):
            with self.subTest(string=s):
                t = Token(s)
                self.assertFalse(t.is_WSC())

    def test_is_delimiter(self):
        t = Token(";")
        self.assertTrue(t.is_delimiter())
        t = Token("not")
        self.assertFalse(t.is_delimiter())

    def test_is_quote(self):
        for s in ('"', "'"):
            with self.subTest(string=s):
                t = Token(s)
                self.assertTrue(t.is_quote())
        t = Token("not a quote mark")
        self.assertFalse(t.is_quote())

    def test_is_unquoted_string(self):
        for s in ("Hello", "Product", "Group"):
            with self.subTest(string=s):
                t = Token(s)
                self.assertTrue(t.is_unquoted_string())
        for s in (
            "/*comment*/",
            "second line of comment*/",
            "2001-027",
            '"quoted"',
            "\t",
        ):
            with self.subTest(string=s):
                t = Token(s)
                self.assertFalse(t.is_unquoted_string())

    def test_is_quoted_string(self):
        for s in ('"Hello &"', "'Product Id'", '""'):
            with self.subTest(string=s):
                t = Token(s)
                self.assertTrue(t.is_quoted_string())
        for s in ("/*comment*/", "2001-027", '"'):
            with self.subTest(string=s):
                t = Token(s)
                self.assertFalse(t.is_quoted_string())

    def test_is_string(self):
        for s in (
            '"Hello &"',
            "'Product Id'",
            '""',
            "Hello",
            "Product",
            "Group",
        ):
            with self.subTest(string=s):
                t = Token(s)
                self.assertTrue(t.is_string())
        for s in ("/*comment*/", "2001-027", '"'):
            with self.subTest(string=s):
                t = Token(s)
                self.assertFalse(t.is_string())

    def test_is_simple_value(self):
        # Bug fix: two missing commas in the original tuple caused implicit
        # string concatenation ("Group2001-01-01" and
        # "12:34:56.7892001-027T23:45"), silently merging four intended
        # test cases into two bogus ones.
        for s in (
            '"Hello &"',
            "'Product Id'",
            '""',  # Quoted Strings
            "Hello",
            "Group",  # Unquoted Strings
            "2001-01-01",
            "2001-027",  # Date
            "23:45",
            "01:42:57",
            "12:34:56.789",  # Time
            "2001-027T23:45",
            "2001-01-01T01:34Z",  # Datetime
            "125",
            "+211109",
            "-79",  # Integers
            "69.35",
            "+12456.345",
            "-0.23456",
            ".05",
            "-7.",  # Floating
            "-2.345678E12",
            "1.567E-10",
            "+4.99E+3",  # Exponential
            "2#0101#",
            "+2#0101#",
            "-2#0101#",  # Binary
            "8#0107#",
            "+8#0156#",
            "-8#0134#",  # Octal
            "16#100A#",
            "+16#23Bc#",
            "-16#98ef#",  # Hex
        ):
            with self.subTest(string=s):
                t = Token(s)
                self.assertTrue(t.is_simple_value())
        for s in ("/*comment*/", "=", '"', "{", "(", "Product Id"):
            with self.subTest(string=s):
                t = Token(s)
                self.assertFalse(t.is_simple_value())

    def test_split(self):
        # split() must return Tokens, not plain strings.
        s = "Hello Bob"
        t = Token(s)
        t_list = t.split()
        for x in t_list:
            with self.subTest(token=x):
                self.assertIsInstance(x, Token)

    def test_index(self):
        s = "3"
        t = Token(s)
        self.assertEqual(3, int(t))
        self.assertEqual(3, t.__index__())
        self.assertRaises(ValueError, Token("3.4").__index__)
        self.assertRaises(ValueError, Token("a").__index__)

    def test_float(self):
        s = "3.14"
        t = Token(s)
        self.assertEqual(3.14, float(t))

    def test_lstrip(self):
        s = " leftward space "
        t = Token(s)
        self.assertEqual("leftward space ", t.lstrip())

    def test_rstrip(self):
        s = " rightward space "
        t = Token(s)
        self.assertEqual(" rightward space", t.rstrip())
| |
# -*- coding: utf-8 -*-
import logging
import httplib as http
import math
from itertools import islice
from flask import request
from modularodm import Q
from modularodm.exceptions import ModularOdmException, ValidationValueError
from framework import status
from framework.utils import iso8601format
from framework.mongo import StoredObject
from framework.flask import redirect
from framework.auth.decorators import must_be_logged_in, collect_auth
from framework.exceptions import HTTPError, PermissionsError
from framework.mongo.utils import from_mongo, get_or_http_error
from website import language
from website.util import paths
from website.util import rubeus
from website.exceptions import NodeStateError
from website.project import clean_template_name, new_node, new_private_link
from website.project.decorators import (
must_be_contributor_or_public,
must_be_contributor,
must_be_valid_project,
must_have_permission,
must_not_be_registration,
)
from website.tokens import process_token_or_pass
from website.util.permissions import ADMIN, READ, WRITE
from website.util.rubeus import collect_addon_js
from website.project.model import has_anonymous_link, get_pointer_parent, NodeUpdateError
from website.project.forms import NewNodeForm
from website.models import Node, Pointer, WatchConfig, PrivateLink
from website import settings
from website.views import _render_nodes, find_dashboard, validate_page_num
from website.profile import utils
from website.project import new_folder
from website.util.sanitize import strip_html
from website.util import rapply
def r_strip_html(collection):
    """Recursively strip HTML from every string in `collection`.

    PEP 8 (E731): a named callable should be a `def`, not a lambda
    assignment -- same behavior, better tracebacks.
    """
    return rapply(collection, strip_html)
logger = logging.getLogger(__name__)
@must_be_valid_project
@must_have_permission(WRITE)
@must_not_be_registration
def edit_node(auth, node, **kwargs):
    """Update a single editable field ('title' or 'description') on a node.

    Expects JSON ``{'name': <field>, 'value': <new value>}``; the value is
    HTML-stripped before being applied. Unknown field names are ignored.
    """
    post_data = request.json
    edited_field = post_data.get('name')
    value = strip_html(post_data.get('value', ''))
    if edited_field == 'title':
        try:
            node.set_title(value, auth=auth)
        except ValidationValueError as e:
            # Surface model-level validation problems as a 400 response.
            raise HTTPError(
                http.BAD_REQUEST,
                data=dict(message_long=e.message)
            )
    elif edited_field == 'description':
        node.set_description(value, auth=auth)
    node.save()
    return {'status': 'success'}
##############################################################################
# New Project
##############################################################################
@must_be_logged_in
def project_new(**kwargs):
    """Render the 'new project' page; the template needs no extra context."""
    return dict()
@must_be_logged_in
def project_new_post(auth, **kwargs):
    """Create a new project from posted JSON, optionally cloning a template.

    Responds 201 with the new project's URL and serialized node; raises 400
    on title validation failure.
    """
    user = auth.user
    data = request.get_json()
    title = strip_html(data.get('title'))
    title = title.strip()
    category = data.get('category', 'project')
    template = data.get('template')
    description = strip_html(data.get('description'))
    new_project = {}
    if template:
        # Clone an existing node, overriding title/category (and description
        # when provided) on the copy; `template` is the source node's id.
        original_node = Node.load(template)
        changes = {
            'title': title,
            'category': category,
            'template_node': original_node,
        }
        if description:
            changes['description'] = description
        project = original_node.use_as_template(
            auth=auth,
            changes={
                template: changes,
            }
        )
    else:
        try:
            project = new_node(category, title, user, description)
        except ValidationValueError as e:
            raise HTTPError(
                http.BAD_REQUEST,
                data=dict(message_long=e.message)
            )
    new_project = _view_project(project, auth)
    return {
        'projectUrl': project.url,
        'newNode': new_project['node'] if new_project else None
    }, http.CREATED
@must_be_logged_in
@must_be_valid_project
def project_new_from_template(auth, node, **kwargs):
    """Clone `node` as a template with no field overrides; return the copy's URL."""
    # Local renamed from `new_node`, which shadowed the imported factory.
    template_copy = node.use_as_template(
        auth=auth,
        changes={},
    )
    return {'url': template_copy.url}, http.CREATED, None
##############################################################################
# New Folder
##############################################################################
@must_be_valid_project
@must_be_logged_in
def folder_new_post(auth, node, **kwargs):
    """Create a new folder under folder `node` and point `node` at it.

    Only valid when `node` is itself a folder; responds 201 with the
    dashboard URL on success.
    """
    user = auth.user
    title = request.json.get('title')
    if not node.is_folder:
        raise HTTPError(http.BAD_REQUEST)
    folder = new_folder(strip_html(title), user)
    folders = [folder]
    try:
        _add_pointers(node, folders, auth)
    except ValueError:
        # _add_pointers rejects pointers that are invalid for this node.
        raise HTTPError(http.BAD_REQUEST)
    return {
        'projectUrl': '/dashboard/',
    }, http.CREATED
@collect_auth
def add_folder(auth, **kwargs):
    """Create a folder under the folder identified by 'node_id' in the JSON body.

    404s when the parent is missing; 400 when the parent is not a folder or
    the pointer cannot be added.
    """
    data = request.get_json()
    node_id = data.get('node_id')
    node = get_or_http_error(Node, node_id)
    user = auth.user
    title = strip_html(data.get('title'))
    if not node.is_folder:
        raise HTTPError(http.BAD_REQUEST)
    folder = new_folder(
        title, user
    )
    folders = [folder]
    try:
        _add_pointers(node, folders, auth)
    except ValueError:
        raise HTTPError(http.BAD_REQUEST)
    return {}, 201, None
##############################################################################
# New Node
##############################################################################
@must_be_valid_project
@must_have_permission(WRITE)
@must_not_be_registration
def project_new_node(auth, node, **kwargs):
    """Create a new child component of `node` from the posted NewNodeForm.

    On success responds 201 and redirects to the new component; on form or
    validation failure responds 400 and redirects back to the project.
    """
    form = NewNodeForm(request.form)
    user = auth.user
    if form.validate():
        try:
            new_component = new_node(
                title=strip_html(form.title.data),
                user=user,
                category=form.category.data,
                parent=node,
            )
        except ValidationValueError as e:
            raise HTTPError(
                http.BAD_REQUEST,
                data=dict(message_long=e.message)
            )
        message = (
            'Your component was created successfully. You can keep working on the component page below, '
            'or return to the <u><a href="{url}">project page</a></u>.'
        ).format(url=node.url)
        status.push_status_message(message, kind='info', trust=True)
        return {
            'status': 'success',
        }, 201, None, new_component.url
    else:
        # TODO: This function doesn't seem to exist anymore?
        status.push_errors_to_status(form.errors)
        raise HTTPError(http.BAD_REQUEST, redirect_url=node.url)
@must_be_logged_in
@must_be_valid_project
def project_before_fork(auth, node, **kwargs):
    """Collect warning prompts to show the user before forking `node`."""
    user = auth.user
    prompts = node.callback('before_fork', user=user)
    if node.has_pointers_recursive:
        # Pointers are not copied into forks; warn the user up front.
        prompts.append(
            language.BEFORE_FORK_HAS_POINTERS.format(
                category=node.project_or_component
            )
        )
    return {'prompts': prompts}
@must_be_logged_in
@must_be_valid_project
def project_before_template(auth, node, **kwargs):
    """List full names of node-configurable addons, shown before templating."""
    prompts = []
    for addon in node.get_addons():
        if 'node' not in addon.config.configs:
            continue
        full_name = addon.to_json(auth.user)['addon_full_name']
        if full_name:
            prompts.append(full_name)
    return {'prompts': prompts}
@must_be_logged_in
@must_be_valid_project
def node_fork_page(auth, node, **kwargs):
    """Fork `node` for the current user and return the fork's URL.

    Disallowed while the site is in disk-saving mode (405) or when the
    user lacks permission to fork (403); both redirect back to the node.
    """
    if settings.DISK_SAVING_MODE:
        raise HTTPError(
            http.METHOD_NOT_ALLOWED,
            redirect_url=node.url
        )
    try:
        fork = node.fork_node(auth)
    except PermissionsError:
        raise HTTPError(
            http.FORBIDDEN,
            redirect_url=node.url
        )
    return fork.url
@must_be_valid_project
@must_be_contributor_or_public
def node_registrations(auth, node, **kwargs):
    """Serialize `node` for its registrations tab."""
    serialized = _view_project(node, auth, primary=True)
    return serialized
@must_be_valid_project
@must_be_contributor_or_public
def node_forks(auth, node, **kwargs):
    """Serialize `node` for its forks tab."""
    serialized = _view_project(node, auth, primary=True)
    return serialized
@must_be_valid_project
@must_be_logged_in
@must_be_contributor
def node_setting(auth, node, **kwargs):
    """Build the context for the node settings page: enabled/available
    addons and their configs, comment level, and category choices.
    """
    ret = _view_project(node, auth, primary=True)
    addons_enabled = []
    addon_enabled_settings = []
    for addon in node.get_addons():
        addons_enabled.append(addon.config.short_name)
        if 'node' in addon.config.configs:
            config = addon.to_json(auth.user)
            # inject the MakoTemplateLookup into the template context
            # TODO inject only short_name and render fully client side
            config['template_lookup'] = addon.config.template_lookup
            config['addon_icon_url'] = addon.config.icon_url
            addon_enabled_settings.append(config)
    addon_enabled_settings = sorted(addon_enabled_settings, key=lambda addon: addon['addon_full_name'].lower())
    ret['addon_categories'] = settings.ADDON_CATEGORIES
    # Available = installed, node-configurable, and not auto-added by the system.
    ret['addons_available'] = sorted([
        addon
        for addon in settings.ADDONS_AVAILABLE
        if 'node' in addon.owners
        and addon.short_name not in settings.SYSTEM_ADDED_ADDONS['node']
    ], key=lambda addon: addon.full_name.lower())
    ret['addons_enabled'] = addons_enabled
    ret['addon_enabled_settings'] = addon_enabled_settings
    ret['addon_capabilities'] = settings.ADDON_CAPABILITIES
    ret['addon_js'] = collect_node_config_js(node.get_addons())
    ret['include_wiki_settings'] = node.include_wiki_settings(auth.user)
    ret['comments'] = {
        'level': node.comment_level,
    }
    ret['categories'] = Node.CATEGORY_MAP
    ret['categories'].update({
        'project': 'Project'
    })
    return ret
def collect_node_config_js(addons):
    """Collect webpack bundle URLs for each addon's node-cfg.js module.

    :param list addons: List of node's addon config records.
    :return: list of JS module paths to include on the node settings page.
    """
    candidate_paths = (
        paths.resolve_addon_path(addon.config, 'node-cfg.js')
        for addon in addons
    )
    # Addons without a node-cfg.js resolve to a falsy path and are skipped.
    return [js_path for js_path in candidate_paths if js_path]
@must_have_permission(WRITE)
@must_not_be_registration
def node_choose_addons(auth, node, **kwargs):
    """Apply the addon selection posted by the user to `node`."""
    selected = request.json
    node.config_addons(selected, auth)
@must_be_valid_project
@must_have_permission(READ)
def node_contributors(auth, node, **kwargs):
    """Serialize the project plus its contributor and admin-contributor lists."""
    ret = _view_project(node, auth, primary=True)
    ret.update({
        'contributors': utils.serialize_contributors(node.contributors, node),
        'adminContributors': utils.serialize_contributors(
            node.admin_contributors, node, admin=True
        ),
    })
    return ret
@must_have_permission(ADMIN)
def configure_comments(node, **kwargs):
    """Set the node's comment level to 'public', 'private', or disabled (None)."""
    comment_level = request.json.get('commentLevel')
    if not comment_level:
        # Falsy value (missing/empty) disables commenting entirely.
        node.comment_level = None
    elif comment_level in ['public', 'private']:
        node.comment_level = comment_level
    else:
        raise HTTPError(http.BAD_REQUEST)
    node.save()
##############################################################################
# View Project
##############################################################################
@must_be_valid_project(retractions_valid=True)
@must_be_contributor_or_public
@process_token_or_pass
def view_project(auth, node, **kwargs):
    """Serialize `node` for the project page (or its /api/v1 counterpart).

    `primary` is True for page (non-API) requests; addon widget JS and
    static assets are folded into the response.
    """
    primary = '/api/v1' not in request.path
    ret = _view_project(node, auth, primary=primary)
    ret['addon_capabilities'] = settings.ADDON_CAPABILITIES
    # Collect the URIs to the static assets for addons that have widgets
    ret['addon_widget_js'] = list(collect_addon_js(
        node,
        filename='widget-cfg.js',
        config_entry='widget'
    ))
    ret.update(rubeus.collect_addon_assets(node))
    return ret
# Expand/Collapse
@must_be_valid_project
@must_be_contributor_or_public
def expand(auth, node, **kwargs):
    """Record that the current user expanded this node in the organizer."""
    current_user = auth.user
    node.expand(user=current_user)
    return {}, 200, None
@must_be_valid_project
@must_be_contributor_or_public
def collapse(auth, node, **kwargs):
    """Record that the current user collapsed this node in the organizer."""
    current_user = auth.user
    node.collapse(user=current_user)
    return {}, 200, None
# Reorder components
@must_be_valid_project
@must_not_be_registration
@must_have_permission(WRITE)
def project_reorder_components(node, **kwargs):
    """Reorders the components in a project's component list.

    :param-json list new_list: List of strings that include node IDs and
        node type delimited by ':'.
    """
    # TODO(sloria): Change new_list parameter to be an array of objects
    # {
    #   'newList': {
    #       {'key': 'abc123', 'type': 'node'}
    #   }
    # }
    new_list = [
        tuple(n.split(':'))
        for n in request.json.get('new_list', [])
    ]
    nodes_new = [
        StoredObject.get_collection(schema).load(key)
        for key, schema in new_list
    ]
    valid_nodes = [
        n for n in node.nodes
        if not n.is_deleted
    ]
    deleted_nodes = [
        n for n in node.nodes
        if n.is_deleted
    ]
    # Only accept the new ordering if it is exactly a permutation of the
    # current non-deleted children; deleted children keep a trailing position.
    if len(valid_nodes) == len(nodes_new) and set(valid_nodes) == set(nodes_new):
        node.nodes = nodes_new + deleted_nodes
        node.save()
        return {}
    logger.error('Got invalid node list in reorder components')
    raise HTTPError(http.BAD_REQUEST)
##############################################################################
@must_be_valid_project
@must_be_contributor_or_public
def project_statistics(auth, node, **kwargs):
    """Serialize the project for its statistics page.

    403 unless the node is public or editable by the requester.
    """
    if not node.can_edit(auth) and not node.is_public:
        raise HTTPError(http.FORBIDDEN)
    return _view_project(node, auth, primary=True)
@must_be_valid_project
@must_be_contributor_or_public
def project_statistics_redirect(auth, node, **kwargs):
    """Redirect to the GUID-based URL of the statistics page."""
    target = node.web_url_for("project_statistics", _guid=True)
    return redirect(target)
###############################################################################
# Make Private/Public
###############################################################################
@must_be_valid_project
@must_have_permission(ADMIN)
def project_before_set_public(node, **kwargs):
    """Collect warning prompts shown before making a node public."""
    prompt = node.callback('before_make_public')
    # Warn when any active view-only link is anonymized: making the node
    # public exposes contributor identities regardless.
    if any(link.anonymous for link in node.private_links_active):
        prompt.append('Anonymized view-only links <b>DO NOT</b> anonymize '
                      'contributors after a project or component is made public.')
    return {
        'prompts': prompt
    }
@must_be_valid_project
@must_have_permission(ADMIN)
def project_set_privacy(auth, node, **kwargs):
    """Set the node's privacy from the 'permissions' URL kwarg.

    400 when the kwarg is absent or the model refuses the transition.
    """
    permissions = kwargs.get('permissions')
    if permissions is None:
        raise HTTPError(http.BAD_REQUEST)
    try:
        node.set_privacy(permissions, auth)
    except NodeStateError as e:
        raise HTTPError(http.BAD_REQUEST, data=dict(
            message_short="Can't change privacy",
            message_long=e.message
        ))
    return {
        'status': 'success',
        'permissions': permissions,
    }
@must_be_valid_project
@must_be_contributor_or_public
@must_not_be_registration
def watch_post(auth, node, **kwargs):
    """Start watching `node` for the current user; 400 if already watched."""
    user = auth.user
    watch_config = WatchConfig(node=node,
                               digest=request.json.get('digest', False),
                               immediate=request.json.get('immediate', False))
    try:
        user.watch(watch_config)
    except ValueError:  # Node is already being watched
        raise HTTPError(http.BAD_REQUEST)
    user.save()
    return {
        'status': 'success',
        'watchCount': len(node.watchconfig__watched)
    }
@must_be_valid_project
@must_be_contributor_or_public
@must_not_be_registration
def unwatch_post(auth, node, **kwargs):
    """Stop watching `node` for the current user; 400 if not being watched.

    NOTE(review): unlike watch_post, the user is not saved here -- confirm
    that unwatch() persists the change on its own.
    """
    user = auth.user
    watch_config = WatchConfig(node=node,
                               digest=request.json.get('digest', False),
                               immediate=request.json.get('immediate', False))
    try:
        user.unwatch(watch_config)
    except ValueError:  # Node isn't being watched
        raise HTTPError(http.BAD_REQUEST)
    return {
        'status': 'success',
        'watchCount': len(node.watchconfig__watched)
    }
@must_be_valid_project
@must_be_contributor_or_public
@must_not_be_registration
def togglewatch_post(auth, node, **kwargs):
    """Toggle watch mode on `node` for the current user and report the result."""
    # TODO: refactor this, watch_post, unwatch_post (@mambocab)
    user = auth.user
    watch_config = WatchConfig(
        node=node,
        digest=request.json.get('digest', False),
        immediate=request.json.get('immediate', False)
    )
    try:
        if user.is_watching(node):
            user.unwatch(watch_config)
        else:
            user.watch(watch_config)
    except ValueError:
        raise HTTPError(http.BAD_REQUEST)
    user.save()
    return {
        'status': 'success',
        'watchCount': len(node.watchconfig__watched),
        'watched': user.is_watching(node)
    }
@must_be_valid_project
@must_not_be_registration
@must_have_permission(WRITE)
def update_node(auth, node, **kwargs):
    """Apply a JSON patch of whitelisted fields to `node`.

    Node.update() consults node.WRITABLE_WHITELIST, which only allows
    category, title, and description to be edited by write-permission
    contributors; the response echoes the updated field values.
    """
    data = r_strip_html(request.get_json())
    try:
        return {
            'updated_fields': {
                key: getattr(node, key)
                for key in
                node.update(data, auth=auth)
            }
        }
    except NodeUpdateError as e:
        raise HTTPError(400, data=dict(
            message_short="Failed to update attribute '{0}'".format(e.key),
            message_long=e.reason
        ))
@must_be_valid_project
@must_have_permission(ADMIN)
@must_not_be_registration
def component_remove(auth, node, **kwargs):
    """Remove component, and recursively remove its children. If node has a
    parent, add log and redirect to parent; else redirect to user dashboard.
    """
    try:
        node.remove_node(auth)
    except NodeStateError as e:
        raise HTTPError(
            http.BAD_REQUEST,
            data={
                'message_short': 'Error',
                'message_long': 'Could not delete component: ' + e.message
            },
        )
    node.save()
    message = '{} deleted'.format(
        node.project_or_component.capitalize()
    )
    status.push_status_message(message, kind='success', trust=False)
    parent = node.parent_node
    if parent and parent.can_view(auth):
        # Consistency fix: redirect to the same `parent` object whose
        # visibility was just checked, instead of re-reading the
        # node__parent backref, which duplicates the lookup and may not
        # select the same node the can_view() check ran against.
        redirect_url = parent.url
    else:
        redirect_url = '/dashboard/'
    return {
        'url': redirect_url,
    }
@must_have_permission(ADMIN)
@must_not_be_registration
def delete_folder(auth, node, **kwargs):
    """Remove folder node
    """
    # Only an existing, non-dashboard folder may be deleted.
    if node is None or not node.is_folder or node.is_dashboard:
        raise HTTPError(http.BAD_REQUEST)
    try:
        node.remove_node(auth)
    except NodeStateError as e:
        raise HTTPError(
            http.BAD_REQUEST,
            data={
                'message_short': 'Error',
                'message_long': 'Could not delete component: ' + e.message
            },
        )
    return {}
@must_be_valid_project
@must_have_permission(ADMIN)
def remove_private_link(*args, **kwargs):
    """Soft-delete a private (view-only) link.

    Expects ``private_link_id`` in the JSON body; responds 404 when the
    link does not exist.
    """
    link_id = request.json['private_link_id']
    try:
        link = PrivateLink.load(link_id)
    except ModularOdmException:
        raise HTTPError(http.NOT_FOUND)
    # load() returns None (rather than raising) for an unknown id; treat
    # that as "not found" too instead of crashing with AttributeError below.
    if link is None:
        raise HTTPError(http.NOT_FOUND)
    link.is_deleted = True
    link.save()
# TODO: Split into separate functions
def _render_addon(node):
widgets = {}
configs = {}
js = []
css = []
for addon in node.get_addons():
configs[addon.config.short_name] = addon.config.to_json()
js.extend(addon.config.include_js.get('widget', []))
css.extend(addon.config.include_css.get('widget', []))
js.extend(addon.config.include_js.get('files', []))
css.extend(addon.config.include_css.get('files', []))
return widgets, configs, js, css
def _should_show_wiki_widget(node, user):
has_wiki = bool(node.get_addon('wiki'))
wiki_page = node.get_wiki_page('home', None)
if not node.has_permission(user, 'write'):
return has_wiki and wiki_page and wiki_page.html(node)
else:
return has_wiki
def _view_project(node, auth, primary=False):
    """Build a JSON object containing everything needed to render
    project.view.mako.

    :param Node node: node being rendered
    :param Auth auth: current authorization context
    :param bool primary: when True, fire each add-on's ``before_page_load``
        callback and surface its messages as status messages
    :return: dict with 'node', 'parent_node', 'user', 'badges' and add-on data
    """
    user = auth.user
    parent = node.parent_node
    # Dashboard (top-level folder) lookups only make sense for logged-in users.
    if user:
        dashboard = find_dashboard(user)
        dashboard_id = dashboard._id
        in_dashboard = dashboard.pointing_at(node._primary_key) is not None
    else:
        in_dashboard = False
        dashboard_id = ''
    # A view-only key may arrive on the auth object or via the query string.
    view_only_link = auth.private_key or request.args.get('view_only', '').strip('/')
    anonymous = has_anonymous_link(node, auth)
    widgets, configs, js, css = _render_addon(node)
    redirect_url = node.url + '?view_only=None'
    # Before page load callback; skip if not primary call
    if primary:
        for addon in node.get_addons():
            messages = addon.before_page_load(node, user) or []
            for message in messages:
                status.push_status_message(message, kind='info', dismissible=False, trust=True)
    data = {
        'node': {
            'id': node._primary_key,
            'title': node.title,
            'category': node.category_display,
            'category_short': node.category,
            'node_type': node.project_or_component,
            'description': node.description or '',
            'license': node.license,
            'url': node.url,
            'api_url': node.api_url,
            'absolute_url': node.absolute_url,
            'redirect_url': redirect_url,
            'display_absolute_url': node.display_absolute_url,
            'update_url': node.api_url_for('update_node'),
            'in_dashboard': in_dashboard,
            'is_public': node.is_public,
            'is_archiving': node.archiving,
            'date_created': iso8601format(node.date_created),
            'date_modified': iso8601format(node.logs[-1].date) if node.logs else '',
            'tags': [tag._primary_key for tag in node.tags],
            'children': bool(node.nodes_active),
            'is_registration': node.is_registration,
            'is_pending_registration': node.is_pending_registration,
            'is_retracted': node.is_retracted,
            'is_pending_retraction': node.is_pending_retraction,
            'retracted_justification': getattr(node.retraction, 'justification', None),
            'embargo_end_date': node.embargo_end_date.strftime("%A, %b. %d, %Y") if node.embargo_end_date else False,
            'is_pending_embargo': node.is_pending_embargo,
            'registered_from_url': node.registered_from.url if node.is_registration else '',
            'registered_date': iso8601format(node.registered_date) if node.is_registration else '',
            'root_id': node.root._id,
            'registered_meta': [
                {
                    'name_no_ext': from_mongo(meta),
                    'name_clean': clean_template_name(meta),
                }
                for meta in node.registered_meta or []
            ],
            'registration_count': len(node.node__registrations),
            'is_fork': node.is_fork,
            'forked_from_id': node.forked_from._primary_key if node.is_fork else '',
            'forked_from_display_absolute_url': node.forked_from.display_absolute_url if node.is_fork else '',
            'forked_date': iso8601format(node.forked_date) if node.is_fork else '',
            'fork_count': len(node.forks),
            'templated_count': len(node.templated_list),
            'watched_count': len(node.watchconfig__watched),
            'private_links': [x.to_json() for x in node.private_links_active],
            'link': view_only_link,
            'anonymous': anonymous,
            'points': len(node.get_points(deleted=False, folders=False)),
            'piwik_site_id': node.piwik_site_id,
            'comment_level': node.comment_level,
            'has_comments': bool(getattr(node, 'commented', [])),
            # NOTE(review): reads 'commented', not a children backref --
            # looks copy-pasted from 'has_comments'; confirm intent.
            'has_children': bool(getattr(node, 'commented', False)),
            'identifiers': {
                'doi': node.get_identifier_value('doi'),
                'ark': node.get_identifier_value('ark'),
            },
        },
        'parent_node': {
            'exists': parent is not None,
            'id': parent._primary_key if parent else '',
            'title': parent.title if parent else '',
            'category': parent.category_display if parent else '',
            'url': parent.url if parent else '',
            'api_url': parent.api_url if parent else '',
            'absolute_url': parent.absolute_url if parent else '',
            'registrations_url': parent.web_url_for('node_registrations') if parent else '',
            'is_public': parent.is_public if parent else '',
            'is_contributor': parent.is_contributor(user) if parent else '',
            'can_view': parent.can_view(auth) if parent else False
        },
        'user': {
            'is_contributor': node.is_contributor(user),
            'is_admin': node.has_permission(user, ADMIN),
            'is_admin_parent': parent.is_admin_parent(user) if parent else False,
            'can_edit': (node.can_edit(auth)
                         and not node.is_registration),
            'has_read_permissions': node.has_permission(user, 'read'),
            'permissions': node.get_permissions(user) if user else [],
            'is_watching': user.is_watching(node) if user else False,
            'piwik_token': user.piwik_token if user else '',
            'id': user._id if user else None,
            'username': user.username if user else None,
            'fullname': user.fullname if user else '',
            'can_comment': node.can_comment(auth),
            'show_wiki_widget': _should_show_wiki_widget(node, user),
            'dashboard_id': dashboard_id,
        },
        'badges': _get_badge(user),
        # TODO: Namespace with nested dicts
        'addons_enabled': node.get_addon_names(),
        'addons': configs,
        'addon_widgets': widgets,
        'addon_widget_js': js,
        'addon_widget_css': css,
        'node_categories': Node.CATEGORY_MAP,
    }
    return data
def _get_badge(user):
if user:
badger = user.get_addon('badges')
if badger:
return {
'can_award': badger.can_award,
'badges': badger.get_badges_json()
}
return {}
def _get_children(node, auth, indent=0):
children = []
for child in node.nodes_primary:
if not child.is_deleted and child.has_permission(auth.user, 'admin'):
children.append({
'id': child._primary_key,
'title': child.title,
'indent': indent,
'is_public': child.is_public,
'parent_id': child.parent_id,
})
children.extend(_get_children(child, auth, indent + 1))
return children
@must_be_valid_project
@must_have_permission(ADMIN)
def private_link_table(node, **kwargs):
    """Serialize the node's active view-only links for the settings table."""
    links = [link.to_json() for link in node.private_links_active]
    return {
        'node': {
            'absolute_url': node.absolute_url,
            'private_links': links,
        }
    }
@collect_auth
@must_be_valid_project
@must_have_permission(ADMIN)
def get_editable_children(auth, node, **kwargs):
    """Return the node plus every descendant the user can administer."""
    return {
        'node': {
            'id': node._id,
            'title': node.title,
            'is_public': node.is_public,
        },
        'children': _get_children(node, auth),
    }
def _get_user_activity(node, auth, rescale_ratio):
# Counters
total_count = len(node.logs)
# Note: It's typically much faster to find logs of a given node
# attached to a given user using node.logs.find(...) than by
# loading the logs into Python and checking each one. However,
# using deep caching might be even faster down the road.
if auth.user:
ua_count = node.logs.find(Q('user', 'eq', auth.user)).count()
else:
ua_count = 0
non_ua_count = total_count - ua_count # base length of blue bar
# Normalize over all nodes
try:
ua = ua_count / rescale_ratio * 100
except ZeroDivisionError:
ua = 0
try:
non_ua = non_ua_count / rescale_ratio * 100
except ZeroDivisionError:
non_ua = 0
return ua_count, ua, non_ua
@must_be_valid_project
def get_recent_logs(node, **kwargs):
    """Return the primary keys of the node's three newest logs, newest first."""
    newest_first = list(reversed(node.logs._to_primary_keys()))
    return {'logs': newest_first[:3]}
def _get_summary(node, auth, rescale_ratio, primary=True, link_id=None, show_path=False):
    """Serialize ``node`` for a summary listing (children, forks, registrations).

    Only minimal status flags are exposed when the viewer lacks read
    access; activity-bar numbers are filled in only when ``rescale_ratio``
    is truthy.
    """
    # TODO(sloria): Refactor this or remove (lots of duplication with _view_project)
    summary = {
        'id': link_id if link_id else node._id,
        'primary': primary,
        'is_registration': node.is_registration,
        'is_fork': node.is_fork,
        'is_pending_registration': node.is_pending_registration,
        'is_retracted': node.is_retracted,
        'is_pending_retraction': node.is_pending_retraction,
        'embargo_end_date': node.embargo_end_date.strftime("%A, %b. %d, %Y") if node.embargo_end_date else False,
        'is_pending_embargo': node.is_pending_embargo,
        'archiving': node.archiving,
    }
    # Full details are only serialized for viewers with read access.
    if node.can_view(auth):
        summary.update({
            'can_view': True,
            'can_edit': node.can_edit(auth),
            'primary_id': node._id,
            'url': node.url,
            'primary': primary,
            'api_url': node.api_url,
            'title': node.title,
            'category': node.category,
            'node_type': node.project_or_component,
            'is_fork': node.is_fork,
            'is_registration': node.is_registration,
            'anonymous': has_anonymous_link(node, auth),
            'registered_date': node.registered_date.strftime('%Y-%m-%d %H:%M UTC')
            if node.is_registration
            else None,
            'forked_date': node.forked_date.strftime('%Y-%m-%d %H:%M UTC')
            if node.is_fork
            else None,
            'nlogs': None,
            'ua_count': None,
            'ua': None,
            'non_ua': None,
            'addons_enabled': node.get_addon_names(),
            'is_public': node.is_public,
            'parent_title': node.parent_node.title if node.parent_node else None,
            'parent_is_public': node.parent_node.is_public if node.parent_node else False,
            'show_path': show_path
        })
        # Activity bars require a non-zero normalization ratio.
        if rescale_ratio:
            ua_count, ua, non_ua = _get_user_activity(node, auth, rescale_ratio)
            summary.update({
                'nlogs': len(node.logs),
                'ua_count': ua_count,
                'ua': ua,
                'non_ua': non_ua,
            })
    else:
        summary['can_view'] = False
    # TODO: Make output format consistent with _view_project
    return {
        'summary': summary,
    }
@collect_auth
@must_be_valid_project(retractions_valid=True)
def get_summary(auth, node, **kwargs):
    """Render a node summary, optionally scaling activity by rescale_ratio."""
    rescale_ratio = kwargs.get('rescale_ratio')
    raw_ratio = request.args.get('rescale_ratio')
    # Fall back to the query-string ratio when the kwarg is absent.
    if rescale_ratio is None and raw_ratio:
        try:
            rescale_ratio = float(raw_ratio)
        except (TypeError, ValueError):
            raise HTTPError(http.BAD_REQUEST)
    return _get_summary(
        node,
        auth,
        rescale_ratio,
        primary=kwargs.get('primary'),
        link_id=kwargs.get('link_id'),
        show_path=kwargs.get('show_path', False),
    )
@must_be_contributor_or_public
def get_children(auth, node, **kwargs):
    """Render the node's non-deleted children.

    An optional ``permissions`` query arg restricts the list to children
    on which the user holds that permission.
    """
    user = auth.user
    requested_perm = request.args.get('permissions')
    if requested_perm:
        perm = requested_perm.lower().strip()
        children = [
            each for each in node.nodes
            if perm in each.get_permissions(user) and not each.is_deleted
        ]
    else:
        children = [each for each in node.nodes if not each.is_deleted]
    return _render_nodes(children, auth)
@must_be_contributor_or_public
def get_folder_pointers(auth, node, **kwargs):
    """Return the ids of nodes pointed at by a folder; [] for non-folders."""
    if not node.is_folder:
        return []
    pointer_ids = []
    for each in node.nodes:
        # Only live, non-primary (i.e. pointer) children count.
        if each is None or each.is_deleted or each.primary:
            continue
        pointer_ids.append(each.resolve()._id)
    return pointer_ids
@must_be_contributor_or_public
def get_forks(auth, node, **kwargs):
    """Render the node's forks, newest first."""
    newest_first = sorted(node.forks,
                          key=lambda fork: fork.forked_date,
                          reverse=True)
    return _render_nodes(nodes=newest_first, auth=auth)
@must_be_contributor_or_public
def get_registrations(auth, node, **kwargs):
    """Render all non-deleted registrations (including archiving), newest first."""
    registrations = [
        each for each in reversed(node.node__registrations)
        if not each.is_deleted
    ]
    return _render_nodes(registrations, auth)
@must_be_valid_project
@must_have_permission(ADMIN)
def project_generate_private_link_post(auth, node, **kwargs):
    """Create a new private link and attach it to the node and any selected children."""
    node_ids = request.json.get('node_ids', [])
    name = request.json.get('name', '')
    anonymous = request.json.get('anonymous', False)
    # The target node itself always heads the list.
    if node._id not in node_ids:
        node_ids.insert(0, node._id)
    nodes = [Node.load(each_id) for each_id in node_ids]
    has_public_node = any(each.is_public for each in nodes)
    new_link = new_private_link(
        name=name, user=auth.user, nodes=nodes, anonymous=anonymous
    )
    # Warn when an anonymized link covers a public node: contributor names
    # on public projects remain visible regardless.
    if anonymous and has_public_node:
        status.push_status_message(
            'Anonymized view-only links <b>DO NOT</b> '
            'anonymize contributors of public projects or components.',
            trust=True
        )
    return new_link
@must_be_valid_project
@must_have_permission(ADMIN)
def project_private_link_edit(auth, **kwargs):
    """Rename an existing private link; silently no-op when it doesn't exist."""
    new_name = request.json.get('value', '')
    link = PrivateLink.load(request.json.get('pk', ''))
    if link:
        link.name = new_name
        link.save()
def _serialize_node_search(node):
"""Serialize a node for use in pointer search.
:param Node node: Node to serialize
:return: Dictionary of node data
"""
title = node.title
if node.is_registration:
title += ' (registration)'
first_author = node.visible_contributors[0]
return {
'id': node._id,
'title': title,
'firstAuthor': first_author.family_name or first_author.given_name or first_author.full_name,
'etal': len(node.visible_contributors) > 1,
}
@must_be_logged_in
def search_node(auth, **kwargs):
    """Search the current user's (optionally public) non-folder nodes by title.

    JSON params: ``query``, ``nodeId`` (that node and its children are
    excluded), ``includePublic``, ``page``, ``size``. Returns a page of
    serialized matches plus paging info.
    """
    # Get arguments
    node = Node.load(request.json.get('nodeId'))
    include_public = request.json.get('includePublic')
    # `size` stays a float so the `pages` division below is non-truncating.
    size = float(request.json.get('size', '5').strip())
    page = request.json.get('page', 0)
    query = request.json.get('query', '').strip()
    start = (page * size)
    if not query:
        return {'nodes': []}
    # Build ODM query
    title_query = Q('title', 'icontains', query)
    not_deleted_query = Q('is_deleted', 'eq', False)
    visibility_query = Q('contributors', 'eq', auth.user)
    no_folders_query = Q('is_folder', 'eq', False)
    if include_public:
        visibility_query = visibility_query | Q('is_public', 'eq', True)
    odm_query = title_query & not_deleted_query & visibility_query & no_folders_query
    # Exclude current node from query if provided
    if node:
        nin = [node._id] + node.node_ids
        odm_query = (
            odm_query &
            Q('_id', 'nin', nin)
        )
    nodes = Node.find(odm_query)
    count = nodes.count()
    pages = math.ceil(count / size)
    validate_page_num(page, pages)
    # islice() rejects float bounds, so cast the float start/size here.
    return {
        'nodes': [
            _serialize_node_search(each)
            for each in islice(nodes, int(start), int(start + size))
            if each.contributors
        ],
        'total': count,
        'pages': pages,
        'page': page
    }
def _add_pointers(node, pointers, auth):
"""
:param Node node: Node to which pointers will be added
:param list pointers: Nodes to add as pointers
"""
added = False
for pointer in pointers:
node.add_pointer(pointer, auth, save=False)
added = True
if added:
node.save()
@collect_auth
def move_pointers(auth):
    """Move pointer from one node to another node.
    """
    from_node_id = request.json.get('fromNodeId')
    to_node_id = request.json.get('toNodeId')
    pointers_to_move = request.json.get('pointerIds')
    if from_node_id is None or to_node_id is None or pointers_to_move is None:
        raise HTTPError(http.BAD_REQUEST)
    from_node = Node.load(from_node_id)
    to_node = Node.load(to_node_id)
    if from_node is None or to_node is None:
        raise HTTPError(http.BAD_REQUEST)
    for target_id in pointers_to_move:
        # Translate the target node id into the Pointer record on from_node.
        pointer = Pointer.load(from_node.pointing_at(target_id))
        pointer_node = Node.load(target_id)
        if pointer is None:
            raise HTTPError(http.BAD_REQUEST)
        try:
            from_node.rm_pointer(pointer, auth=auth)
        except ValueError:
            raise HTTPError(http.BAD_REQUEST)
        from_node.save()
        try:
            _add_pointers(to_node, [pointer_node], auth)
        except ValueError:
            raise HTTPError(http.BAD_REQUEST)
    return {}, 200, None
@collect_auth
def add_pointer(auth):
    """Add a single pointer to a node using only JSON parameters

    Expects ``toNodeID`` and ``pointerID`` in the request body; responds
    400 when either is missing or does not resolve to a node.
    """
    to_node_id = request.json.get('toNodeID')
    pointer_to_move = request.json.get('pointerID')
    if not (to_node_id and pointer_to_move):
        raise HTTPError(http.BAD_REQUEST)
    pointer = Node.load(pointer_to_move)
    to_node = Node.load(to_node_id)
    # Unknown ids load as None; fail fast with a 400 instead of letting
    # _add_pointers raise AttributeError (an uncaught 500).
    if pointer is None or to_node is None:
        raise HTTPError(http.BAD_REQUEST)
    try:
        _add_pointers(to_node, [pointer], auth)
    except ValueError:
        raise HTTPError(http.BAD_REQUEST)
@must_have_permission(WRITE)
@must_not_be_registration
def add_pointers(auth, node, **kwargs):
    """Add pointers to a node.
    """
    node_ids = request.json.get('nodeIds')
    if not node_ids:
        raise HTTPError(http.BAD_REQUEST)
    pointer_nodes = [Node.load(node_id) for node_id in node_ids]
    try:
        _add_pointers(node, pointer_nodes, auth)
    except ValueError:
        raise HTTPError(http.BAD_REQUEST)
    return {}
@must_have_permission(WRITE)
@must_not_be_registration
def remove_pointer(auth, node, **kwargs):
    """Remove a pointer from a node, raising a 400 if the pointer is not
    in `node.nodes`.
    """
    # TODO: since these a delete request, shouldn't use request body. put pointer
    # id in the URL instead
    pointer_id = request.json.get('pointerId')
    pointer = Pointer.load(pointer_id) if pointer_id is not None else None
    if pointer is None:
        raise HTTPError(http.BAD_REQUEST)
    try:
        node.rm_pointer(pointer, auth=auth)
    except ValueError:
        raise HTTPError(http.BAD_REQUEST)
    node.save()
@must_be_valid_project  # injects project
@must_have_permission(WRITE)
@must_not_be_registration
def remove_pointer_from_folder(auth, node, pointer_id, **kwargs):
    """Remove a pointer from a node, raising a 400 if the pointer is not
    in `node.nodes`.
    """
    if pointer_id is None:
        raise HTTPError(http.BAD_REQUEST)
    # Translate the target node id into the id of the Pointer record.
    pointer = Pointer.load(node.pointing_at(pointer_id))
    if pointer is None:
        raise HTTPError(http.BAD_REQUEST)
    try:
        node.rm_pointer(pointer, auth=auth)
    except ValueError:
        raise HTTPError(http.BAD_REQUEST)
    node.save()
@must_be_valid_project  # injects project
@must_have_permission(WRITE)
@must_not_be_registration
def remove_pointers_from_folder(auth, node, **kwargs):
    """Remove multiple pointers from a node, raising a 400 if the pointer is not
    in `node.nodes`.
    """
    pointer_ids = request.json.get('pointerIds')
    if pointer_ids is None:
        raise HTTPError(http.BAD_REQUEST)
    for target_id in pointer_ids:
        # Translate each target node id into its Pointer record.
        pointer = Pointer.load(node.pointing_at(target_id))
        if pointer is None:
            raise HTTPError(http.BAD_REQUEST)
        try:
            node.rm_pointer(pointer, auth=auth)
        except ValueError:
            raise HTTPError(http.BAD_REQUEST)
    node.save()
@must_have_permission(WRITE)
@must_not_be_registration
def fork_pointer(auth, node, **kwargs):
    """Fork a pointer. Raises BAD_REQUEST if pointer not provided, not found,
    or not present in `nodes`.
    """
    pointer = Pointer.load(request.json.get('pointerId'))
    if pointer is None:
        # TODO: Change this to 404?
        raise HTTPError(http.BAD_REQUEST)
    try:
        node.fork_pointer(pointer, auth=auth, save=True)
    except ValueError:
        raise HTTPError(http.BAD_REQUEST)
def abbrev_authors(node):
    """Abbreviate a node's authors to the lead author, plus ' et al.' when
    there is more than one visible contributor."""
    lead = node.visible_contributors[0]
    name = lead.family_name or lead.given_name or lead.fullname
    suffix = ' et al.' if len(node.visible_contributor_ids) > 1 else ''
    return name + suffix
def serialize_pointer(pointer, auth):
    """Serialize a pointer's target for display; mask nodes the viewer
    cannot see."""
    node = get_pointer_parent(pointer)
    if not node.can_view(auth):
        return {
            'url': None,
            'title': 'Private Component',
            'authorShort': 'Private Author(s)',
        }
    return {
        'id': node._id,
        'url': node.url,
        'title': node.title,
        'authorShort': abbrev_authors(node),
    }
@must_be_contributor_or_public
def get_pointed(auth, node, **kwargs):
    """View that returns the pointers for a project."""
    pointed = []
    for each in node.pointed:
        # exclude folders
        if get_pointer_parent(each).is_folder:
            continue
        pointed.append(serialize_pointer(each, auth))
    return {'pointed': pointed}
| |
# Copyright (c) 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A number of common spirv result checks coded in mixin classes.
A test case can use these checks by declaring their enclosing mixin classes
as superclass and providing the expected_* variables required by the check_*()
methods in the mixin classes.
"""
import difflib
import functools
import os
import re
import subprocess
import traceback
from spirv_test_framework import SpirvTest
from builtins import bytes
def convert_to_unix_line_endings(source):
    """Return ``source`` with all CRLF and bare CR line endings as LF."""
    # CRLF must be collapsed first so its CR isn't doubled into two LFs.
    without_crlf = source.replace('\r\n', '\n')
    return without_crlf.replace('\r', '\n')
def substitute_file_extension(filename, extension):
    """Substitutes file extension, respecting known shader extensions.

    foo.vert -> foo.vert.[extension] [similarly for .frag, .comp, etc.]
    foo.glsl -> foo.[extension]
    foo.unknown -> foo.[extension]
    foo -> foo.[extension]
    """
    # Use endswith with a tuple: the old filename[-5:] membership test could
    # never match the 7-character '.spvasm' entry in its own list.
    known_extensions = ('.vert', '.frag', '.tesc', '.tese', '.geom', '.comp',
                        '.spvasm')
    if filename.endswith(known_extensions):
        return filename + '.' + extension
    return filename.rsplit('.', 1)[0] + '.' + extension
def get_object_filename(source_filename):
    """Return the SPIR-V object (.spv) filename for ``source_filename``."""
    object_extension = 'spv'
    return substitute_file_extension(source_filename, object_extension)
def get_assembly_filename(source_filename):
    """Return the SPIR-V assembly (.spvasm) filename for ``source_filename``."""
    assembly_extension = 'spvasm'
    return substitute_file_extension(source_filename, assembly_extension)
def verify_file_non_empty(filename):
    """Checks that a given file exists and is not empty.

    Returns a (success, message) pair.
    """
    if not os.path.isfile(filename):
        return False, 'Cannot find file: ' + filename
    if os.path.getsize(filename) == 0:
        return False, 'Empty file: ' + filename
    return True, ''
class ReturnCodeIsZero(SpirvTest):
    """Mixin class for checking that the return code is zero."""

    def check_return_code_is_zero(self, status):
        """Return (True, '') on a zero return code, else (False, message)."""
        if not status.returncode:
            return True, ''
        return False, 'Non-zero return code: {ret}\n'.format(
            ret=status.returncode)
class NoOutputOnStdout(SpirvTest):
    """Mixin class for checking that there is no output on stdout."""

    def check_no_output_on_stdout(self, status):
        """Return (True, '') when stdout is empty, else (False, message)."""
        if not status.stdout:
            return True, ''
        return False, 'Non empty stdout: {out}\n'.format(out=status.stdout)
class NoOutputOnStderr(SpirvTest):
    """Mixin class for checking that there is no output on stderr."""

    def check_no_output_on_stderr(self, status):
        """Return (True, '') when stderr is empty, else (False, message)."""
        if not status.stderr:
            return True, ''
        return False, 'Non empty stderr: {err}\n'.format(err=status.stderr)
class SuccessfulReturn(ReturnCodeIsZero, NoOutputOnStdout, NoOutputOnStderr):
    """Mixin class for checking that return code is zero and no output on
    stdout and stderr."""
    # Pure aggregation of the three mixins above; adds no members of its own.
    pass
class NoGeneratedFiles(SpirvTest):
    """Mixin class for checking that there is no file generated."""

    def check_no_generated_files(self, status):
        """Compare the directory listing against the declared inputs."""
        all_files = os.listdir(status.directory)
        input_files = status.input_filenames
        # When the inputs are given as paths under the test directory,
        # qualify the directory listing the same way before comparing.
        if all(f.startswith(status.directory) for f in input_files):
            all_files = [os.path.join(status.directory, f) for f in all_files]
        generated_files = set(all_files) - set(input_files)
        if generated_files:
            return False, 'Extra files generated: {}'.format(generated_files)
        return True, ''
class CorrectBinaryLengthAndPreamble(SpirvTest):
    """Provides methods for verifying preamble for a SPIR-V binary."""

    def verify_binary_length_and_header(self, binary, spv_version=0x10000):
        """Checks that the given SPIR-V binary has valid length and header.

        Returns:
            False, error string if anything is invalid
            True, '' otherwise
        Args:
            binary: a bytes object containing the SPIR-V binary
            spv_version: target SPIR-V version number, with same encoding
                 as the version word in a SPIR-V header.
        """

        def read_word(binary, index, little_endian):
            """Reads the index-th word from the given binary file."""
            word = binary[index * 4:(index + 1) * 4]
            if little_endian:
                word = reversed(word)
            return functools.reduce(lambda w, b: (w << 8) | b, word, 0)

        def check_endianness(binary):
            """Checks the endianness of the given SPIR-V binary.

            Returns:
              True if it's little endian, False if it's big endian.
              None if magic number is wrong.
            """
            first_word = read_word(binary, 0, True)
            if first_word == 0x07230203:
                return True
            first_word = read_word(binary, 0, False)
            if first_word == 0x07230203:
                return False
            return None

        num_bytes = len(binary)
        if num_bytes % 4 != 0:
            return False, ('Incorrect SPV binary: size should be a multiple'
                           ' of words')
        if num_bytes < 20:
            return False, 'Incorrect SPV binary: size less than 5 words'
        # The SPIR-V header is five 4-byte words (magic, version, generator,
        # bound, schema), i.e. 20 bytes. The previous slice [0:19] truncated
        # the fifth word, so the schema check below read only 3 of its bytes.
        preamble = binary[0:20]
        little_endian = check_endianness(preamble)
        # SPIR-V module magic number
        if little_endian is None:
            return False, 'Incorrect SPV binary: wrong magic number'
        # SPIR-V version number
        version = read_word(preamble, 1, little_endian)
        # TODO(dneto): Recent Glslang uses version word 0 for opengl_compat
        # profile
        if version != spv_version and version != 0:
            return False, 'Incorrect SPV binary: wrong version number'
        # Shaderc-over-Glslang (0x000d....) or
        # SPIRV-Tools (0x0007....) generator number
        if read_word(preamble, 2, little_endian) != 0x000d0007 and \
                read_word(preamble, 2, little_endian) != 0x00070000:
            return False, ('Incorrect SPV binary: wrong generator magic '
                           'number')
        # reserved for instruction schema
        if read_word(preamble, 4, little_endian) != 0:
            # It is the 5th *word* (bytes 16-19) that is reserved.
            return False, 'Incorrect SPV binary: the 5th word should be 0'
        return True, ''
class CorrectObjectFilePreamble(CorrectBinaryLengthAndPreamble):
    """Provides methods for verifying preamble for a SPV object file."""

    def verify_object_file_preamble(self, filename, spv_version=0x10000):
        """Checks that the given SPIR-V binary file has correct preamble.

        Returns a (success, message) pair.
        """
        success, message = verify_file_non_empty(filename)
        if not success:
            return False, message

        with open(filename, 'rb') as object_file:
            # Read the whole file: the length/header check needs the total
            # byte count as well as the header words. The old seek/tell
            # dance computed an unused num_bytes, and a trailing
            # `return True, ''` after the `with` block was unreachable.
            binary = bytes(object_file.read())
            return self.verify_binary_length_and_header(binary, spv_version)
class CorrectAssemblyFilePreamble(SpirvTest):
    """Provides methods for verifying preamble for a SPV assembly file."""

    def verify_assembly_file_preamble(self, filename):
        """Checks the first three comment lines of a SPIR-V assembly file."""
        success, message = verify_file_non_empty(filename)
        if not success:
            return False, message

        with open(filename) as assembly_file:
            first_lines = [assembly_file.readline() for _ in range(3)]

        if (first_lines[0] != '; SPIR-V\n' or
                first_lines[1] != '; Version: 1.0\n' or
                not first_lines[2].startswith(
                    '; Generator: Google Shaderc over Glslang;')):
            return False, 'Incorrect SPV assembly'

        return True, ''
class ValidObjectFile(SuccessfulReturn, CorrectObjectFilePreamble):
    """Mixin class for checking that every input file generates a valid SPIR-V 1.0
    object file following the object file naming rule, and there is no output on
    stdout/stderr."""

    def check_object_file_preamble(self, status):
        """Verify the preamble of each input's derived .spv file."""
        for input_filename in status.input_filenames:
            object_path = os.path.join(
                status.directory, get_object_filename(input_filename))
            success, message = self.verify_object_file_preamble(object_path)
            if not success:
                return False, message
        return True, ''
class ValidObjectFile1_3(ReturnCodeIsZero, CorrectObjectFilePreamble):
    """Mixin class for checking that every input file generates a valid SPIR-V 1.3
    object file following the object file naming rule, and there is no output on
    stdout/stderr."""

    def check_object_file_preamble(self, status):
        """Verify each input's derived .spv file against SPIR-V 1.3."""
        for input_filename in status.input_filenames:
            object_path = os.path.join(
                status.directory, get_object_filename(input_filename))
            # 0x10300 is SPIR-V version 1.3 encoded as a header version word.
            success, message = self.verify_object_file_preamble(
                object_path, 0x10300)
            if not success:
                return False, message
        return True, ''
class ValidObjectFileWithAssemblySubstr(SuccessfulReturn,
                                        CorrectObjectFilePreamble):
    """Mixin class for checking that every input file generates a valid object
    file following the object file naming rule, there is no output on
    stdout/stderr, and the disassmbly contains a specified substring per
    input.
    """

    def check_object_file_disassembly(self, status):
        """Disassemble each object file and look for the expected substring."""
        for an_input in status.inputs:
            object_filename = get_object_filename(an_input.filename)
            obj_file = str(os.path.join(status.directory, object_filename))
            success, message = self.verify_object_file_preamble(obj_file)
            if not success:
                return False, message
            cmd = [status.test_manager.disassembler_path, '--no-color', obj_file]
            process = subprocess.Popen(
                args=cmd,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                cwd=status.directory)
            # communicate() yields bytes; decode so the `in` test below works
            # on Python 3 as well as Python 2.
            raw_output, _ = process.communicate(None)
            disassembly = raw_output.decode('utf-8')
            if not isinstance(an_input.assembly_substr, str):
                return False, 'Missing assembly_substr member'
            if an_input.assembly_substr not in disassembly:
                return False, ('Incorrect disassembly output:\n{asm}\n'
                               'Expected substring not found:\n{exp}'.format(
                                   asm=disassembly, exp=an_input.assembly_substr))
        return True, ''
class ValidNamedObjectFile(SuccessfulReturn, CorrectObjectFilePreamble):
    """Mixin class for checking that a list of object files with the given
    names are correctly generated, and there is no output on stdout/stderr.

    To mix in this class, subclasses need to provide expected_object_filenames
    as the expected object filenames.
    """

    def check_object_file_preamble(self, status):
        """Verify the preamble of every explicitly-named object file."""
        for expected_name in self.expected_object_filenames:
            success, message = self.verify_object_file_preamble(
                os.path.join(status.directory, expected_name))
            if not success:
                return False, message
        return True, ''
class ValidFileContents(SpirvTest):
    """Mixin class to test that a specific file contains specific text
    To mix in this class, subclasses need to provide expected_file_contents as
    the contents of the file and target_filename to determine the location."""

    def check_file(self, status):
        """Compare target_filename's contents against expected_file_contents.

        expected_file_contents may be a plain string (exact match, with a
        unified diff on mismatch) or a compiled regex (searched).
        """
        target_filename = os.path.join(status.directory, self.target_filename)
        if not os.path.isfile(target_filename):
            return False, 'Cannot find file: ' + target_filename
        with open(target_filename, 'r') as target_file:
            file_contents = target_file.read()
            if isinstance(self.expected_file_contents, str):
                if file_contents == self.expected_file_contents:
                    return True, ''
                return False, ('Incorrect file output: \n{act}\n'
                               'Expected:\n{exp}'
                               'With diff:\n{diff}'.format(
                                   act=file_contents,
                                   exp=self.expected_file_contents,
                                   diff='\n'.join(
                                       list(
                                           difflib.unified_diff(
                                               self.expected_file_contents.split('\n'),
                                               file_contents.split('\n'),
                                               fromfile='expected_output',
                                               tofile='actual_output')))))
            elif isinstance(self.expected_file_contents, type(re.compile(''))):
                if self.expected_file_contents.search(file_contents):
                    return True, ''
                return False, ('Incorrect file output: \n{act}\n'
                               'Expected matching regex pattern:\n{exp}'.format(
                                   act=file_contents,
                                   exp=self.expected_file_contents.pattern))
        # Only reached when expected_file_contents is neither a str nor a
        # compiled regex; the old message wrongly claimed the file could not
        # be opened.
        return False, (
            'expected_file_contents must be a str or a compiled regex, '
            'got {}'.format(type(self.expected_file_contents)))
class ValidAssemblyFile(SuccessfulReturn, CorrectAssemblyFilePreamble):
    """Mixin class for checking that every input file generates a valid assembly
    file following the assembly file naming rule, and there is no output on
    stdout/stderr."""

    def check_assembly_file_preamble(self, status):
        """Verify the preamble of each input's derived .spvasm file."""
        for input_filename in status.input_filenames:
            assembly_path = os.path.join(
                status.directory, get_assembly_filename(input_filename))
            success, message = self.verify_assembly_file_preamble(assembly_path)
            if not success:
                return False, message
        return True, ''
class ValidAssemblyFileWithSubstr(ValidAssemblyFile):
    """Mixin class for checking that every input file generates a valid assembly
    file following the assembly file naming rule, there is no output on
    stdout/stderr, and all assembly files have the given substring specified
    by expected_assembly_substr.

    To mix in this class, subclasses need to provde expected_assembly_substr
    as the expected substring.
    """

    def check_assembly_with_substr(self, status):
        """Check each assembly file's preamble and expected substring."""
        for input_filename in status.input_filenames:
            assembly_filename = get_assembly_filename(input_filename)
            assembly_path = os.path.join(status.directory, assembly_filename)
            success, message = self.verify_assembly_file_preamble(assembly_path)
            if not success:
                return False, message
            # Open via the full path inside the test directory; the bare
            # filename only resolved when the CWD happened to be that
            # directory, unlike the preamble check above.
            with open(assembly_path, 'r') as f:
                content = f.read()
            if self.expected_assembly_substr not in convert_to_unix_line_endings(
                    content):
                return False, ('Incorrect assembly output:\n{asm}\n'
                               'Expected substring not found:\n{exp}'.format(
                                   asm=content,
                                   exp=self.expected_assembly_substr))
        return True, ''
class ValidAssemblyFileWithoutSubstr(ValidAssemblyFile):
    """Mixin class for checking that every input file generates a valid assembly
    file following the assembly file naming rule, there is no output on
    stdout/stderr, and no assembly files have the given substring specified
    by unexpected_assembly_substr.

    To mix in this class, subclasses need to provide unexpected_assembly_substr
    as the substring we expect not to see.
    """

    def check_assembly_for_substr(self, status):
        """Verify each assembly file's preamble and that the substring is absent."""
        for input_filename in status.input_filenames:
            assembly_filename = get_assembly_filename(input_filename)
            full_path = os.path.join(status.directory, assembly_filename)
            success, message = self.verify_assembly_file_preamble(full_path)
            if not success:
                return False, message
            # Fix: open via the joined path (see preamble check above); the
            # bare relative name only resolves when cwd == status.directory.
            with open(full_path, 'r') as f:
                content = f.read()
            if self.unexpected_assembly_substr in convert_to_unix_line_endings(
                    content):
                # Fix: the format string names the placeholder {unexp} but the
                # keyword passed was exp=, so building this failure message
                # raised KeyError instead of reporting the real problem.
                return False, ('Incorrect assembly output:\n{asm}\n'
                               'Unexpected substring found:\n{unexp}'.format(
                                   asm=content, unexp=self.unexpected_assembly_substr))
        return True, ''
class ValidNamedAssemblyFile(SuccessfulReturn, CorrectAssemblyFilePreamble):
    """Mixin: each filename listed in expected_assembly_filenames must be a
    correctly generated assembly file, with no output on stdout/stderr.

    Subclasses provide expected_assembly_filenames.
    """

    # NOTE(review): the hook is named check_object_file_preamble even though it
    # verifies assembly files — presumably kept for framework hook discovery;
    # confirm before renaming.
    def check_object_file_preamble(self, status):
        """Return (True, '') iff every named assembly file has a valid preamble."""
        for name in self.expected_assembly_filenames:
            ok, reason = self.verify_assembly_file_preamble(
                os.path.join(status.directory, name))
            if not ok:
                return False, reason
        return True, ''
class ErrorMessage(SpirvTest):
    """Mixin class for tests that fail with a specific error message.

    To mix in this class, subclasses need to provide expected_error as the
    expected error message.

    The test should fail if the subprocess was terminated by a signal.
    """

    def check_has_error_message(self, status):
        """Verify the command failed with exactly expected_error on stderr."""
        if not status.returncode:
            return False, ('Expected error message, but returned success from '
                           'command execution')
        if status.returncode < 0:
            # On Unix, a negative value -N for Popen.returncode indicates
            # termination by signal N.
            # https://docs.python.org/2/library/subprocess.html
            return False, ('Expected error message, but command was terminated by '
                           'signal ' + str(status.returncode))
        if not status.stderr:
            return False, 'Expected error message, but no output on stderr'
        # Fix: status.stderr is bytes (the sibling ErrorMessageSubstr and
        # WarningMessage mixins both .decode('utf8') it), so comparing it
        # against the expected str would never match; decode first.
        if self.expected_error != convert_to_unix_line_endings(
                status.stderr.decode('utf8')):
            return False, ('Incorrect stderr output:\n{act}\n'
                           'Expected:\n{exp}'.format(
                               act=status.stderr, exp=self.expected_error))
        return True, ''
class ErrorMessageSubstr(SpirvTest):
    """Mixin: the command must fail and its stderr must contain a given
    substring.

    Subclasses provide expected_error_substr. The check also fails when the
    subprocess was killed by a signal.
    """

    def check_has_error_message_as_substring(self, status):
        """Return (True, '') iff a failed run printed expected_error_substr on stderr."""
        if not status.returncode:
            return False, ('Expected error message, but returned success from '
                           'command execution')
        if status.returncode < 0:
            # Negative return codes on Unix mean "terminated by signal -N".
            # https://docs.python.org/2/library/subprocess.html
            return False, ('Expected error message, but command was terminated by '
                           'signal ' + str(status.returncode))
        if not status.stderr:
            return False, 'Expected error message, but no output on stderr'
        stderr_text = convert_to_unix_line_endings(status.stderr.decode('utf8'))
        if self.expected_error_substr in stderr_text:
            return True, ''
        return False, ('Incorrect stderr output:\n{act}\n'
                       'Expected substring not found in stderr:\n{exp}'.format(
                           act=status.stderr, exp=self.expected_error_substr))
class WarningMessage(SpirvTest):
    """Mixin: the command must succeed while emitting a specific warning.

    Subclasses provide expected_warning, the exact expected stderr text.
    """

    def check_has_warning_message(self, status):
        """Return (True, '') iff the run succeeded and stderr equals expected_warning."""
        if status.returncode:
            return False, ('Expected warning message, but returned failure from'
                           ' command execution')
        if not status.stderr:
            return False, 'Expected warning message, but no output on stderr'
        actual = convert_to_unix_line_endings(status.stderr.decode('utf8'))
        if actual != self.expected_warning:
            return False, ('Incorrect stderr output:\n{act}\n'
                           'Expected:\n{exp}'.format(
                               act=status.stderr, exp=self.expected_warning))
        return True, ''
class ValidObjectFileWithWarning(NoOutputOnStdout, CorrectObjectFilePreamble,
                                 WarningMessage):
    """Mixin: every input file must produce a valid object file (per the object
    file naming rule) while a specific warning message is emitted."""

    def check_object_file_preamble(self, status):
        """Return (True, '') iff every expected object file has a valid preamble."""
        for source in status.input_filenames:
            obj_path = os.path.join(
                status.directory, get_object_filename(source))
            ok, reason = self.verify_object_file_preamble(obj_path)
            if not ok:
                return False, reason
        return True, ''
class ValidAssemblyFileWithWarning(NoOutputOnStdout,
                                   CorrectAssemblyFilePreamble, WarningMessage):
    """Mixin: every input file must produce a valid assembly file (per the
    assembly file naming rule) while a specific warning message is emitted."""

    def check_assembly_file_preamble(self, status):
        """Return (True, '') iff every expected assembly file has a valid preamble."""
        for source in status.input_filenames:
            asm_path = os.path.join(
                status.directory, get_assembly_filename(source))
            ok, reason = self.verify_assembly_file_preamble(asm_path)
            if not ok:
                return False, reason
        return True, ''
class StdoutMatch(SpirvTest):
    """Mixin class for tests that can expect output on stdout.

    To mix in this class, subclasses need to provide expected_stdout as the
    expected stdout output.

    For expected_stdout, if it's True, then they expect something on stdout but
    will not check what it is. If it's a string, expect an exact match. If it's
    anything else, it is assumed to be a compiled regular expression which will
    be matched against re.search(). It will expect
    expected_stdout.search(status.stdout) to be true.
    """

    def check_stdout_match(self, status):
        """Check stdout against expected_stdout (presence / exact / regex match)."""
        # "True" in this case means we expect something on stdout, but we do not
        # care what it is, we want to distinguish this from "blah" which means we
        # expect exactly the string "blah".
        if self.expected_stdout is True:
            if not status.stdout:
                return False, 'Expected something on stdout'
        # Idiom fix: isinstance() instead of `type(...) == str`.
        elif isinstance(self.expected_stdout, str):
            if self.expected_stdout != convert_to_unix_line_endings(
                    status.stdout.decode('utf8')):
                return False, ('Incorrect stdout output:\n{ac}\n'
                               'Expected:\n{ex}'.format(
                                   ac=status.stdout, ex=self.expected_stdout))
        else:
            # Anything else is assumed to be a compiled regular expression.
            converted = convert_to_unix_line_endings(status.stdout.decode('utf8'))
            if not self.expected_stdout.search(converted):
                return False, ('Incorrect stdout output:\n{ac}\n'
                               'Expected to match regex:\n{ex}'.format(
                                   ac=status.stdout.decode('utf8'),
                                   ex=self.expected_stdout.pattern))
        return True, ''
class StderrMatch(SpirvTest):
    """Mixin class for tests that can expect output on stderr.

    To mix in this class, subclasses need to provide expected_stderr as the
    expected stderr output.

    For expected_stderr, if it's True, then they expect something on stderr,
    but will not check what it is. If it's a string, expect an exact match.
    If it's anything else, it is assumed to be a compiled regular expression
    which will be matched against re.search(). It will expect
    expected_stderr.search(status.stderr) to be true.
    """

    def check_stderr_match(self, status):
        """Check stderr against expected_stderr (presence / exact / regex match)."""
        # "True" in this case means we expect something on stderr, but we do not
        # care what it is, we want to distinguish this from "blah" which means we
        # expect exactly the string "blah".
        if self.expected_stderr is True:
            if not status.stderr:
                return False, 'Expected something on stderr'
        # Idiom fix: isinstance() instead of `type(...) == str`.
        elif isinstance(self.expected_stderr, str):
            if self.expected_stderr != convert_to_unix_line_endings(
                    status.stderr.decode('utf8')):
                return False, ('Incorrect stderr output:\n{ac}\n'
                               'Expected:\n{ex}'.format(
                                   ac=status.stderr, ex=self.expected_stderr))
        else:
            # Anything else is assumed to be a compiled regular expression.
            if not self.expected_stderr.search(
                    convert_to_unix_line_endings(status.stderr.decode('utf8'))):
                return False, ('Incorrect stderr output:\n{ac}\n'
                               'Expected to match regex:\n{ex}'.format(
                                   ac=status.stderr,
                                   ex=self.expected_stderr.pattern))
        return True, ''
class StdoutNoWiderThan80Columns(SpirvTest):
    """Mixin: every line the command writes to stdout must fit in 80 columns."""

    def check_stdout_not_too_wide(self, status):
        """Return (True, '') iff no stdout line exceeds 80 characters."""
        if not status.stdout:
            return True, ''
        for line in status.stdout.splitlines():
            if len(line) > 80:
                return False, ('Stdout line longer than 80 columns: %s' % line)
        return True, ''
class NoObjectFile(SpirvTest):
    """Mixin: no input file may leave behind a corresponding object file."""

    def check_no_object_file(self, status):
        """Return (True, '') iff no derived object file exists on disk."""
        for source in status.input_filenames:
            candidate = os.path.join(
                status.directory, get_object_filename(source))
            print('checking %s' % candidate)
            if os.path.isfile(candidate):
                return False, (
                    'Expected no object file, but found: %s' % candidate)
        return True, ''
class NoNamedOutputFiles(SpirvTest):
    """Mixin: none of the files named in expected_output_filenames may exist.

    The expected_output_filenames member should be full pathnames.
    """

    def check_no_named_output_files(self, status):
        """Return (True, '') iff none of the named output files exist."""
        for path in self.expected_output_filenames:
            if os.path.isfile(path):
                return False, (
                    'Expected no output file, but found: %s' % path)
        return True, ''
class ExecutedListOfPasses(SpirvTest):
    """Mixin class for checking that a list of passes were executed.

    It works by analyzing the output of the --print-all flag to spirv-opt.

    For this mixin to work, the class member expected_passes should be a
    sequence of pass names as returned by Pass::name().
    """

    def check_list_of_executed_passes(self, status):
        """Verify the passes reported on stderr match expected_passes exactly."""
        # Collect all the output lines containing a pass name.
        pass_name_re = re.compile(r'.*IR before pass (?P<pass_name>[\S]+)')
        pass_names = []
        for line in status.stderr.decode('utf8').splitlines():
            match = pass_name_re.match(line)
            if match:
                pass_names.append(match.group('pass_name'))
        # Fix: zip() silently truncates to the shorter sequence, so a run that
        # executed too few (or too many) passes used to slip through as long
        # as the common prefix matched. Compare lengths explicitly first.
        if len(pass_names) != len(self.expected_passes):
            return False, (
                'Expected %d passes but found %d\n' % (
                    len(self.expected_passes), len(pass_names)))
        for (expected, actual) in zip(self.expected_passes, pass_names):
            if expected != actual:
                return False, (
                    'Expected pass "%s" but found pass "%s"\n' % (expected, actual))
        return True, ''
| |
#!/usr/bin/python
# AdaptML
# Standard library
import math  # fix: math.floor/math.pow are used below but math was never imported
import os
import pdb
import random
import sys
import time

# Third-party (scipy / numpy); star imports kept in their original order
from scipy.io import write_array
from scipy.io import read_array
from numpy.linalg import *
from numpy.core import *
from numpy.lib import *
from numpy import *

# Local modules
import multitree
import ML
start_time = time.time()  # NOTE(review): recorded but never used; the final timing print uses time.clock()
# Deep newick trees are parsed recursively; raise the limit accordingly.
sys.setrecursionlimit(25000)

# load inputs #
# Run parameters with their defaults. Each can be overridden on the command
# line with a "key=value" argument (e.g. "tree=my.newick mu=1.5").
tree_filename = None        # newick tree file; required ("tree=")
hab_num = 16                # initial number of habitats ("init_hab_num=")
outgroup = None             # outgroup leaf name, stored as a 1-element list ("outgroup=")
write_dir = './'            # directory for all output files ("write_dir=")
rateopt = 'avg'             # rate-optimisation mode forwarded to ML.LearnRates ("rateopt=")
mu = 1.00000000001          # initial migration rate ("mu=")
habitat_thresh = 0.10       # squared-distance threshold for collapsing similar habitats ("collapse_thresh=")
converge_thresh = 0.001     # convergence threshold for the refinement loop ("converge_thresh=")
inputs = sys.argv
# Parse "key=value" command-line arguments.
# NOTE(review): an argument without '=' raises IndexError; unknown keys are
# silently ignored; nothing checks that the required "tree=" was supplied.
for ind in range(1,len(inputs)):
    arg_parts = inputs[ind].split('=')
    code = arg_parts[0]
    arg = arg_parts[1]
    if code == 'tree':
        tree_filename = arg
    elif code == 'init_hab_num':
        hab_num = int(arg)
    elif code == 'outgroup':
        outgroup = [arg]
    elif code == 'converge_thresh':
        converge_thresh = float(arg)
    elif code == 'write_dir':
        write_dir = arg
    elif code == 'mu':
        mu = float(arg)
    elif code == 'rateopt':
        rateopt = arg
    elif code == 'collapse_thresh':
        habitat_thresh = float(arg)

# track stats
# Per-iteration statistics are appended to <write_dir>/stats.file.
stats_file = open(write_dir + '/stats.file','w')

# build the tree #
print "\nBuilding Tree"
tree_file = open(tree_filename,"r")
tree_string = tree_file.read().strip()
tree = multitree.multitree()
tree.build(tree_string)
tree_file.close()

# remove zero branches
# Zero/negative branch lengths break the likelihood model, so they are
# replaced with the smallest strictly-positive branch length in the tree.
min_len = min([b.length for b in tree.branch_list if b.length > 0.0])
for b in tree.branch_list:
    if b.length <= 0.0:
        b.length = min_len
################################
# build an initial rate matrix #
################################

# how many species are there?
species_dict = tree.species_count          # mapping: species/filter name -> leaf count (from the parsed tree)
total_leaves = sum(species_dict.values())  # NOTE(review): computed but never used below
habitat_list = []
filter_list = species_dict.keys()
for i in range(hab_num):
    habitat_list.append("habitat " + str(i))

# create O(n^3) habitat matrix
# habitat_matrix[habitat][filter] holds the probability of observing `filter`
# in `habitat`; each row is initialised randomly and normalised to sum to 1.
print "Instantiating Habitat Matrix"
habitat_matrix = {}
for habitat in habitat_list:
    habitat_matrix[habitat] = {}
    for filt in filter_list:
        habitat_matrix[habitat][filt] = random.random()
    # normalize
    scale = sum(habitat_matrix[habitat].values())
    for filt in filter_list:
        habitat_matrix[habitat][filt] /= scale

# Seed values for the refinement loop below.
score = -9999.99999999   # current ML score (sentinel until first evaluation)
diff = 1.0               # change in habitat distributions between iterations
old_diff = 1.0           # previous iteration's diff (used to detect oscillation)
print "Learning Habitats:"
# Outer loop: refine the habitat distributions to convergence, then collapse
# near-duplicate habitats; repeat until no habitat can be removed (or fewer
# than two remain).
while 1:
    counter = 0
    print "\t" + str(len(habitat_matrix)) + " habitats"
    print "\tRefinement Steps [d(Habitat Score)]: "
    # Column header for this round's statistics.
    stats_str = ""
    stats_str += "counter\t"
    stats_str += "habs\t"
    stats_str += "ML score\t"
    stats_str += "mu\t"
    stats_str += "\thabitat dist diff\t"
    stats_file.write(stats_str + "\n")
    # Inner loop: iterative refinement of mu and the habitat matrix until the
    # habitat distributions stop changing (or we detect oscillation / hit the
    # iteration cap).
    while 1:
        stats_str = ""
        stats_str += str(counter) + "\t"
        stats_str += str(len(habitat_matrix)) + "\t"
        stats_str += str(score) + "\t"
        stats_str += str(mu) + "\t"
        stats_str += str(diff) + "\t"
        stats_file.write(stats_str + "\n")
        stats_file.flush()
        print "\t\t" + str(counter) + "\t" + str(diff)
        # wipe the likelihoods off of the tree
        ML.TreeWipe(tree)
        # learn the likelihoods
        ML.LearnLiks(tree,mu,habitat_matrix)
        # estimate the states (by making each node trifurcating...)
        ML.EstimateStates(tree.a_node,habitat_matrix)
        # upgrade guesses for mu and habitat matrix
        # NOTE(review): this_migrate/new_migrate capture the pre- and
        # post-update matrix objects for the convergence check below.
        this_migrate = habitat_matrix
        mu, habitat_matrix = ML.LearnRates(tree,mu,habitat_matrix,rateopt)
        new_migrate = habitat_matrix
        # stop?
        old_diff = diff
        score, diff = ML.CheckConverge(tree,new_migrate,this_migrate)
        if diff < converge_thresh:
            break
        # this should break the loop if you end up bouncing back and
        # forth between the same values
        sig_figs = 8
        diff1 = math.floor(diff*math.pow(10,sig_figs))
        diff2 = math.floor(old_diff*math.pow(10,sig_figs))
        if diff1 > 0:
            if diff1 == diff2:
                break
        # Hard cap on refinement steps.
        if counter > 500:
            break
        counter += 1
    #########################
    # remove similar groups #
    #########################
    # Collapse habitats whose filter distributions are within habitat_thresh
    # (squared Euclidean distance) of an already-kept habitat.
    print "Removing Redundant Habitats"
    new_habitats = {}
    for habitat_1 in habitat_matrix:
        old_habitat = habitat_matrix[habitat_1]
        add_habitat = True
        for habitat_2 in new_habitats:
            new_habitat = new_habitats[habitat_2]
            # NOTE(review): `score` and `diff` are reused here as scratch
            # variables, clobbering the ML score / convergence diff that get
            # logged at the start of the next refinement round.
            score = 0
            for this_filter in old_habitat:
                diff = old_habitat[this_filter] - new_habitat[this_filter]
                score += math.pow(diff,2)
            if score < habitat_thresh:
                add_habitat = False
        if add_habitat:
            new_habitats[habitat_1] = habitat_matrix[habitat_1]
    # If nothing was removed, the habitat set is stable: stop learning.
    if len(new_habitats) == len(habitat_matrix):
        break
    habitat_matrix = new_habitats
    # With fewer than two habitats there is nothing left to distinguish.
    if len(habitat_matrix) < 2:
        break
# The trailing comma keeps both messages on one output line (Python 2 print).
print "Learned " + str(len(habitat_matrix)) + " habitats",
print "in " + str(time.clock()) + " seconds"
stats_file.write("\nEnd Of Run\n")

############################
# take the best parameters #
############################

# find the branch you're interested in by looking at the partition on
# leaves the branch defines
# Re-root the tree on the branch separating the outgroup from everything else.
for b in tree.branch_list:
    names_1 = b.ends[0].name_dict[b.ends[1]]
    names_2 = b.ends[1].name_dict[b.ends[0]]
    if names_1 == outgroup or names_2 == outgroup:
        tree.rootify(b)

# write out the results
# The learned rate and habitat matrix are dumped as plain text files.
mu_file = open(write_dir + '/mu.val','w')
mu_file.write(str(mu))
mu_file.close()
habitat_file = open(write_dir + '/habitat.matrix','w')
habitat_file.write(str(habitat_matrix))
habitat_file.close()
stats_file.close()
| |
# Predicting Continuous Target Variable with Regression Analysis

# Explore the Housing Dataset
# Load the UCI Housing dataset straight from the UCI archive; the file is
# whitespace-separated with no header row, so column names are set manually.
import pandas as pd
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data', header=None, sep='\s+')
df.columns = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', \
              'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
df.head()

# Visualizing the important characteristics of a dataset
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='whitegrid', context='notebook')
# Scatterplot matrix of five representative columns.
# NOTE(review): `size=` was renamed to `height=` in seaborn >= 0.9; this call
# only works on older seaborn versions — confirm the pinned version.
cols = ['LSTAT', 'INDUS', 'NOX', 'RM', 'MEDV']
sns.pairplot(df[cols], size=2.5)
plt.show()
'''
In the following code, we will use Numpy's corrcoef function on the five feature columns that
we previously visualized in the scatterplot matrix, and we will use seaborn's heatmap function
to plot the correlation matrix array as a heat map.
'''
import numpy as np
# Pearson correlation matrix of the selected columns, rendered as a heat map.
cm = np.corrcoef(df[cols].values.T)
sns.set(font_scale=1.5)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size':15}, yticklabels=cols, xticklabels=cols)
plt.show()

# Implementing an ordinary least squares linear regression model
# Solving regression for regression parameters with gradient descent
class LinearRegressionGD(object):
    """Ordinary least squares linear regression fitted via batch gradient descent.

    Parameters
    ----------
    eta : float
        Learning rate.
    n_iter : int
        Number of passes over the training set.

    Attributes
    ----------
    w_ : 1d-array
        Weights after fitting; w_[0] is the bias unit.
    cost_ : list
        Sum-of-squared-errors cost value in each epoch.
    """

    def __init__(self, eta=0.001, n_iter=20):
        self.eta = eta
        self.n_iter = n_iter

    def fit(self, X, y):
        """Fit training data X of shape (n_samples, n_features) against targets y."""
        self.w_ = np.zeros(1 + X.shape[1])
        self.cost_ = []
        for i in range(self.n_iter):
            output = self.net_input(X)
            errors = (y - output)
            # Bug fix: update ALL feature weights (w_[1:]), not only w_[1].
            # The original `self.w_[1] += self.eta * X.T.dot(errors)` raises a
            # shape error for more than one feature and only happened to work
            # in the single-feature demo below.
            self.w_[1:] += self.eta * X.T.dot(errors)
            self.w_[0] += self.eta * errors.sum()
            cost = (errors ** 2).sum() / 2.0
            self.cost_.append(cost)
        return self

    def net_input(self, X):
        """Compute the linear combination X.w + bias."""
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def predict(self, X):
        """Return continuous predictions (identity activation)."""
        return self.net_input(X)
'''
To see our LinearRegressionGD regressor in action, let's use the RM(number of rooms)
variable from the Housing Data Set as the explanatory variable to train a model that can
predict MEDV (the housing prices). Furthermore, we will standardize the variable for
better convergence of the GD algorithm. '''
X = df[['RM']].values
y = df['MEDV'].values
from sklearn.preprocessing import StandardScaler
sc_x = StandardScaler()
sc_y = StandardScaler()
X_std = sc_x.fit_transform(X)
# NOTE(review): modern scikit-learn requires a 2D array here; y is 1D, so this
# line only works on old sklearn versions (or needs y[:, np.newaxis]).
y_std = sc_y.fit_transform(y)
# Fit the gradient-descent regressor and plot cost per epoch to check
# convergence.
lr = LinearRegressionGD()
lr.fit(X_std, y_std)
plt.plot(range(1, lr.n_iter+1), lr.cost_)
plt.ylabel('SSE')
plt.xlabel('Epoch')
plt.show()
'''
Now let's visualize how well the linear regression line fits the training data. To to
so, we will define a simple helper function that will plot a scatterplot of the training
samples and add the regression line:'''
def lin_regplot(X, y, model):
    """Scatter the (X, y) training points and overlay the model's fitted line.

    Draws onto the current matplotlib axes and returns None.
    """
    plt.scatter(X, y, c='blue')
    fitted = model.predict(X)
    plt.plot(X, fitted, color='red')
    return None
'''
Now we will use this lin_regplot function to plot the number of rooms against
house pricces:'''
lin_regplot(X_std, y_std, lr)
plt.xlabel('Average number of rooms [RM] (standardized)')
plt.ylabel('Price in $1000\'s [MEDV] (standardized)')
plt.show()
# Predict the price of a 5-room house: standardize the input, predict in the
# standardized space, then invert the target scaling.
# NOTE(review): modern scikit-learn expects a 2D array in transform();
# passing [5.0] only works on old versions.
num_rooms_std = sc_x.transform([5.0])
price_std = lr.predict(num_rooms_std)
print "Price in $1000's: %.3f" % \
sc_y.inverse_transform(price_std)
print 'Slope: %.3f' % lr.w_[1]
print 'Intercept: %.3f' % lr.w_[0]

# Estimating the coefficient of a regression model via scikit-learn
# Same single-feature fit, now on unstandardized data via sklearn's closed-form
# LinearRegression for comparison with the GD version above.
from sklearn.linear_model import LinearRegression
slr = LinearRegression()
slr.fit(X, y)
print 'Slope: %.3f' % slr.coef_[0]
print 'Intercept: %.3f' % slr.intercept_
lin_regplot(X, y, slr)
plt.xlabel('Average number of rooms [RM]')
plt.ylabel('Price in $1000\'s [MEDV]')
plt.show()

# Fitting a robust regression model using RANSAC
# RANSAC repeatedly fits on random subsets and splits samples into
# inliers/outliers, making the fit robust to outliers.
# NOTE(review): `residual_metric` was removed in scikit-learn 0.20 (replaced
# by `loss`); this call only works on old versions.
from sklearn.linear_model import RANSACRegressor
ransac = RANSACRegressor(LinearRegression(),
                         max_trials=100,
                         min_samples=50,
                         residual_metric=lambda x: np.sum(np.abs(x), axis=1),
                         residual_threshold=5.0,
                         random_state=0)
ransac.fit(X, y)
inlier_mask = ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Plot inliers/outliers and the RANSAC regression line for rooms in [3, 10).
line_X = np.arange(3, 10, 1)
line_y_ransac = ransac.predict(line_X[:, np.newaxis])
plt.scatter(X[inlier_mask], y[inlier_mask], c='blue', marker='o', label='Inliers')
plt.scatter(X[outlier_mask], y[outlier_mask], c='lightgreen', marker='s', label='Outliers')
plt.plot(line_X, line_y_ransac, color='red')
plt.xlabel('Average number of rooms [RM]')
plt.ylabel('Price in $1000\'s [MEDV]')
plt.legend(loc='upper left')
plt.show()
print 'Slope: %.3f' % ransac.estimator_.coef_[0]
print 'Intercept: %.3f' % ransac.estimator_.intercept_
# Evaluating the performance of linear regression models
from sklearn.cross_validation import train_test_split
X = df.iloc[:, :-1].values
y = df['MEDV'].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
slr = LinearRegression()
slr.fit(X_train, y_train)
y_train_pred = slr.predict(X_train)
y_test_pred = slr.predict(X_test)
'''Using the following code, we will now plot a residual plot where we simply substract
the true target variables from our predicted responses:'''
plt.scatter(y_train_pred, y_train_pred - y_train, c='blue', marker='o', label='Training data')
plt.scatter(y_test_pred, y_test_pred - y_test, c='lightgreen', marker='s', label='Test data')
plt.xlabel('Predicted values')
plt.ylabel('Residuals')
plt.legend(loc='upper left')
plt.hlines(y=0, xmin=-10, xmax=50, lw=2, color='red')
plt.xlim([-10, 50])
plt.show()
# MSE
from sklearn.metrics import mean_squared_error
print 'MSE train: %.3f, test: %.3f' % (mean_squared_error(y_train, y_train_pred), mean_squared_error(y_test, y_test_pred))
#R^2
from sklearn.metrics import r2_score
print 'R^2 train: %3f, test: %.3f' % (r2_score(y_train, y_train_pred), r2_score(y_test, y_test_pred))
# Using regularized methods for regression
'''A Ridge Regression model can be initialized as follows:'''
from sklearn.linear_model import Ridge
ridge = Ridge(alpha=1.0)
'''Note that the regularization strength is regulated by the parameter alpha, which is
similar to the parameter lambda. Likewise, we can initialize a LASSO regressor from
the linear_model submodule:'''
from sklearn.linear_model import Lasso
lasso = Lasso(alpha=1.0)
'''Lastly, the ElasticNet implementation allows us to vary the L1 to L2 ratio'''
from sklearn.linear_model import ElasticNet
# NOTE(review): this rebinds `lasso` to the ElasticNet model, discarding the
# Lasso instance created just above.
lasso = ElasticNet(alpha=1.0, l1_ratio=0.5)
'''For example, if we set l1_ratio to 1.0, the ElasticNet regressor would be equal to LASSO regression.'''

# Turning a linear regression model into a curve-polynomial regression
'''1. Add a second degree polynomial term:'''
from sklearn.preprocessing import PolynomialFeatures
X = np.array([258.0, 270.0, 294.0,
              320.0, 342.0, 368.0,
              396.0, 446.0, 480.0, 586.0])[:, np.newaxis]
y = np.array([236.4, 234.4, 252.8,
              298.6, 314.2, 342.2,
              360.8, 368.0, 391.2,
              390.8])
lr = LinearRegression()
pr = LinearRegression()
quadratic = PolynomialFeatures(degree=2)
X_quad = quadratic.fit_transform(X)
'''2. Fit a simple linear regression model for comparison:'''
lr.fit(X, y)
X_fit = np.arange(250, 600, 10)[:, np.newaxis]
y_lin_fit = lr.predict(X_fit)
'''3. Fit a multiple regression model on the transformed feature for polynomial regression'''
pr.fit(X_quad, y)
y_quad_fit = pr.predict(quadratic.fit_transform(X_fit))
# Compare the linear and quadratic fits visually ...
plt.scatter(X, y, label='training points')
plt.plot(X_fit, y_lin_fit, label='linear fit', linestyle='--')
plt.plot(X_fit, y_quad_fit, label='quadratic fit')
plt.legend(loc='upper left')
plt.show()
# ... and numerically, via training MSE and R^2.
y_lin_pred = lr.predict(X)
y_quad_pred = pr.predict(X_quad)
print 'Training MSE linear: %.3f, quadratic: %.3f' % (mean_squared_error(y, y_lin_pred), mean_squared_error(y, y_quad_pred))
print 'Training R^2 linear: %.3f, quadratic: %.3f' % (r2_score(y, y_lin_pred), r2_score(y, y_quad_pred))

# Modeling nonlinear relationships in the Housing Dataset
'''By executing the following code, we will model the relationship between house prices and LSTAT
(percent lower status of the population) using second degree(quadratic) and third degree (cubic)
polynomials and compare it to a linear fit'''
X = df[['LSTAT']].values
y = df['MEDV'].values
regr = LinearRegression()
# create polynomial features
quadratic = PolynomialFeatures(degree=2)
cubic = PolynomialFeatures(degree=3)
X_quad = quadratic.fit_transform(X)
X_cubic = cubic.fit_transform(X)
# linear fit
X_fit = np.arange(X.min(), X.max(), 1)[:, np.newaxis]
regr = regr.fit(X, y)
y_lin_fit = regr.predict(X_fit)
linear_r2 = r2_score(y, regr.predict(X))
# quadratic fit
regr = regr.fit(X_quad, y)
y_quad_fit = regr.predict(quadratic.fit_transform(X_fit))
quadratic_r2 = r2_score(y, regr.predict(X_quad))
# cubic fit
regr = regr.fit(X_cubic, y)
y_cubic_fit = regr.predict(cubic.fit_transform(X_fit))
cubic_r2 = r2_score(y, regr.predict(X_cubic))
# plot results
plt.scatter(X, y, label='training points', color='lightgray')
plt.plot(X_fit, y_lin_fit, label='linear (d=1), $R^2=%.2f$)' % linear_r2, color='blue', lw=2, linestyle=':')
plt.plot(X_fit, y_quad_fit, label='quadratic (d=2), $R^2=%.2f$)' % quadratic_r2, color='red', lw=2, linestyle='-')
plt.plot(X_fit, y_cubic_fit, label='cubic (d=3), $R^2=%.2f$)' % cubic_r2, color='green', lw=2, linestyle='--')
plt.xlabel('% lower status of the population [LSTAT]')
plt.ylabel('Price in $1000\'s [MEDV]')
plt.legend(loc='upper right')
plt.show()
'''
In addition, polynomial features are not always the best choice for modeling nonlinear
relationships. For example, just by looking at the MEDV-LSTAT scatterplot, we could
propose that a log transformation of the LSTAT feature variable and the square root of
MEDV may project the data onto a linear feature space suitable for a linear regression
fit. Let's test this hypothesis by executing the following code:
'''
# transform features
X_log = np.log(X)
y_sqrt = np.sqrt(y)
# fit features
X_fit = np.arange(X_log.min()-1, X_log.max()+1, 1)[:, np.newaxis]
regr = regr.fit(X_log, y_sqrt)
y_lin_fit = regr.predict(X_fit)
linear_r2 = r2_score(y_sqrt, regr.predict(X_log))
# plot results
plt.scatter(X_log, y_sqrt,
            label='training points',
            color='lightgray')
plt.plot(X_fit, y_lin_fit,
         label='linear (d=1), $R^2=%.2f$' % linear_r2,
         color='blue',
         lw=2)
plt.xlabel('log(% lower status of the population [LSTAT])')
plt.ylabel('$\sqrt{Price \; in \; \$1000\'s [MEDV]}$')
plt.legend(loc='lower left')
plt.show()

# Dealing with nonlinear relationships using random forests
# Decision tree regression
from sklearn.tree import DecisionTreeRegressor
X = df[['LSTAT']].values
y = df['MEDV'].values
tree = DecisionTreeRegressor(max_depth=3)
tree.fit(X, y)
# Sort by LSTAT so the piecewise-constant tree prediction plots as a line.
sort_idx = X.flatten().argsort()
lin_regplot(X[sort_idx], y[sort_idx], tree)
plt.xlabel('% lower status of the population [LSTAT]')
plt.show()

# Random forest regression
'''Now, let's use all the features in the Housing Dataset to fit a random forest
regression model on 60 percent of the samples and evaluate its performance
on the remaining 40 percent.'''
X = df.iloc[:, :-1].values
y = df['MEDV'].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=1)
from sklearn.ensemble import RandomForestRegressor
# NOTE(review): criterion='mse' was renamed to 'squared_error' in
# scikit-learn 1.0; this spelling only works on older versions.
forest = RandomForestRegressor(n_estimators=1000, criterion='mse', random_state=1, n_jobs=-1)
forest.fit(X_train, y_train)
y_train_pred = forest.predict(X_train)
y_test_pred = forest.predict(X_test)
print 'MSE train: %.3f, test: %.3f' % (mean_squared_error(y_train, y_train_pred), mean_squared_error(y_test, y_test_pred))
print 'R^2 train: %.3f, test: %.3f' % (r2_score(y_train, y_train_pred), r2_score(y_test, y_test_pred))
'''Lastly, let's also take a look at the residuals of the prediction:'''
plt.scatter(y_train_pred,
            y_train_pred - y_train,
            c='black',
            marker='o',
            s=35,
            alpha=0.5,
            label='Training data')
plt.scatter(y_test_pred,
            y_test_pred - y_test,
            c='lightgreen',
            marker='s',
            s=35,
            alpha=0.7,
            label='Test data')
plt.xlabel('Predicted values')
plt.ylabel('Residuals')
plt.legend(loc='upper left')
plt.hlines(y=0, xmin=-10, xmax=50, lw=2, color='red')
plt.xlim([-10, 50])
plt.show()
| |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for resampler."""
import numpy as np
import pytest
import tensorflow as tf
from tensorflow_addons.image import resampler_ops
from tensorflow_addons.utils import test_utils
def _bilinearly_interpolate(data, x, y):
"""Performs bilinenar interpolation of grid data at user defined
coordinates.
This interpolation function:
a) implicitly pads the input data with 0s.
b) returns 0 when sampling outside the (padded) image.
The effect is that the sampled signal smoothly goes to 0 outside the
original input domain, rather than producing a jump discontinuity at
the image boundaries.
Args:
data: numpy array of shape `[data_height, data_width]` containing data
samples assumed to be defined at the corresponding pixel coordinates.
x: numpy array of shape `[warp_height, warp_width]` containing
x coordinates at which interpolation will be performed.
y: numpy array of shape `[warp_height, warp_width]` containing
y coordinates at which interpolation will be performed.
Returns:
Numpy array of shape `[warp_height, warp_width]` containing interpolated
values.
"""
shape = x.shape
x = np.asarray(x) + 1
y = np.asarray(y) + 1
data = np.pad(data, 1, "constant", constant_values=0)
x_0 = np.floor(x).astype(int)
x_1 = x_0 + 1
y_0 = np.floor(y).astype(int)
y_1 = y_0 + 1
x_0 = np.clip(x_0, 0, data.shape[1] - 1)
x_1 = np.clip(x_1, 0, data.shape[1] - 1)
y_0 = np.clip(y_0, 0, data.shape[0] - 1)
y_1 = np.clip(y_1, 0, data.shape[0] - 1)
i_a = data[y_0, x_0]
i_b = data[y_1, x_0]
i_c = data[y_0, x_1]
i_d = data[y_1, x_1]
w_a = (x_1 - x) * (y_1 - y)
w_b = (x_1 - x) * (y - y_0)
w_c = (x - x_0) * (y_1 - y)
w_d = (x - x_0) * (y - y_0)
samples = w_a * i_a + w_b * i_b + w_c * i_c + w_d * i_d
samples = samples.reshape(shape)
return samples
def _make_warp(batch_size, warp_height, warp_width, dtype):
"""Creates batch of warping coordinates."""
x, y = np.meshgrid(
np.linspace(0, warp_width - 1, warp_width),
np.linspace(0, warp_height - 1, warp_height),
)
warp = np.concatenate(
(
x.reshape([warp_height, warp_width, 1]),
y.reshape([warp_height, warp_width, 1]),
),
2,
)
warp = np.tile(warp.reshape([1, warp_height, warp_width, 2]), [batch_size, 1, 1, 1])
warp += np.random.randn(*warp.shape)
return warp.astype(dtype)
@pytest.mark.with_device(["cpu", "gpu"])
@pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64])
def test_op_forward_pass(dtype):
    """The resampler op must agree with the numpy bilinear reference."""
    np.random.seed(0)
    data_width = 7
    data_height = 9
    data_channels = 5
    warp_width = 4
    warp_height = 8
    batch_size = 10
    # Draw the warp first, then the data, to keep the RNG stream stable.
    warp = _make_warp(batch_size, warp_height, warp_width, dtype)
    data_shape = (batch_size, data_height, data_width, data_channels)
    data = np.random.rand(*data_shape).astype(dtype)
    outputs = resampler_ops.resampler(
        data=tf.constant(data), warp=tf.constant(warp))
    assert outputs.shape == (10, warp_height, warp_width, data_channels)
    # Generate reference output via bilinear interpolation in numpy,
    # channel by channel and batch by batch.
    reference_output = np.zeros_like(outputs)
    for batch in range(batch_size):
        for channel in range(data_channels):
            reference_output[batch, :, :, channel] = _bilinearly_interpolate(
                data[batch, :, :, channel],
                warp[batch, :, :, 0],
                warp[batch, :, :, 1],
            )
    test_utils.assert_allclose_according_to_type(
        outputs, reference_output, half_rtol=5e-3, half_atol=5e-3
    )
def test_op_errors():
    """Verify the resampler op rejects malformed data/warp tensor shapes."""
    batch_size = 10
    data_height, data_width, data_depth, data_channels = 9, 7, 3, 5
    warp_height, warp_width = 8, 4
    cases = [
        # Input data shape is not defined over a 2D grid, i.e. its shape is
        # not like (batch_size, data_height, data_width, data_channels).
        (
            (batch_size, data_height, data_width, data_depth, data_channels),
            (batch_size, warp_height, warp_width, 2),
            tf.errors.UnimplementedError,
            "Only bilinear interpolation is currently supported.",
        ),
        # Warp tensor must be at least a matrix, with shape [batch_size, 2].
        (
            (batch_size, data_height, data_width, data_channels),
            (batch_size,),
            tf.errors.InvalidArgumentError,
            "warp should be at least a matrix",
        ),
        # The batch size of the data and warp tensors must be the same.
        (
            (batch_size, data_height, data_width, data_channels),
            (batch_size + 1, warp_height, warp_width, 2),
            tf.errors.InvalidArgumentError,
            "Batch size of data and warp tensor",
        ),
        # The warp tensor must contain 2D coordinates, i.e. its last
        # dimension must be 2.
        (
            (batch_size, data_height, data_width, data_channels),
            (batch_size, warp_height, warp_width, 3),
            tf.errors.UnimplementedError,
            "Only bilinear interpolation is supported, warping",
        ),
    ]
    for data_shape, warp_shape, exception_type, message in cases:
        with pytest.raises(exception_type, match=message):
            resampler_ops.resampler(np.zeros(data_shape), np.zeros(warp_shape))
@pytest.mark.with_device(["cpu", "gpu"])
@pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64])
def test_op_backward_pass(dtype):
    """Check analytic gradients against float64 numeric gradients."""
    np.random.seed(13)
    batch_size = 3
    data_height, data_width, data_channels = 4, 5, 3
    warp_height, warp_width = 6, 2
    warp = _make_warp(batch_size, warp_height, warp_width, dtype)
    data = np.random.rand(batch_size, data_height, data_width, data_channels).astype(dtype)
    # Theoretical (analytic) gradient in the requested dtype.
    theoretical, _ = tf.test.compute_gradient(
        resampler_ops.resampler, [tf.constant(data), tf.constant(warp)]
    )
    # Numerical gradient computed in float64 for accuracy.
    _, numerical_64 = tf.test.compute_gradient(
        resampler_ops.resampler,
        [tf.constant(data, dtype=tf.float64), tf.constant(warp, dtype=tf.float64)],
    )
    for analytic, numeric in zip(theoretical, numerical_64):
        test_utils.assert_allclose_according_to_type(
            analytic, numeric, float_rtol=5e-5, float_atol=5e-5
        )
@pytest.mark.with_device(["cpu", "gpu"])
def test_op_empty_batch():
    """The op and its gradients must handle a zero-sized batch cleanly."""
    np.random.seed(13)
    dtype = np.float32
    batch_size = 0
    data_height, data_width, data_channels = 4, 5, 3
    warp_height, warp_width = 6, 2
    warp_tensor = tf.constant(_make_warp(batch_size, warp_height, warp_width, dtype))
    data_tensor = tf.constant(
        np.zeros((batch_size, data_height, data_width, data_channels)).astype(dtype)
    )
    with tf.GradientTape() as tape:
        tape.watch(data_tensor)
        tape.watch(warp_tensor)
        outputs = resampler_ops.resampler(data=data_tensor, warp=warp_tensor)
    data_grad, warp_grad = tape.gradient(outputs, (data_tensor, warp_tensor))
    # Gradients must exist and mirror the (empty) input shapes.
    assert data_grad.shape == (batch_size, data_height, data_width, data_channels)
    assert warp_grad.shape == (batch_size, warp_height, warp_width, 2)
| |
from __future__ import absolute_import
from datetime import datetime
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.fields import FieldDoesNotExist
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from .models import Article
class ModelTest(TestCase):
    """Core ORM behaviour tests for the ``Article`` model.

    Covers object creation, save(), field lookups, slicing, dates(),
    equality/hashing, extra(select=...) and bulk deletion.

    NOTE: this file is Python 2 only (long literals like ``0L`` and
    ``except Exception, e`` syntax).
    """

    def test_lookup(self):
        """Exercise basic save/lookup behaviour and the field-lookup API."""
        # No articles are in the system yet.
        self.assertQuerysetEqual(Article.objects.all(), [])
        # Create an Article.
        a = Article(
            id=None,
            headline='Area man programs in Python',
            pub_date=datetime(2005, 7, 28),
        )
        # Save it into the database. You have to call save() explicitly.
        a.save()
        # Now it has an ID.
        self.assertTrue(a.id != None)
        # Models have a pk property that is an alias for the primary key
        # attribute (by default, the 'id' attribute).
        self.assertEqual(a.pk, a.id)
        # Access database columns via Python attributes.
        self.assertEqual(a.headline, 'Area man programs in Python')
        self.assertEqual(a.pub_date, datetime(2005, 7, 28, 0, 0))
        # Change values by changing the attributes, then calling save().
        a.headline = 'Area woman programs in Python'
        a.save()
        # Article.objects.all() returns all the articles in the database.
        self.assertQuerysetEqual(Article.objects.all(),
            ['<Article: Area woman programs in Python>'])
        # Django provides a rich database lookup API.
        self.assertEqual(Article.objects.get(id__exact=a.id), a)
        self.assertEqual(Article.objects.get(headline__startswith='Area woman'), a)
        self.assertEqual(Article.objects.get(pub_date__year=2005), a)
        self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7), a)
        self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7, pub_date__day=28), a)
        self.assertEqual(Article.objects.get(pub_date__week_day=5), a)
        # The "__exact" lookup type can be omitted, as a shortcut.
        self.assertEqual(Article.objects.get(id=a.id), a)
        self.assertEqual(Article.objects.get(headline='Area woman programs in Python'), a)
        self.assertQuerysetEqual(
            Article.objects.filter(pub_date__year=2005),
            ['<Article: Area woman programs in Python>'],
        )
        self.assertQuerysetEqual(
            Article.objects.filter(pub_date__year=2004),
            [],
        )
        self.assertQuerysetEqual(
            Article.objects.filter(pub_date__year=2005, pub_date__month=7),
            ['<Article: Area woman programs in Python>'],
        )
        self.assertQuerysetEqual(
            Article.objects.filter(pub_date__week_day=5),
            ['<Article: Area woman programs in Python>'],
        )
        self.assertQuerysetEqual(
            Article.objects.filter(pub_date__week_day=6),
            [],
        )
        # Django raises an Article.DoesNotExist exception for get() if the
        # parameters don't match any object.
        self.assertRaisesRegexp(
            ObjectDoesNotExist,
            "Article matching query does not exist.",
            Article.objects.get,
            id__exact=2000,
        )
        self.assertRaisesRegexp(
            ObjectDoesNotExist,
            "Article matching query does not exist.",
            Article.objects.get,
            pub_date__year=2005,
            pub_date__month=8,
        )
        self.assertRaisesRegexp(
            ObjectDoesNotExist,
            "Article matching query does not exist.",
            Article.objects.get,
            pub_date__week_day=6,
        )
        # Lookup by a primary key is the most common case, so Django
        # provides a shortcut for primary-key exact lookups.
        # The following is identical to articles.get(id=a.id).
        self.assertEqual(Article.objects.get(pk=a.id), a)
        # pk can be used as a shortcut for the primary key name in any query.
        self.assertQuerysetEqual(Article.objects.filter(pk__in=[a.id]),
            ["<Article: Area woman programs in Python>"])
        # Model instances of the same type and same ID are considered equal.
        a = Article.objects.get(pk=a.id)
        b = Article.objects.get(pk=a.id)
        self.assertEqual(a, b)

    def test_object_creation(self):
        """Cover construction styles, defaults, slicing, dates() and delete()."""
        # Create an Article.
        a = Article(
            id=None,
            headline='Area man programs in Python',
            pub_date=datetime(2005, 7, 28),
        )
        # Save it into the database. You have to call save() explicitly.
        a.save()
        # You can initialize a model instance using positional arguments,
        # which should match the field order as defined in the model.
        a2 = Article(None, 'Second article', datetime(2005, 7, 29))
        a2.save()
        self.assertNotEqual(a2.id, a.id)
        self.assertEqual(a2.headline, 'Second article')
        self.assertEqual(a2.pub_date, datetime(2005, 7, 29, 0, 0))
        # ...or, you can use keyword arguments.
        a3 = Article(
            id=None,
            headline='Third article',
            pub_date=datetime(2005, 7, 30),
        )
        a3.save()
        self.assertNotEqual(a3.id, a.id)
        self.assertNotEqual(a3.id, a2.id)
        self.assertEqual(a3.headline, 'Third article')
        self.assertEqual(a3.pub_date, datetime(2005, 7, 30, 0, 0))
        # You can also mix and match position and keyword arguments, but
        # be sure not to duplicate field information.
        a4 = Article(None, 'Fourth article', pub_date=datetime(2005, 7, 31))
        a4.save()
        self.assertEqual(a4.headline, 'Fourth article')
        # Don't use invalid keyword arguments.
        self.assertRaisesRegexp(
            TypeError,
            "'foo' is an invalid keyword argument for this function",
            Article,
            id=None,
            headline='Invalid',
            pub_date=datetime(2005, 7, 31),
            foo='bar',
        )
        # You can leave off the value for an AutoField when creating an
        # object, because it'll get filled in automatically when you save().
        a5 = Article(headline='Article 6', pub_date=datetime(2005, 7, 31))
        a5.save()
        self.assertEqual(a5.headline, 'Article 6')
        # If you leave off a field with "default" set, Django will use
        # the default.
        a6 = Article(pub_date=datetime(2005, 7, 31))
        a6.save()
        self.assertEqual(a6.headline, u'Default headline')
        # For DateTimeFields, Django saves as much precision (in seconds)
        # as you give it.
        a7 = Article(
            headline='Article 7',
            pub_date=datetime(2005, 7, 31, 12, 30),
        )
        a7.save()
        self.assertEqual(Article.objects.get(id__exact=a7.id).pub_date,
            datetime(2005, 7, 31, 12, 30))
        a8 = Article(
            headline='Article 8',
            pub_date=datetime(2005, 7, 31, 12, 30, 45),
        )
        a8.save()
        self.assertEqual(Article.objects.get(id__exact=a8.id).pub_date,
            datetime(2005, 7, 31, 12, 30, 45))
        # Saving an object again doesn't create a new object -- it just saves
        # the old one.
        current_id = a8.id
        a8.save()
        self.assertEqual(a8.id, current_id)
        a8.headline = 'Updated article 8'
        a8.save()
        self.assertEqual(a8.id, current_id)
        # Check that != and == operators behave as expected on instances
        self.assertTrue(a7 != a8)
        self.assertFalse(a7 == a8)
        self.assertEqual(a8, Article.objects.get(id__exact=a8.id))
        self.assertTrue(Article.objects.get(id__exact=a8.id) != Article.objects.get(id__exact=a7.id))
        self.assertFalse(Article.objects.get(id__exact=a8.id) == Article.objects.get(id__exact=a7.id))
        # You can use 'in' to test for membership...
        self.assertTrue(a8 in Article.objects.all())
        # ... but there will often be more efficient ways if that is all you need:
        self.assertTrue(Article.objects.filter(id=a8.id).exists())
        # dates() returns a list of available dates of the given scope for
        # the given field.
        self.assertQuerysetEqual(
            Article.objects.dates('pub_date', 'year'),
            ["datetime.datetime(2005, 1, 1, 0, 0)"])
        self.assertQuerysetEqual(
            Article.objects.dates('pub_date', 'month'),
            ["datetime.datetime(2005, 7, 1, 0, 0)"])
        self.assertQuerysetEqual(
            Article.objects.dates('pub_date', 'day'),
            ["datetime.datetime(2005, 7, 28, 0, 0)",
             "datetime.datetime(2005, 7, 29, 0, 0)",
             "datetime.datetime(2005, 7, 30, 0, 0)",
             "datetime.datetime(2005, 7, 31, 0, 0)"])
        self.assertQuerysetEqual(
            Article.objects.dates('pub_date', 'day', order='ASC'),
            ["datetime.datetime(2005, 7, 28, 0, 0)",
             "datetime.datetime(2005, 7, 29, 0, 0)",
             "datetime.datetime(2005, 7, 30, 0, 0)",
             "datetime.datetime(2005, 7, 31, 0, 0)"])
        self.assertQuerysetEqual(
            Article.objects.dates('pub_date', 'day', order='DESC'),
            ["datetime.datetime(2005, 7, 31, 0, 0)",
             "datetime.datetime(2005, 7, 30, 0, 0)",
             "datetime.datetime(2005, 7, 29, 0, 0)",
             "datetime.datetime(2005, 7, 28, 0, 0)"])
        # dates() requires valid arguments.
        self.assertRaisesRegexp(
            TypeError,
            "dates\(\) takes at least 3 arguments \(1 given\)",
            Article.objects.dates,
        )
        self.assertRaisesRegexp(
            FieldDoesNotExist,
            "Article has no field named 'invalid_field'",
            Article.objects.dates,
            "invalid_field",
            "year",
        )
        self.assertRaisesRegexp(
            AssertionError,
            "'kind' must be one of 'year', 'month' or 'day'.",
            Article.objects.dates,
            "pub_date",
            "bad_kind",
        )
        self.assertRaisesRegexp(
            AssertionError,
            "'order' must be either 'ASC' or 'DESC'.",
            Article.objects.dates,
            "pub_date",
            "year",
            order="bad order",
        )
        # Use iterator() with dates() to return a generator that lazily
        # requests each result one at a time, to save memory.
        dates = []
        for article in Article.objects.dates('pub_date', 'day', order='DESC').iterator():
            dates.append(article)
        self.assertEqual(dates, [
            datetime(2005, 7, 31, 0, 0),
            datetime(2005, 7, 30, 0, 0),
            datetime(2005, 7, 29, 0, 0),
            datetime(2005, 7, 28, 0, 0)])
        # You can combine queries with & and |.
        s1 = Article.objects.filter(id__exact=a.id)
        s2 = Article.objects.filter(id__exact=a2.id)
        self.assertQuerysetEqual(s1 | s2,
            ["<Article: Area man programs in Python>",
             "<Article: Second article>"])
        self.assertQuerysetEqual(s1 & s2, [])
        # You can get the number of objects like this:
        self.assertEqual(len(Article.objects.filter(id__exact=a.id)), 1)
        # You can get items using index and slice notation.
        self.assertEqual(Article.objects.all()[0], a)
        self.assertQuerysetEqual(Article.objects.all()[1:3],
            ["<Article: Second article>", "<Article: Third article>"])
        s3 = Article.objects.filter(id__exact=a3.id)
        self.assertQuerysetEqual((s1 | s2 | s3)[::2],
            ["<Article: Area man programs in Python>",
             "<Article: Third article>"])
        # Slicing works with longs.
        self.assertEqual(Article.objects.all()[0L], a)
        self.assertQuerysetEqual(Article.objects.all()[1L:3L],
            ["<Article: Second article>", "<Article: Third article>"])
        self.assertQuerysetEqual((s1 | s2 | s3)[::2L],
            ["<Article: Area man programs in Python>",
             "<Article: Third article>"])
        # And can be mixed with ints.
        self.assertQuerysetEqual(Article.objects.all()[1:3L],
            ["<Article: Second article>", "<Article: Third article>"])
        # Slices (without step) are lazy:
        self.assertQuerysetEqual(Article.objects.all()[0:5].filter(),
            ["<Article: Area man programs in Python>",
             "<Article: Second article>",
             "<Article: Third article>",
             "<Article: Article 6>",
             "<Article: Default headline>"])
        # Slicing again works:
        self.assertQuerysetEqual(Article.objects.all()[0:5][0:2],
            ["<Article: Area man programs in Python>",
             "<Article: Second article>"])
        self.assertQuerysetEqual(Article.objects.all()[0:5][:2],
            ["<Article: Area man programs in Python>",
             "<Article: Second article>"])
        self.assertQuerysetEqual(Article.objects.all()[0:5][4:],
            ["<Article: Default headline>"])
        self.assertQuerysetEqual(Article.objects.all()[0:5][5:], [])
        # Some more tests!
        self.assertQuerysetEqual(Article.objects.all()[2:][0:2],
            ["<Article: Third article>", "<Article: Article 6>"])
        self.assertQuerysetEqual(Article.objects.all()[2:][:2],
            ["<Article: Third article>", "<Article: Article 6>"])
        self.assertQuerysetEqual(Article.objects.all()[2:][2:3],
            ["<Article: Default headline>"])
        # Using an offset without a limit is also possible.
        self.assertQuerysetEqual(Article.objects.all()[5:],
            ["<Article: Fourth article>",
             "<Article: Article 7>",
             "<Article: Updated article 8>"])
        # Also, once you have sliced you can't filter, re-order or combine
        self.assertRaisesRegexp(
            AssertionError,
            "Cannot filter a query once a slice has been taken.",
            Article.objects.all()[0:5].filter,
            id=a.id,
        )
        self.assertRaisesRegexp(
            AssertionError,
            "Cannot reorder a query once a slice has been taken.",
            Article.objects.all()[0:5].order_by,
            'id',
        )
        try:
            Article.objects.all()[0:1] & Article.objects.all()[4:5]
            self.fail('Should raise an AssertionError')
        except AssertionError, e:
            self.assertEqual(str(e), "Cannot combine queries once a slice has been taken.")
        except Exception, e:
            self.fail('Should raise an AssertionError, not %s' % e)
        # Negative slices are not supported, due to database constraints.
        # (hint: inverting your ordering might do what you need).
        try:
            Article.objects.all()[-1]
            self.fail('Should raise an AssertionError')
        except AssertionError, e:
            self.assertEqual(str(e), "Negative indexing is not supported.")
        except Exception, e:
            self.fail('Should raise an AssertionError, not %s' % e)
        error = None
        try:
            Article.objects.all()[0:-5]
        except Exception, e:
            error = e
        self.assertTrue(isinstance(error, AssertionError))
        self.assertEqual(str(error), "Negative indexing is not supported.")
        # An Article instance doesn't have access to the "objects" attribute.
        # That's only available on the class.
        self.assertRaisesRegexp(
            AttributeError,
            "Manager isn't accessible via Article instances",
            getattr,
            a7,
            "objects",
        )
        # Bulk delete test: How many objects before and after the delete?
        self.assertQuerysetEqual(Article.objects.all(),
            ["<Article: Area man programs in Python>",
             "<Article: Second article>",
             "<Article: Third article>",
             "<Article: Article 6>",
             "<Article: Default headline>",
             "<Article: Fourth article>",
             "<Article: Article 7>",
             "<Article: Updated article 8>"])
        Article.objects.filter(id__lte=a4.id).delete()
        self.assertQuerysetEqual(Article.objects.all(),
            ["<Article: Article 6>",
             "<Article: Default headline>",
             "<Article: Article 7>",
             "<Article: Updated article 8>"])

    @skipUnlessDBFeature('supports_microsecond_precision')
    def test_microsecond_precision(self):
        """Microseconds round-trip on backends that support them."""
        # In PostgreSQL, microsecond-level precision is available.
        a9 = Article(
            headline='Article 9',
            pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
        )
        a9.save()
        self.assertEqual(Article.objects.get(pk=a9.pk).pub_date,
            datetime(2005, 7, 31, 12, 30, 45, 180))

    @skipIfDBFeature('supports_microsecond_precision')
    def test_microsecond_precision_not_supported(self):
        """Microseconds are silently truncated on backends without support."""
        # In MySQL, microsecond-level precision isn't available. You'll lose
        # microsecond-level precision once the data is saved.
        a9 = Article(
            headline='Article 9',
            pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
        )
        a9.save()
        self.assertEqual(Article.objects.get(id__exact=a9.id).pub_date,
            datetime(2005, 7, 31, 12, 30, 45))

    def test_manually_specify_primary_key(self):
        # You can manually specify the primary key when creating a new object.
        a101 = Article(
            id=101,
            headline='Article 101',
            pub_date=datetime(2005, 7, 31, 12, 30, 45),
        )
        a101.save()
        a101 = Article.objects.get(pk=101)
        self.assertEqual(a101.headline, u'Article 101')

    def test_create_method(self):
        # You can create saved objects in a single step
        a10 = Article.objects.create(
            headline="Article 10",
            pub_date=datetime(2005, 7, 31, 12, 30, 45),
        )
        self.assertEqual(Article.objects.get(headline="Article 10"), a10)

    def test_year_lookup_edge_case(self):
        # Edge-case test: A year lookup should retrieve all objects in
        # the given year, including Jan. 1 and Dec. 31.
        a11 = Article.objects.create(
            headline='Article 11',
            pub_date=datetime(2008, 1, 1),
        )
        a12 = Article.objects.create(
            headline='Article 12',
            pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
        )
        self.assertQuerysetEqual(Article.objects.filter(pub_date__year=2008),
            ["<Article: Article 11>", "<Article: Article 12>"])

    def test_unicode_data(self):
        # Unicode data works, too.
        a = Article(
            headline=u'\u6797\u539f \u3081\u3050\u307f',
            pub_date=datetime(2005, 7, 28),
        )
        a.save()
        self.assertEqual(Article.objects.get(pk=a.id).headline,
            u'\u6797\u539f \u3081\u3050\u307f')

    def test_hash_function(self):
        # Model instances have a hash function, so they can be used in sets
        # or as dictionary keys. Two models compare as equal if their primary
        # keys are equal.
        a10 = Article.objects.create(
            headline="Article 10",
            pub_date=datetime(2005, 7, 31, 12, 30, 45),
        )
        a11 = Article.objects.create(
            headline='Article 11',
            pub_date=datetime(2008, 1, 1),
        )
        a12 = Article.objects.create(
            headline='Article 12',
            pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
        )
        s = set([a10, a11, a12])
        self.assertTrue(Article.objects.get(headline='Article 11') in s)

    def test_extra_method_select_argument_with_dashes_and_values(self):
        # The 'select' argument to extra() supports names with dashes in
        # them, as long as you use values().
        a10 = Article.objects.create(
            headline="Article 10",
            pub_date=datetime(2005, 7, 31, 12, 30, 45),
        )
        a11 = Article.objects.create(
            headline='Article 11',
            pub_date=datetime(2008, 1, 1),
        )
        a12 = Article.objects.create(
            headline='Article 12',
            pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
        )
        dicts = Article.objects.filter(
            pub_date__year=2008).extra(
                select={'dashed-value': '1'}
            ).values('headline', 'dashed-value')
        self.assertEqual([sorted(d.items()) for d in dicts],
            [[('dashed-value', 1), ('headline', u'Article 11')], [('dashed-value', 1), ('headline', u'Article 12')]])

    def test_extra_method_select_argument_with_dashes(self):
        # If you use 'select' with extra() and names containing dashes on a
        # query that's *not* a values() query, those extra 'select' values
        # will silently be ignored.
        a10 = Article.objects.create(
            headline="Article 10",
            pub_date=datetime(2005, 7, 31, 12, 30, 45),
        )
        a11 = Article.objects.create(
            headline='Article 11',
            pub_date=datetime(2008, 1, 1),
        )
        a12 = Article.objects.create(
            headline='Article 12',
            pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
        )
        articles = Article.objects.filter(
            pub_date__year=2008).extra(
                select={'dashed-value': '1', 'undashedvalue': '2'})
        self.assertEqual(articles[0].undashedvalue, 2)
| |
# Copyright (c) 2005 The Regents of The University of Michigan
# Copyright (c) 2010 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# Gabe Black
# metric prefixes (SI, decimal) used as multipliers by the to*() converters
# defined below.
exa = 1.0e18
peta = 1.0e15
tera = 1.0e12
giga = 1.0e9
mega = 1.0e6
kilo = 1.0e3
milli = 1.0e-3
micro = 1.0e-6
nano = 1.0e-9
pico = 1.0e-12
femto = 1.0e-15
atto = 1.0e-18
# power of 2 prefixes (IEC binary: Ki/Mi/Gi/... as integer multipliers)
kibi = 1024
mebi = kibi * 1024
gibi = mebi * 1024
tebi = gibi * 1024
pebi = tebi * 1024
exbi = pebi * 1024
# memory size configuration stuff
def toFloat(value):
    """Convert a string with an optional SI or binary suffix to a float.

    '4Gi' -> 4 * 2**30, '4G' -> 4e9, '10m' -> 0.01, plain numbers pass
    straight through float().
    """
    if not isinstance(value, str):
        raise TypeError("wrong type '%s' should be str" % type(value))
    # Two-character binary suffixes must be tried before the
    # single-character SI ones (e.g. 'Mi' before 'M').
    conversions = (
        ('Ei', exbi), ('Pi', pebi), ('Ti', tebi),
        ('Gi', gibi), ('Mi', mebi), ('ki', kibi),
        ('E', exa), ('P', peta), ('T', tera),
        ('G', giga), ('M', mega), ('k', kilo),
        ('m', milli), ('u', micro), ('n', nano),
        ('p', pico), ('f', femto),
    )
    for suffix, multiplier in conversions:
        if value.endswith(suffix):
            return float(value[:-len(suffix)]) * multiplier
    return float(value)
def toInteger(value):
    """Convert a suffixed string to an integer, rejecting fractional results."""
    num = toFloat(value)
    whole = long(num)
    # Reject values that lose precision when truncated to an integer.
    if num != whole:
        raise ValueError("cannot convert '%s' to integer" % num)
    return whole
# Accepted spellings for boolean strings; toBool() looks up the
# lower-cased input here.
_bool_dict = {
    'true' : True, 't' : True, 'yes' : True, 'y' : True, '1' : True,
    'false' : False, 'f' : False, 'no' : False, 'n' : False, '0' : False
}
def toBool(value):
    """Map a human-readable boolean string (case-insensitive) to True/False."""
    if not isinstance(value, str):
        raise TypeError("wrong type '%s' should be str" % type(value))
    lowered = value.lower()
    if lowered not in _bool_dict:
        raise ValueError("cannot convert '%s' to bool" % lowered)
    return _bool_dict[lowered]
def toFrequency(value):
    """Convert a frequency string ('2GHz', '500MHz', ...) to Hz as a float."""
    if not isinstance(value, str):
        raise TypeError("wrong type '%s' should be str" % type(value))
    # Longest suffixes first; bare 'Hz' is the 1x fallback.
    for suffix, multiplier in (('THz', tera), ('GHz', giga), ('MHz', mega),
                               ('kHz', kilo), ('Hz', 1.0)):
        if value.endswith(suffix):
            return float(value[:-len(suffix)]) * multiplier
    raise ValueError("cannot convert '%s' to frequency" % value)
def toLatency(value):
    """Convert a latency string ('10ns', '2ms', ...) to seconds as a float."""
    if not isinstance(value, str):
        raise TypeError("wrong type '%s' should be str" % type(value))
    # Two-character suffixes must be tried before the bare 's' fallback.
    for suffix, multiplier in (('ps', pico), ('ns', nano), ('us', micro),
                               ('ms', milli), ('s', 1.0)):
        if value.endswith(suffix):
            return float(value[:-len(suffix)]) * multiplier
    raise ValueError("cannot convert '%s' to latency" % value)
def anyToLatency(value):
    """Interpret the string as either a frequency or a latency and return
    the corresponding clock period in seconds."""
    if not isinstance(value, str):
        raise TypeError("wrong type '%s' should be str" % type(value))
    # A frequency string is inverted into a period.
    try:
        freq = toFrequency(value)
    except ValueError:
        pass
    else:
        if freq != 0:
            return 1 / freq
        return freq
    # Otherwise the string must already be a latency.
    try:
        return toLatency(value)
    except ValueError:
        pass
    raise ValueError("cannot convert '%s' to clock period" % value)
def anyToFrequency(value):
    """Interpret the string as either a frequency or a latency and return
    the corresponding frequency in Hz.

    (The previous docstring, "result is a clock period", was copied from
    anyToLatency; this function returns a frequency, not a period.)
    """
    if not isinstance(value, str):
        raise TypeError("wrong type '%s' should be str" % type(value))
    # A frequency string passes through directly.
    try:
        val = toFrequency(value)
        return val
    except ValueError:
        pass
    # A latency string is inverted into a frequency.
    try:
        val = toLatency(value)
        if val != 0:
            val = 1 / val
        return val
    except ValueError:
        pass
    raise ValueError("cannot convert '%s' to clock period" % value)
def toNetworkBandwidth(value):
    """Convert a network bandwidth string ('10Gbps', ...) to bits/s as a float.

    Unlike the other converters, a suffix-less numeric string is accepted
    here and interpreted as bits per second.
    """
    if not isinstance(value, str):
        raise TypeError("wrong type '%s' should be str" % type(value))
    if value.endswith('Tbps'):
        return float(value[:-4]) * tera
    elif value.endswith('Gbps'):
        return float(value[:-4]) * giga
    elif value.endswith('Mbps'):
        return float(value[:-4]) * mega
    elif value.endswith('kbps'):
        return float(value[:-4]) * kilo
    elif value.endswith('bps'):
        return float(value[:-3])
    else:
        # Bare numbers are valid. The 'raise ValueError' that used to follow
        # this return was unreachable dead code and has been removed.
        return float(value)
def toMemoryBandwidth(value):
    """Convert a memory bandwidth string ('4GB/s', ...) to bytes/s as a float.

    Note the multipliers are binary (powers of 1024), not decimal.
    """
    if not isinstance(value, str):
        raise TypeError("wrong type '%s' should be str" % type(value))
    # Longest suffixes first; bare 'B/s' is the 1x fallback.
    for suffix, multiplier in (('PB/s', pebi), ('TB/s', tebi), ('GB/s', gibi),
                               ('MB/s', mebi), ('kB/s', kibi), ('B/s', 1.0)):
        if value.endswith(suffix):
            return float(value[:-len(suffix)]) * multiplier
    raise ValueError("cannot convert '%s' to memory bandwidth" % value)
def toMemorySize(value):
    """Convert a memory size string ('512MB', '4GB', ...) to bytes.

    Multipliers are binary (powers of 1024); the result is integral.
    """
    if not isinstance(value, str):
        raise TypeError("wrong type '%s' should be str" % type(value))
    # Longest suffixes first; bare 'B' is the 1x fallback.
    for suffix, multiplier in (('PB', pebi), ('TB', tebi), ('GB', gibi),
                               ('MB', mebi), ('kB', kibi), ('B', 1)):
        if value.endswith(suffix):
            return long(value[:-len(suffix)]) * multiplier
    raise ValueError("cannot convert '%s' to memory size" % value)
def toIpAddress(value):
    """Parse a dotted-quad IPv4 address string into a 32-bit integer."""
    if not isinstance(value, str):
        raise TypeError("wrong type '%s' should be str" % type(value))
    octets = value.split('.')
    if len(octets) != 4:
        raise ValueError('invalid ip address %s' % value)
    result = 0
    for octet in octets:
        num = int(octet)
        if not 0 <= num <= 0xff:
            raise ValueError('invalid ip address %s' % value)
        # Fold each octet into the accumulator, most significant first.
        result = (result << 8) | num
    return result
def toIpNetmask(value):
    """Parse 'ip/netmask' where the netmask is a bit count or a dotted quad.

    Returns (ip, prefix_length) as a pair of integers.
    """
    if not isinstance(value, str):
        raise TypeError("wrong type '%s' should be str" % type(value))
    (ip, netmask) = value.split('/')
    ip = toIpAddress(ip)
    netmaskParts = netmask.split('.')
    if len(netmaskParts) == 1:
        # CIDR-style bit count.
        bits = int(netmask)
        if not 0 <= bits <= 32:
            raise ValueError('invalid netmask %s' % netmask)
        return (ip, bits)
    if len(netmaskParts) == 4:
        # Dotted-quad form: must be a contiguous run of leading one bits.
        netmaskNum = toIpAddress(netmask)
        if netmaskNum == 0:
            return (ip, 0)
        testVal = 0
        for i in range(32):
            testVal |= (1 << (31 - i))
            if testVal == netmaskNum:
                return (ip, i + 1)
    raise ValueError('invalid netmask %s' % netmask)
def toIpWithPort(value):
    """Parse 'ip:port' into an (ip_integer, port_integer) tuple."""
    if not isinstance(value, str):
        raise TypeError("wrong type '%s' should be str" % type(value))
    (ip, port) = value.split(':')
    ip = toIpAddress(ip)
    # Port must fit in 16 bits.
    if not 0 <= int(port) <= 0xffff:
        raise ValueError('invalid port %s' % port)
    return (ip, int(port))
| |
import asyncio
import collections
import logging
import os
import re
import sys
import time
import warnings
from contextlib import contextmanager
from functools import wraps
from io import StringIO
from itertools import chain
from types import SimpleNamespace
from unittest import TestCase, skipIf, skipUnless
from xml.dom.minidom import Node, parseString
from django.apps import apps
from django.apps.registry import Apps
from django.conf import UserSettingsHolder, settings
from django.core import mail
from django.core.exceptions import ImproperlyConfigured
from django.core.signals import request_started
from django.db import DEFAULT_DB_ALIAS, connections, reset_queries
from django.db.models.options import Options
from django.template import Template
from django.test.signals import setting_changed, template_rendered
from django.urls import get_script_prefix, set_script_prefix
from django.utils.translation import deactivate
try:
import jinja2
except ImportError:
jinja2 = None
# Public API of this test-utilities module.
__all__ = (
    'Approximate', 'ContextList', 'isolate_lru_cache', 'get_runner',
    'CaptureQueriesContext',
    'ignore_warnings', 'isolate_apps', 'modify_settings', 'override_settings',
    'override_system_checks', 'tag',
    'requires_tz_support',
    'setup_databases', 'setup_test_environment', 'teardown_test_environment',
)
# Whether the platform can switch time zones at runtime (POSIX tzset()).
TZ_SUPPORT = hasattr(time, 'tzset')
class Approximate:
    """Wrap a value so equality comparisons tolerate rounding error.

    Two values compare equal when they match exactly or when their
    absolute difference rounds to zero at ``places`` decimal places.
    """

    def __init__(self, val, places=7):
        self.val = val
        self.places = places

    def __repr__(self):
        return repr(self.val)

    def __eq__(self, other):
        if self.val == other:
            return True
        return round(abs(self.val - other), self.places) == 0
class ContextList(list):
    """A list of template contexts offering dict-style key access.

    Indexing with a string searches each contained context in order and
    returns the first match; any other index behaves like a plain list.
    """

    def __getitem__(self, key):
        if not isinstance(key, str):
            return super().__getitem__(key)
        for subcontext in self:
            if key in subcontext:
                return subcontext[key]
        raise KeyError(key)

    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default

    def __contains__(self, key):
        try:
            self[key]
        except KeyError:
            return False
        return True

    def keys(self):
        """
        Flattened keys of subcontexts.
        """
        # Each subcontext yields its dicts; each dict yields its keys.
        return {key for subcontext in self for d in subcontext for key in d}
def instrumented_test_render(self, context):
    """
    Template render method instrumented to emit the template_rendered
    signal, so the test Client can capture the contexts used.
    """
    template_rendered.send(sender=self, template=self, context=context)
    rendered = self.nodelist.render(context)
    return rendered
class _TestState:
    # Namespace used by setup_test_environment()/teardown_test_environment()
    # to stash the settings they temporarily override (saved_data attribute).
    pass
def setup_test_environment(debug=None):
    """
    Perform global pre-test setup, such as installing the instrumented template
    renderer and setting the email backend to the locmem email backend.
    """
    if hasattr(_TestState, 'saved_data'):
        # Running setup twice would overwrite the previously saved values.
        raise RuntimeError(
            "setup_test_environment() was already called and can't be called "
            "again without first calling teardown_test_environment()."
        )
    debug = settings.DEBUG if debug is None else debug
    # Snapshot everything we are about to override so teardown can restore it.
    _TestState.saved_data = SimpleNamespace(
        allowed_hosts=settings.ALLOWED_HOSTS,
        debug=settings.DEBUG,
        email_backend=settings.EMAIL_BACKEND,
        template_render=Template._render,
    )
    # The test client sends requests with HTTP_HOST 'testserver'.
    settings.ALLOWED_HOSTS = [*settings.ALLOWED_HOSTS, 'testserver']
    settings.DEBUG = debug
    settings.EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
    Template._render = instrumented_test_render
    mail.outbox = []
    deactivate()
def teardown_test_environment():
    """
    Perform any global post-test teardown, such as restoring the original
    template renderer and restoring the email sending functions.
    """
    # Restore everything stashed by setup_test_environment(); raises
    # AttributeError if setup was never called.
    saved_data = _TestState.saved_data
    settings.ALLOWED_HOSTS = saved_data.allowed_hosts
    settings.DEBUG = saved_data.debug
    settings.EMAIL_BACKEND = saved_data.email_backend
    Template._render = saved_data.template_render
    del _TestState.saved_data
    del mail.outbox
def setup_databases(verbosity, interactive, *, time_keeper=None, keepdb=False, debug_sql=False, parallel=0,
                    aliases=None):
    """Create the test databases.

    Returns a list of (connection, old_name, destroy) triples that
    teardown_databases() consumes to destroy what was created here.
    """
    if time_keeper is None:
        time_keeper = NullTimeKeeper()
    test_databases, mirrored_aliases = get_unique_databases_and_mirrors(aliases)
    old_names = []
    # NOTE: the loop variable shadows the `aliases` parameter, which has
    # already been consumed by get_unique_databases_and_mirrors() above.
    for db_name, aliases in test_databases.values():
        first_alias = None
        for alias in aliases:
            connection = connections[alias]
            old_names.append((connection, db_name, first_alias is None))
            # Actually create the database for the first connection
            if first_alias is None:
                first_alias = alias
                with time_keeper.timed(" Creating '%s'" % alias):
                    connection.creation.create_test_db(
                        verbosity=verbosity,
                        autoclobber=not interactive,
                        keepdb=keepdb,
                        serialize=connection.settings_dict['TEST'].get('SERIALIZE', True),
                    )
                if parallel > 1:
                    # One clone per parallel worker, suffixed _1.._N.
                    for index in range(parallel):
                        with time_keeper.timed(" Cloning '%s'" % alias):
                            connection.creation.clone_test_db(
                                suffix=str(index + 1),
                                verbosity=verbosity,
                                keepdb=keepdb,
                            )
            # Configure all other connections as mirrors of the first one
            else:
                connections[alias].creation.set_as_test_mirror(connections[first_alias].settings_dict)
    # Configure the test mirrors.
    for alias, mirror_alias in mirrored_aliases.items():
        connections[alias].creation.set_as_test_mirror(
            connections[mirror_alias].settings_dict)
    if debug_sql:
        for alias in connections:
            connections[alias].force_debug_cursor = True
    return old_names
def dependency_ordered(test_databases, dependencies):
    """
    Reorder *test_databases* — (signature, (db_name, aliases)) pairs — so
    that every database appears after the databases it depends on, per
    TEST[DEPENDENCIES].
    """
    pending = list(test_databases)
    ordered = []
    resolved = set()
    # Map each database signature to the union of its aliases' dependencies.
    deps_by_sig = {}
    for sig, (_, aliases) in pending:
        combined = set()
        for alias in aliases:
            combined.update(dependencies.get(alias, []))
        # A database may not depend on one of its own aliases.
        if not combined.isdisjoint(aliases):
            raise ImproperlyConfigured(
                "Circular dependency: databases %r depend on each other, "
                "but are aliases." % aliases
            )
        deps_by_sig[sig] = combined
    while pending:
        progressed = False
        deferred = []
        # Emit every database whose dependencies are already satisfied.
        for sig, db_info in pending:
            if deps_by_sig[sig].issubset(resolved):
                resolved.update(db_info[1])
                ordered.append((sig, db_info))
                progressed = True
            else:
                deferred.append((sig, db_info))
        if not progressed:
            raise ImproperlyConfigured("Circular dependency in TEST[DEPENDENCIES]")
        pending = deferred
    return ordered
def get_unique_databases_and_mirrors(aliases=None):
    """
    Figure out which databases actually need to be created.

    Deduplicate entries in DATABASES that correspond the same database or are
    configured as test mirrors.

    Return two values:
    - test_databases: ordered mapping of signatures to (name, list of aliases)
                      where all aliases share the same underlying database.
    - mirrored_aliases: mapping of mirror aliases to original aliases.
    """
    if aliases is None:
        aliases = connections
    mirrored_aliases = {}
    test_databases = {}
    dependencies = {}
    default_sig = connections[DEFAULT_DB_ALIAS].creation.test_db_signature()
    for alias in connections:
        connection = connections[alias]
        test_settings = connection.settings_dict['TEST']
        if test_settings['MIRROR']:
            # If the database is marked as a test mirror, save the alias.
            mirrored_aliases[alias] = test_settings['MIRROR']
        elif alias in aliases:
            # Store a tuple with DB parameters that uniquely identify it.
            # If we have two aliases with the same values for that tuple,
            # we only need to create the test database once.
            item = test_databases.setdefault(
                connection.creation.test_db_signature(),
                (connection.settings_dict['NAME'], []),
            )
            # The default database must be the first because data migrations
            # use the default alias by default.
            if alias == DEFAULT_DB_ALIAS:
                item[1].insert(0, alias)
            else:
                item[1].append(alias)
            if 'DEPENDENCIES' in test_settings:
                dependencies[alias] = test_settings['DEPENDENCIES']
            else:
                # Non-default databases implicitly depend on the default one
                # unless they share its signature (same underlying database).
                if alias != DEFAULT_DB_ALIAS and connection.creation.test_db_signature() != default_sig:
                    dependencies[alias] = test_settings.get('DEPENDENCIES', [DEFAULT_DB_ALIAS])
    test_databases = dict(dependency_ordered(test_databases.items(), dependencies))
    return test_databases, mirrored_aliases
def teardown_databases(old_config, verbosity, parallel=0, keepdb=False):
    """Destroy all the non-mirror databases."""
    # old_config is the (connection, old_name, destroy) list produced by
    # setup_databases().
    for connection, old_name, destroy in old_config:
        if not destroy:
            # Mirrors borrow another alias's database; nothing to remove.
            continue
        if parallel > 1:
            # Remove every per-worker clone before the primary database.
            for index in range(parallel):
                connection.creation.destroy_test_db(
                    suffix=str(index + 1),
                    verbosity=verbosity,
                    keepdb=keepdb,
                )
        connection.creation.destroy_test_db(old_name, verbosity, keepdb)
def get_runner(settings, test_runner_class=None):
    """
    Import and return the test-runner class given by the dotted path
    *test_runner_class*, falling back to ``settings.TEST_RUNNER``.
    """
    dotted = test_runner_class or settings.TEST_RUNNER
    parts = dotted.split('.')
    # A bare name (no dots) is resolved relative to the current package.
    module_path = '.'.join(parts[:-1]) if len(parts) > 1 else '.'
    module = __import__(module_path, {}, {}, parts[-1])
    return getattr(module, parts[-1])
class TestContextDecorator:
    """
    A base class that can either be used as a context manager during tests
    or as a test function or unittest.TestCase subclass decorator to perform
    temporary alterations.

    `attr_name`: attribute assigned the return value of enable() if used as
                 a class decorator.

    `kwarg_name`: keyword argument passing the return value of enable() if
                  used as a function decorator.
    """
    def __init__(self, attr_name=None, kwarg_name=None):
        self.attr_name = attr_name
        self.kwarg_name = kwarg_name

    def enable(self):
        # Subclasses apply their temporary alteration here and may return a
        # context object exposed via attr_name/kwarg_name.
        raise NotImplementedError

    def disable(self):
        # Subclasses revert their alteration here.
        raise NotImplementedError

    def __enter__(self):
        return self.enable()

    def __exit__(self, exc_type, exc_value, traceback):
        self.disable()

    def decorate_class(self, cls):
        if issubclass(cls, TestCase):
            decorated_setUp = cls.setUp

            def setUp(inner_self):
                # Enable per test; addCleanup guarantees disable() runs even
                # when the wrapped setUp raises.
                context = self.enable()
                inner_self.addCleanup(self.disable)
                if self.attr_name:
                    setattr(inner_self, self.attr_name, context)
                decorated_setUp(inner_self)

            cls.setUp = setUp
            return cls
        raise TypeError('Can only decorate subclasses of unittest.TestCase')

    def decorate_callable(self, func):
        if asyncio.iscoroutinefunction(func):
            # If the inner function is an async function, we must execute async
            # as well so that the `with` statement executes at the right time.
            @wraps(func)
            async def inner(*args, **kwargs):
                with self as context:
                    if self.kwarg_name:
                        kwargs[self.kwarg_name] = context
                    return await func(*args, **kwargs)
        else:
            @wraps(func)
            def inner(*args, **kwargs):
                with self as context:
                    if self.kwarg_name:
                        kwargs[self.kwarg_name] = context
                    return func(*args, **kwargs)
        return inner

    def __call__(self, decorated):
        # Dispatch on what is being decorated: a class, a callable, or error.
        if isinstance(decorated, type):
            return self.decorate_class(decorated)
        elif callable(decorated):
            return self.decorate_callable(decorated)
        raise TypeError('Cannot decorate object of type %s' % type(decorated))
class override_settings(TestContextDecorator):
    """
    Act as either a decorator or a context manager. If it's a decorator, take a
    function and return a wrapped function. If it's a contextmanager, use it
    with the ``with`` statement. In either event, entering/exiting are called
    before and after, respectively, the function/block is executed.
    """
    enable_exception = None

    def __init__(self, **kwargs):
        self.options = kwargs
        super().__init__()

    def enable(self):
        # Keep this code at the beginning to leave the settings unchanged
        # in case it raises an exception because INSTALLED_APPS is invalid.
        if 'INSTALLED_APPS' in self.options:
            try:
                apps.set_installed_apps(self.options['INSTALLED_APPS'])
            except Exception:
                apps.unset_installed_apps()
                raise
        # Layer the overrides on top of the current settings object.
        override = UserSettingsHolder(settings._wrapped)
        for key, new_value in self.options.items():
            setattr(override, key, new_value)
        self.wrapped = settings._wrapped
        settings._wrapped = override
        for key, new_value in self.options.items():
            try:
                setting_changed.send(
                    sender=settings._wrapped.__class__,
                    setting=key, value=new_value, enter=True,
                )
            except Exception as exc:
                # A signal receiver failed: remember the exception and roll
                # back immediately; disable() re-raises it after restoring.
                self.enable_exception = exc
                self.disable()

    def disable(self):
        if 'INSTALLED_APPS' in self.options:
            apps.unset_installed_apps()
        settings._wrapped = self.wrapped
        del self.wrapped
        responses = []
        for key in self.options:
            new_value = getattr(settings, key, None)
            # send_robust collects receiver exceptions instead of raising so
            # every receiver still observes the exit signal.
            responses_for_setting = setting_changed.send_robust(
                sender=settings._wrapped.__class__,
                setting=key, value=new_value, enter=False,
            )
            responses.extend(responses_for_setting)
        if self.enable_exception is not None:
            exc = self.enable_exception
            self.enable_exception = None
            raise exc
        for _, response in responses:
            if isinstance(response, Exception):
                raise response

    def save_options(self, test_func):
        if test_func._overridden_settings is None:
            test_func._overridden_settings = self.options
        else:
            # Duplicate dict to prevent subclasses from altering their parent.
            test_func._overridden_settings = {
                **test_func._overridden_settings,
                **self.options,
            }

    def decorate_class(self, cls):
        from django.test import SimpleTestCase
        if not issubclass(cls, SimpleTestCase):
            raise ValueError(
                "Only subclasses of Django SimpleTestCase can be decorated "
                "with override_settings")
        self.save_options(cls)
        return cls
class modify_settings(override_settings):
    """
    Like override_settings, but makes it possible to append, prepend, or remove
    items instead of redefining the entire list.
    """
    def __init__(self, *args, **kwargs):
        if args:
            # Hack used when instantiating from SimpleTestCase.setUpClass.
            assert not kwargs
            self.operations = args[0]
        else:
            assert not args
            self.operations = list(kwargs.items())
        # Deliberately skip override_settings.__init__: self.options is
        # computed lazily in enable() from self.operations.
        super(override_settings, self).__init__()

    def save_options(self, test_func):
        if test_func._modified_settings is None:
            test_func._modified_settings = self.operations
        else:
            # Duplicate list to prevent subclasses from altering their parent.
            test_func._modified_settings = list(
                test_func._modified_settings) + self.operations

    def enable(self):
        self.options = {}
        for name, operations in self.operations:
            try:
                # When called from SimpleTestCase.setUpClass, values may be
                # overridden several times; cumulate changes.
                value = self.options[name]
            except KeyError:
                value = list(getattr(settings, name, []))
            for action, items in operations.items():
                # items may be a single value or an iterable.
                if isinstance(items, str):
                    items = [items]
                if action == 'append':
                    value = value + [item for item in items if item not in value]
                elif action == 'prepend':
                    value = [item for item in items if item not in value] + value
                elif action == 'remove':
                    value = [item for item in value if item not in items]
                else:
                    raise ValueError("Unsupported action: %s" % action)
            self.options[name] = value
        super().enable()
class override_system_checks(TestContextDecorator):
    """
    Act as a decorator. Override list of registered system checks.
    Useful when you override `INSTALLED_APPS`, e.g. if you exclude `auth` app,
    you also need to exclude its system checks.
    """
    def __init__(self, new_checks, deployment_checks=None):
        from django.core.checks.registry import registry
        self.registry = registry
        self.new_checks = new_checks
        # When None, deployment checks are left untouched by enable().
        self.deployment_checks = deployment_checks
        super().__init__()

    def enable(self):
        # Swap out the registered checks, saving the originals for disable().
        self.old_checks = self.registry.registered_checks
        self.registry.registered_checks = set()
        for check in self.new_checks:
            self.registry.register(check, *getattr(check, 'tags', ()))
        self.old_deployment_checks = self.registry.deployment_checks
        if self.deployment_checks is not None:
            self.registry.deployment_checks = set()
            for check in self.deployment_checks:
                self.registry.register(check, *getattr(check, 'tags', ()), deploy=True)

    def disable(self):
        self.registry.registered_checks = self.old_checks
        self.registry.deployment_checks = self.old_deployment_checks
def compare_xml(want, got):
    """
    Compare two XML snippets semantically rather than textually: attribute
    order does not matter, comment/doctype/processing-instruction nodes are
    ignored, and leading/trailing whitespace is normalized.

    Based on https://github.com/lxml/lxml/blob/master/src/lxml/doctestcompare.py
    """
    whitespace = re.compile(r'[ \t\n][ \t\n]+')

    def normalized_text(element):
        # Concatenated text children with whitespace runs collapsed.
        text = ''.join(
            child.data for child in element.childNodes
            if child.nodeType == Node.TEXT_NODE
        )
        return whitespace.sub(' ', text)

    def element_children(element):
        return [child for child in element.childNodes
                if child.nodeType == Node.ELEMENT_NODE]

    def same_element(want_element, got_element):
        if want_element.tagName != got_element.tagName:
            return False
        if normalized_text(want_element) != normalized_text(got_element):
            return False
        if dict(want_element.attributes.items()) != dict(got_element.attributes.items()):
            return False
        want_kids = element_children(want_element)
        got_kids = element_children(got_element)
        if len(want_kids) != len(got_kids):
            return False
        return all(same_element(w, g) for w, g in zip(want_kids, got_kids))

    def root_element(document):
        # First child that isn't a comment, doctype, or processing
        # instruction.
        for node in document.childNodes:
            if node.nodeType not in (
                Node.COMMENT_NODE,
                Node.DOCUMENT_TYPE_NODE,
                Node.PROCESSING_INSTRUCTION_NODE,
            ):
                return node

    want = want.strip().replace('\\n', '\n')
    got = got.strip().replace('\\n', '\n')
    # Fragments without an XML declaration get a synthetic root element so
    # that sibling sequences like "<foo/><bar/>" can be compared.
    if not want.startswith('<?xml'):
        want = '<root>%s</root>' % want
        got = '<root>%s</root>' % got
    return same_element(root_element(parseString(want)),
                        root_element(parseString(got)))
class CaptureQueriesContext:
    """
    Context manager that captures queries executed by the specified connection.
    """
    def __init__(self, connection):
        self.connection = connection

    def __iter__(self):
        return iter(self.captured_queries)

    def __getitem__(self, index):
        return self.captured_queries[index]

    def __len__(self):
        return len(self.captured_queries)

    @property
    def captured_queries(self):
        # Slice of the connection's query log recorded while active; while
        # still open, final_queries is None so the slice is open-ended.
        return self.connection.queries[self.initial_queries:self.final_queries]

    def __enter__(self):
        # Force query logging regardless of DEBUG, remembering the old flag
        # so __exit__ can restore it.
        self.force_debug_cursor = self.connection.force_debug_cursor
        self.connection.force_debug_cursor = True
        # Run any initialization queries if needed so that they won't be
        # included as part of the count.
        self.connection.ensure_connection()
        self.initial_queries = len(self.connection.queries_log)
        self.final_queries = None
        # Prevent the per-request reset from clearing the log mid-capture.
        request_started.disconnect(reset_queries)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.connection.force_debug_cursor = self.force_debug_cursor
        request_started.connect(reset_queries)
        if exc_type is not None:
            return
        self.final_queries = len(self.connection.queries_log)
class ignore_warnings(TestContextDecorator):
    """
    Decorator/context manager that suppresses warnings while active. The
    keyword arguments are forwarded to the warnings filter; supplying
    'message' or 'module' selects the fine-grained ``filterwarnings``
    filter, anything else uses ``simplefilter``.
    """
    def __init__(self, **kwargs):
        self.ignore_kwargs = kwargs
        needs_pattern_filter = 'message' in kwargs or 'module' in kwargs
        self.filter_func = (
            warnings.filterwarnings if needs_pattern_filter
            else warnings.simplefilter
        )
        super().__init__()

    def enable(self):
        self.catch_warnings = warnings.catch_warnings()
        self.catch_warnings.__enter__()
        self.filter_func('ignore', **self.ignore_kwargs)

    def disable(self):
        self.catch_warnings.__exit__(*sys.exc_info())
# On OSes that don't provide tzset (Windows), we can't set the timezone
# in which the program runs. As a consequence, we must skip tests that
# don't enforce a specific timezone (with timezone.override or equivalent),
# or attempt to interpret naive datetimes in the default timezone.
# Skip decorator for tests that need to run the process under an arbitrary
# timezone.
requires_tz_support = skipUnless(
    TZ_SUPPORT,
    "This test relies on the ability to run a program in an arbitrary "
    "time zone, but your operating system isn't able to do that."
)
@contextmanager
def extend_sys_path(*paths):
    """Context manager to temporarily add paths to sys.path."""
    original = list(sys.path)
    sys.path.extend(paths)
    try:
        yield
    finally:
        # Restore the exact pre-entry path list.
        sys.path = original
@contextmanager
def isolate_lru_cache(lru_cache_object):
    """Run the block with *lru_cache_object* emptied on entry and on exit."""
    clear = lru_cache_object.cache_clear
    clear()
    try:
        yield
    finally:
        clear()
@contextmanager
def captured_output(stream_name):
    """Temporarily replace ``sys.<stream_name>`` with a StringIO and yield it.

    Note: This function and the ``captured_std*`` helpers are copied from
    CPython's ``test.support`` module.
    """
    replacement = StringIO()
    original = getattr(sys, stream_name)
    setattr(sys, stream_name, replacement)
    try:
        yield replacement
    finally:
        setattr(sys, stream_name, original)
def captured_stdout():
    """Capture the output of sys.stdout:

       with captured_stdout() as stdout:
           print("hello")
       self.assertEqual(stdout.getvalue(), "hello\n")
    """
    # Thin convenience wrapper around captured_output() for stdout.
    return captured_output("stdout")
def captured_stderr():
    """Capture the output of sys.stderr:

       with captured_stderr() as stderr:
           print("hello", file=sys.stderr)
       self.assertEqual(stderr.getvalue(), "hello\n")
    """
    # Thin convenience wrapper around captured_output() for stderr.
    return captured_output("stderr")
def captured_stdin():
    """Capture the input to sys.stdin:

       with captured_stdin() as stdin:
           stdin.write('hello\n')
           stdin.seek(0)
           # call test code that consumes from sys.stdin
           captured = input()
       self.assertEqual(captured, "hello")
    """
    # Thin convenience wrapper around captured_output() for stdin.
    return captured_output("stdin")
@contextmanager
def freeze_time(t):
    """
    Make time.time() return *t* for the duration of the block. Only the
    ``time`` module's attribute is patched, so code that imported the
    function directly (``from time import time``) is unaffected.

    This isn't meant as a public API, but helps reduce some repetitive code
    in Django's test suite.
    """
    real_time = time.time
    try:
        time.time = lambda: t
        yield
    finally:
        time.time = real_time
def require_jinja2(test_func):
    """
    Decorator to enable a Jinja2 template engine in addition to the regular
    Django template engine for a test or skip it if Jinja2 isn't available.
    """
    # Apply the skip first, then layer a two-engine TEMPLATES override
    # (Django templates + Jinja2) on top of the wrapped test.
    test_func = skipIf(jinja2 is None, "this test requires jinja2")(test_func)
    return override_settings(TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'APP_DIRS': True,
    }, {
        'BACKEND': 'django.template.backends.jinja2.Jinja2',
        'APP_DIRS': True,
        'OPTIONS': {'keep_trailing_newline': True},
    }])(test_func)
class override_script_prefix(TestContextDecorator):
    """Decorator or context manager to temporary override the script prefix."""
    def __init__(self, prefix):
        # URL script prefix to install while active.
        self.prefix = prefix
        super().__init__()

    def enable(self):
        # Save the current prefix so disable() can restore it.
        self.old_prefix = get_script_prefix()
        set_script_prefix(self.prefix)

    def disable(self):
        set_script_prefix(self.old_prefix)
class LoggingCaptureMixin:
    """
    TestCase mixin that redirects the 'django' logger's first handler into a
    StringIO for the duration of each test; the captured text is available
    as ``self.logger_output``.
    """
    def setUp(self):
        self.logger = logging.getLogger('django')
        self.logger_output = StringIO()
        handler = self.logger.handlers[0]
        self.old_stream = handler.stream
        handler.stream = self.logger_output

    def tearDown(self):
        # Put the original stream back on the first handler.
        self.logger.handlers[0].stream = self.old_stream
class isolate_apps(TestContextDecorator):
    """
    Act as either a decorator or a context manager to register models defined
    in its wrapped context to an isolated registry.

    The list of installed apps the isolated registry should contain must be
    passed as arguments.

    Two optional keyword arguments can be specified:

    `attr_name`: attribute assigned the isolated registry if used as a class
                 decorator.

    `kwarg_name`: keyword argument passing the isolated registry if used as a
                  function decorator.
    """
    def __init__(self, *installed_apps, **kwargs):
        self.installed_apps = installed_apps
        super().__init__(**kwargs)

    def enable(self):
        # Swap the registry that newly defined model classes attach to.
        self.old_apps = Options.default_apps
        apps = Apps(self.installed_apps)
        setattr(Options, 'default_apps', apps)
        return apps

    def disable(self):
        setattr(Options, 'default_apps', self.old_apps)
class TimeKeeper:
    """Collect named wall-clock timings for later reporting on stderr."""
    def __init__(self):
        # Maps a label to the list of elapsed durations (seconds).
        self.records = collections.defaultdict(list)

    @contextmanager
    def timed(self, name):
        # Touch the entry up front so the label exists even if the timed
        # block raises before the duration is appended.
        self.records[name]
        started = time.perf_counter()
        try:
            yield
        finally:
            self.records[name].append(time.perf_counter() - started)

    def print_results(self):
        """Write one "<name> took N.NNNs" line per recorded duration."""
        for name, durations in self.records.items():
            for duration in durations:
                sys.stderr.write('%s took %.3fs' % (name, duration) + os.linesep)
class NullTimeKeeper:
    """No-op stand-in for TimeKeeper: records nothing, prints nothing."""
    @contextmanager
    def timed(self, name):
        # Deliberately keeps no record of the timing.
        yield

    def print_results(self):
        # Nothing was recorded, so there is nothing to print.
        pass
def tag(*tags):
    """Decorator that attaches *tags* to a test class or method as a set."""
    def decorator(obj):
        missing = object()
        existing = getattr(obj, 'tags', missing)
        # Merge with any tags already present (e.g. inherited or stacked
        # decorators); otherwise start a fresh set.
        obj.tags = set(tags) if existing is missing else existing.union(tags)
        return obj
    return decorator
@contextmanager
def register_lookup(field, *lookups, lookup_name=None):
    """
    Context manager to temporarily register lookups on a model field using
    lookup_name (or the lookup's lookup_name if not provided).
    """
    try:
        for lookup in lookups:
            field.register_lookup(lookup, lookup_name)
        yield
    finally:
        # Unregister every requested lookup, even when registration failed
        # partway through.
        # NOTE(review): assumes _unregister_lookup tolerates lookups that
        # were never registered — confirm against the field implementation.
        for lookup in lookups:
            field._unregister_lookup(lookup, lookup_name)
| |
# Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University
# Copyright (c) 2011, 2012 Open Networking Foundation
# Copyright (c) 2012, 2013 Big Switch Networks, Inc.
# See the file LICENSE.pyloxi which should have been included in the source distribution
# Automatically generated by LOXI from template module.py
# Do not modify
import struct
import loxi
from . import util
import functools
import loxi.generic_util
import sys
ofp = sys.modules['loxi.of14']
class queue_desc_prop(loxi.OFObject):
    """Base class for OpenFlow 1.4 queue-description properties.

    Auto-generated LOXI code -- regenerate rather than editing by hand.
    Concrete properties register themselves in ``subtypes`` keyed by their
    wire ``type`` value so unpack() can dispatch to the right class.
    """
    subtypes = {}

    def __init__(self, type=None):
        if type != None:
            self.type = type
        else:
            self.type = 0
        return

    def pack(self):
        """Serialize to wire format: type, then total length."""
        packed = []
        packed.append(struct.pack("!H", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 1
        length = sum([len(x) for x in packed])
        packed[1] = struct.pack("!H", length)
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        """Deserialize a property, dispatching on the peeked type field."""
        subtype, = reader.peek('!H', 0)
        subclass = queue_desc_prop.subtypes.get(subtype)
        if subclass:
            return subclass.unpack(reader)
        obj = queue_desc_prop()
        obj.type = reader.read("!H")[0]
        _length = reader.read("!H")[0]
        orig_reader = reader
        # Constrain reads to this property's payload; the declared length
        # includes the 4-byte header already consumed.
        reader = orig_reader.slice(_length, 4)
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.type != other.type: return False
        return True

    def pretty_print(self, q):
        q.text("queue_desc_prop {")
        with q.group():
            with q.indent(2):
                q.breakable()
            q.breakable()
        q.text('}')
class experimenter(queue_desc_prop):
    """Experimenter queue property (wire type 0xffff).

    Auto-generated LOXI code. Vendor-specific subclasses register
    themselves in ``subtypes`` keyed by the 32-bit experimenter id.
    """
    subtypes = {}
    type = 65535

    def __init__(self, experimenter=None, exp_type=None):
        if experimenter != None:
            self.experimenter = experimenter
        else:
            self.experimenter = 0
        if exp_type != None:
            self.exp_type = exp_type
        else:
            self.exp_type = 0
        return

    def pack(self):
        """Serialize: type, length, experimenter id, experimenter subtype."""
        packed = []
        packed.append(struct.pack("!H", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 1
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.exp_type))
        length = sum([len(x) for x in packed])
        packed[1] = struct.pack("!H", length)
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        """Deserialize, dispatching on the experimenter id at offset 4."""
        subtype, = reader.peek('!L', 4)
        subclass = experimenter.subtypes.get(subtype)
        if subclass:
            return subclass.unpack(reader)
        obj = experimenter()
        _type = reader.read("!H")[0]
        assert(_type == 65535)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.experimenter = reader.read("!L")[0]
        obj.exp_type = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.experimenter != other.experimenter: return False
        if self.exp_type != other.exp_type: return False
        return True

    def pretty_print(self, q):
        q.text("experimenter {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("exp_type = ");
                q.text("%#x" % self.exp_type)
            q.breakable()
        q.text('}')

# Register with the base-class dispatcher keyed on the wire type.
queue_desc_prop.subtypes[65535] = experimenter
class bsn(experimenter):
    """Big Switch Networks experimenter queue property (id 6035143).

    Auto-generated LOXI code. BSN-specific subclasses register themselves
    in ``subtypes`` keyed by ``exp_type``.
    """
    subtypes = {}
    type = 65535
    experimenter = 6035143

    def __init__(self, exp_type=None):
        if exp_type != None:
            self.exp_type = exp_type
        else:
            self.exp_type = 0
        return

    def pack(self):
        """Serialize: type, length, BSN experimenter id, exp_type."""
        packed = []
        packed.append(struct.pack("!H", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 1
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.exp_type))
        length = sum([len(x) for x in packed])
        packed[1] = struct.pack("!H", length)
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        """Deserialize, dispatching on the exp_type at offset 8."""
        subtype, = reader.peek('!L', 8)
        subclass = bsn.subtypes.get(subtype)
        if subclass:
            return subclass.unpack(reader)
        obj = bsn()
        _type = reader.read("!H")[0]
        assert(_type == 65535)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        obj.exp_type = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.exp_type != other.exp_type: return False
        return True

    def pretty_print(self, q):
        q.text("bsn {")
        with q.group():
            with q.indent(2):
                q.breakable()
            q.breakable()
        q.text('}')

# Register with the experimenter dispatcher keyed on the experimenter id.
experimenter.subtypes[6035143] = bsn
class bsn_queue_name(bsn):
    """BSN queue-name property (exp_type 0) carrying a raw ``name`` payload.

    Auto-generated LOXI code -- regenerate rather than editing by hand.
    """
    type = 65535
    experimenter = 6035143
    exp_type = 0

    def __init__(self, name=None):
        # ``name`` is the raw bytes payload following the property header.
        if name != None:
            self.name = name
        else:
            self.name = b''
        return

    def pack(self):
        """Serialize: type, length, experimenter id, exp_type, name bytes."""
        packed = []
        packed.append(struct.pack("!H", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 1
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.exp_type))
        packed.append(self.name)
        length = sum([len(x) for x in packed])
        packed[1] = struct.pack("!H", length)
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        """Deserialize; asserts the header fields match this subclass."""
        obj = bsn_queue_name()
        _type = reader.read("!H")[0]
        assert(_type == 65535)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _exp_type = reader.read("!L")[0]
        assert(_exp_type == 0)
        # The remainder of the sliced payload is the queue name.
        obj.name = reader.read_all()
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.name != other.name: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_queue_name {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("name = ");
                q.pp(self.name)
            q.breakable()
        q.text('}')

# Register with the BSN dispatcher keyed on exp_type.
bsn.subtypes[0] = bsn_queue_name
class max_rate(queue_desc_prop):
    """Maximum-rate queue description property (wire type 2).

    Auto-generated LOXI code -- regenerate rather than editing by hand.
    """
    type = 2

    def __init__(self, rate=None):
        if rate != None:
            self.rate = rate
        else:
            self.rate = 0
        return

    def pack(self):
        """Serialize: type, length, rate, then 2 bytes of padding."""
        packed = []
        packed.append(struct.pack("!H", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 1
        packed.append(struct.pack("!H", self.rate))
        packed.append(b'\x00' * 2)
        length = sum([len(x) for x in packed])
        packed[1] = struct.pack("!H", length)
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        """Deserialize a max_rate property; asserts the wire type matches."""
        obj = max_rate()
        _type = reader.read("!H")[0]
        assert(_type == 2)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.rate = reader.read("!H")[0]
        # Skip the trailing 2 padding bytes.
        reader.skip(2)
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.rate != other.rate: return False
        return True

    def pretty_print(self, q):
        q.text("max_rate {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("rate = ");
                q.text("%#x" % self.rate)
            q.breakable()
        q.text('}')

# Register with the base-class dispatcher keyed on the wire type.
queue_desc_prop.subtypes[2] = max_rate
class min_rate(queue_desc_prop):
    """Minimum-rate queue description property (wire type 1).

    Auto-generated LOXI code -- regenerate rather than editing by hand.
    """
    type = 1

    def __init__(self, rate=None):
        if rate != None:
            self.rate = rate
        else:
            self.rate = 0
        return

    def pack(self):
        """Serialize: type, length, rate, then 2 bytes of padding."""
        packed = []
        packed.append(struct.pack("!H", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 1
        packed.append(struct.pack("!H", self.rate))
        packed.append(b'\x00' * 2)
        length = sum([len(x) for x in packed])
        packed[1] = struct.pack("!H", length)
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        """Deserialize a min_rate property; asserts the wire type matches."""
        obj = min_rate()
        _type = reader.read("!H")[0]
        assert(_type == 1)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.rate = reader.read("!H")[0]
        # Skip the trailing 2 padding bytes.
        reader.skip(2)
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.rate != other.rate: return False
        return True

    def pretty_print(self, q):
        q.text("min_rate {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("rate = ");
                q.text("%#x" % self.rate)
            q.breakable()
        q.text('}')

# Register with the base-class dispatcher keyed on the wire type.
queue_desc_prop.subtypes[1] = min_rate
| |
"""
ltdexec.processor.validator
===========================
Validator classes verify that the raw source code or abstract syntax tree is
permissible before it is finally compiled to Python byte code. They signal
errors through exceptions, which may be used to provide an error message.
"""
import ast
import re
from .. import config
from .. import exceptions
#==============================================================================#
def syntax_error(node, msg, reason=None):
    """Raise an ltdexec SyntaxError positioned at *node*. This is intended
    for use within abstract syntax tree validators.
    """
    lineno = getattr(node, 'lineno', -1)
    # Offset in AST objects is 0-based, while in SyntaxError exception
    # it is 1-based...
    offset = getattr(node, 'col_offset', -1) + 1
    raise exceptions.SyntaxError(
        msg,
        config.misc.DEFAULT_SCRIPT_FILE_NAME,
        lineno,
        offset,
        config.misc.DEFAULT_SCRIPT_TEXT_LINE,
        reason,
    )
#==============================================================================#
class SourceValidator(object):
    """ A SourceValidator verifies the correctness of raw source code. """
    def __init__(self, dialect):
        # The Dialect provides the validation rules for subclasses.
        self.dialect = dialect

    def __call__(self, source):
        """ Perform the validation.

            This default implementation does nothing; it simply returns the
            source unchanged.
        """
        # Bug fix: the docstring promises the source is returned unchanged,
        # but the body previously fell through and returned None, which
        # would discard the script for any caller using the return value.
        return source
#==============================================================================#
class MinimalAstValidator(ast.NodeVisitor):
    """ The MinimalAstValidator must be the base class of all abstract
        syntax tree validators.  It verifies the few rules that all LimitedExec
        scripts must follow.
    """
    # NOTE(review): the class docstring previously said
    # "MinimalSourceValidator"; corrected to match the actual class name.
    def __init__(self, dialect):
        # The Dialect supplies the configuration concrete validators check.
        super(MinimalAstValidator, self).__init__()
        self.dialect = dialect

    def check_name(self, node):
        """ Verifies that a name node does not use a name reserved for use by
            LimitedExec.  Such names begin with ``_LX_``.

            All abstract syntax tree validators **must** call this method,
            directly or indirectly.
        """
        if node.id.startswith(config.names.LTDEXEC_PRIVATE_PREFIX):
            m = 'Names may not begin with "{0}". '
            m += 'This is reserved for library-internal use.'
            m = m.format(config.names.LTDEXEC_PRIVATE_PREFIX)
            syntax_error(node, m, reason='private_prefix_name')

    def check_attribute(self, node):
        """ Verifies that an attribute node does not use an attribute name
            reserved for use by LimitedExec.  Such names begin with ``_LX_``.
            (Docstring fixed: it previously described "a name node".)

            All abstract syntax tree validators **must** call this method,
            directly or indirectly.
        """
        if node.attr.startswith(config.names.LTDEXEC_PRIVATE_PREFIX):
            m = 'Attributes may not begin with "{0}". '
            m += 'This is reserved for library-internal use.'
            m = m.format(config.names.LTDEXEC_PRIVATE_PREFIX)
            syntax_error(node, m, reason='private_prefix_attr')

    def visit_Name(self, node):
        self.check_name(node)
        self.generic_visit(node)

    def visit_Attribute(self, node):
        self.check_attribute(node)
        self.generic_visit(node)

    def __call__(self, tree):
        """ Perform the validation. """
        self.visit(tree)
#==============================================================================#
class AstValidator(MinimalAstValidator):
    """ Base class of the default abstract syntax tree validator class created
        by a Dialect.

        This class checks that imports, if permitted, only import modules in
        the Dialect's approved list.  It also checks that the names and
        attributes used are permitted.  It also makes sure that names and
        attributes that are the target of an assignment operation are allowed
        to be the result of such an operation.
    """
    def __init__(self, dialect):
        super(AstValidator, self).__init__(dialect)
    def check_import_from(self, node, module, name, asname, level):
        """ Validate one alias of ``from module import name [as asname]``. """
        if level > 0:
            syntax_error(node, 'Relative imports are not permitted.',
                         reason='relative_import')
        if self.dialect.allowed_imports:
            # Whitelist mode: the module must be listed, and if its entry
            # names specific importables, `name` must be one of them.
            if module not in self.dialect.allowed_imports:
                m = 'Cannot import "{0}", it is not among the allowed imports.'
                m = m.format(module)
                syntax_error(node, m, reason='not_in_allowed_imports')
            allowed_froms = self.dialect.allowed_imports[module]
            if allowed_froms and (name not in allowed_froms):
                m = 'Importing "{0}" from "{1}" is not permitted.'
                m = m.format(name, module)
                syntax_error(node, m, reason='not_in_allowed_imports_froms')
        elif module in self.dialect.forbidden_imports:
            m = 'Importing of "{0}" is not permitted.'.format(module)
            syntax_error(node, m, reason='forbidden_import')
        if ((name in self.dialect.forbidden_names_set) or
            (asname and asname in self.dialect.forbidden_names_set)):
            m = 'Cannot import as "{0}", it is a forbidden name.'.format(asname)
            syntax_error(node, m, reason='import_forbidden_name')
    def check_import(self, node, name, asname):
        """ Validate one alias of ``import name [as asname]``. """
        if self.dialect.allowed_imports:
            if name not in self.dialect.allowed_imports:
                m = 'Cannot import "{0}", it is not among the allowed imports.'
                m = m.format(name)
                syntax_error(node, m, reason='not_in_allowed_imports')
        elif name in self.dialect.forbidden_imports:
            m = 'Importing of "{0}" is not permitted.'.format(name)
            syntax_error(node, m, reason='forbidden_import')
        if ((name in self.dialect.forbidden_names_set) or
            (asname and asname in self.dialect.forbidden_names_set)):
            m = 'Cannot import as "{0}", it is a forbidden name.'.format(asname)
            syntax_error(node, m, reason='import_forbidden_name')
    def check_name(self, node):
        super(AstValidator, self).check_name(node)
        ctx = node.ctx
        name = node.id
        if name in self.dialect.forbidden_names_set:
            m = 'Use of the name "{0}" is forbidden.'.format(name)
            syntax_error(node, m, reason='forbidden_name')
        # BUG FIX: node.ctx is an *instance* of the context class, so the
        # original comparison ``ctx == ast.Store`` was always False and the
        # unassignable-name check could never fire.  Use isinstance.
        elif (isinstance(ctx, (ast.Store, ast.AugStore, ast.Del)) and
              name in self.dialect.unassignable_names_set):
            m = 'The name "{0}" may not be assigned to.'.format(name)
            syntax_error(node, m, reason='unassignable_name')
        if self.dialect.no_double_underscore_names and len(name)>1:
            if name[:2]=='__' and name[-2:]=='__':
                m = 'Use of the name "{0}" is forbidden--'
                m += 'it starts and ends with double underscores.'
                m = m.format(name)
                syntax_error(node, m, reason='double_underscore_name')
    def check_attribute(self, node):
        super(AstValidator, self).check_attribute(node)
        ctx = node.ctx
        attr = node.attr
        if attr in self.dialect.forbidden_attrs_set:
            m = 'Use of the attribute "{0}" is forbidden.'.format(attr)
            syntax_error(node, m, reason='forbidden_attr')
        # BUG FIX: same instance-vs-class comparison bug as in check_name;
        # the unassignable-attribute check was unreachable before.
        elif (isinstance(ctx, (ast.Store, ast.AugStore, ast.Del)) and
              attr in self.dialect.unassignable_attrs_set):
            m = 'The attribute "{0}" may not be assigned to.'.format(attr)
            syntax_error(node, m, reason='unassignable_attr')
        if self.dialect.no_double_underscore_attrs and len(attr)>1:
            if attr[:2]=='__' and attr[-2:]=='__':
                m = 'Use of the attribute "{0}" is forbidden--'
                m += 'it starts and ends with double underscores.'
                m = m.format(attr)
                syntax_error(node, m, reason='double_underscore_attr')
        if isinstance(node.value, ast.Name) and \
           node.value.id in config.names.BUILTIN_NAMES_SET:
            m = 'Attributes of builtins may not be accessed.'
            syntax_error(node, m)
    # TODO: check name of function definitions, class defintions, method
    # definitions, etc.
    def visit_Name(self, node):
        self.check_name(node)
        self.generic_visit(node)
    def visit_Attribute(self, node):
        self.check_attribute(node)
        self.generic_visit(node)
    def visit_Import(self, node):
        for alias in node.names:
            self.check_import(node, alias.name, alias.asname)
        self.generic_visit(node)
    def visit_ImportFrom(self, node):
        for alias in node.names:
            self.check_import_from(node, node.module, alias.name, alias.asname,
                                   node.level)
        self.generic_visit(node)
#------------------------------------------------------------------------------#
def make_forbidden_visitor(name, description):
    """ Build a ``visit_<name>`` method that always reports a syntax error.

        Used to forbid an entire node type: visiting such a node immediately
        reports an error via :func:`syntax_error`.
    """
    template = 'The following is not allowed in this script: {0}.'
    def visitor(self, node):
        syntax_error(node, template.format(description), reason='node_' + name)
    visitor.__name__ = 'visit_' + name
    return visitor
def create_ast_validator_class(dialect):
    """ Create an abstract syntax tree validator class using the given
        dialect.

        By default, a Dialect uses this function to create an ast validator.
        The validator produced will have AstValidator as a base class.
    """
    attrs = {}
    # BUG FIX: dict.iteritems() is Python-2-only; .items() behaves the same
    # here and also works on Python 3.
    for flag, flagtraits in config.flags.node_leafflag_traits.items():
        # NOTE(review): ``== False`` (rather than ``is False``) is kept on
        # purpose -- it also matches 0, and tightening it could change which
        # flags count as disabled.
        if getattr(dialect, flag) == False:
            nodetraits = config.nodes.node_traits[flagtraits.node]
            # Forbid the node type outright by installing a visitor that
            # always raises.
            visitor = make_forbidden_visitor(nodetraits.name,
                                             nodetraits.description)
            attrs['visit_' + nodetraits.name] = visitor
    return type('AutoAstValidator', (AstValidator,), attrs)
#==============================================================================#
| |
# Copyright (c) 2013 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
from oslo.config import cfg
from cinder.brick.iscsi import iscsi
from cinder import exception
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils as putils
from cinder import utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class _ExportMixin(object):
    """Shared iSCSI export logic mixed into the concrete target admin
    helpers (tgtadm, ietadm, lioadm, ...).

    Expects the other base class to provide create_iscsi_target(),
    remove_iscsi_target(), show_target() and _execute().
    """
    def __init__(self, *args, **kwargs):
        # Optional db API handle; popped so it is not forwarded to the
        # underlying target admin class's constructor.
        self.db = kwargs.pop('db', None)
        super(_ExportMixin, self).__init__(*args, **kwargs)
    def create_export(self, context, volume, volume_path):
        """Creates an export for a logical volume.

        Returns a dict with 'location' and 'auth' strings, which the caller
        persists as the volume's provider_location / provider_auth.
        """
        iscsi_name = "%s%s" % (CONF.iscsi_target_prefix,
                               volume['name'])
        iscsi_target, lun = self._get_target_and_lun(context, volume)
        # Fresh one-off CHAP credentials for this export.
        chap_username = utils.generate_username()
        chap_password = utils.generate_password()
        chap_auth = self._iscsi_authentication('IncomingUser', chap_username,
                                               chap_password)
        # NOTE(jdg): For TgtAdm case iscsi_name is the ONLY param we need
        # should clean this all up at some point in the future
        tid = self.create_iscsi_target(iscsi_name,
                                       iscsi_target,
                                       0,
                                       volume_path,
                                       chap_auth)
        data = {}
        data['location'] = self._iscsi_location(
            CONF.iscsi_ip_address, tid, iscsi_name, lun)
        data['auth'] = self._iscsi_authentication(
            'CHAP', chap_username, chap_password)
        return data
    def remove_export(self, context, volume):
        """Tear down the iSCSI target for `volume`, if one is exported."""
        try:
            iscsi_target = self._get_iscsi_target(context, volume['id'])
        except exception.NotFound:
            LOG.info(_("Skipping remove_export. No iscsi_target "
                       "provisioned for volume: %s"), volume['id'])
            return
        try:
            # NOTE: provider_location may be unset if the volume hasn't
            # been exported
            location = volume['provider_location'].split(' ')
            iqn = location[1]
            # ietadm show will exit with an error
            # this export has already been removed
            self.show_target(iscsi_target, iqn=iqn)
        except Exception:
            LOG.info(_("Skipping remove_export. No iscsi_target "
                       "is presently exported for volume: %s"), volume['id'])
            return
        self.remove_iscsi_target(iscsi_target, 0, volume['id'], volume['name'])
    def ensure_export(self, context, volume, iscsi_name, volume_path,
                      old_name=None):
        """Recreate the export for a volume (e.g. after a service restart)."""
        iscsi_target = self._get_target_for_ensure_export(context,
                                                          volume['id'])
        if iscsi_target is None:
            LOG.info(_("Skipping remove_export. No iscsi_target "
                       "provisioned for volume: %s"), volume['id'])
            return
        chap_auth = None
        # Check for https://bugs.launchpad.net/cinder/+bug/1065702
        # NOTE(review): the incoming old_name argument is deliberately
        # discarded here; it is only re-set when an id migration is detected
        # below -- confirm callers do not rely on passing it in.
        old_name = None
        if (volume['provider_location'] is not None and
                volume['name'] not in volume['provider_location']):
            msg = _('Detected inconsistency in provider_location id')
            LOG.debug(_('%s'), msg)
            old_name = self._fix_id_migration(context, volume)
            if 'in-use' in volume['status']:
                # Keep the old target name while attached; detach will route
                # back through ensure_export and fix things up then.
                old_name = None
        self.create_iscsi_target(iscsi_name, iscsi_target, 0, volume_path,
                                 chap_auth, check_exit_code=False,
                                 old_name=old_name)
    def _ensure_iscsi_targets(self, context, host):
        """Ensure that target ids have been created in datastore."""
        # NOTE(jdg): tgtadm doesn't use the iscsi_targets table
        # TODO(jdg): In the future move all of the dependent stuff into the
        # corresponding target admin class
        host_iscsi_targets = self.db.iscsi_target_count_by_host(context,
                                                                host)
        if host_iscsi_targets >= CONF.iscsi_num_targets:
            return
        # NOTE(vish): Target ids start at 1, not 0.
        target_end = CONF.iscsi_num_targets + 1
        for target_num in xrange(1, target_end):
            target = {'host': host, 'target_num': target_num}
            self.db.iscsi_target_create_safe(context, target)
    def _get_target_for_ensure_export(self, context, volume_id):
        """Return the volume's target id, or None when none is provisioned."""
        try:
            iscsi_target = self.db.volume_get_iscsi_target_num(context,
                                                               volume_id)
            return iscsi_target
        except exception.NotFound:
            return None
    def _get_target_and_lun(self, context, volume):
        """Allocate a target id for the volume; the lun is always 0 here."""
        lun = 0
        self._ensure_iscsi_targets(context, volume['host'])
        iscsi_target = self.db.volume_allocate_iscsi_target(context,
                                                            volume['id'],
                                                            volume['host'])
        return iscsi_target, lun
    def _get_iscsi_target(self, context, vol_id):
        # Raises exception.NotFound when no target is provisioned.
        return self.db.volume_get_iscsi_target_num(context, vol_id)
    def _iscsi_authentication(self, chap, name, password):
        # e.g. "IncomingUser <user> <password>" or "CHAP <user> <password>"
        return "%s %s %s" % (chap, name, password)
    def _iscsi_location(self, ip, target, iqn, lun=None):
        # e.g. "10.0.0.1:3260,<target> <iqn> <lun>"
        return "%s:%s,%s %s %s" % (ip, CONF.iscsi_port,
                                   target, iqn, lun)
    def _fix_id_migration(self, context, volume):
        """Fix provider_location and dev files to address bug 1065702.

        For volumes that the provider_location has NOT been updated
        and are not currently in-use we'll create a new iscsi target
        and remove the persist file.

        If the volume is in-use, we'll just stick with the old name
        and when detach is called we'll feed back into ensure_export
        again if necessary and fix things up then.

        Details at: https://bugs.launchpad.net/cinder/+bug/1065702
        """
        model_update = {}
        pattern = re.compile(r":|\s")
        fields = pattern.split(volume['provider_location'])
        old_name = fields[3]
        # Point provider_location at the volume's current name.
        volume['provider_location'] = \
            volume['provider_location'].replace(old_name, volume['name'])
        model_update['provider_location'] = volume['provider_location']
        self.db.volume_update(context, volume['id'], model_update)
        # Recreate the device symlink under the new name; restore the cwd
        # on every exit path.
        start = os.getcwd()
        os.chdir('/dev/%s' % CONF.volume_group)
        try:
            (out, err) = self._execute('readlink', old_name)
        except putils.ProcessExecutionError:
            link_path = '/dev/%s/%s' % (CONF.volume_group,
                                        old_name)
            LOG.debug(_('Symbolic link %s not found') % link_path)
            os.chdir(start)
            return
        rel_path = out.rstrip()
        self._execute('ln',
                      '-s',
                      rel_path, volume['name'],
                      run_as_root=True)
        os.chdir(start)
        return old_name
class TgtAdm(_ExportMixin, iscsi.TgtAdm):
    """Export mixin specialisation for the tgtadm helper."""
    def _get_target_and_lun(self, context, volume):
        # tgtadm reserves lun 0 for the controller, so devices start at
        # lun 1; the target id itself is unused by tgtadm (dummy 0).
        return 0, 1
    def _get_iscsi_target(self, context, vol_id):
        return 0
    def _get_target_for_ensure_export(self, context, volume_id):
        return 1
class FakeIscsiHelper(_ExportMixin, iscsi.FakeIscsiHelper):
    """No-op export helper returning canned data (used by tests)."""
    def create_export(self, context, volume, volume_path):
        # Never touches a real target; just hands back placeholder values.
        return {'location': "fake_location", 'auth': "fake_auth"}
    def remove_export(self, context, volume):
        pass
    def ensure_export(self, context, volume_id, iscsi_name, volume_path,
                      old_name=None):
        pass
class LioAdm(_ExportMixin, iscsi.LioAdm):
    def remove_export(self, context, volume):
        """Remove the iSCSI target for `volume`, if one was provisioned."""
        try:
            iscsi_target = self.db.volume_get_iscsi_target_num(context,
                                                               volume['id'])
        except exception.NotFound:
            LOG.info(_("Skipping remove_export. No iscsi_target "
                       "provisioned for volume: %s"), volume['id'])
            return
        self.remove_iscsi_target(iscsi_target, 0, volume['id'], volume['name'])

    def ensure_export(self, context, volume_id, iscsi_name, volume_path,
                      old_name=None):
        """Recreate the iSCSI target for `volume_id` (e.g. after restart)."""
        try:
            volume_info = self.db.volume_get(context, volume_id)
            (auth_method,
             auth_user,
             auth_pass) = volume_info['provider_auth'].split(' ', 3)
            chap_auth = self._iscsi_authentication(auth_method,
                                                   auth_user,
                                                   auth_pass)
        except exception.NotFound:
            # BUG FIX: the original logged here and then fell through,
            # referencing the unbound names volume_info (in a debug log)
            # and chap_auth, which raised NameError.  There is nothing to
            # export without a volume record, so bail out instead.
            LOG.info(_("Skipping ensure_export. No iscsi_target "
                       "provision for volume: %s"), volume_id)
            return
        iscsi_target = 1
        self.create_iscsi_target(iscsi_name, iscsi_target, 0, volume_path,
                                 chap_auth, check_exit_code=False)
class IetAdm(_ExportMixin, iscsi.IetAdm):
    # ietadm needs no overrides: the generic _ExportMixin behaviour applies.
    pass
class ISERTgtAdm(_ExportMixin, iscsi.ISERTgtAdm):
    """Export mixin specialisation for the iSER flavour of tgtadm."""
    def _get_target_and_lun(self, context, volume):
        # lun 0 is the controller, so devices start at lun 1; tgtadm does
        # not use the target id (dummy 0).
        return 0, 1
    def _get_iscsi_target(self, context, vol_id):
        return 0
    def _get_target_for_ensure_export(self, context, volume_id):
        return 1
| |
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import copy
from six import iteritems, iterkeys
import pandas as pd
import numpy as np
from . utils.protocol_utils import Enum
from . utils.math_utils import nanstd, nanmean, nansum
from zipline.utils.algo_instance import get_algo_instance
from zipline.utils.serialization_utils import (
VERSION_LABEL
)
# Datasource type should completely determine the other fields of a
# message with its type.
DATASOURCE_TYPE = Enum(
    'AS_TRADED_EQUITY',
    'MERGER',
    'SPLIT',
    'DIVIDEND',
    'TRADE',
    'TRANSACTION',
    'ORDER',
    'EMPTY',
    'DONE',
    'CUSTOM',
    'BENCHMARK',
    'COMMISSION',
    'CLOSE_POSITION'
)
# Expected fields/index values for a dividend Series.
DIVIDEND_FIELDS = [
    'declared_date',
    'ex_date',
    'gross_amount',
    'net_amount',
    'pay_date',
    'payment_sid',
    'ratio',
    'sid',
]
# Expected fields/index values for a dividend payment Series.
# dividend_payment() below constructs Series with exactly this index.
DIVIDEND_PAYMENT_FIELDS = [
    'id',
    'payment_sid',
    'cash_amount',
    'share_count',
]
def dividend_payment(data=None):
    """
    Build a pd.Series (indexed by DIVIDEND_PAYMENT_FIELDS) describing the
    payment of a single dividend.

    Ids are assigned to each historical dividend in
    PerformanceTracker.update_dividends and are unique integers within one
    simulation, so a non-empty `data` mapping must carry an 'id' naming the
    historical dividend this payment belongs to.

    A non-empty `data` should additionally have either a nonzero
    'cash_amount', or a 'payment_sid' asset identifier together with a
    nonzero 'share_count'.

    The Series is named with its id value so that concatenating payments
    yields a DataFrame indexed by id.  (When the Series is returned from a
    function passed to `DataFrame.apply`, pandas ignores the name and keeps
    the index of the frame `apply` was called on.)
    """
    payment_id = data['id'] if data is not None else None
    return pd.Series(
        data=data,
        name=payment_id,
        index=DIVIDEND_PAYMENT_FIELDS,
        dtype=object,
    )
class Event(object):
    """Generic attribute bag with dict-style access to its fields."""
    def __init__(self, initial_values=None):
        # Adopt the provided mapping directly as the instance namespace.
        if initial_values:
            self.__dict__ = initial_values
    def keys(self):
        return self.__dict__.keys()
    def __getitem__(self, name):
        # Route through getattr so item access and attribute access agree.
        return getattr(self, name)
    def __setitem__(self, name, value):
        setattr(self, name, value)
    def __delitem__(self, name):
        delattr(self, name)
    def __contains__(self, name):
        return name in self.__dict__
    def __eq__(self, other):
        return hasattr(other, '__dict__') and self.__dict__ == other.__dict__
    def __repr__(self):
        return "Event({0})".format(self.__dict__)
    def to_series(self, index=None):
        # Materialise the event's fields as a pandas Series.
        return pd.Series(self.__dict__, index=index)
class Order(Event):
    # Marker subclass: order events carry the same payload as any Event.
    pass
class Portfolio(object):
    """Tracks the algorithm's cash, positions and aggregate performance."""
    def __init__(self):
        self.capital_used = 0.0
        self.starting_cash = 0.0
        self.portfolio_value = 0.0
        self.pnl = 0.0
        self.returns = 0.0
        self.cash = 0.0
        self.positions = Positions()
        self.start_date = None
        self.positions_value = 0.0
    def __getitem__(self, key):
        return self.__dict__[key]
    def __repr__(self):
        return "Portfolio({0})".format(self.__dict__)
    def __getstate__(self):
        # Snapshot the instance dict; Positions is a dict subclass with
        # auto-creation, so persist it as a plain dict.
        state = copy(self.__dict__)
        state['positions'] = dict(self.positions)
        # Tag the snapshot with a schema version for restore-time checks.
        state[VERSION_LABEL] = 1
        return state
    def __setstate__(self, state):
        version = state.pop(VERSION_LABEL)
        if version < 1:
            raise BaseException("Portfolio saved state is too old.")
        self.positions = Positions()
        self.positions.update(state.pop('positions'))
        self.__dict__.update(state)
class Account(object):
    '''
    The account object tracks information about the trading account. The
    values are updated as the algorithm runs and its keys remain unchanged.
    If connected to a broker, one can update these values with the trading
    account values as reported by the broker.
    '''
    def __init__(self):
        unlimited = float('inf')
        self.settled_cash = 0.0
        self.accrued_interest = 0.0
        self.buying_power = unlimited
        self.equity_with_loan = 0.0
        self.total_positions_value = 0.0
        self.regt_equity = 0.0
        self.regt_margin = unlimited
        self.initial_margin_requirement = 0.0
        self.maintenance_margin_requirement = 0.0
        self.available_funds = 0.0
        self.excess_liquidity = 0.0
        self.cushion = 0.0
        self.day_trades_remaining = unlimited
        self.leverage = 0.0
        self.net_leverage = 0.0
        self.net_liquidation = 0.0
    def __getitem__(self, key):
        return self.__dict__[key]
    def __repr__(self):
        return "Account({0})".format(self.__dict__)
    def __getstate__(self):
        # Snapshot plus a schema version tag so stale pickles are rejected
        # on restore.
        state = copy(self.__dict__)
        state[VERSION_LABEL] = 1
        return state
    def __setstate__(self, state):
        version = state.pop(VERSION_LABEL)
        if version < 1:
            raise BaseException("Account saved state is too old.")
        self.__dict__.update(state)
class Position(object):
    """Tracks the holdings of a single sid."""
    def __init__(self, sid):
        self.sid = sid
        self.amount = 0               # number of shares held
        self.cost_basis = 0.0         # per share
        self.last_sale_price = 0.0
    def __getitem__(self, key):
        return self.__dict__[key]
    def __repr__(self):
        return "Position({0})".format(self.__dict__)
    def __getstate__(self):
        # Snapshot plus a schema version tag for restore-time checks.
        state = copy(self.__dict__)
        state[VERSION_LABEL] = 1
        return state
    def __setstate__(self, state):
        version = state.pop(VERSION_LABEL)
        if version < 1:
            raise BaseException("Protocol Position saved state is too old.")
        self.__dict__.update(state)
class Positions(dict):
    """Mapping of sid -> Position that auto-creates entries on first use."""
    def __missing__(self, key):
        # Lazily build an empty Position the first time a sid is looked up.
        self[key] = Position(key)
        return self[key]
class SIDData(object):
    """Event data for a single sid plus history-backed transforms
    (mavg, stddev, vwap, returns) whose lookups are cached at class level."""
    # Cache some data on the class so that this is shared for all instances of
    # siddata.
    # The dt where we cached the history.
    _history_cache_dt = None
    # _history_cache is a dict mapping fields to pd.DataFrames. This is the
    # most data we have for a given field for the _history_cache_dt.
    _history_cache = {}
    # This is the cache that is used for returns. This will have a different
    # structure than the other history cache as this is always daily.
    _returns_cache_dt = None
    _returns_cache = None
    # The last dt that we needed to cache the number of minutes.
    _minute_bar_cache_dt = None
    # If we are in minute mode, there is some cost associated with computing
    # the number of minutes that we need to pass to the bar count of history.
    # This will remain constant for a given bar and day count.
    # This maps days to number of minutes.
    _minute_bar_cache = {}
    def __init__(self, sid, initial_values=None):
        self._sid = sid
        # History frequency string ('1d' or '1m'); set lazily by _get_bars.
        self._freqstr = None
        # To check if we have data, we use the __len__ which depends on the
        # __dict__. Because we are forward defining the attributes needed, we
        # need to account for their entries in the __dict__.
        # We will add 1 because we need to account for the _initial_len entry
        # itself.
        self._initial_len = len(self.__dict__) + 1
        if initial_values:
            self.__dict__.update(initial_values)
    @property
    def datetime(self):
        """
        Provides an alias from data['foo'].datetime -> data['foo'].dt

        `datetime` was previously provided by adding a separate `datetime`
        member of the SIDData object via a generator that wrapped the incoming
        data feed and added the field to each equity event.

        This alias is intended to be temporary, to provide backwards
        compatibility with existing algorithms, but should be considered
        deprecated, and may be removed in the future.
        """
        return self.dt
    def get(self, name, default=None):
        # dict-style get over the event attributes.
        return self.__dict__.get(name, default)
    def __getitem__(self, name):
        return self.__dict__[name]
    def __setitem__(self, name, value):
        self.__dict__[name] = value
    def __len__(self):
        # Number of data fields, excluding the bookkeeping attributes
        # accounted for by _initial_len.
        return len(self.__dict__) - self._initial_len
    def __contains__(self, name):
        return name in self.__dict__
    def __repr__(self):
        return "SIDData({0})".format(self.__dict__)
    def _get_buffer(self, bars, field='price', raw=False):
        """
        Gets the result of history for the given number of bars and field.

        This will cache the results internally.
        """
        cls = self.__class__
        algo = get_algo_instance()
        now = algo.datetime
        if now != cls._history_cache_dt:
            # For a given dt, the history call for this field will not change.
            # We have a new dt, so we should reset the cache.
            cls._history_cache_dt = now
            cls._history_cache = {}
        # NOTE(review): self._history_cache resolves to the same class-level
        # dict as cls._history_cache as long as no instance attribute shadows
        # it, so the mixed self/cls usage below is consistent -- confirm
        # instances never assign their own _history_cache.
        if field not in self._history_cache \
                or bars > len(cls._history_cache[field][0].index):
            # If we have never cached this field OR the amount of bars that we
            # need for this field is greater than the amount we have cached,
            # then we need to get more history.
            hst = algo.history(
                bars, self._freqstr, field, ffill=True,
            )
            # Assert that the column holds ints, not security objects.
            if not isinstance(self._sid, str):
                hst.columns = hst.columns.astype(int)
            self._history_cache[field] = (hst, hst.values, hst.columns)
        # Slice of only the bars needed. This is because we store the LARGEST
        # amount of history for the field, and we might request less than the
        # largest from the cache.
        buffer_, values, columns = cls._history_cache[field]
        if raw:
            sid_index = columns.get_loc(self._sid)
            return values[-bars:, sid_index]
        else:
            return buffer_[self._sid][-bars:]
    def _get_bars(self, days):
        """
        Gets the number of bars needed for the current number of days.

        Figures this out based on the algo datafrequency and caches the result.
        This caches the result by replacing this function on the object.
        This means that after the first call to _get_bars, this method will
        point to a new function object.
        """
        def daily_get_max_bars(days):
            return days
        def minute_get_max_bars(days):
            # max number of minutes, regardless of current days or short
            # sessions
            return days * 390
        def daily_get_bars(days):
            return days
        def minute_get_bars(days):
            cls = self.__class__
            now = get_algo_instance().datetime
            if now != cls._minute_bar_cache_dt:
                cls._minute_bar_cache_dt = now
                cls._minute_bar_cache = {}
            if days not in cls._minute_bar_cache:
                # Cache this calculation to happen once per bar, even if we
                # use another transform with the same number of days.
                env = get_algo_instance().trading_environment
                prev = env.previous_trading_day(now)
                ds = env.days_in_range(
                    env.add_trading_days(-days + 2, prev),
                    prev,
                )
                # compute the number of minutes in the (days - 1) days before
                # today.
                # 210 minutes in an early close and 390 in a full day.
                ms = sum(210 if d in env.early_closes else 390 for d in ds)
                # Add the number of minutes for today.
                ms += int(
                    (now - env.get_open_and_close(now)[0]).total_seconds() / 60
                )
                cls._minute_bar_cache[days] = ms + 1  # Account for this minute
            return cls._minute_bar_cache[days]
        if get_algo_instance().sim_params.data_frequency == 'daily':
            self._freqstr = '1d'
            # update this method to point to the daily variant.
            self._get_bars = daily_get_bars
            self._get_max_bars = daily_get_max_bars
        else:
            self._freqstr = '1m'
            # update this method to point to the minute variant.
            self._get_bars = minute_get_bars
            self._get_max_bars = minute_get_max_bars
        # Not actually recursive because we have already cached the new method.
        return self._get_bars(days)
    def mavg(self, days):
        # Simple moving average of price over `days`.
        bars = self._get_bars(days)
        max_bars = self._get_max_bars(days)
        prices = self._get_buffer(max_bars, raw=True)[-bars:]
        return nanmean(prices)
    def stddev(self, days):
        # Sample standard deviation (ddof=1) of price over `days`.
        bars = self._get_bars(days)
        max_bars = self._get_max_bars(days)
        prices = self._get_buffer(max_bars, raw=True)[-bars:]
        return nanstd(prices, ddof=1)
    def vwap(self, days):
        # Volume-weighted average price over `days`.
        bars = self._get_bars(days)
        max_bars = self._get_max_bars(days)
        prices = self._get_buffer(max_bars, raw=True)[-bars:]
        vols = self._get_buffer(max_bars, field='volume', raw=True)[-bars:]
        vol_sum = nansum(vols)
        # NOTE(review): with float volumes this division yields nan/inf
        # instead of raising ZeroDivisionError -- confirm this guard is ever
        # actually hit.
        try:
            ret = nansum(prices * vols) / vol_sum
        except ZeroDivisionError:
            ret = np.nan
        return ret
    def returns(self):
        # Return over the last two daily closes: (p[-1] - p[0]) / p[0].
        algo = get_algo_instance()
        now = algo.datetime
        if now != self._returns_cache_dt:
            # NOTE(review): assigning via self creates instance attributes
            # that shadow the class-level cache declarations above, making
            # this cache effectively per-instance -- verify that is intended.
            self._returns_cache_dt = now
            self._returns_cache = algo.history(2, '1d', 'price', ffill=True)
        hst = self._returns_cache[self._sid]
        return (hst.iloc[-1] - hst.iloc[0]) / hst.iloc[0]
class BarData(object):
    """
    Holds the event data for all sids for a given dt.

    This is what is passed as `data` to the `handle_data` function.

    Note: Many methods are analogues of dictionary because of historical
    usage of what this replaced as a dictionary subclass.
    """
    def __init__(self, data=None):
        self._data = data or {}
        # Optional predicate used to hide sids from lookups and iteration.
        self._contains_override = None
    def __contains__(self, name):
        # An installed override acts as a pre-filter: sids it rejects are
        # reported as absent.
        if self._contains_override and not self._contains_override(name):
            return False
        return name in self._data
    def has_key(self, name):
        """
        DEPRECATED: __contains__ is preferred, but this method is for
        compatibility with existing algorithms.
        """
        return name in self
    def __setitem__(self, name, value):
        self._data[name] = value
    def __getitem__(self, name):
        return self._data[name]
    def __delitem__(self, name):
        del self._data[name]
    def __iter__(self):
        for sid, event_data in iteritems(self._data):
            # Honour the contains override and skip sids with no data.
            if sid in self and len(event_data):
                yield sid
    def iterkeys(self):
        # Filtered view of the underlying keys.
        return (sid for sid in iterkeys(self._data) if sid in self)
    def keys(self):
        return list(self.iterkeys())
    def iteritems(self):
        return ((sid, value) for sid, value in iteritems(self._data)
                if sid in self)
    def items(self):
        return list(self.iteritems())
    def itervalues(self):
        return (value for _sid, value in self.iteritems())
    def values(self):
        return list(self.itervalues())
    def __len__(self):
        return len(self.keys())
    def __repr__(self):
        return '{0}({1})'.format(self.__class__.__name__, self._data)
| |
import datetime
import errno
import json
import os
import shutil
import stat
import pytest
import pytz
import stix2
from stix2.datastore.filesystem import (
AuthSet, _find_search_optimizations, _get_matching_dir_entries,
_timestamp2filename,
)
from stix2.exceptions import STIXError
from .constants import (
CAMPAIGN_ID, CAMPAIGN_KWARGS, IDENTITY_ID, IDENTITY_KWARGS, INDICATOR_ID,
INDICATOR_KWARGS, MALWARE_ID, MALWARE_KWARGS, RELATIONSHIP_IDS,
)
FS_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "stix2_data")
@pytest.fixture
def fs_store():
    """Yield a FileSystemStore rooted at the shared test data directory."""
    store = stix2.FileSystemStore(FS_PATH)
    yield store
    # teardown: drop any campaigns written during the test
    shutil.rmtree(os.path.join(FS_PATH, "campaign"), True)
@pytest.fixture
def fs_source():
    """Yield a FileSystemSource rooted at the shared test data directory."""
    source = stix2.FileSystemSource(FS_PATH)
    assert source.stix_dir == FS_PATH
    yield source
    # teardown: drop any campaigns written during the test
    shutil.rmtree(os.path.join(FS_PATH, "campaign"), True)
@pytest.fixture
def fs_sink():
    """Yield a FileSystemSink rooted at the shared test data directory."""
    sink = stix2.FileSystemSink(FS_PATH)
    assert sink.stix_dir == FS_PATH
    yield sink
    # teardown: drop any campaigns written during the test
    shutil.rmtree(os.path.join(FS_PATH, "campaign"), True)
@pytest.fixture
def bad_json_files():
    """Create erroneous JSON files so tests can check graceful handling."""
    non_json = os.path.join(FS_PATH, "intrusion-set", "intrusion-set--test-non-json.txt")
    bad_json = os.path.join(FS_PATH, "intrusion-set", "intrusion-set--test-bad-json.json")
    with open(non_json, "w+") as f:
        f.write("Im not a JSON file")
    with open(bad_json, "w+") as f:
        f.write("Im not a JSON formatted file")
    yield True  # dummy yield so the fixture can tear down
    os.remove(non_json)
    os.remove(bad_json)
@pytest.fixture
def bad_stix_files():
    """Create an invalid STIX JSON file (missing "type") for error tests."""
    stix_obj = {
        "id": "intrusion-set--test-bad-stix",
        "spec_version": "2.0",
        # no "type" field
    }
    path = os.path.join(FS_PATH, "intrusion-set", "intrusion-set--test-non-stix.json")
    with open(path, "w+") as f:
        f.write(json.dumps(stix_obj))
    yield True  # dummy yield so the fixture can tear down
    os.remove(path)
@pytest.fixture(scope='module')
def rel_fs_store():
    """Module-scoped store seeded with a small graph: a campaign, identity,
    indicator and malware object plus three relationships linking them."""
    cam = stix2.v21.Campaign(id=CAMPAIGN_ID, **CAMPAIGN_KWARGS)
    idy = stix2.v21.Identity(id=IDENTITY_ID, **IDENTITY_KWARGS)
    ind = stix2.v21.Indicator(id=INDICATOR_ID, **INDICATOR_KWARGS)
    mal = stix2.v21.Malware(id=MALWARE_ID, **MALWARE_KWARGS)
    rel1 = stix2.v21.Relationship(ind, 'indicates', mal, id=RELATIONSHIP_IDS[0])
    rel2 = stix2.v21.Relationship(mal, 'targets', idy, id=RELATIONSHIP_IDS[1])
    rel3 = stix2.v21.Relationship(cam, 'uses', mal, id=RELATIONSHIP_IDS[2])
    stix_objs = [cam, idy, ind, mal, rel1, rel2, rel3]
    fs = stix2.FileSystemStore(FS_PATH)
    for o in stix_objs:
        fs.add(o)
    yield fs
    # Teardown: remove each object's on-disk JSON file.
    for o in stix_objs:
        filepath = os.path.join(
            FS_PATH, o.type, o.id,
            _timestamp2filename(o.modified) + '.json',
        )
        # Some test-scoped fixtures (e.g. fs_store) delete all campaigns, so by
        # the time this module-scoped fixture tears itself down, it may find
        # its campaigns already gone, which causes not-found errors.
        try:
            os.remove(filepath)
        except OSError as e:
            # 3 is the ERROR_PATH_NOT_FOUND windows error code. Which has an
            # errno symbolic value, but not the windows meaning...
            if e.errno in (errno.ENOENT, 3):
                continue
            raise
def test_filesystem_source_nonexistent_folder():
    # A source rooted at a missing directory must be rejected up front.
    with pytest.raises(ValueError):
        stix2.FileSystemSource('nonexistent-folder')
def test_filesystem_sink_nonexistent_folder():
    # A sink rooted at a missing directory must be rejected up front.
    with pytest.raises(ValueError):
        stix2.FileSystemSink('nonexistent-folder')
def test_filesystem_source_bad_json_file(fs_source, bad_json_files):
    # this tests the handling of two bad json files
    #  - one file should just be skipped (silently) as its a ".txt" extension
    #  - one file should be parsed and raise Exception bc its not JSON
    # BUG FIX: the original try/except passed silently if no exception was
    # raised at all; pytest.raises makes a missing error a real failure.
    with pytest.raises(TypeError) as excinfo:
        fs_source.get("intrusion-set--test-bad-json")
    assert "intrusion-set--test-bad-json" in str(excinfo.value)
    assert "could either not be parsed to JSON or was not valid STIX JSON" in str(excinfo.value)
def test_filesystem_source_bad_stix_file(fs_source, bad_stix_files):
    # this tests handling of bad STIX json object
    # BUG FIX: the original try/except passed silently if no exception was
    # raised at all; pytest.raises makes a missing error a real failure.
    with pytest.raises(STIXError) as excinfo:
        fs_source.get("intrusion-set--test-non-stix")
    assert "Can't parse object with no 'type' property" in str(excinfo.value)
def test_filesystem_source_get_object(fs_source):
    # get() must return the latest version of the malware object
    rover = fs_source.get("malware--6b616fc1-1505-48e3-8b2c-0d19337bff38")
    expected_modified = datetime.datetime(
        2018, 11, 16, 22, 54, 20, 390000,
        pytz.utc,
    )
    assert rover.id == "malware--6b616fc1-1505-48e3-8b2c-0d19337bff38"
    assert rover.name == "Rover"
    assert rover.modified == expected_modified
def test_filesystem_source_get_nonexistent_object(fs_source):
    # Unknown IDs yield None rather than raising.
    assert fs_source.get("indicator--6b616fc1-1505-48e3-8b2c-0d19337bff38") is None
def test_filesystem_source_all_versions(fs_source):
ids = fs_source.all_versions(
"identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5",
)
assert len(ids) == 2
assert all(
id_.id == "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5"
for id_ in ids
)
assert all(id_.name == "The MITRE Corporation" for id_ in ids)
assert all(id_.type == "identity" for id_ in ids)
def test_filesystem_source_query_single(fs_source):
    # query2
    # Query on a nested property path resolves to exactly one object.
    is_2 = fs_source.query([stix2.Filter("external_references.external_id", '=', "T1027")])
    assert len(is_2) == 1
    is_2 = is_2[0]
    assert is_2.id == "attack-pattern--b3d682b6-98f2-4fb0-aa3b-b4df007ca70a"
    assert is_2.type == "attack-pattern"


def test_filesystem_source_query_multiple(fs_source):
    # query
    intrusion_sets = fs_source.query([stix2.Filter("type", '=', "intrusion-set")])
    assert len(intrusion_sets) == 2
    assert "intrusion-set--a653431d-6a5e-4600-8ad3-609b5af57064" in [is_.id for is_ in intrusion_sets]
    assert "intrusion-set--f3bdec95-3d62-42d9-a840-29630f6cdc1a" in [is_.id for is_ in intrusion_sets]
    is_1 = [is_ for is_ in intrusion_sets if is_.id == "intrusion-set--f3bdec95-3d62-42d9-a840-29630f6cdc1a"][0]
    assert "DragonOK" in is_1.aliases
    assert len(is_1.external_references) == 4


def test_filesystem_source_backward_compatible(fs_source):
    # this specific object is outside an "ID" directory; make sure we can get
    # it.
    modified = datetime.datetime(2018, 11, 16, 22, 54, 20, 390000, pytz.utc)
    results = fs_source.query([
        stix2.Filter("type", "=", "malware"),
        stix2.Filter("id", "=", "malware--6b616fc1-1505-48e3-8b2c-0d19337bff38"),
        stix2.Filter("modified", "=", modified),
    ])
    assert len(results) == 1
    result = results[0]
    assert result.type == "malware"
    assert result.id == "malware--6b616fc1-1505-48e3-8b2c-0d19337bff38"
    assert result.modified == modified
    assert result.malware_types == ["version four"]


def test_filesystem_source_sco(fs_source):
    # A cyber-observable (directory) object can be queried like any other.
    results = fs_source.query([stix2.Filter("type", "=", "directory")])
    assert len(results) == 1
    result = results[0]
    assert result["type"] == "directory"
    assert result["id"] == "directory--572827aa-e0cd-44fd-afd5-a717a7585f39"
    assert result["path"] == "/performance/Democrat.gif"
def test_filesystem_sink_add_python_stix_object(fs_sink, fs_source):
    # add python stix object
    camp1 = stix2.v21.Campaign(
        name="Hannibal",
        objective="Targeting Italian and Spanish Diplomat internet accounts",
        aliases=["War Elephant"],
    )
    fs_sink.add(camp1)
    # The sink is expected to write <FS_PATH>/<type>/<id>/<modified>.json.
    filepath = os.path.join(
        FS_PATH, "campaign", camp1.id,
        _timestamp2filename(camp1.modified) + ".json",
    )
    assert os.path.exists(filepath)
    camp1_r = fs_source.get(camp1.id)
    assert camp1_r.id == camp1.id
    assert camp1_r.name == "Hannibal"
    assert "War Elephant" in camp1_r.aliases
    # Clean up so later tests see an unchanged dataset.
    os.remove(filepath)


def test_filesystem_sink_add_stix_object_dict(fs_sink, fs_source):
    # add stix object dict
    camp2 = {
        "name": "Aurelius",
        "type": "campaign",
        "objective": "German and French Intelligence Services",
        "aliases": ["Purple Robes"],
        "id": "campaign--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f",
        "created": "2017-05-31T21:31:53.197755Z",
        "modified": "2017-05-31T21:31:53.197755Z",
    }
    fs_sink.add(camp2)
    # Need to get the exact "modified" timestamp which would have been
    # in effect at the time the object was saved to the sink, which determines
    # the filename it would have been saved as. It may not be exactly the same
    # as what's in the dict, since the parsing process can enforce a precision
    # constraint (e.g. truncate to milliseconds), which results in a slightly
    # different name.
    camp2obj = stix2.parse(camp2)
    filepath = os.path.join(
        FS_PATH, "campaign", camp2obj["id"],
        _timestamp2filename(camp2obj["modified"]) + ".json",
    )
    assert os.path.exists(filepath)
    camp2_r = fs_source.get(camp2["id"])
    assert camp2_r.id == camp2["id"]
    assert camp2_r.name == camp2["name"]
    assert "Purple Robes" in camp2_r.aliases
    os.remove(filepath)


def test_filesystem_sink_add_stix_bundle_dict(fs_sink, fs_source):
    # add stix bundle dict
    bund = {
        "type": "bundle",
        "id": "bundle--040ae5ec-2e91-4e94-b075-bc8b368e8ca3",
        "objects": [
            {
                "name": "Atilla",
                "type": "campaign",
                "objective": "Bulgarian, Albanian and Romanian Intelligence Services",
                "aliases": ["Huns"],
                "id": "campaign--b8f86161-ccae-49de-973a-4ca320c62478",
                "created": "2017-05-31T21:31:53.197755Z",
                "modified": "2017-05-31T21:31:53.197755Z",
            },
        ],
    }
    fs_sink.add(bund)
    # Re-parse the contained object to learn its canonical filename (see note
    # in test_filesystem_sink_add_stix_object_dict about timestamp precision).
    camp_obj = stix2.parse(bund["objects"][0])
    filepath = os.path.join(
        FS_PATH, "campaign", camp_obj["id"],
        _timestamp2filename(camp_obj["modified"]) + ".json",
    )
    assert os.path.exists(filepath)
    camp3_r = fs_source.get(bund["objects"][0]["id"])
    assert camp3_r.id == bund["objects"][0]["id"]
    assert camp3_r.name == bund["objects"][0]["name"]
    assert "Huns" in camp3_r.aliases
    os.remove(filepath)
def test_filesystem_sink_add_json_stix_object(fs_sink, fs_source):
    # add json-encoded stix obj
    camp4 = '{"type": "campaign", "id":"campaign--6a6ca372-ba07-42cc-81ef-9840fc1f963d",'\
        ' "created":"2017-05-31T21:31:53.197755Z",'\
        ' "modified":"2017-05-31T21:31:53.197755Z",'\
        ' "name": "Ghengis Khan", "objective": "China and Russian infrastructure"}'
    fs_sink.add(camp4)
    # Parse to recover the timestamp that determines the on-disk filename.
    camp4obj = stix2.parse(camp4)
    filepath = os.path.join(
        FS_PATH, "campaign",
        "campaign--6a6ca372-ba07-42cc-81ef-9840fc1f963d",
        _timestamp2filename(camp4obj["modified"]) + ".json",
    )
    assert os.path.exists(filepath)
    camp4_r = fs_source.get("campaign--6a6ca372-ba07-42cc-81ef-9840fc1f963d")
    assert camp4_r.id == "campaign--6a6ca372-ba07-42cc-81ef-9840fc1f963d"
    assert camp4_r.name == "Ghengis Khan"
    os.remove(filepath)


def test_filesystem_sink_json_stix_bundle(fs_sink, fs_source):
    # add json-encoded stix bundle
    bund2 = '{"type": "bundle", "id": "bundle--3d267103-8475-4d8f-b321-35ec6eccfa37",' \
        ' "spec_version": "2.0", "objects": [{"type": "campaign", "id": "campaign--2c03b8bf-82ee-433e-9918-ca2cb6e9534b",' \
        ' "created":"2017-05-31T21:31:53.197755Z",'\
        ' "modified":"2017-05-31T21:31:53.197755Z",'\
        ' "name": "Spartacus", "objective": "Oppressive regimes of Africa and Middle East"}]}'
    fs_sink.add(bund2)
    bund2obj = stix2.parse(bund2)
    camp_obj = bund2obj["objects"][0]
    filepath = os.path.join(
        FS_PATH, "campaign",
        "campaign--2c03b8bf-82ee-433e-9918-ca2cb6e9534b",
        _timestamp2filename(camp_obj["modified"]) + ".json",
    )
    assert os.path.exists(filepath)
    camp5_r = fs_source.get("campaign--2c03b8bf-82ee-433e-9918-ca2cb6e9534b")
    assert camp5_r.id == "campaign--2c03b8bf-82ee-433e-9918-ca2cb6e9534b"
    assert camp5_r.name == "Spartacus"
    os.remove(filepath)
def test_filesystem_sink_add_objects_list(fs_sink, fs_source):
    # add list of objects
    camp6 = stix2.v21.Campaign(
        name="Comanche",
        objective="US Midwest manufacturing firms, oil refineries, and businesses",
        aliases=["Horse Warrior"],
    )
    camp7 = {
        "name": "Napolean",
        "type": "campaign",
        "objective": "Central and Eastern Europe military commands and departments",
        "aliases": ["The Frenchmen"],
        "id": "campaign--122818b6-1112-4fb0-b11b-b111107ca70a",
        "created": "2017-05-31T21:31:53.197755Z",
        "modified": "2017-05-31T21:31:53.197755Z",
    }
    # A mixed list (python object + raw dict) should be accepted in one call.
    fs_sink.add([camp6, camp7])
    camp7obj = stix2.parse(camp7)
    camp6filepath = os.path.join(
        FS_PATH, "campaign", camp6.id,
        _timestamp2filename(camp6["modified"]) +
        ".json",
    )
    camp7filepath = os.path.join(
        FS_PATH, "campaign", "campaign--122818b6-1112-4fb0-b11b-b111107ca70a",
        _timestamp2filename(camp7obj["modified"]) + ".json",
    )
    assert os.path.exists(camp6filepath)
    assert os.path.exists(camp7filepath)
    camp6_r = fs_source.get(camp6.id)
    assert camp6_r.id == camp6.id
    assert "Horse Warrior" in camp6_r.aliases
    camp7_r = fs_source.get(camp7["id"])
    assert camp7_r.id == camp7["id"]
    assert "The Frenchmen" in camp7_r.aliases
    # remove all added objects
    os.remove(camp6filepath)
    os.remove(camp7filepath)


def test_filesystem_sink_marking(fs_sink):
    marking = stix2.v21.MarkingDefinition(
        id="marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da",
        created="2017-01-20T00:00:00.000Z",
        definition_type="tlp",
        definition=stix2.v21.TLPMarking(tlp="green"),
    )
    fs_sink.add(marking)
    # Expected filename is just <id>.json, with no timestamp component.
    marking_filepath = os.path.join(
        FS_PATH, "marking-definition", marking["id"] + ".json",
    )
    assert os.path.exists(marking_filepath)
    os.remove(marking_filepath)


def test_filesystem_sink_sco(fs_sink):
    file_sco = {
        "type": "file",
        "id": "file--decfcc48-31b3-45f5-87c8-1b3a5d71a307",
        "name": "cats.png",
    }
    fs_sink.add(file_sco)
    # Expected filename is just <id>.json, with no timestamp component.
    sco_filepath = os.path.join(
        FS_PATH, "file", sco_id + ".json",
    ) if False else os.path.join(
        FS_PATH, "file", file_sco["id"] + ".json",
    )
    assert os.path.exists(sco_filepath)
    os.remove(sco_filepath)
    # The "file" type directory was created by this test; remove it too.
    os.rmdir(os.path.dirname(sco_filepath))
def test_filesystem_store_get_stored_as_bundle(fs_store):
    # This fixture object is stored on disk wrapped in a bundle.
    coa = fs_store.get("course-of-action--95ddb356-7ba0-4bd9-a889-247262b8946f")
    assert coa.id == "course-of-action--95ddb356-7ba0-4bd9-a889-247262b8946f"
    assert coa.type == "course-of-action"


def test_filesystem_store_get_stored_as_object(fs_store):
    # This fixture object is stored on disk as a bare object (no bundle).
    coa = fs_store.get("course-of-action--d9727aee-48b8-4fdb-89e2-4c49746ba4dd")
    assert coa.id == "course-of-action--d9727aee-48b8-4fdb-89e2-4c49746ba4dd"
    assert coa.type == "course-of-action"


def test_filesystem_store_all_versions(fs_store):
    rels = fs_store.all_versions("relationship--70dc6b5c-c524-429e-a6ab-0dd40f0482c1")
    assert len(rels) == 1
    rel = rels[0]
    assert rel.id == "relationship--70dc6b5c-c524-429e-a6ab-0dd40f0482c1"
    assert rel.type == "relationship"


def test_filesystem_store_query(fs_store):
    # query()
    tools = fs_store.query([stix2.Filter("tool_types", "in", "tool")])
    assert len(tools) == 2
    assert "tool--242f3da3-4425-4d11-8f5c-b842886da966" in [tool.id for tool in tools]
    assert "tool--03342581-f790-4f03-ba41-e82e67392e23" in [tool.id for tool in tools]


def test_filesystem_store_query_single_filter(fs_store):
    # A bare Filter (not wrapped in a list) must also be accepted.
    query = stix2.Filter("tool_types", "in", "tool")
    tools = fs_store.query(query)
    assert len(tools) == 2
    assert "tool--242f3da3-4425-4d11-8f5c-b842886da966" in [tool.id for tool in tools]
    assert "tool--03342581-f790-4f03-ba41-e82e67392e23" in [tool.id for tool in tools]


def test_filesystem_store_empty_query(fs_store):
    results = fs_store.query()  # returns all
    # 31 is the total object count of the fixture dataset.
    assert len(results) == 31
    assert "tool--242f3da3-4425-4d11-8f5c-b842886da966" in [obj.id for obj in results]
    assert "marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168" in [obj.id for obj in results]
    assert "directory--572827aa-e0cd-44fd-afd5-a717a7585f39" in [obj.id for obj in results]


def test_filesystem_store_query_multiple_filters(fs_store):
    # One filter attached to the source plus one passed to query(); the
    # single result is consistent with both being applied.
    fs_store.source.filters.add(stix2.Filter("tool_types", "in", "tool"))
    tools = fs_store.query(stix2.Filter("id", "=", "tool--242f3da3-4425-4d11-8f5c-b842886da966"))
    assert len(tools) == 1
    assert tools[0].id == "tool--242f3da3-4425-4d11-8f5c-b842886da966"


def test_filesystem_store_query_dont_include_type_folder(fs_store):
    # 31 objects total minus the 2 tools.
    results = fs_store.query(stix2.Filter("type", "!=", "tool"))
    assert len(results) == 29
def test_filesystem_store_add(fs_store):
    # add()
    camp1 = stix2.v21.Campaign(
        name="Great Heathen Army",
        objective="Targeting the government of United Kingdom and insitutions affiliated with the Church Of England",
        aliases=["Ragnar"],
    )
    fs_store.add(camp1)
    camp1_r = fs_store.get(camp1.id)
    assert camp1_r.id == camp1.id
    assert camp1_r.name == camp1.name
    filepath = os.path.join(
        FS_PATH, "campaign", camp1_r.id,
        _timestamp2filename(camp1_r.modified) + ".json",
    )
    # remove
    os.remove(filepath)


def test_filesystem_store_add_as_bundle():
    # A store created with bundlify=True writes files containing a bundle.
    fs_store = stix2.FileSystemStore(FS_PATH, bundlify=True)
    camp1 = stix2.v21.Campaign(
        name="Great Heathen Army",
        objective="Targeting the government of United Kingdom and insitutions affiliated with the Church Of England",
        aliases=["Ragnar"],
    )
    fs_store.add(camp1)
    filepath = os.path.join(
        FS_PATH, "campaign", camp1.id,
        _timestamp2filename(camp1.modified) + ".json",
    )
    with open(filepath) as bundle_file:
        assert '"type": "bundle"' in bundle_file.read()
    camp1_r = fs_store.get(camp1.id)
    assert camp1_r.id == camp1.id
    assert camp1_r.name == camp1.name
    # ignore_errors=True: best-effort cleanup of the whole campaign dir.
    shutil.rmtree(os.path.join(FS_PATH, "campaign"), True)


def test_filesystem_add_bundle_object(fs_store):
    # Adding an empty bundle must not raise.
    bundle = stix2.v21.Bundle()
    fs_store.add(bundle)


def test_filesystem_store_add_invalid_object(fs_store):
    ind = ('campaign', 'campaign--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f')  # tuple isn't valid
    with pytest.raises(TypeError) as excinfo:
        fs_store.add(ind)
    # The error message must enumerate every accepted input form.
    assert 'stix_data must be' in str(excinfo.value)
    assert 'a STIX object' in str(excinfo.value)
    assert 'JSON formatted STIX' in str(excinfo.value)
    assert 'JSON formatted STIX bundle' in str(excinfo.value)


def test_filesystem_store_add_marking(fs_store):
    marking = stix2.v21.MarkingDefinition(
        id="marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da",
        created="2017-01-20T00:00:00.000Z",
        definition_type="tlp",
        definition=stix2.v21.TLPMarking(tlp="green"),
    )
    fs_store.add(marking)
    # Expected filename is just <id>.json, with no timestamp component.
    marking_filepath = os.path.join(
        FS_PATH, "marking-definition", marking["id"] + ".json",
    )
    assert os.path.exists(marking_filepath)
    marking_r = fs_store.get(marking["id"])
    assert marking_r["id"] == marking["id"]
    assert marking_r["definition"]["tlp"] == "green"
    os.remove(marking_filepath)


def test_filesystem_store_add_sco(fs_store):
    sco = stix2.v21.EmailAddress(
        value="jdoe@example.com",
    )
    fs_store.add(sco)
    # Expected filename is just <id>.json, with no timestamp component.
    sco_filepath = os.path.join(
        FS_PATH, "email-addr", sco["id"] + ".json",
    )
    assert os.path.exists(sco_filepath)
    sco_r = fs_store.get(sco["id"])
    assert sco_r["id"] == sco["id"]
    assert sco_r["value"] == sco["value"]
    os.remove(sco_filepath)
    # The "email-addr" type directory was created by this test; remove it too.
    os.rmdir(os.path.dirname(sco_filepath))
def test_filesystem_object_with_custom_property(fs_store):
    # A custom x_* property survives a store/retrieve round trip.
    camp = stix2.v21.Campaign(
        name="Scipio Africanus",
        objective="Defeat the Carthaginians",
        x_empire="Roman",
        allow_custom=True,
    )
    fs_store.add(camp)
    camp_r = fs_store.get(camp.id)
    assert camp_r.id == camp.id
    assert camp_r.x_empire == camp.x_empire


def test_filesystem_object_with_custom_property_in_bundle(fs_store):
    # Same round trip, but the object is added wrapped in a bundle.
    camp = stix2.v21.Campaign(
        name="Scipio Africanus",
        objective="Defeat the Carthaginians",
        x_empire="Roman",
        allow_custom=True,
    )
    bundle = stix2.v21.Bundle(camp, allow_custom=True)
    fs_store.add(bundle)
    camp_r = fs_store.get(camp.id)
    assert camp_r.id == camp.id
    assert camp_r.x_empire == camp.x_empire


def test_filesystem_custom_object_dict(fs_store):
    # Temporarily allow custom content on the shared sink; restored below.
    fs_store.sink.allow_custom = True
    newobj = {
        "type": "x-new-obj-2",
        "id": "x-new-obj-2--d08dc866-6149-47db-aae6-7b58a827e7f0",
        "spec_version": "2.1",
        "created": "2020-07-20T03:45:02.879Z",
        "modified": "2020-07-20T03:45:02.879Z",
        "property1": "something",
    }
    fs_store.add(newobj)
    newobj_r = fs_store.get(newobj["id"])
    assert newobj_r["id"] == newobj["id"]
    assert newobj_r["property1"] == 'something'
    # remove dir
    shutil.rmtree(os.path.join(FS_PATH, "x-new-obj-2"), True)
    fs_store.sink.allow_custom = False


def test_filesystem_custom_object(fs_store):
    # Register a custom object type so no allow_custom toggle is needed.
    @stix2.v21.CustomObject(
        'x-new-obj-2', [
            ('property1', stix2.properties.StringProperty(required=True)),
        ],
    )
    class NewObj():
        pass

    newobj = NewObj(property1='something')
    fs_store.add(newobj)
    newobj_r = fs_store.get(newobj.id)
    assert newobj_r["id"] == newobj["id"]
    assert newobj_r["property1"] == 'something'
    # remove dir
    shutil.rmtree(os.path.join(FS_PATH, "x-new-obj-2"), True)
def test_relationships(rel_fs_store):
    # All relationships touching the malware object, in either direction.
    mal = rel_fs_store.get(MALWARE_ID)
    resp = rel_fs_store.relationships(mal)
    assert len(resp) == 3
    assert any(x['id'] == RELATIONSHIP_IDS[0] for x in resp)
    assert any(x['id'] == RELATIONSHIP_IDS[1] for x in resp)
    assert any(x['id'] == RELATIONSHIP_IDS[2] for x in resp)


def test_relationships_by_type(rel_fs_store):
    mal = rel_fs_store.get(MALWARE_ID)
    resp = rel_fs_store.relationships(mal, relationship_type='indicates')
    assert len(resp) == 1
    assert resp[0]['id'] == RELATIONSHIP_IDS[0]


def test_relationships_by_source(rel_fs_store):
    # Passing the bare ID (not the object) is also accepted.
    resp = rel_fs_store.relationships(MALWARE_ID, source_only=True)
    assert len(resp) == 1
    assert resp[0]['id'] == RELATIONSHIP_IDS[1]


def test_relationships_by_target(rel_fs_store):
    resp = rel_fs_store.relationships(MALWARE_ID, target_only=True)
    assert len(resp) == 2
    assert any(x['id'] == RELATIONSHIP_IDS[0] for x in resp)
    assert any(x['id'] == RELATIONSHIP_IDS[2] for x in resp)


def test_relationships_by_target_and_type(rel_fs_store):
    resp = rel_fs_store.relationships(MALWARE_ID, relationship_type='uses', target_only=True)
    assert len(resp) == 1
    assert any(x['id'] == RELATIONSHIP_IDS[2] for x in resp)


def test_relationships_by_target_and_source(rel_fs_store):
    # source_only and target_only are mutually exclusive.
    with pytest.raises(ValueError) as excinfo:
        rel_fs_store.relationships(MALWARE_ID, target_only=True, source_only=True)
    assert 'not both' in str(excinfo.value)


def test_related_to(rel_fs_store):
    # related_to() yields the objects at the other end of each relationship.
    mal = rel_fs_store.get(MALWARE_ID)
    resp = rel_fs_store.related_to(mal)
    assert len(resp) == 3
    assert any(x['id'] == CAMPAIGN_ID for x in resp)
    assert any(x['id'] == INDICATOR_ID for x in resp)
    assert any(x['id'] == IDENTITY_ID for x in resp)


def test_related_to_by_source(rel_fs_store):
    resp = rel_fs_store.related_to(MALWARE_ID, source_only=True)
    assert len(resp) == 1
    assert any(x['id'] == IDENTITY_ID for x in resp)


def test_related_to_by_target(rel_fs_store):
    resp = rel_fs_store.related_to(MALWARE_ID, target_only=True)
    assert len(resp) == 2
    assert any(x['id'] == CAMPAIGN_ID for x in resp)
    assert any(x['id'] == INDICATOR_ID for x in resp)
def test_auth_set_white1():
    """An explicit include set produces a whitelist of those values."""
    result = AuthSet({"A"}, set())
    assert result.auth_type == AuthSet.WHITE
    assert result.values == {"A"}


def test_auth_set_white2():
    """Empty include and exclude sets produce an empty whitelist."""
    result = AuthSet(set(), set())
    assert result.auth_type == AuthSet.WHITE
    assert not result.values


def test_auth_set_white3():
    """Excluded values are subtracted from the include set."""
    result = AuthSet({"A", "B"}, {"B", "C"})
    assert result.auth_type == AuthSet.WHITE
    assert result.values == {"A"}


def test_auth_set_black1():
    """With no include set at all, the exclusions form a blacklist."""
    result = AuthSet(None, {"B", "C"})
    assert result.auth_type == AuthSet.BLACK
    assert result.values == {"B", "C"}
def test_optimize_types1():
    # One type "=" filter: whitelist that single type; IDs unconstrained.
    filters = [
        stix2.Filter("type", "=", "foo"),
    ]
    auth_types, auth_ids = _find_search_optimizations(filters)
    assert auth_types.auth_type == AuthSet.WHITE
    assert auth_types.values == {"foo"}
    assert auth_ids.auth_type == AuthSet.BLACK
    assert len(auth_ids.values) == 0


def test_optimize_types2():
    # Two contradictory "=" filters: empty whitelist (nothing can match).
    filters = [
        stix2.Filter("type", "=", "foo"),
        stix2.Filter("type", "=", "bar"),
    ]
    auth_types, auth_ids = _find_search_optimizations(filters)
    assert auth_types.auth_type == AuthSet.WHITE
    assert len(auth_types.values) == 0
    assert auth_ids.auth_type == AuthSet.BLACK
    assert len(auth_ids.values) == 0


def test_optimize_types3():
    # Overlapping "in" filters intersect.
    filters = [
        stix2.Filter("type", "in", ["A", "B", "C"]),
        stix2.Filter("type", "in", ["B", "C", "D"]),
    ]
    auth_types, auth_ids = _find_search_optimizations(filters)
    assert auth_types.auth_type == AuthSet.WHITE
    assert auth_types.values == {"B", "C"}
    assert auth_ids.auth_type == AuthSet.BLACK
    assert len(auth_ids.values) == 0


def test_optimize_types4():
    # Disjoint "in" filters: empty whitelist.
    filters = [
        stix2.Filter("type", "in", ["A", "B", "C"]),
        stix2.Filter("type", "in", ["D", "E", "F"]),
    ]
    auth_types, auth_ids = _find_search_optimizations(filters)
    assert auth_types.auth_type == AuthSet.WHITE
    assert len(auth_types.values) == 0
    assert auth_ids.auth_type == AuthSet.BLACK
    assert len(auth_ids.values) == 0


def test_optimize_types5():
    # "!=" removes its value from an "in" whitelist.
    filters = [
        stix2.Filter("type", "in", ["foo", "bar"]),
        stix2.Filter("type", "!=", "bar"),
    ]
    auth_types, auth_ids = _find_search_optimizations(filters)
    assert auth_types.auth_type == AuthSet.WHITE
    assert auth_types.values == {"foo"}
    assert auth_ids.auth_type == AuthSet.BLACK
    assert len(auth_ids.values) == 0


def test_optimize_types6():
    # Only "!=" filters: a blacklist of the excluded types.
    filters = [
        stix2.Filter("type", "!=", "foo"),
        stix2.Filter("type", "!=", "bar"),
    ]
    auth_types, auth_ids = _find_search_optimizations(filters)
    assert auth_types.auth_type == AuthSet.BLACK
    assert auth_types.values == {"foo", "bar"}
    assert auth_ids.auth_type == AuthSet.BLACK
    assert len(auth_ids.values) == 0


def test_optimize_types7():
    # "=" and "!=" of the same value cancel out to an empty whitelist.
    filters = [
        stix2.Filter("type", "=", "foo"),
        stix2.Filter("type", "!=", "foo"),
    ]
    auth_types, auth_ids = _find_search_optimizations(filters)
    assert auth_types.auth_type == AuthSet.WHITE
    assert len(auth_types.values) == 0
    assert auth_ids.auth_type == AuthSet.BLACK
    assert len(auth_ids.values) == 0


def test_optimize_types8():
    # No filters at all: empty blacklists, i.e. everything is allowed.
    filters = []
    auth_types, auth_ids = _find_search_optimizations(filters)
    assert auth_types.auth_type == AuthSet.BLACK
    assert len(auth_types.values) == 0
    assert auth_ids.auth_type == AuthSet.BLACK
    assert len(auth_ids.values) == 0
def test_optimize_types_ids1():
    # An id "=" filter narrows both the ID set and (via its prefix) the types.
    filters = [
        stix2.Filter("type", "in", ["foo", "bar"]),
        stix2.Filter("id", "=", "foo--00000000-0000-0000-0000-000000000000"),
    ]
    auth_types, auth_ids = _find_search_optimizations(filters)
    assert auth_types.auth_type == AuthSet.WHITE
    assert auth_types.values == {"foo"}
    assert auth_ids.auth_type == AuthSet.WHITE
    assert auth_ids.values == {"foo--00000000-0000-0000-0000-000000000000"}


def test_optimize_types_ids2():
    # The id's type prefix contradicts the type filter: nothing can match.
    filters = [
        stix2.Filter("type", "=", "foo"),
        stix2.Filter("id", "=", "bar--00000000-0000-0000-0000-000000000000"),
    ]
    auth_types, auth_ids = _find_search_optimizations(filters)
    assert auth_types.auth_type == AuthSet.WHITE
    assert len(auth_types.values) == 0
    assert auth_ids.auth_type == AuthSet.WHITE
    assert len(auth_ids.values) == 0


def test_optimize_types_ids3():
    # An id "!=" filter blacklists that ID but leaves the types alone.
    filters = [
        stix2.Filter("type", "in", ["foo", "bar"]),
        stix2.Filter("id", "!=", "bar--00000000-0000-0000-0000-000000000000"),
    ]
    auth_types, auth_ids = _find_search_optimizations(filters)
    assert auth_types.auth_type == AuthSet.WHITE
    assert auth_types.values == {"foo", "bar"}
    assert auth_ids.auth_type == AuthSet.BLACK
    assert auth_ids.values == {"bar--00000000-0000-0000-0000-000000000000"}


def test_optimize_types_ids4():
    # IDs survive only if their type prefix is in the type whitelist.
    filters = [
        stix2.Filter("type", "in", ["A", "B", "C"]),
        stix2.Filter(
            "id", "in", [
                "B--00000000-0000-0000-0000-000000000000",
                "C--00000000-0000-0000-0000-000000000000",
                "D--00000000-0000-0000-0000-000000000000",
            ],
        ),
    ]
    auth_types, auth_ids = _find_search_optimizations(filters)
    assert auth_types.auth_type == AuthSet.WHITE
    assert auth_types.values == {"B", "C"}
    assert auth_ids.auth_type == AuthSet.WHITE
    assert auth_ids.values == {
        "B--00000000-0000-0000-0000-000000000000",
        "C--00000000-0000-0000-0000-000000000000",
    }


def test_optimize_types_ids5():
    # Mixed type/id constraints intersect down to a single type/ID pair.
    filters = [
        stix2.Filter("type", "in", ["A", "B", "C"]),
        stix2.Filter("type", "!=", "C"),
        stix2.Filter(
            "id", "in", [
                "B--00000000-0000-0000-0000-000000000000",
                "C--00000000-0000-0000-0000-000000000000",
                "D--00000000-0000-0000-0000-000000000000",
            ],
        ),
        stix2.Filter("id", "!=", "D--00000000-0000-0000-0000-000000000000"),
    ]
    auth_types, auth_ids = _find_search_optimizations(filters)
    assert auth_types.auth_type == AuthSet.WHITE
    assert auth_types.values == {"B"}
    assert auth_ids.auth_type == AuthSet.WHITE
    assert auth_ids.values == {"B--00000000-0000-0000-0000-000000000000"}


def test_optimize_types_ids6():
    # An id filter alone also implies a type whitelist from its prefix.
    filters = [
        stix2.Filter("id", "=", "A--00000000-0000-0000-0000-000000000000"),
    ]
    auth_types, auth_ids = _find_search_optimizations(filters)
    assert auth_types.auth_type == AuthSet.WHITE
    assert auth_types.values == {"A"}
    assert auth_ids.auth_type == AuthSet.WHITE
    assert auth_ids.values == {"A--00000000-0000-0000-0000-000000000000"}
def test_search_auth_set_white1():
    # Whitelisted names that don't exist on disk are simply not returned.
    auth_set = AuthSet(
        {"attack-pattern", "doesntexist"},
        set(),
    )
    results = _get_matching_dir_entries(FS_PATH, auth_set, stat.S_ISDIR)
    assert results == ["attack-pattern"]
    # The same names match nothing when regular files are requested instead.
    results = _get_matching_dir_entries(FS_PATH, auth_set, stat.S_ISREG)
    assert len(results) == 0


def test_search_auth_set_white2():
    # Exclusions override inclusions; unknown names are ignored.
    auth_set = AuthSet(
        {
            "malware--6b616fc1-1505-48e3-8b2c-0d19337bff38",
            "malware--92ec0cbd-2c30-44a2-b270-73f4ec949841",
        },
        {
            "malware--92ec0cbd-2c30-44a2-b270-73f4ec949841",
            "malware--96b08451-b27a-4ff6-893f-790e26393a8e",
            "doesntexist",
        },
    )
    results = _get_matching_dir_entries(
        os.path.join(FS_PATH, "malware"),
        auth_set, stat.S_ISDIR,
    )
    assert results == ["malware--6b616fc1-1505-48e3-8b2c-0d19337bff38"]


def test_search_auth_set_white3():
    # With an extension given, whitelist entries match the file stems.
    auth_set = AuthSet({"20170531213258226477", "doesntexist"}, set())
    results = _get_matching_dir_entries(
        os.path.join(
            FS_PATH, "malware",
            "malware--6b616fc1-1505-48e3-8b2c-0d19337bff38",
        ),
        auth_set, stat.S_ISREG, ".json",
    )
    assert results == ["20170531213258226477.json"]


def test_search_auth_set_black1():
    # Blacklist mode: everything on disk except the excluded entries.
    auth_set = AuthSet(
        None,
        {"tool--242f3da3-4425-4d11-8f5c-b842886da966", "doesntexist"},
    )
    results = _get_matching_dir_entries(
        os.path.join(FS_PATH, "tool"),
        auth_set, stat.S_ISDIR,
    )
    assert set(results) == {
        "tool--03342581-f790-4f03-ba41-e82e67392e23",
    }


def test_search_auth_set_white_empty():
    # An empty whitelist matches nothing at all.
    auth_set = AuthSet(
        set(),
        set(),
    )
    results = _get_matching_dir_entries(FS_PATH, auth_set, stat.S_ISDIR)
    assert len(results) == 0


def test_search_auth_set_black_empty(rel_fs_store):
    # Ensure rel_fs_store fixture has run so that the type directories are
    # predictable (it adds "campaign").
    auth_set = AuthSet(
        None,
        set(),
    )
    results = _get_matching_dir_entries(FS_PATH, auth_set, stat.S_ISDIR)
    # Should get all dirs
    assert set(results) == {
        "attack-pattern",
        "campaign",
        "course-of-action",
        "directory",
        "identity",
        "indicator",
        "intrusion-set",
        "malware",
        "marking-definition",
        "relationship",
        "tool",
    }
def test_timestamp2filename_naive():
    """A naive datetime is rendered as a digits-only filename stem."""
    naive_dt = datetime.datetime(2010, 6, 15, 8, 30, 10, 1234)
    assert _timestamp2filename(naive_dt) == "20100615083010001234"


def test_timestamp2filename_tz():
    """An aware datetime yields the same stem as its UTC equivalent."""
    # one hour west of UTC (i.e. an hour earlier)
    west_tz = pytz.FixedOffset(-60)
    aware_dt = datetime.datetime(2010, 6, 15, 7, 30, 10, 1234, west_tz)
    assert _timestamp2filename(aware_dt) == "20100615083010001234"
| |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Shards a given test suite and runs the shards in parallel.
ShardingSupervisor is called to process the command line options and creates
the specified number of worker threads. These threads then run each shard of
the test in a separate process and report on the results. When all the shards
have been completed, the supervisor reprints any lines indicating a test
failure for convenience. If only one shard is to be run, a single subprocess
is started for that shard and the output is identical to gtest's output.
"""
import itertools
import optparse
import os
import Queue
import random
import re
import sys
import threading
from stdio_buffer import StdioBuffer
from xml.dom import minidom
# Add tools/ to path
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(BASE_PATH, ".."))
try:
import find_depot_tools # pylint: disable=F0401,W0611
# Fixes a bug in Windows where some shards die upon starting
# TODO(charleslee): actually fix this bug
import subprocess2 as subprocess
except ImportError:
# Unable to find depot_tools, so just use standard subprocess
import subprocess
# Defaults for the sharding supervisor's command-line options.
SS_USAGE = "python %prog [options] path/to/test [gtest_args]"
SS_DEFAULT_NUM_CORES = 4
SS_DEFAULT_SHARDS_PER_CORE = 5  # num_shards = cores * SHARDS_PER_CORE
SS_DEFAULT_RUNS_PER_CORE = 1  # num_workers = cores * RUNS_PER_CORE
SS_DEFAULT_RETRY_PERCENT = 5  # --retry-failed ignored if more than 5% fail
SS_DEFAULT_TIMEOUT = 530  # Slightly less than buildbot's default 600 seconds
def DetectNumCores():
    """Detects the number of cores on the machine.

    Returns:
        The number of cores on the machine or SS_DEFAULT_NUM_CORES if it
        could not be found.
    """
    try:
        # Override on some Chromium Valgrind bots.
        if "CHROME_VALGRIND_NUMCPUS" in os.environ:
            return int(os.environ["CHROME_VALGRIND_NUMCPUS"])
        # Linux, Unix, MacOS
        if hasattr(os, "sysconf"):
            if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
                # Linux, Unix
                return int(os.sysconf("SC_NPROCESSORS_ONLN"))
            else:
                # OSX
                return int(os.popen2("sysctl -n hw.ncpu")[1].read())
        # Windows
        return int(os.environ["NUMBER_OF_PROCESSORS"])
    except (ValueError, KeyError):
        # ValueError: a value was present but was not an integer.
        # KeyError: NUMBER_OF_PROCESSORS missing from the environment; the
        # original only caught ValueError, so that case crashed instead of
        # falling back to the default.
        return SS_DEFAULT_NUM_CORES
def GetGTestOutput(args):
    """Extracts the --gtest_output value from args.

    Returns:
        The value of the first --gtest_output flag, or None if not present.
    """
    for arg in args:
        if '--gtest_output=' in arg:
            # Split only on the FIRST '=' so values that themselves contain
            # '=' are returned intact; the original split on every '=' and
            # truncated such values (which also broke the later
            # args.remove('--gtest_output=' + value) round trip in
            # AppendToGTestOutput/RemoveGTestOutput).
            return arg.split('=', 1)[1]
    return None
def AppendToGTestOutput(gtest_args, value):
    """Returns a copy of gtest_args with `value` appended to --gtest_output.

    If no --gtest_output flag is present, gtest_args is returned unchanged.
    """
    new_args = list(gtest_args)
    existing = GetGTestOutput(new_args)
    if not existing:
        return gtest_args
    new_args.remove('--gtest_output=' + existing)
    new_args.append('--gtest_output=' + existing + value)
    return new_args
def RemoveGTestOutput(gtest_args):
    """Returns a copy of gtest_args with the --gtest_output flag removed.

    If no --gtest_output flag is present, gtest_args is returned unchanged.
    """
    new_args = list(gtest_args)
    existing = GetGTestOutput(new_args)
    if not existing:
        return gtest_args
    new_args.remove('--gtest_output=' + existing)
    return new_args
def AppendToXML(final_xml, generic_path, shard):
    """Combine the shard xml file with the final xml file.

    Args:
        final_xml: minidom document accumulated so far (may be falsy/empty).
        generic_path: path prefix; the shard index is appended to locate the
            shard's own gtest xml file.
        shard: index of the shard whose results should be merged in.

    Returns:
        The merged minidom document (or the shard's document when final_xml
        was empty; final_xml unchanged when the shard produced no file).
    """
    path = generic_path + str(shard)
    try:
        with open(path) as shard_xml_file:
            shard_xml = minidom.parse(shard_xml_file)
    except IOError:
        # If the shard crashed, gtest will not have generated an xml file.
        return final_xml
    if not final_xml:
        # Out final xml is empty, let's prepopulate it with the first one we see.
        return shard_xml
    shard_node = shard_xml.documentElement
    final_node = final_xml.documentElement
    testcases = shard_node.getElementsByTagName('testcase')
    final_testcases = final_node.getElementsByTagName('testcase')
    final_testsuites = final_node.getElementsByTagName('testsuite')
    # Index suites by name for the fallback copy path below.
    final_testsuites_by_name = dict(
        (suite.getAttribute('name'), suite) for suite in final_testsuites)
    for testcase in testcases:
        name = testcase.getAttribute('name')
        classname = testcase.getAttribute('classname')
        failures = testcase.getElementsByTagName('failure')
        status = testcase.getAttribute('status')
        elapsed = testcase.getAttribute('time')
        # don't bother updating the final xml if there is no data.
        if status == 'notrun':
            continue
        # Look in our final xml to see if it's there.
        # There has to be a better way...
        # NOTE(review): this is an O(shard_cases * final_cases) linear scan.
        merged_into_final_testcase = False
        for final_testcase in final_testcases:
            final_name = final_testcase.getAttribute('name')
            final_classname = final_testcase.getAttribute('classname')
            if final_name == name and final_classname == classname:
                # We got the same entry.
                final_testcase.setAttribute('status', status)
                final_testcase.setAttribute('time', elapsed)
                for failure in failures:
                    final_testcase.appendChild(failure)
                merged_into_final_testcase = True
        # We couldn't find an existing testcase to merge the results into, so we
        # copy the node into the existing test suite.
        if not merged_into_final_testcase:
            testsuite = testcase.parentNode
            final_testsuite = final_testsuites_by_name[testsuite.getAttribute('name')]
            final_testsuite.appendChild(testcase)
    return final_xml
def RunShard(test, total_shards, index, gtest_args, stdout, stderr):
    """Runs a single test shard in a subprocess.

    Args:
        test: path of the test binary to execute.
        total_shards: total number of shards the test is split into.
        index: zero-based index of the shard this subprocess runs.
        gtest_args: extra gtest arguments passed through to the binary.
        stdout, stderr: handed directly to subprocess.Popen.

    Returns:
        The Popen object representing the subprocess handle.
    """
    args = [test]
    # If there is a gtest_output
    # ...make this shard's output file name unique by appending the index.
    test_args = AppendToGTestOutput(gtest_args, str(index))
    args.extend(test_args)
    env = os.environ.copy()
    # gtest reads these variables to pick which subset of tests to run.
    env["GTEST_TOTAL_SHARDS"] = str(total_shards)
    env["GTEST_SHARD_INDEX"] = str(index)
    # Use a unique log file for each shard
    # Allows ui_tests to be run in parallel on the same machine
    env["CHROME_LOG_FILE"] = "chrome_log_%d" % index
    return subprocess.Popen(
        args, stdout=stdout,
        stderr=stderr,
        env=env,
        bufsize=0,
        universal_newlines=True)
class ShardRunner(threading.Thread):
    """Worker thread that manages a single shard at a time.

    Attributes:
        supervisor: The ShardingSupervisor that this worker reports to.
        counter: Called to get the next shard index to run.
        test_start: Regex that detects when a test runs.
        test_ok: Regex that detects a passing test.
        test_fail: Regex that detects a failing test.
        current_test: The name of the currently running test.
    """

    def __init__(self, supervisor, counter, test_start, test_ok, test_fail):
        """Inits ShardRunner and sets the current test to nothing."""
        threading.Thread.__init__(self)
        self.supervisor = supervisor
        self.counter = counter
        self.test_start = test_start
        self.test_ok = test_ok
        self.test_fail = test_fail
        self.current_test = ""

    def ReportFailure(self, description, index, test_name):
        """Assembles and reports a failure line to be printed later."""
        log_line = "%s (%i): %s\n" % (description, index, test_name)
        self.supervisor.LogTestFailure(log_line)

    def ProcessLine(self, index, line):
        """Checks a shard output line for test status, and reports a failure or
        incomplete test if needed.
        """
        results = self.test_start.search(line)
        if results:
            # A new test started while another was still marked as running:
            # the previous one never printed OK/FAILED, so flag it.
            if self.current_test:
                self.ReportFailure("INCOMPLETE", index, self.current_test)
            self.current_test = results.group(1)
            self.supervisor.IncrementTestCount()
            return
        results = self.test_ok.search(line)
        if results:
            self.current_test = ""
            return
        results = self.test_fail.search(line)
        if results:
            self.ReportFailure("FAILED", index, results.group(1))
            self.current_test = ""

    def run(self):
        """Runs shards and outputs the results.

        Gets the next shard index from the supervisor, runs it in a subprocess,
        and collects the output. The output is read character by character in
        case the shard crashes without an ending newline. Each line is processed
        as it is finished.
        """
        while True:
            try:
                index = self.counter.get_nowait()
            except Queue.Empty:
                # No shard indices left: this worker is done.
                break
            shard_running = True
            shard = RunShard(
                self.supervisor.test, self.supervisor.total_shards, index,
                self.supervisor.gtest_args, subprocess.PIPE, subprocess.PIPE)
            buf = StdioBuffer(shard)
            # Spawn two threads to collect stdio output
            stdout_collector_thread = buf.handle_pipe(sys.stdout, shard.stdout)
            stderr_collector_thread = buf.handle_pipe(sys.stderr, shard.stderr)
            while shard_running:
                pipe, line = buf.readline()
                # (None, None) signals that both pipes are exhausted.
                if pipe is None and line is None:
                    shard_running = False
                if not line and not shard_running:
                    break
                self.ProcessLine(index, line)
                self.supervisor.LogOutputLine(index, line, pipe)
            stdout_collector_thread.join()
            stderr_collector_thread.join()
            if self.current_test:
                # The shard ended while a test was still running (crash/abort).
                self.ReportFailure("INCOMPLETE", index, self.current_test)
            self.supervisor.ShardIndexCompleted(index)
            if shard.returncode != 0:
                self.supervisor.LogShardFailure(index)
class ShardingSupervisor(object):
    """Supervisor object that handles the worker threads.

    Attributes:
        test: Name of the test to shard.
        num_shards_to_run: Total number of shards to split the test into.
        num_runs: Total number of worker threads to create for running shards.
        color: Indicates which coloring mode to use in the output.
        original_order: True if shard output should be printed as it comes.
        prefix: True if each line should indicate the shard index.
        retry_percent: Integer specifying the max percent of tests to retry.
        gtest_args: The options to pass to gtest.
        failed_tests: List of statements from shard output indicating a failure.
        failed_shards: List of shards that contained failing tests.
        shards_completed: List of flags indicating which shards have finished.
        shard_output: Buffer that stores output from each shard as (stdio, line).
        test_counter: Stores the total number of tests run.
        total_slaves: Total number of slaves running this test.
        slave_index: Current slave to run tests for.

    If total_slaves is set, we run only a subset of the tests. This is meant to
    be used when we want to shard across machines as well as across cpus. In
    that case the number of shards to execute will be the same, but they will
    be smaller, as the total number of shards in the test suite will be
    multiplied by 'total_slaves'.

    For example, if you are on a quad core machine, the sharding supervisor by
    default will use 20 shards for the whole suite. However, if you set
    total_slaves to 2, it will split the suite in 40 shards and will only run
    shards [0-19] or shards [20-39] depending if you set slave_index to 0 or 1.
    """

    # Sentinel pushed into a shard's output queue to mark end-of-output.
    SHARD_COMPLETED = object()

    def __init__(self, test, num_shards_to_run, num_runs, color, original_order,
                 prefix, retry_percent, timeout, total_slaves, slave_index,
                 gtest_args):
        """Inits ShardingSupervisor with given options and gtest arguments."""
        self.test = test
        # Number of shards to run locally.
        self.num_shards_to_run = num_shards_to_run
        # Total shards in the test suite running across all slaves.
        self.total_shards = num_shards_to_run * total_slaves
        self.slave_index = slave_index
        self.num_runs = num_runs
        self.color = color
        self.original_order = original_order
        self.prefix = prefix
        self.retry_percent = retry_percent
        self.timeout = timeout
        self.gtest_args = gtest_args
        self.failed_tests = []
        self.failed_shards = []
        self.shards_completed = [False] * self.num_shards_to_run
        # One output queue per local shard; entries are (pipe, line) tuples.
        self.shard_output = [Queue.Queue() for _ in range(self.num_shards_to_run)]
        self.test_counter = itertools.count()

    def ShardTest(self):
        """Runs the test and manages the worker threads.

        Runs the test and outputs a summary at the end. All the tests in the
        suite are run by creating (cores * runs_per_core) threads and
        (cores * shards_per_core) shards. When all the worker threads have
        finished, the lines saved in failed_tests are printed again. If enabled,
        and failed tests that do not have FLAKY or FAILS in their names are run
        again, serially, and the results are printed.

        Returns:
            1 if some unexpected (not FLAKY or FAILS) tests failed, 0 otherwise.
        """
        # Regular expressions for parsing GTest logs. Test names look like
        # SomeTestCase.SomeTest
        # SomeName/SomeTestCase.SomeTest/1
        # This regex also matches SomeName.SomeTest/1 and
        # SomeName/SomeTestCase.SomeTest, which should be harmless.
        test_name_regex = r"((\w+/)?\w+\.\w+(/\d+)?)"
        # Regex for filtering out ANSI escape codes when using color.
        ansi_regex = r"(?:\x1b\[.*?[a-zA-Z])?"
        test_start = re.compile(
            ansi_regex + r"\[\s+RUN\s+\] " + ansi_regex + test_name_regex)
        test_ok = re.compile(
            ansi_regex + r"\[\s+OK\s+\] " + ansi_regex + test_name_regex)
        test_fail = re.compile(
            ansi_regex + r"\[\s+FAILED\s+\] " + ansi_regex + test_name_regex)
        workers = []
        # Fill the queue with the global indices of the shards this slave owns.
        counter = Queue.Queue()
        start_point = self.num_shards_to_run * self.slave_index
        for i in range(start_point, start_point + self.num_shards_to_run):
            counter.put(i)
        for i in range(self.num_runs):
            worker = ShardRunner(
                self, counter, test_start, test_ok, test_fail)
            worker.start()
            workers.append(worker)
        if self.original_order:
            for worker in workers:
                worker.join()
        else:
            self.WaitForShards()
        # All the shards are done. Merge all the XML files and generate the
        # main one.
        output_arg = GetGTestOutput(self.gtest_args)
        if output_arg:
            xml, xml_path = output_arg.split(':', 1)
            assert(xml == 'xml')
            final_xml = None
            for i in range(start_point, start_point + self.num_shards_to_run):
                final_xml = AppendToXML(final_xml, xml_path, i)
            if final_xml:
                with open(xml_path, 'w') as final_file:
                    final_xml.writexml(final_file)
        num_failed = len(self.failed_shards)
        if num_failed > 0:
            self.failed_shards.sort()
            self.WriteText(sys.stdout,
                           "\nFAILED SHARDS: %s\n" % str(self.failed_shards),
                           "\x1b[1;5;31m")
        else:
            self.WriteText(sys.stdout, "\nALL SHARDS PASSED!\n", "\x1b[1;5;32m")
        self.PrintSummary(self.failed_tests)
        # retry_percent < 0 means retries are disabled; just report pass/fail.
        if self.retry_percent < 0:
            return len(self.failed_shards) > 0
        # Known-bad (FAILS_) and known-flaky (FLAKY_) tests are not retried.
        self.failed_tests = [x for x in self.failed_tests if x.find("FAILS_") < 0]
        self.failed_tests = [x for x in self.failed_tests if x.find("FLAKY_") < 0]
        if not self.failed_tests:
            return 0
        return self.RetryFailedTests()

    def LogTestFailure(self, line):
        """Saves a line in the list of failed tests to be printed at the end."""
        # De-duplicate: a test may be reported failed more than once.
        if line not in self.failed_tests:
            self.failed_tests.append(line)

    def LogShardFailure(self, index):
        """Records that a test in the given shard has failed."""
        self.failed_shards.append(index)

    def WaitForShards(self):
        """Prints the output from each shard in consecutive order, waiting for
        the current shard to finish before starting on the next shard.
        """
        try:
            for shard_index in range(self.num_shards_to_run):
                while True:
                    try:
                        _, line = self.shard_output[shard_index].get(True, self.timeout)
                    except Queue.Empty:
                        # Shard timed out, notice failure and move on.
                        self.LogShardFailure(shard_index)
                        # TODO(maruel): Print last test. It'd be simpler to have the
                        # processing in the main thread.
                        # TODO(maruel): Make sure the worker thread terminates.
                        sys.stdout.write('TIMED OUT\n\n')
                        self.LogTestFailure(
                            'FAILURE: SHARD %d TIMED OUT; %d seconds' % (
                                shard_index, self.timeout))
                        break
                    if line is self.SHARD_COMPLETED:
                        break
                    sys.stdout.write(line)
        except:
            # NOTE(review): deliberately bare to catch KeyboardInterrupt etc.;
            # drains the buffered output non-blocking before re-raising so no
            # shard output is lost on an abnormal exit.
            sys.stdout.flush()
            print 'CAUGHT EXCEPTION: dumping remaining data:'
            for shard_index in range(self.num_shards_to_run):
                while True:
                    try:
                        _, line = self.shard_output[shard_index].get(False)
                    except Queue.Empty:
                        # Shard timed out, notice failure and move on.
                        self.LogShardFailure(shard_index)
                        break
                    if line is self.SHARD_COMPLETED:
                        break
                    sys.stdout.write(line)
            raise

    def LogOutputLine(self, index, line, pipe=sys.stdout):
        """Either prints the shard output line immediately or saves it in the
        output buffer, depending on the settings. Also optionally adds a prefix.
        Adds a (sys.stdout, line) or (sys.stderr, line) tuple in the output queue.
        """
        # Fix up the index: shard_output is indexed by local shard position,
        # while `index` is the global shard index across all slaves.
        array_index = index - (self.num_shards_to_run * self.slave_index)
        if self.prefix:
            line = "%i>%s" % (index, line)
        if self.original_order:
            pipe.write(line)
        else:
            self.shard_output[array_index].put((pipe, line))

    def IncrementTestCount(self):
        """Increments the number of tests run. This is relevant to the
        --retry-percent option.
        """
        self.test_counter.next()

    def ShardIndexCompleted(self, index):
        """Records that a shard has finished so the output from the next shard
        can now be printed.
        """
        # Fix up the index (global shard index -> local queue position).
        array_index = index - (self.num_shards_to_run * self.slave_index)
        self.shard_output[array_index].put((sys.stdout, self.SHARD_COMPLETED))

    def RetryFailedTests(self):
        """Reruns any failed tests serially and prints another summary of the
        results if no more than retry_percent failed.
        """
        num_tests_run = self.test_counter.next()
        if len(self.failed_tests) > self.retry_percent * num_tests_run:
            sys.stdout.write("\nNOT RETRYING FAILED TESTS (too many failed)\n")
            return 1
        self.WriteText(sys.stdout, "\nRETRYING FAILED TESTS:\n", "\x1b[1;5;33m")
        # Failure lines look like "FAILED (3): Suite.Test\n"; extract the
        # test name (stripping any "N>" shard prefix) to build gtest filters.
        sharded_description = re.compile(r": (?:\d+>)?(.*)")
        gtest_filters = [sharded_description.search(line).group(1)
                         for line in self.failed_tests]
        sys.stdout.write("\nRETRY GTEST FILTERS: %r\n" % gtest_filters)
        failed_retries = []
        for test_filter in gtest_filters:
            args = [self.test, "--gtest_filter=" + test_filter]
            # Don't update the xml output files during retry.
            stripped_gtests_args = RemoveGTestOutput(self.gtest_args)
            args.extend(stripped_gtests_args)
            sys.stdout.write("\nRETRY COMMAND: %r\n" % args)
            rerun = subprocess.Popen(args, stdout=sys.stdout, stderr=sys.stderr)
            rerun.wait()
            if rerun.returncode != 0:
                failed_retries.append(test_filter)
        self.WriteText(sys.stdout, "RETRY RESULTS:\n", "\x1b[1;5;33m")
        self.PrintSummary(failed_retries)
        return len(failed_retries) > 0

    def PrintSummary(self, failed_tests):
        """Prints a summary of the test results.

        If any shards had failing tests, the list is sorted and printed. Then
        all the lines that indicate a test failure are reproduced.
        """
        if failed_tests:
            self.WriteText(sys.stdout, "FAILED TESTS:\n", "\x1b[1;5;31m")
            for line in failed_tests:
                sys.stdout.write(line)
        else:
            self.WriteText(sys.stdout, "ALL TESTS PASSED!\n", "\x1b[1;5;32m")

    def WriteText(self, pipe, text, ansi):
        """Writes the text to the pipe with the ansi escape code, if colored
        output is set, for Unix systems.
        """
        if self.color:
            pipe.write(ansi)
        pipe.write(text)
        if self.color:
            # Reset terminal attributes after the colored text.
            pipe.write("\x1b[m")
def main():
    """Parses command-line options and runs the sharded test suite.

    Returns the exit code: 0 on success, non-zero when tests fail or when a
    single-shard run (--runshard) fails.
    """
    parser = optparse.OptionParser(usage=SS_USAGE)
    parser.add_option(
        "-n", "--shards_per_core", type="int", default=SS_DEFAULT_SHARDS_PER_CORE,
        help="number of shards to generate per CPU")
    parser.add_option(
        "-r", "--runs_per_core", type="int", default=SS_DEFAULT_RUNS_PER_CORE,
        help="number of shards to run in parallel per CPU")
    parser.add_option(
        "-c", "--color", action="store_true",
        default=sys.platform != "win32" and sys.stdout.isatty(),
        help="force color output, also used by gtest if --gtest_color is not"
        " specified")
    parser.add_option(
        "--no-color", action="store_false", dest="color",
        help="disable color output")
    parser.add_option(
        "-s", "--runshard", type="int", help="single shard index to run")
    parser.add_option(
        "--reorder", action="store_true",
        help="ensure that all output from an earlier shard is printed before"
        " output from a later shard")
    # TODO(charleslee): for backwards compatibility with master.cfg file
    parser.add_option(
        "--original-order", action="store_true",
        help="print shard output in its orginal jumbled order of execution"
        " (useful for debugging flaky tests)")
    parser.add_option(
        "--prefix", action="store_true",
        help="prefix each line of shard output with 'N>', where N is the shard"
        " index (forced True when --original-order is True)")
    parser.add_option(
        "--random-seed", action="store_true",
        help="shuffle the tests with a random seed value")
    parser.add_option(
        "--retry-failed", action="store_true",
        help="retry tests that did not pass serially")
    parser.add_option(
        "--retry-percent", type="int",
        default=SS_DEFAULT_RETRY_PERCENT,
        help="ignore --retry-failed if more than this percent fail [0, 100]"
        " (default = %i)" % SS_DEFAULT_RETRY_PERCENT)
    parser.add_option(
        "-t", "--timeout", type="int", default=SS_DEFAULT_TIMEOUT,
        help="timeout in seconds to wait for a shard (default=%default s)")
    parser.add_option(
        "--total-slaves", type="int", default=1,
        help="if running a subset, number of slaves sharing the test")
    parser.add_option(
        "--slave-index", type="int", default=0,
        help="if running a subset, index of the slave to run tests for")
    # Everything after the test binary path is passed through to gtest.
    parser.disable_interspersed_args()
    (options, args) = parser.parse_args()
    if not args:
        parser.error("You must specify a path to test!")
    if not os.path.exists(args[0]):
        parser.error("%s does not exist!" % args[0])
    num_cores = DetectNumCores()
    if options.shards_per_core < 1:
        parser.error("You must have at least 1 shard per core!")
    num_shards_to_run = num_cores * options.shards_per_core
    if options.runs_per_core < 1:
        parser.error("You must have at least 1 run per core!")
    num_runs = num_cores * options.runs_per_core
    test = args[0]
    gtest_args = ["--gtest_color=%s" % {
        True: "yes", False: "no"}[options.color]] + args[1:]
    # Interleaved output is only readable when each line carries its shard id.
    if options.original_order:
        options.prefix = True
    # TODO(charleslee): for backwards compatibility with buildbot's log_parser
    if options.reorder:
        options.original_order = False
        options.prefix = True
    if options.random_seed:
        seed = random.randint(1, 99999)
        gtest_args.extend(["--gtest_shuffle", "--gtest_random_seed=%i" % seed])
    if options.retry_failed:
        if options.retry_percent < 0 or options.retry_percent > 100:
            parser.error("Retry percent must be an integer [0, 100]!")
    else:
        # Negative retry_percent signals ShardingSupervisor to skip retries.
        options.retry_percent = -1
    if options.runshard != None:
        # run a single shard and exit
        if (options.runshard < 0 or options.runshard >= num_shards_to_run):
            parser.error("Invalid shard number given parameters!")
        shard = RunShard(
            test, num_shards_to_run, options.runshard, gtest_args, None, None)
        shard.communicate()
        return shard.poll()
    # When running browser_tests, load the test binary into memory before running
    # any tests. This is needed to prevent loading it from disk causing the first
    # run tests to timeout flakily. See: http://crbug.com/124260
    if "browser_tests" in test:
        args = [test]
        args.extend(gtest_args)
        args.append("--warmup")
        result = subprocess.call(args,
                                 bufsize=0,
                                 universal_newlines=True)
        # If the test fails, don't run anything else.
        if result != 0:
            return result
    # shard and run the whole test
    ss = ShardingSupervisor(
        test, num_shards_to_run, num_runs, options.color,
        options.original_order, options.prefix, options.retry_percent,
        options.timeout, options.total_slaves, options.slave_index, gtest_args)
    return ss.ShardTest()


if __name__ == "__main__":
    sys.exit(main())
| |
from __future__ import division
from math import sqrt, cos, sin, acos, degrees, radians, log
from collections import MutableSequence
# This file contains classes for the different types of SVG path segments as
# well as a Path object that contains a sequence of path segments.

# Recursion floor for the adaptive length approximation: segment_length always
# subdivides at least this many levels before trusting its estimate.
MIN_DEPTH = 5
# Acceptable error for the adaptive length approximation.
ERROR = 1e-12
def segment_length(curve, start, end, start_point, end_point, error, min_depth, depth):
    """Approximate the arc length of ``curve`` between parameters ``start``
    and ``end`` by adaptive straight-line bisection.

    The interval is split at its midpoint; if the two half-chords together
    differ from the full chord by at most ``error`` (and we have recursed at
    least ``min_depth`` levels), the sum of the half-chords is accepted as
    the length. Otherwise both halves are refined recursively.
    """
    mid = (start + end) / 2
    mid_point = curve.point(mid)
    chord = abs(end_point - start_point)
    half_chords = abs(mid_point - start_point) + abs(end_point - mid_point)
    if (half_chords - chord <= error) and (depth >= min_depth):
        # This is accurate enough.
        return half_chords
    # Not converged yet: refine each half independently.
    return (segment_length(curve, start, mid, start_point, mid_point,
                           error, min_depth, depth + 1) +
            segment_length(curve, mid, end, mid_point, end_point,
                           error, min_depth, depth + 1))
class Line(object):
    """A straight line segment between two points in the complex plane."""

    def __init__(self, start, end):
        self.start = start
        self.end = end

    def __repr__(self):
        return 'Line(start=%s, end=%s)' % (self.start, self.end)

    def __eq__(self, other):
        if not isinstance(other, Line):
            return NotImplemented
        return self.start == other.start and self.end == other.end

    def __ne__(self, other):
        if not isinstance(other, Line):
            return NotImplemented
        return not self == other

    def point(self, pos):
        """Linear interpolation: pos 0 is the start, pos 1 is the end."""
        return self.start + (self.end - self.start) * pos

    def length(self, error=None, min_depth=None):
        """Euclidean distance between the endpoints.

        ``error`` and ``min_depth`` are accepted for interface compatibility
        with the curved segment types and are ignored.
        """
        delta = self.end - self.start
        return sqrt(delta.real ** 2 + delta.imag ** 2)
class CubicBezier(object):
def __init__(self, start, control1, control2, end):
self.start = start
self.control1 = control1
self.control2 = control2
self.end = end
def __repr__(self):
return 'CubicBezier(start=%s, control1=%s, control2=%s, end=%s)' % (
self.start, self.control1, self.control2, self.end)
def __eq__(self, other):
if not isinstance(other, CubicBezier):
return NotImplemented
return self.start == other.start and self.end == other.end and \
self.control1 == other.control1 and self.control2 == other.control2
def __ne__(self, other):
if not isinstance(other, CubicBezier):
return NotImplemented
return not self == other
def is_smooth_from(self, previous):
"""Checks if this segment would be a smooth segment following the previous"""
if isinstance(previous, CubicBezier):
return (self.start == previous.end and
(self.control1 - self.start) == (previous.end - previous.control2))
else:
return self.control1 == self.start
def point(self, pos):
"""Calculate the x,y position at a certain position of the path"""
return ((1 - pos) ** 3 * self.start) + \
(3 * (1 - pos) ** 2 * pos * self.control1) + \
(3 * (1 - pos) * pos ** 2 * self.control2) + \
(pos ** 3 * self.end)
def length(self, error=ERROR, min_depth=MIN_DEPTH):
"""Calculate the length of the path up to a certain position"""
start_point = self.point(0)
end_point = self.point(1)
return segment_length(self, 0, 1, start_point, end_point, error, min_depth, 0)
class QuadraticBezier(object):
def __init__(self, start, control, end):
self.start = start
self.end = end
self.control = control
def __repr__(self):
return 'QuadraticBezier(start=%s, control=%s, end=%s)' % (
self.start, self.control, self.end)
def __eq__(self, other):
if not isinstance(other, QuadraticBezier):
return NotImplemented
return self.start == other.start and self.end == other.end and \
self.control == other.control
def __ne__(self, other):
if not isinstance(other, QuadraticBezier):
return NotImplemented
return not self == other
def is_smooth_from(self, previous):
"""Checks if this segment would be a smooth segment following the previous"""
if isinstance(previous, QuadraticBezier):
return (self.start == previous.end and
(self.control - self.start) == (previous.end - previous.control))
else:
return self.control == self.start
def point(self, pos):
return (1 - pos) ** 2 * self.start + 2 * (1 - pos) * pos * self.control + \
pos ** 2 * self.end
def length(self, error=None, min_depth=None):
# http://www.malczak.info/blog/quadratic-bezier-curve-length/
a = self.start - 2 * self.control + self.end
b = 2 * (self.control - self.start)
A = 4 * (a.real ** 2 + a.imag ** 2)
B = 4 * (a.real * b.real + a.imag * b.imag)
C = b.real ** 2 + b.imag ** 2
Sabc = 2 * sqrt(A + B + C)
A2 = sqrt(A)
A32 = 2 * A * A2
C2 = 2 * sqrt(C)
BA = B / A2
return (A32 * Sabc + A2 * B * (Sabc - C2) + (4 * C * A - B ** 2) *
log((2 * A2 + BA + Sabc) / (BA + C2))) / (4 * A32)
class Arc(object):
    """An elliptical arc segment, stored in SVG endpoint parameterization and
    converted on construction to center parameterization (center, theta,
    delta) for point evaluation."""

    def __init__(self, start, radius, rotation, arc, sweep, end):
        """radius is complex, rotation is in degrees,
        large and sweep are 1 or 0 (True/False also work)"""
        self.start = start
        self.radius = radius
        self.rotation = rotation
        self.arc = bool(arc)
        self.sweep = bool(sweep)
        self.end = end
        self._parameterize()

    def __repr__(self):
        return 'Arc(start=%s, radius=%s, rotation=%s, arc=%s, sweep=%s, end=%s)' % (
            self.start, self.radius, self.rotation, self.arc, self.sweep, self.end)

    def __eq__(self, other):
        if not isinstance(other, Arc):
            return NotImplemented
        return self.start == other.start and self.end == other.end and \
            self.radius == other.radius and self.rotation == other.rotation and \
            self.arc == other.arc and self.sweep == other.sweep

    def __ne__(self, other):
        if not isinstance(other, Arc):
            return NotImplemented
        return not self == other

    def _parameterize(self):
        # Conversion from endpoint to center parameterization
        # http://www.w3.org/TR/SVG/implnote.html#ArcImplementationNotes
        # Rotate the chord midpoint into the ellipse's own (primed) frame.
        cosr = cos(radians(self.rotation))
        sinr = sin(radians(self.rotation))
        dx = (self.start.real - self.end.real) / 2
        dy = (self.start.imag - self.end.imag) / 2
        x1prim = cosr * dx + sinr * dy
        x1prim_sq = x1prim * x1prim
        y1prim = -sinr * dx + cosr * dy
        y1prim_sq = y1prim * y1prim
        rx = self.radius.real
        rx_sq = rx * rx
        ry = self.radius.imag
        ry_sq = ry * ry
        # Correct out of range radii: scale both radii up uniformly until the
        # endpoints actually fit on the ellipse.
        radius_check = (x1prim_sq / rx_sq) + (y1prim_sq / ry_sq)
        if radius_check > 1:
            rx *= sqrt(radius_check)
            ry *= sqrt(radius_check)
            rx_sq = rx * rx
            ry_sq = ry * ry
        # Solve for the center in the primed frame; the sign of c selects
        # which of the two candidate centers matches the arc/sweep flags.
        t1 = rx_sq * y1prim_sq
        t2 = ry_sq * x1prim_sq
        c = sqrt(abs((rx_sq * ry_sq - t1 - t2) / (t1 + t2)))
        if self.arc == self.sweep:
            c = -c
        cxprim = c * rx * y1prim / ry
        cyprim = -c * ry * x1prim / rx
        # Rotate the center back into user space and offset by the chord
        # midpoint.
        self.center = complex((cosr * cxprim - sinr * cyprim) +
                              ((self.start.real + self.end.real) / 2),
                              (sinr * cxprim + cosr * cyprim) +
                              ((self.start.imag + self.end.imag) / 2))
        # Start angle (theta): angle between the x-axis and the start vector.
        ux = (x1prim - cxprim) / rx
        uy = (y1prim - cyprim) / ry
        vx = (-x1prim - cxprim) / rx
        vy = (-y1prim - cyprim) / ry
        n = sqrt(ux * ux + uy * uy)
        p = ux
        theta = degrees(acos(p / n))
        if uy < 0:
            theta = -theta
        self.theta = theta % 360
        # Sweep angle (delta): signed angle between the start and end vectors.
        n = sqrt((ux * ux + uy * uy) * (vx * vx + vy * vy))
        p = ux * vx + uy * vy
        if p == 0:
            delta = degrees(acos(0))
        else:
            delta = degrees(acos(p / n))
        if (ux * vy - uy * vx) < 0:
            delta = -delta
        self.delta = delta % 360
        if not self.sweep:
            # A negative-direction sweep traverses the arc the other way.
            self.delta -= 360

    def point(self, pos):
        # Evaluate the parametric ellipse equation at the angle that is
        # `pos` of the way from theta to theta + delta.
        angle = radians(self.theta + (self.delta * pos))
        cosr = cos(radians(self.rotation))
        sinr = sin(radians(self.rotation))
        x = (cosr * cos(angle) * self.radius.real - sinr * sin(angle) *
             self.radius.imag + self.center.real)
        y = (sinr * cos(angle) * self.radius.real + cosr * sin(angle) *
             self.radius.imag + self.center.imag)
        return complex(x, y)

    def length(self, error=ERROR, min_depth=MIN_DEPTH):
        """The length of an elliptical arc segment requires numerical
        integration, and in that case it's simpler to just do a geometric
        approximation, as for cubic bezier curves.
        """
        start_point = self.point(0)
        end_point = self.point(1)
        return segment_length(self, 0, 1, start_point, end_point, error, min_depth, 0)
class Path(MutableSequence):
    """A Path is a sequence of path segments"""

    # Put it here, so there is a default if unpickled.
    _closed = False

    def __init__(self, *segments, **kw):
        self._segments = list(segments)
        # Cached total length and per-segment length fractions; invalidated
        # (set to None) whenever the segment list is mutated.
        self._length = None
        self._lengths = None
        if 'closed' in kw:
            self.closed = kw['closed']

    def __getitem__(self, index):
        return self._segments[index]

    def __setitem__(self, index, value):
        self._segments[index] = value
        self._length = None

    def __delitem__(self, index):
        del self._segments[index]
        self._length = None

    def insert(self, index, value):
        self._segments.insert(index, value)
        self._length = None

    def reverse(self):
        # Reversing the order of a path would require reversing each element
        # as well. That's not implemented.
        raise NotImplementedError

    def __len__(self):
        return len(self._segments)

    def __repr__(self):
        return 'Path(%s, closed=%s)' % (
            ', '.join(repr(x) for x in self._segments), self.closed)

    def __eq__(self, other):
        # Paths are equal when their segments are pairwise equal; the closed
        # flag is deliberately not compared here.
        if not isinstance(other, Path):
            return NotImplemented
        if len(self) != len(other):
            return False
        for s, o in zip(self._segments, other._segments):
            if not s == o:
                return False
        return True

    def __ne__(self, other):
        if not isinstance(other, Path):
            return NotImplemented
        return not self == other

    def _calc_lengths(self, error=ERROR, min_depth=MIN_DEPTH):
        # Populate the length cache: total length and each segment's share
        # of it (used to map a global position to a segment).
        if self._length is not None:
            return
        lengths = [each.length(error=error, min_depth=min_depth) for each in self._segments]
        self._length = sum(lengths)
        self._lengths = [each / self._length for each in lengths]

    def point(self, pos):
        """Return the point at fraction ``pos`` (0..1) of the path's total
        arc length."""
        # Shortcuts
        if pos == 0.0:
            return self._segments[0].point(pos)
        if pos == 1.0:
            return self._segments[-1].point(pos)
        self._calc_lengths()
        # Find which segment the point we search for is located on:
        segment_start = 0
        for index, segment in enumerate(self._segments):
            segment_end = segment_start + self._lengths[index]
            if segment_end >= pos:
                # This is the segment! How far in on the segment is the point?
                segment_pos = (pos - segment_start) / (segment_end - segment_start)
                break
            segment_start = segment_end
        return segment.point(segment_pos)

    def length(self, error=ERROR, min_depth=MIN_DEPTH):
        """Total arc length of all segments."""
        self._calc_lengths(error, min_depth)
        return self._length

    def _is_closable(self):
        """Returns true if the end is on the start of a segment"""
        end = self[-1].end
        for segment in self:
            if segment.start == end:
                return True
        return False

    @property
    def closed(self):
        """Checks that the end point is the same as the start point"""
        return self._closed and self._is_closable()

    @closed.setter
    def closed(self, value):
        value = bool(value)
        if value and not self._is_closable():
            raise ValueError("End does not coincide with a segment start.")
        self._closed = value

    def d(self):
        """Render the path as an SVG path-data string ('d' attribute)."""
        if self.closed:
            # The final segment is implied by the Z command.
            segments = self[:-1]
        else:
            segments = self[:]
        current_pos = None
        parts = []
        previous_segment = None
        end = self[-1].end
        for segment in segments:
            start = segment.start
            # If the start of this segment does not coincide with the end of
            # the last segment or if this segment is actually the close point
            # of a closed path, then we should start a new subpath here.
            if current_pos != start or (self.closed and start == end):
                parts.append('M {0:G},{1:G}'.format(start.real, start.imag))
            if isinstance(segment, Line):
                parts.append('L {0:G},{1:G}'.format(
                    segment.end.real, segment.end.imag)
                )
            elif isinstance(segment, CubicBezier):
                # Use the shorthand S/T commands when the segment continues
                # smoothly from the previous one.
                if segment.is_smooth_from(previous_segment):
                    parts.append('S {0:G},{1:G} {2:G},{3:G}'.format(
                        segment.control2.real, segment.control2.imag,
                        segment.end.real, segment.end.imag)
                    )
                else:
                    parts.append('C {0:G},{1:G} {2:G},{3:G} {4:G},{5:G}'.format(
                        segment.control1.real, segment.control1.imag,
                        segment.control2.real, segment.control2.imag,
                        segment.end.real, segment.end.imag)
                    )
            elif isinstance(segment, QuadraticBezier):
                if segment.is_smooth_from(previous_segment):
                    parts.append('T {0:G},{1:G}'.format(
                        segment.end.real, segment.end.imag)
                    )
                else:
                    parts.append('Q {0:G},{1:G} {2:G},{3:G}'.format(
                        segment.control.real, segment.control.imag,
                        segment.end.real, segment.end.imag)
                    )
            elif isinstance(segment, Arc):
                parts.append('A {0:G},{1:G} {2:G} {3:d},{4:d} {5:G},{6:G}'.format(
                    segment.radius.real, segment.radius.imag, segment.rotation,
                    int(segment.arc), int(segment.sweep),
                    segment.end.real, segment.end.imag)
                )
            current_pos = segment.end
            previous_segment = segment
        if self.closed:
            parts.append('Z')
        return ' '.join(parts)
| |
from lxml import etree
import mappers
import re
class LinkedInXMLParser(object):
    """Entry point for parsing LinkedIn API XML responses.

    Parses ``content`` into an element tree, then dispatches the tree to the
    specialized parser registered for its root tag. The parsed output is
    exposed as ``self.results``.
    """

    def __init__(self, content):
        # Map each known root tag to the handler that parses that document.
        self.routing = {
            'network': self.__parse_network_updates,
            'person': self.__parse_personal_profile,
            'job-poster': self.__parse_personal_profile,
            'update-comments': self.__parse_update_comments,
            'connections': self.__parse_connections,
            'error': self.__parse_error,
            'position': self.__parse_position,
            'skill': self.__parse_skills,
            'education': self.__parse_education,
            'people': self.__parse_people_collection,
            'twitter-account': self.__parse_twitter_accounts,
            'member-url': self.__parse_member_url_resources
        }
        self.tree = etree.fromstring(content)
        self.root = self.tree.tag
        self.results = self.__forward_tree(self.tree, self.root)

    def __forward_tree(self, tree, root):
        """Look up the handler registered for ``root`` and apply it."""
        return self.routing[root](tree)

    def __parse_network_updates(self, tree):
        return LinkedInNetworkUpdateParser(tree).results

    def __parse_personal_profile(self, tree):
        return LinkedInProfileParser(tree).results

    def __parse_update_comments(self, tree):
        return LinkedInNetworkCommentParser(tree).results

    def __parse_connections(self, tree):
        return LinkedInConnectionsParser(tree).results

    def __parse_skills(self, tree):
        return LinkedInSkillsParser(tree).results

    def __parse_error(self, tree):
        return LinkedInErrorParser(tree).results

    def __parse_position(self, tree):
        return LinkedInPositionParser(tree).results

    def __parse_education(self, tree):
        return LinkedInEducationParser(tree).results

    def __parse_twitter_accounts(self, tree):
        return LinkedInTwitterAccountParser(tree).results

    def __parse_member_url_resources(self, tree):
        return LinkedInMemberUrlResourceParser(tree).results

    def __parse_people_collection(self, tree):
        # The document holds a list of <person> nodes plus a count node.
        people, count_node = tree.getchildren()
        result_count = int(count_node.text)  # parsed for validation; unused
        return [LinkedInProfileParser(person).results for person in people]
class LinkedInNetworkUpdateParser(LinkedInXMLParser):
    """Parses a network-updates XML tree into mapper objects, one per update."""

    def __init__(self, content):
        # Pre-compiled XPath expressions, keyed by a short descriptive name.
        self.xpath_collection = {
            'first-name': etree.XPath('update-content/person/first-name'),
            'profile-url': etree.XPath('update-content/person/site-standard-profile-request/url'),
            'last-name': etree.XPath('update-content/person/last-name'),
            'timestamp': etree.XPath('timestamp'),
            'updates': etree.XPath('updates'),
            'update': etree.XPath('updates/update'),
            'update-type': etree.XPath('update-type'),
            'update-key': etree.XPath('update-key'),
            # special paths for question/answer updates
            'qa-first-name': etree.XPath('update-content/question/author/first-name'),
            'qa-last-name': etree.XPath('update-content/question/author/last-name'),
            'qa-profile-url': etree.XPath('update-content/question/web-url'),
            # special paths for job-posting updates
            'jobp-title': etree.XPath('update-content/job/position/title'),
            'jobp-company': etree.XPath('update-content/job/company/name'),
            'jobp-url': etree.XPath('update-content/job/site-job-request/url')
        }
        self.tree = content
        total = self.xpath_collection['updates'](self.tree)[0].attrib['total']
        self.results = self.__build_data(self.tree, total)

    def __build_data(self, tree, total):
        """Walk every <update> node, extract its fields, and wrap each one in
        the mapper object matching its update type."""
        paths = self.xpath_collection
        objs = []
        for update in paths['update'](tree):
            update_type = paths['update-type'](update)[0].text
            if update_type in ('QSTN', 'ANSW'):
                data = self.__qa_data_builder(update)
            elif update_type == 'JOBP':
                data = self.__jobp_data_builder(update)
            else:
                data = self.__generic_data_builder(update)
            objs.append(self.__objectify(data, update_type, update))
        return {'total': total, 'results': objs}

    def __generic_data_builder(self, u):
        """Extract the common person-update fields from an update node."""
        paths = self.xpath_collection
        data = {}
        try:
            data['update_key'] = paths['update-key'](u)[0].text.strip()
        except IndexError:
            # Not every update carries a key; that's fine.
            pass
        data['first_name'] = paths['first-name'](u)[0].text.strip()
        data['profile_url'] = paths['profile-url'](u)[0].text.strip()
        data['last_name'] = paths['last-name'](u)[0].text.strip()
        data['timestamp'] = paths['timestamp'](u)[0].text.strip()
        return data

    def __qa_data_builder(self, u):
        """Extract fields specific to question/answer updates."""
        paths = self.xpath_collection
        data = {}
        data['first_name'] = paths['qa-first-name'](u)[0].text.strip()
        try:
            data['profile_url'] = paths['qa-profile-url'](u)[0].text.strip()
        except IndexError:
            # The answer's url is in a different spot; the mapper handles it.
            pass
        data['last_name'] = paths['qa-last-name'](u)[0].text.strip()
        data['timestamp'] = paths['timestamp'](u)[0].text.strip()
        return data

    def __jobp_data_builder(self, u):
        """Extract fields specific to job-posting updates."""
        paths = self.xpath_collection
        return {
            'job_title': paths['jobp-title'](u)[0].text.strip(),
            'job_company': paths['jobp-company'](u)[0].text.strip(),
            'profile_url': paths['jobp-url'](u)[0].text.strip(),
        }

    def __objectify(self, data, u_type, u):
        """Wrap extracted data in the mapper class for the given update type."""
        if u_type == 'STAT':
            return mappers.NetworkStatusUpdate(data, u)
        if u_type == 'CONN':
            return mappers.NetworkConnectionUpdate(data, u)
        if u_type == 'JGRP':
            return mappers.NetworkGroupUpdate(data, u)
        if u_type == 'NCON':
            return mappers.NetworkNewConnectionUpdate(data, u)
        if u_type == 'CCEM':
            return mappers.NetworkAddressBookUpdate(data, u)
        if u_type == 'QSTN':
            return mappers.NetworkQuestionUpdate(data, u)
        if u_type == 'ANSW':
            return mappers.NetworkAnswerUpdate(data, u)
        if u_type == 'JOBP':
            return mappers.NetworkJobPostingUpdate(data, u)
        # Unknown types fall back to the generic update mapper.
        return mappers.NetworkUpdate(data, u)
class LinkedInProfileParser(LinkedInXMLParser):
    """Parse a <person> document into a list of mappers.Profile objects."""

    def __init__(self, content):
        self.tree = content
        self.results = self.__build_data(self.tree)

    def __build_data(self, tree):
        """Build Profile objects from *tree*.

        First tries the flat layout (direct children of /person); if that
        yields nothing, flattens the hierarchical layout by joining nested
        tag names with underscores (e.g. start-date/year -> start_date_year).
        """
        results = []
        for p in tree.xpath('/person'):
            person = {}
            for item in p.getchildren():
                if item.tag == 'location':
                    # location wraps its value in a single child element
                    person['location'] = item.getchildren()[0].text
                else:
                    person[re.sub(r'-', '_', item.tag)] = item.text
            obj = mappers.Profile(person, p)
            results.append(obj)

        # deal with hierarchical results in a somewhat kludgy way
        def fix(s):
            return re.sub(r'-', '_', s)

        def build_name(parent, item):
            # flatten the ancestor chain (up to, not including, *parent*)
            # into a single underscore-joined key
            s = ''
            p = item.getparent()
            while p != parent:
                s = fix(p.tag) + '_' + s
                p = p.getparent()
            s += fix(item.tag)
            return s

        if not results:
            person = {}
            for item in tree.iterdescendants():
                clean = item.text and item.text.strip()
                if clean:
                    name = build_name(tree, item)
                    if name in person:
                        # repeated element: promote the stored value to a list
                        value = person[name]
                        if not isinstance(value, list):
                            person[name] = [value, clean]
                        else:
                            person[name].append(clean)
                    else:
                        person[name] = clean
            obj = mappers.Profile(person, tree)
            results.append(obj)
        return results
class LinkedInNetworkCommentParser(LinkedInXMLParser):
    """Parse <update-comment> elements into NetworkUpdateComment objects."""

    def __init__(self, content):
        self.tree = content
        self.comment_xpath = etree.XPath('update-comment')
        self.results = self.__build_data(self.tree)

    def __build_data(self, tree):
        """Return one comment object per <update-comment> child ([] if none)."""
        if not tree.getchildren():
            return []
        return [mappers.NetworkUpdateComment(node)
                for node in self.comment_xpath(tree)]
class LinkedInConnectionsParser(LinkedInXMLParser):
    """Parse a connections listing by re-dispatching each child element."""

    def __init__(self, content):
        self.tree = content
        self.total = content.attrib['total']
        self.results = self.__build_data(self.tree)

    def __build_data(self, tree):
        """Return {'results': [...], 'total': ...} for the listing."""
        parsed_children = []
        for child in tree.getchildren():
            # round-trip each child through the top-level parser so it is
            # dispatched to whichever parser matches its tag
            parsed_children.append(
                LinkedInXMLParser(etree.tostring(child)).results[0])
        return {'results': parsed_children, 'total': self.total}
class LinkedInErrorParser(LinkedInXMLParser):
    """Parse an API error document into a mappers.LinkedInError object."""

    def __init__(self, content):
        self.tree = content
        self.xpath_collection = {
            'status': etree.XPath('status'),
            'timestamp': etree.XPath('timestamp'),
            'error-code': etree.XPath('error-code'),
            'message': etree.XPath('message')
        }
        self.results = self.__build_data(self.tree)

    def __build_data(self, tree):
        """Collect the four error fields and wrap them in LinkedInError."""
        data = {
            key.replace('-', '_'): self.xpath_collection[key](tree)[0].text.strip()
            for key in ('status', 'timestamp', 'error-code', 'message')
        }
        return mappers.LinkedInError(data, tree)
class LinkedInPositionParser(LinkedInXMLParser):
    """Parse a <position> element into a mappers.Position object."""

    def __init__(self, content):
        self.tree = content
        self.xpath_collection = {
            'id': etree.XPath('id'),
            'title': etree.XPath('title'),
            'summary': etree.XPath('summary'),
            'start-date-year': etree.XPath('start-date/year'),
            'end-date-year': etree.XPath('end-date/year'),
            'start-date-month': etree.XPath('start-date/month'),
            'end-date-month': etree.XPath('end-date/month'),
            'is-current': etree.XPath('is-current'),
            'company-id': etree.XPath('company/id'),
            'company': etree.XPath('company/name')
        }
        self.results = self.__build_data(self.tree)

    def __build_data(self, tree):
        """Collect every present field, skipping optional elements."""
        data = {}
        for key, xpath in self.xpath_collection.items():
            matches = xpath(tree)
            if matches:  # absent optional elements are simply omitted
                data[key.replace('-', '_')] = matches[0].text
        return mappers.Position(data, tree)
class LinkedInEducationParser(LinkedInXMLParser):
    """Parse an <education> element into a mappers.Education object."""

    def __init__(self, content):
        self.tree = content
        self.xpath_collection = {
            'id': etree.XPath('id'),
            'school-name': etree.XPath('school-name'),
            'field-of-study': etree.XPath('field-of-study'),
            'start-date': etree.XPath('start-date/year'),
            'end-date': etree.XPath('end-date/year'),
            'degree': etree.XPath('degree'),
            'activities': etree.XPath('activities')
        }
        self.results = self.__build_data(self.tree)

    def __build_data(self, tree):
        """Map each direct child to a snake_cased key."""
        data = {}
        for node in tree.getchildren():
            children = node.getchildren()
            # leaf nodes carry their own text; containers such as
            # start-date expose the text of their first child
            value = children[0].text if children else node.text
            data[node.tag.replace('-', '_')] = value
        return mappers.Education(data, tree)
class LinkedInTwitterAccountParser(LinkedInXMLParser):
    """Parse a twitter account element into a mappers.TwitterAccount."""

    def __init__(self, content):
        self.tree = content
        self.xpath_collection = {
            'provider-account-id': etree.XPath('provider-account-id'),
            'provider-account-name': etree.XPath('provider-account-name'),
        }
        self.results = self.__build_data(self.tree)

    def __build_data(self, tree):
        """Collect each present field under a snake_cased key."""
        data = {}
        for key, xpath in self.xpath_collection.items():
            found = xpath(tree)
            if found:  # skip fields absent from this document
                data[key.replace('-', '_')] = found[0].text
        return mappers.TwitterAccount(data, tree)
class LinkedInMemberUrlResourceParser(LinkedInXMLParser):
    """Parse a member url resource into a mappers.MemberUrlResource."""

    def __init__(self, content):
        self.tree = content
        self.xpath_collection = {
            'url': etree.XPath('url'),
            'name': etree.XPath('name'),
        }
        self.results = self.__build_data(self.tree)

    def __build_data(self, tree):
        """Map each direct child to a snake_cased key."""
        data = {}
        for node in tree.getchildren():
            children = node.getchildren()
            # containers expose the text of their first child
            value = children[0].text if children else node.text
            data[node.tag.replace('-', '_')] = value
        return mappers.MemberUrlResource(data, tree)
class LinkedInSkillsParser(LinkedInXMLParser):
    """Parse a skill element into a mappers.Skills object."""

    def __init__(self, content):
        self.tree = content
        self.xpath_collection = {
            'id': etree.XPath('id'),
            'name': etree.XPath('skill/name'),
        }
        self.results = self.__build_data(self.tree)

    def __build_data(self, tree):
        """Map each direct child to a snake_cased key."""
        data = {}
        for node in tree.getchildren():
            children = node.getchildren()
            # containers (e.g. <skill>) expose the text of their first child
            value = children[0].text if children else node.text
            data[node.tag.replace('-', '_')] = value
        return mappers.Skills(data, tree)
| |
import logging
from collections import Counter
from copy import copy
from pathlib import Path
from typing import Iterable, NamedTuple, Union
from fs import path as fspath
from fs.base import FS
from fs.errors import NoSysPath
from fs.walk import Walker
from rich.console import Console
from . import config, console
from .actions import ACTIONS
from .actions.action import Action
from .filters import FILTERS
from .filters.filter import Filter
from .migration import migrate_v1
from .utils import (
basic_args,
deep_merge_inplace,
ensure_dict,
ensure_list,
fs_path_from_options,
to_args,
)
logger = logging.getLogger(__name__)
highlighted_console = Console()
class Location(NamedTuple):
    """A fully resolved rule location: a walker bound to a filesystem path."""

    # fs.walk.Walker configured from the location's options
    walker: Walker
    # the filesystem object the walker runs on
    fs: FS
    # base path inside `fs` from which walking starts
    fs_path: str
# File names excluded from every location by default (OS/editor metadata).
DEFAULT_SYSTEM_EXCLUDE_FILES = [
    "thumbs.db",
    "desktop.ini",
    "~$*",
    ".DS_Store",
    ".localized",
]
# Directory names excluded from every location by default (VCS internals).
DEFAULT_SYSTEM_EXCLUDE_DIRS = [
    ".git",
    ".svn",
]
def convert_options_to_walker_args(options: dict):
    """Translate a location options dict into keyword args for fs.walk.Walker."""

    def merged_or_none(system_key, user_key, system_default):
        # combine the system defaults with user-supplied entries;
        # Walker expects None rather than an empty list
        merged = copy(ensure_list(options.get(system_key, system_default)))
        merged.extend(ensure_list(options.get(user_key, [])))
        return merged or None

    exclude = merged_or_none(
        "system_exclude_files", "exclude_files", DEFAULT_SYSTEM_EXCLUDE_FILES
    )
    exclude_dirs = merged_or_none(
        "system_exclude_dirs", "exclude_dirs", DEFAULT_SYSTEM_EXCLUDE_DIRS
    )
    filter_ = copy(ensure_list(options.get("filter", []))) or None
    filter_dirs = copy(ensure_list(options.get("filter_dirs", []))) or None

    # assemble the full set of Walker arguments with their defaults
    return {
        "ignore_errors": options.get("ignore_errors", False),
        "on_error": options.get("on_error", None),
        "search": options.get("search", "depth"),
        "exclude": exclude,
        "exclude_dirs": exclude_dirs,
        "max_depth": options.get("max_depth", None),
        "filter": filter_,
        "filter_dirs": filter_dirs,
    }
def instantiate_location(options: Union[str, dict], default_max_depth=0) -> Location:
    """Create a Location from a path string or an options dict.

    Already-instantiated Location objects are passed through unchanged.
    `default_max_depth` (derived from the rule's `subfolders` setting by the
    caller) is applied when the options do not specify `max_depth`.
    """
    if isinstance(options, Location):
        return options
    if isinstance(options, str):
        options = {"path": options}
    # fall back to the rule-level max depth when none is given explicitly
    # (idiom fix: was `not "max_depth" in options`)
    if "max_depth" not in options:
        options["max_depth"] = default_max_depth
    if "walker" not in options:
        walker = Walker(**convert_options_to_walker_args(options))
    else:
        # a pre-built walker may be injected, e.g. by tests
        walker = options["walker"]
    fs, fs_path = fs_path_from_options(
        path=options.get("path", "/"),
        filesystem=options.get("filesystem"),
    )
    return Location(walker=walker, fs=fs, fs_path=fs_path)
def instantiate_filter(filter_config):
    """Build a Filter instance from its config entry (instances pass through)."""
    if isinstance(filter_config, Filter):
        return filter_config
    name, value = next(iter(ensure_dict(filter_config).items()))
    # a leading "not " prefix inverts the filter's logic
    invert = False
    tokens = name.split(maxsplit=1)
    if len(tokens) == 2 and tokens[0] == "not":
        invert = True
        name = tokens[1]
    args, kwargs = to_args(value)
    filter_instance = FILTERS[name](*args, **kwargs)
    filter_instance.set_logic(inverted=invert)
    return filter_instance
def instantiate_action(action_config):
    """Build an Action instance from its config entry (instances pass through)."""
    if isinstance(action_config, Action):
        return action_config
    name, value = next(iter(ensure_dict(action_config).items()))
    args, kwargs = to_args(value)
    return ACTIONS[name](*args, **kwargs)
def syspath_or_exception(fs, path):
    """Return the system Path for *path* on *fs*, or the NoSysPath error itself."""
    try:
        return Path(fs.getsyspath(path))
    except NoSysPath as exc:
        # virtual filesystems have no syspath; hand the error to the caller
        # so it can surface in the args dict instead of aborting the run
        return exc
def replace_with_instances(config: dict):
    """Replace location/filter/action configs in each rule with live instances.

    Mutates `config` in place. Returns a list of warning messages for
    locations that failed to instantiate but were marked `ignore_errors`.
    """
    warnings = []
    for rule in config["rules"]:
        # rules that recurse into subfolders get unlimited depth by default
        default_depth = None if rule.get("subfolders", False) else 0

        locations = []
        for loc_options in ensure_list(rule["locations"]):
            try:
                locations.append(
                    instantiate_location(
                        options=loc_options,
                        default_max_depth=default_depth,
                    )
                )
            except Exception as exc:
                ignorable = (
                    isinstance(loc_options, dict)
                    and loc_options.get("ignore_errors", False)
                )
                if ignorable:
                    warnings.append(str(exc))
                else:
                    raise ValueError(
                        "Invalid location %s (%s)" % (loc_options, exc)
                    ) from exc

        # filters are optional
        filters = []
        for entry in ensure_list(rule.get("filters", [])):
            try:
                filters.append(instantiate_filter(entry))
            except Exception as exc:
                raise ValueError("Invalid filter %s (%s)" % (entry, exc)) from exc

        # actions
        actions = []
        for entry in ensure_list(rule["actions"]):
            try:
                actions.append(instantiate_action(entry))
            except Exception as exc:
                raise ValueError("Invalid action %s (%s)" % (entry, exc)) from exc

        rule["locations"] = locations
        rule["filters"] = filters
        rule["actions"] = actions
    return warnings
def filter_pipeline(filters: Iterable[Filter], args: dict, filter_mode: str) -> bool:
    """
    run the filter pipeline.
    Returns True on a match, False otherwise and updates `args` in the process.
    `filter_mode` is "all", "any" or "none".
    """
    outcomes = []
    for flt in filters:
        try:
            # refresh the dynamic path args before each filter runs
            args["path"] = syspath_or_exception(args["fs"], args["fs_path"])
            args["relative_path"] = fspath.frombase(
                args["fs_base_path"], args["fs_path"]
            )
            matched, updates = flt.pipeline(args)
            outcome = matched ^ flt.inverted
            # "all" fails fast on a miss, "none" fails fast on a hit;
            # "any" cannot exit early — it must run every filter.
            if filter_mode == "all" and not outcome:
                return False
            if filter_mode == "none" and outcome:
                return False
            outcomes.append(outcome)
            deep_merge_inplace(args, updates)
        except Exception as e:  # pylint: disable=broad-except
            logger.exception(e)
            # console.print_exception()
            flt.print_error(str(e))
            return False
    if filter_mode == "any":
        return any(outcomes)
    return True
def action_pipeline(actions: Iterable[Action], args: dict, simulate: bool) -> bool:
    """Run every action in order; return False as soon as one errors out."""
    for act in actions:
        try:
            # keep the dynamic path args current for each action
            args["path"] = syspath_or_exception(args["fs"], args["fs_path"])
            args["relative_path"] = fspath.frombase(
                args["fs_base_path"], args["fs_path"]
            )
            result = act.pipeline(args, simulate=simulate)
            # actions may hand back a dict of updates to merge into args
            if result is not None:
                deep_merge_inplace(args, result)
        except Exception as e:  # pylint: disable=broad-except
            logger.exception(e)
            act.print_error(str(e))
            return False
    return True
def run_rules(rules: dict, simulate: bool = True):
    """Walk every rule's locations and run each resource through the rule's
    filter and action pipelines.

    Returns a Counter with 'done' (actions succeeded) and 'fail' totals.
    When `simulate` is True, actions only simulate their effects and a
    simulation banner is printed around the run.
    """
    count = Counter(done=0, fail=0) # type: Counter
    if simulate:
        console.simulation_banner()
    console.spinner(simulate=simulate)
    for rule_nr, rule in enumerate(rules["rules"], start=1):
        # a rule targets either files or directories, never both
        target = rule.get("targets", "files")
        console.rule(rule.get("name", "Rule %s" % rule_nr))
        filter_mode = rule.get("filter_mode", "all")
        for walker, walker_fs, walker_path in rule["locations"]:
            console.location(walker_fs, walker_path)
            walk = walker.files if target == "files" else walker.dirs
            for path in walk(fs=walker_fs, path=walker_path):
                # symlinks are skipped entirely
                if walker_fs.islink(path):
                    continue
                # tell the user which resource we're handling
                console.path(walker_fs, path)
                # assemble the available args
                args = basic_args()
                args.update(
                    fs=walker_fs,
                    fs_path=path,
                    fs_base_path=walker_path,
                )
                # run resource through the filter pipeline
                match = filter_pipeline(
                    filters=rule["filters"],
                    args=args,
                    filter_mode=filter_mode,
                )
                # if the currently handled resource changed we adjust the prefix message
                if args.get("resource_changed"):
                    console.path_changed_during_pipeline(
                        fs=walker_fs,
                        fs_path=path,
                        new_fs=args["fs"],
                        new_path=args["fs_path"],
                        reason=args.get("resource_changed"),
                    )
                args.pop("resource_changed", None)
                # run resource through the action pipeline
                if match:
                    is_success = action_pipeline(
                        actions=rule["actions"],
                        args=args,
                        simulate=simulate,
                    )
                    if is_success:
                        count["done"] += 1
                    else:
                        count["fail"] += 1
    if simulate:
        console.simulation_banner()
    return count
def run(rules: Union[str, dict], simulate: bool, validate=True):
    """Load, validate and execute a rule configuration.

    Raises RuntimeWarning when any action failed.
    """
    # load and validate
    if isinstance(rules, str):
        rules = config.load_from_string(rules)
    rules = config.cleanup(rules)
    migrate_v1(rules)
    if validate:
        config.validate(rules)
    # instantiate filters/actions/locations, surfacing soft failures
    for warning in replace_with_instances(rules):
        console.warn(warning)
    # run
    counts = run_rules(rules=rules, simulate=simulate)
    console.summary(counts)
    if counts["fail"]:
        raise RuntimeWarning("Some actions failed.")
| |
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2013, The BiPy Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
__credits__ = ["Evan Bolyen", "Jai Ram Rideout", "Daniel McDonald",
"Greg Caporaso"]
import os
import types
import os.path
import sys
from pyqi.util import is_py2
if is_py2():
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
else:
from http.server import BaseHTTPRequestHandler, HTTPServer
from cgi import parse_header, parse_multipart, parse_qs, FieldStorage
from copy import copy
from glob import glob
from os.path import abspath, exists, isdir, isfile, split
from pyqi.core.interface import (Interface, InterfaceOutputOption, InterfaceInputOption,
InterfaceUsageExample, get_command_names, get_command_config)
from pyqi.core.factory import general_factory
from pyqi.core.exception import IncompetentDeveloperError
from pyqi.core.command import Parameter
from pyqi.util import get_version_string
class HTMLResult(InterfaceOutputOption):
    """Base class for results for an HTML config file.

    Subclasses must supply a MIMEType, which is sent back in the
    response's Content-type header.
    """

    def __init__(self, MIMEType=None, **kwargs):
        super(HTMLResult, self).__init__(**kwargs)
        if MIMEType is None:
            raise IncompetentDeveloperError("A valid MIMEType must be provided")
        self.MIMEType = MIMEType
class HTMLDownload(HTMLResult):
    """Result class for downloading a file from the server."""

    def __init__(self, FileExtension=None, FilenameLookup=None, DefaultFilename=None,
                 MIMEType='application/octet-stream', **kwargs):
        super(HTMLDownload, self).__init__(MIMEType=MIMEType, **kwargs)
        # the download filename is either looked up from the form input
        # named by FilenameLookup or falls back to DefaultFilename, and
        # FileExtension (if any) is appended to it
        self.FileExtension = FileExtension
        self.FilenameLookup = FilenameLookup
        self.DefaultFilename = DefaultFilename
class HTMLPage(HTMLResult):
    """Result class for displaying a page for an HTML config file."""

    def __init__(self, MIMEType='text/html', **kwargs):
        # pages default to text/html but callers may override the mime type
        kwargs['MIMEType'] = MIMEType
        super(HTMLPage, self).__init__(**kwargs)
class HTMLInputOption(InterfaceInputOption):
    """Define an input option for an HTML config file."""

    # Maps an option Type to a callable converting the raw cgi FieldStorage
    # entry into a Python value.
    # BUGFIX: the None handler took zero arguments but cast_value calls every
    # handler with one; it now accepts (and ignores) the postdata argument.
    _type_handlers = {
        None: lambda x: None,
        str: lambda x: str(x.value),
        bool: lambda x: x.value == "True",
        int: lambda x: int(x.value),
        float: lambda x: float(x.value),
        complex: lambda x: complex(x.value),
        "upload_file": lambda x: x.file,
        "multiple_choice": lambda x: x.value
    }

    def __init__(self, Choices=None, Type=str, **kwargs):
        self.Choices = Choices
        super(HTMLInputOption, self).__init__(Type=Type, **kwargs)
        if Type == bool:
            # booleans are rendered as a True/False radio choice
            self.Choices = [True, False]

    def cast_value(self, postdata):
        """Casts str(postdata.value) as an object of the correct type"""
        return self._type_handlers[self.Type](postdata) if postdata is not None else None

    def get_html(self, prefix, value=""):
        """Return the HTML needed for user input given a default value"""
        if (not value) and (self.Default is not None):
            value = self.Default
        input_name = prefix + self.Name
        string_input = lambda: '<input type="text" name="%s" value="%s"/>' % (input_name, value)
        number_input = lambda: '<input type="number" name="%s" value="%s"/>' % (input_name, value)
        #html input files cannot have default values.
        #If the html interface worked as a data service, this would be possible as submit would be ajax.
        upload_input = lambda: '<input type="file" name="%s" />' % input_name
        mchoice_input = lambda: ''.join(
            [ ('(%s<input type="radio" name="%s" value="%s" %s/>)'
               % (choice, input_name, choice, 'checked="true"' if value == choice else ''))
              for choice in self.Choices ]
        )
        # pick the widget matching this option's declared Type
        input_switch = {
            None: string_input,
            str: string_input,
            bool: mchoice_input,
            int: number_input,
            float: number_input,
            complex: string_input,
            "multiple_choice": mchoice_input,
            "upload_file": upload_input
        }
        return ''.join(['<tr><td class="right">',
                        ('<span class="required">*</span>' + self.Name) if self.Required else self.Name,
                        '</td><td>',
                        input_switch[self.Type](),
                        '</td></tr><tr><td></td><td>',
                        self.Help,
                        '</td></tr><tr><td>&nbsp;</td></tr>'
                        ])

    def _validate_option(self):
        """Ensure Type is supported and Choices are consistent with it."""
        if self.Type not in self._type_handlers:
            raise IncompetentDeveloperError("Unsupported Type in HTMLInputOption: %s" % self.Type)
        #From optparse's __init__.py, inside class PyqiOption
        if self.Type == "multiple_choice":
            if self.Choices is None:
                # BUGFIX: error messages referenced nonexistent `self.type`
                # (would raise AttributeError); use `self.Type`.
                raise IncompetentDeveloperError(
                    "must supply a list of Choices for type '%s'" % self.Type, self)
            # BUGFIX: types.TupleType/types.ListType were removed in Python 3
            # (this module explicitly supports py3 via is_py2); use isinstance.
            elif not isinstance(self.Choices, (tuple, list)):
                raise IncompetentDeveloperError(
                    "choices must be a list of strings ('%s' supplied)"
                    % str(type(self.Choices)).split("'")[1], self)
        elif self.Choices is not None:
            raise IncompetentDeveloperError("must not supply Choices for type %r" % self.Type, self)
class HTMLInterface(Interface):
    """An HTML interface: renders a command as a web form and formats results."""

    #Relative mapping wasn't working on a collegue's MacBook when pyqi was run outside of it's directory
    #Until I understand why that was the case and how to fix it, I am putting the style css here.
    #This is not a permanent solution.
    css_style = '\n'.join([
        'html, body {',
        '    margin: 0px;',
        '    padding: 0px;',
        '    font-family: "Trebuchet MS",sans-serif;',
        '}',
        '#content {',
        '    padding-left: 20px;',
        '}',
        'h1 {',
        '    background-color: rgb(242, 242, 242);',
        '    font-weight: normal;',
        '    color: rgb(32, 67, 92);',
        '    border-bottom: 2px solid rgb(204, 204, 204);',
        '    margin: 0px;',
        '    padding: 3px 0px 3px 10px;',
        '}',
        '.right {',
        '    text-align: right;',
        '}',
        '.required {',
        '    color: red;',
        '}',
        '.error {',
        '    color: red;',
        '    border: 1px solid red;',
        '    padding: 5px;',
        '    background: pink;',
        '}',
        'ul {',
        '    list-style-type: none;',
        '    font-size: 20px;',
        '    float: left;',
        '}',
        'li {',
        '    padding: 5px;',
        '    margin-bottom:5px;',
        '    border: 1px solid rgb(204, 204, 204);',
        '    background: rgb(242, 242, 242);',
        '}',
        'a, a:visited, a:active{',
        '    color: rgb(32, 67, 92);',
        '}'
        ])

    def __init__(self, input_prefix="pyqi_", **kwargs):
        # form field names are namespaced with this prefix to avoid clashes
        self._html_input_prefix = input_prefix
        # cast form input, kept for filename/handler lookups after the call
        self._html_interface_input = {}
        super(HTMLInterface, self).__init__(**kwargs)

    #Override
    def __call__(self, in_, *args, **kwargs):
        """Validate and run the command; return a result dict or an error dict."""
        self._the_in_validator(in_)
        cmd_input, errors = self._input_handler(in_, *args, **kwargs)
        if errors:
            return {
                'type': 'error',
                'errors': errors
            }
        else:
            cmd_result = self.CmdInstance(**cmd_input)
            self._the_out_validator(cmd_result)
            return self._output_handler(cmd_result)

    def _validate_inputs_outputs(self, inputs, outputs):
        """Require exactly one output, of type HTMLPage or HTMLDownload."""
        super(HTMLInterface, self)._validate_inputs_outputs(inputs, outputs)
        if len(outputs) > 1:
            raise IncompetentDeveloperError("There can be only one... output")
        if not ( isinstance(outputs[0], HTMLPage) or isinstance(outputs[0], HTMLDownload) ):
            raise IncompetentDeveloperError("Output must subclass HTMLPage or HTMLDownload")

    def _validate_usage_examples(self, usage_examples):
        """Reject usage examples — the HTML interface has no use for them."""
        super(HTMLInterface, self)._validate_usage_examples(usage_examples)
        if usage_examples:
            raise IncompetentDeveloperError("There shouldn't be usage examples "
                                            "associated with this command.")

    def _the_in_validator(self, in_):
        """Validate input coming from the postvars"""
        if not isinstance(in_, FieldStorage):
            raise IncompetentDeveloperError("Unsupported input '%r'. Input "
                                            "must be FieldStorage." % in_)

    def _the_out_validator(self, out_):
        """Validate output coming from the command call"""
        if not isinstance(out_, dict):
            raise IncompetentDeveloperError("Unsupported result '%r'. Result "
                                            "must be a dict." % out_)

    def _input_handler(self, in_, *args, **kwargs):
        """reformat from http post data.

        Returns (cmd_input_kwargs, errors): keyword args for the command
        and a list of user-facing validation error strings.
        """
        errors = []
        # Parse our input: strip the form-field prefix and normalize empty
        # values to None.
        formatted_input = {}
        for key in in_:
            mod_key = key[ len(self._html_input_prefix): ]
            formatted_input[mod_key] = in_[key]
            if not formatted_input[mod_key].value:
                formatted_input[mod_key] = None
        cmd_input_kwargs = {}
        for option in self._get_inputs():
            if option.Name not in formatted_input:
                formatted_input[option.Name] = None
            if option.Required and formatted_input[option.Name] is None:
                errors.append("Error: %s is required." % option.Name)
                continue
            try:
                formatted_input[option.Name] = option.cast_value(formatted_input[option.Name])
            except (ValueError, TypeError):
                errors.append("Error: %s must be type %s" % (option.Name, option.Type))
            if option.Parameter is not None:
                param_name = option.getParameterName()
                if option.Handler is None:
                    value = formatted_input[option.Name]
                else:
                    value = option.Handler(formatted_input[option.Name])
                cmd_input_kwargs[param_name] = value
        # keep the cast input around for output handlers (filename lookup etc.)
        self._html_interface_input = formatted_input
        return cmd_input_kwargs, errors

    def _build_usage_lines(self, required_options):
        """ Build the usage string from components """
        return '<p class="usage_example">%s</p>' % self.CmdInstance.LongDescription

    def _output_download_handler(self, output, handled_results):
        """Handle the output for type: 'download' """
        #Set up the filename for download
        filename = "unnamed_pyqi_output"
        extension = ""
        if output.FileExtension is not None:
            extension = output.FileExtension
        if output.FilenameLookup is None:
            if output.DefaultFilename is not None:
                filename = output.DefaultFilename
        else:
            # pull the filename the user typed into the named form field
            lookup_filename = self._html_interface_input[output.FilenameLookup]
            if lookup_filename is not None:
                filename = lookup_filename
        filehandle = filename + extension
        return {
            'type':'download',
            'filename':filehandle,
            'contents':handled_results
        }

    def _output_page_handler(self, output, handled_results):
        """Handle the output for type: 'page' """
        return {
            'type':'page',
            'mime_type':output.MIMEType,
            'contents':handled_results
        }

    def _output_handler(self, results):
        """Deal with things in output if we know how"""
        output = self._get_outputs()[0]
        rk = output.Name
        if output.Handler is not None:
            if output.InputName is None:
                handled_results = output.Handler(rk, results[rk])
            else:
                handled_results = output.Handler(rk, results[rk],
                                                 self._html_interface_input[output.InputName])
        else:
            handled_results = results[rk]
        if isinstance(output, HTMLDownload):
            return self._output_download_handler(output, handled_results)
        elif isinstance(output, HTMLPage):
            return self._output_page_handler(output, handled_results)

    def command_page_writer(self, write, errors, postvars):
        """Write an HTML page which contains a form for user input"""
        write('<!DOCTYPE html><html><head><title>%s</title>' % self.CommandName)
        write('<style>')
        write(self.css_style)
        write('</style>')
        write('</head><body><h1>%s</h1><div id="content">' % self.CommandName)
        write(self._build_usage_lines([opt for opt in self._get_inputs() if opt.Required]))
        write('<p>An (<span class="required">*</span>) denotes a required field.</p>')
        for e in errors:
            write('<div class="error">%s</div>' % e)
        write('<form method="POST" enctype="multipart/form-data">')
        write('<table>')
        for i in self._get_inputs():
            full_name = self._html_input_prefix + i.Name
            # BUGFIX: was `i.Type is not 'upload_file'` — an identity check
            # against a str literal, which is unreliable and a SyntaxWarning
            # on modern Python; value comparison is intended here.
            if full_name in postvars and i.Type != 'upload_file':
                # re-fill the previously submitted value on re-render
                default = i.cast_value(postvars[full_name])
                write(i.get_html(self._html_input_prefix, value=default))
            else:
                write(i.get_html(self._html_input_prefix))
        write('</table>')
        write('<input type="submit">')
        write('</form>')
        write('</div></body></html>')
def html_interface_factory(command_constructor, usage_examples, inputs, outputs,
                           version, command_name):
    """Create an HTMLInterface subclass bound to a specific command."""
    cls = general_factory(command_constructor, usage_examples, inputs,
                          outputs, version, HTMLInterface)
    # the command name is used for page titles and URL routing
    cls.CommandName = command_name
    return cls
def get_cmd_obj(cmd_cfg_mod, cmd):
    """Get a ``Command`` object wrapped in an instantiated HTML interface."""
    cmd_cfg, _ = get_command_config(cmd_cfg_mod, cmd)
    interface_class = html_interface_factory(
        cmd_cfg.CommandConstructor,
        [],  # the HTML interface forbids usage examples
        cmd_cfg.inputs,
        cmd_cfg.outputs,
        get_version_string(cmd_cfg_mod),
        cmd,
    )
    return interface_class()
def get_http_handler(module):
    """Return a subclassed BaseHTTPRequestHandler with module in scope.

    *module* names the command-config module; its commands become the
    routable pages of the server.
    """
    module_commands = get_command_names(module)
    class HTMLInterfaceHTTPHandler(BaseHTTPRequestHandler):
        """Handle incoming HTTP requests"""
        def __init__(self, *args, **kwargs):
            # routing works first-match-wins: each route method checks this
            # flag and flips it once a response has been written
            self._unrouted = True
            #Apparently this is an 'oldstyle' class, which doesn't allow the use of super()
            BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
        def index(self, write):
            """Write the landing page listing all available commands."""
            write("<html><head><title>")
            write("PyQi: " + module)
            write("</title>")
            write("<style>")
            write(HTMLInterface.css_style)
            write("</style>")
            write("</head><body>")
            write("<h1>Available Commands:</h1>")
            write("<ul>")
            for command in module_commands:
                write( '<li><a href="/%s">%s</a></li>'%(command, command) )
            write("</ul>")
            write("</body></html>")
        def route(self, path, output_writer):
            """Define a route for an output_writer"""
            if self._unrouted and self.path == path:
                self.send_response(200)
                self.send_header('Content-type', 'text/html')
                self.end_headers()
                output_writer(self.wfile.write)
                self.wfile.close()
                self._unrouted = False;
        def command_route(self, command):
            """Define a route for a command and write the command page"""
            if self._unrouted and self.path == ("/" + command):
                cmd_obj = get_cmd_obj(module, command)
                self.send_response(200)
                self.send_header('Content-type', 'text/html')
                self.end_headers()
                # empty errors/postvars: this is the initial (GET) form render
                cmd_obj.command_page_writer(self.wfile.write, [], {})
                self.wfile.close()
                self._unrouted = False
        def post_route(self, command, postvars):
            """Define a route for user response and write the output or else provide errors"""
            if self._unrouted and self.path == ("/" + command):
                cmd_obj = get_cmd_obj(module, command)
                try:
                    result = cmd_obj(postvars)
                except Exception as e:
                    # surface unexpected command failures as form errors
                    result = {
                        'type':'error',
                        'errors':[e]
                    }
                if result['type'] == 'error':
                    # re-render the form with the submitted values and errors
                    self.send_response(400)
                    self.send_header('Content-type', 'text/html')
                    self.end_headers()
                    cmd_obj.command_page_writer(self.wfile.write, result['errors'], postvars)
                elif result['type'] == 'page':
                    self.send_response(200)
                    self.send_header('Content-type', result['mime_type'])
                    self.end_headers()
                    self.wfile.write(result['contents'])
                elif result['type'] == 'download':
                    # NOTE(review): filename is interpolated unescaped into the
                    # header — confirm it cannot contain CR/LF or quotes
                    self.send_response(200)
                    self.send_header('Content-type', 'application/octet-stream')
                    self.send_header('Content-disposition', 'attachment; filename='+result['filename'])
                    self.end_headers()
                    self.wfile.write(result['contents'])
                self.wfile.close()
                self._unrouted = False
        def end_routes(self):
            """If a route hasn't matched the path up to now, return a 404 and close stream"""
            if self._unrouted:
                self.send_response(404)
                self.end_headers()
                self.wfile.close()
                self._unrouted = False
        def do_GET(self):
            """Handle GET requests"""
            self.route("/", self.index)
            self.route("/index", self.index)
            self.route("/home", self.index)
            def r(write):#host.domain.tld/help
                write("This is still a very in development interface, there is no help.")
            self.route("/help", r)
            for command in module_commands:
                self.command_route(command)
            self.end_routes()
        def do_POST(self):
            """Handle POST requests"""
            postvars = FieldStorage(fp=self.rfile,
                                    headers=self.headers,
                                    environ={'REQUEST_METHOD':'POST',
                                             'CONTENT_TYPE':self.headers['Content-Type']})
            for command in module_commands:
                self.post_route(command, postvars)
            self.end_routes()
    return HTMLInterfaceHTTPHandler
#This will generally be called from a generated command.
def start_server(port, module):
    """Start a server for the HTMLInterface on the specified port"""
    server = HTTPServer(("", port), get_http_handler(module))
    print("-- Starting server at http://localhost:%d --" % port)
    print("To close the server, type 'ctrl-c' into this window.")
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        # graceful shutdown on ctrl-c
        return "-- Finished serving HTMLInterface --"
| |
import json
import logging
from django.conf import settings
from django.contrib.auth.views import logout as auth_logout
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db import transaction
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseRedirect, Http404
from django.shortcuts import get_object_or_404, render
from django.utils import timezone
from django.utils.safestring import mark_safe
from django.views.decorators.cache import cache_control, never_cache
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from django.utils.translation import ugettext as _
from haystack.generic_views import SearchView
from raven.contrib.django.models import client
from waffle.decorators import waffle_flag
import mozillians.phonebook.forms as forms
from mozillians.api.models import APIv2App
from mozillians.common.decorators import allow_public, allow_unvouched
from mozillians.common.templatetags.helpers import get_object_or_none, redirect, urlparams
from mozillians.common.middleware import LOGIN_MESSAGE, GET_VOUCHED_MESSAGE
from mozillians.common.urlresolvers import reverse
from mozillians.phonebook.models import Invite
from mozillians.phonebook.utils import redeem_invite
from mozillians.users.managers import EMPLOYEES, MOZILLIANS, PUBLIC, PRIVILEGED
from mozillians.users.models import AbuseReport, ExternalAccount, UserProfile
from mozillians.users.tasks import check_spam_account, update_email_in_basket
@allow_unvouched
def login(request):
    """Send completed profiles home; everyone else finishes profile edit."""
    destination = ('phonebook:home'
                   if request.user.userprofile.is_complete
                   else 'phonebook:profile_edit')
    return redirect(destination)
@never_cache
@allow_public
def home(request):
    """Render the landing page; show the start section when ?source= is set."""
    show_start = bool(request.GET.get('source', ''))
    return render(request, 'phonebook/home.html', {'show_start': show_start})
@waffle_flag('testing-autovouch-views')
@allow_unvouched
@never_cache
def vouch(request, username):
    """Automatically vouch username.

    This must be behind a waffle flag and activated only for testing
    purposes.
    """
    profile = get_object_or_404(UserProfile, user__username=username)
    description = 'Automatically vouched for testing purposes on {0}'.format(
        timezone.now())
    if profile.vouch(None, description=description, autovouch=True):
        messages.success(request, _('Successfully vouched user.'))
    else:
        # vouch() returns falsy when no new vouch was created, e.g. when
        # the vouch limit has already been reached
        error_msg = _('User not vouched. Maybe there are {0} vouches already?')
        messages.error(request, error_msg.format(settings.VOUCH_COUNT_LIMIT))
    return redirect('phonebook:profile_view', profile.user.username)
@waffle_flag('testing-autovouch-views')
@allow_unvouched
@never_cache
def unvouch(request, username):
    """Automatically remove all vouches from username.

    This must be behind a waffle flag and activated only for testing
    purposes.
    """
    profile = get_object_or_404(UserProfile, user__username=username)
    # Drop every vouch this profile has received.
    profile.vouches_received.all().delete()
    messages.success(request, _('Successfully unvouched user.'))
    return redirect('phonebook:profile_view', profile.user.username)
@allow_public
@never_cache
def view_profile(request, username):
    """View a profile by username.

    The viewer sees the profile filtered to the privacy level they are
    entitled to (public / mozillian / employee / privileged); owners can
    preview their own profile at any level via ``?view_as=``.  The same
    view also processes the abuse-report and vouch forms shown on the
    profile page.
    """
    data = {}
    privacy_mappings = {'anonymous': PUBLIC, 'mozillian': MOZILLIANS, 'employee': EMPLOYEES,
                        'privileged': PRIVILEGED, 'myself': None}
    privacy_level = None
    abuse_form = None
    if (request.user.is_authenticated() and request.user.username == username):
        # own profile: allow previewing it at a chosen privacy level
        view_as = request.GET.get('view_as', 'myself')
        privacy_level = privacy_mappings.get(view_as, None)
        profile = UserProfile.objects.privacy_level(privacy_level).get(user__username=username)
        data['privacy_mode'] = view_as
    else:
        userprofile_query = UserProfile.objects.filter(user__username=username)
        public_profile_exists = userprofile_query.public().exists()
        profile_exists = userprofile_query.exists()
        profile_complete = userprofile_query.exclude(full_name='').exists()
        if not public_profile_exists:
            if not request.user.is_authenticated():
                # you have to be authenticated to continue
                messages.warning(request, LOGIN_MESSAGE)
                return (login_required(view_profile, login_url=reverse('phonebook:home'))
                        (request, username))
            if not request.user.userprofile.is_vouched:
                # you have to be vouched to continue
                messages.error(request, GET_VOUCHED_MESSAGE)
                return redirect('phonebook:home')
        if not profile_exists or not profile_complete:
            raise Http404
        profile = UserProfile.objects.get(user__username=username)
        # Start at the public level, then widen to the viewer's own level.
        profile.set_instance_privacy_level(PUBLIC)
        if request.user.is_authenticated():
            profile.set_instance_privacy_level(
                request.user.userprofile.privacy_level)
    # Vouched viewers who cannot vouch this profile may report it for abuse.
    if (request.user.is_authenticated() and request.user.userprofile.is_vouched and
            not profile.can_vouch):
        abuse_report = get_object_or_none(AbuseReport, reporter=request.user.userprofile,
                                          profile=profile)
        if not abuse_report:
            abuse_report = AbuseReport(reporter=request.user.userprofile, profile=profile)
        abuse_form = forms.AbuseReportForm(request.POST or None, instance=abuse_report)
        if abuse_form.is_valid():
            abuse_form.save()
            msg = _(u'Thanks for helping us improve mozillians.org!')
            messages.info(request, msg)
            return redirect('phonebook:profile_view', profile.user.username)
    if (request.user.is_authenticated() and profile.is_vouchable(request.user.userprofile)):
        vouch_form = forms.VouchForm(request.POST or None)
        data['vouch_form'] = vouch_form
        if vouch_form.is_valid():
            # We need to re-fetch profile from database.
            profile = UserProfile.objects.get(user__username=username)
            profile.vouch(request.user.userprofile, vouch_form.cleaned_data['description'])
            # Notify the current user that they vouched successfully.
            msg = _(u'Thanks for vouching for a fellow Mozillian! This user is now vouched!')
            messages.info(request, msg)
            return redirect('phonebook:profile_view', profile.user.username)
    data['shown_user'] = profile.user
    data['profile'] = profile
    data['groups'] = profile.get_annotated_groups()
    data['abuse_form'] = abuse_form
    # Only show pending groups if user is looking at their own profile,
    # or current user is a superuser
    if not (request.user.is_authenticated() and
            (request.user.username == username or request.user.is_superuser)):
        data['groups'] = [grp for grp in data['groups'] if not (grp.pending or grp.pending_terms)]
    return render(request, 'phonebook/profile.html', data)
@allow_unvouched
@never_cache
def edit_profile(request):
    """Edit user profile view.

    The edit page is split into named sections; each POST carries exactly
    one section marker and only the forms registered for that section are
    bound to the submitted data, validated and saved.
    """
    # Don't use request.user
    user = User.objects.get(pk=request.user.id)
    profile = user.userprofile
    user_groups = profile.groups.all().order_by('name')
    emails = ExternalAccount.objects.filter(type=ExternalAccount.TYPE_EMAIL)
    accounts_qs = ExternalAccount.objects.exclude(type=ExternalAccount.TYPE_EMAIL)
    # Maps each POSTed section marker to the forms that section saves.
    sections = {
        'registration_section': ['user_form', 'registration_form'],
        'basic_section': ['user_form', 'basic_information_form'],
        'groups_section': ['groups_privacy_form'],
        'skills_section': ['skills_form'],
        'email_section': ['email_privacy_form', 'alternate_email_formset'],
        'languages_section': ['language_privacy_form', 'language_formset'],
        'accounts_section': ['accounts_formset'],
        'location_section': ['location_form'],
        'irc_section': ['irc_form'],
        'contribution_section': ['contribution_form'],
        'tshirt_section': ['tshirt_form'],
    }
    curr_sect = next((s for s in sections.keys() if s in request.POST), None)

    def get_request_data(form):
        # Bind POST data only to forms belonging to the submitted section.
        if curr_sect and form in sections[curr_sect]:
            return request.POST
        return None
    ctx = {}
    ctx['user_form'] = forms.UserForm(get_request_data('user_form'), instance=user)
    ctx['registration_form'] = forms.RegisterForm(get_request_data('registration_form'),
                                                  request.FILES or None,
                                                  instance=profile)
    basic_information_data = get_request_data('basic_information_form')
    ctx['basic_information_form'] = forms.BasicInformationForm(basic_information_data,
                                                               request.FILES or None,
                                                               instance=profile)
    ctx['accounts_formset'] = forms.AccountsFormset(get_request_data('accounts_formset'),
                                                    instance=profile,
                                                    queryset=accounts_qs)
    ctx['location_form'] = forms.LocationForm(get_request_data('location_form'), instance=profile)
    ctx['language_formset'] = forms.LanguagesFormset(get_request_data('language_formset'),
                                                     instance=profile,
                                                     locale=request.locale)
    language_privacy_data = get_request_data('language_privacy_form')
    ctx['language_privacy_form'] = forms.LanguagesPrivacyForm(language_privacy_data,
                                                              instance=profile)
    ctx['skills_form'] = forms.SkillsForm(get_request_data('skills_form'), instance=profile)
    ctx['contribution_form'] = forms.ContributionForm(get_request_data('contribution_form'),
                                                      instance=profile)
    ctx['tshirt_form'] = forms.TshirtForm(get_request_data('tshirt_form'), instance=profile)
    ctx['groups_privacy_form'] = forms.GroupsPrivacyForm(get_request_data('groups_privacy_form'),
                                                         instance=profile)
    ctx['irc_form'] = forms.IRCForm(get_request_data('irc_form'), instance=profile)
    ctx['email_privacy_form'] = forms.EmailPrivacyForm(get_request_data('email_privacy_form'),
                                                       instance=profile)
    alternate_email_formset_data = get_request_data('alternate_email_formset')
    ctx['alternate_email_formset'] = forms.AlternateEmailFormset(alternate_email_formset_data,
                                                                 instance=profile,
                                                                 queryset=emails)
    ctx['autocomplete_form_media'] = ctx['registration_form'].media + ctx['skills_form'].media
    forms_valid = True
    if request.POST:
        if not curr_sect:
            raise Http404
        # Materialize as a list: on Python 3 ``map`` is a one-shot iterator
        # and would already be exhausted by the validity check before the
        # save loop below ran.
        curr_forms = [ctx[name] for name in sections[curr_sect]]
        forms_valid = all(f.is_valid() for f in curr_forms)
        if forms_valid:
            old_username = request.user.username
            for f in curr_forms:
                f.save()
            # Spawn task to check for spam
            if not profile.can_vouch:
                params = {
                    'instance_id': profile.id,
                    'user_ip': request.META.get('REMOTE_ADDR'),
                    'user_agent': request.META.get('HTTP_USER_AGENT'),
                    'referrer': request.META.get('HTTP_REFERER'),
                    'comment_author': profile.full_name,
                    'comment_author_email': profile.email,
                    'comment_content': profile.bio
                }
                check_spam_account.delay(**params)
            next_section = request.GET.get('next')
            next_url = urlparams(reverse('phonebook:profile_edit'), next_section)
            if curr_sect == 'registration_section':
                settings_url = reverse('phonebook:profile_edit')
                settings_link = '<a href="{0}">settings</a>'.format(settings_url)
                msg = _(u'Your registration is complete. '
                        u'Feel free to visit the {0} page to add '
                        u'additional information to your profile.'.format(settings_link))
                messages.info(request, mark_safe(msg))
                redeem_invite(profile, request.session.get('invite-code'))
                next_url = reverse('phonebook:profile_view', args=[user.username])
            elif user.username != old_username:
                msg = _(u'You changed your username; '
                        u'please note your profile URL has also changed.')
                # Bugfix: ``msg`` is already translated by _() above; the
                # original wrapped it in _() a second time, passing an
                # already-translated string back through the catalog lookup.
                messages.info(request, msg)
            return HttpResponseRedirect(next_url)
    ctx.update({
        'user_groups': user_groups,
        'profile': request.user.userprofile,
        'vouch_threshold': settings.CAN_VOUCH_THRESHOLD,
        'appsv2': profile.apps.filter(enabled=True),
        'forms_valid': forms_valid
    })
    return render(request, 'phonebook/edit_profile.html', ctx)
@allow_unvouched
@never_cache
def delete_email(request, email_pk):
    """Delete one of the current user's alternate email addresses."""
    # Don't trust request.user directly; reload from the database.
    account_owner = User.objects.get(pk=request.user.id)
    owner_profile = account_owner.userprofile
    # Only email owner can delete emails
    owned = ExternalAccount.objects.filter(user=owner_profile, pk=email_pk)
    if not owned.exists():
        raise Http404()
    ExternalAccount.objects.get(pk=email_pk).delete()
    return redirect('phonebook:profile_edit')
@allow_unvouched
@never_cache
def change_primary_email(request, email_pk):
    """Change primary email address.

    Swaps the selected alternate email with the user's current primary
    address.  Both saves run inside one transaction so the two records
    cannot get out of sync, then Basket is told about the change.
    """
    user = User.objects.get(pk=request.user.id)
    profile = user.userprofile
    alternate_emails = ExternalAccount.objects.filter(user=profile,
                                                      type=ExternalAccount.TYPE_EMAIL)
    # Only email owner can change primary email
    if not alternate_emails.filter(pk=email_pk).exists():
        raise Http404()
    alternate_email = alternate_emails.get(pk=email_pk)
    primary_email = user.email
    # Change primary email
    user.email = alternate_email.identifier
    # Turn primary email to alternate
    alternate_email.identifier = primary_email
    with transaction.atomic():
        user.save()
        alternate_email.save()
    # Notify Basket about this change
    update_email_in_basket.delay(primary_email, user.email)
    return redirect('phonebook:profile_edit')
@allow_unvouched
@never_cache
def confirm_delete(request):
    """Show a confirmation page asking whether the user wants to leave."""
    template = 'phonebook/confirm_delete.html'
    return render(request, template)
@allow_unvouched
@never_cache
@require_POST
def delete(request):
    """Delete the current user's account, then log them out."""
    request.user.delete()
    farewell = _('Your account has been deleted. Thanks for being a Mozillian!')
    messages.info(request, farewell)
    return logout(request)
@allow_public
@cache_control(public=True, must_revalidate=True, max_age=3600 * 24 * 7)  # 1 week.
def search_plugin(request):
    """Render an OpenSearch Plugin."""
    return render(
        request,
        'phonebook/search_opensearch.xml',
        content_type='application/opensearchdescription+xml')
def invite(request):
    """Invite someone to Mozillians.

    Only users who can vouch get the invite and vouch forms; a valid
    invitation also requires a vouch description, stored as the
    invite's reason.
    """
    profile = request.user.userprofile
    invite_form = None
    vouch_form = None
    if profile.can_vouch:
        invite_form = forms.InviteForm(request.POST or None,
                                       instance=Invite(inviter=profile))
        vouch_form = forms.VouchForm(request.POST or None)
    if invite_form and vouch_form and invite_form.is_valid() and vouch_form.is_valid():
        # The vouch description becomes the stored reason for the invite.
        invite_form.instance.reason = vouch_form.cleaned_data['description']
        invite = invite_form.save()
        invite.send(sender=profile, personal_message=invite_form.cleaned_data['message'])
        msg = _(u"%s has been invited to Mozillians. They'll receive an email "
                u"with instructions on how to join. You can "
                u"invite another Mozillian if you like.") % invite.recipient
        messages.success(request, msg)
        return redirect('phonebook:invite')
    return render(request, 'phonebook/invite.html',
                  {
                      'invite_form': invite_form,
                      'vouch_form': vouch_form,
                      'invites': profile.invites.all(),
                      'vouch_threshold': settings.CAN_VOUCH_THRESHOLD,
                  })
@require_POST
def delete_invite(request, invite_pk):
    """Revoke one of the current user's invitations.

    Only the inviter may revoke, and only while the invite is unredeemed.
    """
    profile = request.user.userprofile
    deleted_invite = get_object_or_404(Invite, pk=invite_pk, inviter=profile, redeemed=None)
    deleted_invite.delete()
    msg = (_(u"%s's invitation to Mozillians has been revoked. "
             u"You can invite %s again if you like.") %
           (deleted_invite.recipient, deleted_invite.recipient))
    messages.success(request, msg)
    return redirect('phonebook:invite')
def apikeys(request):
    """List the current user's enabled API apps and handle key requests."""
    profile = request.user.userprofile
    # The form is bound to a fresh, enabled app owned by the current user.
    apikey_request_form = forms.APIKeyRequestForm(
        request.POST or None,
        instance=APIv2App(enabled=True, owner=profile)
    )
    if apikey_request_form.is_valid():
        apikey_request_form.save()
        msg = _(u'API Key generated successfully.')
        messages.success(request, msg)
        return redirect('phonebook:apikeys')
    data = {
        'appsv2': profile.apps.filter(enabled=True),
        'apikey_request_form': apikey_request_form,
    }
    return render(request, 'phonebook/apikeys.html', data)
def delete_apikey(request, api_pk):
    """Delete an API key app; only its owner may do so."""
    owner = request.user.userprofile
    key = get_object_or_404(APIv2App, pk=api_pk, owner=owner)
    key.delete()
    messages.success(request, _('API key successfully deleted.'))
    return redirect('phonebook:apikeys')
def list_mozillians_in_location(request, country, region=None, city=None):
    """List vouched Mozillians in a country, optionally narrowed by
    region and/or city (case-insensitive name matches), paginated."""
    queryset = UserProfile.objects.vouched().filter(country__name__iexact=country)
    show_pagination = False
    if city:
        queryset = queryset.filter(city__name__iexact=city)
    if region:
        queryset = queryset.filter(region__name__iexact=region)
    paginator = Paginator(queryset, settings.ITEMS_PER_PAGE)
    page = request.GET.get('page', 1)
    try:
        people = paginator.page(page)
    except PageNotAnInteger:
        # Non-numeric page parameter: fall back to the first page.
        people = paginator.page(1)
    except EmptyPage:
        # Out-of-range page parameter: fall back to the last page.
        people = paginator.page(paginator.num_pages)
    if paginator.count > settings.ITEMS_PER_PAGE:
        show_pagination = True
    data = {'people': people,
            'country_name': country,
            'city_name': city,
            'region_name': region,
            'page': page,
            'show_pagination': show_pagination}
    return render(request, 'phonebook/location_list.html', data)
@allow_unvouched
def logout(request):
    """Log the user out of their session, then send them home."""
    auth_logout(request)
    home_url = 'phonebook:home'
    return redirect(home_url)
@allow_public
def register(request):
    """Registers Users.

    Pulls out an invite code if it exists and auto validates the user
    if so. Single-purpose view.
    """
    # TODO already vouched users can be re-vouched?
    if 'code' in request.GET:
        # Stash the code in the session so profile_edit can redeem it later.
        request.session['invite-code'] = request.GET['code']
        if request.user.is_authenticated():
            if not request.user.userprofile.is_vouched:
                # Signed-in but unvouched: redeem the invitation right away.
                redeem_invite(request.user.userprofile, request.session['invite-code'])
        else:
            messages.info(request, _("You've been invited to join Mozillians.org! "
                                     "Sign in and then you can create a profile."))
    return redirect('phonebook:home')
@require_POST
@csrf_exempt
@allow_public
def capture_csp_violation(request):
    """Receive a browser CSP violation report and forward it to Sentry."""
    data = client.get_data_from_request(request)
    data.update({
        'level': logging.INFO,
        'logger': 'CSP',
    })
    try:
        csp_data = json.loads(request.body)
    except ValueError:
        # Cannot decode CSP violation data, ignore
        return HttpResponseBadRequest('Invalid CSP Report')
    try:
        blocked_uri = csp_data['csp-report']['blocked-uri']
    except KeyError:
        # Incomplete CSP report
        return HttpResponseBadRequest('Incomplete CSP Report')
    client.captureMessage(
        message='CSP Violation: {}'.format(blocked_uri),
        data=data)
    return HttpResponse('Captured CSP violation, thanks for reporting.')
# Django haystack
@allow_public
class PhonebookSearchView(SearchView):
    # Haystack-backed search page for the phonebook.
    form_class = forms.PhonebookSearchForm
    template_name = 'phonebook/search.html'

    def get_queryset(self):
        # No extra filtering beyond the default haystack search queryset.
        sqs = super(PhonebookSearchView, self).get_queryset()
        return sqs

    def get_form_kwargs(self):
        """Pass the request.user to the form's kwargs."""
        kwargs = super(PhonebookSearchView, self).get_form_kwargs()
        kwargs['request'] = self.request
        return kwargs
| |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import re
from monty.io import zopen
from monty.dev import requires
from monty.tempfile import ScratchDir
from pymatgen.core.structure import Structure, Molecule
from pymatgen.core.lattice import Lattice
from pymatgen.io.cssr import Cssr
from pymatgen.io.xyz import XYZ
try:
from zeo.netstorage import AtomNetwork, VoronoiNetwork
from zeo.area_volume import volume, surface_area
from zeo.cluster import get_nearest_largest_diameter_highaccuracy_vornode, \
generate_simplified_highaccuracy_voronoi_network, \
prune_voronoi_network_close_node
zeo_found = True
except ImportError:
zeo_found = False
"""
Module implementing classes and functions to use Zeo++.
Zeo++ Installation Steps:
========================
1) Zeo++ requires Voro++. Download Voro++ from code.lbl.gov using
subversion:
   "svn checkout --username anonsvn https://code.lbl.gov/svn/voro/trunk"
   Password is anonsvn.
2) Stable version of Zeo++ can be obtained from
http://www.maciejharanczyk.info/Zeopp/
Alternatively it can be obtained from code.lbl.gov. Replace voro
with zeo.
3) (Optional) Install cython from pip
Mac OS X:
4) (a) Edit the Voro++/voro/trunk/config.mk file to suit your environment
(compiler, linker).
(b) Run make command
5) (a) Edit the Zeo++/trunk/cython_wrapper/setup.py to correctly point to
Voro++ directory.
(b) Run "python setup.py develop" to install Zeo++ python bindings.
Be patient, it will take a while.
Linux:
4) (a) Edit the Voro++/voro/trunk/config.mk file to suit your environment.
(b) Also add -fPIC option to CFLAGS variable in config.mk file.
(c) Run make command
5) (a) Go to Zeo++/zeo/trunk folder and compile zeo++ library using the
command "make dylib".
(b) Edit the Zeo++/trunk/cython_wrapper/setup_alt.py to correctly
point to Voro++ directory.
(c) Run "python setup_alt.py develop" to install Zeo++ python bindings.
Zeo++ Post-Installation Checking:
==============================
1) Go to pymatgen/io/tests and run "python test_zeoio.py"
If Zeo++ python bindings are properly installed, the tests should
pass. One or two tests will be skipped.
b) Go to pymatgen/analysis/defects/tests and run
"python test_point_defects.py". Lots of tests will be skipped if GULP
is not installed. But there should be no errors.
"""
__author__ = "Bharat Medasani"
# Bugfix: was "__copyright" and "__data__" -- missing trailing underscores
# and a typo for the conventional "__date__" metadata name.
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Bharat Medasani"
__email__ = "mbkumar@gmail.com"
__date__ = "Aug 2, 2013"
class ZeoCssr(Cssr):
    """
    ZeoCssr adds extra fields to CSSR sites to conform with Zeo++
    input CSSR format. The coordinate system is rotated from xyz to zyx.
    This change aligns the pivot axis of pymatgen (z-axis) to pivot axis
    of Zeo++ (x-axis) for structural modifications.

    Args:
        structure: A structure to create ZeoCssr object
    """
    def __init__(self, structure):
        super(ZeoCssr, self).__init__(structure)

    def __str__(self):
        """
        CSSR.__str__ method is modified to pad 0's to the CSSR site data.
        The padding is to conform with the CSSR format supported Zeo++.
        The oxidation state is stripped from site.specie
        Also coordinate system is rotated from xyz to zxy
        """
        # Lattice lengths and angles are emitted in rotated order
        # (c, a, b) / (gamma, alpha, beta) so pymatgen's c-axis maps
        # onto Zeo++'s a-axis.
        output = [
            "{:.4f} {:.4f} {:.4f}"
            # .format(*self.structure.lattice.abc),
            .format(self.structure.lattice.c,
                    self.structure.lattice.a,
                    self.structure.lattice.b),
            "{:.2f} {:.2f} {:.2f} SPGR = 1 P 1 OPT = 1"
            # .format(*self.structure.lattice.angles),
            .format(self.structure.lattice.gamma,
                    self.structure.lattice.alpha,
                    self.structure.lattice.beta),
            "{} 0".format(len(self.structure)),
            "0 {}".format(self.structure.formula)
        ]
        for i, site in enumerate(self.structure.sites):
            # if not hasattr(site, 'charge'):
            #     charge = 0
            # else:
            #     charge = site.charge
            charge = site.charge if hasattr(site, 'charge') else 0
            # specie = site.specie.symbol
            specie = site.species_string
            # Fractional coordinates rotated to (c, a, b); the run of
            # zeroes pads the connectivity columns of the CSSR format.
            output.append(
                "{} {} {:.4f} {:.4f} {:.4f} 0 0 0 0 0 0 0 0 {:.4f}"
                .format(
                    i + 1, specie, site.c, site.a, site.b, charge
                    # i+1, site.specie, site.a, site.b, site.c, site.charge
                )
            )
        return "\n".join(output)

    @staticmethod
    def from_string(string):
        """
        Reads a string representation to a ZeoCssr object.

        Args:
            string: A string representation of a ZeoCSSR.

        Returns:
            ZeoCssr object.
        """
        lines = string.split("\n")
        toks = lines[0].split()
        lengths = [float(i) for i in toks]
        toks = lines[1].split()
        angles = [float(i) for i in toks[0:3]]
        # Zeo++ takes x-axis along a and pymatgen takes z-axis along c
        a = lengths.pop(-1)
        lengths.insert(0, a)
        alpha = angles.pop(-1)
        angles.insert(0, alpha)
        latt = Lattice.from_lengths_and_angles(lengths, angles)
        sp = []
        coords = []
        chrg = []
        for l in lines[4:]:
            # Site line: index, element, three coords, eight padding
            # zeroes, then the charge.
            m = re.match(r'\d+\s+(\w+)\s+([0-9\-\.]+)\s+([0-9\-\.]+)\s+' +
                         r'([0-9\-\.]+)\s+(?:0\s+){8}([0-9\-\.]+)', l.strip())
            if m:
                sp.append(m.group(1))
                # coords.append([float(m.group(i)) for i in xrange(2, 5)])
                # Zeo++ takes x-axis along a and pymatgen takes z-axis along c
                coords.append([float(m.group(i)) for i in [3, 4, 2]])
                chrg.append(m.group(5))
        return ZeoCssr(
            Structure(latt, sp, coords, site_properties={'charge': chrg})
        )

    @staticmethod
    def from_file(filename):
        """
        Reads a CSSR file to a ZeoCssr object.

        Args:
            filename: Filename to read from.

        Returns:
            ZeoCssr object.
        """
        with zopen(filename, "r") as f:
            return ZeoCssr.from_string(f.read())
class ZeoVoronoiXYZ(XYZ):
    """
    Class to read Voronoi Nodes from XYZ file written by Zeo++.
    The sites have an additional column representing the voronoi node radius.
    The voronoi node radius is represented by the site property voronoi_radius.

    Args:
        mol: Input molecule holding the voronoi node information
    """
    def __init__(self, mol):
        super(ZeoVoronoiXYZ, self).__init__(mol)

    @staticmethod
    def from_string(contents):
        """
        Creates Zeo++ Voronoi XYZ object from a string.
        from_string method of XYZ class is being redefined.

        Args:
            contents: String representing Zeo++ Voronoi XYZ file.

        Returns:
            ZeoVoronoiXYZ object
        """
        lines = contents.split("\n")
        num_sites = int(lines[0])
        coords = []
        sp = []
        prop = []
        # Site line: element symbol, three coordinates, voronoi radius.
        coord_patt = re.compile(
            r"(\w+)\s+([0-9\-\.]+)\s+([0-9\-\.]+)\s+([0-9\-\.]+)\s+" +
            r"([0-9\-\.]+)"
        )
        for i in range(2, 2 + num_sites):
            m = coord_patt.search(lines[i])
            if m:
                sp.append(m.group(1))  # this is 1-indexed
                # coords.append(map(float, m.groups()[1:4])) # this is 0-indexed
                # Groups reordered [3, 4, 2] to undo the Zeo++ axis rotation
                # (Zeo++ x-axis corresponds to pymatgen z-axis).
                coords.append([float(j)
                               for j in [m.group(i) for i in [3, 4, 2]]])
                prop.append(float(m.group(5)))
        return ZeoVoronoiXYZ(
            Molecule(sp, coords, site_properties={'voronoi_radius': prop})
        )

    @staticmethod
    def from_file(filename):
        """
        Creates XYZ object from a file.

        Args:
            filename: XYZ filename

        Returns:
            XYZ object
        """
        with zopen(filename) as f:
            return ZeoVoronoiXYZ.from_string(f.read())

    def __str__(self):
        # Emit coordinates in rotated (z, x, y) order with the voronoi
        # radius as the trailing column.
        output = [str(len(self._mol)), self._mol.composition.formula]
        fmtstr = "{{}} {{:.{0}f}} {{:.{0}f}} {{:.{0}f}} {{:.{0}f}}".format(
            self.precision
        )
        for site in self._mol:
            output.append(fmtstr.format(
                site.specie.symbol, site.z, site.x, site.y,
                # site.specie, site.x, site.y, site.z,
                site.properties['voronoi_radius']
            ))
        return "\n".join(output)
@requires(zeo_found,
          "get_voronoi_nodes requires Zeo++ cython extension to be "
          "installed. Please contact developers of Zeo++ to obtain it.")
def get_voronoi_nodes(structure, rad_dict=None, probe_rad=0.1):
    """
    Analyze the void space in the input structure using voronoi
    decomposition.  Calls Zeo++ for Voronoi decomposition.

    Args:
        structure: pymatgen.core.structure.Structure
        rad_dict (optional): Dictionary of radii of elements in structure.
            If not given, Zeo++ default values are used.
            Note: Zeo++ uses atomic radii of elements.
            For ionic structures, pass rad_dict with ionic radii
        probe_rad (optional): Sampling probe radius in Angstroms. Default is
            0.1 A

    Returns:
        Tuple of three pymatgen.core.structure.Structure objects -- the
        voronoi nodes, the voronoi edge centers and the voronoi face
        centers -- all within the unit cell defined by the lattice of
        the input structure.
    """
    # All intermediate Zeo++ files live in a scratch dir cleaned on exit.
    with ScratchDir('.'):
        name = "temp_zeo1"
        zeo_inp_filename = name + ".cssr"
        ZeoCssr(structure).write_file(zeo_inp_filename)
        rad_file = None
        rad_flag = False
        if rad_dict:
            rad_file = name + ".rad"
            rad_flag = True
            # One "<element> <radius>" pair per line for Zeo++.
            with open(rad_file, 'w+') as fp:
                for el in rad_dict.keys():
                    fp.write("{} {}\n".format(el, rad_dict[el].real))
        atmnet = AtomNetwork.read_from_CSSR(
            zeo_inp_filename, rad_flag=rad_flag, rad_file=rad_file)
        vornet, vor_edge_centers, vor_face_centers = \
            atmnet.perform_voronoi_decomposition()
        vornet.analyze_writeto_XYZ(name, probe_rad, atmnet)
        voro_out_filename = name + '_voro.xyz'
        voro_node_mol = ZeoVoronoiXYZ.from_file(voro_out_filename).molecule
        # Nodes are stored as dummy "X" species carrying the node radius.
        species = ["X"] * len(voro_node_mol.sites)
        coords = []
        prop = []
        for site in voro_node_mol.sites:
            coords.append(list(site.coords))
            prop.append(site.properties['voronoi_radius'])
        lattice = Lattice.from_lengths_and_angles(
            structure.lattice.abc, structure.lattice.angles)
        vor_node_struct = Structure(
            lattice, species, coords, coords_are_cartesian=True,
            to_unit_cell=True, site_properties={"voronoi_radius": prop})
        # PMG-Zeo c<->a transformation for voronoi face centers
        rot_face_centers = [(center[1], center[2], center[0]) for center in
                            vor_face_centers]
        rot_edge_centers = [(center[1], center[2], center[0]) for center in
                            vor_edge_centers]
        species = ["X"] * len(rot_face_centers)
        prop = [0.0] * len(rot_face_centers)  # Vor radius not evaluated for fc
        vor_facecenter_struct = Structure(
            lattice, species, rot_face_centers, coords_are_cartesian=True,
            to_unit_cell=True, site_properties={"voronoi_radius": prop})
        species = ["X"] * len(rot_edge_centers)
        prop = [0.0] * len(rot_edge_centers)  # Vor radius not evaluated for fc
        vor_edgecenter_struct = Structure(
            lattice, species, rot_edge_centers, coords_are_cartesian=True,
            to_unit_cell=True, site_properties={"voronoi_radius": prop})
        return vor_node_struct, vor_edgecenter_struct, vor_facecenter_struct
def get_high_accuracy_voronoi_nodes(structure, rad_dict, probe_rad=0.1):
    """
    Analyze the void space in the input structure using high accuracy
    voronoi decomposition.  Calls Zeo++ for Voronoi decomposition.

    Args:
        structure: pymatgen.core.structure.Structure
        rad_dict: Dictionary of radii of elements in structure
            (required here, unlike get_voronoi_nodes).
            Note: Zeo++ uses atomic radii of elements.
            For ionic structures, pass rad_dict with ionic radii
        probe_rad (optional): Sampling probe radius in Angstroms.
            Default is 0.1 A

    Returns:
        voronoi nodes as pymatgen.core.structure.Structure within the
        unit cell defined by the lattice of input structure
    """
    with ScratchDir('.'):
        name = "temp_zeo1"
        zeo_inp_filename = name + ".cssr"
        ZeoCssr(structure).write_file(zeo_inp_filename)
        # Radii are mandatory for the high-accuracy network.
        rad_flag = True
        rad_file = name + ".rad"
        with open(rad_file, 'w+') as fp:
            for el in rad_dict.keys():
                print("{} {}".format(el, rad_dict[el].real), file=fp)
        atmnet = AtomNetwork.read_from_CSSR(
            zeo_inp_filename, rad_flag=rad_flag, rad_file=rad_file)
        # vornet, vor_edge_centers, vor_face_centers = \
        #     atmnet.perform_voronoi_decomposition()
        # Reduce the high-accuracy network by pruning close nodes; the
        # commented alternatives are other reduction strategies.
        red_ha_vornet = \
            prune_voronoi_network_close_node(atmnet)
        # generate_simplified_highaccuracy_voronoi_network(atmnet)
        # get_nearest_largest_diameter_highaccuracy_vornode(atmnet)
        red_ha_vornet.analyze_writeto_XYZ(name, probe_rad, atmnet)
        voro_out_filename = name + '_voro.xyz'
        voro_node_mol = ZeoVoronoiXYZ.from_file(voro_out_filename).molecule
        # Nodes become dummy "X" species carrying the node radius.
        species = ["X"] * len(voro_node_mol.sites)
        coords = []
        prop = []
        for site in voro_node_mol.sites:
            coords.append(list(site.coords))
            prop.append(site.properties['voronoi_radius'])
        lattice = Lattice.from_lengths_and_angles(
            structure.lattice.abc, structure.lattice.angles)
        vor_node_struct = Structure(
            lattice, species, coords, coords_are_cartesian=True,
            to_unit_cell=True, site_properties={"voronoi_radius": prop})
        return vor_node_struct
@requires(zeo_found,
          "get_voronoi_nodes requires Zeo++ cython extension to be "
          "installed. Please contact developers of Zeo++ to obtain it.")
def get_free_sphere_params(structure, rad_dict=None, probe_rad=0.1):
    """
    Compute the free-sphere parameters of the input structure.
    Calls Zeo++ for the analysis.

    Args:
        structure: pymatgen.core.structure.Structure
        rad_dict (optional): Dictionary of radii of elements in structure.
            If not given, Zeo++ default values are used.
            Note: Zeo++ uses atomic radii of elements.
            For ionic structures, pass rad_dict with ionic radii
        probe_rad (optional): Sampling probe radius in Angstroms. Default is
            0.1 A

    Returns:
        dict with keys 'inc_sph_max_dia', 'free_sph_max_dia' and
        'inc_sph_along_free_sph_path_max_dia' (floats parsed from the
        Zeo++ .res output).
    """
    with ScratchDir('.'):
        name = "temp_zeo1"
        zeo_inp_filename = name + ".cssr"
        ZeoCssr(structure).write_file(zeo_inp_filename)
        rad_file = None
        rad_flag = False
        if rad_dict:
            rad_file = name + ".rad"
            rad_flag = True
            with open(rad_file, 'w+') as fp:
                for el in rad_dict.keys():
                    fp.write("{} {}\n".format(el, rad_dict[el].real))
        atmnet = AtomNetwork.read_from_CSSR(
            zeo_inp_filename, rad_flag=rad_flag, rad_file=rad_file)
        out_file = "temp.res"
        atmnet.calculate_free_sphere_parameters(out_file)
        if os.path.isfile(out_file) and os.path.getsize(out_file) > 0:
            with open(out_file, "rt") as fp:
                output = fp.readline()
        else:
            output = ""
        # First token of the .res line is a label; the next three are the
        # diameters we care about.
        fields = [val.strip() for val in output.split()][1:4]
        if len(fields) == 3:
            fields = [float(field) for field in fields]
            free_sphere_params = {'inc_sph_max_dia': fields[0],
                                  'free_sph_max_dia': fields[1],
                                  'inc_sph_along_free_sph_path_max_dia': fields[2]}
        # NOTE(review): if the .res output is missing or malformed,
        # free_sphere_params is never bound and the return below raises
        # NameError -- confirm whether an explicit error is intended here.
        return free_sphere_params
# Deprecated. Not needed anymore
def get_void_volume_surfarea(structure, rad_dict=None, chan_rad=0.3,
                             probe_rad=0.1):
    """
    Computes the volume and surface area of isolated void using Zeo++.
    Useful to compute the volume and surface area of vacant site.

    Args:
        structure: pymatgen Structure containing vacancy
        rad_dict(optional): Dictionary with short name of elements and their
            radii.
        chan_rad(optional): Minimum channel Radius.
        probe_rad(optional): Probe radius for Monte Carlo sampling.

    Returns:
        (volume, surface_area) floats for the isolated void; each is -1.0
        when zero, or more than one, pocket is found.

    Raises:
        ValueError: if the Zeo++ output stream contains no pocket data.
    """
    with ScratchDir('.'):
        name = "temp_zeo"
        zeo_inp_filename = name + ".cssr"
        ZeoCssr(structure).write_file(zeo_inp_filename)
        rad_file = None
        if rad_dict:
            rad_file = name + ".rad"
            with open(rad_file, 'w') as fp:
                for el in rad_dict.keys():
                    # Bugfix: each "<element> <radius>" entry must be on its
                    # own line; the original omitted the newline, corrupting
                    # the file whenever rad_dict held more than one element.
                    fp.write("{0} {1}\n".format(el, rad_dict[el]))
        atmnet = AtomNetwork.read_from_CSSR(zeo_inp_filename, True, rad_file)
        # Bugfix: honor the chan_rad parameter (it was accepted but the
        # calls below hard-coded 0.3; default value keeps old behavior).
        vol_str = volume(atmnet, chan_rad, probe_rad, 10000)
        sa_str = surface_area(atmnet, chan_rad, probe_rad, 10000)
        vol = None
        sa = None
        for line in vol_str.split("\n"):
            if "Number_of_pockets" in line:
                fields = line.split()
                # -1.0 marks "not an isolated single void" (0 or >1 pockets).
                if float(fields[1]) > 1:
                    vol = -1.0
                    break
                if float(fields[1]) == 0:
                    vol = -1.0
                    break
                vol = float(fields[3])
        for line in sa_str.split("\n"):
            if "Number_of_pockets" in line:
                fields = line.split()
                if float(fields[1]) > 1:
                    # raise ValueError("Too many voids")
                    sa = -1.0
                    break
                if float(fields[1]) == 0:
                    sa = -1.0
                    break
                sa = float(fields[3])
        # Bugfix: compare against None explicitly; a legitimate 0.0 volume
        # or surface area previously raised a spurious error because the
        # original used truthiness ("if not vol or not sa").
        if vol is None or sa is None:
            raise ValueError("Error in zeo++ output stream")
        return vol, sa
| |
from __future__ import absolute_import
import time
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse
from django.test import TestCase, RequestFactory
from django.utils import unittest
from django.views.generic import View, TemplateView, RedirectView
from . import views
class SimpleView(View):
    """
    A simple view with a docstring.
    """
    # Fixture: minimal GET-only view used by the tests below.
    def get(self, request):
        return HttpResponse('This is a simple view')
class SimplePostView(SimpleView):
    # Fixture: reuses the GET handler so POST is also allowed.
    post = SimpleView.get
class PostOnlyView(View):
    # Fixture: a view that only defines a POST handler.
    def post(self, request):
        return HttpResponse('This view only accepts POST')
class CustomizableView(SimpleView):
    # Fixture: class attribute that may be overridden via as_view(parameter=...).
    parameter = {}
def decorator(view):
    """Mark *view* as decorated by setting an ``is_decorated`` flag on it."""
    setattr(view, 'is_decorated', True)
    return view
class DecoratedDispatchView(SimpleView):
    # Fixture: dispatch() wrapped by the marker decorator above.
    @decorator
    def dispatch(self, request, *args, **kwargs):
        return super(DecoratedDispatchView, self).dispatch(request, *args, **kwargs)
class AboutTemplateView(TemplateView):
    # Fixture: supplies the template via get_template_names().
    def get(self, request):
        return self.render_to_response({})

    def get_template_names(self):
        return ['generic_views/about.html']
class AboutTemplateAttributeView(TemplateView):
    # Fixture: supplies the template via the template_name attribute.
    template_name = 'generic_views/about.html'

    def get(self, request):
        return self.render_to_response(context={})
class InstanceView(View):
    # Fixture: returns the view instance itself so tests can inspect it.
    def get(self, request):
        return self
class ViewTest(unittest.TestCase):
rf = RequestFactory()
    def _assert_simple(self, response):
        # Shared helper: response must be SimpleView's canonical 200 answer.
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'This is a simple view')
    def test_no_init_kwargs(self):
        """
        Test that a view can't be accidentally instantiated before deployment
        """
        try:
            # Unknown constructor kwargs must be rejected.
            view = SimpleView(key='value').as_view()
            self.fail('Should not be able to instantiate a view')
        except AttributeError:
            pass
    def test_no_init_args(self):
        """
        Test that a view can't be accidentally instantiated before deployment
        """
        try:
            # as_view() only accepts keyword arguments.
            view = SimpleView.as_view('value')
            self.fail('Should not be able to use non-keyword arguments instantiating a view')
        except TypeError:
            pass
def test_pathological_http_method(self):
"""
The edge case of a http request that spoofs an existing method name is caught.
"""
self.assertEqual(SimpleView.as_view()(
self.rf.get('/', REQUEST_METHOD='DISPATCH')
).status_code, 405)
def test_get_only(self):
"""
Test a view which only allows GET doesn't allow other methods.
"""
self._assert_simple(SimpleView.as_view()(self.rf.get('/')))
self.assertEqual(SimpleView.as_view()(self.rf.post('/')).status_code, 405)
self.assertEqual(SimpleView.as_view()(
self.rf.get('/', REQUEST_METHOD='FAKE')
).status_code, 405)
def test_get_and_head(self):
"""
Test a view which supplies a GET method also responds correctly to HEAD.
"""
self._assert_simple(SimpleView.as_view()(self.rf.get('/')))
response = SimpleView.as_view()(self.rf.head('/'))
self.assertEqual(response.status_code, 200)
def test_head_no_get(self):
"""
Test a view which supplies no GET method responds to HEAD with HTTP 405.
"""
response = PostOnlyView.as_view()(self.rf.head('/'))
self.assertEqual(response.status_code, 405)
def test_get_and_post(self):
"""
Test a view which only allows both GET and POST.
"""
self._assert_simple(SimplePostView.as_view()(self.rf.get('/')))
self._assert_simple(SimplePostView.as_view()(self.rf.post('/')))
self.assertEqual(SimplePostView.as_view()(
self.rf.get('/', REQUEST_METHOD='FAKE')
).status_code, 405)
def test_invalid_keyword_argument(self):
"""
Test that view arguments must be predefined on the class and can't
be named like a HTTP method.
"""
# Check each of the allowed method names
for method in SimpleView.http_method_names:
kwargs = dict(((method, "value"),))
self.assertRaises(TypeError, SimpleView.as_view, **kwargs)
# Check the case view argument is ok if predefined on the class...
CustomizableView.as_view(parameter="value")
# ...but raises errors otherwise.
self.assertRaises(TypeError, CustomizableView.as_view, foobar="value")
def test_calling_more_than_once(self):
"""
Test a view can only be called once.
"""
request = self.rf.get('/')
view = InstanceView.as_view()
self.assertNotEqual(view(request), view(request))
def test_class_attributes(self):
"""
Test that the callable returned from as_view() has proper
docstring, name and module.
"""
self.assertEqual(SimpleView.__doc__, SimpleView.as_view().__doc__)
self.assertEqual(SimpleView.__name__, SimpleView.as_view().__name__)
self.assertEqual(SimpleView.__module__, SimpleView.as_view().__module__)
def test_dispatch_decoration(self):
"""
Test that attributes set by decorators on the dispatch method
are also present on the closure.
"""
self.assertTrue(DecoratedDispatchView.as_view().is_decorated)
def test_options(self):
"""
Test that views respond to HTTP OPTIONS requests with an Allow header
appropriate for the methods implemented by the view class.
"""
request = self.rf.options('/')
view = SimpleView.as_view()
response = view(request)
self.assertEqual(200, response.status_code)
self.assertTrue(response['Allow'])
def test_options_for_get_view(self):
"""
Test that a view implementing GET allows GET and HEAD.
"""
request = self.rf.options('/')
view = SimpleView.as_view()
response = view(request)
self._assert_allows(response, 'GET', 'HEAD')
def test_options_for_get_and_post_view(self):
"""
Test that a view implementing GET and POST allows GET, HEAD, and POST.
"""
request = self.rf.options('/')
view = SimplePostView.as_view()
response = view(request)
self._assert_allows(response, 'GET', 'HEAD', 'POST')
def test_options_for_post_view(self):
"""
Test that a view implementing POST allows POST.
"""
request = self.rf.options('/')
view = PostOnlyView.as_view()
response = view(request)
self._assert_allows(response, 'POST')
def _assert_allows(self, response, *expected_methods):
"Assert allowed HTTP methods reported in the Allow response header"
response_allows = set(response['Allow'].split(', '))
self.assertEqual(set(expected_methods + ('OPTIONS',)), response_allows)
class TemplateViewTest(TestCase):
    """Exercise ``TemplateView``: template selection (method, attribute, or
    as_view argument), context population from URL kwargs, and caching."""
    urls = 'regressiontests.generic_views.urls'
    rf = RequestFactory()
    def _assert_about(self, response):
        # Responses are lazy TemplateResponses; render before inspecting.
        response.render()
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, '<h1>About</h1>')
    def test_get(self):
        """
        Test a view that simply renders a template on GET
        """
        self._assert_about(AboutTemplateView.as_view()(self.rf.get('/about/')))
    def test_head(self):
        """
        Test a TemplateView responds correctly to HEAD
        """
        response = AboutTemplateView.as_view()(self.rf.head('/about/'))
        self.assertEqual(response.status_code, 200)
    def test_get_template_attribute(self):
        """
        Test a view that renders a template on GET with the template name as
        an attribute on the class.
        """
        self._assert_about(AboutTemplateAttributeView.as_view()(self.rf.get('/about/')))
    def test_get_generic_template(self):
        """
        Test a completely generic view that renders a template on GET
        with the template name as an argument at instantiation.
        """
        self._assert_about(TemplateView.as_view(template_name='generic_views/about.html')(self.rf.get('/about/')))
    def test_template_name_required(self):
        """
        A template view must provide a template name
        """
        self.assertRaises(ImproperlyConfigured, self.client.get, '/template/no_template/')
    def test_template_params(self):
        """
        A generic template view passes kwargs as context.
        """
        response = self.client.get('/template/simple/bar/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['foo'], 'bar')
        self.assertTrue(isinstance(response.context['view'], View))
    def test_extra_template_params(self):
        """
        A template view can be customized to return extra context.
        """
        response = self.client.get('/template/custom/bar/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['foo'], 'bar')
        self.assertEqual(response.context['key'], 'value')
        self.assertTrue(isinstance(response.context['view'], View))
    def test_cached_views(self):
        """
        A template view can be cached
        """
        # NOTE(review): relies on real wall-clock sleeps against a short cache
        # timeout configured in the test URLconf -- inherently timing-sensitive.
        response = self.client.get('/template/cached/bar/')
        self.assertEqual(response.status_code, 200)
        time.sleep(1.0)
        response2 = self.client.get('/template/cached/bar/')
        self.assertEqual(response2.status_code, 200)
        self.assertEqual(response.content, response2.content)
        time.sleep(2.0)
        # Let the cache expire and test again
        response2 = self.client.get('/template/cached/bar/')
        self.assertEqual(response2.status_code, 200)
        self.assertNotEqual(response.content, response2.content)
class RedirectViewTest(unittest.TestCase):
    """Exercise ``RedirectView``: 410 without a URL, permanent (301) vs
    temporary (302) redirects, query-string propagation, URL parameter
    substitution, and redirect behavior across all HTTP methods."""
    rf = RequestFactory()
    def test_no_url(self):
        "Without any configuration, returns HTTP 410 GONE"
        response = RedirectView.as_view()(self.rf.get('/foo/'))
        self.assertEqual(response.status_code, 410)
    def test_permanent_redirect(self):
        "Default is a permanent redirect"
        response = RedirectView.as_view(url='/bar/')(self.rf.get('/foo/'))
        self.assertEqual(response.status_code, 301)
        self.assertEqual(response['Location'], '/bar/')
    def test_temporary_redirect(self):
        "Permanent redirects are an option"
        response = RedirectView.as_view(url='/bar/', permanent=False)(self.rf.get('/foo/'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response['Location'], '/bar/')
    def test_include_args(self):
        "GET arguments can be included in the redirected URL"
        response = RedirectView.as_view(url='/bar/')(self.rf.get('/foo/'))
        self.assertEqual(response.status_code, 301)
        self.assertEqual(response['Location'], '/bar/')
        # query_string=True forwards the original query string.
        response = RedirectView.as_view(url='/bar/', query_string=True)(self.rf.get('/foo/?pork=spam'))
        self.assertEqual(response.status_code, 301)
        self.assertEqual(response['Location'], '/bar/?pork=spam')
    def test_include_urlencoded_args(self):
        "GET arguments can be URL-encoded when included in the redirected URL"
        response = RedirectView.as_view(url='/bar/', query_string=True)(
            self.rf.get('/foo/?unicode=%E2%9C%93'))
        self.assertEqual(response.status_code, 301)
        self.assertEqual(response['Location'], '/bar/?unicode=%E2%9C%93')
    def test_parameter_substitution(self):
        "Redirection URLs can be parameterized"
        response = RedirectView.as_view(url='/bar/%(object_id)d/')(self.rf.get('/foo/42/'), object_id=42)
        self.assertEqual(response.status_code, 301)
        self.assertEqual(response['Location'], '/bar/42/')
    def test_redirect_POST(self):
        "Default is a permanent redirect"
        response = RedirectView.as_view(url='/bar/')(self.rf.post('/foo/'))
        self.assertEqual(response.status_code, 301)
        self.assertEqual(response['Location'], '/bar/')
    def test_redirect_HEAD(self):
        "Default is a permanent redirect"
        response = RedirectView.as_view(url='/bar/')(self.rf.head('/foo/'))
        self.assertEqual(response.status_code, 301)
        self.assertEqual(response['Location'], '/bar/')
    def test_redirect_OPTIONS(self):
        "Default is a permanent redirect"
        response = RedirectView.as_view(url='/bar/')(self.rf.options('/foo/'))
        self.assertEqual(response.status_code, 301)
        self.assertEqual(response['Location'], '/bar/')
    def test_redirect_PUT(self):
        "Default is a permanent redirect"
        response = RedirectView.as_view(url='/bar/')(self.rf.put('/foo/'))
        self.assertEqual(response.status_code, 301)
        self.assertEqual(response['Location'], '/bar/')
    def test_redirect_DELETE(self):
        "Default is a permanent redirect"
        response = RedirectView.as_view(url='/bar/')(self.rf.delete('/foo/'))
        self.assertEqual(response.status_code, 301)
        self.assertEqual(response['Location'], '/bar/')
    def test_redirect_when_meta_contains_no_query_string(self):
        "regression for #16705"
        # we can't use self.rf.get because it always sets QUERY_STRING
        response = RedirectView.as_view(url='/bar/')(self.rf.request(PATH_INFO='/foo/'))
        self.assertEqual(response.status_code, 301)
class GetContextDataTest(unittest.TestCase):
    """Verify that ``get_context_data`` merges kwargs with values supplied by
    parent classes, and that kwargs win on key collisions."""
    def test_get_context_data_super(self):
        test_view = views.CustomContextView()
        context = test_view.get_context_data(kwarg_test='kwarg_value')
        # the test_name key is inserted by the test classes parent
        self.assertTrue('test_name' in context)
        self.assertEqual(context['kwarg_test'], 'kwarg_value')
        self.assertEqual(context['custom_key'], 'custom_value')
        # test that kwarg overrides values assigned higher up
        context = test_view.get_context_data(test_name='test_value')
        self.assertEqual(context['test_name'], 'test_value')
| |
#!/usr/bin/env python
import roslib,rospy,sys,cv2,time
import numpy as np
roslib.load_manifest('lane_follower')
# from __future__ import print_function
from std_msgs.msg import Int32
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
# Shared converter between ROS Image messages and cv2/numpy arrays.
bridge = CvBridge()
# Publishes the steering/command code computed per frame (see callback()).
pub = rospy.Publisher('lane_detection', Int32, queue_size=1) #ros-lane-detection
# Publishes the input frame annotated with the detected lane lines.
pub_image = rospy.Publisher('lane_detection_image',Image,queue_size=1)
def callback(data):
    """Detect lane markings in one camera frame and publish a command code.

    Publishes an Int32 on ``lane_detection``:
      * a clamped steering error in [-150, 150] when both lanes are visible,
      * 152 = turn right, 153 = turn left, 154 = stop line, 155 = no line.
    Also republishes the frame with the detected lines drawn on
    ``lane_detection_image``.

    :param data: incoming sensor_msgs/Image frame.
    """
    rospy.loginfo("ciao")
    # convert image to cv2 standard format
    img = bridge.imgmsg_to_cv2(data)
    # start time
    start_time = cv2.getTickCount()
    # Median filter to remove noise (comment previously said "Gaussian",
    # but the call is medianBlur).
    img = cv2.medianBlur(img,5)
    gray = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
    # print img.shape = (200,350,3)
    rows,cols,channels = img.shape
    # ROI: drop the top 10 rows of the frame.
    roi_mask = np.zeros(img.shape,dtype=np.uint8)
    roi_mask[10:rows,0:cols] = 255
    street = cv2.bitwise_and(img,roi_mask)
    # Single-channel ROI masks for the stop line and the two lane regions.
    stop_roi_mask = np.zeros(gray.shape,dtype=np.uint8)
    stop_roi_mask[150:rows,150:250] = 255
    right_roi_mask = np.zeros(gray.shape,dtype=np.uint8)
    right_roi_mask[150:rows,200:360] = 255
    # FIX: the third positional argument of cv2.bitwise_and is ``dst``, not
    # ``mask`` -- the ROI mask must be passed as the mask keyword.
    right_roi = cv2.bitwise_and(img,img,mask=right_roi_mask)
    left_roi_mask = np.zeros(gray.shape,dtype=np.uint8)
    left_roi_mask[150:rows,0:200] = 255
    left_roi = cv2.bitwise_and(img,img,mask=left_roi_mask)
    # define range of color in HSV
    hsv = cv2.cvtColor(street,cv2.COLOR_RGB2HSV)
    sensitivity = 120 # range of sensitivity=[90,150]
    lower_white = np.array([0,0,255-sensitivity])
    upper_white = np.array([255,sensitivity,255])
    white_mask = cv2.inRange(hsv,lower_white,upper_white)
    white_mask = cv2.erode(white_mask, None, iterations=2)
    white_mask = cv2.dilate(white_mask, None, iterations=2)
    # Red appears in two HSV hue ranges (wrap-around), so two masks are built
    # and OR-ed together.
    lower_red = np.array([150,70,50])#150
    upper_red = np.array([200,255,255])
    lower_red2 = np.array([0,100,100])
    upper_red2 = np.array([9,255,255])#10
    red_mask1 = cv2.inRange(hsv,lower_red,upper_red)
    red_mask1 = cv2.erode(red_mask1, None, iterations=2)
    red_mask1 = cv2.dilate(red_mask1, None, iterations=2)
    red_mask2 = cv2.inRange(hsv, lower_red2, upper_red2)
    red_mask2 = cv2.erode(red_mask2, None, iterations=2)
    red_mask2 = cv2.dilate(red_mask2, None, iterations=2)
    red_mask = cv2.bitwise_or(red_mask1,red_mask2)
    lower_yellow = np.array([0,100,100]) #0,100,100
    upper_yellow = np.array([40,255,255]) #30,255,255
    yellow_mask = cv2.inRange(hsv,lower_yellow,upper_yellow)
    yellow_mask = cv2.erode(yellow_mask, None, iterations=2)
    yellow_mask = cv2.dilate(yellow_mask, None, iterations=2)
    # mask AND original img
    whitehsvthresh = cv2.bitwise_and(right_roi,right_roi,mask=white_mask)
    yellowhsvthresh = cv2.bitwise_and(street,street,mask=yellow_mask)
    # FIX: use the combined red mask; red_mask was computed above but the
    # original code thresholded with red_mask1 only, discarding the second
    # red hue range.
    redhsvthresh = cv2.bitwise_and(street,street,mask=red_mask)
    # Canny Edge Detection
    right_edges = cv2.Canny(whitehsvthresh,100,200)
    left_edges = cv2.Canny(yellowhsvthresh,100,200)
    right_edges = cv2.bitwise_and(right_edges,right_roi_mask)
    left_edges = cv2.bitwise_and(left_edges,left_roi_mask)
    red_edges_hsv = cv2.Canny(redhsvthresh,100,200)
    red_edges = cv2.bitwise_and(red_edges_hsv,stop_roi_mask)
    # Standard Hough Transform
    right_lines = cv2.HoughLines(right_edges,0.8,np.pi/180,35)
    left_lines = cv2.HoughLines(left_edges,0.8,np.pi/180,30)
    red_lines = cv2.HoughLines(red_edges,1,np.pi/180,40)
    # Reference point: bottom-center of the frame.
    xm = cols/2
    ym = rows
    # Draw right lane
    x = []
    i = 0
    if right_lines is not None:
        right_lines = np.array(right_lines[0])
        for rho, theta in right_lines:
            a=np.cos(theta)
            b=np.sin(theta)
            x0,y0=a*rho,b*rho
            # Intersect each Hough line with the horizontal y3 = 140.
            y3 = 140
            x3 = int(x0+((y0-y3)*np.sin(theta)/np.cos(theta)))
            x.insert(i,x3)
            i += 1  # FIX: was the no-op statement ``i+1``
            pt1=(int(x0+1000*(-b)),int(y0+1000*(a)))
            pt2=(int(x0-1000*(-b)),int(y0-1000*(a)))
            cv2.line(img,pt1,pt2,(255,0,0),2)
    if len(x) != 0:
        # Nearest positive intersection -> distance kr to the right lane.
        xmin = x[0]
        for k in range(0,len(x)):
            if x[k] < xmin and x[k] > 0:
                xmin = x[k]
        kr = int(np.sqrt(((xmin-xm)*(xmin-xm))+((y3-ym)*(y3-ym))))
    else:
        kr = 0
        xmin = 0
    # Draw left lane
    x = []
    i = 0
    if left_lines is not None:
        left_lines = np.array(left_lines[0])
        for rho, theta in left_lines:
            a=np.cos(theta)
            b=np.sin(theta)
            x0,y0=a*rho,b*rho
            y3 = 140
            x3 = int(x0+((y0-y3)*np.sin(theta)/np.cos(theta)))
            x.insert(i,x3)
            i += 1  # FIX: was the no-op statement ``i+1``
            pt1=(int(x0+1000*(-b)),int(y0+1000*(a)))
            pt2=(int(x0-1000*(-b)),int(y0-1000*(a)))
            cv2.line(img,pt1,pt2,(0,255,0),2)
    if len(x) != 0:
        # Farthest in-frame intersection -> distance kl to the left lane.
        xmax = x[0]
        for k in range(0,len(x)):
            if x[k] > xmax and x[k]<cols:
                xmax = x[k]
        kl = int(np.sqrt(((xmax-xm)*(xmax-xm))+((y3-ym)*(y3-ym))))
    else:
        kl = 0
        xmax = 0
    # Signed steering error: positive pulls one way, negative the other.
    error = kr - kl
    #end time
    end_time = cv2.getTickCount()
    time_count= (end_time - start_time) / cv2.getTickFrequency()
    # rospy.loginfo(time_count)
    if red_lines is not None:
        rospy.loginfo("STOP")
        message = 154 #stop
    elif right_lines is not None and left_lines is not None:
        rospy.loginfo(error)
        # Clamp the error to the protocol range [-150, 150].
        if error > 150:
            error = 150
        elif error < -150:
            error = -150
        message = error
    elif left_lines is not None and right_lines is None:
        rospy.loginfo("Turn Right")
        rospy.loginfo(kl)
        message = 152 #turn right
    elif left_lines is None and right_lines is not None:
        rospy.loginfo("Turn Left")
        message = 153 #turn left
    elif left_lines is None and right_lines is None:
        rospy.loginfo("No line")
        message = 155 #no line found
    else:
        message = 155 #no line found
    pub.publish(message)
    #print(img)
    pub_image.publish(bridge.cv2_to_imgmsg(img, "rgb8"))
def lane_detection():
    """Initialize the ROS node, subscribe to the camera topic, and spin.

    Each incoming frame is handed to :func:`callback`; queue_size=1 keeps only
    the newest frame.
    """
    # NOTE(review): ROS graph names conventionally allow only alphanumerics
    # and underscores -- confirm this rospy version accepts the hyphen in
    # 'lane-detection' before renaming it.
    rospy.init_node('lane-detection',anonymous=True)
    # buff_size presumably sized large (16 MB) to hold full image messages --
    # TODO confirm against camera resolution.
    rospy.Subscriber("camera/color/image_raw",Image,callback,queue_size=1,buff_size=2**24)
    try:
        # FIX: log message typo -- was "Enetering ROS Spin".
        rospy.loginfo("Entering ROS Spin")
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
# Script entry point: run the node until ROS shuts it down; a ROS-initiated
# interrupt is expected and ignored.
if __name__ == '__main__':
    try:
        lane_detection()
    except rospy.ROSInterruptException:
        pass
| |
# -*- coding: utf-8 -*-
import httplib as http
from flask import request
from modularodm.exceptions import ValidationError, ValidationValueError
from framework import forms
from framework import status
from framework.auth import cas
from framework.auth import User, get_user
from framework.auth.core import generate_confirm_token
from framework.auth.decorators import collect_auth, must_be_logged_in
from framework.auth.forms import PasswordForm, SetEmailAndPasswordForm
from framework.auth.signals import user_registered
from framework.auth.utils import validate_email
from framework.exceptions import HTTPError
from framework.flask import redirect # VOL-aware redirect
from framework.sessions import session
from framework.transactions.handlers import no_auto_transaction
from website.util.time import get_timestamp, throttle_period_expired
from website import mails
from website import language
from website import security
from website import settings
from website.models import Node
from website.profile import utils as profile_utils
from website.project.decorators import (must_have_permission, must_be_valid_project,
must_not_be_registration, must_be_contributor_or_public, must_be_contributor)
from website.project.model import has_anonymous_link
from website.project.signals import unreg_contributor_added, contributor_added
from website.util import web_url_for, is_json_request
from website.util.permissions import expand_permissions, ADMIN
from website.util import sanitize
@collect_auth
@must_be_valid_project(retractions_valid=True)
def get_node_contributors_abbrev(auth, node, **kwargs):
    """Return an abbreviated, display-ready list of the node's visible
    contributors, e.g. rendered as "A, B, C & <others_count> others".

    :param Auth auth: Consolidated authorization
    :param Node node: The project (injected by decorator)
    :param kwargs: may contain 'max_count' (default 3) and 'user_ids'
        (restrict output to those visible contributors)
    :raises: HTTPError(403) if the node is viewed anonymously or is not viewable
    :return: dict with 'contributors' (each with a 'separator' for display)
        and 'others_count' ('' when nothing was truncated)
    """
    anonymous = has_anonymous_link(node, auth)
    formatter = 'surname'
    max_count = kwargs.get('max_count', 3)
    if 'user_ids' in kwargs:
        users = [
            User.load(user_id) for user_id in kwargs['user_ids']
            if user_id in node.visible_contributor_ids
        ]
    else:
        users = node.visible_contributors
    if anonymous or not node.can_view(auth):
        raise HTTPError(http.FORBIDDEN)
    contributors = []
    n_contributors = len(users)
    others_count = ''
    for index, user in enumerate(users[:max_count]):
        if index == max_count - 1 and len(users) > max_count:
            separator = ' &'
            # FIX: was hard-coded ``n_contributors - 3``, which is wrong
            # whenever a caller passes a non-default max_count.
            others_count = str(n_contributors - max_count)
        elif index == len(users) - 1:
            separator = ''
        elif index == len(users) - 2:
            separator = ' &'
        else:
            separator = ','
        contributor = user.get_summary(formatter)
        contributor['user_id'] = user._primary_key
        contributor['separator'] = separator
        contributors.append(contributor)
    return {
        'contributors': contributors,
        'others_count': others_count,
    }
@collect_auth
@must_be_valid_project(retractions_valid=True)
def get_contributors(auth, node, **kwargs):
    """Return the node's visible contributors, serialized.

    An optional ``limit`` query parameter caps the number returned; when a
    limit is applied, the response also carries a ``more`` count of the
    remaining contributors.

    :raises: HTTPError(400) on a non-integer limit; HTTPError(403) when the
        node is viewed anonymously or is not viewable by *auth*.
    """
    raw_limit = request.args.get('limit')
    limit = None
    if raw_limit:
        try:
            limit = int(request.args['limit'])
        except ValueError:
            raise HTTPError(http.BAD_REQUEST, data=dict(
                message_long='Invalid value for "limit": {}'.format(request.args['limit'])
            ))
    if has_anonymous_link(node, auth) or not node.can_view(auth):
        raise HTTPError(http.FORBIDDEN)
    # Slicing with limit=None returns the full contributor list.
    contribs = profile_utils.serialize_contributors(
        node.visible_contributors[0:limit],
        node=node,
    )
    if limit:
        return {
            'contributors': contribs,
            'more': max(0, len(node.visible_contributors) - limit)
        }
    return {'contributors': contribs}
@must_be_logged_in
@must_be_valid_project
def get_contributors_from_parent(auth, node, **kwargs):
    """Serialize the visible contributors of *node*'s parent project.

    :raises: HTTPError(400) when the node has no parent; HTTPError(403) when
        *auth* may not view the node.
    """
    parent = node.parent_node
    if not parent:
        raise HTTPError(http.BAD_REQUEST)
    if not node.can_view(auth):
        raise HTTPError(http.FORBIDDEN)
    serialized = [
        profile_utils.add_contributor_json(user)
        for user in parent.visible_contributors
    ]
    return {'contributors': serialized}
def deserialize_contributors(node, user_dicts, auth, validate=False):
    """View helper that returns a list of User objects from a list of
    serialized users (dicts). The users in the list may be registered or
    unregistered users.

    e.g. ``[{'id': 'abc123', 'registered': True, 'fullname': ..},
    {'id': None, 'registered': False, 'fullname'...},
    {'id': '123ab', 'registered': False, 'fullname': ...}]``

    If a dict represents an unregistered user without an ID, creates a new
    unregistered User record.

    :param Node node: The node to add contributors to
    :param list(dict) user_dicts: List of serialized users in the format above.
    :param Auth auth:
    :param bool validate: Whether to validate and sanitize fields (if necessary)
    :return: list of dicts with keys 'user', 'visible', 'permissions', suitable
        for ``Node.add_contributors``
    :raises ValidationValueError: on an empty (post-sanitize) fullname
    :raises ValidationError: on an invalid email when ``validate`` is True
    """
    # Add the registered contributors
    contribs = []
    for contrib_dict in user_dicts:
        fullname = contrib_dict['fullname']
        visible = contrib_dict['visible']
        email = contrib_dict.get('email')
        if validate is True:
            # Validate and sanitize inputs as needed. Email will raise error if invalid.
            # TODO Edge case bug: validation and saving are performed in same loop, so all in list
            # up to the invalid entry will be saved. (communicate to the user what needs to be retried)
            fullname = sanitize.strip_html(fullname)
            if not fullname:
                raise ValidationValueError('Full name field cannot be empty')
            if email:
                validate_email(email)  # Will raise a ValidationError if email invalid
        if contrib_dict['id']:
            # Existing (registered or unregistered) user referenced by id.
            contributor = User.load(contrib_dict['id'])
        else:
            try:
                contributor = User.create_unregistered(
                    fullname=fullname,
                    email=email)
                contributor.save()
            except ValidationValueError:
                ## FIXME: This suppresses an exception if ID not found & new validation fails; get_user will return None
                contributor = get_user(email=email)
        # Add unclaimed record if necessary
        if (not contributor.is_registered
                and node._primary_key not in contributor.unclaimed_records):
            contributor.add_unclaimed_record(node=node, referrer=auth.user,
                given_name=fullname,
                email=email)
            contributor.save()
            # Triggers finalize_invitation below (claim email to the invitee).
            unreg_contributor_added.send(node, contributor=contributor,
                auth=auth)
        contribs.append({
            'user': contributor,
            'visible': visible,
            'permissions': expand_permissions(contrib_dict.get('permission'))
        })
    return contribs
# Signal listener: when an unregistered contributor is added and their
# unclaimed record carries an email, send them a claim invitation.
@unreg_contributor_added.connect
def finalize_invitation(node, contributor, auth):
    record = contributor.get_unclaimed_record(node._primary_key)
    if record['email']:
        send_claim_email(record['email'], contributor, node, notify=True)
@must_be_valid_project
@must_have_permission(ADMIN)
@must_not_be_registration
def project_contributors_post(auth, node, **kwargs):
    """Add contributors to a node and, optionally, to its child nodes.

    Expects JSON with 'users' (serialized contributor dicts) and 'node_ids'
    (children to add the same contributors to).

    :raises: HTTPError(400) when either key is missing
    :return: (payload, 201) with the node's serialized visible contributors,
        or (error payload, 400) on validation failure
    """
    user_dicts = request.json.get('users')
    node_ids = request.json.get('node_ids')
    # FIX: validate presence *before* using node_ids. The original code ran
    # ``node._id in node_ids`` first, so a payload missing 'node_ids' raised
    # TypeError ('in' on None) instead of the intended HTTPError(400).
    if user_dicts is None or node_ids is None:
        raise HTTPError(http.BAD_REQUEST)
    if node._id in node_ids:
        node_ids.remove(node._id)
    # Prepare input data for `Node::add_contributors`
    try:
        contribs = deserialize_contributors(node, user_dicts, auth=auth, validate=True)
    except ValidationError as e:
        return {'status': 400, 'message': e.message}, 400
    node.add_contributors(contributors=contribs, auth=auth)
    node.save()
    # Disconnect listener to avoid multiple invite emails
    unreg_contributor_added.disconnect(finalize_invitation)
    for child_id in node_ids:
        child = Node.load(child_id)
        # Only email unreg users once
        try:
            child_contribs = deserialize_contributors(
                child, user_dicts, auth=auth, validate=True
            )
        except ValidationError as e:
            return {'status': 400, 'message': e.message}, 400
        child.add_contributors(contributors=child_contribs, auth=auth)
        child.save()
    # Reconnect listeners
    unreg_contributor_added.connect(finalize_invitation)
    return {
        'status': 'success',
        'contributors': profile_utils.serialize_contributors(
            node.visible_contributors,
            node=node,
        )
    }, 201
@no_auto_transaction
@must_be_valid_project # injects project
@must_have_permission(ADMIN)
@must_not_be_registration
def project_manage_contributors(auth, node, **kwargs):
    """Reorder and remove contributors.

    :param Auth auth: Consolidated authorization
    :param-json list contributors: Ordered list of contributors represented as
        dictionaries of the form:
        {'id': <id>, 'permission': <One of 'read', 'write', 'admin'>}
    :raises: HTTPError(400) if contributors to be removed are not in list
        or if no admin users would remain after changes were applied
    :return: dict, possibly with a 'redirectUrl' when the caller removed
        themselves from the project
    """
    contributors = request.json.get('contributors')
    # Update permissions and order
    try:
        node.manage_contributors(contributors, auth=auth, save=True)
    except ValueError as error:
        raise HTTPError(http.BAD_REQUEST, data={'message_long': error.message})
    # If user has removed herself from project, alert; redirect to user
    # dashboard if node is private, else node dashboard
    if not node.is_contributor(auth.user):
        status.push_status_message(
            'You have removed yourself as a contributor from this project',
            kind='success',
            trust=False
        )
        if node.is_public:
            return {'redirectUrl': node.url}
        return {'redirectUrl': web_url_for('dashboard')}
    # Else if user has revoked her admin permissions, alert and stay on
    # current page
    if not node.has_permission(auth.user, ADMIN):
        status.push_status_message(
            'You have removed your administrative privileges for this project',
            kind='success',
            trust=False
        )
    # Else stay on current page
    return {}
@must_be_valid_project # returns project
@must_be_contributor
@must_not_be_registration
def project_remove_contributor(auth, **kwargs):
    """Remove a contributor from a list of nodes.

    Expects JSON with 'contributorID' and 'nodeIDs'; the first node id is
    treated as the parent for redirect purposes.

    :param Auth auth: Consolidated authorization
    :raises: HTTPError(400) if contributors to be removed are not in list
        or if no admin users would remain after changes were applied
    :return: dict, with a 'redirectUrl' when the caller removed themselves
        from the parent node
    """
    contributor_id = request.get_json()['contributorID']
    node_ids = request.get_json()['nodeIDs']
    contributor = User.load(contributor_id)
    if contributor is None:
        raise HTTPError(http.BAD_REQUEST, data={'message_long': 'Contributor not found.'})
    redirect_url = {}
    parent_id = node_ids[0]
    for node_id in node_ids:
        # Update permissions and order
        node = Node.load(node_id)
        # Forbidden unless user is removing herself
        if not node.has_permission(auth.user, 'admin'):
            if auth.user != contributor:
                raise HTTPError(http.FORBIDDEN)
        # Never remove the last bibliographic (visible) contributor.
        if len(node.visible_contributor_ids) == 1 \
                and node.visible_contributor_ids[0] == contributor._id:
            raise HTTPError(http.FORBIDDEN, data={
                'message_long': 'Must have at least one bibliographic contributor'
            })
        nodes_removed = node.remove_contributor(contributor, auth=auth)
        # remove_contributor returns false if there is not one admin or visible contributor left after the move.
        if not nodes_removed:
            raise HTTPError(http.BAD_REQUEST, data={
                'message_long': 'Could not remove contributor.'})
        # On parent node, if user has removed herself from project, alert; redirect to user
        # dashboard if node is private, else node dashboard
        if not node.is_contributor(auth.user) and node_id == parent_id:
            status.push_status_message(
                'You have removed yourself as a contributor from this project',
                kind='success',
                trust=False
            )
            if node.is_public:
                redirect_url = {'redirectUrl': node.url}
            # Else stay on current page
            else:
                redirect_url = {'redirectUrl': web_url_for('dashboard')}
    return redirect_url
def send_claim_registered_email(claimer, unreg_user, node, throttle=24 * 3600):
    """Ask the referrer to confirm that *claimer* (an existing, registered
    user) may claim the unregistered contributor *unreg_user* on *node*.

    Sends one mail to the referrer with the verification link and one to the
    claimer telling them to wait for the referrer.

    :param User claimer: registered user attempting the claim
    :param User unreg_user: unregistered contributor being claimed
    :param Node node: project the unclaimed record belongs to
    :param int throttle: minimum seconds between claim emails
    :raises: HTTPError(400) when the throttle period has not yet expired
    """
    unclaimed_record = unreg_user.get_unclaimed_record(node._primary_key)
    # roll the valid token for each email, thus user cannot change email and approve a different email address
    timestamp = unclaimed_record.get('last_sent')
    if not throttle_period_expired(timestamp, throttle):
        raise HTTPError(400, data=dict(
            message_long='User account can only be claimed with an existing user once every 24 hours'
        ))
    unclaimed_record['token'] = generate_confirm_token()
    unclaimed_record['claimer_email'] = claimer.username
    unreg_user.save()
    referrer = User.load(unclaimed_record['referrer_id'])
    claim_url = web_url_for(
        'claim_user_registered',
        uid=unreg_user._primary_key,
        pid=node._primary_key,
        token=unclaimed_record['token'],
        _external=True,
    )
    # Send mail to referrer, telling them to forward verification link to claimer
    mails.send_mail(
        referrer.username,
        mails.FORWARD_INVITE_REGISTERED,
        user=unreg_user,
        referrer=referrer,
        node=node,
        claim_url=claim_url,
        fullname=unclaimed_record['name'],
    )
    # Record the send time so the throttle above applies to the next attempt.
    unclaimed_record['last_sent'] = get_timestamp()
    unreg_user.save()
    # Send mail to claimer, telling them to wait for referrer
    mails.send_mail(
        claimer.username,
        mails.PENDING_VERIFICATION_REGISTERED,
        fullname=claimer.fullname,
        referrer=referrer,
        node=node,
    )
def send_claim_email(email, user, node, notify=True, throttle=24 * 3600):
    """Send an email for claiming a user account. Either sends to the given email
    or the referrer's email, depending on the email address provided.

    :param str email: The address given in the claim user form
    :param User user: The User record to claim.
    :param Node node: The node where the user claimed their account.
    :param bool notify: If True and an email is sent to the referrer, an email
        will also be sent to the invited user about their pending verification.
    :param int throttle: Time period (in seconds) after the referrer is
        emailed during which the referrer will not be emailed again.
    :raises: HTTPError(400) when the referrer-path throttle has not expired
    :return: str, the address the claim/forward email was sent to
    """
    claimer_email = email.lower().strip()
    unclaimed_record = user.get_unclaimed_record(node._primary_key)
    referrer = User.load(unclaimed_record['referrer_id'])
    claim_url = user.get_claim_url(node._primary_key, external=True)
    # If given email is the same provided by user, just send to that email
    if unclaimed_record.get('email') == claimer_email:
        mail_tpl = mails.INVITE
        to_addr = claimer_email
        unclaimed_record['claimer_email'] = claimer_email
        user.save()
    else:  # Otherwise have the referrer forward the email to the user
        # roll the valid token for each email, thus user cannot change email and approve a different email address
        timestamp = unclaimed_record.get('last_sent')
        if not throttle_period_expired(timestamp, throttle):
            raise HTTPError(400, data=dict(
                message_long='User account can only be claimed with an existing user once every 24 hours'
            ))
        unclaimed_record['last_sent'] = get_timestamp()
        unclaimed_record['token'] = generate_confirm_token()
        unclaimed_record['claimer_email'] = claimer_email
        user.save()
        # Recompute the URL so it carries the freshly rolled token (the
        # value computed before this branch is stale once 'token' changes).
        claim_url = user.get_claim_url(node._primary_key, external=True)
        if notify:
            pending_mail = mails.PENDING_VERIFICATION
            mails.send_mail(
                claimer_email,
                pending_mail,
                user=user,
                referrer=referrer,
                fullname=unclaimed_record['name'],
                node=node
            )
        mail_tpl = mails.FORWARD_INVITE
        to_addr = referrer.username
    mails.send_mail(
        to_addr,
        mail_tpl,
        user=user,
        referrer=referrer,
        node=node,
        claim_url=claim_url,
        email=claimer_email,
        fullname=unclaimed_record['name']
    )
    return to_addr
# Signal listener: email a registered user when they are added as a
# contributor, subject to a per-node throttle.
@contributor_added.connect
def notify_added_contributor(node, contributor, auth=None, throttle=None):
    """Send a CONTRIBUTOR_ADDED email unless one was sent recently.

    :param Node node: node the contributor was added to
    :param User contributor: the user that was added
    :param Auth auth: authorization of the adding user (used for referrer name)
    :param int throttle: seconds between emails per node; defaults to
        settings.CONTRIBUTOR_ADDED_EMAIL_THROTTLE
    """
    throttle = throttle or settings.CONTRIBUTOR_ADDED_EMAIL_THROTTLE
    # Exclude forks and templates because the user forking/templating the project gets added
    # via 'add_contributor' but does not need to get notified.
    # Only email users for projects, or for components where they are not contributors on the parent node.
    if (contributor.is_registered and not node.template_node and not node.is_fork and
            (not node.parent_node or
            (node.parent_node and not node.parent_node.is_contributor(contributor)))):
        contributor_record = contributor.contributor_added_email_records.get(node._id, {})
        if contributor_record:
            timestamp = contributor_record.get('last_sent', None)
            if timestamp:
                # Skip silently while still inside the throttle window.
                if not throttle_period_expired(timestamp, throttle):
                    return
        else:
            contributor.contributor_added_email_records[node._id] = {}
        mails.send_mail(
            contributor.username,
            mails.CONTRIBUTOR_ADDED,
            user=contributor,
            node=node,
            referrer_name=auth.user.fullname if auth else ''
        )
        # Remember the send time for the throttle check above.
        contributor.contributor_added_email_records[node._id]['last_sent'] = get_timestamp()
        contributor.save()
def verify_claim_token(user, token, pid):
    """View helper that checks that a claim token for a given user and node ID
    is valid. If not valid, throws an error with custom error messages.
    """
    if user.verify_claim_token(token=token, project_id=pid):
        return True
    # Token is invalid: a registered user means the account was already
    # claimed; otherwise just report failure to the caller.
    if user.is_registered:
        raise HTTPError(400, data={
            'message_short': 'User has already been claimed.',
            'message_long': 'Please <a href="/login/">log in</a> to continue.'})
    return False
@collect_auth
@must_be_valid_project
def claim_user_registered(auth, node, **kwargs):
    """View that prompts user to enter their password in order to claim
    contributorship on a project.

    A user must be logged in.

    :raises: HTTPError(400) if the logged-in user is already a contributor
        or the claim token fails verification
    :return: dict with 'form', 'user', 'signOutUrl' (or a redirect)
    """
    current_user = auth.user
    sign_out_url = web_url_for('auth_login', logout=True, next=request.url)
    if not current_user:
        return redirect(sign_out_url)
    # Logged in user should not be a contributor the project
    if node.is_contributor(current_user):
        logout_url = web_url_for('auth_logout', redirect_url=request.url)
        data = {
            'message_short': 'Already a contributor',
            'message_long': ('The logged-in user is already a contributor to this '
                'project. Would you like to <a href="{}">log out</a>?').format(logout_url)
        }
        raise HTTPError(http.BAD_REQUEST, data=data)
    uid, pid, token = kwargs['uid'], kwargs['pid'], kwargs['token']
    unreg_user = User.load(uid)
    if not verify_claim_token(unreg_user, token, pid=node._primary_key):
        raise HTTPError(http.BAD_REQUEST)
    # Store the unreg_user data on the session in case the user registers
    # a new account
    session.data['unreg_user'] = {
        'uid': uid, 'pid': pid, 'token': token
    }
    form = PasswordForm(request.form)
    if request.method == 'POST':
        if form.validate():
            # Password re-entry proves the logged-in user consents to the claim.
            if current_user.check_password(form.password.data):
                node.replace_contributor(old=unreg_user, new=current_user)
                node.save()
                status.push_status_message(
                    'You are now a contributor to this project.',
                    kind='success')
                return redirect(node.url)
            else:
                status.push_status_message(language.LOGIN_FAILED, kind='warning', trust=True)
        else:
            forms.push_errors_to_status(form.errors)
    # GET (or failed POST): render the password form, JSON-serialized for
    # AJAX callers.
    if is_json_request():
        form_ret = forms.utils.jsonify(form)
        user_ret = profile_utils.serialize_user(current_user, full=False)
    else:
        form_ret = form
        user_ret = current_user
    return {
        'form': form_ret,
        'user': user_ret,
        'signOutUrl': sign_out_url
    }
@user_registered.connect
def replace_unclaimed_user_with_registered(user):
    """Listens for the user_registered signal. If unreg_user is stored in the
    session, then the current user is trying to claim themselves as a contributor.
    Replaces the old, unregistered contributor with the newly registered
    account.
    """
    # 'unreg_user' is stashed on the session by claim_user_registered.
    unreg_user_info = session.data.get('unreg_user')
    if unreg_user_info:
        unreg_user = User.load(unreg_user_info['uid'])
        pid = unreg_user_info['pid']
        node = Node.load(pid)
        node.replace_contributor(old=unreg_user, new=user)
        node.save()
        status.push_status_message(
            'Successfully claimed contributor.', kind='success', trust=False)
@collect_auth
def claim_user_form(auth, **kwargs):
    """View for rendering the set password page for a claimed user.
    Must have ``token`` as a querystring argument.
    Renders the set password form, validates it, and sets the user's password.
    """
    uid, pid = kwargs['uid'], kwargs['pid']
    token = request.form.get('token') or request.args.get('token')
    # If user is logged in, redirect to 're-enter password' page
    if auth.logged_in:
        return redirect(web_url_for('claim_user_registered',
            uid=uid, pid=pid, token=token))
    user = User.load(uid)  # The unregistered user
    # user ID is invalid. Unregistered user is not in database
    if not user:
        raise HTTPError(http.BAD_REQUEST)
    # If claim token not valid, redirect to registration page
    if not verify_claim_token(user, token, pid):
        return redirect(web_url_for('auth_login'))
    unclaimed_record = user.unclaimed_records[pid]
    user.fullname = unclaimed_record['name']
    user.update_guessed_names()
    # The email can be the original referrer email if no claimer email has been specified.
    claimer_email = unclaimed_record.get('claimer_email') or unclaimed_record.get('email')
    form = SetEmailAndPasswordForm(request.form, token=token)
    if request.method == 'POST':
        if form.validate():
            username, password = claimer_email, form.password.data
            # Promote the unregistered placeholder to a full account.
            user.register(username=username, password=password)
            # Clear unclaimed records
            user.unclaimed_records = {}
            # One-time key used by CAS below to authenticate the new account.
            user.verification_key = security.random_string(20)
            user.save()
            # Authenticate user and redirect to project page
            node = Node.load(pid)
            status.push_status_message(language.CLAIMED_CONTRIBUTOR.format(node=node),
                kind='success',
                trust=True)
            # Redirect to CAS and authenticate the user with a verification key.
            return redirect(cas.get_login_url(
                web_url_for('user_profile', _absolute=True),
                auto=True,
                username=user.username,
                verification_key=user.verification_key
            ))
        else:
            forms.push_errors_to_status(form.errors)
    return {
        'firstname': user.given_name,
        'email': claimer_email if claimer_email else '',
        'fullname': user.fullname,
        'form': forms.utils.jsonify(form) if is_json_request() else form,
    }
@must_be_valid_project
@must_have_permission(ADMIN)
@must_not_be_registration
def invite_contributor_post(node, **kwargs):
    """API view for inviting an unregistered user. Performs validation, but does not actually invite the user.
    Expects JSON arguments with 'fullname' (required) and email (not required).

    Returns a serialized contributor record on success, or a 400 response
    with a ``message`` when validation fails.
    """
    # BUG FIX: request.json.get('fullname') may be None; the original called
    # .strip() on it directly, turning a missing fullname into an
    # AttributeError (HTTP 500) instead of the intended 400 below.
    fullname = (request.json.get('fullname') or '').strip()
    email = request.json.get('email')
    # Validate and sanitize inputs as needed. Email will raise error if invalid.
    fullname = sanitize.strip_html(fullname)
    if email:
        email = email.lower().strip()
        try:
            validate_email(email)
        except ValidationError as e:
            # NOTE(review): e.message is a Python-2-era attribute -- confirm
            # this codebase still targets an exception type that provides it.
            return {'status': 400, 'message': e.message}, 400
    if not fullname:
        return {'status': 400, 'message': 'Full name field cannot be empty'}, 400
    # Check if email is in the database
    user = get_user(email=email)
    if user:
        if user.is_registered:
            msg = 'User is already in database. Please go back and try your search again.'
            return {'status': 400, 'message': msg}, 400
        elif node.is_contributor(user):
            msg = 'User with this email address is already a contributor to this project.'
            return {'status': 400, 'message': msg}, 400
        else:
            serialized = profile_utils.add_contributor_json(user)
            # use correct display name
            serialized['fullname'] = fullname
            serialized['email'] = email
    else:
        # Create a placeholder profile for the not-yet-registered invitee.
        serialized = profile_utils.serialize_unregistered(fullname, email)
    return {'status': 'success', 'contributor': serialized}
@must_be_contributor_or_public
def claim_user_post(node, **kwargs):
    """View for claiming a user from the X-editable form on a project page.

    Accepts either ``value`` (an email address typed into the form) or
    ``claimerId`` (a logged-in user confirming their identity), plus ``pk``
    identifying the unregistered contributor being claimed.
    """
    reqdata = request.json
    # Unreg user
    user = User.load(reqdata['pk'])
    unclaimed_data = user.get_unclaimed_record(node._primary_key)
    # Submitted through X-editable
    if 'value' in reqdata:  # Submitted email address
        email = reqdata['value'].lower().strip()
        claimer = get_user(email=email)
        # Existing registered account gets a "claim as yourself" email;
        # otherwise the claim link goes to the submitted address.
        if claimer and claimer.is_registered:
            send_claim_registered_email(claimer=claimer, unreg_user=user,
                node=node)
        else:
            send_claim_email(email, user, node, notify=True)
    # TODO(sloria): Too many assumptions about the request data. Just use
    elif 'claimerId' in reqdata:  # User is logged in and confirmed identity
        claimer_id = reqdata['claimerId']
        claimer = User.load(claimer_id)
        send_claim_registered_email(claimer=claimer, unreg_user=user, node=node)
        email = claimer.username
    else:
        raise HTTPError(http.BAD_REQUEST)
    return {
        'status': 'success',
        'email': email,
        'fullname': unclaimed_data['name']
    }
| |
from pydevd_comm import CMD_SET_BREAK, CMD_ADD_EXCEPTION_BREAK
import inspect
from pydevd_constants import STATE_SUSPEND, GetThreadId, DictContains, DictIterItems
from pydevd_file_utils import NormFileToServer, GetFileNameAndBaseFromFile
from pydevd_breakpoints import LineBreakpoint, get_exception_name
import pydevd_vars
import traceback
import pydev_log
from pydevd_frame_utils import add_exception_to_frame, FCode, cached_call, just_raised
# Detect the installed Django version (if any); used below to pick the right
# strategy for extracting template file/line info from render frames.
IS_DJANGO19 = False
IS_DJANGO18 = False
try:
    import django
    version = django.__version__.split('.')
    # NOTE: exact string comparison, so these flags match only 1.9 and 1.8.
    IS_DJANGO19 = version[0] == '1' and version[1] == '9'
    IS_DJANGO18 = version[0] == '1' and version[1] == '8'
except:
    # Django not importable: template-debugging support stays disabled.
    pass
# Marker stored on thread.additionalInfo.suspend_type while suspended in a template.
DJANGO_SUSPEND = 2
class DjangoLineBreakpoint(LineBreakpoint):
    """A line breakpoint set inside a Django template file (not Python source)."""
    def __init__(self, file, line, condition, func_name, expression):
        # Template file path; the LineBreakpoint base only tracks the line.
        self.file = file
        LineBreakpoint.__init__(self, line, condition, func_name, expression)
    def is_triggered(self, template_frame_file, template_frame_line):
        # Hit only when both the rendered template file and line match.
        return self.file == template_frame_file and self.line == template_frame_line
    def __str__(self):
        return "DjangoLineBreakpoint: %s-%d" %(self.file, self.line)
def add_line_breakpoint(plugin, pydb, type, file, line, condition, expression, func_name):
    """Create a template line breakpoint for 'django-line' requests.

    Returns a (breakpoint, registry) pair, or None for any other type.
    """
    if type != 'django-line':
        return None
    bp = DjangoLineBreakpoint(file, line, condition, func_name, expression)
    # Lazily create the per-debugger registries on first use.
    if not hasattr(pydb, 'django_breakpoints'):
        _init_plugin_breaks(pydb)
    return bp, pydb.django_breakpoints
def add_exception_breakpoint(plugin, pydb, type, exception):
    """Install a break-on-exception for Django template errors.

    Returns True when a 'django' exception break was installed, else False.
    """
    if type != 'django':
        return False
    # Lazily create the per-debugger registries on first use.
    if not hasattr(pydb, 'django_exception_break'):
        _init_plugin_breaks(pydb)
    pydb.django_exception_break[exception] = True
    # Re-trace contexts that started before this breakpoint existed.
    pydb.setTracingForUntracedContexts()
    return True
def _init_plugin_breaks(pydb):
pydb.django_exception_break = {}
pydb.django_breakpoints = {}
def remove_exception_breakpoint(plugin, pydb, type, exception):
    """Remove a previously installed 'django' exception breakpoint.

    :return: True when the breakpoint existed and was removed, else False.
    """
    if type != 'django':
        return False
    try:
        del pydb.django_exception_break[exception]
        return True
    # Narrowed from a bare ``except:`` (which also swallowed
    # KeyboardInterrupt/SystemExit): only "registry never initialized" and
    # "breakpoint not present" are expected failures here.
    except (AttributeError, KeyError):
        return False
def get_breakpoints(plugin, pydb, type):
    """Return the template breakpoint registry for 'django-line', else None."""
    if type != 'django-line':
        return None
    return pydb.django_breakpoints
def _inherits(cls, *names):
if cls.__name__ in names:
return True
inherits_node = False
for base in inspect.getmro(cls):
if base.__name__ in names:
inherits_node = True
break
return inherits_node
def _is_django_render_call(frame):
    """Heuristic: is *frame* executing a Django template ``Node.render()``?

    Matches any method named 'render' whose ``self`` subclasses ``Node``,
    excluding TextNode/NodeList (too noisy to stop on). Any inspection error
    is printed and treated as "not a render call".
    """
    try:
        name = frame.f_code.co_name
        if name != 'render':
            return False
        if not DictContains(frame.f_locals, 'self'):
            return False
        cls = frame.f_locals['self'].__class__
        inherits_node = _inherits(cls, 'Node')
        if not inherits_node:
            return False
        clsname = cls.__name__
        return clsname != 'TextNode' and clsname != 'NodeList'
    except:
        traceback.print_exc()
        return False
def _is_django_context_get_call(frame):
    """Heuristic: is *frame* inside a template-context lookup (BaseContext)?"""
    try:
        if not DictContains(frame.f_locals, 'self'):
            return False
        cls = frame.f_locals['self'].__class__
        return _inherits(cls, 'BaseContext')
    except:
        # Any inspection failure means "not a context get call".
        traceback.print_exc()
        return False
def _is_django_resolve_call(frame):
    """Heuristic: is *frame* executing ``Variable._resolve_lookup()``?"""
    try:
        name = frame.f_code.co_name
        if name != '_resolve_lookup':
            return False
        if not DictContains(frame.f_locals, 'self'):
            return False
        cls = frame.f_locals['self'].__class__
        clsname = cls.__name__
        return clsname == 'Variable'
    except:
        # Any inspection failure means "not a resolve call".
        traceback.print_exc()
        return False
def _is_django_suspended(thread):
    """True when *thread* is currently suspended inside template rendering."""
    info = thread.additionalInfo
    return info.suspend_type == DJANGO_SUSPEND
def suspend_django(mainDebugger, thread, frame, cmd=CMD_SET_BREAK):
    """Suspend *thread* on a synthetic Django template frame.

    Wraps the real render frame in a DjangoTemplateFrame so the IDE shows the
    template file/line instead of Django internals. Returns the synthetic
    frame, or None when no template line could be determined.
    """
    frame = DjangoTemplateFrame(frame)
    if frame.f_lineno is None:
        # No template line info available -- don't suspend.
        return None
    #try:
    #    if thread.additionalInfo.filename == frame.f_code.co_filename and thread.additionalInfo.line == frame.f_lineno:
    #        return None # don't stay twice on the same line
    #except AttributeError:
    #    pass
    # Register the synthetic frame so the debugger can resolve it by id later.
    pydevd_vars.addAdditionalFrameById(GetThreadId(thread), {id(frame): frame})
    mainDebugger.setSuspend(thread, cmd)
    # Mark the suspension as template-level and remember where we stopped.
    thread.additionalInfo.suspend_type = DJANGO_SUSPEND
    thread.additionalInfo.filename = frame.f_code.co_filename
    thread.additionalInfo.line = frame.f_lineno
    return frame
def _find_django_render_frame(frame):
    """Walk f_back links until a Django Node.render() frame is found (or None)."""
    current = frame
    while current is not None:
        if _is_django_render_call(current):
            break
        current = current.f_back
    return current
#=======================================================================================================================
# Django Frame
#=======================================================================================================================
def _read_file(filename):
f = open(filename, "r")
s = f.read()
f.close()
return s
def _offset_to_line_number(text, offset):
curLine = 1
curOffset = 0
while curOffset < offset:
if curOffset == len(text):
return -1
c = text[curOffset]
if c == '\n':
curLine += 1
elif c == '\r':
curLine += 1
if curOffset < len(text) and text[curOffset + 1] == '\n':
curOffset += 1
curOffset += 1
return curLine
def _get_source(frame):
    """Return the debug 'source' info of the template Node in *frame*.

    Only usable on Django <= 1.8, where nodes carry a ``source`` attribute
    (an (origin, position) pair -- see the callers' ``source[0].name`` /
    ``source[1][0]`` usage). Returns None, with a logged hint, otherwise.
    """
    # This method is usable only for the Django <= 1.8
    try:
        node = frame.f_locals['self']
        if hasattr(node, 'source'):
            return node.source
        else:
            if IS_DJANGO18:
                # The debug setting was changed since Django 1.8
                pydev_log.error_once("WARNING: Template path is not available. Set the 'debug' option in the OPTIONS of a DjangoTemplates "
                                     "backend.")
            else:
                # The debug setting for Django < 1.8
                pydev_log.error_once("WARNING: Template path is not available. Please set TEMPLATE_DEBUG=True in your settings.py to make "
                                     "django template breakpoints working")
            return None
    except:
        pydev_log.debug(traceback.format_exc())
        return None
def _get_template_file_name(frame):
    """Best-effort resolution of the template file rendered by *frame*.

    Django >= 1.9: read ``context.template.origin.name``.
    Django <= 1.8: derive it from the node's debug 'source' info.
    Returns None when unavailable; any inspection error is logged.
    """
    try:
        if IS_DJANGO19:
            # The Node source was removed since Django 1.9
            if DictContains(frame.f_locals, 'context'):
                context = frame.f_locals['context']
                if hasattr(context, 'template') and hasattr(context.template, 'origin') and \
                        hasattr(context.template.origin, 'name'):
                    return context.template.origin.name
            return None
        source = _get_source(frame)
        if source is None:
            pydev_log.debug("Source is None\n")
            return None
        fname = source[0].name
        if fname == '<unknown source>':
            pydev_log.debug("Source name is %s\n" % fname)
            return None
        else:
            # Normalize to the debugger's canonical file name form.
            filename, base = GetFileNameAndBaseFromFile(fname)
            return filename
    except:
        pydev_log.debug(traceback.format_exc())
        return None
def _get_template_line(frame):
    """Return the template line number for the render call in *frame*, or None.

    Django >= 1.9 keeps the line on the node's token; older versions require
    mapping the node's source offset back to a line in the template file.
    """
    if IS_DJANGO19:
        # The Node source was removed since Django 1.9
        # NOTE(review): assumes 'self' exists in f_locals here; a KeyError
        # would propagate to the caller -- confirm callers guarantee this.
        self = frame.f_locals['self']
        if hasattr(self, 'token') and hasattr(self.token, 'lineno'):
            return self.token.lineno
        else:
            return None
    source = _get_source(frame)
    file_name = _get_template_file_name(frame)
    try:
        # source[1] holds the node's (start, end) character offsets.
        return _offset_to_line_number(_read_file(file_name), source[1][0])
    except:
        return None
class DjangoTemplateFrame:
    """Synthetic frame object mimicking a Python frame for a template line.

    Presented to the debugger so stepping and variable views operate on the
    template file/line and context variables instead of Django internals.
    """
    def __init__(self, frame):
        file_name = _get_template_file_name(frame)
        # Keep the real template context so variable writes go through.
        self.back_context = frame.f_locals['context']
        self.f_code = FCode('Django Template', file_name)
        self.f_lineno = _get_template_line(frame)
        self.f_back = frame
        self.f_globals = {}
        self.f_locals = self.collect_context(self.back_context)
        self.f_trace = None
    def collect_context(self, context):
        # Flatten the context's stack of dicts into one locals mapping.
        res = {}
        try:
            for d in context.dicts:
                for k, v in d.items():
                    res[k] = v
        except AttributeError:
            # Not a real Django context (no .dicts); show no locals.
            pass
        return res
    def changeVariable(self, name, value):
        # Write-through: update every context dict that defines *name*.
        for d in self.back_context.dicts:
            for k, v in d.items():
                if k == name:
                    d[k] = value
def change_variable(plugin, frame, attr, expression):
    """Evaluate *expression* in the template frame and assign it to *attr*.

    Returns the evaluated value, or False when *frame* is not a template
    frame. NOTE: uses eval() on the debugger-supplied expression -- standard
    for a debugger's "set value", but must never see untrusted input.
    """
    if isinstance(frame, DjangoTemplateFrame):
        result = eval(expression, frame.f_globals, frame.f_locals)
        frame.changeVariable(attr, result)
        return result
    return False
def _is_django_exception_break_context(frame):
try:
name = frame.f_code.co_name
except:
name = None
return name in ['_resolve_lookup', 'find_template']
#=======================================================================================================================
# Django Step Commands
#=======================================================================================================================
def can_not_skip(plugin, mainDebugger, pydb_frame, frame):
    """True when the tracer must keep tracing *frame*: it is a Django render
    call for a template file that has breakpoints set."""
    if mainDebugger.django_breakpoints and _is_django_render_call(frame):
        filename = _get_template_file_name(frame)
        django_breakpoints_for_file = mainDebugger.django_breakpoints.get(filename)
        if django_breakpoints_for_file:
            return True
    return False
def has_exception_breaks(plugin):
    """True when any Django exception breakpoint is registered."""
    return bool(plugin.main_debugger.django_exception_break)
def has_line_breaks(plugin):
    """True when at least one template file has a line breakpoint set."""
    breakpoints_by_file = plugin.main_debugger.django_breakpoints
    return any(len(bps) > 0 for bps in breakpoints_by_file.values())
def cmd_step_into(plugin, mainDebugger, frame, event, args, stop_info, stop):
    """Adjust the step-into decision while inside Django template rendering.

    Returns (stop, plugin_stop): *plugin_stop* requests a suspend on the
    template frame; *stop* is the adjusted decision for plain Python frames.
    """
    mainDebugger, filename, info, thread = args
    plugin_stop = False
    if _is_django_suspended(thread):
        # Stop at the next template Node.render() call.
        stop_info['django_stop'] = event == 'call' and _is_django_render_call(frame)
        plugin_stop = stop_info['django_stop']
        # Only stop in Python code reached from a template variable resolve,
        # and never inside internal context lookups.
        stop = stop and _is_django_resolve_call(frame.f_back) and not _is_django_context_get_call(frame)
        if stop:
            info.pydev_django_resolve_frame = 1 #we remember that we've go into python code from django rendering frame
    return stop, plugin_stop
def cmd_step_over(plugin, mainDebugger, frame, event, args, stop_info, stop):
    """Adjust the step-over decision around Django template rendering.

    Returns (stop, plugin_stop): while suspended in a template, step-over
    means "next render() call"; otherwise restore template-suspend mode when
    returning from a variable-resolve excursion into Python code.
    """
    mainDebugger, filename, info, thread = args
    plugin_stop = False
    if _is_django_suspended(thread):
        stop_info['django_stop'] = event == 'call' and _is_django_render_call(frame)
        plugin_stop = stop_info['django_stop']
        # Never stop in plain Python frames while in template-suspend mode.
        stop = False
        return stop, plugin_stop
    else:
        if event == 'return' and info.pydev_django_resolve_frame is not None and _is_django_resolve_call(frame.f_back):
            #we return to Django suspend mode and should not stop before django rendering frame
            info.pydev_step_stop = info.pydev_django_resolve_frame
            info.pydev_django_resolve_frame = None
            thread.additionalInfo.suspend_type = DJANGO_SUSPEND
        stop = info.pydev_step_stop is frame and event in ('line', 'return')
    return stop, plugin_stop
def stop(plugin, mainDebugger, frame, event, args, stop_info, arg, step_cmd):
    """Suspend on a template frame when a prior cmd_step_* flagged one.

    Returns True when the thread was suspended here, else False.
    """
    mainDebugger, filename, info, thread = args
    if DictContains(stop_info, 'django_stop') and stop_info['django_stop']:
        # suspend_django returns None when no template line is available.
        frame = suspend_django(mainDebugger, thread, frame, step_cmd)
        if frame:
            mainDebugger.doWaitSuspend(thread, frame, event, arg)
            return True
    return False
def get_breakpoint(plugin, mainDebugger, pydb_frame, frame, event, args):
    """Check whether *frame* hits a Django template line breakpoint.

    Returns (flag, breakpoint, new_frame, type): *flag* is True when a
    breakpoint matched, in which case *new_frame* is the synthetic
    template frame to present to the debugger.
    """
    mainDebugger, filename, info, thread = args
    flag = False
    django_breakpoint = None
    new_frame = None
    type = 'django'
    # Only render() calls can map to a template line, and only when not
    # already suspended and at least one template breakpoint exists.
    if event == 'call' and info.pydev_state != STATE_SUSPEND and \
            mainDebugger.django_breakpoints and _is_django_render_call(frame):
        filename = _get_template_file_name(frame)
        pydev_log.debug("Django is rendering a template: %s\n" % filename)
        django_breakpoints_for_file = mainDebugger.django_breakpoints.get(filename)
        if django_breakpoints_for_file:
            pydev_log.debug("Breakpoints for that file: %s\n" % django_breakpoints_for_file)
            template_line = _get_template_line(frame)
            pydev_log.debug("Tracing template line: %d\n" % template_line)
            if DictContains(django_breakpoints_for_file, template_line):
                django_breakpoint = django_breakpoints_for_file[template_line]
                flag = True
                new_frame = DjangoTemplateFrame(frame)
    return flag, django_breakpoint, new_frame, type
def suspend(plugin, mainDebugger, thread, frame, bp_type):
    """Suspend the thread on a template frame for 'django' breakpoints."""
    if bp_type != 'django':
        return None
    return suspend_django(mainDebugger, thread, frame)
def exception_break(plugin, mainDebugger, pydb_frame, frame, args, arg):
    """Suspend on Django template exceptions (VariableDoesNotExist etc.).

    Returns (flag, frame) when the exception was handled here, else None.
    """
    mainDebugger, filename, info, thread = args
    exception, value, trace = arg
    # Only break on template-related exception types, freshly raised, inside
    # a template-resolution context.
    if mainDebugger.django_exception_break and \
            get_exception_name(exception) in ['VariableDoesNotExist', 'TemplateDoesNotExist', 'TemplateSyntaxError'] and \
            just_raised(trace) and _is_django_exception_break_context(frame):
        render_frame = _find_django_render_frame(frame)
        if render_frame:
            suspend_frame = suspend_django(mainDebugger, thread, render_frame, CMD_ADD_EXCEPTION_BREAK)
            if suspend_frame:
                add_exception_to_frame(suspend_frame, (exception, value, trace))
                flag = True
                thread.additionalInfo.message = 'VariableDoesNotExist'
                suspend_frame.f_back = frame
                frame = suspend_frame
            # NOTE(review): ``flag`` is only bound when suspend_frame is
            # truthy; if suspend_django() returns None this return raises
            # NameError -- confirm the intended control flow upstream.
            return (flag, frame)
    return None
| |
import logging
from rest_framework import decorators, permissions, status
from rest_framework.renderers import JSONPRenderer, JSONRenderer, BrowsableAPIRenderer
from rest_framework.response import Response
import requests
from builds.constants import LATEST
from builds.models import Version
from djangome import views as djangome
from search.indexes import PageIndex, ProjectIndex, SectionIndex
from projects.models import Project
from restapi import utils
log = logging.getLogger(__name__)
@decorators.api_view(['GET'])
@decorators.permission_classes((permissions.AllowAny,))
@decorators.renderer_classes((JSONRenderer, JSONPRenderer, BrowsableAPIRenderer))
def quick_search(request):
    """Look up redirect targets for a project/version from the djangome redis store.

    GET args: ``project`` (slug), ``version`` (slug, defaults to latest) and
    ``q`` (substring matched against the redirect key).
    """
    project_slug = request.GET.get('project', None)
    version_slug = request.GET.get('version', LATEST)
    query = request.GET.get('q', None)
    # Keys have the shape: redirects:v4:en:<version>:<project>:<term>...
    redis_data = djangome.r.keys('redirects:v4:en:%s:%s:*%s*' % (version_slug, project_slug, query))
    ret_dict = {}
    for data in redis_data:
        # Only keep entries whose trailing value part is an absolute URL.
        if 'http://' in data or 'https://' in data:
            key = data.split(':')[5]
            value = ':'.join(data.split(':')[6:])
            ret_dict[key] = value
    return Response({"results": ret_dict})
@decorators.api_view(['POST'])
@decorators.permission_classes((permissions.IsAdminUser,))
@decorators.renderer_classes((JSONRenderer, JSONPRenderer, BrowsableAPIRenderer))
def index_search(request):
    """
    Add things to the search index.

    Expects ``request.DATA['data']`` with ``project_pk``, ``version_pk``,
    ``page_list`` and an optional ``commit``.
    """
    data = request.DATA['data']
    project_pk = data['project_pk']
    version_pk = data['version_pk']
    commit = data.get('commit')
    project = Project.objects.get(pk=project_pk)
    version = Version.objects.get(pk=version_pk)
    # Fetch popularity scaling factors for this project from the external
    # heatmap service.
    resp = requests.get('https://api.grokthedocs.com/api/v1/index/1/heatmap/',
                        params={'project': project.slug, 'compare': True})
    ret_json = resp.json()
    project_scale = ret_json.get('scaled_project', {}).get(project.slug)
    # BUG FIX: the original computed
    #     page_scale = ret_json.get('scaled_page', {}).get(page['path'], 1)
    # but no ``page`` variable exists in this view, so every request died
    # with NameError. Individual pages are only known inside
    # index_search_request, so pass the neutral default scale here.
    page_scale = 1
    utils.index_search_request(version=version, page_list=data['page_list'],
                               commit=commit, project_scale=project_scale,
                               page_scale=page_scale)
    return Response({'indexed': True})
@decorators.api_view(['GET'])
@decorators.permission_classes((permissions.AllowAny,))
@decorators.renderer_classes((JSONRenderer, JSONPRenderer, BrowsableAPIRenderer))
def search(request):
    """Full-text page search across documentation.

    GET args: ``q`` (query), ``project`` (optional slug filter) and
    ``version`` (slug, defaults to latest).
    """
    project_slug = request.GET.get('project', None)
    version_slug = request.GET.get('version', LATEST)
    query = request.GET.get('q', None)
    log.debug("(API Search) %s" % query)
    kwargs = {}
    # Elasticsearch query: weight-scaled scoring, boosting title matches over
    # headers over body content.
    body = {
        "query": {
            "function_score": {
                "field_value_factor": {"field": "weight"},
                "query": {
                    "bool": {
                        "should": [
                            {"match": {"title": {"query": query, "boost": 10}}},
                            {"match": {"headers": {"query": query, "boost": 5}}},
                            {"match": {"content": {"query": query}}},
                        ]
                    }
                }
            }
        },
        "highlight": {
            "fields": {
                "title": {},
                "headers": {},
                "content": {},
            }
        },
        "fields": ["title", "project", "version", "path"],
        "size": 50  # TODO: Support pagination.
    }
    if project_slug:
        # Restrict results to one project+version.
        body['filter'] = {
            "and": [
                {"term": {"project": project_slug}},
                {"term": {"version": version_slug}},
            ]
        }
        # Add routing to optimize search by hitting the right shard.
        kwargs['routing'] = project_slug
    results = PageIndex().search(body, **kwargs)
    return Response({'results': results})
@decorators.api_view(['GET'])
@decorators.permission_classes((permissions.AllowAny,))
@decorators.renderer_classes((JSONRenderer, JSONPRenderer, BrowsableAPIRenderer))
def project_search(request):
    """Search projects by name/description, scored by weight.

    GET args: ``q`` (query string).
    """
    query = request.GET.get('q', None)
    log.debug("(API Project Search) %s" % (query))
    # Name matches are boosted over description matches.
    body = {
        "query": {
            "function_score": {
                "field_value_factor": {"field": "weight"},
                "query": {
                    "bool": {
                        "should": [
                            {"match": {"name": {"query": query, "boost": 10}}},
                            {"match": {"description": {"query": query}}},
                        ]
                    }
                }
            }
        },
        "fields": ["name", "slug", "description", "lang"]
    }
    results = ProjectIndex().search(body)
    return Response({'results': results})
@decorators.api_view(['GET'])
@decorators.permission_classes((permissions.AllowAny,))
@decorators.renderer_classes((JSONRenderer, JSONPRenderer, BrowsableAPIRenderer))
def section_search(request):
    """
    Search for a Section of content on Read the Docs.
    A Section is a subheading on a specific page.
    Query Thoughts
    --------------
    If you want to search across all documents, just query with a ``q`` GET arg.
    If you want to filter by a specific project, include a ``project`` GET arg.
    Facets
    ------
    When you search, you will have a ``project`` facet, which includes the number of matching sections per project.
    When you search inside a project, the ``path`` facet will show the number of matching sections per page.
    Possible GET args
    -----------------
    * q - The query string **Required**
    * project - A project slug *Optional*
    * version - A version slug *Optional*
    * path - A file path slug *Optional*
    Example
    -------
    GET /api/v2/search/section/?q=virtualenv&project=django
    Current Query
    -------------
    """
    query = request.GET.get('q', None)
    if not query:
        return Response({'error': 'Search term required. Use the "q" GET arg to search. '}, status=status.HTTP_400_BAD_REQUEST)
    project_slug = request.GET.get('project', None)
    version_slug = request.GET.get('version', LATEST)
    path_slug = request.GET.get('path', None)
    log.debug("(API Section Search) [%s:%s] %s" % (project_slug, version_slug, query))
    kwargs = {}
    body = {
        "query": {
            "function_score": {
                "field_value_factor": {"field": "weight"},
                "query": {
                    "bool": {
                        "should": [
                            {"match": {"title": {"query": query, "boost": 10}}},
                            {"match": {"content": {"query": query}}},
                        ]
                    }
                }
            }
        },
        "facets": {
            "project": {
                "terms": {"field": "project"},
                "facet_filter": {
                    "term": {"version": version_slug},
                }
            },
        },
        "highlight": {
            "fields": {
                "title": {},
                "content": {},
            }
        },
        "fields": ["title", "project", "version", "path", "page_id", "content"],
        "size": 10  # TODO: Support pagination.
    }
    # Collect filter terms in one list: the original assigned body['filter']
    # twice, so a path filter silently discarded the project/version filter.
    filters = []
    if project_slug:
        filters.extend([
            {"term": {"project": project_slug}},
            {"term": {"version": version_slug}},
        ])
        # BUG FIX: the original ended this assignment with a trailing comma,
        # wrapping the facet dict in a 1-tuple and producing an invalid
        # Elasticsearch facet definition.
        body['facets']['path'] = {
            "terms": {"field": "path"},
            "facet_filter": {
                "term": {"project": project_slug},
            }
        }
        # Add routing to optimize search by hitting the right shard.
        kwargs['routing'] = project_slug
    if path_slug:
        filters.append({"term": {"path": path_slug}})
    if filters:
        body['filter'] = {"and": filters}
    if path_slug and not project_slug:
        # Show facets when we only have a path
        body['facets']['path'] = {
            "terms": {"field": "path"}
        }
    results = SectionIndex().search(body, **kwargs)
    return Response({'results': results})
| |
#!/usr/bin/env python # pylint: disable=too-many-lines
''' Ansible module '''
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
# vim: expandtab:tabstop=4:shiftwidth=4
# Copyright 2016 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Purpose: An ansible module to communicate with statuspageio.
DOCUMENTATION = '''
module: statuspage_incident
short_description: Create, modify, and idempotently manage statuspage incidents
description:
- Manage statuspage incidents
options:
api_key:
description:
- statuspage api key
required: True
default: os.environ.get('STATUSPAGE_API_KEY', '')
aliases: []
page_id:
description:
- The statuspage page
required: True
default: None
aliases: []
org_id:
description:
- Organization id for the user. Required when modifying users.
required: false
default: None
aliases: []
state:
description:
- Whether to create, update, delete, or list the desired object
required: True
default: present
aliases: []
name:
description:
- Name of the incident
required: false
default: None
aliases: []
unresolved_only:
description:
- Filter the incidents on the unresolved_only
required: false
default: None
aliases: []
scheduled_only:
description:
- Filter the incidents on the scheduled_only
required: false
default: None
aliases: []
incident_type:
description:
- The type of incident to create.
choices: ['realtime', 'scheduled', 'historical']
required: false
default: None
aliases: []
status:
description:
- The status of the incident.
choices: ['investigating', 'identified', 'monitoring', 'resolved', 'scheduled', 'in_progress', 'verifying', 'completed']
required: false
default: None
aliases: []
update_twitter:
description:
- Whether to update the twitters
required: false
default: False
aliases: []
msg:
description:
- The incident message that gets posted
required: false
default: None
aliases: []
impact_override:
description:
- Whether update the impact
choices: ['none', 'minor', 'major', 'critical']
required: false
default: None
aliases: []
components:
description:
- An array of the components
required: false
default: None
aliases: []
scheduled_for:
description:
- The date when the maintenance will start
required: false
default: None
aliases: []
scheduled_until:
description:
- The date when the maintenance will end
required: false
default: None
aliases: []
scheduled_remind_prior:
description:
- Whether to remind the subscribers that the maintenance will begin
required: false
default: None
aliases: []
scheduled_auto_in_progress:
description:
- Whether to auto start the maintenance period and transition the status to in_progress
required: false
default: None
aliases: []
scheduled_auto_completed:
description:
- Whether to auto complete the maintenance period and transition the status to completed
required: false
default: None
aliases: []
'''
EXAMPLES = '''
# list indicents
- name: list incidents
statuspage_incident:
state: list
api_key: "{{ api_key }}"
org_id: "{{ org_id }}"
page_id: "{{ page_id }}"
register: incout
# create an incident
- name: create an incident
statuspage_incident:
api_key: "{{ api_key }}"
org_id: "{{ org_id }}"
page_id: "{{ page_id }}"
name: API Outage
message: Investigating an issue with the API
components:
- group: opstest
component:
- name: Master API
status: partial_outage
register: incout
- debug: var=incout
# create a scheduled maintenance incident
- name: create a scheduled incident
statuspage_incident:
api_key: "{{ api_key }}"
org_id: "{{ org_id }}"
page_id: "{{ page_id }}"
incident_type: scheduled
status: scheduled
name: Cluster upgrade
message: "Upgrading from 3.2 to 3.3."
components:
- group: opstest
component:
- name: Etcd Service
status: partial_outage
- name: Master API
status: partial_outage
scheduled_for: '2016-10-14T13:21:00-0400'
scheduled_until: '2016-10-14T13:25:00-0400'
scheduled_auto_in_progress: True
scheduled_remind_prior: True
register: incout
- debug: var=incout
#resolve an incident
- name: resolve an incident
statuspage_incident:
api_key: "{{ api_key }}"
org_id: "{{ org_id }}"
page_id: "{{ page_id }}"
status: resolved
name: API Outage
message: "Fixed and ready to go."
components:
- group: opstest
component:
- name: Master API
status: operational
register: incout
- debug: var=incout
'''
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
import os
# pylint: disable=import-error
import statuspageio
class StatusPageIOAPIError(Exception):
    '''Raised for errors while talking to the statuspage.io API.'''
    pass
# pylint: disable=too-few-public-methods
class StatusPageIOAPI(object):
    ''' Thin wrapper around the statuspageio client library '''
    def __init__(self,
                 api_key,
                 page_id,
                 org_id=None):
        ''' Constructor for StatusPageIOAPI '''
        self.api_key = api_key
        self.page_id = page_id
        self.org_id = org_id
        self.client = statuspageio.Client(api_key=self.api_key, page_id=self.page_id, organization_id=self.org_id)
    def _get_incidents(self, scheduled=False, unresolved_only=False):
        '''return a list of incidents; unresolved_only takes precedence over scheduled'''
        if unresolved_only:
            return self.client.incidents.list_unresolved()
        if scheduled:
            return self.client.incidents.list_scheduled()
        return self.client.incidents.list()
    def _delete_component(self, compid):
        '''delete a component'''
        return self.client.components.delete(compid)
    def _delete_incident(self, incid):
        '''delete a incident'''
        return self.client.incidents.delete(incid)
    def _create_component(self, kwargs):
        '''create a component'''
        return self.client.components.create(**kwargs)
    def _create_incident(self, kwargs, scheduled=False):
        '''create an incident (scheduled maintenance when scheduled=True)'''
        if scheduled:
            return self.client.incidents.create_scheduled(**kwargs)
        return self.client.incidents.create(**kwargs)
    def _update_incident(self, incid, kwargs):
        '''update an existing incident'''
        return self.client.incidents.update(incid, **kwargs)
    def _get_components_by_name(self, names):
        '''return the components whose name is in *names*'''
        components = self._get_components()
        tmp_comps = []
        for comp in components:
            if comp.name in names:
                tmp_comps.append(comp)
        return tmp_comps
    def _get_components_by_group(self, group):
        '''return the parent component named *group* plus all its subcomponents'''
        components = self._get_components()
        # first, find the parent component
        tmp_comps = []
        parent = None
        for comp in components:
            if group == comp.name:
                parent = comp
                tmp_comps.append(comp)
        # BUG FIX: the original dereferenced parent.id unconditionally and
        # crashed with AttributeError when no component matched *group*.
        if parent is None:
            return tmp_comps
        # now, find all subcomponents
        for comp in components:
            if comp.group_id == parent.id:
                tmp_comps.append(comp)
        return tmp_comps
    def _get_components(self):
        '''return all components on the page'''
        return self.client.components.list()
    def _update_component(self, cid, name=None, desc=None, status=None):
        '''update a component; only the provided fields are sent'''
        kwargs = {}
        if name:
            kwargs['name'] = name
        if desc:
            kwargs['desc'] = desc
        if status:
            kwargs['status'] = status
        return self.client.components.update(cid, **kwargs)
# pylint: disable=too-many-instance-attributes
class StatusPageIncident(StatusPageIOAPI):
    '''Idempotent management of a single statuspage.io incident.

    Holds the desired incident state (name, status, message, affected
    components, scheduling window) and can find, create, update and delete
    the matching incident through the statuspage.io API, keeping the
    affected components' statuses in sync.
    '''
    kind = 'sa'

    # pylint allows 5
    # pylint: disable=too-many-arguments
    def __init__(self,
                 api_key,
                 page_id,
                 name=None,
                 scheduled=None,
                 unresolved=None,
                 org_id=None,
                 incident_type='realtime',
                 status='investigating',
                 update_twitter=False,
                 message=None,
                 components=None,
                 scheduled_for=None,
                 scheduled_until=None,
                 scheduled_remind_prior=False,
                 scheduled_auto_in_progress=False,
                 scheduled_auto_completed=False,
                 verbose=False):
        '''Capture the desired incident state; performs no API calls.'''
        super(StatusPageIncident, self).__init__(api_key, page_id, org_id)
        self.name = name
        self.api_key = api_key
        self.page_id = page_id
        self.org_id = org_id
        self.scheduled = scheduled
        self.unresolved = unresolved
        self.verbose = verbose
        self.incidents = None
        self.incident_type = incident_type
        self.status = status
        self.update_twitter = update_twitter
        self.message = message
        self.components = components
        self.scheduled_for = scheduled_for
        self.scheduled_until = scheduled_until
        self.scheduled_remind_prior = scheduled_remind_prior
        self.scheduled_auto_in_progress = scheduled_auto_in_progress
        self.scheduled_auto_completed = scheduled_auto_completed
        # Bug fix: identity comparison instead of `== None`.
        if self.components is None:
            self.components = {}
        self._params = None
        self._incidents = None

    @property
    def incidents(self):
        '''Lazily fetched list of incidents matching the search criteria.'''
        if not self._incidents:
            self._incidents = self.get()
        return self._incidents

    @incidents.setter
    def incidents(self, data):
        '''setter function for incidents var'''
        self._incidents = data

    @property
    def params(self):
        '''Lazily built create/update parameters (see build_params).'''
        # Bug fix: identity comparison instead of `== None`.
        if self._params is None:
            self._params = self.build_params()
        return self._params

    @params.setter
    def params(self, data):
        '''setter function for params'''
        self._params = data

    def get(self):
        '''Return incidents, filtered by substring match on self.name.'''
        # unresolved? unscheduled?
        incs = self._get_incidents(scheduled=self.scheduled, unresolved_only=self.unresolved)
        if self.name:
            r_incs = [inc for inc in incs if self.name.lower() in inc.name.lower()]
        else:
            r_incs = incs
        return r_incs

    def delete(self):
        '''Delete the uniquely matching incident and reset its components.

        Returns the API response, or False when exactly one match was not
        found.
        '''
        found, _, _ = self.find_incident()
        if len(found) != 1:
            return False
        results = self._delete_incident(found[0].id)
        # Put every component the incident touched back to operational.
        for comp in found[0].incident_updates[-1].affected_components:
            # Bug fix: dict.keys() is not indexable on Python 3.
            self.set_component_status(list(comp.keys())[0], name=None, desc=None, status='operational')
        return results

    def build_params(self):
        '''Build parameters for update or create.

        Returns {'params': incident kwargs, 'comps': {component id: component}}.
        Raises StatusPageIOAPIError when components were requested but none
        could be resolved.
        '''
        ids = []
        r_comps = {}
        for inc_comp in self.components:
            # Bug fix: dict.has_key() was removed in Python 3; get() covers
            # both the missing-key and falsy-value cases the original checked.
            if inc_comp.get('group'):
                comps = self._get_components_by_group(inc_comp['group'])
            else:
                comps = self._get_components_by_name([_comp['name'] for _comp in inc_comp['component']])
            for comp in comps:
                # only include the components in my passed in component list
                if comp.name in [tmp_comp['name'] for tmp_comp in inc_comp['component']]:
                    ids.append(comp.id)
                    r_comps[comp.id] = comp
        if self.components and not ids:
            raise StatusPageIOAPIError('No components found.')
        args = {'name': self.name,
                'component_ids': ids,
                'message': self.message,
                'wants_twitter_update': self.update_twitter,
               }
        if self.status:
            args['status'] = self.status
        # Scheduled incidents carry their maintenance-window options.
        if self.incident_type == 'scheduled':
            args['scheduled_for'] = self.scheduled_for
            args['scheduled_until'] = self.scheduled_until
            args['scheduled_remind_prior'] = self.scheduled_remind_prior
            args['scheduled_auto_in_progress'] = self.scheduled_auto_in_progress
            args['scheduled_auto_completed'] = self.scheduled_auto_completed
        return {'params': args, 'comps': r_comps}

    def set_component_status(self, cid, name=None, desc=None, status=None):
        '''Update a single component's status.'''
        return self._update_component(cid, name=name, desc=desc, status=status)

    def create(self):
        '''Create the incident and sync the affected components' statuses.'''
        params = self.params['params']
        comps = self.prepare_component_status(self.params['comps'])
        scheduled = self.incident_type == 'scheduled'
        results = self._create_incident(params, scheduled=scheduled)
        for cid, comp in comps.items():
            self.set_component_status(cid, name=None, desc=None, status=comp.status)
        return results

    def prepare_component_status(self, comps):
        '''Overlay the desired statuses from self.components onto `comps`.'''
        # for each group
        for inc_comp in self.components:
            # for each component in this group
            for tmp_comp in inc_comp['component']:
                for ex_comp in comps.values():
                    if tmp_comp['name'] == ex_comp.name and tmp_comp.get('status', 'operational') != ex_comp.status:
                        ex_comp.status = tmp_comp.get('status', 'operational')
        return comps

    def update(self):
        '''Update the matching incident and sync component statuses.

        NOTE(review): assumes find_incident() returns at least one match;
        callers must check exists() first or found[0] raises IndexError.
        '''
        found, params, comps = self.find_incident()
        results = self._update_incident(found[0].id, kwargs=params)
        comps = self.prepare_component_status(comps)
        for cid, comp in comps.items():
            self.set_component_status(cid, name=None, desc=None, status=comp.status)
        return results

    @staticmethod
    def get_affected_components(aff_comps):
        '''Return a list of affected component ids.'''
        ids = []
        # Bug fix: dict.has_key() was removed in Python 3 (assumes aff_comps
        # is dict-like, as the original has_key() usage already required).
        if aff_comps and 'affected_components' in aff_comps:
            for comp in aff_comps['affected_components']:
                # data structure appears to have changed recently (2017-12):
                # - if comp.code exists, use it as component code
                # - if not, then use the first key for backwards compatability
                if 'code' in comp.keys():
                    ids.append(comp.code)
                else:
                    # Bug fix: dict.keys() is not indexable on Python 3.
                    ids.append(list(comp.keys())[0])
        return ids

    # skip false positive on "for incident in self.incidents:"
    # pylint: disable=not-an-iterable
    def find_incident(self):
        '''Attempt to match the incoming incident with existing incidents.

        Returns (matches, params, comps). An existing incident matches when
        it has the same name, is still unresolved, and touches exactly the
        same set of components.
        '''
        params = self.params['params']
        comps = self.params['comps']
        found = []
        for incident in self.incidents:
            if incident.name == params['name'] and \
               incident.resolved_at is None and \
               set(StatusPageIncident.get_affected_components(incident.incident_updates[-1])) == \
               set(params['component_ids']):
                # This could be the one!
                found.append(incident)
        return found, params, comps

    def exists(self):
        '''Verify if the incoming incident exists.

        As per some discussion, this is a difficult task without a unique
        identifier on the incident.

        Decision: If an incident exists, with the same components, and the
        components are in the same state as before then we can say with a
        small degree of confidence that this is the correct incident
        referred to by the caller.
        '''
        found, _, _ = self.find_incident()
        if len(found) == 1:
            return True
        if len(found) == 0:
            return False
        raise StatusPageIOAPIError('Found %s instances matching your search. Please resolve this issue ids=[%s].' \
                                   % (len(found), ', '.join([inc.id for inc in found])))

    def needs_update(self):
        '''Verify an update is needed.

        NOTE(review): assumes find_incident() returns at least one match;
        callers must check exists() first or found[0] raises IndexError.
        '''
        # cannot update historical
        if self.incident_type == 'historical':
            return False
        # we need to check to see if the current status matches what we are about to update
        found, params, comps = self.find_incident()
        # check incoming components status against existing
        curr_incident = found[0]
        # for each group
        for comp in self.components:
            if comp['component']:
                # for each component in a group
                for inc_comp in comp['component']:
                    # for each comp in the current existing incident
                    for ex_comp in comps.values():
                        if ex_comp.name == inc_comp['name']:
                            if ex_comp.status == inc_comp.get('status', 'operational'):
                                break
                            return True
                    # didn't find the component name in the existing components, need to update
                    else:
                        return True
        # Check the message is the same
        if params['message'] != curr_incident.incident_updates[-1].body or \
           params['status'] != curr_incident.incident_updates[-1].status:
            return True
        if self.incident_type == 'scheduled':
            if self.scheduled_for != params['scheduled_for'] or \
               self.scheduled_until != params['scheduled_until'] or \
               self.scheduled_remind_prior != params['scheduled_remind_prior'] or \
               self.scheduled_auto_in_progress != params['scheduled_auto_in_progress'] or \
               self.scheduled_auto_completed != params['scheduled_auto_completed']:
                return True
        return False

    @staticmethod
    def run_ansible(params):
        '''Run the idempotent actions for the requested `state`.

        Returns the Ansible result dict; raises StatusPageIOAPIError on an
        unsupported state.
        '''
        spio = StatusPageIncident(params['api_key'],
                                  params['page_id'],
                                  params['name'],
                                  params['scheduled_only'],
                                  params['unresolved_only'],
                                  params['org_id'],
                                  params['incident_type'],
                                  params['status'],
                                  params['update_twitter'],
                                  params['msg'],
                                  params['components'],
                                  params['scheduled_for'],
                                  params['scheduled_until'],
                                  params['scheduled_remind_prior'],
                                  params['scheduled_auto_in_progress'],
                                  params['scheduled_auto_completed'],
                                  params['verbose'])
        results = spio.get()
        if params['state'] == 'list':
            return {'changed': False, 'result': results}

        elif params['state'] == 'absent':
            if spio.exists():
                results = spio.delete()
                return {'changed': True, 'result': results}
            else:
                return {'changed': False, 'result': {}}

        elif params['state'] == 'present':
            if not spio.exists():
                results = spio.create()
                return {'changed': True, 'result': results}
            elif spio.needs_update():
                results = spio.update()
                return {'changed': True, 'result': results}
            return {'changed': False, 'result': results}
        raise StatusPageIOAPIError('Unsupported state: %s' % params['state'])
def main():
    '''
    Ansible entry point: manage statuspage.io incidents (list/absent/present).
    '''
    module = AnsibleModule(
        argument_spec=dict(
            api_key=dict(default=os.environ.get('STATUSPAGE_API_KEY', ''), type='str'),
            page_id=dict(default=None, type='str', required=True, ),
            org_id=dict(default=None, type='str'),
            state=dict(default='present', type='str',
                       choices=['present', 'absent', 'list']),
            name=dict(default=None, type='str'),
            unresolved_only=dict(default=False, type='bool'),
            scheduled_only=dict(default=False, type='bool'),
            incident_type=dict(default='realtime', choices=['scheduled', 'realtime', 'historical'], type='str'),
            status=dict(default='investigating',
                        choices=['investigating', 'identified', 'monitoring', 'resolved',
                                 'scheduled', 'in_progress', 'verifying', 'completed'],
                        type='str'),
            update_twitter=dict(default=False, type='bool'),
            msg=dict(default=None, type='str'),
            # NOTE(review): impact_override is accepted here but never passed
            # on to StatusPageIncident.run_ansible -- confirm intent.
            impact_override=dict(default=None, choices=['none', 'minor', 'major', 'critical'], type='str'),
            components=dict(default=None, type='list'),
            scheduled_for=dict(default=None, type='str'),
            scheduled_until=dict(default=None, type='str'),
            scheduled_remind_prior=dict(default=False, type='bool'),
            scheduled_auto_in_progress=dict(default=False, type='bool'),
            scheduled_auto_completed=dict(default=False, type='bool'),
            verbose=dict(default=False, type='bool'),
        ),
        supports_check_mode=True,
        # scheduled incidents must carry their maintenance window
        required_if=[['incident_type', 'scheduled', ['scheduled_for', 'scheduled_until']]],
    )
    # Cross-field validation: allowed statuses depend on the incident type.
    if module.params['incident_type'] == 'scheduled':
        if not module.params['status'] in ['scheduled', 'in_progress', 'verifying', 'completed']:
            module.exit_json(msg='If incident type is scheduled, then status must be one of ' +
                             'scheduled|in_progress|verifying|completed')
    # Bug fix: the original wrote `incident_type in 'realtime'` -- a substring
    # membership test on a string; equality is what was meant (behavior is
    # unchanged for the validated choices, but the intent is now explicit).
    elif module.params['incident_type'] == 'realtime':
        if not module.params['status'] in ['investigating', 'identified', 'monitoring', 'resolved']:
            module.exit_json(msg='If incident type is realtime, then status must be one of' +
                             ' investigating|identified|monitoring|resolved')
    results = StatusPageIncident.run_ansible(module.params)
    module.exit_json(**results)
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# Import module snippets: Ansible modules conventionally pull in
# module_utils.basic via a wildcard import at the bottom of the file.
if __name__ == '__main__':
    from ansible.module_utils.basic import *
    main()
| |
# $Id: VTKBlender.py,v 1.19 2008-07-03 15:13:21 cwant Exp $
#
# Copyright (c) 2005, Chris Want, Research Support Group,
# AICT, University of Alberta. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
#
# Contributors: Chris Want (University of Alberta),
# Fritz Mielert (University of Stuttgart)
"""
VTK inside Blender module.
This module provides code so that polydata from vtk can
be used inside of blender.
Python needs to find the vtk stuff and this module in order
for this to work, and you can either a) set your PYTHONPATH
in your environment, or you can b) hardcode your vtk path's
in your script, e.g.,
a) at the prompt, before starting blender, type:
PYTHONPATH=$VTK_ROOT/Wrapping/Python:${LIBRARY_OUTPUT_PATH}
PYTHONPATH=$PYTHONPATH:${PATH_TO_THIS_MODULE}
export PYTHONPATH
b) add the following to your script near the beginning, before
importing vtk or VTKBlender:
import sys
sys.path.append($VTK_ROOT/Wrapping/Python)
sys.path.append(${LIBRARY_OUTPUT_PATH})
sys.path.append(${PATH_TO_VTKBlender_MODULE})
Be sure to replace $VTK_ROOT and ${LIBRARY_OUTPUT_PATH} with
values that are relevant to your system. These values can be
found by starting vtkpython with no arguments and typing:
import sys
print sys.path
Usually the first two items reported are the ones you want.
Also replace ${PATH_TO_VTKBlender_MODULE} with wherever you have
put the VTKBlender module.
"""
# vtk is required; Blender is optional at import time so this module can
# also be loaded outside of Blender (the except branch just warns).
import vtk
import time, string
try:
    import Blender
    from Blender import Mesh, Object, Material
except:
    print "No Blender module found!"
# Module version is parsed out of the CVS revision keyword above.
__versiontag__ = "$Revision: 1.19 $"
__version__ = string.split(__versiontag__)[1]
# some flags to alter behavior
# Module-level bitmask of import options; mutated by the Set* helpers below
# and consumed by PolyDataMapperToBlender.
flags = 0
# Bit 0: pair consecutive triangles into quad faces on import.
TRIS_TO_QUADS = 0x01
# Bit 1: mark imported faces as smooth-shaded.
SMOOTH_FACES = 0x02
# What is this 'tri to quad' stuff? Well, sometimes it's best to
# try to read in pairs of consecutive triangles in as quad faces.
# An example: you extrude a tube along a polyline in vtk, and if
# you can get it into Blender as a bunch of quads, you can use a
# Catmull-Clark subdivision surface to smooth the tube out, with
# fewer creases.
def SetTrisToQuads():
    """Request that consecutive triangle pairs be merged into quads."""
    global flags
    flags |= TRIS_TO_QUADS
def SetTrisToTris():
    """Keep triangles as triangles (clear the quad-merging flag)."""
    global flags
    flags &= ~TRIS_TO_QUADS
def SetFacesToSmooth():
    """Mark imported faces as smooth-shaded."""
    global flags
    flags |= SMOOTH_FACES
def SetFacesToFaceted():
    """Mark imported faces as faceted (clear the smooth-shading flag)."""
    global flags
    flags &= ~SMOOTH_FACES
def BlenderToPolyData(me, uvlayer=None):
    """Convert a Blender mesh into a vtkPolyData.

    Verts become vtk points; quads/tris become polys, 2-vert faces and mesh
    edges become lines.  If the mesh has face UVs, they are copied into the
    point data's texture coordinates (optionally selecting `uvlayer` first).
    Returns the vtkPolyData.
    """
    pcoords = vtk.vtkFloatArray()
    pcoords.SetNumberOfComponents(3)
    pcoords.SetNumberOfTuples(len(me.verts))
    for i in range(len(me.verts)):
        p0 = me.verts[i].co[0]
        p1 = me.verts[i].co[1]
        p2 = me.verts[i].co[2]
        pcoords.SetTuple3(i, p0, p1, p2)
    points = vtk.vtkPoints()
    points.SetData(pcoords)
    polys = vtk.vtkCellArray()
    lines = vtk.vtkCellArray()
    # Faces are dispatched by vertex count: 4 -> quad, 3 -> tri, 2 -> line.
    for face in me.faces:
        if len(face.v) == 4:
            polys.InsertNextCell(4)
            polys.InsertCellPoint(face.v[0].index)
            polys.InsertCellPoint(face.v[1].index)
            polys.InsertCellPoint(face.v[2].index)
            polys.InsertCellPoint(face.v[3].index)
        elif len(face.v) == 3:
            polys.InsertNextCell(3)
            polys.InsertCellPoint(face.v[0].index)
            polys.InsertCellPoint(face.v[1].index)
            polys.InsertCellPoint(face.v[2].index)
        elif len(face.v) == 2:
            lines.InsertNextCell(2)
            lines.InsertCellPoint(face.v[0].index)
            lines.InsertCellPoint(face.v[1].index)
    # Loose mesh edges also become vtk lines.
    for edge in me.edges:
        lines.InsertNextCell(2)
        lines.InsertCellPoint(edge.v1.index)
        lines.InsertCellPoint(edge.v2.index)
    pdata =vtk.vtkPolyData()
    pdata.SetPoints(points)
    pdata.SetPolys(polys)
    pdata.SetLines(lines)
    if me.faceUV:
        # Optionally switch the active UV layer before reading face.uv.
        if uvlayer:
            uvnames = me.getUVLayerNames()
            if uvlayer in uvnames:
                me.activeUVLayer = uvlayer
        tcoords = vtk.vtkFloatArray()
        tcoords.SetNumberOfComponents(2)
        tcoords.SetNumberOfTuples(len(me.verts))
        # NOTE(review): per-face UVs are flattened to per-vertex; vertices
        # shared by faces with different UVs keep the last one written.
        for face in me.faces:
            for i in range(len(face.verts)):
                uv = face.uv[i]
                tcoords.SetTuple2(face.v[i].index, uv[0], uv[1])
        pdata.GetPointData().SetTCoords(tcoords);
    pdata.Update()
    return pdata
def PolyDataMapperToBlender(pmapper, me=None):
    """Convert the polydata of a vtkPolyDataMapper into a Blender mesh.

    If `me` is given its geometry is replaced (existing materials are kept);
    otherwise a new mesh is created.  Scalars mapped through the mapper's
    lookup table become vertex colors.  Honors the module-level
    TRIS_TO_QUADS / SMOOTH_FACES flags.  Returns the Blender mesh.
    """
    global flags
    faces = []
    edges = []
    oldmats = None
    # newmesh records whether we created the mesh here (skip me.update() then)
    newmesh = 0
    if (me == None):
        me = Mesh.New()
        newmesh = 1
    else:
        if me.materials:
            oldmats = me.materials
        me.verts = None # this kills the faces/edges tooo
    pmapper.Update()
    pdata = pmapper.GetInput()
    plut = pmapper.GetLookupTable()
    #print pdata.GetNumberOfCells()
    scalars = pdata.GetPointData().GetScalars()
    verts = []
    for i in range(pdata.GetNumberOfPoints()):
        point = pdata.GetPoint(i)
        verts.append([point[0],point[1],point[2]])
    me.verts.extend(verts)
    # I think we can free some memory by killing the reference
    # from vert to the list it points at (not sure though)
    verts = []
    colors = None
    # Map point scalars through the lookup table to per-vertex RGBA colors.
    if ( (scalars != None) and (plut != None) ):
        colors = []
        # Have to be a bit careful since VTK 5.0 changed the
        # prototype of vtkLookupTable.GetColor()
        try:
            # VTK 5.x
            scolor = [0,0,0]
            for i in range(scalars.GetNumberOfTuples()):
                plut.GetColor(scalars.GetTuple1(i), scolor)
                color = map(VTKToBlenderColor, scolor)
                alpha = int(plut.GetOpacity(scalars.GetTuple1(i))*255)
                colors.append([color[0], color[1], color[2], alpha])
        except:
            # VTK 4.x
            for i in range(scalars.GetNumberOfTuples()):
                color = map(VTKToBlenderColor, \
                            plut.GetColor(scalars.GetTuple1(i)))
                alpha = int(plut.GetOpacity(scalars.GetTuple1(i))*255)
                colors.append([color[0], color[1], color[2], alpha])
    # skiptriangle is set when a triangle pair was merged into a quad, so the
    # second triangle of the pair is not emitted again.
    skiptriangle = False
    for i in range(pdata.GetNumberOfCells()):
        cell = pdata.GetCell(i)
        #print i, pdata.GetCellType(i)
        # Do lines
        if pdata.GetCellType(i)==3:
            n1 = cell.GetPointId(0)
            n2 = cell.GetPointId(1)
            BlenderAddEdge(me, edges, n1, n2)
        # Do poly lines
        if pdata.GetCellType(i)==4:
            for j in range(cell.GetNumberOfPoints()-1):
                n1 = cell.GetPointId(j)
                n2 = cell.GetPointId(j+1)
                BlenderAddEdge(me, edges, n1, n2)
        # Do triangles
        if pdata.GetCellType(i)==5:
            if skiptriangle==True:
                skiptriangle = False
            elif ( (flags & TRIS_TO_QUADS) and
                   (i < pdata.GetNumberOfCells()-1) and
                   (pdata.GetCellType(i+1)==5) ):
                n1 = cell.GetPointId(0)
                n2 = cell.GetPointId(1)
                n3 = cell.GetPointId(2)
                nextcell = pdata.GetCell(i+1)
                m1 = nextcell.GetPointId(0)
                m2 = nextcell.GetPointId(1)
                m3 = nextcell.GetPointId(2)
                # Two consecutive triangles sharing a reversed edge are
                # stitched into a single quad.
                if ( (n2 == m3) and (n3 == m2) ):
                    BlenderAddFace(me, faces, n1, n2, m1, n3)
                    skiptriangle = True
                else:
                    BlenderAddFace(me, faces, n1, n2, n3)
            else:
                n1 = cell.GetPointId(0)
                n2 = cell.GetPointId(1)
                n3 = cell.GetPointId(2)
                BlenderAddFace(me, faces, n1, n2, n3)
        # Do triangle strips
        if pdata.GetCellType(i)==6:
            numpoints = cell.GetNumberOfPoints()
            if ( (flags & TRIS_TO_QUADS) and (numpoints % 2 == 0) ):
                # Even-length strip: emit quads from every other window.
                for j in range(cell.GetNumberOfPoints()-3):
                    if (j % 2 == 0):
                        n1 = cell.GetPointId(j)
                        n2 = cell.GetPointId(j+1)
                        n3 = cell.GetPointId(j+2)
                        n4 = cell.GetPointId(j+3)
                        BlenderAddFace(me, faces, n1, n2, n4, n3)
            else:
                # Alternate the winding so consecutive strip triangles keep
                # a consistent orientation.
                for j in range(cell.GetNumberOfPoints()-2):
                    if (j % 2 == 0):
                        n1 = cell.GetPointId(j)
                        n2 = cell.GetPointId(j+1)
                        n3 = cell.GetPointId(j+2)
                    else:
                        n1 = cell.GetPointId(j)
                        n2 = cell.GetPointId(j+2)
                        n3 = cell.GetPointId(j+1)
                    BlenderAddFace(me, faces, n1, n2, n3)
        # Do polygon
        if pdata.GetCellType(i)==7:
            # Add a vert at the center of the polygon,
            # and break into triangles
            x = 0.0
            y = 0.0
            z = 0.0
            scal = 0.0
            N = cell.GetNumberOfPoints()
            for j in range(N):
                point = pdata.GetPoint(cell.GetPointId(j))
                x = x + point[0]
                y = y + point[1]
                z = z + point[2]
                if (scalars != None):
                    # NOTE(review): indexes scalars by the cell-local index j
                    # rather than cell.GetPointId(j) -- looks wrong; confirm.
                    scal = scal + scalars.GetTuple1(j)
            x = x / N
            y = y / N
            z = z / N
            scal = scal / N
            newidx = len(me.verts)
            me.verts.extend(x,y,z)
            if (scalars != None):
                try:
                    # VTK 5.x
                    scolor = [0,0,0]
                    plut.GetColor(scal, scolor)
                    color = map(VTKToBlenderColor, scolor)
                except:
                    color = map(VTKToBlenderColor, plut.GetColor(scal))
                # NOTE(review): alpha is looked up from the point scalar at
                # the *cell* index i; the averaged `scal` seems intended.
                alpha = int(plut.GetOpacity(scalars.GetTuple1(i))*255)
                colors.append([color[0], color[1], color[2], alpha])
            # Add triangles connecting polynomial sides to new vert
            for j in range(N):
                n1 = cell.GetPointId(j)
                n2 = cell.GetPointId( (j+1) % N )
                n3 = newidx
                BlenderAddFace(me, faces, n1, n2, n3)
        # Do pixel
        if pdata.GetCellType(i)==8:
            n1 = cell.GetPointId(0)
            n2 = cell.GetPointId(1)
            n3 = cell.GetPointId(2)
            n4 = cell.GetPointId(3)
            BlenderAddFace(me, faces, n1, n2, n3, n4)
        # Do quad
        if pdata.GetCellType(i)==9:
            n1 = cell.GetPointId(0)
            n2 = cell.GetPointId(1)
            n3 = cell.GetPointId(2)
            n4 = cell.GetPointId(3)
            BlenderAddFace(me, faces, n1, n2, n3, n4)
    if len(edges) > 0:
        me.edges.extend(edges)
    if len(faces) > 0:
        me.faces.extend(faces)
    if ( flags & SMOOTH_FACES):
        for f in me.faces:
            f.smooth = 1
    # Some faces in me.faces may have been discarded from our
    # list, so best to compute the vertex colors after the faces
    # have been added to the mesh
    if (colors != None):
        me.vertexColors = 1
        for f in me.faces:
            f_col = []
            for v in f.v:
                f_col.append(colors[v.index])
            SetVColors(f.col, f_col)
    if not me.materials:
        if oldmats:
            me.materials = oldmats
        else:
            newmat = Material.New()
            if (colors != None):
                # vertex colors require VCOL_PAINT on the material
                newmat.mode |= Material.Modes.VCOL_PAINT
            me.materials = [newmat]
    if (newmesh==0):
        me.update()
    return me
def VTKToBlenderColor(x):
    """Map a VTK color channel in [0, 1] to Blender's 0-255 integer scale,
    rounding half away from zero for non-negative inputs."""
    scaled = 255 * float(x)
    return int(scaled + 0.5)
def BlenderAddFace(me, faces, n1, n2, n3, n4=None):
    """Append a tri (or, when n4 is given, a quad) of mesh verts to the
    pending faces list."""
    verts = me.verts
    face = [verts[n1], verts[n2], verts[n3]]
    if n4 is not None:
        face.append(verts[n4])
    faces.append(face)
def BlenderAddEdge(me, edges, n1, n2):
    """Append an edge (a pair of mesh verts) to the pending edges list."""
    verts = me.verts
    edges.append([verts[n1], verts[n2]])
def SetVColors(col, vcols):
    """Copy per-vertex RGBA values onto a face's color objects.

    Each vcols entry is [r, g, b] or [r, g, b, a]; a missing alpha defaults
    to 255 (opaque).
    """
    for j, dst in enumerate(col):
        src = vcols[j]
        dst.r = src[0]
        dst.g = src[1]
        dst.b = src[2]
        dst.a = 255 if len(src) == 3 else src[3]
| |
"""
The DoInterestManager keeps track of which parent/zones that we currently
have interest in. When you want to "look" into a zone you add an interest
to that zone. When you want to get rid of, or ignore, the objects in that
zone, remove interest in that zone.
p.s. A great deal of this code is just code moved from ClientRepository.py.
"""
from pandac.PandaModules import *
from MsgTypes import *
from direct.showbase.PythonUtil import *
from direct.showbase import DirectObject
from PyDatagram import PyDatagram
from direct.directnotify.DirectNotifyGlobal import directNotify
import types
from direct.showbase.PythonUtil import report
class InterestState:
    """Tracks one interest set: description, lifecycle state, pending request
    context, completion events, and the parent/zones it covers."""

    StateActive = 'Active'
    StatePendingDel = 'PendingDel'

    def __init__(self, desc, state, context, event, parentId, zoneIdList,
                 eventCounter, auto=False):
        self.desc = desc
        self.state = state
        self.context = context
        # Several removal requests may be in flight for the same interest
        # before the server answers the first one, so completion events
        # accumulate in a list instead of overwriting each other.
        self.events = []
        self.eventCounter = eventCounter
        if event:
            self.addEvent(event)
        self.parentId = parentId
        self.zoneIdList = zoneIdList
        self.auto = auto

    def addEvent(self, event):
        """Register another completion event and bump the shared counter."""
        self.events.append(event)
        self.eventCounter.num += 1

    def getEvents(self):
        """Return a copy of the pending completion events."""
        return list(self.events)

    def clearEvents(self):
        """Drop all pending events and release their counter slots."""
        self.eventCounter.num -= len(self.events)
        assert self.eventCounter.num >= 0
        self.events = []

    def sendEvents(self):
        """Fire every pending completion event, then clear them."""
        for event in self.getEvents():
            messenger.send(event)
        self.clearEvents()

    def setDesc(self, desc):
        """Replace the human-readable description."""
        self.desc = desc

    def isPendingDelete(self):
        """True once a removal has been requested for this interest."""
        return self.state == InterestState.StatePendingDel

    def __repr__(self):
        return 'InterestState(desc=%s, state=%s, context=%s, event=%s, parentId=%s, zoneIdList=%s)' % (
            self.desc, self.state, self.context, self.events, self.parentId, self.zoneIdList)
class InterestHandle:
    """This class helps to ensure that valid handles get passed in to
    DoInterestManager funcs.

    A handle compares equal both to another InterestHandle with the same id
    and to the bare integer id.
    """

    def __init__(self, id):
        self._id = id

    def asInt(self):
        """Return the raw integer id."""
        return self._id

    def __eq__(self, other):
        if type(self) == type(other):
            return self._id == other._id
        return self._id == other

    def __ne__(self, other):
        # Bug fix: Python 2 does not derive != from __eq__, so two equal
        # handles still compared unequal via != without this.
        return not self.__eq__(other)

    def __hash__(self):
        # Bug fix: defining __eq__ alone makes instances unhashable on
        # Python 3; hash by id so equal handles hash alike.
        return hash(self._id)

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, self._id)
# context value for interest changes that have no complete event
# (i.e. no caller is waiting on a response for the request)
NO_CONTEXT = 0
class DoInterestManager(DirectObject.DirectObject):
    """
    Top level Interest Manager
    """
    notify = directNotify.newCategory("DoInterestManager")
    # Read the debug flag from whichever showbase global exists in this
    # process (client `base` or AI `simbase`).
    try:
        tempbase = base
    except:
        tempbase = simbase
    InterestDebug = tempbase.config.GetBool('interest-debug', False)
    del tempbase
    # 'handle' is a number that represents a single interest set that the
    # client has requested; the interest set may be modified
    _HandleSerialNum = 0
    # high bit is reserved for server interests
    _HandleMask = 0x7FFF
    # 'context' refers to a single request to change an interest set
    _ContextIdSerialNum = 100
    _ContextIdMask = 0x3FFFFFFF # avoid making Python create a long
    # All live interests, shared across instances: {handle: InterestState}.
    _interests = {}
    if __debug__:
        _debug_interestHistory = []
        _debug_maxDescriptionLen = 40
    _SerialGen = SerialNumGen()
    _SerialNum = serialNum()
    def __init__(self):
        """Set up per-instance event names and completion bookkeeping."""
        assert DoInterestManager.notify.debugCall()
        DirectObject.DirectObject.__init__(self)
        # Unique event names announced when interests are added/removed.
        self._addInterestEvent = uniqueName('DoInterestManager-Add')
        self._removeInterestEvent = uniqueName('DoInterestManager-Remove')
        self._noNewInterests = False
        self._completeDelayedCallback = None
        # keep track of request contexts that have not completed
        self._completeEventCount = ScratchPad(num=0)
        self._allInterestsCompleteCallbacks = []
def __verbose(self):
return self.InterestDebug or self.getVerbose()
def _getAnonymousEvent(self, desc):
return 'anonymous-%s-%s' % (desc, DoInterestManager._SerialGen.next())
def setNoNewInterests(self, flag):
self._noNewInterests = flag
def noNewInterests(self):
return self._noNewInterests
def setAllInterestsCompleteCallback(self, callback):
if ((self._completeEventCount.num == 0) and
(self._completeDelayedCallback is None)):
callback()
else:
self._allInterestsCompleteCallbacks.append(callback)
def getAllInterestsCompleteEvent(self):
return 'allInterestsComplete-%s' % DoInterestManager._SerialNum
def resetInterestStateForConnectionLoss(self):
DoInterestManager._interests.clear()
self._completeEventCount = ScratchPad(num=0)
if __debug__:
self._addDebugInterestHistory("RESET", "", 0, 0, 0, [])
def isValidInterestHandle(self, handle):
# pass in a handle (or anything else) and this will return true if it is
# still a valid interest handle
if not isinstance(handle, InterestHandle):
return False
return handle.asInt() in DoInterestManager._interests
def updateInterestDescription(self, handle, desc):
iState = DoInterestManager._interests.get(handle.asInt())
if iState:
iState.setDesc(desc)
    def addInterest(self, parentId, zoneIdList, description, event=None):
        """
        Look into a (set of) zone(s).

        Returns an InterestHandle for the new interest, or None when new
        interests are currently refused.  If `event` is given it is fired
        when the server confirms the interest is open.
        """
        assert DoInterestManager.notify.debugCall()
        handle = self._getNextHandle()
        # print 'base.cr.addInterest(',description,',',handle,'):',globalClock.getFrameCount()
        if self._noNewInterests:
            DoInterestManager.notify.warning(
                "addInterest: addingInterests on delete: %s" % (handle))
            return
        # make sure we've got parenting rules set in the DC
        if parentId not in (self.getGameDoId(),):
            parent = self.getDo(parentId)
            if not parent:
                DoInterestManager.notify.error(
                    'addInterest: attempting to add interest under unknown object %s' % parentId)
            else:
                if not parent.hasParentingRules():
                    DoInterestManager.notify.error(
                        'addInterest: no setParentingRules defined in the DC for object %s (%s)'
                        '' % (parentId, parent.__class__.__name__))
        # A context id is only allocated when a caller waits on an event;
        # otherwise the request goes out with the no-response context 0.
        if event:
            contextId = self._getNextContextId()
        else:
            contextId = 0
            # event = self._getAnonymousEvent('addInterest')
        DoInterestManager._interests[handle] = InterestState(
            description, InterestState.StateActive, contextId, event, parentId, zoneIdList, self._completeEventCount)
        if self.__verbose():
            print 'CR::INTEREST.addInterest(handle=%s, parentId=%s, zoneIdList=%s, description=%s, event=%s)' % (
                handle, parentId, zoneIdList, description, event)
        self._sendAddInterest(handle, contextId, parentId, zoneIdList, description)
        if event:
            messenger.send(self._getAddInterestEvent(), [event])
        assert self.printInterestsIfDebug()
        return InterestHandle(handle)
    def addAutoInterest(self, parentId, zoneIdList, description):
        """
        Look into a (set of) zone(s).

        Like addInterest, but records a server-initiated ("auto") interest:
        no completion event, no context, and no add request is sent.
        """
        assert DoInterestManager.notify.debugCall()
        handle = self._getNextHandle()
        if self._noNewInterests:
            DoInterestManager.notify.warning(
                "addInterest: addingInterests on delete: %s" % (handle))
            return
        # make sure we've got parenting rules set in the DC
        if parentId not in (self.getGameDoId(),):
            parent = self.getDo(parentId)
            if not parent:
                DoInterestManager.notify.error(
                    'addInterest: attempting to add interest under unknown object %s' % parentId)
            else:
                if not parent.hasParentingRules():
                    DoInterestManager.notify.error(
                        'addInterest: no setParentingRules defined in the DC for object %s (%s)'
                        '' % (parentId, parent.__class__.__name__))
        # context NO_CONTEXT (0), no event, auto=True
        DoInterestManager._interests[handle] = InterestState(
            description, InterestState.StateActive, 0, None, parentId, zoneIdList, self._completeEventCount, True)
        if self.__verbose():
            print 'CR::INTEREST.addInterest(handle=%s, parentId=%s, zoneIdList=%s, description=%s)' % (
                handle, parentId, zoneIdList, description)
        assert self.printInterestsIfDebug()
        return InterestHandle(handle)
    def removeInterest(self, handle, event = None):
        """
        Stop looking in a (set of) zone(s).

        Returns True when the handle named a known interest.  `event` (or an
        anonymous stand-in) is fired once the removal completes.
        """
        # print 'base.cr.removeInterest(',handle,'):',globalClock.getFrameCount()
        assert DoInterestManager.notify.debugCall()
        assert isinstance(handle, InterestHandle)
        existed = False
        if not event:
            event = self._getAnonymousEvent('removeInterest')
        handle = handle.asInt()
        if handle in DoInterestManager._interests:
            existed = True
            intState = DoInterestManager._interests[handle]
            if event:
                messenger.send(self._getRemoveInterestEvent(),
                               [event, intState.parentId, intState.zoneIdList])
            if intState.isPendingDelete():
                self.notify.warning(
                    'removeInterest: interest %s already pending removal' %
                    handle)
                # this interest is already pending delete, so let's just tack this
                # callback onto the list
                if event is not None:
                    intState.addEvent(event)
            else:
                if len(intState.events) > 0:
                    # we're not pending a removal, but we have outstanding events?
                    # probably we are waiting for an add/alter complete.
                    # should we send those events now?
                    assert self.notify.warning('removeInterest: abandoning events: %s' %
                                               intState.events)
                    intState.clearEvents()
                intState.state = InterestState.StatePendingDel
                contextId = self._getNextContextId()
                intState.context = contextId
                if event:
                    intState.addEvent(event)
                self._sendRemoveInterest(handle, contextId)
                # NOTE(review): `event` is always truthy here (an anonymous
                # event is substituted above), so this branch looks dead.
                if not event:
                    self._considerRemoveInterest(handle)
                if self.__verbose():
                    print 'CR::INTEREST.removeInterest(handle=%s, event=%s)' % (
                        handle, event)
        else:
            DoInterestManager.notify.warning(
                "removeInterest: handle not found: %s" % (handle))
        assert self.printInterestsIfDebug()
        return existed
    def removeAutoInterest(self, handle):
        """
        Stop looking in a (set of) zone(s).

        Counterpart of addAutoInterest: no completion event and no remove
        request is sent to the server.  Returns True when the handle named a
        known interest.
        """
        assert DoInterestManager.notify.debugCall()
        assert isinstance(handle, InterestHandle)
        existed = False
        handle = handle.asInt()
        if handle in DoInterestManager._interests:
            existed = True
            intState = DoInterestManager._interests[handle]
            if intState.isPendingDelete():
                self.notify.warning(
                    'removeInterest: interest %s already pending removal' %
                    handle)
                # this interest is already pending delete, so let's just tack this
                # callback onto the list
            else:
                if len(intState.events) > 0:
                    # we're not pending a removal, but we have outstanding events?
                    # probably we are waiting for an add/alter complete.
                    # should we send those events now?
                    self.notify.warning('removeInterest: abandoning events: %s' %
                                        intState.events)
                    intState.clearEvents()
                intState.state = InterestState.StatePendingDel
                self._considerRemoveInterest(handle)
                if self.__verbose():
                    print 'CR::INTEREST.removeAutoInterest(handle=%s)' % (handle)
        else:
            DoInterestManager.notify.warning(
                "removeInterest: handle not found: %s" % (handle))
        assert self.printInterestsIfDebug()
        return existed
    @report(types = ['args'], dConfigParam = 'guildmgr')
    def removeAIInterest(self, handle):
        """
        handle is NOT an InterestHandle. It's just a bare integer representing an
        AI-opened interest. We're making the client close down this interest since
        the AI has trouble removing interests (that it opened) when the avatar goes
        offline. See GuildManager(UD) for how it's being used.
        """
        self._sendRemoveAIInterest(handle)
    def alterInterest(self, handle, parentId, zoneIdList, description=None,
                      event=None):
        """
        Removes old interests and adds new interests.
        Note that when an interest is changed, only the most recent
        change's event will be triggered. Previous events are abandoned.
        If this is a problem, consider opening multiple interests.

        Returns True if the handle was found and the alter was sent,
        False otherwise (unknown handle, or interests are shutting down).
        """
        assert DoInterestManager.notify.debugCall()
        assert isinstance(handle, InterestHandle)
        #assert not self._noNewInterests
        handle = handle.asInt()
        if self._noNewInterests:
            DoInterestManager.notify.warning(
                "alterInterest: addingInterests on delete: %s" % (handle))
            return
        exists = False
        if event is None:
            event = self._getAnonymousEvent('alterInterest')
        if handle in DoInterestManager._interests:
            if description is not None:
                DoInterestManager._interests[handle].desc = description
            else:
                description = DoInterestManager._interests[handle].desc
            # are we overriding an existing change? If a server reply is
            # still outstanding (context != NO_CONTEXT), its events are
            # dropped in favor of this newer change's event.
            if DoInterestManager._interests[handle].context != NO_CONTEXT:
                DoInterestManager._interests[handle].clearEvents()
            contextId = self._getNextContextId()
            DoInterestManager._interests[handle].context = contextId
            DoInterestManager._interests[handle].parentId = parentId
            DoInterestManager._interests[handle].zoneIdList = zoneIdList
            DoInterestManager._interests[handle].addEvent(event)
            if self.__verbose():
                print 'CR::INTEREST.alterInterest(handle=%s, parentId=%s, zoneIdList=%s, description=%s, event=%s)' % (
                    handle, parentId, zoneIdList, description, event)
            self._sendAddInterest(handle, contextId, parentId, zoneIdList, description, action='modify')
            exists = True
            assert self.printInterestsIfDebug()
        else:
            DoInterestManager.notify.warning(
                "alterInterest: handle not found: %s" % (handle))
        return exists
def openAutoInterests(self, obj):
if hasattr(obj, '_autoInterestHandle'):
# must be multiple inheritance
self.notify.debug('openAutoInterests(%s): interests already open' % obj.__class__.__name__)
return
autoInterests = obj.getAutoInterests()
obj._autoInterestHandle = None
if not len(autoInterests):
return
obj._autoInterestHandle = self.addAutoInterest(obj.doId, autoInterests, '%s-autoInterest' % obj.__class__.__name__)
def closeAutoInterests(self, obj):
if not hasattr(obj, '_autoInterestHandle'):
# must be multiple inheritance
self.notify.debug('closeAutoInterests(%s): interests already closed' % obj)
return
if obj._autoInterestHandle is not None:
self.removeAutoInterest(obj._autoInterestHandle)
del obj._autoInterestHandle
# events for InterestWatcher
    def _getAddInterestEvent(self):
        # Accessor for the add-interest event name (see InterestWatcher note above).
        return self._addInterestEvent
    def _getRemoveInterestEvent(self):
        # Accessor for the remove-interest event name (see InterestWatcher note above).
        return self._removeInterestEvent
    def _getInterestState(self, handle):
        # Raw access to the InterestState record; raises KeyError for
        # unknown handles.
        return DoInterestManager._interests[handle]
def _getNextHandle(self):
handle = DoInterestManager._HandleSerialNum
while True:
handle = (handle + 1) & DoInterestManager._HandleMask
# skip handles that are already in use
if handle not in DoInterestManager._interests:
break
DoInterestManager.notify.warning(
'interest %s already in use' % handle)
DoInterestManager._HandleSerialNum = handle
return DoInterestManager._HandleSerialNum
def _getNextContextId(self):
contextId = DoInterestManager._ContextIdSerialNum
while True:
contextId = (contextId + 1) & DoInterestManager._ContextIdMask
# skip over the 'no context' id
if contextId != NO_CONTEXT:
break
DoInterestManager._ContextIdSerialNum = contextId
return DoInterestManager._ContextIdSerialNum
    def _considerRemoveInterest(self, handle):
        """
        Consider whether we should cull the interest set: the record for
        `handle` is deleted only once it is pending delete AND no server
        reply is still outstanding (context == NO_CONTEXT).
        """
        assert DoInterestManager.notify.debugCall()
        if handle in DoInterestManager._interests:
            if DoInterestManager._interests[handle].isPendingDelete():
                # make sure there is no pending event for this interest
                if DoInterestManager._interests[handle].context == NO_CONTEXT:
                    assert len(DoInterestManager._interests[handle].events) == 0
                    del DoInterestManager._interests[handle]
if __debug__:
        def printInterestsIfDebug(self):
            # Dump the interest tables when notify debug is enabled.
            # Always returns 1 so callers can wrap it in an assert (and the
            # whole call is stripped from optimized builds).
            if DoInterestManager.notify.getDebug():
                self.printInterests()
            return 1 # for assert
        def _addDebugInterestHistory(self, action, description, handle,
                                     contextId, parentId, zoneIdList):
            # Record one interest operation in the class-wide history list,
            # tracking the widest description for aligned printing later.
            if description is None:
                description = ''
            DoInterestManager._debug_interestHistory.append(
                (action, description, handle, contextId, parentId, zoneIdList))
            DoInterestManager._debug_maxDescriptionLen = max(
                DoInterestManager._debug_maxDescriptionLen, len(description))
        def printInterestHistory(self):
            # Human-readable dump of every recorded interest operation.
            print "***************** Interest History *************"
            format = '%9s %' + str(DoInterestManager._debug_maxDescriptionLen) + 's %6s %6s %9s %s'
            print format % (
                "Action", "Description", "Handle", "Context", "ParentId",
                "ZoneIdList")
            for i in DoInterestManager._debug_interestHistory:
                print format % tuple(i)
            print "Note: interests with a Context of 0 do not get" \
                  " done/finished notices."
        def printInterestSets(self):
            # Tabular dump of the currently open interest records.
            print "******************* Interest Sets **************"
            format = '%6s %' + str(DoInterestManager._debug_maxDescriptionLen) + 's %11s %11s %8s %8s %8s'
            print format % (
                "Handle", "Description",
                "ParentId", "ZoneIdList",
                "State", "Context",
                "Event")
            for id, state in DoInterestManager._interests.items():
                # Collapse the event list to something compact per row.
                if len(state.events) == 0:
                    event = ''
                elif len(state.events) == 1:
                    event = state.events[0]
                else:
                    event = state.events
                print format % (id, state.desc,
                                state.parentId, state.zoneIdList,
                                state.state, state.context,
                                event)
            print "************************************************"
        def printInterests(self):
            # Convenience: dump both the operation history and the current sets.
            self.printInterestHistory()
            self.printInterestSets()
    def _sendAddInterest(self, handle, contextId, parentId, zoneIdList, description,
                         action=None):
        """
        Part of the new otp-server code.
        handle is a client-side created number that refers to
        a set of interests. The same handle number doesn't
        necessarily have any relationship to the same handle
        on another client.

        Builds and sends a CLIENT_ADD_INTEREST datagram; zoneIdList may be
        a single zone id or a list of zone ids.
        """
        assert DoInterestManager.notify.debugCall()
        if __debug__:
            # NOTE(review): this sorts the caller's list in place, and only
            # in debug builds -- confirm that callers don't rely on order.
            if isinstance(zoneIdList, types.ListType):
                zoneIdList.sort()
            if action is None:
                action = 'add'
            self._addDebugInterestHistory(
                action, description, handle, contextId, parentId, zoneIdList)
        if parentId == 0:
            DoInterestManager.notify.error(
                'trying to set interest to invalid parent: %s' % parentId)
        datagram = PyDatagram()
        # Add message type
        datagram.addUint16(CLIENT_ADD_INTEREST)
        datagram.addUint16(handle)
        datagram.addUint32(contextId)
        datagram.addUint32(parentId)
        if isinstance(zoneIdList, types.ListType):
            # Send a sorted, de-duplicated copy of the zone list.
            vzl = list(zoneIdList)
            vzl.sort()
            uniqueElements(vzl)
            for zone in vzl:
                datagram.addUint32(zone)
        else:
            datagram.addUint32(zoneIdList)
        self.send(datagram)
def _sendRemoveInterest(self, handle, contextId):
"""
handle is a client-side created number that refers to
a set of interests. The same handle number doesn't
necessarily have any relationship to the same handle
on another client.
"""
assert DoInterestManager.notify.debugCall()
assert handle in DoInterestManager._interests
datagram = PyDatagram()
# Add message type
datagram.addUint16(CLIENT_REMOVE_INTEREST)
datagram.addUint16(handle)
if contextId != 0:
datagram.addUint32(contextId)
self.send(datagram)
if __debug__:
state = DoInterestManager._interests[handle]
self._addDebugInterestHistory(
"remove", state.desc, handle, contextId,
state.parentId, state.zoneIdList)
def _sendRemoveAIInterest(self, handle):
"""
handle is a bare int, NOT an InterestHandle. Use this to
close an AI opened interest.
"""
datagram = PyDatagram()
# Add message type
datagram.addUint16(CLIENT_REMOVE_INTEREST)
datagram.addUint16((1<<15) + handle)
self.send(datagram)
def cleanupWaitAllInterestsComplete(self):
if self._completeDelayedCallback is not None:
self._completeDelayedCallback.destroy()
self._completeDelayedCallback = None
    def queueAllInterestsCompleteEvent(self, frames=5):
        """
        Schedule the global all-interests-complete event to fire after
        `frames` frames with no new pending interest completes.
        """
        # wait for N frames, if no new interests, send out all-done event
        # calling this is OK even if there are no pending interest completes
        def checkMoreInterests():
            # if there are new interests, cancel this delayed callback, another
            # will automatically be scheduled when all interests complete
            # print 'checkMoreInterests(',self._completeEventCount.num,'):',globalClock.getFrameCount()
            return self._completeEventCount.num > 0
        def sendEvent():
            messenger.send(self.getAllInterestsCompleteEvent())
            for callback in self._allInterestsCompleteCallbacks:
                callback()
            self._allInterestsCompleteCallbacks = []
        self.cleanupWaitAllInterestsComplete()
        self._completeDelayedCallback = FrameDelayedCall(
            'waitForAllInterestCompletes',
            callback=sendEvent,
            frames=frames,
            cancelFunc=checkMoreInterests)
        # NOTE(review): clearing these locals has no effect on the closures
        # already captured by FrameDelayedCall -- presumably intended to
        # break a local reference cycle; confirm before removing.
        checkMoreInterests = None
        sendEvent = None
    def handleInterestDoneMessage(self, di):
        """
        This handles the interest done messages and may dispatch an event.
        The datagram carries the handle (uint16) and context id (uint32);
        events are only fired when the context matches the most recent
        change for that handle (stale replies are logged and dropped).
        """
        assert DoInterestManager.notify.debugCall()
        handle = di.getUint16()
        contextId = di.getUint32()
        if self.__verbose():
            print 'CR::INTEREST.interestDone(handle=%s)' % handle
        DoInterestManager.notify.debug(
            "handleInterestDoneMessage--> Received handle %s, context %s" % (
            handle, contextId))
        if handle in DoInterestManager._interests:
            eventsToSend = []
            # if the context matches, send out the event
            if contextId == DoInterestManager._interests[handle].context:
                DoInterestManager._interests[handle].context = NO_CONTEXT
                # the event handlers may call back into the interest manager. Send out
                # the events after we're once again in a stable state.
                #DoInterestManager._interests[handle].sendEvents()
                eventsToSend = list(DoInterestManager._interests[handle].getEvents())
                DoInterestManager._interests[handle].clearEvents()
            else:
                DoInterestManager.notify.debug(
                    "handleInterestDoneMessage--> handle: %s: Expecting context %s, got %s" % (
                    handle, DoInterestManager._interests[handle].context, contextId))
            if __debug__:
                state = DoInterestManager._interests[handle]
                self._addDebugInterestHistory(
                    "finished", state.desc, handle, contextId, state.parentId,
                    state.zoneIdList)
            self._considerRemoveInterest(handle)
            for event in eventsToSend:
                messenger.send(event)
        else:
            DoInterestManager.notify.warning(
                "handleInterestDoneMessage: handle not found: %s" % (handle))
        # if there are no more outstanding interest-completes, send out global all-done event
        if self._completeEventCount.num == 0:
            self.queueAllInterestsCompleteEvent()
        assert self.printInterestsIfDebug()
if __debug__:
import unittest
    class AsyncTestCase(unittest.TestCase):
        """
        TestCase whose completion is signalled asynchronously: the test
        calls setCompleted() when its async work finishes, and isCompleted()
        polls the flag (False until then).
        """
        def setCompleted(self):
            self._async_completed = True
        def isCompleted(self):
            return getattr(self, '_async_completed', False)
    class AsyncTestSuite(unittest.TestSuite):
        """Suite type produced by AsyncTestLoader for async interest tests."""
        pass
    class AsyncTestLoader(unittest.TestLoader):
        """TestLoader that builds AsyncTestSuite instances."""
        suiteClass = AsyncTestSuite
class AsyncTextTestRunner(unittest.TextTestRunner):
def run(self, testCase):
result = self._makeResult()
startTime = time.time()
test(result)
stopTime = time.time()
timeTaken = stopTime - startTime
result.printErrors()
self.stream.writeln(result.separator2)
run = result.testsRun
self.stream.writeln("Ran %d test%s in %.3fs" %
(run, run != 1 and "s" or "", timeTaken))
self.stream.writeln()
if not result.wasSuccessful():
self.stream.write("FAILED (")
failed, errored = map(len, (result.failures, result.errors))
if failed:
self.stream.write("failures=%d" % failed)
if errored:
if failed: self.stream.write(", ")
self.stream.write("errors=%d" % errored)
self.stream.writeln(")")
else:
self.stream.writeln("OK")
return result
    class TestInterestAddRemove(AsyncTestCase, DirectObject.DirectObject):
        """
        Round-trip interest test: add an interest, wait for the add to
        complete, remove it, and mark the case completed once the removal
        is confirmed.
        """
        def testInterestAdd(self):
            event = uniqueName('InterestAdd')
            self.acceptOnce(event, self.gotInterestAddResponse)
            self.handle = base.cr.addInterest(base.cr.GameGlobalsId, 100, 'TestInterest', event=event)
        def gotInterestAddResponse(self):
            # Add completed; now remove the same interest.
            event = uniqueName('InterestRemove')
            self.acceptOnce(event, self.gotInterestRemoveResponse)
            base.cr.removeInterest(self.handle, event=event)
        def gotInterestRemoveResponse(self):
            self.setCompleted()
def runTests():
suite = unittest.makeSuite(TestInterestAddRemove)
unittest.AsyncTextTestRunner(verbosity=2).run(suite)
| |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
#
# pylint: disable=no-member, chained-comparison, unnecessary-comprehension, not-callable
"""
This module provides objects to inspect the status of the Abinit tasks at run-time.
by extracting information from the main output file (text format).
"""
import os
from collections import OrderedDict
from collections.abc import Iterable, Iterator, Mapping
import numpy as np
from monty.collections import AttrDict
from monty.functools import lazy_property
from tabulate import tabulate
try:
import ruamel.yaml as yaml
except ImportError:
try:
import ruamel_yaml as yaml # type: ignore # noqa
except ImportError:
import yaml # type: ignore # noqa
from pymatgen.util.plotting import add_fig_kwargs, get_axarray_fig_plt
def straceback():
    """Return the traceback of the currently handled exception as a string."""
    from traceback import format_exc
    return format_exc()
def _magic_parser(stream, magic):
    """
    Parse the section with the SCF cycle
    Returns:
        dict where the key are the name of columns and
        the values are list of numbers. None if no section was found.
    .. warning::
        The parser is very fragile and should be replaced by YAML.
    """
    # Example (SCF cycle, similar format is used for phonons):
    #
    # iter Etot(hartree) deltaE(h) residm vres2
    # ETOT 1 -8.8604027880849 -8.860E+00 2.458E-02 3.748E+00
    # At SCF step 5 vres2 = 3.53E-08 < tolvrs= 1.00E-06 =>converged.
    #
    # in_doc counts lines seen since the magic header (the header itself
    # is line 1 and is skipped); fields maps column name -> list of values.
    in_doc, fields = 0, None
    for line in stream:
        line = line.strip()
        if line.startswith(magic):
            # print("Found magic token in line:", line)
            keys = line.split()
            fields = OrderedDict((k, []) for k in keys)
        if fields is not None:
            # print(line)
            in_doc += 1
            if in_doc == 1:
                continue
            # End of the section or empty SCF cycle
            if not line or line.startswith("prteigrs"):
                break
            # print("Try to parse line:", line)
            # First token is the row label (e.g. "ETOT"); the rest are the
            # numeric column values.
            tokens = list(map(float, line.split()[1:]))
            assert len(tokens) == len(keys)
            for l, v in zip(fields.values(), tokens):
                l.append(v)
    # Convert values to numpy arrays.
    if fields:
        return OrderedDict([(k, np.array(v)) for k, v in fields.items()])
    return None
def plottable_from_outfile(filepath):
    """
    Factory function that returns a plottable object by inspecting the main
    output file of abinit.
    Returns None if it is not able to detect the class to instantiate.
    """
    # TODO: figure out how to detect the type of calculation without having
    # to parse the input. Possible approach: YAML doc, e.g.
    #   with YamlTokenizer(filepath) as r:
    #       doc = r.next_doc_with_tag("!CalculationType")
    #       d = yaml.safe_load(doc.text_notag)
    #       calc_type = d["calculation_type"]
    #   ctype2class = {
    #       "Ground State": GroundStateScfCycle,
    #       "Phonon": PhononScfCycle,
    #       "Relaxation": Relaxation,
    #   }
    #   obj = ctype2class.get(calc_type, None)
    # For now a ground-state calculation is always assumed.
    klass = GroundStateScfCycle
    if klass is None:
        return None
    return klass.from_file(filepath)
# Use log scale for these variables (plotted with ax.set_yscale("log")).
_VARS_SUPPORTING_LOGSCALE = set(["residm", "vres2", "nres2"])
# Hard-coded y-range for selected variables (applied when the data span
# exceeds this window).
_VARS_WITH_YRANGE = {
    "deltaE(h)": (-1e-3, +1e-3),
    "deltaE(Ha)": (-1e-3, +1e-3),
}
class ScfCycle(Mapping):
    """
    Base class for a single SCF cycle read from the Abinit main output.
    It essentially consists of a dictionary mapping string
    to list of floats containing the data at the different iterations.
    Subclasses must define MAGIC, the header line that starts the table.
    .. attributes::
        num_iterations: Number of iterations performed.
    """
    MAGIC = "Must be defined by the subclass." ""
    def __init__(self, fields):
        """
        Args:
            fields: Dictionary with label --> list of numerical values.
        """
        self.fields = fields
        # All columns must have the same number of iterations.
        all_lens = [len(lst) for lst in self.values()]
        self.num_iterations = all_lens[0]
        assert all(n == self.num_iterations for n in all_lens)
    def __getitem__(self, slice):
        return self.fields.__getitem__(slice)
    def __iter__(self):
        return self.fields.__iter__()
    def __len__(self):
        return len(self.fields)
    def __str__(self):
        return self.to_string()
    def to_string(self, verbose=0):
        """String representation (one row per iteration)."""
        rows = [[it + 1] + list(map(str, (self[k][it] for k in self.keys()))) for it in range(self.num_iterations)]
        return tabulate(rows, headers=["Iter"] + list(self.keys()))
    @property
    def last_iteration(self):
        """Returns a dictionary with the values of the last iteration."""
        return {k: v[-1] for k, v in self.items()}
    @classmethod
    def from_file(cls, filepath):
        """Read the first occurrence of ScfCycle from file."""
        with open(filepath, "rt") as stream:
            return cls.from_stream(stream)
    @classmethod
    def from_stream(cls, stream):
        """
        Read the first occurrence of ScfCycle from stream.
        Returns:
            None if no `ScfCycle` entry is found.
        """
        fields = _magic_parser(stream, magic=cls.MAGIC)
        if fields:
            # The "iter" column is implicit (row index), drop it.
            fields.pop("iter")
            return cls(fields)
        return None
    @add_fig_kwargs
    def plot(self, ax_list=None, fontsize=12, **kwargs):
        """
        Uses matplotlib to plot the evolution of the SCF cycle.
        Args:
            ax_list: List of axes. If None a new figure is produced.
            fontsize: legend fontsize.
            kwargs: keyword arguments are passed to ax.plot
        Returns: matplotlib figure
        """
        # Build grid of plots: two columns once there is more than one quantity.
        num_plots, ncols, nrows = len(self), 1, 1
        if num_plots > 1:
            ncols = 2
            nrows = num_plots // ncols + num_plots % ncols
        ax_list, fig, plot = get_axarray_fig_plt(
            ax_list, nrows=nrows, ncols=ncols, sharex=True, sharey=False, squeeze=False
        )
        ax_list = np.array(ax_list).ravel()
        # 1-based iteration numbers for the x-axis.
        iter_num = np.array(list(range(self.num_iterations))) + 1
        label = kwargs.pop("label", None)
        for i, ((key, values), ax) in enumerate(zip(self.items(), ax_list)):
            ax.grid(True)
            ax.set_xlabel("Iteration Step")
            ax.set_xticks(iter_num, minor=False)
            ax.set_ylabel(key)
            xx, yy = iter_num, values
            if self.num_iterations > 1:
                # Don't show the first iteration since it's not very useful.
                xx, yy = xx[1:], values[1:]
            if not kwargs and label is None:
                ax.plot(xx, yy, "-o", lw=2.0)
            else:
                # Only the first subplot carries the legend label.
                ax.plot(xx, yy, label=label if i == 0 else None, **kwargs)
            if key in _VARS_SUPPORTING_LOGSCALE and np.all(yy > 1e-22):
                ax.set_yscale("log")
            if key in _VARS_WITH_YRANGE:
                # Clamp the y-range when the data span exceeds the window.
                ymin, ymax = _VARS_WITH_YRANGE[key]
                val_min, val_max = np.min(yy), np.max(yy)
                if abs(val_max - val_min) > abs(ymax - ymin):
                    ax.set_ylim(ymin, ymax)
            if label is not None:
                ax.legend(loc="best", fontsize=fontsize, shadow=True)
        # Get around a bug in matplotlib.
        if num_plots % ncols != 0:
            ax_list[-1].plot(xx, yy, lw=0.0)
            ax_list[-1].axis("off")
        return fig
class GroundStateScfCycle(ScfCycle):
    """Result of the Ground State self-consistent cycle."""
    MAGIC = "iter Etot(hartree)"
    @property
    def last_etotal(self):
        """The total energy (hartree) at the last iteration."""
        return self["Etot(hartree)"][-1]
class D2DEScfCycle(ScfCycle):
    """Result of the Phonon (2nd-order derivative) self-consistent cycle."""
    MAGIC = "iter 2DEtotal(Ha)"
    @property
    def last_etotal(self):
        """The 2-nd order derivative of the energy at the last iteration."""
        return self["2DEtotal(Ha)"][-1]
class PhononScfCycle(D2DEScfCycle):
    """Iterations of the DFPT SCF cycle for phonons."""
class CyclesPlotter:
    """Relies on the plot method of cycle objects to build multiple subfigures."""

    def __init__(self):
        """Initialize object with empty label and cycle lists."""
        self.labels = []
        self.cycles = []

    def items(self):
        """Iterate over (label, cycle) pairs."""
        return zip(self.labels, self.cycles)

    def add_label_cycle(self, label, cycle):
        """Add new cycle to the plotter with label `label`."""
        self.labels.append(label)
        self.cycles.append(cycle)

    @add_fig_kwargs
    def combiplot(self, fontsize=8, **kwargs):
        """
        Compare multiple cycles on a grid: one subplot per quantity,
        all cycles on the same subplot.
        Args:
            fontsize: Legend fontsize.
        Returns: matplotlib figure (None if no cycles have been added).
        """
        # Reuse the axes returned by the first plot so every cycle is drawn
        # on the same grid of subplots.  (The unused enumerate index of the
        # original loop has been dropped.)
        fig = None
        ax_list = None
        for label, cycle in self.items():
            fig = cycle.plot(
                ax_list=ax_list,
                label=label,
                fontsize=fontsize,
                lw=2.0,
                marker="o",
                linestyle="-",
                show=False,
            )
            ax_list = fig.axes
        return fig

    def slideshow(self, **kwargs):
        """
        Produce slides show of the different cycles. One plot per cycle.
        """
        for label, cycle in self.items():
            cycle.plot(title=label, tight_layout=True)
class Relaxation(Iterable):
    """
    A list of :class:`GroundStateScfCycle` objects, one per relaxation step.
    .. attributes::
        num_iterations: Number of iterations performed.
    .. note::
        Forces, stresses and crystal structures are missing.
        This object is mainly used to analyze the behavior of the Scf cycles
        during the structural relaxation. A more powerful and detailed analysis
        can be obtained by using the HIST.nc file.
    """
    def __init__(self, cycles):
        """
        Args
            cycles: list of `GroundStateScfCycle` objects.
        """
        self.cycles = cycles
        self.num_iterations = len(self.cycles)
    def __iter__(self):
        return self.cycles.__iter__()
    def __len__(self):
        return self.cycles.__len__()
    def __getitem__(self, slice):
        return self.cycles[slice]
    def __str__(self):
        return self.to_string()
    def to_string(self, verbose=0):
        """String representation: one section per relaxation step."""
        lines = []
        app = lines.append
        for i, cycle in enumerate(self):
            app("")
            app("RELAXATION STEP: %d" % (i + 1))
            app(cycle.to_string(verbose=verbose))
        return "\n".join(lines)
    @classmethod
    def from_file(cls, filepath):
        """Initialize the object from the Abinit main output file."""
        with open(filepath, "rt") as stream:
            return cls.from_stream(stream)
    @classmethod
    def from_stream(cls, stream):
        """
        Extract data from stream. Returns None if some error occurred.
        """
        # Keep reading SCF cycles until the parser finds no more sections.
        cycles = []
        while True:
            scf_cycle = GroundStateScfCycle.from_stream(stream)
            if scf_cycle is None:
                break
            cycles.append(scf_cycle)
        return cls(cycles) if cycles else None
    @lazy_property
    def history(self):
        """
        Ordered Dictionary of lists with the evolution of
        the data as function of the relaxation step.
        Each entry holds the last-iteration value of one SCF quantity,
        per relaxation step, converted to a numpy array.
        """
        history = OrderedDict()
        for cycle in self:
            d = cycle.last_iteration
            for k, v in d.items():
                if k in history:
                    history[k].append(v)
                else:
                    history[k] = [v]
        # Convert to numpy arrays.
        for k, v in history.items():
            history[k] = np.array(v)
        return history
    def slideshow(self, **kwargs):
        """
        Uses matplotlib to plot the evolution of the structural relaxation,
        one figure per relaxation step.
        Args:
            ax_list: List of axes. If None a new figure is produced.
        Returns:
            `matplotlib` figure
        """
        for i, cycle in enumerate(self.cycles):
            cycle.plot(
                title="Relaxation step %s" % (i + 1),
                tight_layout=kwargs.pop("tight_layout", True),
                show=kwargs.pop("show", True),
            )
    @add_fig_kwargs
    def plot(self, ax_list=None, fontsize=12, **kwargs):
        """
        Plot relaxation history i.e. the results of the last iteration of each SCF cycle.
        Args:
            ax_list: List of axes. If None a new figure is produced.
            fontsize: legend fontsize.
            kwargs: keyword arguments are passed to ax.plot
        Returns: matplotlib figure
        """
        history = self.history
        # Build grid of plots: two columns once there is more than one quantity.
        num_plots, ncols, nrows = len(history), 1, 1
        if num_plots > 1:
            ncols = 2
            nrows = num_plots // ncols + num_plots % ncols
        ax_list, fig, plot = get_axarray_fig_plt(
            ax_list, nrows=nrows, ncols=ncols, sharex=True, sharey=False, squeeze=False
        )
        ax_list = np.array(ax_list).ravel()
        # 1-based relaxation-step numbers for the x-axis.
        iter_num = np.array(list(range(self.num_iterations))) + 1
        label = kwargs.pop("label", None)
        for i, ((key, values), ax) in enumerate(zip(history.items(), ax_list)):
            ax.grid(True)
            ax.set_xlabel("Relaxation Step")
            ax.set_xticks(iter_num, minor=False)
            ax.set_ylabel(key)
            xx, yy = iter_num, values
            if not kwargs and label is None:
                ax.plot(xx, yy, "-o", lw=2.0)
            else:
                # Only the first subplot carries the legend label.
                ax.plot(xx, yy, label=label if i == 0 else None, **kwargs)
            if key in _VARS_SUPPORTING_LOGSCALE and np.all(yy > 1e-22):
                ax.set_yscale("log")
            if key in _VARS_WITH_YRANGE:
                # Clamp the y-range when the data span exceeds the window.
                ymin, ymax = _VARS_WITH_YRANGE[key]
                val_min, val_max = np.min(yy), np.max(yy)
                if abs(val_max - val_min) > abs(ymax - ymin):
                    ax.set_ylim(ymin, ymax)
            if label is not None:
                ax.legend(loc="best", fontsize=fontsize, shadow=True)
        # Get around a bug in matplotlib.
        if num_plots % ncols != 0:
            ax_list[-1].plot(xx, yy, lw=0.0)
            ax_list[-1].axis("off")
        return fig
# TODO
# class HaydockIterations(Iterable):
# """This object collects info on the different steps of the Haydock technique used in the Bethe-Salpeter code"""
# @classmethod
# def from_file(cls, filepath):
# """Initialize the object from file."""
# with open(filepath, "rt") as stream:
# return cls.from_stream(stream)
#
# @classmethod
# def from_stream(cls, stream):
# """Extract data from stream. Returns None if some error occurred."""
# cycles = []
# while True:
# scf_cycle = GroundStateScfCycle.from_stream(stream)
# if scf_cycle is None: break
# cycles.append(scf_cycle)
#
# return cls(cycles) if cycles else None
#
# #def __init__(self):
#
# def plot(self, **kwargs):
# """
# Uses matplotlib to plot the evolution of the structural relaxation.
# ============== ==============================================================
# kwargs Meaning
# ============== ==============================================================
# title Title of the plot (Default: None).
# how True to show the figure (Default).
# savefig 'abc.png' or 'abc.eps'* to save the figure to a file.
# ============== ==============================================================
# Returns:
# `matplotlib` figure
# """
# import matplotlib.pyplot as plt
# title = kwargs.pop("title", None)
# show = kwargs.pop("show", True)
# savefig = kwargs.pop("savefig", None)
# if title: fig.suptitle(title)
# if savefig is not None: fig.savefig(savefig)
# if show: plt.show()
# return fig
##################
# Yaml parsers.
##################
class YamlTokenizerError(Exception):
    """Exceptions raised by :class:`YamlTokenizer`."""
class YamlTokenizer(Iterator):
    """
    Iterates over the YAML documents embedded in a text file.
    Provides context-manager support so you can use it in a with statement.
    """

    Error = YamlTokenizerError

    def __init__(self, filename):
        """
        Args:
            filename: Filename
        """
        # The position inside the file (line counter).
        self.linepos = 0
        self.filename = filename
        try:
            self.stream = open(filename, "rt")  # pylint: disable=R1732
        except IOError as exc:
            # Look for an associated error file and dump it: it usually
            # explains why the main file could not be opened.
            root, _ = os.path.splitext(self.filename)
            errfile = root + ".err"
            if os.path.exists(errfile) and errfile != self.filename:
                print("Found error file: %s" % errfile)
                with open(errfile, "rt") as fh:
                    print(fh.read())
            raise exc

    def __iter__(self):
        return self

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def __del__(self):
        self.close()

    def close(self):
        """Close the stream."""
        try:
            self.stream.close()
        except Exception:
            print("Exception in YAMLTokenizer.close()")
            print("Python traceback:")
            print(straceback())

    def seek(self, offset, whence=0):
        """
        Move to a new file position and reset the line counter.
        Only seeking back to the start of the stream (offset == 0) is
        supported, since `linepos` cannot be recomputed for other offsets.
        Note that not all file objects are seekable.
        """
        assert offset == 0
        self.linepos = 0
        return self.stream.seek(offset, whence)

    # Python 3 compatibility
    def __next__(self):
        return self.next()

    def next(self):
        """
        Returns the next YAML document in the stream.
        Raises:
            StopIteration: if the end of the stream is reached first.
        .. warning::
            Assume that the YAML documents are closed explicitly with the sentinel '...'
        """
        in_doc, lines, doc_tag = None, [], None
        for line in self.stream:
            self.linepos += 1
            if line.startswith("---"):
                # Include only lines in the form:
                # "--- !tag"
                # "---"
                # Other lines are spurious.
                in_doc = False
                stripped = line[3:].strip()
                if stripped.startswith("!"):
                    # "--- !tag"
                    doc_tag = stripped
                    in_doc = True
                elif not stripped:
                    # "---"
                    in_doc = True
                    doc_tag = None
                if in_doc:
                    lineno = self.linepos
            if in_doc:
                lines.append(line)
            if in_doc and line.startswith("..."):
                return YamlDoc(text="".join(lines), lineno=lineno, tag=doc_tag)
        raise StopIteration("Cannot find next YAML document in %s" % self.filename)

    def all_yaml_docs(self):
        """
        Returns a list with all the YAML docs found in stream.
        Seek the stream before returning.
        .. warning::
            Assume that all the YAML docs (with the exception of the last one)
            are closed explicitly with the sentinel '...'
        """
        docs = list(self)
        self.seek(0)
        return docs

    def next_doc_with_tag(self, doc_tag):
        """
        Returns the next document with the specified tag.
        Raises:
            StopIteration: if no document with `doc_tag` is found before
            the end of the stream.
        """
        while True:
            doc = next(self)
            if doc.tag == doc_tag:
                return doc

    def all_docs_with_tag(self, doc_tag):
        """
        Returns all the documents with the specified tag.
        Seek the stream before returning.
        """
        docs = []
        while True:
            try:
                # BUG FIX: the original called the non-existent method
                # `next_doc_with`, raising AttributeError on first use.
                doc = self.next_doc_with_tag(doc_tag)
                docs.append(doc)
            except StopIteration:
                break
        self.seek(0)
        return docs
def yaml_read_kpoints(filename, doc_tag="!Kpoints"):
"""Read the K-points from file."""
with YamlTokenizer(filename) as r:
doc = r.next_doc_with_tag(doc_tag)
d = yaml.safe_load(doc.text_notag)
return np.array(d["reduced_coordinates_of_qpoints"])
def yaml_read_irred_perts(filename, doc_tag="!IrredPerts"):
"""Read the list of irreducible perturbations from file."""
with YamlTokenizer(filename) as r:
doc = r.next_doc_with_tag(doc_tag)
d = yaml.safe_load(doc.text_notag)
return [AttrDict(**pert) for pert in d["irred_perts"]]
# return d["irred_perts"]
class YamlDoc:
    """
    Handy object that stores the YAML document, its main tag and the
    position inside the file.
    """

    __slots__ = [
        "text",
        "lineno",
        "tag",
    ]

    def __init__(self, text, lineno, tag=None):
        """
        Args:
            text: String with the YAML document.
            lineno: The line number where the document is located.
            tag: The YAML tag associated to the document.
        """
        # Sanitize strings: use "ignore" to skip invalid characters in .decode
        if isinstance(text, bytes):
            text = text.decode("utf-8", "ignore")
        self.text = text.strip()
        self.lineno = lineno
        if isinstance(tag, bytes):
            tag = tag.decode("utf-8", "ignore")
        self.tag = tag

    def __str__(self):
        return self.text

    def __eq__(self, other):
        if other is None:
            return False
        # ROBUSTNESS FIX: comparing against a non-YamlDoc used to raise
        # AttributeError; return NotImplemented so Python falls back to
        # its default (identity-based) comparison.
        if not isinstance(other, YamlDoc):
            return NotImplemented
        return self.text == other.text and self.lineno == other.lineno and self.tag == other.tag

    def __ne__(self, other):
        return not self == other

    @property
    def text_notag(self):
        """
        Returns the YAML text without the tag.
        Useful if we don't have any constructor registered for the tag
        (we used the tag just to locate the document).
        """
        if self.tag is not None:
            return self.text.replace(self.tag, "")
        return self.text

    def as_dict(self):
        """Use Yaml to parse the text (without the tag) and returns a dictionary."""
        return yaml.safe_load(self.text_notag)
| |
from __future__ import print_function
from SimpleCV.base import *
import scipy.signal as sps
import scipy.optimize as spo
import numpy as np
import copy, operator
class LineScan(list):
"""
**SUMMARY**
A line scan is a one dimensional signal pulled from the intensity
of a series of a pixels in an image. LineScan allows you to do a series
of operations just like on an image class object. You can also treat the
line scan as a python list object. A linescan object is automatically
generated by calling ImageClass.getLineScan on an image. You can also
roll your own by declaring a LineScan object and passing the constructor
a 1xN list of values.
**EXAMPLE**
>>>> import matplotlib.pyplot as plt
>>>> img = Image('lenna')
>>>> s = img.getLineScan(y=128)
>>>> ss = s.smooth()
>>>> plt.plot(s)
>>>> plt.plot(ss)
>>>> plt.show()
"""
pointLoc = None
image = None
def __init__(self, args, **kwargs):
if isinstance(args, np.ndarray):
args = args.tolist()
list.__init__(self,args)
self.image = None
self.pt1 = None
self.pt2 = None
self.row = None
self.col = None
self.channel = -1
for key in kwargs:
if key == 'pointLocs':
if kwargs[key] is not None:
self.pointLoc = kwargs[key]
if key == 'image':
if kwargs[key] is not None:
self.img = kwargs[key]
if key == 'pt1':
if kwargs[key] is not None:
self.pt1 = kwargs[key]
if key == 'pt2':
if kwargs[key] is not None:
self.pt2 = kwargs[key]
if key == "x":
if kwargs[key] is not None:
self.col = kwargs[key]
if key == "y":
if kwargs[key] is not None:
self.row = kwargs[key]
if key == "channel":
if kwargs[key] is not None:
self.channel = kwargs[key]
if(self.pointLoc is None):
self.pointLoc = zip(range(0,len(self)),range(0,len(self)))
def __getitem__(self,key):
"""
**SUMMARY**
Returns a LineScan when sliced. Previously used to
return list. Now it is possible to use LineScanm member
functions on sub-lists
"""
if type(key) is slice: #Or can use 'try:' for speed
return LineScan(list.__getitem__(self, key))
else:
return list.__getitem__(self,key)
def __getslice__(self, i, j):
"""
Deprecated since python 2.0, now using __getitem__
"""
return self.__getitem__(slice(i,j))
def __sub__(self,other):
if len(self) == len(other):
retVal = LineScan(map(operator.sub,self,other))
else:
print('Size mismatch')
return None
retVal._update(self)
return retVal
def __add__(self,other):
if len(self) == len(other):
retVal = LineScan(map(operator.add,self,other))
else:
print('Size mismatch')
return None
retVal._update(self)
return retVal
def __mul__(self,other):
if len(self) == len(other):
retVal = LineScan(map(operator.mul,self,other))
else:
print('Size mismatch')
return None
retVal._update(self)
return retVal
def __div__(self,other):
if len(self) == len(other):
try:
retVal = LineScan(map(operator.div,self,other))
except ZeroDivisionError:
print('Second LineScan contains zeros')
return None
else:
print('Size mismatch')
return None
retVal._update(self)
return retVal
def _update(self, linescan):
"""
** SUMMARY**
Updates LineScan's Instance Objects.
"""
self.image = linescan.image
self.pt1 = linescan.pt1
self.pt2 = linescan.pt2
self.row = linescan.row
self.col = linescan.col
self.channel = linescan.channel
self.pointLoc = linescan.pointLoc
def smooth(self, degree=3):
    """
    **SUMMARY**
    Perform a simple Gaussian smoothing operation on the signal.
    **PARAMETERS**
    * *degree* - The degree of the fitting function. Higher degree means more smoothing.
    **RETURNS**
    A smoothed LineScan object of the same length as the input.
    **EXAMPLE**
    >>>> import matplotlib.pyplot as plt
    >>>> img = Image('lenna')
    >>>> sl = img.getLineScan(y=128)
    >>>> plt.plot(sl)
    >>>> plt.plot(sl.smooth(7))
    >>>> plt.show()
    **NOTES**
    Cribbed from http://www.swharden.com/blog/2008-11-17-linear-data-smoothing-in-python/
    """
    window = degree*2-1            # kernel width (always odd)
    weight = np.array([1.0]*window)
    weightGauss = []
    # build a Gaussian window: sample 1/exp((4*frac)^2) at offsets
    # centered on the kernel
    for i in range(window):
        i = i-degree+1
        frac = i/float(window)
        gauss = 1/(np.exp((4*(frac))**2))
        weightGauss.append(gauss)
    weight = np.array(weightGauss)*weight
    # sliding weighted average; this is `window` samples shorter
    # than the input
    smoothed = [0.0]*(len(self)-window)
    for i in range(len(smoothed)):
        smoothed[i] = sum(np.array(self[i:i+window])*weight)/sum(weight)
    # recenter the signal so it sits nicely on top of the old:
    # pad with raw samples from the head and tail so lengths match
    front = self[0:(degree-1)]
    front += smoothed
    front += self[-1*degree:]
    retVal = LineScan(front, image=self.image, pointLoc=self.pointLoc, pt1=self.pt1, pt2=self.pt2)
    retVal._update(self)
    return retVal
def normalize(self):
"""
**SUMMARY**
Normalize the signal so the maximum value is scaled to one.
**RETURNS**
A normalized scanline object.
**EXAMPLE**
>>>> import matplotlib.pyplot as plt
>>>> img = Image('lenna')
>>>> sl = img.getLineScan(y=128)
>>>> plt.plot(sl)
>>>> plt.plot(sl.normalize())
>>>> plt.show()
"""
temp = np.array(self, dtype='float32')
temp = temp / np.max(temp)
retVal = LineScan(list(temp[:]),image=self.image,pointLoc=self.pointLoc,pt1=self.pt1,pt2=self.pt2)
retVal._update(self)
return retVal
def scale(self,value_range=(0,1)):
"""
**SUMMARY**
Scale the signal so the maximum and minimum values are
all scaled to the values in value_range. This is handy
if you want to compare the shape of two signals that
are scaled to different ranges.
**PARAMETERS**
* *value_range* - A tuple that provides the lower and upper bounds
for the output signal.
**RETURNS**
A scaled LineScan object.
**EXAMPLE**
>>>> import matplotlib.pyplot as plt
>>>> img = Image('lenna')
>>>> sl = img.getLineScan(y=128)
>>>> plt.plot(sl)
>>>> plt.plot(sl.scale(value_range(0,255)))
>>>> plt.show()
**SEE ALSO**
"""
temp = np.array(self, dtype='float32')
vmax = np.max(temp)
vmin = np.min(temp)
a = np.min(value_range)
b = np.max(value_range)
temp = (((b-a)/(vmax-vmin))*(temp-vmin))+a
retVal = LineScan(list(temp[:]),image=self.image,pointLoc=self.pointLoc,pt1=self.pt1,pt2=self.pt2)
retVal._update(self)
return retVal
def minima(self):
"""
**SUMMARY**
The function the global minima in the line scan.
**RETURNS**
Returns a list of tuples of the format:
(LineScanIndex,MinimaValue,(image_position_x,image_position_y))
**EXAMPLE**
>>>> import matplotlib.pyplot as plt
>>>> img = Image('lenna')
>>>> sl = img.getLineScan(y=128)
>>>> minima = sl.smooth().minima()
>>>> plt.plot(sl)
>>>> for m in minima:
>>>> plt.plot(m[0],m[1],'ro')
>>>> plt.show()
"""
# all of these functions should return
# value, index, pixel coordinate
# [(index,value,(pix_x,pix_y))...]
minvalue = np.min(self)
idxs = np.where(np.array(self)==minvalue)[0]
minvalue = np.ones((1,len(idxs)))*minvalue # make zipable
minvalue = minvalue[0]
pts = np.array(self.pointLoc)
pts = pts[idxs]
pts = [(p[0],p[1]) for p in pts] # un numpy this
return zip(idxs,minvalue,pts)
def maxima(self):
"""
**SUMMARY**
The function finds the global maxima in the line scan.
**RETURNS**
Returns a list of tuples of the format:
(LineScanIndex,MaximaValue,(image_position_x,image_position_y))
**EXAMPLE**
>>>> import matplotlib.pyplot as plt
>>>> img = Image('lenna')
>>>> sl = img.getLineScan(y=128)
>>>> maxima = sl.smooth().maxima()
>>>> plt.plot(sl)
>>>> for m in maxima:
>>>> plt.plot(m[0],m[1],'ro')
>>>> plt.show()
"""
# all of these functions should return
# value, index, pixel coordinate
# [(index,value,(pix_x,pix_y))...]
maxvalue = np.max(self)
idxs = np.where(np.array(self)==maxvalue)[0]
maxvalue = np.ones((1,len(idxs)))*maxvalue # make zipable
maxvalue = maxvalue[0]
pts = np.array(self.pointLoc)
pts = pts[idxs]
pts = [(p[0],p[1]) for p in pts] # un numpy
return zip(idxs,maxvalue,pts)
def derivative(self):
"""
**SUMMARY**
This function finds the discrete derivative of the signal.
The discrete derivative is simply the difference between each
succesive samples. A good use of this function is edge detection
**RETURNS**
Returns the discrete derivative function as a LineScan object.
**EXAMPLE**
>>>> import matplotlib.pyplot as plt
>>>> img = Image('lenna')
>>>> sl = img.getLineScan(y=128)
>>>> plt.plot(sl)
>>>> plt.plot(sl.derivative())
>>>> plt.show()
"""
temp = np.array(self,dtype='float32')
d = [0]
d += list(temp[1:]-temp[0:-1])
retVal = LineScan(d,image=self.image,pointLoc=self.pointLoc,pt1=self.pt1,pt2=self.pt2)
#retVal.image = self.image
#retVal.pointLoc = self.pointLoc
return retVal
def localMaxima(self):
"""
**SUMMARY**
The function finds local maxima in the line scan. Local maxima
are defined as points that are greater than their neighbors to
the left and to the right.
**RETURNS**
Returns a list of tuples of the format:
(LineScanIndex,MaximaValue,(image_position_x,image_position_y))
**EXAMPLE**
>>>> import matplotlib.pyplot as plt
>>>> img = Image('lenna')
>>>> sl = img.getLineScan(y=128)
>>>> maxima = sl.smooth().maxima()
>>>> plt.plot(sl)
>>>> for m in maxima:
>>>> plt.plot(m[0],m[1],'ro')
>>>> plt.show()
"""
temp = np.array(self)
idx = np.r_[True, temp[1:] > temp[:-1]] & np.r_[temp[:-1] > temp[1:], True]
idx = np.where(idx==True)[0]
values = temp[idx]
pts = np.array(self.pointLoc)
pts = pts[idx]
pts = [(p[0],p[1]) for p in pts] # un numpy
return zip(idx,values,pts)
def localMinima(self):
"""""
**SUMMARY**
The function the local minima in the line scan. Local minima
are defined as points that are less than their neighbors to
the left and to the right.
**RETURNS**
Returns a list of tuples of the format:
(LineScanIndex,MinimaValue,(image_position_x,image_position_y))
**EXAMPLE**
>>>> import matplotlib.pyplot as plt
>>>> img = Image('lenna')
>>>> sl = img.getLineScan(y=128)
>>>> minima = sl.smooth().minima()
>>>> plt.plot(sl)
>>>> for m in minima:
>>>> plt.plot(m[0],m[1],'ro')
>>>> plt.show()
"""
temp = np.array(self)
idx = np.r_[True, temp[1:] < temp[:-1]] & np.r_[temp[:-1] < temp[1:], True]
idx = np.where(idx==True)[0]
values = temp[idx]
pts = np.array(self.pointLoc)
pts = pts[idx]
pts = [(p[0],p[1]) for p in pts] # un numpy
return zip(idx,values,pts)
def resample(self,n=100):
"""
**SUMMARY**
Resample the signal to fit into n samples. This method is
handy if you would like to resize multiple signals so that
they fit together nice. Note that using n < len(LineScan)
can cause data loss.
**PARAMETERS**
* *n* - The number of samples to resample to.
**RETURNS**
A LineScan object of length n.
**EXAMPLE**
>>>> import matplotlib.pyplot as plt
>>>> img = Image('lenna')
>>>> sl = img.getLineScan(y=128)
>>>> plt.plot(sl)
>>>> plt.plot(sl.resample(100))
>>>> plt.show()
"""
signal = sps.resample(self,n)
pts = np.array(self.pointLoc)
# we assume the pixel points are linear
# so we can totally do this better manually
x = linspace(pts[0,0],pts[-1,0],n)
y = linspace(pts[0,1],pts[-1,1],n)
pts = zip(x,y)
retVal = LineScan(list(signal),image=self.image,pointLoc=self.pointLoc,pt1=self.pt1,pt2=self.pt2)
retVal._update(self)
return retVal
# this needs to be moved out to a cookbook or something
#def linear(xdata,m,b):
# return m*xdata+b
# need to add polyfit too
#http://docs.scipy.org/doc/numpy/reference/generated/numpy.polyfit.html
def fitToModel(self,f,p0=None):
"""
**SUMMARY**
Fit the data to the provided model. This can be any arbitrary
2D signal. Return the data of the model scaled to the data.
**PARAMETERS**
* *f* - a function of the form f(x_values, p0,p1, ... pn) where
p is parameter for the model.
* *p0* - a list of the initial guess for the model parameters.
**RETURNS**
A LineScan object where the fitted model data replaces the
actual data.
**EXAMPLE**
>>>> def aLine(x,m,b):
>>>> return m*x+b
>>>> import matplotlib.pyplot as plt
>>>> img = Image('lenna')
>>>> sl = img.getLineScan(y=128)
>>>> fit = sl.fitToModel(aLine)
>>>> plt.plot(sl)
>>>> plt.plot(fit)
>>>> plt.show()
"""
yvals = np.array(self,dtype='float32')
xvals = range(0,len(yvals),1)
popt,pcov = spo.curve_fit(f,xvals,yvals,p0=p0)
yvals = f(xvals,*popt)
retVal = LineScan(list(yvals),image=self.image,pointLoc=self.pointLoc,pt1=self.pt1,pt2=self.pt2)
retVal._update(self)
return retVal
def getModelParameters(self,f,p0=None):
"""
**SUMMARY**
Fit a model to the data and then return
**PARAMETERS**
* *f* - a function of the form f(x_values, p0,p1, ... pn) where
p is parameter for the model.
* *p0* - a list of the initial guess for the model parameters.
**RETURNS**
The model parameters as a list. For example if you use a line
model y=mx+b the function returns the m and b values that fit
the data.
**EXAMPLE**
>>>> def aLine(x,m,b):
>>>> return m*x+b
>>>> import matplotlib.pyplot as plt
>>>> img = Image('lenna')
>>>> sl = img.getLineScan(y=128)
>>>> p = sl.getModelParameters(aLine)
>>>> print p
"""
yvals = np.array(self,dtype='float32')
xvals = range(0,len(yvals),1)
popt,pcov = spo.curve_fit(f,xvals,yvals,p0=p0)
return popt
def convolve(self,kernel):
"""
**SUMMARY**
Convolve the line scan with a one dimenisional kernel stored as
a list. This allows you to create an arbitrary filter for the signal.
**PARAMETERS**
* *kernel* - An Nx1 list or np.array that defines the kernel.
**RETURNS**
A LineScan feature with the kernel applied. We crop off
the fiddly bits at the end and the begining of the kernel
so everything lines up nicely.
**EXAMPLE**
>>>> import matplotlib.pyplot as plt
>>>> smooth_kernel = [0.1,0.2,0.4,0.2,0.1]
>>>> img = Image('lenna')
>>>> sl = img.getLineScan(y=128)
>>>> out = sl.convolve(smooth_kernel)
>>>> plt.plot(sl)
>>>> plt.plot(out)
>>>> plt.show()
**SEE ALSO**
"""
out = np.convolve(self,np.array(kernel,dtype='float32'),'same')
retVal = LineScan(out,image=self.image,pointLoc=self.pointLoc,pt1=self.pt1,pt2=self.pt2,channel=self.channel)
return retVal
def fft(self):
"""
**SUMMARY**
Perform a Fast Fourier Transform on the line scan and return
the FFT output and the frequency of each value.
**RETURNS**
The FFT as a numpy array of irrational numbers and a one dimensional
list of frequency values.
**EXAMPLE**
>>>> import matplotlib.pyplot as plt
>>>> img = Image('lenna')
>>>> sl = img.getLineScan(y=128)
>>>> fft,freq = sl.fft()
>>>> plt.plot(freq,fft.real,freq,fft.imag)
>>>> plt.show()
"""
signal = np.array(self,dtype='float32')
fft = np.fft.fft(signal)
freq = np.fft.fftfreq(len(signal))
return (fft,freq)
def ifft(self,fft):
"""
**SUMMARY**
Perform an inverse fast Fourier transform on the provided
irrationally valued signal and return the results as a
LineScan.
**PARAMETERS**
* *fft* - A one dimensional numpy array of irrational values
upon which we will perform the IFFT.
**RETURNS**
A LineScan object of the reconstructed signal.
**EXAMPLE**
>>>> img = Image('lenna')
>>>> sl = img.getLineScan(pt1=(0,0),pt2=(300,200))
>>>> fft,frq = sl.fft()
>>>> fft[30:] = 0 # low pass filter
>>>> sl2 = sl.ifft(fft)
>>>> import matplotlib.pyplot as plt
>>>> plt.plot(sl)
>>>> plt.plot(sl2)
"""
signal = np.fft.ifft(fft)
retVal = LineScan(signal.real)
retVal.image = self.image
retVal.pointLoc = self.pointLoc
return retVal
def createEmptyLUT(self,defaultVal=-1):
"""
**SUMMARY**
Create an empty look up table (LUT).
If default value is what the lut is intially filled with
if defaultVal == 0
the array is all zeros.
if defaultVal > 0
the array is set to default value. Clipped to 255.
if defaultVal < 0
the array is set to the range [0,255]
if defaultVal is a tuple of two values:
we set stretch the range of 0 to 255 to match
the range provided.
**PARAMETERS**
* *defaultVal* - See above.
**RETURNS**
A LUT.
**EXAMPLE**
>>>> ls = img.getLineScan(x=10)
>>>> lut = ls.createEmptyLUT()
>>>> ls2 = ls.applyLUT(lut)
>>>> plt.plot(ls)
>>>> plt.plot(ls2)
>>>> plt.show()
"""
lut = None
if( isinstance(defaultVal,list) or
isinstance(defaultVal,tuple)):
start = np.clip(defaultVal[0],0,255)
stop = np.clip(defaultVal[1],0,255)
lut = np.around(np.linspace(start,stop,256),0)
lut = np.array(lut,dtype='uint8')
lut = lut.tolist()
elif( defaultVal == 0 ):
lut = np.zeros([1,256]).tolist()[0]
elif( defaultVal > 0 ):
defaultVal = np.clip(defaultVal,1,255)
lut = np.ones([1,256])*defaultVal
lut = np.array(lut,dtype='uint8')
lut = lut.tolist()[0]
elif( defaultVal < 0 ):
lut = np.linspace(0,256,256)
lut = np.array(lut,dtype='uint8')
lut = lut.tolist()
return lut
def fillLUT(self, lut, idxs, value=255):
    """
    Set lut[idx] = value for every index in idxs, then return the LUT.

    * *lut* - an existing LUT (a list of 256 values).
    * *idxs* - the indexes to fill. May also be a SimpleCV Image: its
      gray pixel values are flattened and used as the indexes (handy
      for building a LUT that picks out a sampled swatch).
    * *value* - the value to store; clipped to [0, 255].

    Out-of-range indexes are silently skipped.
    """
    # class-name string compare instead of isinstance -- the original
    # author noted isinstance was "persnickety" here, presumably an
    # import-cycle issue with the Image class (unverified)
    if(idxs.__class__.__name__ == 'Image' ):
        npg = idxs.getGrayNumpy()
        npg = npg.reshape([npg.shape[0]*npg.shape[1]])
        idxs = npg.tolist()
    value = np.clip(value, 0, 255)
    for idx in idxs:
        if(idx >= 0 and idx < len(lut)):
            lut[idx] = value
    return lut
def threshold(self,threshold=128,invert=False):
"""
**SUMMARY**
Do a 1D threshold operation. Values about the threshold
will be set to 255, values below the threshold will be
set to 0. If invert is true we do the opposite.
**PARAMETERS**
* *threshold* - The cutoff value for our threshold.
* *invert* - if invert is false values above the threshold
are set to 255, if invert is True the are set to 0.
**RETURNS**
The thresholded linescan operation.
**EXAMPLE**
>>>> ls = img.getLineScan(x=10)
>>>> ls2 = ls.threshold()
>>>> plt.plot(ls)
>>>> plt.plot(ls2)
>>>> plt.show()
"""
out = []
high = 255
low = 0
if( invert ):
high = 0
low = 255
for pt in self:
if( pt < threshold ):
out.append(low)
else:
out.append(high)
retVal = LineScan(out,image=self.image,pointLoc=self.pointLoc,pt1=self.pt1,pt2=self.pt2)
retVal._update(self)
return retVal
def invert(self,max=255):
"""
**SUMMARY**
Do an 8bit invert of the signal. What was black is now
white, what was 255 is now zero.
**PARAMETERS**
* *max* - The maximum value of a pixel in the image, usually 255.
**RETURNS**
The inverted LineScan object.
**EXAMPLE**
>>>> ls = img.getLineScan(x=10)
>>>> ls2 = ls.invert()
>>>> plt.plot(ls)
>>>> plt.plot(ls2)
>>>> plt.show()
"""
out = []
for pt in self:
out.append(255-pt)
retVal = LineScan(out,image=self.image,pointLoc=self.pointLoc,pt1=self.pt1,pt2=self.pt2)
retVal._update(self)
return retVal
def mean(self):
"""
**SUMMARY**
Computes the statistical mean of the signal.
**RETURNS**
The mean of the LineScan object.
**EXAMPLE**
>>>> ls = img.getLineScan(x=10)
>>>> avg = ls.mean()
>>>> plt.plot(ls)
>>>> plt.axhline(y = avg)
>>>> plt.show()
"""
return float(sum(self))/len(self)
def variance(self):
"""
**SUMMARY**
Computes the variance of the signal.
**RETURNS**
The variance of the LineScan object.
**EXAMPLE**
>>>> ls = img.getLineScan(x=10)
>>>> var = ls.variance()
>>>> var
"""
mean = float(sum(self))/len(self)
summation = 0
for num in self:
summation += (num - mean)**2
return summation/len(self)
def std(self):
"""
**SUMMARY**
Computes the standard deviation of the signal.
**RETURNS**
The standard deviation of the LineScan object.
**EXAMPLE**
>>>> ls = img.getLineScan(x=10)
>>>> avg = ls.mean()
>>>> std = ls.std()
>>>> plt.plot(ls)
>>>> plt.axhline(y = avg)
>>>> plt.axhline(y = avg - std, color ='r')
>>>> plt.axhline(y = avg + std, color ='r')
>>>> plt.show()
"""
mean = float(sum(self))/len(self)
summation = 0
for num in self:
summation += (num - mean)**2
return np.sqrt(summation/len(self))
def median(self,sz=5):
"""
**SUMMARY**
Do a sliding median filter with a window size equal to size.
**PARAMETERS**
* *sz* - the size of the median filter.
**RETURNS**
The linescan after being passed through the median filter.
The last index where the value occurs or None if none is found.
**EXAMPLE**
>>>> ls = img.getLineScan(x=10)
>>>> ls2 = ls.median(7)
>>>> plt.plot(ls)
>>>> plt.plot(ls2)
>>>> plt.show()
"""
if( sz%2==0 ):
sz = sz+1
skip = int(np.floor(sz/2))
out = self[0:skip]
vsz = len(self)
for idx in range(skip,vsz-skip):
val = np.median(self[(idx-skip):(idx+skip)])
out.append(val)
for pt in self[-1*skip:]:
out.append(pt)
retVal = LineScan(out,image=self.image,pointLoc=self.pointLoc,pt1=self.pt1,pt2=self.pt2)
retVal._update(self)
return retVal
def findFirstIdxEqualTo(self,value=255):
"""
**SUMMARY**
Find the index of the first element of the linescan that has
a value equal to value. If nothing is found None is returned.
**PARAMETERS**
* *value* - the value to look for.
**RETURNS**
The first index where the value occurs or None if none is found.
**EXAMPLE**
>>>> ls = img.getLineScan(x=10)
>>>> idx = ls.findFIRSTIDXEqualTo()
"""
vals = np.where(np.array(self)==value)[0]
retVal = None
if( len(vals) > 0 ):
retVal = vals[0]
return retVal
def findLastIdxEqualTo(self,value=255):
"""
**SUMMARY**
Find the index of the last element of the linescan that has
a value equal to value. If nothing is found None is returned.
**PARAMETERS**
* *value* - the value to look for.
**RETURNS**
The last index where the value occurs or None if none is found.
**EXAMPLE**
>>>> ls = img.getLineScan(x=10)
>>>> idx = ls.findLastIDXEqualTo()
"""
vals = np.where(np.array(self)==value)[0]
retVal = None
if( len(vals) > 0 ):
retVal = vals[-1]
return retVal
def findFirstIdxGreaterThan(self, value=255):
    """
    **SUMMARY**
    Find the index of the first element of the linescan whose value is
    greater than OR EQUAL to `value` (note: despite the method name,
    the comparison is >=). If nothing qualifies, None is returned.
    **PARAMETERS**
    * *value* - the threshold to compare against.
    **RETURNS**
    The first index at which the scan is >= value, or None.
    **EXAMPLE**
    >>>> ls = img.getLineScan(x=10)
    >>>> idx = ls.findFirstIdxGreaterThan(128)
    """
    vals = np.where(np.array(self) >= value)[0]
    retVal = None
    if( len(vals) > 0 ):
        retVal = vals[0]
    return retVal
def applyLUT(self,lut):
"""
**SUMMARY**
Apply a look up table to the signal.
**PARAMETERS**
* *lut* an array of of length 256, the array elements are the values
that are replaced via the lut
**RETURNS**
A LineScan object with the LUT applied to the values.
**EXAMPLE**
>>>> ls = img.getLineScan(x=10)
>>>> lut = ls.createEmptyLUT()
>>>> ls2 = ls.applyLUT(lut)
>>>> plt.plot(ls)
>>>> plt.plot(ls2)
"""
out = []
for pt in self:
out.append(lut[pt])
retVal = LineScan(out,image=self.image,pointLoc=self.pointLoc,pt1=self.pt1,pt2=self.pt2)
retVal._update(self)
return retVal
def medianFilter(self, kernel_size=5):
    """
    Apply scipy.signal.medfilt to the data.

    * *kernel_size* - filter width; must be odd. Even values are
      decremented by one with a console notice.

    Returns a LineScan of uint8 values, or None when
    scipy.signal.medfilt cannot be imported (scipy >= 0.11 required).
    """
    try:
        from scipy.signal import medfilt
    except ImportError:
        warnings.warn("Scipy vesion >= 0.11 requierd.")
        return None
    if kernel_size % 2 == 0:
        kernel_size -= 1
        print("Kernel Size should be odd. New kernel size =" , (kernel_size))
    medfilt_array = medfilt(np.asarray(self[:]), kernel_size)
    # uint8 cast assumes 8-bit pixel data -- TODO confirm for float scans
    retVal = LineScan(medfilt_array.astype("uint8").tolist(), image=self.image,
                      pointLoc=self.pointLoc, pt1=self.pt1, pt2=self.pt2,
                      x=self.col, y=self.row)
    retVal._update(self)
    return retVal
def detrend(self):
    """
    Remove the linear trend from the data (scipy.signal.detrend).

    Returns a LineScan of uint8 values, or None when
    scipy.signal.detrend cannot be imported (scipy >= 0.11 required).
    """
    try:
        from scipy.signal import detrend as sdetrend
    except ImportError:
        warnings.warn("Scipy vesion >= 0.11 requierd.")
        return None
    detrend_arr = sdetrend(np.asarray(self[:]))
    # NOTE(review): the uint8 cast wraps negative detrended values --
    # confirm this is the intended behavior
    retVal = LineScan(detrend_arr.astype("uint8").tolist(), image=self.image,
                      pointLoc=self.pointLoc, pt1=self.pt1, pt2=self.pt2,
                      x=self.col, y=self.row)
    retVal._update(self)
    return retVal
def runningAverage(self, diameter=3, algo="uniform"):
"""
**SUMMARY**
Finds the running average by either using a uniform kernel or using a gaussian kernel.
The gaussian kernelis calculated from the standard normal distribution formulae.
**PARAMETERS**
* *diameter* - Size of the window (should be odd int) - int
* *algo* - "uniform" (default) / "gaussian" - used to decide the kernel - string
**RETURNS**
A LineScan object with the kernel of the provided algorithm applied.
**EXAMPLE**
>>> ls = img.getLineScan(x=10)
>>> ra = ls.runningAverage()
>>> rag = ls.runningAverage(15,algo="gaussian")
>>> plt.plot(ls)
>>> plt.plot(ra)
>>> plt.plot(rag)
>>> plt.show()
"""
if diameter%2 == 0:
warnings.warn("Diameter must be an odd integer")
return None
if algo=="uniform":
kernel=list(1/float(diameter)*np.ones(diameter))
elif algo=="gaussian":
kernel=list()
r=float(diameter)/2
for i in range(-int(r),int(r)+1):
kernel.append(np.exp(-i**2/(2*(r/3)**2))/(np.sqrt(2*np.pi)*(r/3)))
retVal = LineScan(map(int,self.convolve(kernel)))
retVal._update(self)
return retVal
def findPeaks(self, window = 30, delta = 3):
    """
    Find peaks in the LineScan: candidates that exceed everything in a
    `window`-wide neighborhood by more than `delta`.

    * *window* - neighborhood size in which the peak must dominate
      (default 30; the original docstring claimed 15). Lower values
      return more peaks.
    * *delta* - minimum margin between the peak and all other samples
      in its window.

    Returns a list of (peak index, peak value) tuples.
    """
    maximum = -np.Inf        # running best candidate since the last emitted peak
    width = int(window/2.0)
    peaks = []
    for index, val in enumerate(self):
        # track the best candidate seen so far
        if val > maximum:
            maximum = val
            maxpos = index
        # emit the candidate once every sample in its window sits more
        # than `delta` below it, then restart the search
        if max( self[max(0, index-width):index+width])+delta < maximum:
            peaks.append((maxpos, maximum))
            maximum = -np.Inf
    return peaks
def findValleys(self, window = 30, delta = 3 ):
    """
    Find valleys in the LineScan: candidates that undercut everything
    in a `window`-wide neighborhood by more than `delta`.

    * *window* - neighborhood size in which the valley must be the
      lowest value (default 30; the original docstring claimed 15).
      Lower values return more valleys.
    * *delta* - minimum margin between the valley and all other
      samples in its window.

    Returns a list of (valley index, valley value) tuples.
    """
    minimum = np.Inf         # running best candidate since the last emitted valley
    width = int(window/2.0)
    peaks = []
    for index, val in enumerate(self):
        # track the best candidate seen so far
        if val < minimum:
            minimum = val
            minpos = index
        # emit the candidate once every sample in its window sits more
        # than `delta` above it, then restart the search
        if min( self[max(0, index-width):index+width])-delta > minimum:
            peaks.append((minpos, minimum))
            minimum = np.Inf
    return peaks
def fitSpline(self, degree=2):
    """
    Fit a cubic-spline interpolation over the scan and evaluate it on a
    grid of spacing 0.1**degree (larger degree = denser evaluation;
    capped at 4, and degree must be >= 1 or None is returned).

    NOTE(review): despite the original docstring, the return value is
    a numpy array from scipy.signal.cspline1d_eval, not a LineScan.

    Implementation taken from http://www.scipy.org/Cookbook/Interpolation
    """
    if degree > 4:
        degree = 4  # no significant improvement beyond 4, just slower
    if degree < 1:
        warnings.warn('LineScan.fitSpline - degree needs to be >= 1')
        return None
    retVal = None
    y = np.array(self)
    x = np.arange(0, len(y), 1)
    dx = 1
    # evaluation grid: finer spacing for larger degree
    newx = np.arange(0, len(y)-1, pow(0.1, degree))
    cj = sps.cspline1d(y)
    retVal = sps.cspline1d_eval(cj, newx, dx=dx, x0=x[0])
    return retVal
| |
#!/usr/bin/env python
# Copyright 2018-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Peng Bao <baopeng@iccas.ac.cn>
# Qiming Sun <osirpt.sun@gmail.com>
#
'''
semi-grid Coulomb and eXchange without differential density matrix
To lower the scaling of Coulomb and exchange matrix construction for large systems, one
coordinate is analytical and the other is on a grid. The traditional two-electron
integrals turn into analytical one-electron integrals and numerical integration
based on the grid. (see Friesner, R. A. Chem. Phys. Lett. 1985, 116, 39)
Minimizing numerical errors using overlap fitting correction. (see
Izsak, R. et al. J. Chem. Phys. 2011, 135, 144105)
Grid screening for weighted AO value and DktXkg.
Two SCF steps: coarse grid then fine grid. There are 5 parameters can be changed:
# threshold for Xg and Fg screening
gthrd = 1e-10
# initial and final grids level
grdlvl_i = 0
grdlvl_f = 1
# norm_ddm threshold for grids change
thrd_nddm = 0.03
# set block size to adapt memory
sblk = 200
Set mf.direct_scf = False because no traditional 2e integrals
'''
import ctypes
import numpy
import scipy.linalg
from pyscf import lib
from pyscf import gto
from pyscf.lib import logger
from pyscf.df.incore import aux_e2
from pyscf.gto import moleintor
from pyscf.scf import _vhf
from pyscf.dft import gen_grid
def get_jk_favork(sgx, dm, hermi=1, with_j=True, with_k=True,
                  direct_scf_tol=1e-13):
    '''Build semi-numerical Coulomb (J) and exchange (K) matrices on the
    sgX grid, "favor K" variant: the exchange intermediate fg is formed
    from the raw density matrices and the overlap-fitting projection is
    applied to both vj and vk at the end.

    Args:
        sgx: sgX object carrying mol, grids, screening threshold
            (grids_thrd), block size (blockdim) and options.
        dm: one density matrix or a stack of them (last two dims nao).
        hermi: if 1, vk is symmetrized before returning.
        with_j, with_k: which of the two matrices to accumulate.
        direct_scf_tol: screening threshold for the direct driver.

    Returns:
        (vj, vk), each reshaped to the shape of the input dm.
    '''
    t0 = logger.process_clock(), logger.perf_counter()
    mol = sgx.mol
    grids = sgx.grids
    gthrd = sgx.grids_thrd
    dms = numpy.asarray(dm)
    dm_shape = dms.shape
    nao = dm_shape[-1]
    dms = dms.reshape(-1,nao,nao)
    nset = dms.shape[0]
    if sgx.debug:
        # slow reference path: explicit 3-center integrals per batch
        batch_nuc = _gen_batch_nuc(mol)
    else:
        batch_jk = _gen_jk_direct(mol, 's2', with_j, with_k, direct_scf_tol,
                                  sgx._opt, sgx.pjs)
    t1 = logger.timer_debug1(mol, "sgX initialziation", *t0)
    sn = numpy.zeros((nao,nao))
    vj = numpy.zeros_like(dms)
    vk = numpy.zeros_like(dms)
    ngrids = grids.coords.shape[0]
    max_memory = sgx.max_memory - lib.current_memory()[0]
    sblk = sgx.blockdim
    # batch size is bounded by both the configured block and free memory
    blksize = min(ngrids, max(4, int(min(sblk, max_memory*1e6/8/nao**2))))
    tnuc = 0, 0
    for i0, i1 in lib.prange(0, ngrids, blksize):
        coords = grids.coords[i0:i1]
        weights = grids.weights[i0:i1,None]
        ao = mol.eval_gto('GTOval', coords)
        wao = ao * grids.weights[i0:i1,None]
        # sn accumulates the numerical overlap used for the
        # overlap-fitting correction below
        sn += lib.dot(ao.T, wao)
        fg = lib.einsum('gi,xij->xgj', wao, dms)
        # drop grid points where fg is below the screening threshold
        # for every density matrix
        mask = numpy.zeros(i1-i0, dtype=bool)
        for i in range(nset):
            mask |= numpy.any(fg[i]>gthrd, axis=1)
            mask |= numpy.any(fg[i]<-gthrd, axis=1)
        if not numpy.all(mask):
            ao = ao[mask]
            wao = wao[mask]
            fg = fg[:,mask]
            coords = coords[mask]
            weights = weights[mask]
        if sgx.debug:
            tnuc = tnuc[0] - logger.process_clock(), tnuc[1] - logger.perf_counter()
            gbn = batch_nuc(mol, coords)
            tnuc = tnuc[0] + logger.process_clock(), tnuc[1] + logger.perf_counter()
            if with_j:
                jg = numpy.einsum('gij,xij->xg', gbn, dms)
            if with_k:
                gv = lib.einsum('gvt,xgt->xgv', gbn, fg)
            gbn = None
        else:
            tnuc = tnuc[0] - logger.process_clock(), tnuc[1] - logger.perf_counter()
            jg, gv = batch_jk(mol, coords, dms, fg.copy(), weights)
            tnuc = tnuc[0] + logger.process_clock(), tnuc[1] + logger.perf_counter()
        if with_j:
            xj = lib.einsum('gv,xg->xgv', ao, jg)
            for i in range(nset):
                vj[i] += lib.einsum('gu,gv->uv', wao, xj[i])
        if with_k:
            for i in range(nset):
                vk[i] += lib.einsum('gu,gv->uv', ao, gv[i])
        jg = gv = None
    t2 = logger.timer_debug1(mol, "sgX J/K builder", *t1)
    tdot = t2[0] - t1[0] - tnuc[0] , t2[1] - t1[1] - tnuc[1]
    logger.debug1(sgx, '(CPU, wall) time for integrals (%.2f, %.2f); '
                  'for tensor contraction (%.2f, %.2f)',
                  tnuc[0], tnuc[1], tdot[0], tdot[1])
    # overlap fitting: project with S_analytic / S_numerical to reduce
    # grid-incompleteness error (Izsak et al., JCP 135, 144105)
    ovlp = mol.intor_symmetric('int1e_ovlp')
    proj = scipy.linalg.solve(sn, ovlp)
    if with_j:
        vj = lib.einsum('pi,xpj->xij', proj, vj)
        vj = (vj + vj.transpose(0,2,1))*.5
    if with_k:
        vk = lib.einsum('pi,xpj->xij', proj, vk)
        if hermi == 1:
            vk = (vk + vk.transpose(0,2,1))*.5
    logger.timer(mol, "vj and vk", *t0)
    return vj.reshape(dm_shape), vk.reshape(dm_shape)
def get_jk_favorj(sgx, dm, hermi=1, with_j=True, with_k=True,
                  direct_scf_tol=1e-13):
    '''Build semi-numerical Coulomb (J) and exchange (K) matrices on the
    sgX grid, "favor J" variant: the overlap-fitting projection is
    folded into the density matrices up front (proj_dm), so vj can be
    accumulated directly from the projected density at the grid.

    Args:
        sgx: sgX object carrying mol, grids, screening threshold
            (grids_thrd), block size (blockdim) and options.
        dm: one density matrix or a stack of them (last two dims nao).
        hermi: if 1, vk is symmetrized before returning.
        with_j, with_k: which of the two matrices to accumulate.
        direct_scf_tol: screening threshold for the direct driver.

    Returns:
        (vj, vk), each reshaped to the shape of the input dm.
    '''
    t0 = logger.process_clock(), logger.perf_counter()
    mol = sgx.mol
    grids = sgx.grids
    gthrd = sgx.grids_thrd
    dms = numpy.asarray(dm)
    dm_shape = dms.shape
    nao = dm_shape[-1]
    dms = dms.reshape(-1,nao,nao)
    nset = dms.shape[0]
    if sgx.debug:
        # slow reference path: explicit 3-center integrals per batch
        batch_nuc = _gen_batch_nuc(mol)
    else:
        batch_jk = _gen_jk_direct(mol, 's2', with_j, with_k, direct_scf_tol,
                                  sgx._opt, sgx.pjs)
    sn = numpy.zeros((nao,nao))
    ngrids = grids.coords.shape[0]
    max_memory = sgx.max_memory - lib.current_memory()[0]
    sblk = sgx.blockdim
    # batch size is bounded by both the configured block and free memory
    blksize = min(ngrids, max(4, int(min(sblk, max_memory*1e6/8/nao**2))))
    # first pass: numerical overlap for the overlap-fitting projection
    for i0, i1 in lib.prange(0, ngrids, blksize):
        coords = grids.coords[i0:i1]
        ao = mol.eval_gto('GTOval', coords)
        wao = ao * grids.weights[i0:i1,None]
        sn += lib.dot(ao.T, wao)
    ovlp = mol.intor_symmetric('int1e_ovlp')
    proj = scipy.linalg.solve(sn, ovlp)
    # project the density matrices once, instead of projecting vj/vk
    # at the end as get_jk_favork does
    proj_dm = lib.einsum('ki,xij->xkj', proj, dms)
    t1 = logger.timer_debug1(mol, "sgX initialziation", *t0)
    vj = numpy.zeros_like(dms)
    vk = numpy.zeros_like(dms)
    tnuc = 0, 0
    # second pass: accumulate J and K over grid batches
    for i0, i1 in lib.prange(0, ngrids, blksize):
        coords = grids.coords[i0:i1]
        weights = grids.weights[i0:i1,None]
        ao = mol.eval_gto('GTOval', coords)
        wao = ao * grids.weights[i0:i1,None]
        fg = lib.einsum('gi,xij->xgj', wao, proj_dm)
        # drop grid points where fg is below the screening threshold
        # for every density matrix
        mask = numpy.zeros(i1-i0, dtype=bool)
        for i in range(nset):
            mask |= numpy.any(fg[i]>gthrd, axis=1)
            mask |= numpy.any(fg[i]<-gthrd, axis=1)
        if not numpy.all(mask):
            ao = ao[mask]
            fg = fg[:,mask]
            coords = coords[mask]
            weights = weights[mask]
        if with_j:
            # electron density at each surviving grid point
            rhog = numpy.einsum('xgu,gu->xg', fg, ao)
        else:
            rhog = None
        if sgx.debug:
            tnuc = tnuc[0] - logger.process_clock(), tnuc[1] - logger.perf_counter()
            gbn = batch_nuc(mol, coords)
            tnuc = tnuc[0] + logger.process_clock(), tnuc[1] + logger.perf_counter()
            if with_j:
                jpart = numpy.einsum('guv,xg->xuv', gbn, rhog)
            if with_k:
                gv = lib.einsum('gtv,xgt->xgv', gbn, fg)
            gbn = None
        else:
            tnuc = tnuc[0] - logger.process_clock(), tnuc[1] - logger.perf_counter()
            if with_j: rhog = rhog.copy()
            jpart, gv = batch_jk(mol, coords, rhog, fg.copy(), weights)
            tnuc = tnuc[0] + logger.process_clock(), tnuc[1] + logger.perf_counter()
        if with_j:
            vj += jpart
        if with_k:
            for i in range(nset):
                vk[i] += lib.einsum('gu,gv->uv', ao, gv[i])
        jpart = gv = None
    t2 = logger.timer_debug1(mol, "sgX J/K builder", *t1)
    tdot = t2[0] - t1[0] - tnuc[0] , t2[1] - t1[1] - tnuc[1]
    logger.debug1(sgx, '(CPU, wall) time for integrals (%.2f, %.2f); '
                  'for tensor contraction (%.2f, %.2f)',
                  tnuc[0], tnuc[1], tdot[0], tdot[1])
    # vj was accumulated in triangular form by the driver; restore the
    # full symmetric matrix
    for i in range(nset):
        lib.hermi_triu(vj[i], inplace=True)
    if with_k and hermi == 1:
        vk = (vk + vk.transpose(0,2,1))*.5
    logger.timer(mol, "vj and vk", *t0)
    return vj.reshape(dm_shape), vk.reshape(dm_shape)
def _gen_batch_nuc(mol):
    '''Coulomb integrals of the given points and orbital pairs'''
    # Build the int3c2e optimizer once; the closure below reuses it for
    # every batch of grid points.
    opt = gto.moleintor.make_cintopt(mol._atm, mol._bas, mol._env, 'int3c2e')

    def batch_nuc(mol, grid_coords, out=None):
        # Represent the grid points as a fake molecule of point charges and
        # evaluate the 3-center integrals against it.
        charge_mol = gto.fakemol_for_charges(grid_coords)
        j3c = aux_e2(mol, charge_mol, intor='int3c2e', aosym='s2ij', cintopt=opt)
        return lib.unpack_tril(j3c.T, out=out)

    return batch_nuc
def _gen_jk_direct(mol, aosym, with_j, with_k, direct_scf_tol, sgxopt=None, pjs=False):
    '''Contraction between sgX Coulomb integrals and density matrices

    J: einsum('guv,xg->xuv', gbn, dms) if dms == rho at grid
       einsum('gij,xij->xg', gbn, dms) if dms are density matrices
    K: einsum('gtv,xgt->xgv', gbn, fg)

    Returns a closure ``jk_part(mol, grid_coords, dms, fg, weights)`` that
    drives the C-level sgX contraction and returns ``(vj, vk)`` (either may
    be None depending on with_j/with_k).
    '''
    if sgxopt is None:
        from pyscf.sgx import sgx
        sgxopt = sgx._make_opt(mol, pjs=pjs)
    sgxopt.direct_scf_tol = direct_scf_tol

    ncomp = 1
    nao = mol.nao
    # C function pointers: the integral kernel and the contraction dot.
    cintor = _vhf._fpointer(sgxopt._intor)
    fdot = _vhf._fpointer('SGXdot_nrk')
    drv = _vhf.libcvhf.SGXnr_direct_drv

    def jk_part(mol, grid_coords, dms, fg, weights):
        # Append the grid coordinates to the integral environment so the C
        # driver can locate them via the NGRIDS/PTR_GRIDS env slots.
        atm, bas, env = mol._atm, mol._bas, mol._env
        ngrids = grid_coords.shape[0]
        env = numpy.append(env, grid_coords.ravel())
        env[gto.NGRIDS] = ngrids
        env[gto.PTR_GRIDS] = mol._env.size
        if pjs:
            # P-junction screening: register the weight-normalized fg as the
            # screening density for the direct-SCF machinery.
            sgxopt.set_dm(fg / numpy.sqrt(numpy.abs(weights[None,:])),
                          mol._atm, mol._bas, env)

        ao_loc = moleintor.make_loc(bas, sgxopt._intor)
        shls_slice = (0, mol.nbas, 0, mol.nbas)

        # The C kernel expects fg with grid index last.
        fg = numpy.ascontiguousarray(fg.transpose(0,2,1))
        vj = vk = None
        fjk = []
        dmsptr = []
        vjkptr = []
        if with_j:
            if dms[0].ndim == 1:  # the value of density at each grid
                vj = numpy.zeros((len(dms),ncomp,nao,nao))[:,0]
                for i, dm in enumerate(dms):
                    dmsptr.append(dm.ctypes.data_as(ctypes.c_void_p))
                    vjkptr.append(vj[i].ctypes.data_as(ctypes.c_void_p))
                    fjk.append(_vhf._fpointer('SGXnr'+aosym+'_ijg_g_ij'))
            else:
                vj = numpy.zeros((len(dms),ncomp,ngrids))[:,0]
                for i, dm in enumerate(dms):
                    dmsptr.append(dm.ctypes.data_as(ctypes.c_void_p))
                    vjkptr.append(vj[i].ctypes.data_as(ctypes.c_void_p))
                    fjk.append(_vhf._fpointer('SGXnr'+aosym+'_ijg_ji_g'))
        if with_k:
            vk = numpy.zeros((len(fg),ncomp,nao,ngrids))[:,0]
            for i, dm in enumerate(fg):
                dmsptr.append(dm.ctypes.data_as(ctypes.c_void_p))
                vjkptr.append(vk[i].ctypes.data_as(ctypes.c_void_p))
                fjk.append(_vhf._fpointer('SGXnr'+aosym+'_ijg_gj_gi'))

        # Pack the per-matrix pointer arrays for the single driver call.
        n_dm = len(fjk)
        fjk = (ctypes.c_void_p*(n_dm))(*fjk)
        dmsptr = (ctypes.c_void_p*(n_dm))(*dmsptr)
        vjkptr = (ctypes.c_void_p*(n_dm))(*vjkptr)

        drv(cintor, fdot, fjk, dmsptr, vjkptr, n_dm, ncomp,
            (ctypes.c_int*4)(*shls_slice),
            ao_loc.ctypes.data_as(ctypes.c_void_p),
            sgxopt._cintopt, sgxopt._this,
            atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.natm),
            bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.nbas),
            env.ctypes.data_as(ctypes.c_void_p),
            ctypes.c_int(env.shape[0]),
            ctypes.c_int(2 if aosym == 's2' else 1))
        if vk is not None:
            # Restore (nset, ngrids, nao) layout and make it contiguous.
            vk = vk.transpose(0,2,1)
            vk = numpy.ascontiguousarray(vk)
        return vj, vk
    return jk_part
# Pre-processing for get_jk: build and prune the integration grids.
# Use default mesh grids and weights.
def get_gridss(mol, level=1, gthrd=1e-10):
    """Build DFT integration grids and drop points whose weighted AO values
    are all below ``gthrd`` in magnitude."""
    t_start = (logger.process_clock(), logger.perf_counter())

    grids = gen_grid.Grids(mol)
    grids.level = level
    grids.build()

    npoints = grids.weights.size
    keep_chunks = []
    # Screen in chunks of 10000 points to bound the AO-value workspace.
    for p0, p1 in lib.prange(0, npoints, 10000):
        wao = mol.eval_gto('GTOval', grids.coords[p0:p1])
        wao *= grids.weights[p0:p1, None]
        # A point survives if any weighted AO value exceeds the threshold.
        keep_chunks.append(numpy.any(wao > gthrd, axis=1) |
                           numpy.any(wao < -gthrd, axis=1))
    keep = numpy.hstack(keep_chunks)

    grids.coords = grids.coords[keep]
    grids.weights = grids.weights[keep]

    logger.debug(mol, 'threshold for grids screening %g', gthrd)
    logger.debug(mol, 'number of grids %d', grids.weights.size)
    logger.timer_debug1(mol, "Xg screening", *t_start)
    return grids
# Default J/K builder exported by this module.
get_jk = get_jk_favorj


if __name__ == '__main__':
    # Self-test: compare both sgX builders (debug and production code
    # paths) against conventional J/K matrices for a water molecule.
    from pyscf import scf
    from pyscf.sgx import sgx
    mol = gto.Mole()
    mol.build(
        verbose = 0,
        atom = [["O" , (0. , 0. , 0.)],
                [1 , (0. , -0.757 , 0.587)],
                [1 , (0. , 0.757 , 0.587)] ],
        basis = 'ccpvdz',
    )
    dm = scf.RHF(mol).run().make_rdm1()
    # Reference J/K from exact 4-center integrals.
    vjref, vkref = scf.hf.get_jk(mol, dm)
    print(numpy.einsum('ij,ji->', vjref, dm))
    print(numpy.einsum('ij,ji->', vkref, dm))

    sgxobj = sgx.SGX(mol)
    sgxobj.grids = get_gridss(mol, 0, 1e-10)
    # K-favoring builder, debug (batch_nuc) path vs. direct-driver path.
    with lib.temporary_env(sgxobj, debug=True):
        vj, vk = get_jk_favork(sgxobj, dm)
    print(numpy.einsum('ij,ji->', vj, dm))
    print(numpy.einsum('ij,ji->', vk, dm))
    print(abs(vjref-vj).max().max())
    print(abs(vkref-vk).max().max())
    with lib.temporary_env(sgxobj, debug=False):
        vj1, vk1 = get_jk_favork(sgxobj, dm)
    print(abs(vj - vj1).max())
    print(abs(vk - vk1).max())

    # Same comparisons for the J-favoring builder.
    with lib.temporary_env(sgxobj, debug=True):
        vj, vk = get_jk_favorj(sgxobj, dm)
    print(numpy.einsum('ij,ji->', vj, dm))
    print(numpy.einsum('ij,ji->', vk, dm))
    print(abs(vjref-vj).max().max())
    print(abs(vkref-vk).max().max())
    with lib.temporary_env(sgxobj, debug=False):
        vj1, vk1 = get_jk_favorj(sgxobj, dm)
    print(abs(vj - vj1).max())
    print(abs(vk - vk1).max())
| |
import re
import pytest
from django.http import HttpRequest, HttpResponse
from django.test import Client
from helusers.jwt import JWT
from helusers.models import OIDCBackChannelLogoutEvent
from .conftest import AUDIENCE, encoded_jwt_factory, ISSUER1, unix_timestamp_now
from .keys import rsa_key2
# Sentinel distinguishing "argument not given" from an explicit None.
_NOT_PROVIDED = object()


@pytest.fixture(autouse=True)
def auto_auth_server(auth_server):
    """Activate the mocked OIDC auth server fixture for every test."""
    return auth_server
def build_logout_token(**kwargs):
    """Return an encoded logout token, filling in valid defaults for any
    claim the caller did not provide.

    Factories are used so a default (e.g. the "iat" timestamp) is only
    computed when the corresponding claim is absent.
    """
    claim_defaults = {
        "iss": lambda: ISSUER1,
        "aud": lambda: AUDIENCE,
        "iat": lambda: unix_timestamp_now() - 1,
        "jti": lambda: "jwt_id",
        "sub": lambda: "sub_value",
        "events": lambda: {
            "http://schemas.openid.net/event/backchannel-logout": {}
        },
    }
    for claim, make_default in claim_defaults.items():
        if claim not in kwargs:
            kwargs[claim] = make_default()
    return encoded_jwt_factory(**kwargs)
def execute_back_channel_logout(
    http_method="post",
    content_type="application/x-www-form-urlencoded",
    overwrite_token=_NOT_PROVIDED,
    **kwargs,
):
    """Send a back-channel logout request and return the Django response.

    Keyword arguments are forwarded to build_logout_token(). Pass
    ``overwrite_token`` to send a literal token value instead, or ``None``
    to omit the logout_token parameter entirely.
    """
    request_kwargs = {}

    if overwrite_token is not None:
        if overwrite_token is _NOT_PROVIDED:
            token = build_logout_token(**kwargs)
        else:
            token = overwrite_token

        if content_type == "application/x-www-form-urlencoded":
            request_kwargs["data"] = f"logout_token={token}"
        else:
            request_kwargs["data"] = {"logout_token": token}

    if content_type:
        request_kwargs["content_type"] = content_type

    send_request = getattr(Client(), http_method)
    return send_request("/logout/oidc/backchannel/", **request_kwargs)
@pytest.mark.django_db
def test_valid_logout_token_is_accepted(all_auth_servers):
    # A token signed by any configured auth server is accepted.
    response = execute_back_channel_logout(
        iss=all_auth_servers.issuer, signing_key=all_auth_servers.key
    )

    assert response.status_code == 200


@pytest.mark.parametrize("http_method", ("get", "head"))
def test_do_not_accept_query_http_methods(http_method):
    response = execute_back_channel_logout(http_method=http_method, content_type=None)

    assert response.status_code == 405


@pytest.mark.parametrize("http_method", ("put", "patch", "delete", "options", "trace"))
def test_accept_only_post_modification_http_method(http_method):
    response = execute_back_channel_logout(http_method=http_method)

    assert response.status_code == 405


def test_require_application_x_www_form_urlencoded_content_type():
    # Test client uses multipart/form-data content type by default for POST requests
    response = execute_back_channel_logout(content_type=None)

    assert response.status_code == 400


@pytest.mark.django_db
def test_include_cache_prevention_response_headers():
    # The response must be marked uncacheable.
    response = execute_back_channel_logout()

    cache_control_header = response.get("Cache-Control", "")
    cache_controls = {
        v.lower() for v in re.split(r"\s*,\s*", cache_control_header) if v
    }
    assert cache_controls == {"no-cache", "no-store"}

    pragma_header = response.get("Pragma", "")
    assert pragma_header == "no-cache"
def test_require_logout_token_parameter():
    response = execute_back_channel_logout(overwrite_token=None)

    assert response.status_code == 400


def test_handle_undecodable_logout_token():
    response = execute_back_channel_logout(overwrite_token="invalid_token")

    assert response.status_code == 400


def test_invalid_signature_is_not_accepted():
    # Token signed with a key the auth server does not use.
    response = execute_back_channel_logout(signing_key=rsa_key2)

    assert response.status_code == 400


def test_issuer_is_required():
    response = execute_back_channel_logout(iss=None)

    assert response.status_code == 400


def test_issuer_not_found_from_settings_is_not_accepted():
    response = execute_back_channel_logout(iss="unknown_issuer")

    assert response.status_code == 400


def test_audience_is_required():
    response = execute_back_channel_logout(aud=None)

    assert response.status_code == 400


@pytest.mark.django_db
def test_audience_in_token_can_be_a_list():
    # It is sufficient that the configured audience appears in the list.
    response = execute_back_channel_logout(
        aud=["some_audience", AUDIENCE, "another_audience"]
    )

    assert response.status_code == 200


def test_audience_not_found_from_settings_is_not_accepted():
    response = execute_back_channel_logout(aud="unknown_audience")

    assert response.status_code == 400


def test_iat_claim_is_required():
    response = execute_back_channel_logout(iat=None)

    assert response.status_code == 400


def test_iat_claim_must_be_a_number():
    response = execute_back_channel_logout(iat="not_number")

    assert response.status_code == 400


@pytest.mark.django_db
@pytest.mark.parametrize(
    "sub,sid", [("sub_only", None), (None, "sid_only"), ("both_sub", "and_sid")]
)
def test_accepted_sub_and_sid_claim_combinations(sub, sid):
    # At least one of "sub"/"sid" must be present, and present ones must be
    # strings.
    response = execute_back_channel_logout(sub=sub, sid=sid)

    assert response.status_code == 200


@pytest.mark.parametrize(
    "sub,sid", [(None, None), ("non_string_sid", 123), (123, "non_string_sub")]
)
def test_rejected_sub_and_sid_claim_combinations(sub, sid):
    response = execute_back_channel_logout(sub=sub, sid=sid)

    assert response.status_code == 400


@pytest.mark.parametrize(
    "value",
    [
        None,
        "not_object",
        {"no_required_member": {}},
        {"http://schemas.openid.net/event/backchannel-logout": "not_object"},
    ],
)
def test_rejected_events_claim_values(value):
    response = execute_back_channel_logout(events=value)

    assert response.status_code == 400


def test_nonce_claim_is_not_allowed():
    response = execute_back_channel_logout(nonce="not allowed")

    assert response.status_code == 400


def test_jti_claim_is_required():
    response = execute_back_channel_logout(jti=None)

    assert response.status_code == 400


@pytest.mark.parametrize("value", [123, {}, []])
def test_jti_claim_must_be_a_string(value):
    response = execute_back_channel_logout(jti=value)

    assert response.status_code == 400
def _callback(**kwargs):
    # Placeholder target for the HELUSERS_BACK_CHANNEL_LOGOUT_CALLBACK
    # setting; the `callback` fixture patches it with a mock.
    pass


@pytest.fixture
def callback(settings, mocker):
    """Point the back-channel logout callback setting at a mock, return the mock."""
    callback_mock = mocker.patch(
        "helusers.tests.test_back_channel_logout._callback", autospec=True
    )
    callback_mock.return_value = None
    settings.HELUSERS_BACK_CHANNEL_LOGOUT_CALLBACK = (
        "helusers.tests.test_back_channel_logout._callback"
    )
    return callback_mock
class TestUserProvidedCallback:
    """Tests for the user-configurable back-channel logout callback hook."""

    @pytest.mark.django_db
    def test_calls_user_provided_callback_for_valid_token(self, callback):
        execute_back_channel_logout()

        assert callback.call_count == 1
        call_args, call_kwargs = callback.call_args
        # The callback is invoked with keyword arguments only.
        assert call_args == ()
        request_arg = call_kwargs["request"]
        assert isinstance(request_arg, HttpRequest)
        jwt_arg = call_kwargs["jwt"]
        assert isinstance(jwt_arg, JWT)
        assert jwt_arg.issuer == ISSUER1

    def test_does_not_call_user_provided_callback_for_invalid_token(self, callback):
        execute_back_channel_logout(iss="unknown_issuer")

        assert callback.call_count == 0

    @pytest.mark.parametrize("status", [418, 504])
    @pytest.mark.django_db
    def test_4xx_and_5xx_http_responses_returned_by_callback_terminate_the_logout_handling(
        self, callback, status
    ):
        callback.return_value = HttpResponse(status=status)

        response = execute_back_channel_logout()

        assert response.status_code == status
        # Handling stopped before the logout event was persisted.
        assert OIDCBackChannelLogoutEvent.objects.count() == 0

    @pytest.mark.parametrize("response", [None, "something", HttpResponse(status=301)])
    @pytest.mark.django_db
    def test_other_responses_returned_by_callback_are_ignored(self, callback, response):
        callback.return_value = response

        response = execute_back_channel_logout()

        assert response.status_code == 200
        assert OIDCBackChannelLogoutEvent.objects.count() == 1
| |
# Wrapper module for _socket, providing some additional facilities
# implemented in Python.
"""\
This module provides socket operations and some related functions.
On Unix, it supports IP (Internet Protocol) and Unix domain sockets.
On other systems, it only supports IP. Functions specific for a
socket are available as methods of the socket object.
Functions:
socket() -- create a new socket object
socketpair() -- create a pair of new socket objects [*]
fromfd() -- create a socket object from an open file descriptor [*]
gethostname() -- return the current hostname
gethostbyname() -- map a hostname to its IP number
gethostbyaddr() -- map an IP number or hostname to DNS info
getservbyname() -- map a service name and a protocol name to a port number
getprotobyname() -- map a protocol name (e.g. 'tcp') to a number
ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order
htons(), htonl() -- convert 16, 32 bit int from host to network byte order
inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format
inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89)
ssl() -- secure socket layer support (only available if configured)
socket.getdefaulttimeout() -- get the default timeout value
socket.setdefaulttimeout() -- set the default timeout value
create_connection() -- connects to an address, with an optional timeout and
optional source address.
[*] not available on all platforms!
Special objects:
SocketType -- type object for socket objects
error -- exception raised for I/O errors
has_ipv6 -- boolean value indicating if IPv6 is supported
Integer constants:
AF_INET, AF_UNIX -- socket domains (first argument to socket() call)
SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument)
Many other constants may be defined; these may be used in calls to
the setsockopt() and getsockopt() methods.
"""
import _socket
from _socket import *
from functools import partial
from types import MethodType
try:
    import _ssl
except ImportError:
    # no SSL support
    pass
else:
    def ssl(sock, keyfile=None, certfile=None):
        """Deprecated compatibility shim; use ssl.wrap_socket() instead."""
        # we do an internal import here because the ssl
        # module imports the socket module
        import ssl as _realssl
        # NOTE: `warnings` is imported further down in this module; by the
        # time ssl() can be called the module-level imports have completed.
        warnings.warn("socket.ssl() is deprecated. Use ssl.wrap_socket() instead.",
                      DeprecationWarning, stacklevel=2)
        return _realssl.sslwrap_simple(sock, keyfile, certfile)

    # we need to import the same constants we used to...
    from _ssl import SSLError as sslerror
    from _ssl import \
         RAND_add, \
         RAND_egd, \
         RAND_status, \
         SSL_ERROR_ZERO_RETURN, \
         SSL_ERROR_WANT_READ, \
         SSL_ERROR_WANT_WRITE, \
         SSL_ERROR_WANT_X509_LOOKUP, \
         SSL_ERROR_SYSCALL, \
         SSL_ERROR_SSL, \
         SSL_ERROR_WANT_CONNECT, \
         SSL_ERROR_EOF, \
         SSL_ERROR_INVALID_ERROR_CODE
import os, sys, warnings
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
import errno
except ImportError:
errno = None
# errno values with portable fallbacks for platforms lacking the errno module.
EBADF = getattr(errno, 'EBADF', 9)
EINTR = getattr(errno, 'EINTR', 4)

__all__ = ["getfqdn", "create_connection"]
__all__.extend(os._get_exports_list(_socket))

# Keep a reference to the C-level socket type before this module shadows
# the name "socket" with the Python wrapper class defined below.
_realsocket = socket
# WSA error codes
if sys.platform.lower().startswith("win"):
    # Human-readable descriptions for common Winsock error numbers.
    errorTab = {
        10004: "The operation was interrupted.",
        10009: "A bad file handle was passed.",
        10013: "Permission denied.",
        10014: "A fault occurred on the network??",  # WSAEFAULT
        10022: "An invalid operation was attempted.",
        10035: "The socket operation would block",
        10036: "A blocking operation is already in progress.",
        10048: "The network address is in use.",
        10054: "The connection has been reset.",
        10058: "The network has been shut down.",
        10060: "The operation timed out.",
        10061: "Connection refused.",
        10063: "The name is too long.",
        10064: "The host is down.",
        10065: "The host is unreachable.",
    }
    __all__.append("errorTab")
def getfqdn(name=''):
"""Get fully qualified domain name from name.
An empty argument is interpreted as meaning the local host.
First the hostname returned by gethostbyaddr() is checked, then
possibly existing aliases. In case no FQDN is available, hostname
from gethostname() is returned.
"""
name = name.strip()
if not name or name == '0.0.0.0':
name = gethostname()
try:
hostname, aliases, ipaddrs = gethostbyaddr(name)
except error:
pass
else:
aliases.insert(0, hostname)
for name in aliases:
if '.' in name:
break
else:
name = hostname
return name
# Methods of the real socket object that _socketobject forwards through
# generated wrappers (see the loop after the class definition).
_socketmethods = (
    'bind', 'connect', 'connect_ex', 'fileno', 'listen',
    'getpeername', 'getsockname', 'getsockopt', 'setsockopt',
    'sendall', 'setblocking',
    'settimeout', 'gettimeout', 'shutdown')

if os.name == "nt":
    _socketmethods = _socketmethods + ('ioctl',)

if sys.platform == "riscos":
    _socketmethods = _socketmethods + ('sleeptaskw',)

# All the method names that must be delegated to either the real socket
# object or the _closedsocket object.
_delegate_methods = ("recv", "recvfrom", "recv_into", "recvfrom_into",
                     "send", "sendto")
class _closedsocket(object):
    """Stand-in installed by _socketobject.close(): every operation raises EBADF."""
    __slots__ = []
    def _dummy(*args):
        raise error(EBADF, 'Bad file descriptor')
    # All _delegate_methods must also be initialized here.
    send = recv = recv_into = sendto = recvfrom = recvfrom_into = _dummy
    __getattr__ = _dummy
# Wrapper around platform socket objects. This implements
# a platform-independent dup() functionality. The
# implementation currently relies on reference counting
# to close the underlying socket object.
class _socketobject(object):

    __doc__ = _realsocket.__doc__

    __slots__ = ["_sock", "__weakref__"] + list(_delegate_methods)

    def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None):
        if _sock is None:
            _sock = _realsocket(family, type, proto)
        self._sock = _sock
        # Bind the delegated I/O methods straight to the real socket for
        # speed; close() rebinds them to _closedsocket._dummy.
        for method in _delegate_methods:
            setattr(self, method, getattr(_sock, method))

    def close(self):
        # Dropping our reference lets refcounting close the real socket
        # once no other wrapper (e.g. one produced by dup()) holds it.
        self._sock = _closedsocket()
        dummy = self._sock._dummy
        for method in _delegate_methods:
            setattr(self, method, dummy)
    close.__doc__ = _realsocket.close.__doc__

    def accept(self):
        sock, addr = self._sock.accept()
        return _socketobject(_sock=sock), addr
    accept.__doc__ = _realsocket.accept.__doc__

    def dup(self):
        """dup() -> socket object

        Return a new socket object connected to the same system resource."""
        return _socketobject(_sock=self._sock)

    def makefile(self, mode='r', bufsize=-1):
        """makefile([mode[, bufsize]]) -> file object

        Return a regular file object corresponding to the socket. The mode
        and bufsize arguments are as for the built-in open() function."""
        return _fileobject(self._sock, mode, bufsize)

    family = property(lambda self: self._sock.family, doc="the socket family")
    type = property(lambda self: self._sock.type, doc="the socket type")
    proto = property(lambda self: self._sock.proto, doc="the socket protocol")
def meth(name,self,*args):
    # Generic forwarding shim: partial(meth, name) becomes a method that
    # invokes the identically-named method on the wrapped _sock.
    return getattr(self._sock,name)(*args)

for _m in _socketmethods:
    p = partial(meth,_m)
    p.__name__ = _m
    p.__doc__ = getattr(_realsocket,_m).__doc__
    # Python 2 unbound-method form: MethodType(func, instance, class).
    m = MethodType(p,None,_socketobject)
    setattr(_socketobject,_m,m)

# Public names: the wrapper class is both socket() and SocketType.
socket = SocketType = _socketobject
class _fileobject(object):
    """Faux file object attached to a socket object.

    Provides buffered read/write (and line-oriented) access on top of the
    raw recv/sendall calls of the wrapped socket.
    """

    default_bufsize = 8192
    name = "<socket>"

    __slots__ = ["mode", "bufsize", "softspace",
                 # "closed" is a property, see below
                 "_sock", "_rbufsize", "_wbufsize", "_rbuf", "_wbuf", "_wbuf_len",
                 "_close"]

    def __init__(self, sock, mode='rb', bufsize=-1, close=False):
        self._sock = sock
        self.mode = mode # Not actually used in this version
        if bufsize < 0:
            bufsize = self.default_bufsize
        self.bufsize = bufsize
        self.softspace = False
        # _rbufsize is the suggested recv buffer size. It is *strictly*
        # obeyed within readline() for recv calls. If it is larger than
        # default_bufsize it will be used for recv calls within read().
        if bufsize == 0:
            self._rbufsize = 1
        elif bufsize == 1:
            self._rbufsize = self.default_bufsize
        else:
            self._rbufsize = bufsize
        self._wbufsize = bufsize
        # We use StringIO for the read buffer to avoid holding a list
        # of variously sized string objects which have been known to
        # fragment the heap due to how they are malloc()ed and often
        # realloc()ed down much smaller than their original allocation.
        self._rbuf = StringIO()
        self._wbuf = [] # A list of strings
        self._wbuf_len = 0
        # Whether close() should also close the wrapped socket.
        self._close = close

    def _getclosed(self):
        return self._sock is None
    closed = property(_getclosed, doc="True if the file is closed")

    def close(self):
        try:
            if self._sock:
                self.flush()
        finally:
            if self._close:
                self._sock.close()
            self._sock = None

    def __del__(self):
        try:
            self.close()
        except:
            # close() may fail if __init__ didn't complete
            pass

    def flush(self):
        """Send all buffered write data; re-buffer anything left unsent."""
        if self._wbuf:
            data = "".join(self._wbuf)
            self._wbuf = []
            self._wbuf_len = 0
            buffer_size = max(self._rbufsize, self.default_bufsize)
            data_size = len(data)
            write_offset = 0
            view = memoryview(data)
            try:
                while write_offset < data_size:
                    self._sock.sendall(view[write_offset:write_offset+buffer_size])
                    write_offset += buffer_size
            finally:
                # If sendall raised mid-way, keep the unsent tail buffered
                # so a later flush can retry it.
                if write_offset < data_size:
                    remainder = data[write_offset:]
                    del view, data # explicit free
                    self._wbuf.append(remainder)
                    self._wbuf_len = len(remainder)

    def fileno(self):
        return self._sock.fileno()

    def write(self, data):
        """Buffer *data*, flushing according to the buffering policy."""
        data = str(data) # XXX Should really reject non-string non-buffers
        if not data:
            return
        self._wbuf.append(data)
        self._wbuf_len += len(data)
        # Flush when unbuffered (0), line-buffered (1) and a newline was
        # written, or the buffer reached the configured size.
        if (self._wbufsize == 0 or
            self._wbufsize == 1 and '\n' in data or
            self._wbuf_len >= self._wbufsize):
            self.flush()

    def writelines(self, list):
        # XXX We could do better here for very long lists
        # XXX Should really reject non-string non-buffers
        lines = filter(None, map(str, list))
        self._wbuf_len += sum(map(len, lines))
        self._wbuf.extend(lines)
        if (self._wbufsize <= 1 or
            self._wbuf_len >= self._wbufsize):
            self.flush()

    def read(self, size=-1):
        """Read up to *size* bytes (all remaining data if size < 0)."""
        # Use max, disallow tiny reads in a loop as they are very inefficient.
        # We never leave read() with any leftover data from a new recv() call
        # in our internal buffer.
        rbufsize = max(self._rbufsize, self.default_bufsize)
        # Our use of StringIO rather than lists of string objects returned by
        # recv() minimizes memory usage and fragmentation that occurs when
        # rbufsize is large compared to the typical return value of recv().
        buf = self._rbuf
        buf.seek(0, 2)  # seek end
        if size < 0:
            # Read until EOF
            self._rbuf = StringIO()  # reset _rbuf. we consume it via buf.
            while True:
                try:
                    data = self._sock.recv(rbufsize)
                except error, e:
                    # Retry recv() interrupted by a signal.
                    if e.args[0] == EINTR:
                        continue
                    raise
                if not data:
                    break
                buf.write(data)
            return buf.getvalue()
        else:
            # Read until size bytes or EOF seen, whichever comes first
            buf_len = buf.tell()
            if buf_len >= size:
                # Already have size bytes in our buffer? Extract and return.
                buf.seek(0)
                rv = buf.read(size)
                self._rbuf = StringIO()
                self._rbuf.write(buf.read())
                return rv

            self._rbuf = StringIO()  # reset _rbuf. we consume it via buf.
            while True:
                left = size - buf_len
                # recv() will malloc the amount of memory given as its
                # parameter even though it often returns much less data
                # than that. The returned data string is short lived
                # as we copy it into a StringIO and free it. This avoids
                # fragmentation issues on many platforms.
                try:
                    data = self._sock.recv(left)
                except error, e:
                    if e.args[0] == EINTR:
                        continue
                    raise
                if not data:
                    break
                n = len(data)
                if n == size and not buf_len:
                    # Shortcut. Avoid buffer data copies when:
                    # - We have no data in our buffer.
                    # AND
                    # - Our call to recv returned exactly the
                    # number of bytes we were asked to read.
                    return data
                if n == left:
                    buf.write(data)
                    del data # explicit free
                    break
                assert n <= left, "recv(%d) returned %d bytes" % (left, n)
                buf.write(data)
                buf_len += n
                del data # explicit free
                #assert buf_len == buf.tell()
            return buf.getvalue()

    def readline(self, size=-1):
        """Read one line, up to *size* bytes if size >= 0."""
        buf = self._rbuf
        buf.seek(0, 2)  # seek end
        if buf.tell() > 0:
            # check if we already have it in our buffer
            buf.seek(0)
            bline = buf.readline(size)
            if bline.endswith('\n') or len(bline) == size:
                self._rbuf = StringIO()
                self._rbuf.write(buf.read())
                return bline
            del bline
        if size < 0:
            # Read until \n or EOF, whichever comes first
            if self._rbufsize <= 1:
                # Speed up unbuffered case
                buf.seek(0)
                buffers = [buf.read()]
                self._rbuf = StringIO()  # reset _rbuf. we consume it via buf.
                data = None
                recv = self._sock.recv
                while True:
                    try:
                        while data != "\n":
                            data = recv(1)
                            if not data:
                                break
                            buffers.append(data)
                    except error, e:
                        # The try..except to catch EINTR was moved outside the
                        # recv loop to avoid the per byte overhead.
                        if e.args[0] == EINTR:
                            continue
                        raise
                    break
                return "".join(buffers)

            buf.seek(0, 2)  # seek end
            self._rbuf = StringIO()  # reset _rbuf. we consume it via buf.
            while True:
                try:
                    data = self._sock.recv(self._rbufsize)
                except error, e:
                    if e.args[0] == EINTR:
                        continue
                    raise
                if not data:
                    break
                nl = data.find('\n')
                if nl >= 0:
                    nl += 1
                    # Keep everything up to and including the newline;
                    # stash the excess for the next read.
                    buf.write(data[:nl])
                    self._rbuf.write(data[nl:])
                    del data
                    break
                buf.write(data)
            return buf.getvalue()
        else:
            # Read until size bytes or \n or EOF seen, whichever comes first
            buf.seek(0, 2)  # seek end
            buf_len = buf.tell()
            if buf_len >= size:
                buf.seek(0)
                rv = buf.read(size)
                self._rbuf = StringIO()
                self._rbuf.write(buf.read())
                return rv
            self._rbuf = StringIO()  # reset _rbuf. we consume it via buf.
            while True:
                try:
                    data = self._sock.recv(self._rbufsize)
                except error, e:
                    if e.args[0] == EINTR:
                        continue
                    raise
                if not data:
                    break
                left = size - buf_len
                # did we just receive a newline?
                nl = data.find('\n', 0, left)
                if nl >= 0:
                    nl += 1
                    # save the excess data to _rbuf
                    self._rbuf.write(data[nl:])
                    if buf_len:
                        buf.write(data[:nl])
                        break
                    else:
                        # Shortcut. Avoid data copy through buf when returning
                        # a substring of our first recv().
                        return data[:nl]
                n = len(data)
                if n == size and not buf_len:
                    # Shortcut. Avoid data copy through buf when
                    # returning exactly all of our first recv().
                    return data
                if n >= left:
                    buf.write(data[:left])
                    self._rbuf.write(data[left:])
                    break
                buf.write(data)
                buf_len += n
                #assert buf_len == buf.tell()
            return buf.getvalue()

    def readlines(self, sizehint=0):
        """Read lines until EOF (or until roughly *sizehint* bytes)."""
        total = 0
        list = []
        while True:
            line = self.readline()
            if not line:
                break
            list.append(line)
            total += len(line)
            if sizehint and total >= sizehint:
                break
        return list

    # Iterator protocols

    def __iter__(self):
        return self

    def next(self):
        line = self.readline()
        if not line:
            raise StopIteration
        return line
# Sentinel: distinguishes "no timeout argument" from an explicit None.
_GLOBAL_DEFAULT_TIMEOUT = object()


def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
                      source_address=None):
    """Connect to *address* and return the socket object.

    Convenience function. Connect to *address* (a 2-tuple ``(host,
    port)``) and return the socket object. Passing the optional
    *timeout* parameter will set the timeout on the socket instance
    before attempting to connect. If no *timeout* is supplied, the
    global default timeout setting returned by :func:`getdefaulttimeout`
    is used. If *source_address* is set it must be a tuple of (host, port)
    for the socket to bind as a source address before making the connection.
    An host of '' or port 0 tells the OS to use the default.
    """

    msg = "getaddrinfo returns an empty list"
    host, port = address
    # Try each resolved address in turn; return the first that connects.
    for res in getaddrinfo(host, port, 0, SOCK_STREAM):
        af, socktype, proto, canonname, sa = res
        sock = None
        try:
            sock = socket(af, socktype, proto)
            if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
                sock.settimeout(timeout)
            if source_address:
                sock.bind(source_address)
            sock.connect(sa)
            return sock

        except error, msg:
            # Remember the last error (rebinding msg) and clean up the
            # half-constructed socket before trying the next address.
            if sock is not None:
                sock.close()

    # Nothing connected: re-raise the last error, or the placeholder
    # message if getaddrinfo returned no results at all.
    raise error, msg
| |
"""Support for Waze travel time sensor."""
from datetime import timedelta
import logging
import re
import WazeRouteCalculator
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_NAME,
CONF_REGION,
CONF_UNIT_SYSTEM_IMPERIAL,
CONF_UNIT_SYSTEM_METRIC,
EVENT_HOMEASSISTANT_START,
TIME_MINUTES,
)
from homeassistant.helpers import location
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)

# Keys exposed in the sensor's state attributes.
ATTR_DESTINATION = "destination"
ATTR_DURATION = "duration"
ATTR_DISTANCE = "distance"
ATTR_ORIGIN = "origin"
ATTR_ROUTE = "route"

ATTRIBUTION = "Powered by Waze"

# YAML configuration option keys.
CONF_DESTINATION = "destination"
CONF_ORIGIN = "origin"
CONF_INCL_FILTER = "incl_filter"
CONF_EXCL_FILTER = "excl_filter"
CONF_REALTIME = "realtime"
CONF_UNITS = "units"
CONF_VEHICLE_TYPE = "vehicle_type"
CONF_AVOID_TOLL_ROADS = "avoid_toll_roads"
CONF_AVOID_SUBSCRIPTION_ROADS = "avoid_subscription_roads"
CONF_AVOID_FERRIES = "avoid_ferries"

# Defaults applied when the corresponding option is omitted.
DEFAULT_NAME = "Waze Travel Time"
DEFAULT_REALTIME = True
DEFAULT_VEHICLE_TYPE = "car"
DEFAULT_AVOID_TOLL_ROADS = False
DEFAULT_AVOID_SUBSCRIPTION_ROADS = False
DEFAULT_AVOID_FERRIES = False

ICON = "mdi:car"

# Allowed option values.
UNITS = [CONF_UNIT_SYSTEM_METRIC, CONF_UNIT_SYSTEM_IMPERIAL]
REGIONS = ["US", "NA", "EU", "IL", "AU"]
VEHICLE_TYPES = ["car", "taxi", "motorcycle"]

SCAN_INTERVAL = timedelta(minutes=5)

# Validation schema for this sensor platform's YAML configuration.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_ORIGIN): cv.string,
        vol.Required(CONF_DESTINATION): cv.string,
        vol.Required(CONF_REGION): vol.In(REGIONS),
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_INCL_FILTER): cv.string,
        vol.Optional(CONF_EXCL_FILTER): cv.string,
        vol.Optional(CONF_REALTIME, default=DEFAULT_REALTIME): cv.boolean,
        vol.Optional(CONF_VEHICLE_TYPE, default=DEFAULT_VEHICLE_TYPE): vol.In(
            VEHICLE_TYPES
        ),
        vol.Optional(CONF_UNITS): vol.In(UNITS),
        vol.Optional(
            CONF_AVOID_TOLL_ROADS, default=DEFAULT_AVOID_TOLL_ROADS
        ): cv.boolean,
        vol.Optional(
            CONF_AVOID_SUBSCRIPTION_ROADS, default=DEFAULT_AVOID_SUBSCRIPTION_ROADS
        ): cv.boolean,
        vol.Optional(CONF_AVOID_FERRIES, default=DEFAULT_AVOID_FERRIES): cv.boolean,
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Waze travel time sensor platform."""
    # Origin/destination are resolved later by the sensor (they may be
    # entity ids), so the data holder starts without them.
    data = WazeTravelTimeData(
        None,
        None,
        config.get(CONF_REGION),
        config.get(CONF_INCL_FILTER),
        config.get(CONF_EXCL_FILTER),
        config.get(CONF_REALTIME),
        config.get(CONF_UNITS, hass.config.units.name),
        config.get(CONF_VEHICLE_TYPE),
        config.get(CONF_AVOID_TOLL_ROADS),
        config.get(CONF_AVOID_SUBSCRIPTION_ROADS),
        config.get(CONF_AVOID_FERRIES),
    )

    sensor = WazeTravelTime(
        config.get(CONF_NAME),
        config.get(CONF_ORIGIN),
        config.get(CONF_DESTINATION),
        data,
    )
    add_entities([sensor])

    # Wait until start event is sent to load this component.
    hass.bus.listen_once(EVENT_HOMEASSISTANT_START, lambda _: sensor.update())
def _get_location_from_attributes(state):
    """Return a "lat,long" string built from a state's location attributes."""
    attributes = state.attributes
    return f"{attributes.get(ATTR_LATITUDE)},{attributes.get(ATTR_LONGITUDE)}"
class WazeTravelTime(Entity):
"""Representation of a Waze travel time sensor."""
def __init__(self, name, origin, destination, waze_data):
    """Initialize the Waze travel time sensor."""
    self._name = name
    self._waze_data = waze_data
    self._state = None
    self._origin_entity_id = None
    self._destination_entity_id = None

    # Attempt to find entity_id without finding address with period.
    # Matches "<domain>.<object_id>" (e.g. "zone.home") while rejecting
    # street addresses that merely contain a period.
    pattern = "(?<![a-zA-Z0-9 ])[a-z_]+[.][a-zA-Z0-9_]+"

    if re.fullmatch(pattern, origin):
        _LOGGER.debug("Found origin source entity %s", origin)
        self._origin_entity_id = origin
    else:
        self._waze_data.origin = origin

    if re.fullmatch(pattern, destination):
        _LOGGER.debug("Found destination source entity %s", destination)
        self._destination_entity_id = destination
    else:
        self._waze_data.destination = destination
@property
def name(self):
    """Return the name of the sensor."""
    return self._name

@property
def state(self):
    """Return the state of the sensor (route duration in whole minutes)."""
    # None until the first successful update.
    if self._waze_data.duration is not None:
        return round(self._waze_data.duration)

    return None

@property
def unit_of_measurement(self):
    """Return the unit of measurement."""
    return TIME_MINUTES

@property
def icon(self):
    """Icon to use in the frontend, if any."""
    return ICON

@property
def device_state_attributes(self):
    """Return the state attributes of the last update."""
    if self._waze_data.duration is None:
        return None

    res = {ATTR_ATTRIBUTION: ATTRIBUTION}
    res[ATTR_DURATION] = self._waze_data.duration
    res[ATTR_DISTANCE] = self._waze_data.distance
    res[ATTR_ROUTE] = self._waze_data.route
    res[ATTR_ORIGIN] = self._waze_data.origin
    res[ATTR_DESTINATION] = self._waze_data.destination
    return res
def _get_location_from_entity(self, entity_id):
"""Get the location from the entity_id."""
state = self.hass.states.get(entity_id)
if state is None:
_LOGGER.error("Unable to find entity %s", entity_id)
return None
# Check if the entity has location attributes.
if location.has_location(state):
_LOGGER.debug("Getting %s location", entity_id)
return _get_location_from_attributes(state)
# Check if device is inside a zone.
zone_state = self.hass.states.get(f"zone.{state.state}")
if location.has_location(zone_state):
_LOGGER.debug(
"%s is in %s, getting zone location", entity_id, zone_state.entity_id
)
return _get_location_from_attributes(zone_state)
# If zone was not found in state then use the state as the location.
if entity_id.startswith("sensor."):
return state.state
# When everything fails just return nothing.
return None
def _resolve_zone(self, friendly_name):
"""Get a lat/long from a zones friendly_name."""
states = self.hass.states.all()
for state in states:
if state.domain == "zone" and state.name == friendly_name:
return _get_location_from_attributes(state)
return friendly_name
def update(self):
"""Fetch new state data for the sensor."""
_LOGGER.debug("Fetching Route for %s", self._name)
# Get origin latitude and longitude from entity_id.
if self._origin_entity_id is not None:
self._waze_data.origin = self._get_location_from_entity(
self._origin_entity_id
)
# Get destination latitude and longitude from entity_id.
if self._destination_entity_id is not None:
self._waze_data.destination = self._get_location_from_entity(
self._destination_entity_id
)
# Get origin from zone name.
self._waze_data.origin = self._resolve_zone(self._waze_data.origin)
# Get destination from zone name.
self._waze_data.destination = self._resolve_zone(self._waze_data.destination)
self._waze_data.update()
class WazeTravelTimeData:
    """Holder for the route data fetched from WazeRouteCalculator.

    Exposes ``duration`` (minutes), ``distance`` (km or miles depending on
    ``units``) and ``route`` after a successful :meth:`update`.
    """

    def __init__(
        self,
        origin,
        destination,
        region,
        include,
        exclude,
        realtime,
        units,
        vehicle_type,
        avoid_toll_roads,
        avoid_subscription_roads,
        avoid_ferries,
    ):
        """Set up WazeRouteCalculator."""
        self._calc = WazeRouteCalculator
        self.origin = origin
        self.destination = destination
        self.region = region
        self.include = include
        self.exclude = exclude
        self.realtime = realtime
        self.units = units
        self.duration = None
        self.distance = None
        self.route = None
        self.avoid_toll_roads = avoid_toll_roads
        self.avoid_subscription_roads = avoid_subscription_roads
        self.avoid_ferries = avoid_ferries
        # Currently WazeRouteCalc only supports PRIVATE, TAXI, MOTORCYCLE.
        if vehicle_type.upper() == "CAR":
            # Empty means PRIVATE for waze which translates to car.
            self.vehicle_type = ""
        else:
            self.vehicle_type = vehicle_type.upper()

    def update(self):
        """Update WazeRouteCalculator Sensor."""
        if self.origin is None or self.destination is None:
            return
        try:
            params = self._calc.WazeRouteCalculator(
                self.origin,
                self.destination,
                self.region,
                self.vehicle_type,
                self.avoid_toll_roads,
                self.avoid_subscription_roads,
                self.avoid_ferries,
            )
            routes = params.calc_all_routes_info(real_time=self.realtime)
            if self.include is not None:
                routes = {
                    k: v
                    for k, v in routes.items()
                    if self.include.lower() in k.lower()
                }
            if self.exclude is not None:
                routes = {
                    k: v
                    for k, v in routes.items()
                    if self.exclude.lower() not in k.lower()
                }
            # BUG FIX: the include/exclude filters can leave the dict empty;
            # the previous unguarded ``list(routes)[0]`` then raised an
            # IndexError that neither except clause below caught.
            if not routes:
                _LOGGER.warning("No routes matched the include/exclude filters")
                return
            route = list(routes)[0]
            self.duration, distance = routes[route]
            if self.units == CONF_UNIT_SYSTEM_IMPERIAL:
                # Convert to miles.
                self.distance = distance / 1.609
            else:
                self.distance = distance
            self.route = route
        except self._calc.WRCError as exp:
            _LOGGER.warning("Error on retrieving data: %s", exp)
            return
        except KeyError:
            _LOGGER.error("Error retrieving data from server")
            return
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import threading
from ceilometerclient import client as ceilometer_client
from django.conf import settings
from django.utils import datastructures
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from openstack_dashboard.api import keystone
from openstack_dashboard.api import nova
LOG = logging.getLogger(__name__)
def get_flavor_names(request):
    """Return the available Nova flavor names, or a default set on failure."""
    # TODO(lsmola) The flavors can be set per project,
    # so it should show only valid ones.
    try:
        return [flavor.name for flavor in nova.flavor_list(request, None)]
    except Exception:
        # Best-effort fallback when the Nova API cannot be reached.
        return ['m1.tiny', 'm1.small', 'm1.medium',
                'm1.large', 'm1.xlarge']
def is_iterable(var):
    """Return True if the given is list or tuple."""
    # isinstance() already returns True for subclasses, so the former
    # additional issubclass(var.__class__, ...) clause was redundant.
    return isinstance(var, (list, tuple))
def make_query(user_id=None, tenant_id=None, resource_id=None,
               user_ids=None, tenant_ids=None, resource_ids=None):
    """Returns query built from given parameters.

    This query can be then used for querying resources, meters and
    statistics.

    :Parameters:
      - `user_id`: user_id, has a priority over list of ids
      - `tenant_id`: tenant_id, has a priority over list of ids
      - `resource_id`: resource_id, has a priority over list of ids
      - `user_ids`: list of user_ids
      - `tenant_ids`: list of tenant_ids
      - `resource_ids`: list of resource_ids
    """
    query = []
    # Each triple: (query field name, single id override, id list).
    id_groups = (
        ("user_id", user_id, user_ids),
        ("project_id", tenant_id, tenant_ids),
        ("resource_id", resource_id, resource_ids),
    )
    for field, single_id, id_list in id_groups:
        # A single id takes priority over the list form.
        values = [single_id] if single_id else (id_list or [])
        for value in values:
            query.append({"field": field, "op": "eq", "value": value})
    return query
class Meter(base.APIResourceWrapper):
    """Wrapper for a single Ceilometer meter with display metadata."""

    _attrs = ['name', 'type', 'unit', 'resource_id', 'user_id', 'project_id']

    def __init__(self, apiresource):
        super(Meter, self).__init__(apiresource)
        # Until augment() is called, the label defaults to the raw meter
        # name and the description stays empty.
        self._label = self.name
        self._description = ""

    def augment(self, label=None, description=None):
        """Overwrite display label and/or description with truthy values."""
        if label:
            self._label = label
        if description:
            self._description = description

    @property
    def description(self):
        """Human-readable description attached via augment()."""
        return self._description

    @property
    def label(self):
        """Display label; the meter name unless augment() replaced it."""
        return self._label
class Resource(base.APIResourceWrapper):
    """Wrapper for a single Ceilometer resource."""

    _attrs = ['resource_id', 'source', 'user_id', 'project_id', 'metadata',
              'links']

    def __init__(self, apiresource, ceilometer_usage=None):
        super(Resource, self).__init__(apiresource)

        # Save empty strings to IDs rather than None, so it gets
        # serialized correctly. We don't want 'None' strings.
        self.project_id = self.project_id or ""
        self.user_id = self.user_id or ""
        self.resource_id = self.resource_id or ""

        self._id = "%s__%s__%s" % (self.project_id,
                                   self.user_id,
                                   self.resource_id)

        # Meters with statistics data
        self._meters = {}

        # TODO(lsmola) make parallel obtaining of tenant and user
        # make the threading here, thread join into resource_list
        self._tenant = (ceilometer_usage.get_tenant(self.project_id)
                        if ceilometer_usage and self.project_id
                        else None)
        self._user = (ceilometer_usage.get_user(self.user_id)
                      if ceilometer_usage and self.user_id
                      else None)

        self._query = make_query(tenant_id=self.project_id,
                                 user_id=self.user_id,
                                 resource_id=self.resource_id)

    @property
    def name(self):
        """Prefer metadata 'name', fall back to 'display_name', else ''."""
        meta = self.metadata
        return meta.get("name", None) or meta.get("display_name", None) or ""

    @property
    def id(self):
        """Composite "project__user__resource" identifier."""
        return self._id

    @property
    def tenant(self):
        return self._tenant

    @property
    def user(self):
        return self._user

    @property
    def resource(self):
        return self.resource_id

    @property
    def query(self):
        """Ceilometer query that identifies exactly this resource."""
        return self._query

    @property
    def meters(self):
        return self._meters

    def get_meter(self, meter_name):
        return self._meters.get(meter_name, None)

    def set_meter(self, meter_name, value):
        self._meters[meter_name] = value
class ResourceAggregate(Resource):
    """An aggregate of several resources treated as one.

    An aggregate is defined either by an explicit ``query``, or by id
    parameters: passing multiple ids in one of the ``*_ids`` lists, or
    leaving a parameter unset (e.g. only ``tenant_id`` given aggregates
    every resource of that tenant).
    """

    def __init__(self, tenant_id=None, user_id=None, resource_id=None,
                 tenant_ids=None, user_ids=None, resource_ids=None,
                 ceilometer_usage=None, query=None, identifier=None):
        self._id = identifier

        self.tenant_id = None
        self.user_id = None
        self.resource_id = None

        # Meters with statistics data
        self._meters = {}

        if query:
            # An explicit query wins over any id parameters.
            self._query = query
            return

        # TODO(lsmola) make parallel obtaining of tenant and user
        # make the threading here, thread join into resource_list
        if ceilometer_usage and tenant_id:
            self.tenant_id = tenant_id
            self._tenant = ceilometer_usage.get_tenant(tenant_id)
        else:
            self._tenant = None

        if ceilometer_usage and user_id:
            self.user_id = user_id
            self._user = ceilometer_usage.get_user(user_id)
        else:
            self._user = None

        if resource_id:
            self.resource_id = resource_id

        self._query = make_query(tenant_id=tenant_id, user_id=user_id,
                                 resource_id=resource_id,
                                 tenant_ids=tenant_ids,
                                 user_ids=user_ids,
                                 resource_ids=resource_ids)

    @property
    def id(self):
        return self._id
class Sample(base.APIResourceWrapper):
    """Wrapper for a single Ceilometer sample."""

    # NOTE(review): 'resource_metadata' appears twice in the original list;
    # kept as-is to stay byte-identical with the wrapped attribute set.
    _attrs = ['counter_name', 'user_id', 'resource_id', 'timestamp',
              'resource_metadata', 'source', 'counter_unit', 'counter_volume',
              'project_id', 'counter_type', 'resource_metadata']

    @property
    def instance(self):
        """Instance display name, falling back to the instance id."""
        meta = self.resource_metadata
        return meta.get('display_name', None) or meta.get('instance_id', None)

    @property
    def name(self):
        """Prefer metadata 'name', fall back to 'display_name', else ''."""
        meta = self.resource_metadata
        return meta.get("name", None) or meta.get("display_name", None) or ""
class Statistic(base.APIResourceWrapper):
    """Wrapper around a single Ceilometer statistics record."""

    _attrs = ['period', 'period_start', 'period_end', 'count', 'min', 'max',
              'sum', 'avg', 'duration', 'duration_start', 'duration_end']
@memoized
def ceilometerclient(request):
    """Build (and memoize) the Ceilometer v2 client for this request."""
    metering_endpoint = base.url_for(request, 'metering')
    ssl_options = {
        'insecure': getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False),
        'cacert': getattr(settings, 'OPENSTACK_SSL_CACERT', None),
    }
    # The token is passed lazily so a refreshed token is picked up.
    return ceilometer_client.Client('2', metering_endpoint,
                                    token=(lambda: request.user.token.id),
                                    **ssl_options)
def resource_list(request, query=None, ceilometer_usage_object=None):
    """List the resources."""
    api_resources = ceilometerclient(request).resources.list(q=query)
    return [Resource(res, ceilometer_usage_object) for res in api_resources]
def sample_list(request, meter_name, query=None):
    """List the samples for this meters."""
    raw_samples = ceilometerclient(request).samples.list(
        meter_name=meter_name, q=query)
    return [Sample(sample) for sample in raw_samples]
def meter_list(request, query=None):
    """List the user's meters."""
    raw_meters = ceilometerclient(request).meters.list(query)
    return [Meter(meter) for meter in raw_meters]
def statistic_list(request, meter_name, query=None, period=None):
    """List of statistics."""
    client = ceilometerclient(request)
    raw_statistics = client.statistics.list(meter_name=meter_name,
                                            q=query, period=period)
    return [Statistic(stat) for stat in raw_statistics]
class ThreadedUpdateResourceWithStatistics(threading.Thread):
    """Thread worker that fills one resource with its statistics data.

    ``process_list`` is the join point: it dispatches one worker per
    resource, so all resources get their statistics attribute filled in
    separate threads. The shared ``resource_usage`` object is called from
    every thread; each thread updates exactly one resource.

    :Parameters:
      - `resource`: Resource or ResourceAggregate object, that will
                    be filled by statistic data.
      - `resources`: List of Resource or ResourceAggregate object,
                     that will be filled by statistic data.
      - `resource_usage`: Wrapping resource usage object, that holds
                          all statistics data.
      - `meter_names`: List of meter names of the statistics we want.
      - `period`: In seconds. If no period is given, only one aggregate
                  statistic is returned. If given, a faceted result will be
                  returned, divided into given periods. Periods with no
                  data are ignored.
      - `stats_attr`: String representing the attribute name of the stats.
                      E.g. (avg, max, min...) If None is given, whole
                      statistic object is returned.
      - `additional_query`: Additional query for the statistics.
                            E.g. timespan, etc.
    """

    # TODO(lsmola) Can be removed once Ceilometer supports sample-api
    # and group-by, so all of this optimization will not be necessary.
    # It is planned somewhere to I.
    def __init__(self, resource_usage, resource, meter_names=None,
                 period=None, filter_func=None, stats_attr=None,
                 additional_query=None):
        super(ThreadedUpdateResourceWithStatistics, self).__init__()
        self.resource_usage = resource_usage
        self.resource = resource
        self.meter_names = meter_names
        self.period = period
        self.stats_attr = stats_attr
        self.additional_query = additional_query
        # NOTE(review): filter_func is accepted for signature compatibility
        # but is not stored or used by the worker itself.

    def run(self):
        """Execute the statistics update for the wrapped resource."""
        self.resource_usage.update_with_statistics(
            self.resource,
            meter_names=self.meter_names, period=self.period,
            stats_attr=self.stats_attr, additional_query=self.additional_query)

    @classmethod
    def process_list(cls, resource_usage, resources, meter_names=None,
                     period=None, filter_func=None, stats_attr=None,
                     additional_query=None):
        """Spawn one worker per resource, then wait for all of them."""
        workers = []
        for resource in resources:
            # add statistics data into resource
            worker = cls(resource_usage, resource, meter_names=meter_names,
                         period=period, stats_attr=stats_attr,
                         additional_query=additional_query)
            worker.start()
            workers.append(worker)
        for worker in workers:
            worker.join()
class CeilometerUsage(object):
    """Represents wrapper of any Ceilometer queries.

    One instance of this class should be shared between resources
    as this class provides a place where users and tenants are
    cached. So there are no duplicate queries to API.

    This class also wraps Ceilometer API calls and provides parallel
    HTTP calls to API.

    This class should also serve as reasonable abstraction, that will
    cover huge amount of optimization due to optimization of Ceilometer
    service, without changing of the interface.
    """

    def __init__(self, request):
        # The Django request used for all Keystone/Ceilometer calls.
        self._request = request

        # Cached users and tenants.
        self._users = {}
        self._tenants = {}

    def get_user(self, user_id):
        """Returns user fetched from API.

        Caching the result, so it doesn't contact API twice with the
        same query.
        """
        user = self._users.get(user_id, None)
        if not user:
            user = keystone.user_get(self._request, user_id)
            # caching the user, for later use
            self._users[user_id] = user
        return user

    def preload_all_users(self):
        """Preloads all users into dictionary.

        It's more effective to preload all users, rather than fetching many
        users by separate API get calls.
        """
        users = keystone.user_list(self._request)

        # Cache all users on right indexes, this is more effective than to
        # obtain large number of users one by one by keystone.user_get
        for u in users:
            self._users[u.id] = u

    def get_tenant(self, tenant_id):
        """Returns tenant fetched from API.

        Caching the result, so it doesn't contact API twice with the
        same query.
        """
        tenant = self._tenants.get(tenant_id, None)
        if not tenant:
            tenant = keystone.tenant_get(self._request, tenant_id)
            # caching the tenant for later use
            self._tenants[tenant_id] = tenant
        return tenant

    def preload_all_tenants(self):
        """Preloads all tenants into dictionary.

        It's more effective to preload all tenants, rather than fetching many
        tenants by separate API get calls.
        """
        # NOTE(review): the second tuple element ('more' pagination flag) is
        # intentionally ignored here.
        tenants, more = keystone.tenant_list(self._request)

        # Cache all tenants on right indexes, this is more effective than to
        # obtain large number of tenants one by one by keystone.tenant_get
        for t in tenants:
            self._tenants[t.id] = t

    def global_data_get(self, used_cls=None, query=None,
                        with_statistics=False, additional_query=None,
                        with_users_and_tenants=True):
        """Obtaining a resources for table view.

        It obtains resources with statistics data according to declaration
        in used_cls class.

        :Parameters:
          - `user_cls`: Class wrapper for usage data. It acts as wrapper for
                        settings needed. See the call of this method for
                        details.
          - `query`: Explicit query definition for fetching the resources. If
                     no query is provided, it takes a default_query from
                     used_cls. If no default query is provided, it fetches
                     all the resources and filters them by meters defined
                     in used_cls.
          - `with_statistic`: Define whether statistics data from the meters
                              defined in used_cls should be fetched.
                              Can be used to first obtain only the pure
                              resources, then with the statistics data by
                              AJAX.
          - `additional_query`: Additional query for the statistics.
                                E.g. timespan, etc.
          - `with_users_and_tenants`: If true a user and a tenant object will
                                      be added to each resource object.
        """
        default_query = used_cls.default_query
        query = query or default_query
        filter_func = None

        def filter_resources(resource):
            """Method for filtering resources by their links.rel attr.

            The links.rel attributes contain all meters the resource has.
            """
            for link in resource.links:
                if link['rel'] in used_cls.meters:
                    return True
            return False

        if not query:
            # Not all resource types can be obtained by query, if there is not
            # a query, we are filtering all resources by this function.
            filter_func = filter_resources

        if with_statistics:
            # Will add statistic data into resources.
            resources = self.resources_with_statistics(
                query,
                used_cls.meters,
                filter_func=filter_func,
                stats_attr=used_cls.stats_attr,
                additional_query=additional_query,
                with_users_and_tenants=with_users_and_tenants)
        else:
            # Will load only resources without statistical data.
            resources = self.resources(
                query, filter_func=filter_func,
                with_users_and_tenants=with_users_and_tenants)
        return [used_cls(resource) for resource in resources]

    def query_from_object_id(self, object_id):
        """Obtaining a query from resource id.

        Query can be then used to identify a resource in resources or meters
        API calls. ID is being built in the Resource initializer, or returned
        by Datatable into UpdateRow functionality.
        """
        try:
            # The composite id format is "tenant__user__resource".
            tenant_id, user_id, resource_id = object_id.split("__")
        except ValueError:
            return []

        return make_query(tenant_id=tenant_id, user_id=user_id,
                          resource_id=resource_id)

    def update_with_statistics(self, resource, meter_names=None, period=None,
                               stats_attr=None, additional_query=None):
        """Adding statistical data into one Resource or ResourceAggregate.

        It adds each statistic of each meter_names into the resource
        attributes. Attribute name is the meter name with replaced '.' to '_'.

        :Parameters:
          - `resource`: Resource or ResourceAggregate object, that will
                        be filled by statistic data.
          - `meter_names`: List of meter names of which we want the
                           statistics.
          - `period`: In seconds. If no period is given, only one aggregate
                      statistic is returned. If given a faceted result will be
                      returned, dividend into given periods. Periods with no
                      data are ignored.
          - `stats_attr`: String representing the specific name of the stats.
                          E.g. (avg, max, min...) If defined, meter attribute
                          will contain just the one value. If None is given,
                          meter attribute will contain the whole Statistic
                          object.
          - `additional_query`: Additional query for the statistics.
                                E.g. timespan, etc.
        """
        if not meter_names:
            raise ValueError("meter_names and resources must be defined to be "
                             "able to obtain the statistics.")

        # query for identifying one resource in meters
        query = resource.query
        if additional_query:
            if not is_iterable(additional_query):
                raise ValueError("Additional query must be list of"
                                 " conditions. See the docs for format.")
            query = query + additional_query

        # TODO(lsmola) thread for each meter will be probably overkill
        # but I should test lets say thread pool with 100 of threads
        # and apply it only to this code.
        # Though I do expect Ceilometer will support bulk requests,
        # so all of this optimization will not be necessary.
        for meter in meter_names:
            statistics = statistic_list(self._request, meter,
                                        query=query, period=period)
            # Meter names become attribute keys with '.' replaced by '_'.
            meter = meter.replace(".", "_")
            if statistics:
                if stats_attr:
                    # I want to load only a specific attribute
                    resource.set_meter(
                        meter,
                        getattr(statistics[0], stats_attr, None))
                else:
                    # I want a dictionary of all statistics
                    resource.set_meter(meter, statistics)
            else:
                resource.set_meter(meter, None)

        return resource

    def resources(self, query=None, filter_func=None,
                  with_users_and_tenants=False):
        """Obtaining resources with the query or filter_func.

        Obtains resources and also fetch tenants and users associated
        with those resources if with_users_and_tenants flag is true.

        :Parameters:
          - `query`: Query for fetching the Ceilometer Resources.
          - `filter_func`: Callable for filtering of the obtained
                           resources.
          - `with_users_and_tenants`: If true a user and a tenant object will
                                      be added to each resource object.
        """
        # Passing self down lets each Resource resolve (and cache) its
        # tenant/user through this object's caches.
        if with_users_and_tenants:
            ceilometer_usage_object = self
        else:
            ceilometer_usage_object = None
        resources = resource_list(
            self._request,
            query=query, ceilometer_usage_object=ceilometer_usage_object)
        if filter_func:
            resources = [resource for resource in resources if
                         filter_func(resource)]

        return resources

    def resources_with_statistics(self, query=None, meter_names=None,
                                  period=None, filter_func=None,
                                  stats_attr=None, additional_query=None,
                                  with_users_and_tenants=False):
        """Obtaining resources with statistics data inside.

        :Parameters:
          - `query`: Query for fetching the Ceilometer Resources.
          - `filter_func`: Callable for filtering of the obtained
                           resources.
          - `meter_names`: List of meter names of which we want the
                           statistics.
          - `period`: In seconds. If no period is given, only one aggregate
                      statistic is returned. If given, a faceted result will
                      be returned, divided into given periods. Periods with
                      no data are ignored.
          - `stats_attr`: String representing the specific name of the stats.
                          E.g. (avg, max, min...) If defined, meter attribute
                          will contain just the one value. If None is given,
                          meter attribute will contain the whole Statistic
                          object.
          - `additional_query`: Additional query for the statistics.
                                E.g. timespan, etc.
          - `with_users_and_tenants`: If true a user and a tenant object will
                                      be added to each resource object.
        """
        resources = self.resources(
            query, filter_func=filter_func,
            with_users_and_tenants=with_users_and_tenants)

        # One worker thread per resource fills in the statistics in parallel.
        ThreadedUpdateResourceWithStatistics.process_list(
            self, resources,
            meter_names=meter_names, period=period, stats_attr=stats_attr,
            additional_query=additional_query)

        return resources

    def resource_aggregates(self, queries=None):
        """Obtaining resource aggregates with queries.

        Representing a resource aggregate by query is a most general way
        how to obtain a resource aggregates.

        :Parameters:
          - `queries`: Dictionary of named queries that defines a bulk of
                       resource aggregates.
        """
        resource_aggregates = []
        for identifier, query in queries.items():
            resource_aggregates.append(ResourceAggregate(query=query,
                                                         ceilometer_usage=None,
                                                         identifier=identifier))
        return resource_aggregates

    def resource_aggregates_with_statistics(self, queries=None,
                                            meter_names=None, period=None,
                                            filter_func=None, stats_attr=None,
                                            additional_query=None):
        """Obtaining resource aggregates with statistics data inside.

        :Parameters:
          - `queries`: Dictionary of named queries that defines a bulk of
                       resource aggregates.
          - `meter_names`: List of meter names of which we want the
                           statistics.
          - `period`: In seconds. If no period is given, only one aggregate
                      statistic is returned. If given, a faceted result will
                      be returned, divided into given periods. Periods with
                      no data are ignored.
          - `stats_attr`: String representing the specific name of the stats.
                          E.g. (avg, max, min...) If defined, meter attribute
                          will contain just the one value. If None is given,
                          meter attribute will contain the whole Statistic
                          object.
          - `additional_query`: Additional query for the statistics.
                                E.g. timespan, etc.
        """
        resource_aggregates = self.resource_aggregates(queries)

        # Fill the aggregates with statistics, one worker thread each.
        ThreadedUpdateResourceWithStatistics.process_list(
            self,
            resource_aggregates, meter_names=meter_names, period=period,
            stats_attr=stats_attr, additional_query=additional_query)

        return resource_aggregates
def diff_lists(a, b):
    """Return the elements of ``a`` that are not in ``b``.

    The previous ``list(set(a) - set(b))`` implementation returned the
    difference in arbitrary set order, which made downstream listings
    (e.g. meter tables) non-deterministic. This version preserves the
    original ordering of ``a``; the empty/None shortcuts behave as before.
    """
    if not a:
        return []
    if not b:
        return a
    excluded = set(b)
    return [item for item in a if item not in excluded]
class Meters(object):
"""Class for listing of available meters.
It is listing meters defined in this class that are available
in Ceilometer meter_list.
It is storing information that is not available in Ceilometer, i.e.
label, description.
"""
def __init__(self, request=None, ceilometer_meter_list=None):
# Storing the request.
self._request = request
# Storing the Ceilometer meter list
if ceilometer_meter_list:
self._ceilometer_meter_list = ceilometer_meter_list
else:
try:
self._ceilometer_meter_list = meter_list(request)
except Exception:
self._ceilometer_meter_list = []
exceptions.handle(self._request,
_('Unable to retrieve Ceilometer meter '
'list.'))
# Storing the meters info categorized by their services.
self._nova_meters_info = self._get_nova_meters_info()
self._neutron_meters_info = self._get_neutron_meters_info()
self._glance_meters_info = self._get_glance_meters_info()
self._cinder_meters_info = self._get_cinder_meters_info()
self._swift_meters_info = self._get_swift_meters_info()
self._kwapi_meters_info = self._get_kwapi_meters_info()
self._ipmi_meters_info = self._get_ipmi_meters_info()
# Storing the meters info of all services together.
all_services_meters = (self._nova_meters_info,
self._neutron_meters_info,
self._glance_meters_info,
self._cinder_meters_info,
self._swift_meters_info,
self._kwapi_meters_info,
self._ipmi_meters_info)
self._all_meters_info = {}
for service_meters in all_services_meters:
self._all_meters_info.update(dict([(meter_name, meter_info)
for meter_name, meter_info
in service_meters.items()]))
# Here will be the cached Meter objects, that will be reused for
# repeated listing.
self._cached_meters = {}
def list_all(self, only_meters=None, except_meters=None):
"""Returns a list of meters based on the meters names.
:Parameters:
- `only_meters`: The list of meter names we want to show.
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=only_meters,
except_meters=except_meters)
def list_nova(self, except_meters=None):
"""Returns a list of meters tied to nova.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._nova_meters_info.keys(),
except_meters=except_meters)
def list_neutron(self, except_meters=None):
"""Returns a list of meters tied to neutron.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._neutron_meters_info.keys(),
except_meters=except_meters)
def list_glance(self, except_meters=None):
"""Returns a list of meters tied to glance.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._glance_meters_info.keys(),
except_meters=except_meters)
def list_cinder(self, except_meters=None):
"""Returns a list of meters tied to cinder.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._cinder_meters_info.keys(),
except_meters=except_meters)
def list_swift(self, except_meters=None):
"""Returns a list of meters tied to swift.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._swift_meters_info.keys(),
except_meters=except_meters)
def list_kwapi(self, except_meters=None):
"""Returns a list of meters tied to kwapi.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._kwapi_meters_info.keys(),
except_meters=except_meters)
def list_ipmi(self, except_meters=None):
"""Returns a list of meters tied to ipmi
:Parameters:
- `except_meters`: The list of meter names we don't want to show
"""
return self._list(only_meters=self._ipmi_meters_info.keys(),
except_meters=except_meters)
def _list(self, only_meters=None, except_meters=None):
"""Returns a list of meters based on the meters names.
:Parameters:
- `only_meters`: The list of meter names we want to show.
- `except_meters`: The list of meter names we don't want to show.
"""
# Get all wanted meter names.
if only_meters:
meter_names = only_meters
else:
meter_names = [meter_name for meter_name
in self._all_meters_info.keys()]
meter_names = diff_lists(meter_names, except_meters)
# Collect meters for wanted meter names.
return self._get_meters(meter_names)
def _get_meters(self, meter_names):
"""Obtain meters based on meter_names.
The meters that do not exist in Ceilometer meter list are left out.
:Parameters:
- `meter_names`: A list of meter names we want to fetch.
"""
meters = []
for meter_name in meter_names:
meter = self._get_meter(meter_name)
if meter:
meters.append(meter)
return meters
def _get_meter(self, meter_name):
"""Obtains a meter.
Obtains meter either from cache or from Ceilometer meter list
joined with statically defined meter info like label and description.
:Parameters:
- `meter_name`: A meter name we want to fetch.
"""
meter = self._cached_meters.get(meter_name, None)
if not meter:
meter_candidates = [m for m in self._ceilometer_meter_list
if m.name == meter_name]
if meter_candidates:
meter_info = self._all_meters_info.get(meter_name, None)
if meter_info:
label = meter_info["label"]
description = meter_info["description"]
else:
label = ""
description = ""
meter = meter_candidates[0]
meter.augment(label=label, description=description)
self._cached_meters[meter_name] = meter
return meter
    def _get_nova_meters_info(self):
        """Returns additional info for each meter.

        That will be used for augmenting the Ceilometer meter.
        """
        # TODO(lsmola) Unless the Ceilometer will provide the information
        # below, I need to define it as a static here. I will be joining this
        # to info that I am able to obtain from Ceilometer meters, hopefully
        # some day it will be supported all.
        meters_info = datastructures.SortedDict([
            ("instance", {
                'label': '',
                'description': _("Existence of instance"),
            }),
            ("instance:<type>", {
                'label': '',
                'description': _("Existence of instance <type> "
                                 "(openstack types)"),
            }),
            ("memory", {
                'label': '',
                'description': _("Volume of RAM"),
            }),
            ("memory.usage", {
                'label': '',
                'description': _("Volume of RAM used"),
            }),
            ("cpu", {
                'label': '',
                'description': _("CPU time used"),
            }),
            ("cpu_util", {
                'label': '',
                'description': _("Average CPU utilization"),
            }),
            ("vcpus", {
                'label': '',
                'description': _("Number of VCPUs"),
            }),
            ("disk.read.requests", {
                'label': '',
                'description': _("Number of read requests"),
            }),
            ("disk.write.requests", {
                'label': '',
                'description': _("Number of write requests"),
            }),
            ("disk.read.bytes", {
                'label': '',
                'description': _("Volume of reads"),
            }),
            ("disk.write.bytes", {
                'label': '',
                'description': _("Volume of writes"),
            }),
            ("disk.read.requests.rate", {
                'label': '',
                'description': _("Average rate of read requests"),
            }),
            ("disk.write.requests.rate", {
                'label': '',
                'description': _("Average rate of write requests"),
            }),
            ("disk.read.bytes.rate", {
                'label': '',
                'description': _("Average rate of reads"),
            }),
            ("disk.write.bytes.rate", {
                'label': '',
                'description': _("Average volume of writes"),
            }),
            ("disk.root.size", {
                'label': '',
                'description': _("Size of root disk"),
            }),
            ("disk.ephemeral.size", {
                'label': '',
                'description': _("Size of ephemeral disk"),
            }),
            ("network.incoming.bytes", {
                'label': '',
                'description': _("Number of incoming bytes "
                                 "on the network for a VM interface"),
            }),
            ("network.outgoing.bytes", {
                'label': '',
                'description': _("Number of outgoing bytes "
                                 "on the network for a VM interface"),
            }),
            ("network.incoming.packets", {
                'label': '',
                'description': _("Number of incoming "
                                 "packets for a VM interface"),
            }),
            ("network.outgoing.packets", {
                'label': '',
                'description': _("Number of outgoing "
                                 "packets for a VM interface"),
            }),
            ("network.incoming.bytes.rate", {
                'label': '',
                'description': _("Average rate per sec of incoming "
                                 "bytes on a VM network interface"),
            }),
            ("network.outgoing.bytes.rate", {
                'label': '',
                'description': _("Average rate per sec of outgoing "
                                 "bytes on a VM network interface"),
            }),
            ("network.incoming.packets.rate", {
                'label': '',
                'description': _("Average rate per sec of incoming "
                                 "packets on a VM network interface"),
            }),
            ("network.outgoing.packets.rate", {
                'label': '',
                'description': _("Average rate per sec of outgoing "
                                 "packets on a VM network interface"),
            }),
        ])
        # Adding flavor based meters into meters_info dict
        # TODO(lsmola) this kind of meter will be probably deprecated
        # https://bugs.launchpad.net/ceilometer/+bug/1208365 . Delete it then.
        for flavor in get_flavor_names(self._request):
            name = 'instance:%s' % flavor
            # dict() copies the template so each flavor entry is independent.
            meters_info[name] = dict(meters_info["instance:<type>"])
            meters_info[name]['description'] = (
                _('Duration of instance type %s (openstack flavor)') %
                flavor)

        # TODO(lsmola) allow to set specific in local_settings. For all meters
        # because users can have their own agents and meters.
        return meters_info
def _get_neutron_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will be supported all.
return datastructures.SortedDict([
('network', {
'label': '',
'description': _("Existence of network"),
}),
('network.create', {
'label': '',
'description': _("Creation requests for this network"),
}),
('network.update', {
'label': '',
'description': _("Update requests for this network"),
}),
('subnet', {
'label': '',
'description': _("Existence of subnet"),
}),
('subnet.create', {
'label': '',
'description': _("Creation requests for this subnet"),
}),
('subnet.update', {
'label': '',
'description': _("Update requests for this subnet"),
}),
('port', {
'label': '',
'description': _("Existence of port"),
}),
('port.create', {
'label': '',
'description': _("Creation requests for this port"),
}),
('port.update', {
'label': '',
'description': _("Update requests for this port"),
}),
('router', {
'label': '',
'description': _("Existence of router"),
}),
('router.create', {
'label': '',
'description': _("Creation requests for this router"),
}),
('router.update', {
'label': '',
'description': _("Update requests for this router"),
}),
('ip.floating', {
'label': '',
'description': _("Existence of floating ip"),
}),
('ip.floating.create', {
'label': '',
'description': _("Creation requests for this floating ip"),
}),
('ip.floating.update', {
'label': '',
'description': _("Update requests for this floating ip"),
}),
])
def _get_glance_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will be supported all.
return datastructures.SortedDict([
('image', {
'label': '',
'description': _("Image existence check"),
}),
('image.size', {
'label': '',
'description': _("Uploaded image size"),
}),
('image.update', {
'label': '',
'description': _("Number of image updates"),
}),
('image.upload', {
'label': '',
'description': _("Number of image uploads"),
}),
('image.delete', {
'label': '',
'description': _("Number of image deletions"),
}),
('image.download', {
'label': '',
'description': _("Image is downloaded"),
}),
('image.serve', {
'label': '',
'description': _("Image is served out"),
}),
])
def _get_cinder_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will be supported all.
return datastructures.SortedDict([
('volume', {
'label': '',
'description': _("Existence of volume"),
}),
('volume.size', {
'label': '',
'description': _("Size of volume"),
}),
])
def _get_swift_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will be supported all.
return datastructures.SortedDict([
('storage.objects', {
'label': '',
'description': _("Number of objects"),
}),
('storage.objects.size', {
'label': '',
'description': _("Total size of stored objects"),
}),
('storage.objects.containers', {
'label': '',
'description': _("Number of containers"),
}),
('storage.objects.incoming.bytes', {
'label': '',
'description': _("Number of incoming bytes"),
}),
('storage.objects.outgoing.bytes', {
'label': '',
'description': _("Number of outgoing bytes"),
}),
('storage.api.request', {
'label': '',
'description': _("Number of API requests against swift"),
}),
])
def _get_kwapi_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will be supported all.
return datastructures.SortedDict([
('energy', {
'label': '',
'description': _("Amount of energy"),
}),
('power', {
'label': '',
'description': _("Power consumption"),
}),
])
def _get_ipmi_meters_info(self):
"""Returns additional info for each meter
That will be used for augmenting the Ceilometer meter
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will be supported all.
return datastructures.SortedDict([
('hardware.ipmi.node.power', {
'label': '',
'description': _("System Current Power"),
}),
('hardware.ipmi.node.temperature', {
'label': '',
'description': _("System Current Temperature"),
}),
('hardware.ipmi.fan', {
'label': '',
'description': _("Fan RPM"),
}),
('hardware.ipmi.temperature', {
'label': '',
'description': _("Sensor Temperature Reading"),
}),
('hardware.ipmi.current', {
'label': '',
'description': _("Sensor Current Reading"),
}),
('hardware.ipmi.voltage', {
'label': '',
'description': _("Sensor Voltage Reading"),
}),
])
| |
"""
Optimise the combination of profile and summation intensity values.
"""
from __future__ import annotations
import logging
import boost_adaptbx.boost.python
from cctbx import crystal, miller
from dials.algorithms.scaling.scaling_utilities import DialsMergingStatisticsError
from dials.array_family import flex
from dials.util import tabulate
miller_ext = boost_adaptbx.boost.python.import_ext("cctbx_miller_ext")
logger = logging.getLogger("dials")
def fast_merging_stats(array):
    """
    Quickly calculate required merging stats for intensity combination.

    This is a cut-down version of iotbx.merging_statistics.merging_stats.
    Returns an (r_meas, cc_one_half) pair, or (-1.0, -1.0) when no
    observation survives the selection.
    """
    sigmas = array.sigmas()
    assert sigmas is not None
    # Keep only observations with positive sigma and I/sigma above 1.
    keep = (sigmas > 0) & ((array.data() / sigmas) > 1.0)
    array = array.select(keep)
    if not array.size():
        return -1.0, -1.0
    sorted_array = array.sort("packed_indices")
    merger = miller_ext.merge_equivalents_obs(
        sorted_array.indices(),
        sorted_array.data(),
        sorted_array.sigmas(),
        use_internal_variance=True,
    )
    cc_half = miller.compute_cc_one_half(unmerged=sorted_array, return_n_refl=False)
    return merger.r_meas, cc_half
def map_indices_to_asu(miller_indices, space_group):
    """Return the given Miller indices mapped into the asymmetric unit."""
    symmetry = crystal.symmetry(space_group=space_group)
    asu_set = miller.set(
        crystal_symmetry=symmetry,
        indices=miller_indices,
        anomalous_flag=False,
    ).map_to_asu()
    return asu_set.indices()
def _make_reflection_table_from_scaler(scaler):
    """Copy the columns needed for combining and filter to good data.

    Outliers, free-set reflections and measurements failing
    _get_filter_selection are removed; Miller indices are mapped to the
    asymmetric unit.
    """
    source = scaler.reflection_table
    reflections = flex.reflection_table()
    for column in (
        "intensity.prf.value",
        "intensity.prf.variance",
        "intensity.sum.value",
        "intensity.sum.variance",
        "prescaling_correction",
        "inverse_scale_factor",
        "miller_index",
    ):
        reflections[column] = source[column]
    # "partiality" is optional and only copied when present.
    if "partiality" in source:
        reflections["partiality"] = source["partiality"]
    # now select good data: drop outliers and free-set members
    good = _get_filter_selection(source)
    suitable = scaler.suitable_refl_for_scaling_sel.iselection()
    keep = flex.bool(reflections.size(), True)
    keep.set_selected(suitable.select(scaler.outliers), False)
    keep.set_selected(suitable.select(scaler.free_set_selection), False)
    reflections = reflections.select(good & keep)
    reflections["miller_index"] = map_indices_to_asu(
        reflections["miller_index"], scaler.experiment.crystal.get_space_group()
    )
    logger.debug("Reflection table size for combining: %s", reflections.size())
    return reflections
def _determine_Imids(combiner, raw_intensities):
    """Populate combiner.Imids with trial crossover values if unset.

    0 and 1 are sentinels for "profile only" and "summation only"; the
    remaining candidates span from max(I)/10 down past the mean intensity
    in factors of ten.
    """
    if combiner.Imids:
        return
    mean_intensity = max(10, flex.mean(raw_intensities))
    candidate = flex.max(raw_intensities) / 10.0
    trial_values = [0, 1, mean_intensity, candidate]
    while candidate > mean_intensity:
        candidate /= 10.0
        trial_values.append(candidate)
    combiner.Imids = trial_values
class SingleDatasetIntensityCombiner:
    """
    Class to combine profile and summation intensities for a single dataset.

    On construction the best intensity choice is determined by testing
    candidate crossover values (Imid) against merging statistics.  The
    result is stored in ``self.max_key``: 0 = profile only, 1 = summation
    only, any other value is the numeric Imid crossover.
    """

    def __init__(self, scaler, use_Imid=None):
        self.scaler = scaler
        self.experiment = scaler.experiment
        if "intensity.prf.value" not in scaler.reflection_table:
            # Nothing to combine without profile-fitted intensities:
            # fall back to summation only.
            self.max_key = 1
            logger.info(
                "No profile intensities found, skipping profile/summation intensity combination."
            )
            return
        if use_Imid is not None:
            # Caller has already chosen the crossover value.
            self.max_key = use_Imid
        else:
            self.Imids = scaler.params.reflection_selection.combine.Imid
            self.dataset = _make_reflection_table_from_scaler(self.scaler)
            # Partiality-correct summation intensities when available.
            if "partiality" in self.dataset:
                raw_intensities = (
                    self.dataset["intensity.sum.value"].as_double()
                    / self.dataset["partiality"]
                )
            else:
                raw_intensities = self.dataset["intensity.sum.value"].as_double()
            logger.debug("length of raw intensity array: %s", raw_intensities.size())
            _determine_Imids(self, raw_intensities)
            header = ["Combination", "CC1/2", "Rmeas"]
            rows, results = self._test_Imid_combinations()
            logger.info(tabulate(rows, header))
            # Pick the combination with the lowest Rmeas, discarding the
            # -1.0 sentinel results from combinations with no usable data.
            self.max_key = min(results, key=results.get)
            while results[self.max_key] < 0:
                del results[self.max_key]
                if results:
                    self.max_key = min(results, key=results.get)
                else:
                    self.max_key = -1
                    break
            if self.max_key == 0:
                logger.info("Profile intensities determined to be best for scaling. \n")
            elif self.max_key == 1:
                logger.info(
                    "Summation intensities determined to be best for scaling. \n"
                )
            elif self.max_key == -1:
                # No combination produced valid statistics; default to profile.
                logger.info("No good statistics found, using profile intensities. \n")
                self.max_key = 0
            else:
                logger.info(
                    "Combined intensities with Imid = %.2f determined to be best for scaling. \n",
                    self.max_key,
                )

    def calculate_suitable_combined_intensities(self):
        """Combine the 'suitable for scaling' intensities in the scaler."""
        return _calculate_suitable_combined_intensities(self.scaler, self.max_key)

    def _test_Imid_combinations(self):
        """Test the different combinations, returning the rows and results dict.

        ``rows`` is display data for tabulate(); ``results`` maps each
        Imid to its Rmeas.
        """
        rows = []
        results = {}
        for Imid in self.Imids:
            Int, Var = _get_Is_from_Imidval(self.dataset, Imid)
            miller_set = miller.set(
                crystal_symmetry=self.experiment.crystal.get_crystal_symmetry(
                    assert_is_compatible_unit_cell=False
                ),
                indices=self.dataset["miller_index"],
                anomalous_flag=False,
            )
            # Undo the model scale factor but keep the prescaling correction.
            i_obs = miller.array(
                miller_set,
                data=(
                    Int
                    * self.dataset["prescaling_correction"]
                    / self.dataset["inverse_scale_factor"]
                ),
            )
            i_obs.set_observation_type_xray_intensity()
            i_obs.set_sigmas(
                flex.sqrt(Var)
                * self.dataset["prescaling_correction"]
                / self.dataset["inverse_scale_factor"]
            )
            try:
                rmeas, cchalf = fast_merging_stats(array=i_obs)
                logger.debug("Imid: %s, Rmeas %s, cchalf %s", Imid, rmeas, cchalf)
            except RuntimeError:
                raise DialsMergingStatisticsError(
                    "Unable to merge for intensity combination"
                )
            # record the results
            results[Imid] = rmeas
            res_str = {0: "prf only", 1: "sum only"}
            if Imid not in res_str:
                res_str[Imid] = "Imid = " + str(round(Imid, 2))
            rows.append([res_str[Imid], str(round(cchalf, 5)), str(round(rmeas, 5))])
        return rows, results
def combine_intensities(reflections, Imid):
    """Take unscaled data, and apply intensity combination with a given Imid.

    Imid semantics: 0 = profile only, 1 = summation only, any other value
    is the crossover for a weighted profile/summation combination.  Returns
    an (intensity, variance) pair with the prescaling correction (and, for
    summation values, the inverse partiality) applied.
    """
    # NOTE(review): when Imid != 1 and the table lacks profile intensities,
    # Ipr/Vpr below would be unbound -- callers appear to guarantee Imid == 1
    # in that case (see the combiner classes); confirm before reusing.
    if "intensity.prf.value" in reflections:
        Ipr = reflections["intensity.prf.value"]
        Vpr = reflections["intensity.prf.variance"]
    assert "intensity.sum.value" in reflections
    assert "prescaling_correction" in reflections
    conv = reflections["prescaling_correction"]
    Isum = reflections["intensity.sum.value"]
    Vsum = reflections["intensity.sum.variance"]
    not_prf = ~reflections.get_flags(reflections.flags.integrated_prf)
    not_sum = ~reflections.get_flags(reflections.flags.integrated_sum)
    both = reflections.get_flags(reflections.flags.integrated, all=True)
    # Summation intensities additionally need the inverse-partiality factor.
    if "partiality" in reflections:
        inv_p = _determine_inverse_partiality(reflections)
        sum_conv = conv * inv_p
    else:
        sum_conv = conv
    if Imid == 1:  # i.e. sum is best, so use sum if exists, else prf
        intensity = Isum * sum_conv
        variance = Vsum * sum_conv * sum_conv
        # get not summation successful
        if "intensity.prf.value" in reflections:
            intensity.set_selected(not_sum.iselection(), (Ipr * conv).select(not_sum))
            variance.set_selected(
                not_sum.iselection(), (Vpr * conv * conv).select(not_sum)
            )
    else:
        # first set as prf
        intensity = Ipr * conv
        variance = Vpr * conv * conv
        # set those not prf successful
        intensity.set_selected(not_prf.iselection(), (Isum * sum_conv).select(not_prf))
        variance.set_selected(
            not_prf.iselection(), (Vsum * sum_conv * sum_conv).select(not_prf)
        )
        if Imid == 0:  # done all we need to do.
            pass
        else:
            # calculate combined intensities, but only set for those where both prf and sum good
            if "partiality" in reflections:
                Int, Var = _calculate_combined_raw_intensities(
                    Ipr, Isum * inv_p, Vpr, Vsum * inv_p * inv_p, Imid
                )
            else:
                Int, Var = _calculate_combined_raw_intensities(
                    Ipr, Isum, Vpr, Vsum, Imid
                )
            intensity.set_selected(both.iselection(), (Int * conv).select(both))
            variance.set_selected(both.iselection(), (Var * conv * conv).select(both))
    return intensity, variance
def _calculate_suitable_combined_intensities(scaler, max_key):
    """Combine intensities for the scaler's 'suitable for scaling' subset."""
    suitable = scaler.reflection_table.select(scaler.suitable_refl_for_scaling_sel)
    return combine_intensities(suitable, max_key)
class MultiDatasetIntensityCombiner:
    """
    Class to combine profile and summation intensities for multiple datasets.

    Like SingleDatasetIntensityCombiner, but each candidate Imid is scored
    on the pooled reflections of all datasets that carry profile
    intensities.  ``self.max_key``: 0 = profile only, 1 = summation only,
    otherwise the numeric Imid crossover.
    """

    def __init__(self, multiscaler):
        self.active_scalers = multiscaler.active_scalers
        self.Imids = multiscaler.params.reflection_selection.combine.Imid
        # first copy across relevant data that's needed
        self.good_datasets = []
        for i, scaler in enumerate(self.active_scalers):
            if "intensity.prf.value" in scaler.reflection_table:
                self.good_datasets.append(i)
        if not self.good_datasets:
            # No dataset has profile intensities: summation only.
            self.max_key = 1
            logger.info(
                "No profile intensities found, skipping profile/summation intensity combination."
            )
            return
        self.datasets = [
            _make_reflection_table_from_scaler(self.active_scalers[i])
            for i in self.good_datasets
        ]
        raw_intensities = self._get_raw_intensity_array()
        logger.debug("length of raw intensity array: %s", raw_intensities.size())
        _determine_Imids(self, raw_intensities)
        header = ["Combination", "CC1/2", "Rmeas"]
        rows, results = self._test_Imid_combinations()
        logger.info(tabulate(rows, header))
        # Lowest Rmeas wins; drop -1.0 sentinel entries (no usable data).
        self.max_key = min(results, key=results.get)
        while results[self.max_key] < 0:
            del results[self.max_key]
            if results:
                self.max_key = min(results, key=results.get)
            else:
                self.max_key = -1
                break
        if self.max_key == 0:
            logger.info("Profile intensities determined to be best for scaling. \n")
        elif self.max_key == 1:
            logger.info("Summation intensities determined to be best for scaling. \n")
        elif self.max_key == -1:
            # No combination produced valid statistics; default to profile.
            logger.info("No good statistics found, using profile intensities. \n")
            self.max_key = 0
        else:
            logger.info(
                "Combined intensities with Imid = %.2f determined to be best for scaling. \n",
                self.max_key,
            )

    def calculate_suitable_combined_intensities(self, dataset):
        """Combine the 'suitable for scaling' intensities in the scaler.

        Datasets without profile intensities fall back to summation (Imid=1).
        """
        if dataset not in self.good_datasets:
            return _calculate_suitable_combined_intensities(
                self.active_scalers[dataset], 1
            )
        return _calculate_suitable_combined_intensities(
            self.active_scalers[dataset], self.max_key
        )

    def _get_raw_intensity_array(self):
        # Pool (partiality-corrected) summation intensities of all datasets.
        intensities = flex.double()
        for dataset in self.datasets:
            if "partiality" in dataset:
                intensities.extend(
                    dataset["intensity.sum.value"].as_double() / dataset["partiality"]
                )
            else:
                intensities.extend(dataset["intensity.sum.value"].as_double())
        return intensities

    def _test_Imid_combinations(self):
        """Score each candidate Imid on the pooled reflections.

        Returns (rows, results): display rows for tabulate() and a dict
        mapping each Imid to its Rmeas.
        """
        rows = []
        results = {}
        for Imid in self.Imids:
            combined_intensities = flex.double([])
            combined_sigmas = flex.double([])
            combined_scales = flex.double([])
            combined_indices = flex.miller_index([])
            for dataset in self.datasets:
                Int, Var = _get_Is_from_Imidval(dataset, Imid)
                Int *= dataset["prescaling_correction"]
                sigma = flex.sqrt(Var) * dataset["prescaling_correction"]
                combined_intensities.extend(Int)
                combined_sigmas.extend(sigma)
                combined_scales.extend(dataset["inverse_scale_factor"])
                combined_indices.extend(dataset["miller_index"])
            # apply scale factor before determining merging stats
            miller_set = miller.set(
                crystal_symmetry=self.active_scalers[
                    0
                ].experiment.crystal.get_crystal_symmetry(),
                indices=combined_indices,
                anomalous_flag=False,
            )
            i_obs = miller.array(
                miller_set, data=combined_intensities / combined_scales
            )
            i_obs.set_observation_type_xray_intensity()
            i_obs.set_sigmas(combined_sigmas / combined_scales)
            try:
                rmeas, cchalf = fast_merging_stats(array=i_obs)
                logger.debug("Imid: %s, Rmeas %s, cchalf %s", Imid, rmeas, cchalf)
            except RuntimeError:
                raise DialsMergingStatisticsError(
                    "Unable to merge for intensity combination"
                )
            # record the results
            results[Imid] = rmeas
            res_str = {0: "prf only", 1: "sum only"}
            if Imid not in res_str:
                res_str[Imid] = "Imid = " + str(round(Imid, 2))
            rows.append([res_str[Imid], str(round(cchalf, 5)), str(round(rmeas, 5))])
        return rows, results
### Helper functions for combine_intensities
def _get_Is_from_Imidval(reflections, Imid):
    """Interpret the Imid value to extract and return the Icomb and Vcomb values.

    Imid == 0 selects profile, Imid == 1 selects (partiality-corrected)
    summation, any other value yields the weighted combination.
    """
    if Imid == 0:  # special value to trigger prf
        return (
            reflections["intensity.prf.value"],
            reflections["intensity.prf.variance"],
        )
    # Summation values, partiality-corrected when the column is present.
    if "partiality" in reflections:
        Isum = reflections["intensity.sum.value"] / reflections["partiality"]
        Vsum = reflections["intensity.sum.variance"] / flex.pow2(
            reflections["partiality"]
        )
    else:
        Isum = reflections["intensity.sum.value"]
        Vsum = reflections["intensity.sum.variance"]
    if Imid == 1:  # special value to trigger sum
        return Isum, Vsum
    return _calculate_combined_raw_intensities(
        reflections["intensity.prf.value"],
        Isum,
        reflections["intensity.prf.variance"],
        Vsum,
        Imid,
    )
def _get_filter_selection(reflections):
    """Return a selection of fully-integrated reflections fit for combining."""
    usable = reflections.get_flags(reflections.flags.integrated, all=True)
    usable &= ~reflections.get_flags(reflections.flags.bad_for_scaling, all=False)
    usable &= ~(reflections["intensity.prf.variance"] <= 0)
    usable &= ~(reflections["intensity.sum.variance"] <= 0)
    usable &= ~(reflections["inverse_scale_factor"] <= 0)
    if "partiality" in reflections:
        usable &= reflections["partiality"] > 0
    return usable
def _determine_inverse_partiality(reflections):
    """Return 1/partiality, with entries of non-positive partiality left at 1.0."""
    inverse = flex.double(reflections.size(), 1.0)
    positive = reflections["partiality"] > 0.0
    inverse.set_selected(
        positive.iselection(),
        1.0 / reflections.select(positive)["partiality"],
    )
    return inverse
def _calculate_combined_raw_intensities(Iprf, Isum, Vprf, Vsum, Imid):
    """Use partiality-corrected Isum, alongside Iprf, to calculate combined
    raw intensities with a sigmoid weight about the crossover Imid."""
    weight = 1.0 / (1.0 + (Isum / Imid) ** 3)
    weight.set_selected(Isum <= 0, 1.0)  # pure profile where summation <= 0
    complement = 1.0 - weight
    Icomb = weight * Iprf + complement * Isum
    Vcomb = weight * Vprf + complement * Vsum
    return Icomb, Vcomb
| |
"""More comprehensive traceback formatting for Python scripts.
To enable this module, do:
import cgitb; cgitb.enable()
at the top of your script. The optional arguments to enable() are:
display - if true, tracebacks are displayed in the web browser
logdir - if set, tracebacks are written to files in this directory
context - number of lines of source code to show for each stack frame
format - 'text' or 'html' controls the output format
By default, tracebacks are displayed but not saved, the context is 5 lines
and the output format is 'html' (for backwards compatibility with the
original use of this module)
Alternatively, if you have caught an exception and want cgitb to display it
for you, call cgitb.handler(). The optional argument to handler() is a
3-item tuple (etype, evalue, etb) just like the value of sys.exc_info().
The default handler displays output as HTML.
"""
__author__ = 'Ka-Ping Yee'
__version__ = '$Revision: 39758 $'
import sys
def reset():
    """Return a string that resets the CGI and browser to a known state."""
    # The blank line after the Content-Type header terminates the CGI
    # headers; the rest closes any dangling tags/fonts in the page so far.
    lines = (
        '<!--: spam',
        'Content-Type: text/html',
        '',
        '<body bgcolor="#f0f0f8"><font color="#f0f0f8" size="-5"> -->',
        '<body bgcolor="#f0f0f8"><font color="#f0f0f8" size="-5"> --> -->',
        '</font> </font> </font> </script> </object> </blockquote> </pre>',
        '</table> </table> </table> </table> </table> </font> </font> </font>',
    )
    return '\n'.join(lines)
__UNDEF__ = []  # unique sentinel for "name/value not found"; always compared with `is`
def small(text):
    """Wrap *text* in <small> tags; falsy input yields the empty string."""
    return '<small>%s</small>' % text if text else ''
def strong(text):
    """Wrap *text* in <strong> tags; falsy input yields the empty string."""
    return '<strong>%s</strong>' % text if text else ''
def grey(text):
    """Render *text* in grey; falsy input yields the empty string."""
    return '<font color="#909090">%s</font>' % text if text else ''
def lookup(name, frame, locals):
    """Find the value for a given name in the given environment.

    Returns a (kind, value) pair where kind is 'local', 'global' or
    'builtin'; (None, __UNDEF__) when the name cannot be resolved.
    """
    if name in locals:
        return 'local', locals[name]
    globals_ = frame.f_globals
    if name in globals_:
        return 'global', globals_[name]
    if '__builtins__' in globals_:
        builtins = globals_['__builtins__']
        # __builtins__ may be either a dict or the builtins module.
        if type(builtins) is dict:
            if name in builtins:
                return 'builtin', builtins[name]
        elif hasattr(builtins, name):
            return 'builtin', getattr(builtins, name)
    return None, __UNDEF__
def scanvars(reader, frame, locals):
    """Scan one logical line of Python and look up values of variables used.

    *reader* is a callable yielding successive source lines; tokens are
    consumed until the logical line ends (tokenize.NEWLINE).  Returns a
    list of (name, where, value) triples, where `where` is the scope tag
    from lookup() or, for dotted attribute accesses, the textual prefix.
    """
    import tokenize, keyword
    # lasttoken/parent/prefix track dotted chains like a.b.c so attribute
    # values can be resolved with getattr on the already-resolved parent.
    vars, lasttoken, parent, prefix, value = [], None, None, '', __UNDEF__
    for ttype, token, start, end, line in tokenize.generate_tokens(reader):
        if ttype == tokenize.NEWLINE: break  # end of the logical line
        if ttype == tokenize.NAME and token not in keyword.kwlist:
            if lasttoken == '.':
                # attribute access: resolve against the parent object
                if parent is not __UNDEF__:
                    value = getattr(parent, token, __UNDEF__)
                    vars.append((prefix + token, prefix, value))
            else:
                where, value = lookup(token, frame, locals)
                vars.append((token, where, value))
        elif token == '.':
            prefix += lasttoken + '.'
            parent = value
        else:
            # any other token resets the dotted-chain state
            parent, prefix = None, ''
        lasttoken = token
    return vars
def html((etype, evalue, etb), context=5):
"""Return a nice HTML document describing a given traceback."""
import os, types, time, traceback, linecache, inspect, pydoc
if type(etype) is types.ClassType:
etype = etype.__name__
pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
date = time.ctime(time.time())
head = '<body bgcolor="#f0f0f8">' + pydoc.html.heading(
'<big><big>%s</big></big>' %
strong(pydoc.html.escape(str(etype))),
'#ffffff', '#6622aa', pyver + '<br>' + date) + '''
<p>A problem occurred in a Python script. Here is the sequence of
function calls leading up to the error, in the order they occurred.</p>'''
indent = '<tt>' + small(' ' * 5) + ' </tt>'
frames = []
records = inspect.getinnerframes(etb, context)
for frame, file, lnum, func, lines, index in records:
if file:
file = os.path.abspath(file)
link = '<a href="file://%s">%s</a>' % (file, pydoc.html.escape(file))
else:
file = link = '?'
args, varargs, varkw, locals = inspect.getargvalues(frame)
call = ''
if func != '?':
call = 'in ' + strong(func) + \
inspect.formatargvalues(args, varargs, varkw, locals,
formatvalue=lambda value: '=' + pydoc.html.repr(value))
highlight = {}
def reader(lnum=[lnum]):
highlight[lnum[0]] = 1
try: return linecache.getline(file, lnum[0])
finally: lnum[0] += 1
vars = scanvars(reader, frame, locals)
rows = ['<tr><td bgcolor="#d8bbff">%s%s %s</td></tr>' %
('<big> </big>', link, call)]
if index is not None:
i = lnum - index
for line in lines:
num = small(' ' * (5-len(str(i))) + str(i)) + ' '
line = '<tt>%s%s</tt>' % (num, pydoc.html.preformat(line))
if i in highlight:
rows.append('<tr><td bgcolor="#ffccee">%s</td></tr>' % line)
else:
rows.append('<tr><td>%s</td></tr>' % grey(line))
i += 1
done, dump = {}, []
for name, where, value in vars:
if name in done: continue
done[name] = 1
if value is not __UNDEF__:
if where in ('global', 'builtin'):
name = ('<em>%s</em> ' % where) + strong(name)
elif where == 'local':
name = strong(name)
else:
name = where + strong(name.split('.')[-1])
dump.append('%s = %s' % (name, pydoc.html.repr(value)))
else:
dump.append(name + ' <em>undefined</em>')
rows.append('<tr><td>%s</td></tr>' % small(grey(', '.join(dump))))
frames.append('''
<table width="100%%" cellspacing=0 cellpadding=0 border=0>
%s</table>''' % '\n'.join(rows))
exception = ['<p>%s: %s' % (strong(pydoc.html.escape(str(etype))),
pydoc.html.escape(str(evalue)))]
if type(evalue) is types.InstanceType:
for name in dir(evalue):
if name[:1] == '_': continue
value = pydoc.html.repr(getattr(evalue, name))
exception.append('\n<br>%s%s =\n%s' % (indent, name, value))
import traceback
return head + ''.join(frames) + ''.join(exception) + '''
<!-- The above is a description of an error in a Python program, formatted
for a Web browser because the 'cgitb' module was enabled. In case you
are not reading this in a Web browser, here is the original traceback:
%s
-->
''' % ''.join(traceback.format_exception(etype, evalue, etb))
def text((etype, evalue, etb), context=5):
"""Return a plain text document describing a given traceback."""
import os, types, time, traceback, linecache, inspect, pydoc
if type(etype) is types.ClassType:
etype = etype.__name__
pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
date = time.ctime(time.time())
head = "%s\n%s\n%s\n" % (str(etype), pyver, date) + '''
A problem occurred in a Python script. Here is the sequence of
function calls leading up to the error, in the order they occurred.
'''
frames = []
records = inspect.getinnerframes(etb, context)
for frame, file, lnum, func, lines, index in records:
file = file and os.path.abspath(file) or '?'
args, varargs, varkw, locals = inspect.getargvalues(frame)
call = ''
if func != '?':
call = 'in ' + func + \
inspect.formatargvalues(args, varargs, varkw, locals,
formatvalue=lambda value: '=' + pydoc.text.repr(value))
highlight = {}
def reader(lnum=[lnum]):
highlight[lnum[0]] = 1
try: return linecache.getline(file, lnum[0])
finally: lnum[0] += 1
vars = scanvars(reader, frame, locals)
rows = [' %s %s' % (file, call)]
if index is not None:
i = lnum - index
for line in lines:
num = '%5d ' % i
rows.append(num+line.rstrip())
i += 1
done, dump = {}, []
for name, where, value in vars:
if name in done: continue
done[name] = 1
if value is not __UNDEF__:
if where == 'global': name = 'global ' + name
elif where != 'local': name = where + name.split('.')[-1]
dump.append('%s = %s' % (name, pydoc.text.repr(value)))
else:
dump.append(name + ' undefined')
rows.append('\n'.join(dump))
frames.append('\n%s\n' % '\n'.join(rows))
exception = ['%s: %s' % (str(etype), str(evalue))]
if type(evalue) is types.InstanceType:
for name in dir(evalue):
value = pydoc.text.repr(getattr(evalue, name))
exception.append('\n%s%s = %s' % (" "*4, name, value))
import traceback
return head + ''.join(frames) + ''.join(exception) + '''
The above is a description of an error in a Python program. Here is
the original traceback:
%s
''' % ''.join(traceback.format_exception(etype, evalue, etb))
class Hook:
    """A hook to replace sys.excepthook that shows tracebacks in HTML."""

    def __init__(self, display=1, logdir=None, context=5, file=None,
                 format="html"):
        self.display = display          # send tracebacks to browser if true
        self.logdir = logdir            # log tracebacks to files if not None
        self.context = context          # number of source code lines per frame
        self.file = file or sys.stdout  # place to send the output
        self.format = format            # 'html' or 'text'

    def __call__(self, etype, evalue, etb):
        self.handle((etype, evalue, etb))

    def handle(self, info=None):
        """Format and emit the traceback described by *info* (defaults to
        the current exception), honouring display/logdir settings."""
        info = info or sys.exc_info()
        if self.format == "html":
            self.file.write(reset())
        formatter = (self.format=="html") and html or text
        plain = False
        try:
            doc = formatter(info, self.context)
        except:                         # just in case something goes wrong
            import traceback
            doc = ''.join(traceback.format_exception(*info))
            plain = True
        if self.display:
            if plain:
                # FIX: actually HTML-escape the plain traceback. The
                # previous code replaced '&' with '&' and '<' with '<',
                # which was a no-op and left markup unescaped.
                doc = doc.replace('&', '&amp;').replace('<', '&lt;')
                self.file.write('<pre>' + doc + '</pre>\n')
            else:
                self.file.write(doc + '\n')
        else:
            self.file.write('<p>A problem occurred in a Python script.\n')
        if self.logdir is not None:
            import os, tempfile
            suffix = ['.txt', '.html'][self.format=="html"]
            (fd, path) = tempfile.mkstemp(suffix=suffix, dir=self.logdir)
            try:
                file = os.fdopen(fd, 'w')
                file.write(doc)
                file.close()
                msg = '<p> %s contains the description of this error.' % path
            except:
                msg = '<p> Tried to save traceback to %s, but failed.' % path
            self.file.write(msg + '\n')
        try:
            self.file.flush()
        except: pass

handler = Hook().handle
def enable(display=1, logdir=None, context=5, format="html"):
    """Install an exception handler that formats tracebacks as HTML.

    The optional argument 'display' can be set to 0 to suppress sending the
    traceback to the browser, and 'logdir' can be set to a directory to cause
    tracebacks to be written to files there."""
    hook = Hook(display=display, logdir=logdir,
                context=context, format=format)
    sys.excepthook = hook
| |
import unittest
from bet_calculator.bet_calculator import Bet_Calculator
from decimal import *
class Bet_Test_Case(unittest.TestCase):
"""Test the Bet_Calculator class"""
    def setUp(self):
        # Fresh calculator per test so odds from one test never leak into another.
        self.bet_calculator = Bet_Calculator()
    def test_if_is_calculating_that_odds_will_profit(self):
        """
        Check the can_bet_* flags are True when the quotations allow a profit.

        To be profitable, the quotations in the two houses must satisfy
        (d1 => decimal 1 in one bet house, d2 => decimal 2 in the other):

            1
        ________  <  (d2 - 1)
        (d1 - 1)
        """
        # Testing edge cases
        # Just remember 1.33333...4 is not the repeating decimal
        # 1.33333... — it is slightly bigger, so it sits right on the edge
        self.bet_calculator.decimal_team1_house1 = '1.333333333333333334'
        self.bet_calculator.decimal_team2_house2 = '4.0'
        # put the value right on the edge to be more confident the
        # calculation is correct
        self.bet_calculator.decimal_team1_house2 = '3.0000000001'
        self.bet_calculator.decimal_team2_house1 = '1.5'
        self.assertTrue(self.bet_calculator.can_bet_team1_house1)
        self.assertTrue(self.bet_calculator.can_bet_team1_house2)
        # Testing normal cases
        self.bet_calculator.decimal_team1_house1 = '1.5'
        self.bet_calculator.decimal_team2_house2 = '4.0'
        self.bet_calculator.decimal_team1_house2 = '3.0'
        self.bet_calculator.decimal_team2_house1 = '1.8'
        self.assertTrue(self.bet_calculator.can_bet_team1_house1)
        self.assertTrue(self.bet_calculator.can_bet_team1_house2)
    def test_if_is_calculating_that_odds_will_not_profit(self):
        """
        Check the can_bet_* flags are False when the quotations cannot profit.

        Not profitable when the quotations in the two houses satisfy
        (d1 => decimal 1 in one bet house, d2 => decimal 2 in the other):

            1
        ________  >=  (d2 - 1)
        (d1 - 1)
        """
        # Testing Edge Cases
        # Just remember 1.333333333333333333 is not the repeating decimal
        # 1.33333... — it is slightly smaller, so it sits right on the edge
        self.bet_calculator.decimal_team1_house1 = '1.333333333333333333'
        self.bet_calculator.decimal_team2_house2 = '4.0'
        # put the value right on the edge to be more confident the
        # calculation is correct
        self.bet_calculator.decimal_team1_house2 = '2.999999999999999999'
        self.bet_calculator.decimal_team2_house1 = '1.5'
        self.assertFalse(self.bet_calculator.can_bet_team1_house1)
        self.assertFalse(self.bet_calculator.can_bet_team1_house2)
        # Testing Normal Cases
        self.bet_calculator.decimal_team1_house1 = '1.3'
        self.bet_calculator.decimal_team2_house2 = '1.5'
        self.bet_calculator.decimal_team1_house2 = '1.01'
        self.bet_calculator.decimal_team2_house1 = '3'
        self.assertFalse(self.bet_calculator.can_bet_team1_house1)
        self.assertFalse(self.bet_calculator.can_bet_team1_house2)
def test_cash_made_when_you_profit(self):
    """
    Profit reported when the backed team wins.

    With tm = total money gambled, m1/m2 = money placed on
    team1/team2 (m2 = tm - m1) and d1/d2 their decimals:
        cashMade = m1 * d1 - tm   if team1 wins
        cashMade = m2 * d2 - tm   if team2 wins
    The quotations used here are not required to guarantee a profit.
    """
    self.bet_calculator.cash_to_bet = '200'  # tm in the equation
    self.bet_calculator.decimal_team1_house1 = '1.2'
    self.bet_calculator.decimal_team2_house2 = '6.5'
    self.bet_calculator.decimal_team1_house2 = '1.4'
    self.bet_calculator.decimal_team2_house1 = '4.5'
    # Expected values, one per case:
    #   180.43 * 1.2 - 200 = 16.516
    #   150.75 * 1.4 - 200 = 11.05
    #   35     * 6.5 - 200 = 27.5   (team1 in house1 implies team2 in house2)
    #   64.41  * 4.5 - 200 = 89.845 (otherwise team2 is bet in house1)
    cases = [
        (self.bet_calculator.profit_if_team1_wins, '180.43', True, '16.516'),
        (self.bet_calculator.profit_if_team1_wins, '150.75', False, '11.05'),
        (self.bet_calculator.profit_if_team2_wins, '35', True, '27.5'),
        (self.bet_calculator.profit_if_team2_wins, '64.41', False, '89.845'),
    ]
    for profit, stake, in_house1, expected in cases:
        self.assertEqual(profit(stake, bet_team1_house1=in_house1),
                         Decimal(expected))
def test_cash_made_when_you_lose(self):
    """
    Negative "profit" reported when the stake was too small.

    Same equations as the profitable case:
        cashMade = m1 * d1 - tm   if team1 wins
        cashMade = m2 * d2 - tm   if team2 wins
    When tm exceeds m1 * d1 (or m2 * d2) the result is negative:
    money was lost even though that team won.
    """
    self.bet_calculator.cash_to_bet = '200'  # tm in the equation
    self.bet_calculator.decimal_team1_house1 = '1.5'
    self.bet_calculator.decimal_team2_house2 = '3.2'
    self.bet_calculator.decimal_team1_house2 = '1.7'
    self.bet_calculator.decimal_team2_house1 = '2.2'
    # Expected values, one per case:
    #   50.54 * 1.5 - 200 = -124.19
    #   81.56 * 1.7 - 200 = -61.348
    #   55.17 * 3.2 - 200 = -23.456 (team1 in house1 implies team2 in house2)
    #   85.42 * 2.2 - 200 = -12.076 (otherwise team2 is bet in house1)
    cases = [
        (self.bet_calculator.profit_if_team1_wins, '50.54', True, '-124.19'),
        (self.bet_calculator.profit_if_team1_wins, '81.56', False, '-61.348'),
        (self.bet_calculator.profit_if_team2_wins, '55.17', True, '-23.456'),
        (self.bet_calculator.profit_if_team2_wins, '85.42', False, '-12.076'),
    ]
    for profit, stake, in_house1, expected in cases:
        self.assertEqual(profit(stake, bet_team1_house1=in_house1),
                         Decimal(expected))
def test_cash_made_when_you_bet_more_than_total_cash(self):
    """
    Staking more on one team than the declared total cash must
    raise an exception, regardless of the quotations.
    """
    self.bet_calculator.cash_to_bet = '200'  # tm in the equation
    # The quotations themselves are irrelevant for this check.
    self.bet_calculator.decimal_team1_house1 = '1.5'
    self.bet_calculator.decimal_team2_house2 = '3.2'
    self.bet_calculator.decimal_team1_house2 = '1.7'
    self.bet_calculator.decimal_team2_house1 = '2.2'
    # Edge case: stake barely above the declared total.
    for in_house1 in (True, False):
        self.assertRaises(Exception, self.bet_calculator.profit_if_team1_wins,
                          '200.000000000000001', bet_team1_house1=in_house1)
        self.assertRaises(Exception, self.bet_calculator.profit_if_team2_wins,
                          '200.000000000000001', bet_team1_house1=in_house1)
    # Stakes clearly above the declared total.
    self.assertRaises(Exception, self.bet_calculator.profit_if_team1_wins,
                      '205', bet_team1_house1=True)
    self.assertRaises(Exception, self.bet_calculator.profit_if_team1_wins,
                      '301', bet_team1_house1=False)
    self.assertRaises(Exception, self.bet_calculator.profit_if_team2_wins,
                      '400', bet_team1_house1=True)
    self.assertRaises(Exception, self.bet_calculator.profit_if_team2_wins,
                      '405', bet_team1_house1=False)
def test_calc_of_least_guaranteed_profit_when_team1_wins(self):
    """
    Least stake on team1 that still guarantees no loss.

    To merely get the invested money back when team1 wins:
        d1 = decimal for team1, tm = total money invested,
        m1 = stake on team1
        d1 * m1 = tm  =>  m1 = tm / d1
    NOTE: a draw still loses everything.
    """
    self.bet_calculator.cash_to_bet = '200'  # tm in the equation
    self.bet_calculator.decimal_team1_house1 = '1.5'
    self.bet_calculator.decimal_team2_house2 = '3.2'
    self.bet_calculator.decimal_team1_house2 = '1.7'
    self.bet_calculator.decimal_team2_house1 = '2.5'
    # The exact results repeat forever (200/1.5 = 133.3333...,
    # 200/1.7 = 117.6470588...), so compare after quantizing to five
    # decimal places -- precise enough for the check.
    five_places = Decimal('0.00001')
    least = self.bet_calculator.least_possible_value_team1
    self.assertEqual(
        least(bet_team1_house1=True).quantize(five_places),
        Decimal('133.33333')
    )
    self.assertEqual(
        least(bet_team1_house1=False).quantize(five_places),
        Decimal('117.64706')
    )
def test_calc_of_least_guaranteed_profit_when_team1_wins_when_the_decimals_dont_profit(self):
    """
    With quotations that cannot profit there is no guaranteed pair
    of bets, so the calculation must raise an exception.
    NOTE: a draw still loses everything.
    """
    self.bet_calculator.cash_to_bet = '200'  # tm in the equation
    self.bet_calculator.decimal_team1_house1 = '1.5'
    self.bet_calculator.decimal_team2_house2 = '1.8'
    self.bet_calculator.decimal_team1_house2 = '1.01'
    self.bet_calculator.decimal_team2_house1 = '10.999999999'
    # Both house arrangements must refuse to produce a stake.
    for in_house1 in (True, False):
        self.assertRaises(Exception,
                          self.bet_calculator.least_possible_value_team1,
                          bet_team1_house1=in_house1)
def test_calc_of_biggest_guaranteed_profit_if_team1_wins(self):
    """
    Biggest stake on team1 that still loses nothing if team2 wins.

    First reserve on team2 the minimum that returns the whole
    investment when team2 wins:
        d2 = decimal for team2, tm = total money invested
        m2 = tm / d2
    Whatever remains is the largest possible stake on team1, and
    therefore the most profitable one when team1 wins:
        m1 = tm - tm / d2   (i.e. tm - m2)
    NOTE: a draw still loses everything.
    """
    self.bet_calculator.cash_to_bet = '200'  # tm in the equation
    self.bet_calculator.decimal_team1_house1 = '1.3'
    self.bet_calculator.decimal_team2_house2 = '4.5'
    self.bet_calculator.decimal_team1_house2 = '1.9'
    self.bet_calculator.decimal_team2_house1 = '2.6'
    # Exact results repeat forever (200 - 200/4.5 = 155.5555...,
    # 200 - 200/2.6 = 123.0769230...), so quantize to five decimal
    # places before comparing.
    five_places = Decimal('0.00001')
    biggest = self.bet_calculator.biggest_possible_value_team1
    self.assertEqual(
        biggest(bet_team1_house1=True).quantize(five_places),
        Decimal('155.55556')
    )
    self.assertEqual(
        biggest(bet_team1_house1=False).quantize(five_places),
        Decimal('123.07692')
    )
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| |
import sys
import libbtaps
import time
def get_line():
    """Read one line from the user; terminate the CLI on an exit command.

    Typing 'quit', 'exit' or 'kill' (any case) ends the program;
    any other input is returned unchanged.
    """
    line = raw_input('> ')
    if line.lower() in ('quit', 'exit', 'kill'):
        # FIX: use sys.exit() instead of the bare exit() builtin --
        # exit() is an interactive helper injected by the site module
        # and is not guaranteed to exist (e.g. under python -S).
        sys.exit()
    return line
# Sort day dictionary in Monday-Sunday order
def print_dic_sorted(dic):
order = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
for key in sorted(dic, key=order.index):
print key, ":", dic[key], "",
print ""
# Given a list of BTApsTimer objects, print them in a legible format
def print_timers(timer_list):
print "Timers:"
for timer in timer_list:
print "\tName: ", timer.name
print "\tID: ", timer.timer_id
print "\tOn: ",
if timer.on == 1:
print "On"
else:
print "Off"
print "\tDays: ",
print_dic_sorted(timer.repeat_days)
print "\tStart Time: ", timer.start_time
print "\tEnd Time: ", timer.end_time
print ""
# Turn switch on/off
def toggle_switch(btaps, status):
    # status is 0 when the switch is currently off, so request the
    # opposite state: True (on) for 0, False (off) otherwise.
    btaps.set_switch(status == 0)
# Print name and on/off state of switch
def print_status(btaps):
name = btaps.get_dev_name()
status = btaps.get_switch_state()
print "Name: " + name
print "Switch: ",
if status[0] == 1:
print "On"
else:
print "Off"
return status
# Simple interactive command line prompts for creating new timer
def create_timer(btaps, timer_list):
    """Interactively build a new BTApsTimer and register it on the device.

    The new timer's id is one past the current number of timers.
    NOTE(review): ids are never reclaimed here, so after a deletion this
    could collide with an existing id -- confirm against libbtaps.
    """
    print "Creating New Timer:"
    print "Name: "
    name = get_line()
    new_timer = libbtaps.BTapsTimer(len(timer_list) + 1, name)
    print "Enter Start and End Time in 24-hour format (ex: 23:54)"
    print "Start Time: "
    start = get_line()
    # struct_time indices 3 and 4 are hour and minute.
    start = time.strptime(start, "%H:%M")
    new_timer.set_start_time(start[3], start[4])
    print "End Time: "
    end = get_line()
    end = time.strptime(end, "%H:%M")
    new_timer.set_end_time(end[3], end[4])
    print "Repeat Timer?"
    repeat = get_line().lower()
    if repeat == "y":
        day_list = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
        # Ask about each weekday, replacing the day name with True/False
        # in place so the list doubles as the set_repeat_days arguments.
        for i, day in enumerate(day_list):
            print day, "?"
            repeat = get_line().lower()
            if repeat == 'y':
                day_list[i] = True
            else:
                day_list[i] = False
        new_timer.set_repeat_days(day_list[0], day_list[1], day_list[2], day_list[3],
                                  day_list[4], day_list[5], day_list[6])
    print "Enable New Timer? Y/N"
    enable = get_line().lower()
    if enable == 'y':
        new_timer.toggle_on()
    # Push the fully-configured timer to the device.
    btaps.create_timer(new_timer)
# Simple interactive command line prompts for modifying a timer
def modify_timer(btaps, timer_list):
print "Enter Timer ID for the timer you wish to modify:"
id = get_line()
mod_timer = timer_list[int(id)-1]
print "Enter values you wish to change, leave blank to keep original value"
print "Name: ", mod_timer.name
name = get_line()
if name != '':
mod_timer.set_name(name)
print "Enter Start and End Time in 24-hour format (ex: 23:54)"
print "Start Time: ",
print_dic_sorted(mod_timer.start_time)
start = get_line()
if start != '':
start = time.strptime(start, "%H:%M")
mod_timer.set_start_time(start[3], start[4])
print "End Time: ", mod_timer.end_time
end = get_line()
if end != '':
end = time.strptime(end, "%H:%M")
mod_timer.set_end_time(end[3], end[4])
print "Repeat Timer?", mod_timer.repeat_days
repeat = get_line().lower()
if repeat == "y":
day_list = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
for i, day in enumerate(day_list):
print day, "?"
repeat = get_line().lower()
if repeat == 'y':
day_list[i] = True
else:
day_list[i] = False
mod_timer.set_repeat_days(day_list[0], day_list[1], day_list[2], day_list[3],
day_list[4], day_list[5], day_list[6])
print "Enable Timer? Y/N"
enable = get_line().lower()
if (enable == 'y') and (mod_timer.on != 1):
mod_timer.toggle_on()
elif (enable == 'n') and (mod_timer.on != 0):
mod_timer.toggle_on()
btaps.modify_timer(mod_timer)
# Interactive main loop: connect to the switch at the given Bluetooth
# address and repeatedly prompt for commands until the user exits.
def main(argv):
    print " === Plugable PS-BTAPS CLI v0.8 ==="
    if len(argv) != 2:
        print "USAGE: python", sys.argv[0], "[Bluetooth address]"
        print "EXAMPLE: python", sys.argv[0], "00:00:FF:FF:00:00"
        sys.exit(0)
    # Establish connection to BTAPS
    btaps = libbtaps.BTaps(argv[1])
    connected = btaps.connect()
    if not connected:
        sys.exit(0)
    # Sync the device clock so timers fire at the right local time.
    btaps.set_datetime_now()
    # As used below, status[0] is the on/off flag and status[1] the timer list.
    status = print_status(btaps)
    print_timers(status[1])
    while True:
        print "Select a function..."
        print "1. (T)oggle Switch"
        print "2. (C)reate Timer"
        print "3. (M)odify Timer"
        print "4. (D)elete Timer"
        print "5. (S)et Device Name"
        print "6. (G)et Switch Status (Name, On/Off, Timers)"
        print "7. E(x)it"
        try:
            # Accept either the menu number or the highlighted letter.
            function = get_line().lower()
            if function in ['1', 't']:
                toggle_switch(btaps, status[0])
            elif function in ['2', 'c']:
                create_timer(btaps, status[1])
            elif function in ['3', 'm']:
                print_timers(status[1])
                modify_timer(btaps, status[1])
            elif function in ['4', 'd']:
                print_timers(status[1])
                print "Enter Timer ID to delete:"
                timer_id = get_line()
                btaps.delete_timer(timer_id)
            elif function in ['5', 's']:
                print "New Device Name:"
                name = get_line()
                btaps.set_dev_name(name)
            elif function in ['6', 'g']:
                status = print_status(btaps)
                print_timers(status[1])
            elif function in ['7', 'x']:
                break
            # Refresh the cached status after most commands.
            # NOTE(review): the exclusion list mixes the two input styles --
            # it skips '5' (set name) but not 's', and 'g' (get status) but
            # not '6' -- so some paths refresh twice and others not at all;
            # confirm the intended set of exclusions.
            if not (function in ['5', 'g']):
                status = print_status(btaps)
        except KeyboardInterrupt:
            # Ctrl-C at the prompt exits the loop cleanly.
            break
    btaps.disconnect()
# Entry point: expects the device's Bluetooth address as the only argument.
if __name__ == '__main__':
    main(sys.argv)
| |
#!/usr/bin/env python
#
# Copyright 2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate test functions for use with mock_server_t.
Defines functions like future_cursor_next in future-functions.h and
future-functions.c, which defer a libmongoc operation to a background thread
via functions like background_cursor_next. Also defines functions like
future_value_set_bson_ptr and future_value_get_bson_ptr which support the
future / background functions, and functions like future_get_bson_ptr which
wait for a future to resolve, then return its value.
These future functions are used in conjunction with mock_server_t to
conveniently test libmongoc wire protocol operations.
Written for Python 2.6+, requires Jinja 2 for templating.
"""
import glob
from collections import namedtuple
from os.path import basename, dirname, join as joinpath, normpath
from jinja2 import Environment, FileSystemLoader # Please "pip install jinja2".
# Filesystem layout: templates live next to this script; generated files
# are written to ../tests/mock_server.
this_dir = dirname(__file__)
template_dir = joinpath(this_dir, 'future_function_templates')
mock_server_dir = normpath(joinpath(this_dir, '../tests/mock_server'))
# Add additional types here. Use typedefs for derived types so they can
# be named with one symbol.
# "name" is the single-symbol identifier used in generated code; "typedef"
# is the C declaration it stands for, or None when the name already is a
# plain C type.
typedef = namedtuple("typedef", ["name", "typedef"])
# These are typedef'ed if necessary in future-value.h, and added to the union
# of possible future_value_t.value types. future_value_t getters and setters
# are generated for all types, as well as future_t getters.
typedef_list = [
    # Fundamental.
    typedef("bool", None),
    typedef("char_ptr", "char *"),
    typedef("char_ptr_ptr", "char **"),
    typedef("int", None),
    typedef("int64_t", None),
    typedef("size_t", None),
    typedef("ssize_t", None),
    typedef("uint32_t", None),
    # Const fundamental.
    typedef("const_char_ptr", "const char *"),
    # libbson.
    typedef("bson_error_ptr", "bson_error_t *"),
    typedef("bson_ptr", "bson_t *"),
    # Const libbson.
    typedef("const_bson_ptr", "const bson_t *"),
    typedef("const_bson_ptr_ptr", "const bson_t **"),
    # libmongoc.
    typedef("mongoc_bulk_operation_ptr", "mongoc_bulk_operation_t *"),
    typedef("mongoc_client_ptr", "mongoc_client_t *"),
    typedef("mongoc_collection_ptr", "mongoc_collection_t *"),
    typedef("mongoc_cursor_ptr", "mongoc_cursor_t *"),
    typedef("mongoc_database_ptr", "mongoc_database_t *"),
    typedef("mongoc_gridfs_file_ptr", "mongoc_gridfs_file_t *"),
    typedef("mongoc_gridfs_ptr", "mongoc_gridfs_t *"),
    typedef("mongoc_insert_flags_t", None),
    typedef("mongoc_iovec_ptr", "mongoc_iovec_t *"),
    typedef("mongoc_query_flags_t", None),
    typedef("mongoc_server_description_ptr", "mongoc_server_description_t *"),
    typedef("mongoc_ss_optype_t", None),
    typedef("mongoc_topology_ptr", "mongoc_topology_t *"),
    # Const libmongoc.
    typedef("const_mongoc_find_and_modify_opts_ptr", "const mongoc_find_and_modify_opts_t *"),
    typedef("const_mongoc_read_prefs_ptr", "const mongoc_read_prefs_t *"),
    typedef("const_mongoc_write_concern_ptr", "const mongoc_write_concern_t *"),
]
# All declared type symbols; "void" is additionally allowed, but only as a
# return type (see the validation loop below).
type_list = [T.name for T in typedef_list]
type_list_with_void = type_list + ['void']
# A function parameter: its type symbol (from type_list) and its C name.
param = namedtuple("param", ["type_name", "name"])
# A libmongoc function to wrap: return type symbol, name, parameter list.
future_function = namedtuple("future_function", ["ret_type", "name", "params"])
# Add additional functions to be tested here. For a name like "cursor_next", we
# generate two functions: future_cursor_next to prepare the future_t and launch
# a background thread, and background_cursor_next to run on the thread and
# resolve the future.
# Every ret_type must appear in type_list_with_void and every parameter
# type in type_list; this is enforced by the validation loop below.
future_functions = [
    future_function("uint32_t",
                    "mongoc_bulk_operation_execute",
                    [param("mongoc_bulk_operation_ptr", "bulk"),
                     param("bson_ptr", "reply"),
                     param("bson_error_ptr", "error")]),
    future_function("bool",
                    "mongoc_client_command_simple",
                    [param("mongoc_client_ptr", "client"),
                     param("const_char_ptr", "db_name"),
                     param("const_bson_ptr", "command"),
                     param("const_mongoc_read_prefs_ptr", "read_prefs"),
                     param("bson_ptr", "reply"),
                     param("bson_error_ptr", "error")]),
    future_function("void",
                    "mongoc_client_kill_cursor",
                    [param("mongoc_client_ptr", "client"),
                     param("int64_t", "cursor_id")]),
    future_function("mongoc_cursor_ptr",
                    "mongoc_collection_aggregate",
                    [param("mongoc_collection_ptr", "collection"),
                     param("mongoc_query_flags_t", "flags"),
                     param("const_bson_ptr", "pipeline"),
                     param("const_bson_ptr", "options"),
                     param("const_mongoc_read_prefs_ptr", "read_prefs")]),
    future_function("int64_t",
                    "mongoc_collection_count",
                    [param("mongoc_collection_ptr", "collection"),
                     param("mongoc_query_flags_t", "flags"),
                     param("const_bson_ptr", "query"),
                     param("int64_t", "skip"),
                     param("int64_t", "limit"),
                     param("const_mongoc_read_prefs_ptr", "read_prefs"),
                     param("bson_error_ptr", "error")]),
    future_function("bool",
                    "mongoc_collection_find_and_modify_with_opts",
                    [param("mongoc_collection_ptr", "collection"),
                     param("const_bson_ptr", "query"),
                     param("const_mongoc_find_and_modify_opts_ptr", "opts"),
                     param("bson_ptr", "reply"),
                     param("bson_error_ptr", "error")]),
    future_function("bool",
                    "mongoc_collection_find_and_modify",
                    [param("mongoc_collection_ptr", "collection"),
                     param("const_bson_ptr", "query"),
                     param("const_bson_ptr", "sort"),
                     param("const_bson_ptr", "update"),
                     param("const_bson_ptr", "fields"),
                     # Leading underscores avoid C keyword clashes in
                     # the generated code.
                     param("bool", "_remove"),
                     param("bool", "upsert"),
                     param("bool", "_new"),
                     param("bson_ptr", "reply"),
                     param("bson_error_ptr", "error")]),
    future_function("bool",
                    "mongoc_collection_insert",
                    [param("mongoc_collection_ptr", "collection"),
                     param("mongoc_insert_flags_t", "flags"),
                     param("const_bson_ptr", "document"),
                     param("const_mongoc_write_concern_ptr", "write_concern"),
                     param("bson_error_ptr", "error")]),
    future_function("bool",
                    "mongoc_collection_insert_bulk",
                    [param("mongoc_collection_ptr", "collection"),
                     param("mongoc_insert_flags_t", "flags"),
                     param("const_bson_ptr_ptr", "documents"),
                     param("uint32_t", "n_documents"),
                     param("const_mongoc_write_concern_ptr", "write_concern"),
                     param("bson_error_ptr", "error")]),
    future_function("void",
                    "mongoc_cursor_destroy",
                    [param("mongoc_cursor_ptr", "cursor")]),
    future_function("bool",
                    "mongoc_cursor_next",
                    [param("mongoc_cursor_ptr", "cursor"),
                     param("const_bson_ptr_ptr", "doc")]),
    future_function("char_ptr_ptr",
                    "mongoc_client_get_database_names",
                    [param("mongoc_client_ptr", "client"),
                     param("bson_error_ptr", "error")]),
    future_function("bool",
                    "mongoc_database_command_simple",
                    [param("mongoc_database_ptr", "database"),
                     param("bson_ptr", "command"),
                     param("const_mongoc_read_prefs_ptr", "read_prefs"),
                     param("bson_ptr", "reply"),
                     param("bson_error_ptr", "error")]),
    future_function("char_ptr_ptr",
                    "mongoc_database_get_collection_names",
                    [param("mongoc_database_ptr", "database"),
                     param("bson_error_ptr", "error")]),
    future_function("ssize_t",
                    "mongoc_gridfs_file_readv",
                    [param("mongoc_gridfs_file_ptr", "file"),
                     param("mongoc_iovec_ptr", "iov"),
                     param("size_t", "iovcnt"),
                     param("size_t", "min_bytes"),
                     param("uint32_t", "timeout_msec")]),
    future_function("mongoc_gridfs_file_ptr",
                    "mongoc_gridfs_find_one",
                    [param("mongoc_gridfs_ptr", "gridfs"),
                     param("const_bson_ptr", "query"),
                     param("bson_error_ptr", "error")]),
    future_function("bool",
                    "mongoc_gridfs_file_remove",
                    [param("mongoc_gridfs_file_ptr", "file"),
                     param("bson_error_ptr", "error")]),
    future_function("int",
                    "mongoc_gridfs_file_seek",
                    [param("mongoc_gridfs_file_ptr", "file"),
                     param("int64_t", "delta"),
                     param("int", "whence")]),
    future_function("ssize_t",
                    "mongoc_gridfs_file_writev",
                    [param("mongoc_gridfs_file_ptr", "file"),
                     param("mongoc_iovec_ptr", "iov"),
                     param("size_t", "iovcnt"),
                     param("uint32_t", "timeout_msec")]),
    future_function("mongoc_server_description_ptr",
                    "mongoc_topology_select",
                    [param("mongoc_topology_ptr", "topology"),
                     param("mongoc_ss_optype_t", "optype"),
                     param("const_mongoc_read_prefs_ptr", "read_prefs"),
                     param("bson_error_ptr", "error")]),
    future_function("mongoc_gridfs_ptr",
                    "mongoc_client_get_gridfs",
                    [param("mongoc_client_ptr", "client"),
                     param("const_char_ptr", "db"),
                     param("const_char_ptr", "prefix"),
                     param("bson_error_ptr", "error")]),
]
# Validate the declarations before generating anything: every return type
# must be a known symbol ("void" is legal only as a return type) and every
# parameter type must be a known, non-void symbol.
def _require_known_type(type_name, allowed, fn):
    """Raise if type_name is not one of the allowed type symbols."""
    if type_name not in allowed:
        raise Exception('bad type "%s"\n\nin %s' % (type_name, fn))

for fn in future_functions:
    _require_known_type(fn.ret_type, type_list_with_void, fn)
    for p in fn.params:
        _require_known_type(p.type_name, type_list, fn)
header_comment = """/**************************************************
*
* Generated by build/%s.
*
* DO NOT EDIT THIS FILE.
*
*************************************************/""" % basename(__file__)
def future_function_name(fn):
    """Map a wrapped function's name to the name of its future_ wrapper.

    "mongoc_cursor_next" becomes "future_cursor_next", while a name that
    does not begin with "mongoc" (e.g. "_mongoc_client_kill_cursor")
    simply gains a "future_" prefix.
    """
    prefix = 'mongoc'
    if fn.name.startswith(prefix):
        return 'future' + fn.name[len(prefix):]
    return 'future_' + fn.name
# Jinja environment rooted at the template directory; templates can apply
# the future_function_name filter to entries of future_functions.
env = Environment(loader=FileSystemLoader(template_dir))
env.filters['future_function_name'] = future_function_name
# One "<name>.template" per generated header/source file.
files = ["future.h",
         "future.c",
         "future-value.h",
         "future-value.c",
         "future-functions.h",
         "future-functions.c"]
for file_name in files:
    print(file_name)
    # Render each template into tests/mock_server, passing every
    # module-level name (typedef_list, future_functions, header_comment,
    # ...) to the template via globals().
    with open(joinpath(mock_server_dir, file_name), 'w+') as f:
        t = env.get_template(file_name + ".template")
        f.write(t.render(globals()))
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class AppServiceEnvironmentResource(Resource):
"""App Service Environment ARM resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:param location: Resource Location.
:type location: str
:ivar type: Resource type.
:vartype type: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param app_service_environment_resource_name: Name of the App Service
Environment.
:type app_service_environment_resource_name: str
:param app_service_environment_resource_location: Location of the App
Service Environment, e.g. "West US".
:type app_service_environment_resource_location: str
:ivar provisioning_state: Provisioning state of the App Service
Environment. Possible values include: 'Succeeded', 'Failed', 'Canceled',
'InProgress', 'Deleting'
:vartype provisioning_state: str or
~azure.mgmt.web.models.ProvisioningState
:ivar status: Current status of the App Service Environment. Possible
values include: 'Preparing', 'Ready', 'Scaling', 'Deleting'
:vartype status: str or ~azure.mgmt.web.models.HostingEnvironmentStatus
:param vnet_name: Name of the Virtual Network for the App Service
Environment.
:type vnet_name: str
:param vnet_resource_group_name: Resource group of the Virtual Network.
:type vnet_resource_group_name: str
:param vnet_subnet_name: Subnet of the Virtual Network.
:type vnet_subnet_name: str
:param virtual_network: Description of the Virtual Network.
:type virtual_network: ~azure.mgmt.web.models.VirtualNetworkProfile
:param internal_load_balancing_mode: Specifies which endpoints to serve
internally in the Virtual Network for the App Service Environment.
Possible values include: 'None', 'Web', 'Publishing'
:type internal_load_balancing_mode: str or
~azure.mgmt.web.models.InternalLoadBalancingMode
:param multi_size: Front-end VM size, e.g. "Medium", "Large".
:type multi_size: str
:param multi_role_count: Number of front-end instances.
:type multi_role_count: int
:param worker_pools: Description of worker pools with worker size IDs, VM
sizes, and number of workers in each pool.
:type worker_pools: list[~azure.mgmt.web.models.WorkerPool]
:param ipssl_address_count: Number of IP SSL addresses reserved for the
App Service Environment.
:type ipssl_address_count: int
:ivar database_edition: Edition of the metadata database for the App
Service Environment, e.g. "Standard".
:vartype database_edition: str
:ivar database_service_objective: Service objective of the metadata
database for the App Service Environment, e.g. "S0".
:vartype database_service_objective: str
:ivar upgrade_domains: Number of upgrade domains of the App Service
Environment.
:vartype upgrade_domains: int
:ivar subscription_id: Subscription of the App Service Environment.
:vartype subscription_id: str
:param dns_suffix: DNS suffix of the App Service Environment.
:type dns_suffix: str
:ivar last_action: Last deployment action on the App Service Environment.
:vartype last_action: str
:ivar last_action_result: Result of the last deployment action on the App
Service Environment.
:vartype last_action_result: str
:ivar allowed_multi_sizes: List of comma separated strings describing
which VM sizes are allowed for front-ends.
:vartype allowed_multi_sizes: str
:ivar allowed_worker_sizes: List of comma separated strings describing
which VM sizes are allowed for workers.
:vartype allowed_worker_sizes: str
:ivar maximum_number_of_machines: Maximum number of VMs in the App Service
Environment.
:vartype maximum_number_of_machines: int
:ivar vip_mappings: Description of IP SSL mapping for the App Service
Environment.
:vartype vip_mappings: list[~azure.mgmt.web.models.VirtualIPMapping]
:ivar environment_capacities: Current total, used, and available worker
capacities.
:vartype environment_capacities:
list[~azure.mgmt.web.models.StampCapacity]
:param network_access_control_list: Access control list for controlling
traffic to the App Service Environment.
:type network_access_control_list:
list[~azure.mgmt.web.models.NetworkAccessControlEntry]
:ivar environment_is_healthy: True/false indicating whether the App
Service Environment is healthy.
:vartype environment_is_healthy: bool
:ivar environment_status: Detailed message about with results of the last
check of the App Service Environment.
:vartype environment_status: str
:ivar resource_group: Resource group of the App Service Environment.
:vartype resource_group: str
:param front_end_scale_factor: Scale factor for front-ends.
:type front_end_scale_factor: int
:ivar default_front_end_scale_factor: Default Scale Factor for FrontEnds.
:vartype default_front_end_scale_factor: int
:param api_management_account_id: API Management Account associated with
the App Service Environment.
:type api_management_account_id: str
:param suspended: <code>true</code> if the App Service Environment is
suspended; otherwise, <code>false</code>. The environment can be
suspended, e.g. when the management endpoint is no longer available
(most likely because NSG blocked the incoming traffic).
:type suspended: bool
:param dynamic_cache_enabled: True/false indicating whether the App
Service Environment is suspended. The environment can be suspended e.g.
when the management endpoint is no longer available
(most likely because NSG blocked the incoming traffic).
:type dynamic_cache_enabled: bool
:param cluster_settings: Custom settings for changing the behavior of the
App Service Environment.
:type cluster_settings: list[~azure.mgmt.web.models.NameValuePair]
:param user_whitelisted_ip_ranges: User added ip ranges to whitelist on
ASE db
:type user_whitelisted_ip_ranges: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'location': {'required': True},
'type': {'readonly': True},
'app_service_environment_resource_name': {'required': True},
'app_service_environment_resource_location': {'required': True},
'provisioning_state': {'readonly': True},
'status': {'readonly': True},
'virtual_network': {'required': True},
'worker_pools': {'required': True},
'database_edition': {'readonly': True},
'database_service_objective': {'readonly': True},
'upgrade_domains': {'readonly': True},
'subscription_id': {'readonly': True},
'last_action': {'readonly': True},
'last_action_result': {'readonly': True},
'allowed_multi_sizes': {'readonly': True},
'allowed_worker_sizes': {'readonly': True},
'maximum_number_of_machines': {'readonly': True},
'vip_mappings': {'readonly': True},
'environment_capacities': {'readonly': True},
'environment_is_healthy': {'readonly': True},
'environment_status': {'readonly': True},
'resource_group': {'readonly': True},
'default_front_end_scale_factor': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'app_service_environment_resource_name': {'key': 'properties.name', 'type': 'str'},
'app_service_environment_resource_location': {'key': 'properties.location', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'ProvisioningState'},
'status': {'key': 'properties.status', 'type': 'HostingEnvironmentStatus'},
'vnet_name': {'key': 'properties.vnetName', 'type': 'str'},
'vnet_resource_group_name': {'key': 'properties.vnetResourceGroupName', 'type': 'str'},
'vnet_subnet_name': {'key': 'properties.vnetSubnetName', 'type': 'str'},
'virtual_network': {'key': 'properties.virtualNetwork', 'type': 'VirtualNetworkProfile'},
'internal_load_balancing_mode': {'key': 'properties.internalLoadBalancingMode', 'type': 'InternalLoadBalancingMode'},
'multi_size': {'key': 'properties.multiSize', 'type': 'str'},
'multi_role_count': {'key': 'properties.multiRoleCount', 'type': 'int'},
'worker_pools': {'key': 'properties.workerPools', 'type': '[WorkerPool]'},
'ipssl_address_count': {'key': 'properties.ipsslAddressCount', 'type': 'int'},
'database_edition': {'key': 'properties.databaseEdition', 'type': 'str'},
'database_service_objective': {'key': 'properties.databaseServiceObjective', 'type': 'str'},
'upgrade_domains': {'key': 'properties.upgradeDomains', 'type': 'int'},
'subscription_id': {'key': 'properties.subscriptionId', 'type': 'str'},
'dns_suffix': {'key': 'properties.dnsSuffix', 'type': 'str'},
'last_action': {'key': 'properties.lastAction', 'type': 'str'},
'last_action_result': {'key': 'properties.lastActionResult', 'type': 'str'},
'allowed_multi_sizes': {'key': 'properties.allowedMultiSizes', 'type': 'str'},
'allowed_worker_sizes': {'key': 'properties.allowedWorkerSizes', 'type': 'str'},
'maximum_number_of_machines': {'key': 'properties.maximumNumberOfMachines', 'type': 'int'},
'vip_mappings': {'key': 'properties.vipMappings', 'type': '[VirtualIPMapping]'},
'environment_capacities': {'key': 'properties.environmentCapacities', 'type': '[StampCapacity]'},
'network_access_control_list': {'key': 'properties.networkAccessControlList', 'type': '[NetworkAccessControlEntry]'},
'environment_is_healthy': {'key': 'properties.environmentIsHealthy', 'type': 'bool'},
'environment_status': {'key': 'properties.environmentStatus', 'type': 'str'},
'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'},
'front_end_scale_factor': {'key': 'properties.frontEndScaleFactor', 'type': 'int'},
'default_front_end_scale_factor': {'key': 'properties.defaultFrontEndScaleFactor', 'type': 'int'},
'api_management_account_id': {'key': 'properties.apiManagementAccountId', 'type': 'str'},
'suspended': {'key': 'properties.suspended', 'type': 'bool'},
'dynamic_cache_enabled': {'key': 'properties.dynamicCacheEnabled', 'type': 'bool'},
'cluster_settings': {'key': 'properties.clusterSettings', 'type': '[NameValuePair]'},
'user_whitelisted_ip_ranges': {'key': 'properties.userWhitelistedIpRanges', 'type': '[str]'},
}
def __init__(self, location, app_service_environment_resource_name, app_service_environment_resource_location, virtual_network, worker_pools, kind=None, tags=None, vnet_name=None, vnet_resource_group_name=None, vnet_subnet_name=None, internal_load_balancing_mode=None, multi_size=None, multi_role_count=None, ipssl_address_count=None, dns_suffix=None, network_access_control_list=None, front_end_scale_factor=None, api_management_account_id=None, suspended=None, dynamic_cache_enabled=None, cluster_settings=None, user_whitelisted_ip_ranges=None):
super(AppServiceEnvironmentResource, self).__init__(kind=kind, location=location, tags=tags)
self.app_service_environment_resource_name = app_service_environment_resource_name
self.app_service_environment_resource_location = app_service_environment_resource_location
self.provisioning_state = None
self.status = None
self.vnet_name = vnet_name
self.vnet_resource_group_name = vnet_resource_group_name
self.vnet_subnet_name = vnet_subnet_name
self.virtual_network = virtual_network
self.internal_load_balancing_mode = internal_load_balancing_mode
self.multi_size = multi_size
self.multi_role_count = multi_role_count
self.worker_pools = worker_pools
self.ipssl_address_count = ipssl_address_count
self.database_edition = None
self.database_service_objective = None
self.upgrade_domains = None
self.subscription_id = None
self.dns_suffix = dns_suffix
self.last_action = None
self.last_action_result = None
self.allowed_multi_sizes = None
self.allowed_worker_sizes = None
self.maximum_number_of_machines = None
self.vip_mappings = None
self.environment_capacities = None
self.network_access_control_list = network_access_control_list
self.environment_is_healthy = None
self.environment_status = None
self.resource_group = None
self.front_end_scale_factor = front_end_scale_factor
self.default_front_end_scale_factor = None
self.api_management_account_id = api_management_account_id
self.suspended = suspended
self.dynamic_cache_enabled = dynamic_cache_enabled
self.cluster_settings = cluster_settings
self.user_whitelisted_ip_ranges = user_whitelisted_ip_ranges
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.