# coding=utf-8
from distutils.version import LooseVersion
from itertools import chain
import tempfile
import os
import logging
import hashlib
import random
import json
import types
import re
from collections import defaultdict
from datetime import datetime
from functools import wraps
from copy import deepcopy
from urllib2 import urlopen
from urlparse import urljoin
from couchdbkit import ResourceConflict, MultipleResultsFound
from lxml import etree
from django.core.cache import cache
from django.utils.encoding import force_unicode
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _, ugettext
from couchdbkit.exceptions import BadValueError, DocTypeError
from couchdbkit.ext.django.schema import *
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import Http404
from django.template.loader import render_to_string
from restkit.errors import ResourceError
from couchdbkit.resource import ResourceNotFound
from corehq import toggles, privileges
from corehq.apps.app_manager.feature_support import CommCareFeatureSupportMixin
from django_prbac.exceptions import PermissionDenied
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.app_manager.commcare_settings import check_condition
from corehq.apps.app_manager.const import *
from corehq.apps.app_manager.xpath import dot_interpolate, LocationXpath
from corehq.apps.builds import get_default_build_spec
from corehq.util.hash_compat import make_password
from dimagi.utils.couch.cache import cache_core
from dimagi.utils.couch.lazy_attachment_doc import LazyAttachmentDoc
from dimagi.utils.couch.undo import DeleteRecord, DELETED_SUFFIX
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.web import get_url_base, parse_int
from dimagi.utils.couch.database import get_db
import commcare_translations
from corehq.util import bitly
from corehq.util import view_utils
from corehq.apps.appstore.models import SnapshotMixin
from corehq.apps.builds.models import BuildSpec, CommCareBuildConfig, BuildRecord
from corehq.apps.hqmedia.models import HQMediaMixin
from corehq.apps.reports.templatetags.timezone_tags import utc_to_timezone
from corehq.apps.translations.models import TranslationMixin
from corehq.apps.users.models import CouchUser
from corehq.apps.users.util import cc_user_domain
from corehq.apps.domain.models import cached_property
from corehq.apps.app_manager import current_builds, app_strings, remote_app
from corehq.apps.app_manager import fixtures, suite_xml, commcare_settings
from corehq.apps.app_manager.util import split_path, save_xform, get_correct_app_class
from corehq.apps.app_manager.xform import XForm, parse_xml as _parse_xml, \
validate_xform
from corehq.apps.app_manager.templatetags.xforms_extras import trans
from .exceptions import (
AppEditingError,
BlankXFormError,
ConflictingCaseTypeError,
FormNotFoundException,
IncompatibleFormTypeException,
LocationXpathValidationError,
ModuleNotFoundException,
RearrangeError,
VersioningError,
XFormError,
XFormIdNotUnique,
XFormValidationError,
)
from corehq.apps.app_manager import id_strings
# Choices for FormBase.post_form_workflow (navigation after form completion).
WORKFLOW_DEFAULT = 'default'
WORKFLOW_MODULE = 'module'
WORKFLOW_PREVIOUS = 'previous_screen'

# Modes for AutoSelectCase.mode; value_key is read from user data, a fixture
# field, a case index, or a raw xpath expression respectively.
AUTO_SELECT_USER = 'user'
AUTO_SELECT_FIXTURE = 'fixture'
AUTO_SELECT_CASE = 'case'
AUTO_SELECT_RAW = 'raw'

# Legacy detail-screen type names.
DETAIL_TYPES = ['case_short', 'case_long', 'ref_short', 'ref_long']

# Separates a DetailColumn field's type prefix from the property name,
# e.g. "property:parent/case_property".
FIELD_SEPARATOR = ':'

# Matches top-level *.xml attachment names (no '/' in the name).
ATTACHMENT_REGEX = r'[^/]*\.xml'
def _rename_key(dct, old, new):
if old in dct:
if new in dct and dct[new]:
dct["%s_backup_%s" % (new, hex(random.getrandbits(32))[2:-1])] = dct[new]
dct[new] = dct[old]
del dct[old]
@memoized
def load_case_reserved_words():
    """Load (once) the reserved case-property words from the static JSON file."""
    with open(os.path.join(os.path.dirname(__file__), 'static', 'app_manager', 'json', 'case-reserved-words.json')) as f:
        return json.load(f)
@memoized
def load_form_template(filename):
    """Load (once per filename) a form template from this app's data directory."""
    with open(os.path.join(os.path.dirname(__file__), 'data', filename)) as f:
        return f.read()
def partial_escape(xpath):
    """
    HTML-escape ``xpath`` (&, <, >, ") and mark the result safe.

    Copied from http://stackoverflow.com/questions/275174/how-do-i-perform-html-decoding-encoding-using-python-django
    but without replacing the single quote
    """
    # NOTE: the entity references had been mangled into no-op replacements
    # ('&' -> '&'); restored per the referenced answer. '&' must be escaped
    # first so the other entities are not double-escaped.
    return mark_safe(
        force_unicode(xpath)
        .replace('&', '&amp;')
        .replace('<', '&lt;')
        .replace('>', '&gt;')
        .replace('"', '&quot;')
    )
class IndexedSchema(DocumentSchema):
    """
    Abstract class.
    Meant for documents that appear in a list within another document
    and need to know their own position within that list.
    """
    def with_id(self, i, parent):
        # Bind this schema to its index and owning document; returns self
        # so it can be chained (e.g. self.forms[i].with_id(i, self)).
        self._i = i
        self._parent = parent
        return self

    @property
    def id(self):
        # Index within the parent's list, as set by with_id().
        return self._i

    def __eq__(self, other):
        # Equal when both occupy the same index of the same parent document.
        # NOTE(review): returns `other` itself (e.g. None) rather than False
        # when `other` is falsy -- truthiness is correct, the returned object
        # is not always a bool.
        return other and (self.id == other.id) and (self._parent == other._parent)

    class Getter(object):
        """
        Callable descriptor used as ``get_x = IndexedSchema.Getter('x')``:
        iterates the named list attribute, yielding each item bound to its
        index via ``with_id``.
        """
        def __init__(self, attr):
            self.attr = attr

        def __call__(self, instance):
            items = getattr(instance, self.attr)
            l = len(items)
            for i,item in enumerate(items):
                # i % l == i here (i < l); kept as written.
                yield item.with_id(i%l, instance)

        def __get__(self, instance, owner):
            # thanks, http://metapython.blogspot.com/2010/11/python-instance-methods-how-are-they.html
            # this makes Getter('foo') act like a bound method
            return types.MethodType(self, instance, owner)
class FormActionCondition(DocumentSchema):
    """
    The condition under which to open/update/close a case/referral

    Either {'type': 'if', 'question': '/xpath/to/node', 'answer': 'value'}
    in which case the action takes place if question has answer answer,
    or {'type': 'always'} in which case the action always takes place.
    """
    type = StringProperty(choices=["if", "always", "never"], default="never")
    question = StringProperty()
    answer = StringProperty()
    operator = StringProperty(choices=['=', 'selected'], default='=')

    def is_active(self):
        # 'never' conditions are inert; both 'if' and 'always' fire.
        return self.type in ('if', 'always')
class FormAction(DocumentSchema):
    """
    Corresponds to Case XML
    """
    condition = SchemaProperty(FormActionCondition)

    def is_active(self):
        return self.condition.is_active()

    @classmethod
    def get_action_paths(cls, action):
        """Yield every question path referenced by ``action``: the condition
        question plus any name/id paths and update/preload/case_properties
        paths the action's schema defines."""
        action_properties = action.properties()
        if action.condition.type == 'if':
            yield action.condition.question
        if 'name_path' in action_properties and action.name_path:
            yield action.name_path
        if 'case_name' in action_properties:
            yield action.case_name
        if 'external_id' in action_properties and action.external_id:
            yield action.external_id
        if 'update' in action_properties:
            # update maps case property -> question path
            for _, path in action.update.items():
                yield path
        if 'case_properties' in action_properties:
            for _, path in action.case_properties.items():
                yield path
        if 'preload' in action_properties:
            # preload maps question path -> case property, so yield the keys
            for path, _ in action.preload.items():
                yield path
class UpdateCaseAction(FormAction):
    # case property name -> question path
    update = DictProperty()
class PreloadAction(FormAction):
    # question path -> case property (see FormAction.get_action_paths)
    preload = DictProperty()

    def is_active(self):
        # Active iff anything is configured to preload; the condition is ignored.
        return bool(self.preload)
class UpdateReferralAction(FormAction):
    followup_date = StringProperty()

    def get_followup_date(self):
        """Return an xpath expression for the referral follow-up date,
        clamped to at least two days from today."""
        if self.followup_date:
            return "if(date({followup_date}) >= date(today()), {followup_date}, date(today() + 2))".format(
                followup_date=self.followup_date,
            )
        return self.followup_date or "date(today() + 2)"
class OpenReferralAction(UpdateReferralAction):
    # question path supplying the referral's name
    name_path = StringProperty()
class OpenCaseAction(FormAction):
    # question path supplying the new case's name
    name_path = StringProperty()
    # optional question path supplying the case's external id
    external_id = StringProperty()
class OpenSubCaseAction(FormAction):
    case_type = StringProperty()
    # question path supplying the subcase's name
    case_name = StringProperty()
    reference_id = StringProperty()
    # case property name -> question path
    case_properties = DictProperty()
    # repeat group node path, when the subcase is created per repeat iteration
    repeat_context = StringProperty()

    close_condition = SchemaProperty(FormActionCondition)
class FormActions(DocumentSchema):
    """All case/referral actions configured on a basic Form."""
    open_case = SchemaProperty(OpenCaseAction)
    update_case = SchemaProperty(UpdateCaseAction)
    close_case = SchemaProperty(FormAction)

    open_referral = SchemaProperty(OpenReferralAction)
    update_referral = SchemaProperty(UpdateReferralAction)
    close_referral = SchemaProperty(FormAction)

    case_preload = SchemaProperty(PreloadAction)
    referral_preload = SchemaProperty(PreloadAction)

    subcases = SchemaListProperty(OpenSubCaseAction)

    def all_property_names(self):
        """Every case property name touched by update, preload or subcase actions."""
        names = set()
        names.update(self.update_case.update.keys())
        # preload maps path -> property, so property names are the values
        names.update(self.case_preload.preload.values())
        for subcase in self.subcases:
            names.update(subcase.case_properties.keys())
        return names
class AdvancedAction(DocumentSchema):
    """Base class for advanced-module case actions."""
    case_type = StringProperty()
    # unique tag identifying this action within the form
    case_tag = StringProperty()
    # case property name -> question path
    case_properties = DictProperty()
    # case_tag of the parent action, when this case is indexed to another
    parent_tag = StringProperty()
    parent_reference_id = StringProperty(default='parent')

    close_condition = SchemaProperty(FormActionCondition)

    def get_paths(self):
        """Yield all question paths this action references."""
        for path in self.case_properties.values():
            yield path

        if self.close_condition.type == 'if':
            yield self.close_condition.question

    def get_property_names(self):
        return set(self.case_properties.keys())

    @property
    def case_session_var(self):
        # session variable name under which this action's case id is stored
        return 'case_id_{0}'.format(self.case_tag)
class AutoSelectCase(DocumentSchema):
    """
    Configuration for auto-selecting a case.

    Attributes:
        value_source    Reference to the source of the value. For mode = fixture,
                        this represents the FixtureDataType ID. For mode = case
                        this represents the 'case_tag' for the case.
                        The modes 'user' and 'raw' don't require a value_source.
        value_key       The actual field that contains the case ID. Can be a case
                        index or a user data key or a fixture field name or the raw
                        xpath expression.
    """
    mode = StringProperty(choices=[AUTO_SELECT_USER, AUTO_SELECT_FIXTURE, AUTO_SELECT_CASE, AUTO_SELECT_RAW])
    value_source = StringProperty()
    value_key = StringProperty(required=True)
class LoadUpdateAction(AdvancedAction):
    """
    details_module:     Use the case list configuration from this module to show the cases.
    preload:            Value from the case to load into the form.
    auto_select:        Configuration for auto-selecting the case
    show_product_stock: If True list the product stock using the module's Product List configuration.
    product_program:    Only show products for this CommTrack program.
    """
    details_module = StringProperty()
    # NOTE(review): per get_paths/get_property_names below, keys are treated
    # as case property names and values as question paths -- the reverse of
    # PreloadAction.preload.
    preload = DictProperty()
    auto_select = SchemaProperty(AutoSelectCase, default=None)
    show_product_stock = BooleanProperty(default=False)
    product_program = StringProperty()

    def get_paths(self):
        for path in super(LoadUpdateAction, self).get_paths():
            yield path

        for path in self.preload.values():
            yield path

    def get_property_names(self):
        names = super(LoadUpdateAction, self).get_property_names()
        names.update(self.preload.keys())
        return names
class AdvancedOpenCaseAction(AdvancedAction):
    # question path supplying the new case's name
    name_path = StringProperty()
    # repeat group node path, when a case is opened per repeat iteration
    repeat_context = StringProperty()

    open_condition = SchemaProperty(FormActionCondition)

    def get_paths(self):
        """Yield base paths plus the name path and open-condition question."""
        for path in super(AdvancedOpenCaseAction, self).get_paths():
            yield path

        yield self.name_path

        if self.open_condition.type == 'if':
            yield self.open_condition.question
class AdvancedFormActions(DocumentSchema):
    """Actions configured on an advanced form: cases to load/update and
    cases to open, plus memoized indexes over them by tag."""
    load_update_cases = SchemaListProperty(LoadUpdateAction)
    open_cases = SchemaListProperty(AdvancedOpenCaseAction)

    def get_all_actions(self):
        return self.load_update_cases + self.open_cases

    def get_subcase_actions(self):
        # actions that index into a parent case
        return (a for a in self.get_all_actions() if a.parent_tag)

    def get_open_subcase_actions(self, parent_case_type=None):
        """Yield open actions that have a parent; optionally restricted to
        those whose parent action has the given case type."""
        for action in [a for a in self.open_cases if a.parent_tag]:
            if not parent_case_type:
                yield action
            else:
                parent = self.actions_meta_by_tag[action.parent_tag]['action']
                if parent.case_type == parent_case_type:
                    yield action

    def get_case_tags(self):
        for action in self.get_all_actions():
            yield action.case_tag

    def get_action_from_tag(self, tag):
        return self.actions_meta_by_tag.get(tag, {}).get('action', None)

    @property
    def actions_meta_by_tag(self):
        return self._action_meta()['by_tag']

    @property
    def actions_meta_by_parent_tag(self):
        return self._action_meta()['by_parent_tag']

    def get_action_hierarchy(self, action):
        """Return [action, parent, grandparent, ...] following parent_tag
        links; raises ValueError on a circular reference."""
        current = action
        hierarchy = [current]

        while current and current.parent_tag:
            parent = self.get_action_from_tag(current.parent_tag)
            current = parent
            if parent:
                if parent in hierarchy:
                    circular = [a.case_tag for a in hierarchy + [parent]]
                    raise ValueError("Circular reference in subcase hierarchy: {0}".format(circular))
                hierarchy.append(parent)

        return hierarchy

    @property
    def auto_select_actions(self):
        # mode -> list of load actions using that auto-select mode
        return self._action_meta()['by_auto_select_mode']

    @memoized
    def _action_meta(self):
        # Build all tag-based indexes in a single pass over the actions.
        meta = {
            'by_tag': {},
            'by_parent_tag': {},
            'by_auto_select_mode': {
                AUTO_SELECT_USER: [],
                AUTO_SELECT_CASE: [],
                AUTO_SELECT_FIXTURE: [],
                AUTO_SELECT_RAW: [],
            }
        }

        def add_actions(type, action_list):
            # NOTE: `type` shadows the builtin within this closure.
            for action in action_list:
                meta['by_tag'][action.case_tag] = {
                    'type': type,
                    'action': action
                }
                if action.parent_tag:
                    meta['by_parent_tag'][action.parent_tag] = {
                        'type': type,
                        'action': action
                    }
                if type == 'load' and action.auto_select and action.auto_select.mode:
                    meta['by_auto_select_mode'][action.auto_select.mode].append(action)

        add_actions('load', self.load_update_cases)
        add_actions('open', self.open_cases)

        return meta
class FormSource(object):
    """Descriptor exposing a form's XML source, which is stored as an app
    attachment named '<unique_id>.xml' rather than inline on the document."""

    def __get__(self, form, form_cls):
        if not form:
            # accessed on the class, not an instance
            return self

        unique_id = form.get_unique_id()
        app = form.get_app()
        filename = "%s.xml" % unique_id

        # for backwards compatibility of really old apps
        try:
            old_contents = form['contents']
        except AttributeError:
            pass
        else:
            # migrate legacy inline 'contents' into an attachment on first read
            app.lazy_put_attachment(old_contents, filename)
            del form['contents']

        try:
            source = app.lazy_fetch_attachment(filename)
        except (ResourceNotFound, KeyError):
            source = ''

        return source

    def __set__(self, form, value):
        unique_id = form.get_unique_id()
        app = form.get_app()
        filename = "%s.xml" % unique_id
        app.lazy_put_attachment(value, filename)
        # new source invalidates any cached validation result
        form.validation_cache = None
        try:
            form.xmlns = form.wrapped_xform().data_node.tag_xmlns
        except Exception:
            # unparseable source: leave xmlns unset rather than failing the write
            form.xmlns = None
class CachedStringProperty(object):
    """Descriptor that stores a string in the Django cache (not on the
    document); the cache key is computed per-instance by the `key` callable."""

    def __init__(self, key):
        self.get_key = key

    def __get__(self, instance, owner):
        return self.get(self.get_key(instance))

    def __set__(self, instance, value):
        self.set(self.get_key(instance), value)

    @classmethod
    def get(cls, key):
        return cache.get(key)

    @classmethod
    def set(cls, key, value):
        cache.set(key, value, 7*24*60*60)  # cache for 7 days
class ScheduleVisit(DocumentSchema):
    """
    due:         Days after the anchor date that this visit is due
    late_window: Days after the due day that this visit is valid until
    """
    due = IntegerProperty()
    late_window = IntegerProperty()
class FormSchedule(DocumentSchema):
    """
    anchor:                  Case property containing a date after which this schedule becomes active
    expires:                 Days after the anchor date that this schedule expires (optional)
    visits:                  List of visits in this schedule
    post_schedule_increment: Repeat period for visits to occur after the last fixed visit (optional)
    transition_condition:    Condition under which the schedule transitions to the next phase
    termination_condition:   Condition under which the schedule terminates
    """
    anchor = StringProperty()
    expires = IntegerProperty()
    visits = SchemaListProperty(ScheduleVisit)
    post_schedule_increment = IntegerProperty()
    transition_condition = SchemaProperty(FormActionCondition)
    termination_condition = SchemaProperty(FormActionCondition)
class FormBase(DocumentSchema):
    """
    Part of a Managed Application; configuration for a form.
    Translates to a second-level menu on the phone
    """
    form_type = None  # overridden by subclasses, e.g. 'module_form'

    # lang code -> localized form name
    name = DictProperty(unicode)
    unique_id = StringProperty()
    show_count = BooleanProperty(default=False)
    xmlns = StringProperty()
    version = IntegerProperty()
    # XML source, stored as an app attachment (see FormSource descriptor)
    source = FormSource()
    # JSON-serialized validation result, kept in the Django cache
    validation_cache = CachedStringProperty(
        lambda self: "cache-%s-%s-validation" % (self.get_app().get_id, self.unique_id)
    )
    post_form_workflow = StringProperty(
        default=WORKFLOW_DEFAULT,
        choices=[WORKFLOW_DEFAULT, WORKFLOW_MODULE, WORKFLOW_PREVIOUS]
    )
    auto_gps_capture = BooleanProperty(default=False)

    @classmethod
    def wrap(cls, data):
        # validation results live in the cache, never on the stored doc
        data.pop('validation_cache', '')

        if cls is FormBase:
            # polymorphic dispatch on the couch doc_type
            doc_type = data['doc_type']
            if doc_type == 'Form':
                return Form.wrap(data)
            elif doc_type == 'AdvancedForm':
                return AdvancedForm.wrap(data)
            else:
                try:
                    return CareplanForm.wrap(data)
                except ValueError:
                    raise ValueError('Unexpected doc_type for Form', doc_type)
        else:
            return super(FormBase, cls).wrap(data)

    @classmethod
    def generate_id(cls):
        # 160 random bits as a hex string; [2:-1] strips '0x' and the
        # Python 2 long 'L' suffix
        return hex(random.getrandbits(160))[2:-1]

    @classmethod
    def get_form(cls, form_unique_id, and_app=False):
        """
        Look a form up by unique id via the app_manager/xforms_index view.

        Returns the form, or (form, app) when and_app=True.
        Raises XFormIdNotUnique or ResourceNotFound for bad ids.
        """
        try:
            d = get_db().view(
                'app_manager/xforms_index',
                key=form_unique_id
            ).one()
        except MultipleResultsFound as e:
            raise XFormIdNotUnique(
                "xform id '%s' not unique: %s" % (form_unique_id, e)
            )
        if d:
            d = d['value']
        else:
            raise ResourceNotFound()
        # unpack the dict into variables app_id, module_id, form_id
        app_id, unique_id = [d[key] for key in ('app_id', 'unique_id')]

        app = Application.get(app_id)
        form = app.get_form(unique_id)
        if and_app:
            return form, app
        else:
            return form

    @property
    def schedule_form_id(self):
        # truncated unique id; NOTE(review): presumably a short key for the
        # visit scheduler -- confirm collision risk is acceptable
        return self.unique_id[:6]

    def wrapped_xform(self):
        return XForm(self.source)

    def validate_form(self):
        """
        Validate this form's XML, caching the serialized outcome.

        Returns self when valid; raises XFormValidationError otherwise.
        An unreadable cache entry is discarded and validation retried.
        """
        vc = self.validation_cache
        if vc is None:
            try:
                validate_xform(self.source,
                               version=self.get_app().application_version)
            except XFormValidationError as e:
                validation_dict = {
                    "fatal_error": e.fatal_error,
                    "validation_problems": e.validation_problems,
                    "version": e.version,
                }
                vc = self.validation_cache = json.dumps(validation_dict)
            else:
                # empty string marks "known valid" (still falsy below)
                vc = self.validation_cache = ""
        if vc:
            try:
                raise XFormValidationError(**json.loads(vc))
            except ValueError:
                # corrupt cache entry: clear it and re-validate
                self.validation_cache = None
                return self.validate_form()
        return self

    def validate_for_build(self, validate_module=True):
        """Collect build-blocking errors for this form as a list of dicts."""
        errors = []

        try:
            module = self.get_module()
        except AttributeError:
            module = None

        meta = {
            'form_type': self.form_type,
            'module': module.get_module_info() if module else {},
            'form': {"id": self.id if hasattr(self, 'id') else None, "name": self.name}
        }

        xml_valid = False
        if self.source == '':
            errors.append(dict(type="blank form", **meta))
        else:
            try:
                _parse_xml(self.source)
                xml_valid = True
            except XFormError as e:
                errors.append(dict(
                    type="invalid xml",
                    message=unicode(e) if self.source else '',
                    **meta
                ))
            except ValueError:
                logging.error("Failed: _parse_xml(string=%r)" % self.source)
                raise
            else:
                try:
                    self.validate_form()
                except XFormValidationError as e:
                    error = {'type': 'validation error', 'validation_message': unicode(e)}
                    error.update(meta)
                    errors.append(error)

        # subclass-specific checks (e.g. case action validation)
        errors.extend(self.extended_build_validation(meta, xml_valid, validate_module))

        return errors

    def extended_build_validation(self, error_meta, xml_valid, validate_module=True):
        """
        Override to perform additional validation during build process.
        """
        return []

    def get_unique_id(self):
        """
        Return unique_id if it exists, otherwise initialize it

        Does _not_ force a save, so it's the caller's responsibility to save the app
        """
        if not self.unique_id:
            self.unique_id = FormBase.generate_id()
        return self.unique_id

    def get_app(self):
        return self._app

    def get_version(self):
        return self.version if self.version else self.get_app().version

    def add_stuff_to_xform(self, xform):
        # Apply app-level language and versioning config to the XForm.
        app = self.get_app()
        xform.exclude_languages(app.build_langs)
        xform.set_default_language(app.build_langs[0])
        xform.normalize_itext()
        xform.set_version(self.get_version())

    def render_xform(self):
        xform = XForm(self.source)
        self.add_stuff_to_xform(xform)
        return xform.render()

    def get_questions(self, langs, **kwargs):
        return XForm(self.source).get_questions(langs, **kwargs)

    @memoized
    def get_case_property_name_formatter(self):
        """Get a function that formats case property names

        The returned function requires two arguments
        `(case_property_name, data_path)` and returns a string.
        """
        try:
            valid_paths = {question['value']: question['tag']
                           for question in self.get_questions(langs=[])}
        except XFormError as e:
            # punt on invalid xml (sorry, no rich attachments)
            valid_paths = {}

        def format_key(key, path):
            # upload questions get the attachment prefix on the property name
            if valid_paths.get(path) == "upload":
                return u"{}{}".format(ATTACHMENT_PREFIX, key)
            return key
        return format_key

    def export_json(self, dump_json=True):
        source = self.to_json()
        del source['unique_id']
        return json.dumps(source) if dump_json else source

    def rename_lang(self, old_lang, new_lang):
        _rename_key(self.name, old_lang, new_lang)
        try:
            self.rename_xform_language(old_lang, new_lang)
        except XFormError:
            # source may be blank/unparseable; name rename still applies
            pass

    def rename_xform_language(self, old_code, new_code):
        source = XForm(self.source)
        source.rename_language(old_code, new_code)
        source = source.render()
        self.source = source

    def default_name(self):
        app = self.get_app()
        return trans(
            self.name,
            [app.default_language] + app.build_langs,
            include_lang=False
        )

    @property
    def full_path_name(self):
        return "%(app_name)s > %(module_name)s > %(form_name)s" % {
            'app_name': self.get_app().name,
            'module_name': self.get_module().default_name(),
            'form_name': self.default_name()
        }

    @property
    def has_fixtures(self):
        # cheap substring check for item-list fixture references in the source
        return 'src="jr://fixture/item-list:' in self.source

    def get_auto_gps_capture(self):
        app = self.get_app()
        if app.build_version and app.enable_auto_gps:
            return self.auto_gps_capture or app.auto_gps_capture
        else:
            return False
class IndexedFormBase(FormBase, IndexedSchema):
    """A FormBase that lives in a module's `forms` list; parent chain is
    form -> module -> app."""

    def get_app(self):
        return self._parent._parent

    def get_module(self):
        return self._parent

    def get_case_type(self):
        return self._parent.case_type

    def check_case_properties(self, all_names=None, subcase_names=None, case_tag=None):
        """Return error dicts for illegal or reserved case property names."""
        all_names = all_names or []
        subcase_names = subcase_names or []
        errors = []

        # reserved_words are hard-coded in three different places!
        # Here, case-config-ui-*.js, and module_view.html
        reserved_words = load_case_reserved_words()
        for key in all_names:
            try:
                validate_property(key)
            except ValueError:
                errors.append({'type': 'update_case word illegal', 'word': key, 'case_tag': case_tag})
            # strip any path prefix before the reserved-word check
            _, key = split_path(key)
            if key in reserved_words:
                errors.append({'type': 'update_case uses reserved word', 'word': key, 'case_tag': case_tag})

        # no parent properties for subcase
        for key in subcase_names:
            if not re.match(r'^[a-zA-Z][\w_-]*$', key):
                errors.append({'type': 'update_case word illegal', 'word': key, 'case_tag': case_tag})

        return errors

    def check_paths(self, paths):
        """Return error dicts for paths not present in the form's questions,
        or for multimedia (upload) paths when the app doesn't allow them."""
        errors = []
        try:
            valid_paths = {question['value']: question['tag']
                           for question in self.get_questions(langs=[])}
        except XFormError as e:
            errors.append({'type': 'invalid xml', 'message': unicode(e)})
        else:
            no_multimedia = not self.get_app().enable_multimedia_case_property
            for path in set(paths):
                if path not in valid_paths:
                    errors.append({'type': 'path error', 'path': path})
                elif no_multimedia and valid_paths[path] == "upload":
                    errors.append({'type': 'multimedia case property not supported', 'path': path})

        return errors
class JRResourceProperty(StringProperty):
    """A StringProperty whose value must be a JavaRosa resource URI
    (i.e. start with 'jr://')."""

    def validate(self, value, required=True):
        super(JRResourceProperty, self).validate(value, required)
        if value is not None and not value.startswith('jr://'):
            # Fixed: the message previously had an unbalanced quote
            # ("...start with 'jr://").
            raise BadValueError("JR Resources must start with 'jr://'")
        return value
class NavMenuItemMediaMixin(DocumentSchema):
    # jr:// resource URIs for the menu item's icon image and audio
    media_image = JRResourceProperty(required=False)
    media_audio = JRResourceProperty(required=False)
class Form(IndexedFormBase, NavMenuItemMediaMixin):
    """A basic-module form with declarative case actions."""
    form_type = 'module_form'

    form_filter = StringProperty()
    requires = StringProperty(choices=["case", "referral", "none"], default="none")
    actions = SchemaProperty(FormActions)

    def add_stuff_to_xform(self, xform):
        super(Form, self).add_stuff_to_xform(xform)
        # inject case blocks and meta per the configured actions
        xform.add_case_and_meta(self)

    def all_other_forms_require_a_case(self):
        m = self.get_module()
        return all([form.requires == 'case' for form in m.get_forms() if form.id != self.id])

    def _get_active_actions(self, types):
        """Map action_type -> action for the given types, keeping only
        active actions (non-empty lists, or conditions that fire)."""
        actions = {}
        for action_type in types:
            a = getattr(self.actions, action_type)
            if isinstance(a, list):
                if a:
                    actions[action_type] = a
            elif a.is_active():
                actions[action_type] = a
        return actions

    def active_actions(self):
        """Active actions relevant to this form, filtered by app version
        and what the form requires."""
        if self.get_app().application_version == APP_V1:
            action_types = (
                'open_case', 'update_case', 'close_case',
                'open_referral', 'update_referral', 'close_referral',
                'case_preload', 'referral_preload'
            )
        else:
            if self.requires == 'none':
                action_types = (
                    'open_case', 'update_case', 'close_case', 'subcases',
                )
            elif self.requires == 'case':
                action_types = (
                    'update_case', 'close_case', 'case_preload', 'subcases',
                )
            else:
                # this is left around for legacy migrated apps
                action_types = (
                    'open_case', 'update_case', 'close_case',
                    'case_preload', 'subcases',
                )
        return self._get_active_actions(action_types)

    def active_non_preloader_actions(self):
        return self._get_active_actions((
            'open_case', 'update_case', 'close_case',
            'open_referral', 'update_referral', 'close_referral'))

    def check_actions(self):
        """Validate the form's case actions; returns a list of error dicts."""
        errors = []

        subcase_names = set()
        for subcase_action in self.actions.subcases:
            if not subcase_action.case_type:
                errors.append({'type': 'subcase has no case type'})

            subcase_names.update(subcase_action.case_properties)

        if self.requires == 'none' and self.actions.open_case.is_active() \
                and not self.actions.open_case.name_path:
            errors.append({'type': 'case_name required'})

        errors.extend(self.check_case_properties(
            all_names=self.actions.all_property_names(),
            subcase_names=subcase_names
        ))

        def generate_paths():
            # flatten every path referenced by every active action
            for action in self.active_actions().values():
                if isinstance(action, list):
                    actions = action
                else:
                    actions = [action]
                for action in actions:
                    for path in FormAction.get_action_paths(action):
                        yield path

        errors.extend(self.check_paths(generate_paths()))

        return errors

    def requires_case(self):
        # all referrals also require cases
        return self.requires in ("case", "referral")

    def requires_case_type(self):
        return self.requires_case() or \
            bool(self.active_non_preloader_actions())

    def requires_referral(self):
        return self.requires == "referral"

    def extended_build_validation(self, error_meta, xml_valid, validate_module=True):
        errors = []
        if xml_valid:
            # action errors only make sense once the xml parses
            for error in self.check_actions():
                error.update(error_meta)
                errors.append(error)

        if validate_module:
            needs_case_type = False
            needs_case_detail = False
            needs_referral_detail = False

            if self.requires_case():
                needs_case_detail = True
                needs_case_type = True
            if self.requires_case_type():
                needs_case_type = True
            if self.requires_referral():
                needs_referral_detail = True

            errors.extend(self.get_module().get_case_errors(
                needs_case_type=needs_case_type,
                needs_case_detail=needs_case_detail,
                needs_referral_detail=needs_referral_detail,
            ))
        return errors

    def get_case_updates(self, case_type):
        """Case property names this form writes for the given case type,
        with the attachment prefix applied to upload questions."""
        if self.get_module().case_type == case_type:
            format_key = self.get_case_property_name_formatter()
            return [format_key(*item)
                    for item in self.actions.update_case.update.items()]
        return []

    @memoized
    def get_parent_types_and_contributed_properties(self, module_case_type, case_type):
        """For subcases of `case_type`: the (parent_type, relationship) pairs
        and the set of case properties this form contributes."""
        parent_types = set()
        case_properties = set()
        for subcase in self.actions.subcases:
            if subcase.case_type == case_type:
                case_properties.update(
                    subcase.case_properties.keys()
                )
                if case_type != module_case_type and (
                        self.actions.open_case.is_active() or
                        self.actions.update_case.is_active() or
                        self.actions.close_case.is_active()):
                    parent_types.add((module_case_type, 'parent'))
        return parent_types, case_properties
class UserRegistrationForm(FormBase):
    """Form that registers a mobile user via xform user-registration bindings."""
    form_type = 'user_registration'

    # question paths for the credentials, plus extra user-data paths
    username_path = StringProperty(default='username')
    password_path = StringProperty(default='password')
    data_paths = DictProperty()

    def add_stuff_to_xform(self, xform):
        super(UserRegistrationForm, self).add_stuff_to_xform(xform)
        xform.add_user_registration(self.username_path, self.password_path, self.data_paths)
class MappingItem(DocumentSchema):
    key = StringProperty()
    # lang => localized string
    value = DictProperty()
class DetailColumn(IndexedSchema):
    """
    Represents a column in case selection screen on the phone. Ex:
        {
            'header': {'en': 'Sex', 'por': 'Sexo'},
            'model': 'case',
            'field': 'sex',
            'format': 'enum',
            'xpath': '.',
            'enum': [
                {'key': 'm', 'value': {'en': 'Male', 'por': 'Macho'}},
                {'key': 'f', 'value': {'en': 'Female', 'por': 'Fêmea'}},
            ],
        }
    """
    header = DictProperty()
    model = StringProperty()
    field = StringProperty()
    format = StringProperty()

    enum = SchemaListProperty(MappingItem)
    late_flag = IntegerProperty(default=30)
    advanced = StringProperty(default="")
    calc_xpath = StringProperty(default=".")
    filter_xpath = StringProperty(default="")
    time_ago_interval = FloatProperty(default=365.25)

    @property
    def enum_dict(self):
        """for backwards compatibility with building 1.0 apps"""
        import warnings
        warnings.warn('You should not use enum_dict. Use enum instead',
                      DeprecationWarning)
        return dict((item.key, item.value) for item in self.enum)

    def rename_lang(self, old_lang, new_lang):
        for dct in [self.header] + [item.value for item in self.enum]:
            _rename_key(dct, old_lang, new_lang)

    @property
    def field_type(self):
        # the part before ':' in field, defaulting to 'property'
        if FIELD_SEPARATOR in self.field:
            return self.field.split(FIELD_SEPARATOR, 1)[0]
        else:
            return 'property'  # equivalent to property:parent/case_property

    @property
    def field_property(self):
        # the part after ':' in field, or the whole field if no separator
        if FIELD_SEPARATOR in self.field:
            return self.field.split(FIELD_SEPARATOR, 1)[1]
        else:
            return self.field

    class TimeAgoInterval(object):
        # day-count equivalents used by the 'time-ago' column format
        map = {
            'day': 1.0,
            'week': 7.0,
            'month': 30.4375,
            'year': 365.25
        }

        @classmethod
        def get_from_old_format(cls, format):
            if format == 'years-ago':
                return cls.map['year']
            elif format == 'months-ago':
                return cls.map['month']

    @classmethod
    def wrap(cls, data):
        # Lazy migration: 'years-ago'/'months-ago' formats become 'time-ago'
        # with an explicit interval in days.
        if data.get('format') in ('months-ago', 'years-ago'):
            data['time_ago_interval'] = cls.TimeAgoInterval.get_from_old_format(data['format'])
            data['format'] = 'time-ago'

        # Lazy migration: enum used to be a dict, now is a list
        if isinstance(data.get('enum'), dict):
            data['enum'] = sorted({'key': key, 'value': value}
                                  for key, value in data['enum'].items())

        return super(DetailColumn, cls).wrap(data)
class SortElement(IndexedSchema):
    """Sort configuration for one case-list column."""
    field = StringProperty()
    type = StringProperty()
    direction = StringProperty()

    def values(self):
        """Return this element's configuration as a plain dict."""
        return {
            'field': self.field,
            'type': self.type,
            'direction': self.direction,
        }
class SortOnlyDetailColumn(DetailColumn):
    """This is a mock type, not intended to be part of a document"""

    @property
    def _i(self):
        """
        assert that SortOnlyDetailColumn never has ._i or .id called
        since it should never be in an app document
        """
        raise NotImplementedError()
class Detail(IndexedSchema):
    """
    Full configuration for a case selection screen
    """
    display = StringProperty(choices=['short', 'long'])

    columns = SchemaListProperty(DetailColumn)
    get_columns = IndexedSchema.Getter('columns')

    sort_elements = SchemaListProperty(SortElement)

    @parse_int([1])
    def get_column(self, i):
        # i % len(...) == i for valid indexes; kept as written
        return self.columns[i].with_id(i%len(self.columns), self)

    def rename_lang(self, old_lang, new_lang):
        for column in self.columns:
            column.rename_lang(old_lang, new_lang)

    def filter_xpath(self):
        """AND together the escaped filter expressions of all 'filter' columns."""
        filters = []
        for i,column in enumerate(self.columns):
            if column.format == 'filter':
                value = dot_interpolate(
                    column.filter_xpath,
                    '%s_%s_%s' % (column.model, column.field, i + 1)
                )
                filters.append("(%s)" % value)
        xpath = ' and '.join(filters)
        return partial_escape(xpath)
class CaseList(IndexedSchema):
    # lang -> localized label for the case list menu item
    label = DictProperty()
    show = BooleanProperty(default=False)

    def rename_lang(self, old_lang, new_lang):
        for dct in (self.label,):
            _rename_key(dct, old_lang, new_lang)
class ParentSelect(DocumentSchema):
    # When active, cases are first filtered by selecting a related case
    # from the referenced module's case list.
    active = BooleanProperty(default=False)
    relationship = StringProperty(default='parent')
    module_id = StringProperty()
class DetailPair(DocumentSchema):
    """The short (list) and long (detail) screens for one case type."""
    short = SchemaProperty(Detail)
    long = SchemaProperty(Detail)

    @classmethod
    def wrap(cls, data):
        self = super(DetailPair, cls).wrap(data)
        # normalize display markers regardless of what was stored
        self.short.display = 'short'
        self.long.display = 'long'
        return self
class ModuleBase(IndexedSchema, NavMenuItemMediaMixin):
    """Base class for app modules; parent chain is module -> app."""
    # lang code -> localized module name
    name = DictProperty(unicode)
    unique_id = StringProperty()
    case_type = StringProperty()

    @classmethod
    def wrap(cls, data):
        if cls is ModuleBase:
            # polymorphic dispatch on the couch doc_type
            doc_type = data['doc_type']
            if doc_type == 'Module':
                return Module.wrap(data)
            elif doc_type == 'CareplanModule':
                return CareplanModule.wrap(data)
            elif doc_type == 'AdvancedModule':
                return AdvancedModule.wrap(data)
            else:
                raise ValueError('Unexpected doc_type for Module', doc_type)
        else:
            return super(ModuleBase, cls).wrap(data)

    def get_or_create_unique_id(self):
        """
        It is the caller's responsibility to save the Application
        after calling this function.

        WARNING: If called on the same doc in different requests without saving,
        this function will return a different uuid each time,
        likely causing unexpected behavior
        """
        if not self.unique_id:
            self.unique_id = FormBase.generate_id()
        return self.unique_id

    get_forms = IndexedSchema.Getter('forms')

    @parse_int([1])
    def get_form(self, i):
        try:
            return self.forms[i].with_id(i % len(self.forms), self)
        except IndexError:
            raise FormNotFoundException()

    def requires_case_details(self):
        # overridden by subclasses that show a case list
        return False

    def get_case_types(self):
        return set([self.case_type])

    def get_module_info(self):
        return {
            'id': self.id,
            'name': self.name,
        }

    def get_app(self):
        return self._parent

    def default_name(self):
        app = self.get_app()
        return trans(
            self.name,
            [app.default_language] + app.build_langs,
            include_lang=False
        )

    def rename_lang(self, old_lang, new_lang):
        # rename the module's own label, then cascade into forms and details
        _rename_key(self.name, old_lang, new_lang)
        for form in self.get_forms():
            form.rename_lang(old_lang, new_lang)
        for _, detail, _ in self.get_details():
            detail.rename_lang(old_lang, new_lang)

    def validate_detail_columns(self, columns):
        """Yield error dicts for invalid enum keys, filter xpaths, or
        location xpaths among the given detail columns."""
        from corehq.apps.app_manager.suite_xml import FIELD_TYPE_LOCATION
        from corehq.apps.locations.util import parent_child
        hierarchy = None
        for column in columns:
            if column.format in ('enum', 'enum-image'):
                for item in column.enum:
                    key = item.key
                    if not re.match('^([\w_-]*)$', key):
                        yield {
                            'type': 'invalid id key',
                            'key': key,
                            'module': self.get_module_info(),
                        }
            elif column.format == 'filter':
                try:
                    etree.XPath(column.filter_xpath or '')
                except etree.XPathSyntaxError:
                    yield {
                        'type': 'invalid filter xpath',
                        'module': self.get_module_info(),
                        'column': column,
                    }
            elif column.field_type == FIELD_TYPE_LOCATION:
                # lazily fetch the location hierarchy only when needed
                hierarchy = hierarchy or parent_child(self.get_app().domain)
                try:
                    LocationXpath('').validate(column.field_property, hierarchy)
                except LocationXpathValidationError, e:
                    yield {
                        'type': 'invalid location xpath',
                        'details': unicode(e),
                        'module': self.get_module_info(),
                        'column': column,
                    }

    def validate_for_build(self):
        """Collect build-blocking errors for this module."""
        errors = []
        if not self.forms:
            errors.append({
                'type': 'no forms',
                'module': self.get_module_info(),
            })
        if self.requires_case_details():
            errors.extend(self.get_case_errors(
                needs_case_type=True,
                needs_case_detail=True
            ))
        return errors
class Module(ModuleBase):
    """
    A group of related forms, and configuration that applies to them all.
    Translates to a top-level menu on the phone.
    """
    module_type = 'basic'
    case_label = DictProperty()
    referral_label = DictProperty()
    forms = SchemaListProperty(Form)
    case_details = SchemaProperty(DetailPair)
    ref_details = SchemaProperty(DetailPair)
    put_in_root = BooleanProperty(default=False)
    case_list = SchemaProperty(CaseList)
    referral_list = SchemaProperty(CaseList)
    task_list = SchemaProperty(CaseList)
    parent_select = SchemaProperty(ParentSelect)

    @classmethod
    def wrap(cls, data):
        """Wrap, migrating the legacy 'details' 4-tuple into the
        case_details/ref_details DetailPair properties."""
        if 'details' in data:
            try:
                case_short, case_long, ref_short, ref_long = data['details']
            except ValueError:
                # "need more than 0 values to unpack"
                pass
            else:
                data['case_details'] = {
                    'short': case_short,
                    'long': case_long,
                }
                data['ref_details'] = {
                    'short': ref_short,
                    'long': ref_long,
                }
            finally:
                del data['details']
        return super(Module, cls).wrap(data)

    @classmethod
    def new_module(cls, name, lang):
        """Create a fresh module with a default 'Name' case detail."""
        detail = Detail(
            columns=[DetailColumn(
                format='plain',
                header={(lang or 'en'): ugettext("Name")},
                field='name',
                model='case',
            )]
        )
        module = Module(
            name={(lang or 'en'): name or ugettext("Untitled Module")},
            forms=[],
            case_type='',
            case_details=DetailPair(
                short=Detail(detail.to_json()),
                long=Detail(detail.to_json()),
            ),
        )
        module.get_or_create_unique_id()
        return module

    def new_form(self, name, lang, attachment=''):
        """Append a new Form and set its XForm source."""
        form = Form(
            name={lang if lang else "en": name if name else _("Untitled Form")},
        )
        self.forms.append(form)
        form = self.get_form(-1)
        form.source = attachment
        return form

    def add_insert_form(self, from_module, form, index=None, with_source=False):
        """Insert *form* at *index* (append when index is None).

        Accepts a Form as-is, or converts an AdvancedForm that has no
        case actions; raises IncompatibleFormTypeException otherwise.
        """
        if isinstance(form, Form):
            new_form = form
        elif isinstance(form, AdvancedForm) and not form.actions.get_all_actions():
            new_form = Form(
                name=form.name,
                form_filter=form.form_filter,
                media_image=form.media_image,
                media_audio=form.media_audio
            )
            new_form._parent = self
            form._parent = self
            if with_source:
                new_form.source = form.source
        else:
            raise IncompatibleFormTypeException()
        # Bug fix: compare against None so that index=0 inserts at the
        # front instead of falling through to append.
        if index is not None:
            self.forms.insert(index, new_form)
            return self.get_form(index)
        self.forms.append(new_form)
        return self.get_form(-1)

    def rename_lang(self, old_lang, new_lang):
        super(Module, self).rename_lang(old_lang, new_lang)
        for case_list in (self.case_list, self.referral_list):
            case_list.rename_lang(old_lang, new_lang)

    def get_details(self):
        """(name, detail, enabled) triples for this module's detail screens."""
        return (
            ('case_short', self.case_details.short, True),
            ('case_long', self.case_details.long, True),
            ('ref_short', self.ref_details.short, False),
            ('ref_long', self.ref_details.long, False),
        )

    @property
    def detail_sort_elements(self):
        # Best effort: older docs may not carry sort_elements at all.
        try:
            return self.case_details.short.sort_elements
        except Exception:
            return []

    def validate_for_build(self):
        """Module-level build errors on top of the base checks."""
        errors = super(Module, self).validate_for_build()
        for sort_element in self.detail_sort_elements:
            try:
                validate_detail_screen_field(sort_element.field)
            except ValueError:
                errors.append({
                    'type': 'invalid sort field',
                    'field': sort_element.field,
                    'module': self.get_module_info(),
                })
        if self.parent_select.active and not self.parent_select.module_id:
            errors.append({
                'type': 'no parent select id',
                'module': self.get_module_info()
            })
        return errors

    def export_json(self, dump_json=True, keep_unique_id=False):
        """Module source, optionally stripping per-form unique ids."""
        source = self.to_json()
        if not keep_unique_id:
            for form in source['forms']:
                del form['unique_id']
        return json.dumps(source) if dump_json else source

    def export_jvalue(self):
        return self.export_json(dump_json=False, keep_unique_id=True)

    def requires(self):
        """Strongest requirement among this module's forms and lists:
        'referral' > 'case' > 'none'."""
        r = set(["none"])
        for form in self.get_forms():
            r.add(form.requires)
        if self.case_list.show:
            r.add('case')
        if self.referral_list.show:
            r.add('referral')
        for val in ("referral", "case", "none"):
            if val in r:
                return val

    def detail_types(self):
        return {
            "referral": ["case_short", "case_long", "ref_short", "ref_long"],
            "case": ["case_short", "case_long"],
            "none": []
        }[self.requires()]

    def requires_case_details(self):
        """True when the case list is shown or any form requires a case."""
        if self.case_list.show:
            return True
        return any(form.requires_case() for form in self.get_forms())

    @memoized
    def all_forms_require_a_case(self):
        return all([form.requires == 'case' for form in self.get_forms()])

    def get_case_errors(self, needs_case_type, needs_case_detail, needs_referral_detail=False):
        """Yield case-configuration errors for this module."""
        module_info = self.get_module_info()
        if needs_case_type and not self.case_type:
            yield {
                'type': 'no case type',
                'module': module_info,
            }
        if needs_case_detail:
            if not self.case_details.short.columns:
                yield {
                    'type': 'no case detail',
                    'module': module_info,
                }
            columns = self.case_details.short.columns + self.case_details.long.columns
            errors = self.validate_detail_columns(columns)
            for error in errors:
                yield error
        if needs_referral_detail and not self.ref_details.short.columns:
            yield {
                'type': 'no ref detail',
                'module': module_info,
            }
class AdvancedForm(IndexedFormBase, NavMenuItemMediaMixin):
    # A form whose case management is an explicit list of
    # load/update/open actions (AdvancedFormActions) rather than the
    # classic single-case model.
    form_type = 'advanced_form'
    # XPath condition controlling when the form is shown on the phone.
    form_filter = StringProperty()
    actions = SchemaProperty(AdvancedFormActions)
    # Visit schedule; only meaningful when the owning module has_schedule.
    schedule = SchemaProperty(FormSchedule, default=None)

    def add_stuff_to_xform(self, xform):
        """Add meta plus advanced case-management blocks to the XForm."""
        super(AdvancedForm, self).add_stuff_to_xform(xform)
        xform.add_case_and_meta_advanced(self)

    @property
    def requires(self):
        # 'case' as soon as any load/update action exists, else 'none'.
        return 'case' if self.actions.load_update_cases else 'none'

    def all_other_forms_require_a_case(self):
        """True if every *other* form in this module requires a case."""
        m = self.get_module()
        return all([form.requires == 'case' for form in m.get_forms() if form.id != self.id])

    def check_actions(self):
        """Validate this form's case actions; return a list of error dicts."""
        errors = []
        for action in self.actions.get_subcase_actions():
            # Every subcase must point at an existing parent action tag.
            if action.parent_tag not in self.actions.get_case_tags():
                errors.append({'type': 'missing parent tag', 'case_tag': action.parent_tag})
            if isinstance(action, AdvancedOpenCaseAction):
                if not action.name_path:
                    errors.append({'type': 'case_name required', 'case_tag': action.case_tag})
                meta = self.actions.actions_meta_by_tag.get(action.parent_tag)
                # A subcase opened under a repeating parent must itself live
                # inside (a descendant of) the parent's repeat context.
                if meta and meta['type'] == 'open' and meta['action'].repeat_context:
                    if not action.repeat_context or not action.repeat_context.startswith(meta['action'].repeat_context):
                        errors.append({'type': 'subcase repeat context', 'case_tag': action.case_tag})
            try:
                self.actions.get_action_hierarchy(action)
            except ValueError:
                # parent_tag chain loops back on itself
                errors.append({'type': 'circular ref', 'case_tag': action.case_tag})
            errors.extend(self.check_case_properties(
                subcase_names=action.get_property_names(),
                case_tag=action.case_tag
            ))
        for action in self.actions.get_all_actions():
            # A case type is mandatory unless the action auto-selects its case.
            if not action.case_type and (not isinstance(action, LoadUpdateAction) or not action.auto_select):
                errors.append({'type': "no case type in action", 'case_tag': action.case_tag})
            if isinstance(action, LoadUpdateAction) and action.auto_select:
                mode = action.auto_select.mode
                if not action.auto_select.value_key:
                    # Human-readable name of the missing key, per mode.
                    key_name = {
                        AUTO_SELECT_CASE: _('Case property'),
                        AUTO_SELECT_FIXTURE: _('Lookup Table field'),
                        AUTO_SELECT_USER: _('custom user property'),
                        AUTO_SELECT_RAW: _('custom XPath expression'),
                    }[mode]
                    errors.append({'type': 'auto select key', 'key_name': key_name})
                if not action.auto_select.value_source:
                    # Only case/fixture modes require a source.
                    source_names = {
                        AUTO_SELECT_CASE: _('Case tag'),
                        AUTO_SELECT_FIXTURE: _('Lookup Table tag'),
                    }
                    if mode in source_names:
                        errors.append({'type': 'auto select source', 'source_name': source_names[mode]})
                elif mode == AUTO_SELECT_CASE:
                    # A case-mode source must name an existing action tag.
                    case_tag = action.auto_select.value_source
                    if not self.actions.get_action_from_tag(case_tag):
                        errors.append({'type': 'auto select case ref', 'case_tag': action.case_tag})
            errors.extend(self.check_case_properties(
                all_names=action.get_property_names(),
                case_tag=action.case_tag
            ))
        if self.form_filter:
            # A form filter is meaningless unless at least one case is
            # loaded without auto-select.
            if not any(action for action in self.actions.load_update_cases if not action.auto_select):
                errors.append({'type': "filtering without case"})

        def generate_paths():
            # Flatten every xpath referenced by any action.
            for action in self.actions.get_all_actions():
                for path in action.get_paths():
                    yield path
        errors.extend(self.check_paths(generate_paths()))
        return errors

    def extended_build_validation(self, error_meta, xml_valid, validate_module=True):
        """Build-time validation beyond the XForm XML check; *error_meta*
        is merged into every error produced."""
        errors = []
        if xml_valid:
            for error in self.check_actions():
                error.update(error_meta)
                errors.append(error)
        module = self.get_module()
        # Scheduled modules require each form to carry an anchored schedule.
        if module.has_schedule and not (self.schedule and self.schedule.anchor):
            error = {
                'type': 'validation error',
                'validation_message': _("All forms in this module require a visit schedule.")
            }
            error.update(error_meta)
            errors.append(error)
        if validate_module:
            errors.extend(module.get_case_errors(
                needs_case_type=False,
                needs_case_detail=module.requires_case_details(),
                needs_referral_detail=False,
            ))
        return errors

    def get_case_updates(self, case_type):
        """All (formatted) case-property names this form writes for
        *case_type*."""
        updates = set()
        format_key = self.get_case_property_name_formatter()
        for action in self.actions.get_all_actions():
            if action.case_type == case_type:
                updates.update(format_key(*item)
                               for item in action.case_properties.iteritems())
        return updates

    @memoized
    def get_parent_types_and_contributed_properties(self, module_case_type, case_type):
        """Return (parent (type, relationship) pairs, contributed case
        properties) for subcases of *case_type* opened by this form."""
        parent_types = set()
        case_properties = set()
        for subcase in self.actions.get_subcase_actions():
            if subcase.case_type == case_type:
                case_properties.update(
                    subcase.case_properties.keys()
                )
                parent = self.actions.get_action_from_tag(subcase.parent_tag)
                if parent:
                    parent_types.add((parent.case_type, subcase.parent_reference_id or 'parent'))
        return parent_types, case_properties
class AdvancedModule(ModuleBase):
    """Module whose forms use explicit advanced case actions."""
    module_type = 'advanced'
    case_label = DictProperty()
    forms = SchemaListProperty(AdvancedForm)
    case_details = SchemaProperty(DetailPair)
    product_details = SchemaProperty(DetailPair)
    put_in_root = BooleanProperty(default=False)
    case_list = SchemaProperty(CaseList)
    has_schedule = BooleanProperty()

    @classmethod
    def new_module(cls, name, lang):
        """Create a fresh AdvancedModule with default case and product
        detail screens."""
        detail = Detail(
            columns=[DetailColumn(
                format='plain',
                header={(lang or 'en'): ugettext("Name")},
                field='name',
                model='case',
            )]
        )
        module = AdvancedModule(
            name={(lang or 'en'): name or ugettext("Untitled Module")},
            forms=[],
            case_type='',
            case_details=DetailPair(
                short=Detail(detail.to_json()),
                long=Detail(detail.to_json()),
            ),
            product_details=DetailPair(
                short=Detail(
                    columns=[
                        DetailColumn(
                            format='plain',
                            header={(lang or 'en'): ugettext("Product")},
                            field='name',
                            model='product',
                        ),
                    ],
                ),
                long=Detail(),
            ),
        )
        module.get_or_create_unique_id()
        return module

    def new_form(self, name, lang, attachment=''):
        """Append a new AdvancedForm and set its XForm source."""
        form = AdvancedForm(
            name={lang if lang else "en": name if name else _("Untitled Form")},
        )
        self.forms.append(form)
        form = self.get_form(-1)
        form.source = attachment
        return form

    def add_insert_form(self, from_module, form, index=None, with_source=False):
        """Insert *form* at *index* (append when index is None).

        An AdvancedForm is taken as-is; a basic Form is converted by
        translating its classic case actions into advanced
        load/update/open actions. Raises IncompatibleFormTypeException
        for anything else.
        """
        if isinstance(form, AdvancedForm):
            new_form = form
        elif isinstance(form, Form):
            new_form = AdvancedForm(
                name=form.name,
                form_filter=form.form_filter,
                media_image=form.media_image,
                media_audio=form.media_audio
            )
            new_form._parent = self
            form._parent = self
            if with_source:
                new_form.source = form.source
            actions = form.active_actions()
            # Renamed from `open` to avoid shadowing the builtin.
            open_action = actions.get('open_case', None)
            update = actions.get('update_case', None)
            close = actions.get('close_case', None)
            preload = actions.get('case_preload', None)
            subcases = actions.get('subcases', None)
            case_type = from_module.case_type

            def convert_preload(preload):
                # Classic preload maps path -> property; advanced
                # actions expect property -> path.
                return dict(zip(preload.values(), preload.keys()))

            base_action = None
            if open_action:
                base_action = AdvancedOpenCaseAction(
                    case_type=case_type,
                    case_tag='open_{0}_0'.format(case_type),
                    name_path=open_action.name_path,
                    open_condition=open_action.condition,
                    case_properties=update.update if update else {},
                )
                new_form.actions.open_cases.append(base_action)
            elif update or preload or close:
                base_action = LoadUpdateAction(
                    case_type=case_type,
                    case_tag='load_{0}_0'.format(case_type),
                    case_properties=update.update if update else {},
                    preload=convert_preload(preload.preload) if preload else {}
                )
                if from_module.parent_select.active:
                    gen = suite_xml.SuiteGenerator(self.get_app())
                    select_chain = gen.get_select_chain(from_module, include_self=False)
                    # Walk the parent-select chain outermost-first, tagging
                    # ancestors 'parent', 'parent_parent', ...
                    for n, link in enumerate(reversed(list(enumerate(select_chain)))):
                        i, module = link
                        new_form.actions.load_update_cases.append(LoadUpdateAction(
                            case_type=module.case_type,
                            case_tag='_'.join(['parent'] * (i + 1)),
                            details_module=module.unique_id,
                            parent_tag='_'.join(['parent'] * (i + 2)) if n > 0 else ''
                        ))
                    base_action.parent_tag = 'parent'
                if close:
                    base_action.close_condition = close.condition
                new_form.actions.load_update_cases.append(base_action)
            if subcases:
                for i, subcase in enumerate(subcases):
                    open_subcase_action = AdvancedOpenCaseAction(
                        case_type=subcase.case_type,
                        case_tag='open_{0}_{1}'.format(subcase.case_type, i + 1),
                        name_path=subcase.case_name,
                        open_condition=subcase.condition,
                        case_properties=subcase.case_properties,
                        repeat_context=subcase.repeat_context,
                        parent_reference_id=subcase.reference_id,
                        parent_tag=base_action.case_tag if base_action else ''
                    )
                    new_form.actions.open_cases.append(open_subcase_action)
        else:
            raise IncompatibleFormTypeException()
        # Bug fix: compare against None so that index=0 inserts at the
        # front instead of falling through to append.
        if index is not None:
            self.forms.insert(index, new_form)
            return self.get_form(index)
        self.forms.append(new_form)
        return self.get_form(-1)

    def rename_lang(self, old_lang, new_lang):
        super(AdvancedModule, self).rename_lang(old_lang, new_lang)
        self.case_list.rename_lang(old_lang, new_lang)

    def requires_case_details(self):
        """True when the case list is shown or any form loads a case of
        this module's own type."""
        if self.case_list.show:
            return True
        for form in self.forms:
            if any(action.case_type == self.case_type for action in form.actions.load_update_cases):
                return True
        # Explicit False (previously fell off the end, returning None).
        return False

    def all_forms_require_a_case(self):
        return all(form.requires == 'case' for form in self.forms)

    def get_details(self):
        """(name, detail, enabled) triples; product details only apply
        when CommTrack is enabled on the app."""
        return (
            ('case_short', self.case_details.short, True),
            ('case_long', self.case_details.long, True),
            ('product_short', self.product_details.short, self.get_app().commtrack_enabled),
            ('product_long', self.product_details.long, False),
        )

    def get_case_errors(self, needs_case_type, needs_case_detail, needs_referral_detail=False):
        """Yield case/product configuration errors for this module."""
        module_info = self.get_module_info()
        if needs_case_type and not self.case_type:
            yield {
                'type': 'no case type',
                'module': module_info,
            }
        if needs_case_detail:
            if not self.case_details.short.columns:
                yield {
                    'type': 'no case detail',
                    'module': module_info,
                }
            if self.get_app().commtrack_enabled and not self.product_details.short.columns:
                # Only an error if something would actually display
                # product stock.
                for form in self.forms:
                    if self.case_list.show or \
                            any(action.show_product_stock for action in form.actions.load_update_cases):
                        yield {
                            'type': 'no product detail',
                            'module': module_info,
                        }
                        break
            columns = self.case_details.short.columns + self.case_details.long.columns
            if self.get_app().commtrack_enabled:
                columns += self.product_details.short.columns
            errors = self.validate_detail_columns(columns)
            for error in errors:
                yield error
class CareplanForm(IndexedFormBase, NavMenuItemMediaMixin):
    """Base class for the goal/task forms of a careplan module."""
    form_type = 'careplan_form'
    mode = StringProperty(required=True, choices=['create', 'update'])
    custom_case_updates = DictProperty()
    case_preload = DictProperty()

    @classmethod
    def wrap(cls, data):
        """Dispatch raw docs to the concrete goal/task form class."""
        if cls is not CareplanForm:
            return super(CareplanForm, cls).wrap(data)
        doc_type = data['doc_type']
        if doc_type == 'CareplanGoalForm':
            return CareplanGoalForm.wrap(data)
        if doc_type == 'CareplanTaskForm':
            return CareplanTaskForm.wrap(data)
        raise ValueError('Unexpected doc_type for CareplanForm', doc_type)

    def add_stuff_to_xform(self, xform):
        super(CareplanForm, self).add_stuff_to_xform(xform)
        xform.add_care_plan(self)

    def get_case_updates(self, case_type):
        """Formatted case updates; empty unless *case_type* is this
        form's own case type."""
        if case_type != self.case_type:
            return []
        format_key = self.get_case_property_name_formatter()
        return [format_key(*item) for item in self.case_updates().iteritems()]

    def get_case_type(self):
        return self.case_type

    def get_parent_case_type(self):
        return self._parent.case_type

    def get_parent_types_and_contributed_properties(self, module_case_type, case_type):
        """Parent (type, relationship) pairs and contributed properties
        for *case_type*: goals attach to the module case, tasks to goals."""
        parent_types = set()
        case_properties = set()
        if case_type == self.case_type:
            if case_type == CAREPLAN_GOAL:
                parent_types.add((module_case_type, 'parent'))
            elif case_type == CAREPLAN_TASK:
                parent_types.add((CAREPLAN_GOAL, 'goal'))
            case_properties.update(self.case_updates().keys())
        return parent_types, case_properties
class CareplanGoalForm(CareplanForm):
    """Form for creating or updating a careplan goal case."""
    case_type = CAREPLAN_GOAL
    name_path = StringProperty(required=True, default='/data/name')
    date_followup_path = StringProperty(required=True, default='/data/date_followup')
    description_path = StringProperty(required=True, default='/data/description')
    close_path = StringProperty(required=True, default='/data/close_goal')

    @classmethod
    def new_form(cls, lang, name, mode):
        """Build a goal form plus its XForm template source."""
        form = CareplanGoalForm(mode=mode)
        if not name:
            prefix = 'Update' if mode == 'update' else 'New'
            name = '%s Careplan %s' % (prefix, CAREPLAN_CASE_NAMES[form.case_type])
        form.name = {lang: name}
        if mode == 'update':
            form.description_path = '/data/description_group/description'
        source = load_form_template('%s_%s.xml' % (form.case_type, mode))
        return form, source

    def case_updates(self):
        """Custom updates plus the fixed followup/description paths."""
        updates = self.custom_case_updates.copy()
        updates['date_followup'] = self.date_followup_path
        updates['description'] = self.description_path
        return updates

    def get_fixed_questions(self):
        """Descriptors for the form's fixed questions, ordered by mode."""
        def entry(attr, case_key, label):
            return {
                'name': attr,
                'key': case_key,
                'label': label,
                'path': self[attr]
            }
        shared = [
            entry('description_path', 'description', _('Description')),
            entry('date_followup_path', 'date_followup', _('Followup date')),
        ]
        if self.mode == 'create':
            return [entry('name_path', 'name', _('Name'))] + shared
        return shared + [entry('close_path', 'close', _('Close if'))]
class CareplanTaskForm(CareplanForm):
    """Form for creating or updating a careplan task case."""
    case_type = CAREPLAN_TASK
    name_path = StringProperty(required=True, default='/data/task_repeat/name')
    date_followup_path = StringProperty(required=True, default='/data/date_followup')
    description_path = StringProperty(required=True, default='/data/description')
    latest_report_path = StringProperty(required=True, default='/data/progress_group/progress_update')
    close_path = StringProperty(required=True, default='/data/task_complete')

    @classmethod
    def new_form(cls, lang, name, mode):
        """Build a task form plus its XForm template source."""
        form = CareplanTaskForm(mode=mode)
        if not name:
            prefix = 'Update' if mode == 'update' else 'New'
            name = '%s Careplan %s' % (prefix, CAREPLAN_CASE_NAMES[form.case_type])
        form.name = {lang: name}
        if mode == 'create':
            # Create mode collects tasks inside a repeat group.
            form.date_followup_path = '/data/task_repeat/date_followup'
            form.description_path = '/data/task_repeat/description'
        source = load_form_template('%s_%s.xml' % (form.case_type, mode))
        return form, source

    def case_updates(self):
        """Custom updates plus the mode-dependent fixed paths."""
        updates = self.custom_case_updates.copy()
        updates['date_followup'] = self.date_followup_path
        if self.mode == 'create':
            updates['description'] = self.description_path
        else:
            updates['latest_report'] = self.latest_report_path
        return updates

    def get_fixed_questions(self):
        """Descriptors for the form's fixed questions, ordered by mode."""
        def entry(attr, case_key, label):
            return {
                'name': attr,
                'key': case_key,
                'label': label,
                'path': self[attr]
            }
        shared = [
            entry('date_followup_path', 'date_followup', _('Followup date')),
        ]
        if self.mode == 'create':
            return [
                entry('name_path', 'name', _('Name')),
                entry('description_path', 'description', _('Description')),
            ] + shared
        return shared + [
            entry('latest_report_path', 'latest_report', _('Latest report')),
            entry('close_path', 'close', _('Close if')),
        ]
class CareplanModule(ModuleBase):
    """
    A set of forms and configuration for managing the Care Plan workflow.
    """
    module_type = 'careplan'
    parent_select = SchemaProperty(ParentSelect)
    display_separately = BooleanProperty(default=False)
    forms = SchemaListProperty(CareplanForm)
    goal_details = SchemaProperty(DetailPair)
    task_details = SchemaProperty(DetailPair)

    @classmethod
    def new_module(cls, app, name, lang, target_module_id, target_case_type):
        """Create a careplan module parented to the given target module."""
        lang = lang or 'en'
        module = CareplanModule(
            name={lang: name or ugettext("Care Plan")},
            parent_select=ParentSelect(
                active=True,
                relationship='parent',
                module_id=target_module_id
            ),
            case_type=target_case_type,
            goal_details=DetailPair(
                short=cls._get_detail(lang, 'goal_short'),
                long=cls._get_detail(lang, 'goal_long'),
            ),
            task_details=DetailPair(
                short=cls._get_detail(lang, 'task_short'),
                long=cls._get_detail(lang, 'task_long'),
            )
        )
        module.get_or_create_unique_id()
        return module

    @classmethod
    def _get_detail(cls, lang, detail_type):
        """Default detail screen for a 'goal_*' / 'task_*' detail type."""
        header = ugettext('Goal') if detail_type.startswith('goal') else ugettext('Task')
        columns = [
            DetailColumn(
                format='plain',
                header={lang: header},
                field='name',
                model='case'),
            DetailColumn(
                format='date',
                header={lang: ugettext("Followup")},
                field='date_followup',
                model='case')]
        if detail_type.endswith('long'):
            columns.append(DetailColumn(
                format='plain',
                header={lang: ugettext("Description")},
                field='description',
                model='case'))
        # Bug fix: callers pass 'task_long' (see new_module above); the old
        # comparison against 'tasks_long' never matched, so the
        # "Last update" column was never added.
        if detail_type == 'task_long':
            columns.append(DetailColumn(
                format='plain',
                header={lang: ugettext("Last update")},
                field='latest_report',
                model='case'))
        return Detail(type=detail_type, columns=columns)

    def add_insert_form(self, from_module, form, index=None, with_source=False):
        """Insert a CareplanForm at *index* (append when index is None)."""
        if not isinstance(form, CareplanForm):
            raise IncompatibleFormTypeException()
        # Bug fix: compare against None so that index=0 inserts at the
        # front instead of falling through to append.
        if index is not None:
            self.forms.insert(index, form)
            return self.get_form(index)
        self.forms.append(form)
        return self.get_form(-1)

    def requires_case_details(self):
        return True

    def get_case_types(self):
        # The module's own case type plus each form's (goal/task) type.
        return set([self.case_type]) | set(f.case_type for f in self.forms)

    def get_form_by_type(self, case_type, mode):
        """First form matching (case_type, mode), or None."""
        for form in self.get_forms():
            if form.case_type == case_type and form.mode == mode:
                return form

    def get_details(self):
        """(name, detail, enabled) triples for goal and task details."""
        return (
            ('%s_short' % CAREPLAN_GOAL, self.goal_details.short, True),
            ('%s_long' % CAREPLAN_GOAL, self.goal_details.long, True),
            ('%s_short' % CAREPLAN_TASK, self.task_details.short, True),
            ('%s_long' % CAREPLAN_TASK, self.task_details.long, True),
        )

    def get_case_errors(self, needs_case_type, needs_case_detail, needs_referral_detail=False):
        """Yield case-configuration errors for goals and tasks."""
        module_info = self.get_module_info()
        if needs_case_type and not self.case_type:
            yield {
                'type': 'no case type',
                'module': module_info,
            }
        if needs_case_detail:
            if not self.goal_details.short.columns:
                yield {
                    'type': 'no case detail for goals',
                    'module': module_info,
                }
            if not self.task_details.short.columns:
                yield {
                    'type': 'no case detail for tasks',
                    'module': module_info,
                }
            columns = self.goal_details.short.columns + self.goal_details.long.columns
            columns += self.task_details.short.columns + self.task_details.long.columns
            errors = self.validate_detail_columns(columns)
            for error in errors:
                yield error
class VersionedDoc(LazyAttachmentDoc):
    """
    A document that keeps an auto-incrementing version number, knows how to make copies of itself,
    delete a copy of itself, and revert back to an earlier copy of itself.
    """
    domain = StringProperty()
    copy_of = StringProperty()    # _id of the doc this is a build/copy of
    version = IntegerProperty()
    short_url = StringProperty()
    short_odk_url = StringProperty()
    short_odk_media_url = StringProperty()

    # Fields stripped when exporting/importing application source.
    _meta_fields = ['_id', '_rev', 'domain', 'copy_of', 'version', 'short_url', 'short_odk_url', 'short_odk_media_url']

    @property
    def id(self):
        return self._id

    def save(self, response_json=None, increment_version=None, **params):
        """Save, auto-bumping `version` (builds/copies never increment).

        When *response_json* is given, record the new version under
        response_json['update']['app-version'].
        """
        if increment_version is None:
            increment_version = not self.copy_of
        if increment_version:
            self.version = self.version + 1 if self.version else 1
        super(VersionedDoc, self).save(**params)
        if response_json is not None:
            if 'update' not in response_json:
                response_json['update'] = {}
            response_json['update']['app-version'] = self.version

    def make_build(self):
        """Return an (unsaved) copy of this doc representing a build,
        reusing an existing build of the same version if one exists."""
        assert self.get_id
        assert self.copy_of is None
        cls = self.__class__
        copies = cls.view('app_manager/applications', key=[self.domain, self._id, self.version], include_docs=True, limit=1).all()
        if copies:
            copy = copies[0]
        else:
            copy = deepcopy(self.to_json())
            # Couch identity and doc-specific URLs must not leak into the build.
            bad_keys = ('_id', '_rev', '_attachments',
                        'short_url', 'short_odk_url', 'short_odk_media_url', 'recipients')
            for bad_key in bad_keys:
                if bad_key in copy:
                    del copy[bad_key]
            copy = cls.wrap(copy)
            copy['copy_of'] = self._id
            copy.copy_attachments(self)
        return copy

    def copy_attachments(self, other, regexp=ATTACHMENT_REGEX):
        """Lazily copy *other*'s attachments whose names match *regexp*
        (all of them when regexp is None)."""
        for name in other.lazy_list_attachments() or {}:
            if regexp is None or re.match(regexp, name):
                self.lazy_put_attachment(other.lazy_fetch_attachment(name), name)

    def make_reversion_to_copy(self, copy):
        """
        Replaces couch doc with a copy of the backup ("copy").
        Returns the another Application/RemoteApp referring to this
        updated couch doc. The returned doc should be used in place of
        the original doc, i.e. should be called as follows:
        app = app.make_reversion_to_copy(copy)
        app.save()
        """
        if copy.copy_of != self._id:
            raise VersioningError("%s is not a copy of %s" % (copy, self))
        app = deepcopy(copy.to_json())
        # Keep this doc's couch identity and version; only content reverts.
        app['_rev'] = self._rev
        app['_id'] = self._id
        app['version'] = self.version
        app['copy_of'] = None
        if '_attachments' in app:
            del app['_attachments']
        cls = self.__class__
        app = cls.wrap(app)
        app.copy_attachments(copy)
        return app

    def delete_copy(self, copy):
        """Soft-delete a build of this doc."""
        if copy.copy_of != self._id:
            raise VersioningError("%s is not a copy of %s" % (copy, self))
        copy.delete_app()
        copy.save(increment_version=False)

    def scrub_source(self, source):
        """
        To be overridden.
        Use this to scrub out anything
        that should not be shown in the
        application source, such as ids, etc.
        """
        # Bug fix: was `raise NotImplemented()`, which raises a confusing
        # TypeError because NotImplemented is a constant, not an exception.
        raise NotImplementedError()

    def export_json(self, dump_json=True):
        """App source with meta fields stripped and matching attachments
        inlined."""
        source = deepcopy(self.to_json())
        for field in self._meta_fields:
            if field in source:
                del source[field]
        _attachments = {}
        for name in source.get('_attachments', {}):
            if re.match(ATTACHMENT_REGEX, name):
                _attachments[name] = self.fetch_attachment(name)
        source['_attachments'] = _attachments
        self.scrub_source(source)
        return json.dumps(source) if dump_json else source

    @classmethod
    def from_source(cls, source, domain):
        """Wrap exported *source* as a new doc belonging to *domain*."""
        for field in cls._meta_fields:
            if field in source:
                del source[field]
        source['domain'] = domain
        app = cls.wrap(source)
        return app

    def is_deleted(self):
        return self.doc_type.endswith(DELETED_SUFFIX)

    def unretire(self):
        """Undo a soft delete by restoring the original doc_type."""
        self.doc_type = self.get_doc_type()
        self.save()

    def get_doc_type(self):
        """doc_type with any deleted-marker suffix stripped."""
        if self.doc_type.endswith(DELETED_SUFFIX):
            return self.doc_type[:-len(DELETED_SUFFIX)]
        else:
            return self.doc_type
def absolute_url_property(method):
    """
    Helper for the various fully qualified application URLs
    Turns a method returning an unqualified URL
    into a property returning a fully qualified URL
    (e.g., '/my_url/' => 'https://www.commcarehq.org/my_url/')
    Expects `self.url_base` to be fully qualified url base
    """
    @wraps(method)
    def _fully_qualified(self):
        # Prefix the relative URL with the instance's url_base.
        return "%s%s" % (self.url_base, method(self))
    return property(_fully_qualified)
class ApplicationBase(VersionedDoc, SnapshotMixin,
                      CommCareFeatureSupportMixin):
    """
    Abstract base class for Application and RemoteApp.
    Contains methods for generating the various files and zipping them into CommCare.jar
    """

    recipients = StringProperty(default="")

    # this is the supported way of specifying which commcare build to use
    build_spec = SchemaProperty(BuildSpec)
    platform = StringProperty(
        choices=["nokia/s40", "nokia/s60", "winmo", "generic"],
        default="nokia/s40"
    )
    text_input = StringProperty(
        choices=['roman', 'native', 'custom-keys', 'qwerty'],
        default="roman"
    )
    success_message = DictProperty()

    # The following properties should only appear on saved builds
    # built_with stores a record of CommCare build used in a saved app
    built_with = SchemaProperty(BuildRecord)
    build_signed = BooleanProperty(default=True)
    built_on = DateTimeProperty(required=False)
    build_comment = StringProperty()
    comment_from = StringProperty()
    build_broken = BooleanProperty(default=False)
    # not used yet, but nice for tagging/debugging
    # currently only canonical value is 'incomplete-build',
    # for when build resources aren't found where they should be
    build_broken_reason = StringProperty()

    # watch out for a past bug:
    # when reverting to a build that happens to be released
    # that got copied into into the new app doc, and when new releases were made,
    # they were automatically starred
    # AFAIK this is fixed in code, but may rear its ugly head in an as-yet-not-understood
    # way for apps that already had this problem. Just keep an eye out
    is_released = BooleanProperty(default=False)

    # django-style salted hash of the admin password
    admin_password = StringProperty()
    # a=Alphanumeric, n=Numeric, x=Neither (not allowed)
    admin_password_charset = StringProperty(choices=['a', 'n', 'x'], default='n')

    # This is here instead of in Application because it needs to be available in stub representation
    application_version = StringProperty(default=APP_V2, choices=[APP_V1, APP_V2], required=False)
    # all languages configured for the app
    langs = StringListProperty()
    # only the languages that go in the build
    build_langs = StringListProperty()
    secure_submissions = BooleanProperty(default=False)

    # exchange (app store snapshot) properties
    cached_properties = DictProperty()
    description = StringProperty()
    deployment_date = DateTimeProperty()
    phone_model = StringProperty()
    user_type = StringProperty()
    attribution_notes = StringProperty()

    # always false for RemoteApp
    case_sharing = BooleanProperty(default=False)
@classmethod
def wrap(cls, data):
    """Normalize legacy build/tag/input conventions in *data* before
    wrapping; may save once if a legacy 'original_doc' reference had
    to be migrated into copy_history."""
    # scrape for old conventions and get rid of them
    if 'commcare_build' in data:
        version, build_number = data['commcare_build'].split('/')
        data['build_spec'] = BuildSpec.from_string("%s/latest" % version).to_json()
        del data['commcare_build']
    if 'commcare_tag' in data:
        version, build_number = current_builds.TAG_MAP[data['commcare_tag']]
        data['build_spec'] = BuildSpec.from_string("%s/latest" % version).to_json()
        del data['commcare_tag']
    # dict.has_key() is deprecated (and gone in Python 3); use `in`.
    if "built_with" in data and isinstance(data['built_with'], basestring):
        data['built_with'] = BuildSpec.from_string(data['built_with']).to_json()
    if 'native_input' in data:
        if 'text_input' not in data:
            data['text_input'] = 'native' if data['native_input'] else 'roman'
        del data['native_input']
    should_save = False
    if 'original_doc' in data:
        data['copy_history'] = [data.pop('original_doc')]
        should_save = True
    data["description"] = data.get('description') or data.get('short_description')
    self = super(ApplicationBase, cls).wrap(data)
    if not self.build_spec or self.build_spec.is_null():
        # Fall back to the default build for this application version.
        self.build_spec = get_default_build_spec(self.application_version)
    if should_save:
        self.save()
    return self
@classmethod
def by_domain(cls, domain):
    """All brief application docs belonging to *domain*."""
    key = [domain]
    return cls.view(
        'app_manager/applications_brief',
        startkey=key,
        endkey=key + [{}],
        include_docs=True,
        #stale=settings.COUCH_STALE_QUERY,
    ).all()
@classmethod
def get_latest_build(cls, domain, app_id):
    """Most recent saved build for (domain, app_id), or None."""
    return cls.view(
        'app_manager/saved_app',
        startkey=[domain, app_id, {}],
        endkey=[domain, app_id],
        descending=True,
        limit=1,
    ).one() or None
def rename_lang(self, old_lang, new_lang):
    # Base implementation only validates the new language code;
    # subclasses rename the translated content itself.
    validate_lang(new_lang)
def is_remote_app(self):
    # Default for this base class; remote-app subclasses presumably
    # override this to return True — confirm against RemoteApp.
    return False
def get_latest_app(self, released_only=True):
if released_only:
return get_app(self.domain, self.get_id, latest=True)
else:
return self.view('app_manager/applications',
startkey=[self.domain, self.get_id, {}],
endkey=[self.domain, self.get_id],
include_docs=True,
limit=1,
descending=True,
).first()
def get_latest_saved(self):
"""
This looks really similar to get_latest_app, not sure why tim added
"""
if not hasattr(self, '_latest_saved'):
released = self.__class__.view('app_manager/applications',
startkey=['^ReleasedApplications', self.domain, self._id, {}],
endkey=['^ReleasedApplications', self.domain, self._id],
limit=1,
descending=True,
include_docs=True
)
if len(released) > 0:
self._latest_saved = released.all()[0]
else:
saved = self.__class__.view('app_manager/saved_app',
startkey=[self.domain, self._id, {}],
endkey=[self.domain, self._id],
descending=True,
limit=1,
include_docs=True
)
if len(saved) > 0:
self._latest_saved = saved.all()[0]
else:
self._latest_saved = None # do not return this app!
return self._latest_saved
    def set_admin_password(self, raw_password):
        """Hash and store the admin password, remembering its charset.

        The charset flag ('n'umeric, 'a'lphanumeric, 'x' = anything else)
        lets check_password_charset() validate against the profile's
        password_format setting without keeping the plaintext.
        """
        salt = os.urandom(5).encode('hex')
        self.admin_password = make_password(raw_password, salt=salt)
        if raw_password.isnumeric():
            self.admin_password_charset = 'n'
        elif raw_password.isalnum():
            self.admin_password_charset = 'a'
        else:
            self.admin_password_charset = 'x'
def check_password_charset(self):
errors = []
if hasattr(self, 'profile'):
password_format = self.profile.get('properties', {}).get('password_format', 'n')
message = ('Your app requires {0} passwords '
'but the admin password is not {0}')
if password_format == 'n' and self.admin_password_charset in 'ax':
errors.append({'type': 'password_format',
'message': message.format('numeric')})
if password_format == 'a' and self.admin_password_charset in 'x':
errors.append({'type': 'password_format',
'message': message.format('alphanumeric')})
return errors
    def get_build(self):
        """Resolve the app's build_spec to a concrete CommCare build."""
        return self.build_spec.get_build()
    @property
    def build_version(self):
        # `LooseVersion`s are smart!
        # LooseVersion('2.12.0') > '2.2'
        # (even though '2.12.0' < '2.2')
        # Implicitly returns None when no version is set.
        if self.build_spec.version:
            return LooseVersion(self.build_spec.version)
    def get_preview_build(self):
        """Prefer this app's own build if it ships a web-demo attachment;
        otherwise fall back to the configured preview build."""
        preview = self.get_build()
        for path in getattr(preview, '_attachments', {}):
            if path.startswith('Generic/WebDemo'):
                return preview
        return CommCareBuildConfig.fetch().preview.get_build()
    @property
    def commcare_minor_release(self):
        """This is mostly just for views"""
        return '%d.%d' % self.build_spec.minor_release()
    def get_build_label(self):
        """Return the configured menu label for this build spec, or its
        default label when not in the menu."""
        for item in CommCareBuildConfig.fetch().menu:
            if item['build'].to_string() == self.build_spec.to_string():
                return item['label']
        return self.build_spec.get_label()
    @property
    def short_name(self):
        # truncated to at most 12 characters for compact display
        return self.name if len(self.name) <= 12 else '%s..' % self.name[:10]
    @property
    def has_careplan_module(self):
        # overridden on Application; base apps never have one
        return False
    @property
    def url_base(self):
        return get_url_base()
    @absolute_url_property
    def post_url(self):
        # secure variant when secure_submissions is enabled
        if self.secure_submissions:
            url_name = 'receiver_secure_post_with_app_id'
        else:
            url_name = 'receiver_post_with_app_id'
        return reverse(url_name, args=[self.domain, self.get_id])
    @absolute_url_property
    def key_server_url(self):
        return reverse('key_server_url', args=[self.domain])
    @absolute_url_property
    def ota_restore_url(self):
        return reverse('corehq.apps.ota.views.restore', args=[self.domain])
    @absolute_url_property
    def form_record_url(self):
        # NOTE(review): hard-coded pact-specific API path
        return '/a/%s/api/custom/pact_formdata/v1/' % self.domain
    @absolute_url_property
    def hq_profile_url(self):
        return "%s?latest=true" % (
            reverse('download_profile', args=[self.domain, self._id])
        )
    @absolute_url_property
    def hq_media_profile_url(self):
        return "%s?latest=true" % (
            reverse('download_media_profile', args=[self.domain, self._id])
        )
    @property
    def profile_loc(self):
        return "jr://resource/profile.xml"
    @absolute_url_property
    def jar_url(self):
        return reverse('corehq.apps.app_manager.views.download_jar', args=[self.domain, self._id])
    def get_jar_path(self):
        """Map (platform, text_input) to the build's jad/jar directory."""
        spec = {
            'nokia/s40': 'Nokia/S40',
            'nokia/s60': 'Nokia/S60',
            'generic': 'Generic/Default',
            'winmo': 'Native/WinMo'
        }[self.platform]
        # Nokia builds come in per-text-input variants
        if self.platform in ('nokia/s40', 'nokia/s60'):
            spec += {
                ('native',): '-native-input',
                ('roman',): '-generic',
                ('custom-keys',): '-custom-keys',
                ('qwerty',): '-qwerty'
            }[(self.text_input,)]
        return spec
    def get_jadjar(self):
        """Fetch this app's jad/jar pair from its CommCare build."""
        return self.get_build().get_jadjar(self.get_jar_path())
    def validate_fixtures(self):
        """Raise PermissionDenied if any form uses lookup tables (fixtures)
        without the LOOKUP_TABLES privilege on the domain."""
        if not domain_has_privilege(self.domain, privileges.LOOKUP_TABLES):
            # remote apps don't support get_forms yet.
            # for now they can circumvent the fixture limitation. sneaky bastards.
            if hasattr(self, 'get_forms'):
                for form in self.get_forms():
                    if form.has_fixtures:
                        raise PermissionDenied(_(
                            "Usage of lookup tables is not supported by your "
                            "current subscription. Please upgrade your "
                            "subscription before using this feature."
                        ))
    def validate_jar_path(self):
        """Raise AppEditingError when the chosen text_input mode is not
        supported by the selected CommCare build version."""
        build = self.get_build()
        setting = commcare_settings.SETTINGS_LOOKUP['hq']['text_input']
        value = self.text_input
        setting_version = setting['since'].get(value)
        if setting_version:
            # compare as (major, minor) integer tuples
            setting_version = tuple(map(int, setting_version.split('.')))
            my_version = build.minor_release()
            if my_version < setting_version:
                i = setting['values'].index(value)
                assert i != -1
                name = _(setting['value_names'][i])
                # the concatenated tuples supply all five format arguments
                raise AppEditingError((
                    '%s Text Input is not supported '
                    'in CommCare versions before %s.%s. '
                    '(You are using %s.%s)'
                ) % ((name,) + setting_version + my_version))
    @property
    def jad_settings(self):
        """Base key/value pairs written into the CommCare.jad manifest."""
        settings = {
            'JavaRosa-Admin-Password': self.admin_password,
            'Profile': self.profile_loc,
            'MIDlet-Jar-URL': self.jar_url,
            #'MIDlet-Name': self.name,
            # e.g. 2011-Apr-11 20:45
            'CommCare-Release': "true",
        }
        # Build-Number is only needed for CommCare builds before 2.8
        if self.build_version < '2.8':
            settings['Build-Number'] = self.version
        return settings
    def create_jadjar(self, save=False):
        """Return the (jad, jar) pair for this app, packing a fresh one
        (and optionally persisting it as attachments) on cache miss."""
        try:
            # fast path: previously packed jad/jar attachments
            return (
                self.lazy_fetch_attachment('CommCare.jad'),
                self.lazy_fetch_attachment('CommCare.jar'),
            )
        except (ResourceError, KeyError):
            built_on = datetime.utcnow()
            all_files = self.create_all_files()
            jad_settings = {
                'Released-on': built_on.strftime("%Y-%b-%d %H:%M"),
            }
            jad_settings.update(self.jad_settings)
            jadjar = self.get_jadjar().pack(all_files, jad_settings)
            if save:
                # record build metadata and stash every generated file
                self.built_on = built_on
                self.built_with = BuildRecord(
                    version=jadjar.version,
                    build_number=jadjar.build_number,
                    signed=jadjar.signed,
                    datetime=built_on,
                )
                self.lazy_put_attachment(jadjar.jad, 'CommCare.jad')
                self.lazy_put_attachment(jadjar.jar, 'CommCare.jar')
                for filepath in all_files:
                    self.lazy_put_attachment(all_files[filepath],
                                             'files/%s' % filepath)
            return jadjar.jad, jadjar.jar
    def validate_app(self):
        """Run build-time validations; return a list of error dicts
        (empty when the app can be built)."""
        errors = []
        errors.extend(self.check_password_charset())
        try:
            self.validate_fixtures()
            self.validate_jar_path()
            # generating all files surfaces most remaining problems
            self.create_all_files()
        except (AppEditingError, XFormValidationError, XFormError,
                PermissionDenied) as e:
            errors.append({'type': 'error', 'message': unicode(e)})
        except Exception as e:
            if settings.DEBUG:
                raise
            # this is much less useful/actionable without a URL
            # so make sure to include the request
            logging.error('Unexpected error building app', exc_info=True,
                          extra={'request': view_utils.get_request()})
            errors.append({'type': 'error', 'message': 'unexpected error: %s' % e})
        return errors
    @absolute_url_property
    def odk_profile_url(self):
        return reverse('corehq.apps.app_manager.views.download_odk_profile', args=[self.domain, self._id])
    @absolute_url_property
    def odk_media_profile_url(self):
        return reverse('corehq.apps.app_manager.views.download_odk_media_profile', args=[self.domain, self._id])
    @property
    def odk_profile_display_url(self):
        # prefer the shortened (bitly) URL when one was generated
        return self.short_odk_url or self.odk_profile_url
    @property
    def odk_media_profile_display_url(self):
        return self.short_odk_media_url or self.odk_media_profile_url
    def get_odk_qr_code(self, with_media=False):
        """Returns a QR code, as a PNG to install on CC-ODK"""
        try:
            # cached from a previous call
            return self.lazy_fetch_attachment("qrcode.png")
        except ResourceNotFound:
            try:
                from pygooglechart import QRChart
            except ImportError:
                raise Exception(
                    "Aw shucks, someone forgot to install "
                    "the google chart library on this machine "
                    "and this feature needs it. "
                    "To get it, run easy_install pygooglechart. "
                    "Until you do that this won't work."
                )
            HEIGHT = WIDTH = 250
            code = QRChart(HEIGHT, WIDTH)
            code.add_data(self.odk_profile_url if not with_media else self.odk_media_profile_url)
            # "Level L" error correction with a 0 pixel margin
            code.set_ec('L', 0)
            f, fname = tempfile.mkstemp()
            code.download(fname)
            os.close(f)
            with open(fname, "rb") as f:
                png_data = f.read()
                self.lazy_put_attachment(png_data, "qrcode.png",
                                         content_type="image/png")
            return png_data
    def fetch_jar(self):
        return self.get_jadjar().fetch_jar()
    def make_build(self, comment=None, user_id=None, previous_version=None):
        """Create a saved build (copy) of this app: versions forms/media,
        packs the jad/jar, and generates bitly short URLs when enabled."""
        copy = super(ApplicationBase, self).make_build()
        if not copy._id:
            # I expect this always to be the case
            # but check explicitly so as not to change the _id if it exists
            copy._id = copy.get_db().server.next_uuid()
        copy.set_form_versions(previous_version)
        copy.set_media_versions(previous_version)
        copy.create_jadjar(save=True)
        try:
            # since this hard to put in a test
            # I'm putting this assert here if copy._id is ever None
            # which makes tests error
            assert copy._id
            if settings.BITLY_LOGIN:
                copy.short_url = bitly.shorten(
                    get_url_base() + reverse('corehq.apps.app_manager.views.download_jad', args=[copy.domain, copy._id])
                )
                copy.short_odk_url = bitly.shorten(
                    get_url_base() + reverse('corehq.apps.app_manager.views.download_odk_profile', args=[copy.domain, copy._id])
                )
                copy.short_odk_media_url = bitly.shorten(
                    get_url_base() + reverse('corehq.apps.app_manager.views.download_odk_media_profile', args=[copy.domain, copy._id])
                )
        except AssertionError:
            raise
        except Exception:  # URLError, BitlyError
            # for offline only
            logging.exception("Problem creating bitly url for app %s. Do you have network?" % self.get_id)
            copy.short_url = None
            copy.short_odk_url = None
            copy.short_odk_media_url = None
        copy.build_comment = comment
        copy.comment_from = user_id
        copy.is_released = False
        return copy
    def delete_app(self):
        """Soft-delete: flag the doc type as deleted and return an undo
        record.

        NOTE(review): only the DeleteApplicationRecord is saved here;
        the caller appears responsible for saving the app itself — confirm.
        """
        self.doc_type += '-Deleted'
        record = DeleteApplicationRecord(
            domain=self.domain,
            app_id=self.id,
            datetime=datetime.utcnow()
        )
        record.save()
        return record
    def set_form_versions(self, previous_version):
        # by default doing nothing here is fine.
        pass
    def set_media_versions(self, previous_version):
        # overridden by Application to carry over multimedia versions
        pass
def validate_lang(lang):
    """Raise ValueError unless *lang* looks like a language code:
    two or three lowercase letters, optionally '-' plus lowercase letters."""
    code_pattern = re.compile(r'^[a-z]{2,3}(-[a-z]*)?$')
    if code_pattern.match(lang) is None:
        raise ValueError("Invalid Language")
def validate_property(property):
    """Raise ValueError unless *property* is a valid case-property path:
    '/'-separated identifiers, each starting with a letter."""
    # this regex is also copied in propertyList.ejs
    segment = r'[a-zA-Z][\w_-]*'
    pattern = r'^%s(/%s)*$' % (segment, segment)
    if re.match(pattern, property) is None:
        raise ValueError("Invalid Property")
def validate_detail_screen_field(field):
    """Raise ValueError unless *field* is a valid detail-screen sort field
    (optional 'prefix:' parts, optional 'path/' parts, optional leading '#')."""
    # If you change here, also change here:
    # corehq/apps/app_manager/static/app_manager/js/detail-screen-config.js
    field_re = r'^([a-zA-Z][\w_-]*:)*([a-zA-Z][\w_-]*/)*#?[a-zA-Z][\w_-]*$'
    match = re.match(field_re, field)
    if match is None:
        raise ValueError("Invalid Sort Field")
class SavedAppBuild(ApplicationBase):
    """A saved (built) snapshot of an application."""

    def to_saved_build_json(self, timezone):
        """Serialize this build for display (e.g. on the releases page),
        stripping bulky fields and localizing the build timestamp."""
        data = super(SavedAppBuild, self).to_json().copy()
        # BUG FIX: a missing comma previously fused 'translations' and
        # 'description' into the single key 'translationsdescription',
        # so neither field was actually removed from the payload.
        for key in ('modules', 'user_registration',
                    '_attachments', 'profile', 'translations',
                    'description', 'short_description'):
            data.pop(key, None)
        data.update({
            'id': self.id,
            'built_on_date': utc_to_timezone(data['built_on'], timezone, "%b %d, %Y"),
            'built_on_time': utc_to_timezone(data['built_on'], timezone, "%H:%M %Z"),
            'build_label': self.built_with.get_label(),
            'jar_path': self.get_jar_path(),
            'short_name': self.short_name,
            'enable_offline_install': self.enable_offline_install,
        })
        comment_from = data['comment_from']
        if comment_from:
            try:
                comment_user = CouchUser.get(comment_from)
            except ResourceNotFound:
                # fall back to the raw user id when the user doc is gone
                data['comment_user_name'] = comment_from
            else:
                data['comment_user_name'] = comment_user.full_name
        return data
class Application(ApplicationBase, TranslationMixin, HQMediaMixin):
    """
    An Application that can be created entirely through the online interface
    """
    user_registration = SchemaProperty(UserRegistrationForm)
    show_user_registration = BooleanProperty(default=False, required=True)
    modules = SchemaListProperty(ModuleBase)
    name = StringProperty()
    # profile's schema is {'features': {}, 'properties': {}}
    # ended up not using a schema because properties is a reserved word
    profile = DictProperty()
    use_custom_suite = BooleanProperty(default=False)
    cloudcare_enabled = BooleanProperty(default=False)
    translation_strategy = StringProperty(default='select-known',
                                          choices=app_strings.CHOICES.keys())
    commtrack_enabled = BooleanProperty(default=False)
    commtrack_requisition_mode = StringProperty(choices=CT_REQUISITION_MODES)
    auto_gps_capture = BooleanProperty(default=False)

    @classmethod
    def wrap(cls, data):
        """Wrap a raw doc, filling in legacy module labels and build_langs."""
        for module in data.get('modules', []):
            for attr in ('case_label', 'referral_label'):
                # dict.has_key() is deprecated; use the 'in' operator
                if attr not in module:
                    module[attr] = {}
            for lang in data['langs']:
                if not module['case_label'].get(lang):
                    module['case_label'][lang] = commcare_translations.load_translations(lang).get('cchq.case', 'Cases')
                if not module['referral_label'].get(lang):
                    module['referral_label'][lang] = commcare_translations.load_translations(lang).get('cchq.referral', 'Referrals')
        if not data.get('build_langs'):
            data['build_langs'] = data['langs']
        self = super(Application, cls).wrap(data)

        # make sure all form versions are None on working copies
        if not self.copy_of:
            for form in self.get_forms():
                form.version = None

        # weird edge case where multimedia_map gets set to null and causes issues
        if self.multimedia_map is None:
            self.multimedia_map = {}

        return self

    def save(self, *args, **kwargs):
        super(Application, self).save(*args, **kwargs)
        # Import loop if this is imported at the top
        # TODO: revamp so signal_connections <- models <- signals
        from corehq.apps.app_manager import signals
        signals.app_post_save.send(Application, application=self)

    def make_reversion_to_copy(self, copy):
        app = super(Application, self).make_reversion_to_copy(copy)
        for form in app.get_forms():
            # reset the form's validation cache, since the form content is
            # likely to have changed in the revert!
            form.validation_cache = None
            form.version = None
        app.build_broken = False
        return app

    @property
    def profile_url(self):
        return self.hq_profile_url

    @property
    def media_profile_url(self):
        return self.hq_media_profile_url

    @property
    def url_base(self):
        return get_url_base()

    @absolute_url_property
    def suite_url(self):
        return reverse('download_suite', args=[self.domain, self.get_id])

    @property
    def suite_loc(self):
        if self.enable_relative_suite_path:
            return './suite.xml'
        else:
            return "jr://resource/suite.xml"

    @absolute_url_property
    def media_suite_url(self):
        return reverse('download_media_suite', args=[self.domain, self.get_id])

    @property
    def media_suite_loc(self):
        if self.enable_relative_suite_path:
            return "./media_suite.xml"
        else:
            return "jr://resource/media_suite.xml"

    @property
    def default_language(self):
        return self.build_langs[0] if len(self.build_langs) > 0 else "en"

    def fetch_xform(self, module_id=None, form_id=None, form=None):
        """Validate and render a form's XForm XML as UTF-8 bytes."""
        if not form:
            form = self.get_module(module_id).get_form(form_id)
        return form.validate_form().render_xform().encode('utf-8')

    def set_form_versions(self, previous_version):
        """Carry form versions over from *previous_version* when a form's
        rendered XML is unchanged; otherwise leave version as None."""
        # this will make builds slower, but they're async now so hopefully
        # that's fine.
        def _hash(val):
            return hashlib.md5(val).hexdigest()

        if previous_version:
            for form_stuff in self.get_forms(bare=False):
                filename = 'files/%s' % self.get_form_filename(**form_stuff)
                form = form_stuff["form"]
                form_version = None
                try:
                    previous_form = previous_version.get_form(form.unique_id)
                    # take the previous version's compiled form as-is
                    # (generation code may have changed since last build)
                    previous_source = previous_version.fetch_attachment(filename)
                except (ResourceNotFound, FormNotFoundException):
                    pass
                else:
                    previous_hash = _hash(previous_source)
                    # hack - temporarily set my version to the previous version
                    # so that that's not treated as the diff
                    previous_form_version = previous_form.get_version()
                    form.version = previous_form_version
                    my_hash = _hash(self.fetch_xform(form=form))
                    if previous_hash == my_hash:
                        form_version = previous_form_version
                if form_version is None:
                    form.version = None
                else:
                    form.version = form_version

    def set_media_versions(self, previous_version):
        """Carry multimedia versions over from *previous_version* for
        unchanged items; bump changed/new items to the current version."""
        # access to .multimedia_map is slow
        prev_multimedia_map = previous_version.multimedia_map if previous_version else {}
        for path, map_item in self.multimedia_map.iteritems():
            pre_map_item = prev_multimedia_map.get(path, None)
            if pre_map_item and pre_map_item.version and pre_map_item.multimedia_id == map_item.multimedia_id:
                map_item.version = pre_map_item.version
            else:
                map_item.version = self.version

    def create_app_strings(self, lang):
        """Render the app_strings.txt for *lang* using the app's strategy."""
        gen = app_strings.CHOICES[self.translation_strategy]
        if lang == 'default':
            return gen.create_default_app_strings(self)
        else:
            return gen.create_app_strings(self, lang)

    @property
    def skip_validation(self):
        properties = (self.profile or {}).get('properties', {})
        return properties.get('cc-content-valid', 'yes')

    @property
    def jad_settings(self):
        s = super(Application, self).jad_settings
        s.update({
            'Skip-Validation': self.skip_validation,
        })
        return s

    def create_profile(self, is_odk=False, with_media=False, template='app_manager/profile.xml'):
        """Render the profile XML, merging configured settings with
        commcare_settings defaults."""
        self__profile = self.profile
        app_profile = defaultdict(dict)

        for setting in commcare_settings.SETTINGS:
            setting_type = setting['type']
            setting_id = setting['id']

            if setting_type not in ('properties', 'features'):
                setting_value = None
            elif setting_id not in self__profile.get(setting_type, {}):
                # NOTE(review): emits the yaml 'default' when it differs from
                # 'commcare_default' — confirm this precedence is intended
                if 'commcare_default' in setting and setting['commcare_default'] != setting['default']:
                    setting_value = setting['default']
                else:
                    setting_value = None
            else:
                setting_value = self__profile[setting_type][setting_id]
            if setting_value:
                app_profile[setting_type][setting_id] = {
                    'value': setting_value,
                    'force': setting.get('force', False)
                }
            # assert that it gets explicitly set once per loop
            del setting_value

        if self.case_sharing:
            app_profile['properties']['server-tether'] = {
                'force': True,
                'value': 'sync',
            }

        if with_media:
            profile_url = self.media_profile_url if not is_odk else (self.odk_media_profile_url + '?latest=true')
        else:
            profile_url = self.profile_url if not is_odk else (self.odk_profile_url + '?latest=true')

        return render_to_string(template, {
            'is_odk': is_odk,
            'app': self,
            'profile_url': profile_url,
            'app_profile': app_profile,
            'cc_user_domain': cc_user_domain(self.domain),
            'include_media_suite': with_media,
            'descriptor': u"Profile File"
        }).decode('utf-8')

    @property
    def custom_suite(self):
        try:
            return self.lazy_fetch_attachment('custom_suite.xml')
        except ResourceNotFound:
            return ""

    def set_custom_suite(self, value):
        self.put_attachment(value, 'custom_suite.xml')

    def create_suite(self):
        """Render the suite file (legacy template for V1 apps)."""
        if self.application_version == APP_V1:
            template = 'app_manager/suite-%s.xml' % self.application_version
            return render_to_string(template, {
                'app': self,
                'langs': ["default"] + self.build_langs
            })
        else:
            return suite_xml.SuiteGenerator(self).generate_suite()

    def create_media_suite(self):
        return suite_xml.MediaSuiteGenerator(self).generate_suite()

    @classmethod
    def get_form_filename(cls, type=None, form=None, module=None):
        if type == 'user_registration':
            return 'user_registration.xml'
        else:
            return 'modules-%s/forms-%s.xml' % (module.id, form.id)

    def create_all_files(self):
        """Generate every file that goes into a build: profiles, suites,
        per-language app strings, and each form's XML."""
        files = {
            'profile.xml': self.create_profile(is_odk=False),
            'profile.ccpr': self.create_profile(is_odk=True),
            'media_profile.xml': self.create_profile(is_odk=False, with_media=True),
            'media_profile.ccpr': self.create_profile(is_odk=True, with_media=True),
            'suite.xml': self.create_suite(),
            'media_suite.xml': self.create_media_suite(),
        }
        for lang in ['default'] + self.build_langs:
            files["%s/app_strings.txt" % lang] = self.create_app_strings(lang)
        for form_stuff in self.get_forms(bare=False):
            filename = self.get_form_filename(**form_stuff)
            form = form_stuff['form']
            files[filename] = self.fetch_xform(form=form)
        return files

    get_modules = IndexedSchema.Getter('modules')

    @parse_int([1])
    def get_module(self, i):
        """Return module *i*; negative indices wrap via the modulo below."""
        try:
            return self.modules[i].with_id(i % len(self.modules), self)
        except IndexError:
            raise ModuleNotFoundException()

    def get_user_registration(self):
        """Return the user registration form, seeding its source from the
        bundled template on first access."""
        form = self.user_registration
        form._app = self
        if not form.source:
            form.source = load_form_template('register_user.xhtml')
        return form

    def get_module_by_unique_id(self, unique_id):
        def matches(module):
            return module.get_or_create_unique_id() == unique_id
        for obj in self.get_modules():
            if matches(obj):
                return obj
        raise ModuleNotFoundException(
            ("Module in app '%s' with unique id '%s' not found"
             % (self.id, unique_id)))

    def get_forms(self, bare=True):
        """Yield every form in the app; when bare is False, yield dicts
        with 'type', 'form' and (for module forms) 'module' keys."""
        if self.show_user_registration:
            yield self.get_user_registration() if bare else {
                'type': 'user_registration',
                'form': self.get_user_registration()
            }
        for module in self.get_modules():
            for form in module.get_forms():
                yield form if bare else {
                    'type': 'module_form',
                    'module': module,
                    'form': form
                }

    def get_form(self, unique_form_id, bare=True):
        def matches(form):
            return form.get_unique_id() == unique_form_id
        for obj in self.get_forms(bare):
            if matches(obj if bare else obj['form']):
                return obj
        raise FormNotFoundException(
            ("Form in app '%s' with unique id '%s' not found"
             % (self.id, unique_form_id)))

    def get_form_location(self, unique_form_id):
        """Return (module_index, form_index) for the given form id."""
        for m_index, module in enumerate(self.get_modules()):
            for f_index, form in enumerate(module.get_forms()):
                if unique_form_id == form.unique_id:
                    return m_index, f_index
        raise KeyError("Form in app '%s' with unique id '%s' not found" % (self.id, unique_form_id))

    @classmethod
    def new_app(cls, domain, name, application_version, lang="en"):
        app = cls(domain=domain, modules=[], name=name, langs=[lang], build_langs=[lang], application_version=application_version)
        return app

    def add_module(self, module):
        self.modules.append(module)
        return self.get_module(-1)

    def delete_module(self, module_unique_id):
        """Remove a module, returning an undo record (None if missing)."""
        try:
            module = self.get_module_by_unique_id(module_unique_id)
        except ModuleNotFoundException:
            return None
        record = DeleteModuleRecord(
            domain=self.domain,
            app_id=self.id,
            module_id=module.id,
            module=module,
            datetime=datetime.utcnow()
        )
        del self.modules[module.id]
        record.save()
        return record

    def new_form(self, module_id, name, lang, attachment=""):
        module = self.get_module(module_id)
        return module.new_form(name, lang, attachment)

    def delete_form(self, module_unique_id, form_unique_id):
        """Remove a form, returning an undo record (None if missing)."""
        try:
            module = self.get_module_by_unique_id(module_unique_id)
            form = self.get_form(form_unique_id)
        except (ModuleNotFoundException, FormNotFoundException):
            return None
        record = DeleteFormRecord(
            domain=self.domain,
            app_id=self.id,
            module_unique_id=module_unique_id,
            form_id=form.id,
            form=form,
            datetime=datetime.utcnow(),
        )
        record.save()
        del module['forms'][form.id]
        return record

    def rename_lang(self, old_lang, new_lang):
        """Rename a language code throughout the app's translations."""
        validate_lang(new_lang)
        if old_lang == new_lang:
            return
        if new_lang in self.langs:
            raise AppEditingError("Language %s already exists!" % new_lang)
        for i, lang in enumerate(self.langs):
            if lang == old_lang:
                self.langs[i] = new_lang
        for module in self.get_modules():
            module.rename_lang(old_lang, new_lang)
        _rename_key(self.translations, old_lang, new_lang)

    def rearrange_modules(self, i, j):
        """Move the module at index j to index i."""
        modules = self.modules
        try:
            modules.insert(i, modules.pop(j))
        except IndexError:
            raise RearrangeError()
        self.modules = modules

    def rearrange_forms(self, to_module_id, from_module_id, i, j):
        """
        If the case types of the two modules conflict,
        ConflictingCaseTypeError is raised,
        but the rearrangement (confusingly) goes through anyway.
        This is intentional.
        """
        to_module = self.get_module(to_module_id)
        from_module = self.get_module(from_module_id)
        try:
            form = from_module.forms.pop(j)
            to_module.add_insert_form(from_module, form, index=i, with_source=True)
        except IndexError:
            raise RearrangeError()
        if to_module.case_type != from_module.case_type:
            raise ConflictingCaseTypeError()

    def scrub_source(self, source):
        """Regenerate every form unique_id in an exported source doc,
        renaming the matching XML attachments to follow."""
        def change_unique_id(form):
            unique_id = form['unique_id']
            new_unique_id = FormBase.generate_id()
            form['unique_id'] = new_unique_id
            # dict.has_key() is deprecated; use the 'in' operator
            if ("%s.xml" % unique_id) in source['_attachments']:
                source['_attachments']["%s.xml" % new_unique_id] = source['_attachments'].pop("%s.xml" % unique_id)

        change_unique_id(source['user_registration'])
        for m, module in enumerate(source['modules']):
            for f, form in enumerate(module['forms']):
                change_unique_id(source['modules'][m]['forms'][f])

    def copy_form(self, module_id, form_id, to_module_id):
        """
        If the case types of the two modules conflict,
        ConflictingCaseTypeError is raised,
        but the copying (confusingly) goes through anyway.
        This is intentional.
        """
        from_module = self.get_module(module_id)
        form = from_module.get_form(form_id)
        to_module = self.get_module(to_module_id)
        self._copy_form(from_module, form, to_module)

    def _copy_form(self, from_module, form, to_module):
        if not form.source:
            raise BlankXFormError()
        copy_source = deepcopy(form.to_json())
        # the copy gets a fresh unique_id via add_insert_form
        if 'unique_id' in copy_source:
            del copy_source['unique_id']
        copy_form = to_module.add_insert_form(from_module, FormBase.wrap(copy_source))
        save_xform(self, copy_form, form.source)
        if from_module['case_type'] != to_module['case_type']:
            raise ConflictingCaseTypeError()

    def convert_module_to_advanced(self, module_id):
        """Create an AdvancedModule clone of the given module (named
        '<name> (advanced)'), copying its details and forms."""
        from_module = self.get_module(module_id)
        name = {lang: u'{} (advanced)'.format(name) for lang, name in from_module.name.items()}
        case_details = deepcopy(from_module.case_details.to_json())
        to_module = AdvancedModule(
            name=name,
            forms=[],
            case_type=from_module.case_type,
            case_label=from_module.case_label,
            put_in_root=from_module.put_in_root,
            case_list=from_module.case_list,
            case_details=DetailPair.wrap(case_details),
            product_details=DetailPair(
                short=Detail(
                    columns=[
                        DetailColumn(
                            format='plain',
                            header={'en': ugettext("Product")},
                            field='name',
                            model='product',
                        ),
                    ],
                ),
                long=Detail(),
            ),
        )
        to_module.get_or_create_unique_id()
        to_module = self.add_module(to_module)
        for form in from_module.get_forms():
            self._copy_form(from_module, form, to_module)
        return to_module

    @cached_property
    def has_case_management(self):
        for module in self.get_modules():
            for form in module.get_forms():
                if len(form.active_actions()) > 0:
                    return True
        return False

    @memoized
    def case_type_exists(self, case_type):
        return case_type in self.get_case_types()

    @memoized
    def get_case_types(self):
        return set(chain(*[m.get_case_types() for m in self.get_modules()]))

    def has_media(self):
        return len(self.multimedia_map) > 0

    @memoized
    def get_xmlns_map(self):
        xmlns_map = defaultdict(list)
        for form in self.get_forms():
            xmlns_map[form.xmlns].append(form)
        return xmlns_map

    def get_form_by_xmlns(self, xmlns):
        """Return the single form with *xmlns*, or None (logging an error
        when the xmlns is missing or duplicated)."""
        if xmlns == "http://code.javarosa.org/devicereport":
            return None
        forms = self.get_xmlns_map()[xmlns]
        if len(forms) != 1:
            logging.error('App %s in domain %s has %s forms with xmlns %s' % (
                self.get_id,
                self.domain,
                len(forms),
                xmlns,
            ))
            return None
        else:
            form, = forms
        return form

    def get_questions(self, xmlns):
        form = self.get_form_by_xmlns(xmlns)
        if not form:
            return []
        return form.get_questions(self.langs)

    def validate_app(self):
        """Validate modules/forms first; only run the (expensive) base
        validation when no structural errors were found."""
        xmlns_count = defaultdict(int)
        errors = []

        for lang in self.langs:
            if not lang:
                errors.append({'type': 'empty lang'})

        if not self.modules:
            errors.append({'type': "no modules"})
        for module in self.get_modules():
            errors.extend(module.validate_for_build())

        for form in self.get_forms():
            errors.extend(form.validate_for_build(validate_module=False))

            # make sure that there aren't duplicate xmlns's
            xmlns_count[form.xmlns] += 1
        for xmlns in xmlns_count:
            if xmlns_count[xmlns] > 1:
                errors.append({'type': "duplicate xmlns", "xmlns": xmlns})

        if self._has_parent_child_selection_cycle({m.unique_id: m for m in self.get_modules()}):
            errors.append({'type': 'parent cycle'})

        if not errors:
            errors = super(Application, self).validate_app()
        return errors

    def _has_parent_child_selection_cycle(self, modules):
        """
        :param modules: A mapping of module unique_ids to Module objects
        :return: True if there is a cycle in the parent-child selection graph
        """
        visited = set()
        completed = set()

        def cycle_helper(m):
            if m.id in visited:
                if m.id in completed:
                    return False
                return True
            visited.add(m.id)
            if hasattr(m, 'parent_select') and m.parent_select.active:
                parent = modules.get(m.parent_select.module_id, None)
                # identity comparison is the correct None check
                if parent is not None and cycle_helper(parent):
                    return True
            completed.add(m.id)
            return False

        for module in modules.values():
            if cycle_helper(module):
                return True
        return False

    @classmethod
    def get_by_xmlns(cls, domain, xmlns):
        r = cls.get_db().view('exports_forms/by_xmlns',
            key=[domain, {}, xmlns],
            group=True,
            stale=settings.COUCH_STALE_QUERY,
        ).one()
        return cls.get(r['value']['app']['id']) if r and 'app' in r['value'] else None

    def get_profile_setting(self, s_type, s_id):
        """Resolve a profile setting: explicit value, then contingent
        default, then version-gated disabled default, then plain default."""
        setting = self.profile.get(s_type, {}).get(s_id)
        if setting is not None:
            return setting
        yaml_setting = commcare_settings.SETTINGS_LOOKUP[s_type][s_id]
        for contingent in yaml_setting.get("contingent_default", []):
            if check_condition(self, contingent["condition"]):
                setting = contingent["value"]
        if setting is not None:
            return setting
        if self.build_version < yaml_setting.get("since", "0"):
            setting = yaml_setting.get("disabled_default", None)
            if setting is not None:
                return setting
        return yaml_setting.get("default")

    @property
    def has_careplan_module(self):
        return any((module for module in self.modules if isinstance(module, CareplanModule)))
class RemoteApp(ApplicationBase):
    """
    A wrapper for a url pointing to a suite or profile file. This allows you to
    write all the files for an app by hand, and then give the url to app_manager
    and let it package everything together for you.
    """
    profile_url = StringProperty(default="http://")
    name = StringProperty()
    # when True, HQ manages (rewrites) resource URLs for this app
    manage_urls = BooleanProperty(default=False)
    # lazily-built cache of xmlns -> question list; see get_questions()
    questions_map = DictProperty(required=False)
    def is_remote_app(self):
        return True
    @classmethod
    def new_app(cls, domain, name, lang='en'):
        app = cls(domain=domain, name=name, langs=[lang])
        return app
    def create_profile(self, is_odk=False):
        # we don't do odk for now anyway
        return remote_app.make_remote_profile(self)
    def strip_location(self, location):
        return remote_app.strip_location(self.profile_url, location)
    def fetch_file(self, location):
        """Download *location* (relative to profile_url); return
        (stripped_location, content) or raise AppEditingError."""
        location = self.strip_location(location)
        url = urljoin(self.profile_url, location)
        try:
            content = urlopen(url).read()
        except Exception:
            raise AppEditingError('Unable to access resource url: "%s"' % url)
        return location, content
    @classmethod
    def get_locations(cls, suite):
        """Yield (resource tag, location) pairs from a parsed suite,
        preferring local-authority locations over remote ones."""
        for resource in suite.findall('*/resource'):
            try:
                loc = resource.findtext('location[@authority="local"]')
            except Exception:
                loc = resource.findtext('location[@authority="remote"]')
            yield resource.getparent().tag, loc
    @property
    def SUITE_XPATH(self):
        return 'suite/resource/location[@authority="local"]'
    def create_all_files(self):
        """Assemble the full file set by walking profile -> suite ->
        referenced resources, localizing xforms to build_langs."""
        files = {
            'profile.xml': self.create_profile(),
        }
        tree = _parse_xml(files['profile.xml'])
        def add_file_from_path(path, strict=False):
            added_files = []
            # must find at least one
            try:
                tree.find(path).text
            except (TypeError, AttributeError):
                if strict:
                    raise AppEditingError("problem with file path reference!")
                else:
                    return
            for loc_node in tree.findall(path):
                loc, file = self.fetch_file(loc_node.text)
                files[loc] = file
                added_files.append(file)
            return added_files
        add_file_from_path('features/users/logo')
        try:
            suites = add_file_from_path(self.SUITE_XPATH, strict=True)
        except AppEditingError:
            raise AppEditingError(ugettext('Problem loading suite file from profile file. Is your profile file correct?'))
        for suite in suites:
            suite_xml = _parse_xml(suite)
            for tag, location in self.get_locations(suite_xml):
                location, data = self.fetch_file(location)
                if tag == 'xform' and self.build_langs:
                    try:
                        xform = XForm(data)
                    except XFormError as e:
                        raise XFormError('In file %s: %s' % (location, e))
                    xform.exclude_languages(whitelist=self.build_langs)
                    data = xform.render()
                files.update({location: data})
        return files
    def scrub_source(self, source):
        # remote apps have no generated form ids to scrub
        pass
    def make_questions_map(self):
        """Build {xmlns: questions} from this (built) app's attachments;
        returns None on a working copy (nothing attached to read)."""
        if self.copy_of:
            xmlns_map = {}
            def fetch(location):
                filepath = self.strip_location(location)
                return self.fetch_attachment('files/%s' % filepath)
            profile_xml = _parse_xml(fetch('profile.xml'))
            suite_location = profile_xml.find(self.SUITE_XPATH).text
            suite_xml = _parse_xml(fetch(suite_location))
            for tag, location in self.get_locations(suite_xml):
                if tag == 'xform':
                    xform = XForm(fetch(location))
                    xmlns = xform.data_node.tag_xmlns
                    questions = xform.get_questions(self.build_langs)
                    xmlns_map[xmlns] = questions
            return xmlns_map
        else:
            return None
    def get_questions(self, xmlns):
        """Return the question list for *xmlns*, building and persisting
        the questions_map cache on first use."""
        if not self.questions_map:
            self.questions_map = self.make_questions_map()
            if not self.questions_map:
                return []
            # persist the freshly built map so it's cached on the doc
            self.save()
        questions = self.questions_map.get(xmlns, [])
        return questions
def get_apps_in_domain(domain, full=False, include_remote=True):
    """Return all wrapped apps in `domain`, optionally excluding remote apps."""
    view_name = 'app_manager/applications' if full else 'app_manager/applications_brief'
    rows = Application.get_db().view(
        view_name,
        startkey=[domain, None],
        endkey=[domain, None, {}],
        include_docs=True,
    )
    apps = [get_correct_app_class(row['doc']).wrap(row['doc']) for row in rows]
    predicate = None if include_remote else (lambda app: not app.is_remote_app())
    return filter(predicate, apps)
def get_app(domain, app_id, wrap_cls=None, latest=False):
    """
    Utility for getting an app, making sure it's in the domain specified, and wrapping it in the right class
    (Application or RemoteApp).

    With latest=True, resolves app_id to the newest released ("starred")
    build of the same app family, falling back to the given app when no
    released build exists.
    """
    if latest:
        try:
            original_app = get_db().get(app_id)
        except ResourceNotFound:
            raise Http404()
        if not domain:
            try:
                domain = original_app['domain']
            except Exception:
                raise Http404()
        if original_app.get('copy_of'):
            # app_id points at a build: search among builds of its parent,
            # no older than this build's version if this build is released
            parent_app_id = original_app.get('copy_of')
            min_version = original_app['version'] if original_app.get('is_released') else -1
        else:
            # app_id points at the source app: any released build qualifies
            parent_app_id = original_app['_id']
            min_version = -1
        # newest released build first (descending, limit=1)
        latest_app = get_db().view('app_manager/applications',
            startkey=['^ReleasedApplications', domain, parent_app_id, {}],
            endkey=['^ReleasedApplications', domain, parent_app_id, min_version],
            limit=1,
            descending=True,
            include_docs=True
        ).one()
        try:
            app = latest_app['doc']
        except TypeError:
            # If no starred builds, return act as if latest=False
            app = original_app
    else:
        try:
            app = get_db().get(app_id)
        except Exception:
            raise Http404()
    if domain and app['domain'] != domain:
        raise Http404()
    try:
        cls = wrap_cls or get_correct_app_class(app)
    except DocTypeError:
        raise Http404()
    app = cls.wrap(app)
    return app
# Special-cased domains used by the example-app machinery below.
EXAMPLE_DOMAIN = 'example'
BUG_REPORTS_DOMAIN = 'bug-reports'
def _get_or_create_app(app_id):
    """Fetch an app by id; lazily create the hello-world example app."""
    if app_id == "example--hello-world":
        try:
            app = Application.get(app_id)
        except ResourceNotFound:
            app = Application.wrap(fixtures.hello_world_example)
            app._id = app_id
            app.domain = EXAMPLE_DOMAIN
            app.save()
            # re-enter so the just-saved doc is fetched through Application.get
            return _get_or_create_app(app_id)
        return app
    else:
        return get_app(None, app_id)
# Maps a couch doc_type string to the class that should wrap it; the
# "-Deleted" variants wrap soft-deleted docs with the same class.
str_to_cls = {
    "Application": Application,
    "Application-Deleted": Application,
    "RemoteApp": RemoteApp,
    "RemoteApp-Deleted": RemoteApp,
}
def import_app(app_id_or_source, domain, name=None, validate_source_domain=None):
    """Copy an app into `domain` from an app id or an exported-source dict.

    `validate_source_domain`, when given, is called with the source app's
    domain and may raise to forbid the import.
    """
    if isinstance(app_id_or_source, basestring):
        app_id = app_id_or_source
        source = _get_or_create_app(app_id)
        src_dom = source['domain']
        if validate_source_domain:
            validate_source_domain(src_dom)
        source = source.export_json()
        source = json.loads(source)
    else:
        source = app_id_or_source
    try:
        attachments = source['_attachments']
    except KeyError:
        attachments = {}
    finally:
        # detach attachments from the doc; matching ones re-attached below
        source['_attachments'] = {}
    if name:
        source['name'] = name
    cls = str_to_cls[source['doc_type']]
    # Allow the wrapper to update to the current default build_spec
    if 'build_spec' in source:
        del source['build_spec']
    app = cls.from_source(source, domain)
    app.save()
    if not app.is_remote_app():
        # make the app's multimedia valid in the destination domain too
        for _, m in app.get_media_objects():
            if domain not in m.valid_domains:
                m.valid_domains.append(domain)
                m.save()
    for name, attachment in attachments.items():
        if re.match(ATTACHMENT_REGEX, name):
            app.put_attachment(attachment, name)
    return app
class DeleteApplicationRecord(DeleteRecord):
    """Undo record for a deleted application."""
    app_id = StringProperty()

    def undo(self):
        # restore the doc_type from its "-Deleted" variant and save
        app = ApplicationBase.get(self.app_id)
        app.doc_type = app.get_doc_type()
        app.save(increment_version=False)
class DeleteModuleRecord(DeleteRecord):
    """Undo record for a module deleted from an application."""
    app_id = StringProperty()
    module_id = IntegerProperty()  # original index of the module
    module = SchemaProperty(ModuleBase)  # full snapshot of the module

    def undo(self):
        app = Application.get(self.app_id)
        # re-insert the snapshot at its original position
        modules = app.modules
        modules.insert(self.module_id, self.module)
        app.modules = modules
        app.save()
class DeleteFormRecord(DeleteRecord):
    """Undo record for a form deleted from a module."""
    app_id = StringProperty()
    module_id = IntegerProperty()  # index fallback when unique id is absent
    module_unique_id = StringProperty()
    form_id = IntegerProperty()  # original index of the form in the module
    form = SchemaProperty(FormBase)  # full snapshot of the form

    def undo(self):
        app = Application.get(self.app_id)
        # prefer lookup by unique id; older records only have the index
        if self.module_unique_id is not None:
            module = app.get_module_by_unique_id(self.module_unique_id)
        else:
            module = app.modules[self.module_id]
        forms = module.forms
        forms.insert(self.form_id, self.form)
        module.forms = forms
        app.save()
class CareplanAppProperties(DocumentSchema):
    """Per-app settings stored inside a CareplanConfig."""
    name = StringProperty()
    latest_release = StringProperty()
    case_type = StringProperty()
    goal_conf = DictProperty()
    task_conf = DictProperty()
class CareplanConfig(Document):
    """Domain-level configuration for careplan apps."""
    domain = StringProperty()
    app_configs = SchemaDictProperty(CareplanAppProperties)

    @classmethod
    def for_domain(cls, domain):
        """Return the domain's CareplanConfig, or None when there isn't one."""
        results = cache_core.cached_view(
            cls.get_db(),
            "domain/docs",
            key=[domain, 'CareplanConfig', None],
            reduce=False,
            include_docs=True,
            wrapper=cls.wrap)
        return results[0] if results else None
# backwards compatibility with suite-1.0.xml
# Monkeypatch id_strings helpers onto the schema classes so legacy suite
# generation code can keep calling these as methods.
FormBase.get_command_id = lambda self: id_strings.form_command(self)
FormBase.get_locale_id = lambda self: id_strings.form_locale(self)
ModuleBase.get_locale_id = lambda self: id_strings.module_locale(self)
ModuleBase.get_case_list_command_id = lambda self: id_strings.case_list_command(self)
ModuleBase.get_case_list_locale_id = lambda self: id_strings.case_list_locale(self)
Module.get_referral_list_command_id = lambda self: id_strings.referral_list_command(self)
Module.get_referral_list_locale_id = lambda self: id_strings.referral_list_locale(self)
# Use new get_apps_in_domain
# coding=utf-8
from distutils.version import LooseVersion
from itertools import chain
import tempfile
import os
import logging
import hashlib
import random
import json
import types
import re
from collections import defaultdict
from datetime import datetime
from functools import wraps
from copy import deepcopy
from urllib2 import urlopen
from urlparse import urljoin
from couchdbkit import ResourceConflict, MultipleResultsFound
from lxml import etree
from django.core.cache import cache
from django.utils.encoding import force_unicode
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _, ugettext
from couchdbkit.exceptions import BadValueError, DocTypeError
from couchdbkit.ext.django.schema import *
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import Http404
from django.template.loader import render_to_string
from restkit.errors import ResourceError
from couchdbkit.resource import ResourceNotFound
from corehq import toggles, privileges
from corehq.apps.app_manager.feature_support import CommCareFeatureSupportMixin
from django_prbac.exceptions import PermissionDenied
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.app_manager.commcare_settings import check_condition
from corehq.apps.app_manager.const import *
from corehq.apps.app_manager.xpath import dot_interpolate, LocationXpath
from corehq.apps.builds import get_default_build_spec
from corehq.util.hash_compat import make_password
from dimagi.utils.couch.cache import cache_core
from dimagi.utils.couch.lazy_attachment_doc import LazyAttachmentDoc
from dimagi.utils.couch.undo import DeleteRecord, DELETED_SUFFIX
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.web import get_url_base, parse_int
from dimagi.utils.couch.database import get_db
import commcare_translations
from corehq.util import bitly
from corehq.util import view_utils
from corehq.apps.appstore.models import SnapshotMixin
from corehq.apps.builds.models import BuildSpec, CommCareBuildConfig, BuildRecord
from corehq.apps.hqmedia.models import HQMediaMixin
from corehq.apps.reports.templatetags.timezone_tags import utc_to_timezone
from corehq.apps.translations.models import TranslationMixin
from corehq.apps.users.models import CouchUser
from corehq.apps.users.util import cc_user_domain
from corehq.apps.domain.models import cached_property
from corehq.apps.app_manager import current_builds, app_strings, remote_app
from corehq.apps.app_manager import fixtures, suite_xml, commcare_settings
from corehq.apps.app_manager.util import split_path, save_xform, get_correct_app_class
from corehq.apps.app_manager.xform import XForm, parse_xml as _parse_xml, \
validate_xform
from corehq.apps.app_manager.templatetags.xforms_extras import trans
from .exceptions import (
AppEditingError,
BlankXFormError,
ConflictingCaseTypeError,
FormNotFoundException,
IncompatibleFormTypeException,
LocationXpathValidationError,
ModuleNotFoundException,
RearrangeError,
VersioningError,
XFormError,
XFormIdNotUnique,
XFormValidationError,
)
from corehq.apps.app_manager import id_strings
# Post-form navigation workflows (FormBase.post_form_workflow choices)
WORKFLOW_DEFAULT = 'default'
WORKFLOW_MODULE = 'module'
WORKFLOW_PREVIOUS = 'previous_screen'

# Case auto-selection modes (AutoSelectCase.mode choices)
AUTO_SELECT_USER = 'user'
AUTO_SELECT_FIXTURE = 'fixture'
AUTO_SELECT_CASE = 'case'
AUTO_SELECT_RAW = 'raw'

DETAIL_TYPES = ['case_short', 'case_long', 'ref_short', 'ref_long']

FIELD_SEPARATOR = ':'

# attachment names that import_app will copy over (top-level .xml files)
ATTACHMENT_REGEX = r'[^/]*\.xml'
def _rename_key(dct, old, new):
if old in dct:
if new in dct and dct[new]:
dct["%s_backup_%s" % (new, hex(random.getrandbits(32))[2:-1])] = dct[new]
dct[new] = dct[old]
del dct[old]
@memoized
def load_case_reserved_words():
    """Load (once) the list of case property names reserved by CommCare."""
    with open(os.path.join(os.path.dirname(__file__), 'static', 'app_manager', 'json', 'case-reserved-words.json')) as f:
        return json.load(f)
@memoized
def load_form_template(filename):
    """Load (once per filename) a form template from this app's data dir."""
    with open(os.path.join(os.path.dirname(__file__), 'data', filename)) as f:
        return f.read()
def partial_escape(xpath):
    """
    HTML-escape an xpath expression, but leave the single quote intact.

    Copied from http://stackoverflow.com/questions/275174/how-do-i-perform-html-decoding-encoding-using-python-django
    but without replacing the single quote.

    NOTE: the entity targets had been mangled into no-op replacements
    ('&' -> '&'); restored here. '&' must be replaced first so the other
    entities are not double-escaped.
    """
    return mark_safe(force_unicode(xpath).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;'))
class IndexedSchema(DocumentSchema):
    """
    Abstract class.
    Meant for documents that appear in a list within another document
    and need to know their own position within that list.
    """
    def with_id(self, i, parent):
        # Stash position and owner; returns self so it chains in generators.
        self._i = i
        self._parent = parent
        return self

    @property
    def id(self):
        return self._i

    def __eq__(self, other):
        return other and (self.id == other.id) and (self._parent == other._parent)

    def __ne__(self, other):
        # Python 2 does not derive != from ==; without this, != fell back
        # to identity comparison and disagreed with __eq__.
        return not self.__eq__(other)
class Getter(object):
    """Descriptor exposing a list attribute as a generator of items bound
    (via with_id) to their index and owning instance; acts like a bound
    method on the owner class."""

    def __init__(self, attr):
        self.attr = attr

    def __call__(self, instance):
        items = getattr(instance, self.attr)
        # enumerate yields indices already in range; the previous
        # `i % len(items)` was a no-op and hid the intent.
        for i, item in enumerate(items):
            yield item.with_id(i, instance)

    def __get__(self, instance, owner):
        # thanks, http://metapython.blogspot.com/2010/11/python-instance-methods-how-are-they.html
        # this makes Getter('foo') act like a bound method
        return types.MethodType(self, instance, owner)
class FormActionCondition(DocumentSchema):
    """
    The condition under which to open/update/close a case/referral

    Either {'type': 'if', 'question': '/xpath/to/node', 'answer': 'value'}
    in which case the action takes place if question has answer answer,
    or {'type': 'always'} in which case the action always takes place.
    """
    type = StringProperty(choices=["if", "always", "never"], default="never")
    question = StringProperty()  # xpath of the question, used when type == 'if'
    answer = StringProperty()  # expected answer, used when type == 'if'
    operator = StringProperty(choices=['=', 'selected'], default='=')

    def is_active(self):
        # 'never' is the only inactive type
        return self.type in ('if', 'always')
class FormAction(DocumentSchema):
    """
    Corresponds to Case XML
    """
    condition = SchemaProperty(FormActionCondition)

    def is_active(self):
        return self.condition.is_active()

    @classmethod
    def get_action_paths(cls, action):
        """Yield every question path the given action references.

        Checks action.properties() so it works across all FormAction
        subclasses, each of which defines a different subset of the
        fields inspected below.
        """
        action_properties = action.properties()
        if action.condition.type == 'if':
            yield action.condition.question
        if 'name_path' in action_properties and action.name_path:
            yield action.name_path
        if 'case_name' in action_properties:
            yield action.case_name
        if 'external_id' in action_properties and action.external_id:
            yield action.external_id
        if 'update' in action_properties:
            # update maps case property -> question path
            for _, path in action.update.items():
                yield path
        if 'case_properties' in action_properties:
            for _, path in action.case_properties.items():
                yield path
        if 'preload' in action_properties:
            # preload maps question path -> case property (note reversed order)
            for path, _ in action.preload.items():
                yield path
class UpdateCaseAction(FormAction):
    # maps case property name -> question path supplying the value
    update = DictProperty()
class PreloadAction(FormAction):
    # maps question path -> case property to preload from
    preload = DictProperty()

    def is_active(self):
        # active iff anything is configured, regardless of condition
        return bool(self.preload)
class UpdateReferralAction(FormAction):
    followup_date = StringProperty()  # xpath of the followup date question

    def get_followup_date(self):
        """Return an xpath expression for the referral follow-up date.

        Uses the configured question, clamped to no earlier than today;
        defaults to two days from today when no question is configured.
        """
        if self.followup_date:
            return "if(date({followup_date}) >= date(today()), {followup_date}, date(today() + 2))".format(
                followup_date=self.followup_date,
            )
        # NOTE: followup_date is falsy here, so this always yields the default
        return self.followup_date or "date(today() + 2)"
class OpenReferralAction(UpdateReferralAction):
    # xpath of the question supplying the referral name
    name_path = StringProperty()
class OpenCaseAction(FormAction):
    name_path = StringProperty()  # xpath of the question supplying the case name
    external_id = StringProperty()  # xpath supplying the case's external id
class OpenSubCaseAction(FormAction):
    """Opens a child case of the form's case."""
    case_type = StringProperty()
    case_name = StringProperty()  # xpath supplying the subcase name
    reference_id = StringProperty()  # index reference id back to the parent
    case_properties = DictProperty()  # case property -> question path
    repeat_context = StringProperty()  # repeat group path, when opened per-repeat
    close_condition = SchemaProperty(FormActionCondition)
class FormActions(DocumentSchema):
    """The full set of case/referral actions configured on a form."""
    open_case = SchemaProperty(OpenCaseAction)
    update_case = SchemaProperty(UpdateCaseAction)
    close_case = SchemaProperty(FormAction)
    open_referral = SchemaProperty(OpenReferralAction)
    update_referral = SchemaProperty(UpdateReferralAction)
    close_referral = SchemaProperty(FormAction)
    case_preload = SchemaProperty(PreloadAction)
    referral_preload = SchemaProperty(PreloadAction)
    subcases = SchemaListProperty(OpenSubCaseAction)

    def all_property_names(self):
        """Every case property touched: update keys, preload target
        properties, and all subcase properties."""
        names = set(self.update_case.update.keys())
        names |= set(self.case_preload.preload.values())
        for subcase in self.subcases:
            names |= set(subcase.case_properties.keys())
        return names
class AdvancedAction(DocumentSchema):
    """Base for advanced-module case actions, identified by case_tag."""
    case_type = StringProperty()
    case_tag = StringProperty()  # unique tag for this action within the form
    case_properties = DictProperty()  # case property -> question path

    parent_tag = StringProperty()  # case_tag of the parent action, if any
    parent_reference_id = StringProperty(default='parent')

    close_condition = SchemaProperty(FormActionCondition)

    def get_paths(self):
        """Yield every question path this action references."""
        for path in self.case_properties.values():
            yield path

        if self.close_condition.type == 'if':
            yield self.close_condition.question

    def get_property_names(self):
        return set(self.case_properties.keys())

    @property
    def case_session_var(self):
        # session variable holding the selected case id for this action
        return 'case_id_{0}'.format(self.case_tag)
class AutoSelectCase(DocumentSchema):
    """
    Configuration for auto-selecting a case.
    Attributes:
        value_source    Reference to the source of the value. For mode = fixture,
                        this represents the FixtureDataType ID. For mode = case
                        this represents the 'case_tag' for the case.
                        The modes 'user' and 'raw' don't require a value_source.
        value_key       The actual field that contains the case ID. Can be a case
                        index or a user data key or a fixture field name or the raw
                        xpath expression.

    """
    mode = StringProperty(choices=[AUTO_SELECT_USER, AUTO_SELECT_FIXTURE, AUTO_SELECT_CASE, AUTO_SELECT_RAW])
    value_source = StringProperty()
    value_key = StringProperty(required=True)
class LoadUpdateAction(AdvancedAction):
    """
    details_module:           Use the case list configuration from this module to show the cases.
    preload:                  Value from the case to load into the form.
    auto_select:              Configuration for auto-selecting the case
    show_product_stock:       If True list the product stock using the module's Product List
                              configuration.
    product_program:          Only show products for this CommTrack program.
    """
    details_module = StringProperty()
    preload = DictProperty()  # question path -> case property
    auto_select = SchemaProperty(AutoSelectCase, default=None)
    show_product_stock = BooleanProperty(default=False)
    product_program = StringProperty()

    def get_paths(self):
        # parent's paths (case_properties, close condition) plus preloads
        for path in super(LoadUpdateAction, self).get_paths():
            yield path

        for path in self.preload.values():
            yield path

    def get_property_names(self):
        names = super(LoadUpdateAction, self).get_property_names()
        names.update(self.preload.keys())
        return names
class AdvancedOpenCaseAction(AdvancedAction):
    """Advanced-module action that opens a (possibly child) case."""
    name_path = StringProperty()  # xpath supplying the new case's name
    repeat_context = StringProperty()  # repeat group path, when opened per-repeat
    open_condition = SchemaProperty(FormActionCondition)

    def get_paths(self):
        for path in super(AdvancedOpenCaseAction, self).get_paths():
            yield path

        yield self.name_path

        if self.open_condition.type == 'if':
            yield self.open_condition.question
class AdvancedFormActions(DocumentSchema):
    """The set of load/update and open actions on an advanced form, with
    memoized per-tag metadata for hierarchy and auto-select lookups."""
    load_update_cases = SchemaListProperty(LoadUpdateAction)

    open_cases = SchemaListProperty(AdvancedOpenCaseAction)

    def get_all_actions(self):
        return self.load_update_cases + self.open_cases

    def get_subcase_actions(self):
        # any action with a parent_tag is a subcase of some other action
        return (a for a in self.get_all_actions() if a.parent_tag)

    def get_open_subcase_actions(self, parent_case_type=None):
        """Yield open actions with a parent, optionally filtered to parents
        of the given case type."""
        for action in [a for a in self.open_cases if a.parent_tag]:
            if not parent_case_type:
                yield action
            else:
                parent = self.actions_meta_by_tag[action.parent_tag]['action']
                if parent.case_type == parent_case_type:
                    yield action

    def get_case_tags(self):
        for action in self.get_all_actions():
            yield action.case_tag

    def get_action_from_tag(self, tag):
        return self.actions_meta_by_tag.get(tag, {}).get('action', None)

    @property
    def actions_meta_by_tag(self):
        return self._action_meta()['by_tag']

    @property
    def actions_meta_by_parent_tag(self):
        return self._action_meta()['by_parent_tag']

    def get_action_hierarchy(self, action):
        """Return [action, parent, grandparent, ...] following parent_tag.

        Raises ValueError when the chain loops back on itself.
        """
        current = action
        hierarchy = [current]
        while current and current.parent_tag:
            parent = self.get_action_from_tag(current.parent_tag)
            current = parent
            if parent:
                if parent in hierarchy:
                    circular = [a.case_tag for a in hierarchy + [parent]]
                    raise ValueError("Circular reference in subcase hierarchy: {0}".format(circular))
                hierarchy.append(parent)

        return hierarchy

    @property
    def auto_select_actions(self):
        # load actions grouped by their auto-select mode
        return self._action_meta()['by_auto_select_mode']

    @memoized
    def _action_meta(self):
        # one pass over all actions building the three lookup tables above
        meta = {
            'by_tag': {},
            'by_parent_tag': {},
            'by_auto_select_mode': {
                AUTO_SELECT_USER: [],
                AUTO_SELECT_CASE: [],
                AUTO_SELECT_FIXTURE: [],
                AUTO_SELECT_RAW: [],
            }
        }

        def add_actions(type, action_list):
            for action in action_list:
                meta['by_tag'][action.case_tag] = {
                    'type': type,
                    'action': action
                }
                if action.parent_tag:
                    meta['by_parent_tag'][action.parent_tag] = {
                        'type': type,
                        'action': action
                    }
                if type == 'load' and action.auto_select and action.auto_select.mode:
                    meta['by_auto_select_mode'][action.auto_select.mode].append(action)

        add_actions('load', self.load_update_cases)
        add_actions('open', self.open_cases)

        return meta
class FormSource(object):
    """Descriptor storing a form's XML source as an app attachment named
    '<unique_id>.xml' rather than on the form document itself."""

    def __get__(self, form, form_cls):
        if not form:
            return self
        unique_id = form.get_unique_id()
        app = form.get_app()
        filename = "%s.xml" % unique_id

        # for backwards compatibility of really old apps
        try:
            old_contents = form['contents']
        except AttributeError:
            pass
        else:
            # migrate inline 'contents' into an attachment on first access
            app.lazy_put_attachment(old_contents, filename)
            del form['contents']

        try:
            source = app.lazy_fetch_attachment(filename)
        except (ResourceNotFound, KeyError):
            source = ''

        return source

    def __set__(self, form, value):
        unique_id = form.get_unique_id()
        app = form.get_app()
        filename = "%s.xml" % unique_id
        app.lazy_put_attachment(value, filename)
        # source changed: drop cached validation and re-derive the xmlns
        form.validation_cache = None
        try:
            form.xmlns = form.wrapped_xform().data_node.tag_xmlns
        except Exception:
            form.xmlns = None
class CachedStringProperty(object):
    """Descriptor backed by the Django cache; the cache key is derived
    from the owning instance by the callable given to __init__."""

    def __init__(self, key):
        self.get_key = key

    def __get__(self, instance, owner):
        return self.get(self.get_key(instance))

    def __set__(self, instance, value):
        self.set(self.get_key(instance), value)

    @classmethod
    def get(cls, key):
        return cache.get(key)

    @classmethod
    def set(cls, key, value):
        # cache for 7 days
        cache.set(key, value, 60 * 60 * 24 * 7)
class ScheduleVisit(DocumentSchema):
    """
    due:            Days after the anchor date that this visit is due
    late_window:    Days after the due day that this visit is valid until
    """
    due = IntegerProperty()
    late_window = IntegerProperty()
class FormSchedule(DocumentSchema):
    """
    anchor:                     Case property containing a date after which this schedule becomes active
    expires:                    Days after the anchor date that this schedule expires (optional)
    visits:                     List of visits in this schedule
    post_schedule_increment:    Repeat period for visits to occur after the last fixed visit (optional)
    transition_condition:       Condition under which the schedule transitions to the next phase
    termination_condition:      Condition under which the schedule terminates
    """
    anchor = StringProperty()
    expires = IntegerProperty()
    visits = SchemaListProperty(ScheduleVisit)
    post_schedule_increment = IntegerProperty()
    transition_condition = SchemaProperty(FormActionCondition)
    termination_condition = SchemaProperty(FormActionCondition)
class FormBase(DocumentSchema):
    """
    Part of a Managed Application; configuration for a form.
    Translates to a second-level menu on the phone
    """
    form_type = None  # set by subclasses; used to dispatch build validation

    name = DictProperty(unicode)  # lang code -> localized form name
    unique_id = StringProperty()
    show_count = BooleanProperty(default=False)
    xmlns = StringProperty()
    version = IntegerProperty()
    source = FormSource()
    # validation results live in the cache backend, keyed per app+form
    validation_cache = CachedStringProperty(
        lambda self: "cache-%s-%s-validation" % (self.get_app().get_id, self.unique_id)
    )
    post_form_workflow = StringProperty(
        default=WORKFLOW_DEFAULT,
        choices=[WORKFLOW_DEFAULT, WORKFLOW_MODULE, WORKFLOW_PREVIOUS]
    )
    auto_gps_capture = BooleanProperty(default=False)

    @classmethod
    def wrap(cls, data):
        # validation_cache is not part of the document; drop stale copies
        data.pop('validation_cache', '')

        if cls is FormBase:
            # dispatch to the concrete form class by doc_type
            doc_type = data['doc_type']
            if doc_type == 'Form':
                return Form.wrap(data)
            elif doc_type == 'AdvancedForm':
                return AdvancedForm.wrap(data)
            else:
                try:
                    return CareplanForm.wrap(data)
                except ValueError:
                    raise ValueError('Unexpected doc_type for Form', doc_type)
        else:
            return super(FormBase, cls).wrap(data)

    @classmethod
    def generate_id(cls):
        # 160 random bits as a hex string (trailing char stripped)
        return hex(random.getrandbits(160))[2:-1]

    @classmethod
    def get_form(cls, form_unique_id, and_app=False):
        """Look up a form (and optionally its app) by its unique id.

        Raises XFormIdNotUnique when the index view returns more than one
        row, and ResourceNotFound when it returns none.
        """
        try:
            d = get_db().view(
                'app_manager/xforms_index',
                key=form_unique_id
            ).one()
        except MultipleResultsFound as e:
            raise XFormIdNotUnique(
                "xform id '%s' not unique: %s" % (form_unique_id, e)
            )
        if d:
            d = d['value']
        else:
            raise ResourceNotFound()
        # unpack the dict into variables app_id, module_id, form_id
        app_id, unique_id = [d[key] for key in ('app_id', 'unique_id')]

        app = Application.get(app_id)
        form = app.get_form(unique_id)
        if and_app:
            return form, app
        else:
            return form

    @property
    def schedule_form_id(self):
        # short identifier used by scheduling; first 6 chars of unique_id
        return self.unique_id[:6]

    def wrapped_xform(self):
        return XForm(self.source)

    def validate_form(self):
        """Validate this form's source, caching the outcome.

        Returns self on success; raises XFormValidationError (rebuilt from
        the cached JSON payload) on failure.
        """
        vc = self.validation_cache
        if vc is None:
            # no cached result: validate now and cache the outcome
            try:
                validate_xform(self.source,
                               version=self.get_app().application_version)
            except XFormValidationError as e:
                validation_dict = {
                    "fatal_error": e.fatal_error,
                    "validation_problems": e.validation_problems,
                    "version": e.version,
                }
                vc = self.validation_cache = json.dumps(validation_dict)
            else:
                # empty string means "validated OK"
                vc = self.validation_cache = ""
        if vc:
            try:
                raise XFormValidationError(**json.loads(vc))
            except ValueError:
                # corrupt cache entry: clear it and re-validate
                self.validation_cache = None
                return self.validate_form()
        return self

    def validate_for_build(self, validate_module=True):
        """Collect build-blocking errors for this form as a list of dicts."""
        errors = []

        try:
            module = self.get_module()
        except AttributeError:
            module = None

        # metadata merged into every error dict for display purposes
        meta = {
            'form_type': self.form_type,
            'module': module.get_module_info() if module else {},
            'form': {"id": self.id if hasattr(self, 'id') else None, "name": self.name}
        }

        xml_valid = False
        if self.source == '':
            errors.append(dict(type="blank form", **meta))
        else:
            try:
                _parse_xml(self.source)
                xml_valid = True
            except XFormError as e:
                errors.append(dict(
                    type="invalid xml",
                    message=unicode(e) if self.source else '',
                    **meta
                ))
            except ValueError:
                logging.error("Failed: _parse_xml(string=%r)" % self.source)
                raise
            else:
                try:
                    self.validate_form()
                except XFormValidationError as e:
                    error = {'type': 'validation error', 'validation_message': unicode(e)}
                    error.update(meta)
                    errors.append(error)

        # subclasses add their own checks via extended_build_validation
        errors.extend(self.extended_build_validation(meta, xml_valid, validate_module))

        return errors

    def extended_build_validation(self, error_meta, xml_valid, validate_module=True):
        """
        Override to perform additional validation during build process.
        """
        return []

    def get_unique_id(self):
        """
        Return unique_id if it exists, otherwise initialize it

        Does _not_ force a save, so it's the caller's responsibility to save the app

        """
        if not self.unique_id:
            self.unique_id = FormBase.generate_id()
        return self.unique_id

    def get_app(self):
        return self._app

    def get_version(self):
        return self.version if self.version else self.get_app().version

    def add_stuff_to_xform(self, xform):
        # normalize languages/itext and stamp the version onto the xform
        app = self.get_app()
        xform.exclude_languages(app.build_langs)
        xform.set_default_language(app.build_langs[0])
        xform.normalize_itext()
        xform.set_version(self.get_version())

    def render_xform(self):
        xform = XForm(self.source)
        self.add_stuff_to_xform(xform)
        return xform.render()

    def get_questions(self, langs, **kwargs):
        return XForm(self.source).get_questions(langs, **kwargs)

    @memoized
    def get_case_property_name_formatter(self):
        """Get a function that formats case property names

        The returned function requires two arguments
        `(case_property_name, data_path)` and returns a string.
        """
        try:
            valid_paths = {question['value']: question['tag']
                           for question in self.get_questions(langs=[])}
        except XFormError as e:
            # punt on invalid xml (sorry, no rich attachments)
            valid_paths = {}

        def format_key(key, path):
            # prefix attachment-backed properties so they are stored as such
            if valid_paths.get(path) == "upload":
                return u"{}{}".format(ATTACHMENT_PREFIX, key)
            return key
        return format_key

    def export_json(self, dump_json=True):
        source = self.to_json()
        del source['unique_id']
        return json.dumps(source) if dump_json else source

    def rename_lang(self, old_lang, new_lang):
        _rename_key(self.name, old_lang, new_lang)
        try:
            self.rename_xform_language(old_lang, new_lang)
        except XFormError:
            pass

    def rename_xform_language(self, old_code, new_code):
        source = XForm(self.source)
        source.rename_language(old_code, new_code)
        source = source.render()
        self.source = source

    def default_name(self):
        # best-available display name given the app's language preferences
        app = self.get_app()
        return trans(
            self.name,
            [app.default_language] + app.build_langs,
            include_lang=False
        )

    @property
    def full_path_name(self):
        return "%(app_name)s > %(module_name)s > %(form_name)s" % {
            'app_name': self.get_app().name,
            'module_name': self.get_module().default_name(),
            'form_name': self.default_name()
        }

    @property
    def has_fixtures(self):
        # cheap substring test for item-list fixture references in the source
        return 'src="jr://fixture/item-list:' in self.source

    def get_auto_gps_capture(self):
        app = self.get_app()
        if app.build_version and app.enable_auto_gps:
            # form-level OR app-level setting turns capture on
            return self.auto_gps_capture or app.auto_gps_capture
        else:
            return False
class IndexedFormBase(FormBase, IndexedSchema):
    """A form that lives in a module's form list; navigates the parent
    chain (module -> app) set up by IndexedSchema.with_id."""

    def get_app(self):
        # self._parent is the module; the module's parent is the app
        return self._parent._parent

    def get_module(self):
        return self._parent

    def get_case_type(self):
        return self._parent.case_type

    def check_case_properties(self, all_names=None, subcase_names=None, case_tag=None):
        """Return error dicts for invalid or reserved case property names."""
        all_names = all_names or []
        subcase_names = subcase_names or []
        errors = []

        # reserved_words are hard-coded in three different places!
        # Here, case-config-ui-*.js, and module_view.html
        reserved_words = load_case_reserved_words()
        for key in all_names:
            try:
                validate_property(key)
            except ValueError:
                errors.append({'type': 'update_case word illegal', 'word': key, 'case_tag': case_tag})
            _, key = split_path(key)
            if key in reserved_words:
                errors.append({'type': 'update_case uses reserved word', 'word': key, 'case_tag': case_tag})

        # no parent properties for subcase
        for key in subcase_names:
            if not re.match(r'^[a-zA-Z][\w_-]*$', key):
                errors.append({'type': 'update_case word illegal', 'word': key, 'case_tag': case_tag})

        return errors

    def check_paths(self, paths):
        """Return error dicts for question paths not present in the form."""
        errors = []
        try:
            valid_paths = {question['value']: question['tag']
                           for question in self.get_questions(langs=[])}
        except XFormError as e:
            errors.append({'type': 'invalid xml', 'message': unicode(e)})
        else:
            no_multimedia = not self.get_app().enable_multimedia_case_property
            for path in set(paths):
                if path not in valid_paths:
                    errors.append({'type': 'path error', 'path': path})
                elif no_multimedia and valid_paths[path] == "upload":
                    errors.append({'type': 'multimedia case property not supported', 'path': path})

        return errors
class JRResourceProperty(StringProperty):
    """StringProperty that only accepts jr:// resource URIs."""

    def validate(self, value, required=True):
        super(JRResourceProperty, self).validate(value, required)
        if value is not None and not value.startswith('jr://'):
            # fixed: the message previously ended with an unbalanced quote
            # ("...start with 'jr://")
            raise BadValueError("JR Resources must start with 'jr://'")
        return value
class NavMenuItemMediaMixin(DocumentSchema):
    """Adds optional icon/audio jr:// resources to a nav menu item."""
    media_image = JRResourceProperty(required=False)
    media_audio = JRResourceProperty(required=False)
class Form(IndexedFormBase, NavMenuItemMediaMixin):
form_type = 'module_form'
form_filter = StringProperty()
requires = StringProperty(choices=["case", "referral", "none"], default="none")
actions = SchemaProperty(FormActions)
def add_stuff_to_xform(self, xform):
super(Form, self).add_stuff_to_xform(xform)
xform.add_case_and_meta(self)
def all_other_forms_require_a_case(self):
m = self.get_module()
return all([form.requires == 'case' for form in m.get_forms() if form.id != self.id])
def _get_active_actions(self, types):
actions = {}
for action_type in types:
a = getattr(self.actions, action_type)
if isinstance(a, list):
if a:
actions[action_type] = a
elif a.is_active():
actions[action_type] = a
return actions
def active_actions(self):
if self.get_app().application_version == APP_V1:
action_types = (
'open_case', 'update_case', 'close_case',
'open_referral', 'update_referral', 'close_referral',
'case_preload', 'referral_preload'
)
else:
if self.requires == 'none':
action_types = (
'open_case', 'update_case', 'close_case', 'subcases',
)
elif self.requires == 'case':
action_types = (
'update_case', 'close_case', 'case_preload', 'subcases',
)
else:
# this is left around for legacy migrated apps
action_types = (
'open_case', 'update_case', 'close_case',
'case_preload', 'subcases',
)
return self._get_active_actions(action_types)
def active_non_preloader_actions(self):
return self._get_active_actions((
'open_case', 'update_case', 'close_case',
'open_referral', 'update_referral', 'close_referral'))
def check_actions(self):
errors = []
subcase_names = set()
for subcase_action in self.actions.subcases:
if not subcase_action.case_type:
errors.append({'type': 'subcase has no case type'})
subcase_names.update(subcase_action.case_properties)
if self.requires == 'none' and self.actions.open_case.is_active() \
and not self.actions.open_case.name_path:
errors.append({'type': 'case_name required'})
errors.extend(self.check_case_properties(
all_names=self.actions.all_property_names(),
subcase_names=subcase_names
))
def generate_paths():
for action in self.active_actions().values():
if isinstance(action, list):
actions = action
else:
actions = [action]
for action in actions:
for path in FormAction.get_action_paths(action):
yield path
errors.extend(self.check_paths(generate_paths()))
return errors
def requires_case(self):
# all referrals also require cases
return self.requires in ("case", "referral")
def requires_case_type(self):
return self.requires_case() or \
bool(self.active_non_preloader_actions())
def requires_referral(self):
return self.requires == "referral"
def extended_build_validation(self, error_meta, xml_valid, validate_module=True):
errors = []
if xml_valid:
for error in self.check_actions():
error.update(error_meta)
errors.append(error)
if validate_module:
needs_case_type = False
needs_case_detail = False
needs_referral_detail = False
if self.requires_case():
needs_case_detail = True
needs_case_type = True
if self.requires_case_type():
needs_case_type = True
if self.requires_referral():
needs_referral_detail = True
errors.extend(self.get_module().get_case_errors(
needs_case_type=needs_case_type,
needs_case_detail=needs_case_detail,
needs_referral_detail=needs_referral_detail,
))
return errors
def get_case_updates(self, case_type):
if self.get_module().case_type == case_type:
format_key = self.get_case_property_name_formatter()
return [format_key(*item)
for item in self.actions.update_case.update.items()]
return []
@memoized
def get_parent_types_and_contributed_properties(self, module_case_type, case_type):
    """
    For subcases of ``case_type`` created by this form, return
    ``(parent_types, case_properties)``: the set of
    (parent case type, relationship) pairs and the set of case
    properties this form contributes to those subcases.
    """
    parent_types = set()
    case_properties = set()
    actions = self.actions
    for subcase in actions.subcases:
        if subcase.case_type != case_type:
            continue
        case_properties.update(subcase.case_properties.keys())
        # only count the module case as a parent when this form also
        # touches it directly
        if case_type != module_case_type and (
                actions.open_case.is_active() or
                actions.update_case.is_active() or
                actions.close_case.is_active()):
            parent_types.add((module_case_type, 'parent'))
    return parent_types, case_properties
class UserRegistrationForm(FormBase):
    """A special form used to register a new mobile user on the phone."""
    form_type = 'user_registration'

    # xpath (within the form) of the question capturing the username
    username_path = StringProperty(default='username')
    # xpath of the question capturing the password
    password_path = StringProperty(default='password')
    # extra user data: arbitrary key => question xpath
    data_paths = DictProperty()

    def add_stuff_to_xform(self, xform):
        # wire the registration-specific bindings into the xform
        super(UserRegistrationForm, self).add_stuff_to_xform(xform)
        xform.add_user_registration(self.username_path, self.password_path, self.data_paths)
class MappingItem(DocumentSchema):
    """A single key/translations pair used by enum-style detail columns."""
    key = StringProperty()
    # lang => localized string
    value = DictProperty()
class DetailColumn(IndexedSchema):
    """
    Represents a column in case selection screen on the phone. Ex:
        {
            'header': {'en': 'Sex', 'por': 'Sexo'},
            'model': 'case',
            'field': 'sex',
            'format': 'enum',
            'xpath': '.',
            'enum': [
                {'key': 'm', 'value': {'en': 'Male', 'por': 'Macho'}},
                {'key': 'f', 'value': {'en': 'Female', 'por': 'Fêmea'}},
            ],
        }
    """
    # lang => localized column header
    header = DictProperty()
    # data model the column reads from, e.g. 'case'
    model = StringProperty()
    # case property name, optionally prefixed with a field type
    # (see field_type/field_property below)
    field = StringProperty()
    # display format, e.g. 'plain', 'enum', 'filter', 'time-ago'
    format = StringProperty()

    # key => translations mapping, used by 'enum'-formatted columns
    enum = SchemaListProperty(MappingItem)

    late_flag = IntegerProperty(default=30)
    advanced = StringProperty(default="")
    calc_xpath = StringProperty(default=".")
    # xpath predicate used by 'filter'-formatted columns
    filter_xpath = StringProperty(default="")
    # divisor in days for 'time-ago' columns; see TimeAgoInterval
    time_ago_interval = FloatProperty(default=365.25)

    @property
    def enum_dict(self):
        """for backwards compatibility with building 1.0 apps"""
        import warnings
        warnings.warn('You should not use enum_dict. Use enum instead',
                      DeprecationWarning)
        return dict((item.key, item.value) for item in self.enum)

    def rename_lang(self, old_lang, new_lang):
        # rename a language key in the header and in every enum value
        for dct in [self.header] + [item.value for item in self.enum]:
            _rename_key(dct, old_lang, new_lang)

    @property
    def field_type(self):
        # prefix before FIELD_SEPARATOR, defaulting to 'property'
        if FIELD_SEPARATOR in self.field:
            return self.field.split(FIELD_SEPARATOR, 1)[0]
        else:
            return 'property'  # equivalent to property:parent/case_property

    @property
    def field_property(self):
        # the part of `field` after the (optional) type prefix
        if FIELD_SEPARATOR in self.field:
            return self.field.split(FIELD_SEPARATOR, 1)[1]
        else:
            return self.field

    class TimeAgoInterval(object):
        # interval lengths in days used to scale 'time-ago' columns
        map = {
            'day': 1.0,
            'week': 7.0,
            'month': 30.4375,
            'year': 365.25
        }

        @classmethod
        def get_from_old_format(cls, format):
            # map the deprecated formats to their interval in days;
            # returns None for any other format
            if format == 'years-ago':
                return cls.map['year']
            elif format == 'months-ago':
                return cls.map['month']

    @classmethod
    def wrap(cls, data):
        # Lazy migration: 'months-ago'/'years-ago' became the single
        # 'time-ago' format parameterized by time_ago_interval
        if data.get('format') in ('months-ago', 'years-ago'):
            data['time_ago_interval'] = cls.TimeAgoInterval.get_from_old_format(data['format'])
            data['format'] = 'time-ago'

        # Lazy migration: enum used to be a dict, now is a list
        if isinstance(data.get('enum'), dict):
            data['enum'] = sorted({'key': key, 'value': value}
                                  for key, value in data['enum'].items())

        return super(DetailColumn, cls).wrap(data)
class SortElement(IndexedSchema):
    """Sort configuration for one field of a case list."""
    field = StringProperty()
    type = StringProperty()
    # sort direction; presumably 'ascending'/'descending' -- not
    # validated here, verify against the suite generator
    direction = StringProperty()

    def values(self):
        # plain-dict view of this element
        values = {
            'field': self.field,
            'type': self.type,
            'direction': self.direction,
        }
        return values
class SortOnlyDetailColumn(DetailColumn):
    """This is a mock type, not intended to be part of a document"""

    @property
    def _i(self):
        """
        assert that SortOnlyDetailColumn never has ._i or .id called
        since it should never be in an app document
        """
        raise NotImplementedError()
class Detail(IndexedSchema):
    """
    Full configuration for a case selection screen
    """
    display = StringProperty(choices=['short', 'long'])

    columns = SchemaListProperty(DetailColumn)
    get_columns = IndexedSchema.Getter('columns')
    sort_elements = SchemaListProperty(SortElement)

    @parse_int([1])
    def get_column(self, i):
        # the modulo lets negative indices wrap around
        return self.columns[i].with_id(i%len(self.columns), self)

    def rename_lang(self, old_lang, new_lang):
        for column in self.columns:
            column.rename_lang(old_lang, new_lang)

    def filter_xpath(self):
        # conjoin the predicates of all 'filter'-formatted columns,
        # substituting each column's dotted field reference
        filters = []
        for i,column in enumerate(self.columns):
            if column.format == 'filter':
                value = dot_interpolate(
                    column.filter_xpath,
                    '%s_%s_%s' % (column.model, column.field, i + 1)
                )
                filters.append("(%s)" % value)
        xpath = ' and '.join(filters)
        return partial_escape(xpath)
class CaseList(IndexedSchema):
    """Configuration for an optional case-list menu entry."""
    # lang => localized menu label
    label = DictProperty()
    # whether the entry is shown at all
    show = BooleanProperty(default=False)

    def rename_lang(self, old_lang, new_lang):
        for dct in (self.label,):
            _rename_key(dct, old_lang, new_lang)
class ParentSelect(DocumentSchema):
    """Configuration for selecting a parent case before the module's own case."""
    active = BooleanProperty(default=False)
    relationship = StringProperty(default='parent')
    # unique_id of the module whose case list supplies the parent
    module_id = StringProperty()
class DetailPair(DocumentSchema):
    """The short (list) and long (detail) screens for one case type."""
    short = SchemaProperty(Detail)
    long = SchemaProperty(Detail)

    @classmethod
    def wrap(cls, data):
        # the display value is implied by the slot, so normalize it
        # regardless of what was stored
        self = super(DetailPair, cls).wrap(data)
        self.short.display = 'short'
        self.long.display = 'long'
        return self
class ModuleBase(IndexedSchema, NavMenuItemMediaMixin):
    """Shared behavior for all module types (basic, careplan, advanced)."""

    # lang => localized module name
    name = DictProperty(unicode)
    unique_id = StringProperty()
    case_type = StringProperty()

    @classmethod
    def wrap(cls, data):
        # polymorphic wrap: dispatch to the concrete module class
        # recorded in the doc
        if cls is ModuleBase:
            doc_type = data['doc_type']
            if doc_type == 'Module':
                return Module.wrap(data)
            elif doc_type == 'CareplanModule':
                return CareplanModule.wrap(data)
            elif doc_type == 'AdvancedModule':
                return AdvancedModule.wrap(data)
            else:
                raise ValueError('Unexpected doc_type for Module', doc_type)
        else:
            return super(ModuleBase, cls).wrap(data)

    def get_or_create_unique_id(self):
        """
        It is the caller's responsibility to save the Application
        after calling this function.

        WARNING: If called on the same doc in different requests without saving,
        this function will return a different uuid each time,
        likely causing unexpected behavior
        """
        if not self.unique_id:
            self.unique_id = FormBase.generate_id()
        return self.unique_id

    get_forms = IndexedSchema.Getter('forms')

    @parse_int([1])
    def get_form(self, i):
        # negative indices wrap around (e.g. -1 is the last form)
        try:
            return self.forms[i].with_id(i % len(self.forms), self)
        except IndexError:
            raise FormNotFoundException()

    def requires_case_details(self):
        # overridden by subclasses that may need a case detail screen
        return False

    def get_case_types(self):
        return set([self.case_type])

    def get_module_info(self):
        # minimal identifying info, used in validation error dicts
        return {
            'id': self.id,
            'name': self.name,
        }

    def get_app(self):
        return self._parent

    def default_name(self):
        # best display name given the app's preferred languages
        app = self.get_app()
        return trans(
            self.name,
            [app.default_language] + app.build_langs,
            include_lang=False
        )

    def rename_lang(self, old_lang, new_lang):
        # propagate the language rename to forms and detail screens
        _rename_key(self.name, old_lang, new_lang)
        for form in self.get_forms():
            form.rename_lang(old_lang, new_lang)
        for _, detail, _ in self.get_details():
            detail.rename_lang(old_lang, new_lang)

    def validate_detail_columns(self, columns):
        """
        Yield an error dict for every invalid detail column:
        bad enum keys, unparseable filter xpaths, and invalid
        location xpaths.
        """
        from corehq.apps.app_manager.suite_xml import FIELD_TYPE_LOCATION
        from corehq.apps.locations.util import parent_child
        hierarchy = None
        for column in columns:
            if column.format in ('enum', 'enum-image'):
                for item in column.enum:
                    key = item.key
                    if not re.match('^([\w_-]*)$', key):
                        yield {
                            'type': 'invalid id key',
                            'key': key,
                            'module': self.get_module_info(),
                        }
            elif column.format == 'filter':
                try:
                    etree.XPath(column.filter_xpath or '')
                except etree.XPathSyntaxError:
                    yield {
                        'type': 'invalid filter xpath',
                        'module': self.get_module_info(),
                        'column': column,
                    }
            elif column.field_type == FIELD_TYPE_LOCATION:
                # location hierarchy lookup is deferred and reused
                # since it hits the domain's location data
                hierarchy = hierarchy or parent_child(self.get_app().domain)
                try:
                    LocationXpath('').validate(column.field_property, hierarchy)
                except LocationXpathValidationError, e:
                    yield {
                        'type': 'invalid location xpath',
                        'details': unicode(e),
                        'module': self.get_module_info(),
                        'column': column,
                    }

    def validate_for_build(self):
        # base build-time checks shared by all module types
        errors = []
        if not self.forms:
            errors.append({
                'type': 'no forms',
                'module': self.get_module_info(),
            })
        if self.requires_case_details():
            errors.extend(self.get_case_errors(
                needs_case_type=True,
                needs_case_detail=True
            ))
        return errors
class Module(ModuleBase):
    """
    A group of related forms, and configuration that applies to them all.
    Translates to a top-level menu on the phone.
    """
    module_type = 'basic'
    # lang => label for the case selection screen
    case_label = DictProperty()
    # lang => label for the referral selection screen
    referral_label = DictProperty()
    forms = SchemaListProperty(Form)
    case_details = SchemaProperty(DetailPair)
    ref_details = SchemaProperty(DetailPair)
    # when True the module's forms appear at the app's top level
    put_in_root = BooleanProperty(default=False)
    case_list = SchemaProperty(CaseList)
    referral_list = SchemaProperty(CaseList)
    task_list = SchemaProperty(CaseList)
    parent_select = SchemaProperty(ParentSelect)

    @classmethod
    def wrap(cls, data):
        # Lazy migration: the old single 'details' list becomes the
        # case_details/ref_details pairs; the key is removed either way
        if 'details' in data:
            try:
                case_short, case_long, ref_short, ref_long = data['details']
            except ValueError:
                # "need more than 0 values to unpack"
                pass
            else:
                data['case_details'] = {
                    'short': case_short,
                    'long': case_long,
                }
                data['ref_details'] = {
                    'short': ref_short,
                    'long': ref_long,
                }
            finally:
                del data['details']
        return super(Module, cls).wrap(data)

    @classmethod
    def new_module(cls, name, lang):
        """Create a fresh module with a default 'Name' case detail column."""
        detail = Detail(
            columns=[DetailColumn(
                format='plain',
                header={(lang or 'en'): ugettext("Name")},
                field='name',
                model='case',
            )]
        )
        module = Module(
            name={(lang or 'en'): name or ugettext("Untitled Module")},
            forms=[],
            case_type='',
            case_details=DetailPair(
                short=Detail(detail.to_json()),
                long=Detail(detail.to_json()),
            ),
        )
        module.get_or_create_unique_id()
        return module

    def new_form(self, name, lang, attachment=''):
        # append a new form and set its XForm source
        form = Form(
            name={lang if lang else "en": name if name else _("Untitled Form")},
        )
        self.forms.append(form)
        form = self.get_form(-1)
        form.source = attachment
        return form

    def add_insert_form(self, from_module, form, index=None, with_source=False):
        """
        Add ``form`` (possibly from another module) to this module,
        converting an action-less AdvancedForm to a plain Form.

        Raises IncompatibleFormTypeException for any other form type.
        """
        if isinstance(form, Form):
            new_form = form
        elif isinstance(form, AdvancedForm) and not form.actions.get_all_actions():
            new_form = Form(
                name=form.name,
                form_filter=form.form_filter,
                media_image=form.media_image,
                media_audio=form.media_audio
            )
            new_form._parent = self
            form._parent = self
            if with_source:
                new_form.source = form.source
        else:
            raise IncompatibleFormTypeException()

        # NOTE(review): index=0 is falsy, so inserting at position 0
        # appends instead -- confirm whether callers ever pass 0
        if index:
            self.forms.insert(index, new_form)
        else:
            self.forms.append(new_form)
        return self.get_form(index or -1)

    def rename_lang(self, old_lang, new_lang):
        super(Module, self).rename_lang(old_lang, new_lang)
        for case_list in (self.case_list, self.referral_list):
            case_list.rename_lang(old_lang, new_lang)

    def get_details(self):
        # (name, detail, enabled) tuples for every detail screen
        return (
            ('case_short', self.case_details.short, True),
            ('case_long', self.case_details.long, True),
            ('ref_short', self.ref_details.short, False),
            ('ref_long', self.ref_details.long, False),
        )

    @property
    def detail_sort_elements(self):
        try:
            return self.case_details.short.sort_elements
        except Exception:
            # legacy docs may lack sort_elements entirely
            return []

    def validate_for_build(self):
        errors = super(Module, self).validate_for_build()
        for sort_element in self.detail_sort_elements:
            try:
                validate_detail_screen_field(sort_element.field)
            except ValueError:
                errors.append({
                    'type': 'invalid sort field',
                    'field': sort_element.field,
                    'module': self.get_module_info(),
                })
        if self.parent_select.active and not self.parent_select.module_id:
            errors.append({
                'type': 'no parent select id',
                'module': self.get_module_info()
            })
        return errors

    def export_json(self, dump_json=True, keep_unique_id=False):
        # serialize the module, optionally stripping form unique_ids so
        # the export can be imported without id collisions
        source = self.to_json()
        if not keep_unique_id:
            for form in source['forms']:
                del form['unique_id']
        return json.dumps(source) if dump_json else source

    def export_jvalue(self):
        return self.export_json(dump_json=False, keep_unique_id=True)

    def requires(self):
        # strongest requirement across this module's forms and lists:
        # 'referral' > 'case' > 'none'
        r = set(["none"])
        for form in self.get_forms():
            r.add(form.requires)
        if self.case_list.show:
            r.add('case')
        if self.referral_list.show:
            r.add('referral')
        for val in ("referral", "case", "none"):
            if val in r:
                return val

    def detail_types(self):
        # detail screens needed for this module's requirement level
        return {
            "referral": ["case_short", "case_long", "ref_short", "ref_long"],
            "case": ["case_short", "case_long"],
            "none": []
        }[self.requires()]

    def requires_case_details(self):
        ret = False
        if self.case_list.show:
            return True
        for form in self.get_forms():
            if form.requires_case():
                ret = True
                break
        return ret

    @memoized
    def all_forms_require_a_case(self):
        return all([form.requires == 'case' for form in self.get_forms()])

    def get_case_errors(self, needs_case_type, needs_case_detail, needs_referral_detail=False):
        """Yield validation error dicts for missing case/referral config."""
        module_info = self.get_module_info()

        if needs_case_type and not self.case_type:
            yield {
                'type': 'no case type',
                'module': module_info,
            }

        if needs_case_detail:
            if not self.case_details.short.columns:
                yield {
                    'type': 'no case detail',
                    'module': module_info,
                }
            columns = self.case_details.short.columns + self.case_details.long.columns
            errors = self.validate_detail_columns(columns)
            for error in errors:
                yield error

        if needs_referral_detail and not self.ref_details.short.columns:
            yield {
                'type': 'no ref detail',
                'module': module_info,
            }
class AdvancedForm(IndexedFormBase, NavMenuItemMediaMixin):
    """
    A form whose case management is expressed as explicit, ordered
    load/update/open actions rather than the basic Form action set.
    """
    form_type = 'advanced_form'
    form_filter = StringProperty()
    actions = SchemaProperty(AdvancedFormActions)
    schedule = SchemaProperty(FormSchedule, default=None)

    def add_stuff_to_xform(self, xform):
        super(AdvancedForm, self).add_stuff_to_xform(xform)
        xform.add_case_and_meta_advanced(self)

    @property
    def requires(self):
        # an advanced form requires a case iff it loads/updates one
        return 'case' if self.actions.load_update_cases else 'none'

    def all_other_forms_require_a_case(self):
        m = self.get_module()
        return all([form.requires == 'case' for form in m.get_forms() if form.id != self.id])

    def check_actions(self):
        """
        Validate this form's advanced actions.

        Returns a list of error dicts; empty when valid.
        """
        errors = []

        for action in self.actions.get_subcase_actions():
            # every subcase must reference an existing parent action
            if action.parent_tag not in self.actions.get_case_tags():
                errors.append({'type': 'missing parent tag', 'case_tag': action.parent_tag})

            if isinstance(action, AdvancedOpenCaseAction):
                if not action.name_path:
                    errors.append({'type': 'case_name required', 'case_tag': action.case_tag})

                # a subcase opened inside a repeat must live in (a
                # sub-context of) its parent's repeat context
                meta = self.actions.actions_meta_by_tag.get(action.parent_tag)
                if meta and meta['type'] == 'open' and meta['action'].repeat_context:
                    if not action.repeat_context or not action.repeat_context.startswith(meta['action'].repeat_context):
                        errors.append({'type': 'subcase repeat context', 'case_tag': action.case_tag})

            try:
                self.actions.get_action_hierarchy(action)
            except ValueError:
                errors.append({'type': 'circular ref', 'case_tag': action.case_tag})

            errors.extend(self.check_case_properties(
                subcase_names=action.get_property_names(),
                case_tag=action.case_tag
            ))

        for action in self.actions.get_all_actions():
            # auto-select actions don't need an explicit case type
            if not action.case_type and (not isinstance(action, LoadUpdateAction) or not action.auto_select):
                errors.append({'type': "no case type in action", 'case_tag': action.case_tag})

            if isinstance(action, LoadUpdateAction) and action.auto_select:
                mode = action.auto_select.mode
                if not action.auto_select.value_key:
                    # each auto-select mode names its key differently
                    key_name = {
                        AUTO_SELECT_CASE: _('Case property'),
                        AUTO_SELECT_FIXTURE: _('Lookup Table field'),
                        AUTO_SELECT_USER: _('custom user property'),
                        AUTO_SELECT_RAW: _('custom XPath expression'),
                    }[mode]
                    errors.append({'type': 'auto select key', 'key_name': key_name})

                if not action.auto_select.value_source:
                    source_names = {
                        AUTO_SELECT_CASE: _('Case tag'),
                        AUTO_SELECT_FIXTURE: _('Lookup Table tag'),
                    }
                    if mode in source_names:
                        errors.append({'type': 'auto select source', 'source_name': source_names[mode]})
                elif mode == AUTO_SELECT_CASE:
                    # the source must point at another action's case tag
                    case_tag = action.auto_select.value_source
                    if not self.actions.get_action_from_tag(case_tag):
                        errors.append({'type': 'auto select case ref', 'case_tag': action.case_tag})

            errors.extend(self.check_case_properties(
                all_names=action.get_property_names(),
                case_tag=action.case_tag
            ))

        if self.form_filter:
            # form filters only make sense when a real
            # (non-auto-selected) case is loaded
            if not any(action for action in self.actions.load_update_cases if not action.auto_select):
                errors.append({'type': "filtering without case"})

        def generate_paths():
            for action in self.actions.get_all_actions():
                for path in action.get_paths():
                    yield path

        errors.extend(self.check_paths(generate_paths()))

        return errors

    def extended_build_validation(self, error_meta, xml_valid, validate_module=True):
        """
        Run the form-type-specific build checks for an AdvancedForm;
        ``error_meta`` is merged into every error produced.
        """
        errors = []
        if xml_valid:
            for error in self.check_actions():
                error.update(error_meta)
                errors.append(error)

        module = self.get_module()
        if module.has_schedule and not (self.schedule and self.schedule.anchor):
            error = {
                'type': 'validation error',
                'validation_message': _("All forms in this module require a visit schedule.")
            }
            error.update(error_meta)
            errors.append(error)

        if validate_module:
            errors.extend(module.get_case_errors(
                needs_case_type=False,
                needs_case_detail=module.requires_case_details(),
                needs_referral_detail=False,
            ))

        return errors

    def get_case_updates(self, case_type):
        # union of properties written by any action on this case type
        updates = set()
        format_key = self.get_case_property_name_formatter()
        for action in self.actions.get_all_actions():
            if action.case_type == case_type:
                updates.update(format_key(*item)
                               for item in action.case_properties.iteritems())
        return updates

    @memoized
    def get_parent_types_and_contributed_properties(self, module_case_type, case_type):
        """
        For subcases of ``case_type``, return ``(parent_types,
        case_properties)``: the (parent case type, relationship) pairs
        and the properties this form contributes to those subcases.
        """
        parent_types = set()
        case_properties = set()
        for subcase in self.actions.get_subcase_actions():
            if subcase.case_type == case_type:
                case_properties.update(
                    subcase.case_properties.keys()
                )
                parent = self.actions.get_action_from_tag(subcase.parent_tag)
                if parent:
                    parent_types.add((parent.case_type, subcase.parent_reference_id or 'parent'))
        return parent_types, case_properties
class AdvancedModule(ModuleBase):
    """
    A module of AdvancedForms, with optional product details and visit
    scheduling (used for CommTrack and complex case workflows).
    """
    module_type = 'advanced'
    case_label = DictProperty()
    forms = SchemaListProperty(AdvancedForm)
    case_details = SchemaProperty(DetailPair)
    product_details = SchemaProperty(DetailPair)
    put_in_root = BooleanProperty(default=False)
    case_list = SchemaProperty(CaseList)
    has_schedule = BooleanProperty()

    @classmethod
    def new_module(cls, name, lang):
        """Create a fresh module with default case and product details."""
        detail = Detail(
            columns=[DetailColumn(
                format='plain',
                header={(lang or 'en'): ugettext("Name")},
                field='name',
                model='case',
            )]
        )
        module = AdvancedModule(
            name={(lang or 'en'): name or ugettext("Untitled Module")},
            forms=[],
            case_type='',
            case_details=DetailPair(
                short=Detail(detail.to_json()),
                long=Detail(detail.to_json()),
            ),
            product_details=DetailPair(
                short=Detail(
                    columns=[
                        DetailColumn(
                            format='plain',
                            header={(lang or 'en'): ugettext("Product")},
                            field='name',
                            model='product',
                        ),
                    ],
                ),
                long=Detail(),
            ),
        )
        module.get_or_create_unique_id()
        return module

    def new_form(self, name, lang, attachment=''):
        # append a new advanced form and set its XForm source
        form = AdvancedForm(
            name={lang if lang else "en": name if name else _("Untitled Form")},
        )
        self.forms.append(form)
        form = self.get_form(-1)
        form.source = attachment
        return form

    def add_insert_form(self, from_module, form, index=None, with_source=False):
        """
        Add ``form`` to this module, converting a basic Form's actions
        into the equivalent advanced load/open actions if necessary.

        Raises IncompatibleFormTypeException for other form types.
        """
        if isinstance(form, AdvancedForm):
            new_form = form
        elif isinstance(form, Form):
            new_form = AdvancedForm(
                name=form.name,
                form_filter=form.form_filter,
                media_image=form.media_image,
                media_audio=form.media_audio
            )
            new_form._parent = self
            form._parent = self
            if with_source:
                new_form.source = form.source

            actions = form.active_actions()
            open = actions.get('open_case', None)
            update = actions.get('update_case', None)
            close = actions.get('close_case', None)
            preload = actions.get('case_preload', None)
            subcases = actions.get('subcases', None)
            case_type = from_module.case_type

            def convert_preload(preload):
                # basic preload maps question => property; advanced
                # actions expect property => question
                return dict(zip(preload.values(),preload.keys()))

            base_action = None
            if open:
                base_action = AdvancedOpenCaseAction(
                    case_type=case_type,
                    case_tag='open_{0}_0'.format(case_type),
                    name_path=open.name_path,
                    open_condition=open.condition,
                    case_properties=update.update if update else {},
                )
                new_form.actions.open_cases.append(base_action)
            elif update or preload or close:
                base_action = LoadUpdateAction(
                    case_type=case_type,
                    case_tag='load_{0}_0'.format(case_type),
                    case_properties=update.update if update else {},
                    preload=convert_preload(preload.preload) if preload else {}
                )

                if from_module.parent_select.active:
                    # recreate the source module's parent-case selection
                    # chain as a series of chained load actions
                    gen = suite_xml.SuiteGenerator(self.get_app())
                    select_chain = gen.get_select_chain(from_module, include_self=False)
                    for n, link in enumerate(reversed(list(enumerate(select_chain)))):
                        i, module = link
                        new_form.actions.load_update_cases.append(LoadUpdateAction(
                            case_type=module.case_type,
                            case_tag='_'.join(['parent'] * (i + 1)),
                            details_module=module.unique_id,
                            parent_tag='_'.join(['parent'] * (i + 2)) if n > 0 else ''
                        ))

                    base_action.parent_tag = 'parent'

                if close:
                    base_action.close_condition = close.condition

                new_form.actions.load_update_cases.append(base_action)

            if subcases:
                for i, subcase in enumerate(subcases):
                    open_subcase_action = AdvancedOpenCaseAction(
                        case_type=subcase.case_type,
                        case_tag='open_{0}_{1}'.format(subcase.case_type, i+1),
                        name_path=subcase.case_name,
                        open_condition=subcase.condition,
                        case_properties=subcase.case_properties,
                        repeat_context=subcase.repeat_context,
                        parent_reference_id=subcase.reference_id,
                        parent_tag=base_action.case_tag if base_action else ''
                    )
                    new_form.actions.open_cases.append(open_subcase_action)
        else:
            raise IncompatibleFormTypeException()

        # NOTE(review): index=0 is falsy, so inserting at position 0
        # appends instead -- confirm whether callers ever pass 0
        if index:
            self.forms.insert(index, new_form)
        else:
            self.forms.append(new_form)
        return self.get_form(index or -1)

    def rename_lang(self, old_lang, new_lang):
        super(AdvancedModule, self).rename_lang(old_lang, new_lang)
        self.case_list.rename_lang(old_lang, new_lang)

    def requires_case_details(self):
        # NOTE: falls through to an implicit None (falsy) when no form
        # loads this module's case type
        if self.case_list.show:
            return True

        for form in self.forms:
            if any(action.case_type == self.case_type for action in form.actions.load_update_cases):
                return True

    def all_forms_require_a_case(self):
        return all(form.requires == 'case' for form in self.forms)

    def get_details(self):
        # (name, detail, enabled) tuples; product details only apply
        # when the app has CommTrack enabled
        return (
            ('case_short', self.case_details.short, True),
            ('case_long', self.case_details.long, True),
            ('product_short', self.product_details.short, self.get_app().commtrack_enabled),
            ('product_long', self.product_details.long, False),
        )

    def get_case_errors(self, needs_case_type, needs_case_detail, needs_referral_detail=False):
        """Yield validation error dicts for missing case/product config."""
        module_info = self.get_module_info()

        if needs_case_type and not self.case_type:
            yield {
                'type': 'no case type',
                'module': module_info,
            }

        if needs_case_detail:
            if not self.case_details.short.columns:
                yield {
                    'type': 'no case detail',
                    'module': module_info,
                }
            if self.get_app().commtrack_enabled and not self.product_details.short.columns:
                # a product detail is only required when some form will
                # actually show product stock
                for form in self.forms:
                    if self.case_list.show or \
                            any(action.show_product_stock for action in form.actions.load_update_cases):
                        yield {
                            'type': 'no product detail',
                            'module': module_info,
                        }
                        break
            columns = self.case_details.short.columns + self.case_details.long.columns
            if self.get_app().commtrack_enabled:
                columns += self.product_details.short.columns
            errors = self.validate_detail_columns(columns)
            for error in errors:
                yield error
class CareplanForm(IndexedFormBase, NavMenuItemMediaMixin):
    """Base class for the goal/task forms of a CareplanModule."""
    form_type = 'careplan_form'
    # whether this form creates or updates its goal/task case
    mode = StringProperty(required=True, choices=['create', 'update'])
    # extra case property => question path updates beyond the fixed ones
    custom_case_updates = DictProperty()
    case_preload = DictProperty()

    @classmethod
    def wrap(cls, data):
        # polymorphic wrap: dispatch on the stored doc_type
        if cls is CareplanForm:
            doc_type = data['doc_type']
            if doc_type == 'CareplanGoalForm':
                return CareplanGoalForm.wrap(data)
            elif doc_type == 'CareplanTaskForm':
                return CareplanTaskForm.wrap(data)
            else:
                raise ValueError('Unexpected doc_type for CareplanForm', doc_type)
        else:
            return super(CareplanForm, cls).wrap(data)

    def add_stuff_to_xform(self, xform):
        super(CareplanForm, self).add_stuff_to_xform(xform)
        xform.add_care_plan(self)

    def get_case_updates(self, case_type):
        # formatted updates, but only for this form's own case type
        if case_type == self.case_type:
            format_key = self.get_case_property_name_formatter()
            return [format_key(*item) for item in self.case_updates().iteritems()]
        else:
            return []

    def get_case_type(self):
        return self.case_type

    def get_parent_case_type(self):
        return self._parent.case_type

    def get_parent_types_and_contributed_properties(self, module_case_type, case_type):
        # goals are children of the module's target case; tasks are
        # children of goals
        parent_types = set()
        case_properties = set()
        if case_type == self.case_type:
            if case_type == CAREPLAN_GOAL:
                parent_types.add((module_case_type, 'parent'))
            elif case_type == CAREPLAN_TASK:
                parent_types.add((CAREPLAN_GOAL, 'goal'))
            case_properties.update(self.case_updates().keys())
        return parent_types, case_properties
class CareplanGoalForm(CareplanForm):
    """Form that creates or updates a care-plan goal case."""
    case_type = CAREPLAN_GOAL
    name_path = StringProperty(required=True, default='/data/name')
    date_followup_path = StringProperty(required=True, default='/data/date_followup')
    description_path = StringProperty(required=True, default='/data/description')
    close_path = StringProperty(required=True, default='/data/close_goal')

    @classmethod
    def new_form(cls, lang, name, mode):
        """Return ``(form, xml_source)`` for a new goal form in ``mode``."""
        action = 'Update' if mode == 'update' else 'New'
        form = CareplanGoalForm(mode=mode)
        name = name or '%s Careplan %s' % (action, CAREPLAN_CASE_NAMES[form.case_type])
        form.name = {lang: name}
        if mode == 'update':
            # the update template nests the description question
            form.description_path = '/data/description_group/description'
        source = load_form_template('%s_%s.xml' % (form.case_type, mode))
        return form, source

    def case_updates(self):
        # fixed goal updates merged over any custom ones
        changes = self.custom_case_updates.copy()
        changes.update({
            'date_followup': self.date_followup_path,
            'description': self.description_path,
        })
        return changes

    def get_fixed_questions(self):
        """Describe the fixed questions this form must contain."""
        def q(name, case_key, label):
            return {
                'name': name,
                'key': case_key,
                'label': label,
                'path': self[name]
            }
        questions = [
            q('description_path', 'description', _('Description')),
            q('date_followup_path', 'date_followup', _('Followup date')),
        ]
        if self.mode == 'create':
            return [q('name_path', 'name', _('Name'))] + questions
        else:
            return questions + [q('close_path', 'close', _('Close if'))]
class CareplanTaskForm(CareplanForm):
    """Form that creates or updates a care-plan task case."""
    case_type = CAREPLAN_TASK
    name_path = StringProperty(required=True, default='/data/task_repeat/name')
    date_followup_path = StringProperty(required=True, default='/data/date_followup')
    description_path = StringProperty(required=True, default='/data/description')
    latest_report_path = StringProperty(required=True, default='/data/progress_group/progress_update')
    close_path = StringProperty(required=True, default='/data/task_complete')

    @classmethod
    def new_form(cls, lang, name, mode):
        """Return ``(form, xml_source)`` for a new task form in ``mode``."""
        action = 'Update' if mode == 'update' else 'New'
        form = CareplanTaskForm(mode=mode)
        name = name or '%s Careplan %s' % (action, CAREPLAN_CASE_NAMES[form.case_type])
        form.name = {lang: name}
        if mode == 'create':
            # the create template puts these questions inside the
            # task repeat group
            form.date_followup_path = '/data/task_repeat/date_followup'
            form.description_path = '/data/task_repeat/description'
        source = load_form_template('%s_%s.xml' % (form.case_type, mode))
        return form, source

    def case_updates(self):
        # fixed task updates merged over any custom ones; description
        # is only set at creation, progress reports only on update
        changes = self.custom_case_updates.copy()
        changes.update({
            'date_followup': self.date_followup_path,
        })
        if self.mode == 'create':
            changes['description'] = self.description_path
        else:
            changes['latest_report'] = self.latest_report_path
        return changes

    def get_fixed_questions(self):
        """Describe the fixed questions this form must contain."""
        def q(name, case_key, label):
            return {
                'name': name,
                'key': case_key,
                'label': label,
                'path': self[name]
            }
        questions = [
            q('date_followup_path', 'date_followup', _('Followup date')),
        ]
        if self.mode == 'create':
            return [
                q('name_path', 'name', _('Name')),
                q('description_path', 'description', _('Description')),
            ] + questions
        else:
            return questions + [
                q('latest_report_path', 'latest_report', _('Latest report')),
                q('close_path', 'close', _('Close if')),
            ]
class CareplanModule(ModuleBase):
    """
    A set of forms and configuration for managing the Care Plan workflow.
    """
    module_type = 'careplan'
    parent_select = SchemaProperty(ParentSelect)

    display_separately = BooleanProperty(default=False)
    forms = SchemaListProperty(CareplanForm)
    goal_details = SchemaProperty(DetailPair)
    task_details = SchemaProperty(DetailPair)

    @classmethod
    def new_module(cls, app, name, lang, target_module_id, target_case_type):
        """
        Create a careplan module whose goal/task cases are children of
        ``target_case_type`` cases selected via ``target_module_id``.
        """
        lang = lang or 'en'
        module = CareplanModule(
            name={lang: name or ugettext("Care Plan")},
            parent_select=ParentSelect(
                active=True,
                relationship='parent',
                module_id=target_module_id
            ),
            case_type=target_case_type,
            goal_details=DetailPair(
                short=cls._get_detail(lang, 'goal_short'),
                long=cls._get_detail(lang, 'goal_long'),
            ),
            task_details=DetailPair(
                short=cls._get_detail(lang, 'task_short'),
                long=cls._get_detail(lang, 'task_long'),
            )
        )
        module.get_or_create_unique_id()
        return module

    @classmethod
    def _get_detail(cls, lang, detail_type):
        """
        Build the default Detail for one of the four detail types:
        'goal_short', 'goal_long', 'task_short', 'task_long'.
        """
        header = ugettext('Goal') if detail_type.startswith('goal') else ugettext('Task')
        columns = [
            DetailColumn(
                format='plain',
                header={lang: header},
                field='name',
                model='case'),
            DetailColumn(
                format='date',
                header={lang: ugettext("Followup")},
                field='date_followup',
                model='case')]

        # long screens additionally show the description
        if detail_type.endswith('long'):
            columns.append(DetailColumn(
                format='plain',
                header={lang: ugettext("Description")},
                field='description',
                model='case'))

        # BUGFIX: this previously compared against 'tasks_long', a value
        # never passed in (see new_module above), so the "Last update"
        # column was never added to the task detail screen
        if detail_type == 'task_long':
            columns.append(DetailColumn(
                format='plain',
                header={lang: ugettext("Last update")},
                field='latest_report',
                model='case'))

        return Detail(type=detail_type, columns=columns)

    def add_insert_form(self, from_module, form, index=None, with_source=False):
        """
        Add a CareplanForm to this module (at ``index`` if given and
        truthy, otherwise appended); other form types are rejected with
        IncompatibleFormTypeException.
        """
        if isinstance(form, CareplanForm):
            if index:
                self.forms.insert(index, form)
            else:
                self.forms.append(form)
            return self.get_form(index or -1)
        else:
            raise IncompatibleFormTypeException()

    def requires_case_details(self):
        return True

    def get_case_types(self):
        # the module's target case type plus the goal/task case types
        return set([self.case_type]) | set(f.case_type for f in self.forms)

    def get_form_by_type(self, case_type, mode):
        # returns None when no form matches
        for form in self.get_forms():
            if form.case_type == case_type and form.mode == mode:
                return form

    def get_details(self):
        # (name, detail, enabled) tuples for every detail screen
        return (
            ('%s_short' % CAREPLAN_GOAL, self.goal_details.short, True),
            ('%s_long' % CAREPLAN_GOAL, self.goal_details.long, True),
            ('%s_short' % CAREPLAN_TASK, self.task_details.short, True),
            ('%s_long' % CAREPLAN_TASK, self.task_details.long, True),
        )

    def get_case_errors(self, needs_case_type, needs_case_detail, needs_referral_detail=False):
        """Yield validation error dicts for missing goal/task config."""
        module_info = self.get_module_info()

        if needs_case_type and not self.case_type:
            yield {
                'type': 'no case type',
                'module': module_info,
            }

        if needs_case_detail:
            if not self.goal_details.short.columns:
                yield {
                    'type': 'no case detail for goals',
                    'module': module_info,
                }
            if not self.task_details.short.columns:
                yield {
                    'type': 'no case detail for tasks',
                    'module': module_info,
                }
            columns = self.goal_details.short.columns + self.goal_details.long.columns
            columns += self.task_details.short.columns + self.task_details.long.columns
            errors = self.validate_detail_columns(columns)
            for error in errors:
                yield error
class VersionedDoc(LazyAttachmentDoc):
"""
A document that keeps an auto-incrementing version number, knows how to make copies of itself,
delete a copy of itself, and revert back to an earlier copy of itself.
"""
domain = StringProperty()
copy_of = StringProperty()
version = IntegerProperty()
short_url = StringProperty()
short_odk_url = StringProperty()
short_odk_media_url = StringProperty()
_meta_fields = ['_id', '_rev', 'domain', 'copy_of', 'version', 'short_url', 'short_odk_url', 'short_odk_media_url']
@property
def id(self):
return self._id
def save(self, response_json=None, increment_version=None, **params):
if increment_version is None:
increment_version = not self.copy_of
if increment_version:
self.version = self.version + 1 if self.version else 1
super(VersionedDoc, self).save(**params)
if response_json is not None:
if 'update' not in response_json:
response_json['update'] = {}
response_json['update']['app-version'] = self.version
def make_build(self):
assert self.get_id
assert self.copy_of is None
cls = self.__class__
copies = cls.view('app_manager/applications', key=[self.domain, self._id, self.version], include_docs=True, limit=1).all()
if copies:
copy = copies[0]
else:
copy = deepcopy(self.to_json())
bad_keys = ('_id', '_rev', '_attachments',
'short_url', 'short_odk_url', 'short_odk_media_url', 'recipients')
for bad_key in bad_keys:
if bad_key in copy:
del copy[bad_key]
copy = cls.wrap(copy)
copy['copy_of'] = self._id
copy.copy_attachments(self)
return copy
def copy_attachments(self, other, regexp=ATTACHMENT_REGEX):
for name in other.lazy_list_attachments() or {}:
if regexp is None or re.match(regexp, name):
self.lazy_put_attachment(other.lazy_fetch_attachment(name), name)
def make_reversion_to_copy(self, copy):
"""
Replaces couch doc with a copy of the backup ("copy").
Returns the another Application/RemoteApp referring to this
updated couch doc. The returned doc should be used in place of
the original doc, i.e. should be called as follows:
app = app.make_reversion_to_copy(copy)
app.save()
"""
if copy.copy_of != self._id:
raise VersioningError("%s is not a copy of %s" % (copy, self))
app = deepcopy(copy.to_json())
app['_rev'] = self._rev
app['_id'] = self._id
app['version'] = self.version
app['copy_of'] = None
if '_attachments' in app:
del app['_attachments']
cls = self.__class__
app = cls.wrap(app)
app.copy_attachments(copy)
return app
def delete_copy(self, copy):
    """Soft-delete a saved build (*copy*) of this app."""
    if copy.copy_of != self._id:
        raise VersioningError("%s is not a copy of %s" % (copy, self))
    copy.delete_app()
    # persist the deleted doc_type without bumping the build's version
    copy.save(increment_version=False)
def scrub_source(self, source):
    """
    To be overridden by subclasses.

    Use this to scrub out of the exported application source anything
    that shouldn't be shared, such as ids, etc.

    Raises:
        NotImplementedError: always, on the base class.
    """
    # Fix: was `raise NotImplemented()` — NotImplemented is a constant,
    # not an exception class, so that line raised "TypeError:
    # 'NotImplementedType' object is not callable" instead of the
    # intended "override me" error.
    raise NotImplementedError()
def export_json(self, dump_json=True):
    """
    Return this app's source with meta fields stripped, suitable for
    re-import via ``from_source``. Attachments whose names match
    ATTACHMENT_REGEX are inlined. Returns a JSON string unless
    ``dump_json=False``, in which case the raw dict is returned.
    """
    source = deepcopy(self.to_json())
    for field in self._meta_fields:
        if field in source:
            del source[field]
    _attachments = {}
    for name in source.get('_attachments', {}):
        if re.match(ATTACHMENT_REGEX, name):
            _attachments[name] = self.fetch_attachment(name)
    source['_attachments'] = _attachments
    self.scrub_source(source)

    return json.dumps(source) if dump_json else source
@classmethod
def from_source(cls, source, domain):
    """Build an (unsaved) app in *domain* from exported source (see export_json)."""
    for field in cls._meta_fields:
        if field in source:
            del source[field]
    source['domain'] = domain
    app = cls.wrap(source)
    return app
def is_deleted(self):
    # soft deletion is modelled by appending DELETED_SUFFIX to doc_type
    return self.doc_type.endswith(DELETED_SUFFIX)

def unretire(self):
    """Undo a soft delete: restore the original doc_type and save."""
    self.doc_type = self.get_doc_type()
    self.save()
def get_doc_type(self):
    """Return doc_type with any soft-delete suffix stripped off."""
    doc_type = self.doc_type
    if doc_type.endswith(DELETED_SUFFIX):
        doc_type = doc_type[:-len(DELETED_SUFFIX)]
    return doc_type
def absolute_url_property(method):
    """
    Helper for the various fully qualified application URLs.

    Turns a method returning an unqualified URL into a property
    returning a fully qualified URL
    (e.g., '/my_url/' => 'https://www.commcarehq.org/my_url/').
    Expects ``self.url_base`` to be the fully qualified url base.
    """
    @wraps(method)
    def _fully_qualified(self):
        relative_url = method(self)
        return "%s%s" % (self.url_base, relative_url)
    return property(_fully_qualified)
class ApplicationBase(VersionedDoc, SnapshotMixin,
                      CommCareFeatureSupportMixin):
    """
    Abstract base class for Application and RemoteApp.
    Contains methods for generating the various files and zipping them into CommCare.jar
    """

    recipients = StringProperty(default="")

    # this is the supported way of specifying which commcare build to use
    build_spec = SchemaProperty(BuildSpec)
    platform = StringProperty(
        choices=["nokia/s40", "nokia/s60", "winmo", "generic"],
        default="nokia/s40"
    )
    text_input = StringProperty(
        choices=['roman', 'native', 'custom-keys', 'qwerty'],
        default="roman"
    )
    success_message = DictProperty()

    # The following properties should only appear on saved builds
    # built_with stores a record of CommCare build used in a saved app
    built_with = SchemaProperty(BuildRecord)
    build_signed = BooleanProperty(default=True)
    built_on = DateTimeProperty(required=False)
    build_comment = StringProperty()
    comment_from = StringProperty()
    build_broken = BooleanProperty(default=False)
    # not used yet, but nice for tagging/debugging
    # currently only canonical value is 'incomplete-build',
    # for when build resources aren't found where they should be
    build_broken_reason = StringProperty()

    # watch out for a past bug:
    # when reverting to a build that happens to be released
    # that got copied into the new app doc, and when new releases were made,
    # they were automatically starred
    # AFAIK this is fixed in code, but may rear its ugly head in an as-yet-not-understood
    # way for apps that already had this problem. Just keep an eye out
    is_released = BooleanProperty(default=False)

    # django-style salted hash of the admin password
    admin_password = StringProperty()
    # a=Alphanumeric, n=Numeric, x=Neither (not allowed)
    admin_password_charset = StringProperty(choices=['a', 'n', 'x'], default='n')

    # This is here instead of in Application because it needs to be available in stub representation
    application_version = StringProperty(default=APP_V2, choices=[APP_V1, APP_V2], required=False)
    langs = StringListProperty()
    # only the languages that go in the build
    build_langs = StringListProperty()
    secure_submissions = BooleanProperty(default=False)

    # exchange properties
    cached_properties = DictProperty()
    description = StringProperty()
    deployment_date = DateTimeProperty()
    phone_model = StringProperty()
    user_type = StringProperty()
    attribution_notes = StringProperty()

    # always false for RemoteApp
    case_sharing = BooleanProperty(default=False)
@classmethod
def wrap(cls, data):
    """Wrap raw couch data, migrating several legacy doc conventions."""
    # scrape for old conventions and get rid of them
    if 'commcare_build' in data:
        version, build_number = data['commcare_build'].split('/')
        data['build_spec'] = BuildSpec.from_string("%s/latest" % version).to_json()
        del data['commcare_build']
    if 'commcare_tag' in data:
        version, build_number = current_builds.TAG_MAP[data['commcare_tag']]
        data['build_spec'] = BuildSpec.from_string("%s/latest" % version).to_json()
        del data['commcare_tag']
    if data.has_key("built_with") and isinstance(data['built_with'], basestring):
        # built_with used to be stored as a plain "version/build" string
        data['built_with'] = BuildSpec.from_string(data['built_with']).to_json()

    if 'native_input' in data:
        if 'text_input' not in data:
            data['text_input'] = 'native' if data['native_input'] else 'roman'
        del data['native_input']

    should_save = False
    if data.has_key('original_doc'):
        # original_doc -> copy_history migration must be persisted
        data['copy_history'] = [data.pop('original_doc')]
        should_save = True

    data["description"] = data.get('description') or data.get('short_description')

    self = super(ApplicationBase, cls).wrap(data)
    if not self.build_spec or self.build_spec.is_null():
        self.build_spec = get_default_build_spec(self.application_version)

    if should_save:
        self.save()
    return self
@classmethod
def by_domain(cls, domain):
    # delegate to the shared domain-scoped app lookup helper
    return get_apps_in_domain(domain)

@classmethod
def get_latest_build(cls, domain, app_id):
    """Return the most recent saved build of *app_id* in *domain*, or None."""
    build = cls.view('app_manager/saved_app',
                     startkey=[domain, app_id, {}],
                     endkey=[domain, app_id],
                     descending=True,
                     limit=1).one()
    return build if build else None
def rename_lang(self, old_lang, new_lang):
    # base implementation only validates; Application overrides with real renaming
    validate_lang(new_lang)

def is_remote_app(self):
    # overridden to return True on RemoteApp
    return False

def get_latest_app(self, released_only=True):
    """Return the latest build of this app; only released builds by default."""
    if released_only:
        return get_app(self.domain, self.get_id, latest=True)
    else:
        return self.view('app_manager/applications',
                         startkey=[self.domain, self.get_id, {}],
                         endkey=[self.domain, self.get_id],
                         include_docs=True,
                         limit=1,
                         descending=True,
                         ).first()
def get_latest_saved(self):
    """
    Return the latest released build if there is one, otherwise the
    latest saved build, otherwise None. Cached on the instance.

    (This looks really similar to get_latest_app, not sure why tim added)
    """
    if not hasattr(self, '_latest_saved'):
        released = self.__class__.view('app_manager/applications',
            startkey=['^ReleasedApplications', self.domain, self._id, {}],
            endkey=['^ReleasedApplications', self.domain, self._id],
            limit=1,
            descending=True,
            include_docs=True
        )
        if len(released) > 0:
            self._latest_saved = released.all()[0]
        else:
            # no released builds: fall back to any saved build
            saved = self.__class__.view('app_manager/saved_app',
                startkey=[self.domain, self._id, {}],
                endkey=[self.domain, self._id],
                descending=True,
                limit=1,
                include_docs=True
            )
            if len(saved) > 0:
                self._latest_saved = saved.all()[0]
            else:
                self._latest_saved = None  # do not return this app!
    return self._latest_saved
def set_admin_password(self, raw_password):
    """Store a salted hash of the admin password and record its charset."""
    salt = os.urandom(5).encode('hex')
    self.admin_password = make_password(raw_password, salt=salt)

    # remember the charset so check_password_charset() can warn about
    # mismatches with the phone's password_format profile setting.
    # NOTE(review): .isnumeric() only exists on unicode (not str) in
    # Python 2 — assumes raw_password is unicode; confirm at call sites.
    if raw_password.isnumeric():
        self.admin_password_charset = 'n'
    elif raw_password.isalnum():
        self.admin_password_charset = 'a'
    else:
        self.admin_password_charset = 'x'
def check_password_charset(self):
    """Return error dicts when the admin password's charset doesn't
    satisfy the profile's password_format setting (numeric/alphanumeric)."""
    errors = []
    if hasattr(self, 'profile'):
        password_format = self.profile.get('properties', {}).get('password_format', 'n')
        message = ('Your app requires {0} passwords '
                   'but the admin password is not {0}')

        # charset 'a' or 'x' cannot satisfy a numeric-only requirement
        if password_format == 'n' and self.admin_password_charset in 'ax':
            errors.append({'type': 'password_format',
                           'message': message.format('numeric')})
        if password_format == 'a' and self.admin_password_charset in 'x':
            errors.append({'type': 'password_format',
                           'message': message.format('alphanumeric')})
    return errors
def get_build(self):
    # resolve the concrete CommCare build from this app's build spec
    return self.build_spec.get_build()

@property
def build_version(self):
    """The CommCare version as a LooseVersion, or None if unset."""
    # `LooseVersion`s are smart!
    # LooseVersion('2.12.0') > '2.2'
    # (even though '2.12.0' < '2.2')
    if self.build_spec.version:
        return LooseVersion(self.build_spec.version)

def get_preview_build(self):
    """Return this app's build if it bundles a WebDemo, else the configured preview build."""
    preview = self.get_build()

    for path in getattr(preview, '_attachments', {}):
        if path.startswith('Generic/WebDemo'):
            return preview
    return CommCareBuildConfig.fetch().preview.get_build()
@property
def commcare_minor_release(self):
    """This is mostly just for views. Returns e.g. '2.9'."""
    return '%d.%d' % self.build_spec.minor_release()

def get_build_label(self):
    """Return the configured menu label for this build spec, else a generated one."""
    for item in CommCareBuildConfig.fetch().menu:
        if item['build'].to_string() == self.build_spec.to_string():
            return item['label']
    return self.build_spec.get_label()

@property
def short_name(self):
    # app name truncated to at most 12 characters (with '..' suffix)
    return self.name if len(self.name) <= 12 else '%s..' % self.name[:10]

@property
def has_careplan_module(self):
    # base apps have no careplan module; overridden where supported
    return False

@property
def url_base(self):
    # fully qualified base consumed by absolute_url_property
    return get_url_base()
@absolute_url_property
def post_url(self):
    """Form submission URL (secure variant when secure_submissions is on)."""
    if self.secure_submissions:
        url_name = 'receiver_secure_post_with_app_id'
    else:
        url_name = 'receiver_post_with_app_id'
    return reverse(url_name, args=[self.domain, self.get_id])

@absolute_url_property
def key_server_url(self):
    # key-exchange endpoint for this domain
    return reverse('key_server_url', args=[self.domain])

@absolute_url_property
def ota_restore_url(self):
    # OTA restore endpoint for this domain
    return reverse('corehq.apps.ota.views.restore', args=[self.domain])

@absolute_url_property
def form_record_url(self):
    # PACT-specific form data API endpoint
    return '/a/%s/api/custom/pact_formdata/v1/' % self.domain

@absolute_url_property
def hq_profile_url(self):
    # ?latest=true makes the server serve the newest starred/saved profile
    return "%s?latest=true" % (
        reverse('download_profile', args=[self.domain, self._id])
    )

@absolute_url_property
def hq_media_profile_url(self):
    return "%s?latest=true" % (
        reverse('download_media_profile', args=[self.domain, self._id])
    )

@property
def profile_loc(self):
    # location of the profile as referenced from within the package
    return "jr://resource/profile.xml"

@absolute_url_property
def jar_url(self):
    return reverse('corehq.apps.app_manager.views.download_jar', args=[self.domain, self._id])
def get_jar_path(self):
    """Map (platform, text_input) to the build's jar directory name."""
    spec = {
        'nokia/s40': 'Nokia/S40',
        'nokia/s60': 'Nokia/S60',
        'generic': 'Generic/Default',
        'winmo': 'Native/WinMo'
    }[self.platform]

    if self.platform in ('nokia/s40', 'nokia/s60'):
        # Nokia builds ship one flavor per text-input mode
        spec += {
            ('native',): '-native-input',
            ('roman',): '-generic',
            ('custom-keys',): '-custom-keys',
            ('qwerty',): '-qwerty'
        }[(self.text_input,)]

    return spec
def get_jadjar(self):
    # jad/jar pair for this app's platform flavor
    return self.get_build().get_jadjar(self.get_jar_path())

def validate_fixtures(self):
    """Raise PermissionDenied if any form uses fixtures without the
    LOOKUP_TABLES privilege."""
    if not domain_has_privilege(self.domain, privileges.LOOKUP_TABLES):
        # remote apps don't support get_forms yet.
        # for now they can circumvent the fixture limitation. sneaky bastards.
        if hasattr(self, 'get_forms'):
            for form in self.get_forms():
                if form.has_fixtures:
                    raise PermissionDenied(_(
                        "Usage of lookup tables is not supported by your "
                        "current subscription. Please upgrade your "
                        "subscription before using this feature."
                    ))
def validate_jar_path(self):
    """Raise AppEditingError if the selected text_input mode is newer
    than the CommCare version this app builds against."""
    build = self.get_build()
    setting = commcare_settings.SETTINGS_LOOKUP['hq']['text_input']
    value = self.text_input
    # minimum CommCare version that supports this text_input value
    setting_version = setting['since'].get(value)

    if setting_version:
        setting_version = tuple(map(int, setting_version.split('.')))
        my_version = build.minor_release()

        if my_version < setting_version:
            i = setting['values'].index(value)
            assert i != -1
            name = _(setting['value_names'][i])
            raise AppEditingError((
                '%s Text Input is not supported '
                'in CommCare versions before %s.%s. '
                '(You are using %s.%s)'
            ) % ((name,) + setting_version + my_version))
@property
def jad_settings(self):
    """Base JAD manifest settings for the packaged build."""
    settings = {
        'JavaRosa-Admin-Password': self.admin_password,
        'Profile': self.profile_loc,
        'MIDlet-Jar-URL': self.jar_url,
        #'MIDlet-Name': self.name,
        # e.g. 2011-Apr-11 20:45
        'CommCare-Release': "true",
    }
    if self.build_version < '2.8':
        # NOTE(review): presumably 2.8+ stopped needing an explicit
        # Build-Number in the JAD — confirm against CommCare docs
        settings['Build-Number'] = self.version
    return settings
def create_jadjar(self, save=False):
    """
    Return the (jad, jar) pair for this app.

    Previously-built artifacts are reused from attachments; otherwise
    all app files are generated and packed, and with ``save=True`` the
    results (plus build metadata) are lazily attached to the doc.
    """
    try:
        return (
            self.lazy_fetch_attachment('CommCare.jad'),
            self.lazy_fetch_attachment('CommCare.jar'),
        )
    except (ResourceError, KeyError):
        built_on = datetime.utcnow()
        all_files = self.create_all_files()
        jad_settings = {
            'Released-on': built_on.strftime("%Y-%b-%d %H:%M"),
        }
        jad_settings.update(self.jad_settings)
        jadjar = self.get_jadjar().pack(all_files, jad_settings)
        if save:
            # record what we built with, for the saved-build UI
            self.built_on = built_on
            self.built_with = BuildRecord(
                version=jadjar.version,
                build_number=jadjar.build_number,
                signed=jadjar.signed,
                datetime=built_on,
            )

            self.lazy_put_attachment(jadjar.jad, 'CommCare.jad')
            self.lazy_put_attachment(jadjar.jar, 'CommCare.jar')

            for filepath in all_files:
                self.lazy_put_attachment(all_files[filepath],
                                         'files/%s' % filepath)

        return jadjar.jad, jadjar.jar
def validate_app(self):
    """Return a list of error dicts; an empty list means the app builds cleanly."""
    errors = []

    errors.extend(self.check_password_charset())

    try:
        self.validate_fixtures()
        self.validate_jar_path()
        # building all files is the real smoke test
        self.create_all_files()
    except (AppEditingError, XFormValidationError, XFormError,
            PermissionDenied) as e:
        errors.append({'type': 'error', 'message': unicode(e)})
    except Exception as e:
        if settings.DEBUG:
            raise

        # this is much less useful/actionable without a URL
        # so make sure to include the request
        logging.error('Unexpected error building app', exc_info=True,
                      extra={'request': view_utils.get_request()})
        errors.append({'type': 'error', 'message': 'unexpected error: %s' % e})
    return errors
@absolute_url_property
def odk_profile_url(self):
    # ODK (Android) profile download endpoint
    return reverse('corehq.apps.app_manager.views.download_odk_profile', args=[self.domain, self._id])

@absolute_url_property
def odk_media_profile_url(self):
    return reverse('corehq.apps.app_manager.views.download_odk_media_profile', args=[self.domain, self._id])

@property
def odk_profile_display_url(self):
    # prefer the bitly-shortened URL when one was generated at build time
    return self.short_odk_url or self.odk_profile_url

@property
def odk_media_profile_display_url(self):
    return self.short_odk_media_url or self.odk_media_profile_url
def get_odk_qr_code(self, with_media=False):
    """Returns a QR code, as a PNG to install on CC-ODK"""
    try:
        # cached from a previous call
        return self.lazy_fetch_attachment("qrcode.png")
    except ResourceNotFound:
        try:
            from pygooglechart import QRChart
        except ImportError:
            raise Exception(
                "Aw shucks, someone forgot to install "
                "the google chart library on this machine "
                "and this feature needs it. "
                "To get it, run easy_install pygooglechart. "
                "Until you do that this won't work."
            )
        HEIGHT = WIDTH = 250
        code = QRChart(HEIGHT, WIDTH)
        code.add_data(self.odk_profile_url if not with_media else self.odk_media_profile_url)

        # "Level L" error correction with a 0 pixel margin
        code.set_ec('L', 0)
        f, fname = tempfile.mkstemp()
        code.download(fname)
        os.close(f)
        with open(fname, "rb") as f:
            png_data = f.read()
            # cache the PNG for subsequent requests
            self.lazy_put_attachment(png_data, "qrcode.png",
                                     content_type="image/png")
        return png_data
def fetch_jar(self):
    # raw jar bytes for this app's platform flavor
    return self.get_jadjar().fetch_jar()
def make_build(self, comment=None, user_id=None, previous_version=None):
    """
    Create a saved build of this app: copies the doc, pins form/media
    versions against *previous_version*, packages jad/jar, and attaches
    shortened download URLs. The returned build is not saved here.
    """
    copy = super(ApplicationBase, self).make_build()
    if not copy._id:
        # I expect this always to be the case
        # but check explicitly so as not to change the _id if it exists
        copy._id = copy.get_db().server.next_uuid()

    copy.set_form_versions(previous_version)
    copy.set_media_versions(previous_version)
    copy.create_jadjar(save=True)

    try:
        # since this hard to put in a test
        # I'm putting this assert here if copy._id is ever None
        # which makes tests error
        assert copy._id

        if settings.BITLY_LOGIN:
            copy.short_url = bitly.shorten(
                get_url_base() + reverse('corehq.apps.app_manager.views.download_jad', args=[copy.domain, copy._id])
            )
            copy.short_odk_url = bitly.shorten(
                get_url_base() + reverse('corehq.apps.app_manager.views.download_odk_profile', args=[copy.domain, copy._id])
            )
            copy.short_odk_media_url = bitly.shorten(
                get_url_base() + reverse('corehq.apps.app_manager.views.download_odk_media_profile', args=[copy.domain, copy._id])
            )
    except AssertionError:
        raise
    except Exception:  # URLError, BitlyError
        # for offline only
        logging.exception("Problem creating bitly url for app %s. Do you have network?" % self.get_id)
        copy.short_url = None
        copy.short_odk_url = None
        copy.short_odk_media_url = None

    copy.build_comment = comment
    copy.comment_from = user_id
    copy.is_released = False

    return copy
def delete_app(self):
    """Soft-delete this app (marks doc_type; caller must save self)
    and return the saved undo record."""
    self.doc_type += '-Deleted'
    record = DeleteApplicationRecord(
        domain=self.domain,
        app_id=self.id,
        datetime=datetime.utcnow()
    )
    record.save()
    return record

def set_form_versions(self, previous_version):
    # by default doing nothing here is fine.
    pass

def set_media_versions(self, previous_version):
    # overridden by Application, which tracks multimedia versions
    pass
def validate_lang(lang):
    """Raise ValueError unless *lang* looks like a lowercase ISO language
    code ('en', 'fra'), optionally with a lowercase subtag ('en-us')."""
    code_pattern = re.compile(r'^[a-z]{2,3}(-[a-z]*)?$')
    if code_pattern.match(lang) is None:
        raise ValueError("Invalid Language")
def validate_property(property):
    """Raise ValueError unless *property* is a valid case-property path
    (slash-separated identifiers)."""
    # this regex is also copied in propertyList.ejs
    segment = r'[a-zA-Z][\w_-]*'
    if not re.match(r'^%s(/%s)*$' % (segment, segment), property):
        raise ValueError("Invalid Property")
def validate_detail_screen_field(field):
    """Raise ValueError unless *field* is a valid detail-screen sort field
    (optional ``prefix:`` parts, a ``path/`` chain, and an optional ``#``)."""
    # If you change here, also change here:
    # corehq/apps/app_manager/static/app_manager/js/detail-screen-config.js
    pattern = re.compile(
        r'^([a-zA-Z][\w_-]*:)*([a-zA-Z][\w_-]*/)*#?[a-zA-Z][\w_-]*$')
    if pattern.match(field) is None:
        raise ValueError("Invalid Sort Field")
class SavedAppBuild(ApplicationBase):
    """A saved (built) copy of an application."""

    def to_saved_build_json(self, timezone):
        """
        Serialize this build for the releases/saved-builds UI,
        localizing ``built_on`` into *timezone* and stripping
        bulky/irrelevant fields from the payload.
        """
        data = super(SavedAppBuild, self).to_json().copy()
        # Fix: the original tuple was missing a comma after 'translations',
        # so Python concatenated it with 'description' into the single key
        # 'translationsdescription' — neither 'translations' nor
        # 'description' was actually removed from the payload.
        for key in ('modules', 'user_registration', '_attachments',
                    'profile', 'translations',
                    'description', 'short_description'):
            data.pop(key, None)
        data.update({
            'id': self.id,
            'built_on_date': utc_to_timezone(data['built_on'], timezone, "%b %d, %Y"),
            'built_on_time': utc_to_timezone(data['built_on'], timezone, "%H:%M %Z"),
            'build_label': self.built_with.get_label(),
            'jar_path': self.get_jar_path(),
            'short_name': self.short_name,
            'enable_offline_install': self.enable_offline_install,
        })
        comment_from = data['comment_from']
        if comment_from:
            try:
                comment_user = CouchUser.get(comment_from)
            except ResourceNotFound:
                # commenting user's doc is gone: fall back to the raw id
                data['comment_user_name'] = comment_from
            else:
                data['comment_user_name'] = comment_user.full_name

        return data
class Application(ApplicationBase, TranslationMixin, HQMediaMixin):
    """
    An Application that can be created entirely through the online interface
    """

    user_registration = SchemaProperty(UserRegistrationForm)
    show_user_registration = BooleanProperty(default=False, required=True)
    modules = SchemaListProperty(ModuleBase)
    name = StringProperty()

    # profile's schema is {'features': {}, 'properties': {}}
    # ended up not using a schema because properties is a reserved word
    profile = DictProperty()
    use_custom_suite = BooleanProperty(default=False)
    cloudcare_enabled = BooleanProperty(default=False)
    # how app_strings translations are generated (see app_strings.CHOICES)
    translation_strategy = StringProperty(default='select-known',
                                          choices=app_strings.CHOICES.keys())
    commtrack_enabled = BooleanProperty(default=False)
    commtrack_requisition_mode = StringProperty(choices=CT_REQUISITION_MODES)
    auto_gps_capture = BooleanProperty(default=False)
@classmethod
def wrap(cls, data):
    """Wrap raw couch data, backfilling module labels and build langs."""
    for module in data.get('modules', []):
        for attr in ('case_label', 'referral_label'):
            if not module.has_key(attr):
                module[attr] = {}
        for lang in data['langs']:
            # default labels come from the commcare translation bundles
            if not module['case_label'].get(lang):
                module['case_label'][lang] = commcare_translations.load_translations(lang).get('cchq.case', 'Cases')
            if not module['referral_label'].get(lang):
                module['referral_label'][lang] = commcare_translations.load_translations(lang).get('cchq.referral', 'Referrals')
    if not data.get('build_langs'):
        data['build_langs'] = data['langs']
    self = super(Application, cls).wrap(data)

    # make sure all form versions are None on working copies
    if not self.copy_of:
        for form in self.get_forms():
            form.version = None

    # weird edge case where multimedia_map gets set to null and causes issues
    if self.multimedia_map is None:
        self.multimedia_map = {}

    return self
def save(self, *args, **kwargs):
    """Save and fire the app_post_save signal."""
    super(Application, self).save(*args, **kwargs)
    # Import loop if this is imported at the top
    # TODO: revamp so signal_connections <- models <- signals
    from corehq.apps.app_manager import signals
    signals.app_post_save.send(Application, application=self)

def make_reversion_to_copy(self, copy):
    """Revert to *copy*, clearing per-form caches invalidated by the revert."""
    app = super(Application, self).make_reversion_to_copy(copy)

    for form in app.get_forms():
        # reset the form's validation cache, since the form content is
        # likely to have changed in the revert!
        form.validation_cache = None
        form.version = None

    app.build_broken = False

    return app
@property
def profile_url(self):
    # Application builds serve the HQ-generated profile
    return self.hq_profile_url

@property
def media_profile_url(self):
    return self.hq_media_profile_url

@property
def url_base(self):
    return get_url_base()

@absolute_url_property
def suite_url(self):
    return reverse('download_suite', args=[self.domain, self.get_id])

@property
def suite_loc(self):
    # relative path when the build supports it, else a jr:// resource ref
    if self.enable_relative_suite_path:
        return './suite.xml'
    else:
        return "jr://resource/suite.xml"

@absolute_url_property
def media_suite_url(self):
    return reverse('download_media_suite', args=[self.domain, self.get_id])

@property
def media_suite_loc(self):
    if self.enable_relative_suite_path:
        return "./media_suite.xml"
    else:
        return "jr://resource/media_suite.xml"

@property
def default_language(self):
    # first build language wins; fall back to English
    return self.build_langs[0] if len(self.build_langs) > 0 else "en"
def fetch_xform(self, module_id=None, form_id=None, form=None):
    """Return the validated, rendered XForm (UTF-8 bytes) for a form,
    located either directly via *form* or by module/form index."""
    if not form:
        form = self.get_module(module_id).get_form(form_id)
    return form.validate_form().render_xform().encode('utf-8')
def set_form_versions(self, previous_version):
    """
    Set each form's version: reuse the previous build's version when the
    compiled XForm is unchanged, else leave it None (meaning "current").
    """
    # this will make builds slower, but they're async now so hopefully
    # that's fine.

    def _hash(val):
        return hashlib.md5(val).hexdigest()

    if previous_version:
        for form_stuff in self.get_forms(bare=False):
            filename = 'files/%s' % self.get_form_filename(**form_stuff)
            form = form_stuff["form"]
            form_version = None
            try:
                previous_form = previous_version.get_form(form.unique_id)
                # take the previous version's compiled form as-is
                # (generation code may have changed since last build)
                previous_source = previous_version.fetch_attachment(filename)
            except (ResourceNotFound, FormNotFoundException):
                pass
            else:
                previous_hash = _hash(previous_source)

                # hack - temporarily set my version to the previous version
                # so that that's not treated as the diff
                previous_form_version = previous_form.get_version()
                form.version = previous_form_version
                my_hash = _hash(self.fetch_xform(form=form))
                if previous_hash == my_hash:
                    form_version = previous_form_version
            if form_version is None:
                form.version = None
            else:
                form.version = form_version
def set_media_versions(self, previous_version):
    """Carry over multimedia versions from the previous build where the
    media item is unchanged; otherwise stamp the current app version."""

    # access to .multimedia_map is slow
    prev_multimedia_map = previous_version.multimedia_map if previous_version else {}

    for path, map_item in self.multimedia_map.iteritems():
        pre_map_item = prev_multimedia_map.get(path, None)
        if pre_map_item and pre_map_item.version and pre_map_item.multimedia_id == map_item.multimedia_id:
            map_item.version = pre_map_item.version
        else:
            map_item.version = self.version
def create_app_strings(self, lang):
    """Render the app_strings.txt content for *lang* ('default' included)."""
    gen = app_strings.CHOICES[self.translation_strategy]
    if lang == 'default':
        return gen.create_default_app_strings(self)
    else:
        return gen.create_app_strings(self, lang)

@property
def skip_validation(self):
    # 'cc-content-valid' profile property controls phone-side validation
    properties = (self.profile or {}).get('properties', {})
    return properties.get('cc-content-valid', 'yes')

@property
def jad_settings(self):
    """Base jad settings plus Application-specific entries."""
    s = super(Application, self).jad_settings
    s.update({
        'Skip-Validation': self.skip_validation,
    })
    return s
def create_profile(self, is_odk=False, with_media=False, template='app_manager/profile.xml'):
    """
    Render the profile XML, merging the app's stored profile settings
    with commcare_settings defaults (forcing server-tether for
    case-sharing apps).
    """
    self__profile = self.profile
    app_profile = defaultdict(dict)

    for setting in commcare_settings.SETTINGS:
        setting_type = setting['type']
        setting_id = setting['id']

        if setting_type not in ('properties', 'features'):
            setting_value = None
        elif setting_id not in self__profile.get(setting_type, {}):
            # not customized by the user: only emit when HQ's default
            # differs from CommCare's built-in default
            if 'commcare_default' in setting and setting['commcare_default'] != setting['default']:
                setting_value = setting['default']
            else:
                setting_value = None
        else:
            setting_value = self__profile[setting_type][setting_id]
        if setting_value:
            app_profile[setting_type][setting_id] = {
                'value': setting_value,
                'force': setting.get('force', False)
            }
        # assert that it gets explicitly set once per loop
        del setting_value

    if self.case_sharing:
        app_profile['properties']['server-tether'] = {
            'force': True,
            'value': 'sync',
        }

    if with_media:
        profile_url = self.media_profile_url if not is_odk else (self.odk_media_profile_url + '?latest=true')
    else:
        profile_url = self.profile_url if not is_odk else (self.odk_profile_url + '?latest=true')

    return render_to_string(template, {
        'is_odk': is_odk,
        'app': self,
        'profile_url': profile_url,
        'app_profile': app_profile,
        'cc_user_domain': cc_user_domain(self.domain),
        'include_media_suite': with_media,
        'descriptor': u"Profile File"
    }).decode('utf-8')
@property
def custom_suite(self):
    """User-supplied custom suite XML, or '' when none was uploaded."""
    try:
        return self.lazy_fetch_attachment('custom_suite.xml')
    except ResourceNotFound:
        return ""

def set_custom_suite(self, value):
    self.put_attachment(value, 'custom_suite.xml')

def create_suite(self):
    """Render suite.xml (template-based for V1 apps, generated for V2)."""
    if self.application_version == APP_V1:
        template='app_manager/suite-%s.xml' % self.application_version
        return render_to_string(template, {
            'app': self,
            'langs': ["default"] + self.build_langs
        })
    else:
        return suite_xml.SuiteGenerator(self).generate_suite()

def create_media_suite(self):
    return suite_xml.MediaSuiteGenerator(self).generate_suite()
@classmethod
def get_form_filename(cls, type=None, form=None, module=None):
    """Return the in-build path of a form, keyed by the dicts that
    get_forms(bare=False) yields."""
    if type == 'user_registration':
        return 'user_registration.xml'
    else:
        return 'modules-%s/forms-%s.xml' % (module.id, form.id)
def create_all_files(self):
    """Generate every file that goes into the build, keyed by path."""
    files = {
        'profile.xml': self.create_profile(is_odk=False),
        'profile.ccpr': self.create_profile(is_odk=True),
        'media_profile.xml': self.create_profile(is_odk=False, with_media=True),
        'media_profile.ccpr': self.create_profile(is_odk=True, with_media=True),
        'suite.xml': self.create_suite(),
        'media_suite.xml': self.create_media_suite(),
    }

    for lang in ['default'] + self.build_langs:
        files["%s/app_strings.txt" % lang] = self.create_app_strings(lang)
    for form_stuff in self.get_forms(bare=False):
        filename = self.get_form_filename(**form_stuff)
        form = form_stuff['form']
        files[filename] = self.fetch_xform(form=form)
    return files

# iterator over modules, each wrapped with its index
get_modules = IndexedSchema.Getter('modules')
@parse_int([1])
def get_module(self, i):
    """Return module *i* (negative indices allowed), wrapped with its id."""
    try:
        # i % len normalizes negative indices to a valid id
        return self.modules[i].with_id(i % len(self.modules), self)
    except IndexError:
        raise ModuleNotFoundException()

def get_user_registration(self):
    """Return the user registration form, lazily seeding its source."""
    form = self.user_registration
    form._app = self
    if not form.source:
        form.source = load_form_template('register_user.xhtml')
    return form
def get_module_by_unique_id(self, unique_id):
    """Return the module with *unique_id*, or raise ModuleNotFoundException."""
    def matches(module):
        return module.get_or_create_unique_id() == unique_id
    for obj in self.get_modules():
        if matches(obj):
            return obj
    raise ModuleNotFoundException(
        ("Module in app '%s' with unique id '%s' not found"
         % (self.id, unique_id)))
def get_forms(self, bare=True):
    """
    Yield every form in the app (user registration first, if shown).
    With ``bare=False``, yields dicts of {'type', 'module', 'form'}
    instead of bare form objects.
    """
    if self.show_user_registration:
        yield self.get_user_registration() if bare else {
            'type': 'user_registration',
            'form': self.get_user_registration()
        }
    for module in self.get_modules():
        for form in module.get_forms():
            yield form if bare else {
                'type': 'module_form',
                'module': module,
                'form': form
            }

def get_form(self, unique_form_id, bare=True):
    """Return the form with *unique_form_id* (shape per *bare*), or raise
    FormNotFoundException."""
    def matches(form):
        return form.get_unique_id() == unique_form_id
    for obj in self.get_forms(bare):
        if matches(obj if bare else obj['form']):
            return obj
    raise FormNotFoundException(
        ("Form in app '%s' with unique id '%s' not found"
         % (self.id, unique_form_id)))
def get_form_location(self, unique_form_id):
    """Return (module_index, form_index) for a form, or raise KeyError."""
    for m_index, module in enumerate(self.get_modules()):
        for f_index, form in enumerate(module.get_forms()):
            if unique_form_id == form.unique_id:
                return m_index, f_index
    raise KeyError("Form in app '%s' with unique id '%s' not found" % (self.id, unique_form_id))

@classmethod
def new_app(cls, domain, name, application_version, lang="en"):
    """Create a fresh (unsaved) app with a single language."""
    app = cls(domain=domain, modules=[], name=name, langs=[lang], build_langs=[lang], application_version=application_version)
    return app
def add_module(self, module):
    """Append *module* and return it wrapped with its new id."""
    self.modules.append(module)
    return self.get_module(-1)

def delete_module(self, module_unique_id):
    """Remove a module by unique id; returns a saved undo record, or
    None when no such module exists."""
    try:
        module = self.get_module_by_unique_id(module_unique_id)
    except ModuleNotFoundException:
        return None
    record = DeleteModuleRecord(
        domain=self.domain,
        app_id=self.id,
        module_id=module.id,
        module=module,
        datetime=datetime.utcnow()
    )
    del self.modules[module.id]
    record.save()
    return record
def new_form(self, module_id, name, lang, attachment=""):
    """Create a new form in module *module_id* (delegates to the module)."""
    module = self.get_module(module_id)
    return module.new_form(name, lang, attachment)

def delete_form(self, module_unique_id, form_unique_id):
    """Remove a form; returns a saved undo record, or None when either
    the module or the form cannot be found."""
    try:
        module = self.get_module_by_unique_id(module_unique_id)
        form = self.get_form(form_unique_id)
    except (ModuleNotFoundException, FormNotFoundException):
        return None

    record = DeleteFormRecord(
        domain=self.domain,
        app_id=self.id,
        module_unique_id=module_unique_id,
        form_id=form.id,
        form=form,
        datetime=datetime.utcnow(),
    )
    record.save()
    del module['forms'][form.id]
    return record
def rename_lang(self, old_lang, new_lang):
    """Rename a language code app-wide (langs, modules, translations)."""
    validate_lang(new_lang)
    if old_lang == new_lang:
        return
    if new_lang in self.langs:
        raise AppEditingError("Language %s already exists!" % new_lang)
    for i,lang in enumerate(self.langs):
        if lang == old_lang:
            self.langs[i] = new_lang
    for module in self.get_modules():
        module.rename_lang(old_lang, new_lang)
    _rename_key(self.translations, old_lang, new_lang)
def rearrange_modules(self, i, j):
    """Move module *j* to position *i*; raises RearrangeError on bad indices."""
    modules = self.modules
    try:
        modules.insert(i, modules.pop(j))
    except IndexError:
        raise RearrangeError()
    self.modules = modules

def rearrange_forms(self, to_module_id, from_module_id, i, j):
    """
    Move form *j* of *from_module_id* to position *i* of *to_module_id*.

    If the case types of the two modules conflict,
    ConflictingCaseTypeError is raised,
    but the rearrangement (confusingly) goes through anyway.
    This is intentional.
    """
    to_module = self.get_module(to_module_id)
    from_module = self.get_module(from_module_id)
    try:
        form = from_module.forms.pop(j)
        to_module.add_insert_form(from_module, form, index=i, with_source=True)
    except IndexError:
        raise RearrangeError()
    if to_module.case_type != from_module.case_type:
        raise ConflictingCaseTypeError()
def scrub_source(self, source):
    """Replace every form unique_id (and its matching attachment name)
    with a fresh id before the app source is shared."""
    def change_unique_id(form):
        unique_id = form['unique_id']
        new_unique_id = FormBase.generate_id()
        form['unique_id'] = new_unique_id
        # keep the inlined attachment in sync with the renamed form
        if source['_attachments'].has_key("%s.xml" % unique_id):
            source['_attachments']["%s.xml" % new_unique_id] = source['_attachments'].pop("%s.xml" % unique_id)

    change_unique_id(source['user_registration'])
    for m, module in enumerate(source['modules']):
        for f, form in enumerate(module['forms']):
            change_unique_id(source['modules'][m]['forms'][f])
def copy_form(self, module_id, form_id, to_module_id):
    """
    Copy a form into another module.

    If the case types of the two modules conflict,
    ConflictingCaseTypeError is raised,
    but the copying (confusingly) goes through anyway.
    This is intentional.
    """
    from_module = self.get_module(module_id)
    form = from_module.get_form(form_id)
    to_module = self.get_module(to_module_id)
    self._copy_form(from_module, form, to_module)

def _copy_form(self, from_module, form, to_module):
    """Insert a deep copy of *form* (fresh unique_id) into *to_module*."""
    if not form.source:
        raise BlankXFormError()
    copy_source = deepcopy(form.to_json())
    if 'unique_id' in copy_source:
        del copy_source['unique_id']

    copy_form = to_module.add_insert_form(from_module, FormBase.wrap(copy_source))
    save_xform(self, copy_form, form.source)

    if from_module['case_type'] != to_module['case_type']:
        raise ConflictingCaseTypeError()
def convert_module_to_advanced(self, module_id):
    """Create and append an AdvancedModule mirroring module *module_id*
    (name suffixed '(advanced)', forms copied over); returns the new module."""
    from_module = self.get_module(module_id)
    name = {lang: u'{} (advanced)'.format(name) for lang, name in from_module.name.items()}
    case_details = deepcopy(from_module.case_details.to_json())
    to_module = AdvancedModule(
        name=name,
        forms=[],
        case_type=from_module.case_type,
        case_label=from_module.case_label,
        put_in_root=from_module.put_in_root,
        case_list=from_module.case_list,
        case_details=DetailPair.wrap(case_details),
        product_details=DetailPair(
            short=Detail(
                columns=[
                    DetailColumn(
                        format='plain',
                        header={'en': ugettext("Product")},
                        field='name',
                        model='product',
                    ),
                ],
            ),
            long=Detail(),
        ),
    )
    to_module.get_or_create_unique_id()
    to_module = self.add_module(to_module)
    for form in from_module.get_forms():
        self._copy_form(from_module, form, to_module)
    return to_module
@cached_property
def has_case_management(self):
    """True if any form in the app has active case actions."""
    for module in self.get_modules():
        for form in module.get_forms():
            if len(form.active_actions()) > 0:
                return True
    return False

@memoized
def case_type_exists(self, case_type):
    return case_type in self.get_case_types()

@memoized
def get_case_types(self):
    # union of case types across all modules
    return set(chain(*[m.get_case_types() for m in self.get_modules()]))

def has_media(self):
    return len(self.multimedia_map) > 0
@memoized
def get_xmlns_map(self):
    """Map each xmlns appearing in the app to the list of forms using it."""
    forms_by_xmlns = defaultdict(list)
    for current_form in self.get_forms():
        forms_by_xmlns[current_form.xmlns].append(current_form)
    return forms_by_xmlns
def get_form_by_xmlns(self, xmlns):
    """Return the unique form with the given xmlns, or None.

    Returns None for the device-report xmlns, and None (with an error
    log) when zero or several forms share the xmlns.
    """
    if xmlns == "http://code.javarosa.org/devicereport":
        return None
    forms = self.get_xmlns_map()[xmlns]
    if len(forms) != 1:
        # Lazy %-args: the message is only formatted if the record is
        # actually emitted (logging best practice, was eager '%' before).
        logging.error(
            'App %s in domain %s has %s forms with xmlns %s',
            self.get_id,
            self.domain,
            len(forms),
            xmlns,
        )
        return None
    form, = forms
    return form
def get_questions(self, xmlns):
    """Return the questions of the form with this xmlns ([] if absent)."""
    form = self.get_form_by_xmlns(xmlns)
    return form.get_questions(self.langs) if form else []
def validate_app(self):
    """Collect build-blocking errors for the whole app.

    Returns a list of error dicts. Superclass validation only runs when
    no app-level errors were found.
    """
    xmlns_count = defaultdict(int)
    errors = []

    for lang in self.langs:
        if not lang:
            errors.append({'type': 'empty lang'})

    if not self.modules:
        errors.append({'type': "no modules"})
    for module in self.get_modules():
        errors.extend(module.validate_for_build())

    for form in self.get_forms():
        errors.extend(form.validate_for_build(validate_module=False))

        # make sure that there aren't duplicate xmlns's
        # NOTE(review): this inner scan runs once per form, so a
        # duplicated xmlns can be reported multiple times — confirm
        # whether downstream de-duplicates.
        xmlns_count[form.xmlns] += 1
        for xmlns in xmlns_count:
            if xmlns_count[xmlns] > 1:
                errors.append({'type': "duplicate xmlns", "xmlns": xmlns})

    if self._has_parent_child_selection_cycle({m.unique_id: m for m in self.get_modules()}):
        errors.append({'type': 'parent cycle'})

    if not errors:
        errors = super(Application, self).validate_app()
    return errors
def _has_parent_child_selection_cycle(self, modules):
"""
:param modules: A mapping of module unique_ids to Module objects
:return: True if there is a cycle in the parent-child selection graph
"""
visited = set()
completed = set()
def cycle_helper(m):
if m.id in visited:
if m.id in completed:
return False
return True
visited.add(m.id)
if hasattr(m, 'parent_select') and m.parent_select.active:
parent = modules.get(m.parent_select.module_id, None)
if parent != None and cycle_helper(parent):
return True
completed.add(m.id)
return False
for module in modules.values():
if cycle_helper(module):
return True
return False
@classmethod
def get_by_xmlns(cls, domain, xmlns):
    """Return the app in `domain` containing a form with this xmlns, or None.

    Uses the grouped `exports_forms/by_xmlns` couch view; the `{}` in
    the key position matches any app id at that level.
    """
    r = cls.get_db().view('exports_forms/by_xmlns',
        key=[domain, {}, xmlns],
        group=True,
        stale=settings.COUCH_STALE_QUERY,
    ).one()
    return cls.get(r['value']['app']['id']) if r and 'app' in r['value'] else None
def get_profile_setting(self, s_type, s_id):
    """Resolve a profile setting with fallbacks.

    Order: value stored on the app's profile → first matching
    "contingent_default" from the settings YAML → "disabled_default"
    when the app's build is older than the setting's "since" version →
    the plain "default".
    """
    setting = self.profile.get(s_type, {}).get(s_id)
    if setting is not None:
        return setting
    yaml_setting = commcare_settings.SETTINGS_LOOKUP[s_type][s_id]
    for contingent in yaml_setting.get("contingent_default", []):
        if check_condition(self, contingent["condition"]):
            setting = contingent["value"]
    if setting is not None:
        return setting
    # NOTE(review): presumably build_version supports version-aware
    # comparison (e.g. LooseVersion); if it is a plain string this is a
    # lexicographic compare — confirm.
    if self.build_version < yaml_setting.get("since", "0"):
        setting = yaml_setting.get("disabled_default", None)
        if setting is not None:
            return setting
    return yaml_setting.get("default")
@property
def has_careplan_module(self):
    """True if any module in the app is a CareplanModule."""
    return any(isinstance(m, CareplanModule) for m in self.modules)
class RemoteApp(ApplicationBase):
    """
    A wrapper for a url pointing to a suite or profile file. This allows you to
    write all the files for an app by hand, and then give the url to app_manager
    and let it package everything together for you.
    """
    profile_url = StringProperty(default="http://")
    name = StringProperty()
    # Whether the profile/suite urls may be edited through the UI.
    manage_urls = BooleanProperty(default=False)
    # Cache of xmlns -> questions, populated lazily by get_questions().
    questions_map = DictProperty(required=False)

    def is_remote_app(self):
        return True

    @classmethod
    def new_app(cls, domain, name, lang='en'):
        app = cls(domain=domain, name=name, langs=[lang])
        return app

    def create_profile(self, is_odk=False):
        # we don't do odk for now anyway
        return remote_app.make_remote_profile(self)

    def strip_location(self, location):
        return remote_app.strip_location(self.profile_url, location)

    def fetch_file(self, location):
        """Download a resource relative to profile_url.

        Returns (stripped location, raw content); wraps any fetch
        failure in AppEditingError.
        """
        location = self.strip_location(location)
        url = urljoin(self.profile_url, location)
        try:
            content = urlopen(url).read()
        except Exception:
            raise AppEditingError('Unable to access resource url: "%s"' % url)
        return location, content

    @classmethod
    def get_locations(cls, suite):
        """Yield (resource tag, location) pairs from a parsed suite.

        Prefers the local-authority location, falling back to remote.
        NOTE(review): findtext normally returns None rather than raising
        when the element is absent — confirm the except branch is ever hit.
        """
        for resource in suite.findall('*/resource'):
            try:
                loc = resource.findtext('location[@authority="local"]')
            except Exception:
                loc = resource.findtext('location[@authority="remote"]')
            yield resource.getparent().tag, loc

    @property
    def SUITE_XPATH(self):
        return 'suite/resource/location[@authority="local"]'

    def create_all_files(self):
        """Fetch the profile, suite, and every referenced file.

        Returns {path: content} for everything the build needs.
        """
        files = {
            'profile.xml': self.create_profile(),
        }
        tree = _parse_xml(files['profile.xml'])

        def add_file_from_path(path, strict=False):
            # Fetch every file referenced at `path` in the profile tree;
            # with strict=True a missing reference is an error.
            added_files = []
            # must find at least one
            try:
                tree.find(path).text
            except (TypeError, AttributeError):
                if strict:
                    raise AppEditingError("problem with file path reference!")
                else:
                    return
            for loc_node in tree.findall(path):
                loc, file = self.fetch_file(loc_node.text)
                files[loc] = file
                added_files.append(file)
            return added_files

        add_file_from_path('features/users/logo')
        try:
            suites = add_file_from_path(self.SUITE_XPATH, strict=True)
        except AppEditingError:
            raise AppEditingError(ugettext('Problem loading suite file from profile file. Is your profile file correct?'))

        for suite in suites:
            suite_xml = _parse_xml(suite)
            for tag, location in self.get_locations(suite_xml):
                location, data = self.fetch_file(location)
                if tag == 'xform' and self.build_langs:
                    # Strip languages that aren't in the build whitelist.
                    try:
                        xform = XForm(data)
                    except XFormError as e:
                        raise XFormError('In file %s: %s' % (location, e))
                    xform.exclude_languages(whitelist=self.build_langs)
                    data = xform.render()
                files.update({location: data})
        return files

    def scrub_source(self, source):
        # Remote apps have nothing to scrub on export.
        pass

    def make_questions_map(self):
        """Build {xmlns: questions} from this build's stored attachments.

        Only works on a saved build (copy_of set); returns None otherwise.
        """
        if self.copy_of:
            xmlns_map = {}

            def fetch(location):
                filepath = self.strip_location(location)
                return self.fetch_attachment('files/%s' % filepath)

            profile_xml = _parse_xml(fetch('profile.xml'))
            suite_location = profile_xml.find(self.SUITE_XPATH).text
            suite_xml = _parse_xml(fetch(suite_location))

            for tag, location in self.get_locations(suite_xml):
                if tag == 'xform':
                    xform = XForm(fetch(location))
                    xmlns = xform.data_node.tag_xmlns
                    questions = xform.get_questions(self.build_langs)
                    xmlns_map[xmlns] = questions
            return xmlns_map
        else:
            return None

    def get_questions(self, xmlns):
        """Return questions for a form by xmlns, caching on the doc."""
        if not self.questions_map:
            self.questions_map = self.make_questions_map()
            if not self.questions_map:
                return []
            # Persist the freshly built cache.
            self.save()
        questions = self.questions_map.get(xmlns, [])
        return questions
def get_apps_in_domain(domain, full=False, include_remote=True):
    """Return the apps in `domain`, optionally excluding remote apps.

    `full` selects the view that returns complete app documents rather
    than the brief summaries.
    """
    view_name = 'app_manager/applications' if full else 'app_manager/applications_brief'
    rows = Application.get_db().view(view_name,
        startkey=[domain, None],
        endkey=[domain, None, {}],
        include_docs=True,
    )
    apps = [get_correct_app_class(row['doc']).wrap(row['doc']) for row in rows]
    predicate = None if include_remote else (lambda app: not app.is_remote_app())
    return filter(predicate, apps)
def get_app(domain, app_id, wrap_cls=None, latest=False):
    """
    Utility for getting an app, making sure it's in the domain specified, and wrapping it in the right class
    (Application or RemoteApp).

    With latest=True, resolves app_id to the newest released build of the
    same app (falling back to the given doc when none exists). Raises
    Http404 for missing docs, domain mismatches, or unknown doc types.
    """
    if latest:
        try:
            original_app = get_db().get(app_id)
        except ResourceNotFound:
            raise Http404()
        if not domain:
            try:
                domain = original_app['domain']
            except Exception:
                raise Http404()
        if original_app.get('copy_of'):
            # app_id is itself a build: look for newer released builds of
            # its parent; include this build only if it is released.
            parent_app_id = original_app.get('copy_of')
            min_version = original_app['version'] if original_app.get('is_released') else -1
        else:
            parent_app_id = original_app['_id']
            min_version = -1
        # descending=True with startkey {} walks versions newest-first.
        latest_app = get_db().view('app_manager/applications',
            startkey=['^ReleasedApplications', domain, parent_app_id, {}],
            endkey=['^ReleasedApplications', domain, parent_app_id, min_version],
            limit=1,
            descending=True,
            include_docs=True
        ).one()
        try:
            app = latest_app['doc']
        except TypeError:
            # If there are no starred builds, act as if latest=False
            app = original_app
    else:
        try:
            app = get_db().get(app_id)
        except Exception:
            raise Http404()
    if domain and app['domain'] != domain:
        raise Http404()
    try:
        cls = wrap_cls or get_correct_app_class(app)
    except DocTypeError:
        raise Http404()
    app = cls.wrap(app)
    return app
# Reserved domain names for canned content: the on-demand example app
# (see _get_or_create_app) and bug-report submissions.
EXAMPLE_DOMAIN = 'example'
BUG_REPORTS_DOMAIN = 'bug-reports'
def _get_or_create_app(app_id):
    """Fetch an app, materializing the hello-world example on demand.

    For the magic example id, the fixture is created and saved on first
    access, then re-fetched via the recursive call so callers always get
    the persisted document. All other ids go through get_app.
    """
    if app_id == "example--hello-world":
        try:
            app = Application.get(app_id)
        except ResourceNotFound:
            app = Application.wrap(fixtures.hello_world_example)
            app._id = app_id
            app.domain = EXAMPLE_DOMAIN
            app.save()
            # Recurse once: the doc now exists, so this hits the happy path.
            return _get_or_create_app(app_id)
        return app
    else:
        return get_app(None, app_id)
# Maps couch `doc_type` values (including the soft-deleted "-Deleted"
# variants) to the wrapper class used by import_app.
str_to_cls = {
    "Application": Application,
    "Application-Deleted": Application,
    "RemoteApp": RemoteApp,
    "RemoteApp-Deleted": RemoteApp,
}
def import_app(app_id_or_source, domain, name=None, validate_source_domain=None):
    """Copy an app (by id or exported-source dict) into `domain`.

    `validate_source_domain`, when given, is called with the source
    app's domain and may raise to veto the import. Returns the newly
    saved app.
    """
    if isinstance(app_id_or_source, basestring):
        app_id = app_id_or_source
        source = _get_or_create_app(app_id)
        src_dom = source['domain']
        if validate_source_domain:
            validate_source_domain(src_dom)
        # Round-trip through export_json to get a plain scrubbed dict.
        source = source.export_json()
        source = json.loads(source)
    else:
        source = app_id_or_source
    try:
        attachments = source['_attachments']
    except KeyError:
        attachments = {}
    finally:
        # Attachments are re-added one by one after save; clear them so
        # the doc can be saved without them.
        source['_attachments'] = {}
    if name:
        source['name'] = name
    cls = str_to_cls[source['doc_type']]
    # Allow the wrapper to update to the current default build_spec
    if 'build_spec' in source:
        del source['build_spec']
    app = cls.from_source(source, domain)
    app.save()
    if not app.is_remote_app():
        # Grant the target domain access to each referenced multimedia item.
        for _, m in app.get_media_objects():
            if domain not in m.valid_domains:
                m.valid_domains.append(domain)
                m.save()
    for name, attachment in attachments.items():
        if re.match(ATTACHMENT_REGEX, name):
            app.put_attachment(attachment, name)
    return app
class DeleteApplicationRecord(DeleteRecord):
    # id of the soft-deleted app this record can restore
    app_id = StringProperty()

    def undo(self):
        """Restore the soft-deleted app.

        get_doc_type() presumably yields the live (non "-Deleted")
        doc_type — confirm against ApplicationBase.
        """
        app = ApplicationBase.get(self.app_id)
        app.doc_type = app.get_doc_type()
        app.save(increment_version=False)
class DeleteModuleRecord(DeleteRecord):
    app_id = StringProperty()
    # original index of the deleted module within the app
    module_id = IntegerProperty()
    # full snapshot of the deleted module
    module = SchemaProperty(ModuleBase)

    def undo(self):
        """Re-insert the saved module at its original position."""
        app = Application.get(self.app_id)
        modules = app.modules
        modules.insert(self.module_id, self.module)
        app.modules = modules
        app.save()
class DeleteFormRecord(DeleteRecord):
    app_id = StringProperty()
    # older records locate the module by index; newer ones by unique id
    module_id = IntegerProperty()
    module_unique_id = StringProperty()
    # original index of the deleted form within its module
    form_id = IntegerProperty()
    # full snapshot of the deleted form
    form = SchemaProperty(FormBase)

    def undo(self):
        """Re-insert the saved form at its original position."""
        app = Application.get(self.app_id)
        if self.module_unique_id is not None:
            module = app.get_module_by_unique_id(self.module_unique_id)
        else:
            module = app.modules[self.module_id]
        forms = module.forms
        forms.insert(self.form_id, self.form)
        module.forms = forms
        app.save()
class CareplanAppProperties(DocumentSchema):
    # Per-app careplan settings embedded in CareplanConfig.app_configs.
    name = StringProperty()
    latest_release = StringProperty()
    case_type = StringProperty()
    goal_conf = DictProperty()
    task_conf = DictProperty()
class CareplanConfig(Document):
    # One config document per domain, mapping app ids to their careplan
    # properties.
    domain = StringProperty()
    app_configs = SchemaDictProperty(CareplanAppProperties)

    @classmethod
    def for_domain(cls, domain):
        """Return the (cached) CareplanConfig for `domain`, or None."""
        rows = cache_core.cached_view(
            cls.get_db(),
            "domain/docs",
            key=[domain, 'CareplanConfig', None],
            reduce=False,
            include_docs=True,
            wrapper=cls.wrap)
        return rows[0] if rows else None
# backwards compatibility with suite-1.0.xml
# Monkey-patch the id-string helpers onto the model classes so older
# suite-generation code paths can keep calling them as methods.
FormBase.get_command_id = lambda self: id_strings.form_command(self)
FormBase.get_locale_id = lambda self: id_strings.form_locale(self)

ModuleBase.get_locale_id = lambda self: id_strings.module_locale(self)

ModuleBase.get_case_list_command_id = lambda self: id_strings.case_list_command(self)
ModuleBase.get_case_list_locale_id = lambda self: id_strings.case_list_locale(self)

Module.get_referral_list_command_id = lambda self: id_strings.referral_list_command(self)
Module.get_referral_list_locale_id = lambda self: id_strings.referral_list_locale(self)
# ---- file boundary (concatenation artifact) ----
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# MIT License
#from sqlalchemy import Table
#from sqlalchemy import Column
#from sqlalchemy import ForeignKey
#from sqlalchemy import UniqueConstraint
#from sqlalchemy import CheckConstraint
#from sqlalchemy import Integer
#from sqlalchemy import Unicode
#from sqlalchemy.orm import relationship
#from sqlalchemy.orm import backref
#from sqlalchemy.orm import relation
from sqlalchemy.orm.exc import NoResultFound
#from sqlalchemy.ext.declarative import declarative_base
#from sqlalchemy.ext.declarative import declared_attr
#from sqlalchemy.ext.associationproxy import association_proxy
#from sqlalchemy.ext.hybrid import hybrid_property
#from .get_one_or_create import get_one_or_create
#from .BaseMixin import BASE
#from .Config import CONFIG
from .Word import Word
from .Word import WordMisSpelling
from .TagWord import TagWord
def find_tag(session, tag):
    '''
    Iterate over the tagwords table to check for an existing tag.

    Each word of *tag* is checked for a known misspelling, which is
    replaced with its correction when found. Returns the matching tag
    object when exactly one candidate spells out the corrected text,
    else False.

    It's reasonable to return the tag if it's found because, unlike an
    alias, a tag can't point to the wrong thing; returning a "duplicate"
    alias only makes sense if it's pointing to the same tag.

    Fixes vs. the original: `Word` was referenced with its import
    commented out (NameError on first use; import restored at top of
    file), and the >1-candidate case fell through returning None even
    though the documented contract is False.
    '''
    corrected_tag = tag
    possible_tag_set = set([])
    assert isinstance(tag, str)
    try:
        for index, word in enumerate(tag.split(' ')):
            try:
                # NOTE(review): looks up the WHOLE tag string as a
                # misspelling on every iteration; per the docstring this
                # probably should be filter_by(wordmisspelling=word) —
                # behavior kept as-is, confirm before changing.
                wordmisspelling = session.query(WordMisSpelling).filter_by(wordmisspelling=tag).one()
                target_word = wordmisspelling.word
                word = str(target_word)
                corrected_tag = tag.replace(wordmisspelling.wordmisspelling, word)
            except NoResultFound:
                pass
            current_word = session.query(Word).filter_by(word=word).one()
            current_tagword_list = session.query(TagWord).filter_by(word=current_word, position=index).all()
            if current_tagword_list:
                current_tagword_list_tag_set = set([tagword.tag for tagword in current_tagword_list])
                # Narrow the candidates to tags that also use this word here.
                possible_tag_set = possible_tag_set & current_tagword_list_tag_set
                for tagword in current_tagword_list:
                    if index == 0:  # only add tags that start with the correct word
                        possible_tag_set.add(tagword.tag)
                    else:
                        if tagword.tag not in possible_tag_set:
                            return False
        if not possible_tag_set:
            return False
        if len(possible_tag_set) == 1:
            last_tag = list(possible_tag_set)[0]
            last_tag_text = str(last_tag)
            if last_tag_text == corrected_tag:
                return last_tag
        # No unique exact match (zero-or-many candidates, or text mismatch).
        return False
    except NoResultFound:  # any failed query
        return False
# auto-commit (stray commit-message residue; kept as a comment so the file parses)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# MIT License
#from sqlalchemy import Table
#from sqlalchemy import Column
#from sqlalchemy import ForeignKey
#from sqlalchemy import UniqueConstraint
#from sqlalchemy import CheckConstraint
#from sqlalchemy import Integer
#from sqlalchemy import Unicode
#from sqlalchemy.orm import relationship
#from sqlalchemy.orm import backref
#from sqlalchemy.orm import relation
from sqlalchemy.orm.exc import NoResultFound
#from sqlalchemy.ext.declarative import declarative_base
#from sqlalchemy.ext.declarative import declared_attr
#from sqlalchemy.ext.associationproxy import association_proxy
#from sqlalchemy.ext.hybrid import hybrid_property
#from .get_one_or_create import get_one_or_create
#from .BaseMixin import BASE
#from .Config import CONFIG
from .Word import Word
from .Word import WordMisSpelling
from .TagWord import TagWord
def find_tag(session, tag):
    '''
    Iterate over the tagwords table to check for an existing tag.

    Each word is checked for a known misspelling, which is replaced when
    found. Returns the matching tag if exactly one candidate spells out
    the corrected text, else False.

    It's reasonable to return the tag if it's found because, unlike an
    alias, a tag can't point to the wrong thing; returning a "duplicate"
    alias only makes sense if it's pointing to the same tag.
    '''
    corrected_tag = tag
    possible_tag_set = set([])
    assert isinstance(tag, str)
    try:
        for index, word in enumerate(tag.split(' ')):
            try:
                # NOTE(review): filters by the WHOLE tag string, not the
                # current word — per the docstring this probably should be
                # filter_by(wordmisspelling=word); confirm with callers.
                wordmisspelling = session.query(WordMisSpelling).filter_by(wordmisspelling=tag).one()
                target_word = wordmisspelling.word
                word = str(target_word)
                corrected_tag = tag.replace(wordmisspelling.wordmisspelling, word)
            except NoResultFound:
                pass
            current_word = session.query(Word).filter_by(word=word).one()
            current_tagword_list = session.query(TagWord).filter_by(word=current_word, position=index).all()
            if current_tagword_list:
                current_tagword_list_tag_set = set([tagword.tag for tagword in current_tagword_list])
                # Narrow candidates to tags that also use this word here.
                possible_tag_set = possible_tag_set & current_tagword_list_tag_set
                for tagword in current_tagword_list:
                    if index == 0:  # only add tags that start with the correct word
                        possible_tag_set.add(tagword.tag)
                    else:
                        if tagword.tag not in possible_tag_set:
                            return False
        if not possible_tag_set:
            return False
        if len(possible_tag_set) == 1:
            last_tag = list(possible_tag_set)[0]
            last_tag_text = str(last_tag)
            if last_tag_text == corrected_tag:
                return last_tag
            else:
                return False
        # NOTE(review): with more than one remaining candidate the function
        # falls through and implicitly returns None, not the documented
        # False — confirm whether callers rely on truthiness only.
    except NoResultFound:  # any failed query
        return False
# ---- file boundary (concatenation artifact) ----
import time
import os
from hazelcast.config import IndexType, IntType
from hazelcast.errors import HazelcastError
from hazelcast.proxy.map import EntryEventType
from hazelcast.serialization.api import IdentifiedDataSerializable
from hazelcast.predicate import (
sql,
greater_or_equal,
less_or_equal,
)
from tests.base import SingleMemberTestCase
from tests.integration.backward_compatible.util import (
write_string_to_output,
read_string_from_input,
)
from tests.util import (
random_string,
event_collector,
fill_map,
is_server_version_older_than,
is_client_version_older_than,
get_current_timestamp,
mark_client_version_at_least,
)
from hazelcast import six
from hazelcast.six.moves import range
import unittest
from hazelcast.aggregator import (
count,
double_avg,
double_sum,
number_avg,
fixed_point_sum,
floating_point_sum,
max_,
min_,
int_avg,
int_sum,
long_avg,
long_sum,
)
class EntryProcessor(IdentifiedDataSerializable):
    """Client-side stub of the server's entry processor.

    Carries a single string `value`; FACTORY_ID/CLASS_ID must match the
    server-side registration (presumably configured in hazelcast.xml —
    confirm) so the member can deserialize it.
    """
    FACTORY_ID = 66
    CLASS_ID = 1

    def __init__(self, value=None):
        self.value = value

    def write_data(self, object_data_output):
        # Payload is just the single string value.
        write_string_to_output(object_data_output, self.value)

    def read_data(self, object_data_input):
        self.value = read_string_from_input(object_data_input)

    def get_factory_id(self):
        return self.FACTORY_ID

    def get_class_id(self):
        return self.CLASS_ID
class MapTest(SingleMemberTestCase):
@classmethod
def configure_cluster(cls):
    """Load the member config XML that lives next to this test file."""
    here = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(here, "hazelcast.xml")) as config_file:
        return config_file.read()
@classmethod
def configure_client(cls, config):
    """Point the client at the test cluster and register the EP factory."""
    config["cluster_name"] = cls.cluster.id
    factories = {
        EntryProcessor.FACTORY_ID: {EntryProcessor.CLASS_ID: EntryProcessor},
    }
    config["data_serializable_factories"] = factories
    return config
def setUp(self):
    # Fresh randomly-named map per test; blocking() gives the sync API.
    self.map = self.client.get_map(random_string()).blocking()
def tearDown(self):
    # Destroy the distributed map so tests don't leak cluster state.
    self.map.destroy()
def test_add_entry_listener_item_added(self):
collector = event_collector()
self.map.add_entry_listener(include_value=True, added_func=collector)
self.map.put("key", "value")
def assert_event():
self.assertEqual(len(collector.events), 1)
event = collector.events[0]
self.assertEntryEvent(event, key="key", event_type=EntryEventType.ADDED, value="value")
self.assertTrueEventually(assert_event, 5)
def test_add_entry_listener_item_removed(self):
collector = event_collector()
self.map.add_entry_listener(include_value=True, removed_func=collector)
self.map.put("key", "value")
self.map.remove("key")
def assert_event():
self.assertEqual(len(collector.events), 1)
event = collector.events[0]
self.assertEntryEvent(
event, key="key", event_type=EntryEventType.REMOVED, old_value="value"
)
self.assertTrueEventually(assert_event, 5)
def test_add_entry_listener_item_updated(self):
collector = event_collector()
self.map.add_entry_listener(include_value=True, updated_func=collector)
self.map.put("key", "value")
self.map.put("key", "new_value")
def assert_event():
self.assertEqual(len(collector.events), 1)
event = collector.events[0]
self.assertEntryEvent(
event,
key="key",
event_type=EntryEventType.UPDATED,
old_value="value",
value="new_value",
)
self.assertTrueEventually(assert_event, 5)
def test_add_entry_listener_item_expired(self):
collector = event_collector()
self.map.add_entry_listener(include_value=True, expired_func=collector)
self.map.put("key", "value", ttl=0.1)
def assert_event():
self.assertEqual(len(collector.events), 1)
event = collector.events[0]
self.assertEntryEvent(
event, key="key", event_type=EntryEventType.EXPIRED, old_value="value"
)
self.assertTrueEventually(assert_event, 10)
def test_add_entry_listener_with_key(self):
collector = event_collector()
self.map.add_entry_listener(key="key1", include_value=True, added_func=collector)
self.map.put("key2", "value2")
self.map.put("key1", "value1")
def assert_event():
self.assertEqual(len(collector.events), 1)
event = collector.events[0]
self.assertEntryEvent(
event, key="key1", event_type=EntryEventType.ADDED, value="value1"
)
self.assertTrueEventually(assert_event, 5)
def test_add_entry_listener_with_predicate(self):
collector = event_collector()
self.map.add_entry_listener(
predicate=sql("this == value1"), include_value=True, added_func=collector
)
self.map.put("key2", "value2")
self.map.put("key1", "value1")
def assert_event():
self.assertEqual(len(collector.events), 1)
event = collector.events[0]
self.assertEntryEvent(
event, key="key1", event_type=EntryEventType.ADDED, value="value1"
)
self.assertTrueEventually(assert_event, 5)
def test_add_entry_listener_with_key_and_predicate(self):
collector = event_collector()
self.map.add_entry_listener(
key="key1", predicate=sql("this == value3"), include_value=True, added_func=collector
)
self.map.put("key2", "value2")
self.map.put("key1", "value1")
self.map.remove("key1")
self.map.put("key1", "value3")
def assert_event():
self.assertEqual(len(collector.events), 1)
event = collector.events[0]
self.assertEntryEvent(
event, key="key1", event_type=EntryEventType.ADDED, value="value3"
)
self.assertTrueEventually(assert_event, 5)
def test_add_index(self):
self.map.add_index(attributes=["this"])
self.map.add_index(attributes=["this"], index_type=IndexType.HASH)
self.map.add_index(
attributes=["this"],
index_type=IndexType.BITMAP,
bitmap_index_options={
"unique_key": "this",
},
)
def test_add_index_duplicate_fields(self):
with self.assertRaises(ValueError):
self.map.add_index(attributes=["this", "this"])
def test_add_index_invalid_attribute(self):
with self.assertRaises(ValueError):
self.map.add_index(attributes=["this.x."])
def test_clear(self):
self.fill_map()
self.map.clear()
self.assertEqual(self.map.size(), 0)
def test_contains_key(self):
self.fill_map()
self.assertTrue(self.map.contains_key("key-1"))
self.assertFalse(self.map.contains_key("key-10"))
def test_contains_value(self):
self.fill_map()
self.assertTrue(self.map.contains_value("value-1"))
self.assertFalse(self.map.contains_value("value-10"))
def test_delete(self):
self.fill_map()
self.map.delete("key-1")
self.assertEqual(self.map.size(), 9)
self.assertFalse(self.map.contains_key("key-1"))
def test_entry_set(self):
entries = self.fill_map()
six.assertCountEqual(self, self.map.entry_set(), list(six.iteritems(entries)))
def test_entry_set_with_predicate(self):
self.fill_map()
self.assertEqual(self.map.entry_set(sql("this == 'value-1'")), [("key-1", "value-1")])
def test_evict(self):
self.fill_map()
self.map.evict("key-1")
self.assertEqual(self.map.size(), 9)
self.assertFalse(self.map.contains_key("key-1"))
def test_evict_all(self):
self.fill_map()
self.map.evict_all()
self.assertEqual(self.map.size(), 0)
def test_execute_on_entries(self):
m = self.fill_map()
expected_entry_set = [(key, "processed") for key in m]
values = self.map.execute_on_entries(EntryProcessor("processed"))
six.assertCountEqual(self, expected_entry_set, self.map.entry_set())
six.assertCountEqual(self, expected_entry_set, values)
def test_execute_on_entries_with_predicate(self):
m = self.fill_map()
expected_entry_set = [(key, "processed") if key < "key-5" else (key, m[key]) for key in m]
expected_values = [(key, "processed") for key in m if key < "key-5"]
values = self.map.execute_on_entries(EntryProcessor("processed"), sql("__key < 'key-5'"))
six.assertCountEqual(self, expected_entry_set, self.map.entry_set())
six.assertCountEqual(self, expected_values, values)
def test_execute_on_key(self):
self.map.put("test-key", "test-value")
value = self.map.execute_on_key("test-key", EntryProcessor("processed"))
self.assertEqual("processed", self.map.get("test-key"))
self.assertEqual("processed", value)
def test_execute_on_keys(self):
m = self.fill_map()
expected_entry_set = [(key, "processed") for key in m]
values = self.map.execute_on_keys(list(m.keys()), EntryProcessor("processed"))
six.assertCountEqual(self, expected_entry_set, self.map.entry_set())
six.assertCountEqual(self, expected_entry_set, values)
def test_execute_on_keys_with_empty_key_list(self):
m = self.fill_map()
expected_entry_set = [(key, m[key]) for key in m]
values = self.map.execute_on_keys([], EntryProcessor("processed"))
self.assertEqual([], values)
six.assertCountEqual(self, expected_entry_set, self.map.entry_set())
def test_flush(self):
self.fill_map()
self.map.flush()
def test_force_unlock(self):
self.map.put("key", "value")
self.map.lock("key")
self.start_new_thread(lambda: self.map.force_unlock("key"))
self.assertTrueEventually(lambda: self.assertFalse(self.map.is_locked("key")))
def test_get_all(self):
expected = self.fill_map(1000)
actual = self.map.get_all(list(expected.keys()))
six.assertCountEqual(self, expected, actual)
def test_get_all_when_no_keys(self):
self.assertEqual(self.map.get_all([]), {})
def test_get_entry_view(self):
    # put + get + put yields one read and an updated value (version 1).
    self.map.put("key", "value")
    self.map.get("key")
    self.map.put("key", "new_value")

    entry_view = self.map.get_entry_view("key")
    self.assertEqual(entry_view.key, "key")
    self.assertEqual(entry_view.value, "new_value")
    self.assertIsNotNone(entry_view.cost)
    self.assertIsNotNone(entry_view.creation_time)
    self.assertIsNotNone(entry_view.expiration_time)
    if is_server_version_older_than(self.client, "4.2"):
        self.assertEqual(entry_view.hits, 2)
    else:
        # 4.2+ servers do not collect per entry stats by default
        self.assertIsNotNone(entry_view.hits)
    self.assertIsNotNone(entry_view.last_access_time)
    self.assertIsNotNone(entry_view.last_stored_time)
    self.assertIsNotNone(entry_view.last_update_time)
    self.assertEqual(entry_view.version, 1)
    self.assertIsNotNone(entry_view.ttl)
    self.assertIsNotNone(entry_view.max_idle)
def test_is_empty(self):
self.map.put("key", "value")
self.assertFalse(self.map.is_empty())
self.map.clear()
self.assertTrue(self.map.is_empty())
def test_is_locked(self):
self.map.put("key", "value")
self.assertFalse(self.map.is_locked("key"))
self.map.lock("key")
self.assertTrue(self.map.is_locked("key"))
self.map.unlock("key")
self.assertFalse(self.map.is_locked("key"))
def test_key_set(self):
keys = list(self.fill_map().keys())
six.assertCountEqual(self, self.map.key_set(), keys)
def test_key_set_with_predicate(self):
self.fill_map()
self.assertEqual(self.map.key_set(sql("this == 'value-1'")), ["key-1"])
def test_lock(self):
self.map.put("key", "value")
t = self.start_new_thread(lambda: self.map.lock("key"))
t.join()
self.assertFalse(self.map.try_put("key", "new_value", timeout=0.01))
def test_put_all(self):
m = {"key-%d" % x: "value-%d" % x for x in range(0, 1000)}
self.map.put_all(m)
entries = self.map.entry_set()
six.assertCountEqual(self, entries, six.iteritems(m))
def test_put_all_when_no_keys(self):
self.assertIsNone(self.map.put_all({}))
def test_put_if_absent_when_missing_value(self):
returned_value = self.map.put_if_absent("key", "new_value")
self.assertIsNone(returned_value)
self.assertEqual(self.map.get("key"), "new_value")
def test_put_if_absent_when_existing_value(self):
self.map.put("key", "value")
returned_value = self.map.put_if_absent("key", "new_value")
self.assertEqual(returned_value, "value")
self.assertEqual(self.map.get("key"), "value")
def test_put_get(self):
self.assertIsNone(self.map.put("key", "value"))
self.assertEqual(self.map.get("key"), "value")
def test_put_get_large_payload(self):
    # The fix for reading large payloads is introduced in 4.2.1
    # See https://github.com/hazelcast/hazelcast-python-client/pull/436
    mark_client_version_at_least(self, "4.2.1")
    # 16 MiB random blob; the 5 s bound guards against the pre-fix
    # pathological slowdown, not ordinary variance.
    payload = bytearray(os.urandom(16 * 1024 * 1024))
    start = get_current_timestamp()
    self.assertIsNone(self.map.put("key", payload))
    self.assertEqual(self.map.get("key"), payload)
    self.assertLessEqual(get_current_timestamp() - start, 5)
def test_put_get2(self):
val = "x" * 5000
self.assertIsNone(self.map.put("key-x", val))
self.assertEqual(self.map.get("key-x"), val)
def test_put_when_existing(self):
self.map.put("key", "value")
self.assertEqual(self.map.put("key", "new_value"), "value")
self.assertEqual(self.map.get("key"), "new_value")
def test_put_transient(self):
self.map.put_transient("key", "value")
self.assertEqual(self.map.get("key"), "value")
def test_remove(self):
self.map.put("key", "value")
removed = self.map.remove("key")
self.assertEqual(removed, "value")
self.assertEqual(0, self.map.size())
self.assertFalse(self.map.contains_key("key"))
def test_remove_if_same_when_same(self):
self.map.put("key", "value")
self.assertTrue(self.map.remove_if_same("key", "value"))
self.assertFalse(self.map.contains_key("key"))
def test_remove_if_same_when_different(self):
self.map.put("key", "value")
self.assertFalse(self.map.remove_if_same("key", "another_value"))
self.assertTrue(self.map.contains_key("key"))
def test_remove_entry_listener(self):
collector = event_collector()
reg_id = self.map.add_entry_listener(added_func=collector)
self.map.put("key", "value")
self.assertTrueEventually(lambda: self.assertEqual(len(collector.events), 1))
self.map.remove_entry_listener(reg_id)
self.map.put("key2", "value")
time.sleep(1)
self.assertEqual(len(collector.events), 1)
def test_remove_entry_listener_with_none_id(self):
with self.assertRaises(AssertionError) as cm:
self.map.remove_entry_listener(None)
e = cm.exception
self.assertEqual(e.args[0], "None user_registration_id is not allowed!")
def test_replace(self):
self.map.put("key", "value")
replaced = self.map.replace("key", "new_value")
self.assertEqual(replaced, "value")
self.assertEqual(self.map.get("key"), "new_value")
def test_replace_if_same_when_same(self):
self.map.put("key", "value")
self.assertTrue(self.map.replace_if_same("key", "value", "new_value"))
self.assertEqual(self.map.get("key"), "new_value")
def test_replace_if_same_when_different(self):
self.map.put("key", "value")
self.assertFalse(self.map.replace_if_same("key", "another_value", "new_value"))
self.assertEqual(self.map.get("key"), "value")
def test_set(self):
self.map.set("key", "value")
self.assertEqual(self.map.get("key"), "value")
def test_set_ttl(self):
self.map.put("key", "value")
self.map.set_ttl("key", 0.1)
def evicted():
self.assertFalse(self.map.contains_key("key"))
self.assertTrueEventually(evicted, 5)
def test_size(self):
self.fill_map()
self.assertEqual(10, self.map.size())
def test_try_lock_when_unlocked(self):
self.assertTrue(self.map.try_lock("key"))
self.assertTrue(self.map.is_locked("key"))
def test_try_lock_when_locked(self):
t = self.start_new_thread(lambda: self.map.lock("key"))
t.join()
self.assertFalse(self.map.try_lock("key", timeout=0.1))
def test_try_put_when_unlocked(self):
self.assertTrue(self.map.try_put("key", "value"))
self.assertEqual(self.map.get("key"), "value")
def test_try_put_when_locked(self):
t = self.start_new_thread(lambda: self.map.lock("key"))
t.join()
self.assertFalse(self.map.try_put("key", "value", timeout=0.1))
def test_try_remove_when_unlocked(self):
self.map.put("key", "value")
self.assertTrue(self.map.try_remove("key"))
self.assertIsNone(self.map.get("key"))
def test_try_remove_when_locked(self):
self.map.put("key", "value")
t = self.start_new_thread(lambda: self.map.lock("key"))
t.join()
self.assertFalse(self.map.try_remove("key", timeout=0.1))
def test_unlock(self):
self.map.lock("key")
self.assertTrue(self.map.is_locked("key"))
self.map.unlock("key")
self.assertFalse(self.map.is_locked("key"))
def test_unlock_when_no_lock(self):
with self.assertRaises(HazelcastError):
self.map.unlock("key")
def test_values(self):
values = list(self.fill_map().values())
six.assertCountEqual(self, list(self.map.values()), values)
def test_values_with_predicate(self):
self.fill_map()
self.assertEqual(self.map.values(sql("this == 'value-1'")), ["value-1"])
def test_str(self):
self.assertTrue(str(self.map).startswith("Map"))
def fill_map(self, count=10):
    """Put ``count`` "key-i"/"value-i" pairs into the map and return them."""
    entries = {"key-%d" % i: "value-%d" % i for i in range(count)}
    self.map.put_all(entries)
    return entries
class MapStoreTest(SingleMemberTestCase):
    """Tests for a map backed by a MapStore (see hazelcast_mapstore.xml)."""

    @classmethod
    def configure_client(cls, config):
        config["cluster_name"] = cls.cluster.id
        return config

    @classmethod
    def configure_cluster(cls):
        # The member config lives next to this test module.
        xml_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "hazelcast_mapstore.xml")
        with open(xml_path) as f:
            return f.read()

    def setUp(self):
        self.map = self.client.get_map("mapstore-test").blocking()
        self.entries = fill_map(self.map, size=10, key_prefix="key", value_prefix="val")

    def tearDown(self):
        self.map.destroy()

    def test_load_all_with_no_args_loads_all_keys(self):
        """load_all() with no arguments reloads every key from the store."""
        self.map.evict_all()
        self.map.load_all()
        loaded = self.map.get_all(self.entries.keys())
        six.assertCountEqual(self, loaded, self.entries)

    def test_load_all_with_key_set_loads_given_keys(self):
        """load_all(keys) reloads exactly the requested keys."""
        self.map.evict_all()
        self.map.load_all(["key0", "key1"])
        loaded = self.map.get_all(["key0", "key1"])
        six.assertCountEqual(self, loaded, {"key0": "val0", "key1": "val1"})

    def test_load_all_overrides_entries_in_memory_by_default(self):
        """By default load_all() replaces in-memory values with stored ones."""
        self.map.evict_all()
        self.map.put_transient("key0", "new0")
        self.map.put_transient("key1", "new1")
        self.map.load_all(["key0", "key1"])
        loaded = self.map.get_all(["key0", "key1"])
        six.assertCountEqual(self, loaded, {"key0": "val0", "key1": "val1"})

    def test_load_all_with_replace_existing_false_does_not_override(self):
        """replace_existing_values=False keeps the in-memory values."""
        self.map.evict_all()
        self.map.put_transient("key0", "new0")
        self.map.put_transient("key1", "new1")
        self.map.load_all(["key0", "key1"], replace_existing_values=False)
        loaded = self.map.get_all(["key0", "key1"])
        six.assertCountEqual(self, loaded, {"key0": "new0", "key1": "new1"})

    def test_evict(self):
        """Evicting one of the 10 preloaded keys shrinks the map by one."""
        self.map.evict("key0")
        self.assertEqual(self.map.size(), 9)

    def test_evict_non_existing_key(self):
        """Evicting an unknown key is a no-op."""
        self.map.evict("non_existing_key")
        self.assertEqual(self.map.size(), 10)

    def test_evict_all(self):
        """evict_all() empties the in-memory map."""
        self.map.evict_all()
        self.assertEqual(self.map.size(), 0)

    def test_add_entry_listener_item_loaded(self):
        """A get() after expiration reloads from the store and fires LOADED."""
        collector = event_collector()
        self.map.add_entry_listener(include_value=True, loaded_func=collector)
        self.map.put("key", "value", ttl=0.1)
        # Wait past the ttl so the next get() has to hit the MapStore.
        time.sleep(2)
        self.map.get("key")

        def assert_event():
            self.assertEqual(len(collector.events), 1)
            event = collector.events[0]
            self.assertEntryEvent(event, key="key", value="value", event_type=EntryEventType.LOADED)

        self.assertTrueEventually(assert_event, 10)
class MapTTLTest(SingleMemberTestCase):
    """Expiration behavior of the per-entry ``ttl`` argument."""

    @classmethod
    def configure_client(cls, config):
        config["cluster_name"] = cls.cluster.id
        return config

    def setUp(self):
        self.map = self.client.get_map(random_string()).blocking()

    def tearDown(self):
        self.map.destroy()

    def _assert_key_expires(self):
        # An entry written with ttl=0.1 must disappear shortly.
        self.assertTrueEventually(lambda: self.assertFalse(self.map.contains_key("key")))

    def _assert_key_survives(self):
        # With the default ttl the entry must still be present after a second.
        time.sleep(1.0)
        self.assertTrue(self.map.contains_key("key"))

    def test_put_default_ttl(self):
        self.map.put("key", "value")
        self._assert_key_survives()

    def test_put(self):
        self.map.put("key", "value", 0.1)
        self._assert_key_expires()

    def test_put_transient_default_ttl(self):
        self.map.put_transient("key", "value")
        self._assert_key_survives()

    def test_put_transient(self):
        self.map.put_transient("key", "value", 0.1)
        self._assert_key_expires()

    def test_put_if_absent_ttl(self):
        self.map.put_if_absent("key", "value")
        self._assert_key_survives()

    def test_put_if_absent(self):
        self.map.put_if_absent("key", "value", 0.1)
        self._assert_key_expires()

    def test_set_default_ttl(self):
        self.map.set("key", "value")
        self._assert_key_survives()

    def test_set(self):
        self.map.set("key", "value", 0.1)
        self._assert_key_expires()
class MapMaxIdleTest(SingleMemberTestCase):
    """Expiration behavior of the per-entry ``max_idle`` argument.

    Entries written with ``max_idle=0.1`` must be gone after a second of
    inactivity, while entries written without ``max_idle`` must survive.
    """

    @classmethod
    def configure_client(cls, config):
        config["cluster_name"] = cls.cluster.id
        return config

    def setUp(self):
        self.map = self.client.get_map(random_string()).blocking()

    def tearDown(self):
        self.map.destroy()

    def test_put_default_max_idle(self):
        self.map.put("key", "value")
        time.sleep(1.0)
        self.assertTrue(self.map.contains_key("key"))

    def test_put(self):
        self.map.put("key", "value", max_idle=0.1)
        time.sleep(1.0)
        self.assertFalse(self.map.contains_key("key"))

    def test_put_transient_default_max_idle(self):
        self.map.put_transient("key", "value")
        time.sleep(1.0)
        self.assertTrue(self.map.contains_key("key"))

    def test_put_transient(self):
        self.map.put_transient("key", "value", max_idle=0.1)
        time.sleep(1.0)
        self.assertFalse(self.map.contains_key("key"))

    def test_put_if_absent_max_idle(self):
        self.map.put_if_absent("key", "value")
        time.sleep(1.0)
        self.assertTrue(self.map.contains_key("key"))

    def test_put_if_absent(self):
        self.map.put_if_absent("key", "value", max_idle=0.1)
        time.sleep(1.0)
        self.assertFalse(self.map.contains_key("key"))

    def test_set_default_max_idle(self):
        # Renamed from test_set_default_ttl: this class exercises max_idle,
        # not ttl; the old name was a copy-paste leftover from MapTTLTest.
        self.map.set("key", "value")
        time.sleep(1.0)
        self.assertTrue(self.map.contains_key("key"))

    def test_set(self):
        self.map.set("key", "value", max_idle=0.1)
        time.sleep(1.0)
        self.assertFalse(self.map.contains_key("key"))
@unittest.skipIf(
    is_client_version_older_than("4.2.1"), "Tests the features added in 4.2.1 version of the client"
)
class MapAggregatorsIntTest(SingleMemberTestCase):
    """Aggregations over the int values 0..49 (sum 1225, mean 24.5)."""

    @classmethod
    def configure_client(cls, config):
        config["cluster_name"] = cls.cluster.id
        config["default_int_type"] = IntType.INT
        return config

    def setUp(self):
        self.map = self.client.get_map(random_string()).blocking()
        self.map.put_all({"key-%d" % i: i for i in range(50)})

    def tearDown(self):
        self.map.destroy()

    def test_int_average(self):
        average = self.map.aggregate(int_avg())
        self.assertEqual(24.5, average)

    def test_int_average_with_attribute_path(self):
        average = self.map.aggregate(int_avg("this"))
        self.assertEqual(24.5, average)

    def test_int_average_with_predicate(self):
        average = self.map.aggregate(int_avg(), predicate=greater_or_equal("this", 47))
        self.assertEqual(48, average)

    def test_int_sum(self):
        # Local is named "total" rather than "sum" to avoid shadowing the builtin.
        total = self.map.aggregate(int_sum())
        self.assertEqual(1225, total)

    def test_int_sum_with_attribute_path(self):
        total = self.map.aggregate(int_sum("this"))
        self.assertEqual(1225, total)

    def test_int_sum_with_predicate(self):
        total = self.map.aggregate(int_sum(), predicate=greater_or_equal("this", 47))
        self.assertEqual(144, total)

    def test_fixed_point_sum(self):
        total = self.map.aggregate(fixed_point_sum())
        self.assertEqual(1225, total)

    def test_fixed_point_sum_with_attribute_path(self):
        total = self.map.aggregate(fixed_point_sum("this"))
        self.assertEqual(1225, total)

    def test_fixed_point_sum_with_predicate(self):
        total = self.map.aggregate(fixed_point_sum(), predicate=greater_or_equal("this", 47))
        self.assertEqual(144, total)
@unittest.skipIf(
    is_client_version_older_than("4.2.1"), "Tests the features added in 4.2.1 version of the client"
)
class MapAggregatorsLongTest(SingleMemberTestCase):
    """Aggregations over the long values 0..49 (sum 1225, mean 24.5)."""

    @classmethod
    def configure_client(cls, config):
        config["cluster_name"] = cls.cluster.id
        config["default_int_type"] = IntType.LONG
        return config

    def setUp(self):
        self.map = self.client.get_map(random_string()).blocking()
        self.map.put_all({"key-%d" % i: i for i in range(50)})

    def tearDown(self):
        self.map.destroy()

    def test_long_average(self):
        average = self.map.aggregate(long_avg())
        self.assertEqual(24.5, average)

    def test_long_average_with_attribute_path(self):
        average = self.map.aggregate(long_avg("this"))
        self.assertEqual(24.5, average)

    def test_long_average_with_predicate(self):
        average = self.map.aggregate(long_avg(), predicate=greater_or_equal("this", 47))
        self.assertEqual(48, average)

    def test_long_sum(self):
        # Local is named "total" rather than "sum" to avoid shadowing the builtin.
        total = self.map.aggregate(long_sum())
        self.assertEqual(1225, total)

    def test_long_sum_with_attribute_path(self):
        total = self.map.aggregate(long_sum("this"))
        self.assertEqual(1225, total)

    def test_long_sum_with_predicate(self):
        total = self.map.aggregate(long_sum(), predicate=greater_or_equal("this", 47))
        self.assertEqual(144, total)
@unittest.skipIf(
    is_client_version_older_than("4.2.1"), "Tests the features added in 4.2.1 version of the client"
)
class MapAggregatorsDoubleTest(SingleMemberTestCase):
    """Aggregations over the float values 0.0..49.0 (sum 1225, mean 24.5)."""

    @classmethod
    def configure_client(cls, config):
        config["cluster_name"] = cls.cluster.id
        return config

    def setUp(self):
        self.map = self.client.get_map(random_string()).blocking()
        self.map.put_all({"key-%d" % i: float(i) for i in range(50)})

    def tearDown(self):
        self.map.destroy()

    def test_count(self):
        count_ = self.map.aggregate(count())
        self.assertEqual(50, count_)

    def test_count_with_attribute_path(self):
        count_ = self.map.aggregate(count("this"))
        self.assertEqual(50, count_)

    def test_count_with_predicate(self):
        count_ = self.map.aggregate(count(), predicate=greater_or_equal("this", 1))
        self.assertEqual(49, count_)

    def test_double_average(self):
        average = self.map.aggregate(double_avg())
        self.assertEqual(24.5, average)

    def test_double_average_with_attribute_path(self):
        average = self.map.aggregate(double_avg("this"))
        self.assertEqual(24.5, average)

    def test_double_average_with_predicate(self):
        average = self.map.aggregate(double_avg(), predicate=greater_or_equal("this", 47))
        self.assertEqual(48, average)

    def test_double_sum(self):
        # Local is named "total" rather than "sum" to avoid shadowing the builtin.
        total = self.map.aggregate(double_sum())
        self.assertEqual(1225, total)

    def test_double_sum_with_attribute_path(self):
        total = self.map.aggregate(double_sum("this"))
        self.assertEqual(1225, total)

    def test_double_sum_with_predicate(self):
        total = self.map.aggregate(double_sum(), predicate=greater_or_equal("this", 47))
        self.assertEqual(144, total)

    def test_floating_point_sum(self):
        total = self.map.aggregate(floating_point_sum())
        self.assertEqual(1225, total)

    def test_floating_point_sum_with_attribute_path(self):
        total = self.map.aggregate(floating_point_sum("this"))
        self.assertEqual(1225, total)

    def test_floating_point_sum_with_predicate(self):
        total = self.map.aggregate(floating_point_sum(), predicate=greater_or_equal("this", 47))
        self.assertEqual(144, total)

    def test_number_avg(self):
        average = self.map.aggregate(number_avg())
        self.assertEqual(24.5, average)

    def test_number_avg_with_attribute_path(self):
        average = self.map.aggregate(number_avg("this"))
        self.assertEqual(24.5, average)

    def test_number_avg_with_predicate(self):
        average = self.map.aggregate(number_avg(), predicate=greater_or_equal("this", 47))
        self.assertEqual(48, average)

    def test_max(self):
        # Local renamed from the misleading "average": max_() returns the maximum.
        result = self.map.aggregate(max_())
        self.assertEqual(49, result)

    def test_max_with_attribute_path(self):
        result = self.map.aggregate(max_("this"))
        self.assertEqual(49, result)

    def test_max_with_predicate(self):
        result = self.map.aggregate(max_(), predicate=less_or_equal("this", 3))
        self.assertEqual(3, result)

    def test_min(self):
        result = self.map.aggregate(min_())
        self.assertEqual(0, result)

    def test_min_with_attribute_path(self):
        result = self.map.aggregate(min_("this"))
        self.assertEqual(0, result)

    def test_min_with_predicate(self):
        result = self.map.aggregate(min_(), predicate=greater_or_equal("this", 3))
        self.assertEqual(3, result)
Fix map tests for backward compatibility (#444)
Moves the aggregator imports inside a try block so that the tests
do not fail on clients that have not implemented this feature.
import os
import time
import unittest
from hazelcast import six
try:
from hazelcast.aggregator import (
count,
double_avg,
double_sum,
fixed_point_sum,
floating_point_sum,
int_avg,
int_sum,
long_avg,
long_sum,
max_,
min_,
number_avg,
)
except ImportError:
# If the import of those fail, we won't use
# them in the tests thanks to client version check.
pass
from hazelcast.config import IndexType, IntType
from hazelcast.errors import HazelcastError
from hazelcast.predicate import greater_or_equal, less_or_equal, sql
from hazelcast.proxy.map import EntryEventType
from hazelcast.serialization.api import IdentifiedDataSerializable
from hazelcast.six.moves import range
from tests.base import SingleMemberTestCase
from tests.integration.backward_compatible.util import (
read_string_from_input,
write_string_to_output,
)
from tests.util import (
event_collector,
fill_map,
get_current_timestamp,
is_client_version_older_than,
is_server_version_older_than,
mark_client_version_at_least,
random_string,
)
class EntryProcessor(IdentifiedDataSerializable):
    """Client-side stub of the entry processor registered on the member.

    FACTORY_ID/CLASS_ID must match the server-side registration (see
    configure_client in MapTest, which wires this class into
    data_serializable_factories).
    """

    FACTORY_ID = 66
    CLASS_ID = 1

    def __init__(self, value=None):
        # Value the processor writes into each processed entry.
        self.value = value

    def write_data(self, object_data_output):
        # Wire format is a single string; must mirror read_data exactly.
        write_string_to_output(object_data_output, self.value)

    def read_data(self, object_data_input):
        self.value = read_string_from_input(object_data_input)

    def get_factory_id(self):
        return self.FACTORY_ID

    def get_class_id(self):
        return self.CLASS_ID
class MapTest(SingleMemberTestCase):
    """Integration tests for the Map proxy against a single member.

    Each test method name describes the operation it exercises. Listener
    tests register an event_collector callback and poll the collected
    events with assertTrueEventually, since events arrive asynchronously.
    """

    @classmethod
    def configure_cluster(cls):
        # Member config (hazelcast.xml) lives next to this test module.
        path = os.path.abspath(__file__)
        dir_path = os.path.dirname(path)
        with open(os.path.join(dir_path, "hazelcast.xml")) as f:
            return f.read()

    @classmethod
    def configure_client(cls, config):
        # Register EntryProcessor so the client can serialize it for
        # execute_on_* calls; ids must match the member-side processor.
        config["cluster_name"] = cls.cluster.id
        config["data_serializable_factories"] = {
            EntryProcessor.FACTORY_ID: {EntryProcessor.CLASS_ID: EntryProcessor}
        }
        return config

    def setUp(self):
        # Fresh, randomly named map per test to avoid cross-test state.
        self.map = self.client.get_map(random_string()).blocking()

    def tearDown(self):
        self.map.destroy()

    def test_add_entry_listener_item_added(self):
        collector = event_collector()
        self.map.add_entry_listener(include_value=True, added_func=collector)
        self.map.put("key", "value")

        def assert_event():
            self.assertEqual(len(collector.events), 1)
            event = collector.events[0]
            self.assertEntryEvent(event, key="key", event_type=EntryEventType.ADDED, value="value")

        self.assertTrueEventually(assert_event, 5)

    def test_add_entry_listener_item_removed(self):
        collector = event_collector()
        self.map.add_entry_listener(include_value=True, removed_func=collector)
        self.map.put("key", "value")
        self.map.remove("key")

        def assert_event():
            self.assertEqual(len(collector.events), 1)
            event = collector.events[0]
            self.assertEntryEvent(
                event, key="key", event_type=EntryEventType.REMOVED, old_value="value"
            )

        self.assertTrueEventually(assert_event, 5)

    def test_add_entry_listener_item_updated(self):
        collector = event_collector()
        self.map.add_entry_listener(include_value=True, updated_func=collector)
        self.map.put("key", "value")
        self.map.put("key", "new_value")

        def assert_event():
            self.assertEqual(len(collector.events), 1)
            event = collector.events[0]
            self.assertEntryEvent(
                event,
                key="key",
                event_type=EntryEventType.UPDATED,
                old_value="value",
                value="new_value",
            )

        self.assertTrueEventually(assert_event, 5)

    def test_add_entry_listener_item_expired(self):
        collector = event_collector()
        self.map.add_entry_listener(include_value=True, expired_func=collector)
        self.map.put("key", "value", ttl=0.1)

        def assert_event():
            self.assertEqual(len(collector.events), 1)
            event = collector.events[0]
            self.assertEntryEvent(
                event, key="key", event_type=EntryEventType.EXPIRED, old_value="value"
            )

        self.assertTrueEventually(assert_event, 10)

    def test_add_entry_listener_with_key(self):
        # Listener filtered by key: only key1 events should be collected.
        collector = event_collector()
        self.map.add_entry_listener(key="key1", include_value=True, added_func=collector)
        self.map.put("key2", "value2")
        self.map.put("key1", "value1")

        def assert_event():
            self.assertEqual(len(collector.events), 1)
            event = collector.events[0]
            self.assertEntryEvent(
                event, key="key1", event_type=EntryEventType.ADDED, value="value1"
            )

        self.assertTrueEventually(assert_event, 5)

    def test_add_entry_listener_with_predicate(self):
        collector = event_collector()
        self.map.add_entry_listener(
            predicate=sql("this == value1"), include_value=True, added_func=collector
        )
        self.map.put("key2", "value2")
        self.map.put("key1", "value1")

        def assert_event():
            self.assertEqual(len(collector.events), 1)
            event = collector.events[0]
            self.assertEntryEvent(
                event, key="key1", event_type=EntryEventType.ADDED, value="value1"
            )

        self.assertTrueEventually(assert_event, 5)

    def test_add_entry_listener_with_key_and_predicate(self):
        # Both filters must match: only the key1/value3 put fires the listener.
        collector = event_collector()
        self.map.add_entry_listener(
            key="key1", predicate=sql("this == value3"), include_value=True, added_func=collector
        )
        self.map.put("key2", "value2")
        self.map.put("key1", "value1")
        self.map.remove("key1")
        self.map.put("key1", "value3")

        def assert_event():
            self.assertEqual(len(collector.events), 1)
            event = collector.events[0]
            self.assertEntryEvent(
                event, key="key1", event_type=EntryEventType.ADDED, value="value3"
            )

        self.assertTrueEventually(assert_event, 5)

    def test_add_index(self):
        # Exercises the three index kinds; BITMAP requires bitmap_index_options.
        self.map.add_index(attributes=["this"])
        self.map.add_index(attributes=["this"], index_type=IndexType.HASH)
        self.map.add_index(
            attributes=["this"],
            index_type=IndexType.BITMAP,
            bitmap_index_options={
                "unique_key": "this",
            },
        )

    def test_add_index_duplicate_fields(self):
        with self.assertRaises(ValueError):
            self.map.add_index(attributes=["this", "this"])

    def test_add_index_invalid_attribute(self):
        with self.assertRaises(ValueError):
            self.map.add_index(attributes=["this.x."])

    def test_clear(self):
        self.fill_map()
        self.map.clear()
        self.assertEqual(self.map.size(), 0)

    def test_contains_key(self):
        self.fill_map()
        self.assertTrue(self.map.contains_key("key-1"))
        self.assertFalse(self.map.contains_key("key-10"))

    def test_contains_value(self):
        self.fill_map()
        self.assertTrue(self.map.contains_value("value-1"))
        self.assertFalse(self.map.contains_value("value-10"))

    def test_delete(self):
        self.fill_map()
        self.map.delete("key-1")
        self.assertEqual(self.map.size(), 9)
        self.assertFalse(self.map.contains_key("key-1"))

    def test_entry_set(self):
        entries = self.fill_map()
        six.assertCountEqual(self, self.map.entry_set(), list(six.iteritems(entries)))

    def test_entry_set_with_predicate(self):
        self.fill_map()
        self.assertEqual(self.map.entry_set(sql("this == 'value-1'")), [("key-1", "value-1")])

    def test_evict(self):
        self.fill_map()
        self.map.evict("key-1")
        self.assertEqual(self.map.size(), 9)
        self.assertFalse(self.map.contains_key("key-1"))

    def test_evict_all(self):
        self.fill_map()
        self.map.evict_all()
        self.assertEqual(self.map.size(), 0)

    def test_execute_on_entries(self):
        m = self.fill_map()
        expected_entry_set = [(key, "processed") for key in m]
        values = self.map.execute_on_entries(EntryProcessor("processed"))
        six.assertCountEqual(self, expected_entry_set, self.map.entry_set())
        six.assertCountEqual(self, expected_entry_set, values)

    def test_execute_on_entries_with_predicate(self):
        # Lexicographic key comparison: "key-0".."key-4" sort before "key-5".
        m = self.fill_map()
        expected_entry_set = [(key, "processed") if key < "key-5" else (key, m[key]) for key in m]
        expected_values = [(key, "processed") for key in m if key < "key-5"]
        values = self.map.execute_on_entries(EntryProcessor("processed"), sql("__key < 'key-5'"))
        six.assertCountEqual(self, expected_entry_set, self.map.entry_set())
        six.assertCountEqual(self, expected_values, values)

    def test_execute_on_key(self):
        self.map.put("test-key", "test-value")
        value = self.map.execute_on_key("test-key", EntryProcessor("processed"))
        self.assertEqual("processed", self.map.get("test-key"))
        self.assertEqual("processed", value)

    def test_execute_on_keys(self):
        m = self.fill_map()
        expected_entry_set = [(key, "processed") for key in m]
        values = self.map.execute_on_keys(list(m.keys()), EntryProcessor("processed"))
        six.assertCountEqual(self, expected_entry_set, self.map.entry_set())
        six.assertCountEqual(self, expected_entry_set, values)

    def test_execute_on_keys_with_empty_key_list(self):
        # With no keys, nothing is processed and the map is unchanged.
        m = self.fill_map()
        expected_entry_set = [(key, m[key]) for key in m]
        values = self.map.execute_on_keys([], EntryProcessor("processed"))
        self.assertEqual([], values)
        six.assertCountEqual(self, expected_entry_set, self.map.entry_set())

    def test_flush(self):
        self.fill_map()
        self.map.flush()

    def test_force_unlock(self):
        # force_unlock releases a lock owned by a different thread.
        self.map.put("key", "value")
        self.map.lock("key")
        self.start_new_thread(lambda: self.map.force_unlock("key"))
        self.assertTrueEventually(lambda: self.assertFalse(self.map.is_locked("key")))

    def test_get_all(self):
        expected = self.fill_map(1000)
        actual = self.map.get_all(list(expected.keys()))
        six.assertCountEqual(self, expected, actual)

    def test_get_all_when_no_keys(self):
        self.assertEqual(self.map.get_all([]), {})

    def test_get_entry_view(self):
        """EntryView exposes per-entry metadata after a put/get/put sequence."""
        self.map.put("key", "value")
        self.map.get("key")
        self.map.put("key", "new_value")
        entry_view = self.map.get_entry_view("key")
        self.assertEqual(entry_view.key, "key")
        self.assertEqual(entry_view.value, "new_value")
        self.assertIsNotNone(entry_view.cost)
        self.assertIsNotNone(entry_view.creation_time)
        self.assertIsNotNone(entry_view.expiration_time)
        if is_server_version_older_than(self.client, "4.2"):
            self.assertEqual(entry_view.hits, 2)
        else:
            # 4.2+ servers do not collect per entry stats by default
            self.assertIsNotNone(entry_view.hits)
        self.assertIsNotNone(entry_view.last_access_time)
        self.assertIsNotNone(entry_view.last_stored_time)
        self.assertIsNotNone(entry_view.last_update_time)
        self.assertEqual(entry_view.version, 1)
        self.assertIsNotNone(entry_view.ttl)
        self.assertIsNotNone(entry_view.max_idle)

    def test_is_empty(self):
        self.map.put("key", "value")
        self.assertFalse(self.map.is_empty())
        self.map.clear()
        self.assertTrue(self.map.is_empty())

    def test_is_locked(self):
        self.map.put("key", "value")
        self.assertFalse(self.map.is_locked("key"))
        self.map.lock("key")
        self.assertTrue(self.map.is_locked("key"))
        self.map.unlock("key")
        self.assertFalse(self.map.is_locked("key"))

    def test_key_set(self):
        keys = list(self.fill_map().keys())
        six.assertCountEqual(self, self.map.key_set(), keys)

    def test_key_set_with_predicate(self):
        self.fill_map()
        self.assertEqual(self.map.key_set(sql("this == 'value-1'")), ["key-1"])

    def test_lock(self):
        # A lock taken by another thread blocks this thread's try_put.
        self.map.put("key", "value")
        t = self.start_new_thread(lambda: self.map.lock("key"))
        t.join()
        self.assertFalse(self.map.try_put("key", "new_value", timeout=0.01))

    def test_put_all(self):
        m = {"key-%d" % x: "value-%d" % x for x in range(0, 1000)}
        self.map.put_all(m)
        entries = self.map.entry_set()
        six.assertCountEqual(self, entries, six.iteritems(m))

    def test_put_all_when_no_keys(self):
        self.assertIsNone(self.map.put_all({}))

    def test_put_if_absent_when_missing_value(self):
        returned_value = self.map.put_if_absent("key", "new_value")
        self.assertIsNone(returned_value)
        self.assertEqual(self.map.get("key"), "new_value")

    def test_put_if_absent_when_existing_value(self):
        self.map.put("key", "value")
        returned_value = self.map.put_if_absent("key", "new_value")
        self.assertEqual(returned_value, "value")
        self.assertEqual(self.map.get("key"), "value")

    def test_put_get(self):
        self.assertIsNone(self.map.put("key", "value"))
        self.assertEqual(self.map.get("key"), "value")

    def test_put_get_large_payload(self):
        """A 16 MiB payload round-trips intact and within 5 seconds."""
        # The fix for reading large payloads is introduced in 4.2.1
        # See https://github.com/hazelcast/hazelcast-python-client/pull/436
        mark_client_version_at_least(self, "4.2.1")
        payload = bytearray(os.urandom(16 * 1024 * 1024))
        start = get_current_timestamp()
        self.assertIsNone(self.map.put("key", payload))
        self.assertEqual(self.map.get("key"), payload)
        self.assertLessEqual(get_current_timestamp() - start, 5)

    def test_put_get2(self):
        val = "x" * 5000
        self.assertIsNone(self.map.put("key-x", val))
        self.assertEqual(self.map.get("key-x"), val)

    def test_put_when_existing(self):
        self.map.put("key", "value")
        self.assertEqual(self.map.put("key", "new_value"), "value")
        self.assertEqual(self.map.get("key"), "new_value")

    def test_put_transient(self):
        self.map.put_transient("key", "value")
        self.assertEqual(self.map.get("key"), "value")

    def test_remove(self):
        self.map.put("key", "value")
        removed = self.map.remove("key")
        self.assertEqual(removed, "value")
        self.assertEqual(0, self.map.size())
        self.assertFalse(self.map.contains_key("key"))

    def test_remove_if_same_when_same(self):
        self.map.put("key", "value")
        self.assertTrue(self.map.remove_if_same("key", "value"))
        self.assertFalse(self.map.contains_key("key"))

    def test_remove_if_same_when_different(self):
        self.map.put("key", "value")
        self.assertFalse(self.map.remove_if_same("key", "another_value"))
        self.assertTrue(self.map.contains_key("key"))

    def test_remove_entry_listener(self):
        # After removal, further puts must not reach the collector.
        collector = event_collector()
        reg_id = self.map.add_entry_listener(added_func=collector)
        self.map.put("key", "value")
        self.assertTrueEventually(lambda: self.assertEqual(len(collector.events), 1))
        self.map.remove_entry_listener(reg_id)
        self.map.put("key2", "value")
        time.sleep(1)
        self.assertEqual(len(collector.events), 1)

    def test_remove_entry_listener_with_none_id(self):
        with self.assertRaises(AssertionError) as cm:
            self.map.remove_entry_listener(None)
        e = cm.exception
        self.assertEqual(e.args[0], "None user_registration_id is not allowed!")

    def test_replace(self):
        self.map.put("key", "value")
        replaced = self.map.replace("key", "new_value")
        self.assertEqual(replaced, "value")
        self.assertEqual(self.map.get("key"), "new_value")

    def test_replace_if_same_when_same(self):
        self.map.put("key", "value")
        self.assertTrue(self.map.replace_if_same("key", "value", "new_value"))
        self.assertEqual(self.map.get("key"), "new_value")

    def test_replace_if_same_when_different(self):
        self.map.put("key", "value")
        self.assertFalse(self.map.replace_if_same("key", "another_value", "new_value"))
        self.assertEqual(self.map.get("key"), "value")

    def test_set(self):
        self.map.set("key", "value")
        self.assertEqual(self.map.get("key"), "value")

    def test_set_ttl(self):
        self.map.put("key", "value")
        self.map.set_ttl("key", 0.1)

        def evicted():
            self.assertFalse(self.map.contains_key("key"))

        self.assertTrueEventually(evicted, 5)

    def test_size(self):
        self.fill_map()
        self.assertEqual(10, self.map.size())

    def test_try_lock_when_unlocked(self):
        self.assertTrue(self.map.try_lock("key"))
        self.assertTrue(self.map.is_locked("key"))

    def test_try_lock_when_locked(self):
        t = self.start_new_thread(lambda: self.map.lock("key"))
        t.join()
        self.assertFalse(self.map.try_lock("key", timeout=0.1))

    def test_try_put_when_unlocked(self):
        self.assertTrue(self.map.try_put("key", "value"))
        self.assertEqual(self.map.get("key"), "value")

    def test_try_put_when_locked(self):
        t = self.start_new_thread(lambda: self.map.lock("key"))
        t.join()
        self.assertFalse(self.map.try_put("key", "value", timeout=0.1))

    def test_try_remove_when_unlocked(self):
        self.map.put("key", "value")
        self.assertTrue(self.map.try_remove("key"))
        self.assertIsNone(self.map.get("key"))

    def test_try_remove_when_locked(self):
        self.map.put("key", "value")
        t = self.start_new_thread(lambda: self.map.lock("key"))
        t.join()
        self.assertFalse(self.map.try_remove("key", timeout=0.1))

    def test_unlock(self):
        self.map.lock("key")
        self.assertTrue(self.map.is_locked("key"))
        self.map.unlock("key")
        self.assertFalse(self.map.is_locked("key"))

    def test_unlock_when_no_lock(self):
        with self.assertRaises(HazelcastError):
            self.map.unlock("key")

    def test_values(self):
        values = list(self.fill_map().values())
        six.assertCountEqual(self, list(self.map.values()), values)

    def test_values_with_predicate(self):
        self.fill_map()
        self.assertEqual(self.map.values(sql("this == 'value-1'")), ["value-1"])

    def test_str(self):
        self.assertTrue(str(self.map).startswith("Map"))

    def fill_map(self, count=10):
        # Helper: put count "key-i"/"value-i" pairs and return them as a dict.
        m = {"key-%d" % x: "value-%d" % x for x in range(0, count)}
        self.map.put_all(m)
        return m
class MapStoreTest(SingleMemberTestCase):
    """Tests for a map backed by a MapStore (see hazelcast_mapstore.xml).

    setUp preloads 10 entries ("key0".."key9" -> "val0".."val9") so the
    MapStore has data to reload after evictions.
    """

    @classmethod
    def configure_client(cls, config):
        config["cluster_name"] = cls.cluster.id
        return config

    @classmethod
    def configure_cluster(cls):
        # Member config lives next to this test module.
        path = os.path.abspath(__file__)
        dir_path = os.path.dirname(path)
        with open(os.path.join(dir_path, "hazelcast_mapstore.xml")) as f:
            return f.read()

    def setUp(self):
        self.map = self.client.get_map("mapstore-test").blocking()
        self.entries = fill_map(self.map, size=10, key_prefix="key", value_prefix="val")

    def tearDown(self):
        self.map.destroy()

    def test_load_all_with_no_args_loads_all_keys(self):
        self.map.evict_all()
        self.map.load_all()
        entry_set = self.map.get_all(self.entries.keys())
        six.assertCountEqual(self, entry_set, self.entries)

    def test_load_all_with_key_set_loads_given_keys(self):
        self.map.evict_all()
        self.map.load_all(["key0", "key1"])
        entry_set = self.map.get_all(["key0", "key1"])
        six.assertCountEqual(self, entry_set, {"key0": "val0", "key1": "val1"})

    def test_load_all_overrides_entries_in_memory_by_default(self):
        # Transient values are replaced by the stored ones.
        self.map.evict_all()
        self.map.put_transient("key0", "new0")
        self.map.put_transient("key1", "new1")
        self.map.load_all(["key0", "key1"])
        entry_set = self.map.get_all(["key0", "key1"])
        six.assertCountEqual(self, entry_set, {"key0": "val0", "key1": "val1"})

    def test_load_all_with_replace_existing_false_does_not_override(self):
        self.map.evict_all()
        self.map.put_transient("key0", "new0")
        self.map.put_transient("key1", "new1")
        self.map.load_all(["key0", "key1"], replace_existing_values=False)
        entry_set = self.map.get_all(["key0", "key1"])
        six.assertCountEqual(self, entry_set, {"key0": "new0", "key1": "new1"})

    def test_evict(self):
        self.map.evict("key0")
        self.assertEqual(self.map.size(), 9)

    def test_evict_non_existing_key(self):
        self.map.evict("non_existing_key")
        self.assertEqual(self.map.size(), 10)

    def test_evict_all(self):
        self.map.evict_all()
        self.assertEqual(self.map.size(), 0)

    def test_add_entry_listener_item_loaded(self):
        # After ttl expiry, get() reloads from the store and fires LOADED.
        collector = event_collector()
        self.map.add_entry_listener(include_value=True, loaded_func=collector)
        self.map.put("key", "value", ttl=0.1)
        time.sleep(2)
        self.map.get("key")

        def assert_event():
            self.assertEqual(len(collector.events), 1)
            event = collector.events[0]
            self.assertEntryEvent(event, key="key", value="value", event_type=EntryEventType.LOADED)

        self.assertTrueEventually(assert_event, 10)
class MapTTLTest(SingleMemberTestCase):
    """Expiration behavior of the per-entry ``ttl`` argument.

    Entries written with ``ttl=0.1`` must eventually disappear; entries
    written without a ttl must still be present after a one-second sleep.
    """

    @classmethod
    def configure_client(cls, config):
        config["cluster_name"] = cls.cluster.id
        return config

    def setUp(self):
        self.map = self.client.get_map(random_string()).blocking()

    def tearDown(self):
        self.map.destroy()

    def test_put_default_ttl(self):
        self.map.put("key", "value")
        time.sleep(1.0)
        self.assertTrue(self.map.contains_key("key"))

    def test_put(self):
        self.map.put("key", "value", 0.1)
        self.assertTrueEventually(lambda: self.assertFalse(self.map.contains_key("key")))

    def test_put_transient_default_ttl(self):
        self.map.put_transient("key", "value")
        time.sleep(1.0)
        self.assertTrue(self.map.contains_key("key"))

    def test_put_transient(self):
        self.map.put_transient("key", "value", 0.1)
        self.assertTrueEventually(lambda: self.assertFalse(self.map.contains_key("key")))

    def test_put_if_absent_ttl(self):
        self.map.put_if_absent("key", "value")
        time.sleep(1.0)
        self.assertTrue(self.map.contains_key("key"))

    def test_put_if_absent(self):
        self.map.put_if_absent("key", "value", 0.1)
        self.assertTrueEventually(lambda: self.assertFalse(self.map.contains_key("key")))

    def test_set_default_ttl(self):
        self.map.set("key", "value")
        time.sleep(1.0)
        self.assertTrue(self.map.contains_key("key"))

    def test_set(self):
        self.map.set("key", "value", 0.1)
        self.assertTrueEventually(lambda: self.assertFalse(self.map.contains_key("key")))
class MapMaxIdleTest(SingleMemberTestCase):
    """Expiration behavior of the per-entry ``max_idle`` argument.

    Entries written with ``max_idle=0.1`` must be gone after a second of
    inactivity, while entries written without ``max_idle`` must survive.
    """

    @classmethod
    def configure_client(cls, config):
        config["cluster_name"] = cls.cluster.id
        return config

    def setUp(self):
        self.map = self.client.get_map(random_string()).blocking()

    def tearDown(self):
        self.map.destroy()

    def test_put_default_max_idle(self):
        self.map.put("key", "value")
        time.sleep(1.0)
        self.assertTrue(self.map.contains_key("key"))

    def test_put(self):
        self.map.put("key", "value", max_idle=0.1)
        time.sleep(1.0)
        self.assertFalse(self.map.contains_key("key"))

    def test_put_transient_default_max_idle(self):
        self.map.put_transient("key", "value")
        time.sleep(1.0)
        self.assertTrue(self.map.contains_key("key"))

    def test_put_transient(self):
        self.map.put_transient("key", "value", max_idle=0.1)
        time.sleep(1.0)
        self.assertFalse(self.map.contains_key("key"))

    def test_put_if_absent_max_idle(self):
        self.map.put_if_absent("key", "value")
        time.sleep(1.0)
        self.assertTrue(self.map.contains_key("key"))

    def test_put_if_absent(self):
        self.map.put_if_absent("key", "value", max_idle=0.1)
        time.sleep(1.0)
        self.assertFalse(self.map.contains_key("key"))

    def test_set_default_max_idle(self):
        # Renamed from test_set_default_ttl: this class exercises max_idle,
        # not ttl; the old name was a copy-paste leftover from MapTTLTest.
        self.map.set("key", "value")
        time.sleep(1.0)
        self.assertTrue(self.map.contains_key("key"))

    def test_set(self):
        self.map.set("key", "value", max_idle=0.1)
        time.sleep(1.0)
        self.assertFalse(self.map.contains_key("key"))
@unittest.skipIf(
    is_client_version_older_than("4.2.1"), "Tests the features added in 4.2.1 version of the client"
)
class MapAggregatorsIntTest(SingleMemberTestCase):
    """Aggregator tests with the default integer type set to INT.

    The map holds ``key-i -> i`` for i in [0, 50): expected sum is
    1225 and expected average 24.5.
    """

    @classmethod
    def configure_client(cls, config):
        config["cluster_name"] = cls.cluster.id
        config["default_int_type"] = IntType.INT
        return config

    def setUp(self):
        self.map = self.client.get_map(random_string()).blocking()
        self.map.put_all({"key-%d" % i: i for i in range(50)})

    def tearDown(self):
        self.map.destroy()

    def test_int_average(self):
        average = self.map.aggregate(int_avg())
        self.assertEqual(24.5, average)

    def test_int_average_with_attribute_path(self):
        average = self.map.aggregate(int_avg("this"))
        self.assertEqual(24.5, average)

    def test_int_average_with_predicate(self):
        average = self.map.aggregate(int_avg(), predicate=greater_or_equal("this", 47))
        self.assertEqual(48, average)

    # Locals renamed from "sum" to "total" to avoid shadowing the builtin.
    def test_int_sum(self):
        total = self.map.aggregate(int_sum())
        self.assertEqual(1225, total)

    def test_int_sum_with_attribute_path(self):
        total = self.map.aggregate(int_sum("this"))
        self.assertEqual(1225, total)

    def test_int_sum_with_predicate(self):
        total = self.map.aggregate(int_sum(), predicate=greater_or_equal("this", 47))
        self.assertEqual(144, total)

    def test_fixed_point_sum(self):
        total = self.map.aggregate(fixed_point_sum())
        self.assertEqual(1225, total)

    def test_fixed_point_sum_with_attribute_path(self):
        total = self.map.aggregate(fixed_point_sum("this"))
        self.assertEqual(1225, total)

    def test_fixed_point_sum_with_predicate(self):
        total = self.map.aggregate(fixed_point_sum(), predicate=greater_or_equal("this", 47))
        self.assertEqual(144, total)
@unittest.skipIf(
    is_client_version_older_than("4.2.1"), "Tests the features added in 4.2.1 version of the client"
)
class MapAggregatorsLongTest(SingleMemberTestCase):
    """Aggregator tests with the default integer type set to LONG.

    The map holds ``key-i -> i`` for i in [0, 50): expected sum is
    1225 and expected average 24.5.
    """

    @classmethod
    def configure_client(cls, config):
        config["cluster_name"] = cls.cluster.id
        config["default_int_type"] = IntType.LONG
        return config

    def setUp(self):
        self.map = self.client.get_map(random_string()).blocking()
        self.map.put_all({"key-%d" % i: i for i in range(50)})

    def tearDown(self):
        self.map.destroy()

    def test_long_average(self):
        average = self.map.aggregate(long_avg())
        self.assertEqual(24.5, average)

    def test_long_average_with_attribute_path(self):
        average = self.map.aggregate(long_avg("this"))
        self.assertEqual(24.5, average)

    def test_long_average_with_predicate(self):
        average = self.map.aggregate(long_avg(), predicate=greater_or_equal("this", 47))
        self.assertEqual(48, average)

    # Locals renamed from "sum" to "total" to avoid shadowing the builtin.
    def test_long_sum(self):
        total = self.map.aggregate(long_sum())
        self.assertEqual(1225, total)

    def test_long_sum_with_attribute_path(self):
        total = self.map.aggregate(long_sum("this"))
        self.assertEqual(1225, total)

    def test_long_sum_with_predicate(self):
        total = self.map.aggregate(long_sum(), predicate=greater_or_equal("this", 47))
        self.assertEqual(144, total)
@unittest.skipIf(
    is_client_version_older_than("4.2.1"), "Tests the features added in 4.2.1 version of the client"
)
class MapAggregatorsDoubleTest(SingleMemberTestCase):
    """Aggregator tests over float values.

    The map holds ``key-i -> float(i)`` for i in [0, 50): expected sum
    is 1225, average 24.5, count 50, min 0 and max 49.
    """

    @classmethod
    def configure_client(cls, config):
        config["cluster_name"] = cls.cluster.id
        return config

    def setUp(self):
        self.map = self.client.get_map(random_string()).blocking()
        self.map.put_all({"key-%d" % i: float(i) for i in range(50)})

    def tearDown(self):
        self.map.destroy()

    def test_count(self):
        count_ = self.map.aggregate(count())
        self.assertEqual(50, count_)

    def test_count_with_attribute_path(self):
        count_ = self.map.aggregate(count("this"))
        self.assertEqual(50, count_)

    def test_count_with_predicate(self):
        count_ = self.map.aggregate(count(), predicate=greater_or_equal("this", 1))
        self.assertEqual(49, count_)

    def test_double_average(self):
        average = self.map.aggregate(double_avg())
        self.assertEqual(24.5, average)

    def test_double_average_with_attribute_path(self):
        average = self.map.aggregate(double_avg("this"))
        self.assertEqual(24.5, average)

    def test_double_average_with_predicate(self):
        average = self.map.aggregate(double_avg(), predicate=greater_or_equal("this", 47))
        self.assertEqual(48, average)

    # Locals renamed from "sum" to "total" to avoid shadowing the builtin.
    def test_double_sum(self):
        total = self.map.aggregate(double_sum())
        self.assertEqual(1225, total)

    def test_double_sum_with_attribute_path(self):
        total = self.map.aggregate(double_sum("this"))
        self.assertEqual(1225, total)

    def test_double_sum_with_predicate(self):
        total = self.map.aggregate(double_sum(), predicate=greater_or_equal("this", 47))
        self.assertEqual(144, total)

    def test_floating_point_sum(self):
        total = self.map.aggregate(floating_point_sum())
        self.assertEqual(1225, total)

    def test_floating_point_sum_with_attribute_path(self):
        total = self.map.aggregate(floating_point_sum("this"))
        self.assertEqual(1225, total)

    def test_floating_point_sum_with_predicate(self):
        total = self.map.aggregate(floating_point_sum(), predicate=greater_or_equal("this", 47))
        self.assertEqual(144, total)

    def test_number_avg(self):
        average = self.map.aggregate(number_avg())
        self.assertEqual(24.5, average)

    def test_number_avg_with_attribute_path(self):
        average = self.map.aggregate(number_avg("this"))
        self.assertEqual(24.5, average)

    def test_number_avg_with_predicate(self):
        average = self.map.aggregate(number_avg(), predicate=greater_or_equal("this", 47))
        self.assertEqual(48, average)

    # Locals renamed from "average" to "result": max_/min_ return an
    # extreme value, not an average.
    def test_max(self):
        result = self.map.aggregate(max_())
        self.assertEqual(49, result)

    def test_max_with_attribute_path(self):
        result = self.map.aggregate(max_("this"))
        self.assertEqual(49, result)

    def test_max_with_predicate(self):
        result = self.map.aggregate(max_(), predicate=less_or_equal("this", 3))
        self.assertEqual(3, result)

    def test_min(self):
        result = self.map.aggregate(min_())
        self.assertEqual(0, result)

    def test_min_with_attribute_path(self):
        result = self.map.aggregate(min_("this"))
        self.assertEqual(0, result)

    def test_min_with_predicate(self):
        result = self.map.aggregate(min_(), predicate=greater_or_equal("this", 3))
        self.assertEqual(3, result)
# (file boundary artifact)
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014-2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino, Cyril Sester
#
# The licence is in the file __openerp__.py
#
##############################################################################
import logging
from openerp import models, fields, api, _
from datetime import datetime, timedelta
from ..mappings.compassion_child_mapping import CompassionChildMapping
from openerp.exceptions import Warning
from openerp.addons.connector.queue.job import job
from openerp.addons.connector.session import ConnectorSession
from openerp.addons.message_center_compassion.tools.onramp_connector import \
OnrampConnector
logger = logging.getLogger(__name__)
class CompassionChild(models.Model):
    """ A sponsored child.

    Extends the generic child model with the detailed information
    received from GMC Connect (education, health, spiritual life,
    pictures, lifecycle events) and with the hold/sponsorship state.
    """
    _name = 'compassion.child'
    _rec_name = 'local_id'
    _inherit = ['compassion.generic.child', 'mail.thread',
                'translatable.model']
    _description = "Sponsored Child"
    _order = 'local_id asc,date desc'
    ##########################################################################
    # FIELDS #
    ##########################################################################
    # General Information
    #####################
    local_id = fields.Char(track_visibility='onchange')
    code = fields.Char(help='Old child reference')
    compass_id = fields.Char('Compass ID', oldname='unique_id')
    estimated_birthdate = fields.Boolean(readonly=True)
    cognitive_age_group = fields.Char(readonly=True)
    cdsp_type = fields.Selection([
        ('Home Based', 'Home based'),
        ('Center Based', 'Center based'),
    ], track_visibility='onchange', readonly=True)
    last_review_date = fields.Date(track_visibility='onchange', readonly=True)
    last_photo_date = fields.Date()
    type = fields.Selection('_get_ctype', required=True, default='CDSP')
    date = fields.Date('Allocation date')
    completion_date = fields.Date(readonly=True)
    completion_date_change_reason = fields.Char(readonly=True)
    state = fields.Selection(
        '_get_child_states', readonly=True, required=True,
        track_visibility='onchange', default='N',)
    is_available = fields.Boolean(compute='_set_available')
    sponsor_id = fields.Many2one(
        'res.partner', 'Sponsor', track_visibility='onchange', readonly=True)
    sponsor_ref = fields.Char(
        'Sponsor reference', related='sponsor_id.ref')
    has_been_sponsored = fields.Boolean()
    exit_reason = fields.Char(compute='_compute_exit_reason')
    non_latin_name = fields.Char()
    # Hold Information
    ##################
    hold_id = fields.Many2one('compassion.hold', 'Hold', readonly=True)
    hold_type = fields.Selection(related='hold_id.type', store=True)
    hold_channel = fields.Selection(related='hold_id.channel', store=True)
    hold_owner = fields.Many2one(related='hold_id.primary_owner', store=True)
    hold_ambassador = fields.Many2one(related='hold_id.ambassador', store=True)
    hold_expiration = fields.Datetime(related='hold_id.expiration_date',
                                      string='Hold expiration', store=True)
    # Beneficiary Favorites
    #######################
    hobby_ids = fields.Many2many('child.hobby', string='Hobbies',
                                 readonly=True)
    duty_ids = fields.Many2many(
        'child.household.duty', string='Household duties', readonly=True)
    activity_ids = fields.Many2many(
        'child.project.activity', string='Project activities', readonly=True)
    subject_ids = fields.Many2many(
        'child.school.subject', string='School subjects', readonly=True)
    # Education information
    #######################
    # NOTE(review): selection keys are the literal GMC Connect values,
    # selection labels are the human-readable strings.
    education_level = fields.Selection([
        ('Not Enrolled', 'Not Enrolled'),
        ('Preschool', 'preschool'),
        ('Primary', 'primary school'),
        ('Secondary', 'secondary school'),
        ('University Graduate', 'university'),
    ], readonly=True)
    local_grade_level = fields.Char(readonly=True)
    us_grade_level = fields.Char(readonly=True)
    academic_performance = fields.Selection([
        ('Above Average', 'Above average'),
        ('Average', 'Average'),
        ('Below Average', 'Below average'),
    ], readonly=True)
    vocational_training_type = fields.Selection([
        ('Agriculture', 'Agriculture'),
        ('Automotive', 'Automotive'),
        ('Business/Administrative', 'Business administration'),
        ('Clothing Trades', 'Clothing trades'),
        ('Computer Technology', 'Computer technology'),
        ('Construction/ Tradesman', 'Construction'),
        ('Cooking / Food Service', 'Cooking and food service'),
        ('Cosmetology', 'Cosmetology'),
        ('Electrical/ Electronics', 'Electronics'),
        ('Graphic Arts', 'Graphic arts'),
        ('Income-Generating Program at Project',
         'Income-generating program at project'),
        ('Manufacturing/ Fabrication', 'Manufacturing / Fabrication'),
        ('Medical/ Health Services', 'Medical / Health services'),
        ('Not Enrolled', 'Not enrolled'),
        ('Not enrolled', 'Not enrolled'),
        ('Telecommunication', 'Telecommunication'),
        ('Transportation', 'Transportation'),
        ('Transportation/ Driver', 'Driver'),
    ], readonly=True)
    university_year = fields.Integer(readonly=True)
    major_course_study = fields.Selection([
        ('Accounting', 'Accounting'),
        ('Agriculture', 'Agriculture'),
        ('Biology / Medicine', 'Biology / Medicine'),
        ('Business / Management / Commerce', 'Business management'),
        ('Community Development', 'Community development'),
        ('Computer Science / Information Technology', 'Computer science'),
        ('Criminology / Law Enforcement', 'Criminology'),
        ('Economics', 'Economics'),
        ('Education', 'Education'),
        ('Engineering', 'Engineering'),
        ('English', 'English'),
        ('Graphic Arts / Fine Arts', 'Graphic arts'),
        ('History', 'History'),
        ('Hospitality / Hotel Management', 'Hospitality / Hotel '
         'management'),
        ('Law', 'Law'),
        ('Mathematics', 'Mathematics'),
        ('Nursing', 'Nursing'),
        ('Psychology', 'Psychology'),
        ('Sales and Marketing', 'Sales and marketing'),
        ('Science', 'Science'),
        ('Sociology / Social Science', 'Sociology'),
        ('Theology', 'Theology'),
        ('Tourism', 'Tourism'),
    ], readonly=True)
    not_enrolled_reason = fields.Char(readonly=True)
    # Spiritual information
    #######################
    christian_activity_ids = fields.Many2many(
        'child.christian.activity', string='Christian activities',
        readonly=True)
    # Medical information
    #####################
    weight = fields.Char(readonly=True)
    height = fields.Char(readonly=True)
    physical_disability_ids = fields.Many2many(
        'child.physical.disability', string='Physical disabilities',
        readonly=True
    )
    chronic_illness_ids = fields.Many2many(
        'child.chronic.illness', string='Chronic illnesses', readonly=True
    )
    # Case Studies
    ##############
    lifecycle_ids = fields.One2many(
        'compassion.child.ble', 'child_id', 'Lifecycle events', readonly=True)
    assessment_ids = fields.One2many(
        'compassion.child.cdpr', 'child_id', 'Assessments', readonly=True
    )
    note_ids = fields.One2many(
        'compassion.child.note', 'child_id', 'Notes', readonly=True
    )
    revised_value_ids = fields.One2many(
        'compassion.major.revision', 'child_id', 'Major revisions',
        readonly=True
    )
    pictures_ids = fields.One2many(
        'compassion.child.pictures', 'child_id', 'Child pictures',
        track_visibility='onchange', readonly=True)
    household_id = fields.Many2one('compassion.household', 'Household',
                                   readonly=True)
    # NOTE(review): related through a one2many — presumably resolves to
    # the first picture record; verify ordering of pictures_ids.
    portrait = fields.Binary(related='pictures_ids.headshot')
    fullshot = fields.Binary(related='pictures_ids.fullshot')
    child_disaster_impact_ids = fields.One2many(
        'child.disaster.impact', 'child_id', 'Child Disaster Impact',
        readonly=True
    )
    # Descriptions
    ##############
    desc_en = fields.Text('English description', readonly=True)
    desc_fr = fields.Text('French description', readonly=True)
    desc_de = fields.Text('German description', readonly=True)
    desc_it = fields.Text('Italian description', readonly=True)
    # Just for migration
    delegated_comment = fields.Text()
    _sql_constraints = [
        ('compass_id', 'unique(compass_id)',
         'The child already exists in database.'),
        ('global_id', 'unique(global_id)',
         'The child already exists in database.')
    ]
##########################################################################
# FIELDS METHODS #
##########################################################################
    @api.model
    def _get_child_states(self):
        """ Selection values for the child workflow ``state`` field. """
        return [
            ('W', _('Waiting Hold')),
            ('N', _('Consigned')),
            ('I', _('On Internet')),
            ('P', _('Sponsored')),
            ('F', _('Departed')),
            ('R', _('Released')),
        ]
    def _set_available(self):
        """ Compute ``is_available``: true while the child state allows
        proposing it for sponsorship (see ``_available_states``). """
        for child in self:
            child.is_available = child.state in self._available_states()
    @api.model
    def _available_states(self):
        """ States in which a child can be proposed for sponsorship. """
        return ['N', 'I']
def _compute_exit_reason(self):
for child in self:
exit_details = child.lifecycle_ids.with_context(
lang='en_US').filtered(
lambda l: l.type in ('Planned Exit', 'Unplanned Exit'))
if exit_details:
child.exit_reason = exit_details[0].request_reason
    @api.model
    def _get_ctype(self):
        """ Selection values for the child program ``type`` field. """
        return [('CDSP', 'CDSP'), ('LDP', 'LDP')]
##########################################################################
# ORM METHODS #
##########################################################################
@api.model
def create(self, vals):
"""
If child with global_id already exists, update it instead of creating
a new one.
"""
global_id = vals.get('global_id')
child = self.search([('global_id', '=', global_id)])
if child:
child.write(vals)
else:
child = super(CompassionChild, self).create(vals)
return child
    @api.multi
    def unlink(self):
        """ Release any active hold on the children before deletion. """
        holds = self.mapped('hold_id').filtered(lambda h: h.state == 'active')
        holds.release_hold()
        return super(CompassionChild, self).unlink()
##########################################################################
# PUBLIC METHODS #
##########################################################################
    def details_answer(self, vals):
        """ Called when receiving the answer of GetDetails message.

        Writes the received values on the child, generates a new
        description record and refreshes the child pictures.
        """
        self.ensure_one()
        self.write(vals)
        self.env['compassion.child.description'].create({'child_id': self.id})
        self.update_child_pictures()
        return True
    @api.model
    def major_revision(self, commkit_data):
        """ Called when a MajorRevision Kit is received.

        :param commkit_data: commkit json, either a single revision or a
            dict holding a 'BeneficiaryMajorRevisionList'
        :return: list of ids of the children that were updated
        """
        child_ids = list()
        child_mapping = CompassionChildMapping(self.env)
        for child_data in commkit_data.get('BeneficiaryMajorRevisionList',
                                           [commkit_data]):
            global_id = child_data.get('Beneficiary_GlobalID')
            child = self.search([('global_id', '=', global_id)])
            if child:
                child_ids.append(child.id)
                child._major_revision(child_mapping.get_vals_from_connect(
                    child_data))
        return child_ids
    @api.model
    def new_kit(self, commkit_data):
        """ New child kit is received.

        :param commkit_data: commkit json, either a single child kit or
            a dict holding a 'BeneficiaryResponseList'
        :return: list of ids of the children that were updated
        """
        child_ids = list()
        child_mapping = CompassionChildMapping(self.env)
        for child_data in commkit_data.get('BeneficiaryResponseList',
                                           [commkit_data]):
            global_id = child_data.get('Beneficiary_GlobalID')
            child = self.search([('global_id', '=', global_id)])
            if child:
                child_ids.append(child.id)
                child.write(child_mapping.get_vals_from_connect(child_data))
        return child_ids
##########################################################################
# VIEW CALLBACKS #
##########################################################################
@api.multi
def get_infos(self):
"""Get the most recent case study, basic informations, updates
portrait picture and creates the project if it doesn't exist.
"""
message_obj = self.env['gmc.message.pool']
try:
action_id = self.env.ref(
'child_compassion.beneficiaries_details').id
except ValueError:
# At migration, the action does not yet exist. We should
# just avoid updating children at that moment.
return True
message_vals = {
'action_id': action_id,
'object_id': self.id,
'child_id': self.id,
}
message = message_obj.create(message_vals)
if message.state == 'failure' and not self.env.context.get(
'async_mode'):
raise Warning(message.failure_reason)
return True
    @api.multi
    def update_child_pictures(self):
        """ Fetch the latest pictures of every child.

        :return: True only if all pictures were fetched successfully
        """
        res = True
        # Update child's pictures
        for child in self:
            res = child._get_last_pictures() and res
        return res
# Lifecycle methods
###################
    def depart(self):
        """ Called by Lifecycle Event: fire the 'release' workflow
        signal for the departing children. """
        self.signal_workflow('release')
    def reinstatement(self):
        """ Called by Lifecycle Event. Hold and state of Child is
        handled by the Reinstatement Hold Notification.

        Resets the workflow so the child restarts from the initial
        state. """
        self.delete_workflow()
        self.create_workflow()
    def new_photo(self):
        """ Called by Lifecycle Event: refresh the child information to
        retrieve the new picture. """
        self.get_infos()
@api.multi
def get_lifecycle_event(self):
onramp = OnrampConnector()
endpoint = 'beneficiaries/{}/kits/beneficiarylifecycleeventkit'
lifecylcle_ids = list()
for child in self:
result = onramp.send_message(
endpoint.format(child.global_id), 'GET')
if 'BeneficiaryLifecycleEventList' in result.get('content', {}):
lifecylcle_ids.extend(self.env[
'compassion.child.ble'].process_commkit(result['content']))
return lifecylcle_ids
##########################################################################
# WORKFLOW METHODS #
##########################################################################
    @api.multi
    def child_waiting_hold(self):
        """ Called on child creation: reset the sponsor and put the
        child in the 'Waiting Hold' state. """
        self.write({'state': 'W', 'sponsor_id': False})
    @api.multi
    def child_consigned(self):
        """Called on child allocation.

        Moves the children to 'Consigned', cancels any pending deletion
        job scheduled by child_released, and refreshes child infos.
        """
        self.write({'state': 'N'})
        # Cancel planned deletion
        # NOTE(review): the 'name' value must stay in sync with the
        # docstring of unlink_children_job; passing a list of ids to a
        # 'like' operator looks fragile — verify against the stored
        # func_string format.
        jobs = self.env['queue.job'].search([
            ('name', '=', 'Job for deleting released children.'),
            ('func_string', 'like', self.ids),
            ('state', '=', 'enqueued')
        ])
        jobs.button_done()
        jobs.unlink()
        self.get_infos()
        return True
    @api.multi
    def child_sponsored(self):
        """ Mark the children as sponsored and archive their current
        picture. """
        for child in self:
            self.env['compassion.child.pictures'].create({
                'child_id': child.id,
                'image_url': child.image_url
            })
        return self.write({
            'state': 'P',
            'has_been_sponsored': True
        })
@api.multi
def child_released(self):
""" Is called when a child is released to the global childpool. """
self.write({
'sponsor_id': False,
'state': 'R'
})
sponsored_children = self.filtered('has_been_sponsored')
other_children = self - sponsored_children
other_children.get_lifecycle_event()
# the children will be deleted when we reach their expiration date
default_expiration = datetime.now() + timedelta(weeks=1)
for child in other_children:
postpone = fields.Datetime.from_string(child.hold_expiration) or \
default_expiration
session = ConnectorSession.from_env(other_children.env)
unlink_children_job.delay(session, self._name, child.ids,
eta=postpone)
return True
    @api.multi
    def child_departed(self):
        """ Is called when a child is departed.

        Only previously sponsored children are moved to 'Departed';
        the sponsor link is removed. """
        sponsored_children = self.filtered('has_been_sponsored')
        sponsored_children.write({
            'sponsor_id': False,
            'state': 'F'
        })
        return True
##########################################################################
# PRIVATE METHODS #
##########################################################################
    @api.multi
    def _get_last_pictures(self):
        """ Fetch the latest pictures of the child and log a note in
        its message thread.

        :return: the created compassion.child.pictures record (falsy
            when the creation did not succeed)
        """
        self.ensure_one()
        pictures_obj = self.env['compassion.child.pictures']
        pictures = pictures_obj.create({
            'child_id': self.id,
            'image_url': self.image_url})
        if pictures:
            # Add a note in child
            self.message_post(
                "The picture has been updated.",
                "Picture update", 'comment')
        return pictures
    def _major_revision(self, vals):
        """ Private method when a major revision is received for a child.

        Writes the received values and refreshes the child information.
        :param vals: Record values received from connect
        """
        self.ensure_one()
        self.write(vals)
        self.get_infos()
##############################################################################
# CONNECTOR METHODS #
##############################################################################
@job(default_channel='root.child_compassion')
def unlink_children_job(session, model_name, message_ids):
    """Job for deleting released children."""
    # Docstring above is the job name searched by child_consigned;
    # it must not change.
    session.env[model_name].browse(message_ids).unlink()
# Fetch last Lifecycle event when receiving a Beneficiary Kit.
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014-2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino, Cyril Sester
#
# The licence is in the file __openerp__.py
#
##############################################################################
import logging
from openerp import models, fields, api, _
from datetime import datetime, timedelta
from ..mappings.compassion_child_mapping import CompassionChildMapping
from openerp.exceptions import Warning
from openerp.addons.connector.queue.job import job
from openerp.addons.connector.session import ConnectorSession
from openerp.addons.message_center_compassion.tools.onramp_connector import \
OnrampConnector
logger = logging.getLogger(__name__)
class CompassionChild(models.Model):
""" A sponsored child """
_name = 'compassion.child'
_rec_name = 'local_id'
_inherit = ['compassion.generic.child', 'mail.thread',
'translatable.model']
_description = "Sponsored Child"
_order = 'local_id asc,date desc'
##########################################################################
# FIELDS #
##########################################################################
# General Information
#####################
local_id = fields.Char(track_visibility='onchange')
code = fields.Char(help='Old child reference')
compass_id = fields.Char('Compass ID', oldname='unique_id')
estimated_birthdate = fields.Boolean(readonly=True)
cognitive_age_group = fields.Char(readonly=True)
cdsp_type = fields.Selection([
('Home Based', 'Home based'),
('Center Based', 'Center based'),
], track_visibility='onchange', readonly=True)
last_review_date = fields.Date(track_visibility='onchange', readonly=True)
last_photo_date = fields.Date()
type = fields.Selection('_get_ctype', required=True, default='CDSP')
date = fields.Date('Allocation date')
completion_date = fields.Date(readonly=True)
completion_date_change_reason = fields.Char(readonly=True)
state = fields.Selection(
'_get_child_states', readonly=True, required=True,
track_visibility='onchange', default='N',)
is_available = fields.Boolean(compute='_set_available')
sponsor_id = fields.Many2one(
'res.partner', 'Sponsor', track_visibility='onchange', readonly=True)
sponsor_ref = fields.Char(
'Sponsor reference', related='sponsor_id.ref')
has_been_sponsored = fields.Boolean()
exit_reason = fields.Char(compute='_compute_exit_reason')
non_latin_name = fields.Char()
# Hold Information
##################
hold_id = fields.Many2one('compassion.hold', 'Hold', readonly=True)
hold_type = fields.Selection(related='hold_id.type', store=True)
hold_channel = fields.Selection(related='hold_id.channel', store=True)
hold_owner = fields.Many2one(related='hold_id.primary_owner', store=True)
hold_ambassador = fields.Many2one(related='hold_id.ambassador', store=True)
hold_expiration = fields.Datetime(related='hold_id.expiration_date',
string='Hold expiration', store=True)
# Beneficiary Favorites
#######################
hobby_ids = fields.Many2many('child.hobby', string='Hobbies',
readonly=True)
duty_ids = fields.Many2many(
'child.household.duty', string='Household duties', readonly=True)
activity_ids = fields.Many2many(
'child.project.activity', string='Project activities', readonly=True)
subject_ids = fields.Many2many(
'child.school.subject', string='School subjects', readonly=True)
# Education information
#######################
education_level = fields.Selection([
('Not Enrolled', 'Not Enrolled'),
('Preschool', 'preschool'),
('Primary', 'primary school'),
('Secondary', 'secondary school'),
('University Graduate', 'university'),
], readonly=True)
local_grade_level = fields.Char(readonly=True)
us_grade_level = fields.Char(readonly=True)
academic_performance = fields.Selection([
('Above Average', 'Above average'),
('Average', 'Average'),
('Below Average', 'Below average'),
], readonly=True)
vocational_training_type = fields.Selection([
('Agriculture', 'Agriculture'),
('Automotive', 'Automotive'),
('Business/Administrative', 'Business administration'),
('Clothing Trades', 'Clothing trades'),
('Computer Technology', 'Computer technology'),
('Construction/ Tradesman', 'Construction'),
('Cooking / Food Service', 'Cooking and food service'),
('Cosmetology', 'Cosmetology'),
('Electrical/ Electronics', 'Electronics'),
('Graphic Arts', 'Graphic arts'),
('Income-Generating Program at Project',
'Income-generating program at project'),
('Manufacturing/ Fabrication', 'Manufacturing / Fabrication'),
('Medical/ Health Services', 'Medical / Health services'),
('Not Enrolled', 'Not enrolled'),
('Not enrolled', 'Not enrolled'),
('Telecommunication', 'Telecommunication'),
('Transportation', 'Transportation'),
('Transportation/ Driver', 'Driver'),
], readonly=True)
university_year = fields.Integer(readonly=True)
major_course_study = fields.Selection([
('Accounting', 'Accounting'),
('Agriculture', 'Agriculture'),
('Biology / Medicine', 'Biology / Medicine'),
('Business / Management / Commerce', 'Business management'),
('Community Development', 'Community development'),
('Computer Science / Information Technology', 'Computer science'),
('Criminology / Law Enforcement', 'Criminology'),
('Economics', 'Economics'),
('Education', 'Education'),
('Engineering', 'Engineering'),
('English', 'English'),
('Graphic Arts / Fine Arts', 'Graphic arts'),
('History', 'History'),
('Hospitality / Hotel Management', 'Hospitality / Hotel '
'management'),
('Law', 'Law'),
('Mathematics', 'Mathematics'),
('Nursing', 'Nursing'),
('Psychology', 'Psychology'),
('Sales and Marketing', 'Sales and marketing'),
('Science', 'Science'),
('Sociology / Social Science', 'Sociology'),
('Theology', 'Theology'),
('Tourism', 'Tourism'),
], readonly=True)
not_enrolled_reason = fields.Char(readonly=True)
# Spiritual information
#######################
christian_activity_ids = fields.Many2many(
'child.christian.activity', string='Christian activities',
readonly=True)
# Medical information
#####################
weight = fields.Char(readonly=True)
height = fields.Char(readonly=True)
physical_disability_ids = fields.Many2many(
'child.physical.disability', string='Physical disabilities',
readonly=True
)
chronic_illness_ids = fields.Many2many(
'child.chronic.illness', string='Chronic illnesses', readonly=True
)
# Case Studies
##############
lifecycle_ids = fields.One2many(
'compassion.child.ble', 'child_id', 'Lifecycle events', readonly=True)
assessment_ids = fields.One2many(
'compassion.child.cdpr', 'child_id', 'Assessments', readonly=True
)
note_ids = fields.One2many(
'compassion.child.note', 'child_id', 'Notes', readonly=True
)
revised_value_ids = fields.One2many(
'compassion.major.revision', 'child_id', 'Major revisions',
readonly=True
)
pictures_ids = fields.One2many(
'compassion.child.pictures', 'child_id', 'Child pictures',
track_visibility='onchange', readonly=True)
household_id = fields.Many2one('compassion.household', 'Household',
readonly=True)
portrait = fields.Binary(related='pictures_ids.headshot')
fullshot = fields.Binary(related='pictures_ids.fullshot')
child_disaster_impact_ids = fields.One2many(
'child.disaster.impact', 'child_id', 'Child Disaster Impact',
readonly=True
)
# Descriptions
##############
desc_en = fields.Text('English description', readonly=True)
desc_fr = fields.Text('French description', readonly=True)
desc_de = fields.Text('German description', readonly=True)
desc_it = fields.Text('Italian description', readonly=True)
# Just for migration
delegated_comment = fields.Text()
_sql_constraints = [
('compass_id', 'unique(compass_id)',
'The child already exists in database.'),
('global_id', 'unique(global_id)',
'The child already exists in database.')
]
##########################################################################
# FIELDS METHODS #
##########################################################################
@api.model
def _get_child_states(self):
return [
('W', _('Waiting Hold')),
('N', _('Consigned')),
('I', _('On Internet')),
('P', _('Sponsored')),
('F', _('Departed')),
('R', _('Released')),
]
def _set_available(self):
for child in self:
child.is_available = child.state in self._available_states()
@api.model
def _available_states(self):
return ['N', 'I']
def _compute_exit_reason(self):
for child in self:
exit_details = child.lifecycle_ids.with_context(
lang='en_US').filtered(
lambda l: l.type in ('Planned Exit', 'Unplanned Exit'))
if exit_details:
child.exit_reason = exit_details[0].request_reason
@api.model
def _get_ctype(self):
return [('CDSP', 'CDSP'), ('LDP', 'LDP')]
##########################################################################
# ORM METHODS #
##########################################################################
@api.model
def create(self, vals):
"""
If child with global_id already exists, update it instead of creating
a new one.
"""
global_id = vals.get('global_id')
child = self.search([('global_id', '=', global_id)])
if child:
child.write(vals)
else:
child = super(CompassionChild, self).create(vals)
return child
@api.multi
def unlink(self):
holds = self.mapped('hold_id').filtered(lambda h: h.state == 'active')
holds.release_hold()
return super(CompassionChild, self).unlink()
##########################################################################
# PUBLIC METHODS #
##########################################################################
def details_answer(self, vals):
""" Called when receiving the answer of GetDetails message. """
self.ensure_one()
self.write(vals)
self.env['compassion.child.description'].create({'child_id': self.id})
self.update_child_pictures()
return True
@api.model
def major_revision(self, commkit_data):
""" Called when a MajorRevision Kit is received. """
child_ids = list()
child_mapping = CompassionChildMapping(self.env)
for child_data in commkit_data.get('BeneficiaryMajorRevisionList',
[commkit_data]):
global_id = child_data.get('Beneficiary_GlobalID')
child = self.search([('global_id', '=', global_id)])
if child:
child_ids.append(child.id)
child._major_revision(child_mapping.get_vals_from_connect(
child_data))
return child_ids
@api.model
def new_kit(self, commkit_data):
""" New child kit is received. """
child_mapping = CompassionChildMapping(self.env)
children = self
for child_data in commkit_data.get('BeneficiaryResponseList',
[commkit_data]):
global_id = child_data.get('Beneficiary_GlobalID')
child = self.search([('global_id', '=', global_id)])
if child:
children += child
child.write(child_mapping.get_vals_from_connect(child_data))
children.get_lifecycle_event()
return children.ids
##########################################################################
# VIEW CALLBACKS #
##########################################################################
    @api.multi
    def get_infos(self):
        """Get the most recent case study, basic informations, updates
        portrait picture and creates the project if it doesn't exist.

        Queues one GetDetails GMC message per child; the message pool
        does the actual fetching.

        :return: True
        :raises Warning: if a message fails while not in async mode.
        """
        message_obj = self.env['gmc.message.pool']
        action_id = self.env.ref('child_compassion.beneficiaries_details').id
        for child in self:
            message_vals = {
                'action_id': action_id,
                'object_id': child.id,
                'child_id': child.id,
            }
            message = message_obj.create(message_vals)
            # Surface synchronous failures to the caller.
            # NOTE(review): this raises `Warning` — presumably the Odoo
            # exception imported at the top of the file, not the Python
            # builtin warning category; verify the module imports.
            if message.state == 'failure' and not self.env.context.get(
                    'async_mode'):
                raise Warning(message.failure_reason)
        return True
@api.multi
def update_child_pictures(self):
res = True
# Update child's pictures
for child in self:
res = child._get_last_pictures() and res
return res
    # Lifecycle methods
    ###################
    def depart(self):
        # Lifecycle event: fire the 'release' transition on the child's
        # workflow; state handling is done by the workflow itself.
        self.signal_workflow('release')
    def reinstatement(self):
        """ Called by Lifecycle Event. Hold and state of Child is
        handled by the Reinstatement Hold Notification. """
        # Restart the child's workflow from scratch.
        self.delete_workflow()
        self.create_workflow()
    def new_photo(self):
        # Lifecycle event: a new photo is available — refetch the child's
        # details (which also refreshes the pictures).
        self.get_infos()
@api.multi
def get_lifecycle_event(self):
onramp = OnrampConnector()
endpoint = 'beneficiaries/{}/kits/beneficiarylifecycleeventkit'
lifecylcle_ids = list()
for child in self:
result = onramp.send_message(
endpoint.format(child.global_id), 'GET')
if 'BeneficiaryLifecycleEventList' in result.get('content', {}):
lifecylcle_ids.extend(self.env[
'compassion.child.ble'].process_commkit(result['content']))
return lifecylcle_ids
##########################################################################
# WORKFLOW METHODS #
##########################################################################
    @api.multi
    def child_waiting_hold(self):
        """ Called on child creation. """
        # New children start waiting ('W') with no sponsor attached.
        self.write({'state': 'W', 'sponsor_id': False})
    @api.multi
    def child_consigned(self):
        """Called on child allocation."""
        self.write({'state': 'N'})
        # Cancel planned deletion
        # A previous release may have queued an unlink job for these
        # children; mark it done and remove it.
        # NOTE(review): the 'like' match on self.ids presumably relies on
        # the ids list's repr appearing in the stored function string —
        # verify this matches how unlink_children_job was enqueued.
        jobs = self.env['queue.job'].search([
            ('name', '=', 'Job for deleting released children.'),
            ('func_string', 'like', self.ids),
            ('state', '=', 'enqueued')
        ])
        jobs.button_done()
        jobs.unlink()
        self.get_infos()
        return True
@api.multi
def child_sponsored(self):
for child in self:
self.env['compassion.child.pictures'].create({
'child_id': child.id,
'image_url': child.image_url
})
return self.write({
'state': 'P',
'has_been_sponsored': True
})
@api.multi
def child_released(self):
""" Is called when a child is released to the global childpool. """
self.write({
'sponsor_id': False,
'state': 'R'
})
sponsored_children = self.filtered('has_been_sponsored')
other_children = self - sponsored_children
other_children.get_lifecycle_event()
# the children will be deleted when we reach their expiration date
default_expiration = datetime.now() + timedelta(weeks=1)
for child in other_children:
postpone = fields.Datetime.from_string(child.hold_expiration) or \
default_expiration
session = ConnectorSession.from_env(other_children.env)
unlink_children_job.delay(session, self._name, child.ids,
eta=postpone)
return True
@api.multi
def child_departed(self):
""" Is called when a child is departed. """
sponsored_children = self.filtered('has_been_sponsored')
sponsored_children.write({
'sponsor_id': False,
'state': 'F'
})
return True
##########################################################################
# PRIVATE METHODS #
##########################################################################
    @api.multi
    def _get_last_pictures(self):
        """Fetch the child's latest picture and log a note on the child.

        :return: the created compassion.child.pictures record (falsy if
                 creation did not succeed).
        """
        self.ensure_one()
        pictures_obj = self.env['compassion.child.pictures']
        pictures = pictures_obj.create({
            'child_id': self.id,
            'image_url': self.image_url})
        if pictures:
            # Add a note in child
            self.message_post(
                "The picture has been updated.",
                "Picture update", 'comment')

        return pictures
    def _major_revision(self, vals):
        """ Private method when a major revision is received for a child.

        :param vals: Record values received from connect
        """
        self.ensure_one()
        self.write(vals)
        # Refresh the full details after applying the revision values.
        self.get_infos()
##############################################################################
# CONNECTOR METHODS #
##############################################################################
@job(default_channel='root.child_compassion')
def unlink_children_job(session, model_name, message_ids):
    """Job for deleting released children."""
    # NOTE: the docstring above doubles as the queue.job name searched by
    # child_consigned — it must not change.
    session.env[model_name].browse(message_ids).unlink()
|
import unittest
import color_contrast_calc
from color_contrast_calc import color_from
from color_contrast_calc import InvalidColorRepresentationError
class TestColorContrastCalc(unittest.TestCase):
    """Smoke tests for the color_contrast_calc public API."""

    def setUp(self):
        pass

    def test_color(self):
        yellow = color_contrast_calc.color.Color.from_name('yellow')
        black = color_contrast_calc.color.Color.from_name('black')
        ratio = yellow.contrast_ratio_against(black)
        self.assertAlmostEqual(ratio, 19.56, 2)

    def test_grayscale(self):
        yellow = color_contrast_calc.color.Color.from_name('yellow')
        orange = color_contrast_calc.color.Color.from_name('orange')
        self.assertEqual(yellow.new_grayscale_color().hex, '#ededed')
        self.assertEqual(orange.new_grayscale_color().hex, '#acacac')

    def test_color_from(self):
        yellow_hex = '#ffff00'
        # Every representation of yellow maps to the same hex value.
        for rep in ('yellow', yellow_hex, '#ff0', (255, 255, 0)):
            self.assertEqual(color_from(rep).hex, yellow_hex)
        # Invalid representations must raise.
        for rep in ('imaginaryblue', '#ff00', (255, 256, 0)):
            with self.assertRaises(InvalidColorRepresentationError):
                color_from(rep)
Add a test case in which a number is passed to color_contrast_calc.color_from().
import unittest
import color_contrast_calc
from color_contrast_calc import color_from
from color_contrast_calc import InvalidColorRepresentationError
class TestColorContrastCalc(unittest.TestCase):
    """Smoke tests for the color_contrast_calc public API."""

    def setUp(self):
        pass

    def test_color(self):
        yellow = color_contrast_calc.color.Color.from_name('yellow')
        black = color_contrast_calc.color.Color.from_name('black')
        ratio = yellow.contrast_ratio_against(black)
        self.assertAlmostEqual(ratio, 19.56, 2)

    def test_grayscale(self):
        yellow = color_contrast_calc.color.Color.from_name('yellow')
        orange = color_contrast_calc.color.Color.from_name('orange')
        self.assertEqual(yellow.new_grayscale_color().hex, '#ededed')
        self.assertEqual(orange.new_grayscale_color().hex, '#acacac')

    def test_color_from(self):
        yellow_hex = '#ffff00'
        # Every representation of yellow maps to the same hex value.
        for rep in ('yellow', yellow_hex, '#ff0', (255, 255, 0)):
            self.assertEqual(color_from(rep).hex, yellow_hex)
        # Invalid representations — including a bare number — must raise.
        for rep in ('imaginaryblue', '#ff00', (255, 256, 0), 0):
            with self.assertRaises(InvalidColorRepresentationError):
                color_from(rep)
|
import glob
import subprocess
import os
import re
import shutil
def leftPart( rez, elems ):
    """Store rez as the left entry of the [left, middle, right] triple."""
    _, middle, right = elems
    return [rez, middle, right]
def rightPart( rez, elems ):
    """Store rez as the right entry of the [left, middle, right] triple."""
    left, middle, _ = elems
    return [left, middle, rez]
def middlePart( rez, elems ):
    """Store rez as the middle entry of the [left, middle, right] triple."""
    left, _, right = elems
    return [left, rez, right]
# Patterns scraped from Beyond Compare's HTML report, each paired with
# the function that stores the captured group in its slot of the
# [left, diff-image, right] result triple used by doReport().
searches = [ ('Left file: (.*) ', leftPart )
           , ('Right file: (.*) ', rightPart)
           , ('<img src="([^"]+)".*', middlePart) ]
def doReport( filename1, filename2 ):
    # Drive Beyond Compare to produce an HTML picture-comparison report,
    # then scrape the left path, diff-image src and right path out of it.
    # Returns [left_path, diff_image_src, right_path].
    beyondCompareScriptLine = "picture-report layout:side-by-side output-to:t.html output-options:html-color "
    script_file = open("test_script.txt", "w")
    script_file.write(beyondCompareScriptLine + filename1 + " " + filename2 )
    script_file.close()
    # NOTE(review): the backslashes in this Windows path are not valid
    # escape sequences, so they survive literally — fragile; a raw string
    # would be clearer.
    subprocess.call(["C:\Program Files (x86)\Beyond Compare 3\BComp.com", "/closescript", "@test_script.txt"])
    infile = open("t.html", "r")
    ret = ["", "", ""]
    for line in infile:
        # Each matching pattern stores its captured group in the triple.
        for (rexp, action) in searches:
            rez = re.search(rexp, line)
            if rez:
                ret = action(rez.group(1), ret)
    infile.close()
    return ret
def performTests():
    # For every PNG test image with a converted BMP next to it, run a
    # Beyond Compare report and collect original/diff/converted into a
    # single HTML results page.
    ofile = open("testResult.html","w")
    ofile.write("<html><head><title>Test results</title></head><body>")
    counter = 0
    for pngFile in glob.glob('tests/pngsuite/*.png'):
        bmpFile = pngFile + ".bmp"
        if os.path.exists(bmpFile):
            # comp (the report's own diff path) is unused; the copied
            # Diff_<n>.png is embedded instead.
            (left, comp, right) = doReport(pngFile, bmpFile)
            # Beyond Compare rewrites Diff0.png on every run; copy it to
            # a unique name before the next comparison clobbers it.
            neofile = "BcImages/Diff_" + str(counter) + ".png"
            shutil.copyfile("BcImages/Diff0.png", neofile )
            counter = counter + 1
            ofile.write("""
            <table>
            <!-- <tr><td>{0}</td><td>{1}</td><td>{2}</td></tr> -->
            <tr>
            <td><img width="64" height="64" src="file:///{0}" /></td>
            <td><img width="64" height="64" src="{1}" /></td>
            <td><img width="64" height="64" src="file:///{2}" /></td>
            <td>{0}</td></tr>
            </table>
            """.format(left, neofile, right))
    ofile.write("</body></html>")
    ofile.close()
# Script entry point.
if __name__ == "__main__":
    performTests()
_MUCH_ faster test automation: batch all comparisons into a single Beyond Compare script run instead of launching one process per image pair.
import glob
import subprocess
import os
def performTests():
    # Build ONE Beyond Compare script containing every PNG/BMP pair and
    # run it in a single batch at the end; the report images come out
    # numbered Diff<n>.png / DiffMono<n>.png in the order the pairs were
    # written, which is why `counter` mirrors the loop order.
    ofile = open("testResult.html","w")
    ofile.write("<html><head><title>Test results</title></head><body>")
    beyondCompareScriptLine = "picture-report layout:side-by-side output-to:t.html output-options:html-color "
    script_file = open("test_script.txt", "w")
    counter = 0
    for pngFile in glob.glob('tests/pngsuite/*.png'):
        bmpFile = pngFile + ".bmp"
        if os.path.exists(bmpFile):
            # Quote the paths in case they contain spaces.
            script_file.write(beyondCompareScriptLine + '"' + pngFile + '" "' + bmpFile + "\"\n")
            ofile.write("""
            <table>
            <!-- <tr><td>{0}</td><td>{1}</td><td>{2}</td></tr> -->
            <tr>
            <td><img width="64" height="64" src="{0}" /></td>
            <td><img width="64" height="64" src="{1}" /></td>
            <td><img width="64" height="64" src="{3}" /></td>
            <td><img width="64" height="64" src="{2}" /></td>
            <td>{0}</td></tr>
            </table>
            """.format(pngFile, "BcImages/Diff" + str(counter) + ".png", bmpFile,
                "BcImages/DiffMono" + str(counter) + ".png" ))
            counter = counter + 1
    script_file.close()
    ofile.write("</body></html>")
    ofile.close()
    # NOTE(review): the backslashes in this Windows path are not valid
    # escape sequences, so they survive literally — fragile.
    subprocess.call(["C:\Program Files (x86)\Beyond Compare 3\BComp.com",
        "/closescript", "@test_script.txt"])
def convertImages():
    # Run every built imageTest executable; presumably each one converts
    # the PNG test suite into BMPs next to the sources — TODO confirm.
    # NOTE(review): `file` shadows the builtin of the same name.
    for file in glob.glob("dist/build/imageTest/imageTest*"):
        if os.path.isfile(file):
            subprocess.call([file])
# Script entry point: rebuild the BMPs, then compare them all in one batch.
if __name__ == "__main__":
    convertImages()
    performTests()
|
# Copyright 2011 Element 34
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
===============
django_provider
===============
"""
import os
import sys

import saunter.ConfigWrapper
class DjangoProvider(object):
    """
    Uses Django's Models to access the database
    """
    def __init__(self):
        # Read the Django project location and app name from the saunter
        # config, make the project importable and point Django at its
        # settings module. Requires the module-level `os`/`sys` imports.
        cf = saunter.ConfigWrapper.ConfigWrapper().config
        django_where = cf.get("Django", "installation")
        if django_where not in sys.path:
            sys.path.append(django_where)
        django_name = cf.get("Django", "app")
        if not 'DJANGO_SETTINGS_MODULE' in os.environ:
            os.environ['DJANGO_SETTINGS_MODULE'] = "%s.settings" % django_name
    def get_random_user(self):
        """
        Gets a random user from the provider

        :returns: Dictionary
        """
        # Imported lazily so Django is only required when actually used.
        from provider.models import User
        # order_by('?') asks the database for a random ordering.
        u = User.objects.order_by('?')[0]
        return {"username": u.username, "password": u.password, "fullname": u.fullname}
Fix imports of the Django provider (add the missing `os` and `sys` imports).
# Copyright 2011 Element 34
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
===============
django_provider
===============
"""
import os
import sys
import saunter.ConfigWrapper
class DjangoProvider(object):
    """
    Uses Django's Models to access the database
    """
    def __init__(self):
        # Read the Django project location and app name from the saunter
        # config, make the project importable and point Django at its
        # settings module.
        cf = saunter.ConfigWrapper.ConfigWrapper().config
        django_where = cf.get("Django", "installation")
        if django_where not in sys.path:
            sys.path.append(django_where)
        django_name = cf.get("Django", "app")
        if not 'DJANGO_SETTINGS_MODULE' in os.environ:
            os.environ['DJANGO_SETTINGS_MODULE'] = "%s.settings" % django_name
    def get_random_user(self):
        """
        Gets a random user from the provider

        :returns: Dictionary
        """
        # Imported lazily so Django is only required when actually used.
        from provider.models import User
        # order_by('?') asks the database for a random ordering.
        u = User.objects.order_by('?')[0]
        return {"username": u.username, "password": u.password, "fullname": u.fullname}
#!/usr/bin/env python
#
# Copyright 2007-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Mercurial interface to codereview.appspot.com.
To configure it, set the following options in
your repository's .hg/hgrc file.
[extensions]
codereview = path/to/codereview.py
[codereview]
server = codereview.appspot.com
The server should be running Rietveld; see http://code.google.com/p/rietveld/.
'''
# TODO(rsc):
# fix utf-8 upload bug
# look for and clear submitted CLs during sync / add "adopt" command?
# creating an issue prints the URL twice
# better documentation
from mercurial import cmdutil, commands, hg, util, error, match
from mercurial.node import nullrev, hex, nullid, short
import os, re
import stat
import threading
from HTMLParser import HTMLParser
try:
hgversion = util.version()
except:
from mercurial.version import version as v
hgversion = v.get_version()
# To experiment with Mercurial in the python interpreter:
# >>> repo = hg.repository(ui.ui(), path = ".")
#######################################################################
# Normally I would split this into multiple files, but it simplifies
# import path headaches to keep it all in one file. Sorry.
import sys
if __name__ == "__main__":
print >>sys.stderr, "This is a Mercurial extension and should not be invoked directly."
sys.exit(2)
#######################################################################
# Change list parsing.
#
# Change lists are stored in .hg/codereview/cl.nnnnnn
# where nnnnnn is the number assigned by the code review server.
# Most data about a change list is stored on the code review server
# too: the description, reviewer, and cc list are all stored there.
# The only thing in the cl.nnnnnn file is the list of relevant files.
# Also, the existence of the cl.nnnnnn file marks this repository
# as the one where the change list lives.
class CL(object):
def __init__(self, name):
self.name = name
self.desc = ''
self.files = []
self.reviewer = []
self.cc = []
self.url = ''
self.local = False
self.web = False
def DiskText(self):
cl = self
s = ""
s += "Description:\n"
s += Indent(cl.desc, "\t")
s += "Files:\n"
for f in cl.files:
s += "\t" + f + "\n"
return s
def EditorText(self):
cl = self
s = _change_prolog
s += "\n"
if cl.url != '':
s += 'URL: ' + cl.url + ' # cannot edit\n\n'
s += "Reviewer: " + JoinComma(cl.reviewer) + "\n"
s += "CC: " + JoinComma(cl.cc) + "\n"
s += "\n"
s += "Description:\n"
if cl.desc == '':
s += "\t<enter description here>\n"
else:
s += Indent(cl.desc, "\t")
s += "\n"
s += "Files:\n"
for f in cl.files:
s += "\t" + f + "\n"
s += "\n"
return s
def PendingText(self):
cl = self
s = cl.name + ":" + "\n"
s += Indent(cl.desc, "\t")
s += "\n"
s += "\tReviewer: " + JoinComma(cl.reviewer) + "\n"
s += "\tCC: " + JoinComma(cl.cc) + "\n"
s += "\tFiles:\n"
for f in cl.files:
s += "\t\t" + f + "\n"
return s
def Flush(self, ui, repo):
if self.name == "new":
self.Upload(ui, repo)
dir = CodeReviewDir(ui, repo)
path = dir + '/cl.' + self.name
f = open(path+'!', "w")
f.write(self.DiskText())
f.close()
os.rename(path+'!', path)
if self.web:
EditDesc(self.name, desc=self.desc,
reviewers=JoinComma(self.reviewer), cc=JoinComma(self.cc))
def Delete(self, ui, repo):
dir = CodeReviewDir(ui, repo)
os.unlink(dir + "/cl." + self.name)
def Subject(self):
s = line1(self.desc)
if len(s) > 60:
s = s[0:55] + "..."
if self.name != "new":
s = "code review %s: %s" % (self.name, s)
return s
def Upload(self, ui, repo, send_mail=False):
os.chdir(repo.root)
form_fields = [
("content_upload", "1"),
("reviewers", JoinComma(self.reviewer)),
("cc", JoinComma(self.cc)),
("description", self.desc),
("base_hashes", ""),
# Would prefer not to change the subject
# on reupload, but /upload requires it.
("subject", self.Subject()),
]
# NOTE(rsc): This duplicates too much of RealMain,
# but RealMain doesn't have the most reusable interface.
if self.name != "new":
form_fields.append(("issue", self.name))
vcs = GuessVCS(upload_options)
data = vcs.GenerateDiff(self.files)
files = vcs.GetBaseFiles(data)
if len(data) > MAX_UPLOAD_SIZE:
uploaded_diff_file = []
form_fields.append(("separate_patches", "1"))
else:
uploaded_diff_file = [("data", "data.diff", data)]
ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
response_body = MySend("/upload", body, content_type=ctype)
patchset = None
msg = response_body
lines = msg.splitlines()
if len(lines) >= 2:
msg = lines[0]
patchset = lines[1].strip()
patches = [x.split(" ", 1) for x in lines[2:]]
ui.status(msg + "\n")
if not response_body.startswith("Issue created.") and not response_body.startswith("Issue updated."):
print response_body
raise "failed to update issue"
issue = msg[msg.rfind("/")+1:]
self.name = issue
if not self.url:
self.url = server_url_base + self.name
if not uploaded_diff_file:
patches = UploadSeparatePatches(issue, rpc, patchset, data, upload_options)
vcs.UploadBaseFiles(issue, rpc, patches, patchset, upload_options, files)
if send_mail:
MySend("/" + issue + "/mail", payload="")
self.web = True
self.Flush(ui, repo)
return
def GoodCLName(name):
    """Return a truthy match object when name is all decimal digits."""
    matched = re.match("^[0-9]+$", name)
    return matched
def ParseCL(text, name):
    """Parse the editor/disk form of a CL back into a CL object.

    Returns (cl, 0, '') on success or (None, lineno, errmsg) when the
    input is malformed.
    """
    sname = None        # name of the section currently being collected
    lineno = 0
    sections = {
        'Description': '',
        'Files': '',
        'URL': '',
        'Reviewer': '',
        'CC': '',
    }
    for line in text.split('\n'):
        lineno += 1
        line = line.rstrip()
        # '#' lines (the prolog) are comments and are skipped entirely.
        if line != '' and line[0] == '#':
            continue
        # Blank or indented lines belong to the current section.
        if line == '' or line[0] == ' ' or line[0] == '\t':
            if sname == None and line != '':
                return None, lineno, 'text outside section'
            if sname != None:
                sections[sname] += line + '\n'
            continue
        # Otherwise the line must open a new 'Name: [value]' section.
        p = line.find(':')
        if p >= 0:
            s, val = line[:p].strip(), line[p+1:].strip()
            if s in sections:
                sname = s
                if val != '':
                    sections[sname] += val + '\n'
                continue
        return None, lineno, 'malformed section header'
    for k in sections:
        sections[k] = StripCommon(sections[k]).rstrip()
    cl = CL(name)
    cl.desc = sections['Description']
    for line in sections['Files'].split('\n'):
        # Strip per-line '#' comments in the Files section.
        i = line.find('#')
        if i >= 0:
            line = line[0:i].rstrip()
        if line == '':
            continue
        cl.files.append(line)
    cl.reviewer = SplitCommaSpace(sections['Reviewer'])
    cl.cc = SplitCommaSpace(sections['CC'])
    cl.url = sections['URL']
    # The editor placeholder counts as an empty description.
    if cl.desc == '<enter description here>':
        cl.desc = '';
    return cl, 0, ''
def SplitCommaSpace(s):
    """Split s on commas and/or whitespace, dropping empty fields."""
    normalized = s.replace(",", " ")
    return normalized.split()
def JoinComma(l):
    """Join the list into a single 'a, b, c'-style string."""
    separator = ", "
    return separator.join(l)
def ExceptionDetail():
    """Return 'ExcType: message' for the exception being handled,
    stripping the "<type '...'>" / "<class '...'>" wrapper."""
    name = str(sys.exc_info()[0])
    if name.startswith("<type '") and name.endswith("'>"):
        name = name[7:-2]
    elif name.startswith("<class '") and name.endswith("'>"):
        name = name[8:-2]
    detail = str(sys.exc_info()[1])
    if len(detail) > 0:
        name += ": " + detail
    return name
# Load CL from disk and/or the web.
def LoadCL(ui, repo, name, web=True):
    """Load CL `name` from .hg/codereview and, when web=True, merge in
    reviewers/cc/description from the code review server.

    Returns (cl, '') on success or (None, errmsg) on failure.
    """
    if not GoodCLName(name):
        return None, "invalid CL name"
    dir = CodeReviewDir(ui, repo)
    path = dir + "cl." + name
    if os.access(path, 0):
        ff = open(path)
        text = ff.read()
        ff.close()
        cl, lineno, err = ParseCL(text, name)
        if err != "":
            return None, "malformed CL data: "+err
        cl.local = True
    else:
        # No local file: start from an empty CL of that name.
        cl = CL(name)
    if web:
        try:
            f = GetSettings(name)
        except:
            return None, "cannot load CL data from code review server: "+ExceptionDetail()
        if 'reviewers' not in f:
            return None, "malformed response loading CL data from code review server"
        # Server data wins for reviewers/cc/description.
        cl.reviewer = SplitCommaSpace(f['reviewers'])
        cl.cc = SplitCommaSpace(f['cc'])
        cl.desc = f['description']
        cl.url = server_url_base + name
        cl.web = True
    return cl, ''
class LoadCLThread(threading.Thread):
    """Worker thread that loads one CL file; the result lands in
    self.cl (None on failure, with a warning printed)."""
    def __init__(self, ui, repo, dir, f, web):
        threading.Thread.__init__(self)
        self.ui = ui
        self.repo = repo
        self.dir = dir
        self.f = f          # file name of the form 'cl.<number>'
        self.web = web
        self.cl = None      # filled in by run() on success
    def run(self):
        # f[3:] strips the 'cl.' prefix to get the CL number.
        cl, err = LoadCL(self.ui, self.repo, self.f[3:], web=self.web)
        if err != '':
            self.ui.warn("loading "+self.dir+self.f+": " + err + "\n")
            return
        self.cl = cl
# Load all the CLs from this repository.
def LoadAllCL(ui, repo, web=True):
    """Return {cl_name: CL} for every cl.* file in the codereview dir,
    loading them in parallel threads when server data is requested."""
    dir = CodeReviewDir(ui, repo)
    m = {}
    files = [f for f in os.listdir(dir) if f.startswith('cl.')]
    if not files:
        return m
    if web:
        # Authenticate now, so we can use threads below
        MySend(None)
    active = []
    for f in files:
        t = LoadCLThread(ui, repo, dir, f, web)
        t.start()
        active.append(t)
    for t in active:
        t.join()
        if t.cl:
            m[t.cl.name] = t.cl
    return m
# Find repository root. On error, ui.warn and return None
def RepoDir(ui, repo):
    """Return the repository root as a local filesystem path, or None
    (after warning) when the repo is not on the local filesystem."""
    path = repo.url()
    if not path.startswith('file:/'):
        ui.warn("repository %s is not in local file system\n" % (path,))
        return None
    path = path[5:]
    return path[:-1] if path.endswith('/') else path
# Find (or make) code review directory.  On error, ui.warn and return None
def CodeReviewDir(ui, repo):
    dir = RepoDir(ui, repo)
    if dir == None:
        return None
    dir += '/.hg/codereview/'
    if not os.path.isdir(dir):
        try:
            # 0700: private to the user (cookies and CL data live here).
            os.mkdir(dir, 0700)
        except:
            ui.warn('cannot mkdir %s: %s\n' % (dir, ExceptionDetail()))
            return None
    return dir
# Strip maximal common leading white space prefix from text
def StripCommon(text):
    """Remove the longest whitespace prefix shared by all non-blank
    lines, drop leading blank lines, and collapse trailing blank
    lines into a single newline."""
    ws = None           # common prefix seen so far (None until first line)
    for line in text.split('\n'):
        line = line.rstrip()
        if line == '':
            continue
        # Leading whitespace of this line.
        white = line[:len(line)-len(line.lstrip())]
        if ws == None:
            ws = white
        else:
            # Shrink ws to the longest prefix it shares with white.
            common = ''
            for i in range(min(len(white), len(ws))+1):
                if white[0:i] == ws[0:i]:
                    common = white[0:i]
            ws = common
        if ws == '':
            break
    if ws == None:
        return text
    t = ''
    for line in text.split('\n'):
        line = line.rstrip()
        if line.startswith(ws):
            line = line[len(ws):]
        # Skip blank lines until some content has been emitted.
        if line == '' and t == '':
            continue
        t += line + '\n'
    # Collapse trailing blank lines into one newline.
    while len(t) >= 2 and t[-2:] == '\n\n':
        t = t[:-1]
    return t
# Indent text with indent.
def Indent(text, indent):
    """Prefix every line of text with indent; the result always ends
    with a newline."""
    return "".join(indent + line + "\n" for line in text.split("\n"))
# Return the first line of l
def line1(text):
    """Return everything before the first newline of text."""
    return text.partition('\n')[0]
# Prolog prepended to the editable CL text; '#' lines are stripped by
# ParseCL when the text is read back.
_change_prolog = """# Change list.
# Lines beginning with # are ignored.
# Multi-line values should be indented.
"""
#######################################################################
# Mercurial helper functions
# Return list of changed files in repository that match pats.
def ChangedFiles(ui, repo, pats, opts):
    """Sorted list of files modified/added/removed in the working
    directory that match pats."""
    # Find list of files being operated on.
    matcher = cmdutil.match(repo, pats, opts)
    node1, node2 = cmdutil.revpair(repo, None)
    modified, added, removed = repo.status(node1, node2, matcher)[:3]
    l = modified + added + removed
    l.sort()
    return l
# Return list of files claimed by existing CLs
def TakenFiles(ui, repo):
    """Return the file names already claimed by some local CL."""
    claimed = Taken(ui, repo)
    return claimed.keys()
def Taken(ui, repo):
    """Map each file claimed by a local CL to that CL object."""
    taken = {}
    for cl in LoadAllCL(ui, repo, web=False).values():
        for f in cl.files:
            taken[f] = cl
    return taken
# Return list of changed files that are not claimed by other CLs
def DefaultFiles(ui, repo, pats, opts):
    """Changed files matching pats that no existing CL has claimed."""
    changed = ChangedFiles(ui, repo, pats, opts)
    return Sub(changed, TakenFiles(ui, repo))
def Sub(l1, l2):
    """Return the elements of l1 that are not in l2, preserving order."""
    return [item for item in l1 if item not in l2]
def Add(l1, l2):
    """Return the sorted union of the two lists (l1 kept verbatim,
    plus elements of l2 not already in l1)."""
    merged = l1 + [item for item in l2 if item not in l1]
    merged.sort()
    return merged
def Intersect(l1, l2):
    """Return the elements of l1 that also appear in l2, in l1 order."""
    return [item for item in l1 if item in l2]
def getremote(ui, repo, opts):
    """Return the repository object for the 'default' remote path."""
    # save $http_proxy; creating the HTTP repo object will
    # delete it in an attempt to "help"
    proxy = os.environ.get('http_proxy')
    source, _, _ = hg.parseurl(ui.expandpath("default"), None)
    other = hg.repository(cmdutil.remoteui(repo, opts), source)
    if proxy is not None:
        os.environ['http_proxy'] = proxy
    return other
def Incoming(ui, repo, opts):
    """Changesets present in the default remote but not locally."""
    _, incoming, _ = repo.findcommonincoming(getremote(ui, repo, opts))
    return incoming
def EditCL(ui, repo, cl):
    """Open cl in the user's editor and fold the result back in.

    Loops until the edited text parses and has a description (or the
    user declines to re-edit). Returns '' on success or a message when
    the CL was left unmodified."""
    s = cl.EditorText()
    while True:
        s = ui.edit(s, ui.username())
        clx, line, err = ParseCL(s, cl.name)
        if err != '':
            if ui.prompt("error parsing change list: line %d: %s\nre-edit (y/n)?" % (line, err), ["&yes", "&no"], "y") == "n":
                return "change list not modified"
            continue
        # Commit the edited fields back into the caller's CL object.
        cl.desc = clx.desc;
        cl.reviewer = clx.reviewer
        cl.cc = clx.cc
        cl.files = clx.files
        if cl.desc == '':
            if ui.prompt("change list should have description\nre-edit (y/n)?", ["&yes", "&no"], "y") != "n":
                continue
        break
    return ""
# For use by submit, etc. (NOT by change)
# Get change list number or list of files from command line.
# If files are given, make a new change list.
def CommandLineCL(ui, repo, pats, opts):
    """Resolve command-line arguments into a CL.

    pats is either [clnumber] (load that CL) or a list of file
    patterns (build a new CL from the unclaimed changed files).
    Returns (cl, "") on success or (None, errmsg) on failure.
    """
    if len(pats) > 0 and GoodCLName(pats[0]):
        if len(pats) != 1:
            return None, "cannot specify change number and file names"
        if opts.get('message'):
            return None, "cannot use -m with existing CL"
        cl, err = LoadCL(ui, repo, pats[0], web=True)
        # Bug fix: the load error was previously ignored, so a failed
        # load left cl=None and crashed below. Propagate it instead.
        if err != "":
            return None, err
    else:
        cl = CL("new")
        cl.local = True
        cl.files = Sub(ChangedFiles(ui, repo, pats, opts), TakenFiles(ui, repo))
        if not cl.files:
            return None, "no files changed"
    if opts.get('reviewer'):
        cl.reviewer = Add(cl.reviewer, SplitCommaSpace(opts.get('reviewer')))
    if opts.get('cc'):
        cl.cc = Add(cl.cc, SplitCommaSpace(opts.get('cc')))
    if cl.name == "new":
        if opts.get('message'):
            cl.desc = opts.get('message')
        else:
            # No -m: let the user write the description interactively.
            err = EditCL(ui, repo, cl)
            if err != '':
                return None, err
    return cl, ""
# reposetup replaces cmdutil.match with this wrapper,
# which expands the syntax @clnumber to mean the files
# in that CL.
original_match = None
def ReplacementForCmdutilMatch(repo, pats=None, opts=None, globbed=False, default='relpath'):
    """cmdutil.match wrapper: expand '@clnumber' patterns into the
    files listed in that CL, then delegate to the original matcher.

    :raises util.Abort: on a malformed CL name or a CL that fails to load.
    """
    # Bug fix: the original signature used mutable defaults ([], {}),
    # which are shared across calls; use None sentinels instead.
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    taken = []
    files = []
    for p in pats:
        if p.startswith('@'):
            taken.append(p)
            clname = p[1:]
            if not GoodCLName(clname):
                raise util.Abort("invalid CL name " + clname)
            cl, err = LoadCL(repo.ui, repo, clname, web=False)
            if err != '':
                raise util.Abort("loading CL " + clname + ": " + err)
            files = Add(files, cl.files)
    # Replace each @cl pattern with explicit path: patterns.
    pats = Sub(pats, taken) + ['path:'+f for f in files]
    return original_match(repo, pats=pats, opts=opts, globbed=globbed, default=default)
#######################################################################
# Mercurial commands
# until done debugging
server = "localhost:1"
# server = "codereview.appspot.com"
server_url_base = None
# every command must take a ui and and repo as arguments.
# opts is a dict where you can find other command line flags
#
# Other parameters are taken in order from items on the command line that
# don't start with a dash. If no default value is given in the parameter list,
# they are required.
#
def change(ui, repo, *pats, **opts):
    """create or edit a change list
    Create or edit a change list.
    A change list is a group of files to be reviewed and submitted together,
    plus a textual description of the change.
    Change lists are referred to by simple alphanumeric names.
    Changes must be reviewed before they can be submitted.
    In the absence of options, the change command opens the
    change list for editing in the default editor.
    """
    dirty = {}          # CLs whose on-disk copy must be rewritten
    if len(pats) > 0 and GoodCLName(pats[0]):
        # Editing an existing CL.
        name = pats[0]
        if len(pats) != 1:
            return "cannot specify CL name and file patterns"
        pats = pats[1:]
        cl, err = LoadCL(ui, repo, name, web=True)
        if err != '':
            return err
        if not cl.local and (opts["stdin"] or not opts["stdout"]):
            return "cannot change non-local CL " + name
    else:
        # Creating a new CL from the unclaimed changed files.
        name = "new"
        cl = CL("new")
        dirty[cl] = True
        files = ChangedFiles(ui, repo, pats, opts)
        taken = TakenFiles(ui, repo)
        files = Sub(files, taken)
    if opts["delete"]:
        if name == "new":
            return "cannot use -d with file patterns"
        if opts["stdin"] or opts["stdout"]:
            return "cannot use -d with -i or -o"
        if not cl.local:
            return "cannot change non-local CL " + name
        # Abandon on the server, then remove the local file.
        PostMessage(cl.name, "*** Abandoned ***", send_mail="checked")
        EditDesc(cl.name, closed="checked")
        cl.Delete(ui, repo)
        return
    if opts["stdin"]:
        # Scripting interface: read the CL text from stdin.
        s = sys.stdin.read()
        clx, line, err = ParseCL(s, name)
        if err != '':
            return "error parsing change list: line %d: %s" % (line, err)
        if clx.desc is not None:
            cl.desc = clx.desc;
            dirty[cl] = True
        if clx.reviewer is not None:
            cl.reviewer = clx.reviewer
            dirty[cl] = True
        if clx.cc is not None:
            cl.cc = clx.cc
            dirty[cl] = True
        if clx.files is not None:
            cl.files = clx.files
            dirty[cl] = True
    if not opts["stdin"] and not opts["stdout"]:
        # Interactive path: open the CL in the user's editor.
        if name == "new":
            cl.files = files
        err = EditCL(ui, repo, cl)
        if err != "":
            return err
        dirty[cl] = True
    # Write out every modified CL (Flush also uploads new ones).
    for d, _ in dirty.items():
        d.Flush(ui, repo)
    if opts["stdout"]:
        ui.write(cl.EditorText())
    elif name == "new":
        if ui.quiet:
            ui.write(cl.name)
        else:
            ui.write("CL created: " + cl.url + "\n")
    return
def codereview_login(ui, repo, **opts):
    """log in to code review server
    Logs in to the code review server, saving a cookie in
    a file in your home directory.
    """
    # MySend(None) performs the auth handshake and caches the cookie.
    MySend(None)
def file(ui, repo, clname, pat, *pats, **opts):
    """assign files to or remove files from a change list
    Assign files to or (with -d) remove files from a change list.
    The -d option only removes files from the change list.
    It does not edit them or remove them from the repository.
    """
    # NOTE: this command deliberately shadows the `file` builtin;
    # Mercurial dispatches to it by name.
    pats = tuple([pat] + list(pats))
    if not GoodCLName(clname):
        return "invalid CL name " + clname
    dirty = {}          # CLs whose on-disk copy must be rewritten
    cl, err = LoadCL(ui, repo, clname, web=False)
    if err != '':
        return err
    if not cl.local:
        return "cannot change non-local CL " + clname
    files = ChangedFiles(ui, repo, pats, opts)
    if opts["delete"]:
        # Remove matching files from this CL only.
        oldfiles = Intersect(files, cl.files)
        if oldfiles:
            if not ui.quiet:
                ui.status("# Removing files from CL.  To undo:\n")
                ui.status("#	cd %s\n" % (repo.root))
                for f in oldfiles:
                    ui.status("#	hg file %s %s\n" % (cl.name, f))
            cl.files = Sub(cl.files, oldfiles)
            cl.Flush(ui, repo)
        else:
            ui.status("no such files in CL")
        return
    if not files:
        return "no such modified files"
    files = Sub(files, cl.files)
    taken = Taken(ui, repo)
    warned = False
    for f in files:
        if f in taken:
            # The file belongs to another CL: steal it, printing an
            # undo hint the first time.
            if not warned and not ui.quiet:
                ui.status("# Taking files from other CLs.  To undo:\n")
                ui.status("#	cd %s\n" % (repo.root))
                warned = True
            ocl = taken[f]
            if not ui.quiet:
                ui.status("#	hg file %s %s\n" % (ocl.name, f))
            if ocl not in dirty:
                ocl.files = Sub(ocl.files, files)
                dirty[ocl] = True
    cl.files = Add(cl.files, files)
    dirty[cl] = True
    for d, _ in dirty.items():
        d.Flush(ui, repo)
    return
def mail(ui, repo, *pats, **opts):
    # Upload the CL, then post a review-request message to its reviewers.
    cl, err = CommandLineCL(ui, repo, pats, opts)
    if err != "":
        return err
    if not cl.reviewer:
        return "no reviewers listed in CL"
    cl.Upload(ui, repo)
    pmsg = "Hello " + JoinComma(cl.reviewer) + ",\n"
    pmsg += "\n"
    pmsg += "I'd like you to review the following change.\n"
    PostMessage(cl.name, pmsg, send_mail="checked", subject=cl.Subject())
def nocommit(ui, repo, *pats, **opts):
    # Installed in place of 'hg commit'; changes must go through submit.
    return "The codereview extension is enabled; do not use commit."
def pending(ui, repo, *pats, **opts):
    # List every local CL (with server metadata), then any changed files
    # not yet assigned to a CL.
    m = LoadAllCL(ui, repo, web=True)
    names = m.keys()
    names.sort()
    for name in names:
        cl = m[name]
        ui.write(cl.PendingText() + "\n")
    files = DefaultFiles(ui, repo, [], opts)
    if len(files) > 0:
        s = "Changed files not in any CL:\n"
        for f in files:
            s += "\t" + f + "\n"
        ui.write(s)
def reposetup(ui, repo):
    """Mercurial extension hook: install the @clnumber-aware matcher in
    place of cmdutil.match and configure the Rietveld connection."""
    global original_match
    original_match = cmdutil.match
    cmdutil.match = ReplacementForCmdutilMatch
    RietveldSetup(ui, repo)
def submit(ui, repo, *pats, **opts):
    """submit change to remote repository

    Submits change to remote repository.
    Bails out if the local repository is not in sync with the remote one.
    """
    repo.ui.quiet = True
    if not opts["no_incoming"] and Incoming(ui, repo, opts):
        return "local repository out of date; must sync before submit"
    cl, err = CommandLineCL(ui, repo, pats, opts)
    if err != "":
        return err

    # Build the R=/TBR=/CC= trailer appended to the commit message.
    about = ""
    if cl.reviewer:
        about += "R=" + JoinComma(cl.reviewer) + "\n"
    if opts.get('tbr'):
        tbr = SplitCommaSpace(opts.get('tbr'))
        cl.reviewer = Add(cl.reviewer, tbr)
        about += "TBR=" + JoinComma(tbr) + "\n"
    if cl.cc:
        about += "CC=" + JoinComma(cl.cc) + "\n"

    if not cl.reviewer:
        return "no reviewers listed in CL"

    if not cl.local:
        return "cannot submit non-local CL"

    # upload, to sync current patch and also get change number if CL is new.
    cl.Upload(ui, repo)

    about += "%s%s\n" % (server_url_base, cl.name)

    # submit changes locally
    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)
    opts['message'] = cl.desc.rstrip() + "\n\n" + about

    m = match.exact(repo.root, repo.getcwd(), cl.files)
    node = repo.commit(opts['message'], opts.get('user'), opts.get('date'), m)
    if not node:
        return "nothing changed"

    # Check that the commit did not create a new head: if our parent is
    # not the previous tip and the parents already had other heads, the
    # commit forked history and must be rolled back.
    log = repo.changelog
    rev = log.rev(node)
    parents = log.parentrevs(rev)
    if (rev-1 not in parents and
            (parents == (nullrev, nullrev) or
            len(log.heads(log.node(parents[0]))) > 1 and
            (parents[1] == nullrev or len(log.heads(log.node(parents[1]))) > 1))):
        repo.rollback()
        return "local repository out of date (created new head); must sync before submit"

    # push changes to remote.
    # if it works, we're committed.
    # if not, roll back
    other = getremote(ui, repo, opts)
    r = repo.push(other, False, None)
    if r == 0:
        repo.rollback()
        return "local repository out of date; must sync before submit"

    # we're committed. upload final patch, close review, add commit message
    changeURL = short(node)
    url = other.url()
    # Rewrite to a friendlier source-browser URL for googlecode.com hosts.
    m = re.match("^https?://([^@/]+@)?([^.]+)\.googlecode\.com/hg/", url)
    if m:
        changeURL = "http://code.google.com/p/%s/source/detail?r=%s" % (m.group(2), changeURL)
    else:
        print >>sys.stderr, "URL: ", url
    pmsg = "*** Submitted as " + changeURL + " ***\n\n" + opts['message']
    PostMessage(cl.name, pmsg, send_mail="checked")
    EditDesc(cl.name, closed="checked")
    cl.Delete(ui, repo)
def sync(ui, repo, **opts):
    """synchronize with remote repository

    Incorporates recent changes from the remote repository
    into the local repository.
    """
    # Route status/note output through sync_note, which suppresses the
    # noisiest pull messages.
    ui.status = sync_note
    ui.note = sync_note
    other = getremote(ui, repo, opts)
    modheads = repo.pull(other)
    err = commands.postincoming(ui, repo, modheads, True, "tip")
    if err:
        return err
    sync_changes(ui, repo)
def sync_note(msg):
    """Output filter installed on ui.status/ui.note during sync:
    drops the two noisiest pull messages, passes everything else to stdout."""
    if msg in ('resolving manifests\n', 'searching for changes\n'):
        return
    sys.stdout.write(msg)
def sync_changes(ui, repo):
    """Hook run after a sync; currently no per-CL bookkeeping is needed."""
    return None
def uisetup(ui):
    """Mercurial ui hook: replace the built-in commit command with nocommit."""
    entry = "^commit|ci"
    if entry in commands.table:
        commands.table[entry] = (nocommit, [], "")
def upload(ui, repo, name, **opts):
    """Upload the named local CL's current diff to the review server and
    print the change's URL. Returns an error string on failure."""
    repo.ui.quiet = True
    cl, err = LoadCL(ui, repo, name, web=True)
    if err != "":
        return err
    if not cl.local:
        return "cannot upload non-local change"
    cl.Upload(ui, repo)
    print "%s%s\n" % (server_url_base, cl.name)
    return
# Option definitions shared by the commands that interact with reviewers
# (mail, submit). Tuples are (short flag, long flag, default, help text).
review_opts = [
    ('r', 'reviewer', '', 'add reviewer'),
    ('', 'cc', '', 'add cc'),
    ('', 'tbr', '', 'add future reviewer'),
    ('m', 'message', '', 'change description (for new change)'),
]
# Mercurial command table: maps command name to (function, options, synopsis).
cmdtable = {
    # The ^ means to show this command in the help text that
    # is printed when running hg with no arguments.
    "^change": (
        change,
        [
            ('d', 'delete', None, 'delete existing change list'),
            ('i', 'stdin', None, 'read change list from standard input'),
            ('o', 'stdout', None, 'print change list to standard output'),
        ],
        "[-i] [-o] change# or FILE ..."
    ),
    "codereview-login": (
        codereview_login,
        [],
        "",
    ),
    # Shadow the built-in commit so users go through submit instead.
    "commit|ci": (
        nocommit,
        [],
        "",
    ),
    "^file": (
        file,
        [
            ('d', 'delete', None, 'delete files from change list (but not repository)'),
        ],
        "[-d] change# FILE ..."
    ),
    "^pending|p": (
        pending,
        [],
        "[FILE ...]"
    ),
    "^mail": (
        mail,
        review_opts + [
        ] + commands.walkopts,
        "[-r reviewer] [--cc cc] [change# | file ...]"
    ),
    "^submit": (
        submit,
        review_opts + [
            ('', 'no_incoming', None, 'disable initial incoming check (for testing)'),
        ] + commands.walkopts + commands.commitopts + commands.commitopts2,
        "[-r reviewer] [--cc cc] [change# | file ...]"
    ),
    "^sync": (
        sync,
        [],
        "",
    ),
    "^upload": (
        upload,
        [],
        "change#"
    ),
}
#######################################################################
# Wrappers around upload.py for interacting with Rietveld
# Minimal placeholder diff uploaded when creating a new, still-empty issue
# (Rietveld requires some diff content at issue-creation time).
emptydiff = """Index: ~rietveld~placeholder~
===================================================================
diff --git a/~rietveld~placeholder~ b/~rietveld~placeholder~
new file mode 100644
"""
# HTML form parser
class FormParser(HTMLParser):
    """Parse an HTML page, collecting its form fields into self.map.

    <input> elements are recorded as name -> value directly; <textarea>
    bodies are accumulated between the start and end tags (including any
    character/entity references).
    """
    def __init__(self):
        self.map = {}        # field name -> field value
        self.curtag = None   # name of the currently open <textarea>, if any
        self.curdata = None  # text accumulated for that textarea
        HTMLParser.__init__(self)
    def handle_starttag(self, tag, attrs):
        if tag == "input":
            key = None
            value = ''
            for a in attrs:
                if a[0] == 'name':
                    key = a[1]
                if a[0] == 'value':
                    value = a[1]
            if key is not None:
                self.map[key] = value
        if tag == "textarea":
            key = None
            for a in attrs:
                if a[0] == 'name':
                    key = a[1]
            if key is not None:
                self.curtag = key
                self.curdata = ''
    def handle_endtag(self, tag):
        if tag == "textarea" and self.curtag is not None:
            self.map[self.curtag] = self.curdata
            self.curtag = None
            self.curdata = None
    def handle_charref(self, name):
        # Numeric character reference, e.g. &#65; -> "A".
        self.handle_data(unichr(int(name)))
    def handle_entityref(self, name):
        import htmlentitydefs
        if name in htmlentitydefs.entitydefs:
            self.handle_data(htmlentitydefs.entitydefs[name])
        else:
            # Unknown entity: keep it verbatim.
            self.handle_data("&" + name + ";")
    def handle_data(self, data):
        # Only textarea content is collected; data outside one is dropped.
        if self.curdata is not None:
            self.curdata += data
# Like upload.py Send but only authenticates when the
# redirect is to www.google.com/accounts. This keeps
# unnecessary redirects from happening during testing.
def MySend(request_path, payload=None,
           content_type="application/octet-stream",
           timeout=None,
           **kwargs):
    """Sends an RPC and returns the response.

    Args:
      request_path: The path to send the request to, eg /api/appversion/create.
      payload: The body of the request, or None to send an empty request.
      content_type: The Content-Type header to use.
      timeout: timeout in seconds; default None i.e. no timeout.
        (Note: for large requests on OS X, the timeout doesn't work right.)
      kwargs: Any keyword arguments are converted into query string parameters.

    Returns:
      The response body, as a string.
    """
    # TODO: Don't require authentication. Let the server say
    # whether it is necessary.
    global rpc
    # Lazily create the shared RPC server on first use.
    if rpc == None:
        rpc = GetRpcServer(upload_options)
    self = rpc
    if not self.authenticated:
        self._Authenticate()
    if request_path is None:
        return

    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        tries = 0
        while True:
            tries += 1
            args = dict(kwargs)
            url = "http://%s%s" % (self.host, request_path)
            if args:
                url += "?" + urllib.urlencode(args)
            req = self._CreateRequest(url=url, data=payload)
            req.add_header("Content-Type", content_type)
            try:
                f = self.opener.open(req)
                response = f.read()
                f.close()
                return response
            except urllib2.HTTPError, e:
                if tries > 3:
                    raise
                elif e.code == 401:
                    self._Authenticate()
                elif e.code == 302:
                    # Only re-authenticate when redirected to the Google
                    # accounts ServiceLogin page; other redirects are treated
                    # as an empty (successful) response.
                    loc = e.info()["location"]
                    if not loc.startswith('https://www.google.com/a') or loc.find('/ServiceLogin') < 0:
                        return ''
                    self._Authenticate()
                else:
                    raise
    finally:
        socket.setdefaulttimeout(old_timeout)
def GetForm(url):
    """Fetch url from the review server and return its HTML form fields
    as a dict, with CRLF line endings normalized to LF."""
    parser = FormParser()
    parser.feed(MySend(url))
    parser.close()
    for field, value in parser.map.items():
        parser.map[field] = value.replace("\r\n", "\n")
    return parser.map
def GetSettings(issue):
    """Return the issue's form fields, preferring the edit form and
    falling back to the publish form when edit is unavailable."""
    fields = GetForm("/" + issue + "/edit")
    if not fields or 'reviewers' not in fields:
        fields = GetForm("/" + issue + "/publish")
    return fields
def CreateIssue(subject, desc):
    """Create a new issue on the review server with the given subject and
    description. Exits the process if the server reports an error."""
    form_fields = [
        ("content_upload", "1"),
        # ("user", upload_options.email),
        ("reviewers", ''),
        ("cc", ''),
        ("description", desc),
        ("base_hashes", ""),
        ("subject", subject),
    ]
    # The server requires a diff at creation time; send the placeholder.
    uploaded_diff_file = [
        ("data", "data.diff", emptydiff),
    ]
    ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
    response = MySend("/upload", body, content_type=ctype)
    if response != "":
        print >>sys.stderr, "Error creating issue:\n" + response
        sys.exit(2)
def EditDesc(issue, subject=None, desc=None, reviewers=None, cc=None, closed=None):
    """Edit the issue's metadata on the review server.

    Only the arguments that are not None are changed; the rest keep the
    values fetched from the server's edit form. Exits the process if the
    server reports an error.
    """
    form_fields = GetForm("/" + issue + "/edit")
    if subject is not None:
        form_fields['subject'] = subject
    if desc is not None:
        form_fields['description'] = desc
    if reviewers is not None:
        form_fields['reviewers'] = reviewers
    if cc is not None:
        form_fields['cc'] = cc
    if closed is not None:
        form_fields['closed'] = closed
    ctype, body = EncodeMultipartFormData(form_fields.items(), [])
    response = MySend("/" + issue + "/edit", body, content_type=ctype)
    if response != "":
        print >>sys.stderr, "Error editing description:\n" + "Sent form: \n", form_fields, "\n", response
        sys.exit(2)
def PostMessage(issue, message, reviewers=None, cc=None, send_mail=None, subject=None):
    """Post a message-only comment to the issue via its publish form.

    Optional arguments override the form's reviewers/cc/send_mail/subject
    fields; send_mail="checked" asks the server to email the reviewers.
    Exits the process if the server reports an error.
    """
    form_fields = GetForm("/" + issue + "/publish")
    if reviewers is not None:
        form_fields['reviewers'] = reviewers
    if cc is not None:
        form_fields['cc'] = cc
    if send_mail is not None:
        form_fields['send_mail'] = send_mail
    if subject is not None:
        form_fields['subject'] = subject
    form_fields['message'] = message
    # message_only keeps the server from expecting a new patch set.
    form_fields['message_only'] = '1'
    ctype, body = EncodeMultipartFormData(form_fields.items(), [])
    response = MySend("/" + issue + "/publish", body, content_type=ctype)
    if response != "":
        print response
        sys.exit(2)
class opt(object):
    """Bare attribute bag used to mimic optparse's options object when
    building upload_options by hand in RietveldSetup."""
    pass
def RietveldSetup(ui, repo):
    """Read the [codereview] section of the hg configuration and build the
    module-global upload options used by the Rietveld upload machinery."""
    global upload_options, rpc, server, server_url_base, force_google_account, verbosity

    # TODO(rsc): If the repository config has no codereview section,
    # do not enable the extension. This allows users to
    # put the extension in their global .hgrc but only
    # enable it for some repositories.
    # if not ui.has_section("codereview"):
    #     cmdtable = {}
    #     return

    if not ui.verbose:
        verbosity = 0

    # Config options.
    x = ui.config("codereview", "server")
    if x is not None:
        server = x

    # TODO(rsc): Take from ui.username?
    email = None
    x = ui.config("codereview", "email")
    if x is not None:
        email = x

    cc = None
    x = ui.config("codereview", "cc")
    if x is not None:
        cc = x

    server_url_base = "http://" + server + "/"
    x = ui.config("codereview", "server_url_base")
    if x is not None:
        server_url_base = x
    if not server_url_base.endswith("/"):
        server_url_base += "/"

    testing = ui.config("codereview", "testing")
    force_google_account = ui.configbool("codereview", "force_google_account", False)

    upload_options = opt()
    upload_options.email = email
    upload_options.host = None
    upload_options.verbose = 0
    upload_options.description = None
    upload_options.description_file = None
    upload_options.reviewers = None
    upload_options.cc = cc
    upload_options.message = None
    upload_options.issue = None
    upload_options.download_base = False
    upload_options.revision = None
    upload_options.send_mail = False
    upload_options.vcs = None
    upload_options.server = server
    upload_options.save_cookies = True

    if testing:
        # Tests use a fixed identity and do not persist cookies.
        upload_options.save_cookies = False
        upload_options.email = "test@example.com"

    rpc = None
#######################################################################
# We keep a full copy of upload.py here to avoid import path hell.
# It would be nice if hg added the hg repository root
# to the default PYTHONPATH.
# Edit .+2,<hget http://codereview.appspot.com/static/upload.py
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import cookielib
import getpass
import logging
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
# The md5 module was deprecated in Python 2.5.
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
import readline
except ImportError:
pass
# The logging verbosity:
#  0: Errors only.
#  1: Status messages.
#  2: Info logs.
#  3: Debug logs.
verbosity = 1

# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024

# Constants for version control names. Used by GuessVCSName.
VCS_GIT = "Git"
VCS_MERCURIAL = "Mercurial"
VCS_SUBVERSION = "Subversion"
VCS_UNKNOWN = "Unknown"

# whitelist for non-binary filetypes which do not start with "text/"
# .mm (Objective-C) shows up as application/x-freemind on my Linux box.
TEXT_MIMETYPES = ['application/javascript', 'application/x-javascript',
                  'application/x-freemind']

# Maps common VCS spellings/abbreviations to their canonical names.
VCS_ABBREVIATIONS = {
    VCS_MERCURIAL.lower(): VCS_MERCURIAL,
    "hg": VCS_MERCURIAL,
    VCS_SUBVERSION.lower(): VCS_SUBVERSION,
    "svn": VCS_SUBVERSION,
    VCS_GIT.lower(): VCS_GIT,
}
def GetEmail(prompt):
    """Prompts the user for their email address and returns it.

    The last used email address is saved to a file and offered up as a suggestion
    to the user. If the user presses enter without typing in anything the last
    used email address is used. If the user enters a new address, it is saved
    for next time we prompt.
    """
    last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
    last_email = ""
    if os.path.exists(last_email_file_name):
        try:
            last_email_file = open(last_email_file_name, "r")
            last_email = last_email_file.readline().strip("\n")
            last_email_file.close()
            prompt += " [%s]" % last_email
        except IOError, e:
            # Best effort only: an unreadable cache just skips the suggestion.
            pass
    email = raw_input(prompt + ": ").strip()
    if email:
        try:
            last_email_file = open(last_email_file_name, "w")
            last_email_file.write(email)
            last_email_file.close()
        except IOError, e:
            # Failing to save the address is not fatal.
            pass
    else:
        email = last_email
    return email
def StatusUpdate(msg):
    """Print a status message to stdout unless verbosity is errors-only (0).

    Args:
      msg: The string to print.
    """
    if verbosity > 0:
        print(msg)
def ErrorExit(msg):
    """Write msg (plus a newline) to stderr and terminate with status 1."""
    sys.stderr.write(msg + "\n")
    sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
    """Raised to indicate there was an error authenticating with ClientLogin."""
    def __init__(self, url, code, msg, headers, args):
        urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
        self.args = args
        # ClientLogin reports the failure kind in the "Error" field,
        # e.g. "BadAuthentication" or "CaptchaRequired".
        self.reason = args["Error"]
class AbstractRpcServer(object):
    """Provides a common interface for a simple RPC server."""

    def __init__(self, host, auth_function, host_override=None, extra_headers={},
                 save_cookies=False):
        """Creates a new HttpRpcServer.

        Args:
          host: The host to send requests to.
          auth_function: A function that takes no arguments and returns an
            (email, password) tuple when called. Will be called if authentication
            is required.
          host_override: The host header to send to the server (defaults to host).
          extra_headers: A dict of extra headers to append to every request.
          save_cookies: If True, save the authentication cookies to local disk.
            If False, use an in-memory cookiejar instead. Subclasses must
            implement this functionality. Defaults to False.
        """
        self.host = host
        self.host_override = host_override
        self.auth_function = auth_function
        self.authenticated = False
        # NOTE(review): extra_headers has a mutable default; within this class
        # it is only read (never mutated), so the shared default is harmless.
        self.extra_headers = extra_headers
        self.save_cookies = save_cookies
        self.opener = self._GetOpener()
        if self.host_override:
            logging.info("Server: %s; Host: %s", self.host, self.host_override)
        else:
            logging.info("Server: %s", self.host)

    def _GetOpener(self):
        """Returns an OpenerDirector for making HTTP requests.

        Returns:
          A urllib2.OpenerDirector object.
        """
        raise NotImplementedError()

    def _CreateRequest(self, url, data=None):
        """Creates a new urllib request."""
        logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
        req = urllib2.Request(url, data=data)
        if self.host_override:
            req.add_header("Host", self.host_override)
        for key, value in self.extra_headers.iteritems():
            req.add_header(key, value)
        return req

    def _GetAuthToken(self, email, password):
        """Uses ClientLogin to authenticate the user, returning an auth token.

        Args:
          email: The user's email address
          password: The user's password

        Raises:
          ClientLoginError: If there was an error authenticating with ClientLogin.
          HTTPError: If there was some other form of HTTP error.

        Returns:
          The authentication token returned by ClientLogin.
        """
        account_type = "GOOGLE"
        if self.host.endswith(".google.com") and not force_google_account:
            # Needed for use inside Google.
            account_type = "HOSTED"
        req = self._CreateRequest(
            url="https://www.google.com/accounts/ClientLogin",
            data=urllib.urlencode({
                "Email": email,
                "Passwd": password,
                "service": "ah",
                "source": "rietveld-codereview-upload",
                "accountType": account_type,
            }),
        )
        try:
            response = self.opener.open(req)
            response_body = response.read()
            # The response is key=value pairs, one per line; "Auth" holds
            # the token.
            response_dict = dict(x.split("=")
                                 for x in response_body.split("\n") if x)
            return response_dict["Auth"]
        except urllib2.HTTPError, e:
            if e.code == 403:
                body = e.read()
                response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
                raise ClientLoginError(req.get_full_url(), e.code, e.msg,
                                       e.headers, response_dict)
            else:
                raise

    def _GetAuthCookie(self, auth_token):
        """Fetches authentication cookies for an authentication token.

        Args:
          auth_token: The authentication token returned by ClientLogin.

        Raises:
          HTTPError: If there was an error fetching the authentication cookies.
        """
        # This is a dummy value to allow us to identify when we're successful.
        continue_location = "http://localhost/"
        args = {"continue": continue_location, "auth": auth_token}
        req = self._CreateRequest("http://%s/_ah/login?%s" %
                                  (self.host, urllib.urlencode(args)))
        try:
            response = self.opener.open(req)
        except urllib2.HTTPError, e:
            # The expected result is a redirect, which urllib2 reports as an
            # HTTPError; treat it as the response.
            response = e
        if (response.code != 302 or
                response.info()["location"] != continue_location):
            raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
                                    response.headers, response.fp)
        self.authenticated = True

    def _Authenticate(self):
        """Authenticates the user.

        The authentication process works as follows:
         1) We get a username and password from the user
         2) We use ClientLogin to obtain an AUTH token for the user
            (see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
         3) We pass the auth token to /_ah/login on the server to obtain an
            authentication cookie. If login was successful, it tries to redirect
            us to the URL we provided.

        If we attempt to access the upload API without first obtaining an
        authentication cookie, it returns a 401 response (or a 302) and
        directs us to authenticate ourselves with ClientLogin.
        """
        # Up to three attempts; only BadAuthentication retries, the other
        # ClientLogin failure reasons abort with a message.
        for i in range(3):
            credentials = self.auth_function()
            try:
                auth_token = self._GetAuthToken(credentials[0], credentials[1])
            except ClientLoginError, e:
                if e.reason == "BadAuthentication":
                    print >>sys.stderr, "Invalid username or password."
                    continue
                if e.reason == "CaptchaRequired":
                    print >>sys.stderr, (
                        "Please go to\n"
                        "https://www.google.com/accounts/DisplayUnlockCaptcha\n"
                        "and verify you are a human. Then try again.")
                    break
                if e.reason == "NotVerified":
                    print >>sys.stderr, "Account not verified."
                    break
                if e.reason == "TermsNotAgreed":
                    print >>sys.stderr, "User has not agreed to TOS."
                    break
                if e.reason == "AccountDeleted":
                    print >>sys.stderr, "The user account has been deleted."
                    break
                if e.reason == "AccountDisabled":
                    print >>sys.stderr, "The user account has been disabled."
                    break
                if e.reason == "ServiceDisabled":
                    print >>sys.stderr, ("The user's access to the service has been "
                                         "disabled.")
                    break
                if e.reason == "ServiceUnavailable":
                    print >>sys.stderr, "The service is not available; try again later."
                    break
                raise
            self._GetAuthCookie(auth_token)
            return

    def Send(self, request_path, payload=None,
             content_type="application/octet-stream",
             timeout=None,
             **kwargs):
        """Sends an RPC and returns the response.

        Args:
          request_path: The path to send the request to, eg /api/appversion/create.
          payload: The body of the request, or None to send an empty request.
          content_type: The Content-Type header to use.
          timeout: timeout in seconds; default None i.e. no timeout.
            (Note: for large requests on OS X, the timeout doesn't work right.)
          kwargs: Any keyword arguments are converted into query string parameters.

        Returns:
          The response body, as a string.
        """
        # TODO: Don't require authentication. Let the server say
        # whether it is necessary.
        if not self.authenticated:
            self._Authenticate()

        old_timeout = socket.getdefaulttimeout()
        socket.setdefaulttimeout(timeout)
        try:
            tries = 0
            while True:
                tries += 1
                args = dict(kwargs)
                url = "http://%s%s" % (self.host, request_path)
                if args:
                    url += "?" + urllib.urlencode(args)
                req = self._CreateRequest(url=url, data=payload)
                req.add_header("Content-Type", content_type)
                try:
                    f = self.opener.open(req)
                    response = f.read()
                    f.close()
                    return response
                except urllib2.HTTPError, e:
                    if tries > 3:
                        raise
                    elif e.code == 401 or e.code == 302:
                        # Credentials expired or redirect to login: re-auth.
                        self._Authenticate()
                    else:
                        raise
        finally:
            socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
    """Provides a simplified RPC-style interface for HTTP requests."""

    def _Authenticate(self):
        """Save the cookie jar after authentication."""
        super(HttpRpcServer, self)._Authenticate()
        if self.save_cookies:
            StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
            self.cookie_jar.save()

    def _GetOpener(self):
        """Returns an OpenerDirector that supports cookies and ignores redirects.

        Returns:
          A urllib2.OpenerDirector object.
        """
        # Build the opener by hand (instead of build_opener) so that no
        # redirect handler is installed: redirects surface as HTTPErrors.
        opener = urllib2.OpenerDirector()
        opener.add_handler(urllib2.ProxyHandler())
        opener.add_handler(urllib2.UnknownHandler())
        opener.add_handler(urllib2.HTTPHandler())
        opener.add_handler(urllib2.HTTPDefaultErrorHandler())
        opener.add_handler(urllib2.HTTPSHandler())
        opener.add_handler(urllib2.HTTPErrorProcessor())
        if self.save_cookies:
            self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies_" + server)
            self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
            if os.path.exists(self.cookie_file):
                try:
                    self.cookie_jar.load()
                    self.authenticated = True
                    StatusUpdate("Loaded authentication cookies from %s" %
                                 self.cookie_file)
                except (cookielib.LoadError, IOError):
                    # Failed to load cookies - just ignore them.
                    pass
            else:
                # Create an empty cookie file with mode 600
                fd = os.open(self.cookie_file, os.O_CREAT, 0600)
                os.close(fd)
            # Always chmod the cookie file
            os.chmod(self.cookie_file, 0600)
        else:
            # Don't save cookies across runs of update.py.
            self.cookie_jar = cookielib.CookieJar()
        opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
        return opener
# Command-line interface for upload.py, grouped by purpose below.
parser = optparse.OptionParser(usage="%prog [options] [-- diff_options]")
parser.add_option("-y", "--assume_yes", action="store_true",
                  dest="assume_yes", default=False,
                  help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
                 dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
                 dest="verbose", default=1,
                 help="Print info level logs (default).")
group.add_option("--noisy", action="store_const", const=3,
                 dest="verbose", help="Print all logs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
                 default="codereview.appspot.com",
                 metavar="SERVER",
                 help=("The server to upload to. The format is host[:port]. "
                       "Defaults to '%default'."))
group.add_option("-e", "--email", action="store", dest="email",
                 metavar="EMAIL", default=None,
                 help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
                 metavar="HOST", default=None,
                 help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
                 dest="save_cookies", default=True,
                 help="Do not save authentication cookies to local disk.")
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-d", "--description", action="store", dest="description",
                 metavar="DESCRIPTION", default=None,
                 help="Optional description when creating an issue.")
group.add_option("-f", "--description_file", action="store",
                 dest="description_file", metavar="DESCRIPTION_FILE",
                 default=None,
                 help="Optional path of a file that contains "
                      "the description when creating an issue.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
                 metavar="REVIEWERS", default=None,
                 help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
                 metavar="CC", default=None,
                 help="Add CC (comma separated email addresses).")
group.add_option("--private", action="store_true", dest="private",
                 default=False,
                 help="Make the issue restricted to reviewers and those CCed")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-m", "--message", action="store", dest="message",
                 metavar="MESSAGE", default=None,
                 help="A message to identify the patch. "
                      "Will prompt if omitted.")
group.add_option("-i", "--issue", type="int", action="store",
                 metavar="ISSUE", default=None,
                 help="Issue number to which to add. Defaults to new issue.")
group.add_option("--download_base", action="store_true",
                 dest="download_base", default=False,
                 help="Base files will be downloaded by the server "
                      "(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
                 metavar="REV", default=None,
                 help="Branch/tree/revision to diff against (used by DVCS).")
group.add_option("--send_mail", action="store_true",
                 dest="send_mail", default=False,
                 help="Send notification email to reviewers.")
group.add_option("--vcs", action="store", dest="vcs",
                 metavar="VCS", default=None,
                 help=("Version control system (optional, usually upload.py "
                       "already guesses the right VCS)."))
def GetRpcServer(options):
    """Returns an instance of an AbstractRpcServer.

    Returns:
      A new AbstractRpcServer, on which RPC calls can be made.
    """
    rpc_server_class = HttpRpcServer

    def GetUserCredentials():
        """Prompts the user for a username and password."""
        email = options.email
        if email is None:
            email = GetEmail("Email (login for uploading to %s)" % options.server)
        password = getpass.getpass("Password for %s: " % email)
        return (email, password)

    # If this is the dev_appserver, use fake authentication.
    host = (options.host or options.server).lower()
    if host == "localhost" or host.startswith("localhost:"):
        email = options.email
        if email is None:
            email = "test@example.com"
            logging.info("Using debug user %s. Override with --email" % email)
        server = rpc_server_class(
            options.server,
            # dev_appserver accepts any password for the given email.
            lambda: (email, "password"),
            host_override=options.host,
            extra_headers={"Cookie":
                           'dev_appserver_login="%s:False"' % email},
            save_cookies=options.save_cookies)
        # Don't try to talk to ClientLogin.
        server.authenticated = True
        return server

    return rpc_server_class(options.server, GetUserCredentials,
                            host_override=options.host,
                            save_cookies=options.save_cookies)
def EncodeMultipartFormData(fields, files):
    """Encode form fields for multipart/form-data.

    Args:
      fields: A sequence of (name, value) elements for regular form fields.
      files: A sequence of (name, filename, value) elements for data to be
        uploaded as files.
    Returns:
      (content_type, body) ready for httplib.HTTP instance.
    Source:
      http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
    """
    boundary = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
    parts = []
    for (name, value) in fields:
        parts.extend([
            '--' + boundary,
            'Content-Disposition: form-data; name="%s"' % name,
            '',
            value,
        ])
    for (name, filename, value) in files:
        parts.extend([
            '--' + boundary,
            'Content-Disposition: form-data; name="%s"; filename="%s"' %
            (name, filename),
            'Content-Type: %s' % GetContentType(filename),
            '',
            value,
        ])
    # Closing boundary, plus a trailing CRLF.
    parts.append('--' + boundary + '--')
    parts.append('')
    body = '\r\n'.join(parts)
    content_type = 'multipart/form-data; boundary=%s' % boundary
    return content_type, body
def GetContentType(filename):
    """Guess the MIME content type from filename, defaulting to octet-stream."""
    guessed, _ = mimetypes.guess_type(filename)
    return guessed or 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
# Passed as the shell= argument of subprocess.Popen below.
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
                           universal_newlines=True,
                           env=os.environ):
    """Executes a command and returns the output from stdout and the return code.

    Args:
      command: Command to execute.
      print_output: If True, the output is printed to stdout.
        If False, both stdout and stderr are ignored.
      universal_newlines: Use universal_newlines flag (default: True).

    Returns:
      Tuple (output, return code)
    """
    logging.info("Running %s", command)
    p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         shell=use_shell, universal_newlines=universal_newlines,
                         env=env)
    if print_output:
        # Echo stdout line by line while also capturing it.
        output_array = []
        while True:
            line = p.stdout.readline()
            if not line:
                break
            print line.strip("\n")
            output_array.append(line)
        output = "".join(output_array)
    else:
        output = p.stdout.read()
    p.wait()
    errout = p.stderr.read()
    if print_output and errout:
        print >>sys.stderr, errout
    p.stdout.close()
    p.stderr.close()
    return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
             print_output=False, env=os.environ):
    """Run command and return its stdout, exiting the process on a non-zero
    status or (unless silent_ok) on empty output."""
    output, status = RunShellWithReturnCode(command, print_output,
                                            universal_newlines, env)
    if status:
        ErrorExit("Got error status from %s:\n%s" % (command, output))
    if not (silent_ok or output):
        ErrorExit("No output from %s" % command)
    return output
class VersionControlSystem(object):
"""Abstract base class providing an interface to the VCS."""
def __init__(self, options):
"""Constructor.
Args:
options: Command line options.
"""
self.options = options
def GenerateDiff(self, args):
"""Return the current diff as a string.
Args:
args: Extra arguments to pass to the diff command.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def CheckForUnknownFiles(self):
"""Show an "are you sure?" prompt if there are unknown files."""
unknown_files = self.GetUnknownFiles()
if unknown_files:
print "The following files are not added to version control:"
for line in unknown_files:
print line
prompt = "Are you sure to continue?(y/N) "
answer = raw_input(prompt).strip()
if answer != "y":
ErrorExit("User aborted")
def GetBaseFile(self, filename):
"""Get the content of the upstream version of a file.
Returns:
A tuple (base_content, new_content, is_binary, status)
base_content: The contents of the base file.
new_content: For text files, this is empty. For binary files, this is
the contents of the new file, since the diff output won't contain
information to reconstruct the current file.
is_binary: True iff the file is binary.
status: The status of the file.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetBaseFiles(self, diff):
"""Helper that calls GetBase file for each file in the patch.
Returns:
A dictionary that maps from filename to GetBaseFile's tuple. Filenames
are retrieved based on lines that start with "Index:" or
"Property changes on:".
"""
files = {}
for line in diff.splitlines(True):
if line.startswith('Index:') or line.startswith('Property changes on:'):
unused, filename = line.split(':', 1)
# On Windows if a file has property changes its filename uses '\'
# instead of '/'.
filename = filename.strip().replace('\\', '/')
files[filename] = self.GetBaseFile(filename)
return files
  def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
                      files):
    """Uploads the base files (and if necessary, the current ones as well).
    Args:
      issue: Issue number (string or int) on the review server.
      rpc_server: Object with a Send(url, body, ...) method for HTTP posts.
      patch_list: Sequence of (patch_key, filename) pairs from the server.
      patchset: Patchset number the files belong to.
      options: Parsed command line options (verbosity, email, ...).
      files: Dict mapping filename -> GetBaseFile() tuple.
    """
    def UploadFile(filename, file_id, content, is_binary, status, is_base):
      """Uploads a file to the server."""
      file_too_large = False
      if is_base:
        type = "base"
      else:
        type = "current"
      # Oversized content is replaced with an empty body plus a marker
      # form field so the server still records the file.
      if len(content) > MAX_UPLOAD_SIZE:
        print ("Not uploading the %s file for %s because it's too large." %
               (type, filename))
        file_too_large = True
        content = ""
      checksum = md5(content).hexdigest()
      if options.verbose > 0 and not file_too_large:
        print "Uploading %s file for %s" % (type, filename)
      url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
      form_fields = [("filename", filename),
                     ("status", status),
                     ("checksum", checksum),
                     ("is_binary", str(is_binary)),
                     ("is_current", str(not is_base)),
                    ]
      if file_too_large:
        form_fields.append(("file_too_large", "1"))
      if options.email:
        form_fields.append(("user", options.email))
      ctype, body = EncodeMultipartFormData(form_fields,
                                            [("data", filename, content)])
      response_body = rpc_server.Send(url, body,
                                      content_type=ctype)
      if not response_body.startswith("OK"):
        StatusUpdate(" --> %s" % response_body)
        sys.exit(1)
    # Invert patch_list into filename -> patch_key (first key wins).
    patches = dict()
    [patches.setdefault(v, k) for k, v in patch_list]
    for filename in patches.keys():
      base_content, new_content, is_binary, status = files[filename]
      file_id_str = patches.get(filename)
      # A "nobase" marker from the server means the base file should not
      # be uploaded; the numeric id follows the final underscore.
      if file_id_str.find("nobase") != -1:
        base_content = None
        file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
      file_id = int(file_id_str)
      if base_content != None:
        UploadFile(filename, file_id, base_content, is_binary, status, True)
      if new_content != None:
        UploadFile(filename, file_id, new_content, is_binary, status, False)
def IsImage(self, filename):
"""Returns true if the filename has an image extension."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False
return mimetype.startswith("image/")
def IsBinary(self, filename):
"""Returns true if the guessed mimetyped isnt't in text group."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False # e.g. README, "real" binaries usually have an extension
# special case for text files which don't start with text/
if mimetype in TEXT_MIMETYPES:
return False
return not mimetype.startswith("text/")
class SubversionVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Subversion."""
  def __init__(self, options):
    super(SubversionVCS, self).__init__(options)
    # Accepts "REV" or "REV1:REV2"; group(3) is None for the single form.
    if self.options.revision:
      match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
      if not match:
        ErrorExit("Invalid Subversion revision %s." % self.options.revision)
      self.rev_start = match.group(1)
      self.rev_end = match.group(3)
    else:
      self.rev_start = self.rev_end = None
    # Cache output from "svn list -r REVNO dirname".
    # Keys: dirname, Values: 2-tuple (output for start rev and end rev).
    self.svnls_cache = {}
    # SVN base URL is required to fetch files deleted in an older revision.
    # Result is cached to not guess it over and over again in GetBaseFile().
    required = self.options.download_base or self.options.revision is not None
    self.svn_base = self._GuessBase(required)
  def GuessBase(self, required):
    """Wrapper for _GuessBase; returns the cached result from __init__."""
    return self.svn_base
  def _GuessBase(self, required):
    """Returns the SVN base URL.
    Args:
      required: If true, exits if the url can't be guessed, otherwise None is
        returned.
    """
    info = RunShell(["svn", "info"])
    for line in info.splitlines():
      words = line.split()
      if len(words) == 2 and words[0] == "URL:":
        url = words[1]
        scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
        # Strip "user@" credentials so they never reach the review server.
        username, netloc = urllib.splituser(netloc)
        if username:
          logging.info("Removed username from base URL")
        # Known hosting sites get a raw-file viewer URL tailored to their
        # repository layout; everything else keeps its own URL.
        if netloc.endswith("svn.python.org"):
          if netloc == "svn.python.org":
            if path.startswith("/projects/"):
              path = path[9:]
          elif netloc != "pythondev@svn.python.org":
            ErrorExit("Unrecognized Python URL: %s" % url)
          base = "http://svn.python.org/view/*checkout*%s/" % path
          logging.info("Guessed Python base = %s", base)
        elif netloc.endswith("svn.collab.net"):
          if path.startswith("/repos/"):
            path = path[6:]
          base = "http://svn.collab.net/viewvc/*checkout*%s/" % path
          logging.info("Guessed CollabNet base = %s", base)
        elif netloc.endswith(".googlecode.com"):
          path = path + "/"
          base = urlparse.urlunparse(("http", netloc, path, params,
                                      query, fragment))
          logging.info("Guessed Google Code base = %s", base)
        else:
          path = path + "/"
          base = urlparse.urlunparse((scheme, netloc, path, params,
                                      query, fragment))
          logging.info("Guessed base = %s", base)
        return base
    if required:
      ErrorExit("Can't find URL in output from svn info")
    return None
  def GenerateDiff(self, args):
    """Runs "svn diff" (plus args) and returns its output; exits on an
    empty/invalid diff."""
    cmd = ["svn", "diff"]
    if self.options.revision:
      cmd += ["-r", self.options.revision]
    cmd.extend(args)
    data = RunShell(cmd)
    count = 0
    for line in data.splitlines():
      if line.startswith("Index:") or line.startswith("Property changes on:"):
        count += 1
        logging.info(line)
    if not count:
      ErrorExit("No valid patches found in output from svn diff")
    return data
  def _CollapseKeywords(self, content, keyword_str):
    """Collapses SVN keywords."""
    # svn cat translates keywords but svn diff doesn't. As a result of this
    # behavior patching.PatchChunks() fails with a chunk mismatch error.
    # This part was originally written by the Review Board development team
    # who had the same problem (http://reviews.review-board.org/r/276/).
    # Mapping of keywords to known aliases
    svn_keywords = {
      # Standard keywords
      'Date':                ['Date', 'LastChangedDate'],
      'Revision':            ['Revision', 'LastChangedRevision', 'Rev'],
      'Author':              ['Author', 'LastChangedBy'],
      'HeadURL':             ['HeadURL', 'URL'],
      'Id':                  ['Id'],
      # Aliases
      'LastChangedDate':     ['LastChangedDate', 'Date'],
      'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
      'LastChangedBy':       ['LastChangedBy', 'Author'],
      'URL':                 ['URL', 'HeadURL'],
    }
    def repl(m):
      # "$Keyword:: value $" (fixed-width form) keeps its padding;
      # "$Keyword: value $" collapses to the bare "$Keyword$".
      if m.group(2):
        return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
      return "$%s$" % m.group(1)
    keywords = [keyword
                for name in keyword_str.split(" ")
                for keyword in svn_keywords.get(name, [])]
    return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)
  def GetUnknownFiles(self):
    # "?" in the first status column marks an untracked file.
    status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
    unknown_files = []
    for line in status.split("\n"):
      if line and line[0] == "?":
        unknown_files.append(line)
    return unknown_files
  def ReadFile(self, filename):
    """Returns the contents of a file."""
    file = open(filename, 'rb')
    result = ""
    try:
      result = file.read()
    finally:
      file.close()
    return result
  def GetStatus(self, filename):
    """Returns the status of a file."""
    if not self.options.revision:
      status = RunShell(["svn", "status", "--ignore-externals", filename])
      if not status:
        ErrorExit("svn status returned no output for %s" % filename)
      status_lines = status.splitlines()
      # If file is in a cl, the output will begin with
      # "\n--- Changelist 'cl_name':\n". See
      # http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
      if (len(status_lines) == 3 and
          not status_lines[0] and
          status_lines[1].startswith("--- Changelist")):
        status = status_lines[2]
      else:
        status = status_lines[0]
    # If we have a revision to diff against we need to run "svn list"
    # for the old and the new revision and compare the results to get
    # the correct status for a file.
    else:
      dirname, relfilename = os.path.split(filename)
      if dirname not in self.svnls_cache:
        cmd = ["svn", "list", "-r", self.rev_start, dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to get status for %s." % filename)
        old_files = out.splitlines()
        args = ["svn", "list"]
        if self.rev_end:
          args += ["-r", self.rev_end]
        cmd = args + [dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to run command %s" % cmd)
        self.svnls_cache[dirname] = (old_files, out.splitlines())
      old_files, new_files = self.svnls_cache[dirname]
      # Presence in old vs. new listings determines Deleted/Modified/Added.
      if relfilename in old_files and relfilename not in new_files:
        status = "D   "
      elif relfilename in old_files and relfilename in new_files:
        status = "M   "
      else:
        status = "A   "
    return status
  def GetBaseFile(self, filename):
    status = self.GetStatus(filename)
    base_content = None
    new_content = None
    # If a file is copied its status will be "A  +", which signifies
    # "addition-with-history". See "svn st" for more information. We need to
    # upload the original file or else diff parsing will fail if the file was
    # edited.
    # NOTE(review): indexing status[3] assumes the status string is at least
    # four characters wide, which "svn status" columns guarantee.
    if status[0] == "A" and status[3] != "+":
      # We'll need to upload the new content if we're adding a binary file
      # since diff's output won't contain it.
      mimetype = RunShell(["svn", "propget", "svn:mime-type", filename],
                          silent_ok=True)
      base_content = ""
      is_binary = bool(mimetype) and not mimetype.startswith("text/")
      if is_binary and self.IsImage(filename):
        new_content = self.ReadFile(filename)
    elif (status[0] in ("M", "D", "R") or
          (status[0] == "A" and status[3] == "+") or  # Copied file.
          (status[0] == " " and status[1] == "M")):  # Property change.
      args = []
      if self.options.revision:
        url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
      else:
        # Don't change filename, it's needed later.
        url = filename
        args += ["-r", "BASE"]
      cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
      mimetype, returncode = RunShellWithReturnCode(cmd)
      if returncode:
        # File does not exist in the requested revision.
        # Reset mimetype, it contains an error message.
        mimetype = ""
      get_base = False
      is_binary = bool(mimetype) and not mimetype.startswith("text/")
      if status[0] == " ":
        # Empty base content just to force an upload.
        base_content = ""
      elif is_binary:
        if self.IsImage(filename):
          get_base = True
          if status[0] == "M":
            if not self.rev_end:
              new_content = self.ReadFile(filename)
            else:
              url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
              new_content = RunShell(["svn", "cat", url],
                                     universal_newlines=True, silent_ok=True)
        else:
          base_content = ""
      else:
        get_base = True
      if get_base:
        if is_binary:
          universal_newlines = False
        else:
          universal_newlines = True
        if self.rev_start:
          # "svn cat -r REV delete_file.txt" doesn't work. cat requires
          # the full URL with "@REV" appended instead of using "-r" option.
          url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          base_content = RunShell(["svn", "cat", url],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        else:
          base_content = RunShell(["svn", "cat", filename],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        if not is_binary:
          args = []
          if self.rev_start:
            url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          else:
            url = filename
            args += ["-r", "BASE"]
          cmd = ["svn"] + args + ["propget", "svn:keywords", url]
          keywords, returncode = RunShellWithReturnCode(cmd)
          # Collapse expanded keywords so the diff applies cleanly.
          if keywords and not returncode:
            base_content = self._CollapseKeywords(base_content, keywords)
    else:
      StatusUpdate("svn status returned unexpected output: %s" % status)
      sys.exit(1)
    return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Git."""
  def __init__(self, options):
    super(GitVCS, self).__init__(options)
    # Map of filename -> (hash before, hash after) of base file.
    # Hashes for "no such file" are represented as None.
    self.hashes = {}
    # Map of new filename -> old filename for renames.
    self.renames = {}
  def GenerateDiff(self, extra_args):
    # This is more complicated than svn's GenerateDiff because we must convert
    # the diff output to include an svn-style "Index:" line as well as record
    # the hashes of the files, so we can upload them along with our diff.
    # Special hash used by git to indicate "no such content".
    NULL_HASH = "0"*40
    extra_args = extra_args[:]
    if self.options.revision:
      extra_args = [self.options.revision] + extra_args
    # -M enables rename detection.
    extra_args.append('-M')
    # --no-ext-diff is broken in some versions of Git, so try to work around
    # this by overriding the environment (but there is still a problem if the
    # git config key "diff.external" is used).
    env = os.environ.copy()
    if 'GIT_EXTERNAL_DIFF' in env: del env['GIT_EXTERNAL_DIFF']
    gitdiff = RunShell(["git", "diff", "--no-ext-diff", "--full-index"]
                       + extra_args, env=env)
    svndiff = []
    filecount = 0
    filename = None
    for line in gitdiff.splitlines():
      match = re.match(r"diff --git a/(.*) b/(.*)$", line)
      if match:
        filecount += 1
        # Intentionally use the "after" filename so we can show renames.
        filename = match.group(2)
        svndiff.append("Index: %s\n" % filename)
        if match.group(1) != match.group(2):
          self.renames[match.group(2)] = match.group(1)
      else:
        # The "index" line in a git diff looks like this (long hashes elided):
        # index 82c0d44..b2cee3f 100755
        # We want to save the left hash, as that identifies the base file.
        match = re.match(r"index (\w+)\.\.(\w+)", line)
        if match:
          before, after = (match.group(1), match.group(2))
          if before == NULL_HASH:
            before = None
          if after == NULL_HASH:
            after = None
          self.hashes[filename] = (before, after)
      svndiff.append(line + "\n")
    if not filecount:
      ErrorExit("No valid patches found in output from git diff")
    return "".join(svndiff)
  def GetUnknownFiles(self):
    # Untracked files not covered by .gitignore.
    status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
                      silent_ok=True)
    return status.splitlines()
  def GetFileContent(self, file_hash, is_binary):
    """Returns the content of a file identified by its git hash."""
    data, retcode = RunShellWithReturnCode(["git", "show", file_hash],
                                           universal_newlines=not is_binary)
    if retcode:
      ErrorExit("Got error status from 'git show %s'" % file_hash)
    return data
  def GetBaseFile(self, filename):
    hash_before, hash_after = self.hashes.get(filename, (None,None))
    base_content = None
    new_content = None
    is_binary = self.IsBinary(filename)
    status = None
    if filename in self.renames:
      status = "A +"  # Match svn attribute name for renames.
      if filename not in self.hashes:
        # If a rename doesn't change the content, we never get a hash.
        base_content = RunShell(["git", "show", filename])
    elif not hash_before:
      status = "A"
      base_content = ""
    elif not hash_after:
      status = "D"
    else:
      status = "M"
    is_image = self.IsImage(filename)
    # Grab the before/after content if we need it.
    # We should include file contents if it's text or it's an image.
    if not is_binary or is_image:
      # Grab the base content if we don't have it already.
      if base_content is None and hash_before:
        base_content = self.GetFileContent(hash_before, is_binary)
      # Only include the "after" file if it's an image; otherwise it
      # it is reconstructed from the diff.
      if is_image and hash_after:
        new_content = self.GetFileContent(hash_after, is_binary)
    return (base_content, new_content, is_binary, status)
class MercurialVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Mercurial."""
  def __init__(self, options, repo_dir):
    super(MercurialVCS, self).__init__(options)
    # Absolute path to repository (we can be in a subdir)
    self.repo_dir = os.path.normpath(repo_dir)
    # Compute the subdir
    cwd = os.path.normpath(os.getcwd())
    assert cwd.startswith(self.repo_dir)
    self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
    if self.options.revision:
      self.base_rev = self.options.revision
    else:
      # Default base is the working directory's parent revision.
      self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()
  def _GetRelPath(self, filename):
    """Get relative path of a file according to the current directory,
    given its logical path in the repo."""
    assert filename.startswith(self.subdir), (filename, self.subdir)
    return filename[len(self.subdir):].lstrip(r"\/")
  def GenerateDiff(self, extra_args):
    # If no file specified, restrict to the current subdir
    extra_args = extra_args or ["."]
    cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
    data = RunShell(cmd, silent_ok=True)
    svndiff = []
    filecount = 0
    for line in data.splitlines():
      m = re.match("diff --git a/(\S+) b/(\S+)", line)
      if m:
        # Modify line to make it look like as it comes from svn diff.
        # With this modification no changes on the server side are required
        # to make upload.py work with Mercurial repos.
        # NOTE: for proper handling of moved/copied files, we have to use
        # the second filename.
        filename = m.group(2)
        svndiff.append("Index: %s" % filename)
        svndiff.append("=" * 67)
        filecount += 1
        logging.info(line)
      else:
        svndiff.append(line)
    if not filecount:
      ErrorExit("No valid patches found in output from hg diff")
    return "\n".join(svndiff) + "\n"
  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    args = []
    status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
        silent_ok=True)
    unknown_files = []
    for line in status.splitlines():
      st, fn = line.split(" ", 1)
      if st == "?":
        unknown_files.append(fn)
    return unknown_files
  def GetBaseFile(self, filename):
    # "hg status" and "hg cat" both take a path relative to the current subdir
    # rather than to the repo root, but "hg diff" has given us the full path
    # to the repo root.
    base_content = ""
    new_content = None
    is_binary = False
    oldrelpath = relpath = self._GetRelPath(filename)
    # "hg status -C" returns two lines for moved/copied files, one otherwise
    out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
    out = out.splitlines()
    # HACK: strip error message about missing file/directory if it isn't in
    # the working copy
    if out[0].startswith('%s: ' % relpath):
      out = out[1:]
    if len(out) > 1:
      # Moved/copied => considered as modified, use old filename to
      # retrieve base contents
      oldrelpath = out[1].strip()
      status = "M"
    else:
      status, _ = out[0].split(' ', 1)
    # Only the first component of "REV1:REV2" identifies the base revision.
    if ":" in self.base_rev:
      base_rev = self.base_rev.split(":", 1)[0]
    else:
      base_rev = self.base_rev
    if status != "A":
      base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
        silent_ok=True)
      is_binary = "\0" in base_content  # Mercurial's heuristic
    if status != "R":
      new_content = open(relpath, "rb").read()
      is_binary = is_binary or "\0" in new_content
    if is_binary and base_content:
      # Fetch again without converting newlines
      base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
        silent_ok=True, universal_newlines=False)
    if not is_binary or not self.IsImage(relpath):
      new_content = None
    return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
  """Splits a patch into separate pieces for each file.
  Args:
    data: A string containing the output of svn diff.
  Returns:
    A list of 2-tuple (filename, text) where text is the svn diff output
    pertaining to filename.
  """
  pieces = []
  current_name = None
  current_lines = []
  for line in data.splitlines(True):
    started_name = None
    if line.startswith('Index:'):
      started_name = line.split(':', 1)[1].strip()
    elif line.startswith('Property changes on:'):
      # On Windows a property-change header uses '\' between directories
      # while the "Index:" header uses '/'; normalize so the same file
      # does not show up twice.
      candidate = line.split(':', 1)[1].strip().replace('\\', '/')
      if candidate != current_name:
        # Property changes with no content modifications start a new diff.
        started_name = candidate
    if started_name:
      # Flush the previous file's accumulated diff, then start fresh.
      if current_name and current_lines:
        pieces.append((current_name, ''.join(current_lines)))
      current_name = started_name
      current_lines = [line]
    else:
      current_lines.append(line)
  if current_name and current_lines:
    pieces.append((current_name, ''.join(current_lines)))
  return pieces
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
  """Uploads a separate patch for each file in the diff output.
  Args:
    issue: Issue number on the review server.
    rpc_server: Object with a Send(url, body, ...) method for HTTP posts.
    patchset: Patchset number the patches belong to.
    data: Full diff text (svn-style); split per file by SplitPatch().
    options: Parsed command line options.
  Returns a list of [patch_key, filename] for each file.
  """
  patches = SplitPatch(data)
  rv = []
  for patch in patches:
    # Per-file patches above the size limit are skipped entirely.
    if len(patch[1]) > MAX_UPLOAD_SIZE:
      print ("Not uploading the patch for " + patch[0] +
             " because the file is too large.")
      continue
    form_fields = [("filename", patch[0])]
    if not options.download_base:
      form_fields.append(("content_upload", "1"))
    files = [("data", "data.diff", patch[1])]
    ctype, body = EncodeMultipartFormData(form_fields, files)
    url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
    print "Uploading patch for " + patch[0]
    response_body = rpc_server.Send(url, body, content_type=ctype)
    lines = response_body.splitlines()
    # Expected success response: "OK" on line 1, the patch key on line 2.
    if not lines or lines[0] != "OK":
      StatusUpdate(" --> %s" % response_body)
      sys.exit(1)
    rv.append([lines[1], patch[0]])
  return rv
def GuessVCSName():
  """Helper to guess the version control system.
  This examines the current directory, guesses which VersionControlSystem
  we're using, and returns an string indicating which VCS is detected.
  Returns:
    A pair (vcs, output).  vcs is a string indicating which VCS was detected
    and is one of VCS_GIT, VCS_MERCURIAL, VCS_SUBVERSION, or VCS_UNKNOWN.
    output is a string containing any interesting output from the vcs
    detection routine, or None if there is nothing interesting.
  """
  # Mercurial has a command to get the base directory of a repository
  # Try running it, but don't die if we don't have hg installed.
  # NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
  try:
    out, returncode = RunShellWithReturnCode(["hg", "root"])
    if returncode == 0:
      return (VCS_MERCURIAL, out.strip())
  except OSError, (errno, message):
    if errno != 2:  # ENOENT -- they don't have hg installed.
      raise
  # Subversion has a .svn in all working directories.
  if os.path.isdir('.svn'):
    logging.info("Guessed VCS = Subversion")
    return (VCS_SUBVERSION, None)
  # Git has a command to test if you're in a git tree.
  # Try running it, but don't die if we don't have git installed.
  try:
    out, returncode = RunShellWithReturnCode(["git", "rev-parse",
                                              "--is-inside-work-tree"])
    if returncode == 0:
      return (VCS_GIT, None)
  except OSError, (errno, message):
    if errno != 2:  # ENOENT -- they don't have git installed.
      raise
  return (VCS_UNKNOWN, None)
def GuessVCS(options):
  """Helper to guess the version control system.
  This verifies any user-specified VersionControlSystem (by command line
  or environment variable).  If the user didn't specify one, this examines
  the current directory, guesses which VersionControlSystem we're using,
  and returns an instance of the appropriate class.  Exit with an error
  if we can't figure it out.
  Returns:
    A VersionControlSystem instance. Exits if the VCS can't be guessed.
  """
  vcs = options.vcs
  if not vcs:
    # The CODEREVIEW_VCS environment variable is a fallback for --vcs.
    vcs = os.environ.get("CODEREVIEW_VCS")
  if vcs:
    v = VCS_ABBREVIATIONS.get(vcs.lower())
    if v is None:
      ErrorExit("Unknown version control system %r specified." % vcs)
    (vcs, extra_output) = (v, None)
  else:
    (vcs, extra_output) = GuessVCSName()
  if vcs == VCS_MERCURIAL:
    # MercurialVCS needs the repo root; GuessVCSName may have provided it.
    if extra_output is None:
      extra_output = RunShell(["hg", "root"]).strip()
    return MercurialVCS(options, extra_output)
  elif vcs == VCS_SUBVERSION:
    return SubversionVCS(options)
  elif vcs == VCS_GIT:
    return GitVCS(options)
  ErrorExit(("Could not guess version control system. "
             "Are you in a working copy directory?"))
def RealMain(argv, data=None):
  """The real main function.
  Args:
    argv: Command line arguments.
    data: Diff contents. If None (default) the diff is generated by
      the VersionControlSystem implementation returned by GuessVCS().
  Returns:
    A 2-tuple (issue id, patchset id).
    The patchset id is None if the base files are not uploaded by this
    script (applies only to SVN checkouts).
  """
  logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
                              "%(lineno)s %(message)s "))
  # Force the C locale so tool output parsed below is not localized.
  os.environ['LC_ALL'] = 'C'
  options, args = parser.parse_args(argv[1:])
  global verbosity
  verbosity = options.verbose
  if verbosity >= 3:
    logging.getLogger().setLevel(logging.DEBUG)
  elif verbosity >= 2:
    logging.getLogger().setLevel(logging.INFO)
  vcs = GuessVCS(options)
  if isinstance(vcs, SubversionVCS):
    # base field is only allowed for Subversion.
    # Note: Fetching base files may become deprecated in future releases.
    base = vcs.GuessBase(options.download_base)
  else:
    base = None
  # NOTE(review): this re-assigns download_base to the value it already has;
  # looks like only the log line matters here -- confirm against upstream.
  if not base and options.download_base:
    options.download_base = True
    logging.info("Enabled upload of base file")
  if not options.assume_yes:
    vcs.CheckForUnknownFiles()
  if data is None:
    data = vcs.GenerateDiff(args)
  files = vcs.GetBaseFiles(data)
  if verbosity >= 1:
    print "Upload server:", options.server, "(change with -s/--server)"
  if options.issue:
    prompt = "Message describing this patch set: "
  else:
    prompt = "New issue subject: "
  message = options.message or raw_input(prompt).strip()
  if not message:
    ErrorExit("A non-empty message is required")
  rpc_server = GetRpcServer(options)
  form_fields = [("subject", message)]
  if base:
    form_fields.append(("base", base))
  if options.issue:
    form_fields.append(("issue", str(options.issue)))
  if options.email:
    form_fields.append(("user", options.email))
  # Sanity-check reviewer/cc addresses: any "@" address must have exactly
  # one dot in its domain part.
  if options.reviewers:
    for reviewer in options.reviewers.split(','):
      if "@" in reviewer and not reviewer.split("@")[1].count(".") == 1:
        ErrorExit("Invalid email address: %s" % reviewer)
    form_fields.append(("reviewers", options.reviewers))
  if options.cc:
    for cc in options.cc.split(','):
      if "@" in cc and not cc.split("@")[1].count(".") == 1:
        ErrorExit("Invalid email address: %s" % cc)
    form_fields.append(("cc", options.cc))
  description = options.description
  if options.description_file:
    if options.description:
      ErrorExit("Can't specify description and description_file")
    file = open(options.description_file, 'r')
    description = file.read()
    file.close()
  if description:
    form_fields.append(("description", description))
  # Send a hash of all the base file so the server can determine if a copy
  # already exists in an earlier patchset.
  base_hashes = ""
  for file, info in files.iteritems():
    if not info[0] is None:
      checksum = md5(info[0]).hexdigest()
      if base_hashes:
        base_hashes += "|"
      base_hashes += checksum + ":" + file
  form_fields.append(("base_hashes", base_hashes))
  if options.private:
    if options.issue:
      print "Warning: Private flag ignored when updating an existing issue."
    else:
      form_fields.append(("private", "1"))
  # If we're uploading base files, don't send the email before the uploads, so
  # that it contains the file status.
  if options.send_mail and options.download_base:
    form_fields.append(("send_mail", "1"))
  if not options.download_base:
    form_fields.append(("content_upload", "1"))
  if len(data) > MAX_UPLOAD_SIZE:
    print "Patch is large, so uploading file patches separately."
    uploaded_diff_file = []
    form_fields.append(("separate_patches", "1"))
  else:
    uploaded_diff_file = [("data", "data.diff", data)]
  ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
  response_body = rpc_server.Send("/upload", body, content_type=ctype)
  patchset = None
  if not options.download_base or not uploaded_diff_file:
    # Success response: message line, patchset id line, then
    # "patch_key filename" pairs.
    lines = response_body.splitlines()
    if len(lines) >= 2:
      msg = lines[0]
      patchset = lines[1].strip()
      patches = [x.split(" ", 1) for x in lines[2:]]
    else:
      msg = response_body
  else:
    msg = response_body
  if not response_body.startswith("Issue created.") and \
     not response_body.startswith("Issue updated."):
    print >>sys.stderr, msg
    sys.exit(0)
  # The issue id is the last path component of the URL in the message.
  issue = msg[msg.rfind("/")+1:]
  if not uploaded_diff_file:
    result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
    if not options.download_base:
      patches = result
  if not options.download_base:
    vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
  if options.send_mail:
    rpc_server.Send("/" + issue + "/mail", payload="")
  return issue, patchset
def main():
  """Entry point: run RealMain and exit cleanly on Ctrl-C."""
  try:
    RealMain(sys.argv)
  except KeyboardInterrupt:
    # Blank line keeps the status message off the interrupted prompt line.
    print
    StatusUpdate("Interrupted.")
    sys.exit(1)
Various tweaks to code review.
The main one is to check at submit time that
the user name used in the checkin message
is listed in the CONTRIBUTORS file;
this should catch misconfigurations.
Another is to cut the @domain part
from the R= and CC= lines on checkin
messages, so that cc'ing someone on
a change does not record their email
address for all time.
R=r
CC=go-dev
http://go/go-review/1016036
#!/usr/bin/env python
#
# Copyright 2007-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Mercurial interface to codereview.appspot.com.
To configure it, set the following options in
your repository's .hg/hgrc file.
[extensions]
codereview = path/to/codereview.py
[codereview]
server = codereview.appspot.com
The server should be running Rietveld; see http://code.google.com/p/rietveld/.
'''
# TODO(rsc):
# fix utf-8 upload bug
# look for and clear submitted CLs during sync / add "adopt" command?
# creating an issue prints the URL twice
# better documentation
from mercurial import cmdutil, commands, hg, util, error, match
from mercurial.node import nullrev, hex, nullid, short
import os, re
import stat
import threading
from HTMLParser import HTMLParser
# Determine the Mercurial version we are running under; very old
# releases expose it via a separate mercurial.version module instead
# of util.version().
# NOTE(review): the bare "except:" also hides unrelated failures.
try:
	hgversion = util.version()
except:
	from mercurial.version import version as v
	hgversion = v.get_version()
# To experiment with Mercurial in the python interpreter:
# >>> repo = hg.repository(ui.ui(), path = ".")
#######################################################################
# Normally I would split this into multiple files, but it simplifies
# import path headaches to keep it all in one file. Sorry.
import sys
# This file is a Mercurial extension, not a standalone script; refuse
# direct invocation.
if __name__ == "__main__":
	print >>sys.stderr, "This is a Mercurial extension and should not be invoked directly."
	sys.exit(2)
#######################################################################
# Change list parsing.
#
# Change lists are stored in .hg/codereview/cl.nnnnnn
# where nnnnnn is the number assigned by the code review server.
# Most data about a change list is stored on the code review server
# too: the description, reviewer, and cc list are all stored there.
# The only thing in the cl.nnnnnn file is the list of relevant files.
# Also, the existence of the cl.nnnnnn file marks this repository
# as the one where the change list lives.
class CL(object):
	"""In-memory representation of a single change list (CL).
	Serialized to .hg/codereview/cl.<name>; most metadata (description,
	reviewers, cc) also lives on the code review server.
	"""
	def __init__(self, name):
		self.name = name
		self.desc = ''
		self.files = []
		self.reviewer = []
		self.cc = []
		self.url = ''
		self.local = False
		self.web = False
	def DiskText(self):
		"""Return the on-disk form: description plus the file list only."""
		cl = self
		s = ""
		s += "Description:\n"
		s += Indent(cl.desc, "\t")
		s += "Files:\n"
		for f in cl.files:
			s += "\t" + f + "\n"
		return s
	def EditorText(self):
		"""Return the editable form presented to the user in $EDITOR."""
		cl = self
		s = _change_prolog
		s += "\n"
		if cl.url != '':
			s += 'URL: ' + cl.url + '	# cannot edit\n\n'
		s += "Reviewer: " + JoinComma(cl.reviewer) + "\n"
		s += "CC: " + JoinComma(cl.cc) + "\n"
		s += "\n"
		s += "Description:\n"
		if cl.desc == '':
			s += "\t<enter description here>\n"
		else:
			s += Indent(cl.desc, "\t")
		s += "\n"
		s += "Files:\n"
		for f in cl.files:
			s += "\t" + f + "\n"
		s += "\n"
		return s
	def PendingText(self):
		"""Return the human-readable summary used by the pending command."""
		cl = self
		s = cl.name + ":" + "\n"
		s += Indent(cl.desc, "\t")
		s += "\n"
		s += "\tReviewer: " + JoinComma(cl.reviewer) + "\n"
		s += "\tCC: " + JoinComma(cl.cc) + "\n"
		s += "\tFiles:\n"
		for f in cl.files:
			s += "\t\t" + f + "\n"
		return s
	def Flush(self, ui, repo):
		"""Write the CL to disk (atomically, via rename) and sync the server."""
		if self.name == "new":
			self.Upload(ui, repo)
		dir = CodeReviewDir(ui, repo)
		path = dir + '/cl.' + self.name
		f = open(path+'!', "w")
		f.write(self.DiskText())
		f.close()
		# Rename over the final path so readers never see a partial file.
		os.rename(path+'!', path)
		if self.web:
			EditDesc(self.name, desc=self.desc,
				reviewers=JoinComma(self.reviewer), cc=JoinComma(self.cc))
	def Delete(self, ui, repo):
		"""Remove the CL's file from .hg/codereview."""
		dir = CodeReviewDir(ui, repo)
		os.unlink(dir + "/cl." + self.name)
	def Subject(self):
		"""Return the mail subject: first description line, truncated."""
		s = line1(self.desc)
		if len(s) > 60:
			s = s[0:55] + "..."
		if self.name != "new":
			s = "code review %s: %s" % (self.name, s)
		return s
	def Upload(self, ui, repo, send_mail=False):
		"""Create or update the issue on the review server for this CL."""
		os.chdir(repo.root)
		form_fields = [
			("content_upload", "1"),
			("reviewers", JoinComma(self.reviewer)),
			("cc", JoinComma(self.cc)),
			("description", self.desc),
			("base_hashes", ""),
			# Would prefer not to change the subject
			# on reupload, but /upload requires it.
			("subject", self.Subject()),
		]
		# NOTE(rsc): This duplicates too much of RealMain,
		# but RealMain doesn't have the most reusable interface.
		if self.name != "new":
			form_fields.append(("issue", self.name))
		vcs = GuessVCS(upload_options)
		data = vcs.GenerateDiff(self.files)
		files = vcs.GetBaseFiles(data)
		if len(data) > MAX_UPLOAD_SIZE:
			uploaded_diff_file = []
			form_fields.append(("separate_patches", "1"))
		else:
			uploaded_diff_file = [("data", "data.diff", data)]
		ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
		response_body = MySend("/upload", body, content_type=ctype)
		patchset = None
		msg = response_body
		lines = msg.splitlines()
		if len(lines) >= 2:
			msg = lines[0]
			patchset = lines[1].strip()
			patches = [x.split(" ", 1) for x in lines[2:]]
		ui.status(msg + "\n")
		if not response_body.startswith("Issue created.") and not response_body.startswith("Issue updated."):
			raise util.Abort("failed to update issue: " + response_body)
		# The server's message ends with the issue URL; take the last path
		# component as the issue number.
		issue = msg[msg.rfind("/")+1:]
		self.name = issue
		if not self.url:
			self.url = server_url_base + self.name
		if not uploaded_diff_file:
			patches = UploadSeparatePatches(issue, rpc, patchset, data, upload_options)
		vcs.UploadBaseFiles(issue, rpc, patches, patchset, upload_options, files)
		if send_mail:
			MySend("/" + issue + "/mail", payload="")
		self.web = True
		self.Flush(ui, repo)
		return
def GoodCLName(name):
    """Report whether name looks like a CL number (decimal digits only)."""
    digits_only = re.compile("^[0-9]+$")
    return digits_only.match(name)
def ParseCL(text, name):
    """Parse the editor/disk text form of a CL (see CL.EditorText).

    Returns (cl, lineno, err): on success err is '' and lineno is 0;
    on failure cl is None and lineno is the offending line number.
    """
    sname = None          # section currently being collected
    lineno = 0
    sections = {
        'Description': '',
        'Files': '',
        'URL': '',
        'Reviewer': '',
        'CC': '',
    }
    for line in text.split('\n'):
        lineno += 1
        line = line.rstrip()
        # '#' lines (e.g. the _change_prolog template) are comments.
        if line != '' and line[0] == '#':
            continue
        # Blank or indented lines continue the current section body.
        if line == '' or line[0] == ' ' or line[0] == '\t':
            if sname == None and line != '':
                return None, lineno, 'text outside section'
            if sname != None:
                sections[sname] += line + '\n'
            continue
        # Otherwise this must be a "Section: [optional value]" header.
        p = line.find(':')
        if p >= 0:
            s, val = line[:p].strip(), line[p+1:].strip()
            if s in sections:
                sname = s
                if val != '':
                    sections[sname] += val + '\n'
                continue
        return None, lineno, 'malformed section header'
    for k in sections:
        sections[k] = StripCommon(sections[k]).rstrip()
    cl = CL(name)
    cl.desc = sections['Description']
    for line in sections['Files'].split('\n'):
        # Strip trailing '#' comments from file lines.
        i = line.find('#')
        if i >= 0:
            line = line[0:i].rstrip()
        if line == '':
            continue
        cl.files.append(line)
    cl.reviewer = SplitCommaSpace(sections['Reviewer'])
    cl.cc = SplitCommaSpace(sections['CC'])
    cl.url = sections['URL']
    # Drop the placeholder EditorText inserts for empty descriptions.
    if cl.desc == '<enter description here>':
        cl.desc = '';
    return cl, 0, ''
def SplitCommaSpace(s):
    """Split s on commas and/or whitespace, dropping empty entries."""
    normalized = s.replace(",", " ")
    return normalized.split()
def CutDomain(s):
    """Strip a trailing @domain from an email address, if present."""
    return s.split('@', 1)[0]
def JoinComma(l):
    """Inverse of SplitCommaSpace: render a list as ", "-separated text."""
    separator = ", "
    return separator.join(l)
def ExceptionDetail():
    """Format the exception currently being handled as "TypeName: message",
    stripping Python's "<type '...'>" / "<class '...'>" wrapper."""
    exc_type, exc_value = sys.exc_info()[:2]
    s = str(exc_type)
    for prefix in ("<type '", "<class '"):
        if s.startswith(prefix) and s.endswith("'>"):
            s = s[len(prefix):-2]
            break
    arg = str(exc_value)
    if arg:
        s += ": " + arg
    return s
# Load CL from disk and/or the web.
def LoadCL(ui, repo, name, web=True):
    """Load CL data for change number `name`.

    Reads the local .hg/codereview/cl.<name> file if present and, when
    web is True, merges in reviewer/cc/description data from the code
    review server.  Returns (cl, err): err is '' on success; on failure
    cl is None and err describes the problem.
    """
    if not GoodCLName(name):
        return None, "invalid CL name"
    dir = CodeReviewDir(ui, repo)
    path = dir + "cl." + name
    if os.access(path, 0):
        ff = open(path)
        try:
            text = ff.read()
        finally:
            # Close the handle even if the read fails (was leaked before).
            ff.close()
        cl, lineno, err = ParseCL(text, name)
        if err != "":
            return None, "malformed CL data: "+err
        cl.local = True
    else:
        cl = CL(name)
    if web:
        try:
            f = GetSettings(name)
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt and
            # SystemExit still propagate.
            return None, "cannot load CL data from code review server: "+ExceptionDetail()
        if 'reviewers' not in f:
            return None, "malformed response loading CL data from code review server"
        cl.reviewer = SplitCommaSpace(f['reviewers'])
        cl.cc = SplitCommaSpace(f['cc'])
        cl.desc = f['description']
        cl.url = server_url_base + name
        cl.web = True
    return cl, ''
class LoadCLThread(threading.Thread):
    """Worker thread that loads one cl.* file (used by LoadAllCL).

    The loaded CL is left in self.cl (None on failure, after warning)."""
    def __init__(self, ui, repo, dir, f, web):
        threading.Thread.__init__(self)
        self.ui = ui
        self.repo = repo
        self.dir = dir
        self.f = f
        self.web = web
        self.cl = None
    def run(self):
        # File names look like "cl.NNN"; strip the "cl." prefix.
        name = self.f[3:]
        loaded, err = LoadCL(self.ui, self.repo, name, web=self.web)
        if err != '':
            self.ui.warn("loading "+self.dir+self.f+": " + err + "\n")
            return
        self.cl = loaded
# Load all the CLs from this repository.
def LoadAllCL(ui, repo, web=True):
    """Return a dict mapping CL name -> CL for every local cl.* file,
    loading them concurrently with one thread per file."""
    dir = CodeReviewDir(ui, repo)
    result = {}
    names = [f for f in os.listdir(dir) if f.startswith('cl.')]
    if not names:
        return result
    if web:
        # Authenticate now, so we can use threads below
        MySend(None)
    workers = []
    for f in names:
        w = LoadCLThread(ui, repo, dir, f, web)
        w.start()
        workers.append(w)
    for w in workers:
        w.join()
        if w.cl:
            result[w.cl.name] = w.cl
    return result
# Find repository root. On error, ui.warn and return None
def RepoDir(ui, repo):
    """Return the repository's root path on the local file system,
    or None (after warning) for non-file repository URLs."""
    path = repo.url()
    if not path.startswith('file:/'):
        ui.warn("repository %s is not in local file system\n" % (path,))
        return None
    path = path[len('file:'):]
    if path.endswith('/'):
        path = path[:-1]
    return path
# Find (or make) code review directory. On error, ui.warn and return None
def CodeReviewDir(ui, repo):
    """Return the .hg/codereview/ directory path (with trailing slash),
    creating it if necessary.  Returns None (after ui.warn) on error."""
    dir = RepoDir(ui, repo)
    if dir == None:
        return None
    dir += '/.hg/codereview/'
    if not os.path.isdir(dir):
        try:
            # 0o700: the directory holds review state; owner-only access.
            os.mkdir(dir, 0o700)
        except OSError:
            # Narrowed from a bare except: only directory-creation
            # failures should be reported here.
            ui.warn('cannot mkdir %s: %s\n' % (dir, ExceptionDetail()))
            return None
    return dir
# Strip maximal common leading white space prefix from text
def StripCommon(text):
    """Remove the longest whitespace prefix shared by every non-blank
    line, drop leading blank lines, and collapse trailing blank lines."""
    prefix = None
    for line in text.split('\n'):
        line = line.rstrip()
        if not line:
            continue
        indent = line[:len(line) - len(line.lstrip())]
        if prefix is None:
            prefix = indent
        else:
            # Shrink prefix to the longest leading run shared with indent.
            n = 0
            limit = min(len(indent), len(prefix))
            while n < limit and indent[n] == prefix[n]:
                n += 1
            prefix = prefix[:n]
        if prefix == '':
            break
    if prefix is None:
        # No non-blank lines: nothing to strip.
        return text
    out = ''
    for line in text.split('\n'):
        line = line.rstrip()
        if line.startswith(prefix):
            line = line[len(prefix):]
        if line == '' and out == '':
            continue
        out += line + '\n'
    while out.endswith('\n\n'):
        out = out[:-1]
    return out
# Indent text with indent.
def Indent(text, indent):
    """Prefix every line of text with indent, newline-terminating each."""
    return ''.join(indent + line + '\n' for line in text.split('\n'))
# Return the first line of l
def line1(text):
    """Return text up to (not including) the first newline."""
    return text.partition('\n')[0]
# Template placed at the top of the editor buffer by CL.EditorText;
# ParseCL skips these '#' lines when reading the result back.
_change_prolog = """# Change list.
# Lines beginning with # are ignored.
# Multi-line values should be indented.
"""
#######################################################################
# Mercurial helper functions
# Return list of changed files in repository that match pats.
def ChangedFiles(ui, repo, pats, opts):
    """Return the sorted list of modified+added+removed files matching pats."""
    # Find list of files being operated on.
    matcher = cmdutil.match(repo, pats, opts)
    node1, node2 = cmdutil.revpair(repo, None)
    status = repo.status(node1, node2, matcher)
    changed = status[0] + status[1] + status[2]
    changed.sort()
    return changed
# Return list of files claimed by existing CLs
def TakenFiles(ui, repo):
    """Return the file names claimed by any existing local CL."""
    return list(Taken(ui, repo).keys())
def Taken(ui, repo):
    """Map each claimed file name to the local CL that owns it."""
    owners = {}
    for cl in LoadAllCL(ui, repo, web=False).values():
        for f in cl.files:
            owners[f] = cl
    return owners
# Return list of changed files that are not claimed by other CLs
def DefaultFiles(ui, repo, pats, opts):
    """Changed files matching pats that no existing CL already claims."""
    changed = ChangedFiles(ui, repo, pats, opts)
    return Sub(changed, TakenFiles(ui, repo))
def Sub(l1, l2):
    """Return the elements of l1 that are not in l2 (order preserved)."""
    return [x for x in l1 if x not in l2]
def Add(l1, l2):
    """Return the sorted merge of l1 plus the elements of l2 not
    already present in l1."""
    merged = l1 + [x for x in l2 if x not in l1]
    merged.sort()
    return merged
def Intersect(l1, l2):
    """Return the elements of l1 that also appear in l2 (order from l1)."""
    return [x for x in l1 if x in l2]
def getremote(ui, repo, opts):
    """Return a repository object for the configured 'default' remote."""
    # save $http_proxy; creating the HTTP repo object will
    # delete it in an attempt to "help"
    proxy = os.environ.get('http_proxy')
    source, _, _ = hg.parseurl(ui.expandpath("default"), None)
    other = hg.repository(cmdutil.remoteui(repo, opts), source)
    if proxy is not None:
        # restore the proxy setting clobbered above
        os.environ['http_proxy'] = proxy
    return other
def Incoming(ui, repo, opts):
    """Return the changesets present in the remote but not locally."""
    remote = getremote(ui, repo, opts)
    return repo.findcommonincoming(remote)[1]
def EditCL(ui, repo, cl):
    """Run the user's editor on cl's text form and update cl in place.

    Loops until the text parses and has a description, or the user
    declines to re-edit.  Returns '' on success or a message on abort.
    """
    s = cl.EditorText()
    while True:
        s = ui.edit(s, ui.username())
        clx, line, err = ParseCL(s, cl.name)
        if err != '':
            if ui.prompt("error parsing change list: line %d: %s\nre-edit (y/n)?" % (line, err), ["&yes", "&no"], "y") == "n":
                return "change list not modified"
            continue
        # Copy the parsed fields back into the caller's CL object.
        cl.desc = clx.desc;
        cl.reviewer = clx.reviewer
        cl.cc = clx.cc
        cl.files = clx.files
        if cl.desc == '':
            if ui.prompt("change list should have description\nre-edit (y/n)?", ["&yes", "&no"], "y") != "n":
                continue
        break
    return ""
# For use by submit, etc. (NOT by change)
# Get change list number or list of files from command line.
# If files are given, make a new change list.
def CommandLineCL(ui, repo, pats, opts):
    """Resolve the command line into a CL object.

    Returns (cl, err); err is '' on success.  Either pats names an
    existing CL number, or the matched changed files become a new CL
    (prompting for a description unless -m was given).
    """
    if len(pats) > 0 and GoodCLName(pats[0]):
        if len(pats) != 1:
            return None, "cannot specify change number and file names"
        if opts.get('message'):
            return None, "cannot use -m with existing CL"
        cl, err = LoadCL(ui, repo, pats[0], web=True)
    else:
        cl = CL("new")
        cl.local = True
        # Only unclaimed changed files go into the new CL.
        cl.files = Sub(ChangedFiles(ui, repo, pats, opts), TakenFiles(ui, repo))
        if not cl.files:
            return None, "no files changed"
    if opts.get('reviewer'):
        cl.reviewer = Add(cl.reviewer, SplitCommaSpace(opts.get('reviewer')))
    if opts.get('cc'):
        cl.cc = Add(cl.cc, SplitCommaSpace(opts.get('cc')))
    if cl.name == "new":
        if opts.get('message'):
            cl.desc = opts.get('message')
        else:
            err = EditCL(ui, repo, cl)
            if err != '':
                return None, err
    return cl, ""
# reposetup replaces cmdutil.match with this wrapper,
# which expands the syntax @clnumber to mean the files
# in that CL.
# Saved original cmdutil.match; set by reposetup, called by the wrapper.
original_match = None
def ReplacementForCmdutilMatch(repo, pats=None, opts=None, globbed=False, default='relpath'):
    """Wrapper for cmdutil.match that expands "@clnumber" patterns into
    the files of that CL before delegating to the saved original_match.

    Raises util.Abort for an invalid CL name or a CL that fails to load.
    """
    # Use None defaults instead of mutable [] / {} literals so a default
    # cannot be shared and mutated across calls.
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    taken = []
    files = []
    for p in pats:
        if p.startswith('@'):
            taken.append(p)
            clname = p[1:]
            if not GoodCLName(clname):
                raise util.Abort("invalid CL name " + clname)
            cl, err = LoadCL(repo.ui, repo, clname, web=False)
            if err != '':
                raise util.Abort("loading CL " + clname + ": " + err)
            files = Add(files, cl.files)
    # Replace the @cl patterns with explicit path: patterns.
    pats = Sub(pats, taken) + ['path:'+f for f in files]
    return original_match(repo, pats=pats, opts=opts, globbed=globbed, default=default)
#######################################################################
# Mercurial commands
# until done debugging
server = "localhost:1"    # code review server host:port (debug value)
# server = "codereview.appspot.com"
# Base URL for issue links; filled in by RietveldSetup.
server_url_base = None
# every command must take a ui and a repo as arguments.
# opts is a dict where you can find other command line flags
#
# Other parameters are taken in order from items on the command line that
# don't start with a dash. If no default value is given in the parameter list,
# they are required.
#
def change(ui, repo, *pats, **opts):
    """create or edit a change list

    Create or edit a change list.
    A change list is a group of files to be reviewed and submitted together,
    plus a textual description of the change.
    Change lists are referred to by simple alphanumeric names.
    Changes must be reviewed before they can be submitted.
    In the absence of options, the change command opens the
    change list for editing in the default editor.
    """
    # dirty maps CL objects that need re-writing to True.
    dirty = {}
    if len(pats) > 0 and GoodCLName(pats[0]):
        name = pats[0]
        if len(pats) != 1:
            return "cannot specify CL name and file patterns"
        pats = pats[1:]
        cl, err = LoadCL(ui, repo, name, web=True)
        if err != '':
            return err
        if not cl.local and (opts["stdin"] or not opts["stdout"]):
            return "cannot change non-local CL " + name
    else:
        # No CL number given: start a new CL from the unclaimed changes.
        name = "new"
        cl = CL("new")
        dirty[cl] = True
        files = ChangedFiles(ui, repo, pats, opts)
        taken = TakenFiles(ui, repo)
        files = Sub(files, taken)
    if opts["delete"]:
        if name == "new":
            return "cannot use -d with file patterns"
        if opts["stdin"] or opts["stdout"]:
            return "cannot use -d with -i or -o"
        if not cl.local:
            return "cannot change non-local CL " + name
        # Abandon on the server, then remove the local record.
        PostMessage(cl.name, "*** Abandoned ***", send_mail="checked")
        EditDesc(cl.name, closed="checked")
        cl.Delete(ui, repo)
        return
    if opts["stdin"]:
        s = sys.stdin.read()
        clx, line, err = ParseCL(s, name)
        if err != '':
            return "error parsing change list: line %d: %s" % (line, err)
        if clx.desc is not None:
            cl.desc = clx.desc;
            dirty[cl] = True
        if clx.reviewer is not None:
            cl.reviewer = clx.reviewer
            dirty[cl] = True
        if clx.cc is not None:
            cl.cc = clx.cc
            dirty[cl] = True
        if clx.files is not None:
            cl.files = clx.files
            dirty[cl] = True
    if not opts["stdin"] and not opts["stdout"]:
        if name == "new":
            cl.files = files
        err = EditCL(ui, repo, cl)
        if err != "":
            return err
        dirty[cl] = True
    for d, _ in dirty.items():
        d.Flush(ui, repo)
    if opts["stdout"]:
        ui.write(cl.EditorText())
    elif name == "new":
        if ui.quiet:
            ui.write(cl.name)
        else:
            ui.write("CL created: " + cl.url + "\n")
    return
def codereview_login(ui, repo, **opts):
    """log in to code review server

    Logs in to the code review server, saving a cookie in
    a file in your home directory.
    """
    # MySend(None) authenticates without issuing a real request.
    MySend(None)
# NOTE: shadows the Python 2 builtin file(); intentional here, since hg
# dispatches commands by the function name registered in cmdtable.
def file(ui, repo, clname, pat, *pats, **opts):
    """assign files to or remove files from a change list

    Assign files to or (with -d) remove files from a change list.
    The -d option only removes files from the change list.
    It does not edit them or remove them from the repository.
    """
    pats = tuple([pat] + list(pats))
    if not GoodCLName(clname):
        return "invalid CL name " + clname
    # dirty maps CL objects that need re-writing to True.
    dirty = {}
    cl, err = LoadCL(ui, repo, clname, web=False)
    if err != '':
        return err
    if not cl.local:
        return "cannot change non-local CL " + clname
    files = ChangedFiles(ui, repo, pats, opts)
    if opts["delete"]:
        oldfiles = Intersect(files, cl.files)
        if oldfiles:
            if not ui.quiet:
                ui.status("# Removing files from CL. To undo:\n")
                ui.status("# cd %s\n" % (repo.root))
                for f in oldfiles:
                    ui.status("# hg file %s %s\n" % (cl.name, f))
            cl.files = Sub(cl.files, oldfiles)
            cl.Flush(ui, repo)
        else:
            ui.status("no such files in CL")
        return
    if not files:
        return "no such modified files"
    files = Sub(files, cl.files)
    taken = Taken(ui, repo)
    warned = False
    # Steal files claimed by other CLs, telling the user how to undo.
    for f in files:
        if f in taken:
            if not warned and not ui.quiet:
                ui.status("# Taking files from other CLs. To undo:\n")
                ui.status("# cd %s\n" % (repo.root))
                warned = True
            ocl = taken[f]
            if not ui.quiet:
                ui.status("# hg file %s %s\n" % (ocl.name, f))
            if ocl not in dirty:
                ocl.files = Sub(ocl.files, files)
                dirty[ocl] = True
    cl.files = Add(cl.files, files)
    dirty[cl] = True
    for d, _ in dirty.items():
        d.Flush(ui, repo)
    return
def mail(ui, repo, *pats, **opts):
    """Upload the CL and mail a review request to its reviewers."""
    cl, err = CommandLineCL(ui, repo, pats, opts)
    if err != "":
        return err
    if not cl.reviewer:
        return "no reviewers listed in CL"
    cl.Upload(ui, repo)
    pmsg = ("Hello " + JoinComma(cl.reviewer) + ",\n"
        + "\n"
        + "I'd like you to review the following change.\n")
    PostMessage(cl.name, pmsg, send_mail="checked", subject=cl.Subject())
def nocommit(ui, repo, *pats, **opts):
    # Installed over 'hg commit' by uisetup/cmdtable so users must go
    # through the codereview submit flow instead.
    return "The codereview extension is enabled; do not use commit."
def pending(ui, repo, *pats, **opts):
    """List pending CLs, then any changed files not assigned to a CL."""
    all_cls = LoadAllCL(ui, repo, web=True)
    for name in sorted(all_cls.keys()):
        ui.write(all_cls[name].PendingText() + "\n")
    files = DefaultFiles(ui, repo, [], opts)
    if files:
        report = "Changed files not in any CL:\n"
        for f in files:
            report += "\t" + f + "\n"
        ui.write(report)
def reposetup(ui, repo):
    """Mercurial extension hook: install the @clnumber pattern wrapper
    over cmdutil.match and initialize the Rietveld client state."""
    global original_match
    original_match = cmdutil.match
    cmdutil.match = ReplacementForCmdutilMatch
    RietveldSetup(ui, repo)
def CheckContributor(ui, repo):
    """Verify that the configured ui.username appears (as a full line)
    in the repository's CONTRIBUTORS file; raise util.Abort otherwise."""
    user = ui.config("ui", "username")
    if not user:
        raise util.Abort("[ui] username is not configured in .hgrc")
    path = repo.root + '/CONTRIBUTORS'
    try:
        f = open(path, 'r')
    except EnvironmentError:
        # Narrowed from a bare except: only I/O failures opening the
        # file should be reported this way.
        raise util.Abort("cannot open %s: %s" % (path, ExceptionDetail()))
    try:
        # Hoist the rstrip of the configured user out of the loop.
        want = user.rstrip()
        for line in f:
            if line.rstrip() == want:
                return
    finally:
        # The original leaked the file handle; always close it.
        f.close()
    raise util.Abort("cannot find %s in CONTRIBUTORS" % (user,))
def submit(ui, repo, *pats, **opts):
    """submit change to remote repository

    Submits change to remote repository.
    Bails out if the local repository is not in sync with the remote one.
    """
    CheckContributor(ui, repo)
    repo.ui.quiet = True
    if not opts["no_incoming"] and Incoming(ui, repo, opts):
        return "local repository out of date; must sync before submit"
    cl, err = CommandLineCL(ui, repo, pats, opts)
    if err != "":
        return err
    # Assemble the R=/TBR=/CC= trailer block for the commit message.
    about = ""
    if cl.reviewer:
        about += "R=" + JoinComma([CutDomain(s) for s in cl.reviewer]) + "\n"
    if opts.get('tbr'):
        tbr = SplitCommaSpace(opts.get('tbr'))
        cl.reviewer = Add(cl.reviewer, tbr)
        about += "TBR=" + JoinComma([CutDomain(s) for s in tbr]) + "\n"
    if cl.cc:
        about += "CC=" + JoinComma([CutDomain(s) for s in cl.cc]) + "\n"
    if not cl.reviewer:
        return "no reviewers listed in CL"
    if not cl.local:
        return "cannot submit non-local CL"
    # upload, to sync current patch and also get change number if CL is new.
    cl.Upload(ui, repo)
    about += "%s%s\n" % (server_url_base, cl.name)
    # submit changes locally
    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)
    opts['message'] = cl.desc.rstrip() + "\n\n" + about
    m = match.exact(repo.root, repo.getcwd(), cl.files)
    node = repo.commit(opts['message'], opts.get('user'), opts.get('date'), m)
    if not node:
        return "nothing changed"
    # If the commit created a new head the repository was out of date;
    # roll the commit back rather than push a fork.
    log = repo.changelog
    rev = log.rev(node)
    parents = log.parentrevs(rev)
    if (rev-1 not in parents and
        (parents == (nullrev, nullrev) or
        len(log.heads(log.node(parents[0]))) > 1 and
        (parents[1] == nullrev or len(log.heads(log.node(parents[1]))) > 1))):
        repo.rollback()
        return "local repository out of date (created new head); must sync before submit"
    # push changes to remote.
    # if it works, we're committed.
    # if not, roll back
    other = getremote(ui, repo, opts)
    r = repo.push(other, False, None)
    if r == 0:
        repo.rollback()
        return "local repository out of date; must sync before submit"
    # we're committed. upload final patch, close review, add commit message
    changeURL = short(node)
    url = other.url()
    # Rewrite googlecode.com push targets into a browsable source link.
    m = re.match("^https?://([^@/]+@)?([^.]+)\.googlecode\.com/hg/", url)
    if m:
        changeURL = "http://code.google.com/p/%s/source/detail?r=%s" % (m.group(2), changeURL)
    else:
        print >>sys.stderr, "URL: ", url
    pmsg = "*** Submitted as " + changeURL + " ***\n\n" + opts['message']
    PostMessage(cl.name, pmsg, send_mail="checked")
    EditDesc(cl.name, closed="checked")
    cl.Delete(ui, repo)
def sync(ui, repo, **opts):
    """synchronize with remote repository

    Incorporates recent changes from the remote repository
    into the local repository.
    """
    # Filter Mercurial's noisy progress messages (see sync_note).
    ui.status = sync_note
    ui.note = sync_note
    other = getremote(ui, repo, opts)
    modheads = repo.pull(other)
    err = commands.postincoming(ui, repo, modheads, True, "tip")
    if err:
        return err
    sync_changes(ui, repo)
def sync_note(msg):
    """Replacement for ui.status/ui.note during sync: drop the two
    noisy progress messages, pass everything else through to stdout."""
    if msg in ('resolving manifests\n', 'searching for changes\n'):
        return
    sys.stdout.write(msg)
def sync_changes(ui, repo):
    # Placeholder hook for post-sync CL bookkeeping; currently a no-op.
    pass
def uisetup(ui):
    # Replace the built-in commit command with nocommit so users must go
    # through the codereview submit flow.
    if "^commit|ci" in commands.table:
        commands.table["^commit|ci"] = (nocommit, [], "")
def upload(ui, repo, name, **opts):
    """upload diffs to the code review server

    Uploads the current diff for the named local CL and prints the
    issue URL.
    """
    repo.ui.quiet = True
    cl, err = LoadCL(ui, repo, name, web=True)
    if err != "":
        return err
    if not cl.local:
        return "cannot upload non-local change"
    cl.Upload(ui, repo)
    print "%s%s\n" % (server_url_base, cl.name)
    return
# Option table shared by the commands (mail, submit) that accept
# review metadata flags.
review_opts = [
    ('r', 'reviewer', '', 'add reviewer'),
    ('', 'cc', '', 'add cc'),
    ('', 'tbr', '', 'add future reviewer'),
    ('m', 'message', '', 'change description (for new change)'),
]
# Mercurial command table: maps command name -> (function, options, synopsis).
cmdtable = {
    # The ^ means to show this command in the help text that
    # is printed when running hg with no arguments.
    "^change": (
        change,
        [
            ('d', 'delete', None, 'delete existing change list'),
            ('i', 'stdin', None, 'read change list from standard input'),
            ('o', 'stdout', None, 'print change list to standard output'),
        ],
        "[-i] [-o] change# or FILE ..."
    ),
    "codereview-login": (
        codereview_login,
        [],
        "",
    ),
    # Overrides the built-in commit; see nocommit and uisetup.
    "commit|ci": (
        nocommit,
        [],
        "",
    ),
    "^file": (
        file,
        [
            ('d', 'delete', None, 'delete files from change list (but not repository)'),
        ],
        "[-d] change# FILE ..."
    ),
    "^pending|p": (
        pending,
        [],
        "[FILE ...]"
    ),
    "^mail": (
        mail,
        review_opts + [
        ] + commands.walkopts,
        "[-r reviewer] [--cc cc] [change# | file ...]"
    ),
    "^submit": (
        submit,
        review_opts + [
            ('', 'no_incoming', None, 'disable initial incoming check (for testing)'),
        ] + commands.walkopts + commands.commitopts + commands.commitopts2,
        "[-r reviewer] [--cc cc] [change# | file ...]"
    ),
    "^sync": (
        sync,
        [],
        "",
    ),
    "^upload": (
        upload,
        [],
        "change#"
    ),
}
#######################################################################
# Wrappers around upload.py for interacting with Rietveld
# Placeholder diff sent when creating an issue with no real patch yet;
# the /upload endpoint requires some diff content (see CreateIssue).
emptydiff = """Index: ~rietveld~placeholder~
===================================================================
diff --git a/~rietveld~placeholder~ b/~rietveld~placeholder~
new file mode 100644
"""
# HTML form parser
# HTML form parser
class FormParser(HTMLParser):
    """Collects the form fields of a Rietveld HTML page into self.map,
    taking <input> values and accumulating <textarea> bodies."""
    def __init__(self):
        self.map = {}
        self.curtag = None   # name of the textarea being accumulated
        self.curdata = None  # text collected so far for that textarea
        HTMLParser.__init__(self)
    def handle_starttag(self, tag, attrs):
        if tag == "input":
            key = None
            value = ''
            for attr_name, attr_value in attrs:
                if attr_name == 'name':
                    key = attr_value
                elif attr_name == 'value':
                    value = attr_value
            if key is not None:
                self.map[key] = value
        if tag == "textarea":
            key = None
            for attr_name, attr_value in attrs:
                if attr_name == 'name':
                    key = attr_value
            if key is not None:
                self.curtag = key
                self.curdata = ''
    def handle_endtag(self, tag):
        if tag == "textarea" and self.curtag is not None:
            self.map[self.curtag] = self.curdata
            self.curtag = None
            self.curdata = None
    def handle_charref(self, name):
        # Numeric character reference, e.g. &#65;
        self.handle_data(unichr(int(name)))
    def handle_entityref(self, name):
        # Named entity, e.g. &amp;; unknown names pass through verbatim.
        import htmlentitydefs
        if name in htmlentitydefs.entitydefs:
            self.handle_data(htmlentitydefs.entitydefs[name])
        else:
            self.handle_data("&" + name + ";")
    def handle_data(self, data):
        if self.curdata is not None:
            self.curdata += data
# Like upload.py Send but only authenticates when the
# redirect is to www.google.com/accounts. This keeps
# unnecessary redirects from happening during testing.
def MySend(request_path, payload=None,
        content_type="application/octet-stream",
        timeout=None,
        **kwargs):
    """Sends an RPC and returns the response.

    Args:
        request_path: The path to send the request to, eg /api/appversion/create.
            Passing None authenticates the shared rpc object without
            issuing a request (used by codereview_login / LoadAllCL).
        payload: The body of the request, or None to send an empty request.
        content_type: The Content-Type header to use.
        timeout: timeout in seconds; default None i.e. no timeout.
            (Note: for large requests on OS X, the timeout doesn't work right.)
        kwargs: Any keyword arguments are converted into query string parameters.
    Returns:
        The response body, as a string.
    """
    # TODO: Don't require authentication. Let the server say
    # whether it is necessary.
    global rpc
    if rpc == None:
        # Lazily create the shared RPC client on first use.
        rpc = GetRpcServer(upload_options)
    self = rpc
    if not self.authenticated:
        self._Authenticate()
    if request_path is None:
        return
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        tries = 0
        while True:
            tries += 1
            args = dict(kwargs)
            url = "http://%s%s" % (self.host, request_path)
            if args:
                url += "?" + urllib.urlencode(args)
            req = self._CreateRequest(url=url, data=payload)
            req.add_header("Content-Type", content_type)
            try:
                f = self.opener.open(req)
                response = f.read()
                f.close()
                return response
            except urllib2.HTTPError, e:
                if tries > 3:
                    raise
                elif e.code == 401:
                    # Credentials expired: re-authenticate and retry.
                    self._Authenticate()
                elif e.code == 302:
                    # Only follow auth redirects to Google's login service.
                    loc = e.info()["location"]
                    if not loc.startswith('https://www.google.com/a') or loc.find('/ServiceLogin') < 0:
                        return ''
                    self._Authenticate()
                else:
                    raise
    finally:
        # Always restore the process-wide default socket timeout.
        socket.setdefaulttimeout(old_timeout)
def GetForm(url):
    """Fetch url and return its form fields as a dict, with CRLF line
    endings normalized to LF."""
    parser = FormParser()
    parser.feed(MySend(url))
    parser.close()
    return dict((k, v.replace("\r\n", "\n")) for k, v in parser.map.items())
def GetSettings(issue):
    """Return the form fields for an issue, falling back to the publish
    page when the edit page yields nothing usable."""
    fields = GetForm("/" + issue + "/edit")
    if not fields or 'reviewers' not in fields:
        fields = GetForm("/" + issue + "/publish")
    return fields
def CreateIssue(subject, desc):
    """Create a new issue on the server with a placeholder diff.

    Exits the process (status 2) if the server reports an error.
    """
    form_fields = [
        ("content_upload", "1"),
        # ("user", upload_options.email),
        ("reviewers", ''),
        ("cc", ''),
        ("description", desc),
        ("base_hashes", ""),
        ("subject", subject),
    ]
    # /upload requires some diff content; use the placeholder.
    uploaded_diff_file = [
        ("data", "data.diff", emptydiff),
    ]
    ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
    response = MySend("/upload", body, content_type=ctype)
    if response != "":
        print >>sys.stderr, "Error creating issue:\n" + response
        sys.exit(2)
def EditDesc(issue, subject=None, desc=None, reviewers=None, cc=None, closed=None):
    """Update issue metadata on the server via its /edit form.

    Only the non-None keyword arguments are changed; the rest keep the
    values fetched from the existing form.  Exits (status 2) on error.
    """
    form_fields = GetForm("/" + issue + "/edit")
    if subject is not None:
        form_fields['subject'] = subject
    if desc is not None:
        form_fields['description'] = desc
    if reviewers is not None:
        form_fields['reviewers'] = reviewers
    if cc is not None:
        form_fields['cc'] = cc
    if closed is not None:
        form_fields['closed'] = closed
    ctype, body = EncodeMultipartFormData(form_fields.items(), [])
    response = MySend("/" + issue + "/edit", body, content_type=ctype)
    if response != "":
        print >>sys.stderr, "Error editing description:\n" + "Sent form: \n", form_fields, "\n", response
        sys.exit(2)
def PostMessage(issue, message, reviewers=None, cc=None, send_mail=None, subject=None):
    """Post a message to the issue via its /publish form, optionally
    updating reviewers/cc/subject and triggering mail.

    Exits the process (status 2) if the server reports an error.
    """
    form_fields = GetForm("/" + issue + "/publish")
    if reviewers is not None:
        form_fields['reviewers'] = reviewers
    if cc is not None:
        form_fields['cc'] = cc
    if send_mail is not None:
        form_fields['send_mail'] = send_mail
    if subject is not None:
        form_fields['subject'] = subject
    form_fields['message'] = message
    # message_only: post a comment without uploading a new patch set.
    form_fields['message_only'] = '1'
    ctype, body = EncodeMultipartFormData(form_fields.items(), [])
    response = MySend("/" + issue + "/publish", body, content_type=ctype)
    if response != "":
        print response
        sys.exit(2)
# Plain attribute bag used as the options object handed to the embedded
# upload.py code (see RietveldSetup).
class opt(object):
    pass
def RietveldSetup(ui, repo):
    """Initialize the module-level Rietveld client state from the
    [codereview] section of the Mercurial configuration."""
    global upload_options, rpc, server, server_url_base, force_google_account, verbosity
    # TODO(rsc): If the repository config has no codereview section,
    # do not enable the extension. This allows users to
    # put the extension in their global .hgrc but only
    # enable it for some repositories.
    # if not ui.has_section("codereview"):
    # cmdtable = {}
    # return
    if not ui.verbose:
        verbosity = 0
    # Config options.
    x = ui.config("codereview", "server")
    if x is not None:
        server = x
    # TODO(rsc): Take from ui.username?
    email = None
    x = ui.config("codereview", "email")
    if x is not None:
        email = x
    cc = None
    x = ui.config("codereview", "cc")
    if x is not None:
        cc = x
    server_url_base = "http://" + server + "/"
    # TODO(rsc): Remove after release
    # NOTE: deliberately overrides the value computed above for now.
    server_url_base = "http://go/go-review/"
    testing = ui.config("codereview", "testing")
    force_google_account = ui.configbool("codereview", "force_google_account", False)
    # Build the options object consumed by the embedded upload.py code.
    upload_options = opt()
    upload_options.email = email
    upload_options.host = None
    upload_options.verbose = 0
    upload_options.description = None
    upload_options.description_file = None
    upload_options.reviewers = None
    upload_options.cc = cc
    upload_options.message = None
    upload_options.issue = None
    upload_options.download_base = False
    upload_options.revision = None
    upload_options.send_mail = False
    upload_options.vcs = None
    upload_options.server = server
    upload_options.save_cookies = True
    if testing:
        upload_options.save_cookies = False
        upload_options.email = "test@example.com"
    # Shared RPC client; created lazily by MySend.
    rpc = None
#######################################################################
# We keep a full copy of upload.py here to avoid import path hell.
# It would be nice if hg added the hg repository root
# to the default PYTHONPATH.
# Edit .+2,<hget http://codereview.appspot.com/static/upload.py
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import cookielib
import getpass
import logging
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
# The md5 module was deprecated in Python 2.5.
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
import readline
except ImportError:
pass
# The logging verbosity:
# 0: Errors only.
# 1: Status messages.
# 2: Info logs.
# 3: Debug logs.
verbosity = 1
# Max size of patch or base file.
# Diffs larger than this are uploaded as separate patches (see CL.Upload).
MAX_UPLOAD_SIZE = 900 * 1024
# Constants for version control names. Used by GuessVCSName.
VCS_GIT = "Git"
VCS_MERCURIAL = "Mercurial"
VCS_SUBVERSION = "Subversion"
VCS_UNKNOWN = "Unknown"
# whitelist for non-binary filetypes which do not start with "text/"
# .mm (Objective-C) shows up as application/x-freemind on my Linux box.
TEXT_MIMETYPES = ['application/javascript', 'application/x-javascript',
    'application/x-freemind']
# Maps user-supplied --vcs spellings to the canonical VCS_* names.
VCS_ABBREVIATIONS = {
    VCS_MERCURIAL.lower(): VCS_MERCURIAL,
    "hg": VCS_MERCURIAL,
    VCS_SUBVERSION.lower(): VCS_SUBVERSION,
    "svn": VCS_SUBVERSION,
    VCS_GIT.lower(): VCS_GIT,
}
def GetEmail(prompt):
    """Prompts the user for their email address and returns it.

    The last used email address is saved to a file and offered up as a suggestion
    to the user. If the user presses enter without typing in anything the last
    used email address is used. If the user enters a new address, it is saved
    for next time we prompt.
    """
    last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
    last_email = ""
    if os.path.exists(last_email_file_name):
        try:
            last_email_file = open(last_email_file_name, "r")
            last_email = last_email_file.readline().strip("\n")
            last_email_file.close()
            prompt += " [%s]" % last_email
        except IOError, e:
            # Best effort: a missing/unreadable cache just means no default.
            pass
    email = raw_input(prompt + ": ").strip()
    if email:
        try:
            last_email_file = open(last_email_file_name, "w")
            last_email_file.write(email)
            last_email_file.close()
        except IOError, e:
            # Best effort: failing to save the address is not fatal.
            pass
    else:
        email = last_email
    return email
def StatusUpdate(msg):
    """Print a status message to stdout.

    If 'verbosity' is greater than 0, print the message.

    Args:
        msg: The string to print.
    """
    if verbosity > 0:
        print msg
def ErrorExit(msg):
    """Print an error message to stderr and exit."""
    print >>sys.stderr, msg
    sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
    """Raised to indicate there was an error authenticating with ClientLogin."""
    def __init__(self, url, code, msg, headers, args):
        urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
        self.args = args
        # ClientLogin reports the failure kind under the "Error" key.
        self.reason = args["Error"]
class AbstractRpcServer(object):
"""Provides a common interface for a simple RPC server."""
    def __init__(self, host, auth_function, host_override=None, extra_headers={},
            save_cookies=False):
        """Creates a new HttpRpcServer.

        Args:
            host: The host to send requests to.
            auth_function: A function that takes no arguments and returns an
                (email, password) tuple when called. Will be called if authentication
                is required.
            host_override: The host header to send to the server (defaults to host).
            extra_headers: A dict of extra headers to append to every request.
            save_cookies: If True, save the authentication cookies to local disk.
                If False, use an in-memory cookiejar instead. Subclasses must
                implement this functionality. Defaults to False.
        """
        self.host = host
        self.host_override = host_override
        self.auth_function = auth_function
        self.authenticated = False
        self.extra_headers = extra_headers
        self.save_cookies = save_cookies
        # _GetOpener is supplied by the concrete subclass.
        self.opener = self._GetOpener()
        if self.host_override:
            logging.info("Server: %s; Host: %s", self.host, self.host_override)
        else:
            logging.info("Server: %s", self.host)
    def _GetOpener(self):
        """Returns an OpenerDirector for making HTTP requests.

        Returns:
            A urllib2.OpenerDirector object.
        """
        # Abstract: concrete subclasses choose the cookie policy.
        raise NotImplementedError()
    def _CreateRequest(self, url, data=None):
        """Creates a new urllib request."""
        logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
        req = urllib2.Request(url, data=data)
        if self.host_override:
            req.add_header("Host", self.host_override)
        # Attach any caller-supplied headers to every request.
        for key, value in self.extra_headers.iteritems():
            req.add_header(key, value)
        return req
def _GetAuthToken(self, email, password):
"""Uses ClientLogin to authenticate the user, returning an auth token.
Args:
email: The user's email address
password: The user's password
Raises:
ClientLoginError: If there was an error authenticating with ClientLogin.
HTTPError: If there was some other form of HTTP error.
Returns:
The authentication token returned by ClientLogin.
"""
account_type = "GOOGLE"
if self.host.endswith(".google.com") and not force_google_account:
# Needed for use inside Google.
account_type = "HOSTED"
req = self._CreateRequest(
url="https://www.google.com/accounts/ClientLogin",
data=urllib.urlencode({
"Email": email,
"Passwd": password,
"service": "ah",
"source": "rietveld-codereview-upload",
"accountType": account_type,
}),
)
try:
response = self.opener.open(req)
response_body = response.read()
response_dict = dict(x.split("=")
for x in response_body.split("\n") if x)
return response_dict["Auth"]
except urllib2.HTTPError, e:
if e.code == 403:
body = e.read()
response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
raise ClientLoginError(req.get_full_url(), e.code, e.msg,
e.headers, response_dict)
else:
raise
def _GetAuthCookie(self, auth_token):
"""Fetches authentication cookies for an authentication token.
Args:
auth_token: The authentication token returned by ClientLogin.
Raises:
HTTPError: If there was an error fetching the authentication cookies.
"""
# This is a dummy value to allow us to identify when we're successful.
continue_location = "http://localhost/"
args = {"continue": continue_location, "auth": auth_token}
req = self._CreateRequest("http://%s/_ah/login?%s" %
(self.host, urllib.urlencode(args)))
try:
response = self.opener.open(req)
except urllib2.HTTPError, e:
response = e
if (response.code != 302 or
response.info()["location"] != continue_location):
raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
response.headers, response.fp)
self.authenticated = True
def _Authenticate(self):
"""Authenticates the user.
The authentication process works as follows:
1) We get a username and password from the user
2) We use ClientLogin to obtain an AUTH token for the user
(see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
3) We pass the auth token to /_ah/login on the server to obtain an
authentication cookie. If login was successful, it tries to redirect
us to the URL we provided.
If we attempt to access the upload API without first obtaining an
authentication cookie, it returns a 401 response (or a 302) and
directs us to authenticate ourselves with ClientLogin.
"""
for i in range(3):
credentials = self.auth_function()
try:
auth_token = self._GetAuthToken(credentials[0], credentials[1])
except ClientLoginError, e:
if e.reason == "BadAuthentication":
print >>sys.stderr, "Invalid username or password."
continue
if e.reason == "CaptchaRequired":
print >>sys.stderr, (
"Please go to\n"
"https://www.google.com/accounts/DisplayUnlockCaptcha\n"
"and verify you are a human. Then try again.")
break
if e.reason == "NotVerified":
print >>sys.stderr, "Account not verified."
break
if e.reason == "TermsNotAgreed":
print >>sys.stderr, "User has not agreed to TOS."
break
if e.reason == "AccountDeleted":
print >>sys.stderr, "The user account has been deleted."
break
if e.reason == "AccountDisabled":
print >>sys.stderr, "The user account has been disabled."
break
if e.reason == "ServiceDisabled":
print >>sys.stderr, ("The user's access to the service has been "
"disabled.")
break
if e.reason == "ServiceUnavailable":
print >>sys.stderr, "The service is not available; try again later."
break
raise
self._GetAuthCookie(auth_token)
return
def Send(self, request_path, payload=None,
content_type="application/octet-stream",
timeout=None,
**kwargs):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
kwargs: Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string.
"""
# TODO: Don't require authentication. Let the server say
# whether it is necessary.
if not self.authenticated:
self._Authenticate()
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
tries = 0
while True:
tries += 1
args = dict(kwargs)
url = "http://%s%s" % (self.host, request_path)
if args:
url += "?" + urllib.urlencode(args)
req = self._CreateRequest(url=url, data=payload)
req.add_header("Content-Type", content_type)
try:
f = self.opener.open(req)
response = f.read()
f.close()
return response
except urllib2.HTTPError, e:
if tries > 3:
raise
elif e.code == 401 or e.code == 302:
self._Authenticate()
else:
raise
finally:
socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
"""Provides a simplified RPC-style interface for HTTP requests."""
def _Authenticate(self):
"""Save the cookie jar after authentication."""
super(HttpRpcServer, self)._Authenticate()
if self.save_cookies:
StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
self.cookie_jar.save()
def _GetOpener(self):
"""Returns an OpenerDirector that supports cookies and ignores redirects.
Returns:
A urllib2.OpenerDirector object.
"""
opener = urllib2.OpenerDirector()
opener.add_handler(urllib2.ProxyHandler())
opener.add_handler(urllib2.UnknownHandler())
opener.add_handler(urllib2.HTTPHandler())
opener.add_handler(urllib2.HTTPDefaultErrorHandler())
opener.add_handler(urllib2.HTTPSHandler())
opener.add_handler(urllib2.HTTPErrorProcessor())
if self.save_cookies:
self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies_" + server)
self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
if os.path.exists(self.cookie_file):
try:
self.cookie_jar.load()
self.authenticated = True
StatusUpdate("Loaded authentication cookies from %s" %
self.cookie_file)
except (cookielib.LoadError, IOError):
# Failed to load cookies - just ignore them.
pass
else:
# Create an empty cookie file with mode 600
fd = os.open(self.cookie_file, os.O_CREAT, 0600)
os.close(fd)
# Always chmod the cookie file
os.chmod(self.cookie_file, 0600)
else:
# Don't save cookies across runs of update.py.
self.cookie_jar = cookielib.CookieJar()
opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
return opener
# Module-level command-line option definitions for the upload script; the
# parsed values surface as attributes of the "options" object used by
# GetRpcServer and the VCS classes below.  "-- diff_options" after the
# separator are passed straight through to the underlying diff command.
parser = optparse.OptionParser(usage="%prog [options] [-- diff_options]")
parser.add_option("-y", "--assume_yes", action="store_true",
                  dest="assume_yes", default=False,
                  help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
                 dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
                 dest="verbose", default=1,
                 help="Print info level logs (default).")
group.add_option("--noisy", action="store_const", const=3,
                 dest="verbose", help="Print all logs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
                 default="codereview.appspot.com",
                 metavar="SERVER",
                 help=("The server to upload to. The format is host[:port]. "
                       "Defaults to '%default'."))
group.add_option("-e", "--email", action="store", dest="email",
                 metavar="EMAIL", default=None,
                 help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
                 metavar="HOST", default=None,
                 help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
                 dest="save_cookies", default=True,
                 help="Do not save authentication cookies to local disk.")
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-d", "--description", action="store", dest="description",
                 metavar="DESCRIPTION", default=None,
                 help="Optional description when creating an issue.")
group.add_option("-f", "--description_file", action="store",
                 dest="description_file", metavar="DESCRIPTION_FILE",
                 default=None,
                 help="Optional path of a file that contains "
                      "the description when creating an issue.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
                 metavar="REVIEWERS", default=None,
                 help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
                 metavar="CC", default=None,
                 help="Add CC (comma separated email addresses).")
group.add_option("--private", action="store_true", dest="private",
                 default=False,
                 help="Make the issue restricted to reviewers and those CCed")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-m", "--message", action="store", dest="message",
                 metavar="MESSAGE", default=None,
                 help="A message to identify the patch. "
                      "Will prompt if omitted.")
group.add_option("-i", "--issue", type="int", action="store",
                 metavar="ISSUE", default=None,
                 help="Issue number to which to add. Defaults to new issue.")
group.add_option("--download_base", action="store_true",
                 dest="download_base", default=False,
                 help="Base files will be downloaded by the server "
                      "(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
                 metavar="REV", default=None,
                 help="Branch/tree/revision to diff against (used by DVCS).")
group.add_option("--send_mail", action="store_true",
                 dest="send_mail", default=False,
                 help="Send notification email to reviewers.")
group.add_option("--vcs", action="store", dest="vcs",
                 metavar="VCS", default=None,
                 help=("Version control system (optional, usually upload.py "
                       "already guesses the right VCS)."))
def GetRpcServer(options):
  """Returns an instance of an AbstractRpcServer.

  Args:
    options: Parsed command-line options; this reads options.server,
      options.host, options.email and options.save_cookies.

  Returns:
    A new AbstractRpcServer, on which RPC calls can be made.
  """
  rpc_server_class = HttpRpcServer

  def GetUserCredentials():
    """Prompts the user for a username and password."""
    email = options.email
    if email is None:
      email = GetEmail("Email (login for uploading to %s)" % options.server)
    password = getpass.getpass("Password for %s: " % email)
    return (email, password)

  # If this is the dev_appserver, use fake authentication.
  host = (options.host or options.server).lower()
  if host == "localhost" or host.startswith("localhost:"):
    email = options.email
    if email is None:
      email = "test@example.com"
    # Pass email as a lazy %-style argument instead of pre-formatting the
    # message, so formatting only happens when the record is emitted.
    logging.info("Using debug user %s. Override with --email", email)
    server = rpc_server_class(
        options.server,
        lambda: (email, "password"),
        host_override=options.host,
        extra_headers={"Cookie":
                       'dev_appserver_login="%s:False"' % email},
        save_cookies=options.save_cookies)
    # Don't try to talk to ClientLogin.
    server.authenticated = True
    return server

  return rpc_server_class(options.server, GetUserCredentials,
                          host_override=options.host,
                          save_cookies=options.save_cookies)
def EncodeMultipartFormData(fields, files):
  """Encode form fields for multipart/form-data.

  Args:
    fields: A sequence of (name, value) elements for regular form fields.
    files: A sequence of (name, filename, value) elements for data to be
      uploaded as files.
  Returns:
    (content_type, body) ready for httplib.HTTP instance.
  Source:
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
  """
  BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
  CRLF = '\r\n'
  parts = []
  for (name, value) in fields:
    parts.extend([
        '--' + BOUNDARY,
        'Content-Disposition: form-data; name="%s"' % name,
        '',
        value,
    ])
  for (name, filename, value) in files:
    parts.extend([
        '--' + BOUNDARY,
        'Content-Disposition: form-data; name="%s"; filename="%s"' %
        (name, filename),
        'Content-Type: %s' % GetContentType(filename),
        '',
        value,
    ])
  # Closing boundary, followed by a trailing CRLF from the empty element.
  parts.append('--' + BOUNDARY + '--')
  parts.append('')
  content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
  return content_type, CRLF.join(parts)
def GetContentType(filename):
  """Guess the content-type from *filename*, defaulting to octet-stream."""
  guessed, _ = mimetypes.guess_type(filename)
  return guessed or 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
# (sys.platform is "win32" on native Windows, so the startswith test
# matches it; subprocess calls below pass shell=use_shell.)
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
                           universal_newlines=True,
                           env=os.environ):
  """Executes a command and returns the output from stdout and the return code.

  Args:
    command: Command to execute, as an argv-style list.
    print_output: If True, the output is printed to stdout.
      If False, both stdout and stderr are ignored.
    universal_newlines: Use universal_newlines flag (default: True).
    env: Environment mapping for the child process (defaults to os.environ).

  Returns:
    Tuple (output, return code)
  """
  logging.info("Running %s", command)
  p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                       shell=use_shell, universal_newlines=universal_newlines,
                       env=env)
  if print_output:
    # Echo stdout line by line as the child produces it.
    output_array = []
    while True:
      line = p.stdout.readline()
      if not line:
        break
      print line.strip("\n")
      output_array.append(line)
    output = "".join(output_array)
  else:
    output = p.stdout.read()
  p.wait()
  # NOTE(review): stdout is fully drained before stderr is read; a child
  # that fills the stderr pipe buffer before closing stdout could block
  # here.  Presumably acceptable for the svn/git/hg commands this script
  # runs -- confirm before reusing for chattier commands.
  errout = p.stderr.read()
  if print_output and errout:
    print >>sys.stderr, errout
  p.stdout.close()
  p.stderr.close()
  return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
             print_output=False, env=os.environ):
  """Run *command* and return its stdout, exiting the program on failure.

  Exits via ErrorExit when the command returns a nonzero status, or when it
  produces no output and silent_ok is False.
  """
  output, returncode = RunShellWithReturnCode(command, print_output,
                                              universal_newlines, env)
  if returncode:
    ErrorExit("Got error status from %s:\n%s" % (command, output))
  if not (silent_ok or output):
    ErrorExit("No output from %s" % command)
  return output
class VersionControlSystem(object):
"""Abstract base class providing an interface to the VCS."""
def __init__(self, options):
"""Constructor.
Args:
options: Command line options.
"""
self.options = options
def GenerateDiff(self, args):
"""Return the current diff as a string.
Args:
args: Extra arguments to pass to the diff command.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def CheckForUnknownFiles(self):
"""Show an "are you sure?" prompt if there are unknown files."""
unknown_files = self.GetUnknownFiles()
if unknown_files:
print "The following files are not added to version control:"
for line in unknown_files:
print line
prompt = "Are you sure to continue?(y/N) "
answer = raw_input(prompt).strip()
if answer != "y":
ErrorExit("User aborted")
def GetBaseFile(self, filename):
"""Get the content of the upstream version of a file.
Returns:
A tuple (base_content, new_content, is_binary, status)
base_content: The contents of the base file.
new_content: For text files, this is empty. For binary files, this is
the contents of the new file, since the diff output won't contain
information to reconstruct the current file.
is_binary: True iff the file is binary.
status: The status of the file.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetBaseFiles(self, diff):
"""Helper that calls GetBase file for each file in the patch.
Returns:
A dictionary that maps from filename to GetBaseFile's tuple. Filenames
are retrieved based on lines that start with "Index:" or
"Property changes on:".
"""
files = {}
for line in diff.splitlines(True):
if line.startswith('Index:') or line.startswith('Property changes on:'):
unused, filename = line.split(':', 1)
# On Windows if a file has property changes its filename uses '\'
# instead of '/'.
filename = filename.strip().replace('\\', '/')
files[filename] = self.GetBaseFile(filename)
return files
def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
files):
"""Uploads the base files (and if necessary, the current ones as well)."""
def UploadFile(filename, file_id, content, is_binary, status, is_base):
"""Uploads a file to the server."""
file_too_large = False
if is_base:
type = "base"
else:
type = "current"
if len(content) > MAX_UPLOAD_SIZE:
print ("Not uploading the %s file for %s because it's too large." %
(type, filename))
file_too_large = True
content = ""
checksum = md5(content).hexdigest()
if options.verbose > 0 and not file_too_large:
print "Uploading %s file for %s" % (type, filename)
url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
form_fields = [("filename", filename),
("status", status),
("checksum", checksum),
("is_binary", str(is_binary)),
("is_current", str(not is_base)),
]
if file_too_large:
form_fields.append(("file_too_large", "1"))
if options.email:
form_fields.append(("user", options.email))
ctype, body = EncodeMultipartFormData(form_fields,
[("data", filename, content)])
response_body = rpc_server.Send(url, body,
content_type=ctype)
if not response_body.startswith("OK"):
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
patches = dict()
[patches.setdefault(v, k) for k, v in patch_list]
for filename in patches.keys():
base_content, new_content, is_binary, status = files[filename]
file_id_str = patches.get(filename)
if file_id_str.find("nobase") != -1:
base_content = None
file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
file_id = int(file_id_str)
if base_content != None:
UploadFile(filename, file_id, base_content, is_binary, status, True)
if new_content != None:
UploadFile(filename, file_id, new_content, is_binary, status, False)
def IsImage(self, filename):
"""Returns true if the filename has an image extension."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False
return mimetype.startswith("image/")
def IsBinary(self, filename):
"""Returns true if the guessed mimetyped isnt't in text group."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False # e.g. README, "real" binaries usually have an extension
# special case for text files which don't start with text/
if mimetype in TEXT_MIMETYPES:
return False
return not mimetype.startswith("text/")
class SubversionVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Subversion."""

  def __init__(self, options):
    super(SubversionVCS, self).__init__(options)
    if self.options.revision:
      # Accept "N" or "N:M"; group(3) is the optional end revision M.
      match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
      if not match:
        ErrorExit("Invalid Subversion revision %s." % self.options.revision)
      self.rev_start = match.group(1)
      self.rev_end = match.group(3)
    else:
      self.rev_start = self.rev_end = None
    # Cache output from "svn list -r REVNO dirname".
    # Keys: dirname, Values: 2-tuple (output for start rev and end rev).
    self.svnls_cache = {}
    # SVN base URL is required to fetch files deleted in an older revision.
    # Result is cached to not guess it over and over again in GetBaseFile().
    required = self.options.download_base or self.options.revision is not None
    self.svn_base = self._GuessBase(required)

  def GuessBase(self, required):
    """Wrapper for _GuessBase."""
    # Returns the value already computed (and cached) in __init__;
    # the `required` argument is ignored here.
    return self.svn_base

  def _GuessBase(self, required):
    """Returns the SVN base URL.

    Args:
      required: If true, exits if the url can't be guessed, otherwise None is
        returned.
    """
    info = RunShell(["svn", "info"])
    for line in info.splitlines():
      words = line.split()
      if len(words) == 2 and words[0] == "URL:":
        url = words[1]
        scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
        # Strip any user@ prefix from the host portion.
        username, netloc = urllib.splituser(netloc)
        if username:
          logging.info("Removed username from base URL")
        if netloc.endswith("svn.python.org"):
          if netloc == "svn.python.org":
            if path.startswith("/projects/"):
              # Drop "/projects" but keep the leading "/" of what follows.
              path = path[9:]
          elif netloc != "pythondev@svn.python.org":
            ErrorExit("Unrecognized Python URL: %s" % url)
          base = "http://svn.python.org/view/*checkout*%s/" % path
          logging.info("Guessed Python base = %s", base)
        elif netloc.endswith("svn.collab.net"):
          if path.startswith("/repos/"):
            # Drop "/repos" but keep the leading "/" of what follows.
            path = path[6:]
          base = "http://svn.collab.net/viewvc/*checkout*%s/" % path
          logging.info("Guessed CollabNet base = %s", base)
        elif netloc.endswith(".googlecode.com"):
          path = path + "/"
          base = urlparse.urlunparse(("http", netloc, path, params,
                                      query, fragment))
          logging.info("Guessed Google Code base = %s", base)
        else:
          path = path + "/"
          base = urlparse.urlunparse((scheme, netloc, path, params,
                                      query, fragment))
          logging.info("Guessed base = %s", base)
        return base
    if required:
      ErrorExit("Can't find URL in output from svn info")
    return None

  def GenerateDiff(self, args):
    """Runs "svn diff" (optionally against --rev) and sanity-checks output."""
    cmd = ["svn", "diff"]
    if self.options.revision:
      cmd += ["-r", self.options.revision]
    cmd.extend(args)
    data = RunShell(cmd)
    count = 0
    for line in data.splitlines():
      if line.startswith("Index:") or line.startswith("Property changes on:"):
        count += 1
        logging.info(line)
    if not count:
      ErrorExit("No valid patches found in output from svn diff")
    return data

  def _CollapseKeywords(self, content, keyword_str):
    """Collapses SVN keywords."""
    # svn cat translates keywords but svn diff doesn't. As a result of this
    # behavior patching.PatchChunks() fails with a chunk mismatch error.
    # This part was originally written by the Review Board development team
    # who had the same problem (http://reviews.review-board.org/r/276/).
    # Mapping of keywords to known aliases
    svn_keywords = {
        # Standard keywords
        'Date': ['Date', 'LastChangedDate'],
        'Revision': ['Revision', 'LastChangedRevision', 'Rev'],
        'Author': ['Author', 'LastChangedBy'],
        'HeadURL': ['HeadURL', 'URL'],
        'Id': ['Id'],
        # Aliases
        'LastChangedDate': ['LastChangedDate', 'Date'],
        'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
        'LastChangedBy': ['LastChangedBy', 'Author'],
        'URL': ['URL', 'HeadURL'],
    }

    def repl(m):
      # Preserve the fixed-width "$Keyword:: ... $" form so line lengths
      # (and thus diff chunks) are unchanged.
      if m.group(2):
        return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
      return "$%s$" % m.group(1)

    keywords = [keyword
                for name in keyword_str.split(" ")
                for keyword in svn_keywords.get(name, [])]
    return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)

  def GetUnknownFiles(self):
    status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
    unknown_files = []
    for line in status.split("\n"):
      # "?" in column 0 marks an unversioned item in svn status output.
      if line and line[0] == "?":
        unknown_files.append(line)
    return unknown_files

  def ReadFile(self, filename):
    """Returns the contents of a file."""
    # NOTE(review): the local name `file` shadows the py2 builtin of the
    # same name within this method.
    file = open(filename, 'rb')
    result = ""
    try:
      result = file.read()
    finally:
      file.close()
    return result

  def GetStatus(self, filename):
    """Returns the status of a file."""
    if not self.options.revision:
      status = RunShell(["svn", "status", "--ignore-externals", filename])
      if not status:
        ErrorExit("svn status returned no output for %s" % filename)
      status_lines = status.splitlines()
      # If file is in a cl, the output will begin with
      # "\n--- Changelist 'cl_name':\n". See
      # http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
      if (len(status_lines) == 3 and
          not status_lines[0] and
          status_lines[1].startswith("--- Changelist")):
        status = status_lines[2]
      else:
        status = status_lines[0]
    # If we have a revision to diff against we need to run "svn list"
    # for the old and the new revision and compare the results to get
    # the correct status for a file.
    else:
      dirname, relfilename = os.path.split(filename)
      if dirname not in self.svnls_cache:
        cmd = ["svn", "list", "-r", self.rev_start, dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to get status for %s." % filename)
        old_files = out.splitlines()
        args = ["svn", "list"]
        if self.rev_end:
          args += ["-r", self.rev_end]
        cmd = args + [dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to run command %s" % cmd)
        self.svnls_cache[dirname] = (old_files, out.splitlines())
      old_files, new_files = self.svnls_cache[dirname]
      # Derive a synthetic one-letter status from set membership in the
      # two listings: Deleted / Modified / Added.
      if relfilename in old_files and relfilename not in new_files:
        status = "D "
      elif relfilename in old_files and relfilename in new_files:
        status = "M "
      else:
        status = "A "
    return status

  def GetBaseFile(self, filename):
    status = self.GetStatus(filename)
    base_content = None
    new_content = None
    # If a file is copied its status will be "A +", which signifies
    # "addition-with-history". See "svn st" for more information. We need to
    # upload the original file or else diff parsing will fail if the file was
    # edited.
    # (status columns: [0] item status, [1] property status, [3] history flag)
    if status[0] == "A" and status[3] != "+":
      # We'll need to upload the new content if we're adding a binary file
      # since diff's output won't contain it.
      mimetype = RunShell(["svn", "propget", "svn:mime-type", filename],
                          silent_ok=True)
      base_content = ""
      is_binary = bool(mimetype) and not mimetype.startswith("text/")
      if is_binary and self.IsImage(filename):
        new_content = self.ReadFile(filename)
    elif (status[0] in ("M", "D", "R") or
          (status[0] == "A" and status[3] == "+") or  # Copied file.
          (status[0] == " " and status[1] == "M")):  # Property change.
      args = []
      if self.options.revision:
        url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
      else:
        # Don't change filename, it's needed later.
        url = filename
        args += ["-r", "BASE"]
      cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
      mimetype, returncode = RunShellWithReturnCode(cmd)
      if returncode:
        # File does not exist in the requested revision.
        # Reset mimetype, it contains an error message.
        mimetype = ""
      get_base = False
      is_binary = bool(mimetype) and not mimetype.startswith("text/")
      if status[0] == " ":
        # Empty base content just to force an upload.
        base_content = ""
      elif is_binary:
        if self.IsImage(filename):
          get_base = True
          if status[0] == "M":
            if not self.rev_end:
              new_content = self.ReadFile(filename)
            else:
              url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
              new_content = RunShell(["svn", "cat", url],
                                     universal_newlines=True, silent_ok=True)
        else:
          base_content = ""
      else:
        get_base = True
      if get_base:
        # Binary content must not go through newline translation.
        if is_binary:
          universal_newlines = False
        else:
          universal_newlines = True
        if self.rev_start:
          # "svn cat -r REV delete_file.txt" doesn't work. cat requires
          # the full URL with "@REV" appended instead of using "-r" option.
          url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          base_content = RunShell(["svn", "cat", url],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        else:
          base_content = RunShell(["svn", "cat", filename],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        if not is_binary:
          args = []
          if self.rev_start:
            url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          else:
            url = filename
            args += ["-r", "BASE"]
          cmd = ["svn"] + args + ["propget", "svn:keywords", url]
          keywords, returncode = RunShellWithReturnCode(cmd)
          if keywords and not returncode:
            base_content = self._CollapseKeywords(base_content, keywords)
    else:
      StatusUpdate("svn status returned unexpected output: %s" % status)
      sys.exit(1)
    return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Git."""

  def __init__(self, options):
    super(GitVCS, self).__init__(options)
    # Map of filename -> (hash before, hash after) of base file.
    # Hashes for "no such file" are represented as None.
    self.hashes = {}
    # Map of new filename -> old filename for renames.
    self.renames = {}

  def GenerateDiff(self, extra_args):
    # This is more complicated than svn's GenerateDiff because we must convert
    # the diff output to include an svn-style "Index:" line as well as record
    # the hashes of the files, so we can upload them along with our diff.
    # Special used by git to indicate "no such content".
    NULL_HASH = "0"*40
    extra_args = extra_args[:]
    if self.options.revision:
      extra_args = [self.options.revision] + extra_args
    extra_args.append('-M')
    # --no-ext-diff is broken in some versions of Git, so try to work around
    # this by overriding the environment (but there is still a problem if the
    # git config key "diff.external" is used).
    env = os.environ.copy()
    if 'GIT_EXTERNAL_DIFF' in env: del env['GIT_EXTERNAL_DIFF']
    gitdiff = RunShell(["git", "diff", "--no-ext-diff", "--full-index"]
                       + extra_args, env=env)
    svndiff = []
    filecount = 0
    # `filename` carries the most recent "diff --git" header across the
    # subsequent lines so the "index" line can be attributed to it.
    filename = None
    for line in gitdiff.splitlines():
      match = re.match(r"diff --git a/(.*) b/(.*)$", line)
      if match:
        filecount += 1
        # Intentionally use the "after" filename so we can show renames.
        filename = match.group(2)
        svndiff.append("Index: %s\n" % filename)
        if match.group(1) != match.group(2):
          self.renames[match.group(2)] = match.group(1)
      else:
        # The "index" line in a git diff looks like this (long hashes elided):
        # index 82c0d44..b2cee3f 100755
        # We want to save the left hash, as that identifies the base file.
        match = re.match(r"index (\w+)\.\.(\w+)", line)
        if match:
          before, after = (match.group(1), match.group(2))
          if before == NULL_HASH:
            before = None
          if after == NULL_HASH:
            after = None
          self.hashes[filename] = (before, after)
        svndiff.append(line + "\n")
    if not filecount:
      ErrorExit("No valid patches found in output from git diff")
    return "".join(svndiff)

  def GetUnknownFiles(self):
    """Return untracked, non-ignored files reported by git ls-files."""
    status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
                      silent_ok=True)
    return status.splitlines()

  def GetFileContent(self, file_hash, is_binary):
    """Returns the content of a file identified by its git hash."""
    data, retcode = RunShellWithReturnCode(["git", "show", file_hash],
                                           universal_newlines=not is_binary)
    if retcode:
      ErrorExit("Got error status from 'git show %s'" % file_hash)
    return data

  def GetBaseFile(self, filename):
    hash_before, hash_after = self.hashes.get(filename, (None,None))
    base_content = None
    new_content = None
    is_binary = self.IsBinary(filename)
    status = None
    if filename in self.renames:
      status = "A +"  # Match svn attribute name for renames.
      if filename not in self.hashes:
        # If a rename doesn't change the content, we never get a hash.
        # NOTE(review): bare "git show <filename>" relies on git resolving
        # the argument as a path -- verify against the git versions in use.
        base_content = RunShell(["git", "show", filename])
    elif not hash_before:
      status = "A"
      base_content = ""
    elif not hash_after:
      status = "D"
    else:
      status = "M"
    is_image = self.IsImage(filename)
    # Grab the before/after content if we need it.
    # We should include file contents if it's text or it's an image.
    if not is_binary or is_image:
      # Grab the base content if we don't have it already.
      if base_content is None and hash_before:
        base_content = self.GetFileContent(hash_before, is_binary)
      # Only include the "after" file if it's an image; otherwise it
      # it is reconstructed from the diff.
      if is_image and hash_after:
        new_content = self.GetFileContent(hash_after, is_binary)
    return (base_content, new_content, is_binary, status)
class MercurialVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Mercurial."""

  def __init__(self, options, repo_dir):
    super(MercurialVCS, self).__init__(options)
    # Absolute path to repository (we can be in a subdir)
    self.repo_dir = os.path.normpath(repo_dir)
    # Compute the subdir
    cwd = os.path.normpath(os.getcwd())
    assert cwd.startswith(self.repo_dir)
    self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
    if self.options.revision:
      self.base_rev = self.options.revision
    else:
      # "hg parent -q" prints "rev:node"; keep the node part.
      self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()

  def _GetRelPath(self, filename):
    """Get relative path of a file according to the current directory,
    given its logical path in the repo."""
    assert filename.startswith(self.subdir), (filename, self.subdir)
    return filename[len(self.subdir):].lstrip(r"\/")

  def GenerateDiff(self, extra_args):
    # If no file specified, restrict to the current subdir
    extra_args = extra_args or ["."]
    cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
    data = RunShell(cmd, silent_ok=True)
    svndiff = []
    filecount = 0
    for line in data.splitlines():
      m = re.match("diff --git a/(\S+) b/(\S+)", line)
      if m:
        # Modify line to make it look like as it comes from svn diff.
        # With this modification no changes on the server side are required
        # to make upload.py work with Mercurial repos.
        # NOTE: for proper handling of moved/copied files, we have to use
        # the second filename.
        filename = m.group(2)
        svndiff.append("Index: %s" % filename)
        svndiff.append("=" * 67)
        filecount += 1
        logging.info(line)
      else:
        svndiff.append(line)
    if not filecount:
      ErrorExit("No valid patches found in output from hg diff")
    return "\n".join(svndiff) + "\n"

  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    # NOTE(review): `args` is unused below.
    args = []
    status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
                      silent_ok=True)
    unknown_files = []
    for line in status.splitlines():
      st, fn = line.split(" ", 1)
      if st == "?":
        unknown_files.append(fn)
    return unknown_files

  def GetBaseFile(self, filename):
    # "hg status" and "hg cat" both take a path relative to the current subdir
    # rather than to the repo root, but "hg diff" has given us the full path
    # to the repo root.
    base_content = ""
    new_content = None
    is_binary = False
    oldrelpath = relpath = self._GetRelPath(filename)
    # "hg status -C" returns two lines for moved/copied files, one otherwise
    out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
    out = out.splitlines()
    # HACK: strip error message about missing file/directory if it isn't in
    # the working copy
    if out[0].startswith('%s: ' % relpath):
      out = out[1:]
    if len(out) > 1:
      # Moved/copied => considered as modified, use old filename to
      # retrieve base contents
      oldrelpath = out[1].strip()
      status = "M"
    else:
      status, _ = out[0].split(' ', 1)
    # A "rev:node" base_rev spec: use the rev part for "hg cat".
    if ":" in self.base_rev:
      base_rev = self.base_rev.split(":", 1)[0]
    else:
      base_rev = self.base_rev
    if status != "A":
      base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
                              silent_ok=True)
      is_binary = "\0" in base_content  # Mercurial's heuristic
    if status != "R":
      # NOTE(review): file handle is not explicitly closed here.
      new_content = open(relpath, "rb").read()
      is_binary = is_binary or "\0" in new_content
    if is_binary and base_content:
      # Fetch again without converting newlines
      base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
                              silent_ok=True, universal_newlines=False)
    if not is_binary or not self.IsImage(relpath):
      new_content = None
    return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
  """Splits a patch into separate pieces for each file.

  Args:
    data: A string containing the output of svn diff.

  Returns:
    A list of 2-tuple (filename, text) where text is the svn diff output
      pertaining to filename.
  """
  patches = []
  current_name = None
  current_lines = []
  for line in data.splitlines(True):
    next_name = None
    if line.startswith('Index:'):
      next_name = line.split(':', 1)[1].strip()
    elif line.startswith('Property changes on:'):
      # When a file is modified, paths use '/' between directories, however
      # when a property is modified '\' is used on Windows. Make them the
      # same, otherwise the file shows up twice.
      prop_name = line.split(':', 1)[1].strip().replace('\\', '/')
      if prop_name != current_name:
        # File has property changes but no modifications: start a new diff.
        next_name = prop_name
    if next_name:
      # Flush the previous file's accumulated diff before switching files.
      if current_name and current_lines:
        patches.append((current_name, ''.join(current_lines)))
      current_name = next_name
      current_lines = [line]
      continue
    current_lines.append(line)
  if current_name and current_lines:
    patches.append((current_name, ''.join(current_lines)))
  return patches
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
  """Uploads a separate patch for each file in the diff output.

  Returns a list of [patch_key, filename] for each file.
  """
  # NOTE(review): Python 2 only (print statements below).
  patches = SplitPatch(data)
  rv = []
  for patch in patches:
    # Skip files whose individual diff exceeds the server's upload limit.
    if len(patch[1]) > MAX_UPLOAD_SIZE:
      print ("Not uploading the patch for " + patch[0] +
             " because the file is too large.")
      continue
    form_fields = [("filename", patch[0])]
    if not options.download_base:
      form_fields.append(("content_upload", "1"))
    files = [("data", "data.diff", patch[1])]
    ctype, body = EncodeMultipartFormData(form_fields, files)
    url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
    print "Uploading patch for " + patch[0]
    response_body = rpc_server.Send(url, body, content_type=ctype)
    lines = response_body.splitlines()
    # The server acknowledges a successful upload with "OK" as line one and
    # the patch key as line two.
    if not lines or lines[0] != "OK":
      StatusUpdate(" --> %s" % response_body)
      sys.exit(1)
    rv.append([lines[1], patch[0]])
  return rv
def GuessVCSName():
  """Helper to guess the version control system.

  This examines the current directory, guesses which VersionControlSystem
  we're using, and returns an string indicating which VCS is detected.

  Returns:
    A pair (vcs, output). vcs is a string indicating which VCS was detected
    and is one of VCS_GIT, VCS_MERCURIAL, VCS_SUBVERSION, or VCS_UNKNOWN.
    output is a string containing any interesting output from the vcs
    detection routine, or None if there is nothing interesting.
  """
  # NOTE(review): Python 2 only ("except OSError, (errno, message)" syntax).
  # Mercurial has a command to get the base directory of a repository
  # Try running it, but don't die if we don't have hg installed.
  # NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
  try:
    out, returncode = RunShellWithReturnCode(["hg", "root"])
    if returncode == 0:
      return (VCS_MERCURIAL, out.strip())
  except OSError, (errno, message):
    if errno != 2:  # ENOENT -- they don't have hg installed.
      raise
  # Subversion has a .svn in all working directories.
  if os.path.isdir('.svn'):
    logging.info("Guessed VCS = Subversion")
    return (VCS_SUBVERSION, None)
  # Git has a command to test if you're in a git tree.
  # Try running it, but don't die if we don't have git installed.
  try:
    out, returncode = RunShellWithReturnCode(["git", "rev-parse",
      "--is-inside-work-tree"])
    if returncode == 0:
      return (VCS_GIT, None)
  except OSError, (errno, message):
    if errno != 2:  # ENOENT -- they don't have git installed.
      raise
  return (VCS_UNKNOWN, None)
def GuessVCS(options):
  """Helper to guess the version control system.

  This verifies any user-specified VersionControlSystem (by command line
  or environment variable). If the user didn't specify one, this examines
  the current directory, guesses which VersionControlSystem we're using,
  and returns an instance of the appropriate class. Exit with an error
  if we can't figure it out.

  Returns:
    A VersionControlSystem instance. Exits if the VCS can't be guessed.
  """
  # Explicit choice wins: command line first, then the environment.
  vcs_name = options.vcs or os.environ.get("CODEREVIEW_VCS")
  if vcs_name:
    resolved = VCS_ABBREVIATIONS.get(vcs_name.lower())
    if resolved is None:
      ErrorExit("Unknown version control system %r specified." % vcs_name)
    vcs, extra_output = resolved, None
  else:
    vcs, extra_output = GuessVCSName()

  if vcs == VCS_MERCURIAL:
    if extra_output is None:
      extra_output = RunShell(["hg", "root"]).strip()
    return MercurialVCS(options, extra_output)
  if vcs == VCS_SUBVERSION:
    return SubversionVCS(options)
  if vcs == VCS_GIT:
    return GitVCS(options)

  ErrorExit(("Could not guess version control system. "
             "Are you in a working copy directory?"))
def RealMain(argv, data=None):
  """The real main function.

  Args:
    argv: Command line arguments.
    data: Diff contents. If None (default) the diff is generated by
      the VersionControlSystem implementation returned by GuessVCS().

  Returns:
    A 2-tuple (issue id, patchset id).
    The patchset id is None if the base files are not uploaded by this
    script (applies only to SVN checkouts).
  """
  # NOTE(review): Python 2 only (print statements, raw_input, iteritems).
  logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
                              "%(lineno)s %(message)s "))
  # Force a predictable locale so external VCS tool output is parseable.
  os.environ['LC_ALL'] = 'C'
  options, args = parser.parse_args(argv[1:])
  global verbosity
  verbosity = options.verbose
  if verbosity >= 3:
    logging.getLogger().setLevel(logging.DEBUG)
  elif verbosity >= 2:
    logging.getLogger().setLevel(logging.INFO)
  vcs = GuessVCS(options)
  if isinstance(vcs, SubversionVCS):
    # base field is only allowed for Subversion.
    # Note: Fetching base files may become deprecated in future releases.
    base = vcs.GuessBase(options.download_base)
  else:
    base = None
  if not base and options.download_base:
    # NOTE(review): this assignment is a no-op (the condition already
    # requires download_base to be truthy) — possibly a latent bug; confirm
    # against upstream upload.py.
    options.download_base = True
    logging.info("Enabled upload of base file")
  if not options.assume_yes:
    vcs.CheckForUnknownFiles()
  if data is None:
    data = vcs.GenerateDiff(args)
  files = vcs.GetBaseFiles(data)
  if verbosity >= 1:
    print "Upload server:", options.server, "(change with -s/--server)"
  if options.issue:
    prompt = "Message describing this patch set: "
  else:
    prompt = "New issue subject: "
  message = options.message or raw_input(prompt).strip()
  if not message:
    ErrorExit("A non-empty message is required")
  rpc_server = GetRpcServer(options)
  # Build the multipart form describing the issue/patchset.
  form_fields = [("subject", message)]
  if base:
    form_fields.append(("base", base))
  if options.issue:
    form_fields.append(("issue", str(options.issue)))
  if options.email:
    form_fields.append(("user", options.email))
  if options.reviewers:
    for reviewer in options.reviewers.split(','):
      # Only a single dot is accepted in the domain part of an address.
      if "@" in reviewer and not reviewer.split("@")[1].count(".") == 1:
        ErrorExit("Invalid email address: %s" % reviewer)
    form_fields.append(("reviewers", options.reviewers))
  if options.cc:
    for cc in options.cc.split(','):
      if "@" in cc and not cc.split("@")[1].count(".") == 1:
        ErrorExit("Invalid email address: %s" % cc)
    form_fields.append(("cc", options.cc))
  description = options.description
  if options.description_file:
    if options.description:
      ErrorExit("Can't specify description and description_file")
    file = open(options.description_file, 'r')
    description = file.read()
    file.close()
  if description:
    form_fields.append(("description", description))
  # Send a hash of all the base file so the server can determine if a copy
  # already exists in an earlier patchset.
  base_hashes = ""
  for file, info in files.iteritems():
    if not info[0] is None:
      checksum = md5(info[0]).hexdigest()
      if base_hashes:
        base_hashes += "|"
      base_hashes += checksum + ":" + file
  form_fields.append(("base_hashes", base_hashes))
  if options.private:
    if options.issue:
      print "Warning: Private flag ignored when updating an existing issue."
    else:
      form_fields.append(("private", "1"))
  # If we're uploading base files, don't send the email before the uploads, so
  # that it contains the file status.
  if options.send_mail and options.download_base:
    form_fields.append(("send_mail", "1"))
  if not options.download_base:
    form_fields.append(("content_upload", "1"))
  if len(data) > MAX_UPLOAD_SIZE:
    print "Patch is large, so uploading file patches separately."
    uploaded_diff_file = []
    form_fields.append(("separate_patches", "1"))
  else:
    uploaded_diff_file = [("data", "data.diff", data)]
  ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
  response_body = rpc_server.Send("/upload", body, content_type=ctype)
  patchset = None
  if not options.download_base or not uploaded_diff_file:
    # Response format: message line, patchset id line, then patch listing.
    lines = response_body.splitlines()
    if len(lines) >= 2:
      msg = lines[0]
      patchset = lines[1].strip()
      patches = [x.split(" ", 1) for x in lines[2:]]
    else:
      msg = response_body
  else:
    msg = response_body
  if not response_body.startswith("Issue created.") and \
     not response_body.startswith("Issue updated."):
    print >>sys.stderr, msg
    sys.exit(0)
  # The issue id is the last path component of the URL in the message.
  issue = msg[msg.rfind("/")+1:]
  if not uploaded_diff_file:
    result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
    if not options.download_base:
      patches = result
  if not options.download_base:
    vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
  if options.send_mail:
    rpc_server.Send("/" + issue + "/mail", payload="")
  return issue, patchset
def main():
  """Entry point: runs RealMain and converts Ctrl-C into a clean exit."""
  try:
    RealMain(sys.argv)
  except KeyboardInterrupt:
    # Python 2 bare print: emit a newline before the status message.
    print
    StatusUpdate("Interrupted.")
    sys.exit(1)
|
"""
.. module:: linalg
:platform: Unix, Windows
:synopsis: Provides linear algebra utility functions
.. moduleauthor:: Onur Rauf Bingol <orbingol@gmail.com>
"""
import os
import math
from functools import reduce
from . import _linalg
try:
from functools import lru_cache
except ImportError:
from .functools_lru_cache import lru_cache
def vector_cross(vector1, vector2):
    """ Computes the cross-product of the input vectors.

    :param vector1: input vector 1
    :type vector1: list, tuple
    :param vector2: input vector 2
    :type vector2: list, tuple
    :return: result of the cross product
    :rtype: tuple
    """
    # Validate that both inputs are sized containers and non-empty
    try:
        no_input = (vector1 is None or len(vector1) == 0 or
                    vector2 is None or len(vector2) == 0)
        if no_input:
            raise ValueError("Input vectors cannot be empty")
    except TypeError as e:
        print("An error occurred: {}".format(e.args[-1]))
        raise TypeError("Input must be a list or tuple")
    except Exception:
        raise

    if not 1 < len(vector1) <= 3 or not 1 < len(vector2) <= 3:
        raise ValueError("The input vectors should contain 2 or 3 elements")

    # Promote 2-D inputs to 3-D by appending a zero z-component
    v1 = [float(c) for c in vector1] + [0.0] if len(vector1) == 2 else vector1
    v2 = [float(c) for c in vector2] + [0.0] if len(vector2) == 2 else vector2

    # Standard 3-D cross product formula
    return [
        (v1[1] * v2[2]) - (v1[2] * v2[1]),
        (v1[2] * v2[0]) - (v1[0] * v2[2]),
        (v1[0] * v2[1]) - (v1[1] * v2[0]),
    ]
def vector_dot(vector1, vector2):
    """ Computes the dot-product of the input vectors.

    :param vector1: input vector 1
    :type vector1: list, tuple
    :param vector2: input vector 2
    :type vector2: list, tuple
    :return: result of the dot product
    :rtype: float
    """
    # Validate that both inputs are sized containers and non-empty
    try:
        no_input = (vector1 is None or len(vector1) == 0 or
                    vector2 is None or len(vector2) == 0)
        if no_input:
            raise ValueError("Input vectors cannot be empty")
    except TypeError as e:
        print("An error occurred: {}".format(e.args[-1]))
        raise TypeError("Input must be a list or tuple")
    except Exception:
        raise

    # Accumulate the pairwise products, starting from a float accumulator
    return sum((a * b for a, b in zip(vector1, vector2)), 0.0)
def vector_multiply(vector_in, scalar):
    """ Multiplies the vector with a scalar value.

    This operation is also called *vector scaling*.

    :param vector_in: vector
    :type vector_in: list, tuple
    :param scalar: scalar value
    :type scalar: int, float
    :return: updated vector
    :rtype: tuple
    """
    # Scale each component independently
    result = []
    for component in vector_in:
        result.append(component * scalar)
    return result
def vector_sum(vector1, vector2, coeff=1.0):
    """ Sums the vectors.

    This function computes the result of the vector operation :math:`\\overline{v}_{1} + c * \\overline{v}_{2}`, where
    :math:`\\overline{v}_{1}` is ``vector1``, :math:`\\overline{v}_{2}` is ``vector2`` and :math:`c` is ``coeff``.

    :param vector1: vector 1
    :type vector1: list, tuple
    :param vector2: vector 2
    :type vector2: list, tuple
    :param coeff: multiplier for vector 2
    :type coeff: float
    :return: updated vector
    :rtype: list
    """
    # Component-wise v1 + coeff * v2
    out = []
    for a, b in zip(vector1, vector2):
        out.append(a + (coeff * b))
    return out
def vector_normalize(vector_in, decimals=18):
    """ Generates a unit vector from the input.

    :param vector_in: vector to be normalized
    :type vector_in: list, tuple
    :param decimals: number of significands
    :type decimals: int
    :return: the normalized vector (i.e. the unit vector)
    :rtype: list
    """
    # Validate that the input is a sized container and non-empty
    try:
        if vector_in is None or len(vector_in) == 0:
            raise ValueError("Input vector cannot be empty")
    except TypeError as e:
        print("An error occurred: {}".format(e.args[-1]))
        raise TypeError("Input must be a list or tuple")
    except Exception:
        raise

    # A zero vector cannot be normalized
    magnitude = vector_magnitude(vector_in)
    if magnitude <= 0:
        raise ValueError("The magnitude of the vector is zero")

    # Divide by the magnitude, then round to the requested significands
    fmt = "{:." + str(decimals) + "f}"
    return [float(fmt.format(component / magnitude)) for component in vector_in]
def vector_generate(start_pt, end_pt, normalize=False):
    """ Generates a vector from 2 input points.

    :param start_pt: start point of the vector
    :type start_pt: list, tuple
    :param end_pt: end point of the vector
    :type end_pt: list, tuple
    :param normalize: if True, the generated vector is normalized
    :type normalize: bool
    :return: a vector from start_pt to end_pt
    :rtype: list
    """
    # Validate that both inputs are sized containers and non-empty
    try:
        no_input = (start_pt is None or len(start_pt) == 0 or
                    end_pt is None or len(end_pt) == 0)
        if no_input:
            raise ValueError("Input points cannot be empty")
    except TypeError as e:
        print("An error occurred: {}".format(e.args[-1]))
        raise TypeError("Input must be a list or tuple")
    except Exception:
        raise

    # Component-wise difference end - start
    vec = [ep - sp for sp, ep in zip(start_pt, end_pt)]
    return vector_normalize(vec) if normalize else vec
def vector_mean(*args):
    """ Computes the mean (average) of a list of vectors.

    The function computes the arithmetic mean of a list of vectors, which are also organized as a list of
    integers or floating point numbers.

    .. code-block:: python
        :linenos:

        # Import geomdl.utilities module
        from geomdl import utilities

        # Create a list of vectors as an example
        vector_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]

        # Compute mean vector
        mean_vector = utilities.vector_mean(*vector_list)

        # Alternative usage example (same as above):
        mean_vector = utilities.vector_mean([1, 2, 3], [4, 5, 6], [7, 8, 9])

    :param args: list of vectors
    :type args: list, tuple
    :return: mean vector
    :rtype: list
    """
    num_vectors = len(args)
    # Accumulate component totals, starting from floats
    totals = [0.0] * len(args[0])
    for vec in args:
        totals = [t + comp for t, comp in zip(totals, vec)]
    # Divide totals by the number of vectors
    return [t / num_vectors for t in totals]
def vector_magnitude(vector_in):
    """ Computes the magnitude of the input vector.

    :param vector_in: input vector
    :type vector_in: list, tuple
    :return: magnitude of the vector
    :rtype: float
    """
    # Euclidean norm: square root of the sum of squared components
    return math.sqrt(sum(component ** 2 for component in vector_in))
def vector_angle_between(vector1, vector2, **kwargs):
    """ Computes the angle between the two input vectors.

    If the keyword argument ``degrees`` is set to *True*, then the angle will be in degrees. Otherwise, it will be
    in radians. By default, ``degrees`` is set to *True*.

    :param vector1: vector
    :type vector1: list, tuple
    :param vector2: vector
    :type vector2: list, tuple
    :return: angle between the vectors
    :rtype: float
    """
    use_degrees = kwargs.get('degrees', True)
    # cos(theta) = (v1 . v2) / (|v1| * |v2|)
    denominator = vector_magnitude(vector1) * vector_magnitude(vector2)
    angle = math.acos(vector_dot(vector1, vector2) / denominator)
    return math.degrees(angle) if use_degrees else angle
def vector_is_zero(vector_in, tol=10e-8):
    """ Checks if the input vector is a zero vector.

    :param vector_in: input vector
    :type vector_in: list, tuple
    :param tol: tolerance value
    :type tol: float
    :return: True if the input vector is zero, False otherwise
    :rtype: bool
    """
    if not isinstance(vector_in, (list, tuple)):
        raise TypeError("Input vector must be a list or a tuple")
    # Zero vector: every component is below the tolerance in absolute value
    return all(abs(component) < tol for component in vector_in)
def point_translate(point_in, vector_in):
    """ Translates the input points using the input vector.

    :param point_in: input point
    :type point_in: list, tuple
    :param vector_in: input vector
    :type vector_in: list, tuple
    :return: translated point
    :rtype: list
    """
    # Validate that both inputs are sized containers and non-empty
    try:
        no_input = (point_in is None or len(point_in) == 0 or
                    vector_in is None or len(vector_in) == 0)
        if no_input:
            raise ValueError("Input arguments cannot be empty")
    except TypeError as e:
        print("An error occurred: {}".format(e.args[-1]))
        raise TypeError("Input must be a list or tuple")
    except Exception:
        raise

    # Add the vector component-wise to the point coordinates
    translated = []
    for coordinate, offset in zip(point_in, vector_in):
        translated.append(coordinate + offset)
    return translated
def point_distance(pt1, pt2):
    """ Computes distance between two points.

    :param pt1: point 1
    :type pt1: list, tuple
    :param pt2: point 2
    :type pt2: list, tuple
    :return: distance between input points
    :rtype: float
    """
    if len(pt1) != len(pt2):
        raise ValueError("The input points should have the same dimension")
    # Distance is the magnitude of the displacement vector between the points
    return vector_magnitude(vector_generate(pt1, pt2, normalize=False))
def point_mid(pt1, pt2):
    """ Computes the midpoint of the input points.

    :param pt1: point 1
    :type pt1: list, tuple
    :param pt2: point 2
    :type pt2: list, tuple
    :return: midpoint
    :rtype: list
    """
    if len(pt1) != len(pt2):
        raise ValueError("The input points should have the same dimension")
    # Midpoint: move half-way along the vector from pt1 to pt2
    half_way = vector_multiply(vector_generate(pt1, pt2, normalize=False), 0.5)
    return point_translate(pt1, half_way)
# BUG FIX: os.environ values are always strings, but lru_cache requires an
# int (or None) maxsize; convert explicitly and use .get() with a default.
@lru_cache(maxsize=int(os.environ.get('GEOMDL_CACHE_SIZE', 16)))
def matrix_identity(n):
    """ Generates a NxN identity matrix.

    :param n: size of the matrix
    :type n: int
    :return: identity matrix
    :rtype: list
    """
    # NOTE(review): the cached value is a mutable list; callers mutating the
    # result would corrupt the cache — confirm callers treat it as read-only.
    imat = [[1.0 if i == j else 0.0 for i in range(n)] for j in range(n)]
    return imat
def matrix_transpose(m):
    """ Transposes the input matrix.

    The input matrix :math:`m` is a 2-dimensional array.

    :param m: input matrix with dimensions :math:`(n \\times m)`
    :type m: list, tuple
    :return: transpose matrix with dimensions :math:`(m \\times n)`
    :rtype: list
    """
    # Column i of the input becomes row i of the output
    return [[row[i] for row in m] for i in range(len(m[0]))]
def matrix_multiply(m1, m2):
    """ Matrix multiplication (iterative algorithm).

    The running time of the iterative matrix multiplication algorithm is :math:`O(n^{3})`.

    :param m1: 1st matrix with dimensions :math:`(n \\times p)`
    :type m1: list, tuple
    :param m2: 2nd matrix with dimensions :math:`(p \\times m)`
    :type m2: list, tuple
    :return: resultant matrix with dimensions :math:`(n \\times m)`
    :rtype: list
    """
    num_rows = len(m1)
    num_cols = len(m2[0])
    inner_dim = len(m2)
    product = [[0.0] * num_cols for _ in range(num_rows)]
    for r in range(num_rows):
        for c in range(num_cols):
            # Accumulate the dot product of row r of m1 and column c of m2
            acc = 0.0
            for k in range(inner_dim):
                acc += float(m1[r][k] * m2[k][c])
            product[r][c] = acc
    return product
def triangle_normal(tri):
    """ Computes the (approximate) normal vector of the input triangle.

    :param tri: triangle object
    :type tri: elements.Triangle
    :return: normal vector of the triangle
    :rtype: tuple
    """
    # Normal is the cross product of two consecutive edge vectors
    edge1 = vector_generate(tri.vertices[0].data, tri.vertices[1].data)
    edge2 = vector_generate(tri.vertices[1].data, tri.vertices[2].data)
    return vector_cross(edge1, edge2)
def triangle_center(tri, uv=False):
    """ Computes the center of mass of the input triangle.

    :param tri: triangle object
    :type tri: elements.Triangle
    :param uv: if True, then finds parametric position of the center of mass
    :type uv: bool
    :return: center of mass of the triangle
    :rtype: tuple
    """
    # Pick parametric (2-D) or spatial (3-D) coordinates accordingly
    if uv:
        points = [t.uv for t in tri]
        center = [0.0, 0.0]
    else:
        points = tri.vertices
        center = [0.0, 0.0, 0.0]
    # Centroid: component-wise average of the three vertices
    for pt in points:
        center = [c + comp for c, comp in zip(center, pt)]
    return tuple(float(c) / 3.0 for c in center)
# BUG FIX: os.environ values are always strings, but lru_cache requires an
# int (or None) maxsize; convert explicitly and use .get() with a default.
@lru_cache(maxsize=int(os.environ.get('GEOMDL_CACHE_SIZE', 128)))
def binomial_coefficient(k, i):
    """ Computes the binomial coefficient (denoted by *k choose i*).

    Please see the following website for details: http://mathworld.wolfram.com/BinomialCoefficient.html

    :param k: size of the set of distinct elements
    :type k: int
    :param i: size of the subsets
    :type i: int
    :return: combination of *k* and *i*
    :rtype: float
    """
    # Special case
    if i > k:
        return float(0)
    # Compute binomial coefficient. The factorial ratio is always an exact
    # integer, so use integer floor division before converting to float to
    # avoid floating-point precision loss for large factorials.
    k_fact = math.factorial(k)
    i_fact = math.factorial(i)
    k_i_fact = math.factorial(k - i)
    return float(k_fact // (k_i_fact * i_fact))
def lu_decomposition(matrix_a):
    """ LU-Factorization method using Doolittle's Method for solution of linear systems.

    Decomposes the matrix :math:`A` such that :math:`A = LU`.

    The input matrix is represented by a list or a tuple. The input matrix is **2-dimensional**, i.e. list of lists of
    integers and/or floats.

    :param matrix_a: Input matrix (must be a square matrix)
    :type matrix_a: list, tuple
    :return: a tuple containing matrices L and U
    :rtype: tuple
    """
    # Every row must have exactly as many columns as there are rows
    size = len(matrix_a)
    for row_idx, row in enumerate(matrix_a):
        if len(row) != size:
            raise ValueError("The input must be a square matrix. "
                             "Row {} has a size of {}.".format(row_idx + 1, len(row)))
    # Delegate the actual Doolittle factorization to the helper module
    return _linalg.doolittle(matrix_a)
def forward_substitution(matrix_l, matrix_b):
    """ Forward substitution method for the solution of linear systems.

    Solves the equation :math:`Ly = b` using forward substitution method
    where :math:`L` is a lower triangular matrix and :math:`b` is a column matrix.

    :param matrix_l: L, lower triangular matrix
    :type matrix_l: list, tuple
    :param matrix_b: b, column matrix
    :type matrix_b: list, tuple
    :return: y, column matrix
    :rtype: list
    """
    size = len(matrix_b)
    solution = [0.0] * size
    # First unknown is solved directly
    solution[0] = float(matrix_b[0]) / float(matrix_l[0][0])
    for row in range(1, size):
        # Subtract contributions of the already-solved unknowns
        partial = sum(matrix_l[row][col] * solution[col] for col in range(row))
        solution[row] = (float(matrix_b[row]) - partial) / float(matrix_l[row][row])
    return solution
def backward_substitution(matrix_u, matrix_y):
    """ Backward substitution method for the solution of linear systems.

    Solves the equation :math:`Ux = y` using backward substitution method
    where :math:`U` is a upper triangular matrix and :math:`y` is a column matrix.

    :param matrix_u: U, upper triangular matrix
    :type matrix_u: list, tuple
    :param matrix_y: y, column matrix
    :type matrix_y: list, tuple
    :return: x, column matrix
    :rtype: list
    """
    q = len(matrix_y)
    matrix_x = [0.0 for _ in range(q)]
    # Last unknown is solved directly
    matrix_x[q - 1] = float(matrix_y[q - 1]) / float(matrix_u[q - 1][q - 1])
    for i in range(q - 2, -1, -1):
        # Subtract only the already-solved unknowns (j > i). The original
        # summed from j = i, silently relying on matrix_x[i] still being 0.0;
        # starting at i + 1 removes that fragile dependency and a wasted term.
        matrix_x[i] = float(matrix_y[i]) - sum(matrix_u[i][j] * matrix_x[j] for j in range(i + 1, q))
        matrix_x[i] /= float(matrix_u[i][i])
    return matrix_x
def linspace(start, stop, num, decimals=18):
    """ Returns a list of evenly spaced numbers over a specified interval.

    Inspired from Numpy's linspace function: https://github.com/numpy/numpy/blob/master/numpy/core/function_base.py

    :param start: starting value
    :type start: float
    :param stop: end value
    :type stop: float
    :param num: number of samples to generate
    :type num: int
    :param decimals: number of significands
    :type decimals: int
    :return: a list of equally spaced numbers
    :rtype: list
    """
    start = float(start)
    stop = float(stop)
    # Degenerate interval: start and stop are effectively equal
    if abs(start - stop) <= 10e-8:
        return [start]
    num = int(num)
    fmt = "{:." + str(decimals) + "f}"
    if num > 1:
        span = stop - start
        divisions = num - 1
        # Evenly spaced samples, rounded to the requested significands
        return [float(fmt.format(start + (float(step) * float(span) / float(divisions))))
                for step in range(num)]
    return [float(fmt.format(start))]
def frange(start, stop, step=1.0):
    """ Implementation of Python's ``range()`` function which works with floats.

    Reference to this implementation: https://stackoverflow.com/a/36091634

    :param start: start value
    :type start: float
    :param stop: end value
    :type stop: float
    :param step: increment
    :type step: float
    :return: float
    :rtype: generator
    """
    base = float(start)  # Prevent yielding integers.
    half_step = step / 2.0
    yield base  # always yield first value
    count = 0.0
    current = base
    # Recompute from the base each time to avoid accumulating rounding error
    while current + half_step < stop:
        count += 1.0
        current = base + count * step
        yield current
    if stop > current:
        yield stop  # for yielding last value of the knot vector if the step is a large value, like 0.1
def convex_hull(points):
    """ Returns points on convex hull in counterclockwise order according to Graham's scan algorithm.

    Reference: https://gist.github.com/arthur-e/5cf52962341310f438e96c1f3c3398b8

    .. note:: This implementation only works in 2-dimensional space.

    :param points: list of 2-dimensional points
    :type points: list, tuple
    :return: convex hull of the input points
    :rtype: list
    """
    left_turn = 1

    def _direction(p, q, r):
        # Sign of the cross product of (q - p) and (r - p): 1, -1 or 0
        cross = (q[0] - p[0]) * (r[1] - p[1]) - (r[0] - p[0]) * (q[1] - p[1])
        return (cross > 0) - (cross < 0)

    def _half_hull(hull, candidate):
        # Pop points until adding `candidate` keeps the chain turning left
        while len(hull) > 1 and _direction(hull[-2], hull[-1], candidate) != left_turn:
            hull.pop()
        if not hull or hull[-1] != candidate:
            hull.append(candidate)
        return hull

    ordered = sorted(points)
    lower = reduce(_half_hull, ordered, [])
    upper = reduce(_half_hull, reversed(ordered), [])
    # Join the chains, dropping the duplicated endpoints of the upper chain
    lower.extend(upper[i] for i in range(1, len(upper) - 1))
    return lower
def is_left(point0, point1, point2):
    """ Tests if a point is Left|On|Right of an infinite line.

    Ported from the C++ version: on http://geomalgorithms.com/a03-_inclusion.html

    .. note:: This implementation only works in 2-dimensional space.

    :param point0: Point P0
    :param point1: Point P1
    :param point2: Point P2
    :return:
        >0 for P2 left of the line through P0 and P1
        =0 for P2 on the line
        <0 for P2 right of the line
    """
    # Cross product of (P1 - P0) and (P2 - P0)
    dx1 = point1[0] - point0[0]
    dy1 = point1[1] - point0[1]
    dx2 = point2[0] - point0[0]
    dy2 = point2[1] - point0[1]
    return (dx1 * dy2) - (dx2 * dy1)
def wn_poly(point, vertices):
    """ Winding number test for a point in a polygon.

    Ported from the C++ version: http://geomalgorithms.com/a03-_inclusion.html

    .. note:: This implementation only works in 2-dimensional space.

    :param point: point to be tested
    :type point: list, tuple
    :param vertices: vertex points of a polygon vertices[n+1] with vertices[n] = vertices[0]
    :type vertices: list, tuple
    :return: True if the point is inside the input polygon, False otherwise
    :rtype: bool
    """
    winding = 0  # the winding number counter
    # Iterate over the polygon edges V[i] -> V[i+1]; the vertex list is
    # closed, i.e. the last vertex repeats the first one.
    for idx in range(len(vertices) - 1):
        v_start, v_end = vertices[idx], vertices[idx + 1]
        if v_start[1] <= point[1]:
            # Upward crossing with the point strictly left of the edge
            if v_end[1] > point[1] and is_left(v_start, v_end, point) > 0:
                winding += 1  # have a valid up intersect
        elif v_end[1] <= point[1] and is_left(v_start, v_end, point) < 0:
            # Downward crossing with the point strictly right of the edge
            winding -= 1  # have a valid down intersect
    # Non-zero winding number means the point is inside
    return bool(winding)
Add matrix pivot, inverse, determinant computations
"""
.. module:: linalg
:platform: Unix, Windows
:synopsis: Provides linear algebra utility functions
.. moduleauthor:: Onur Rauf Bingol <orbingol@gmail.com>
"""
import os
import math
from copy import deepcopy
from functools import reduce
from . import _linalg
try:
from functools import lru_cache
except ImportError:
from .functools_lru_cache import lru_cache
def vector_cross(vector1, vector2):
    """ Computes the cross-product of the input vectors.

    :param vector1: input vector 1
    :type vector1: list, tuple
    :param vector2: input vector 2
    :type vector2: list, tuple
    :return: result of the cross product
    :rtype: tuple
    """
    # Validate that both inputs are sized containers and non-empty
    try:
        no_input = (vector1 is None or len(vector1) == 0 or
                    vector2 is None or len(vector2) == 0)
        if no_input:
            raise ValueError("Input vectors cannot be empty")
    except TypeError as e:
        print("An error occurred: {}".format(e.args[-1]))
        raise TypeError("Input must be a list or tuple")
    except Exception:
        raise

    if not 1 < len(vector1) <= 3 or not 1 < len(vector2) <= 3:
        raise ValueError("The input vectors should contain 2 or 3 elements")

    # Promote 2-D inputs to 3-D by appending a zero z-component
    v1 = [float(c) for c in vector1] + [0.0] if len(vector1) == 2 else vector1
    v2 = [float(c) for c in vector2] + [0.0] if len(vector2) == 2 else vector2

    # Standard 3-D cross product formula
    return [
        (v1[1] * v2[2]) - (v1[2] * v2[1]),
        (v1[2] * v2[0]) - (v1[0] * v2[2]),
        (v1[0] * v2[1]) - (v1[1] * v2[0]),
    ]
def vector_dot(vector1, vector2):
    """ Computes the dot-product of the input vectors.

    :param vector1: input vector 1
    :type vector1: list, tuple
    :param vector2: input vector 2
    :type vector2: list, tuple
    :return: result of the dot product
    :rtype: float
    """
    # Validate that both inputs are sized containers and non-empty
    try:
        no_input = (vector1 is None or len(vector1) == 0 or
                    vector2 is None or len(vector2) == 0)
        if no_input:
            raise ValueError("Input vectors cannot be empty")
    except TypeError as e:
        print("An error occurred: {}".format(e.args[-1]))
        raise TypeError("Input must be a list or tuple")
    except Exception:
        raise

    # Accumulate the pairwise products, starting from a float accumulator
    return sum((a * b for a, b in zip(vector1, vector2)), 0.0)
def vector_multiply(vector_in, scalar):
    """ Multiplies the vector with a scalar value.

    This operation is also called *vector scaling*.

    :param vector_in: vector
    :type vector_in: list, tuple
    :param scalar: scalar value
    :type scalar: int, float
    :return: updated vector
    :rtype: tuple
    """
    # Scale each component independently
    result = []
    for component in vector_in:
        result.append(component * scalar)
    return result
def vector_sum(vector1, vector2, coeff=1.0):
    """ Sums the vectors.

    This function computes the result of the vector operation :math:`\\overline{v}_{1} + c * \\overline{v}_{2}`, where
    :math:`\\overline{v}_{1}` is ``vector1``, :math:`\\overline{v}_{2}` is ``vector2`` and :math:`c` is ``coeff``.

    :param vector1: vector 1
    :type vector1: list, tuple
    :param vector2: vector 2
    :type vector2: list, tuple
    :param coeff: multiplier for vector 2
    :type coeff: float
    :return: updated vector
    :rtype: list
    """
    # Component-wise v1 + coeff * v2
    out = []
    for a, b in zip(vector1, vector2):
        out.append(a + (coeff * b))
    return out
def vector_normalize(vector_in, decimals=18):
    """ Generates a unit vector from the input.

    :param vector_in: vector to be normalized
    :type vector_in: list, tuple
    :param decimals: number of significands
    :type decimals: int
    :return: the normalized vector (i.e. the unit vector)
    :rtype: list
    """
    # Validate the input; len() raises TypeError for non-sequence arguments
    try:
        if vector_in is None or len(vector_in) == 0:
            raise ValueError("Input vector cannot be empty")
    except TypeError as e:
        print("An error occurred: {}".format(e.args[-1]))
        raise TypeError("Input must be a list or tuple")
    except Exception:
        raise
    magnitude = vector_magnitude(vector_in)
    if not magnitude > 0:
        # A zero vector has no direction and cannot be normalized
        raise ValueError("The magnitude of the vector is zero")
    # Divide each component by the magnitude and round to the requested
    # number of significands via string formatting
    fmt = "{:." + str(decimals) + "f}"
    return [float(fmt.format(component / magnitude)) for component in vector_in]
def vector_generate(start_pt, end_pt, normalize=False):
    """ Generates a vector from 2 input points.

    :param start_pt: start point of the vector
    :type start_pt: list, tuple
    :param end_pt: end point of the vector
    :type end_pt: list, tuple
    :param normalize: if True, the generated vector is normalized
    :type normalize: bool
    :return: a vector from start_pt to end_pt
    :rtype: list
    """
    # Validate the inputs; len() raises TypeError for non-sequence arguments
    try:
        if start_pt is None or len(start_pt) == 0 or end_pt is None or len(end_pt) == 0:
            raise ValueError("Input points cannot be empty")
    except TypeError as e:
        print("An error occurred: {}".format(e.args[-1]))
        raise TypeError("Input must be a list or tuple")
    except Exception:
        raise
    # Component-wise difference end - start
    result = [ep - sp for sp, ep in zip(start_pt, end_pt)]
    if normalize:
        result = vector_normalize(result)
    return result
def vector_mean(*args):
    """ Computes the mean (average) of a list of vectors.

    Computes the arithmetic mean of a list of vectors, which are also organized as a list of
    integers or floating point numbers.

    .. code-block:: python
        :linenos:

        # Import geomdl.utilities module
        from geomdl import utilities

        # Create a list of vectors as an example
        vector_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]

        # Compute mean vector
        mean_vector = utilities.vector_mean(*vector_list)

        # Alternative usage example (same as above):
        mean_vector = utilities.vector_mean([1, 2, 3], [4, 5, 6], [7, 8, 9])

    :param args: list of vectors
    :type args: list, tuple
    :return: mean vector
    :rtype: list
    """
    num_vectors = len(args)
    dim = len(args[0])
    # Accumulate component-wise sums, then divide by the vector count
    totals = [0.0] * dim
    for vec in args:
        for idx in range(dim):
            totals[idx] += vec[idx]
    return [total / num_vectors for total in totals]
def vector_magnitude(vector_in):
    """ Computes the magnitude of the input vector.

    :param vector_in: input vector
    :type vector_in: list, tuple
    :return: magnitude of the vector
    :rtype: float
    """
    # Euclidean norm: sqrt of the sum of squared components
    return math.sqrt(sum((component ** 2 for component in vector_in), 0.0))
def vector_angle_between(vector1, vector2, **kwargs):
    """ Computes the angle between the two input vectors.

    If the keyword argument ``degrees`` is set to *True*, then the angle will be in degrees. Otherwise, it will be
    in radians. By default, ``degrees`` is set to *True*.

    :param vector1: vector
    :type vector1: list, tuple
    :param vector2: vector
    :type vector2: list, tuple
    :return: angle between the vectors
    :rtype: float
    """
    degrees = kwargs.get('degrees', True)
    magn1 = vector_magnitude(vector1)
    magn2 = vector_magnitude(vector2)
    acos_val = vector_dot(vector1, vector2) / (magn1 * magn2)
    # Clamp to the legal acos() domain [-1, 1]: floating-point rounding can
    # make the cosine of (nearly) parallel vectors come out as e.g.
    # 1.0000000000000002, which would raise "math domain error".
    acos_val = max(-1.0, min(1.0, acos_val))
    angle_radians = math.acos(acos_val)
    if degrees:
        return math.degrees(angle_radians)
    else:
        return angle_radians
def vector_is_zero(vector_in, tol=10e-8):
    """ Checks if the input vector is a zero vector.

    :param vector_in: input vector
    :type vector_in: list, tuple
    :param tol: tolerance value
    :type tol: float
    :return: True if the input vector is zero, False otherwise
    :rtype: bool
    """
    if not isinstance(vector_in, (list, tuple)):
        raise TypeError("Input vector must be a list or a tuple")
    # Zero vector: every component within the tolerance band around 0
    return all(abs(component) < tol for component in vector_in)
def point_translate(point_in, vector_in):
    """ Translates the input point using the input vector.

    :param point_in: input point
    :type point_in: list, tuple
    :param vector_in: input vector
    :type vector_in: list, tuple
    :return: translated point
    :rtype: list
    """
    # Validate the inputs; len() raises TypeError for non-sequence arguments
    try:
        if point_in is None or len(point_in) == 0 or vector_in is None or len(vector_in) == 0:
            raise ValueError("Input arguments cannot be empty")
    except TypeError as e:
        print("An error occurred: {}".format(e.args[-1]))
        raise TypeError("Input must be a list or tuple")
    except Exception:
        raise
    # Add vector components to the point coordinates
    translated = []
    for coord, component in zip(point_in, vector_in):
        translated.append(coord + component)
    return translated
def point_distance(pt1, pt2):
    """ Computes distance between two points.

    :param pt1: point 1
    :type pt1: list, tuple
    :param pt2: point 2
    :type pt2: list, tuple
    :return: distance between input points
    :rtype: float
    """
    if len(pt1) != len(pt2):
        raise ValueError("The input points should have the same dimension")
    # Distance is the magnitude of the vector from pt1 to pt2
    return vector_magnitude(vector_generate(pt1, pt2, normalize=False))
def point_mid(pt1, pt2):
    """ Computes the midpoint of the input points.

    :param pt1: point 1
    :type pt1: list, tuple
    :param pt2: point 2
    :type pt2: list, tuple
    :return: midpoint
    :rtype: list
    """
    if len(pt1) != len(pt2):
        raise ValueError("The input points should have the same dimension")
    # Midpoint = pt1 translated by half of the pt1 -> pt2 vector
    half_vector = vector_multiply(vector_generate(pt1, pt2, normalize=False), 0.5)
    return point_translate(pt1, half_vector)
# NOTE: os.environ values are strings; the original passed the raw string
# straight to lru_cache(maxsize=...), which requires an int (TypeError).
# Additionally, caching the mutable list-of-lists directly is unsafe:
# matrix_pivot() mutates the returned matrix in place, which would corrupt
# the cached value for every later caller. Cache an immutable template
# instead and hand out fresh lists.
@lru_cache(maxsize=int(os.environ.get('GEOMDL_CACHE_SIZE', 16)))
def _matrix_identity_template(n):
    # Immutable (hashable, mutation-proof) identity template
    return tuple(tuple(1.0 if i == j else 0.0 for i in range(n)) for j in range(n))


def matrix_identity(n):
    """ Generates a NxN identity matrix.

    :param n: size of the matrix
    :type n: int
    :return: identity matrix
    :rtype: list
    """
    # Return a fresh, caller-mutable copy of the cached template
    return [list(row) for row in _matrix_identity_template(n)]
def matrix_pivot(m, sign=False):
    """ Computes the pivot matrix for M, a square matrix.

    This function computes

    * the permutation matrix, P
    * the product of M and P, M x P
    * determinant of P, det(P), if ``sign = True``

    :param m: input matrix
    :type m: list, tuple
    :param sign: flag to return the determinant of the permutation matrix, P
    :type sign: bool
    :return: a tuple containing the matrix product of M x P, P and det(P)
    :rtype: tuple
    """
    mp = deepcopy(m)
    n = len(mp)
    p = matrix_identity(n)  # permutation matrix
    num_rowswap = 0
    for j in range(0, n):
        # Partial pivoting: find the row with the largest |value| in column j
        row = j
        a_max = 0.0
        for i in range(j, n):
            a_abs = abs(mp[i][j])
            if a_abs > a_max:
                a_max = a_abs
                row = i
        if j != row:
            num_rowswap += 1
            for q in range(0, n):
                # Swap rows in both P and the working copy of M
                p[j][q], p[row][q] = p[row][q], p[j][q]
                mp[j][q], mp[row][q] = mp[row][q], mp[j][q]
    if sign:
        # det(P) = (-1)^num_rowswap. BUGFIX: the original wrote
        # ``-1 ** num_rowswap``, which Python parses as ``-(1 ** n)`` and is
        # therefore always -1, giving the wrong sign for even swap counts
        # (unary minus binds looser than **).
        return mp, p, (-1) ** num_rowswap
    return mp, p
def matrix_inverse(m):
    """ Computes the inverse of the matrix via LUP decomposition.

    :param m: input matrix
    :type m: list, tuple
    :return: inverse of the matrix
    :rtype: list
    """
    # Pivot, factor into L and U, then solve A * A_inv = P column by column
    pivoted, perm = matrix_pivot(m)
    lower, upper = lu_decomposition(pivoted)
    return lu_solve(lower, upper, perm)
def matrix_determinant(m):
    """ Computes the determinant of the square matrix M via LUP decomposition.

    :param m: input matrix
    :type m: list, tuple
    :return: determinant of the matrix
    :rtype: float
    """
    pivoted, _, perm_sign = matrix_pivot(m, sign=True)
    lower, upper = lu_decomposition(pivoted)
    # det(A) = det(P) * product of the diagonals of L and U
    det = 1.0
    for idx in range(len(m)):
        det *= lower[idx][idx] * upper[idx][idx]
    return det * perm_sign
def matrix_transpose(m):
    """ Transposes the input matrix.

    The input matrix :math:`m` is a 2-dimensional array.

    :param m: input matrix with dimensions :math:`(n \\times m)`
    :type m: list, tuple
    :return: transpose matrix with dimensions :math:`(m \\times n)`
    :rtype: list
    """
    # zip(*m) pairs up the i-th element of every row, i.e. the columns of m
    return [list(column) for column in zip(*m)]
def matrix_multiply(m1, m2):
    """ Matrix multiplication (iterative algorithm).

    The running time of the iterative matrix multiplication algorithm is :math:`O(n^{3})`.

    :param m1: 1st matrix with dimensions :math:`(n \\times p)`
    :type m1: list, tuple
    :param m2: 2nd matrix with dimensions :math:`(p \\times m)`
    :type m2: list, tuple
    :return: resultant matrix with dimensions :math:`(n \\times m)`
    :rtype: list
    """
    num_rows = len(m1)
    num_cols = len(m2[0])
    inner_dim = len(m2)
    product = []
    for i in range(num_rows):
        row_out = []
        for j in range(num_cols):
            # Accumulate the i-th row of m1 against the j-th column of m2
            acc = 0.0
            for k in range(inner_dim):
                acc += float(m1[i][k] * m2[k][j])
            row_out.append(acc)
        product.append(row_out)
    return product
def triangle_normal(tri):
    """ Computes the (approximate) normal vector of the input triangle.

    :param tri: triangle object
    :type tri: elements.Triangle
    :return: normal vector of the triangle (cross product of two edges)
    :rtype: list
    """
    # Two edge vectors of the triangle; their cross product is the normal
    edge1 = vector_generate(tri.vertices[0].data, tri.vertices[1].data)
    edge2 = vector_generate(tri.vertices[1].data, tri.vertices[2].data)
    return vector_cross(edge1, edge2)
def triangle_center(tri, uv=False):
    """ Computes the center of mass of the input triangle.

    :param tri: triangle object
    :type tri: elements.Triangle
    :param uv: if True, then finds parametric position of the center of mass
    :type uv: bool
    :return: center of mass of the triangle
    :rtype: tuple
    """
    if uv:
        # Parametric (u, v) coordinates of each vertex
        data = [t.uv for t in tri]
        mid = [0.0, 0.0]
    else:
        # Spatial (x, y, z) coordinates of each vertex
        data = tri.vertices
        mid = [0.0, 0.0, 0.0]
    # Sum the three vertex coordinates component-wise, then average
    for vertex in data:
        mid = [m + c for m, c in zip(mid, vertex)]
    return tuple(float(m) / 3.0 for m in mid)
# NOTE: os.environ values are strings; the original passed the raw string
# straight to lru_cache(maxsize=...), which requires an int (TypeError).
@lru_cache(maxsize=int(os.environ.get('GEOMDL_CACHE_SIZE', 128)))
def binomial_coefficient(k, i):
    """ Computes the binomial coefficient (denoted by *k choose i*).

    Please see the following website for details: http://mathworld.wolfram.com/BinomialCoefficient.html

    :param k: size of the set of distinct elements
    :type k: int
    :param i: size of the subsets
    :type i: int
    :return: combination of *k* and *i*
    :rtype: float
    """
    # Special case: cannot choose more elements than the set contains
    if i > k:
        return float(0)
    # Compute binomial coefficient as k! / ((k - i)! * i!)
    k_fact = math.factorial(k)
    i_fact = math.factorial(i)
    k_i_fact = math.factorial(k - i)
    return float(k_fact / (k_i_fact * i_fact))
def lu_decomposition(matrix_a):
    """ LU-Factorization method using Doolittle's Method for solution of linear systems.

    Decomposes the matrix :math:`A` such that :math:`A = LU`.

    The input matrix is represented by a list or a tuple. The input matrix is **2-dimensional**, i.e. list of lists of
    integers and/or floats.

    :param matrix_a: Input matrix (must be a square matrix)
    :type matrix_a: list, tuple
    :return: a tuple containing matrices L and U
    :rtype: tuple
    """
    # Every row must be as long as the matrix itself (square check)
    size = len(matrix_a)
    for row_idx, row in enumerate(matrix_a):
        if len(row) != size:
            raise ValueError("The input must be a square matrix. " +
                             "Row " + str(row_idx + 1) + " has a size of " + str(len(row)) + ".")
    # Delegate the actual factorization to the compiled/internal helper
    return _linalg.doolittle(matrix_a)
def forward_substitution(matrix_l, matrix_b):
    """ Forward substitution method for the solution of linear systems.

    Solves the equation :math:`Ly = b` using forward substitution method
    where :math:`L` is a lower triangular matrix and :math:`b` is a column matrix.

    :param matrix_l: L, lower triangular matrix
    :type matrix_l: list, tuple
    :param matrix_b: b, column matrix
    :type matrix_b: list, tuple
    :return: y, column matrix
    :rtype: list
    """
    size = len(matrix_b)
    solution = [0.0] * size
    # First unknown follows directly from the first equation
    solution[0] = float(matrix_b[0]) / float(matrix_l[0][0])
    # Each subsequent unknown uses all previously computed values
    for row in range(1, size):
        acc = sum([matrix_l[row][col] * solution[col] for col in range(0, row)])
        solution[row] = (float(matrix_b[row]) - acc) / float(matrix_l[row][row])
    return solution
def backward_substitution(matrix_u, matrix_y):
    """ Backward substitution method for the solution of linear systems.

    Solves the equation :math:`Ux = y` using backward substitution method
    where :math:`U` is a upper triangular matrix and :math:`y` is a column matrix.

    :param matrix_u: U, upper triangular matrix
    :type matrix_u: list, tuple
    :param matrix_y: y, column matrix
    :type matrix_y: list, tuple
    :return: x, column matrix
    :rtype: list
    """
    size = len(matrix_y)
    solution = [0.0] * size
    # Last unknown follows directly from the last equation
    solution[size - 1] = float(matrix_y[size - 1]) / float(matrix_u[size - 1][size - 1])
    # Walk upwards; the col == row term multiplies the still-zero
    # solution[row], so it contributes nothing to the sum
    for row in range(size - 2, -1, -1):
        acc = sum([matrix_u[row][col] * solution[col] for col in range(row, size)])
        solution[row] = (float(matrix_y[row]) - acc) / float(matrix_u[row][row])
    return solution
def lu_solve(m_l, m_u, b):
    """ Solves a system of linear equations via forward-backward substitution.

    This function solves Ax=b, where A = LU. A is a NxN matrix,
    b is NxM matrix of M column vectors. Each column of x is a solution for
    corresponding column of b.

    :param m_l: lower triangular decomposition of matrix A
    :type m_l: list
    :param m_u: upper triangular decomposition of matrix A
    :type m_u: list
    :param b: matrix of M column vectors
    :type b: list
    :return: x, the solution matrix
    :rtype: list
    """
    num_cols = len(b[0])
    num_rows = len(b)
    x = [[0.0] * num_cols for _ in range(num_rows)]
    # Solve one right-hand-side column at a time: Ly = b_col, then Ux = y
    for col in range(num_cols):
        rhs = [row[col] for row in b]
        intermediate = forward_substitution(m_l, rhs)
        solution_col = backward_substitution(m_u, intermediate)
        for row in range(num_rows):
            x[row][col] = solution_col[row]
    return x
def linspace(start, stop, num, decimals=18):
    """ Returns a list of evenly spaced numbers over a specified interval.

    Inspired from Numpy's linspace function: https://github.com/numpy/numpy/blob/master/numpy/core/function_base.py

    :param start: starting value
    :type start: float
    :param stop: end value
    :type stop: float
    :param num: number of samples to generate
    :type num: int
    :param decimals: number of significands
    :type decimals: int
    :return: a list of equally spaced numbers
    :rtype: list
    """
    start = float(start)
    stop = float(stop)
    # Degenerate interval: start and stop (nearly) coincide
    if abs(start - stop) <= 10e-8:
        return [start]
    num = int(num)
    fmt = "{:." + str(decimals) + "f}"
    if num > 1:
        divisions = num - 1
        span = stop - start
        # Evenly spaced samples, rounded via string formatting
        return [float(fmt.format((start + (float(idx) * float(span) / float(divisions)))))
                for idx in range(num)]
    return [float(fmt.format(start))]
def frange(start, stop, step=1.0):
    """ Implementation of Python's ``range()`` function which works with floats.

    Reference to this implementation: https://stackoverflow.com/a/36091634

    :param start: start value
    :type start: float
    :param stop: end value
    :type stop: float
    :param step: increment
    :type step: float
    :return: float
    :rtype: generator
    """
    counter = 0.0
    current = float(start)  # prevent yielding integers
    origin = current
    half_step = step / 2.0
    yield current  # always yield first value
    # Recompute from the origin each time (origin + n*step) instead of
    # repeated addition, which limits float error accumulation
    while current + half_step < stop:
        counter += 1.0
        current = origin + counter * step
        yield current
    if stop > current:
        # yield the end value if a large step overshot it (e.g. step=0.1)
        yield stop
def convex_hull(points):
    """ Returns points on convex hull in counterclockwise order according to Graham's scan algorithm.

    Reference: https://gist.github.com/arthur-e/5cf52962341310f438e96c1f3c3398b8

    .. note:: This implementation only works in 2-dimensional space.

    :param points: list of 2-dimensional points
    :type points: list, tuple
    :return: convex hull of the input points
    :rtype: list
    """
    left_turn = 1

    def _turn(p, q, r):
        # Sign of the cross product: +1 left turn, -1 right turn, 0 collinear
        cross = (q[0] - p[0]) * (r[1] - p[1]) - (r[0] - p[0]) * (q[1] - p[1])
        return (cross > 0) - (cross < 0)

    def _keep_left(hull, r):
        # Pop points that would make a non-left turn, then append r
        while len(hull) > 1 and _turn(hull[-2], hull[-1], r) != left_turn:
            hull.pop()
        if not len(hull) or hull[-1] != r:
            hull.append(r)
        return hull

    ordered = sorted(points)
    # Build the lower hull left-to-right and the upper hull right-to-left
    lower = []
    for pt in ordered:
        _keep_left(lower, pt)
    upper = []
    for pt in reversed(ordered):
        _keep_left(upper, pt)
    # Join the chains, dropping the duplicated endpoints of the upper chain
    lower.extend(upper[1:-1])
    return lower
def is_left(point0, point1, point2):
    """ Tests if a point is Left|On|Right of an infinite line.

    Ported from the C++ version: on http://geomalgorithms.com/a03-_inclusion.html

    .. note:: This implementation only works in 2-dimensional space.

    :param point0: Point P0
    :param point1: Point P1
    :param point2: Point P2
    :return:
        >0 for P2 left of the line through P0 and P1
        =0 for P2 on the line
        <0 for P2 right of the line
    """
    # 2-D cross product of (P1 - P0) and (P2 - P0)
    dx1 = point1[0] - point0[0]
    dy1 = point1[1] - point0[1]
    dx2 = point2[0] - point0[0]
    dy2 = point2[1] - point0[1]
    return (dx1 * dy2) - (dx2 * dy1)
def wn_poly(point, vertices):
    """ Winding number test for a point in a polygon.

    Ported from the C++ version: http://geomalgorithms.com/a03-_inclusion.html

    .. note:: This implementation only works in 2-dimensional space.

    :param point: point to be tested
    :type point: list, tuple
    :param vertices: vertex points of a polygon vertices[n+1] with vertices[n] = vertices[0]
    :type vertices: list, tuple
    :return: True if the point is inside the input polygon, False otherwise
    :rtype: bool
    """
    winding = 0
    # Each edge runs from vertices[i] to vertices[i + 1]; the polygon is
    # closed, i.e. the last vertex repeats the first
    for idx in range(len(vertices) - 1):
        v_start = vertices[idx]
        v_end = vertices[idx + 1]
        if v_start[1] <= point[1]:
            # upward crossing counts when the point is strictly left of the edge
            if v_end[1] > point[1] and is_left(v_start, v_end, point) > 0:
                winding += 1
        else:
            # downward crossing counts when the point is strictly right of the edge
            if v_end[1] <= point[1] and is_left(v_start, v_end, point) < 0:
                winding -= 1
    # Non-zero winding number means the point is inside
    return bool(winding)
|
e1d56a8f-2ead-11e5-bc8b-7831c1d44c14
e1db120c-2ead-11e5-bbde-7831c1d44c14
e1db120c-2ead-11e5-bbde-7831c1d44c14 |
"""vobject module for reading vCard and vCalendar files."""
from __future__ import print_function
import copy
import re
import sys
import logging
#import codecs
import six
# Python 3 no longer has a basestring type, so....
try:
basestring = basestring
except NameError:
basestring = (str,bytes)
#------------------------------------ Logging ----------------------------------
# Module-level logger; a console handler is attached only when the root
# logger has none configured, so importing this module does not duplicate
# an application's own logging setup.
logger = logging.getLogger(__name__)
if not logging.getLogger().handlers:
    handler = logging.StreamHandler()
    formatter = logging.Formatter('%(name)s %(levelname)s %(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)
logger.setLevel(logging.ERROR)  # Log errors
DEBUG = False  # Don't waste time on debug calls

#----------------------------------- Constants ---------------------------------
# Line-ending and whitespace characters used when parsing/serializing
# vCard / vCalendar content lines (RFC 2425 folding uses CRLF + SPACE/TAB).
CR = '\r'
LF = '\n'
CRLF = CR + LF
SPACE = ' '
TAB = '\t'
SPACEORTAB = SPACE + TAB
#-------------------------------- Useful modules -------------------------------
# use doctest, it kills two birds with one stone and docstrings often become
# more readable to boot (see parseLine's docstring).
# use logging, then when debugging we can just set our verbosity.
# use epydoc syntax for documenting code, please document every class and non-
# trivial method (see http://epydoc.sourceforge.net/epytext.html
# and http://epydoc.sourceforge.net/fields.html). Also, please
# follow http://www.python.org/peps/pep-0257.html for docstrings.
#-------------------------------------------------------------------------------
#--------------------------------- Main classes --------------------------------
class VBase(object):
    """Base class for ContentLine and Component.

    @ivar behavior:
        The Behavior class associated with this object, which controls
        validation, transformations, and encoding.
    @ivar parentBehavior:
        The object's parent's behavior, or None if no behaviored parent exists.
    @ivar isNative:
        Boolean describing whether this component is a Native instance.
    @ivar group:
        An optional group prefix, should be used only to indicate sort order in
        vCards, according to spec.

    Current spec: 4.0 (http://tools.ietf.org/html/rfc6350)
    """
    def __init__(self, group=None, *args, **kwds):
        super(VBase, self).__init__(*args, **kwds)
        self.group = group
        self.behavior = None
        self.parentBehavior = None
        self.isNative = False

    def copy(self, copyit):
        """Copy the VBase-level attributes from another instance."""
        self.group = copyit.group
        self.behavior = copyit.behavior
        self.parentBehavior = copyit.parentBehavior
        self.isNative = copyit.isNative

    def validate(self, *args, **kwds):
        """Call the behavior's validate method, or return True."""
        if self.behavior:
            return self.behavior.validate(self, *args, **kwds)
        else: return True

    def getChildren(self):
        """Return an iterable containing the contents of the object."""
        # Base implementation has no children; Component overrides this.
        return []

    def clearBehavior(self, cascade=True):
        """Set behavior to None. Do for all descendants if cascading."""
        self.behavior=None
        # NOTE(review): cascading relies on transformChildrenFromNative() to
        # reset descendants; in this base class that method is a no-op, so the
        # cascade only has an effect on subclasses that override it.
        if cascade: self.transformChildrenFromNative()

    def autoBehavior(self, cascade=False):
        """
        Set behavior if name is in self.parentBehavior.knownChildren.

        If cascade is True, unset behavior and parentBehavior for all
        descendants, then recalculate behavior and parentBehavior.
        """
        parentBehavior = self.parentBehavior
        if parentBehavior is not None:
            # Look up this object's name in the parent's registry of known
            # child types; knownChildTup[2] is the behavior id used for lookup.
            knownChildTup = parentBehavior.knownChildren.get(self.name, None)
            if knownChildTup is not None:
                behavior = getBehavior(self.name, knownChildTup[2])
                if behavior is not None:
                    self.setBehavior(behavior, cascade)
                    # Decode serialized content lines as soon as a behavior
                    # capable of decoding them is known.
                    if isinstance(self, ContentLine) and self.encoded:
                        self.behavior.decode(self)
            elif isinstance(self, ContentLine):
                # Unknown child line: fall back to the parent's default behavior.
                self.behavior = parentBehavior.defaultBehavior
                if self.encoded and self.behavior:
                    self.behavior.decode(self)

    def setBehavior(self, behavior, cascade=True):
        """
        Set behavior. If cascade is True, autoBehavior all descendants.
        """
        self.behavior = behavior
        if cascade:
            for obj in self.getChildren():
                obj.parentBehavior = behavior
                obj.autoBehavior(True)

    def transformToNative(self):
        """
        Transform this object into a custom VBase subclass.

        transformToNative should always return a representation of this object.
        It may do so by modifying self in place then returning self, or by
        creating a new object.
        """
        if self.isNative or not self.behavior or not self.behavior.hasNative:
            return self
        else:
            try:
                return self.behavior.transformToNative(self)
            except Exception as e:
                # wrap errors in transformation in a ParseError, annotated
                # with the line number when one is known
                lineNumber = getattr(self, 'lineNumber', None)
                if isinstance(e, ParseError):
                    if lineNumber is not None:
                        e.lineNumber = lineNumber
                    raise
                else:
                    msg = "In transformToNative, unhandled exception on line %s: %s: %s"
                    msg = msg % (lineNumber, sys.exc_info()[0], sys.exc_info()[1])
                    raise ParseError(msg, lineNumber)

    def transformFromNative(self):
        """
        Return self transformed into a ContentLine or Component if needed.

        May have side effects. If it does, transformFromNative and
        transformToNative MUST have perfectly inverse side effects. Allowing
        such side effects is convenient for objects whose transformations only
        change a few attributes.

        Note that it isn't always possible for transformFromNative to be a
        perfect inverse of transformToNative, in such cases transformFromNative
        should return a new object, not self after modifications.
        """
        if self.isNative and self.behavior and self.behavior.hasNative:
            try:
                return self.behavior.transformFromNative(self)
            except Exception as e:
                # wrap errors in transformation in a NativeError, annotated
                # with the line number when one is known
                lineNumber = getattr(self, 'lineNumber', None)
                if isinstance(e, NativeError):
                    if lineNumber is not None:
                        e.lineNumber = lineNumber
                    raise
                else:
                    msg = "In transformFromNative, unhandled exception on line %s %s: %s"
                    msg = msg % (lineNumber, sys.exc_info()[0], sys.exc_info()[1])
                    raise NativeError(msg, lineNumber)
        else:
            # NOTE(review): the non-native path returns six.u(self) — a
            # unicode conversion of this object — rather than self; looks
            # suspicious, verify against the upstream vobject implementation.
            return six.u(self)

    def transformChildrenToNative(self):
        """Recursively replace children with their native representation."""
        # No-op here; Component overrides this to walk its contents.
        pass

    def transformChildrenFromNative(self, clearBehavior=True):
        """Recursively transform native children to vanilla representations."""
        # No-op here; Component overrides this to walk its contents.
        pass

    def serialize(self, buf=None, lineLength=75, validate=True, behavior=None):
        """Serialize to buf if it exists, otherwise return a string.

        Use self.behavior.serialize if behavior exists.
        """
        if not behavior:
            behavior = self.behavior
        if behavior:
            if DEBUG: logger.debug("serializing %s with behavior" % self.name)
            return behavior.serialize(self, buf, lineLength, validate)
        else:
            if DEBUG: logger.debug("serializing %s without behavior" % self.name)
            return defaultSerialize(self, buf, lineLength)
def toVName(name, stripNum = 0, upper = False):
    """
    Turn a Python name into an iCalendar style name, optionally uppercase and
    with characters stripped off.
    """
    result = name.upper() if upper else name
    if stripNum != 0:
        # Drop a fixed-length suffix (e.g. '_param' or '_list')
        result = result[:-stripNum]
    # Underscores (legal in Python) become dashes (legal in IANA tokens)
    return result.replace('_', '-')
class ContentLine(VBase):
    """
    Holds one content line for formats like vCard and vCalendar.

    For example::
      <SUMMARY{u'param1' : [u'val1'], u'param2' : [u'val2']}Bastille Day Party>

    @ivar name:
        The uppercased name of the contentline.
    @ivar params:
        A dictionary of parameters and associated lists of values (the list may
        be empty for empty parameters).
    @ivar value:
        The value of the contentline.
    @ivar singletonparams:
        A list of parameters for which it's unclear if the string represents the
        parameter name or the parameter value. In vCard 2.1, "The value string
        can be specified alone in those cases where the value is unambiguous".
        This is crazy, but we have to deal with it.
    @ivar encoded:
        A boolean describing whether the data in the content line is encoded.
        Generally, text read from a serialized vCard or vCalendar should be
        considered encoded. Data added programmatically should not be encoded.
    @ivar lineNumber:
        An optional line number associated with the contentline.
    """
    def __init__(self, name, params, value, group=None, encoded=False,
                 isNative=False, lineNumber = None, *args, **kwds):
        """
        Take output from parseLine, convert params list to dictionary.

        Group is used as a positional argument to match parseLine's return
        """
        super(ContentLine, self).__init__(group, *args, **kwds)
        self.name = name.upper()
        self.encoded = encoded
        self.params = {}
        self.singletonparams = []
        self.isNative = isNative
        self.lineNumber = lineNumber

        # if not unicode, attempt to make it so
        # If it's already unicode, attempting to re-encode will throw an error.
        try:
            self.value = six.u(value)
        except Exception:
            self.value = value

        def updateTable(x):
            # One-element params are ambiguous name/value strings (vCard 2.1
            # "singleton" parameters); otherwise x is [NAME, value, ...].
            if len(x) == 1:
                self.singletonparams += x
            else:
                paramlist = self.params.setdefault(six.u(x[0].upper()), [])
                paramlist.extend([six.u(p) for p in x[1:]])
        if params:
            # BUGFIX: the original used map(updateTable, params). On Python 3
            # map() returns a lazy iterator that was never consumed, so
            # updateTable never ran and every parameter was silently dropped.
            # (Leftover debug print() calls were also removed here.)
            for param in params:
                updateTable(param)

        # Handle quoted-printable transfer encoding, which may be declared
        # either as ENCODING=QUOTED-PRINTABLE or as a bare singleton param.
        qp = False
        if 'ENCODING' in self.params:
            if 'QUOTED-PRINTABLE' in self.params['ENCODING']:
                qp = True
                self.params['ENCODING'].remove('QUOTED-PRINTABLE')
                if 0==len(self.params['ENCODING']):
                    del self.params['ENCODING']
        if 'QUOTED-PRINTABLE' in self.singletonparams:
            qp = True
            self.singletonparams.remove('QUOTED-PRINTABLE')
        if qp:
            # NOTE(review): str.decode('quoted-printable') exists only on
            # Python 2 byte strings; this path would raise on Python 3 —
            # verify against the upstream vobject implementation.
            self.value = self.value.decode('quoted-printable')

    @classmethod
    def duplicate(clz, copyit):
        """Create a new ContentLine that is a deep-ish copy of copyit."""
        newcopy = clz('', {}, '')
        newcopy.copy(copyit)
        return newcopy

    def copy(self, copyit):
        """Copy all ContentLine state from another instance."""
        super(ContentLine, self).copy(copyit)
        self.name = copyit.name
        self.value = copy.copy(copyit.value)
        # BUGFIX: was "self.encoded = self.encoded", a self-assignment that
        # silently kept the target's own flag instead of copying the source's.
        self.encoded = copyit.encoded
        self.params = copy.copy(copyit.params)
        for k,v in self.params.items():
            # Copy each value list so the copies don't share mutable state
            self.params[k] = copy.copy(v)
        self.singletonparams = copy.copy(copyit.singletonparams)
        self.lineNumber = copyit.lineNumber

    def __eq__(self, other):
        # Equal when name, params and value all match; any failure to
        # compare (e.g. other lacks the attributes) counts as not equal.
        try:
            return (self.name == other.name) and (self.params == other.params) and (self.value == other.value)
        except Exception:
            return False

    def _getAttributeNames(self):
        """Return a list of attributes of the object.

        Python 2.6 will add __dir__ to customize what attributes are returned
        by dir, for now copy PyCrust so that IPython can accurately do
        completion.
        """
        keys = self.params.keys()
        params = [param + '_param' for param in keys]
        params.extend(param + '_paramlist' for param in keys)
        return params

    def __getattr__(self, name):
        """
        Make params accessible via self.foo_param or self.foo_paramlist.

        Underscores, legal in python variable names, are converted to dashes,
        which are legal in IANA tokens.
        """
        try:
            if name.endswith('_param'):
                # foo_param -> first value of params['FOO']
                return self.params[toVName(name, 6, True)][0]
            elif name.endswith('_paramlist'):
                # foo_paramlist -> full value list of params['FOO']
                return self.params[toVName(name, 10, True)]
            else:
                raise AttributeError(name)
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        """Make params accessible via self.foo_param or self.foo_paramlist.

        Underscores, legal in python variable names, are converted to dashes,
        which are legal in IANA tokens.
        """
        if name.endswith('_param'):
            # Single-value convenience setter; lists are stored as-is
            if type(value) == list:
                self.params[toVName(name, 6, True)] = value
            else:
                self.params[toVName(name, 6, True)] = [value]
        elif name.endswith('_paramlist'):
            if type(value) == list:
                self.params[toVName(name, 10, True)] = value
            else:
                raise VObjectError("Parameter list set to a non-list")
        else:
            # Honor property setters defined on the class before falling
            # back to plain attribute assignment
            prop = getattr(self.__class__, name, None)
            if isinstance(prop, property):
                prop.fset(self, value)
            else:
                object.__setattr__(self, name, value)

    def __delattr__(self, name):
        try:
            if name.endswith('_param'):
                del self.params[toVName(name, 6, True)]
            elif name.endswith('_paramlist'):
                del self.params[toVName(name, 10, True)]
            else:
                object.__delattr__(self, name)
        except KeyError:
            raise AttributeError(name)

    def valueRepr( self ):
        """
        Transform the representation of the value
        according to the behavior, if any.
        """
        v = self.value
        if self.behavior:
            v = self.behavior.valueRepr( self )
        return v

    def __str__(self):
        return "<%s%s%s>" % (self.name, self.params, self.valueRepr())

    def __repr__(self):
        return self.__str__()

    def prettyPrint(self, level = 0, tabwidth=3):
        """Print an indented human-readable dump of this line and its params."""
        pre = ' ' * level * tabwidth
        print(pre, self.name + ":", self.valueRepr())
        if self.params:
            print(pre, "params for ", self.name + ':')
            for k in self.params.keys():
                print(pre + ' ' * tabwidth, k, self.params[k])
class Component(VBase):
"""A complex property that can contain multiple ContentLines.
For our purposes, a component must start with a BEGIN:xxxx line and end with
END:xxxx, or have a PROFILE:xxx line if a top-level component.
@ivar contents:
A dictionary of lists of Component or ContentLine instances. The keys
are the lowercased names of child ContentLines or Components.
Note that BEGIN and END ContentLines are not included in contents.
@ivar name:
Uppercase string used to represent this Component, i.e VCARD if the
serialized object starts with BEGIN:VCARD.
@ivar useBegin:
A boolean flag determining whether BEGIN: and END: lines should
be serialized.
"""
def __init__(self, name=None, *args, **kwds):
super(Component, self).__init__(*args, **kwds)
self.contents = {}
if name:
self.name=name.upper()
self.useBegin = True
else:
self.name = ''
self.useBegin = False
self.autoBehavior()
@classmethod
def duplicate(clz, copyit):
newcopy = clz()
newcopy.copy(copyit)
return newcopy
def copy(self, copyit):
super(Component, self).copy(copyit)
# deep copy of contents
self.contents = {}
for key, lvalue in copyit.contents.items():
newvalue = []
for value in lvalue:
newitem = value.duplicate(value)
newvalue.append(newitem)
self.contents[key] = newvalue
self.name = copyit.name
self.useBegin = copyit.useBegin
def setProfile(self, name):
"""Assign a PROFILE to this unnamed component.
Used by vCard, not by vCalendar.
"""
if self.name or self.useBegin:
if self.name == name: return
raise VObjectError("This component already has a PROFILE or uses BEGIN.")
self.name = name.upper()
def _getAttributeNames(self):
"""Return a list of attributes of the object.
Python 2.6 will add __dir__ to customize what attributes are returned
by dir, for now copy PyCrust so that IPython can accurately do
completion.
"""
names = self.contents.keys()
names.extend(name + '_list' for name in self.contents.keys())
return names
def __getattr__(self, name):
"""For convenience, make self.contents directly accessible.
Underscores, legal in python variable names, are converted to dashes,
which are legal in IANA tokens.
"""
# if the object is being re-created by pickle, self.contents may not
# be set, don't get into an infinite loop over the issue
if name == 'contents':
return object.__getattribute__(self, name)
try:
if name.endswith('_list'):
return self.contents[toVName(name, 5)]
else:
return self.contents[toVName(name)][0]
except KeyError:
raise AttributeError(name)
# Attributes stored normally on the instance rather than in self.contents.
normal_attributes = ['contents','name','behavior','parentBehavior','group']
def __setattr__(self, name, value):
    """Route lowercase non-reserved names into self.contents.

    Underscores, legal in python variable names, are converted to dashes,
    which are legal in IANA tokens.
    """
    if name not in self.normal_attributes and name.lower() == name:
        if type(value) == list:
            key = name[:-5] if name.endswith('_list') else name
            self.contents[toVName(key)] = value
        elif name.endswith('_list'):
            raise VObjectError("Component list set to a non-list")
        else:
            self.contents[toVName(name)] = [value]
    else:
        prop = getattr(self.__class__, name, None)
        if isinstance(prop, property):
            prop.fset(self, value)
        else:
            object.__setattr__(self, name, value)
def __delattr__(self, name):
    """Delete from self.contents for lowercase names; else a normal delete."""
    try:
        if name not in self.normal_attributes and name.lower() == name:
            if name.endswith('_list'):
                del self.contents[toVName(name, 5)]
            else:
                del self.contents[toVName(name)]
        else:
            object.__delattr__(self, name)
    except KeyError:
        raise AttributeError(name)
def getChildValue(self, childName, default=None, childNumber=0):
    """Return the value of child number childNumber, or default if absent."""
    children = self.contents.get(toVName(childName))
    if children is None:
        return default
    return children[childNumber].value
def add(self, objOrName, group=None):
    """Add objOrName to contents, setting behavior if it can be inferred.

    If objOrName is a string, create an empty component or line based on
    this component's behavior; if no behavior is found for the name, add a
    plain ContentLine. group is an optional prefix to the name of the
    object (see RFC 2425). Returns the object that was added.
    """
    if isinstance(objOrName, VBase):
        obj = objOrName
        if self.behavior:
            obj.parentBehavior = self.behavior
            obj.autoBehavior(True)
    else:
        name = objOrName.upper()
        try:
            # BUG FIX (idiom): the version id local was named 'id',
            # shadowing the builtin.
            behaviorId = self.behavior.knownChildren[name][2]
            behavior = getBehavior(name, behaviorId)
            if behavior.isComponent:
                obj = Component(name)
            else:
                obj = ContentLine(name, [], '', group)
            obj.parentBehavior = self.behavior
            obj.behavior = behavior
            obj = obj.transformToNative()
        except (KeyError, AttributeError):
            obj = ContentLine(objOrName, [], '', group)
        if obj.behavior is None and self.behavior is not None:
            if isinstance(obj, ContentLine):
                obj.behavior = self.behavior.defaultBehavior
    self.contents.setdefault(obj.name.lower(), []).append(obj)
    return obj
def remove(self, obj):
    """Remove obj from contents; silently ignore it if not present."""
    siblings = self.contents.get(obj.name.lower())
    if siblings:
        try:
            siblings.remove(obj)
            if not siblings:
                del self.contents[obj.name.lower()]
        except ValueError:
            pass
def getChildren(self):
    """Yield every child ContentLine or Component."""
    for children in self.contents.values():
        for child in children:
            yield child
def components(self):
    """Return an iterable of all Component children."""
    return (child for child in self.getChildren()
            if isinstance(child, Component))
def lines(self):
    """Return an iterable of all ContentLine children."""
    return (child for child in self.getChildren()
            if isinstance(child, ContentLine))
def sortChildKeys(self):
    """Return child keys: behavior.sortFirst keys first, the rest sorted."""
    try:
        first = [key for key in self.behavior.sortFirst if key in self.contents]
    except Exception:
        # No behavior, or behavior without sortFirst.
        first = []
    return first + sorted(key for key in self.contents.keys()
                          if key not in first)
def getSortedChildren(self):
    """Return all children as one flat list in sortChildKeys() order."""
    return [child for key in self.sortChildKeys()
            for child in self.contents[key]]
def setBehaviorFromVersionLine(self, versionLine):
    """Set behavior if one is registered for (self.name, versionLine.value)."""
    behavior = getBehavior(self.name, versionLine.value)
    if behavior:
        self.setBehavior(behavior)
def transformChildrenToNative(self):
    """Recursively replace children with their native representation.

    Children are visited in sorted order so dependencies are transformed
    first (e.g. vtimezone before vevent).
    """
    for key in self.sortChildKeys():
        for child in self.contents[key]:
            child = child.transformToNative()
            child.transformChildrenToNative()
def transformChildrenFromNative(self, clearBehavior=True):
    """Recursively transform native children to vanilla representations."""
    for children in self.contents.values():
        for child in children:
            child = child.transformFromNative()
            child.transformChildrenFromNative(clearBehavior)
            if clearBehavior:
                child.behavior = None
                child.parentBehavior = None
def __str__(self):
    """Readable form: <NAME| [children]>, or <*unnamed*| [children]>."""
    children = self.getSortedChildren()
    if self.name:
        return "<%s| %s>" % (self.name, children)
    return u'<*unnamed*| {}>'.format(children)
def __repr__(self):
    """repr mirrors __str__."""
    return self.__str__()
def prettyPrint(self, level=0, tabwidth=3):
    """Print an indented tree of this component and its children."""
    indent = ' ' * level * tabwidth
    print(indent, self.name)
    if isinstance(self, Component):
        for child in self.getChildren():
            child.prettyPrint(level + 1, tabwidth)
class VObjectError(Exception):
    """Base error for vobject; optionally carries the offending line number."""
    def __init__(self, msg, lineNumber=None):
        self.msg = msg
        # lineNumber is only set when known; __str__ checks via hasattr.
        if lineNumber is not None:
            self.lineNumber = lineNumber
    def __str__(self):
        if hasattr(self, 'lineNumber'):
            return "At line %s: %s" % (self.lineNumber, self.msg)
        return repr(self.msg)
class ParseError(VObjectError):
    """Raised when a serialized vCard/vCalendar stream cannot be parsed."""
    pass
class ValidateError(VObjectError):
    """Raised when validation of a component fails."""
    pass
class NativeError(VObjectError):
    """Raised when transforming to or from a native representation fails."""
    pass
#-------------------------- Parsing functions ----------------------------------

# parseLine regular expressions
patterns = {}

# Note that underscore is not legal for names, it's included because
# Lotus Notes uses it
patterns['name'] = r'[a-zA-Z0-9\-_]+'
patterns['safe_char'] = '[^";:,]'
patterns['qsafe_char'] = '[^"]'

# the combined Python string replacement and regex syntax is a little confusing;
# remember that %(foobar)s is replaced with patterns['foobar'], so for instance
# param_value is any number of safe_chars or any number of qsafe_chars surrounded
# by double quotes.
patterns['param_value'] = ' "%(qsafe_char)s * " | %(safe_char)s * ' % patterns

# get a tuple of two elements, one will be empty, the other will have the value
patterns['param_value_grouped'] = """
" ( %(qsafe_char)s * )" | ( %(safe_char)s + )
""" % patterns

# get a parameter and its values, without any saved groups
patterns['param'] = r"""
; (?: %(name)s )                     # parameter name
(?:
    (?: = (?: %(param_value)s ) )?   # 0 or more parameter values, multiple
    (?: , (?: %(param_value)s ) )*   # parameters are comma separated
)*
""" % patterns

# get a parameter, saving groups for name and value (value still needs parsing)
patterns['params_grouped'] = r"""
; ( %(name)s )
(?: =
    (
        (?:   (?: %(param_value)s ) )?   # 0 or more parameter values, multiple
        (?: , (?: %(param_value)s ) )*   # parameters are comma separated
    )
)?
""" % patterns

# get a full content line, break it up into group, name, parameters, and value
patterns['line'] = r"""
^ ((?P<group> %(name)s)\.)?(?P<name> %(name)s) # name group
  (?P<params> (?: %(param)s )* )               # params group (may be empty)
: (?P<value> .* )$                             # value group
""" % patterns

# BUG FIX: a stray, no-op string-literal expression that used to sit here
# (a leftover duplicate of the param_value pattern) was removed.

param_values_re = re.compile(patterns['param_value_grouped'], re.VERBOSE)
params_re = re.compile(patterns['params_grouped'], re.VERBOSE)
line_re = re.compile(patterns['line'], re.DOTALL | re.VERBOSE)
begin_re = re.compile('BEGIN', re.IGNORECASE)
def parseParams(string):
    """Parse a parameter substring into [[name, value, ...], ...] lists.

    >>> parseParams(';ALTREP="http://www.wiz.org"')
    [['ALTREP', 'http://www.wiz.org']]
    >>> parseParams('')
    []
    >>> parseParams(';ALTREP="http://www.wiz.org;;",Blah,Foo;NEXT=Nope;BAR')
    [['ALTREP', 'http://www.wiz.org;;', 'Blah', 'Foo'], ['NEXT', 'Nope'], ['BAR']]
    """
    # BUG FIX (idiom): this local was named 'all', shadowing the builtin.
    matches = params_re.findall(string)
    allParameters = []
    for tup in matches:
        paramList = [tup[0]]  # tup looks like (name, valuesString)
        for pair in param_values_re.findall(tup[1]):
            # pair looks like ('', value) or (value, '')
            if pair[0] != '':
                paramList.append(pair[0])
            else:
                paramList.append(pair[1])
        allParameters.append(paramList)
    return allParameters
def parseLine(line, lineNumber=None):
    """Break one unfolded content line into (name, params, value, group).

    >>> parseLine("BLAH:")
    ('BLAH', [], '', None)
    >>> parseLine("EMAIL;PREF;INTERNET:john@nowhere.com")
    ('EMAIL', [['PREF'], ['INTERNET']], 'john@nowhere.com', None)
    >>> parseLine('item1.ADR;type=HOME;type=pref:;;Reeperbahn 116;Hamburg;;20359;')
    ('ADR', [['type', 'HOME'], ['type', 'pref']], ';;Reeperbahn 116;Hamburg;;20359;', 'item1')
    >>> parseLine(":")
    Traceback (most recent call last):
    ...
    ParseError: 'Failed to parse line: :'
    """
    match = line_re.match(line)
    if match is None:
        raise ParseError("Failed to parse line: %s" % line, lineNumber)
    # Underscores are replaced with dash to work around Lotus Notes
    return (match.group('name').replace('_', '-'),
            parseParams(match.group('params')),
            match.group('value'),
            match.group('group'))
# logical line regular expressions

patterns['lineend'] = r'(?:\r\n|\r|\n|$)'
patterns['wrap'] = r'%(lineend)s [\t ]' % patterns
patterns['logicallines'] = r"""
(
   (?: [^\r\n] | %(wrap)s )*
   %(lineend)s
)
""" % patterns
patterns['wraporend'] = r'(%(wrap)s | %(lineend)s )' % patterns

wrap_re = re.compile(patterns['wraporend'], re.VERBOSE)
logical_lines_re = re.compile(patterns['logicallines'], re.VERBOSE)

# Sample input for getLogicalLines' doctest. BUG FIX: the second line must
# begin with a space -- it is a folded continuation of line 0 (the doctest
# expects "Line 0 text, Line 0 continued."), and the fold regex only joins
# lines that start with whitespace.
testLines="""
Line 0 text
 , Line 0 continued.
Line 1;encoding=quoted-printable:this is an evil=
evil=
format.
Line 2 is a new line, it does not start with whitespace.
"""
def getLogicalLines(fp, allowQP=True, findBegin=False):
    """
    Iterate through a stream, yielding (logicalLine, lineNumber) pairs.

    Because many applications still use vCard 2.1, we have to deal with the
    quoted-printable encoding for long lines, as well as the vCard 3.0 and
    vCalendar line folding technique, a whitespace character at the start
    of the line.

    Quoted-printable data will be decoded in the Behavior decoding phase.

    >>> from six import StringIO
    >>> f=StringIO(testLines)
    >>> for n, l in enumerate(getLogicalLines(f)):
    ...     print("Line %s: %s" % (n, l[0]))
    ...
    Line 0: Line 0 text, Line 0 continued.
    Line 1: Line 1;encoding=quoted-printable:this is an evil=
    evil=
    format.
    Line 2: Line 2 is a new line, it does not start with whitespace.
    """
    if not allowQP:
        # Fast path: slurp the stream and split it with the logical-line
        # regex, stripping fold markers in one pass.
        val = fp.read(-1)
        lineNumber = 1
        for match in logical_lines_re.finditer(val):
            line, n = wrap_re.subn('', match.group())
            if line != '':
                yield line, lineNumber
            lineNumber += n
    else:
        # Stateful path: quoted-printable soft line breaks ('=' at end of
        # line) must be honored, so read and join line by line.
        quotedPrintable = False
        newbuffer = six.StringIO
        logicalLine = newbuffer()
        lineNumber = 0
        lineStartNumber = 0
        while True:
            line = fp.readline()
            if line == '':
                break
            else:
                line = line.rstrip(CRLF)
                lineNumber += 1
            if line.rstrip() == '':
                # Blank line: flush any accumulated logical line.
                if logicalLine.tell() > 0:
                    yield logicalLine.getvalue(), lineStartNumber
                lineStartNumber = lineNumber
                logicalLine = newbuffer()
                quotedPrintable = False
                continue

            if quotedPrintable and allowQP:
                logicalLine.write('\n')
                logicalLine.write(line)
                quotedPrintable = False
            elif line[0] in SPACEORTAB:
                # Folded continuation: drop the leading whitespace char.
                logicalLine.write(line[1:])
            elif logicalLine.tell() > 0:
                yield logicalLine.getvalue(), lineStartNumber
                lineStartNumber = lineNumber
                logicalLine = newbuffer()
                logicalLine.write(line)
            else:
                logicalLine = newbuffer()
                logicalLine.write(line)

            # vCard 2.1 allows parameters to be encoded without a parameter
            # name. False positives are unlikely, but possible.
            val = logicalLine.getvalue()
            if val[-1] == '=' and val.lower().find('quoted-printable') >= 0:
                quotedPrintable = True

        if logicalLine.tell() > 0:
            yield logicalLine.getvalue(), lineStartNumber
def textLineToContentLine(text, n=None):
    """Parse one unfolded text line into an encoded ContentLine."""
    return ContentLine(*parseLine(text, n), encoded=True, lineNumber=n)
def dquoteEscape(param):
    """Return param, double-quoted if it contains ',' or ';' or ':'."""
    if '"' in param:
        raise VObjectError("Double quotes aren't allowed in parameter values.")
    for char in ',;:':
        if char in param:
            return '"' + param + '"'
    return param
def foldOneLine(outbuf, input, lineLength=75):
    """Write input to outbuf folded at lineLength, terminated by CRLF.

    Folding line procedure that ensures multi-byte utf-8 sequences are not
    broken across lines.
    TO-DO: This all seems odd. Is it still needed, especially in python3?
    """
    if len(input) < lineLength:
        # Optimize for unfolded line case
        try:
            outbuf.write(bytes(input, 'UTF-8'))
        except Exception:
            # fall back on py2 syntax
            outbuf.write(input)
    else:
        # Look for valid utf8 range and write that out
        start = 0
        written = 0
        while written < len(input):
            # Start max length -1 chars on from where we are
            offset = start + lineLength - 1
            if offset >= len(input):
                line = input[start:]
                try:
                    outbuf.write(bytes(line, 'UTF-8'))
                except Exception:
                    # fall back on py2 syntax
                    outbuf.write(line)
                written = len(input)
            else:
                # Step back while the next char is a UTF-8 continuation byte
                # so we never split inside a multi-byte sequence.
                # BUG FIX: the first comparison must also use ord() --
                # comparing a str to an int raises TypeError on Python 3.
                while (ord(input[offset]) > 0x7F) and ((ord(input[offset]) & 0xC0) == 0x80):
                    offset -= 1
                line = input[start:offset]
                try:
                    # BUG FIX: bytes(line) without an encoding fails on
                    # Python 3, and this write had no py2 fallback.
                    outbuf.write(bytes(line, 'UTF-8'))
                except Exception:
                    # fall back on py2 syntax
                    outbuf.write(line)
                try:
                    outbuf.write(bytes("\r\n", 'UTF-8'))
                except Exception:
                    # fall back on py2 syntax
                    outbuf.write("\r\n")
                written += offset - start
                start = offset
    try:
        outbuf.write(bytes("\r\n", 'UTF-8'))
    except Exception:
        # fall back on py2 syntax
        outbuf.write("\r\n")
def defaultSerialize(obj, buf, lineLength):
    """Encode and fold obj and its children; write to buf or return a string."""
    outbuf = buf or six.StringIO()
    if isinstance(obj, Component):
        groupString = '' if obj.group is None else obj.group + '.'
        if obj.useBegin:
            foldOneLine(outbuf, "{0}BEGIN:{1}".format(groupString, obj.name), lineLength)
        for child in obj.getSortedChildren():
            # validate is recursive, we only need to validate once
            child.serialize(outbuf, lineLength, validate=False)
        if obj.useBegin:
            foldOneLine(outbuf, "{0}END:{1}".format(groupString, obj.name), lineLength)
    elif isinstance(obj, ContentLine):
        startedEncoded = obj.encoded
        if obj.behavior and not startedEncoded:
            obj.behavior.encode(obj)
        s = six.StringIO()  # unfolded buffer
        if obj.group is not None:
            s.write(obj.group + '.')
        s.write(obj.name.upper())
        for key in sorted(obj.params.keys()):
            paramvals = obj.params[key]
            s.write(';' + key + '=' + ','.join(dquoteEscape(p) for p in paramvals))
        s.write(':' + six.u(obj.value))
        if obj.behavior and not startedEncoded:
            # Restore the decoded state we started from.
            obj.behavior.decode(obj)
        foldOneLine(outbuf, s.getvalue(), lineLength)
    return buf or outbuf.getvalue()
# Sample serialized vCalendar used by doctests elsewhere in this module.
testVCalendar="""
BEGIN:VCALENDAR
BEGIN:VEVENT
SUMMARY;blah=hi!:Bastille Day Party
END:VEVENT
END:VCALENDAR"""
class Stack:
    """Stack of partially-built components used while parsing a stream."""
    def __init__(self):
        self.stack = []
    def __len__(self):
        return len(self.stack)
    def top(self):
        """Return the top item, or None when empty."""
        return self.stack[-1] if len(self) else None
    def topName(self):
        """Return the top item's name, or None when empty."""
        return self.stack[-1].name if len(self) else None
    def modifyTop(self, item):
        """Add item to the top component, creating one if the stack is empty."""
        top = self.top()
        if top:
            top.add(item)
        else:
            new = Component()
            self.push(new)
            new.add(item)  # add sets behavior for item and children
    def push(self, obj):
        self.stack.append(obj)
    def pop(self):
        return self.stack.pop()
def readComponents(streamOrString, validate=False, transform=True,
                   findBegin=True, ignoreUnreadable=False, allowQP=False):
    """
    Generate one Component at a time from a stream or string.

    Raises ParseError (with .input set to the original argument) on
    malformed input unless ignoreUnreadable is True, in which case bad
    lines are logged and skipped.
    """
    if isinstance(streamOrString, basestring):
        stream = six.StringIO(streamOrString)
    else:
        stream = streamOrString
    try:
        stack = Stack()
        versionLine = None
        n = 0
        for line, n in getLogicalLines(stream, allowQP, findBegin):
            if ignoreUnreadable:
                try:
                    vline = textLineToContentLine(line, n)
                except VObjectError as e:
                    # BUG FIX: VObjectError only sets .lineNumber when one was
                    # supplied, so read it with getattr; and it stores the
                    # message as .msg (.message does not exist on Python 3).
                    lineNumber = getattr(e, 'lineNumber', None)
                    if lineNumber is not None:
                        msg = "Skipped line %(lineNumber)s, message: %(msg)s"
                    else:
                        msg = "Skipped a line, message: %(msg)s"
                    logger.error(msg % {'lineNumber': lineNumber, 'msg': e.msg})
                    continue
            else:
                vline = textLineToContentLine(line, n)
            if vline.name == "VERSION":
                versionLine = vline
                stack.modifyTop(vline)
            elif vline.name == "BEGIN":
                stack.push(Component(vline.value, group=vline.group))
            elif vline.name == "PROFILE":
                if not stack.top():
                    stack.push(Component())
                stack.top().setProfile(vline.value)
            elif vline.name == "END":
                if len(stack) == 0:
                    err = "Attempted to end the %s component but it was never opened" % vline.value
                    raise ParseError(err, n)
                if vline.value.upper() == stack.topName():  # START matches END
                    if len(stack) == 1:
                        component = stack.pop()
                        if versionLine is not None:
                            component.setBehaviorFromVersionLine(versionLine)
                        else:
                            behavior = getBehavior(component.name)
                            if behavior:
                                component.setBehavior(behavior)
                        if validate:
                            component.validate(raiseException=True)
                        if transform:
                            component.transformChildrenToNative()
                        yield component  # EXIT POINT
                    else:
                        stack.modifyTop(stack.pop())
                else:
                    err = "%s component wasn't closed"
                    raise ParseError(err % stack.topName(), n)
            else:
                stack.modifyTop(vline)  # not a START or END line
        if stack.top():
            if stack.topName() is None:
                logger.warning("Top level component was never named")
            elif stack.top().useBegin:
                raise ParseError("Component %s was never closed" % (stack.topName()), n)
            yield stack.pop()
    except ParseError as e:
        e.input = streamOrString
        raise
def readOne(stream, validate=False, transform=True, findBegin=True,
            ignoreUnreadable=False, allowQP=False):
    """Return the first component from stream."""
    return next(readComponents(stream, validate, transform, findBegin,
                               ignoreUnreadable, allowQP))
#--------------------------- version registry ----------------------------------

# Maps uppercased name -> list of (versionId, behavior) pairs; the first
# entry is the default for that name.
__behaviorRegistry = {}

def registerBehavior(behavior, name=None, default=False, id=None):
    """Register the given behavior.

    If default is True (or if this is the first version registered with this
    name), the version will be the default if no id is given.
    """
    if not name:
        name = behavior.name.upper()
    if id is None:
        id = behavior.versionString
    if name in __behaviorRegistry:
        if default:
            __behaviorRegistry[name].insert(0, (id, behavior))
        else:
            __behaviorRegistry[name].append((id, behavior))
    else:
        __behaviorRegistry[name] = [(id, behavior)]

def getBehavior(name, id=None):
    """Return a matching behavior if it exists, or None.

    If id is None, return the default for name.
    """
    name = name.upper()
    if name in __behaviorRegistry:
        if id:
            for versionId, behavior in __behaviorRegistry[name]:
                if versionId == id:
                    return behavior
        return __behaviorRegistry[name][0][1]
    return None

def newFromBehavior(name, id=None):
    """Given a name, return a behaviored ContentLine or Component."""
    name = name.upper()
    behavior = getBehavior(name, id)
    if behavior is None:
        raise VObjectError("No behavior found named %s" % name)
    if behavior.isComponent:
        obj = Component(name)
    else:
        obj = ContentLine(name, [], '')
    obj.behavior = behavior
    obj.isNative = False
    return obj
#--------------------------- Helper function -----------------------------------
def backslashEscape(s):
    """Backslash-escape backslash, ';' and ','; normalize line breaks to \\n."""
    # BUG FIX (idiom): raw strings replace the invalid '\;' and '\,' escape
    # sequences, which newer Python versions warn about (values unchanged).
    s = s.replace("\\", "\\\\").replace(";", r"\;").replace(",", r"\,")
    return s.replace("\r\n", "\\n").replace("\n", "\\n").replace("\r", "\\n")
#------------------- Testing and running functions -----------------------------
if __name__ == '__main__':
    # Run the module's test suite when executed directly.
    import tests
    tests._test()
# NOTE: stray non-code text (looks like a commit-message fragment left in by
# file concatenation), preserved as a comment: "map object should be list"
"""vobject module for reading vCard and vCalendar files."""
from __future__ import print_function
import copy
import re
import sys
import logging
#import codecs
import six
# Python 3 no longer has a basestring type, so....
try:
    basestring = basestring  # Python 2: keep the builtin
except NameError:
    # Python 3: accept both text and bytes wherever basestring is checked.
    basestring = (str, bytes)
#------------------------------------ Logging ----------------------------------
logger = logging.getLogger(__name__)
# Attach a stderr handler only when nothing else has configured logging.
if not logging.getLogger().handlers:
    handler = logging.StreamHandler()
    formatter = logging.Formatter('%(name)s %(levelname)s %(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(logging.ERROR)  # Log errors
DEBUG = False  # Don't waste time on debug calls
#----------------------------------- Constants ---------------------------------
CR = '\r'
LF = '\n'
CRLF = CR + LF
SPACE = ' '
TAB = '\t'
SPACEORTAB = SPACE + TAB  # characters that mark a folded continuation line
#-------------------------------- Useful modules -------------------------------
# use doctest, it kills two birds with one stone and docstrings often become
# more readable to boot (see parseLine's docstring).
# use logging, then when debugging we can just set our verbosity.
# use epydoc syntax for documenting code, please document every class and non-
# trivial method (see http://epydoc.sourceforge.net/epytext.html
# and http://epydoc.sourceforge.net/fields.html). Also, please
# follow http://www.python.org/peps/pep-0257.html for docstrings.
#-------------------------------------------------------------------------------
#--------------------------------- Main classes --------------------------------
class VBase(object):
    """Base class for ContentLine and Component.

    @ivar behavior:
        The Behavior class associated with this object, which controls
        validation, transformations, and encoding.
    @ivar parentBehavior:
        The object's parent's behavior, or None if no behaviored parent exists.
    @ivar isNative:
        Boolean describing whether this component is a Native instance.
    @ivar group:
        An optional group prefix, should be used only to indicate sort order in
        vCards, according to spec.

    Current spec: 4.0 (http://tools.ietf.org/html/rfc6350)
    """
    def __init__(self, group=None, *args, **kwds):
        super(VBase, self).__init__(*args, **kwds)
        self.group = group
        self.behavior = None
        self.parentBehavior = None
        self.isNative = False

    def copy(self, copyit):
        """Shallow-copy behavior-related state from copyit."""
        self.group = copyit.group
        self.behavior = copyit.behavior
        self.parentBehavior = copyit.parentBehavior
        self.isNative = copyit.isNative

    def validate(self, *args, **kwds):
        """Call the behavior's validate method, or return True."""
        if self.behavior:
            return self.behavior.validate(self, *args, **kwds)
        return True

    def getChildren(self):
        """Return an iterable containing the contents of the object."""
        return []

    def clearBehavior(self, cascade=True):
        """Set behavior to None. Do for all descendants if cascading."""
        self.behavior = None
        if cascade:
            self.transformChildrenFromNative()

    def autoBehavior(self, cascade=False):
        """
        Set behavior if name is in self.parentBehavior.knownChildren.

        If cascade is True, unset behavior and parentBehavior for all
        descendants, then recalculate behavior and parentBehavior.
        """
        parentBehavior = self.parentBehavior
        if parentBehavior is not None:
            knownChildTup = parentBehavior.knownChildren.get(self.name, None)
            if knownChildTup is not None:
                behavior = getBehavior(self.name, knownChildTup[2])
                if behavior is not None:
                    self.setBehavior(behavior, cascade)
                    if isinstance(self, ContentLine) and self.encoded:
                        self.behavior.decode(self)
            elif isinstance(self, ContentLine):
                self.behavior = parentBehavior.defaultBehavior
                if self.encoded and self.behavior:
                    self.behavior.decode(self)

    def setBehavior(self, behavior, cascade=True):
        """Set behavior. If cascade is True, autoBehavior all descendants."""
        self.behavior = behavior
        if cascade:
            for obj in self.getChildren():
                obj.parentBehavior = behavior
                obj.autoBehavior(True)

    def transformToNative(self):
        """
        Transform this object into a custom VBase subclass.

        transformToNative should always return a representation of this
        object. It may do so by modifying self in place then returning self,
        or by creating a new object.
        """
        if self.isNative or not self.behavior or not self.behavior.hasNative:
            return self
        try:
            return self.behavior.transformToNative(self)
        except Exception as e:
            # wrap errors in transformation in a ParseError
            lineNumber = getattr(self, 'lineNumber', None)
            if isinstance(e, ParseError):
                if lineNumber is not None:
                    e.lineNumber = lineNumber
                raise
            else:
                msg = "In transformToNative, unhandled exception on line %s: %s: %s"
                msg = msg % (lineNumber, sys.exc_info()[0], sys.exc_info()[1])
                raise ParseError(msg, lineNumber)

    def transformFromNative(self):
        """
        Return self transformed into a ContentLine or Component if needed.

        May have side effects. If it does, transformFromNative and
        transformToNative MUST have perfectly inverse side effects. Allowing
        such side effects is convenient for objects whose transformations only
        change a few attributes.

        Note that it isn't always possible for transformFromNative to be a
        perfect inverse of transformToNative, in such cases transformFromNative
        should return a new object, not self after modifications.
        """
        if self.isNative and self.behavior and self.behavior.hasNative:
            try:
                return self.behavior.transformFromNative(self)
            except Exception as e:
                # wrap errors in transformation in a NativeError
                lineNumber = getattr(self, 'lineNumber', None)
                if isinstance(e, NativeError):
                    if lineNumber is not None:
                        e.lineNumber = lineNumber
                    raise
                else:
                    msg = "In transformFromNative, unhandled exception on line %s %s: %s"
                    msg = msg % (lineNumber, sys.exc_info()[0], sys.exc_info()[1])
                    raise NativeError(msg, lineNumber)
        else:
            # NOTE(review): six.u(self) looks suspicious here (elsewhere the
            # non-native case returns self); preserved as-is to avoid
            # changing behavior -- confirm against upstream vobject.
            return six.u(self)

    def transformChildrenToNative(self):
        """Recursively replace children with their native representation."""
        pass

    def transformChildrenFromNative(self, clearBehavior=True):
        """Recursively transform native children to vanilla representations."""
        pass

    def serialize(self, buf=None, lineLength=75, validate=True, behavior=None):
        """Serialize to buf if it exists, otherwise return a string.

        Use self.behavior.serialize if behavior exists.
        """
        if not behavior:
            behavior = self.behavior
        if behavior:
            if DEBUG:
                logger.debug("serializing %s with behavior" % self.name)
            return behavior.serialize(self, buf, lineLength, validate)
        else:
            if DEBUG:
                logger.debug("serializing %s without behavior" % self.name)
            return defaultSerialize(self, buf, lineLength)
def toVName(name, stripNum=0, upper=False):
    """Turn a Python name into an iCalendar-style name.

    Optionally uppercase it and strip stripNum trailing characters; maps
    underscores to dashes, which are legal in IANA tokens.
    """
    if upper:
        name = name.upper()
    if stripNum != 0:
        name = name[:-stripNum]
    return name.replace('_', '-')
class ContentLine(VBase):
    """
    Holds one content line for formats like vCard and vCalendar.

    For example::
      <SUMMARY{u'param1' : [u'val1'], u'param2' : [u'val2']}Bastille Day Party>

    @ivar name:
        The uppercased name of the contentline.
    @ivar params:
        A dictionary of parameters and associated lists of values (the list
        may be empty for empty parameters).
    @ivar value:
        The value of the contentline.
    @ivar singletonparams:
        A list of parameters for which it's unclear if the string represents
        the parameter name or the parameter value. In vCard 2.1, "The value
        string can be specified alone in those cases where the value is
        unambiguous". This is crazy, but we have to deal with it.
    @ivar encoded:
        A boolean describing whether the data in the content line is encoded.
        Generally, text read from a serialized vCard or vCalendar should be
        considered encoded. Data added programmatically should not be encoded.
    @ivar lineNumber:
        An optional line number associated with the contentline.
    """
    def __init__(self, name, params, value, group=None, encoded=False,
                 isNative=False, lineNumber=None, *args, **kwds):
        """
        Take output from parseLine, convert params list to dictionary.

        Group is used as a positional argument to match parseLine's return.
        """
        super(ContentLine, self).__init__(group, *args, **kwds)
        self.name = name.upper()
        self.encoded = encoded
        self.params = {}
        self.singletonparams = []
        self.isNative = isNative
        self.lineNumber = lineNumber
        # if not unicode, attempt to make it so; if it's already unicode,
        # attempting to re-encode will throw an error, so keep it unchanged.
        try:
            self.value = six.u(value)
        except Exception:
            self.value = value

        # BUG FIX: removed leftover debugging print() calls that dumped the
        # raw params list and every updateTable argument to stdout on each
        # ContentLine construction.
        def updateTable(x):
            # Single-element entries are ambiguous name-or-value parameters.
            if len(x) == 1:
                self.singletonparams += x
            else:
                paramlist = self.params.setdefault(six.u(x[0].upper()), [])
                paramlist.extend([six.u(p) for p in x[1:]])
        list(map(updateTable, params))

        qp = False
        if 'ENCODING' in self.params:
            if 'QUOTED-PRINTABLE' in self.params['ENCODING']:
                qp = True
                self.params['ENCODING'].remove('QUOTED-PRINTABLE')
                if 0 == len(self.params['ENCODING']):
                    del self.params['ENCODING']
        if 'QUOTED-PRINTABLE' in self.singletonparams:
            qp = True
            self.singletonparams.remove('QUOTED-PRINTABLE')
        if qp:
            # NOTE(review): str.decode only exists for Python 2 byte strings;
            # this path would fail on Python 3 -- confirm before porting.
            self.value = self.value.decode('quoted-printable')
        # self.value should be unicode for iCalendar, but if quoted-printable
        # is used, or if the quoted-printable state machine is used, text may
        # be encoded

    @classmethod
    def duplicate(clz, copyit):
        newcopy = clz('', {}, '')
        newcopy.copy(copyit)
        return newcopy

    def copy(self, copyit):
        super(ContentLine, self).copy(copyit)
        self.name = copyit.name
        self.value = copy.copy(copyit.value)
        # NOTE(review): this self-assignment looks like it should read
        # copyit.encoded; preserved as-is to avoid changing behavior --
        # confirm against upstream vobject.
        self.encoded = self.encoded
        self.params = copy.copy(copyit.params)
        for k, v in self.params.items():
            self.params[k] = copy.copy(v)
        self.singletonparams = copy.copy(copyit.singletonparams)
        self.lineNumber = copyit.lineNumber

    def __eq__(self, other):
        try:
            return (self.name == other.name) and (self.params == other.params) and (self.value == other.value)
        except Exception:
            return False

    def _getAttributeNames(self):
        """Return a list of attributes of the object.

        Python 2.6 will add __dir__ to customize what attributes are returned
        by dir, for now copy PyCrust so that IPython can accurately do
        completion.
        """
        keys = self.params.keys()
        params = [param + '_param' for param in keys]
        params.extend(param + '_paramlist' for param in keys)
        return params

    def __getattr__(self, name):
        """
        Make params accessible via self.foo_param or self.foo_paramlist.

        Underscores, legal in python variable names, are converted to dashes,
        which are legal in IANA tokens.
        """
        try:
            if name.endswith('_param'):
                return self.params[toVName(name, 6, True)][0]
            elif name.endswith('_paramlist'):
                return self.params[toVName(name, 10, True)]
            else:
                raise AttributeError(name)
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        """Make params accessible via self.foo_param or self.foo_paramlist.

        Underscores, legal in python variable names, are converted to dashes,
        which are legal in IANA tokens.
        """
        if name.endswith('_param'):
            if type(value) == list:
                self.params[toVName(name, 6, True)] = value
            else:
                self.params[toVName(name, 6, True)] = [value]
        elif name.endswith('_paramlist'):
            if type(value) == list:
                self.params[toVName(name, 10, True)] = value
            else:
                raise VObjectError("Parameter list set to a non-list")
        else:
            prop = getattr(self.__class__, name, None)
            if isinstance(prop, property):
                prop.fset(self, value)
            else:
                object.__setattr__(self, name, value)

    def __delattr__(self, name):
        try:
            if name.endswith('_param'):
                del self.params[toVName(name, 6, True)]
            elif name.endswith('_paramlist'):
                del self.params[toVName(name, 10, True)]
            else:
                object.__delattr__(self, name)
        except KeyError:
            raise AttributeError(name)

    def valueRepr(self):
        """
        Transform the representation of the value according to the behavior,
        if any.
        """
        v = self.value
        if self.behavior:
            v = self.behavior.valueRepr(self)
        return v

    def __str__(self):
        return "<%s%s%s>" % (self.name, self.params, self.valueRepr())

    def __repr__(self):
        return self.__str__()

    def prettyPrint(self, level=0, tabwidth=3):
        pre = ' ' * level * tabwidth
        print(pre, self.name + ":", self.valueRepr())
        if self.params:
            print(pre, "params for ", self.name + ':')
            for k in self.params.keys():
                print(pre + ' ' * tabwidth, k, self.params[k])
class Component(VBase):
    """A complex property that can contain multiple ContentLines.

    For our purposes, a component must start with a BEGIN:xxxx line and end with
    END:xxxx, or have a PROFILE:xxx line if a top-level component.

    @ivar contents:
        A dictionary of lists of Component or ContentLine instances. The keys
        are the lowercased names of child ContentLines or Components.
        Note that BEGIN and END ContentLines are not included in contents.
    @ivar name:
        Uppercase string used to represent this Component, i.e VCARD if the
        serialized object starts with BEGIN:VCARD.
    @ivar useBegin:
        A boolean flag determining whether BEGIN: and END: lines should
        be serialized.
    """
    def __init__(self, name=None, *args, **kwds):
        super(Component, self).__init__(*args, **kwds)
        self.contents = {}
        if name:
            self.name = name.upper()
            self.useBegin = True
        else:
            # Unnamed (top-level) component; may be named later via setProfile.
            self.name = ''
            self.useBegin = False
        self.autoBehavior()

    @classmethod
    def duplicate(clz, copyit):
        # Build a fresh instance, then deep-copy state from copyit into it.
        newcopy = clz()
        newcopy.copy(copyit)
        return newcopy

    def copy(self, copyit):
        super(Component, self).copy(copyit)
        # deep copy of contents
        self.contents = {}
        for key, lvalue in copyit.contents.items():
            newvalue = []
            for value in lvalue:
                newitem = value.duplicate(value)
                newvalue.append(newitem)
            self.contents[key] = newvalue
        self.name = copyit.name
        self.useBegin = copyit.useBegin

    def setProfile(self, name):
        """Assign a PROFILE to this unnamed component.

        Used by vCard, not by vCalendar.
        """
        if self.name or self.useBegin:
            if self.name == name:
                return
            raise VObjectError("This component already has a PROFILE or uses BEGIN.")
        self.name = name.upper()

    def _getAttributeNames(self):
        """Return a list of attributes of the object.

        Python 2.6 will add __dir__ to customize what attributes are returned
        by dir, for now copy PyCrust so that IPython can accurately do
        completion.
        """
        # NOTE(review): relies on Python 2 dict.keys() returning a list
        # (list.extend below) -- would need list(...) on Python 3.
        names = self.contents.keys()
        names.extend(name + '_list' for name in self.contents.keys())
        return names

    def __getattr__(self, name):
        """For convenience, make self.contents directly accessible.

        Underscores, legal in python variable names, are converted to dashes,
        which are legal in IANA tokens.
        """
        # if the object is being re-created by pickle, self.contents may not
        # be set, don't get into an infinite loop over the issue
        if name == 'contents':
            return object.__getattribute__(self, name)
        try:
            if name.endswith('_list'):
                return self.contents[toVName(name, 5)]
            else:
                return self.contents[toVName(name)][0]
        except KeyError:
            raise AttributeError(name)

    # Attribute names handled by normal object semantics rather than being
    # routed into self.contents by __setattr__/__delattr__.
    normal_attributes = ['contents', 'name', 'behavior', 'parentBehavior', 'group']

    def __setattr__(self, name, value):
        """For convenience, make self.contents directly accessible.

        Underscores, legal in python variable names, are converted to dashes,
        which are legal in IANA tokens.
        """
        # Only all-lowercase, non-reserved names are treated as child content.
        if name not in self.normal_attributes and name.lower() == name:
            if type(value) == list:
                if name.endswith('_list'):
                    name = name[:-5]
                self.contents[toVName(name)] = value
            elif name.endswith('_list'):
                raise VObjectError("Component list set to a non-list")
            else:
                self.contents[toVName(name)] = [value]
        else:
            prop = getattr(self.__class__, name, None)
            if isinstance(prop, property):
                prop.fset(self, value)
            else:
                object.__setattr__(self, name, value)

    def __delattr__(self, name):
        # Mirrors __setattr__'s routing: lowercase non-reserved names live in
        # self.contents, everything else is a normal attribute.
        try:
            if name not in self.normal_attributes and name.lower() == name:
                if name.endswith('_list'):
                    del self.contents[toVName(name, 5)]
                else:
                    del self.contents[toVName(name)]
            else:
                object.__delattr__(self, name)
        except KeyError:
            raise AttributeError(name)

    def getChildValue(self, childName, default=None, childNumber=0):
        """Return a child's value (the first, by default), or None."""
        child = self.contents.get(toVName(childName))
        if child is None:
            return default
        else:
            return child[childNumber].value

    def add(self, objOrName, group=None):
        """Add objOrName to contents, set behavior if it can be inferred.

        If objOrName is a string, create an empty component or line based on
        behavior. If no behavior is found for the object, add a ContentLine.

        group is an optional prefix to the name of the object (see
        RFC 2425).
        """
        if isinstance(objOrName, VBase):
            obj = objOrName
            if self.behavior:
                obj.parentBehavior = self.behavior
                obj.autoBehavior(True)
        else:
            name = objOrName.upper()
            try:
                id = self.behavior.knownChildren[name][2]
                behavior = getBehavior(name, id)
                if behavior.isComponent:
                    obj = Component(name)
                else:
                    obj = ContentLine(name, [], '', group)
                obj.parentBehavior = self.behavior
                obj.behavior = behavior
                obj = obj.transformToNative()
            except (KeyError, AttributeError):
                # Unknown child name or no behavior: fall back to a plain line.
                obj = ContentLine(objOrName, [], '', group)
        if obj.behavior is None and self.behavior is not None:
            if isinstance(obj, ContentLine):
                obj.behavior = self.behavior.defaultBehavior
        self.contents.setdefault(obj.name.lower(), []).append(obj)
        return obj

    def remove(self, obj):
        """Remove obj from contents."""
        named = self.contents.get(obj.name.lower())
        if named:
            try:
                named.remove(obj)
                # Drop the key entirely once its list is empty.
                if len(named) == 0:
                    del self.contents[obj.name.lower()]
            except ValueError:
                # obj wasn't in the list; removing nothing is fine.
                pass;

    def getChildren(self):
        """Return an iterable of all children."""
        for objList in self.contents.values():
            for obj in objList:
                yield obj

    def components(self):
        """Return an iterable of all Component children."""
        return (i for i in self.getChildren() if isinstance(i, Component))

    def lines(self):
        """Return an iterable of all ContentLine children."""
        return (i for i in self.getChildren() if isinstance(i, ContentLine))

    def sortChildKeys(self):
        # Keys listed in behavior.sortFirst come first; the rest sorted after.
        try:
            first = [s for s in self.behavior.sortFirst if s in self.contents]
        except Exception:
            first = []
        return first + sorted(k for k in self.contents.keys() if k not in first)

    def getSortedChildren(self):
        # Children flattened in sortChildKeys() order.
        return [obj for k in self.sortChildKeys() for obj in self.contents[k]]

    def setBehaviorFromVersionLine(self, versionLine):
        """Set behavior if one matches name, versionLine.value."""
        v = getBehavior(self.name, versionLine.value)
        if v:
            self.setBehavior(v)

    def transformChildrenToNative(self):
        """
        Recursively replace children with their native representation.

        Sort to get dependency order right, like vtimezone before vevent.
        """
        for childArray in (self.contents[k] for k in self.sortChildKeys()):
            for child in childArray:
                # NOTE(review): the transformed child is rebound locally but not
                # written back into the contents list -- presumably
                # transformToNative mutates in place; confirm.
                child = child.transformToNative()
                child.transformChildrenToNative()

    def transformChildrenFromNative(self, clearBehavior=True):
        """Recursively transform native children to vanilla representations."""
        for childArray in self.contents.values():
            for child in childArray:
                child = child.transformFromNative()
                child.transformChildrenFromNative(clearBehavior)
                if clearBehavior:
                    child.behavior = None
                    child.parentBehavior = None

    def __str__(self):
        """Angle-bracket summary of name and sorted children."""
        if self.name:
            return "<%s| %s>" % (self.name, self.getSortedChildren())
        else:
            return u'<*unnamed*| {}>'.format(self.getSortedChildren())

    def __repr__(self):
        return self.__str__()

    def prettyPrint(self, level=0, tabwidth=3):
        """Print an indented tree of this component and its children."""
        pre = ' ' * level * tabwidth
        print(pre, self.name)
        if isinstance(self, Component):
            for line in self.getChildren():
                line.prettyPrint(level + 1, tabwidth)
class VObjectError(Exception):
    """Base error for this module; optionally records the source line number."""
    def __init__(self, msg, lineNumber=None):
        self.msg = msg
        # Only set the attribute when a line number is known, so __str__ can
        # distinguish the two cases.
        if lineNumber is not None:
            self.lineNumber = lineNumber

    def __str__(self):
        lineNumber = getattr(self, 'lineNumber', None)
        if lineNumber is None:
            return repr(self.msg)
        return "At line %s: %s" % (lineNumber, self.msg)
class ParseError(VObjectError):
    """Raised when an input stream cannot be parsed."""
    pass
class ValidateError(VObjectError):
    """Raised when a component fails validation."""
    pass
class NativeError(VObjectError):
    """Raised when transforming to/from a native representation fails."""
    pass
#-------------------------- Parsing functions ----------------------------------

# parseLine regular expressions; all are compiled with re.VERBOSE, so the
# whitespace and # comments inside the pattern strings are ignored by re.
patterns = {}

# Note that underscore is not legal for names, it's included because
# Lotus Notes uses it
patterns['name'] = r'[a-zA-Z0-9\-_]+'
patterns['safe_char'] = '[^";:,]'
patterns['qsafe_char'] = '[^"]'

# the combined Python string replacement and regex syntax is a little confusing;
# remember that %(foobar)s is replaced with patterns['foobar'], so for instance
# param_value is any number of safe_chars or any number of qsafe_chars surrounded
# by double quotes.
patterns['param_value'] = ' "%(qsafe_char)s * " | %(safe_char)s * ' % patterns

# get a tuple of two elements, one will be empty, the other will have the value
patterns['param_value_grouped'] = """
" ( %(qsafe_char)s * )" | ( %(safe_char)s + )
""" % patterns

# get a parameter and its values, without any saved groups
patterns['param'] = r"""
; (?: %(name)s )                     # parameter name
(?:
    (?: = (?: %(param_value)s ) )?   # 0 or more parameter values, multiple
    (?: , (?: %(param_value)s ) )*   # parameters are comma separated
)*
""" % patterns

# get a parameter, saving groups for name and value (value still needs parsing)
patterns['params_grouped'] = r"""
; ( %(name)s )
(?: =
    (
        (?: (?: %(param_value)s ) )?   # 0 or more parameter values, multiple
        (?: , (?: %(param_value)s ) )* # parameters are comma separated
    )
)?
""" % patterns

# get a full content line, break it up into group, name, parameters, and value
patterns['line'] = r"""
^ ((?P<group> %(name)s)\.)?(?P<name> %(name)s) # name group
  (?P<params> (?: %(param)s )* )               # params group (may be empty)
: (?P<value> .* )$                             # value group
""" % patterns

# (A stray no-op string-literal expression that previously sat here has been
# removed; it had no effect at runtime.)

param_values_re = re.compile(patterns['param_value_grouped'], re.VERBOSE)
params_re = re.compile(patterns['params_grouped'], re.VERBOSE)
line_re = re.compile(patterns['line'], re.DOTALL | re.VERBOSE)
begin_re = re.compile('BEGIN', re.IGNORECASE)
def parseParams(string):
    """Parse everything after a content line's name into parameter lists.

    >>> parseParams(';ALTREP="http://www.wiz.org"')
    [['ALTREP', 'http://www.wiz.org']]
    >>> parseParams('')
    []
    >>> parseParams(';ALTREP="http://www.wiz.org;;",Blah,Foo;NEXT=Nope;BAR')
    [['ALTREP', 'http://www.wiz.org;;', 'Blah', 'Foo'], ['NEXT', 'Nope'], ['BAR']]
    """
    allParameters = []
    # Each match is a (name, valuesString) pair.
    for name, valuesString in params_re.findall(string):
        paramList = [name]
        for quoted, unquoted in param_values_re.findall(valuesString):
            # Exactly one of the two groups matched; keep whichever is set.
            paramList.append(quoted if quoted != '' else unquoted)
        allParameters.append(paramList)
    return allParameters
def parseLine(line, lineNumber=None):
    """Parse one logical line into a (name, params, value, group) tuple.

    >>> parseLine("BLAH:")
    ('BLAH', [], '', None)
    >>> parseLine("RDATE:VALUE=DATE:19970304,19970504,19970704,19970904")
    ('RDATE', [], 'VALUE=DATE:19970304,19970504,19970704,19970904', None)
    >>> parseLine('DESCRIPTION;ALTREP="http://www.wiz.org":The Fall 98 Wild Wizards Conference - - Las Vegas, NV, USA')
    ('DESCRIPTION', [['ALTREP', 'http://www.wiz.org']], 'The Fall 98 Wild Wizards Conference - - Las Vegas, NV, USA', None)
    >>> parseLine("EMAIL;PREF;INTERNET:john@nowhere.com")
    ('EMAIL', [['PREF'], ['INTERNET']], 'john@nowhere.com', None)
    >>> parseLine('EMAIL;TYPE="blah",hah;INTERNET="DIGI",DERIDOO:john@nowhere.com')
    ('EMAIL', [['TYPE', 'blah', 'hah'], ['INTERNET', 'DIGI', 'DERIDOO']], 'john@nowhere.com', None)
    >>> parseLine('item1.ADR;type=HOME;type=pref:;;Reeperbahn 116;Hamburg;;20359;')
    ('ADR', [['type', 'HOME'], ['type', 'pref']], ';;Reeperbahn 116;Hamburg;;20359;', 'item1')
    >>> parseLine(":")
    Traceback (most recent call last):
    ...
    ParseError: 'Failed to parse line: :'
    """
    match = line_re.match(line)
    if match is None:
        raise ParseError("Failed to parse line: %s" % line, lineNumber)
    # Underscores are replaced with dash to work around Lotus Notes
    name = match.group('name').replace('_', '-')
    params = parseParams(match.group('params'))
    return (name, params, match.group('value'), match.group('group'))
# logical line regular expressions
patterns['lineend'] = r'(?:\r\n|\r|\n|$)'
patterns['wrap'] = r'%(lineend)s [\t ]' % patterns
patterns['logicallines'] = r"""
(
(?: [^\r\n] | %(wrap)s )*
%(lineend)s
)
""" % patterns
patterns['wraporend'] = r'(%(wrap)s | %(lineend)s )' % patterns
wrap_re = re.compile(patterns['wraporend'], re.VERBOSE)
logical_lines_re = re.compile(patterns['logicallines'], re.VERBOSE)
testLines="""
Line 0 text
, Line 0 continued.
Line 1;encoding=quoted-printable:this is an evil=
evil=
format.
Line 2 is a new line, it does not start with whitespace.
"""
def getLogicalLines(fp, allowQP=True, findBegin=False):
    """
    Iterate through a stream, yielding one logical line at a time.

    Because many applications still use vCard 2.1, we have to deal with the
    quoted-printable encoding for long lines, as well as the vCard 3.0 and
    vCalendar line folding technique, a whitespace character at the start
    of the line.

    Quoted-printable data will be decoded in the Behavior decoding phase.

    >>> from six import StringIO
    >>> f=StringIO(testLines)
    >>> for n, l in enumerate(getLogicalLines(f)):
    ...     print("Line %s: %s" % (n, l[0]))
    ...
    Line 0: Line 0 text, Line 0 continued.
    Line 1: Line 1;encoding=quoted-printable:this is an evil=
    evil=
    format.
    Line 2: Line 2 is a new line, it does not start with whitespace.
    """
    if not allowQP:
        # Fast path: slurp the whole stream and split with the regex.
        val = fp.read(-1)

        #Shouldn't need this anymore...
        """
        if len(val) > 0:
            if not findBegin:
                val = val.decode('utf-8')
            else:
                for encoding in 'utf-8', 'utf-16-LE', 'utf-16-BE', 'iso-8859-1':
                    try:
                        val = val.decode(encoding)
                        if begin_re.search(val) is not None:
                            break
                    except UnicodeDecodeError:
                        pass
                else:
                    raise ParseError('Could not find BEGIN when trying to determine encoding')
        """

        # strip off any UTF8 BOMs which Python's UTF8 decoder leaves
        #val = val.lstrip( unicode( codecs.BOM_UTF8, "utf8" ) )

        lineNumber = 1
        for match in logical_lines_re.finditer(val):
            # Unfold: drop CRLF-plus-whitespace wrap sequences; n counts how
            # many physical lines the match consumed.
            line, n = wrap_re.subn('', match.group())
            if line != '':
                yield line, lineNumber
            lineNumber += n
    else:
        # Line-by-line path that also understands quoted-printable soft
        # line breaks (trailing '=').
        quotedPrintable = False
        newbuffer = six.StringIO
        logicalLine = newbuffer()
        lineNumber = 0
        lineStartNumber = 0
        while True:
            line = fp.readline()
            if line == '':
                break
            else:
                line = line.rstrip(CRLF)
                lineNumber += 1
            if line.rstrip() == '':
                # Blank line: flush any accumulated logical line.
                if logicalLine.tell() > 0:
                    yield logicalLine.getvalue(), lineStartNumber
                lineStartNumber = lineNumber
                logicalLine = newbuffer()
                quotedPrintable = False
                continue

            if quotedPrintable and allowQP:
                # Continuation of a quoted-printable soft line break.
                logicalLine.write('\n')
                logicalLine.write(line)
                quotedPrintable = False
            elif line[0] in SPACEORTAB:
                # Folded line: append without the leading whitespace char.
                logicalLine.write(line[1:])
            elif logicalLine.tell() > 0:
                # New content line: flush the previous one and start fresh.
                yield logicalLine.getvalue(), lineStartNumber
                lineStartNumber = lineNumber
                logicalLine = newbuffer()
                logicalLine.write(line)
            else:
                logicalLine = newbuffer()
                logicalLine.write(line)

            # vCard 2.1 allows parameters to be encoded without a parameter name.
            # False positives are unlikely, but possible.
            val = logicalLine.getvalue()
            if val[-1] == '=' and val.lower().find('quoted-printable') >= 0:
                quotedPrintable = True

        if logicalLine.tell() > 0:
            yield logicalLine.getvalue(), lineStartNumber
def textLineToContentLine(text, n=None):
    """Turn one logical line into an encoded ContentLine (line number n)."""
    name, params, value, group = parseLine(text, n)
    return ContentLine(name, params, value, group,
                       **{'encoded': True, 'lineNumber': n})
def dquoteEscape(param):
    """
    Return param, or "param" if ',' or ';' or ':' is in param.

    Raises VObjectError when param itself contains a double quote, since
    quoting cannot express that.
    """
    if '"' in param:
        raise VObjectError("Double quotes aren't allowed in parameter values.")
    needs_quoting = any(char in param for char in ',;:')
    return '"%s"' % param if needs_quoting else param
def foldOneLine(outbuf, input, lineLength=75):
    """Write input to outbuf folded at lineLength, terminated by CRLF.

    Folding line procedure that ensures multi-byte utf-8 sequences are not
    broken across lines.

    TO-DO: This all seems odd. Is it still needed, especially in python3?
    """
    if len(input) < lineLength:
        # Optimize for unfolded line case
        try:
            outbuf.write(bytes(input, 'UTF-8'))
        except Exception:
            # fall back on py2 syntax
            outbuf.write(input)
    else:
        # Look for valid utf8 range and write that out
        start = 0
        written = 0
        while written < len(input):
            # Start max length -1 chars on from where we are
            offset = start + lineLength - 1
            if offset >= len(input):
                line = input[start:]
                try:
                    outbuf.write(bytes(line, 'UTF-8'))
                except Exception:
                    # fall back on py2 syntax
                    outbuf.write(line)
                written = len(input)
            else:
                # Check whether next char is valid utf8 lead byte.
                # BUG FIX: compare the code point, not the character --
                # `input[offset] > 0x7F` raised TypeError on py3 and was
                # vacuously True on py2 (str always compared greater than int).
                while (ord(input[offset]) > 0x7F) and ((ord(input[offset]) & 0xC0) == 0x80):
                    # Step back until we have a valid char
                    offset -= 1
                line = input[start:offset]
                # BUG FIX: bytes(line) without an encoding raises TypeError on
                # py3; mirror the try/except fallback used everywhere else here.
                try:
                    outbuf.write(bytes(line, 'UTF-8'))
                except Exception:
                    # fall back on py2 syntax
                    outbuf.write(line)
                # BUG FIX: RFC 5545 section 3.1 requires folded continuation
                # lines to begin with a single whitespace character; without it
                # the continuation parses as a new content line on re-read.
                try:
                    outbuf.write(bytes("\r\n ", 'UTF-8'))
                except Exception:
                    # fall back on py2 syntax
                    outbuf.write("\r\n ")
                written += offset - start
                start = offset
    try:
        outbuf.write(bytes("\r\n", 'UTF-8'))
    except Exception:
        # fall back on py2 syntax
        outbuf.write("\r\n")
def defaultSerialize(obj, buf, lineLength):
    """Encode and fold obj and its children, write to buf or return a string."""
    outbuf = buf or six.StringIO()
    if isinstance(obj, Component):
        if obj.group is None:
            groupString = ''
        else:
            groupString = obj.group + '.'
        if obj.useBegin:
            foldOneLine(outbuf, "{0}BEGIN:{1}".format(groupString, obj.name), lineLength)
        for child in obj.getSortedChildren():
            #validate is recursive, we only need to validate once
            child.serialize(outbuf, lineLength, validate=False)
        if obj.useBegin:
            foldOneLine(outbuf, "{0}END:{1}".format(groupString, obj.name), lineLength)
    elif isinstance(obj, ContentLine):
        startedEncoded = obj.encoded
        # Encode the value only if it isn't already encoded; restore afterwards.
        if obj.behavior and not startedEncoded: obj.behavior.encode(obj)
        #s = codecs.getwriter('utf-8')(six.StringIO()) #unfolded buffer
        s = six.StringIO()
        if obj.group is not None:
            s.write(obj.group + '.')
        s.write(obj.name.upper())
        # Parameters are written in sorted order for deterministic output.
        keys = sorted(obj.params.keys())
        for key in keys:
            paramvals = obj.params[key]
            s.write(';' + key + '=' + ','.join(dquoteEscape(p) for p in paramvals))
        s.write(':' + six.u(obj.value))
        if obj.behavior and not startedEncoded:
            obj.behavior.decode(obj)
        foldOneLine(outbuf, s.getvalue(), lineLength)
    # When a buffer was passed in, return it; otherwise return the text.
    return buf or outbuf.getvalue()
# Minimal iCalendar sample used for doctests/manual testing.
testVCalendar="""
BEGIN:VCALENDAR
BEGIN:VEVENT
SUMMARY;blah=hi!:Bastille Day Party
END:VEVENT
END:VCALENDAR"""
class Stack:
    """Stack of components built up while parsing nested BEGIN/END blocks."""
    def __init__(self):
        self.stack = []

    def __len__(self):
        return len(self.stack)

    def top(self):
        """Return the top item without removing it, or None when empty."""
        return self.stack[-1] if self.stack else None

    def topName(self):
        """Return the top item's name, or None when empty."""
        return self.stack[-1].name if self.stack else None

    def modifyTop(self, item):
        """Add item to the top component, creating an unnamed one if needed."""
        top = self.top()
        if not top:
            top = Component()
            self.push(top)
        top.add(item)  # add sets behavior for item and children

    def push(self, obj):
        self.stack.append(obj)

    def pop(self):
        return self.stack.pop()
def readComponents(streamOrString, validate=False, transform=True,
                   findBegin=True, ignoreUnreadable=False, allowQP=False):
    """
    Generate one Component at a time from a stream.
    """
    # NOTE: basestring is Python 2 only; strings are wrapped in a StringIO.
    if isinstance(streamOrString, basestring):
        stream = six.StringIO(streamOrString)
    else:
        stream = streamOrString

    try:
        stack = Stack()
        versionLine = None
        n = 0
        for line, n in getLogicalLines(stream, allowQP, findBegin):
            if ignoreUnreadable:
                try:
                    vline = textLineToContentLine(line, n)
                except VObjectError as e:
                    if e.lineNumber is not None:
                        msg = "Skipped line %(lineNumber)s, message: %(msg)s"
                    else:
                        msg = "Skipped a line, message: %(msg)s"
                    # NOTE(review): e.message is Python 2 only, and e.lineNumber
                    # is unset unless the error carried one -- hasattr/getattr
                    # would be safer here; confirm before porting.
                    logger.error(msg % {'lineNumber' : e.lineNumber, 'msg' : e.message})
                    continue
            else:
                vline = textLineToContentLine(line, n)

            if vline.name == "VERSION":
                # Remember the VERSION line so the finished component can pick
                # the matching behavior.
                versionLine = vline
                stack.modifyTop(vline)
            elif vline.name == "BEGIN":
                stack.push(Component(vline.value, group=vline.group))
            elif vline.name == "PROFILE":
                if not stack.top():
                    stack.push(Component())
                stack.top().setProfile(vline.value)
            elif vline.name == "END":
                if len(stack) == 0:
                    err = "Attempted to end the %s component but it was never opened" % vline.value
                    raise ParseError(err, n)
                if vline.value.upper() == stack.topName(): # START matches END
                    if len(stack) == 1:
                        # Outermost component is complete: attach behavior,
                        # optionally validate/transform, then yield it.
                        component = stack.pop()
                        if versionLine is not None:
                            component.setBehaviorFromVersionLine(versionLine)
                        else:
                            behavior = getBehavior(component.name)
                            if behavior:
                                component.setBehavior(behavior)
                        if validate:
                            component.validate(raiseException=True)
                        if transform:
                            component.transformChildrenToNative()
                        yield component # EXIT POINT
                    else:
                        # Nested component: fold it into its parent.
                        stack.modifyTop(stack.pop())
                else:
                    err = "%s component wasn't closed"
                    raise ParseError(err % stack.topName(), n)
            else:
                stack.modifyTop(vline) # not a START or END line

        if stack.top():
            if stack.topName() is None:
                logger.warning("Top level component was never named")
            elif stack.top().useBegin:
                raise ParseError("Component %s was never closed" % (stack.topName()), n)
            yield stack.pop()
    except ParseError as e:
        # Attach the original input to the error for easier debugging upstream.
        e.input = streamOrString
        raise
def readOne(stream, validate=False, transform=True, findBegin=True,
            ignoreUnreadable=False, allowQP=False):
    """Return the first component parsed from stream."""
    components = readComponents(stream, validate, transform, findBegin,
                                ignoreUnreadable, allowQP)
    return next(components)
#--------------------------- version registry ----------------------------------
# Maps uppercase behavior names to a list of (id, behavior) pairs; the first
# entry of each list is the default version for that name.
__behaviorRegistry={}
def registerBehavior(behavior, name=None, default=False, id=None):
    """Register the given behavior.

    If default is True (or if this is the first version registered with this
    name), the version will be the default if no id is given.
    """
    if not name:
        name = behavior.name.upper()
    if id is None:
        id = behavior.versionString
    entry = (id, behavior)
    versions = __behaviorRegistry.get(name)
    if versions is None:
        __behaviorRegistry[name] = [entry]
    elif default:
        # Defaults go first so getBehavior picks them when no id is given.
        versions.insert(0, entry)
    else:
        versions.append(entry)
def getBehavior(name, id=None):
    """Return a matching behavior if it exists, or None.

    If id is None, return the default for name.
    """
    versions = __behaviorRegistry.get(name.upper())
    if versions is None:
        return None
    if id:
        for registeredId, behavior in versions:
            if registeredId == id:
                return behavior
    # Fall back to the default (first registered) version for this name.
    return versions[0][1]
def newFromBehavior(name, id=None):
    """Given a name, return a behaviored ContentLine or Component."""
    name = name.upper()
    behavior = getBehavior(name, id)
    if behavior is None:
        raise VObjectError("No behavior found named %s" % name)
    # Behaviors know whether they describe a component or a single line.
    obj = Component(name) if behavior.isComponent else ContentLine(name, [], '')
    obj.behavior = behavior
    obj.isNative = False
    return obj
#--------------------------- Helper function -----------------------------------
def backslashEscape(s):
    """Backslash-escape '\\', ';' and ',', and collapse line breaks to '\\n'."""
    # Order matters: backslashes first, and CRLF before its component chars.
    for raw, escaped in (("\\", "\\\\"), (";", "\\;"), (",", "\\,"),
                         ("\r\n", "\\n"), ("\n", "\\n"), ("\r", "\\n")):
        s = s.replace(raw, escaped)
    return s
#------------------- Testing and running functions -----------------------------
if __name__ == '__main__':
    # Run the module's test suite when executed directly.
    import tests
    tests._test()
# | (stray file-boundary artifact from concatenation; commented out)
from cgum.basic import *
import cgum.expression as expression
# Mix-in implemented by all statement types
class Statement(object):
    """Mix-in marking an AST node as a statement."""
    def is_statement(self):
        """Statements always report True."""
        return True

    def nearestStmt(self):
        """A statement is its own nearest enclosing statement."""
        return self
# TODO: Understand this better
class StatementExpression(Statement, expression.Expression, Node):
    """An expression node that also acts as a statement."""
    CODE = "241800"
    LABEL = "StatementExpr"

    def __init__(self, pos, length, label, children):
        assert label is None
        super().__init__(pos, length, label, children)

    def expr(self):
        """The wrapped expression (first child)."""
        return self.child(0)
# For now, declarations are statements
class DeclarationList(Statement, Node):
    """Sequence of declarations treated as a single statement."""
    CODE = "350100"
    LABEL = "DeclList"

    def __init__(self, pos, length, label, children):
        assert label is None
        super().__init__(pos, length, label, children)

    def declarations(self):
        """All child declarations."""
        return self.children()
# A declaration isn't quite a statement, but this is the best place for it,
# for now.
class Declaration(Node):
    """Wrapper node around a single DeclarationList child."""
    CODE = "450100"
    LABEL = "Declaration"

    def __init__(self, pos, length, label, children):
        assert label is None and len(children) == 1
        assert isinstance(children[0], DeclarationList)
        super().__init__(pos, length, label, children)

    def declarations(self):
        """The underlying DeclarationList."""
        return self.child(0)
# Generic definition class
class Definition(Statement, Node):
    """Generic definition node wrapping the defined entity."""
    CODE = "450200"
    LABEL = "Definition"

    def __init__(self, pos, length, label, children):
        assert label is None and len(children) == 1
        super().__init__(pos, length, label, children)

    def defined(self):
        """The entity being defined (only child)."""
        return self.child(0)

    def to_s(self):
        return self.defined().to_s()
class Goto(Statement, Node):
    """'goto LABEL' statement; its only child names the jump target."""
    CODE = "280100"
    LABEL = "Goto"

    def __init__(self, pos, length, label, children):
        assert label is None and len(children) == 1
        assert isinstance(children[0], GenericString)
        super().__init__(pos, length, label, children)

    def destination(self):
        """The GenericString naming the jump target."""
        return self.child(0)

    def to_s(self):
        # Interpolates the destination node itself (via str()), as before.
        return f"goto {self.destination()}"
class Continue(Statement, Token):
    """'continue' statement token."""
    CODE = "280001"
    LABEL = "Continue"

    def to_s(self):
        return "continue"
# Used to specify the default switch case
class Default(Statement, Node):
    """Default switch case; wraps its labelled statement."""
    CODE = "270400"
    LABEL = "Default"

    def __init__(self, pos, length, label, children):
        assert label is None and len(children) == 1
        super().__init__(pos, length, label, children)
class Case(Statement, Node):
    """'case EXPR: STMT' within a switch."""
    CODE = "270200"
    LABEL = "Case"

    def __init__(self, pos, length, label, children):
        assert len(children) == 2
        super().__init__(pos, length, label, children)

    def expr(self):
        """The case guard expression (first child)."""
        return self.child(0)

    def stmt(self):
        """The labelled statement (second child)."""
        return self.child(1)
class Switch(Statement, Node):
    """'switch (EXPR) { ... }' statement."""
    CODE = "300200"
    LABEL = "Switch"

    def __init__(self, pos, length, label, children):
        assert label is None and len(children) == 2
        assert isinstance(children[1], Block)
        super().__init__(pos, length, label, children)

    def expr(self):
        """The switched-on expression."""
        return self.child(0)

    def block(self):
        """The switch body block."""
        return self.child(1)
class Break(Statement, Token):
    """'break' statement token."""
    CODE = "280002"
    LABEL = "Break"

    def to_s(self):
        return "break"
class ExprStatement(Statement, Node):
    """An expression used in statement position."""
    CODE = "260300"
    LABEL = "ExprStatement"

    def __init__(self, pos, length, label, children):
        assert label is None and len(children) == 1
        super().__init__(pos, length, label, children)

    def expr(self):
        """The underlying expression (only child)."""
        return self.child(0)
class DoWhile(Statement, Node):
    """'do BODY while (COND)' loop; child 0 is the body, child 1 the condition."""
    CODE = "310200"
    LABEL = "DoWhile"

    def __init__(self, pos, length, label, children):
        assert label is None and len(children) == 2
        super().__init__(pos, length, label, children)

    def condition(self):
        return self.child(1)

    def do(self):
        return self.child(0)
class While(Statement, Node):
    """'while (COND) BODY' loop; child 0 is the condition, child 1 the body."""
    CODE = "310100"
    LABEL = "While"

    def __init__(self, pos, length, label, children):
        assert label is None and len(children) == 2
        super().__init__(pos, length, label, children)

    def condition(self):
        return self.child(0)

    def do(self):
        return self.child(1)
class For(Statement, Node):
    """C 'for' loop: initialisation, condition, update, body block."""
    CODE = "310300"
    LABEL = "For"

    def __init__(self, pos, length, label, children):
        assert label is None, "for statement should have no label"
        if len(children) != 4:
            # BUG FIX: the old diagnostics unconditionally indexed children[0]
            # and children[1], raising IndexError (and masking the real
            # problem) when fewer than two children were present. Print each
            # child defensively instead, matching the other copy of this class.
            print("UNEXPECTED NUMBER OF CHILDREN IN FOR: %d" % len(children))
            print(children)
            for child in children:
                print(child.to_s())
        assert len(children) == 4, "for statement should have 4 children"
        super().__init__(pos, length, label, children)

    def initialisation(self):
        return self.child(0)

    def condition(self):
        return self.child(1)

    def after(self):
        return self.child(2)

    def block(self):
        return self.child(3)
class ReturnExpr(Statement, Node):
    """'return EXPR' statement."""
    CODE = "280200"
    LABEL = "ReturnExpr"

    def __init__(self, pos, length, label, children):
        assert label is None and len(children) == 1
        super().__init__(pos, length, label, children)

    def expr(self):
        """The returned expression (only child)."""
        return self.child(0)

    def to_s(self):
        # BUG FIX: was `self.__expr`, a name-mangled attribute that is never
        # assigned anywhere, so to_s always raised AttributeError. Use the
        # expr() accessor instead.
        return "return %s" % self.expr().to_s()
class Return(Statement, Token):
    """Bare 'return' with no value."""
    CODE = "280003"
    LABEL = "Return"

    def to_s(self):
        return "return"
# Todo: move to tokens package?
class IfToken(Token):
    """The 'if' keyword token."""
    CODE = "490100"
    LABEL = "IfToken"
class IfElse(Statement, Node):
    """'if (COND) THEN [else ELSE]'; children: IfToken, cond, then[, else]."""
    CODE = "300100"
    LABEL = "If"

    def __init__(self, pos, length, label, children):
        assert 3 <= len(children) <= 4
        assert isinstance(children[0], IfToken)
        super().__init__(pos, length, label, children)

    def condition(self):
        return self.child(1)

    def then(self):
        return self.child(2)

    def els(self):
        # The fourth child, when present, is the else branch.
        if len(self.children()) == 4:
            return self.child(3)
        return None
class Block(Node):
    """Compound statement: '{ ... }'."""
    CODE = "330000"
    LABEL = "Compound"

    def contents(self):
        """The statements inside the braces."""
        return self.children()
# debugging (stray leftover word; commented out so the module stays importable)
from cgum.basic import *
import cgum.expression as expression
# Mix-in implemented by all statement types
class Statement(object):
    """Mix-in marking an AST node as a statement."""
    def is_statement(self):
        """Statements always report True."""
        return True

    def nearestStmt(self):
        """A statement is its own nearest enclosing statement."""
        return self
# TODO: Understand this better
class StatementExpression(Statement, expression.Expression, Node):
    """An expression node that also acts as a statement."""
    CODE = "241800"
    LABEL = "StatementExpr"

    def __init__(self, pos, length, label, children):
        assert label is None
        super().__init__(pos, length, label, children)

    def expr(self):
        """The wrapped expression (first child)."""
        return self.child(0)
# For now, declarations are statements
class DeclarationList(Statement, Node):
    """Sequence of declarations treated as a single statement."""
    CODE = "350100"
    LABEL = "DeclList"

    def __init__(self, pos, length, label, children):
        assert label is None
        super().__init__(pos, length, label, children)

    def declarations(self):
        """All child declarations."""
        return self.children()
# A declaration isn't quite a statement, but this is the best place for it,
# for now.
class Declaration(Node):
    """Wrapper node around a single DeclarationList child."""
    CODE = "450100"
    LABEL = "Declaration"

    def __init__(self, pos, length, label, children):
        assert label is None and len(children) == 1
        assert isinstance(children[0], DeclarationList)
        super().__init__(pos, length, label, children)

    def declarations(self):
        """The underlying DeclarationList."""
        return self.child(0)
# Generic definition class
class Definition(Statement, Node):
    """Generic definition node wrapping the defined entity."""
    CODE = "450200"
    LABEL = "Definition"

    def __init__(self, pos, length, label, children):
        assert label is None and len(children) == 1
        super().__init__(pos, length, label, children)

    def defined(self):
        """The entity being defined (only child)."""
        return self.child(0)

    def to_s(self):
        return self.defined().to_s()
class Goto(Statement, Node):
    """'goto LABEL' statement; its only child names the jump target."""
    CODE = "280100"
    LABEL = "Goto"

    def __init__(self, pos, length, label, children):
        assert label is None and len(children) == 1
        assert isinstance(children[0], GenericString)
        super().__init__(pos, length, label, children)

    def destination(self):
        """The GenericString naming the jump target."""
        return self.child(0)

    def to_s(self):
        # Interpolates the destination node itself (via str()), as before.
        return f"goto {self.destination()}"
class Continue(Statement, Token):
    """'continue' statement token."""
    CODE = "280001"
    LABEL = "Continue"

    def to_s(self):
        return "continue"
# Used to specify the default switch case
class Default(Statement, Node):
    """Default switch case; wraps its labelled statement."""
    CODE = "270400"
    LABEL = "Default"

    def __init__(self, pos, length, label, children):
        assert label is None and len(children) == 1
        super().__init__(pos, length, label, children)
class Case(Statement, Node):
    """'case EXPR: STMT' within a switch."""
    CODE = "270200"
    LABEL = "Case"

    def __init__(self, pos, length, label, children):
        assert len(children) == 2
        super().__init__(pos, length, label, children)

    def expr(self):
        """The case guard expression (first child)."""
        return self.child(0)

    def stmt(self):
        """The labelled statement (second child)."""
        return self.child(1)
class Switch(Statement, Node):
    """'switch (EXPR) { ... }' statement."""
    CODE = "300200"
    LABEL = "Switch"

    def __init__(self, pos, length, label, children):
        assert label is None and len(children) == 2
        assert isinstance(children[1], Block)
        super().__init__(pos, length, label, children)

    def expr(self):
        """The switched-on expression."""
        return self.child(0)

    def block(self):
        """The switch body block."""
        return self.child(1)
class Break(Statement, Token):
    """'break' statement token."""
    CODE = "280002"
    LABEL = "Break"

    def to_s(self):
        return "break"
class ExprStatement(Statement, Node):
    """An expression used in statement position."""
    CODE = "260300"
    LABEL = "ExprStatement"

    def __init__(self, pos, length, label, children):
        assert label is None and len(children) == 1
        super().__init__(pos, length, label, children)

    def expr(self):
        """The underlying expression (only child)."""
        return self.child(0)
class DoWhile(Statement, Node):
    """'do BODY while (COND)' loop; child 0 is the body, child 1 the condition."""
    CODE = "310200"
    LABEL = "DoWhile"

    def __init__(self, pos, length, label, children):
        assert label is None and len(children) == 2
        super().__init__(pos, length, label, children)

    def condition(self):
        return self.child(1)

    def do(self):
        return self.child(0)
class While(Statement, Node):
    """'while (COND) BODY' loop; child 0 is the condition, child 1 the body."""
    CODE = "310100"
    LABEL = "While"

    def __init__(self, pos, length, label, children):
        assert label is None and len(children) == 2
        super().__init__(pos, length, label, children)

    def condition(self):
        return self.child(0)

    def do(self):
        return self.child(1)
class For(Statement, Node):
    """C 'for' loop: initialisation, condition, update, body block."""
    CODE = "310300"
    LABEL = "For"

    def __init__(self, pos, length, label, children):
        assert label is None, "for statement should have no label"
        if len(children) != 4:
            # Diagnostic output for malformed nodes before the assertion fires.
            print("UNEXPECTED NUMBER OF CHILDREN IN FOR: %d" % len(children))
            print(pos)
            print(children)
        assert len(children) == 4, "for statement should have 4 children"
        super().__init__(pos, length, label, children)

    def initialisation(self):
        return self.child(0)

    def condition(self):
        return self.child(1)

    def after(self):
        return self.child(2)

    def block(self):
        return self.child(3)
class ReturnExpr(Statement, Node):
    """'return EXPR' statement."""
    CODE = "280200"
    LABEL = "ReturnExpr"

    def __init__(self, pos, length, label, children):
        assert label is None and len(children) == 1
        super().__init__(pos, length, label, children)

    def expr(self):
        """The returned expression (only child)."""
        return self.child(0)

    def to_s(self):
        # BUG FIX: was `self.__expr`, a name-mangled attribute that is never
        # assigned anywhere, so to_s always raised AttributeError. Use the
        # expr() accessor instead.
        return "return %s" % self.expr().to_s()
class Return(Statement, Token):
    """Bare 'return' with no value."""
    CODE = "280003"
    LABEL = "Return"

    def to_s(self):
        return "return"
# Todo: move to tokens package?
class IfToken(Token):
    """The 'if' keyword token."""
    CODE = "490100"
    LABEL = "IfToken"
class IfElse(Statement, Node):
    """'if (COND) THEN [else ELSE]'; children: IfToken, cond, then[, else]."""
    CODE = "300100"
    LABEL = "If"

    def __init__(self, pos, length, label, children):
        assert 3 <= len(children) <= 4
        assert isinstance(children[0], IfToken)
        super().__init__(pos, length, label, children)

    def condition(self):
        return self.child(1)

    def then(self):
        return self.child(2)

    def els(self):
        # The fourth child, when present, is the else branch.
        if len(self.children()) == 4:
            return self.child(3)
        return None
class Block(Node):
    """Compound statement: '{ ... }'."""
    CODE = "330000"
    LABEL = "Compound"

    def contents(self):
        """The statements inside the braces."""
        return self.children()
# | (stray file-boundary artifact from concatenation; commented out)
"""Mostly self-contained functions for geoprocessing in Python toolboxes."""
from collections import Counter
import logging
import arcpy
from utils import (
leveled_logger,
unique_name,
)
LOG = logging.getLogger(__name__)
class DatasetView(object):
    """Context manager for an ArcGIS dataset view (feature layer/table view).

    Requires:
        dataset_metadata

    Attributes:
        name (str): Name of the view.
        dataset_path (str): Path of the dataset.
        dataset_meta (dict): Metadata dictionary for the dataset.
        is_spatial (bool): Flag indicating if the view is spatial.
    """

    def __init__(self, dataset_path, dataset_where_sql=None, view_name=None,
                 force_nonspatial=False):
        """Initialize instance.

        Args:
            dataset_path (str): Path of the dataset.
            dataset_where_sql (str): SQL where-clause for dataset
                subselection.
            view_name (str): Name of the view to create. A unique name is
                generated when omitted.
            force_nonspatial (bool): Flag that forces a nonspatial view.
        """
        self.name = view_name if view_name else unique_name('view')
        self.dataset_path = dataset_path
        self.dataset_meta = dataset_metadata(dataset_path)
        # Spatial only when the dataset itself is spatial AND the caller
        # did not force a nonspatial (table) view.
        self.is_spatial = all((self.dataset_meta['is_spatial'], not force_nonspatial))
        self._where_sql = dataset_where_sql

    def __enter__(self):
        self.create()
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        self.discard()

    @property
    def count(self):
        """int: Number of features in the view."""
        return int(arcpy.management.GetCount(self.name).getOutput(0))

    @property
    def exists(self):
        """bool: Flag indicating the view currently exists."""
        return arcpy.Exists(self.name)

    @property
    def where_sql(self):
        """str: SQL where-clause property of dataset view subselection.

        Setting this property will change the view's dataset subselection.
        """
        return self._where_sql

    @where_sql.setter
    def where_sql(self, value):
        # Only push the selection to ArcGIS when the view has been created.
        if self.exists:
            arcpy.management.SelectLayerByAttribute(
                in_layer_or_view=self.name, selection_type='new_selection',
                where_clause=value
            )
        self._where_sql = value

    @where_sql.deleter
    def where_sql(self):
        if self.exists:
            arcpy.management.SelectLayerByAttribute(in_layer_or_view=self.name,
                                                    selection_type='clear_selection')
        self._where_sql = None

    def as_chunks(self, chunk_size):
        """Generate 'chunks' of the view's data as new DatasetView.

        Yields DatasetView with context management, i.e. view will be discarded
        when generator moves to next chunk-view.

        Args:
            chunk_size (int): Number of features in each chunk-view.

        Yields:
            DatasetView.
        """
        # ArcPy where clauses cannot use 'between'.
        chunk_where_sql_template = ("{oid_field_name} >= {from_oid}"
                                    " and {oid_field_name} <= {to_oid}")
        if self.where_sql:
            chunk_where_sql_template += " and ({})".format(self.where_sql)
        # Get iterable of all object IDs in dataset.
        with arcpy.da.SearchCursor(in_table=self.dataset_path,
                                   field_names=('oid@',),
                                   where_clause=self.where_sql) as cursor:
            # Sorting is important: allows selection by ID range.
            oids = sorted(oid for oid, in cursor)
        while oids:
            chunk_where_sql = chunk_where_sql_template.format(
                oid_field_name=self.dataset_meta['oid_field_name'],
                from_oid=min(oids), to_oid=max(oids[:chunk_size])
            )
            with DatasetView(self.name, chunk_where_sql) as chunk_view:
                yield chunk_view
            # Remove chunk from set.
            oids = oids[chunk_size:]

    def create(self):
        """Create view.

        Returns:
            bool: True when the view exists after creation.
        """
        function = (arcpy.management.MakeFeatureLayer if self.is_spatial
                    else arcpy.management.MakeTableView)
        function(self.dataset_path, self.name, where_clause=self.where_sql,
                 workspace=self.dataset_meta['workspace_path'])
        return self.exists

    def discard(self):
        """Discard view.

        Returns:
            bool: True when the view no longer exists.
        """
        if self.exists:
            arcpy.management.Delete(self.name)
        return not self.exists
class Editor(object):
    """Context manager wrapping an ArcGIS edit session.

    Attributes:
        workspace_path (str): Path for the editing workspace
    """

    def __init__(self, workspace_path, use_edit_session=True):
        """Initialize instance.

        Args:
            workspace_path (str): Path for the editing workspace.
            use_edit_session (bool): Flag directing edits to be made in an
                edit session. Default is True.
        """
        if use_edit_session:
            self._editor = arcpy.da.Editor(workspace_path)
        else:
            self._editor = None
        self.workspace_path = workspace_path

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        # Persist edits only when the managed block exited cleanly.
        self.stop(save_changes=not exception_type)

    @property
    def active(self):
        """bool: Flag indicating whether edit session is active."""
        return self._editor.isEditing if self._editor else False

    def start(self):
        """Start an active edit session.

        Returns:
            bool: Indicator that session is active.
        """
        if self._editor and not self._editor.isEditing:
            self._editor.startEditing(with_undo=True, multiuser_mode=True)
            self._editor.startOperation()
        return self.active

    def stop(self, save_changes=True):
        """Stop an active edit session.

        Args:
            save_changes (bool): Flag indicating whether edits should be
                saved.

        Returns:
            bool: Indicator that session is not active.
        """
        if self._editor and self._editor.isEditing:
            if save_changes:
                self._editor.stopOperation()
            else:
                self._editor.abortOperation()
            self._editor.stopEditing(save_changes)
        return not self.active
def _field_object_metadata(field_object):
"""Return dictionary of metadata from ArcPy field object."""
meta = {
'arc_object': field_object,
'name': getattr(field_object, 'name'),
'alias_name': getattr(field_object, 'aliasName'),
'base_name': getattr(field_object, 'baseName'),
'type': getattr(field_object, 'type').lower(),
'length': getattr(field_object, 'length'),
'precision': getattr(field_object, 'precision'),
'scale': getattr(field_object, 'scale'),
}
return meta
def dataset_feature_count(dataset_path, **kwargs):
    """Return number of features in dataset.

    Requires:
        DatasetView

    Args:
        dataset_path (str): Path of the dataset.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.

    Returns:
        int: Number of features counted.
    """
    # The temporary view applies any subselection; discard it on exit.
    with DatasetView(dataset_path, **kwargs) as view:
        return view.count
def dataset_metadata(dataset_path):
    """Return dictionary of dataset metadata.

    Requires:
        _field_object_metadata

    Args:
        dataset_path (str): Path of the dataset.

    Returns:
        dict: Metadata for dataset.
    """
    arc_object = arcpy.Describe(dataset_path)
    meta = {
        'arc_object': arc_object,
        'name': getattr(arc_object, 'name'),
        'path': getattr(arc_object, 'catalogPath'),
        'data_type': getattr(arc_object, 'dataType'),
        'workspace_path': getattr(arc_object, 'path'),
        # Do not use getattr! Tables sometimes don't have OIDs.
        'is_table': hasattr(arc_object, 'hasOID'),
        'is_versioned': getattr(arc_object, 'isVersioned', False),
        'oid_field_name': getattr(arc_object, 'OIDFieldName', None),
        'is_spatial': hasattr(arc_object, 'shapeType'),
        'geometry_type': getattr(arc_object, 'shapeType', None),
        # 'geom_*' keys duplicate the 'geometry_*' keys, so callers may
        # use either spelling.
        'geom_type': getattr(arc_object, 'shapeType', None),
        'geometry_field_name': getattr(arc_object, 'shapeFieldName', None),
        'geom_field_name': getattr(arc_object, 'shapeFieldName', None),
    }
    # Map real field names to their ArcPy cursor token equivalents.
    meta['field_token'] = {}
    if meta['oid_field_name']:
        meta['field_token'][meta['oid_field_name']] = 'oid@'
    if meta['geom_field_name']:
        meta['field_token'].update({
            meta['geom_field_name']: 'shape@',
            meta['geom_field_name'] + '_Area': 'shape@area',
            meta['geom_field_name'] + '_Length': 'shape@length',
            meta['geom_field_name'] + '.STArea()': 'shape@area',
            meta['geom_field_name'] + '.STLength()': 'shape@length',
        })
    meta['field_names'] = tuple(field.name for field
                                in getattr(arc_object, 'fields', ()))
    meta['field_names_tokenized'] = tuple(meta['field_token'].get(name, name)
                                          for name in meta['field_names'])
    meta['fields'] = tuple(_field_object_metadata(field) for field
                           in getattr(arc_object, 'fields', ()))
    # "User" fields exclude the OID field and geometry-derived fields.
    # NOTE(review): when geometry_field_name is None the substring filter
    # below checks for the literal 'None.' — confirm that is intended for
    # nonspatial datasets.
    meta['user_field_names'] = tuple(
        name for name in meta['field_names']
        if name != meta['oid_field_name']
        and '{}.'.format(meta['geometry_field_name']) not in name
    )
    meta['user_fields'] = tuple(
        field for field in meta['fields']
        if field['name'] != meta['oid_field_name']
        and '{}.'.format(meta['geometry_field_name']) not in field['name']
    )
    if hasattr(arc_object, 'spatialReference'):
        meta['spatial_reference'] = getattr(arc_object, 'spatialReference')
        meta['spatial_reference_id'] = getattr(meta['spatial_reference'], 'factoryCode')
    else:
        meta['spatial_reference'] = None
        meta['spatial_reference_id'] = None
    return meta
def features_delete(dataset_path, **kwargs):
    """Delete features in the dataset.

    Requires:
        DatasetView
        Editor
        dataset_feature_count
        dataset_metadata

    Args:
        dataset_path (str): Path of the dataset.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        use_edit_session (bool): Flag to perform updates in an edit session.
            Default is False.
        log_level (str): Level to log the function at. Defaults to 'info'.

    Returns:
        collections.Counter: Counts of 'deleted' and 'unchanged' features.
    """
    kwargs.setdefault('dataset_where_sql')
    kwargs.setdefault('use_edit_session', False)
    log = leveled_logger(LOG, kwargs.get('log_level', 'info'))
    if kwargs['dataset_where_sql']:
        log("Start: Delete features from %s where `%s`.",
            dataset_path, kwargs['dataset_where_sql'])
    else:
        log("Start: Delete features from %s.", dataset_path)
    meta = {'dataset': dataset_metadata(dataset_path)}
    truncate_error_codes = (
        # "Only supports Geodatabase tables and feature classes."
        'ERROR 000187',
        # "Operation not supported on a versioned table."
        'ERROR 001259',
        # "Operation not supported on table {table name}."
        'ERROR 001260',
        # Operation not supported on a feature class in a controller dataset.
        'ERROR 001395',
    )
    # Can use (faster) truncate when no sub-selection or edit session.
    run_truncate = (kwargs['dataset_where_sql'] is None
                    and kwargs['use_edit_session'] is False)
    feature_count = Counter()
    if run_truncate:
        feature_count['deleted'] = dataset_feature_count(dataset_path)
        feature_count['unchanged'] = 0
        try:
            arcpy.management.TruncateTable(in_table=dataset_path)
        except arcpy.ExecuteError:
            # Avoid arcpy.GetReturnCode(); error code position inconsistent.
            # Search messages for 'ERROR ######' instead.
            if any(code in arcpy.GetMessages()
                   for code in truncate_error_codes):
                LOG.debug("Truncate unsupported; will try deleting rows.")
                run_truncate = False
            else:
                raise
    # Fall back to row-wise delete when truncate is unavailable.
    if not run_truncate:
        view = {'dataset': DatasetView(dataset_path, kwargs['dataset_where_sql'])}
        session = Editor(meta['dataset']['workspace_path'], kwargs['use_edit_session'])
        with view['dataset'], session:
            feature_count['deleted'] = view['dataset'].count
            arcpy.management.DeleteRows(in_rows=view['dataset'].name)
            feature_count['unchanged'] = dataset_feature_count(dataset_path)
    for key in ('deleted', 'unchanged'):
        log("%s features %s.", feature_count[key], key)
    log("End: Delete.")
    return feature_count
Add functions:
attributes_as_dicts;
attributes_as_iters;
coordinate_node_map;
field_metadata;
id_attributes_map;
id_node_map;
insert_features_from_dicts;
insert_features_from_iters;
insert_features_from_path;
python_type;
spatial_reference;
update_attributes_by_function;
update_attributes_by_mapping;
update_attributes_by_node_ids;
update_attributes_by_unique_id;
"""Mostly self-contained functions for geoprocessing in Python toolboxes."""
from collections import Counter, defaultdict
import copy
import datetime
import inspect
from itertools import chain
import logging
import uuid
import arcpy
from utils import (
contain,
leveled_logger,
unique_ids,
unique_name,
)
LOG = logging.getLogger(__name__)
class DatasetView(object):
    """Context manager for an ArcGIS dataset view (feature layer/table view).

    Requires:
        dataset_metadata

    Attributes:
        name (str): Name of the view.
        dataset_path (str): Path of the dataset.
        dataset_meta (dict): Metadata dictionary for the dataset.
        is_spatial (bool): Flag indicating if the view is spatial.
    """

    def __init__(self, dataset_path, dataset_where_sql=None, view_name=None,
                 force_nonspatial=False):
        """Initialize instance.

        Args:
            dataset_path (str): Path of the dataset.
            dataset_where_sql (str): SQL where-clause for dataset
                subselection.
            view_name (str): Name of the view to create. A unique name is
                generated when omitted.
            force_nonspatial (bool): Flag that forces a nonspatial view.
        """
        self.name = view_name if view_name else unique_name('view')
        self.dataset_path = dataset_path
        self.dataset_meta = dataset_metadata(dataset_path)
        # Spatial only when the dataset itself is spatial AND the caller
        # did not force a nonspatial (table) view.
        self.is_spatial = all((self.dataset_meta['is_spatial'], not force_nonspatial))
        self._where_sql = dataset_where_sql

    def __enter__(self):
        self.create()
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        self.discard()

    @property
    def count(self):
        """int: Number of features in the view."""
        return int(arcpy.management.GetCount(self.name).getOutput(0))

    @property
    def exists(self):
        """bool: Flag indicating the view currently exists."""
        return arcpy.Exists(self.name)

    @property
    def where_sql(self):
        """str: SQL where-clause property of dataset view subselection.

        Setting this property will change the view's dataset subselection.
        """
        return self._where_sql

    @where_sql.setter
    def where_sql(self, value):
        # Only push the selection to ArcGIS when the view has been created.
        if self.exists:
            arcpy.management.SelectLayerByAttribute(
                in_layer_or_view=self.name, selection_type='new_selection',
                where_clause=value
            )
        self._where_sql = value

    @where_sql.deleter
    def where_sql(self):
        if self.exists:
            arcpy.management.SelectLayerByAttribute(in_layer_or_view=self.name,
                                                    selection_type='clear_selection')
        self._where_sql = None

    def as_chunks(self, chunk_size):
        """Generate 'chunks' of the view's data as new DatasetView.

        Yields DatasetView with context management, i.e. view will be discarded
        when generator moves to next chunk-view.

        Args:
            chunk_size (int): Number of features in each chunk-view.

        Yields:
            DatasetView.
        """
        # ArcPy where clauses cannot use 'between'.
        chunk_where_sql_template = ("{oid_field_name} >= {from_oid}"
                                    " and {oid_field_name} <= {to_oid}")
        if self.where_sql:
            chunk_where_sql_template += " and ({})".format(self.where_sql)
        # Get iterable of all object IDs in dataset.
        with arcpy.da.SearchCursor(in_table=self.dataset_path,
                                   field_names=('oid@',),
                                   where_clause=self.where_sql) as cursor:
            # Sorting is important: allows selection by ID range.
            oids = sorted(oid for oid, in cursor)
        while oids:
            chunk_where_sql = chunk_where_sql_template.format(
                oid_field_name=self.dataset_meta['oid_field_name'],
                from_oid=min(oids), to_oid=max(oids[:chunk_size])
            )
            with DatasetView(self.name, chunk_where_sql) as chunk_view:
                yield chunk_view
            # Remove chunk from set.
            oids = oids[chunk_size:]

    def create(self):
        """Create view.

        Returns:
            bool: True when the view exists after creation.
        """
        function = (arcpy.management.MakeFeatureLayer if self.is_spatial
                    else arcpy.management.MakeTableView)
        function(self.dataset_path, self.name, where_clause=self.where_sql,
                 workspace=self.dataset_meta['workspace_path'])
        return self.exists

    def discard(self):
        """Discard view.

        Returns:
            bool: True when the view no longer exists.
        """
        if self.exists:
            arcpy.management.Delete(self.name)
        return not self.exists
class Editor(object):
    """Context manager wrapping an ArcGIS edit session.

    Attributes:
        workspace_path (str): Path for the editing workspace
    """

    def __init__(self, workspace_path, use_edit_session=True):
        """Initialize instance.

        Args:
            workspace_path (str): Path for the editing workspace.
            use_edit_session (bool): Flag directing edits to be made in an
                edit session. Default is True.
        """
        if use_edit_session:
            self._editor = arcpy.da.Editor(workspace_path)
        else:
            self._editor = None
        self.workspace_path = workspace_path

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        # Persist edits only when the managed block exited cleanly.
        self.stop(save_changes=not exception_type)

    @property
    def active(self):
        """bool: Flag indicating whether edit session is active."""
        return self._editor.isEditing if self._editor else False

    def start(self):
        """Start an active edit session.

        Returns:
            bool: Indicator that session is active.
        """
        if self._editor and not self._editor.isEditing:
            self._editor.startEditing(with_undo=True, multiuser_mode=True)
            self._editor.startOperation()
        return self.active

    def stop(self, save_changes=True):
        """Stop an active edit session.

        Args:
            save_changes (bool): Flag indicating whether edits should be
                saved.

        Returns:
            bool: Indicator that session is not active.
        """
        if self._editor and self._editor.isEditing:
            if save_changes:
                self._editor.stopOperation()
            else:
                self._editor.abortOperation()
            self._editor.stopEditing(save_changes)
        return not self.active
def _field_object_metadata(field_object):
"""Return dictionary of metadata from ArcPy field object."""
meta = {
'arc_object': field_object,
'name': getattr(field_object, 'name'),
'alias_name': getattr(field_object, 'aliasName'),
'base_name': getattr(field_object, 'baseName'),
'type': getattr(field_object, 'type').lower(),
'length': getattr(field_object, 'length'),
'precision': getattr(field_object, 'precision'),
'scale': getattr(field_object, 'scale'),
}
return meta
def attributes_as_dicts(dataset_path, field_names=None, **kwargs):
    """Generator for dictionaries of feature attributes.

    Use ArcPy cursor token names for object IDs and geometry objects/properties.

    Args:
        dataset_path (str): Path of the dataset.
        field_names (iter): Collection of field names. Names will be the keys in the
            dictionary mapping to their values. If value is None, all attributes fields
            will be used.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        spatial_reference_item: Item from which the output geometry's spatial
            reference will be derived.

    Yields:
        dict: Mapping of feature attribute field names to values.
    """
    kwargs.setdefault('dataset_where_sql')
    kwargs.setdefault('spatial_reference_item')
    if field_names is None:
        meta = {'dataset': dataset_metadata(dataset_path)}
        keys = {'field': tuple(key.lower() for key
                               in meta['dataset']['field_names_tokenized'])}
    else:
        keys = {'field': tuple(contain(field_names))}
    sref = spatial_reference(kwargs['spatial_reference_item'])
    # Bug fix: pass the tuple of field names, not the `keys` dict itself
    # (iterating the dict would hand the cursor the literal string 'field').
    cursor = arcpy.da.SearchCursor(in_table=dataset_path, field_names=keys['field'],
                                   where_clause=kwargs['dataset_where_sql'],
                                   spatial_reference=sref)
    with cursor:
        for feature in cursor:
            yield dict(zip(cursor.fields, feature))
def attributes_as_iters(dataset_path, field_names, **kwargs):
    """Generator for iterables of feature attributes.

    Use ArcPy cursor token names for object IDs and geometry objects/properties.

    Args:
        dataset_path (str): Path of the dataset.
        field_names (iter): Collection of field names. The order of the names in the
            collection will determine where its value will fall in the yielded item.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        spatial_reference_item: Item from which the output geometry's spatial
            reference will be derived.
        iter_type: Iterable type to yield. Default is tuple.

    Yields:
        iter: Collection of attribute values.
    """
    kwargs.setdefault('dataset_where_sql')
    kwargs.setdefault('spatial_reference_item')
    kwargs.setdefault('iter_type', tuple)
    fields = tuple(contain(field_names))
    cursor = arcpy.da.SearchCursor(
        in_table=dataset_path, field_names=fields,
        where_clause=kwargs['dataset_where_sql'],
        spatial_reference=spatial_reference(kwargs['spatial_reference_item']),
    )
    make_iter = kwargs['iter_type']
    with cursor:
        for feature in cursor:
            yield make_iter(feature)
def coordinate_node_map(dataset_path, from_id_field_name, to_id_field_name,
                        id_field_name='oid@', **kwargs):
    """Return dictionary mapping of coordinates to node-info dictionaries.

    Note:
        From & to IDs must be the same attribute type.

    Args:
        dataset_path (str): Path of the dataset.
        from_id_field_name (str): Name of the from-ID field.
        to_id_field_name (str): Name of the to-ID field.
        id_field_name (str): Name of the ID field. Default is 'oid@'.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        update_nodes (bool): Flag to indicate whether to update nodes based on feature
            geometries. Default is False.

    Returns:
        dict: Mapping of coordinate tuples to node-info dictionaries.
            {(x, y): {'node_id': <id>, 'ids': {'from': set(), 'to': set()}}}
    """
    kwargs.setdefault('dataset_where_sql')
    kwargs.setdefault('update_nodes', False)

    def _node_feature_count(node):
        """Return feature count for node from info map."""
        return len(node['ids']['from'].union(node['ids']['to']))

    def _update_coord_node_map(coord_node, node_id_metadata):
        """Return updated coordinate node info map."""
        # Work on a copy so the caller's map is untouched on partial failure.
        coord_node = copy.deepcopy(coord_node)
        used_ids = {node['node_id'] for node in coord_node.values()
                    if node['node_id'] is not None}
        # Lazy stream of candidate IDs not already assigned to any node.
        unused_ids = (
            _id for _id in unique_ids(python_type(node_id_metadata['type']),
                                      node_id_metadata['length'])
            if _id not in used_ids
        )
        id_coords = {}
        for coord, node in coord_node.items():
            # Assign IDs where missing.
            if node['node_id'] is None:
                node['node_id'] = next(unused_ids)
            # If ID duplicate, re-ID node with least features.
            elif node['node_id'] in id_coords:
                other_coord = id_coords[node['node_id']]
                other_node = coord_node[other_coord]
                new_node_id = next(unused_ids)
                if _node_feature_count(node) > _node_feature_count(other_node):
                    other_node['node_id'] = new_node_id  # Does update coord_node!
                    id_coords[new_node_id] = id_coords.pop(node['node_id'])
                else:
                    node['node_id'] = new_node_id  # Does update coord_node!
            id_coords[node['node_id']] = coord
        return coord_node

    keys = {'field': (id_field_name, from_id_field_name, to_id_field_name, 'shape@')}
    coord_node = {}
    g_features = attributes_as_iters(
        dataset_path, keys['field'], dataset_where_sql=kwargs['dataset_where_sql'],
    )
    for feature_id, from_node_id, to_node_id, geom in g_features:
        # Each feature contributes its first point (from-end) and last
        # point (to-end) as candidate nodes.
        for end, node_id, point in [('from', from_node_id, geom.firstPoint),
                                    ('to', to_node_id, geom.lastPoint)]:
            coord = (point.X, point.Y)
            if coord not in coord_node:
                # Create new coordinate-node.
                coord_node[coord] = {'node_id': node_id, 'ids': defaultdict(set)}
            # NOTE(review): if node_id is None here while the stored ID is
            # not, min() would compare against None — confirm node IDs are
            # non-null when this branch runs.
            coord_node[coord]['node_id'] = (
                # Assign new ID if current is missing.
                node_id if coord_node[coord]['node_id'] is None
                # Assign new ID if lower than current.
                else min(coord_node[coord]['node_id'], node_id)
            )
            # Add feature ID to end-ID set.
            coord_node[coord]['ids'][end].add(feature_id)
    if kwargs['update_nodes']:
        field_meta = {'node_id': field_metadata(dataset_path, from_id_field_name)}
        coord_node = _update_coord_node_map(coord_node, field_meta['node_id'])
    return coord_node
def dataset_feature_count(dataset_path, **kwargs):
    """Return number of features in dataset.

    Requires:
        DatasetView

    Args:
        dataset_path (str): Path of the dataset.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.

    Returns:
        int: Number of features counted.
    """
    # The temporary view applies any subselection; discard it on exit.
    with DatasetView(dataset_path, **kwargs) as view:
        return view.count
def dataset_metadata(dataset_path):
    """Return dictionary of dataset metadata.

    Requires:
        _field_object_metadata

    Args:
        dataset_path (str): Path of the dataset.

    Returns:
        dict: Metadata for dataset.
    """
    arc_object = arcpy.Describe(dataset_path)
    meta = {
        'arc_object': arc_object,
        'name': getattr(arc_object, 'name'),
        'path': getattr(arc_object, 'catalogPath'),
        'data_type': getattr(arc_object, 'dataType'),
        'workspace_path': getattr(arc_object, 'path'),
        # Do not use getattr! Tables sometimes don't have OIDs.
        'is_table': hasattr(arc_object, 'hasOID'),
        'is_versioned': getattr(arc_object, 'isVersioned', False),
        'oid_field_name': getattr(arc_object, 'OIDFieldName', None),
        'is_spatial': hasattr(arc_object, 'shapeType'),
        'geometry_type': getattr(arc_object, 'shapeType', None),
        # 'geom_*' keys duplicate the 'geometry_*' keys, so callers may
        # use either spelling.
        'geom_type': getattr(arc_object, 'shapeType', None),
        'geometry_field_name': getattr(arc_object, 'shapeFieldName', None),
        'geom_field_name': getattr(arc_object, 'shapeFieldName', None),
    }
    # Map real field names to their ArcPy cursor token equivalents.
    meta['field_token'] = {}
    if meta['oid_field_name']:
        meta['field_token'][meta['oid_field_name']] = 'oid@'
    if meta['geom_field_name']:
        meta['field_token'].update({
            meta['geom_field_name']: 'shape@',
            meta['geom_field_name'] + '_Area': 'shape@area',
            meta['geom_field_name'] + '_Length': 'shape@length',
            meta['geom_field_name'] + '.STArea()': 'shape@area',
            meta['geom_field_name'] + '.STLength()': 'shape@length',
        })
    meta['field_names'] = tuple(field.name for field
                                in getattr(arc_object, 'fields', ()))
    meta['field_names_tokenized'] = tuple(meta['field_token'].get(name, name)
                                          for name in meta['field_names'])
    meta['fields'] = tuple(_field_object_metadata(field) for field
                           in getattr(arc_object, 'fields', ()))
    # "User" fields exclude the OID field and geometry-derived fields.
    # NOTE(review): when geometry_field_name is None the substring filter
    # below checks for the literal 'None.' — confirm that is intended for
    # nonspatial datasets.
    meta['user_field_names'] = tuple(
        name for name in meta['field_names']
        if name != meta['oid_field_name']
        and '{}.'.format(meta['geometry_field_name']) not in name
    )
    meta['user_fields'] = tuple(
        field for field in meta['fields']
        if field['name'] != meta['oid_field_name']
        and '{}.'.format(meta['geometry_field_name']) not in field['name']
    )
    if hasattr(arc_object, 'spatialReference'):
        meta['spatial_reference'] = getattr(arc_object, 'spatialReference')
        meta['spatial_reference_id'] = getattr(meta['spatial_reference'], 'factoryCode')
    else:
        meta['spatial_reference'] = None
        meta['spatial_reference_id'] = None
    return meta
def features_delete(dataset_path, **kwargs):
    """Delete features in the dataset.

    Requires:
        DatasetView
        Editor
        dataset_feature_count
        dataset_metadata

    Args:
        dataset_path (str): Path of the dataset.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        use_edit_session (bool): Flag to perform updates in an edit session.
            Default is False.
        log_level (str): Level to log the function at. Defaults to 'info'.

    Returns:
        collections.Counter: Counts of 'deleted' and 'unchanged' features.
    """
    kwargs.setdefault('dataset_where_sql')
    kwargs.setdefault('use_edit_session', False)
    log = leveled_logger(LOG, kwargs.get('log_level', 'info'))
    if kwargs['dataset_where_sql']:
        log("Start: Delete features from %s where `%s`.",
            dataset_path, kwargs['dataset_where_sql'])
    else:
        log("Start: Delete features from %s.", dataset_path)
    meta = {'dataset': dataset_metadata(dataset_path)}
    truncate_error_codes = (
        # "Only supports Geodatabase tables and feature classes."
        'ERROR 000187',
        # "Operation not supported on a versioned table."
        'ERROR 001259',
        # "Operation not supported on table {table name}."
        'ERROR 001260',
        # Operation not supported on a feature class in a controller dataset.
        'ERROR 001395',
    )
    # Can use (faster) truncate when no sub-selection or edit session.
    run_truncate = (kwargs['dataset_where_sql'] is None
                    and kwargs['use_edit_session'] is False)
    feature_count = Counter()
    if run_truncate:
        feature_count['deleted'] = dataset_feature_count(dataset_path)
        feature_count['unchanged'] = 0
        try:
            arcpy.management.TruncateTable(in_table=dataset_path)
        except arcpy.ExecuteError:
            # Avoid arcpy.GetReturnCode(); error code position inconsistent.
            # Search messages for 'ERROR ######' instead.
            if any(code in arcpy.GetMessages()
                   for code in truncate_error_codes):
                LOG.debug("Truncate unsupported; will try deleting rows.")
                run_truncate = False
            else:
                raise
    # Fall back to row-wise delete when truncate is unavailable.
    if not run_truncate:
        view = {'dataset': DatasetView(dataset_path, kwargs['dataset_where_sql'])}
        session = Editor(meta['dataset']['workspace_path'], kwargs['use_edit_session'])
        with view['dataset'], session:
            feature_count['deleted'] = view['dataset'].count
            arcpy.management.DeleteRows(in_rows=view['dataset'].name)
            feature_count['unchanged'] = dataset_feature_count(dataset_path)
    for key in ('deleted', 'unchanged'):
        log("%s features %s.", feature_count[key], key)
    log("End: Delete.")
    return feature_count
def field_metadata(dataset_path, field_name):
    """Return dictionary of field metadata.

    Note:
        Field name is case-insensitive.

    Args:
        dataset_path (str): Path of the dataset.
        field_name (str): Name of the field.

    Returns:
        dict: Metadata for field.

    Raises:
        AttributeError: If no matching field is present on the dataset.
    """
    fields = arcpy.ListFields(dataset=dataset_path, wild_card=field_name)
    if not fields:
        raise AttributeError("Field {} not present on {}".format(field_name,
                                                                 dataset_path))
    return _field_object_metadata(fields[0])
def id_attributes_map(dataset_path, id_field_names, field_names, **kwargs):
    """Return dictionary mapping of field attribute for each feature ID.

    Note:
        There is no guarantee that the ID field(s) are unique.
        Use ArcPy cursor token names for object IDs and geometry objects/
        properties.

    Args:
        dataset_path (str): Path of the dataset.
        id_field_names (iter, str): Name(s) of the ID field(s).
        field_names (iter, str): Name(s) of the field(s).
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        spatial_reference_item: Item from which the output geometry's spatial
            reference will be derived.

    Returns:
        dict: Mapping of feature ID to feature attribute(s).
    """
    value_keys = tuple(contain(field_names))
    id_keys = tuple(contain(id_field_names))
    cursor = arcpy.da.SearchCursor(
        dataset_path, field_names=id_keys + value_keys,
        where_clause=kwargs.get('dataset_where_sql'),
        spatial_reference=spatial_reference(kwargs.get('spatial_reference_item')),
    )
    id_count = len(id_keys)
    attribute_map = {}
    with cursor:
        for row in cursor:
            # Single-name IDs/values are unwrapped from their tuples.
            map_id = row[0] if id_count == 1 else row[:id_count]
            values = row[id_count:]
            attribute_map[map_id] = values[0] if len(value_keys) == 1 else values
    return attribute_map
def id_node_map(dataset_path, from_id_field_name, to_id_field_name,
                id_field_name='oid@', **kwargs):
    """Return dictionary mapping of feature ID to from- & to-node IDs.

    From & to IDs must be the same attribute type.

    Args:
        dataset_path (str): Path of the dataset.
        from_id_field_name (str): Name of the from-ID field.
        to_id_field_name (str): Name of the to-ID field.
        id_field_name (str): Name of the ID field. Default is 'oid@'.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        field_names_as_keys (bool): Flag to indicate use of dataset's node ID field
            names as the ID field names in the map. Default is False.
        update_nodes (bool): Flag to indicate whether to update the nodes based on the
            feature geometries. Default is False.

    Returns:
        dict: Mapping of feature IDs to node-end ID dictionaries.
            `{feature_id: {'from': from_node_id, 'to': to_node_id}}`

    Raises:
        ValueError: If the from- and to-ID fields are not the same type.
    """
    kwargs.setdefault('dataset_where_sql')
    kwargs.setdefault('field_names_as_keys', False)
    kwargs.setdefault('update_nodes', False)
    field_meta = {'from': field_metadata(dataset_path, from_id_field_name),
                  'to': field_metadata(dataset_path, to_id_field_name)}
    if field_meta['from']['type'] != field_meta['to']['type']:
        # Bug fix: the original message left its %s placeholders unfilled.
        raise ValueError("Fields %s & %s must be of same type."
                         % (from_id_field_name, to_id_field_name))
    key = {'id': id_field_name, 'from': from_id_field_name, 'to': to_id_field_name}
    end_key = {'from': from_id_field_name if kwargs['field_names_as_keys'] else 'from',
               'to': to_id_field_name if kwargs['field_names_as_keys'] else 'to'}
    id_nodes = defaultdict(dict)
    if kwargs['update_nodes']:
        coord_node_info = coordinate_node_map(dataset_path, from_id_field_name,
                                              to_id_field_name, id_field_name, **kwargs)
        for node in coord_node_info.values():
            # Loop variable renamed from `key` to stop it shadowing the
            # outer `key` dict.
            for end, node_key in end_key.items():
                for feat_id in node['ids'][end]:
                    id_nodes[feat_id][node_key] = node['node_id']
    # If not updating nodes, don't need to bother with geometry/coordinates.
    else:
        g_id_nodes = attributes_as_iters(
            dataset_path, field_names=(key['id'], from_id_field_name, to_id_field_name),
            dataset_where_sql=kwargs['dataset_where_sql'],
        )
        for feat_id, from_node_id, to_node_id in g_id_nodes:
            id_nodes[feat_id][end_key['from']] = from_node_id
            id_nodes[feat_id][end_key['to']] = to_node_id
    return id_nodes
def insert_features_from_dicts(dataset_path, insert_features, field_names, **kwargs):
    """Insert features into dataset from dictionaries.

    Args:
        dataset_path (str): Path of the dataset.
        insert_features (iter of dict): Collection of dictionaries
            representing features. May also be a generator function that
            produces such dictionaries.
        field_names (iter): Collection of field names/keys to insert.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        use_edit_session (bool): Flag to perform updates in an edit session.
            Default is False.
        log_level (str): Level to log the function at. Defaults to 'info'.

    Returns:
        collections.Counter: Counts of inserted features.
    """
    kwargs.setdefault('use_edit_session', False)
    log = leveled_logger(LOG, kwargs.get('log_level', 'info'))
    log("Start: Insert features into %s from dictionaries.", dataset_path)
    row_keys = tuple(contain(field_names))
    if inspect.isgeneratorfunction(insert_features):
        insert_features = insert_features()
    # Re-shape each dictionary into an ordered value iterable, then reuse
    # the iterable-based insert.
    rows = ((feature[k] for k in row_keys) for feature in insert_features)
    feature_count = insert_features_from_iters(
        dataset_path, rows, field_names,
        use_edit_session=kwargs['use_edit_session'], log_level=None,
    )
    log("%s features inserted.", feature_count['inserted'])
    log("End: Insert.")
    return feature_count
def insert_features_from_iters(dataset_path, insert_features, field_names, **kwargs):
    """Insert features into dataset from iterables.

    Args:
        dataset_path (str): Path of the dataset.
        insert_features (iter of iter): Collection of iterables representing
            features. May also be a generator function yielding them.
        field_names (iter): Collection of field names to insert. These must
            match the order of their attributes in the insert_features items.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        use_edit_session (bool): Flag to perform updates in an edit session.
            Default is False.
        log_level (str): Level to log the function at. Defaults to 'info'.

    Returns:
        collections.Counter: Counts of features by insert-state
            (key 'inserted' holds the number of rows written).
    """
    kwargs.setdefault('use_edit_session', False)
    log = leveled_logger(LOG, kwargs.get('log_level', 'info'))
    log("Start: Insert features into %s from iterables.", dataset_path)
    meta = {'dataset': dataset_metadata(dataset_path)}
    keys = {'row': tuple(contain(field_names))}
    # Accept a generator *function*: call it to get the actual iterable.
    if inspect.isgeneratorfunction(insert_features):
        insert_features = insert_features()
    session = Editor(meta['dataset']['workspace_path'], kwargs['use_edit_session'])
    cursor = arcpy.da.InsertCursor(dataset_path, field_names=keys['row'])
    feature_count = Counter()
    with session, cursor:
        for row in insert_features:
            cursor.insertRow(tuple(row))
            feature_count['inserted'] += 1
    log("%s features inserted.", feature_count['inserted'])
    log("End: Insert.")
    return feature_count
def insert_features_from_path(dataset_path, insert_dataset_path, field_names=None,
                              **kwargs):
    """Insert features into dataset from another dataset.

    Args:
        dataset_path (str): Path of the dataset.
        insert_dataset_path (str): Path of dataset to insert features from.
        field_names (iter): Collection of field names to insert. Listed field must be
            present in both datasets. If field_names is None, all fields will be
            inserted.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        insert_where_sql (str): SQL where-clause for insert-dataset subselection.
        use_edit_session (bool): Flag to perform updates in an edit session. Default is
            False.
        log_level (str): Level to log the function at. Defaults to 'info'.

    Returns:
        collections.Counter: Counts of features by insert-state
            (key 'inserted' holds the number of rows appended).
    """
    kwargs.setdefault('insert_where_sql')
    kwargs.setdefault('use_edit_session', False)
    log = leveled_logger(LOG, kwargs.get('log_level', 'info'))
    log("Start: Insert features into %s from %s.", dataset_path, insert_dataset_path)
    meta = {'dataset': dataset_metadata(dataset_path),
            'insert': dataset_metadata(insert_dataset_path)}
    # Default to the fields common to both datasets (case-insensitive).
    if field_names is None:
        keys = set.intersection(
            *(set(name.lower() for name in _meta['field_names_tokenized'])
              for _meta in meta.values())
        )
    else:
        keys = set(name.lower() for name in contain(field_names))
    # OIDs & area/length "fields" have no business being part of an update.
    # Geometry itself is handled separately in append function.
    for _meta in meta.values():
        for key in chain(*_meta['field_token'].items()):
            keys.discard(key)
    append_kwargs = {'inputs': unique_name('view'), 'target': dataset_path,
                     'schema_type': 'no_test', 'field_mapping': arcpy.FieldMappings()}
    # Create field maps.
    # ArcGIS Pro's no-test append is case-sensitive (verified 1.0-1.1.1).
    # Avoid this problem by using field mapping.
    # BUG-000090970 - ArcGIS Pro 'No test' field mapping in Append tool does
    # not auto-map to the same field name if naming convention differs.
    for key in keys:
        field_map = arcpy.FieldMap()
        field_map.addInputField(insert_dataset_path, key)
        append_kwargs['field_mapping'].addFieldMap(field_map)
    view = DatasetView(insert_dataset_path, kwargs['insert_where_sql'],
                       view_name=append_kwargs['inputs'],
                       # Must be nonspatial to append to nonspatial table.
                       force_nonspatial=(not meta['dataset']['is_spatial']))
    session = Editor(meta['dataset']['workspace_path'], kwargs['use_edit_session'])
    with view, session:
        arcpy.management.Append(**append_kwargs)
        feature_count = Counter({'inserted': view.count})
    log("%s features inserted.", feature_count['inserted'])
    log("End: Insert.")
    return feature_count
def python_type(type_description):
    """Return object representing the Python type.

    Args:
        type_description (str): Arc-style type description/code.

    Returns:
        Python object representing the type.

    Raises:
        KeyError: If the description is not a recognized Arc type.
    """
    # Map each group of Arc type descriptions to its Python equivalent.
    description_type = {}
    for descriptions, py_type in (
            (('date',), datetime.datetime),
            (('double', 'single'), float),
            (('integer', 'long', 'short', 'smallinteger'), int),
            (('geometry',), arcpy.Geometry),
            (('guid',), uuid.UUID),
            (('string', 'text'), str),
    ):
        for description in descriptions:
            description_type[description] = py_type
    return description_type[type_description.lower()]
def spatial_reference(item):
    """Return ArcPy spatial reference object from a Python reference.

    Args:
        item (int): Spatial reference ID.
            (str): Path of reference dataset/file.
            (arcpy.Geometry): Reference geometry object.
            (arcpy.SpatialReference): Spatial reference object.

    Returns:
        arcpy.SpatialReference.
    """
    # Dispatch on the reference type; early returns keep each case flat.
    if item is None:
        return None
    if isinstance(item, arcpy.SpatialReference):
        return item
    if isinstance(item, int):
        return arcpy.SpatialReference(item)
    if isinstance(item, arcpy.Geometry):
        return item.spatialReference
    # Fallback: treat item as a dataset/file path & describe it.
    described = arcpy.Describe(item)
    return arcpy.SpatialReference(described.spatialReference.factoryCode)
def update_attributes_by_function(dataset_path, field_name, function, **kwargs):
    """Update attribute values by passing them to a function.

    Args:
        dataset_path (str): Path of the dataset.
        field_name (str): Name of the field.
        function (types.FunctionType): Function to get values from.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        arg_field_names (iter): Iterable of the field names whose values will
            be the method arguments (not including the primary field).
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        field_as_first_arg (bool): Flag to indicate the field value will be
            the first argument for the method. Defaults to True.
        kwarg_field_names (iter): Iterable of the field names whose names &
            values will be the method keyword arguments.
        log_level (str): Level to log the function at. Defaults to 'info'.
        use_edit_session (bool): Flag to perform updates in an edit session.
            Default is False.

    Returns:
        str: Name of the field updated.
    """
    log = leveled_logger(LOG, kwargs.get('log_level', 'info'))
    log("Start: Update attributes in %s on %s by function %s.",
        field_name, dataset_path, function)
    field_names = {'args': tuple(kwargs.get('arg_field_names', ())),
                   'kwargs': tuple(kwargs.get('kwarg_field_names', ()))}
    # Row layout: [primary field, *positional-arg fields, *keyword-arg fields].
    field_names['row'] = ((field_name,) + field_names['args'] + field_names['kwargs'])
    # Index in the row where keyword-argument values begin.
    args_idx = len(field_names['args']) + 1
    session = Editor(dataset_metadata(dataset_path)['workspace_path'],
                     kwargs.get('use_edit_session', False))
    cursor = arcpy.da.UpdateCursor(dataset_path, field_names['row'],
                                   kwargs.get('dataset_where_sql'))
    with session, cursor:
        for row in cursor:
            func_args = (row[0:args_idx] if kwargs.get('field_as_first_arg', True)
                         else row[1:args_idx])
            func_kwargs = dict(zip(field_names['kwargs'], row[args_idx:]))
            new_value = function(*func_args, **func_kwargs)
            # Only write rows that actually changed.
            if row[0] != new_value:
                try:
                    cursor.updateRow([new_value] + row[1:])
                except RuntimeError:
                    LOG.error("Offending value is %s", new_value)
                    # Bare raise preserves the original error message &
                    # traceback (`raise RuntimeError` discarded both).
                    raise
    log("End: Update.")
    return field_name
def update_attributes_by_mapping(dataset_path, field_name, mapping, key_field_names,
                                 **kwargs):
    """Update attribute values by finding them in a mapping.

    Note:
        Wraps update_by_function.

    Args:
        dataset_path (str): Path of the dataset.
        field_name (str): Name of the field.
        mapping (object): Mapping to get values from.
        key_field_names (iter): Name of the fields whose values will be the mapping's
            keys.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        default_value: Value to return from mapping if key value on feature not
            present. Defaults to None.
        use_edit_session (bool): Flag to perform updates in an edit session. Default is
            False.
        log_level (str): Level to log the function at. Defaults to 'info'.

    Returns:
        str: Name of the field updated.
    """
    kwargs.setdefault('dataset_where_sql')
    kwargs.setdefault('default_value')
    kwargs.setdefault('use_edit_session', False)
    log = leveled_logger(LOG, kwargs.get('log_level', 'info'))
    log("Start: Update attributes in %s on %s by mapping with key(s) in %s.",
        field_name, dataset_path, key_field_names)
    keys = tuple(contain(key_field_names))
    session = Editor(dataset_metadata(dataset_path)['workspace_path'],
                     kwargs['use_edit_session'])
    cursor = arcpy.da.UpdateCursor(dataset_path, (field_name,)+keys,
                                   kwargs['dataset_where_sql'])
    with session, cursor:
        for row in cursor:
            old_value = row[0]
            # Single key field maps by scalar; multiple fields map by tuple.
            key = row[1] if len(keys) == 1 else tuple(row[1:])
            new_value = mapping.get(key, kwargs['default_value'])
            if old_value != new_value:
                try:
                    cursor.updateRow([new_value] + row[1:])
                except RuntimeError:
                    LOG.error("Offending value is %s", new_value)
                    # Bare raise preserves the original error message &
                    # traceback (`raise RuntimeError` discarded both).
                    raise
    log("End: Update.")
    return field_name
def update_attributes_by_node_ids(dataset_path, from_id_field_name, to_id_field_name,
                                  **kwargs):
    """Update from- & to-node ID attributes from the coordinate node map.

    Args:
        dataset_path (str): Path of the dataset.
        from_id_field_name (str): Name of the from-ID field.
        to_id_field_name (str): Name of the to-ID field.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        use_edit_session (bool): Flag to perform updates in an edit session. Default is
            False.
        log_level (str): Level to log the function at. Defaults to 'info'.

    Returns:
        tuple: Names (str) of the fields updated.
    """
    kwargs.setdefault('dataset_where_sql')
    kwargs.setdefault('use_edit_session', False)
    log = leveled_logger(LOG, kwargs.get('log_level', 'info'))
    log("Start: Update attributes in %s on %s by node IDs.",
        (from_id_field_name, to_id_field_name), dataset_path)
    oid_nodes = id_node_map(dataset_path, from_id_field_name, to_id_field_name,
                            field_names_as_keys=True, update_nodes=True)
    session = Editor(dataset_metadata(dataset_path)['workspace_path'],
                     kwargs['use_edit_session'])
    cursor = arcpy.da.UpdateCursor(
        dataset_path, field_names=('oid@', from_id_field_name, to_id_field_name),
        where_clause=kwargs['dataset_where_sql'],
    )
    with session, cursor:
        for row in cursor:
            oid = row[0]
            new_row = (oid, oid_nodes[oid][from_id_field_name],
                       oid_nodes[oid][to_id_field_name])
            # Cursor rows are lists; compare as tuples. The old `row !=
            # new_row` list-vs-tuple comparison was always true, so every
            # row was rewritten even when unchanged.
            if tuple(row) != new_row:
                try:
                    cursor.updateRow(new_row)
                except RuntimeError:
                    LOG.error("Offending values one of %s, %s", new_row[1], new_row[2])
                    # Bare raise preserves the original message & traceback.
                    raise
    log("End: Update.")
    return (from_id_field_name, to_id_field_name)
def update_attributes_by_unique_id(dataset_path, field_name, **kwargs):
    """Update attribute values by assigning a unique ID.

    Existing IDs are preserved, if unique.

    Args:
        dataset_path (str): Path of the dataset.
        field_name (str): Name of the field.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        use_edit_session (bool): Flag to perform updates in an edit session. Default is
            True (the docstring previously said False, but the code default
            has always been True).
        log_level (str): Level to log the function at. Default is 'info'.

    Returns:
        dict: Mapping of new IDs to existing old IDs.
    """
    kwargs.setdefault('dataset_where_sql')
    kwargs.setdefault('use_edit_session', True)
    log = leveled_logger(LOG, kwargs.get('log_level', 'info'))
    log("Start: Update attributes in %s on %s by assigning unique IDs.",
        field_name, dataset_path)
    meta = {'field': field_metadata(dataset_path, field_name)}

    def _corrected_id(current_id, unique_id_pool, used_ids, ignore_nonetype=False):
        """Return corrected ID to ensure uniqueness."""
        if any((ignore_nonetype and current_id is None, current_id not in used_ids)):
            corrected_id = current_id
        else:
            # Draw from the pool until an unused ID comes up.
            corrected_id = next(unique_id_pool)
            while corrected_id in used_ids:
                corrected_id = next(unique_id_pool)
        return corrected_id

    unique_id_pool = unique_ids(data_type=python_type(meta['field']['type']),
                                string_length=meta['field'].get('length', 16))
    oid_id = id_attributes_map(dataset_path, id_field_names='oid@',
                               field_names=field_name)
    used_ids = set()
    new_old_id = {}
    # Ensure current IDs are unique.
    for oid, current_id in oid_id.items():
        _id = _corrected_id(current_id, unique_id_pool, used_ids, ignore_nonetype=True)
        if _id != current_id:
            new_old_id[_id] = oid_id[oid]
            oid_id[oid] = _id
        used_ids.add(_id)
    # Take care of unassigned IDs now that we know all the used IDs.
    for oid, current_id in oid_id.items():
        if current_id is None:
            _id = _corrected_id(current_id, unique_id_pool, used_ids,
                                ignore_nonetype=False)
            oid_id[oid] = _id
            used_ids.add(_id)
    # kwargs were setdefault-ed above, so index directly; the old
    # `.get('use_edit_session', False)` fallback default was dead code.
    update_attributes_by_mapping(dataset_path, field_name,
                                 mapping=oid_id, key_field_names='oid@',
                                 dataset_where_sql=kwargs['dataset_where_sql'],
                                 use_edit_session=kwargs['use_edit_session'],
                                 log_level=None)
    log("End: Update.")
    return new_old_id
|
import mimetypes
import os.path
from django.core.files.storage import default_storage
from django.template.defaultfilters import slugify
def slugify_filename(filename):
    """Return *filename* with its base name slugified; extension untouched."""
    base, ext = os.path.splitext(filename)
    slugged_base = slugify(base)
    return slugged_base + ext
def get_thumb_filename(file_name):
    """
    Generate thumb filename by adding _thumb to end of
    filename before . (if present)
    """
    base, extension = os.path.splitext(file_name)
    return base + '_thumb' + extension
def get_image_format(extension):
    """Return the MIME type registered for a file *extension*.

    The extension lookup is case-insensitive: ``mimetypes.types_map`` keys
    are lowercase, so an uppercase extension (e.g. '.PNG') would otherwise
    raise KeyError.
    """
    mimetypes.init()
    return mimetypes.types_map[extension.lower()]
def get_media_url(path):
    """
    Determine system file's media URL.
    """
    media_url = default_storage.url(path)
    return media_url
Update utils.py
Fix a bug where an uppercase file extension (e.g. '.PNG') raised a KeyError when looking up its MIME type, since mimetypes.types_map keys are lowercase.
import mimetypes
import os.path
from django.core.files.storage import default_storage
from django.template.defaultfilters import slugify
def slugify_filename(filename):
    """Slugify the base name of *filename*, keeping the extension as-is."""
    root, extension = os.path.splitext(filename)
    return '{0}{1}'.format(slugify(root), extension)
def get_thumb_filename(file_name):
    """
    Generate thumb filename by adding _thumb to end of
    filename before . (if present)
    """
    name, dot_ext = os.path.splitext(file_name)
    return '%s_thumb%s' % (name, dot_ext)
def get_image_format(extension):
    """Look up the MIME type registered for *extension* (case-insensitive)."""
    mimetypes.init()
    normalized = extension.lower()
    return mimetypes.types_map[normalized]
def get_media_url(path):
    """
    Determine system file's media URL.
    """
    return default_storage.url(path)
|
import pandas as pd
from amino_acid import AminoAcid
from nucleotide import Nucleotide
from cosmic_db import get_cosmic_db
import sqlite3
import pandas.io.sql as psql
import ConfigParser
import logging
# Module-level logger shared by the helpers below.
logger = logging.getLogger(__name__)
# Directory holding the input/output/db .cfg files read by the config getters.
config_dir = 'config/'
def get_input_config(section):
    """Returns the config object to input.cfg."""
    parser = ConfigParser.ConfigParser()
    parser.read(config_dir + 'input.cfg')
    return dict(parser.items(section))
# setup directory paths
# NOTE: these run at import time, reading the [result] section of input.cfg;
# importing this module therefore requires that file to exist.
_opts = get_input_config('result')
plot_dir = _opts['plot_dir']
result_dir = _opts['result_dir']
clf_plot_dir = _opts['clf_plot_dir']
clf_result_dir = _opts['clf_result_dir']
def read_aa_properties(file_path):
    """Read aa property counts from the data_analysis/results folder.

    **Parameters**

    file_path : str
        path to aa_change.properties.txt

    **Returns**

    df : pd.DataFrame
        contains mutation counts for amino acid chemical properties
    """
    logger = logging.getLogger(name=__name__)
    # Pass args lazily to the logger instead of eager `%` formatting, so the
    # string is only built when the record is actually emitted.
    logger.info('reading in %s ...', file_path)
    df = pd.read_csv(file_path, sep='\t')  # read file
    df = df.set_index('initial_prop')  # set rows as initial property
    logger.info('finished reading file.')
    return df
def read_oncogenes():
    """Reads in the oncogenes from vogelsteins' science paper.

    Oncogenes from supplementary 2A:
    http://www.sciencemag.org/content/339/6127/1546.full

    **Returns**

    oncogenes : tuple
        tuple of gene names considered oncogenes
    """
    cfg_opts = get_input_config('input')
    with open(cfg_opts['oncogene'], 'r') as handle:
        raw_lines = handle.readlines()
    oncogenes = tuple(line.strip() for line in raw_lines)
    return oncogenes
def read_tsgs():
    """Reads in the tumor suppressor genes from vogelsteins' science paper.

    Oncogenes from supplementary 2A:
    http://www.sciencemag.org/content/339/6127/1546.full

    **Returns**

    tsgs : tuple
        tuple of gene names considered as tumor suppressors
    """
    cfg_opts = get_input_config('input')
    with open(cfg_opts['tsg'], 'r') as handle:
        # Iterating the handle yields lines, same as readlines().
        tsgs = tuple(line.strip() for line in handle)
    return tsgs
def read_smg():
    """Reads in the significantly mutated genes from kandoth et al's
    nature paper.

    The paper was from the Pan-Cancer effort from TCGA.

    SMGs from supplementary:
    http://www.nature.com/nature/journal/v502/n7471/full/nature12634.html

    **Returns**

    smgs : tuple
        tuple of gene names considered as significantly mutated
    """
    cfg_opts = get_input_config('input')
    with open(cfg_opts['smg'], 'r') as handle:
        smgs = tuple(gene.strip() for gene in handle.readlines())
    # Restrict to genes actually present in the gene database. If the DB has
    # not been created yet, fall back to the full file-based list instead of
    # crashing at import time (this module calls read_smg() on import).
    try:
        # open connection
        gene_db_path = get_db_config('genes')['db']
        conn = sqlite3.connect(gene_db_path)
        # NOTE: gene names come from our own config-listed file, but this is
        # still string-built SQL; keep inputs trusted.
        sql = ("SELECT DISTINCT Gene"
               " FROM nucleotide"
               " WHERE Gene in " + str(smgs))
        df = psql.frame_query(sql, con=conn)
        conn.close()  # close connection
        # get significantly mutated genes found in database
        smgs_in_database = tuple(df['Gene'])
        logger.info('There are only %d/%d significantly mutated genes found in the database.'
                    % (len(smgs_in_database), len(smgs)))
    except Exception:
        logger.exception('Gene database unavailable; using full SMG list.')
        smgs_in_database = smgs
    return smgs_in_database
def read_olfactory_receptors():
    """Reads in the significant olfactory receptors from Mutsigcv.

    **Returns**

    olfactory : tuple
        tuple of gene names considered as olfactory receptors
    """
    cfg_opts = get_input_config('input')
    with open(cfg_opts['olfactory_receptors'], 'r') as handle:
        olfactory = tuple(gene.strip() for gene in handle.readlines())
    # Restrict to genes present in the gene database; fall back to the full
    # file-based list if the DB has not been created yet (this function runs
    # at module import, mirroring the guard in read_smg()).
    try:
        # open connection
        gene_db_path = get_db_config('genes')['db']
        conn = sqlite3.connect(gene_db_path)
        sql = ("SELECT DISTINCT Gene"
               " FROM nucleotide"
               " WHERE Gene in " + str(olfactory))
        df = psql.frame_query(sql, con=conn)
        conn.close()  # close connection
        # get olfactory receptors found in database
        olfactory_in_database = tuple(df['Gene'])
        logger.info('There are only %d/%d olfactory receptors found in the database.'
                    % (len(olfactory_in_database), len(olfactory)))
    except Exception:
        logger.exception('Gene database unavailable; using full olfactory list.')
        olfactory_in_database = olfactory
    return olfactory_in_database
def classify_gene(gene):
    """Return whether the gene is an oncogene, tsg, or other.

    **Parameters**

    gene : str
        Official gene name

    **Returns**

    Str, ['oncogene' | 'tsg' | 'other']
    """
    if gene in oncogene_set:
        category = 'oncogene'
    elif gene in tsg_set:
        category = 'tsg'
    else:
        category = 'other'
    return category
def get_mutation_types(hgvs_iterable, kind='amino acid'):
    """Classify each protein HGVS mutation as a certain type.

    **Parameters**

    hgvs_iterable : iterable
        iterable container with HGVS mutaiton strings

    **Returns**

    mut_type_series : pd.Series
        container of protein mutation types in same order as input
    """
    # Unrecognized `kind` yields an empty series, matching prior behavior.
    if kind == 'amino acid':
        types = [AminoAcid(hgvs=s).mutation_type for s in hgvs_iterable]
    elif kind == 'nucleotide':
        types = [Nucleotide(hgvs=s).mutation_type for s in hgvs_iterable]
    else:
        types = []
    return pd.Series(types)
def count_mutation_types(hgvs_iterable, kind='amino acid'):
    """Count mutation types from HGVS protein strings (missense, indels, etc.)
    and DNA strings (substitutions, indels).

    **Parameters**

    hgvs_iterable : iterable
        An iterable object containing protein HGVS

    **Returns**

    unique_cts : pd.Series
        A pandas series object counting protein mutation types
    """
    # Classify, then tally — one chained expression.
    return get_mutation_types(hgvs_iterable, kind=kind).value_counts()
def get_output_config(section):
    """Returns the config object to output.cfg."""
    parser = ConfigParser.ConfigParser()
    parser.read(config_dir + 'output.cfg')
    return dict(parser.items(section))
def get_db_config(section):
    """Return the config object to db.cfg."""
    parser = ConfigParser.ConfigParser()
    parser.read(config_dir + 'db.cfg')
    return dict(parser.items(section))
def read_cosmic_tsv_by_gene(gene_name):
    """Reads the stored flat file corresponding to the gene_name.

    NOTE: Assumes cosmic flat files are in cosmic_dir specified by input.cfg
    and are sorted into alphabetical directories (eg. 'A'...'Z').

    **Parameters**

    gene_name : str
        gene name

    **Returns**

    df : pd.DataFrame
        tsv file as a pandas dataframe
    """
    cfg_opt = get_input_config('input')
    cosmic_dir = cfg_opt['cosmic_dir']  # COSMIC_nuc database directory
    # Gene tsv lives in the alphabetical directory for its first letter.
    alpha_dir = gene_name[0].upper() + '/'
    tsv_path = '%s%s%s.tsv' % (cosmic_dir, alpha_dir, gene_name)
    return pd.read_csv(tsv_path, sep='\t')
def drop_table(tbl_name,
               kind='sqlite'):
    """Drop a table from database if exists.

    **Note:** This function was written because pandas has a bug.
    If pandas was working then the write_frame method could just
    replace existing contents with out the need for me to drop the
    table. The bug is found here:
    https://github.com/pydata/pandas/issues/2971

    **Parameters**

    tbl_name : str
        name of table to drop
    kind : str, ['sqlite' | 'mysql']
        type of database
    """
    genes_db_path = get_db_config('genes')['db']
    # NOTE: table names cannot be bound as SQL parameters, so the statement
    # is string-built; tbl_name must come from trusted code only.
    sql = "DROP TABLE IF EXISTS %s" % tbl_name
    if kind == 'sqlite':
        conn = sqlite3.connect(genes_db_path)
        try:
            # `with conn` commits/rolls back the transaction...
            with conn:
                conn.execute(sql)
        finally:
            # ...but does NOT close the connection; do that explicitly to
            # avoid leaking a file handle on the database.
            conn.close()
    elif kind == 'mysql':
        with get_cosmic_db() as cur:
            cur.execute(sql)
# set up vogelstein oncogenes/tsgs
# NOTE: these run at import time & read the configured input files via the
# reader functions above.
oncogene_list = read_oncogenes()
tsg_list = read_tsgs()
# Sets give O(1) membership tests for classify_gene().
oncogene_set = set(oncogene_list)
tsg_set = set(tsg_list)
# significantly mutate genes from kandoth et al
smg_list = read_smg()
smg_set = set(smg_list)
# olfactory receptors from mutsigcv
olfactory_list = read_olfactory_receptors()
olfactory_set = set(olfactory_list)
Fix: avoid an import-time error when the gene database has not been created yet — read_smg() now falls back to the full file-based gene list if the database query fails.
import pandas as pd
from amino_acid import AminoAcid
from nucleotide import Nucleotide
from cosmic_db import get_cosmic_db
import sqlite3
import pandas.io.sql as psql
import ConfigParser
import logging
# Module-level logger shared by the helpers in this file.
logger = logging.getLogger(__name__)
# Location of the input/output/db .cfg files used by the config getters.
config_dir = 'config/'
def get_input_config(section):
    """Returns the config object to input.cfg."""
    cfg = ConfigParser.ConfigParser()
    cfg.read(config_dir + 'input.cfg')
    section_options = dict(cfg.items(section))
    return section_options
# setup directory paths
# Executed at import: pulls result/plot directory paths from the [result]
# section of input.cfg, so that file must exist to import this module.
_opts = get_input_config('result')
plot_dir = _opts['plot_dir']
result_dir = _opts['result_dir']
clf_plot_dir = _opts['clf_plot_dir']
clf_result_dir = _opts['clf_result_dir']
def read_aa_properties(file_path):
    """Read aa property counts from the data_analysis/results folder.

    **Parameters**

    file_path : str
        path to aa_change.properties.txt

    **Returns**

    df : pd.DataFrame
        contains mutation counts for amino acid chemical properties
    """
    logger = logging.getLogger(name=__name__)
    # Lazy logger args (no eager `%` formatting): the message is only
    # rendered if the record is emitted.
    logger.info('reading in %s ...', file_path)
    df = pd.read_csv(file_path, sep='\t')  # read file
    df = df.set_index('initial_prop')  # set rows as initial property
    logger.info('finished reading file.')
    return df
def read_oncogenes():
    """Reads in the oncogenes from vogelsteins' science paper.

    Oncogenes from supplementary 2A:
    http://www.sciencemag.org/content/339/6127/1546.full

    **Returns**

    oncogenes : tuple
        tuple of gene names considered oncogenes
    """
    cfg_opts = get_input_config('input')
    with open(cfg_opts['oncogene'], 'r') as handle:
        # Iterating the handle yields the same lines as readlines().
        oncogenes = tuple(line.strip() for line in handle)
    return oncogenes
def read_tsgs():
    """Reads in the tumor suppressor genes from vogelsteins' science paper.

    Oncogenes from supplementary 2A:
    http://www.sciencemag.org/content/339/6127/1546.full

    **Returns**

    tsgs : tuple
        tuple of gene names considered as tumor suppressors
    """
    cfg_opts = get_input_config('input')
    with open(cfg_opts['tsg'], 'r') as handle:
        stripped = [line.strip() for line in handle.readlines()]
    return tuple(stripped)
def read_smg():
    """Reads in the significantly mutated genes from kandoth et al's
    nature paper.

    The paper was from the Pan-Cancer effort from TCGA.

    SMGs from supplementary:
    http://www.nature.com/nature/journal/v502/n7471/full/nature12634.html

    **Returns**

    smgs : tuple
        tuple of gene names considered as significantly mutated
    """
    cfg_opts = get_input_config('input')
    with open(cfg_opts['smg'], 'r') as handle:
        smgs = tuple(gene.strip() for gene in handle.readlines())
    # open connection
    try:
        # if DB is not created this will throw an error
        gene_db_path = get_db_config('genes')['db']
        conn = sqlite3.connect(gene_db_path)
        sql = ("SELECT DISTINCT Gene"
               " FROM nucleotide"
               " WHERE Gene in " + str(smgs))
        df = psql.frame_query(sql, con=conn)
        conn.close()  # close connection
        # get significantly mutated genes found in database
        smgs_in_database = tuple(df['Gene'])
        logger.info('There are only %d/%d significantly mutated genes found in the database.'
                    % (len(smgs_in_database), len(smgs)))
    except Exception:
        # Catch Exception rather than a bare `except:`, which would also
        # swallow SystemExit/KeyboardInterrupt; log so the fallback is visible.
        logger.exception('Gene database unavailable; using full SMG list.')
        smgs_in_database = smgs
    return smgs_in_database
def read_olfactory_receptors():
    """Reads in the significant olfactory receptors from Mutsigcv.

    **Returns**

    olfactory : tuple
        tuple of gene names considered as olfactory receptors
    """
    cfg_opts = get_input_config('input')
    with open(cfg_opts['olfactory_receptors'], 'r') as handle:
        olfactory = tuple(gene.strip() for gene in handle.readlines())
    # open connection
    try:
        # Same missing-database guard as read_smg(): this runs at import
        # time, so fall back to the file list if the DB is not created yet.
        gene_db_path = get_db_config('genes')['db']
        conn = sqlite3.connect(gene_db_path)
        sql = ("SELECT DISTINCT Gene"
               " FROM nucleotide"
               " WHERE Gene in " + str(olfactory))
        df = psql.frame_query(sql, con=conn)
        conn.close()  # close connection
        # get olfactory receptors found in database
        olfactory_in_database = tuple(df['Gene'])
        logger.info('There are only %d/%d olfactory receptors found in the database.'
                    % (len(olfactory_in_database), len(olfactory)))
    except Exception:
        logger.exception('Gene database unavailable; using full olfactory list.')
        olfactory_in_database = olfactory
    return olfactory_in_database
def classify_gene(gene):
    """Return whether the gene is an oncogene, tsg, or other.

    **Parameters**

    gene : str
        Official gene name

    **Returns**

    Str, ['oncogene' | 'tsg' | 'other']
    """
    # Check each named category in priority order; default to 'other'.
    for category, members in (('oncogene', oncogene_set), ('tsg', tsg_set)):
        if gene in members:
            return category
    return 'other'
def get_mutation_types(hgvs_iterable, kind='amino acid'):
    """Classify each protein HGVS mutation as a certain type.

    **Parameters**

    hgvs_iterable : iterable
        iterable container with HGVS mutaiton strings

    **Returns**

    mut_type_series : pd.Series
        container of protein mutation types in same order as input
    """
    classified = []
    if kind == 'amino acid':
        for hgvs_string in hgvs_iterable:
            classified.append(AminoAcid(hgvs=hgvs_string).mutation_type)
    elif kind == 'nucleotide':
        for hgvs_string in hgvs_iterable:
            classified.append(Nucleotide(hgvs=hgvs_string).mutation_type)
    # Unrecognized `kind` falls through to an empty series.
    return pd.Series(classified)
def count_mutation_types(hgvs_iterable, kind='amino acid'):
    """Count mutation types from HGVS protein strings (missense, indels, etc.)
    and DNA strings (substitutions, indels).

    **Parameters**

    hgvs_iterable : iterable
        An iterable object containing protein HGVS

    **Returns**

    unique_cts : pd.Series
        A pandas series object counting protein mutation types
    """
    type_series = get_mutation_types(hgvs_iterable, kind=kind)
    counts = type_series.value_counts()
    return counts
def get_output_config(section):
    """Returns the config object to output.cfg."""
    cfg = ConfigParser.ConfigParser()
    cfg.read(config_dir + 'output.cfg')
    section_opts = dict(cfg.items(section))
    return section_opts
def get_db_config(section):
    """Return the config object to db.cfg."""
    cfg = ConfigParser.ConfigParser()
    cfg.read(config_dir + 'db.cfg')
    section_opts = dict(cfg.items(section))
    return section_opts
def read_cosmic_tsv_by_gene(gene_name):
    """Reads the stored flat file corresponding to the gene_name.

    NOTE: Assumes cosmic flat files are in cosmic_dir specified by input.cfg
    and are sorted into alphabetical directories (eg. 'A'...'Z').

    **Parameters**

    gene_name : str
        gene name

    **Returns**

    df : pd.DataFrame
        tsv file as a pandas dataframe
    """
    cfg_opt = get_input_config('input')
    # tsv files are filed under the uppercase first letter of the gene name.
    tsv_path = (cfg_opt['cosmic_dir'] + gene_name[0].upper() + '/'
                + gene_name + '.tsv')
    df = pd.read_csv(tsv_path, sep='\t')
    return df
def drop_table(tbl_name,
               kind='sqlite'):
    """Drop a table from database if exists.

    **Note:** This function was written because pandas has a bug.
    If pandas was working then the write_frame method could just
    replace existing contents with out the need for me to drop the
    table. The bug is found here:
    https://github.com/pydata/pandas/issues/2971

    **Parameters**

    tbl_name : str
        name of table to drop
    kind : str, ['sqlite' | 'mysql']
        type of database
    """
    genes_db_path = get_db_config('genes')['db']
    # NOTE: identifiers (table names) cannot be parameterized in SQL; this
    # string-built statement is only safe for trusted, internal tbl_name.
    sql = "DROP TABLE IF EXISTS %s" % tbl_name
    if kind == 'sqlite':
        conn = sqlite3.connect(genes_db_path)
        try:
            # The connection context manager commits (or rolls back)...
            with conn:
                conn.execute(sql)
        finally:
            # ...but never closes; close explicitly to avoid a leaked handle.
            conn.close()
    elif kind == 'mysql':
        with get_cosmic_db() as cur:
            cur.execute(sql)
# set up vogelstein oncogenes/tsgs
# Import-time initialization: reads the configured gene list files (and the
# gene DB where available) through the reader functions above.
oncogene_list = read_oncogenes()
tsg_list = read_tsgs()
# Sets provide O(1) membership checks for classify_gene().
oncogene_set = set(oncogene_list)
tsg_set = set(tsg_list)
# significantly mutate genes from kandoth et al
smg_list = read_smg()
smg_set = set(smg_list)
# olfactory receptors from mutsigcv
olfactory_list = read_olfactory_receptors()
olfactory_set = set(olfactory_list)
|
import copy
import imp
import json
import os
from os.path import join, splitext
from types import DictType
from .vendor import six
if six.PY3:
from .vendor import yaml3 as yaml
else:
from .vendor import yaml2 as yaml
from .env import Environment
from .exceptions import AmbiguousEnvVar, UncastableEnvVar
from .util import debug
class DataProxy(object):
    """
    Helper class implementing nested dict+attr access for `.Config`.

    The backing data lives in ``self.config`` (a plain dict); attribute
    access falls back to config keys, and nested dicts are wrapped so the
    fallback works at any depth.
    """
    # Attributes which get proxied through to inner etc.Config obj.
    _proxies = tuple("""
        clear
        get
        has_key
        items
        iteritems
        iterkeys
        itervalues
        keys
        pop
        popitem
        setdefault
        update
        values
    """.split()) + tuple("__{0}__".format(x) for x in """
        cmp
        contains
        iter
        sizeof
    """.split())

    # Alt constructor used so we aren't getting in the way of Config's real
    # __init__().
    @classmethod
    def from_data(cls, data):
        # NOTE: bypasses __init__ entirely; `data` becomes the backing dict.
        obj = cls()
        obj.config = data
        return obj

    def __getattr__(self, key):
        # Only called when normal attribute lookup fails, so real attributes
        # and methods always win over same-named config keys.
        try:
            return self._get(key)
        except KeyError:
            # Proxy most special vars to config for dict procotol.
            if key in self._proxies:
                return getattr(self.config, key)
            # Otherwise, raise useful AttributeError to follow getattr proto.
            err = "No attribute or config key found for {0!r}".format(key)
            attrs = [x for x in dir(self.__class__) if not x.startswith('_')]
            err += "\n\nValid real attributes: {0!r}".format(attrs)
            err += "\n\nValid keys: {0!r}".format(self.config.keys())
            raise AttributeError(err)

    def __hasattr__(self, key):
        # NOTE(review): Python never invokes a `__hasattr__` hook — hasattr()
        # goes through __getattr__ above — so this looks like dead code;
        # confirm before removing.
        return key in self.config or key in self._proxies

    def __iter__(self):
        # For some reason Python is ignoring our __hasattr__ when determining
        # whether we support __iter__. BOO
        return iter(self.config)

    def __eq__(self, other):
        # Can't proxy __eq__ because the RHS will always be an obj of the
        # current class, not the proxied-to class, and that causes
        # NotImplemented.
        return self.config == other.config

    def __len__(self):
        # Can't proxy __len__ either apparently? ugh
        return len(self.config)

    def __setitem__(self, key, value):
        # ... or __setitem__? thanks for nothing Python >:(
        self.config[key] = value

    def __delitem__(self, key):
        # OK this is really getting annoying
        del self.config[key]

    def __getitem__(self, key):
        return self._get(key)

    def _get(self, key):
        # Wrap nested dicts in a DataProxy so attr access keeps working
        # at depth (config.foo.bar).
        value = self.config[key]
        if isinstance(value, DictType):
            value = DataProxy.from_data(value)
        return value

    def __str__(self):
        return str(self.config)

    def __unicode__(self):
        # Python 2 only: `unicode` builtin does not exist on Python 3.
        return unicode(self.config)

    def __repr__(self):
        return repr(self.config)

    def __contains__(self, key):
        return key in self.config

    # TODO: copy()?
class Config(DataProxy):
    """
    Invoke's primary configuration handling class.

    See :doc:`/concepts/configuration` for details on the configuration
    system this class implements, including the :ref:`configuration hierarchy
    <config-hierarchy>`. The rest of this class' documentation assumes
    familiarity with that document.

    Access
    ------

    Configuration values may be accessed using dict syntax::

        config['foo']

    or attribute syntax::

        config.foo

    .. warning::
        Any "real" attributes (methods, etc) on `Config` take precedence over
        settings values - so if you have top level settings named ``clone``,
        ``defaults``, etc, you *must* use dict syntax to access it.

    Nesting works the same way - dict config values are turned into objects
    which honor both the dictionary protocol and the attribute-access method::

        config['foo']['bar']
        config.foo.bar

    Non-data attributes & methods
    -----------------------------

    This class implements the entire dictionary protocol: methods such as
    ``keys``, ``values``, ``items``, ``pop`` and so forth should all function
    as they do on regular dicts.

    Individual configuration 'levels' and their source locations (if
    applicable) may be accessed via attributes such as
    `.project`/`.project_path` and so forth - see the
    documentation for individual members below for details.

    Lifecycle
    ---------

    On initialization, `Config` will seek out and load various configuration
    files from disk, then `merge` the results with other in-memory sources
    such as defaults and CLI overrides.

    Typically, the `load_collection` and `load_shell_env` methods are called
    after initialization - `load_collection` prior to each task invocation
    (because collection-level config data may change depending on the task)
    and `load_shell_env` as the final step (as it needs the rest of the
    config to know which env vars are valid to load).

    Once users are given a copy of the configuration (usually via their
    task's `Context` argument) all the above loading (& a final `merge`) has
    been performed and they are free to modify it as they would any other
    regular dictionary.

    .. warning::
        Calling `merge` after manually modifying `Config` objects may
        overwrite those manual changes, since it overwrites the core config
        dict with data from per-source attributes like `defaults` or `user`.
    """
def __init__(self, defaults=None, overrides=None, system_prefix=None,
             user_prefix=None, project_home=None, env_prefix=None,
             runtime_path=None):
    """
    Create a new config object.

    :param dict defaults: Lowest-level config data. Default: ``{}``.
    :param dict overrides: Override-level (highest) config data. Default:
        ``{}``.
    :param str system_prefix:
        Path & partial filename (everything but the dot + extension) for
        the global config file. Default: ``/etc/invoke`` (e.g.
        ``/etc/invoke.yaml``).
    :param str user_prefix:
        As ``system_prefix``, but for the per-user file. Default:
        ``~/.invoke``.
    :param str project_home:
        Optional directory of the currently loaded `.Collection`; when
        given, per-project ``invoke.(yaml|json|py)`` files are sought
        there.
    :param str env_prefix:
        Optional environment variable prefix; only vars starting with it
        are loaded, and the prefix is stripped before processing (e.g.
        ``env_prefix='INVOKE_'`` maps ``INVOKE_MYSETTING`` to
        ``mysetting``).
    :param str runtime_path:
        Optional full file path to a runtime config file (fills the
        penultimate hierarchy slot).
    """
    # Config file suffixes to search, in preference order.
    self.file_suffixes = ('yaml', 'json', 'py')
    # Merged result; implementation detail, accessed via DataProxy.
    self.config = {}
    # Per-source state follows. Each file-backed source 'X' tracks: X
    # (data), X_prefix/X_path (where to look), X_path (where it was
    # found) and X_found (None = untried, True/False = tried).
    self.defaults = defaults if defaults is not None else {}
    self.collection = {}
    self.system_prefix = system_prefix if system_prefix is not None else '/etc/invoke'
    self.system_path = None
    self.system_found = None
    self.system = {}
    self.user_prefix = user_prefix if user_prefix is not None else '~/.invoke'
    self.user_path = None
    self.user_found = None
    self.user = {}
    self.project_home = project_home
    # Normalized prefix; not really publicly exposed.
    self.project_prefix = None
    if project_home is not None:
        self.project_prefix = join(project_home, 'invoke')
    self.project_path = None
    self.project_found = None
    self.project = {}
    # TODO: make this INVOKE_ and update tests, just deal
    self.env_prefix = env_prefix if env_prefix is not None else ''
    self.env = {}
    self.runtime_path = runtime_path
    self.runtime = {}
    self.runtime_found = None
    self.overrides = overrides if overrides is not None else {}
    # Initial load & merge so the object is usable immediately.
    self.load_files()
    self.merge()
def load_shell_env(self):
    """
    Load values from the shell environment.

    Intended to run late in the object's lifecycle, after all other
    sources have been merged: the merged config is what tells
    `Environment` which variable names are valid to load. See
    :ref:`env-vars` for details on this design decision.
    """
    # Refresh self.config first so the loader sees an up-to-date picture.
    debug("Running pre-merge for shell env loading...")
    self.merge()
    debug("Done with pre-merge.")
    self.env = Environment(config=self.config, prefix=self.env_prefix).load()
    debug("Loaded shell environment, triggering final merge")
    self.merge()
def load_collection(self, data):
    """
    Update collection-driven config data, then re-`merge`.

    Intended for use by the core task execution machinery, which obtains
    per-task collection-driven data; see :ref:`collection-configuration`.
    """
    self.collection = data
    self.merge()
def clone(self):
    """
    Return a copy of this configuration object.

    The copy shares configured sources and loaded/merged data with the
    original in value only - every attribute is deep-copied, so there is
    no shared mutable state.
    """
    duplicate = Config()
    attrs = (
        'config', 'defaults', 'collection',
        'system_prefix', 'system_path', 'system_found', 'system',
        'user_prefix', 'user_path', 'user_found', 'user',
        'project_home', 'project_prefix', 'project_path',
        'project_found', 'project',
        'env_prefix', 'env',
        'runtime_path', 'runtime_found', 'runtime',
        'overrides',
    )
    for attr in attrs:
        setattr(duplicate, attr, copy.deepcopy(getattr(self, attr)))
    return duplicate
def load_files(self):
    """
    Load any unloaded/un-searched-for config file sources.

    Any file source whose ``_found`` value is ``None`` is sought and
    loaded if present; sources already tried (``True``/``False``) are
    skipped, so this method is effectively idempotent.

    Does not imply merging; use `merge` for that.
    """
    # Prefix-driven sources share one code path; runtime uses an
    # absolute, user-supplied path.
    for prefix in ('system', 'user', 'project'):
        self._load_file(prefix=prefix)
    self._load_file(prefix='runtime', absolute=True)
def _load_file(self, prefix, absolute=False):
    """
    Seek & load a single config file source named by ``prefix``.

    On success sets ``<prefix>`` (data), ``<prefix>_path`` and
    ``<prefix>_found``; sets ``<prefix>_found = False`` when nothing was
    found. No-op when ``<prefix>_found`` is already non-``None``.

    :param str prefix: Source name, e.g. ``'system'`` or ``'runtime'``.
    :param bool absolute:
        When ``True``, ``<prefix>_path`` is treated as a full file path;
        otherwise candidate paths are derived from ``<prefix>_prefix``
        plus each of ``self.file_suffixes``.
    """
    # Setup
    found = "{0}_found".format(prefix)
    path = "{0}_path".format(prefix)
    data = prefix
    # Short-circuit if loading appears to have occurred already
    if getattr(self, found) is not None:
        return
    # Moar setup
    if absolute:
        absolute_path = getattr(self, path)
        # None -> expected absolute path but none set, short circuit
        if absolute_path is None:
            return
        paths = [absolute_path]
    else:
        path_prefix = getattr(self, "{0}_prefix".format(prefix))
        # Short circuit if loading seems unnecessary (eg for project config
        # files when not running out of a project)
        if path_prefix is None:
            return
        paths = [
            '.'.join((path_prefix, x))
            for x in self.file_suffixes
        ]
    # Poke 'em
    for filepath in paths:
        try:
            try:
                type_ = splitext(filepath)[1].lstrip('.')
                loader = _loaders[type_]
            except KeyError:
                # Fixed: removed a leftover debugging ``print`` statement
                # (also invalid syntax on Python 3); log instead, then
                # re-raise as before. TODO: raise UnknownFileType.
                debug("No loader for config file type {0!r}".format(type_))
                raise
            # Store data, the path it was found at, and fact that it was
            # found
            setattr(self, data, loader(filepath))
            setattr(self, path, filepath)
            setattr(self, found, True)
            break
        # Typically means 'no such file', so just note & skip past.
        except IOError as e:
            # TODO: is there a better / x-platform way to detect this?
            if "No such file" in e.strerror:
                debug("Didn't see any {0}, skipping.".format(filepath))
            else:
                raise
    # Still None -> no suffixed paths were found, record this fact
    if getattr(self, path) is None:
        setattr(self, found, False)
def merge(self):
    """
    Merge all config sources, in order, to `config`.
    Does not imply loading of config files or environment variables; use
    `load_files` and/or `load_shell_env` beforehand instead.
    """
    # Rebuild from scratch each time so keys removed from a source since
    # the previous merge don't linger in the result.
    self.config = {}
    debug("Merging config sources in order...")
    debug("Defaults: {0!r}".format(self.defaults))
    _merge(self.config, self.defaults)
    debug("Collection-driven: {0!r}".format(self.collection))
    _merge(self.config, self.collection)
    self._merge_file('system', "System-wide")
    self._merge_file('user', "Per-user")
    self._merge_file('project', "Per-project")
    debug("Environment variable config: {0!r}".format(self.env))
    _merge(self.config, self.env)
    # Runtime file sits between env vars and CLI overrides.
    self._merge_file('runtime', "Runtime")
    debug("Overrides: {0!r}".format(self.overrides))
    _merge(self.config, self.overrides)
def _merge_file(self, name, desc):
    """Merge one file-backed source (``name``) into ``self.config``."""
    desc += " config file" # yup
    found = getattr(self, "{0}_found".format(name))
    path = getattr(self, "{0}_path".format(name))
    data = getattr(self, name)
    # Guard clauses: no load attempted yet / attempted but absent.
    if found is None:
        debug("{0} has not been loaded yet, skipping".format(desc))
        return
    if not found:
        # TODO: how to preserve what was tried for each case but only for
        # the negative? Just a branch here based on 'name'?
        debug("{0} not found, skipping".format(desc))
        return
    # Found & loaded: fold its data in.
    debug("{0} ({1}): {2!r}".format(desc, path, data))
    _merge(self.config, data)
def __getattr__(self, key):
    # Refresh file sources & the merged view before delegating to
    # DataProxy's attribute-style config lookup.
    debug("Config.__getattr__({0!r})".format(key))
    self.load_files()
    self.merge()
    return super(Config, self).__getattr__(key)
def __contains__(self, key):
    # Same refresh-then-delegate dance for ``key in config``.
    debug("Config.__contains__({0!r})".format(key))
    self.load_files()
    self.merge()
    return super(Config, self).__contains__(key)
def _merge(base, updates):
    """
    Recursively merge dict ``updates`` into dict ``base`` (mutating ``base``.)

    * Values which are themselves dicts will be recursed into.
    * Values which are a dict in one input and *not* a dict in the other
      input (e.g. inputs ``{'foo': 5}`` and ``{'foo': {'bar': 5}}``) are
      irreconciliable and will generate an exception.
    """
    for key, value in updates.items():
        if key not in base:
            # Brand-new key: set it straight.
            base[key] = value
            continue
        value_is_dict = isinstance(value, dict)
        base_is_dict = isinstance(base[key], dict)
        if value_is_dict != base_is_dict:
            # dict on exactly one side -> cannot reconcile.
            raise _merge_error(base[key], value)
        if value_is_dict:
            _merge(base[key], value)
        else:
            base[key] = value
def _merge_error(orig, new_):
    # Build (not raise) the exception describing an irreconciliable merge
    # between an existing value and an incoming one.
    return AmbiguousMergeError("Can't cleanly merge {0} with {1}".format(
        _format_mismatch(orig), _format_mismatch(new_)
    ))
def _format_mismatch(x):
    """Render ``x`` as ``"<type> (<repr>)"`` for merge-conflict messages."""
    return "%s (%r)" % (type(x), x)
#
# File loading
#
def _load_yaml(path):
    """Load & return YAML data from the file at ``path``."""
    # NOTE(review): yaml.load without an explicit Loader can construct
    # arbitrary Python objects. Acceptable for trusted local config files,
    # but never point this at untrusted input (safe_load would be the fix).
    with open(path) as fd:
        return yaml.load(fd)
def _load_json(path):
    """Parse & return the JSON document stored at ``path``."""
    with open(path) as handle:
        return json.load(handle)
def _load_python(path):
    """
    Import the Python file at ``path`` and return its names as a dict,
    filtering out dunder names (``__name__`` etc.).
    """
    data = {}
    # Python 2 idioms (imp, iteritems); imp is deprecated on Python 3.
    for key, value in vars(imp.load_source('mod', path)).iteritems():
        if key.startswith('__'):
            continue
        data[key] = value
    return data
# Maps config file suffix -> loader callable (see file_suffixes).
_loaders = {
    'yaml': _load_yaml,
    'json': _load_json,
    'py': _load_python,
}
# Nuke outdated skel comments
import copy
import imp
import json
import os
from os.path import join, splitext
from types import DictType
from .vendor import six
if six.PY3:
from .vendor import yaml3 as yaml
else:
from .vendor import yaml2 as yaml
from .env import Environment
from .exceptions import AmbiguousEnvVar, UncastableEnvVar
from .util import debug
class DataProxy(object):
    """
    Helper class implementing nested dict+attr access for `.Config`.
    """
    # Attributes which get proxied through to the inner 'config' dict: the
    # first list is plain dict method names (Python 2 vintage: has_key,
    # iteritems, ...), the second builds dunder names from bare words.
    _proxies = tuple("""
        clear
        get
        has_key
        items
        iteritems
        iterkeys
        itervalues
        keys
        pop
        popitem
        setdefault
        update
        values
    """.split()) + tuple("__{0}__".format(x) for x in """
        cmp
        contains
        iter
        sizeof
    """.split())
# Alt constructor used so we aren't getting in the way of Config's real
# __init__().
@classmethod
def from_data(cls, data):
obj = cls()
obj.config = data
return obj
def __getattr__(self, key):
    # Attribute access order: config key -> proxied dict method ->
    # descriptive AttributeError.
    try:
        return self._get(key)
    except KeyError:
        # Proxy most special vars to config for dict protocol.
        if key in self._proxies:
            return getattr(self.config, key)
        # Otherwise, raise useful AttributeError to follow getattr proto.
        err = "No attribute or config key found for {0!r}".format(key)
        attrs = [x for x in dir(self.__class__) if not x.startswith('_')]
        err += "\n\nValid real attributes: {0!r}".format(attrs)
        err += "\n\nValid keys: {0!r}".format(self.config.keys())
        raise AttributeError(err)
def __hasattr__(self, key):
    # NOTE(review): ``__hasattr__`` is not a real Python protocol hook --
    # hasattr() never consults it (special lookups go through the type).
    # Kept as-is; the grumbling in __iter__ below is about exactly this.
    return key in self.config or key in self._proxies
def __iter__(self):
    # For some reason Python is ignoring our __hasattr__ when determining
    # whether we support __iter__. BOO
    return iter(self.config)
def __eq__(self, other):
    # Can't proxy __eq__ because the RHS will always be an obj of the
    # current class, not the proxied-to class, and that causes
    # NotImplemented.
    return self.config == other.config
def __len__(self):
    # Can't proxy __len__ either apparently? ugh
    return len(self.config)
def __setitem__(self, key, value):
    # ... or __setitem__? thanks for nothing Python >:(
    self.config[key] = value
def __delitem__(self, key):
    # OK this is really getting annoying
    del self.config[key]
def __getitem__(self, key):
    return self._get(key)
def _get(self, key):
    # Central lookup: nested dicts come back wrapped in DataProxy so
    # attribute-style access keeps working at any depth.
    value = self.config[key]
    if isinstance(value, DictType):
        value = DataProxy.from_data(value)
    return value
def __str__(self):
    return str(self.config)
def __unicode__(self):
    # Python 2 text protocol.
    return unicode(self.config)
def __repr__(self):
    return repr(self.config)
def __contains__(self, key):
    return key in self.config
# TODO: copy()?
class Config(DataProxy):
    """
    Invoke's primary configuration handling class.

    See :doc:`/concepts/configuration` for details on the configuration
    system this class implements, including the :ref:`configuration hierarchy
    <config-hierarchy>`. The rest of this class' documentation assumes
    familiarity with that document.

    Access
    ------

    Configuration values may be accessed using dict syntax::

        config['foo']

    or attribute syntax::

        config.foo

    .. warning::
        Any "real" attributes (methods, etc) on `Config` take precedence over
        settings values - so if you have top level settings named ``clone``,
        ``defaults``, etc, you *must* use dict syntax to access it.

    Nesting works the same way - dict config values are turned into objects
    which honor both the dictionary protocol and the attribute-access method::

        config['foo']['bar']
        config.foo.bar

    Non-data attributes & methods
    -----------------------------

    This class implements the entire dictionary protocol: methods such as
    ``keys``, ``values``, ``items``, ``pop`` and so forth should all function
    as they do on regular dicts.

    Individual configuration 'levels' and their source locations (if
    applicable) may be accessed via attributes such as
    `.project`/`.project_path` and so forth - see the
    documentation for individual members below for details.

    Lifecycle
    ---------

    On initialization, `Config` will seek out and load various configuration
    files from disk, then `merge` the results with other in-memory sources
    such as defaults and CLI overrides.

    Typically, the `load_collection` and `load_shell_env` methods are called
    after initialization - `load_collection` prior to each task invocation
    (because collection-level config data may change depending on the task)
    and `load_shell_env` as the final step (as it needs the rest of the
    config to know which env vars are valid to load).

    Once users are given a copy of the configuration (usually via their
    task's `Context` argument) all the above loading (& a final `merge`) has
    been performed and they are free to modify it as they would any other
    regular dictionary.

    .. warning::
        Calling `merge` after manually modifying `Config` objects may
        overwrite those manual changes, since it overwrites the core config
        dict with data from per-source attributes like `defaults` or `user`.
    """
def __init__(self, defaults=None, overrides=None, system_prefix=None,
             user_prefix=None, project_home=None, env_prefix=None,
             runtime_path=None):
    """
    Create a new config object.

    :param dict defaults: Lowest-level config data. Default: ``{}``.
    :param dict overrides: Override-level (highest) config data. Default:
        ``{}``.
    :param str system_prefix:
        Path & partial filename (everything but the dot + extension) for
        the global config file. Default: ``/etc/invoke`` (e.g.
        ``/etc/invoke.yaml``).
    :param str user_prefix:
        As ``system_prefix``, but for the per-user file. Default:
        ``~/.invoke``.
    :param str project_home:
        Optional directory of the currently loaded `.Collection`; when
        given, per-project ``invoke.(yaml|json|py)`` files are sought
        there.
    :param str env_prefix:
        Optional environment variable prefix; only vars starting with it
        are loaded, and the prefix is stripped before processing (e.g.
        ``env_prefix='INVOKE_'`` maps ``INVOKE_MYSETTING`` to
        ``mysetting``).
    :param str runtime_path:
        Optional full file path to a runtime config file (fills the
        penultimate hierarchy slot).
    """
    # Config file suffixes to search, in preference order.
    self.file_suffixes = ('yaml', 'json', 'py')
    # Merged result; implementation detail, accessed via DataProxy.
    self.config = {}
    # Per-source state follows. Each file-backed source 'X' tracks: X
    # (data), X_prefix/X_path (where to look), X_path (where it was
    # found) and X_found (None = untried, True/False = tried).
    self.defaults = defaults if defaults is not None else {}
    self.collection = {}
    self.system_prefix = system_prefix if system_prefix is not None else '/etc/invoke'
    self.system_path = None
    self.system_found = None
    self.system = {}
    self.user_prefix = user_prefix if user_prefix is not None else '~/.invoke'
    self.user_path = None
    self.user_found = None
    self.user = {}
    self.project_home = project_home
    # Normalized prefix; not really publicly exposed.
    self.project_prefix = None
    if project_home is not None:
        self.project_prefix = join(project_home, 'invoke')
    self.project_path = None
    self.project_found = None
    self.project = {}
    # TODO: make this INVOKE_ and update tests, just deal
    self.env_prefix = env_prefix if env_prefix is not None else ''
    self.env = {}
    self.runtime_path = runtime_path
    self.runtime = {}
    self.runtime_found = None
    self.overrides = overrides if overrides is not None else {}
    # Initial load & merge so the object is usable immediately.
    self.load_files()
    self.merge()
def load_shell_env(self):
    """
    Load values from the shell environment.

    Intended to run late in the object's lifecycle, after all other
    sources have been merged: the merged config is what tells
    `Environment` which variable names are valid to load. See
    :ref:`env-vars` for details on this design decision.
    """
    # Refresh self.config first so the loader sees an up-to-date picture.
    debug("Running pre-merge for shell env loading...")
    self.merge()
    debug("Done with pre-merge.")
    self.env = Environment(config=self.config, prefix=self.env_prefix).load()
    debug("Loaded shell environment, triggering final merge")
    self.merge()
def load_collection(self, data):
    """
    Update collection-driven config data, then re-`merge`.

    Intended for use by the core task execution machinery, which obtains
    per-task collection-driven data; see :ref:`collection-configuration`.
    """
    self.collection = data
    self.merge()
def clone(self):
    """
    Return a copy of this configuration object.

    The copy shares configured sources and loaded/merged data with the
    original in value only - every attribute is deep-copied, so there is
    no shared mutable state.
    """
    duplicate = Config()
    attrs = (
        'config', 'defaults', 'collection',
        'system_prefix', 'system_path', 'system_found', 'system',
        'user_prefix', 'user_path', 'user_found', 'user',
        'project_home', 'project_prefix', 'project_path',
        'project_found', 'project',
        'env_prefix', 'env',
        'runtime_path', 'runtime_found', 'runtime',
        'overrides',
    )
    for attr in attrs:
        setattr(duplicate, attr, copy.deepcopy(getattr(self, attr)))
    return duplicate
def load_files(self):
    """
    Load any unloaded/un-searched-for config file sources.

    Any file source whose ``_found`` value is ``None`` is sought and
    loaded if present; sources already tried (``True``/``False``) are
    skipped, so this method is effectively idempotent.

    Does not imply merging; use `merge` for that.
    """
    # Prefix-driven sources share one code path; runtime uses an
    # absolute, user-supplied path.
    for prefix in ('system', 'user', 'project'):
        self._load_file(prefix=prefix)
    self._load_file(prefix='runtime', absolute=True)
def _load_file(self, prefix, absolute=False):
    """
    Seek & load a single config file source named by ``prefix``.

    On success sets ``<prefix>`` (data), ``<prefix>_path`` and
    ``<prefix>_found``; sets ``<prefix>_found = False`` when nothing was
    found. No-op when ``<prefix>_found`` is already non-``None``.

    :param str prefix: Source name, e.g. ``'system'`` or ``'runtime'``.
    :param bool absolute:
        When ``True``, ``<prefix>_path`` is treated as a full file path;
        otherwise candidate paths are derived from ``<prefix>_prefix``
        plus each of ``self.file_suffixes``.
    """
    # Setup
    found = "{0}_found".format(prefix)
    path = "{0}_path".format(prefix)
    data = prefix
    # Short-circuit if loading appears to have occurred already
    if getattr(self, found) is not None:
        return
    # Moar setup
    if absolute:
        absolute_path = getattr(self, path)
        # None -> expected absolute path but none set, short circuit
        if absolute_path is None:
            return
        paths = [absolute_path]
    else:
        path_prefix = getattr(self, "{0}_prefix".format(prefix))
        # Short circuit if loading seems unnecessary (eg for project config
        # files when not running out of a project)
        if path_prefix is None:
            return
        paths = [
            '.'.join((path_prefix, x))
            for x in self.file_suffixes
        ]
    # Poke 'em
    for filepath in paths:
        try:
            try:
                type_ = splitext(filepath)[1].lstrip('.')
                loader = _loaders[type_]
            except KeyError:
                # Fixed: removed a leftover debugging ``print`` statement
                # (also invalid syntax on Python 3); log instead, then
                # re-raise as before. TODO: raise UnknownFileType.
                debug("No loader for config file type {0!r}".format(type_))
                raise
            # Store data, the path it was found at, and fact that it was
            # found
            setattr(self, data, loader(filepath))
            setattr(self, path, filepath)
            setattr(self, found, True)
            break
        # Typically means 'no such file', so just note & skip past.
        except IOError as e:
            # TODO: is there a better / x-platform way to detect this?
            if "No such file" in e.strerror:
                debug("Didn't see any {0}, skipping.".format(filepath))
            else:
                raise
    # Still None -> no suffixed paths were found, record this fact
    if getattr(self, path) is None:
        setattr(self, found, False)
def merge(self):
    """
    Merge all config sources, in order, to `config`.
    Does not imply loading of config files or environment variables; use
    `load_files` and/or `load_shell_env` beforehand instead.
    """
    # Fixed: rebuild from a clean slate each time so keys removed from a
    # source since the previous merge don't linger in the result (this
    # matches the sibling implementation of merge() elsewhere in this
    # file, which already resets self.config).
    self.config = {}
    debug("Merging config sources in order...")
    debug("Defaults: {0!r}".format(self.defaults))
    _merge(self.config, self.defaults)
    debug("Collection-driven: {0!r}".format(self.collection))
    _merge(self.config, self.collection)
    self._merge_file('system', "System-wide")
    self._merge_file('user', "Per-user")
    self._merge_file('project', "Per-project")
    debug("Environment variable config: {0!r}".format(self.env))
    _merge(self.config, self.env)
    # Runtime file sits between env vars and CLI overrides.
    self._merge_file('runtime', "Runtime")
    debug("Overrides: {0!r}".format(self.overrides))
    _merge(self.config, self.overrides)
def _merge_file(self, name, desc):
    """Merge one file-backed source (``name``) into ``self.config``."""
    desc += " config file" # yup
    found = getattr(self, "{0}_found".format(name))
    path = getattr(self, "{0}_path".format(name))
    data = getattr(self, name)
    # Guard clauses: no load attempted yet / attempted but absent.
    if found is None:
        debug("{0} has not been loaded yet, skipping".format(desc))
        return
    if not found:
        # TODO: how to preserve what was tried for each case but only for
        # the negative? Just a branch here based on 'name'?
        debug("{0} not found, skipping".format(desc))
        return
    # Found & loaded: fold its data in.
    debug("{0} ({1}): {2!r}".format(desc, path, data))
    _merge(self.config, data)
def _merge(base, updates):
    """
    Recursively merge dict ``updates`` into dict ``base`` (mutating ``base``.)

    * Values which are themselves dicts will be recursed into.
    * Values which are a dict in one input and *not* a dict in the other
      input (e.g. inputs ``{'foo': 5}`` and ``{'foo': {'bar': 5}}``) are
      irreconciliable and will generate an exception.
    """
    for key, value in updates.items():
        if key not in base:
            # Brand-new key: set it straight.
            base[key] = value
            continue
        value_is_dict = isinstance(value, dict)
        base_is_dict = isinstance(base[key], dict)
        if value_is_dict != base_is_dict:
            # dict on exactly one side -> cannot reconcile.
            raise _merge_error(base[key], value)
        if value_is_dict:
            _merge(base[key], value)
        else:
            base[key] = value
def _merge_error(orig, new_):
    # Build (not raise) the exception describing an irreconciliable merge
    # between an existing value and an incoming one.
    return AmbiguousMergeError("Can't cleanly merge {0} with {1}".format(
        _format_mismatch(orig), _format_mismatch(new_)
    ))
def _format_mismatch(x):
    """Render ``x`` as ``"<type> (<repr>)"`` for merge-conflict messages."""
    return "%s (%r)" % (type(x), x)
#
# File loading
#
def _load_yaml(path):
    """Load & return YAML data from the file at ``path``."""
    # NOTE(review): yaml.load without an explicit Loader can construct
    # arbitrary Python objects. Acceptable for trusted local config files,
    # but never point this at untrusted input (safe_load would be the fix).
    with open(path) as fd:
        return yaml.load(fd)
def _load_json(path):
    """Parse & return the JSON document stored at ``path``."""
    with open(path) as handle:
        return json.load(handle)
def _load_python(path):
    """
    Import the Python file at ``path`` and return its names as a dict,
    filtering out dunder names (``__name__`` etc.).
    """
    data = {}
    # Python 2 idioms (imp, iteritems); imp is deprecated on Python 3.
    for key, value in vars(imp.load_source('mod', path)).iteritems():
        if key.startswith('__'):
            continue
        data[key] = value
    return data
# Maps config file suffix -> loader callable (see file_suffixes).
_loaders = {
    'yaml': _load_yaml,
    'json': _load_json,
    'py': _load_python,
}
# --- (file boundary) ---
import json
import time
from decimal import Decimal
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import View
import login.models as login_models
import api.parser
from api import actions
from api import parser
from api.helpers.http import ModHttpResponse
from rest_framework.views import APIView
import geoalchemy2 # Although this import seems unused is has to be here
def permission_wrapper(permission, f):
    """
    Wrap view method ``f`` so it requires at least ``permission`` on the
    (schema, table) named in the request's keyword arguments.

    :param permission: Minimum permission level required.
    :param f: The view method being wrapped.
    :return: The wrapping function.
    :raises PermissionDenied: If the requesting user's permission level on
        (schema, table) is below ``permission``.
    """
    def wrapper(caller, request, *args, **kwargs):
        schema = kwargs.get('schema')
        table = kwargs.get('table')
        if request.user.get_permission_level(schema, table) < permission:
            raise PermissionDenied
        return f(caller, request, *args, **kwargs)
    # Fixed: the wrapper was never returned, so permission_wrapper (and
    # every require_*_permission decorator built on it) returned None and
    # replaced the decorated view with nothing.
    return wrapper
def require_write_permission(f):
    # Decorator: view requires WRITE permission on the target table.
    return permission_wrapper(login_models.WRITE_PERM, f)
def require_delete_permission(f):
    # Decorator: view requires DELETE permission on the target table.
    return permission_wrapper(login_models.DELETE_PERM, f)
def require_admin_permission(f):
    # Decorator: view requires ADMIN permission on the target table.
    return permission_wrapper(login_models.ADMIN_PERM, f)
class Table(APIView):
    """
    Handles the creation of tables and serves information on existing tables
    """
    def get(self, request, schema, table):
        """
        Returns a dictionary that describes the DDL-make-up of this table.
        Fields are:

        * name : Name of the table,
        * schema: Name of the schema,
        * columns : as specified in :meth:`api.actions.describe_columns`
        * indexes : as specified in :meth:`api.actions.describe_indexes`
        * constraints: as specified in
          :meth:`api.actions.describe_constraints`

        :param request:
        :return:
        """
        # NOTE(review): the response key below is 'indexed' while the
        # docstring says 'indexes' -- looks like a typo, but renaming it
        # would break existing API clients; confirm before changing.
        return JsonResponse({
            'schema': schema,
            'name': table,
            'columns': actions.describe_columns(schema, table),
            'indexed': actions.describe_indexes(schema, table),
            'constraints': actions.describe_constraints(schema, table)
        })
    def post(self, request, schema, table):
        """
        Changes properties of tables and table columns
        :param request:
        :param schema:
        :param table:
        :return:
        """
        json_data = json.loads(request.body.decode("utf-8"))
        if 'column' in json_data['type']:
            # Column change: translate the posted dict into the internal
            # column description and queue it for execution.
            column_definition = api.parser.parse_scolumnd_from_columnd(schema, table, json_data['name'], json_data)
            result = actions.queue_column_change(schema, table, column_definition)
            return ModHttpResponse(result)
        elif 'constraint' in json_data['type']:
            # Input has nothing to do with DDL from Postgres.
            # Input is completely different.
            # Using actions.parse_sconstd_from_constd is not applicable
            # dict.get() returns None, if key does not exist
            constraint_definition = {
                'action': json_data['action'],  # {ADD, DROP}
                'constraint_type': json_data.get('constraint_type'),  # {FOREIGN KEY, PRIMARY KEY, UNIQUE, CHECK}
                'constraint_name': json_data.get('constraint_name'),  # {myForeignKey, myUniqueConstraint}
                'constraint_parameter': json_data.get('constraint_parameter'),
                # Things in Brackets, e.g. name of column
                'reference_table': json_data.get('reference_table'),
                'reference_column': json_data.get('reference_column')
            }
            result = actions.queue_constraint_change(schema, table, constraint_definition)
            return ModHttpResponse(result)
        else:
            # Unknown change type: reject.
            return ModHttpResponse(actions.get_response_dict(False, 400, 'type not recognised'))
    def put(self, request, schema, table):
        """
        Creates a new table from the posted column/constraint definitions.

        Every request to unsave http methods have to contain a "csrftoken".
        This token is used to deny cross site reference forwarding.
        In every request the header had to contain "X-CSRFToken" with the actual csrftoken.
        The token can be requested at / and will be returned as cookie.

        :param request:
        :return:
        """
        json_data = request.data['query']
        constraint_definitions = []
        column_definitions = []
        # Stamp each posted constraint with the target table/schema.
        for constraint_definiton in json_data.get('constraints', []):
            constraint_definiton.update({"action": "ADD",
                                         "c_table": table,
                                         "c_schema": schema})
            constraint_definitions.append(constraint_definiton)
        if 'columns' not in json_data:
            # NOTE(review): bare ``return`` makes this view return None,
            # which is not a valid HTTP response -- confirm whether an
            # error response was intended here.
            return
        for column_definition in json_data['columns']:
            column_definition.update({"c_table": table,
                                      "c_schema": schema})
            column_definitions.append(column_definition)
        result = actions.table_create(schema, table, column_definitions, constraint_definitions)
        return ModHttpResponse(result)
class Index(APIView):
    """Stub endpoint for table indexes; no HTTP handler is implemented yet."""
    def get(self, request):
        pass
    def post(self, request):
        pass
    def put(self, request):
        pass
class Fields(APIView):
    """Read access to a single cell addressed by schema, table, row id and column."""
    def get(self, request, schema, table, id, column):
        """Return the raw value of one field, or an empty 404 if it is absent."""
        # Reject any identifier that is not a valid Postgres qualifier.
        if not all(parser.is_pg_qual(part) for part in (table, schema, id, column)):
            return ModHttpResponse({"error": "Bad Request", "http_status": 400})
        value = actions.getValue(schema, table, column, id)
        if value is None:
            return HttpResponse("", status=404)
        return HttpResponse(value, status=200)
    def post(self, request):
        pass
    def put(self, request):
        pass
class Rows(APIView):
    """Row-level access to a table: read, insert and update rows."""
    def get(self, request, schema, table, id=''):
        """Return rows of *table* as a JSON array.

        GET parameters ``columns``, ``where``, ``orderby``, ``limit`` and
        ``offset`` are parsed and forwarded to :func:`actions.get_rows`.
        """
        columns = request.GET.get('columns')
        where = request.GET.get('where')
        orderby = request.GET.get('orderby')
        limit = request.GET.get('limit')
        offset = request.GET.get('offset')
        # OPERATORS could be EQUAL, GREATER, LOWER, NOTEQUAL, NOTGREATER, NOTLOWER
        # CONNECTORS could be AND, OR
        # If you connect two values with an +, it will convert the + to a space. Whatever.
        where_clauses = None
        if where:
            # NOTE(review): for inputs whose word count is a multiple of
            # four, range(len//4 + 1) indexes one clause past the end of
            # where_splitted -- verify this parser against such input.
            where_splitted = where.split(' ')
            where_clauses = [{'first': where_splitted[4 * i],
                              'operator': where_splitted[4 * i + 1],
                              'second': where_splitted[4 * i + 2],
                              'connector': where_splitted[4 * i + 3] if len(where_splitted) > 4 * i + 3 else None} for i
                             in range(int(len(where_splitted) / 4) + 1)]
        # TODO: Validate where_clauses. Should not be vulnerable
        data = {'schema': schema,
                'table': table,
                'columns': parser.split(columns, ','),
                'where': where_clauses,
                'orderby': parser.split(orderby, ','),
                'limit': limit,
                'offset': offset
                }
        return_obj = actions.get_rows(request, data)
        # TODO: Figure out what JsonResponse does different.
        response = json.dumps(return_obj, default=date_handler)
        return HttpResponse(response, content_type='application/json')
    @actions.load_cursor
    def post(self, request, schema, table, row_id=None):
        """Insert (no *row_id*) or update (with *row_id*) a single row."""
        column_data = request.data['query']
        if row_id:
            return self.__update_row(request, schema, table, column_data, row_id)
        else:
            return self.__insert_row(request, schema, table, column_data, row_id)
    def put(self, request, schema, table, row_id=None):
        """Upsert the row *row_id*: update when it exists, insert otherwise."""
        if not row_id:
            return actions._response_error('This methods requires an id')
        column_data = request.data['query']
        engine = actions._get_engine()
        conn = engine.connect()
        # check whether id is already in use
        # SECURITY(review): schema, table and row_id are interpolated
        # directly into the SQL string; unless callers guarantee they are
        # sanitized this is open to SQL injection -- prefer bind params.
        exists = conn.execute('select count(*) '
                              'from {schema}.{table} '
                              'where id = {id};'.format(schema=schema,
                                                        table=table,
                                                        id=row_id)).first()[0] > 0 if row_id else False
        # exists is already a boolean; the > 0 comparison is redundant.
        if exists > 0:
            response = self.__update_row(request, schema, table, column_data, row_id)
            actions.apply_changes(schema, table)
            return JsonResponse(response)
        else:
            result = self.__insert_row(request, schema, table, column_data, row_id)
            actions.apply_changes(schema, table)
            return JsonResponse(result)
    @actions.load_cursor
    def __insert_row(self, request, schema, table, row, row_id=None):
        """Insert *row*; *row_id*, when given, becomes the row's id."""
        if row.get('id', row_id) != row_id:
            return actions._response_error('The id given in the query does not '
                                           'match the id given in the url')
        if row_id:
            row['id'] = row_id
        # Reject column names that are not valid Postgres qualifiers.
        if not all(map(parser.is_pg_qual, row.keys())):
            return actions.get_response_dict(success=False,
                                             http_status_code=400,
                                             reason="Your request was malformed.")
        context = {'cursor_id': request.data['cursor_id'],
                   'user': request.user}
        query = {
            'schema': schema,
            'table': table,
            'values': [row]
        }
        result = actions.data_insert(query, context)
        return result
    @actions.load_cursor
    def __update_row(self, request, schema, table, row, row_id):
        """Update the row whose id equals *row_id* with the values in *row*."""
        context = {'cursor_id': request.data['cursor_id'],
                   'user': request.user}
        query = {
            'schema': schema,
            'table': table,
            'where': {
                'left': {
                    'type': 'column',
                    'column': 'id'
                },
                'operator': '=',
                'right': row_id,
                'type': 'operator_binary'
            },
            'values': row
        }
        return actions.data_update(query, context)
class Session(APIView):
    """Exposes a value stored in the Django session."""
    def get(self, request, length=1):
        # NOTE(review): 'resonse' looks like a typo for 'response', and a
        # raw session value (not an HttpResponse) is returned -- confirm
        # the intended behavior; the key is kept untouched here.
        return request.session['resonse']
def date_handler(obj):
    """Serialize values for JSON dumps.

    Decimals become floats, objects exposing ``isoformat`` (dates,
    datetimes, times) are rendered in ISO-8601, everything else falls
    back to ``str`` (the default serializer behavior for JSON).

    :param obj: Any object encountered by the JSON encoder.
    :return: A JSON-serializable representation of *obj*.
    """
    if isinstance(obj, Decimal):
        return float(obj)
    iso = getattr(obj, 'isoformat', None)
    if iso is not None:
        return iso()
    return str(obj)
# Create your views here.
def create_ajax_handler(func, create_cursor=True):
    """Map an api page onto the corresponding function in api/actions.py.

    :param func: The callable that handles the parsed query.
    :param create_cursor: When True and the request carries no
        'cursor_id', open a raw connection and a fresh cursor first.
    :return: A csrf-exempt view returning a JSON response with the
        corresponding response stored in *content*.
    """
    @csrf_exempt
    def execute(request):
        content = request.POST if request.POST else request.GET
        context = {'user': request.user}
        if 'cursor_id' in content:
            context['cursor_id'] = int(content['cursor_id'])
        elif create_cursor:
            context.update(actions.open_raw_connection(request, context))
            context.update(actions.open_cursor(request, context))
        data = func(json.loads(content.get('query', '{}')), context)
        # Round-trip through json to clean the structure of
        # non-serializable objects (e.g. datetime).
        response_data = json.loads(json.dumps(data, default=date_handler))
        # Bug fix: with create_cursor=False and no 'cursor_id' parameter
        # the old code raised a KeyError here; report None instead.
        return JsonResponse({'content': response_data,
                             'cursor_id': context.get('cursor_id')}, safe=False)
    return execute
def stream(data):
    """Yield the elements of *data* one at a time, JSON-cleaned.

    Each element is round-tripped through json (with date_handler) so
    only serializable structures are yielded; a one-second pause follows
    every item, as in the original implementation.

    TODO: Implement real streaming of large datasets.

    :param data: A sequence of JSON-compatible items.
    :return: A generator over the cleaned items.
    """
    # The unused 'chunck' constant and the index-based loop were removed;
    # iterating the sequence directly is equivalent and idiomatic.
    for item in data:
        yield json.loads(json.dumps(item, default=date_handler))
        time.sleep(1)
Fix the return statement in the permission wrapper
import json
import time
from decimal import Decimal
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import View
import login.models as login_models
import api.parser
from api import actions
from api import parser
from api.helpers.http import ModHttpResponse
from rest_framework.views import APIView
import geoalchemy2 # Although this import seems unused is has to be here
def permission_wrapper(permission, f):
    """Wrap view method *f* so it only runs with sufficient rights.

    The schema/table pair is taken from the keyword arguments; the
    requesting user's permission level on it must be at least
    *permission*, otherwise :class:`PermissionDenied` is raised.
    """
    def wrapper(caller, request, *args, **kwargs):
        schema = kwargs.get('schema')
        table = kwargs.get('table')
        level = request.user.get_permission_level(schema, table)
        if level < permission:
            raise PermissionDenied
        return f(caller, request, *args, **kwargs)
    return wrapper
def require_write_permission(f):
    """Decorator: run *f* only for users with at least write permission."""
    return permission_wrapper(login_models.WRITE_PERM, f)
def require_delete_permission(f):
    """Decorator: run *f* only for users with at least delete permission."""
    return permission_wrapper(login_models.DELETE_PERM, f)
def require_admin_permission(f):
    """Decorator: run *f* only for users with admin permission."""
    return permission_wrapper(login_models.ADMIN_PERM, f)
class Table(APIView):
    """
    Handles the creation of tables and serves information on existing tables
    """
    def get(self, request, schema, table):
        """Describe the DDL make-up of *table* in *schema*.

        Fields of the response:
        * name : Name of the table,
        * schema: Name of the schema,
        * columns : as specified in :meth:`api.actions.describe_columns`
        * indexes : as specified in :meth:`api.actions.describe_indexes`
        * constraints: as specified in
            :meth:`api.actions.describe_constraints`
        :param request:
        :return: A :class:`JsonResponse` with the table description.
        """
        # NOTE(review): the response key is 'indexed' although the docs
        # say 'indexes'; kept unchanged because clients may depend on it.
        return JsonResponse({
            'schema': schema,
            'name': table,
            'columns': actions.describe_columns(schema, table),
            'indexed': actions.describe_indexes(schema, table),
            'constraints': actions.describe_constraints(schema, table)
        })
    def post(self, request, schema, table):
        """Queue a change to a column or a constraint of *table*.

        The request body is a JSON document whose ``type`` field decides
        whether a column change or a constraint change is queued.

        :param request: HTTP request carrying the JSON payload.
        :param schema: Name of the schema.
        :param table: Name of the table.
        :return: A :class:`ModHttpResponse` describing the outcome.
        """
        json_data = json.loads(request.body.decode("utf-8"))
        if 'column' in json_data['type']:
            column_definition = api.parser.parse_scolumnd_from_columnd(
                schema, table, json_data['name'], json_data)
            result = actions.queue_column_change(schema, table, column_definition)
            return ModHttpResponse(result)
        elif 'constraint' in json_data['type']:
            # The constraint payload does not follow the Postgres DDL
            # layout, so actions.parse_sconstd_from_constd is not
            # applicable; dict.get() returns None for absent keys.
            constraint_definition = {
                'action': json_data['action'],  # {ADD, DROP}
                'constraint_type': json_data.get('constraint_type'),  # {FOREIGN KEY, PRIMARY KEY, UNIQUE, CHECK}
                'constraint_name': json_data.get('constraint_name'),  # {myForeignKey, myUniqueConstraint}
                'constraint_parameter': json_data.get('constraint_parameter'),  # things in brackets, e.g. name of column
                'reference_table': json_data.get('reference_table'),
                'reference_column': json_data.get('reference_column')
            }
            result = actions.queue_constraint_change(schema, table, constraint_definition)
            return ModHttpResponse(result)
        else:
            return ModHttpResponse(actions.get_response_dict(False, 400, 'type not recognised'))
    def put(self, request, schema, table):
        """Create *table* in *schema* from a JSON table definition.

        Every request to unsafe HTTP methods has to contain a csrf token:
        the header must carry "X-CSRFToken" with the token, which can be
        requested at / and is returned as a cookie.

        :param request: HTTP request; ``request.data['query']`` holds the
            table definition with ``columns`` and optional ``constraints``.
        :return: A :class:`ModHttpResponse` describing the outcome.
        """
        json_data = request.data['query']
        constraint_definitions = []
        column_definitions = []
        for constraint_definiton in json_data.get('constraints', []):
            constraint_definiton.update({"action": "ADD",
                                         "c_table": table,
                                         "c_schema": schema})
            constraint_definitions.append(constraint_definiton)
        if 'columns' not in json_data:
            # Bug fix: a bare ``return`` handed None back to Django and
            # produced a server error; answer with an explicit 400 instead.
            return ModHttpResponse(
                actions.get_response_dict(False, 400,
                                          'table requires at least one column'))
        for column_definition in json_data['columns']:
            column_definition.update({"c_table": table,
                                      "c_schema": schema})
            column_definitions.append(column_definition)
        result = actions.table_create(schema, table, column_definitions,
                                      constraint_definitions)
        return ModHttpResponse(result)
class Index(APIView):
    """Stub endpoint for table indexes; no HTTP handler is implemented yet."""
    def get(self, request):
        pass
    def post(self, request):
        pass
    def put(self, request):
        pass
class Fields(APIView):
    """Read access to a single cell addressed by schema, table, row id and column."""
    def get(self, request, schema, table, id, column):
        """Return the raw value of one field, or an empty 404 if it is absent."""
        # Reject any identifier that is not a valid Postgres qualifier.
        if not all(parser.is_pg_qual(part) for part in (table, schema, id, column)):
            return ModHttpResponse({"error": "Bad Request", "http_status": 400})
        value = actions.getValue(schema, table, column, id)
        if value is None:
            return HttpResponse("", status=404)
        return HttpResponse(value, status=200)
    def post(self, request):
        pass
    def put(self, request):
        pass
class Rows(APIView):
    """Row-level access to a table: read, insert and update rows."""
    def get(self, request, schema, table, id=''):
        """Return rows of *table* as a JSON array.

        GET parameters ``columns``, ``where``, ``orderby``, ``limit`` and
        ``offset`` are parsed and forwarded to :func:`actions.get_rows`.
        """
        columns = request.GET.get('columns')
        where = request.GET.get('where')
        orderby = request.GET.get('orderby')
        limit = request.GET.get('limit')
        offset = request.GET.get('offset')
        # OPERATORS could be EQUAL, GREATER, LOWER, NOTEQUAL, NOTGREATER, NOTLOWER
        # CONNECTORS could be AND, OR
        # If you connect two values with an +, it will convert the + to a space. Whatever.
        where_clauses = None
        if where:
            # NOTE(review): for inputs whose word count is a multiple of
            # four, range(len//4 + 1) indexes one clause past the end of
            # where_splitted -- verify this parser against such input.
            where_splitted = where.split(' ')
            where_clauses = [{'first': where_splitted[4 * i],
                              'operator': where_splitted[4 * i + 1],
                              'second': where_splitted[4 * i + 2],
                              'connector': where_splitted[4 * i + 3] if len(where_splitted) > 4 * i + 3 else None} for i
                             in range(int(len(where_splitted) / 4) + 1)]
        # TODO: Validate where_clauses. Should not be vulnerable
        data = {'schema': schema,
                'table': table,
                'columns': parser.split(columns, ','),
                'where': where_clauses,
                'orderby': parser.split(orderby, ','),
                'limit': limit,
                'offset': offset
                }
        return_obj = actions.get_rows(request, data)
        # TODO: Figure out what JsonResponse does different.
        response = json.dumps(return_obj, default=date_handler)
        return HttpResponse(response, content_type='application/json')
    @actions.load_cursor
    def post(self, request, schema, table, row_id=None):
        """Insert (no *row_id*) or update (with *row_id*) a single row."""
        column_data = request.data['query']
        if row_id:
            return self.__update_row(request, schema, table, column_data, row_id)
        else:
            return self.__insert_row(request, schema, table, column_data, row_id)
    def put(self, request, schema, table, row_id=None):
        """Upsert the row *row_id*: update when it exists, insert otherwise."""
        if not row_id:
            return actions._response_error('This methods requires an id')
        column_data = request.data['query']
        engine = actions._get_engine()
        conn = engine.connect()
        # check whether id is already in use
        # SECURITY(review): schema, table and row_id are interpolated
        # directly into the SQL string; unless callers guarantee they are
        # sanitized this is open to SQL injection -- prefer bind params.
        exists = conn.execute('select count(*) '
                              'from {schema}.{table} '
                              'where id = {id};'.format(schema=schema,
                                                        table=table,
                                                        id=row_id)).first()[0] > 0 if row_id else False
        # exists is already a boolean; the > 0 comparison is redundant.
        if exists > 0:
            response = self.__update_row(request, schema, table, column_data, row_id)
            actions.apply_changes(schema, table)
            return JsonResponse(response)
        else:
            result = self.__insert_row(request, schema, table, column_data, row_id)
            actions.apply_changes(schema, table)
            return JsonResponse(result)
    @actions.load_cursor
    def __insert_row(self, request, schema, table, row, row_id=None):
        """Insert *row*; *row_id*, when given, becomes the row's id."""
        if row.get('id', row_id) != row_id:
            return actions._response_error('The id given in the query does not '
                                           'match the id given in the url')
        if row_id:
            row['id'] = row_id
        # Reject column names that are not valid Postgres qualifiers.
        if not all(map(parser.is_pg_qual, row.keys())):
            return actions.get_response_dict(success=False,
                                             http_status_code=400,
                                             reason="Your request was malformed.")
        context = {'cursor_id': request.data['cursor_id'],
                   'user': request.user}
        query = {
            'schema': schema,
            'table': table,
            'values': [row]
        }
        result = actions.data_insert(query, context)
        return result
    @actions.load_cursor
    def __update_row(self, request, schema, table, row, row_id):
        """Update the row whose id equals *row_id* with the values in *row*."""
        context = {'cursor_id': request.data['cursor_id'],
                   'user': request.user}
        query = {
            'schema': schema,
            'table': table,
            'where': {
                'left': {
                    'type': 'column',
                    'column': 'id'
                },
                'operator': '=',
                'right': row_id,
                'type': 'operator_binary'
            },
            'values': row
        }
        return actions.data_update(query, context)
class Session(APIView):
    """Exposes a value stored in the Django session."""
    def get(self, request, length=1):
        # NOTE(review): 'resonse' looks like a typo for 'response', and a
        # raw session value (not an HttpResponse) is returned -- confirm
        # the intended behavior; the key is kept untouched here.
        return request.session['resonse']
def date_handler(obj):
    """Serialize values for JSON dumps.

    Decimals become floats, objects exposing ``isoformat`` (dates,
    datetimes, times) are rendered in ISO-8601, everything else falls
    back to ``str`` (the default serializer behavior for JSON).

    :param obj: Any object encountered by the JSON encoder.
    :return: A JSON-serializable representation of *obj*.
    """
    if isinstance(obj, Decimal):
        return float(obj)
    iso = getattr(obj, 'isoformat', None)
    if iso is not None:
        return iso()
    return str(obj)
# Create your views here.
def create_ajax_handler(func, create_cursor=True):
    """Map an api page onto the corresponding function in api/actions.py.

    :param func: The callable that handles the parsed query.
    :param create_cursor: When True and the request carries no
        'cursor_id', open a raw connection and a fresh cursor first.
    :return: A csrf-exempt view returning a JSON response with the
        corresponding response stored in *content*.
    """
    @csrf_exempt
    def execute(request):
        content = request.POST if request.POST else request.GET
        context = {'user': request.user}
        if 'cursor_id' in content:
            context['cursor_id'] = int(content['cursor_id'])
        elif create_cursor:
            context.update(actions.open_raw_connection(request, context))
            context.update(actions.open_cursor(request, context))
        data = func(json.loads(content.get('query', '{}')), context)
        # Round-trip through json to clean the structure of
        # non-serializable objects (e.g. datetime).
        response_data = json.loads(json.dumps(data, default=date_handler))
        # Bug fix: with create_cursor=False and no 'cursor_id' parameter
        # the old code raised a KeyError here; report None instead.
        return JsonResponse({'content': response_data,
                             'cursor_id': context.get('cursor_id')}, safe=False)
    return execute
def stream(data):
    """Yield the elements of *data* one at a time, JSON-cleaned.

    Each element is round-tripped through json (with date_handler) so
    only serializable structures are yielded; a one-second pause follows
    every item, as in the original implementation.

    TODO: Implement real streaming of large datasets.

    :param data: A sequence of JSON-compatible items.
    :return: A generator over the cleaned items.
    """
    # The unused 'chunck' constant and the index-based loop were removed;
    # iterating the sequence directly is equivalent and idiomatic.
    for item in data:
        yield json.loads(json.dumps(item, default=date_handler))
        time.sleep(1)
|
import csv
import json
import datetime
from optparse import make_option
from django.utils import timezone
from django.db import transaction
from django.core.management.base import BaseCommand
from schools.models import (
School, BoundaryType, DiseInfo)
from stories.models import (
Question, Questiongroup, QuestionType,
QuestiongroupQuestions, Source, UserType,
Story, Answer)
class Command(BaseCommand):
    """Management command importing Community Feedback CSV data.

    Each accepted CSV row becomes a Story with one Answer per question
    column; rows whose school or DISE code cannot be resolved are
    collected and dumped to a JSON error log next to the input file.
    """
    args = "<path to file>"
    help = """Parse and store the Community Feedback V2 data
    ./manage.py fetchcommunityv2 --file=path/to/file"""
    option_list = BaseCommand.option_list + (
        make_option('--file',
                    help='Path to the csv file'),
        make_option('--format',
                    help='Which format to use - Hosakote1/Hosakote1-dates/Hosakote2/v2/GKA1/GKA2'),
    )
    @transaction.atomic
    def handle(self, *args, **options):
        """Parse the file given via --file using the layout named by --format."""
        file_name = options.get('file', None)
        if not file_name:
            print "Please specify a filename with the --file argument"
            return
        csv_format = options.get('format', None)
        if not csv_format or csv_format not in ['Hosakote1', 'Hosakote1-dates', 'Hosakote2', 'v2', 'GKA1', 'GKA2']:
            print "Please specify a formate with the --format argument [Hosakote1/Hosakote2/v2/GKA1/GKA2]"
            return
        source = Source.objects.get_or_create(name="community")[0]
        question_group = Questiongroup.objects.get_or_create(version=2, source=source)[0]
        # Maps normalized user-type labels to UserType constants.
        user_types = {
            'parents': UserType.PARENTS,
            'cbo member': UserType.CBO_MEMBER,
            'local leader': UserType.LOCAL_LEADER,
            'elcted/local leader': UserType.LOCAL_LEADER,
            'sdmc member': UserType.SDMC_MEMBER,
            'educated youth': UserType.EDUCATED_YOUTH,
        }
        UserType.objects.get_or_create(name=UserType.PARENTS)
        UserType.objects.get_or_create(name=UserType.CBO_MEMBER)
        UserType.objects.get_or_create(name=UserType.SDMC_MEMBER)
        UserType.objects.get_or_create(name=UserType.LOCAL_LEADER)
        UserType.objects.get_or_create(name=UserType.EDUCATED_YOUTH)
        # Numeric user-type codes used by some of the CSV layouts.
        num_to_user_type = {
            '':None,
            '1':'Parents',
            '2':'SDMC Member',
            '3':'Local Leader',
            '4':'CBO Member',
            '5':'Educated youth'
        }
        f = open(file_name, 'r')
        csv_f = csv.reader(f)
        if csv_format in ["Hosakote1", "Hosakote1-dates", "Hosakote2"]:
            dise_errors = {}
            dise_errors['no_school_code'] = []
            count = -1
        else:
            dise_errors = {}
            dise_errors['no_dise_code'] = []
            dise_errors['no_school_for_dise'] = []
            count = 0
        previous_date = ""
        for row in csv_f:
            # Skip first few rows
            if count in [0, 1, -1]:
                count += 1
                continue
            if csv_format == "v2":
                name = row[6]
                dise_code = row[5]
                accepted_answers = {'Y':'Yes', 'N':'No'}
                if row[7] in ['1', '2', '3', '4', '5']:
                    user_type = self.get_user_type(num_to_user_type[row[7]], user_types)
                else:
                    user_type = self.get_user_type(row[7], user_types)
                previous_date = date_of_visit = self.parse_date(previous_date, row[16])
                try:
                    dise_info = DiseInfo.objects.get(dise_code=dise_code)
                except Exception as ex:
                    dise_errors['no_dise_code'].append(dise_code)
                    continue
                try:
                    school = School.objects.get(dise_info=dise_info)
                except Exception as ex:
                    dise_errors['no_school_for_dise'].append(dise_code)
                    continue
                question_sequence = [1, 2, 3, 4, 5, 6, 7, 8]
                answer_columns = [8, 9, 10, 11, 12, 13, 14, 15]
            elif csv_format in ["Hosakote1", "Hosakote1-dates", "Hosakote2"]:
                if csv_format in ["Hosakote1", "Hosakote1-dates"]:
                    name = row[13]
                    school_id = row[9]
                    accepted_answers = {'1':'Yes', '0':'No', '88':'Unknown', '99':'Unknown'}
                    user_type = self.get_user_type(num_to_user_type[row[14]], user_types)
                    if csv_format == "Hosakote1":
                        previous_date = date_of_visit = self.parse_date(previous_date, row[11])
                    else:
                        transformed_date = self.transform_date(row[11])
                        previous_date = date_of_visit = self.parse_date(previous_date, transformed_date)
                    try:
                        school = School.objects.get(id=school_id)
                    except Exception as ex:
                        dise_errors['no_school_code'].append(school_id)
                        continue
                    question_sequence = [1, 2, 3, 4, 5, 6, 7, 8]
                    answer_columns = [15, 16, 17, 18, 19, 20, 21, 22]
                else:
                    # Hosakote2 rows carry free-text user types; map the
                    # known spelling variants onto the canonical labels.
                    users = {
                        'parents': ['parent', 'parents'],
                        'cbo member': ['cbo member', 'cbo membar'],
                        'local leader': ['elected/ local leader', 'elected- local leader',
                                         'elected/local leader', 'elected-local leader',
                                         'elected /local leader', 'elected local leader',
                                         'local leader', 'elected / local leader',
                                         'educated /local leader'],
                        'sdmc member': ['sdmc-1', 'sdmc-2', 'sdmc -2', 'sdmc -1'],
                        'educated youth': ['educated youth'],
                        None:['na']
                    }
                    name = row[10]
                    school_id = row[6]
                    accepted_answers = {'Yes':'Yes', 'No':'No', 'Unaware':'Unknown'}
                    user_type = row[9].strip().lower()
                    for user in users:
                        if user_type in users[user]:
                            user_type = user
                    user_type = self.get_user_type(user_type, user_types)
                    previous_date = date_of_visit = self.parse_date(previous_date, row[8])
                    try:
                        school = School.objects.get(id=school_id)
                    except Exception as ex:
                        dise_errors['no_school_code'].append(school_id)
                        continue
                    question_sequence = [1, 2, 3, 4, 5, 6, 7, 8]
                    answer_columns = [11, 14, 17, 20, 23, 26, 29, 32]
                    # Hosakote2 creates its stories and answers inline and
                    # then skips the shared handling below.
                    story, created = Story.objects.get_or_create(
                        school=school,
                        name=name,
                        is_verified=True,
                        group=question_group,
                        date_of_visit=timezone.make_aware(
                            date_of_visit, timezone.get_current_timezone()
                        ),
                        user_type=user_type,
                    )
                    for sequence_number, answer_column in zip(question_sequence, answer_columns):
                        answer_text = None
                        # Each question spans three columns; take the
                        # first cell holding an accepted answer.
                        for i in range(0, 3):
                            if row[answer_column+i].strip() in accepted_answers:
                                answer_text = row[answer_column+i].strip()
                                break
                        if answer_text:
                            question = Question.objects.get(
                                questiongroup=question_group,
                                questiongroupquestions__sequence=sequence_number,
                            )
                            answer = Answer.objects.get_or_create(
                                story=story,
                                question=question,
                                text=accepted_answers[answer_text],
                            )
                    continue
            else:
                name = row[8]
                accepted_answers = {'1':'Yes', '0':'No', '88':'Unknown', '99':'Unknown'}
                user_type = self.get_user_type(num_to_user_type[row[9]], user_types)
                previous_date = date_of_visit = self.parse_date(previous_date, row[18])
                if csv_format == "GKA1":
                    dise_code = row[5]
                else:
                    dise_code = row[7]
                try:
                    dise_info = DiseInfo.objects.get(dise_code=dise_code)
                except Exception as ex:
                    dise_errors['no_dise_code'].append(dise_code)
                    continue
                try:
                    school = School.objects.get(dise_info=dise_info)
                except Exception as ex:
                    dise_errors['no_school_for_dise'].append(dise_code)
                    continue
                question_sequence = [1, 2, 3, 4, 5, 6, 7, 8]
                answer_columns = [10, 11, 12, 13, 14, 15, 16, 17]
            # for/else: at_least_one_answer stays False only when no
            # answer column holds an accepted value.
            for answer_column in answer_columns:
                if row[answer_column] in accepted_answers:
                    at_least_one_answer = True
                    break
            else:
                at_least_one_answer = False
            if at_least_one_answer:
                story, created = Story.objects.get_or_create(
                    school=school,
                    name=name,
                    is_verified=True,
                    group=question_group,
                    date_of_visit=timezone.make_aware(
                        date_of_visit, timezone.get_current_timezone()
                    ),
                    user_type=user_type,
                )
                for sequence_number, answer_column in zip(question_sequence, answer_columns):
                    if row[answer_column] in accepted_answers:
                        question = Question.objects.get(
                            questiongroup=question_group,
                            questiongroupquestions__sequence=sequence_number,
                        )
                        answer = Answer.objects.get_or_create(
                            story=story,
                            question=question,
                            text=accepted_answers[row[answer_column]],
                        )
        f.close()
        file_name = file_name + "_error.log"
        f = open(file_name, 'w')
        f.write(json.dumps(dise_errors, indent = 4))
        f.close()
    def transform_date(self, date):
        """Swap the first two '/'-separated fields (month/day -> day/month).
        The placeholder 'NA' is returned unchanged."""
        if date.strip() != 'NA':
            date = date.strip()
            month = date.split("/")[0]
            day = date.split("/")[1]
            year = date.split("/")[2]
            date = day+"/"+month+"/"+year
        return date
    def parse_date(self, previous_date, date):
        """Parse *date* (dd/mm/yyyy) into a datetime.

        The placeholder '99' and dates failing the sanity check fall
        back to *previous_date*.
        """
        if date:
            if date == "99":
                return previous_date
            date, date_is_sane = self.check_date_sanity(date)
            if date_is_sane:
                date_of_visit = datetime.datetime.strptime(
                    date, '%d/%m/%Y'
                )
                return date_of_visit
        return previous_date
    def get_user_type(self, user_type, user_types):
        """Resolve a raw user-type label to its UserType row, or None."""
        if user_type:
            return UserType.objects.get(
                name=user_types[user_type.strip().lower()]
            )
        else:
            return None
    def check_date_sanity(self, date):
        """Normalize *date* to dd/mm/yyyy and report whether it is sane.

        Handles '.'-separated, double-slash and single-slash layouts and
        expands two-digit years to 20xx.
        :return: Tuple of (normalized_date, is_sane).
        """
        if "." in date:
            try:
                day = date.split(".")[0]
                month = date.split(".")[1]
                year = date.split(".")[2]
            except:
                return (date, False)
        elif "//" in date:
            try:
                day = date.split("/")[0]
                month = date.split("/")[2]
                year = date.split("/")[3]
                if len(year) == 2:
                    year = "20"+year
            except:
                # Date format itself will be messed up
                return (date, False)
        else:
            try:
                day = date.split("/")[0]
                month = date.split("/")[1]
                year = date.split("/")[2]
                if len(year) == 2:
                    year = "20"+year
            except:
                # Date format itself will be messed up
                return (date, False)
        if not self.is_day_correct(day):
            return (date, False)
        if not self.is_month_correct(month):
            return (date, False)
        if not self.is_year_correct(year):
            return (date, False)
        date = day+"/"+month+"/"+year
        return (date, True)
    def is_day_correct(self, day):
        """Return True when *day* parses to 1..31."""
        return int(day) in range(1,32)
    def is_month_correct(self, month):
        """Return True when *month* parses to 1..12."""
        return int(month) in range(1,13)
    def is_year_correct(self, year):
        """Return True for four-digit years not in the future."""
        return (len(year) == 4 and int(year) <= timezone.now().year)
Check for a log folder path before writing the error log
import os
import csv
import json
import datetime
from optparse import make_option
from django.utils import timezone
from django.db import transaction
from django.core.management.base import BaseCommand
from schools.models import (
School, BoundaryType, DiseInfo)
from stories.models import (
Question, Questiongroup, QuestionType,
QuestiongroupQuestions, Source, UserType,
Story, Answer)
class Command(BaseCommand):
    """Management command importing Community Feedback CSV data.

    Each accepted CSV row becomes a Story with one Answer per question
    column; rows whose school or DISE code cannot be resolved are
    collected and dumped to a JSON error log (in DUBDUBDUB_LOG_DIR when
    set, otherwise next to the input file).
    """
    args = "<path to file>"
    help = """Parse and store the Community Feedback V2 data
    ./manage.py fetchcommunityv2 --file=path/to/file"""
    option_list = BaseCommand.option_list + (
        make_option('--file',
                    help='Path to the csv file'),
        make_option('--format',
                    help='Which format to use - Hosakote1/Hosakote1-dates/Hosakote2/v2/GKA1/GKA2'),
    )
    @transaction.atomic
    def handle(self, *args, **options):
        """Parse the file given via --file using the layout named by --format."""
        file_name = options.get('file', None)
        if not file_name:
            print "Please specify a filename with the --file argument"
            return
        csv_format = options.get('format', None)
        if not csv_format or csv_format not in ['Hosakote1', 'Hosakote1-dates', 'Hosakote2', 'v2', 'GKA1', 'GKA2']:
            print "Please specify a formate with the --format argument [Hosakote1/Hosakote2/v2/GKA1/GKA2]"
            return
        source = Source.objects.get_or_create(name="community")[0]
        question_group = Questiongroup.objects.get_or_create(version=2, source=source)[0]
        # Maps normalized user-type labels to UserType constants.
        user_types = {
            'parents': UserType.PARENTS,
            'cbo member': UserType.CBO_MEMBER,
            'local leader': UserType.LOCAL_LEADER,
            'elcted/local leader': UserType.LOCAL_LEADER,
            'sdmc member': UserType.SDMC_MEMBER,
            'educated youth': UserType.EDUCATED_YOUTH,
        }
        UserType.objects.get_or_create(name=UserType.PARENTS)
        UserType.objects.get_or_create(name=UserType.CBO_MEMBER)
        UserType.objects.get_or_create(name=UserType.SDMC_MEMBER)
        UserType.objects.get_or_create(name=UserType.LOCAL_LEADER)
        UserType.objects.get_or_create(name=UserType.EDUCATED_YOUTH)
        # Numeric user-type codes used by some of the CSV layouts.
        num_to_user_type = {
            '':None,
            '1':'Parents',
            '2':'SDMC Member',
            '3':'Local Leader',
            '4':'CBO Member',
            '5':'Educated youth'
        }
        f = open(file_name, 'r')
        csv_f = csv.reader(f)
        if csv_format in ["Hosakote1", "Hosakote1-dates", "Hosakote2"]:
            dise_errors = {}
            dise_errors['no_school_code'] = []
            count = -1
        else:
            dise_errors = {}
            dise_errors['no_dise_code'] = []
            dise_errors['no_school_for_dise'] = []
            count = 0
        previous_date = ""
        for row in csv_f:
            # Skip first few rows
            if count in [0, 1, -1]:
                count += 1
                continue
            if csv_format == "v2":
                name = row[6]
                dise_code = row[5]
                accepted_answers = {'Y':'Yes', 'N':'No'}
                if row[7] in ['1', '2', '3', '4', '5']:
                    user_type = self.get_user_type(num_to_user_type[row[7]], user_types)
                else:
                    user_type = self.get_user_type(row[7], user_types)
                previous_date = date_of_visit = self.parse_date(previous_date, row[16])
                try:
                    dise_info = DiseInfo.objects.get(dise_code=dise_code)
                except Exception as ex:
                    dise_errors['no_dise_code'].append(dise_code)
                    continue
                try:
                    school = School.objects.get(dise_info=dise_info)
                except Exception as ex:
                    dise_errors['no_school_for_dise'].append(dise_code)
                    continue
                question_sequence = [1, 2, 3, 4, 5, 6, 7, 8]
                answer_columns = [8, 9, 10, 11, 12, 13, 14, 15]
            elif csv_format in ["Hosakote1", "Hosakote1-dates", "Hosakote2"]:
                if csv_format in ["Hosakote1", "Hosakote1-dates"]:
                    name = row[13]
                    school_id = row[9]
                    accepted_answers = {'1':'Yes', '0':'No', '88':'Unknown', '99':'Unknown'}
                    user_type = self.get_user_type(num_to_user_type[row[14]], user_types)
                    if csv_format == "Hosakote1":
                        previous_date = date_of_visit = self.parse_date(previous_date, row[11])
                    else:
                        transformed_date = self.transform_date(row[11])
                        previous_date = date_of_visit = self.parse_date(previous_date, transformed_date)
                    try:
                        school = School.objects.get(id=school_id)
                    except Exception as ex:
                        dise_errors['no_school_code'].append(school_id)
                        continue
                    question_sequence = [1, 2, 3, 4, 5, 6, 7, 8]
                    answer_columns = [15, 16, 17, 18, 19, 20, 21, 22]
                else:
                    # Hosakote2 rows carry free-text user types; map the
                    # known spelling variants onto the canonical labels.
                    users = {
                        'parents': ['parent', 'parents'],
                        'cbo member': ['cbo member', 'cbo membar'],
                        'local leader': ['elected/ local leader', 'elected- local leader',
                                         'elected/local leader', 'elected-local leader',
                                         'elected /local leader', 'elected local leader',
                                         'local leader', 'elected / local leader',
                                         'educated /local leader'],
                        'sdmc member': ['sdmc-1', 'sdmc-2', 'sdmc -2', 'sdmc -1'],
                        'educated youth': ['educated youth'],
                        None:['na']
                    }
                    name = row[10]
                    school_id = row[6]
                    accepted_answers = {'Yes':'Yes', 'No':'No', 'Unaware':'Unknown'}
                    user_type = row[9].strip().lower()
                    for user in users:
                        if user_type in users[user]:
                            user_type = user
                    user_type = self.get_user_type(user_type, user_types)
                    previous_date = date_of_visit = self.parse_date(previous_date, row[8])
                    try:
                        school = School.objects.get(id=school_id)
                    except Exception as ex:
                        dise_errors['no_school_code'].append(school_id)
                        continue
                    question_sequence = [1, 2, 3, 4, 5, 6, 7, 8]
                    answer_columns = [11, 14, 17, 20, 23, 26, 29, 32]
                    # Hosakote2 creates its stories and answers inline and
                    # then skips the shared handling below.
                    story, created = Story.objects.get_or_create(
                        school=school,
                        name=name,
                        is_verified=True,
                        group=question_group,
                        date_of_visit=timezone.make_aware(
                            date_of_visit, timezone.get_current_timezone()
                        ),
                        user_type=user_type,
                    )
                    for sequence_number, answer_column in zip(question_sequence, answer_columns):
                        answer_text = None
                        # Each question spans three columns; take the
                        # first cell holding an accepted answer.
                        for i in range(0, 3):
                            if row[answer_column+i].strip() in accepted_answers:
                                answer_text = row[answer_column+i].strip()
                                break
                        if answer_text:
                            question = Question.objects.get(
                                questiongroup=question_group,
                                questiongroupquestions__sequence=sequence_number,
                            )
                            answer = Answer.objects.get_or_create(
                                story=story,
                                question=question,
                                text=accepted_answers[answer_text],
                            )
                    continue
            else:
                name = row[8]
                accepted_answers = {'1':'Yes', '0':'No', '88':'Unknown', '99':'Unknown'}
                user_type = self.get_user_type(num_to_user_type[row[9]], user_types)
                previous_date = date_of_visit = self.parse_date(previous_date, row[18])
                if csv_format == "GKA1":
                    dise_code = row[5]
                else:
                    dise_code = row[7]
                try:
                    dise_info = DiseInfo.objects.get(dise_code=dise_code)
                except Exception as ex:
                    dise_errors['no_dise_code'].append(dise_code)
                    continue
                try:
                    school = School.objects.get(dise_info=dise_info)
                except Exception as ex:
                    dise_errors['no_school_for_dise'].append(dise_code)
                    continue
                question_sequence = [1, 2, 3, 4, 5, 6, 7, 8]
                answer_columns = [10, 11, 12, 13, 14, 15, 16, 17]
            # for/else: at_least_one_answer stays False only when no
            # answer column holds an accepted value.
            for answer_column in answer_columns:
                if row[answer_column] in accepted_answers:
                    at_least_one_answer = True
                    break
            else:
                at_least_one_answer = False
            if at_least_one_answer:
                story, created = Story.objects.get_or_create(
                    school=school,
                    name=name,
                    is_verified=True,
                    group=question_group,
                    date_of_visit=timezone.make_aware(
                        date_of_visit, timezone.get_current_timezone()
                    ),
                    user_type=user_type,
                )
                for sequence_number, answer_column in zip(question_sequence, answer_columns):
                    if row[answer_column] in accepted_answers:
                        question = Question.objects.get(
                            questiongroup=question_group,
                            questiongroupquestions__sequence=sequence_number,
                        )
                        answer = Answer.objects.get_or_create(
                            story=story,
                            question=question,
                            text=accepted_answers[row[answer_column]],
                        )
        f.close()
        # When DUBDUBDUB_LOG_DIR is set, write the error log there using
        # the input file's base name; otherwise place it next to the input.
        log_directory = os.environ.get('DUBDUBDUB_LOG_DIR')
        if log_directory:
            file_name = file_name.split('/')[-1]
            file_name = log_directory + file_name + "_error.log"
        else:
            file_name = file_name + "_error.log"
        f = open(file_name, 'w')
        f.write(json.dumps(dise_errors, indent = 4))
        f.close()
    def transform_date(self, date):
        """Swap the first two '/'-separated fields (month/day -> day/month).
        The placeholder 'NA' is returned unchanged."""
        if date.strip() != 'NA':
            date = date.strip()
            month = date.split("/")[0]
            day = date.split("/")[1]
            year = date.split("/")[2]
            date = day+"/"+month+"/"+year
        return date
    def parse_date(self, previous_date, date):
        """Parse *date* (dd/mm/yyyy) into a datetime.

        The placeholder '99' and dates failing the sanity check fall
        back to *previous_date*.
        """
        if date:
            if date == "99":
                return previous_date
            date, date_is_sane = self.check_date_sanity(date)
            if date_is_sane:
                date_of_visit = datetime.datetime.strptime(
                    date, '%d/%m/%Y'
                )
                return date_of_visit
        return previous_date
    def get_user_type(self, user_type, user_types):
        """Resolve a raw user-type label to its UserType row, or None."""
        if user_type:
            return UserType.objects.get(
                name=user_types[user_type.strip().lower()]
            )
        else:
            return None
    def check_date_sanity(self, date):
        """Normalize *date* to dd/mm/yyyy and report whether it is sane.

        Handles '.'-separated, double-slash and single-slash layouts and
        expands two-digit years to 20xx.
        :return: Tuple of (normalized_date, is_sane).
        """
        if "." in date:
            try:
                day = date.split(".")[0]
                month = date.split(".")[1]
                year = date.split(".")[2]
            except:
                return (date, False)
        elif "//" in date:
            try:
                day = date.split("/")[0]
                month = date.split("/")[2]
                year = date.split("/")[3]
                if len(year) == 2:
                    year = "20"+year
            except:
                # Date format itself will be messed up
                return (date, False)
        else:
            try:
                day = date.split("/")[0]
                month = date.split("/")[1]
                year = date.split("/")[2]
                if len(year) == 2:
                    year = "20"+year
            except:
                # Date format itself will be messed up
                return (date, False)
        if not self.is_day_correct(day):
            return (date, False)
        if not self.is_month_correct(month):
            return (date, False)
        if not self.is_year_correct(year):
            return (date, False)
        date = day+"/"+month+"/"+year
        return (date, True)
    def is_day_correct(self, day):
        """Return True when *day* parses to 1..31."""
        return int(day) in range(1,32)
    def is_month_correct(self, month):
        """Return True when *month* parses to 1..12."""
        return int(month) in range(1,13)
    def is_year_correct(self, year):
        """Return True for four-digit years not in the future."""
        return (len(year) == 4 and int(year) <= timezone.now().year)
|
from urllib.parse import urlencode
from pytest import mark
from pytest import fixture
from adhocracy_core.testing import god_login
from adhocracy_frontend.tests.acceptance.shared import wait
from adhocracy_frontend.tests.acceptance.shared import get_list_element
from adhocracy_frontend.tests.acceptance.shared import get_listing_create_form
from adhocracy_frontend.tests.acceptance.shared import get_random_string
from adhocracy_frontend.tests.acceptance.shared import login_god
from adhocracy_frontend.tests.fixtures.users import register_user
from adhocracy_frontend.tests.fixtures.users import activate_all
from adhocracy_frontend.tests.acceptance.shared import logout
from adhocracy_frontend.tests.acceptance.shared import login
@fixture(scope="module")
def user():
    """Register and activate a throwaway user; return (name, password)."""
    username = get_random_string(n=5)
    secret = "password"
    register_user(username, secret)
    activate_all()
    return (username, secret)
class TestComment:
    """Acceptance tests for the comment listing widget.

    NOTE(review): these tests share one browser session and depend on
    running in definition order -- test_create logs in and creates the
    comment that the later tests edit and reply to.
    """

    def test_create(self, browser, rest_url):
        login_god(browser)
        comment = create_comment(browser, rest_url, 'comment1')
        assert comment is not None

    def test_empty_comment(self, browser, rest_url):
        # Submitting an empty body must not create a comment.
        comment = create_comment(browser, rest_url, '')
        assert comment is None

    # Commenting is not possible at nesting level > 7, so deep nesting
    # is an expected failure (matches the later revision of this test).
    @mark.xfail
    def test_nested_replies(self, browser, n=10):
        for i in range(n):
            comment = browser.find_by_css('.comment').last
            reply = create_reply_comment(browser, comment, 'nested reply %d' % i)
            assert reply is not None

    def test_multiple_replies(self, browser, n=10):
        comment = browser.find_by_css('.comment').first
        for i in range(n):
            reply = create_reply_comment(browser, comment, 'multiple reply %d' % i)
            assert reply is not None

    def test_edit(self, browser):
        comment = browser.find_by_css('.comment').first
        edit_comment(browser, comment, 'edited')
        assert comment.find_by_css('.comment-content div').first.text == 'edited'
        # The edit must survive a reload, i.e. reach the backend.
        browser.reload()
        assert wait(lambda: browser.find_by_css('.comment-content')\
                    .first.text == 'edited')

    def test_edit_twice(self, browser):
        comment = browser.find_by_css('.comment').first
        edit_comment(browser, comment, 'edited 1')
        assert comment.find_by_css('.comment-content div').first.text == 'edited 1'
        edit_comment(browser, comment, 'edited 2')
        assert comment.find_by_css('.comment-content div').first.text == 'edited 2'

    @mark.skipif(True, reason='FIXME Test needs to be updated since the '
                 'backend now throws a "No fork allowed" error')
    def test_multi_edits(self, browser):
        parent = browser.find_by_css('.comment').first
        reply = parent.find_by_css('.comment').first
        edit_comment(browser, reply, 'somereply edited')
        edit_comment(browser, parent, 'edited')
        assert parent.find_by_css('.comment-content').first.text == 'edited'

    def test_author(self, browser):
        comment = browser.find_by_css('.comment').first
        actual = lambda element: element.find_by_css('adh-user-meta').first.text
        # the capitalisation might be changed by CSS
        assert wait(lambda: actual(comment).lower() == god_login.lower())

    @mark.skipif(True, reason='FIXME: This test does not work as long as user '
                 'activation does not work more reliably.')
    def test_edit_foreign_comments(self, browser, rest_url, user):
        comment = create_comment(browser, rest_url, 'comment1')
        assert comment is not None
        logout(browser)
        login(browser, user[0], user[1])
        new_text = "changing comment to this text should not have worked."
        edit_comment(browser, comment, new_text)
        assert not comment.find_by_css('.comment-content div').\
            first.text == new_text
def create_comment(browser, rest_url, name):
    """Go to content2 column and create comment with content 'comment1'."""
    params = {
        'key': 'test',
        'pool-path': rest_url + 'adhocracy/',
    }
    target = browser.app_url + 'embed/create-or-show-comment-listing?' + urlencode(params)
    browser.visit(target)
    listing = browser.find_by_css('.listing')
    return create_top_level_comment(browser, listing, name)
def create_top_level_comment(browser, listing, content):
    """Create a new top level Comment."""
    create_form = get_listing_create_form(listing)
    create_form.find_by_css('textarea').first.fill(content)
    create_form.find_by_css('input[type="submit"]').first.click()
    # Give the new comment time to render before looking it up.
    browser.is_text_present(content, wait_time=10)
    return get_list_element(listing, content, descendant='.comment-content')
def create_reply_comment(browser, parent, content):
    """Create a new reply to an existing comment."""
    reply_form = get_comment_create_form(parent)
    reply_form.find_by_css('textarea').first.fill(content)
    reply_form.find_by_css('input[type="submit"]').first.click()
    if browser.is_text_present(content, wait_time=5):
        return get_reply(parent, content)
    return None
def edit_comment(browser, comment, content):
    """Enter edit mode, replace the comment text and save again.

    The first '.comment-meta a' link toggles edit mode on and off.
    """
    comment.find_by_css('.comment-meta a')[0].click()
    comment.find_by_css('textarea').first.fill(content)
    comment.find_by_css('.comment-meta a')[0].click()
    # Wait for the updated text to appear before returning.
    browser.is_text_present(content, wait_time=10)
def get_comment_create_form(comment):
    """Open and return the reply form belonging to `comment`."""
    main_section = comment.find_by_css(".comment-main").first
    reply_link = main_section.find_by_css('.comment-meta a').last
    reply_link.click()
    return comment.find_by_css('.comment-create-form').first
def get_reply(parent, content):
    """Return reply to comment `parent` with content == `content`."""
    for candidate in parent.find_by_css('.comment'):
        # Wait until the element's text is populated before comparing.
        wait(lambda: candidate.text, max_steps=100)
        if candidate.find_by_css('.comment-content').first.text == content:
            return candidate
marking nested comments as xfail since it is not possible to comment in nested level > 7
from urllib.parse import urlencode
from pytest import mark
from pytest import fixture
from adhocracy_core.testing import god_login
from adhocracy_frontend.tests.acceptance.shared import wait
from adhocracy_frontend.tests.acceptance.shared import get_list_element
from adhocracy_frontend.tests.acceptance.shared import get_listing_create_form
from adhocracy_frontend.tests.acceptance.shared import get_random_string
from adhocracy_frontend.tests.acceptance.shared import login_god
from adhocracy_frontend.tests.fixtures.users import register_user
from adhocracy_frontend.tests.fixtures.users import activate_all
from adhocracy_frontend.tests.acceptance.shared import logout
from adhocracy_frontend.tests.acceptance.shared import login
@fixture(scope="module")
def user():
    """Create and activate a fresh account; return (name, password)."""
    login_name = get_random_string(n=5)
    login_password = "password"
    register_user(login_name, login_password)
    activate_all()
    return (login_name, login_password)
class TestComment:
    """Acceptance tests for the comment listing widget.

    NOTE(review): these tests appear to share one browser session and
    rely on running in definition order -- test_create logs in and
    creates the comment the later tests operate on.
    """

    def test_create(self, browser, rest_url):
        login_god(browser)
        comment = create_comment(browser, rest_url, 'comment1')
        assert comment is not None

    def test_empty_comment(self, browser, rest_url):
        # Submitting an empty body must not create a comment.
        comment = create_comment(browser, rest_url, '')
        assert comment is None

    # xfail: commenting is not possible at nesting level > 7.
    @mark.xfail
    def test_nested_replies(self, browser, n=10):
        for i in range(n):
            comment = browser.find_by_css('.comment').last
            reply = create_reply_comment(browser, comment, 'nested reply %d' % i)
            assert reply is not None

    def test_multiple_replies(self, browser, n=10):
        comment = browser.find_by_css('.comment').first
        for i in range(n):
            reply = create_reply_comment(browser, comment, 'multiple reply %d' % i)
            assert reply is not None

    def test_edit(self, browser):
        comment = browser.find_by_css('.comment').first
        edit_comment(browser, comment, 'edited')
        assert comment.find_by_css('.comment-content div').first.text == 'edited'
        # The edit must survive a reload, i.e. reach the backend.
        browser.reload()
        assert wait(lambda: browser.find_by_css('.comment-content')\
            .first.text == 'edited')

    def test_edit_twice(self, browser):
        comment = browser.find_by_css('.comment').first
        edit_comment(browser, comment, 'edited 1')
        assert comment.find_by_css('.comment-content div').first.text == 'edited 1'
        edit_comment(browser, comment, 'edited 2')
        assert comment.find_by_css('.comment-content div').first.text == 'edited 2'

    @mark.skipif(True, reason='FIXME Test needs to be updated since the '
                 'backend now throws a "No fork allowed" error')
    def test_multi_edits(self, browser):
        parent = browser.find_by_css('.comment').first
        reply = parent.find_by_css('.comment').first
        edit_comment(browser, reply, 'somereply edited')
        edit_comment(browser, parent, 'edited')
        assert parent.find_by_css('.comment-content').first.text == 'edited'

    def test_author(self, browser):
        comment = browser.find_by_css('.comment').first
        actual = lambda element: element.find_by_css('adh-user-meta').first.text
        # the captialisation might be changed by CSS
        assert wait(lambda: actual(comment).lower() == god_login.lower())

    @mark.skipif(True, reason='FIXME: This test does not work as long as user '
                 'activation does not work more reliably.')
    def test_edit_foreign_comments(self, browser, rest_url, user):
        comment = create_comment(browser, rest_url, 'comment1')
        assert comment is not None
        logout(browser)
        # Switch to an unprivileged user; the edit must be rejected.
        login(browser, user[0], user[1])
        new_text = "changing comment to this text should not have worked."
        edit_comment(browser, comment, new_text)
        assert not comment.find_by_css('.comment-content div').\
            first.text == new_text
def create_comment(browser, rest_url, name):
    """Go to content2 column and create comment with content 'comment1'."""
    query_string = urlencode({
        'key': 'test',
        'pool-path': rest_url + 'adhocracy/',
    })
    browser.visit('%sembed/create-or-show-comment-listing?%s'
                  % (browser.app_url, query_string))
    listing = browser.find_by_css('.listing')
    return create_top_level_comment(browser, listing, name)
def create_top_level_comment(browser, listing, content):
    """Create a new top level Comment."""
    submit_form = get_listing_create_form(listing)
    submit_form.find_by_css('textarea').first.fill(content)
    submit_form.find_by_css('input[type="submit"]').first.click()
    # Allow the listing to refresh before locating the new entry.
    browser.is_text_present(content, wait_time=10)
    return get_list_element(listing, content, descendant='.comment-content')
def create_reply_comment(browser, parent, content):
    """Create a new reply to an existing comment."""
    form = get_comment_create_form(parent)
    form.find_by_css('textarea').first.fill(content)
    form.find_by_css('input[type="submit"]').first.click()
    if not browser.is_text_present(content, wait_time=5):
        # The reply never appeared; signal failure to the caller.
        return None
    return get_reply(parent, content)
def edit_comment(browser, comment, content):
    """Toggle edit mode, fill in new content, and toggle it back to save."""
    toggle = lambda: comment.find_by_css('.comment-meta a')[0].click()
    toggle()
    comment.find_by_css('textarea').first.fill(content)
    toggle()
    browser.is_text_present(content, wait_time=10)
def get_comment_create_form(comment):
    """Click the reply link of `comment` and return its create form."""
    meta_links = comment.find_by_css(".comment-main").first\
        .find_by_css('.comment-meta a')
    meta_links.last.click()
    return comment.find_by_css('.comment-create-form').first
def get_reply(parent, content):
    """Return reply to comment `parent` with content == `content`."""
    for child in parent.find_by_css('.comment'):
        # Block until the child's text has rendered.
        wait(lambda: child.text, max_steps=100)
        if child.find_by_css('.comment-content').first.text == content:
            return child
|
import os
import redis
import time
import datetime
import colorsys
import threading
import ConfigParser
import paho.mqtt.client as mqtt
from Room import Room
from InformationFetcher import InformationFetcher
class LightController(threading.Thread):
def __init__(self):
self._info = InformationFetcher()
threading.Thread.__init__(self)
self.setDaemon(True)
self._homeDir = os.path.expanduser("~/.sensomatic")
self._configFileName = self._homeDir + '/config.ini'
self._config = ConfigParser.ConfigParser()
self._readConfig()
self._mqclient = mqtt.Client("LightController", clean_session=True)
self._redis = redis.StrictRedis(host=self._config.get("REDIS", "ServerAddress"),
port=self._config.get("REDIS", "ServerPort"), db=0)
def _on_connect(self, client, userdata, rc, msg):
print "Connected with result code %s" % rc
#self._mqclient.subscribe("#")
def _on_message(self, client, userdata, msg):
print "Mq Received on channel %s -> %s" % (msg.topic, msg.payload)
def _on_disconnect(self, client, userdata, msg):
print "Disconnect"
def run(self):
self._mqclient.connect(self._config.get("MQTT","ServerAddress"), self._config.get("MQTT","ServerPort"), 60)
self._mqclient.on_connect = self._on_connect
self._mqclient.on_message = self._on_message
self._mqclient.on_disconnect = self._on_disconnect
time.sleep(1)
while True:
self._mqclient.loop()
self.ansiRoom()
self._mqclient.loop()
time.sleep(60)
def _readConfig(self):
update = False
if not os.path.isdir(self._homeDir):
print "Creating homeDir"
os.makedirs(self._homeDir)
if os.path.isfile(self._configFileName):
self._config.read(self._configFileName)
else:
print "Config file not found"
update = True
if not self._config.has_section('MQTT'):
print "Adding MQTT part"
update = True
self._config.add_section("MQTT")
if not self._config.has_option("MQTT", "ServerAddress"):
print "No Server Address"
update = True
self._config.set("MQTT", "ServerAddress", "<ServerAddress>")
if not self._config.has_option("MQTT", "ServerPort"):
print "No Server Port"
update = True
self._config.set("MQTT", "ServerPort", "1883")
if not self._config.has_section('REDIS'):
print "Adding Redis part"
update = True
self._config.add_section("REDIS")
if not self._config.has_option("REDIS", "ServerAddress"):
print "No Server Address"
update = True
self._config.set("REDIS", "ServerAddress", "<ServerAddress>")
if not self._config.has_option("REDIS", "ServerPort"):
print "No Server Port"
update = True
self._config.set("REDIS", "ServerPort", "6379")
if update:
with open(self._configFileName, 'w') as f:
self._config.write(f)
def fill(self, size, val):
values = ""
for i in range(size):
values += ",%d,%d,%d" % (val[0],val[1],val[2])
return values[1:]
def fillgradient(self, len):
values = ""
for i in range(len):
values += ",%d,%d,%d" % (0,0,0)
return values[1:]
def setWorkingLight(self, val):
self._mqclient.publish("ansiroom/bedlight/overhead/colour", self.fill(20, [val[0], val[1], val[2]]))
self._mqclient.publish("ansiroom/bedlight/center/colour", self.fill(20, [val[0], val[1], val[2]]))
self._mqclient.publish("ansiroom/bedlight/left/colour", self.fill(6, [val[0], val[1], val[2]]))
self._mqclient.publish("ansiroom/bedlight/right/colour", self.fill(6, [val[0], val[1], val[2]]))
def ansiRoom(self):
if self._info.isSomeoneInTheRoom(Room.ANSI_ROOM):
lightlevel = self._info.getOutsideLightLevel()
if lightlevel < 14.999:
now = datetime.datetime.now()
blue = 1.0
r = 255
g = 255
b = 255
if now.hour in (21, 22, 23):
m = now.hour - 21
m *= 60
m += now.minute
max = 3 * 60
blue = float(m) / max
blue = 1 - blue
if now.hour in (0, 1, 2, 3, 4, 5):
blue = 0.0
if lightlevel > 0:
d = (15.0 - lightlevel) / 15.0
r *= d
g *= d
b *= d
b *= blue
self.setWorkingLight([int(r), int(g), int(b)])
else:
self.setWorkingLight([0, 0, 0])
else:
self.setWorkingLight([0, 0, 0])
if __name__ == "__main__":
print "LightController test"
l = LightController()
l.start()
time.sleep(23)
#_,_,_,weather,_,_,_ = InformationFetcher().getOutdoor()
Package max
import os
import redis
import time
import datetime
import colorsys
import threading
import ConfigParser
import paho.mqtt.client as mqtt
from Room import Room
from InformationFetcher import InformationFetcher
class LightController(threading.Thread):
def __init__(self):
self._info = InformationFetcher()
threading.Thread.__init__(self)
self.setDaemon(True)
self._homeDir = os.path.expanduser("~/.sensomatic")
self._configFileName = self._homeDir + '/config.ini'
self._config = ConfigParser.ConfigParser()
self._readConfig()
self._mqclient = mqtt.Client("LightController", clean_session=True)
self._redis = redis.StrictRedis(host=self._config.get("REDIS", "ServerAddress"),
port=self._config.get("REDIS", "ServerPort"), db=0)
def _on_connect(self, client, userdata, rc, msg):
print "Connected with result code %s" % rc
#self._mqclient.subscribe("#")
def _on_message(self, client, userdata, msg):
print "Mq Received on channel %s -> %s" % (msg.topic, msg.payload)
def _on_disconnect(self, client, userdata, msg):
print "Disconnect"
def run(self):
self._mqclient.connect(self._config.get("MQTT","ServerAddress"), self._config.get("MQTT","ServerPort"), 60)
self._mqclient.on_connect = self._on_connect
self._mqclient.on_message = self._on_message
self._mqclient.on_disconnect = self._on_disconnect
time.sleep(1)
while True:
self._mqclient.loop(max_packets=100)
self.ansiRoom()
self._mqclient.loop(max_packets=100)
time.sleep(60)
def _readConfig(self):
update = False
if not os.path.isdir(self._homeDir):
print "Creating homeDir"
os.makedirs(self._homeDir)
if os.path.isfile(self._configFileName):
self._config.read(self._configFileName)
else:
print "Config file not found"
update = True
if not self._config.has_section('MQTT'):
print "Adding MQTT part"
update = True
self._config.add_section("MQTT")
if not self._config.has_option("MQTT", "ServerAddress"):
print "No Server Address"
update = True
self._config.set("MQTT", "ServerAddress", "<ServerAddress>")
if not self._config.has_option("MQTT", "ServerPort"):
print "No Server Port"
update = True
self._config.set("MQTT", "ServerPort", "1883")
if not self._config.has_section('REDIS'):
print "Adding Redis part"
update = True
self._config.add_section("REDIS")
if not self._config.has_option("REDIS", "ServerAddress"):
print "No Server Address"
update = True
self._config.set("REDIS", "ServerAddress", "<ServerAddress>")
if not self._config.has_option("REDIS", "ServerPort"):
print "No Server Port"
update = True
self._config.set("REDIS", "ServerPort", "6379")
if update:
with open(self._configFileName, 'w') as f:
self._config.write(f)
def fill(self, size, val):
values = ""
for i in range(size):
values += ",%d,%d,%d" % (val[0],val[1],val[2])
return values[1:]
def fillgradient(self, len):
values = ""
for i in range(len):
values += ",%d,%d,%d" % (0,0,0)
return values[1:]
def setWorkingLight(self, val):
self._mqclient.publish("ansiroom/bedlight/overhead/colour", self.fill(20, [val[0], val[1], val[2]]))
self._mqclient.publish("ansiroom/bedlight/center/colour", self.fill(20, [val[0], val[1], val[2]]))
self._mqclient.publish("ansiroom/bedlight/left/colour", self.fill(6, [val[0], val[1], val[2]]))
self._mqclient.publish("ansiroom/bedlight/right/colour", self.fill(6, [val[0], val[1], val[2]]))
def ansiRoom(self):
if self._info.isSomeoneInTheRoom(Room.ANSI_ROOM):
lightlevel = self._info.getOutsideLightLevel()
if lightlevel < 14.999:
now = datetime.datetime.now()
blue = 1.0
r = 255
g = 255
b = 255
if now.hour in (21, 22, 23):
m = now.hour - 21
m *= 60
m += now.minute
max = 3 * 60
blue = float(m) / max
blue = 1 - blue
if now.hour in (0, 1, 2, 3, 4, 5):
blue = 0.0
if lightlevel > 0:
d = (15.0 - lightlevel) / 15.0
r *= d
g *= d
b *= d
b *= blue
self.setWorkingLight([int(r), int(g), int(b)])
else:
self.setWorkingLight([0, 0, 0])
else:
self.setWorkingLight([0, 0, 0])
if __name__ == "__main__":
print "LightController test"
l = LightController()
l.start()
time.sleep(23)
#_,_,_,weather,_,_,_ = InformationFetcher().getOutdoor()
|
import pygame
from operator import truth
# Flag values for anchors: which point of the sprite's rect its
# `position` refers to.  Arbitrary sentinel ints; a coordinate tuple
# may be used instead (see Sprite.anchor_value).
# TODO: use Rect's constants
ANCHOR_TOPLEFT = 101
ANCHOR_TOPRIGHT = 102
ANCHOR_BOTTOMLEFT = 103
ANCHOR_BOTTOMRIGHT = 104
ANCHOR_CENTER = 105
class Sprite(object):
"""simple base class for visible game objects
pygame.sprite.Sprite(*groups): return Sprite
The base class for visible game objects. Derived classes will want to
override the Sprite.update() method and assign Sprite.image and Sprite.rect
attributes. The initializer can accept any number of Group instances that
the Sprite will become a member of.
When subclassing the Sprite class, be sure to call the base initializer
before adding the Sprite to Groups.
"""
def __init__(self, *groups):
"""initialize sprite instance
Initializes attributes to default values, and optionally
adds it to given groups.
"""
self.image = self.original = None
self.rect = None
self.dirty = False
# Initialize position
self.anchor = ANCHOR_TOPLEFT
self.position = None
self.offset = (0, 0)
# Initialize visual attributes
self.scale = 1
self.rotate = 0
self.visible = True
self.__g = {} # The groups the sprite is in
if groups:
self.add(*groups)
def draw(self, surface):
"""draw the sprite's image on a surface
Sprite.draw(surface): return Rect
This should be called by a group's own `draw` method.
On failure or if sprite should not be drawn, returns 0.
"""
if (self.visible):
return surface.blit(self.image, self.rect)
else:
return 0
def _visual_set(method):
"""callback that gets called on changes to visual attributes
Used to trigger the `on_visual_set` event, which is fired
before the change and decides whether to continue with it.
"""
#TODO consider using a Python decorator for such setters
def wrapper(self, *args, **kwargs):
result = None
if hasattr(self, 'on_visual_set'):
self.on_visual_set(method, *args, **kwargs)
self.dirty = True
return method(self, *args, **kwargs)
return wrapper
def set_image(self, img):
"""set a new image object for the sprite
"""
self.image = self.original = img
self.update_image()
def update_image(self):
"""update the sprite's image object
usually useful for transformations, this method does
not change the 'original' attribute."""
img = self.original
if img is not None:
if self.scale != 1:
img = pygame.transform.scale(img, self.scaled_size())
if self.rotate != 0:
img = pygame.transform.rotate(img, self.rotate)
self.image = img
self.rect = img.get_rect()
self.move_to(self.position)
def anchor_value(self):
"""return actual position of sprite's anchor
If anchor was provided in coordinates, use them.
Otherwise, translate anchor flags to coordinates.
"""
#TODO handle negative values
#TODO use same constants as Rect's
if type(self.anchor) is tuple:
return self.anchor
elif self.anchor == ANCHOR_TOPLEFT:
return (0, 0)
elif self.anchor == ANCHOR_TOPRIGHT:
return (self.rect.width, 0)
elif self.anchor == ANCHOR_BOTTOMLEFT:
return (0, self.rect.height)
elif self.anchor == ANCHOR_BOTTOMRIGHT:
return (self.rect.width, self.rect.height)
elif self.anchor == ANCHOR_CENTER:
return (self.rect.width / 2, self.rect.height / 2)
else:
return None # shouldn't happen :(
def update_position(self):
""" re-calculating the sprite's rect position
"""
(x, y) = self.position
(off_x, off_y) = self.offset
(anc_x, anc_y) = self.anchor_value()
self.rect.topleft = (x + off_x - anc_x, y + off_y - anc_y)
def move_to(self, pos):
"""move sprite to a certain position
"""
#TODO handle float values
self.position = pos
if pos:
self.update_position()
def move_by(self, delta):
"""move sprite by a certain delta
"""
(delta_x, delta_y) = delta
(current_x, current_y) = self.position
self.move_to((current_x + delta_x, current_y + delta_y))
def set_offset(self, offset):
self.offset = offset
self.update_position()
def make_visible(self):
self.visible = True
def make_invisible(self):
self.visible = False
def toggle_visibility(self):
self.visible = not self.visible
def scale_to(self, ratio):
"""set sprite's scale ratio (overwriting)
Ratio must be a positive float.
"""
if ratio < 0:
raise AttributeError("ratio must be a positive float")
self.scale = ratio
self.update_image()
def scale_by(self, ratio):
"""set sprite's scale ratio (accumalating)
Ratio must be a positive float.
"""
self.scale_to(self.scale + ratio)
def scaled_size(self):
"""return the sprite's calculated size, after scaling
"""
(width, height) = self.original.get_size()
width = (int)(width * self.scale)
height = (int)(height * self.scale)
return (width, height)
def rotate_to(self, degree):
"""rotate sprite's image by a degree (overwriting)
"""
self.rotate = degree % 360 #TODO magic number?
self.update_image()
def rotate_by(self, degree):
""" rotate sprite's image by a degree (accumalating)
"""
self.rotate_to(self.rotate + degree)
def add(self, *groups):
"""add the sprite to groups
Sprite.add(*groups): return None
Any number of Group instances can be passed as arguments. The
Sprite will be added to the Groups it is not already a member of.
"""
has = self.__g.__contains__
for group in groups:
if hasattr(group, '_spritegroup'):
if not has(group):
group.add_internal(self)
self.add_internal(group)
else:
self.add(*group)
def remove(self, *groups):
"""remove the sprite from groups
Sprite.remove(*groups): return None
Any number of Group instances can be passed as arguments. The Sprite
will be removed from the Groups it is currently a member of.
"""
has = self.__g.__contains__
for group in groups:
if hasattr(group, '_spritegroup'):
if has(group):
group.remove_internal(self)
self.remove_internal(group)
else:
self.remove(*group)
def add_internal(self, group):
self.__g[group] = 0
def remove_internal(self, group):
del self.__g[group]
def update(self, *args):
"""method to control sprite behavior
Sprite.update(*args):
The default implementation of this method does nothing; it's just a
convenient "hook" that you can override. This method is called by
Group.update() with whatever arguments you give it.
There is no need to use this method if not using the convenience
method by the same name in the Group class.
"""
pass
def kill(self):
"""remove the Sprite from all Groups
Sprite.kill(): return None
The Sprite is removed from all the Groups that contain it. This won't
change anything about the state of the Sprite. It is possible to
continue to use the Sprite after this method has been called, including
adding it to Groups.
"""
for c in self.__g:
c.remove_internal(self)
self.__g.clear()
def groups(self):
"""list of Groups that contain this Sprite
Sprite.groups(): return group_list
Returns a list of all the Groups that contain this Sprite.
"""
return list(self.__g)
def alive(self):
"""does the sprite belong to any groups
Sprite.alive(): return bool
Returns True when the Sprite belongs to one or more Groups.
"""
return truth(self.__g)
def __repr__(self):
return "<%s sprite(in %d groups)>" \
% (self.__class__.__name__, len(self.__g))
class AggregatedSprite(Sprite):
    """a sprite composed of several child sprites acting as one entity

    pygame.sprite.AggregatedSprite(*groups): return AggregatedSprite

    Holds a list of child sprites and forwards every visual change to
    each of them.
    """

    def __init__(self, *groups):
        """initialize the aggregate and reset rect/position defaults
        """
        super(AggregatedSprite, self).__init__(*groups)
        # The aggregate has no image of its own; start from an empty
        # rect at the origin instead of the None defaults set by Sprite.
        self.rect = pygame.Rect(0, 0, 0, 0)
        self.position = (0, 0)

    def _get_sprites(self):
        """lazily create and return the child sprite list
        """
        if not hasattr(self, '_sprites'):
            self._sprites = []
        return self._sprites

    def _set_sprites(self, sprites):
        """replace the child sprite list wholesale
        """
        self._sprites = sprites

    def add_sprite(self, sprite):
        """append a single sprite to the child list
        """
        self.sprites.append(sprite)

    sprites = property(_get_sprites,
                       _set_sprites,
                       doc="List of sprites to aggregate")

    def draw(self, surface):
        """draw every child sprite and return the union of their rects

        AggregatedSprite.draw(surface): return Rect
        """
        #TODO consider sprite's layer attribute
        combined = pygame.Rect(0, 0, 0, 0)
        for child in self.sprites:
            drawn = child.draw(surface)
            if drawn != 0:
                combined.union_ip(drawn)
        return combined

    def on_visual_set(self, method, *args, **kwargs):
        """forward a visual attribute change to every child sprite
        """
        if method.__name__ == '_set_position':
            # Position changes become offsets so children keep their
            # relative placement.
            for child in self.sprites:
                child.offset = args[0]
        else:
            for child in self.sprites:
                method(child, *args, **kwargs)
class AbstractGroup(object):
"""base class for containers of sprites
AbstractGroup does everything needed to behave as a normal group. You can
easily subclass a new group class from this or the other groups below if
you want to add more features.
Any AbstractGroup-derived sprite groups act like sequences and support
iteration, len, and so on.
"""
# dummy val to identify sprite groups, and avoid infinite recursion
_spritegroup = True
    def __init__(self):
        # sprite -> last drawn rect (or 0 when the sprite has not been
        # drawn yet); doubles as the membership set.
        self.spritedict = {}
        # rects of recently removed sprites, consumed by clear().
        self.lostsprites = []
def sprites(self):
"""get a list of sprites in the group
Group.sprite(): return list
Returns an object that can be looped over with a 'for' loop. (For now,
it is always a list, but this could change in a future version of
pygame.) Alternatively, you can get the same information by iterating
directly over the sprite group, e.g. 'for sprite in group'.
"""
return list(self.spritedict)
    def add_internal(self, sprite):
        # 0 marks "not drawn yet"; draw() replaces it with the blit rect.
        self.spritedict[sprite] = 0
def remove_internal(self, sprite):
r = self.spritedict[sprite]
if r is not 0:
self.lostsprites.append(r)
del self.spritedict[sprite]
    def has_internal(self, sprite):
        # Membership test used by add/remove/has; O(1) dict lookup.
        return sprite in self.spritedict
    def copy(self):
        """copy a group with all the same sprites

        Group.copy(): return Group

        Returns a copy of the group that is an instance of the same class
        and has the same sprites in it.

        NOTE(review): assumes the subclass constructor accepts an
        iterable of sprites; AbstractGroup.__init__ itself does not.
        """
        return self.__class__(self.sprites())
    def __iter__(self):
        # Iterate over a snapshot list so the group can be mutated
        # while looping.
        return iter(self.sprites())
    def __contains__(self, sprite):
        # Delegates to has(), which also accepts iterables and groups.
        return self.has(sprite)
    def add(self, *sprites):
        """add sprite(s) to group

        Group.add(sprite, list, group, ...): return None

        Adds a sprite or sequence of sprites to a group.
        """
        for sprite in sprites:
            # It's possible that some sprite is also an iterator.
            # If this is the case, we should add the sprite itself,
            # and not the iterator object.
            if isinstance(sprite, Sprite):
                if not self.has_internal(sprite):
                    # Register membership on both sides.
                    self.add_internal(sprite)
                    sprite.add_internal(self)
            else:
                try:
                    # See if sprite is an iterator, like a list or sprite
                    # group.
                    self.add(*sprite)
                except (TypeError, AttributeError):
                    # Not iterable. This is probably a sprite that is not an
                    # instance of the Sprite class or is not an instance of a
                    # subclass of the Sprite class. Alternately, it could be an
                    # old-style sprite group.
                    if hasattr(sprite, '_spritegroup'):
                        for spr in sprite.sprites():
                            if not self.has_internal(spr):
                                self.add_internal(spr)
                                spr.add_internal(self)
                    elif not self.has_internal(sprite):
                        self.add_internal(sprite)
                        sprite.add_internal(self)
    def remove(self, *sprites):
        """remove sprite(s) from group

        Group.remove(sprite, list, or group, ...): return None

        Removes a sprite or sequence of sprites from a group.
        """
        # This function behaves essentially the same as Group.add. It first
        # tries to handle each argument as an instance of the Sprite class. If
        # that failes, then it tries to handle the argument as an iterable
        # object. If that failes, then it tries to handle the argument as an
        # old-style sprite group. Lastly, if that fails, it assumes that the
        # normal Sprite methods should be used.
        for sprite in sprites:
            if isinstance(sprite, Sprite):
                if self.has_internal(sprite):
                    # Unregister membership on both sides.
                    self.remove_internal(sprite)
                    sprite.remove_internal(self)
            else:
                try:
                    self.remove(*sprite)
                except (TypeError, AttributeError):
                    if hasattr(sprite, '_spritegroup'):
                        for spr in sprite.sprites():
                            if self.has_internal(spr):
                                self.remove_internal(spr)
                                spr.remove_internal(self)
                    elif self.has_internal(sprite):
                        self.remove_internal(sprite)
                        sprite.remove_internal(self)
    def has(self, *sprites):
        """ask if group has a sprite or sprites
        Group.has(sprite or group, ...): return bool
        Returns True if the given sprite or sprites are contained in the
        group. Alternatively, you can get the same information using the
        'in' operator, e.g. 'sprite in group', 'subgroup in group'.
        """
        # ALL arguments must be members for the result to be True; the
        # first miss short-circuits with False. Note that calling has()
        # with no arguments returns False (return_value never flips).
        return_value = False
        for sprite in sprites:
            if isinstance(sprite, Sprite):
                # Check for Sprite instance's membership in this group
                if self.has_internal(sprite):
                    return_value = True
                else:
                    return False
            else:
                try:
                    # Treat the argument as an iterable of sprites/groups.
                    if self.has(*sprite):
                        return_value = True
                    else:
                        return False
                except (TypeError, AttributeError):
                    if hasattr(sprite, '_spritegroup'):
                        # Old-style group: every one of its sprites must
                        # be a member.
                        for spr in sprite.sprites():
                            if self.has_internal(spr):
                                return_value = True
                            else:
                                return False
                    else:
                        # Duck-typed sprite: fall back to direct lookup.
                        if self.has_internal(sprite):
                            return_value = True
                        else:
                            return False
        return return_value
def update(self, *args):
"""call the update method of every member sprite
Group.update(*args): return None
Calls the update method of every member sprite. All arguments that
were passed to this method are passed to the Sprite update function.
"""
for s in self.sprites():
s.update(*args)
def draw(self, surface):
"""draw all sprites onto the surface
Group.draw(surface): return None
Draws all of the member sprites onto the given surface.
"""
sprites = self.sprites()
surface_blit = surface.blit
for spr in sprites:
if (hasattr(spr, 'draw')):
self.spritedict[spr] = spr.draw(surface)
else:
self.spritedict[spr] = surface_blit(spr.image, spr.rect)
self.lostsprites = []
def clear(self, surface, bgd):
"""erase the previous position of all sprites
Group.clear(surface, bgd): return None
Clears the area under every drawn sprite in the group. The bgd
argument should be Surface which is the same dimensions as the
screen surface. The bgd could also be a function which accepts
the given surface and the area to be cleared as arguments.
"""
if callable(bgd):
for r in self.lostsprites:
bgd(surface, r)
for r in self.spritedict.values():
if r is not 0:
bgd(surface, r)
else:
surface_blit = surface.blit
for r in self.lostsprites:
surface_blit(bgd, r, r)
for r in self.spritedict.values():
if r is not 0:
surface_blit(bgd, r, r)
def empty(self):
"""remove all sprites
Group.empty(): return None
Removes all the sprites from the group.
"""
for s in self.sprites():
self.remove_internal(s)
s.remove_internal(self)
def __nonzero__(self):
return truth(self.sprites())
def __len__(self):
"""return number of sprites in group
Group.len(group): return int
Returns the number of sprites contained in the group.
"""
return len(self.sprites())
def __repr__(self):
return "<%s(%d sprites)>" % (self.__class__.__name__, len(self))
class Group(AbstractGroup):
    """container class for many Sprites
    pygame.sprite.Group(*sprites): return Group
    A plain Sprite container. Subclass it for containers with more
    specific behavior. Any number of sprites may be handed to the
    constructor and they are added immediately. Standard operations
    supported by the group:
        in    test if a Sprite is contained
        len   the number of Sprites contained
        bool  test if any Sprites are contained
        iter  iterate through all the Sprites
    Members are unordered, so drawing and iteration happen in no
    particular order.
    """
    def __init__(self, *initial_sprites):
        AbstractGroup.__init__(self)
        self.add(*initial_sprites)
RenderPlain = Group  # historical alias kept for backwards compatibility
RenderClear = Group  # historical alias kept for backwards compatibility
Remove the Python properties from `AggregatedSprite`.
`sprites` is now a plain attribute (a list initialized in `__init__`) instead of a property.
import pygame
from operator import truth
# Flag values for anchors.
# TODO: use Rect's constants
ANCHOR_TOPLEFT = 101      # anchor at the sprite's top-left corner (default)
ANCHOR_TOPRIGHT = 102     # anchor at the top-right corner
ANCHOR_BOTTOMLEFT = 103   # anchor at the bottom-left corner
ANCHOR_BOTTOMRIGHT = 104  # anchor at the bottom-right corner
ANCHOR_CENTER = 105       # anchor at the sprite's center
class Sprite(object):
    """simple base class for visible game objects
    pygame.sprite.Sprite(*groups): return Sprite
    The base class for visible game objects. Derived classes will want to
    override the Sprite.update() method and assign Sprite.image and Sprite.rect
    attributes. The initializer can accept any number of Group instances that
    the Sprite will become a member of.
    When subclassing the Sprite class, be sure to call the base initializer
    before adding the Sprite to Groups.
    """
    def __init__(self, *groups):
        """initialize sprite instance
        Initializes attributes to default values, and optionally
        adds it to given groups.
        """
        # 'original' keeps the untransformed image so scale/rotate can be
        # re-applied from a clean source in update_image().
        self.image = self.original = None
        self.rect = None
        self.dirty = False
        # Initialize position
        self.anchor = ANCHOR_TOPLEFT
        self.position = None
        self.offset = (0, 0)
        # Initialize visual attributes
        self.scale = 1
        self.rotate = 0
        self.visible = True
        self.__g = {} # The groups the sprite is in
        if groups:
            self.add(*groups)
    def draw(self, surface):
        """draw the sprite's image on a surface
        Sprite.draw(surface): return Rect
        This should be called by a group's own `draw` method.
        On failure or if sprite should not be drawn, returns 0.
        """
        if self.visible:
            return surface.blit(self.image, self.rect)
        else:
            return 0
    def _visual_set(method):
        """callback that gets called on changes to visual attributes
        Used to trigger the `on_visual_set` event, which is fired
        before the change and decides whether to continue with it.
        """
        #TODO consider using a Python decorator for such setters
        def wrapper(self, *args, **kwargs):
            # Fire the hook (if any) before applying the change, then mark
            # the sprite dirty so redraw logic can pick it up.
            if hasattr(self, 'on_visual_set'):
                self.on_visual_set(method, *args, **kwargs)
            self.dirty = True
            return method(self, *args, **kwargs)
        return wrapper
    def set_image(self, img):
        """set a new image object for the sprite
        Stores img as both the working image and the pristine original,
        then re-applies scale/rotation.
        """
        self.image = self.original = img
        self.update_image()
    def update_image(self):
        """update the sprite's image object
        usually useful for transformations, this method does
        not change the 'original' attribute."""
        img = self.original
        if img is not None:
            # Transform from the pristine original each time so repeated
            # calls do not accumulate scaling/rotation artifacts.
            if self.scale != 1:
                img = pygame.transform.scale(img, self.scaled_size())
            if self.rotate != 0:
                img = pygame.transform.rotate(img, self.rotate)
            self.image = img
            self.rect = img.get_rect()
            self.move_to(self.position)
    def anchor_value(self):
        """return actual position of sprite's anchor
        If anchor was provided in coordinates, use them.
        Otherwise, translate anchor flags to coordinates.
        """
        #TODO handle negative values
        #TODO use same constants as Rect's
        if type(self.anchor) is tuple:
            return self.anchor
        elif self.anchor == ANCHOR_TOPLEFT:
            return (0, 0)
        elif self.anchor == ANCHOR_TOPRIGHT:
            return (self.rect.width, 0)
        elif self.anchor == ANCHOR_BOTTOMLEFT:
            return (0, self.rect.height)
        elif self.anchor == ANCHOR_BOTTOMRIGHT:
            return (self.rect.width, self.rect.height)
        elif self.anchor == ANCHOR_CENTER:
            return (self.rect.width / 2, self.rect.height / 2)
        else:
            return None # shouldn't happen :(
    def update_position(self):
        """ re-calculating the sprite's rect position
        """
        (x, y) = self.position
        (off_x, off_y) = self.offset
        (anc_x, anc_y) = self.anchor_value()
        self.rect.topleft = (x + off_x - anc_x, y + off_y - anc_y)
    def move_to(self, pos):
        """move sprite to a certain position
        pos may be None (position unset); the rect is only updated
        for a real coordinate pair.
        """
        #TODO handle float values
        self.position = pos
        if pos:
            self.update_position()
    def move_by(self, delta):
        """move sprite by a certain delta
        """
        (delta_x, delta_y) = delta
        (current_x, current_y) = self.position
        self.move_to((current_x + delta_x, current_y + delta_y))
    def set_offset(self, offset):
        # Offset shifts the drawn rect without changing the logical position.
        self.offset = offset
        self.update_position()
    def make_visible(self):
        self.visible = True
    def make_invisible(self):
        self.visible = False
    def toggle_visibility(self):
        self.visible = not self.visible
    def scale_to(self, ratio):
        """set sprite's scale ratio (overwriting)
        Ratio must be a positive float.
        """
        # NOTE: raises AttributeError for backwards compatibility, although
        # ValueError would be the conventional exception here.
        if ratio < 0:
            raise AttributeError("ratio must be a positive float")
        self.scale = ratio
        self.update_image()
    def scale_by(self, ratio):
        """set sprite's scale ratio (accumalating)
        Ratio must be a positive float.
        """
        self.scale_to(self.scale + ratio)
    def scaled_size(self):
        """return the sprite's calculated size, after scaling
        """
        (width, height) = self.original.get_size()
        width = int(width * self.scale)
        height = int(height * self.scale)
        return (width, height)
    def rotate_to(self, degree):
        """rotate sprite's image by a degree (overwriting)
        The angle is normalized into [0, 360).
        """
        self.rotate = degree % 360
        self.update_image()
    def rotate_by(self, degree):
        """ rotate sprite's image by a degree (accumalating)
        """
        self.rotate_to(self.rotate + degree)
    def add(self, *groups):
        """add the sprite to groups
        Sprite.add(*groups): return None
        Any number of Group instances can be passed as arguments. The
        Sprite will be added to the Groups it is not already a member of.
        """
        has = self.__g.__contains__
        for group in groups:
            if hasattr(group, '_spritegroup'):
                # Register on both sides so sprite and group stay in sync.
                if not has(group):
                    group.add_internal(self)
                    self.add_internal(group)
            else:
                # Not a group: treat it as an iterable of groups.
                self.add(*group)
    def remove(self, *groups):
        """remove the sprite from groups
        Sprite.remove(*groups): return None
        Any number of Group instances can be passed as arguments. The Sprite
        will be removed from the Groups it is currently a member of.
        """
        has = self.__g.__contains__
        for group in groups:
            if hasattr(group, '_spritegroup'):
                if has(group):
                    group.remove_internal(self)
                    self.remove_internal(group)
            else:
                # Not a group: treat it as an iterable of groups.
                self.remove(*group)
    def add_internal(self, group):
        self.__g[group] = 0
    def remove_internal(self, group):
        del self.__g[group]
    def update(self, *args):
        """method to control sprite behavior
        Sprite.update(*args):
        The default implementation of this method does nothing; it's just a
        convenient "hook" that you can override. This method is called by
        Group.update() with whatever arguments you give it.
        There is no need to use this method if not using the convenience
        method by the same name in the Group class.
        """
        pass
    def kill(self):
        """remove the Sprite from all Groups
        Sprite.kill(): return None
        The Sprite is removed from all the Groups that contain it. This won't
        change anything about the state of the Sprite. It is possible to
        continue to use the Sprite after this method has been called, including
        adding it to Groups.
        """
        for c in self.__g:
            c.remove_internal(self)
        self.__g.clear()
    def groups(self):
        """list of Groups that contain this Sprite
        Sprite.groups(): return group_list
        Returns a list of all the Groups that contain this Sprite.
        """
        return list(self.__g)
    def alive(self):
        """does the sprite belong to any groups
        Sprite.alive(): return bool
        Returns True when the Sprite belongs to one or more Groups.
        """
        return truth(self.__g)
    def __repr__(self):
        return "<%s sprite(in %d groups)>" \
            % (self.__class__.__name__, len(self.__g))
class AggregatedSprite(Sprite):
    """aggregated sprite class collects many sprites into single entity
    pygame.sprite.AggregatedSprite(*groups): return AggregatedSprite
    Holds a list of child sprites and forwards every visual change to
    each of them, so the aggregate behaves like one sprite.
    """
    def __init__(self, *groups):
        """initialize the aggregate sprite
        Sets up base Sprite state, an empty child list, and a zeroed
        rect/position that grow as children are added.
        """
        super(AggregatedSprite, self).__init__(*groups)
        # child sprites, in draw order
        self.sprites = []
        # start with an empty rect at the origin
        self.rect = pygame.Rect(0, 0, 0, 0)
        self.position = (0, 0)
    def add_sprite(self, sprite):
        """append one sprite to the list of child sprites"""
        self.sprites.append(sprite)
    def draw(self, surface):
        """draw child sprites in order
        AggregatedSprite.draw(surface): return Rect
        Returns the union of every child sprite's drawn rect; children
        that report 0 (not drawn) are ignored.
        """
        #TODO consider sprite's layer attribute
        combined = pygame.Rect(0, 0, 0, 0)
        for child in self.sprites:
            drawn = child.draw(surface)
            if drawn != 0:
                combined.union_ip(drawn)
        return combined
    def on_visual_set(self, method, *args, **kwargs):
        """forward a visual attribute change to every child sprite"""
        if method.__name__ == '_set_position':
            # Position changes become per-child offsets.
            for child in self.sprites:
                child.offset = args[0]
        else:
            for child in self.sprites:
                method(child, *args, **kwargs)
class AbstractGroup(object):
    """base class for containers of sprites
    AbstractGroup does everything needed to behave as a normal group. You can
    easily subclass a new group class from this or the other groups below if
    you want to add more features.
    Any AbstractGroup-derived sprite groups act like sequences and support
    iteration, len, and so on.
    """
    # dummy val to identify sprite groups, and avoid infinite recursion
    _spritegroup = True
    def __init__(self):
        # maps sprite -> last drawn rect (0 means "never drawn")
        self.spritedict = {}
        # rects vacated by removed sprites, consumed by clear()
        self.lostsprites = []
    def sprites(self):
        """get a list of sprites in the group
        Group.sprite(): return list
        Returns an object that can be looped over with a 'for' loop. (For now,
        it is always a list, but this could change in a future version of
        pygame.) Alternatively, you can get the same information by iterating
        directly over the sprite group, e.g. 'for sprite in group'.
        """
        return list(self.spritedict)
    def add_internal(self, sprite):
        self.spritedict[sprite] = 0
    def remove_internal(self, sprite):
        r = self.spritedict[sprite]
        # Fixed: was `r is not 0` — identity comparison with an int literal
        # is implementation-dependent (SyntaxWarning on Python 3.8+).
        if r != 0:
            self.lostsprites.append(r)
        del self.spritedict[sprite]
    def has_internal(self, sprite):
        return sprite in self.spritedict
    def copy(self):
        """copy a group with all the same sprites
        Group.copy(): return Group
        Returns a copy of the group that is an instance of the same class
        and has the same sprites in it.
        """
        return self.__class__(self.sprites())
    def __iter__(self):
        return iter(self.sprites())
    def __contains__(self, sprite):
        return self.has(sprite)
    def add(self, *sprites):
        """add sprite(s) to group
        Group.add(sprite, list, group, ...): return None
        Adds a sprite or sequence of sprites to a group.
        """
        for sprite in sprites:
            # It's possible that some sprite is also an iterator.
            # If this is the case, we should add the sprite itself,
            # and not the iterator object.
            if isinstance(sprite, Sprite):
                if not self.has_internal(sprite):
                    self.add_internal(sprite)
                    sprite.add_internal(self)
            else:
                try:
                    # See if sprite is an iterator, like a list or sprite
                    # group.
                    self.add(*sprite)
                except (TypeError, AttributeError):
                    # Not iterable. This is probably a sprite that is not an
                    # instance of the Sprite class or is not an instance of a
                    # subclass of the Sprite class. Alternately, it could be an
                    # old-style sprite group.
                    if hasattr(sprite, '_spritegroup'):
                        for spr in sprite.sprites():
                            if not self.has_internal(spr):
                                self.add_internal(spr)
                                spr.add_internal(self)
                    elif not self.has_internal(sprite):
                        self.add_internal(sprite)
                        sprite.add_internal(self)
    def remove(self, *sprites):
        """remove sprite(s) from group
        Group.remove(sprite, list, or group, ...): return None
        Removes a sprite or sequence of sprites from a group.
        """
        # This function behaves essentially the same as Group.add. It first
        # tries to handle each argument as an instance of the Sprite class. If
        # that failes, then it tries to handle the argument as an iterable
        # object. If that failes, then it tries to handle the argument as an
        # old-style sprite group. Lastly, if that fails, it assumes that the
        # normal Sprite methods should be used.
        for sprite in sprites:
            if isinstance(sprite, Sprite):
                if self.has_internal(sprite):
                    self.remove_internal(sprite)
                    sprite.remove_internal(self)
            else:
                try:
                    self.remove(*sprite)
                except (TypeError, AttributeError):
                    if hasattr(sprite, '_spritegroup'):
                        for spr in sprite.sprites():
                            if self.has_internal(spr):
                                self.remove_internal(spr)
                                spr.remove_internal(self)
                    elif self.has_internal(sprite):
                        self.remove_internal(sprite)
                        sprite.remove_internal(self)
    def has(self, *sprites):
        """ask if group has a sprite or sprites
        Group.has(sprite or group, ...): return bool
        Returns True if the given sprite or sprites are contained in the
        group. Alternatively, you can get the same information using the
        'in' operator, e.g. 'sprite in group', 'subgroup in group'.
        """
        return_value = False
        for sprite in sprites:
            if isinstance(sprite, Sprite):
                # Check for Sprite instance's membership in this group
                if self.has_internal(sprite):
                    return_value = True
                else:
                    return False
            else:
                try:
                    if self.has(*sprite):
                        return_value = True
                    else:
                        return False
                except (TypeError, AttributeError):
                    if hasattr(sprite, '_spritegroup'):
                        for spr in sprite.sprites():
                            if self.has_internal(spr):
                                return_value = True
                            else:
                                return False
                    else:
                        if self.has_internal(sprite):
                            return_value = True
                        else:
                            return False
        return return_value
    def update(self, *args):
        """call the update method of every member sprite
        Group.update(*args): return None
        Calls the update method of every member sprite. All arguments that
        were passed to this method are passed to the Sprite update function.
        """
        for s in self.sprites():
            s.update(*args)
    def draw(self, surface):
        """draw all sprites onto the surface
        Group.draw(surface): return None
        Draws all of the member sprites onto the given surface.
        """
        sprites = self.sprites()
        surface_blit = surface.blit
        for spr in sprites:
            if hasattr(spr, 'draw'):
                self.spritedict[spr] = spr.draw(surface)
            else:
                self.spritedict[spr] = surface_blit(spr.image, spr.rect)
        self.lostsprites = []
    def clear(self, surface, bgd):
        """erase the previous position of all sprites
        Group.clear(surface, bgd): return None
        Clears the area under every drawn sprite in the group. The bgd
        argument should be Surface which is the same dimensions as the
        screen surface. The bgd could also be a function which accepts
        the given surface and the area to be cleared as arguments.
        """
        # Fixed below: `r is not 0` -> `r != 0` (identity comparison with
        # an int literal is implementation-dependent).
        if callable(bgd):
            for r in self.lostsprites:
                bgd(surface, r)
            for r in self.spritedict.values():
                if r != 0:
                    bgd(surface, r)
        else:
            surface_blit = surface.blit
            for r in self.lostsprites:
                surface_blit(bgd, r, r)
            for r in self.spritedict.values():
                if r != 0:
                    surface_blit(bgd, r, r)
    def empty(self):
        """remove all sprites
        Group.empty(): return None
        Removes all the sprites from the group.
        """
        for s in self.sprites():
            self.remove_internal(s)
            s.remove_internal(self)
    def __nonzero__(self):
        return truth(self.sprites())
    def __len__(self):
        """return number of sprites in group
        Group.len(group): return int
        Returns the number of sprites contained in the group.
        """
        return len(self.sprites())
    def __repr__(self):
        return "<%s(%d sprites)>" % (self.__class__.__name__, len(self))
class Group(AbstractGroup):
    """container class for many Sprites
    pygame.sprite.Group(*sprites): return Group
    The basic Sprite container; subclass it to build containers with
    richer behavior. Every argument handed to the constructor is added
    to the group right away. Standard operations supported:
        in    test if a Sprite is contained
        len   the number of Sprites contained
        bool  test if any Sprites are contained
        iter  iterate through all the Sprites
    Membership is unordered: drawing and iteration visit the sprites
    in no particular order.
    """
    def __init__(self, *sprites):
        super(Group, self).__init__()
        self.add(*sprites)
RenderPlain = Group  # historical alias kept for backwards compatibility
RenderClear = Group  # historical alias kept for backwards compatibility
|
import os
from os.path import join, dirname, abspath
DEBUG = True
TEMPLATE_DEBUG = DEBUG
THUMBNAIL_DEBUG = False
PROJECT_PATH = dirname(dirname(dirname(abspath(__file__))))
ADMINS = (
('Mathieu Comandon', 'strider@strycore.com'),
)
MANAGERS = ADMINS
INTERNAL_IPS = ('127.0.0.1',)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': join(PROJECT_PATH, 'lutris.db'),
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
TIME_ZONE = 'Europe/Paris'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
USE_TZ = True
MEDIA_ROOT = join(PROJECT_PATH, 'media')
MEDIA_URL = '/media/'
STATIC_ROOT = join(PROJECT_PATH, 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
join(PROJECT_PATH, "public"),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
SECRET_KEY = os.environ.get('SECRET_KEY', 'changeme')
TEMPLATE_LOADERS = (
('pyjade.ext.django.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'lutrisweb.urls'
WSGI_APPLICATION = 'lutrisweb.wsgi.application'
TEMPLATE_DIRS = (
join(PROJECT_PATH, "templates"),
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
'django.core.context_processors.request',
"django.contrib.messages.context_processors.messages",
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'grappelli',
'django.contrib.admin',
'django.contrib.admindocs',
'sorl.thumbnail',
'south',
'tastypie',
'django_jcrop',
'crispy_forms',
'django_select2',
'django_nose',
'markupfield',
'django_openid_auth',
'common',
'games',
'accounts',
'tosec',
)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
SOUTH_TESTS_MIGRATE = False
BANNER_SIZE = "184x69"
ICON_SIZE = "32x32"
ICON_LARGE_SIZE = "256x256"
THUMBNAIL_ENGINE = 'sorl.thumbnail.engines.convert_engine.Engine'
THUMBNAIL_COLORSPACE = "sRGB"
AUTH_USER_MODEL = 'accounts.User'
AUTH_PROFILE_MODULE = "accounts.Profile"
ACCOUNT_ACTIVATION_DAYS = 3
LOGIN_REDIRECT_URL = "/"
LOGIN_URL = "/user/login/"
AUTHENTICATION_BACKENDS = (
'django_openid_auth.auth.OpenIDBackend',
'django.contrib.auth.backends.ModelBackend',
)
OPENID_SSO_SERVER_URL = 'http://steamcommunity.com/openid'
# Modify temporarily the session serializer because the json serializer in
# Django 1.6 can't serialize openid.yadis.manager.YadisServiceManager objects
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
RELEASES_URL = "http://lutris.net/releases/"
CLIENT_VERSION = "0.3.4"
# Crispy forms
CRISPY_TEMPLATE_PACK = 'bootstrap3'
CRISPY_FAIL_SILENTLY = not DEBUG
# Admin
GRAPPELLI_ADMIN_TITLE = "Lutris Administration"
# Email
DEFAULT_FROM_EMAIL = "admin@lutris.net"
EMAIL_SUBJECT_PREFIX = "[Lutris] "
# Celery
CELERY_SEND_TASK_ERROR_EMAILS = True
BROKER_URL = 'amqp://guest:guest@localhost//'
# API Keys
STEAM_API_KEY = "********************************"
# Logging
SEND_BROKEN_LINK_EMAILS = False
# Handler names shared by most project loggers below.
LOGGING_HANDLERS = ['file', 'mail_admins', 'console']
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s '
                      '%(process)d %(thread)d %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        }
    },
    'handlers': {
        'null': {
            'level': 'DEBUG',
            'class': 'django.utils.log.NullHandler',
        },
        'mail_admins': {
            'level': 'ERROR',
            'include_html': True,
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        },
        'file': {
            'level': 'DEBUG',
            'class': 'logging.FileHandler',
            'formatter': 'verbose',
            'filename': 'lutrisweb.log'
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
    },
    'loggers': {
        'django': {
            # Fixed: was [LOGGING_HANDLERS], which nested the handler list
            # inside another list — dictConfig expects a flat list of
            # handler-id strings.
            'handlers': LOGGING_HANDLERS,
            'propagate': True,
            'level': 'INFO',
        },
        'factory': {
            'handlers': ['null'],
            'propagate': False,
            'level': 'INFO',
        },
        'django.request': {
            'handlers': LOGGING_HANDLERS,
            'level': 'WARNING',
            'propagate': True,
        },
        'lutrisweb': {
            'handlers': LOGGING_HANDLERS,
            'level': 'DEBUG',
            'propagate': True,
        },
        'accounts': {
            'handlers': LOGGING_HANDLERS,
            'level': 'DEBUG',
            'propagate': True,
        },
        'common': {
            'handlers': LOGGING_HANDLERS,
            'level': 'DEBUG',
            'propagate': True,
        },
        'games': {
            'handlers': LOGGING_HANDLERS,
            'level': 'DEBUG',
            'propagate': True,
        }
    }
}
Fix LOGGING configuration: the 'django' logger's handlers value was a nested list ([LOGGING_HANDLERS]); dictConfig expects the flat list of handler names (LOGGING_HANDLERS).
import os
from os.path import join, dirname, abspath
DEBUG = True
TEMPLATE_DEBUG = DEBUG
THUMBNAIL_DEBUG = False
PROJECT_PATH = dirname(dirname(dirname(abspath(__file__))))
ADMINS = (
('Mathieu Comandon', 'strider@strycore.com'),
)
MANAGERS = ADMINS
INTERNAL_IPS = ('127.0.0.1',)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': join(PROJECT_PATH, 'lutris.db'),
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
TIME_ZONE = 'Europe/Paris'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
USE_TZ = True
MEDIA_ROOT = join(PROJECT_PATH, 'media')
MEDIA_URL = '/media/'
STATIC_ROOT = join(PROJECT_PATH, 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
join(PROJECT_PATH, "public"),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
SECRET_KEY = os.environ.get('SECRET_KEY', 'changeme')
TEMPLATE_LOADERS = (
('pyjade.ext.django.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'lutrisweb.urls'
WSGI_APPLICATION = 'lutrisweb.wsgi.application'
TEMPLATE_DIRS = (
join(PROJECT_PATH, "templates"),
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
'django.core.context_processors.request',
"django.contrib.messages.context_processors.messages",
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'grappelli',
'django.contrib.admin',
'django.contrib.admindocs',
'sorl.thumbnail',
'south',
'tastypie',
'django_jcrop',
'crispy_forms',
'django_select2',
'django_nose',
'markupfield',
'django_openid_auth',
'common',
'games',
'accounts',
'tosec',
)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
SOUTH_TESTS_MIGRATE = False
BANNER_SIZE = "184x69"
ICON_SIZE = "32x32"
ICON_LARGE_SIZE = "256x256"
THUMBNAIL_ENGINE = 'sorl.thumbnail.engines.convert_engine.Engine'
THUMBNAIL_COLORSPACE = "sRGB"
AUTH_USER_MODEL = 'accounts.User'
AUTH_PROFILE_MODULE = "accounts.Profile"
ACCOUNT_ACTIVATION_DAYS = 3
LOGIN_REDIRECT_URL = "/"
LOGIN_URL = "/user/login/"
AUTHENTICATION_BACKENDS = (
'django_openid_auth.auth.OpenIDBackend',
'django.contrib.auth.backends.ModelBackend',
)
OPENID_SSO_SERVER_URL = 'http://steamcommunity.com/openid'
# Modify temporarily the session serializer because the json serializer in
# Django 1.6 can't serialize openid.yadis.manager.YadisServiceManager objects
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
RELEASES_URL = "http://lutris.net/releases/"
CLIENT_VERSION = "0.3.4"
# Crispy forms
CRISPY_TEMPLATE_PACK = 'bootstrap3'
CRISPY_FAIL_SILENTLY = not DEBUG
# Admin
GRAPPELLI_ADMIN_TITLE = "Lutris Administration"
# Email
DEFAULT_FROM_EMAIL = "admin@lutris.net"
EMAIL_SUBJECT_PREFIX = "[Lutris] "
# Celery
CELERY_SEND_TASK_ERROR_EMAILS = True
BROKER_URL = 'amqp://guest:guest@localhost//'
# API Keys
STEAM_API_KEY = "********************************"
# Logging
SEND_BROKEN_LINK_EMAILS = False
LOGGING_HANDLERS = ['file', 'mail_admins', 'console']
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
}
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'django.utils.log.NullHandler',
},
'mail_admins': {
'level': 'ERROR',
'include_html': True,
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'formatter': 'verbose',
'filename': 'lutrisweb.log'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'loggers': {
'django': {
'handlers': LOGGING_HANDLERS,
'propagate': True,
'level': 'INFO',
},
'factory': {
'handlers': ['null'],
'propagate': False,
'level': 'INFO',
},
'django.request': {
'handlers': LOGGING_HANDLERS,
'level': 'WARNING',
'propagate': True,
},
'lutrisweb': {
'handlers': LOGGING_HANDLERS,
'level': 'DEBUG',
'propagate': True,
},
'accounts': {
'handlers': LOGGING_HANDLERS,
'level': 'DEBUG',
'propagate': True,
},
'common': {
'handlers': LOGGING_HANDLERS,
'level': 'DEBUG',
'propagate': True,
},
'games': {
'handlers': LOGGING_HANDLERS,
'level': 'DEBUG',
'propagate': True,
}
}
}
|
from couchdbkit import ResourceNotFound
from corehq.apps.reports.generic import GenericReportView, GenericTabularReport
from corehq.apps.reports.fields import ReportSelectField
from corehq.apps.reports.filters.base import BaseSingleOptionFilter
from corehq.apps.fixtures.dispatcher import FixtureInterfaceDispatcher
from corehq.apps.fixtures.models import FixtureDataType, FixtureDataItem, _id_from_doc, FieldList, FixtureTypeField, FixtureItemField
from corehq.apps.fixtures.views import data_table, require_can_edit_fixtures
from dimagi.utils.decorators.memoized import memoized
from django.utils.translation import ugettext_noop
from django.utils.decorators import method_decorator
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn, DataTablesColumnGroup
class FixtureInterface(GenericReportView):
    """Base report view for the Lookup Tables (fixtures) section."""
    section_name = ugettext_noop("Lookup Tables")
    base_template = 'fixtures/fixtures_base.html'
    # Rendered synchronously (no async report loading).
    asynchronous = False
    dispatcher = FixtureInterfaceDispatcher
    exportable = False
    # These views render even before any filters are chosen.
    needs_filters = False
class FixtureSelectField(ReportSelectField):
    """Legacy report field for choosing a lookup table by id."""
    slug = "table_id"
    name = ugettext_noop("Select a Table")
    cssId = "select_table"
    cssClasses = "span2"
    @property
    def field_opts(self):
        # All FixtureDataType documents for the current domain.
        fdts = list(FixtureDataType.by_domain(self.domain))
        return fdts
    @property
    def default_option(self):
        if not self.field_opts:
            return "NO TABLE"
        return "Default: " + self.field_opts[0].tag
    def update_params(self):
        self.selected = self.request.GET.get(self.slug, '')
        # NOTE(review): `fo` is a FixtureDataType document while
        # self.selected is a GET string, so `fo != self.selected` looks
        # always-true; confirm whether `_id_from_doc(fo) != self.selected`
        # was intended.
        unselected_list = [fo for fo in self.field_opts if fo != self.selected]
        self.options = [{'val': _id_from_doc(f), 'text': f.tag} for f in unselected_list]
class FixtureSelectFilter(BaseSingleOptionFilter):
    """Single-option report filter listing the domain's lookup tables."""
    slug = "table_id"
    label = ""
    placeholder = "place"
    default_text = "Select a Table"
    @property
    def selected(self):
        # ko won't display default selected-value as it should, display default_text instead
        return ""
    @property
    @memoized
    def fixtures(self):
        # All FixtureDataType documents for the current domain (memoized
        # so the couch view is hit at most once per request).
        fdts = list(FixtureDataType.by_domain(self.domain))
        return fdts
    @property
    @memoized
    def options(self):
        # (value, label) pairs: table doc id -> table tag.
        return [(_id_from_doc(f), f.tag) for f in self.fixtures]
class FixtureViewInterface(GenericTabularReport, FixtureInterface):
    """Tabular report rendering the rows of one selected lookup table."""
    name = ugettext_noop("View Tables")
    slug = "view_lookup_tables"
    report_template_path = 'fixtures/view_table.html'
    fields = ['corehq.apps.fixtures.interface.FixtureSelectFilter']
    @property
    def report_context(self):
        # No tables exist in the domain: swap in the empty-state template.
        if not self.has_tables():
            self.report_template_path = 'fixtures/no_table.html'
            return {"selected_table": self.table.get("table_id", "")}
        # Tables exist but none is selected yet.
        if not self.request.GET.get("table_id", None):
            return {"table_not_selected": True}
        try:
            context = super(FixtureViewInterface, self).report_context
        except ResourceNotFound:
            # Selected table id no longer resolves to a document.
            return {"table_not_selected": True}
        context.update({"selected_table": self.table.get("table_id", "")})
        return context
    @memoized
    def has_tables(self):
        # bool(...) replaces the redundant "True if ... else False" form.
        return bool(list(FixtureDataType.by_domain(self.domain)))
    @property
    @memoized
    def table(self):
        # Headers/rows for the selected table, or an empty shell when no
        # table is available or selected.
        if self.has_tables() and self.request.GET.get("table_id", None):
            return data_table(self.request, self.domain)
        else:
            return {"headers": None, "rows": None}
    @property
    def headers(self):
        return self.table["headers"]
    @property
    def rows(self):
        return self.table["rows"]
class FixtureEditInterface(FixtureInterface):
    """Management view for creating and editing lookup tables."""
    name = ugettext_noop("Manage Tables")
    slug = "edit_lookup_tables"
    report_template_path = 'fixtures/manage_tables.html'
    @property
    def report_context(self):
        # NOTE(review): super(FixtureInterface, self) skips FixtureInterface
        # itself in the MRO; this is only equivalent to the conventional
        # super(FixtureEditInterface, self) if FixtureInterface does not
        # override report_context — confirm.
        context = super(FixtureInterface, self).report_context
        context.update(types=self.data_types)
        return context
    @property
    def data_types(self):
        # All lookup-table type definitions for this domain.
        fdts = list(FixtureDataType.by_domain(self.domain))
        return fdts
# delete old field filter
from couchdbkit import ResourceNotFound
from corehq.apps.reports.generic import GenericReportView, GenericTabularReport
from corehq.apps.reports.filters.base import BaseSingleOptionFilter
from corehq.apps.fixtures.dispatcher import FixtureInterfaceDispatcher
from corehq.apps.fixtures.models import FixtureDataType, FixtureDataItem, _id_from_doc, FieldList, FixtureTypeField, FixtureItemField
from corehq.apps.fixtures.views import data_table, require_can_edit_fixtures
from dimagi.utils.decorators.memoized import memoized
from django.utils.translation import ugettext_noop
from django.utils.decorators import method_decorator
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn, DataTablesColumnGroup
class FixtureInterface(GenericReportView):
    # Shared base for the lookup-table (fixture) admin report views below.
    section_name = ugettext_noop("Lookup Tables")
    base_template = 'fixtures/fixtures_base.html'
    asynchronous = False
    dispatcher = FixtureInterfaceDispatcher
    exportable = False
    needs_filters = False
class FixtureSelectFilter(BaseSingleOptionFilter):
    """Single-option filter over the domain's fixture (lookup) tables."""
    slug = "table_id"
    label = ""
    placeholder = "place"
    default_text = "Select a Table"

    @property
    def selected(self):
        # ko won't display default selected-value as it should, display default_text instead
        return ""

    @property
    @memoized
    def fixtures(self):
        # Every lookup-table definition in the current domain.
        return list(FixtureDataType.by_domain(self.domain))

    @property
    @memoized
    def options(self):
        # Dropdown choices: (document id, human-readable tag).
        return [(_id_from_doc(data_type), data_type.tag)
                for data_type in self.fixtures]
class FixtureViewInterface(GenericTabularReport, FixtureInterface):
    """Tabular report rendering the rows of one selected lookup table."""
    name = ugettext_noop("View Tables")
    slug = "view_lookup_tables"
    report_template_path = 'fixtures/view_table.html'
    fields = ['corehq.apps.fixtures.interface.FixtureSelectFilter']

    @property
    def report_context(self):
        if not self.has_tables():
            # Domain has no lookup tables: render the empty-state template.
            self.report_template_path = 'fixtures/no_table.html'
            return {"selected_table": self.table.get("table_id", "")}
        if not self.request.GET.get("table_id"):
            # User has not picked a table yet.
            return {"table_not_selected": True}
        try:
            context = super(FixtureViewInterface, self).report_context
        except ResourceNotFound:
            # Selected table id no longer resolves to a document.
            return {"table_not_selected": True}
        context.update(selected_table=self.table.get("table_id", ""))
        return context

    @memoized
    def has_tables(self):
        return bool(list(FixtureDataType.by_domain(self.domain)))

    @property
    @memoized
    def table(self):
        has_selection = self.has_tables() and self.request.GET.get("table_id")
        if has_selection:
            return data_table(self.request, self.domain)
        return {"headers": None, "rows": None}

    @property
    def headers(self):
        return self.table["headers"]

    @property
    def rows(self):
        return self.table["rows"]
class FixtureEditInterface(FixtureInterface):
    """Management view for adding/editing the domain's lookup tables."""
    name = ugettext_noop("Manage Tables")
    slug = "edit_lookup_tables"
    report_template_path = 'fixtures/manage_tables.html'

    @property
    def report_context(self):
        # FIX: super() takes the class it is called *from*; the original
        # passed the parent (FixtureInterface), which makes the MRO lookup
        # skip FixtureInterface itself.
        context = super(FixtureEditInterface, self).report_context
        context.update(types=self.data_types)
        return context

    @property
    def data_types(self):
        # All lookup-table type definitions for this domain.
        fdts = list(FixtureDataType.by_domain(self.domain))
        return fdts
# ---
from radical.entk import AppManager as Amgr
from hypothesis import given, settings
import hypothesis.strategies as st
from radical.entk import Pipeline, Stage, Task, states
from radical.entk.exceptions import *
from radical.entk.utils.sync_initiator import sync_with_master
import radical.utils as ru
import pytest
import pika
from threading import Event, Thread
from multiprocessing import Process
import os
# RabbitMQ endpoint used by all tests; overridable via the environment.
hostname = os.environ.get('RMQ_HOSTNAME', 'localhost')
port = int(os.environ.get('RMQ_PORT', 5672))
# MongoDB URL for the RADICAL-Pilot backend (used by test_state_order).
MLAB = os.environ.get('RADICAL_PILOT_DBURL')
# Hypothesis settings
settings.register_profile("travis", max_examples=100, deadline=None)
settings.load_profile("travis")
def test_amgr_initialization():
    """AppManager construction: uid, logging and RMQ attributes get sane
    defaults, both with and without an explicit name."""
    amgr_name = ru.generate_id('test.appmanager.%(item_counter)04d', ru.ID_CUSTOM)
    amgr = Amgr(hostname=hostname, port=port, name=amgr_name)

    assert amgr._name.split('.') == amgr_name.split('.')
    assert amgr._sid.split('.') == amgr_name.split('.')
    assert amgr._uid.split('.') == ['appmanager', '0000']
    assert type(amgr._logger) == type(ru.Logger('radical.tests'))
    assert type(amgr._prof) == type(ru.Profiler('radical.tests'))
    assert type(amgr._report) == type(ru.Reporter('radical.tests'))
    assert isinstance(amgr.name, str)

    # RabbitMQ inits
    assert amgr._mq_hostname == hostname
    assert amgr._port == port

    # RabbitMQ Queues
    assert amgr._num_pending_qs == 1
    assert amgr._num_completed_qs == 1
    assert isinstance(amgr._pending_queue, list)
    assert isinstance(amgr._completed_queue, list)

    # Global parameters to have default values
    assert amgr._mqs_setup == True
    assert amgr._resource_desc == None
    assert amgr._task_manager == None
    assert amgr._workflow == None
    assert amgr._resubmit_failed == False
    assert amgr._reattempts == 3
    assert amgr._cur_attempt == 1
    assert amgr._autoterminate == True
    assert isinstance(amgr.shared_data, list)

    amgr = Amgr(hostname=hostname, port=port)

    assert amgr._uid.split('.') == ['appmanager', '0000']
    assert type(amgr._logger) == type(ru.Logger('radical.tests'))
    assert type(amgr._prof) == type(ru.Profiler('radical.tests'))
    assert type(amgr._report) == type(ru.Reporter('radical.tests'))
    assert isinstance(amgr.name, str)

    # RabbitMQ inits
    assert amgr._mq_hostname == hostname
    assert amgr._port == port

    # RabbitMQ Queues
    assert amgr._num_pending_qs == 1
    assert amgr._num_completed_qs == 1
    assert isinstance(amgr._pending_queue, list)
    assert isinstance(amgr._completed_queue, list)

    # Global parameters to have default values
    # FIX: this copy asserted `_mqs_setup == False` for the second (default)
    # AppManager while asserting True for the first fresh instance above; the
    # corrected duplicate of this test later in the file asserts True for
    # both, so the False here was stale. TODO confirm against Amgr.__init__.
    assert amgr._mqs_setup == True
    assert amgr._resource_desc == None
    assert amgr._task_manager == None
    assert amgr._workflow == None
    assert amgr._resubmit_failed == False
    assert amgr._reattempts == 3
    assert amgr._cur_attempt == 1
    assert amgr._autoterminate == True
    assert isinstance(amgr.shared_data, list)
def test_amgr_read_config():
    """_read_config: packaged defaults first, then every field overridden
    from a JSON config file when all kwargs are None."""
    amgr = Amgr()

    # Defaults from the packaged configuration.
    assert amgr._mq_hostname == 'localhost'
    assert amgr._port == 5672
    assert amgr._reattempts == 3
    assert amgr._resubmit_failed == False
    assert amgr._autoterminate == True
    assert amgr._write_workflow == False
    assert amgr._rts == 'radical.pilot'
    assert amgr._num_pending_qs == 1
    assert amgr._num_completed_qs == 1
    assert amgr._rmq_cleanup == True
    assert amgr._rts_config == { "sandbox_cleanup": False, "db_cleanup": False}

    d = {"hostname": "radical.two",
         "port": 25672,
         "reattempts": 5,
         "resubmit_failed": True,
         "autoterminate": False,
         "write_workflow": True,
         "rts": "mock",
         "rts_config": { "sandbox_cleanup": True, "db_cleanup": True},
         "pending_qs": 2,
         "completed_qs": 3,
         "rmq_cleanup": False}

    ru.write_json(d, './config.json')
    # Every kwarg None => each value must be read from the config file.
    amgr._read_config(config_path='./',
                      hostname=None,
                      port=None,
                      reattempts=None,
                      resubmit_failed=None,
                      autoterminate=None,
                      write_workflow=None,
                      rts=None,
                      rmq_cleanup=None,
                      rts_config=None)

    assert amgr._mq_hostname == d['hostname']
    assert amgr._port == d['port']
    assert amgr._reattempts == d['reattempts']
    assert amgr._resubmit_failed == d['resubmit_failed']
    assert amgr._autoterminate == d['autoterminate']
    assert amgr._write_workflow == d['write_workflow']
    assert amgr._rts == d['rts']
    assert amgr._rts_config == d['rts_config']
    assert amgr._num_pending_qs == d['pending_qs']
    assert amgr._num_completed_qs == d['completed_qs']
    assert amgr._rmq_cleanup == d['rmq_cleanup']

    os.remove('./config.json')
def test_amgr_resource_description_assignment():
    """Assigning resource_desc instantiates the RTS-specific ResourceManager."""
    res_dict = {
        'resource': 'xsede.supermic',
        'walltime': 30,
        'cpus': 1000,
        'project': 'TG-MCB090174'
    }

    amgr = Amgr(rts='radical.pilot')
    amgr.resource_desc = res_dict
    from radical.entk.execman.rp import ResourceManager
    assert isinstance(amgr._rmgr, ResourceManager)

    amgr = Amgr(rts='mock')
    amgr.resource_desc = res_dict
    from radical.entk.execman.mock import ResourceManager
    assert isinstance(amgr._rmgr, ResourceManager)


def test_amgr_assign_workflow():
    """workflow setter rejects non-Pipeline members; the private attribute
    accepts any container of Pipelines."""
    amgr = Amgr()

    with pytest.raises(TypeError):
        amgr.workflow = [1, 2, 3]

    with pytest.raises(TypeError):
        amgr.workflow = set([1, 2, 3])

    p1 = Pipeline()
    p2 = Pipeline()
    p3 = Pipeline()

    # Direct assignment to the private attr bypasses validation.
    amgr._workflow = [p1, p2, p3]
    amgr._workflow = set([p1, p2, p3])


def test_amgr_assign_shared_data():
    """shared_data is forwarded to the resource manager."""
    amgr = Amgr(rts='radical.pilot', hostname=hostname, port=port)

    res_dict = {
        'resource': 'xsede.supermic',
        'walltime': 30,
        'cpus': 20,
        'project': 'TG-MCB090174'
    }

    amgr.resource_desc = res_dict
    amgr.shared_data = ['file1.txt','file2.txt']
    assert amgr._rmgr.shared_data == ['file1.txt','file2.txt']


def test_amgr_run():
    """run() requires a workflow; workflow assignment requires stages."""
    amgr = Amgr(hostname=hostname, port=port)

    with pytest.raises(MissingError):
        amgr.run()

    p1 = Pipeline()
    p2 = Pipeline()
    p3 = Pipeline()

    # Empty pipelines (no stages) are rejected.
    with pytest.raises(MissingError):
        amgr.workflow = [p1, p2, p3]

    # Remaining lines of run() should be tested in the integration
    # tests
def test_amgr_run_mock():
    """End-to-end run() of a one-task workflow on the mock RTS."""
    p = Pipeline()
    s = Stage()
    t = Task()
    t.name = 'simulation'
    t.executable = ['/bin/date']
    s.tasks = t
    p.add_stages(s)

    res_dict = {
        'resource': 'local.localhost',
        'walltime': 5,
        'cpus': 1,
        'project': ''
    }

    appman = Amgr(hostname=hostname, port=port, rts="mock")
    appman.resource_desc = res_dict
    appman.workflow = [p]
    appman.run()


def test_amgr_resource_terminate():
    """resource_terminate() shuts down cleanly with a live task manager."""
    res_dict = {
        'resource': 'xsede.supermic',
        'walltime': 30,
        'cpus': 20,
        'project': 'TG-MCB090174'
    }

    from radical.entk.execman.rp import TaskManager

    amgr = Amgr(rts='radical.pilot', hostname=hostname, port=port)
    amgr.resource_desc = res_dict
    amgr._setup_mqs()
    amgr._rmq_cleanup = True
    amgr._task_manager = TaskManager(sid='test',
                                     pending_queue=list(),
                                     completed_queue=list(),
                                     mq_hostname=amgr._mq_hostname,
                                     rmgr=amgr._rmgr,
                                     port=amgr._port
                                     )
    amgr.resource_terminate()


def test_amgr_terminate():
    """terminate() shuts down cleanly with a live task manager."""
    res_dict = {
        'resource': 'xsede.supermic',
        'walltime': 30,
        'cpus': 20,
        'project': 'TG-MCB090174'
    }

    from radical.entk.execman.rp import TaskManager

    amgr = Amgr(rts='radical.pilot', hostname=hostname, port=port)
    amgr.resource_desc = res_dict
    amgr._setup_mqs()
    amgr._rmq_cleanup = True
    amgr._task_manager = TaskManager(sid='test',
                                     pending_queue=list(),
                                     completed_queue=list(),
                                     mq_hostname=amgr._mq_hostname,
                                     rmgr=amgr._rmgr,
                                     port=amgr._port
                                     )
    amgr.terminate()
def test_amgr_setup_mqs():
    """_setup_mqs declares the session's RMQ queues and records them in a
    '.<sid>.txt' bookkeeping file."""
    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    assert len(amgr._pending_queue) == 1
    assert len(amgr._completed_queue) == 1

    mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=amgr._mq_hostname, port=amgr._port))
    mq_channel = mq_connection.channel()

    qs = [
        '%s-tmgr-to-sync' % amgr._sid,
        '%s-cb-to-sync' % amgr._sid,
        '%s-sync-to-tmgr' % amgr._sid,
        '%s-sync-to-cb' % amgr._sid,
        '%s-pendingq-1' % amgr._sid,
        '%s-completedq-1' % amgr._sid
    ]

    # Clean up the declared queues.
    for q in qs:
        mq_channel.queue_delete(queue=q)

    # The bookkeeping file must list exactly the declared queues.
    with open('.%s.txt' % amgr._sid, 'r') as fp:
        lines = fp.readlines()
        for ind, val in enumerate(lines):
            lines[ind] = val.strip()

    assert set(qs) == set(lines)


def test_amgr_cleanup_mqs():
    """_cleanup_mqs removes every session queue; purging them afterwards
    must fail with ChannelClosed."""
    amgr = Amgr(hostname=hostname, port=port)
    sid = amgr._sid

    amgr._setup_mqs()
    amgr._cleanup_mqs()

    mq_connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=hostname, port=port))

    qs = ['%s-tmgr-to-sync' % sid,
          '%s-cb-to-sync' % sid,
          '%s-sync-to-tmgr' % sid,
          '%s-sync-to-cb' % sid,
          '%s-pendingq-1' % sid,
          '%s-completedq-1' % sid]

    for q in qs:
        with pytest.raises(pika.exceptions.ChannelClosed):
            mq_channel = mq_connection.channel()
            mq_channel.queue_purge(q)
def func_for_synchronizer_test(sid, p, logger, profiler):
    """Child-process helper: mark each task of the pipeline's first stage
    COMPLETED and push the update to the master over RMQ."""
    mq_connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=hostname, port=port))
    mq_channel = mq_connection.channel()

    for t in p.stages[0].tasks:
        t.state = states.COMPLETED
        sync_with_master(obj=t,
                         obj_type='Task',
                         channel=mq_channel,
                         queue='%s-tmgr-to-sync' % sid,
                         logger=logger,
                         local_prof=profiler)

    mq_connection.close()


def test_amgr_synchronizer():
    """The synchronizer thread applies task-state updates sent from a
    separate process."""
    logger = ru.Logger('radical.entk.temp_logger')
    profiler = ru.Profiler(name='radical.entk.temp')
    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    p = Pipeline()
    s = Stage()

    # Create and add 10 tasks to the stage
    for cnt in range(10):
        t = Task()
        t.executable = ['some-executable-%s' % cnt]
        s.add_tasks(t)

    p.add_stages(s)
    p._assign_uid(amgr._sid)
    p._validate()

    amgr.workflow = [p]

    for t in p.stages[0].tasks:
        assert t.state == states.INITIAL

    assert p.stages[0].state == states.INITIAL
    assert p.state == states.INITIAL

    # Start the synchronizer method in a thread
    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer, name='synchronizer-thread')
    sync_thread.start()

    # Push state updates from a separate process.
    proc = Process(target=func_for_synchronizer_test, name='temp-proc',
                   args=(amgr._sid, p, logger, profiler))
    proc.start()
    proc.join()

    amgr._terminate_sync.set()
    sync_thread.join()

    # The synchronizer must have propagated COMPLETED onto every task.
    for t in p.stages[0].tasks:
        assert t.state == states.COMPLETED
def test_sid_in_mqs():
    """All session queues can be consumed from, i.e. they exist under the
    session id after _setup_mqs."""
    appman = Amgr(hostname=hostname, port=port)
    appman._setup_mqs()
    sid = appman._sid

    qs = [
        '%s-tmgr-to-sync' % sid,
        '%s-cb-to-sync' % sid,
        '%s-sync-to-tmgr' % sid,
        '%s-sync-to-cb' % sid,
    ]

    mq_connection = pika.BlockingConnection(
        pika.ConnectionParameters(
            host=hostname,
            port=port)
    )
    mq_channel = mq_connection.channel()

    def callback():
        print True

    # basic_consume raises if the queue does not exist.
    for q in qs:
        try:
            mq_channel.basic_consume(callback, queue=q, no_ack=True)
        except Exception as ex:
            raise EnTKError(ex)
def test_state_order():
    """
    **Purpose**: Test if the Pipeline, Stage and Task are assigned their states in the correct order
    """

    def create_single_task():
        t1 = Task()
        t1.name = 'simulation'
        t1.executable = ['/bin/date']
        t1.copy_input_data = []
        t1.copy_output_data = []
        return t1

    p1 = Pipeline()
    p1.name = 'p1'

    s = Stage()
    s.name = 's1'
    s.tasks = create_single_task()
    s.add_tasks(create_single_task())

    p1.add_stages(s)

    res_dict = {
        'resource': 'local.localhost',
        'walltime': 5,
        'cpus': 1,
        'project': ''
    }

    # RP needs a live MongoDB URL for this end-to-end run.
    os.environ['RADICAL_PILOT_DBURL'] = MLAB
    os.environ['RP_ENABLE_OLD_DEFINES'] = 'True'

    appman = Amgr(hostname=hostname, port=port)
    appman.resource_desc = res_dict

    appman.workflow = [p1]
    appman.run()

    # State histories must follow the documented lifecycle order.
    p_state_hist = p1.state_history
    assert p_state_hist == ['DESCRIBED', 'SCHEDULING', 'DONE']

    s_state_hist = p1.stages[0].state_history
    assert s_state_hist == ['DESCRIBED', 'SCHEDULING', 'SCHEDULED', 'DONE']

    tasks = p1.stages[0].tasks

    for t in tasks:
        t_state_hist = t.state_history
        print t_state_hist
        assert t_state_hist == ['DESCRIBED', 'SCHEDULING', 'SCHEDULED',
                                'SUBMITTING', 'EXECUTED', 'DONE']
# fix the same stupid test again
from radical.entk import AppManager as Amgr
from hypothesis import given, settings
import hypothesis.strategies as st
from radical.entk import Pipeline, Stage, Task, states
from radical.entk.exceptions import *
from radical.entk.utils.sync_initiator import sync_with_master
import radical.utils as ru
import pytest
import pika
from threading import Event, Thread
from multiprocessing import Process
import os
# RabbitMQ endpoint used by all tests; overridable via the environment.
hostname = os.environ.get('RMQ_HOSTNAME', 'localhost')
port = int(os.environ.get('RMQ_PORT', 5672))
# MongoDB URL for the RADICAL-Pilot backend (used by test_state_order).
MLAB = os.environ.get('RADICAL_PILOT_DBURL')
# Hypothesis settings
settings.register_profile("travis", max_examples=100, deadline=None)
settings.load_profile("travis")
def test_amgr_initialization():
    """AppManager construction: uid, logging and RMQ attributes get sane
    defaults, both with and without an explicit name."""
    amgr_name = ru.generate_id('test.appmanager.%(item_counter)04d', ru.ID_CUSTOM)
    amgr = Amgr(hostname=hostname, port=port,name=amgr_name)

    assert amgr._name.split('.') == amgr_name.split('.')
    assert amgr._sid.split('.') == amgr_name.split('.')
    assert amgr._uid.split('.') == ['appmanager', '0000']
    assert type(amgr._logger) == type(ru.Logger('radical.tests'))
    assert type(amgr._prof) == type(ru.Profiler('radical.tests'))
    assert type(amgr._report) == type(ru.Reporter('radical.tests'))
    assert isinstance(amgr.name, str)

    # RabbitMQ inits
    assert amgr._mq_hostname == hostname
    assert amgr._port == port

    # RabbitMQ Queues
    assert amgr._num_pending_qs == 1
    assert amgr._num_completed_qs == 1
    assert isinstance(amgr._pending_queue, list)
    assert isinstance(amgr._completed_queue, list)

    # Global parameters to have default values
    assert amgr._mqs_setup == True
    assert amgr._resource_desc == None
    assert amgr._task_manager == None
    assert amgr._workflow == None
    assert amgr._resubmit_failed == False
    assert amgr._reattempts == 3
    assert amgr._cur_attempt == 1
    assert amgr._autoterminate == True
    assert isinstance(amgr.shared_data, list)

    # Same checks for a default (unnamed) AppManager.
    amgr = Amgr(hostname=hostname, port=port)

    assert amgr._uid.split('.') == ['appmanager', '0000']
    assert type(amgr._logger) == type(ru.Logger('radical.tests'))
    assert type(amgr._prof) == type(ru.Profiler('radical.tests'))
    assert type(amgr._report) == type(ru.Reporter('radical.tests'))
    assert isinstance(amgr.name, str)

    # RabbitMQ inits
    assert amgr._mq_hostname == hostname
    assert amgr._port == port

    # RabbitMQ Queues
    assert amgr._num_pending_qs == 1
    assert amgr._num_completed_qs == 1
    assert isinstance(amgr._pending_queue, list)
    assert isinstance(amgr._completed_queue, list)

    # Global parameters to have default values
    assert amgr._mqs_setup == True
    assert amgr._resource_desc == None
    assert amgr._task_manager == None
    assert amgr._workflow == None
    assert amgr._resubmit_failed == False
    assert amgr._reattempts == 3
    assert amgr._cur_attempt == 1
    assert amgr._autoterminate == True
    assert isinstance(amgr.shared_data, list)
def test_amgr_read_config():
    """_read_config: packaged defaults first, then every field overridden
    from a JSON config file when all kwargs are None."""
    amgr = Amgr()

    # Defaults from the packaged configuration.
    assert amgr._mq_hostname == 'localhost'
    assert amgr._port == 5672
    assert amgr._reattempts == 3
    assert amgr._resubmit_failed == False
    assert amgr._autoterminate == True
    assert amgr._write_workflow == False
    assert amgr._rts == 'radical.pilot'
    assert amgr._num_pending_qs == 1
    assert amgr._num_completed_qs == 1
    assert amgr._rmq_cleanup == True
    assert amgr._rts_config == { "sandbox_cleanup": False, "db_cleanup": False}

    d = {"hostname": "radical.two",
         "port": 25672,
         "reattempts": 5,
         "resubmit_failed": True,
         "autoterminate": False,
         "write_workflow": True,
         "rts": "mock",
         "rts_config": { "sandbox_cleanup": True, "db_cleanup": True},
         "pending_qs": 2,
         "completed_qs": 3,
         "rmq_cleanup": False}

    ru.write_json(d, './config.json')
    # Every kwarg None => each value must be read from the config file.
    amgr._read_config(config_path='./',
                      hostname=None,
                      port=None,
                      reattempts=None,
                      resubmit_failed=None,
                      autoterminate=None,
                      write_workflow=None,
                      rts=None,
                      rmq_cleanup=None,
                      rts_config=None)

    assert amgr._mq_hostname == d['hostname']
    assert amgr._port == d['port']
    assert amgr._reattempts == d['reattempts']
    assert amgr._resubmit_failed == d['resubmit_failed']
    assert amgr._autoterminate == d['autoterminate']
    assert amgr._write_workflow == d['write_workflow']
    assert amgr._rts == d['rts']
    assert amgr._rts_config == d['rts_config']
    assert amgr._num_pending_qs == d['pending_qs']
    assert amgr._num_completed_qs == d['completed_qs']
    assert amgr._rmq_cleanup == d['rmq_cleanup']

    os.remove('./config.json')
def test_amgr_resource_description_assignment():
    """Assigning resource_desc instantiates the RTS-specific ResourceManager."""
    res_dict = {
        'resource': 'xsede.supermic',
        'walltime': 30,
        'cpus': 1000,
        'project': 'TG-MCB090174'
    }

    amgr = Amgr(rts='radical.pilot')
    amgr.resource_desc = res_dict
    from radical.entk.execman.rp import ResourceManager
    assert isinstance(amgr._rmgr, ResourceManager)

    amgr = Amgr(rts='mock')
    amgr.resource_desc = res_dict
    from radical.entk.execman.mock import ResourceManager
    assert isinstance(amgr._rmgr, ResourceManager)


def test_amgr_assign_workflow():
    """workflow setter rejects non-Pipeline members; the private attribute
    accepts any container of Pipelines."""
    amgr = Amgr()

    with pytest.raises(TypeError):
        amgr.workflow = [1, 2, 3]

    with pytest.raises(TypeError):
        amgr.workflow = set([1, 2, 3])

    p1 = Pipeline()
    p2 = Pipeline()
    p3 = Pipeline()

    # Direct assignment to the private attr bypasses validation.
    amgr._workflow = [p1, p2, p3]
    amgr._workflow = set([p1, p2, p3])


def test_amgr_assign_shared_data():
    """shared_data is forwarded to the resource manager."""
    amgr = Amgr(rts='radical.pilot', hostname=hostname, port=port)

    res_dict = {
        'resource': 'xsede.supermic',
        'walltime': 30,
        'cpus': 20,
        'project': 'TG-MCB090174'
    }

    amgr.resource_desc = res_dict
    amgr.shared_data = ['file1.txt','file2.txt']
    assert amgr._rmgr.shared_data == ['file1.txt','file2.txt']


def test_amgr_run():
    """run() requires a workflow; workflow assignment requires stages."""
    amgr = Amgr(hostname=hostname, port=port)

    with pytest.raises(MissingError):
        amgr.run()

    p1 = Pipeline()
    p2 = Pipeline()
    p3 = Pipeline()

    # Empty pipelines (no stages) are rejected.
    with pytest.raises(MissingError):
        amgr.workflow = [p1, p2, p3]

    # Remaining lines of run() should be tested in the integration
    # tests


def test_amgr_run_mock():
    """End-to-end run() of a one-task workflow on the mock RTS."""
    p = Pipeline()
    s = Stage()
    t = Task()
    t.name = 'simulation'
    t.executable = ['/bin/date']
    s.tasks = t
    p.add_stages(s)

    res_dict = {
        'resource': 'local.localhost',
        'walltime': 5,
        'cpus': 1,
        'project': ''
    }

    appman = Amgr(hostname=hostname, port=port, rts="mock")
    appman.resource_desc = res_dict
    appman.workflow = [p]
    appman.run()
def test_amgr_resource_terminate():
    """resource_terminate() shuts down cleanly with a live task manager."""
    res_dict = {
        'resource': 'xsede.supermic',
        'walltime': 30,
        'cpus': 20,
        'project': 'TG-MCB090174'
    }

    from radical.entk.execman.rp import TaskManager

    amgr = Amgr(rts='radical.pilot', hostname=hostname, port=port)
    amgr.resource_desc = res_dict
    amgr._setup_mqs()
    amgr._rmq_cleanup = True
    amgr._task_manager = TaskManager(sid='test',
                                     pending_queue=list(),
                                     completed_queue=list(),
                                     mq_hostname=amgr._mq_hostname,
                                     rmgr=amgr._rmgr,
                                     port=amgr._port
                                     )
    amgr.resource_terminate()


def test_amgr_terminate():
    """terminate() shuts down cleanly with a live task manager."""
    res_dict = {
        'resource': 'xsede.supermic',
        'walltime': 30,
        'cpus': 20,
        'project': 'TG-MCB090174'
    }

    from radical.entk.execman.rp import TaskManager

    amgr = Amgr(rts='radical.pilot', hostname=hostname, port=port)
    amgr.resource_desc = res_dict
    amgr._setup_mqs()
    amgr._rmq_cleanup = True
    amgr._task_manager = TaskManager(sid='test',
                                     pending_queue=list(),
                                     completed_queue=list(),
                                     mq_hostname=amgr._mq_hostname,
                                     rmgr=amgr._rmgr,
                                     port=amgr._port
                                     )
    amgr.terminate()
def test_amgr_setup_mqs():
    """_setup_mqs declares the session's RMQ queues and records them in a
    '.<sid>.txt' bookkeeping file."""
    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    assert len(amgr._pending_queue) == 1
    assert len(amgr._completed_queue) == 1

    mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=amgr._mq_hostname, port=amgr._port))
    mq_channel = mq_connection.channel()

    qs = [
        '%s-tmgr-to-sync' % amgr._sid,
        '%s-cb-to-sync' % amgr._sid,
        '%s-sync-to-tmgr' % amgr._sid,
        '%s-sync-to-cb' % amgr._sid,
        '%s-pendingq-1' % amgr._sid,
        '%s-completedq-1' % amgr._sid
    ]

    # Clean up the declared queues.
    for q in qs:
        mq_channel.queue_delete(queue=q)

    # The bookkeeping file must list exactly the declared queues.
    with open('.%s.txt' % amgr._sid, 'r') as fp:
        lines = fp.readlines()
        for ind, val in enumerate(lines):
            lines[ind] = val.strip()

    assert set(qs) == set(lines)


def test_amgr_cleanup_mqs():
    """_cleanup_mqs removes every session queue; purging them afterwards
    must fail with ChannelClosed."""
    amgr = Amgr(hostname=hostname, port=port)
    sid = amgr._sid

    amgr._setup_mqs()
    amgr._cleanup_mqs()

    mq_connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=hostname, port=port))

    qs = ['%s-tmgr-to-sync' % sid,
          '%s-cb-to-sync' % sid,
          '%s-sync-to-tmgr' % sid,
          '%s-sync-to-cb' % sid,
          '%s-pendingq-1' % sid,
          '%s-completedq-1' % sid]

    for q in qs:
        with pytest.raises(pika.exceptions.ChannelClosed):
            mq_channel = mq_connection.channel()
            mq_channel.queue_purge(q)
def func_for_synchronizer_test(sid, p, logger, profiler):
    """Child-process helper: mark each task of the pipeline's first stage
    COMPLETED and push the update to the master over RMQ."""
    mq_connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=hostname, port=port))
    mq_channel = mq_connection.channel()

    for t in p.stages[0].tasks:
        t.state = states.COMPLETED
        sync_with_master(obj=t,
                         obj_type='Task',
                         channel=mq_channel,
                         queue='%s-tmgr-to-sync' % sid,
                         logger=logger,
                         local_prof=profiler)

    mq_connection.close()


def test_amgr_synchronizer():
    """The synchronizer thread applies task-state updates sent from a
    separate process."""
    logger = ru.Logger('radical.entk.temp_logger')
    profiler = ru.Profiler(name='radical.entk.temp')
    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    p = Pipeline()
    s = Stage()

    # Create and add 10 tasks to the stage
    for cnt in range(10):
        t = Task()
        t.executable = ['some-executable-%s' % cnt]
        s.add_tasks(t)

    p.add_stages(s)
    p._assign_uid(amgr._sid)
    p._validate()

    amgr.workflow = [p]

    for t in p.stages[0].tasks:
        assert t.state == states.INITIAL

    assert p.stages[0].state == states.INITIAL
    assert p.state == states.INITIAL

    # Start the synchronizer method in a thread
    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer, name='synchronizer-thread')
    sync_thread.start()

    # Push state updates from a separate process.
    proc = Process(target=func_for_synchronizer_test, name='temp-proc',
                   args=(amgr._sid, p, logger, profiler))
    proc.start()
    proc.join()

    amgr._terminate_sync.set()
    sync_thread.join()

    # The synchronizer must have propagated COMPLETED onto every task.
    for t in p.stages[0].tasks:
        assert t.state == states.COMPLETED
def test_sid_in_mqs():
    """All session queues can be consumed from, i.e. they exist under the
    session id after _setup_mqs."""
    appman = Amgr(hostname=hostname, port=port)
    appman._setup_mqs()
    sid = appman._sid

    qs = [
        '%s-tmgr-to-sync' % sid,
        '%s-cb-to-sync' % sid,
        '%s-sync-to-tmgr' % sid,
        '%s-sync-to-cb' % sid,
    ]

    mq_connection = pika.BlockingConnection(
        pika.ConnectionParameters(
            host=hostname,
            port=port)
    )
    mq_channel = mq_connection.channel()

    def callback():
        print True

    # basic_consume raises if the queue does not exist.
    for q in qs:
        try:
            mq_channel.basic_consume(callback, queue=q, no_ack=True)
        except Exception as ex:
            raise EnTKError(ex)
def test_state_order():
    """
    **Purpose**: Test if the Pipeline, Stage and Task are assigned their states in the correct order
    """

    def create_single_task():
        t1 = Task()
        t1.name = 'simulation'
        t1.executable = ['/bin/date']
        t1.copy_input_data = []
        t1.copy_output_data = []
        return t1

    p1 = Pipeline()
    p1.name = 'p1'

    s = Stage()
    s.name = 's1'
    s.tasks = create_single_task()
    s.add_tasks(create_single_task())

    p1.add_stages(s)

    res_dict = {
        'resource': 'local.localhost',
        'walltime': 5,
        'cpus': 1,
        'project': ''
    }

    # RP needs a live MongoDB URL for this end-to-end run.
    os.environ['RADICAL_PILOT_DBURL'] = MLAB
    os.environ['RP_ENABLE_OLD_DEFINES'] = 'True'

    appman = Amgr(hostname=hostname, port=port)
    appman.resource_desc = res_dict

    appman.workflow = [p1]
    appman.run()

    # State histories must follow the documented lifecycle order.
    p_state_hist = p1.state_history
    assert p_state_hist == ['DESCRIBED', 'SCHEDULING', 'DONE']

    s_state_hist = p1.stages[0].state_history
    assert s_state_hist == ['DESCRIBED', 'SCHEDULING', 'SCHEDULED', 'DONE']

    tasks = p1.stages[0].tasks

    for t in tasks:
        t_state_hist = t.state_history
        print t_state_hist
        assert t_state_hist == ['DESCRIBED', 'SCHEDULING', 'SCHEDULED',
                                'SUBMITTING', 'EXECUTED', 'DONE']


# Ad-hoc manual invocation (left over from debugging).
test_amgr_initialization()
# ---
''' imports from python libraries '''
import os
import csv
import json
import datetime
import urllib2
import hashlib
import io
import time
# import ast
# import magic
# import subprocess
# import mimetypes
# from PIL import Image
# from StringIO import StringIO
''' imports from installed packages '''
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
# from django.core.management.base import CommandError
from django_mongokit import get_database
from mongokit import IS
try:
from bson import ObjectId
except ImportError: # old pymongo
from pymongo.objectid import ObjectId
''' imports from application folders/files '''
from gnowsys_ndf.ndf.models import Node, File
from gnowsys_ndf.ndf.models import GSystemType, AttributeType, RelationType
from gnowsys_ndf.ndf.models import GSystem, GAttribute, GRelation
from gnowsys_ndf.ndf.models import node_collection, triple_collection, gridfs_collection
from gnowsys_ndf.ndf.models import node_collection
from gnowsys_ndf.ndf.views.file import save_file
from gnowsys_ndf.ndf.views.methods import create_grelation, create_gattribute, get_language_tuple
from gnowsys_ndf.ndf.management.commands.create_theme_topic_hierarchy import add_to_collection_set
##############################################################################
# Directory holding the NROER CSV/JSON schema files processed by this command.
SCHEMA_ROOT = os.path.join(os.path.dirname(__file__), "schema_files")

script_start_str = "######### Script ran on : " + time.strftime("%c") + " #########\n------------------------------------------------------------\n"

# Per-run log buffers, flushed to files in handle()'s finally block.
log_file_not_found = []
log_file_not_found.append(script_start_str)
log_list = []  # To hold intermediate errors
log_list.append(script_start_str)

# Frequently used node documents, fetched once at import time.
file_gst = node_collection.one({'_type': 'GSystemType', "name": "File"})
home_group = node_collection.one({"name": "home", "_type": "Group"})
warehouse_group = node_collection.one({"name": 'warehouse', "_type": "Group"})
theme_gst = node_collection.one({'_type': 'GSystemType', "name": "Theme"})
theme_item_gst = node_collection.one({'_type': 'GSystemType', "name": "theme_item"})
topic_gst = node_collection.one({'_type': 'GSystemType', "name": "Topic"})
twist_gst = node_collection.one({'_type': 'GSystemType', 'name': 'Twist'})
rel_resp_at = node_collection.one({'_type': 'AttributeType', 'name': 'release_response'})
thr_inter_type_at = node_collection.one({'_type': 'AttributeType', 'name': 'thread_interaction_type'})
has_thread_rt = node_collection.one({"_type": "RelationType", "name": u"has_thread"})
has_thumbnail_rt = node_collection.one({'_type': "RelationType", 'name': u"has_thumbnail"})
discussion_enable_at = node_collection.one({"_type": "AttributeType", "name": "discussion_enable"})

# Overwritten by create_user_nroer_team() with the real django user id.
nroer_team_id = 1

# setting variable:
# If set true, despite of having file blob in gridfs, it fetches concern File which contains this _id in it's fs_file_ids field and returns it.
# If set False, returns None
update_file_exists_in_gridfs = True

# INFO notes:
# http://172.16.0.252/sites/default/files/nroer_resources/ (for room no 012)
# http://192.168.1.102/sites/default/files/nroer_resources/ (for whole ncert campus)
# http://125.23.112.5/sites/default/files/nroer_resources/ (for public i.e outside campus)
resource_link_common = "http://125.23.112.5/sites/default/files/nroer_resources/"
class Command(BaseCommand):
help = "\n\tFor saving data in gstudio DB from NROER schema files. This will create 'File' type GSystem instances.\n\tCSV file condition: The first row should contain DB names.\n"
def handle(self, *args, **options):
try:
# print "working........" + SCHEMA_ROOT
# processing each file of passed multiple CSV files as args
for file_name in args:
file_path = os.path.join(SCHEMA_ROOT, file_name)
if os.path.exists(file_path):
file_extension = os.path.splitext(file_name)[1]
if "csv" in file_extension:
total_rows = 0
# Process csv file and convert it to json format at first
info_message = "\n- CSV File (" + file_path + ") found!!!"
print info_message
log_list.append(str(info_message))
try:
csv_file_path = file_path
json_file_name = file_name.rstrip("csv") + "json"
json_file_path = os.path.join(SCHEMA_ROOT, json_file_name)
json_file_content = ""
with open(csv_file_path, 'rb') as csv_file:
csv_file_content = csv.DictReader(csv_file, delimiter=",")
json_file_content = []
for row in csv_file_content:
total_rows += 1
json_file_content.append(row)
info_message = "\n- File '" + file_name + "' contains : [ " + str(total_rows) + " ] entries/rows (excluding top-header/column-names)."
print info_message
log_list.append(str(info_message))
with open(json_file_path, 'w') as json_file:
json.dump(json_file_content, json_file, indent=4, sort_keys=False)
if os.path.exists(json_file_path):
file_path = json_file_path
is_json_file_exists = True
info_message = "\n- JSONType: File (" + json_file_path + ") created successfully.\n"
print info_message
log_list.append(str(info_message))
except Exception as e:
error_message = "\n!! CSV-JSONError: " + str(e)
print error_message
log_list.append(str(error_message))
# End of csv-json coversion
elif "json" in file_extension:
is_json_file_exists = True
else:
error_message = "\n!! FileTypeError: Please choose either 'csv' or 'json' format supported files!!!\n"
print error_message
log_list.append(str(error_message))
raise Exception(error_mesage)
if is_json_file_exists:
create_user_nroer_team()
# print nroer_team_id
# Process json file and create required GSystems, GRelations, and GAttributes
info_message = "\n------- Initiating task of processing json-file -------\n"
print info_message
log_list.append(str(info_message))
t0 = time.time()
parse_data_create_gsystem(file_path)
t1 = time.time()
time_diff = t1 - t0
total_time_minute = round( (time_diff/60), 2) if time_diff else 0
total_time_hour = round( (time_diff/(60*60)), 2) if time_diff else 0
# End of processing json file
info_message = "\n------- Task finised: Successfully processed json-file -------\n"
info_message += "- Total time taken for the processing: \n\n\t" + str(total_time_minute) + " MINUTES\n\t=== OR ===\n\t" + str(total_time_hour) + " HOURS\n"
print info_message
log_list.append(str(info_message))
# End of creation of respective GSystems, GAttributes and GRelations for Enrollment
else:
error_message = "\n!! FileNotFound: Following path (" + file_path + ") doesn't exists!!!\n"
print error_message
log_list.append(str(error_message))
raise Exception(error_message)
except Exception as e:
print str(e)
log_list.append(str(e))
finally:
if log_list:
log_list.append("\n ============================================================ End of Iteration ============================================================\n\n\n")
# print log_list
log_file_name = args[0].rstrip("csv") + "log"
log_file_path = os.path.join(SCHEMA_ROOT, log_file_name)
# print log_file_path
with open(log_file_path, 'a') as log_file:
log_file.writelines(log_list)
if log_file_not_found != [script_start_str]:
log_file_not_found.append("============================== End of Iteration =====================================\n")
log_file_not_found.append("-------------------------------------------------------------------------------------\n")
log_file_name = args[0].replace('.', '_FILES_NOT_FOUND.').rstrip("csv") + "log"
log_file_path = os.path.join(SCHEMA_ROOT, log_file_name)
# print log_file_path
with open(log_file_path, 'a') as log_file:
log_file.writelines(log_file_not_found)
# --- End of handle() ---
def create_user_nroer_team():
'''
Check for the user: "nroer_team". If it doesn't exists, create one.
'''
global nroer_team_id
if User.objects.filter(username="nroer_team"):
nroer_team_id = get_user_id("nroer_team")
else:
info_message = "\n- Creating super user: 'nroer_team': "
user = User.objects.create_superuser(username='nroer_team', password='nroer_team', email='nroer_team@example.com')
nroer_team_id = user.id
info_message += "\n- Created super user with following creadentials: "
info_message += "\n\n\tusername = 'nroer_team', \n\tpassword = 'nroer_team', \n\temail = 'nroer_team@example.com', \n\tid = '" + str(nroer_team_id) + "'"
print info_message
log_list.append(info_message)
def get_user_id(user_name):
'''
Takes the "user name" as an argument and returns:
- django "use id" as a response.
else
- returns False.
'''
try:
user_obj = User.objects.get(username=user_name)
return int(user_obj.id)
except Exception as e:
error_message = e + "\n!! for username: " + user_name
print error_message
log_list.append(str(error_message))
return False
def cast_to_data_type(value, data_type):
    '''
    Cast first argument "value" (a raw string read from csv/json) to second
    argument "data_type" and return the casted value.

    Values that cannot be represented in the requested type are returned
    unchanged (stripped), so the caller can decide what to do with them.
    '''
    value = value.strip()
    casted_value = value
    if data_type == unicode:
        casted_value = unicode(value)
    elif data_type == basestring:
        # the casting is made to unicode despite of str;
        # to prevent "authorized type" check error in mongoDB
        casted_value = unicode(value)
    elif (data_type == int) and str(value):
        # BUG FIX: the str.isdigit() gate rejected negative numbers
        # (e.g. "-2"), silently leaving them as strings; attempt the real
        # conversion and fall back to the raw value instead.
        try:
            casted_value = int(value)
        except (ValueError, TypeError):
            casted_value = value
    elif (data_type == float) and str(value):
        # BUG FIX: str.isdigit() rejected decimal strings (e.g. "3.5"),
        # so genuine float values were never cast.
        try:
            casted_value = float(value)
        except (ValueError, TypeError):
            casted_value = value
    elif (data_type == long) and str(value):
        try:
            casted_value = long(value)
        except (ValueError, TypeError):
            casted_value = value
    elif data_type == bool and str(value):  # converting unicode to int and then to bool
        casted_value = bool(int(value)) if (str.isdigit(str(value))) else bool(value)
    elif (data_type == list) or isinstance(data_type, list):
        value = value.replace("\n", "").replace(" and ", ",").split(",")
        # check for complex list type like: [int] or [unicode]
        if isinstance(data_type, list) and len(data_type) and isinstance(data_type[0], type):
            casted_value = [data_type[0](i.strip()) for i in value if i]
        else:  # otherwise normal list
            casted_value = [i.strip() for i in value if i]
    elif data_type == datetime.datetime:
        # "value" should be day/month/year, e.g.:
        # datetime.datetime.strptime("11/12/2014", "%d/%m/%Y")
        # -> datetime.datetime(2014, 12, 11, 0, 0)
        casted_value = datetime.datetime.strptime(value, "%d/%m/%Y")
    return casted_value
def get_id_from_hierarchy(hier_list):
    """
    method to check hierarchy of theme-topic.
    returns - ObjectId or None
    Args:
        hier_list (list):
            # e.g:
            # [u'NCF', u'Biology', u'Living world', u'Biological classification']
    Returns: ObjectId or None
        - If hierarchy found to be correct, _id/ObjectId will be returned.
        - else None will be returned (implicitly when the topic is missing).
    """
    # First entry is the Theme, last is the Topic; everything between is the
    # chain of theme_items linking them via prior_node.
    theme = hier_list[0]
    topic = hier_list[-1:][0]
    theme_items_list = hier_list[1:-1]
    # Case-insensitive exact-name match for the root theme inside home group.
    theme_node = node_collection.one({'name': {'$regex': "^" + unicode(theme) + "$", '$options': 'i'}, 'group_set': {'$in': [home_group._id]}, 'member_of': theme_gst._id })
    if not theme_node:
        return None
    node_id = theme_node._id
    node = theme_node
    # Walk the intermediate theme_items; each one must be a child
    # (prior_node) of the previously matched node.
    for each_item in theme_items_list:
        node = node_collection.one({
            'name': {'$regex': "^" + unicode(each_item) + "$", '$options': 'i'},
            'prior_node': {'$in': [node_id]},
            'member_of': {'$in': [theme_item_gst._id]},
            'group_set': {'$in': [home_group._id]}
        })
        # print each_item, "===", node.name
        if not node:
            return None
        node_id = node._id
    # print topic, "node_id : ", node_id
    # Finally resolve the leaf Topic under the last matched node.
    topic_node = node_collection.one({
        'name': {'$regex': "^" + unicode(topic) + "$", '$options': 'i'},
        'group_set': {'$in': [home_group._id]},
        'member_of': {'$in': [topic_gst._id]},
        'prior_node': {'$in': [node_id]}
    })
    if topic_node:
        return topic_node._id
def parse_data_create_gsystem(json_file_path):
json_file_content = ""
try:
with open(json_file_path) as json_file:
json_file_content = json_file.read()
json_documents_list = json.loads(json_file_content)
# Initiating empty node obj and other related data variables
node = node_collection.collection.File()
node_keys = node.keys()
node_structure = node.structure
# print "\n\n---------------", node_keys
json_documents_list_spaces = json_documents_list
json_documents_list = []
# Removes leading and trailing spaces from keys as well as values
for json_document_spaces in json_documents_list_spaces:
json_document = {}
for key_spaces, value_spaces in json_document_spaces.iteritems():
json_document[key_spaces.strip().lower()] = value_spaces.strip()
json_documents_list.append(json_document)
except Exception as e:
error_message = "\n!! While parsing the file ("+json_file_path+") got following error...\n " + str(e)
log_list.append(str(error_message))
raise error_message
for i, json_document in enumerate(json_documents_list):
info_message = "\n\n\n********** Processing row number : ["+ str(i + 2) + "] **********"
print info_message
log_list.append(str(info_message))
try:
parsed_json_document = {}
attribute_relation_list = []
for key in json_document.iterkeys():
parsed_key = key.lower()
if parsed_key in node_keys:
# print parsed_key
# adding the default field values e.g: created_by, member_of
# created_by:
if parsed_key == "created_by":
if json_document[key]:
temp_user_id = get_user_id(json_document[key].strip())
if temp_user_id:
parsed_json_document[parsed_key] = temp_user_id
else:
parsed_json_document[parsed_key] = nroer_team_id
else:
parsed_json_document[parsed_key] = nroer_team_id
# print "---", parsed_json_document[parsed_key]
# contributors:
elif parsed_key == "contributors":
if json_document[key]:
contrib_list = json_document[key].split(",")
temp_contributors = []
for each_user in contrib_list:
user_id = get_user_id(each_user.strip())
if user_id:
temp_contributors.append(user_id)
parsed_json_document[parsed_key] = temp_contributors
else:
parsed_json_document[parsed_key] = [nroer_team_id]
# print "===", parsed_json_document[parsed_key]
# tags:
elif (parsed_key == "tags") and json_document[key]:
parsed_json_document[parsed_key] = cast_to_data_type(json_document[key], node_structure.get(parsed_key))
# print parsed_json_document[parsed_key]
# member_of:
elif parsed_key == "member_of":
parsed_json_document[parsed_key] = [file_gst._id]
# print parsed_json_document[parsed_key]
# --- END of adding the default field values
else:
# parsed_json_document[parsed_key] = json_document[key]
parsed_json_document[parsed_key] = cast_to_data_type(json_document[key], node_structure.get(parsed_key))
# print parsed_json_document[parsed_key]
# --- END of processing for remaining fields
else: # key is not in the node_keys
parsed_json_document[key] = json_document[key]
attribute_relation_list.append(key)
# print "key : ", key
# --END of for loop ---
# calling method to create File GSystems
nodeid = create_resource_gsystem(parsed_json_document, i)
# print "nodeid : ", nodeid
collection_name = parsed_json_document.get('collection', '')
if collection_name and nodeid:
collection_node = node_collection.find_one({
'_type': 'File',
'group_set': {'$in': [home_group._id]},
'name': unicode(collection_name)
})
if collection_node:
add_to_collection_set(collection_node, nodeid)
thumbnail_url = parsed_json_document.get('thumbnail')
# print "thumbnail_url : ", thumbnail_url
if thumbnail_url and nodeid:
try:
print "\n\n- Attaching thumbnail to resource\n"
attach_resource_thumbnail(thumbnail_url, nodeid, parsed_json_document, i)
except Exception, e:
print e
# print type(nodeid), "-------", nodeid, "\n"
# create thread node
if isinstance(nodeid, ObjectId):
thread_result = create_thread_obj(nodeid)
# starting processing for the attributes and relations saving
if isinstance(nodeid, ObjectId) and attribute_relation_list:
node = node_collection.one({ "_id": ObjectId(nodeid) })
gst_possible_attributes_dict = node.get_possible_attributes(file_gst._id)
# print gst_possible_attributes_dict
relation_list = []
json_document['name'] = node.name
# Write code for setting atrributes
for key in attribute_relation_list:
is_relation = True
# print "\n", key, "----------\n"
for attr_key, attr_value in gst_possible_attributes_dict.iteritems():
# print "\n", attr_key,"======", attr_value
if key == attr_key:
# print key
is_relation = False
# setting value to "0" for int, float, long (to avoid casting error)
# if (attr_value['data_type'] in [int, float, long]) and (not json_document[key]):
# json_document[key] = 0
if json_document[key]:
# print "key : ", key, "\nvalue : ",json_document[key]
info_message = "\n- For GAttribute parsing content | key: '" + attr_key + "' having value: '" + json_document[key] + "'"
print info_message
log_list.append(str(info_message))
cast_to_data_type(json_document[key], attr_value['data_type'])
if attr_value['data_type'] == "curricular":
# setting int values for CR/XCR
if json_document[key] == "CR":
json_document[key] = 1
elif json_document[key] == "XCR":
json_document[key] = 0
else: # needs to be confirm
json_document[key] = 0
# json_document[key] = bool(int(json_document[key]))
# print attr_value['data_type'], "@@@@@@@@@ : ", json_document[key]
json_document[key] = cast_to_data_type(json_document[key], attr_value['data_type'])
# print key, " !!!!!!!!! : ", json_document[key]
subject_id = node._id
# print "\n-----\nsubject_id: ", subject_id
attribute_type_node = node_collection.one({
'_type': "AttributeType",
'$or': [
{'name':
{'$regex': "^"+attr_key+"$",
'$options': 'i'}
},
{'altnames': {'$regex': "^"+attr_key+"$", '$options': 'i'}
}
]
})
# print "\nattribute_type_node: ", attribute_type_node.name
object_value = json_document[key]
# print "\nobject_value: ", object_value
ga_node = None
info_message = "\n- Creating GAttribute ("+node.name+" -- "+attribute_type_node.name+" -- "+str(json_document[key])+") ...\n"
print info_message
log_list.append(str(info_message))
ga_node = create_gattribute(subject_id, attribute_type_node, object_value)
info_message = "- Created ga_node : "+ str(ga_node.name) + "\n"
print info_message
log_list.append(str(info_message))
# To break outer for loop as key found
break
else:
error_message = "\n!! DataNotFound: No data found for field ("+str(attr_key)+") while creating GSystem ( -- "+str(node.name)+")\n"
print error_message
log_list.append(str(error_message))
# ---END of if (key == attr_key)
if is_relation:
relation_list.append(key)
if not relation_list:
# No possible relations defined for this node
info_message = "\n!! ("+str(node.name)+"): No possible relations defined for this node.\n"
print info_message
log_list.append(str(info_message))
return
gst_possible_relations_dict = node.get_possible_relations(file_gst._id)
# processing each entry in relation_list
# print "=== relation_list : ", relation_list
for key in relation_list:
is_relation = True
for rel_key, rel_value in gst_possible_relations_dict.iteritems():
if key == rel_key:
# if key == "teaches":
is_relation = False
if json_document[key]:
# most often the data is hierarchy sep by ":"
if ":" in json_document[key]:
formatted_list = []
temp_teaches_list = json_document[key].replace("\n", "").split(":")
# print "\n temp_teaches", temp_teaches
for v in temp_teaches_list:
formatted_list.append(v.strip())
right_subject_id = []
# print "~~~~~~~~~~~", formatted_list
# rsub_id = _get_id_from_hierarchy(formatted_list)
rsub_id = get_id_from_hierarchy(formatted_list)
# print "=== rsub_id : ", rsub_id
hierarchy_output = None
# checking every item in hierarchy exist and leaf node's _id found
if rsub_id:
right_subject_id.append(rsub_id)
json_document[key] = right_subject_id
# print json_document[key]
else:
error_message = "\n!! While creating teaches rel: Any one of the item in hierarchy"+ str(json_document[key]) +"does not exist in Db. \n!! So relation: " + str(key) + " cannot be created.\n"
print error_message
log_list.append(error_message)
break
# sometimes direct leaf-node may be present without hierarchy and ":"
else:
formatted_list = list(json_document[key].strip())
right_subject_id = []
right_subject_id.append(_get_id_from_hierarchy(formatted_list))
json_document[key] = right_subject_id
# print "\n----------", json_document[key]
info_message = "\n- For GRelation parsing content | key: " + str(rel_key) + " -- " + str(json_document[key])
print info_message
log_list.append(str(info_message))
# print list(json_document[key])
# perform_eval_type(key, json_document, "GSystem", "GSystem")
for right_subject_id in json_document[key]:
# print "\njson_document[key]: ", json_document[key]
subject_id = node._id
# print "subject_id : ", subject_id
# print "node.name: ", node.name
# Here we are appending list of ObjectIds of GSystemType's type_of field
# along with the ObjectId of GSystemType's itself (whose GSystem is getting created)
# This is because some of the RelationType's are holding Base class's ObjectId
# and not that of the Derived one's
# Delibrately keeping GSystemType's ObjectId first in the list
# And hence, used $in operator in the query!
rel_subject_type = []
rel_subject_type.append(file_gst._id)
if file_gst.type_of:
rel_subject_type.extend(file_gst.type_of)
relation_type_node = node_collection.one({'_type': "RelationType",
'$or': [{'name': {'$regex': "^"+rel_key+"$", '$options': 'i'}},
{'altnames': {'$regex': "^"+rel_key+"$", '$options': 'i'}}],
'subject_type': {'$in': rel_subject_type}
})
right_subject_id_or_list = []
right_subject_id_or_list.append(ObjectId(right_subject_id))
nodes = triple_collection.find({'_type': "GRelation",
'subject': subject_id,
'relation_type.$id': relation_type_node._id
})
# sending list of all the possible right subject to relation
for n in nodes:
if not n.right_subject in right_subject_id_or_list:
right_subject_id_or_list.append(n.right_subject)
info_message = "\n- Creating GRelation ("+ str(node.name)+ " -- "+ str(rel_key)+ " -- "+ str(right_subject_id_or_list)+") ..."
print info_message
log_list.append(str(info_message))
gr_node = create_grelation(subject_id, relation_type_node, right_subject_id_or_list)
info_message = "\n- Grelation processing done.\n"
print info_message
log_list.append(str(info_message))
# To break outer for loop if key found
break
else:
error_message = "\n!! DataNotFound: No data found for relation ("+ str(rel_key)+ ") while creating GSystem (" + str(file_gst.name) + " -- " + str(node.name) + ")\n"
print error_message
log_list.append(str(error_message))
break
# print relation_list
else:
info_message = "\n!! Either resource is already created or file is already saved into gridfs/DB or file not found"
print info_message
log_list.append(str(info_message))
continue
except Exception as e:
error_message = "\n While creating ("+str(json_document['name'])+") got following error...\n " + str(e)
print error_message # Keep it!
log_list.append(str(error_message))
def create_thread_obj(node_id):
'''
Creates thread object.
RT : has_thread
AT : release_response, thread_interaction_type
'''
try:
node_obj = node_collection.one({'_id': ObjectId(node_id)})
release_response_val = True
interaction_type_val = unicode('Comment')
thread_obj = None
thread_obj = node_collection.one({"_type": "GSystem", "member_of": ObjectId(twist_gst._id),"relation_set.thread_of": ObjectId(node_obj._id)})
if thread_obj == None:
# print "\n\n Creating new thread node"
thread_obj = node_collection.collection.GSystem()
thread_obj.name = u"Thread_of_" + unicode(node_obj.name)
thread_obj.status = u"PUBLISHED"
thread_obj.created_by = int(nroer_team_id)
thread_obj.modified_by = int(nroer_team_id)
thread_obj.contributors.append(int(nroer_team_id))
thread_obj.member_of.append(ObjectId(twist_gst._id))
thread_obj.group_set.append(home_group._id)
thread_obj.save()
# creating GRelation
gr = create_grelation(node_obj._id, has_thread_rt, thread_obj._id)
create_gattribute(thread_obj._id, rel_resp_at, release_response_val)
create_gattribute(thread_obj._id, thr_inter_type_at, interaction_type_val)
create_gattribute(node_obj._id, discussion_enable_at, True)
thread_obj.reload()
node_obj.reload()
# print "\n\n thread_obj", thread_obj.attribute_set, "\n---\n"
info_message = "\n- Successfully created thread obj - " + thread_obj._id.__str__() +" for - " + node_obj._id.__str__()
print info_message
log_list.append(str(info_message))
except Exception as e:
info_message = "\n- Error occurred while creating thread obj for - " + node_obj._id.__str__() +" - " + str(e)
print info_message
log_list.append(str(info_message))
def create_resource_gsystem(resource_data, row_no='', group_set_id=None):
# fetching resource from url
resource_link = resource_data.get("resource_link") # actual download file link
resource_link = resource_link.replace(' ', '%20')
if not resource_link:
resource_link = resource_link_common + resource_data.get("file_name")
# print "---------------",resource_link
filename = resource_link.split("/")[-1] # actual download file name with extension. e.g: neuron.jpg
info_message = "\n- Fetching resource from : '" + resource_link + "'"
print info_message
log_list.append(info_message)
print " (Might take some time. please hold on ...)\n"
try:
files = urllib2.urlopen(resource_link)
print " Fetched the resource successfully!\n"
except urllib2.URLError, e:
error_message = "\n!! File Not Found at: " + resource_link
log_list.append(error_message)
file_not_found_msg = "\nFile with following details not found: \n"
file_not_found_msg += "- Row No : " + str(row_no) + "\n"
file_not_found_msg += "- Name : " + resource_data["name"] + "\n"
file_not_found_msg += "- File Name: " + resource_data["file_name"] + "\n"
file_not_found_msg += "- URL : " + resource_link + "\n\n"
file_not_found_msg += "- ERROR : " + str(e) + "\n\n"
log_file_not_found.append(file_not_found_msg)
return None
files = io.BytesIO(files.read())
files.name = filename
name = unicode(resource_data["name"]) # name to be given to gsystem
userid = resource_data["created_by"]
content_org = resource_data["content_org"]
tags = resource_data["tags"]
language = resource_data["language"]
group_set_id = ObjectId(group_set_id) if group_set_id else home_group._id
img_type = None
access_policy = None
usrname = "nroer_team"
filemd5 = hashlib.md5(files.read()).hexdigest()
# size, unit = getFileSize(files)
# size = {'size':round(size, 2), 'unit':unicode(unit)}
# fcol = get_database()[File.collection_name]
# fileobj = fcol.File()
# fileobj = node_collection.collection.File()
# there can be two different files with same name.
# e.g: "The Living World" exists with epub, document, audio etc.
# hence not to check by name.
# check_obj_by_name = node_collection.find_one({"_type":"File", 'member_of': {'$all': [ObjectId(file_gst._id)]}, 'group_set': {'$all': [ObjectId(home_group._id)]}, "name": unicode(resource_data["name"]) })
# print "\n====", check_obj_by_name, "==== ", fileobj.fs.files.exists({"md5":filemd5})
check_file_in_gridfs = gridfs_collection.find_one({"md5": filemd5})
# even though file resource exists as a GSystem or in gridfs return None
# if fileobj.fs.files.exists({"md5": filemd5}) # or check_obj_by_name:
if check_file_in_gridfs:
# coll_oid = get_database()['fs.files']
# cur_oid = gridfs_collection.find_one({"md5": filemd5})
# printing appropriate error message
# if check_obj_by_name:
# info_message = "\n- Resource with same name of '"+ str(resource_data["name"]) +"' and _type 'File' exist in the home group. (Ref _id: '"+ str(check_obj_by_name._id) + "' )"
# print info_message
# log_list.append(str(info_message))
# return check_obj_by_name._id
# elif cur_oid:
info_message = "\n- Resource file exists in gridfs having id: '" + \
str(check_file_in_gridfs["_id"]) + "'"
print info_message
log_list.append(str(info_message))
if update_file_exists_in_gridfs:
file_obj = node_collection.one({'_type': 'File', 'fs_file_ids': {'$in': [ObjectId(check_file_in_gridfs['_id'])]} })
if file_obj:
info_message = "\n- Returning file _id despite of having in gridfs"
print info_message
log_list.append(str(info_message))
return file_obj._id
return None
# else:
# info_message = "\n- Resource file does not exists in database"
# print info_message
# log_list.append(str(info_message))
# return None
else: # creating new resource
info_message = "\n- Creating resource: " + str(resource_data["name"])
log_list.append(str(info_message))
print info_message
files.seek(0)
fileobj_oid, video = save_file(files, name, userid, group_set_id, content_org, tags, img_type, language, usrname, access_policy=u"PUBLIC", count=0, first_object="")
# print "\n------------ fileobj_oid : ", fileobj_oid, "--- ", video
# filetype = magic.from_buffer(files.read(100000), mime = 'true') # Gusing filetype by python-magic
node_collection.collection.update(
{'_id': ObjectId(fileobj_oid)},
{'$push': {'origin': {'csv-import': 'save_file'} }},
upsert=False,
multi=False
)
info_message = "\n- Created resource/GSystem object of name: '" + unicode(name) + "' having ObjectId: " + unicode(fileobj_oid) + "\n- Saved resource into gridfs. \n"
log_list.append(info_message)
print info_message
# print "\n----------", fileobj
return fileobj_oid
def attach_resource_thumbnail(thumbnail_url, node_id, resource_data, row_no):
updated_res_data = resource_data.copy()
updated_res_data['resource_link'] = thumbnail_url
updated_res_data['name'] = u'Thumbnail: ' + thumbnail_url.split('/')[-1]
updated_res_data['content_org'] = ''
updated_res_data['tags'] = []
# th_id: thumbnail id
th_id = create_resource_gsystem(updated_res_data, row_no, group_set_id=warehouse_group._id)
# th_obj = node_collection.one({'_id': ObjectId(th_id)})
# # tring to keep mid-size image otherwise thumbnail
# try:
# th_gridfs_id = th_obj.fs_file_ids[2]
# except:
# th_gridfs_id = th_obj.fs_file_ids[1]
# # print "th_gridfs_id: ", th_gridfs_id
# node_obj = node_collection.one({'_id': ObjectId(node_id)})
# print "~~~~~~~~~~", ObjectId(node_id), " : ", has_thumbnail_rt, " : ", ObjectId(th_id)
info_message = "\n- Creating GRelation ("+ str(node_id)+ " -- has_thumbnail -- "+ str(th_id)+") ..."
print info_message
log_list.append(str(info_message))
create_grelation(ObjectId(node_id), has_thumbnail_rt, ObjectId(th_id))
info_message = "\n- Grelation processing done for has_thumbnail.\n"
print info_message
log_list.append(str(info_message))
# # print "node_obj.fs_file_ids: ", node_obj.fs_file_ids
# node_fs_file_ids = node_obj.fs_file_ids
# if len(node_fs_file_ids) == 1:
# node_fs_file_ids.append(ObjectId(th_gridfs_id))
# elif len(node_fs_file_ids) > 1:
# node_fs_file_ids[1] = ObjectId(th_gridfs_id)
# # print "node_fs_file_ids: ", node_fs_file_ids
# node_collection.collection.update(
# {'_id': ObjectId(node_id)},
# {'$set': {'fs_file_ids': node_fs_file_ids}}
# )
Updated the nroer_data_entry script to correct the language field's value.
''' imports from python libraries '''
import os
import csv
import json
import datetime
import urllib2
import hashlib
import io
import time
# import ast
# import magic
# import subprocess
# import mimetypes
# from PIL import Image
# from StringIO import StringIO
''' imports from installed packages '''
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
# from django.core.management.base import CommandError
from django_mongokit import get_database
from mongokit import IS
try:
from bson import ObjectId
except ImportError: # old pymongo
from pymongo.objectid import ObjectId
''' imports from application folders/files '''
from gnowsys_ndf.ndf.models import Node, File
from gnowsys_ndf.ndf.models import GSystemType, AttributeType, RelationType
from gnowsys_ndf.ndf.models import GSystem, GAttribute, GRelation
from gnowsys_ndf.ndf.models import node_collection, triple_collection, gridfs_collection
from gnowsys_ndf.ndf.models import node_collection
from gnowsys_ndf.ndf.views.file import save_file
from gnowsys_ndf.ndf.views.methods import create_grelation, create_gattribute, get_language_tuple
from gnowsys_ndf.ndf.management.commands.create_theme_topic_hierarchy import add_to_collection_set
##############################################################################
# Directory (next to this command module) that holds the csv/json schema files.
SCHEMA_ROOT = os.path.join(os.path.dirname(__file__), "schema_files")
# Banner written at the top of every log file, once per run.
script_start_str = "######### Script ran on : " + time.strftime("%c") + " #########\n------------------------------------------------------------\n"
log_file_not_found = []
log_file_not_found.append(script_start_str)
log_list = []  # To hold intermediate errors
log_list.append(script_start_str)
# Frequently used GSystemType/Group/AttributeType/RelationType documents,
# fetched once at import time from mongodb.
file_gst = node_collection.one({'_type': 'GSystemType', "name": "File"})
home_group = node_collection.one({"name": "home", "_type": "Group"})
warehouse_group = node_collection.one({"name": 'warehouse', "_type": "Group"})
theme_gst = node_collection.one({'_type': 'GSystemType', "name": "Theme"})
theme_item_gst = node_collection.one({'_type': 'GSystemType', "name": "theme_item"})
topic_gst = node_collection.one({'_type': 'GSystemType', "name": "Topic"})
twist_gst = node_collection.one({'_type': 'GSystemType', 'name': 'Twist'})
rel_resp_at = node_collection.one({'_type': 'AttributeType', 'name': 'release_response'})
thr_inter_type_at = node_collection.one({'_type': 'AttributeType', 'name': 'thread_interaction_type'})
has_thread_rt = node_collection.one({"_type": "RelationType", "name": u"has_thread"})
has_thumbnail_rt = node_collection.one({'_type': "RelationType", 'name': u"has_thumbnail"})
discussion_enable_at = node_collection.one({"_type": "AttributeType", "name": "discussion_enable"})
# Overwritten by create_user_nroer_team() at the start of every run.
nroer_team_id = 1
# setting variable:
# If set True, despite of having the file blob in gridfs, it fetches the concern File which contains this _id in it's fs_file_ids field and returns it.
# If set False, returns None
update_file_exists_in_gridfs = True
# INFO notes: base URL of the NROER resource store (pick one per network):
# http://172.16.0.252/sites/default/files/nroer_resources/ (for room no 012)
# http://192.168.1.102/sites/default/files/nroer_resources/ (for whole ncert campus)
# http://125.23.112.5/sites/default/files/nroer_resources/ (for public i.e outside campus)
resource_link_common = "http://125.23.112.5/sites/default/files/nroer_resources/"
class Command(BaseCommand):
help = "\n\tFor saving data in gstudio DB from NROER schema files. This will create 'File' type GSystem instances.\n\tCSV file condition: The first row should contain DB names.\n"
def handle(self, *args, **options):
try:
# print "working........" + SCHEMA_ROOT
# processing each file of passed multiple CSV files as args
for file_name in args:
file_path = os.path.join(SCHEMA_ROOT, file_name)
if os.path.exists(file_path):
file_extension = os.path.splitext(file_name)[1]
if "csv" in file_extension:
total_rows = 0
# Process csv file and convert it to json format at first
info_message = "\n- CSV File (" + file_path + ") found!!!"
print info_message
log_list.append(str(info_message))
try:
csv_file_path = file_path
json_file_name = file_name.rstrip("csv") + "json"
json_file_path = os.path.join(SCHEMA_ROOT, json_file_name)
json_file_content = ""
with open(csv_file_path, 'rb') as csv_file:
csv_file_content = csv.DictReader(csv_file, delimiter=",")
json_file_content = []
for row in csv_file_content:
total_rows += 1
json_file_content.append(row)
info_message = "\n- File '" + file_name + "' contains : [ " + str(total_rows) + " ] entries/rows (excluding top-header/column-names)."
print info_message
log_list.append(str(info_message))
with open(json_file_path, 'w') as json_file:
json.dump(json_file_content, json_file, indent=4, sort_keys=False)
if os.path.exists(json_file_path):
file_path = json_file_path
is_json_file_exists = True
info_message = "\n- JSONType: File (" + json_file_path + ") created successfully.\n"
print info_message
log_list.append(str(info_message))
except Exception as e:
error_message = "\n!! CSV-JSONError: " + str(e)
print error_message
log_list.append(str(error_message))
# End of csv-json coversion
elif "json" in file_extension:
is_json_file_exists = True
else:
error_message = "\n!! FileTypeError: Please choose either 'csv' or 'json' format supported files!!!\n"
print error_message
log_list.append(str(error_message))
raise Exception(error_mesage)
if is_json_file_exists:
create_user_nroer_team()
# print nroer_team_id
# Process json file and create required GSystems, GRelations, and GAttributes
info_message = "\n------- Initiating task of processing json-file -------\n"
print info_message
log_list.append(str(info_message))
t0 = time.time()
parse_data_create_gsystem(file_path)
t1 = time.time()
time_diff = t1 - t0
total_time_minute = round( (time_diff/60), 2) if time_diff else 0
total_time_hour = round( (time_diff/(60*60)), 2) if time_diff else 0
# End of processing json file
info_message = "\n------- Task finised: Successfully processed json-file -------\n"
info_message += "- Total time taken for the processing: \n\n\t" + str(total_time_minute) + " MINUTES\n\t=== OR ===\n\t" + str(total_time_hour) + " HOURS\n"
print info_message
log_list.append(str(info_message))
# End of creation of respective GSystems, GAttributes and GRelations for Enrollment
else:
error_message = "\n!! FileNotFound: Following path (" + file_path + ") doesn't exists!!!\n"
print error_message
log_list.append(str(error_message))
raise Exception(error_message)
except Exception as e:
print str(e)
log_list.append(str(e))
finally:
if log_list:
log_list.append("\n ============================================================ End of Iteration ============================================================\n\n\n")
# print log_list
log_file_name = args[0].rstrip("csv") + "log"
log_file_path = os.path.join(SCHEMA_ROOT, log_file_name)
# print log_file_path
with open(log_file_path, 'a') as log_file:
log_file.writelines(log_list)
if log_file_not_found != [script_start_str]:
log_file_not_found.append("============================== End of Iteration =====================================\n")
log_file_not_found.append("-------------------------------------------------------------------------------------\n")
log_file_name = args[0].replace('.', '_FILES_NOT_FOUND.').rstrip("csv") + "log"
log_file_path = os.path.join(SCHEMA_ROOT, log_file_name)
# print log_file_path
with open(log_file_path, 'a') as log_file:
log_file.writelines(log_file_not_found)
# --- End of handle() ---
def create_user_nroer_team():
'''
Check for the user: "nroer_team". If it doesn't exists, create one.
'''
global nroer_team_id
if User.objects.filter(username="nroer_team"):
nroer_team_id = get_user_id("nroer_team")
else:
info_message = "\n- Creating super user: 'nroer_team': "
user = User.objects.create_superuser(username='nroer_team', password='nroer_team', email='nroer_team@example.com')
nroer_team_id = user.id
info_message += "\n- Created super user with following creadentials: "
info_message += "\n\n\tusername = 'nroer_team', \n\tpassword = 'nroer_team', \n\temail = 'nroer_team@example.com', \n\tid = '" + str(nroer_team_id) + "'"
print info_message
log_list.append(info_message)
def get_user_id(user_name):
'''
Takes the "user name" as an argument and returns:
- django "use id" as a response.
else
- returns False.
'''
try:
user_obj = User.objects.get(username=user_name)
return int(user_obj.id)
except Exception as e:
error_message = e + "\n!! for username: " + user_name
print error_message
log_list.append(str(error_message))
return False
def cast_to_data_type(value, data_type):
    '''
    Cast first argument "value" (a raw string read from csv/json) to second
    argument "data_type" and return the casted value.

    Values that cannot be represented in the requested type are returned
    unchanged (stripped), so the caller can decide what to do with them.
    '''
    value = value.strip()
    casted_value = value
    if data_type == unicode:
        casted_value = unicode(value)
    elif data_type == basestring:
        # the casting is made to unicode despite of str;
        # to prevent "authorized type" check error in mongoDB
        casted_value = unicode(value)
    elif (data_type == int) and str(value):
        # BUG FIX: the str.isdigit() gate rejected negative numbers
        # (e.g. "-2"), silently leaving them as strings; attempt the real
        # conversion and fall back to the raw value instead.
        try:
            casted_value = int(value)
        except (ValueError, TypeError):
            casted_value = value
    elif (data_type == float) and str(value):
        # BUG FIX: str.isdigit() rejected decimal strings (e.g. "3.5"),
        # so genuine float values were never cast.
        try:
            casted_value = float(value)
        except (ValueError, TypeError):
            casted_value = value
    elif (data_type == long) and str(value):
        try:
            casted_value = long(value)
        except (ValueError, TypeError):
            casted_value = value
    elif data_type == bool and str(value):  # converting unicode to int and then to bool
        casted_value = bool(int(value)) if (str.isdigit(str(value))) else bool(value)
    elif (data_type == list) or isinstance(data_type, list):
        value = value.replace("\n", "").replace(" and ", ",").split(",")
        # check for complex list type like: [int] or [unicode]
        if isinstance(data_type, list) and len(data_type) and isinstance(data_type[0], type):
            casted_value = [data_type[0](i.strip()) for i in value if i]
        else:  # otherwise normal list
            casted_value = [i.strip() for i in value if i]
    elif data_type == datetime.datetime:
        # "value" should be day/month/year, e.g.:
        # datetime.datetime.strptime("11/12/2014", "%d/%m/%Y")
        # -> datetime.datetime(2014, 12, 11, 0, 0)
        casted_value = datetime.datetime.strptime(value, "%d/%m/%Y")
    return casted_value
def get_id_from_hierarchy(hier_list):
    """
    Walk a theme -> theme-item(s) -> topic hierarchy and return the leaf
    topic's _id.

    Args:
        hier_list (list):
            # e.g:
            # [u'NCF', u'Biology', u'Living world', u'Biological classification']

    Returns: ObjectId or None
        - If hierarchy found to be correct, _id/ObjectId will be returned.
        - else None will be returned.
    """
    theme_name = hier_list[0]
    topic_name = hier_list[-1:][0]
    intermediate_names = hier_list[1:-1]

    # resolve the root theme node (case-insensitive exact-name match)
    current_node = node_collection.one({
        'name': {'$regex': "^" + unicode(theme_name) + "$", '$options': 'i'},
        'group_set': {'$in': [home_group._id]},
        'member_of': theme_gst._id
    })
    if not current_node:
        return None

    parent_id = current_node._id
    # descend through every intermediate theme-item, chaining on prior_node
    for item_name in intermediate_names:
        current_node = node_collection.one({
            'name': {'$regex': "^" + unicode(item_name) + "$", '$options': 'i'},
            'prior_node': {'$in': [parent_id]},
            'member_of': {'$in': [theme_item_gst._id]},
            'group_set': {'$in': [home_group._id]}
        })
        if not current_node:
            return None
        parent_id = current_node._id

    # finally fetch the topic node hanging off the last theme-item
    leaf_node = node_collection.one({
        'name': {'$regex': "^" + unicode(topic_name) + "$", '$options': 'i'},
        'group_set': {'$in': [home_group._id]},
        'member_of': {'$in': [topic_gst._id]},
        'prior_node': {'$in': [parent_id]}
    })
    if leaf_node:
        return leaf_node._id
def parse_data_create_gsystem(json_file_path):
    """
    Read a json dump of spreadsheet rows and, for each row, create a File
    GSystem plus its language, collection membership, thumbnail,
    discussion thread, GAttributes and GRelations.

    Args:
        json_file_path (str): path to the json file of resource rows.

    NOTE(review): relies on module-level globals (node_collection,
    file_gst, home_group, log_list, nroer_team_id, triple_collection,
    ...) being initialised by the caller — verify before reuse.
    """
    json_file_content = ""
    try:
        with open(json_file_path) as json_file:
            json_file_content = json_file.read()
        json_documents_list = json.loads(json_file_content)
        # Initiating empty node obj and other related data variables
        node = node_collection.collection.File()
        node_keys = node.keys()
        node_structure = node.structure
        # print "\n\n---------------", node_keys
        json_documents_list_spaces = json_documents_list
        json_documents_list = []
        # Removes leading and trailing spaces from keys as well as values
        for json_document_spaces in json_documents_list_spaces:
            json_document = {}
            for key_spaces, value_spaces in json_document_spaces.iteritems():
                json_document[key_spaces.strip().lower()] = value_spaces.strip()
            json_documents_list.append(json_document)
    except Exception as e:
        error_message = "\n!! While parsing the file ("+json_file_path+") got following error...\n " + str(e)
        log_list.append(str(error_message))
        # NOTE(review): raising a str is a TypeError on modern Python —
        # should wrap in an Exception subclass; confirm before changing.
        raise error_message
    for i, json_document in enumerate(json_documents_list):
        # row numbers in messages are offset by 2 (header row + 1-indexing)
        info_message = "\n\n\n********** Processing row number : ["+ str(i + 2) + "] **********"
        print info_message
        log_list.append(str(info_message))
        try:
            parsed_json_document = {}
            attribute_relation_list = []
            for key in json_document.iterkeys():
                parsed_key = key.lower()
                if parsed_key in node_keys:
                    # print parsed_key
                    # adding the default field values e.g: created_by, member_of
                    # created_by:
                    if parsed_key == "created_by":
                        if json_document[key]:
                            temp_user_id = get_user_id(json_document[key].strip())
                            if temp_user_id:
                                parsed_json_document[parsed_key] = temp_user_id
                            else:
                                parsed_json_document[parsed_key] = nroer_team_id
                        else:
                            parsed_json_document[parsed_key] = nroer_team_id
                        # print "---", parsed_json_document[parsed_key]
                    # contributors:
                    elif parsed_key == "contributors":
                        if json_document[key]:
                            contrib_list = json_document[key].split(",")
                            temp_contributors = []
                            for each_user in contrib_list:
                                user_id = get_user_id(each_user.strip())
                                if user_id:
                                    temp_contributors.append(user_id)
                            parsed_json_document[parsed_key] = temp_contributors
                        else:
                            parsed_json_document[parsed_key] = [nroer_team_id]
                        # print "===", parsed_json_document[parsed_key]
                    # tags:
                    elif (parsed_key == "tags") and json_document[key]:
                        parsed_json_document[parsed_key] = cast_to_data_type(json_document[key], node_structure.get(parsed_key))
                        # print parsed_json_document[parsed_key]
                    # member_of:
                    elif parsed_key == "member_of":
                        parsed_json_document[parsed_key] = [file_gst._id]
                        # print parsed_json_document[parsed_key]
                    # --- END of adding the default field values
                    else:
                        # remaining known fields: cast by the File schema
                        # parsed_json_document[parsed_key] = json_document[key]
                        parsed_json_document[parsed_key] = cast_to_data_type(json_document[key], node_structure.get(parsed_key))
                        # print parsed_json_document[parsed_key]
                    # --- END of processing for remaining fields
                else:  # key is not in the node_keys: treat later as AT/RT
                    parsed_json_document[key] = json_document[key]
                    attribute_relation_list.append(key)
                    # print "key : ", key
            # --END of for loop ---
            # calling method to create File GSystems
            nodeid = create_resource_gsystem(parsed_json_document, i)
            # print "nodeid : ", nodeid
            # ----- for updating language -----
            # NOTE(review): eval() of row data is unsafe on untrusted input
            node_lang = get_language_tuple(eval(parsed_json_document['language']))
            # print "============= 1 :", type(eval(parsed_json_document['language']))
            # print "============= 2 :", node_lang
            temp_node = node_collection.one({'_id': ObjectId(nodeid) })
            # print "============= lang :", temp_node.language
            update_res = node_collection.collection.update(
                {'_id': ObjectId(nodeid), 'language': {'$ne': node_lang}},
                {'$set': {'language': node_lang}},
                upsert=False,
                multi=False
            )
            if update_res['updatedExisting']:
                temp_node.reload()
                info_message = "\n\n- Update to language of resource: " + str(update_res)
                print info_message
                log_list.append(info_message)
                info_message = "\n\n- Now language of resource updates to: " + str(temp_node.language)
                print info_message
                log_list.append(info_message)
                # print "============= lang :", temp_node.language
            # ----- END of updating language -----
            # attach to an existing collection node when requested
            collection_name = parsed_json_document.get('collection', '')
            if collection_name and nodeid:
                collection_node = node_collection.find_one({
                    '_type': 'File',
                    'group_set': {'$in': [home_group._id]},
                    'name': unicode(collection_name)
                })
                if collection_node:
                    add_to_collection_set(collection_node, nodeid)
            thumbnail_url = parsed_json_document.get('thumbnail')
            # print "thumbnail_url : ", thumbnail_url
            if thumbnail_url and nodeid:
                try:
                    print "\n\n- Attaching thumbnail to resource\n"
                    attach_resource_thumbnail(thumbnail_url, nodeid, parsed_json_document, i)
                except Exception, e:
                    print e
            # print type(nodeid), "-------", nodeid, "\n"
            # create thread node
            if isinstance(nodeid, ObjectId):
                thread_result = create_thread_obj(nodeid)
            # starting processing for the attributes and relations saving
            if isinstance(nodeid, ObjectId) and attribute_relation_list:
                node = node_collection.one({ "_id": ObjectId(nodeid) })
                gst_possible_attributes_dict = node.get_possible_attributes(file_gst._id)
                # print gst_possible_attributes_dict
                relation_list = []
                json_document['name'] = node.name
                # Write code for setting atrributes
                for key in attribute_relation_list:
                    is_relation = True
                    # print "\n", key, "----------\n"
                    for attr_key, attr_value in gst_possible_attributes_dict.iteritems():
                        # print "\n", attr_key,"======", attr_value
                        if key == attr_key:
                            # print key
                            is_relation = False
                            # setting value to "0" for int, float, long (to avoid casting error)
                            # if (attr_value['data_type'] in [int, float, long]) and (not json_document[key]):
                            #     json_document[key] = 0
                            if json_document[key]:
                                # print "key : ", key, "\nvalue : ",json_document[key]
                                info_message = "\n- For GAttribute parsing content | key: '" + attr_key + "' having value: '" + json_document[key] + "'"
                                print info_message
                                log_list.append(str(info_message))
                                cast_to_data_type(json_document[key], attr_value['data_type'])
                                if attr_value['data_type'] == "curricular":
                                    # setting int values for CR/XCR
                                    if json_document[key] == "CR":
                                        json_document[key] = 1
                                    elif json_document[key] == "XCR":
                                        json_document[key] = 0
                                    else:  # needs to be confirm
                                        json_document[key] = 0
                                    # json_document[key] = bool(int(json_document[key]))
                                # print attr_value['data_type'], "@@@@@@@@@ : ", json_document[key]
                                json_document[key] = cast_to_data_type(json_document[key], attr_value['data_type'])
                                # print key, " !!!!!!!!! : ", json_document[key]
                                subject_id = node._id
                                # print "\n-----\nsubject_id: ", subject_id
                                # case-insensitive match on name or altnames
                                attribute_type_node = node_collection.one({
                                    '_type': "AttributeType",
                                    '$or': [
                                        {'name':
                                         {'$regex': "^"+attr_key+"$",
                                          '$options': 'i'}
                                         },
                                        {'altnames': {'$regex': "^"+attr_key+"$", '$options': 'i'}
                                         }
                                    ]
                                })
                                # print "\nattribute_type_node: ", attribute_type_node.name
                                object_value = json_document[key]
                                # print "\nobject_value: ", object_value
                                ga_node = None
                                info_message = "\n- Creating GAttribute ("+node.name+" -- "+attribute_type_node.name+" -- "+str(json_document[key])+") ...\n"
                                print info_message
                                log_list.append(str(info_message))
                                ga_node = create_gattribute(subject_id, attribute_type_node, object_value)
                                info_message = "- Created ga_node : "+ str(ga_node.name) + "\n"
                                print info_message
                                log_list.append(str(info_message))
                                # To break outer for loop as key found
                                break
                            else:
                                error_message = "\n!! DataNotFound: No data found for field ("+str(attr_key)+") while creating GSystem ( -- "+str(node.name)+")\n"
                                print error_message
                                log_list.append(str(error_message))
                        # ---END of if (key == attr_key)
                    if is_relation:
                        relation_list.append(key)
                if not relation_list:
                    # No possible relations defined for this node
                    info_message = "\n!! ("+str(node.name)+"): No possible relations defined for this node.\n"
                    print info_message
                    log_list.append(str(info_message))
                    return
                gst_possible_relations_dict = node.get_possible_relations(file_gst._id)
                # processing each entry in relation_list
                # print "=== relation_list : ", relation_list
                for key in relation_list:
                    is_relation = True
                    for rel_key, rel_value in gst_possible_relations_dict.iteritems():
                        if key == rel_key:
                            # if key == "teaches":
                            is_relation = False
                            if json_document[key]:
                                # most often the data is hierarchy sep by ":"
                                if ":" in json_document[key]:
                                    formatted_list = []
                                    temp_teaches_list = json_document[key].replace("\n", "").split(":")
                                    # print "\n temp_teaches", temp_teaches
                                    for v in temp_teaches_list:
                                        formatted_list.append(v.strip())
                                    right_subject_id = []
                                    # print "~~~~~~~~~~~", formatted_list
                                    # rsub_id = _get_id_from_hierarchy(formatted_list)
                                    rsub_id = get_id_from_hierarchy(formatted_list)
                                    # print "=== rsub_id : ", rsub_id
                                    hierarchy_output = None
                                    # checking every item in hierarchy exist and leaf node's _id found
                                    if rsub_id:
                                        right_subject_id.append(rsub_id)
                                        json_document[key] = right_subject_id
                                        # print json_document[key]
                                    else:
                                        error_message = "\n!! While creating teaches rel: Any one of the item in hierarchy"+ str(json_document[key]) +"does not exist in Db. \n!! So relation: " + str(key) + " cannot be created.\n"
                                        print error_message
                                        log_list.append(error_message)
                                        break
                                # sometimes direct leaf-node may be present without hierarchy and ":"
                                else:
                                    formatted_list = list(json_document[key].strip())
                                    right_subject_id = []
                                    right_subject_id.append(_get_id_from_hierarchy(formatted_list))
                                    json_document[key] = right_subject_id
                                    # print "\n----------", json_document[key]
                                info_message = "\n- For GRelation parsing content | key: " + str(rel_key) + " -- " + str(json_document[key])
                                print info_message
                                log_list.append(str(info_message))
                                # print list(json_document[key])
                                # perform_eval_type(key, json_document, "GSystem", "GSystem")
                                for right_subject_id in json_document[key]:
                                    # print "\njson_document[key]: ", json_document[key]
                                    subject_id = node._id
                                    # print "subject_id : ", subject_id
                                    # print "node.name: ", node.name
                                    # Here we are appending list of ObjectIds of GSystemType's type_of field
                                    # along with the ObjectId of GSystemType's itself (whose GSystem is getting created)
                                    # This is because some of the RelationType's are holding Base class's ObjectId
                                    # and not that of the Derived one's
                                    # Delibrately keeping GSystemType's ObjectId first in the list
                                    # And hence, used $in operator in the query!
                                    rel_subject_type = []
                                    rel_subject_type.append(file_gst._id)
                                    if file_gst.type_of:
                                        rel_subject_type.extend(file_gst.type_of)
                                    relation_type_node = node_collection.one({'_type': "RelationType",
                                        '$or': [{'name': {'$regex': "^"+rel_key+"$", '$options': 'i'}},
                                                {'altnames': {'$regex': "^"+rel_key+"$", '$options': 'i'}}],
                                        'subject_type': {'$in': rel_subject_type}
                                    })
                                    right_subject_id_or_list = []
                                    right_subject_id_or_list.append(ObjectId(right_subject_id))
                                    nodes = triple_collection.find({'_type': "GRelation",
                                        'subject': subject_id,
                                        'relation_type.$id': relation_type_node._id
                                    })
                                    # sending list of all the possible right subject to relation
                                    for n in nodes:
                                        if not n.right_subject in right_subject_id_or_list:
                                            right_subject_id_or_list.append(n.right_subject)
                                    info_message = "\n- Creating GRelation ("+ str(node.name)+ " -- "+ str(rel_key)+ " -- "+ str(right_subject_id_or_list)+") ..."
                                    print info_message
                                    log_list.append(str(info_message))
                                    gr_node = create_grelation(subject_id, relation_type_node, right_subject_id_or_list)
                                    info_message = "\n- Grelation processing done.\n"
                                    print info_message
                                    log_list.append(str(info_message))
                                # To break outer for loop if key found
                                break
                            else:
                                error_message = "\n!! DataNotFound: No data found for relation ("+ str(rel_key)+ ") while creating GSystem (" + str(file_gst.name) + " -- " + str(node.name) + ")\n"
                                print error_message
                                log_list.append(str(error_message))
                                break
                # print relation_list
            else:
                info_message = "\n!! Either resource is already created or file is already saved into gridfs/DB or file not found"
                print info_message
                log_list.append(str(info_message))
                continue
        except Exception as e:
            error_message = "\n While creating ("+str(json_document['name'])+") got following error...\n " + str(e)
            print error_message  # Keep it!
            log_list.append(str(error_message))
def create_thread_obj(node_id):
'''
Creates thread object.
RT : has_thread
AT : release_response, thread_interaction_type
'''
try:
node_obj = node_collection.one({'_id': ObjectId(node_id)})
release_response_val = True
interaction_type_val = unicode('Comment')
thread_obj = None
thread_obj = node_collection.one({"_type": "GSystem", "member_of": ObjectId(twist_gst._id),"relation_set.thread_of": ObjectId(node_obj._id)})
if thread_obj == None:
# print "\n\n Creating new thread node"
thread_obj = node_collection.collection.GSystem()
thread_obj.name = u"Thread_of_" + unicode(node_obj.name)
thread_obj.status = u"PUBLISHED"
thread_obj.created_by = int(nroer_team_id)
thread_obj.modified_by = int(nroer_team_id)
thread_obj.contributors.append(int(nroer_team_id))
thread_obj.member_of.append(ObjectId(twist_gst._id))
thread_obj.group_set.append(home_group._id)
thread_obj.save()
# creating GRelation
gr = create_grelation(node_obj._id, has_thread_rt, thread_obj._id)
create_gattribute(thread_obj._id, rel_resp_at, release_response_val)
create_gattribute(thread_obj._id, thr_inter_type_at, interaction_type_val)
create_gattribute(node_obj._id, discussion_enable_at, True)
thread_obj.reload()
node_obj.reload()
# print "\n\n thread_obj", thread_obj.attribute_set, "\n---\n"
info_message = "\n- Successfully created thread obj - " + thread_obj._id.__str__() +" for - " + node_obj._id.__str__()
print info_message
log_list.append(str(info_message))
except Exception as e:
info_message = "\n- Error occurred while creating thread obj for - " + node_obj._id.__str__() +" - " + str(e)
print info_message
log_list.append(str(info_message))
def create_resource_gsystem(resource_data, row_no='', group_set_id=None):
# fetching resource from url
resource_link = resource_data.get("resource_link") # actual download file link
resource_link = resource_link.replace(' ', '%20')
if not resource_link:
resource_link = resource_link_common + resource_data.get("file_name")
# print "---------------",resource_link
filename = resource_link.split("/")[-1] # actual download file name with extension. e.g: neuron.jpg
info_message = "\n- Fetching resource from : '" + resource_link + "'"
print info_message
log_list.append(info_message)
print " (Might take some time. please hold on ...)\n"
try:
files = urllib2.urlopen(resource_link)
print " Fetched the resource successfully!\n"
except urllib2.URLError, e:
error_message = "\n!! File Not Found at: " + resource_link
log_list.append(error_message)
file_not_found_msg = "\nFile with following details not found: \n"
file_not_found_msg += "- Row No : " + str(row_no) + "\n"
file_not_found_msg += "- Name : " + resource_data["name"] + "\n"
file_not_found_msg += "- File Name: " + resource_data["file_name"] + "\n"
file_not_found_msg += "- URL : " + resource_link + "\n\n"
file_not_found_msg += "- ERROR : " + str(e) + "\n\n"
log_file_not_found.append(file_not_found_msg)
return None
files = io.BytesIO(files.read())
files.name = filename
name = unicode(resource_data["name"]) # name to be given to gsystem
userid = resource_data["created_by"]
content_org = resource_data["content_org"]
tags = resource_data["tags"]
language = get_language_tuple(eval(parsed_json_document['language']))
group_set_id = ObjectId(group_set_id) if group_set_id else home_group._id
img_type = None
access_policy = None
usrname = "nroer_team"
filemd5 = hashlib.md5(files.read()).hexdigest()
# size, unit = getFileSize(files)
# size = {'size':round(size, 2), 'unit':unicode(unit)}
# fcol = get_database()[File.collection_name]
# fileobj = fcol.File()
# fileobj = node_collection.collection.File()
# there can be two different files with same name.
# e.g: "The Living World" exists with epub, document, audio etc.
# hence not to check by name.
# check_obj_by_name = node_collection.find_one({"_type":"File", 'member_of': {'$all': [ObjectId(file_gst._id)]}, 'group_set': {'$all': [ObjectId(home_group._id)]}, "name": unicode(resource_data["name"]) })
# print "\n====", check_obj_by_name, "==== ", fileobj.fs.files.exists({"md5":filemd5})
check_file_in_gridfs = gridfs_collection.find_one({"md5": filemd5})
# even though file resource exists as a GSystem or in gridfs return None
# if fileobj.fs.files.exists({"md5": filemd5}) # or check_obj_by_name:
if check_file_in_gridfs:
# coll_oid = get_database()['fs.files']
# cur_oid = gridfs_collection.find_one({"md5": filemd5})
# printing appropriate error message
# if check_obj_by_name:
# info_message = "\n- Resource with same name of '"+ str(resource_data["name"]) +"' and _type 'File' exist in the home group. (Ref _id: '"+ str(check_obj_by_name._id) + "' )"
# print info_message
# log_list.append(str(info_message))
# return check_obj_by_name._id
# elif cur_oid:
info_message = "\n- Resource file exists in gridfs having id: '" + \
str(check_file_in_gridfs["_id"]) + "'"
print info_message
log_list.append(str(info_message))
if update_file_exists_in_gridfs:
file_obj = node_collection.one({'_type': 'File', 'fs_file_ids': {'$in': [ObjectId(check_file_in_gridfs['_id'])]} })
if file_obj:
info_message = "\n- Returning file _id despite of having in gridfs"
print info_message
log_list.append(str(info_message))
return file_obj._id
return None
# else:
# info_message = "\n- Resource file does not exists in database"
# print info_message
# log_list.append(str(info_message))
# return None
else: # creating new resource
info_message = "\n- Creating resource: " + str(resource_data["name"])
log_list.append(str(info_message))
print info_message
files.seek(0)
fileobj_oid, video = save_file(files, name, userid, group_set_id, content_org, tags, img_type, language, usrname, access_policy=u"PUBLIC", count=0, first_object="")
# print "\n------------ fileobj_oid : ", fileobj_oid, "--- ", video
# filetype = magic.from_buffer(files.read(100000), mime = 'true') # Gusing filetype by python-magic
node_collection.collection.update(
{'_id': ObjectId(fileobj_oid)},
{'$push': {'origin': {'csv-import': 'save_file'} }},
upsert=False,
multi=False
)
print "\n\n Printing newly created object:\n", node_collection.one({'_id': ObjectId(fileobj_oid)})
print "\n ===================================\n\n"
info_message = "\n- Created resource/GSystem object of name: '" + unicode(name) + "' having ObjectId: " + unicode(fileobj_oid) + "\n- Saved resource into gridfs. \n"
log_list.append(info_message)
print info_message
# print "\n----------", fileobj
return fileobj_oid
def attach_resource_thumbnail(thumbnail_url, node_id, resource_data, row_no):
updated_res_data = resource_data.copy()
updated_res_data['resource_link'] = thumbnail_url
updated_res_data['name'] = u'Thumbnail: ' + thumbnail_url.split('/')[-1]
updated_res_data['content_org'] = ''
updated_res_data['tags'] = []
# th_id: thumbnail id
th_id = create_resource_gsystem(updated_res_data, row_no, group_set_id=warehouse_group._id)
# th_obj = node_collection.one({'_id': ObjectId(th_id)})
# # tring to keep mid-size image otherwise thumbnail
# try:
# th_gridfs_id = th_obj.fs_file_ids[2]
# except:
# th_gridfs_id = th_obj.fs_file_ids[1]
# # print "th_gridfs_id: ", th_gridfs_id
# node_obj = node_collection.one({'_id': ObjectId(node_id)})
# print "~~~~~~~~~~", ObjectId(node_id), " : ", has_thumbnail_rt, " : ", ObjectId(th_id)
info_message = "\n- Creating GRelation ("+ str(node_id)+ " -- has_thumbnail -- "+ str(th_id)+") ..."
print info_message
log_list.append(str(info_message))
create_grelation(ObjectId(node_id), has_thumbnail_rt, ObjectId(th_id))
info_message = "\n- Grelation processing done for has_thumbnail.\n"
print info_message
log_list.append(str(info_message))
# # print "node_obj.fs_file_ids: ", node_obj.fs_file_ids
# node_fs_file_ids = node_obj.fs_file_ids
# if len(node_fs_file_ids) == 1:
# node_fs_file_ids.append(ObjectId(th_gridfs_id))
# elif len(node_fs_file_ids) > 1:
# node_fs_file_ids[1] = ObjectId(th_gridfs_id)
# # print "node_fs_file_ids: ", node_fs_file_ids
# node_collection.collection.update(
# {'_id': ObjectId(node_id)},
# {'$set': {'fs_file_ids': node_fs_file_ids}}
# )
|
#
# Forked from sstvProxy:
#
#sstvProxy Development Lead
#````````````````
#- bjzy <bjzybjzy@gmail.com>
#
#tvhProxy Developer (majority of code, thanks!)
#````````````````
#- Joel Kaaberg <joel.kaberg@gmail.com>
#
#Patches and Suggestions
#```````````````````````
#
#- Nikhil Choudhary
from gevent import monkey; monkey.patch_all()
import subprocess
import sys
import os
# import dateutil.parser
# import datetime
# import urllib2
# import SmoothUtils
# import SmoothAuth
# import traceback
# import operator
# import bisect
# import time
# import calendar
# import shlex
# import requests
from gevent.pywsgi import WSGIServer
from flask import Flask, Response, request, jsonify, abort
# import SmoothUtils
# import SmoothAuth
# import SmoothPlaylist #requires python 3
app = Flask(__name__)
# config = {
# 'bindAddr': os.environ.get('SSTV_BINDADDR') or '',
# 'sstvProxyURL': os.environ.get('SSTV_PROXY_URL') or 'http://localhost',
# 'tunerCount': os.environ.get('SSTV_TUNER_COUNT') or 6, # number of tuners to use for sstv
# }
#
#
# @app.route('/discover.json')
# def discover():
# return jsonify({
# 'FriendlyName': 'sstvProxy',
# 'ModelNumber': 'HDTC-2US',
# 'FirmwareName': 'hdhomeruntc_atsc',
# 'TunerCount': int(config['tunerCount']),
# 'FirmwareVersion': '20150826',
# 'DeviceID': '12345678',
# 'DeviceAuth': 'test1234',
# 'BaseURL': '%s' % config['sstvProxyURL'],
# 'LineupURL': '%s/lineup.json' % config['sstvProxyURL']
# })
#
#
# @app.route('/lineup_status.json')
# def status():
# return jsonify({
# 'ScanInProgress': 0,
# 'ScanPossible': 1,
# 'Source': "Cable",
# 'SourceList': ['Cable']
# })
@app.route('/lineup.json')
def lineup():
    """
    HDHomeRun-style lineup endpoint.

    Regenerates the playlist by shelling out to SmoothPlaylist.py
    (Python-2-compatible stand-in for SmoothPlaylist.main()), then parses
    SmoothStreamsTV-xml.m3u8 into a JSON channel lineup.
    """
    # Python 3: scheduleResult = SmoothPlaylist.main()
    # Python 2 compatible: run it and mirror its stderr onto our stdout
    child = subprocess.Popen("python SmoothPlaylist.py", shell=True, stderr=subprocess.PIPE)
    while True:
        out = child.stderr.read(1)
        if out == '' and child.poll() != None:
            break
        if out != '':
            sys.stdout.write(out)
            sys.stdout.flush()
    lineup = []
    # `with` so the playlist file is closed even on a parse error
    # (previously opened via the builtin-shadowing name `file`, never closed)
    with open("SmoothStreamsTV-xml.m3u8", 'r') as playlist:
        playlist.readline()  # skip the #EXTM3U header line
        for channelNum in range(1, 151):
            # each channel is a pair of lines, e.g.
            #   #EXTINF:-1 tvg-id="tv.9" tvg-logo="http://.../TVNZ11280x1280.png",TVNZ 1
            #   https://host/.../master.m3u8|X-Forwarded-For=219.88.222.91
            header = playlist.readline()
            url = playlist.readline()
            header = header.split(",")
            metadata = header[0]
            metadata = metadata.split(" ")
            for item in metadata:
                if item == "#EXTINF:-1":
                    metadata.remove("#EXTINF:-1")
                elif "tvg-id" in item:
                    channelId = item[8:-1]     # strip tvg-id="..." (currently unused)
                elif "tvg-logo" in item:
                    channelLogo = item[10:-1]  # strip tvg-logo="..." (currently unused)
            channelName = header[1]
            print (channelName)
            print (url)
            lineup.append({'GuideNumber': channelNum,
                           'GuideName': str(channelNum) + channelName,
                           'URL': url
                           })
    # BUG FIX: the handler previously fell off the end and returned None,
    # which makes Flask raise a 500; return the JSON lineup instead.
    return jsonify(lineup)


if __name__ == '__main__':
    # debug invocation: jsonify needs an application/request context, so
    # the bare module-level lineup() call would now raise RuntimeError
    with app.test_request_context():
        lineup()
# Update SmoothLive.py  (stray commit-message line; commented out so it cannot be parsed as code)
#
# Forked from sstvProxy:
#
#sstvProxy Development Lead
#````````````````
#- bjzy <bjzybjzy@gmail.com>
#
#tvhProxy Developer (majority of code, thanks!)
#````````````````
#- Joel Kaaberg <joel.kaberg@gmail.com>
#
#Patches and Suggestions
#```````````````````````
#
#- Nikhil Choudhary
from gevent import monkey; monkey.patch_all()
import subprocess
import sys
import os
# import dateutil.parser
# import datetime
# import urllib2
# import SmoothUtils
# import SmoothAuth
# import traceback
# import operator
# import bisect
# import time
# import calendar
# import shlex
# import requests
from gevent.pywsgi import WSGIServer
from flask import Flask, Response, request, jsonify, abort
# import SmoothUtils
# import SmoothAuth
# import SmoothPlaylist #requires python 3
app = Flask(__name__)
# config = {
# 'bindAddr': os.environ.get('SSTV_BINDADDR') or '',
# 'sstvProxyURL': os.environ.get('SSTV_PROXY_URL') or 'http://localhost',
# 'tunerCount': os.environ.get('SSTV_TUNER_COUNT') or 6, # number of tuners to use for sstv
# }
#
#
# @app.route('/discover.json')
# def discover():
# return jsonify({
# 'FriendlyName': 'sstvProxy',
# 'ModelNumber': 'HDTC-2US',
# 'FirmwareName': 'hdhomeruntc_atsc',
# 'TunerCount': int(config['tunerCount']),
# 'FirmwareVersion': '20150826',
# 'DeviceID': '12345678',
# 'DeviceAuth': 'test1234',
# 'BaseURL': '%s' % config['sstvProxyURL'],
# 'LineupURL': '%s/lineup.json' % config['sstvProxyURL']
# })
#
#
# @app.route('/lineup_status.json')
# def status():
# return jsonify({
# 'ScanInProgress': 0,
# 'ScanPossible': 1,
# 'Source': "Cable",
# 'SourceList': ['Cable']
# })
@app.route('/lineup.json')
def lineup():
    """
    HDHomeRun-style lineup endpoint (ffmpeg-pipe variant).

    Regenerates the playlist by shelling out to SmoothPlaylist.py, then
    parses SmoothStreamsTV-xml.m3u8 into a JSON channel lineup whose URLs
    are ffmpeg remux pipe commands.
    """
    # Python 3: scheduleResult = SmoothPlaylist.main()
    # Python 2 compatible: run it and mirror its stderr onto our stdout
    child = subprocess.Popen("python SmoothPlaylist.py", shell=True, stderr=subprocess.PIPE)
    while True:
        out = child.stderr.read(1)
        if out == '' and child.poll() != None:
            break
        if out != '':
            sys.stdout.write(out)
            sys.stdout.flush()
    lineup = []
    # `with` so the playlist file is closed even on a parse error
    with open("SmoothStreamsTV-xml.m3u8", 'r') as playlist:
        playlist.readline()  # skip the #EXTM3U header line
        for channelNum in range(1, 151):
            # each channel is a pair of lines, e.g.
            #   #EXTINF:-1 tvg-id="tv.9" tvg-logo="http://.../TVNZ11280x1280.png",TVNZ 1
            #   https://host/.../master.m3u8|X-Forwarded-For=219.88.222.91
            header = playlist.readline()
            url = playlist.readline()
            header = header.split(",")
            metadata = header[0]
            metadata = metadata.split(" ")
            for item in metadata:
                if item == "#EXTINF:-1":
                    metadata.remove("#EXTINF:-1")
                elif "tvg-id" in item:
                    channelId = item[8:-1]     # strip tvg-id="..." (currently unused)
                elif "tvg-logo" in item:
                    channelLogo = item[10:-1]  # strip tvg-logo="..." (currently unused)
            channelName = header[1]
            print (channelName)
            print (url)
            # jobriens ffmpeg pipe (remux to mpegts on stdout)
            # BUG FIX: the command previously contained the literal word
            # "url" instead of the channel's actual stream url.
            pipeUrl = "ffmpeg -i " + url.strip() + " -codec copy -loglevel info -bsf:v h264_mp4toannexb -f mpegts -tune zerolatency pipe:1"
            lineup.append({'GuideNumber': channelNum,
                           'GuideName': str(channelNum) + channelName,
                           'URL': pipeUrl
                           })
    # BUG FIX: the handler previously fell off the end and returned None,
    # which makes Flask raise a 500; return the JSON lineup instead.
    return jsonify(lineup)


if __name__ == '__main__':
    # debug invocation: jsonify needs an application/request context, so
    # the bare module-level lineup() call would now raise RuntimeError
    with app.test_request_context():
        lineup()
|
c34d6c91-2ead-11e5-8e49-7831c1d44c14
c3536335-2ead-11e5-b0eb-7831c1d44c14
c3536335-2ead-11e5-b0eb-7831c1d44c14 |
"""vobject module for reading vCard and vCalendar files."""
from __future__ import print_function
import copy
import re
import sys
import logging
import codecs
import six
#------------------------------------ Logging ----------------------------------
logger = logging.getLogger(__name__)
if not logging.getLogger().handlers:
    # No root handlers configured yet: install a basic stderr handler so
    # this module's messages are not lost to the "no handler" warning.
    handler = logging.StreamHandler()
    formatter = logging.Formatter('%(name)s %(levelname)s %(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)
logger.setLevel(logging.ERROR)  # Log errors
DEBUG = False  # Don't waste time on debug calls
#----------------------------------- Constants ---------------------------------
# line-terminator and folding whitespace characters used by the parser
CR = '\r'
LF = '\n'
CRLF = CR + LF
SPACE = ' '
TAB = '\t'
SPACEORTAB = SPACE + TAB
#-------------------------------- Useful modules -------------------------------
# use doctest, it kills two birds with one stone and docstrings often become
# more readable to boot (see parseLine's docstring).
# use logging, then when debugging we can just set our verbosity.
# use epydoc syntax for documenting code, please document every class and non-
# trivial method (see http://epydoc.sourceforge.net/epytext.html
# and http://epydoc.sourceforge.net/fields.html). Also, please
# follow http://www.python.org/peps/pep-0257.html for docstrings.
#-------------------------------------------------------------------------------
#--------------------------------- Main classes --------------------------------
class VBase(object):
"""Base class for ContentLine and Component.
@ivar behavior:
The Behavior class associated with this object, which controls
validation, transformations, and encoding.
@ivar parentBehavior:
The object's parent's behavior, or None if no behaviored parent exists.
@ivar isNative:
Boolean describing whether this component is a Native instance.
@ivar group:
An optional group prefix, should be used only to indicate sort order in
vCards, according to RFC2426
"""
def __init__(self, group=None, *args, **kwds):
super(VBase, self).__init__(*args, **kwds)
self.group = group
self.behavior = None
self.parentBehavior = None
self.isNative = False
def copy(self, copyit):
self.group = copyit.group
self.behavior = copyit.behavior
self.parentBehavior = copyit.parentBehavior
self.isNative = copyit.isNative
def validate(self, *args, **kwds):
"""Call the behavior's validate method, or return True."""
if self.behavior:
return self.behavior.validate(self, *args, **kwds)
else: return True
def getChildren(self):
"""Return an iterable containing the contents of the object."""
return []
def clearBehavior(self, cascade=True):
"""Set behavior to None. Do for all descendants if cascading."""
self.behavior=None
if cascade: self.transformChildrenFromNative()
    def autoBehavior(self, cascade=False):
        """Set behavior if name is in self.parentBehavior.knownChildren.

        If cascade is True, unset behavior and parentBehavior for all
        descendants, then recalculate behavior and parentBehavior.
        """
        parentBehavior = self.parentBehavior
        if parentBehavior is not None:
            # knownChildTup[2] is passed to getBehavior as the registry id;
            # the meaning of the other slots isn't visible here — confirm
            # against the Behavior classes before relying on it.
            knownChildTup = parentBehavior.knownChildren.get(self.name, None)
            if knownChildTup is not None:
                behavior = getBehavior(self.name, knownChildTup[2])
                if behavior is not None:
                    self.setBehavior(behavior, cascade)
                    # decode immediately when the line is still encoded
                    if isinstance(self, ContentLine) and self.encoded:
                        self.behavior.decode(self)
            elif isinstance(self, ContentLine):
                # unknown child line: fall back to the parent's default behavior
                self.behavior = parentBehavior.defaultBehavior
                if self.encoded and self.behavior:
                    self.behavior.decode(self)
def setBehavior(self, behavior, cascade=True):
"""Set behavior. If cascade is True, autoBehavior all descendants."""
self.behavior=behavior
if cascade:
for obj in self.getChildren():
obj.parentBehavior=behavior
obj.autoBehavior(True)
def transformToNative(self):
"""Transform this object into a custom VBase subclass.
transformToNative should always return a representation of this object.
It may do so by modifying self in place then returning self, or by
creating a new object.
"""
if self.isNative or not self.behavior or not self.behavior.hasNative:
return self
else:
try:
return self.behavior.transformToNative(self)
except Exception as e:
# wrap errors in transformation in a ParseError
lineNumber = getattr(self, 'lineNumber', None)
if isinstance(e, ParseError):
if lineNumber is not None:
e.lineNumber = lineNumber
raise
else:
msg = "In transformToNative, unhandled exception: %s: %s"
msg = msg % (sys.exc_info()[0], sys.exc_info()[1])
new_error = ParseError(msg, lineNumber)
raise (ParseError, new_error, sys.exc_info()[2])
def transformFromNative(self):
"""Return self transformed into a ContentLine or Component if needed.
May have side effects. If it does, transformFromNative and
transformToNative MUST have perfectly inverse side effects. Allowing
such side effects is convenient for objects whose transformations only
change a few attributes.
Note that it isn't always possible for transformFromNative to be a
perfect inverse of transformToNative, in such cases transformFromNative
should return a new object, not self after modifications.
"""
if self.isNative and self.behavior and self.behavior.hasNative:
try:
return self.behavior.transformFromNative(self)
except Exception as e:
# wrap errors in transformation in a NativeError
lineNumber = getattr(self, 'lineNumber', None)
if isinstance(e, NativeError):
if lineNumber is not None:
e.lineNumber = lineNumber
raise
else:
msg = "In transformFromNative, unhandled exception: %s: %s"
msg = msg % (sys.exc_info()[0], sys.exc_info()[1])
new_error = NativeError(msg, lineNumber)
raise (NativeError, new_error, sys.exc_info()[2])
else: return self
    def transformChildrenToNative(self):
        """Recursively replace children with their native representation."""
        # No-op here: the base class has no children; Component overrides this.
        pass
    def transformChildrenFromNative(self, clearBehavior=True):
        """Recursively transform native children to vanilla representations."""
        # No-op here: the base class has no children; Component overrides this.
        pass
def serialize(self, buf=None, lineLength=75, validate=True, behavior=None):
"""Serialize to buf if it exists, otherwise return a string.
Use self.behavior.serialize if behavior exists.
"""
if not behavior:
behavior = self.behavior
if behavior:
if DEBUG: logger.debug("serializing %s with behavior" % self.name)
return behavior.serialize(self, buf, lineLength, validate)
else:
if DEBUG: logger.debug("serializing %s without behavior" % self.name)
return defaultSerialize(self, buf, lineLength)
def toVName(name, stripNum = 0, upper = False):
    """Convert a Python attribute name to an iCalendar-style name.

    Optionally uppercase the name and strip *stripNum* trailing characters
    (used to drop suffixes like ``_param``); underscores become dashes.
    """
    result = name.upper() if upper else name
    if stripNum:
        result = result[:-stripNum]
    return result.replace('_', '-')
class ContentLine(VBase):
    """Holds one content line for formats like vCard and vCalendar.
    For example::
      <SUMMARY{u'param1' : [u'val1'], u'param2' : [u'val2']}Bastille Day Party>
    @ivar name:
        The uppercased name of the contentline.
    @ivar params:
        A dictionary of parameters and associated lists of values (the list may
        be empty for empty parameters).
    @ivar value:
        The value of the contentline.
    @ivar singletonparams:
        A list of parameters for which it's unclear if the string represents the
        parameter name or the parameter value. In vCard 2.1, "The value string
        can be specified alone in those cases where the value is unambiguous".
        This is crazy, but we have to deal with it.
    @ivar encoded:
        A boolean describing whether the data in the content line is encoded.
        Generally, text read from a serialized vCard or vCalendar should be
        considered encoded.  Data added programmatically should not be encoded.
    @ivar lineNumber:
        An optional line number associated with the contentline.
    """
    def __init__(self, name, params, value, group=None,
                 encoded=False, isNative=False,
                 lineNumber = None, *args, **kwds):
        """Take output from parseLine, convert params list to dictionary."""
        # group is used as a positional argument to match parseLine's return
        super(ContentLine, self).__init__(group, *args, **kwds)
        self.name = name.upper()
        self.value = value
        self.encoded = encoded
        self.params = {}
        self.singletonparams = []
        self.isNative = isNative
        self.lineNumber = lineNumber
        def updateTable(x):
            # A one-element list is an ambiguous vCard 2.1 singleton
            # parameter; otherwise x is [name, value, value, ...].
            if len(x) == 1:
                self.singletonparams += x
            else:
                paramlist = self.params.setdefault(x[0].upper(), [])
                paramlist.extend(x[1:])
        # BUG FIX: this used ``map(updateTable, params)``.  On Python 3,
        # map() is lazy, so updateTable never ran and all parameters were
        # silently dropped.  Iterate explicitly instead.
        for param in params:
            updateTable(param)
        qp = False
        if 'ENCODING' in self.params:
            if 'QUOTED-PRINTABLE' in self.params['ENCODING']:
                qp = True
                self.params['ENCODING'].remove('QUOTED-PRINTABLE')
                if 0==len(self.params['ENCODING']):
                    del self.params['ENCODING']
        if 'QUOTED-PRINTABLE' in self.singletonparams:
            qp = True
            self.singletonparams.remove('QUOTED-PRINTABLE')
        if qp:
            self.value = six.u(self.value).decode('quoted-printable')
        # self.value should be unicode for iCalendar, but if quoted-printable
        # is used, or if the quoted-printable state machine is used, text may be
        # encoded
        if type(self.value) is str:
            self.value = six.u(self.value)
    @classmethod
    def duplicate(clz, copyit):
        """Return a new ContentLine whose state is copied from copyit."""
        newcopy = clz('', {}, '')
        newcopy.copy(copyit)
        return newcopy
    def copy(self, copyit):
        """Copy all content-line state from copyit onto self."""
        super(ContentLine, self).copy(copyit)
        self.name = copyit.name
        self.value = copy.copy(copyit.value)
        # BUG FIX: this read ``self.encoded = self.encoded`` — a no-op
        # self-assignment, so copies never inherited the encoded flag.
        self.encoded = copyit.encoded
        self.params = copy.copy(copyit.params)
        for k,v in self.params.items():
            self.params[k] = copy.copy(v)
        self.singletonparams = copy.copy(copyit.singletonparams)
        self.lineNumber = copyit.lineNumber
    def __eq__(self, other):
        try:
            return (self.name == other.name) and (self.params == other.params) and (self.value == other.value)
        except Exception:
            # Anything without comparable attributes is simply "not equal".
            return False
    def _getAttributeNames(self):
        """Return a list of attributes of the object.
        Python 2.6 will add __dir__ to customize what attributes are returned
        by dir, for now copy PyCrust so that IPython can accurately do
        completion.
        """
        keys = self.params.keys()
        params = [param + '_param' for param in keys]
        params.extend(param + '_paramlist' for param in keys)
        return params
    def __getattr__(self, name):
        """Make params accessible via self.foo_param or self.foo_paramlist.
        Underscores, legal in python variable names, are converted to dashes,
        which are legal in IANA tokens.
        """
        try:
            if name.endswith('_param'):
                return self.params[toVName(name, 6, True)][0]
            elif name.endswith('_paramlist'):
                return self.params[toVName(name, 10, True)]
            else:
                raise AttributeError(name)
        except KeyError:
            raise AttributeError(name)
    def __setattr__(self, name, value):
        """Make params accessible via self.foo_param or self.foo_paramlist.
        Underscores, legal in python variable names, are converted to dashes,
        which are legal in IANA tokens.
        """
        if name.endswith('_param'):
            if type(value) == list:
                self.params[toVName(name, 6, True)] = value
            else:
                self.params[toVName(name, 6, True)] = [value]
        elif name.endswith('_paramlist'):
            if type(value) == list:
                self.params[toVName(name, 10, True)] = value
            else:
                raise VObjectError("Parameter list set to a non-list")
        else:
            prop = getattr(self.__class__, name, None)
            if isinstance(prop, property):
                prop.fset(self, value)
            else:
                object.__setattr__(self, name, value)
    def __delattr__(self, name):
        try:
            if name.endswith('_param'):
                del self.params[toVName(name, 6, True)]
            elif name.endswith('_paramlist'):
                del self.params[toVName(name, 10, True)]
            else:
                object.__delattr__(self, name)
        except KeyError:
            raise AttributeError(name)
    def valueRepr( self ):
        """transform the representation of the value according to the behavior,
        if any"""
        v = self.value
        if self.behavior:
            v = self.behavior.valueRepr( self )
        return v
    def __str__(self):
        return "<%s%s%s>" % (self.name, self.params, self.valueRepr())
    def __repr__(self):
        return self.__str__().replace('\n', '\\n')
    def prettyPrint(self, level = 0, tabwidth=3):
        """Print an indented human-readable dump of this line for debugging."""
        pre = ' ' * level * tabwidth
        print(pre, self.name + ":", self.valueRepr())
        if self.params:
            lineKeys= self.params.keys()
            print(pre, "params for ", self.name +':')
            for aKey in lineKeys:
                print(pre + ' ' * tabwidth, aKey, self.params[aKey])
class Component(VBase):
    """A complex property that can contain multiple ContentLines.
    For our purposes, a component must start with a BEGIN:xxxx line and end with
    END:xxxx, or have a PROFILE:xxx line if a top-level component.
    @ivar contents:
        A dictionary of lists of Component or ContentLine instances. The keys
        are the lowercased names of child ContentLines or Components.
        Note that BEGIN and END ContentLines are not included in contents.
    @ivar name:
        Uppercase string used to represent this Component, i.e VCARD if the
        serialized object starts with BEGIN:VCARD.
    @ivar useBegin:
        A boolean flag determining whether BEGIN: and END: lines should
        be serialized.
    """
    def __init__(self, name=None, *args, **kwds):
        super(Component, self).__init__(*args, **kwds)
        self.contents = {}
        if name:
            self.name=name.upper()
            self.useBegin = True
        else:
            self.name = ''
            self.useBegin = False
        self.autoBehavior()
    @classmethod
    def duplicate(clz, copyit):
        """Return a new Component whose state is copied from copyit."""
        newcopy = clz()
        newcopy.copy(copyit)
        return newcopy
    def copy(self, copyit):
        """Copy name/useBegin and deep-copy all children from copyit."""
        super(Component, self).copy(copyit)
        # deep copy of contents
        self.contents = {}
        for key, lvalue in copyit.contents.items():
            newvalue = []
            for value in lvalue:
                newitem = value.duplicate(value)
                newvalue.append(newitem)
            self.contents[key] = newvalue
        self.name = copyit.name
        self.useBegin = copyit.useBegin
    def setProfile(self, name):
        """Assign a PROFILE to this unnamed component.
        Used by vCard, not by vCalendar.
        """
        if self.name or self.useBegin:
            if self.name == name: return
            raise VObjectError("This component already has a PROFILE or uses BEGIN.")
        self.name = name.upper()
    def _getAttributeNames(self):
        """Return a list of attributes of the object.
        Python 2.6 will add __dir__ to customize what attributes are returned
        by dir, for now copy PyCrust so that IPython can accurately do
        completion.
        """
        # BUG FIX: dict.keys() is a view on Python 3 and has no extend();
        # materializing it as a list behaves identically on Python 2.
        names = list(self.contents.keys())
        names.extend(name + '_list' for name in self.contents.keys())
        return names
    def __getattr__(self, name):
        """For convenience, make self.contents directly accessible.
        Underscores, legal in python variable names, are converted to dashes,
        which are legal in IANA tokens.
        """
        # if the object is being re-created by pickle, self.contents may not
        # be set, don't get into an infinite loop over the issue
        if name == 'contents':
            return object.__getattribute__(self, name)
        try:
            if name.endswith('_list'):
                return self.contents[toVName(name, 5)]
            else:
                return self.contents[toVName(name)][0]
        except KeyError:
            raise AttributeError(name)
    # Attribute names that bypass the contents-dict magic in __setattr__.
    normal_attributes = ['contents','name','behavior','parentBehavior','group']
    def __setattr__(self, name, value):
        """For convenience, make self.contents directly accessible.
        Underscores, legal in python variable names, are converted to dashes,
        which are legal in IANA tokens.
        """
        if name not in self.normal_attributes and name.lower()==name:
            if type(value) == list:
                if name.endswith('_list'):
                    name = name[:-5]
                self.contents[toVName(name)] = value
            elif name.endswith('_list'):
                raise VObjectError("Component list set to a non-list")
            else:
                self.contents[toVName(name)] = [value]
        else:
            prop = getattr(self.__class__, name, None)
            if isinstance(prop, property):
                prop.fset(self, value)
            else:
                object.__setattr__(self, name, value)
    def __delattr__(self, name):
        try:
            if name not in self.normal_attributes and name.lower()==name:
                if name.endswith('_list'):
                    del self.contents[toVName(name, 5)]
                else:
                    del self.contents[toVName(name)]
            else:
                object.__delattr__(self, name)
        except KeyError:
            raise AttributeError(name)
    def getChildValue(self, childName, default = None, childNumber = 0):
        """Return a child's value (the first, by default), or None."""
        child = self.contents.get(toVName(childName))
        if child is None:
            return default
        else:
            return child[childNumber].value
    def add(self, objOrName, group = None):
        """Add objOrName to contents, set behavior if it can be inferred.
        If objOrName is a string, create an empty component or line based on
        behavior. If no behavior is found for the object, add a ContentLine.
        group is an optional prefix to the name of the object (see
        RFC 2425).
        """
        if isinstance(objOrName, VBase):
            obj = objOrName
            if self.behavior:
                obj.parentBehavior = self.behavior
                obj.autoBehavior(True)
        else:
            name = objOrName.upper()
            try:
                id=self.behavior.knownChildren[name][2]
                behavior = getBehavior(name, id)
                if behavior.isComponent:
                    obj = Component(name)
                else:
                    obj = ContentLine(name, [], '', group)
                obj.parentBehavior = self.behavior
                obj.behavior = behavior
                obj = obj.transformToNative()
            except (KeyError, AttributeError):
                # No behavior knowledge available: fall back to a plain line.
                obj = ContentLine(objOrName, [], '', group)
        if obj.behavior is None and self.behavior is not None:
            if isinstance(obj, ContentLine):
                obj.behavior = self.behavior.defaultBehavior
        self.contents.setdefault(obj.name.lower(), []).append(obj)
        return obj
    def remove(self, obj):
        """Remove obj from contents."""
        named = self.contents.get(obj.name.lower())
        if named:
            try:
                named.remove(obj)
                if len(named) == 0:
                    del self.contents[obj.name.lower()]
            except ValueError:
                # obj was not in the list; removal is a no-op.
                pass
    def getChildren(self):
        """Return an iterable of all children."""
        for objList in self.contents.values():
            for obj in objList: yield obj
    def components(self):
        """Return an iterable of all Component children."""
        return (i for i in self.getChildren() if isinstance(i, Component))
    def lines(self):
        """Return an iterable of all ContentLine children."""
        return (i for i in self.getChildren() if isinstance(i, ContentLine))
    def sortChildKeys(self):
        """Return child keys, behavior's sortFirst keys first, rest sorted."""
        try:
            first = [s for s in self.behavior.sortFirst if s in self.contents]
        except Exception:
            first = []
        return first + sorted(k for k in self.contents.keys() if k not in first)
    def getSortedChildren(self):
        """Return all children, flattened in sortChildKeys() order."""
        return [obj for k in self.sortChildKeys() for obj in self.contents[k]]
    def setBehaviorFromVersionLine(self, versionLine):
        """Set behavior if one matches name, versionLine.value."""
        v = getBehavior(self.name, versionLine.value)
        if v:
            self.setBehavior(v)
    def transformChildrenToNative(self):
        """Recursively replace children with their native representation."""
        #sort to get dependency order right, like vtimezone before vevent
        for childArray in (self.contents[k] for k in self.sortChildKeys()):
            # range() instead of py2-only xrange(); identical iteration.
            for i in range(len(childArray)):
                childArray[i]=childArray[i].transformToNative()
                childArray[i].transformChildrenToNative()
    def transformChildrenFromNative(self, clearBehavior=True):
        """Recursively transform native children to vanilla representations."""
        for childArray in self.contents.values():
            for i in range(len(childArray)):
                childArray[i]=childArray[i].transformFromNative()
                childArray[i].transformChildrenFromNative(clearBehavior)
                if clearBehavior:
                    childArray[i].behavior = None
                    childArray[i].parentBehavior = None
    def __str__(self):
        if self.name:
            return "<%s| %s>" % (six.u(self.name), ''.join(six.u(e) for e in self.getSortedChildren()))
        else:
            return u'<*unnamed*| {}>'.format(self.getSortedChildren())
    def __repr__(self):
        return self.__str__()
    def prettyPrint(self, level = 0, tabwidth=3):
        """Print an indented human-readable dump of this tree for debugging."""
        pre = ' ' * level * tabwidth
        print(pre, self.name)
        if isinstance(self, Component):
            for line in self.getChildren():
                line.prettyPrint(level + 1, tabwidth)
        print('')
class VObjectError(Exception):
    """Base exception for vobject; may carry the offending line number."""
    def __init__(self, msg, lineNumber=None):
        self.msg = msg
        # Only record lineNumber when one was actually supplied, so that
        # __str__ (and callers using hasattr) can tell the cases apart.
        if lineNumber is not None:
            self.lineNumber = lineNumber
    def __str__(self):
        if not hasattr(self, 'lineNumber'):
            return repr(self.msg)
        return "At line %s: %s" % (self.lineNumber, self.msg)
class ParseError(VObjectError):
    """Raised when serialized vCard/vCalendar text cannot be parsed."""
    pass
class ValidateError(VObjectError):
    """Raised when validation of a component fails."""
    pass
class NativeError(VObjectError):
    """Raised when transforming to or from a native representation fails."""
    pass
#-------------------------- Parsing functions ----------------------------------
# parseLine regular expressions
patterns = {}
# Note that underscore is not legal for names, it's included because
# Lotus Notes uses it
patterns['name'] = '[a-zA-Z0-9\-_]+'
patterns['safe_char'] = '[^";:,]'
patterns['qsafe_char'] = '[^"]'
# the combined Python string replacement and regex syntax is a little confusing;
# remember that %(foobar)s is replaced with patterns['foobar'], so for instance
# param_value is any number of safe_chars or any number of qsafe_chars surrounded
# by double quotes.
patterns['param_value'] = ' "%(qsafe_char)s * " | %(safe_char)s * ' % patterns
# get a tuple of two elements, one will be empty, the other will have the value
patterns['param_value_grouped'] = """
" ( %(qsafe_char)s * )" | ( %(safe_char)s + )
""" % patterns
# get a parameter and its values, without any saved groups
patterns['param'] = r"""
; (?: %(name)s )                     # parameter name
(?:
    (?: = (?: %(param_value)s ) )?   # 0 or more parameter values, multiple
    (?: , (?: %(param_value)s ) )*   # parameters are comma separated
)*
""" % patterns
# get a parameter, saving groups for name and value (value still needs parsing)
patterns['params_grouped'] = r"""
; ( %(name)s )
(?: =
    (
        (?:   (?: %(param_value)s ) )?   # 0 or more parameter values, multiple
        (?: , (?: %(param_value)s ) )*   # parameters are comma separated
    )
)?
""" % patterns
# get a full content line, break it up into group, name, parameters, and value
patterns['line'] = r"""
^ ((?P<group> %(name)s)\.)?(?P<name> %(name)s) # name group
  (?P<params> (?: %(param)s )* )               # params group (may be empty)
: (?P<value> .* )$                             # value group
""" % patterns
# FIX: a stray no-op string literal (a dead, unassigned copy of the
# param_value pattern) used to sit here; it has been removed.
param_values_re = re.compile(patterns['param_value_grouped'], re.VERBOSE)
params_re = re.compile(patterns['params_grouped'], re.VERBOSE)
line_re = re.compile(patterns['line'], re.DOTALL | re.VERBOSE)
begin_re = re.compile('BEGIN', re.IGNORECASE)
def parseParams(string):
    """Split a raw parameter string into [name, value, ...] lists.

    >>> parseParams(';ALTREP="http://www.wiz.org"')
    [['ALTREP', 'http://www.wiz.org']]
    >>> parseParams('')
    []
    >>> parseParams(';ALTREP="http://www.wiz.org;;",Blah,Foo;NEXT=Nope;BAR')
    [['ALTREP', 'http://www.wiz.org;;', 'Blah', 'Foo'], ['NEXT', 'Nope'], ['BAR']]
    """
    allParameters = []
    for name, valuesString in params_re.findall(string):
        paramList = [name]
        for quoted, unquoted in param_values_re.findall(valuesString):
            # Exactly one of the two groups matches for each value.
            paramList.append(quoted if quoted != '' else unquoted)
        allParameters.append(paramList)
    return allParameters
def parseLine(line, lineNumber = None):
    """Parse one unfolded content line into (name, params, value, group).

    >>> parseLine("BLAH:")
    ('BLAH', [], '', None)
    >>> parseLine("EMAIL;PREF;INTERNET:john@nowhere.com")
    ('EMAIL', [['PREF'], ['INTERNET']], 'john@nowhere.com', None)
    >>> parseLine('item1.ADR;type=HOME;type=pref:;;Reeperbahn 116;Hamburg;;20359;')
    ('ADR', [['type', 'HOME'], ['type', 'pref']], ';;Reeperbahn 116;Hamburg;;20359;', 'item1')
    >>> parseLine(":")
    Traceback (most recent call last):
    ...
    ParseError: 'Failed to parse line: :'
    """
    match = line_re.match(line)
    if match is None:
        raise ParseError("Failed to parse line: %s" % line, lineNumber)
    # Underscores are replaced with dash to work around Lotus Notes
    name = match.group('name').replace('_','-')
    params = parseParams(match.group('params'))
    return (name, params, match.group('value'), match.group('group'))
# logical line regular expressions
patterns['lineend'] = r'(?:\r\n|\r|\n|$)'
patterns['wrap'] = r'%(lineend)s [\t ]' % patterns
patterns['logicallines'] = r"""
(
(?: [^\r\n] | %(wrap)s )*
%(lineend)s
)
""" % patterns
patterns['wraporend'] = r'(%(wrap)s | %(lineend)s )' % patterns
wrap_re = re.compile(patterns['wraporend'], re.VERBOSE)
logical_lines_re = re.compile(patterns['logicallines'], re.VERBOSE)
testLines="""
Line 0 text
, Line 0 continued.
Line 1;encoding=quoted-printable:this is an evil=
evil=
format.
Line 2 is a new line, it does not start with whitespace.
"""
def getLogicalLines(fp, allowQP=True, findBegin=False):
    """
    Iterate through a stream, yielding one logical line at a time.
    Because many applications still use vCard 2.1, we have to deal with the
    quoted-printable encoding for long lines, as well as the vCard 3.0 and
    vCalendar line folding technique, a whitespace character at the start
    of the line.
    Quoted-printable data will be decoded in the Behavior decoding phase.
    >>> from six import StringIO
    >>> f=StringIO(testLines)
    >>> for n, l in enumerate(getLogicalLines(f)):
    ...     print("Line %s: %s" % (n, l[0]))
    ...
    Line 0: Line 0 text, Line 0 continued.
    Line 1: Line 1;encoding=quoted-printable:this is an evil=
    evil=
    format.
    Line 2: Line 2 is a new line, it does not start with whitespace.
    """
    if not allowQP:
        # Fast path: no quoted-printable possible, so slurp the stream and
        # split it with a regex instead of scanning it line by line.
        bytes = fp.read(-1)  # NOTE: shadows the builtin name ``bytes``
        if len(bytes) > 0:
            if type(bytes[0]) == unicode:  # py2-only check — already decoded
                val = bytes
            elif not findBegin:
                val = bytes.decode('utf-8')
            else:
                # Try encodings until one decodes AND contains BEGIN.
                for encoding in 'utf-8', 'utf-16-LE', 'utf-16-BE', 'iso-8859-1':
                    try:
                        val = bytes.decode(encoding)
                        if begin_re.search(val) is not None:
                            break
                    except UnicodeDecodeError:
                        pass
                else:
                    raise ParseError('Could not find BEGIN when trying to determine encoding')
        else:
            val = bytes
        # strip off any UTF8 BOMs which Python's UTF8 decoder leaves
        val = val.lstrip( unicode( codecs.BOM_UTF8, "utf8" ) )
        lineNumber = 1
        for match in logical_lines_re.finditer(val):
            # Unfold: delete the break+whitespace pairs; n counts the
            # physical lines consumed by this logical line.
            line, n = wrap_re.subn('', match.group())
            if line != '':
                yield line, lineNumber
            lineNumber += n
    else:
        # Stateful scan: a trailing '=' on a quoted-printable line marks a
        # soft break, which continues the logical line without leading space.
        quotedPrintable=False
        newbuffer = six.StringIO
        logicalLine = newbuffer()
        lineNumber = 0
        lineStartNumber = 0
        while True:
            line = fp.readline()
            if line == '':
                break
            else:
                line = line.rstrip(CRLF)
                lineNumber += 1
            if line.rstrip() == '':
                # Blank line: flush any pending logical line and reset state.
                if logicalLine.tell() > 0:
                    yield logicalLine.getvalue(), lineStartNumber
                lineStartNumber = lineNumber
                logicalLine = newbuffer()
                quotedPrintable=False
                continue
            if quotedPrintable and allowQP:
                # Continuation of a quoted-printable soft break.
                logicalLine.write('\n')
                logicalLine.write(line)
                quotedPrintable=False
            elif line[0] in SPACEORTAB:
                # Standard fold: strip the single leading whitespace char.
                logicalLine.write(line[1:])
            elif logicalLine.tell() > 0:
                # New logical line begins: emit the previous one first.
                yield logicalLine.getvalue(), lineStartNumber
                lineStartNumber = lineNumber
                logicalLine = newbuffer()
                logicalLine.write(line)
            else:
                logicalLine = newbuffer()
                logicalLine.write(line)
            # hack to deal with the fact that vCard 2.1 allows parameters to be
            # encoded without a parameter name. False positives are unlikely, but
            # possible.
            val = logicalLine.getvalue()
            if val[-1]=='=' and val.lower().find('quoted-printable') >= 0:
                quotedPrintable=True
        if logicalLine.tell() > 0:
            yield logicalLine.getvalue(), lineStartNumber
def textLineToContentLine(text, n=None):
    """Parse one logical line into an encoded ContentLine tagged with its
    source line number."""
    name, params, value, group = parseLine(text, n)
    return ContentLine(name, params, value, group,
                       **{'encoded': True, 'lineNumber': n})
def dquoteEscape(param):
    """Return param, or "param" if ',' or ';' or ':' is in param.

    Raises VObjectError when param itself contains a double quote, which
    cannot be represented in a parameter value.
    """
    if '"' in param:
        raise VObjectError("Double quotes aren't allowed in parameter values.")
    needsQuoting = any(char in param for char in ',;:')
    return '"' + param + '"' if needsQuoting else param
def foldOneLine(outbuf, input, lineLength = 75):
    """Write *input* to *outbuf*, folded at lineLength, CRLF-terminated.

    Folding never splits a multi-byte UTF-8 sequence: if the break position
    lands on a UTF-8 continuation byte, the break is stepped back to the
    character boundary.
    To-do: This all seems odd. Is it still needed, especially in python3?
    """
    if len(input) < lineLength:
        # Optimize for unfolded line case
        try:
            outbuf.write(bytes(input, 'UTF-8'))
        except Exception:
            # fall back on py2 syntax
            outbuf.write(input)
    else:
        # Look for valid utf8 range and write that out
        start = 0
        written = 0
        while written < len(input):
            # Start max length -1 chars on from where we are
            offset = start + lineLength - 1
            if offset >= len(input):
                line = input[start:]
                try:
                    outbuf.write(bytes(line, 'UTF-8'))
                except Exception:
                    # fall back on py2 syntax
                    outbuf.write(line)
                written = len(input)
            else:
                # BUG FIX: this compared ``input[offset] > 0x7F`` — a
                # str-vs-int comparison (always True on py2, TypeError on
                # py3).  Compare the character's ordinal instead.
                while (ord(input[offset]) > 0x7F) and ((ord(input[offset]) & 0xC0) == 0x80):
                    # Step back until we have a valid char
                    offset -= 1
                line = input[start:offset]
                try:
                    outbuf.write(bytes(line, 'UTF-8'))
                except Exception:
                    # BUG FIX: this was ``outbuf.write(bytes(line))`` with no
                    # encoding and no py2 fallback; mirror the other writes.
                    outbuf.write(line)
                try:
                    outbuf.write(bytes("\r\n", 'UTF-8'))
                except Exception:
                    # fall back on py2 syntax
                    outbuf.write("\r\n")
                written += offset - start
                start = offset
    try:
        outbuf.write(bytes("\r\n", 'UTF-8'))
    except Exception:
        # fall back on py2 syntax
        outbuf.write("\r\n")
def defaultSerialize(obj, buf, lineLength):
    """Encode and fold obj and its children, write to buf or return a string."""
    outbuf = buf or six.StringIO()
    if isinstance(obj, Component):
        if obj.group is None:
            groupString = ''
        else:
            groupString = obj.group + '.'
        if obj.useBegin:
            foldOneLine(outbuf, "{0}BEGIN:{1}".format(groupString, obj.name), lineLength)
        for child in obj.getSortedChildren():
            #validate is recursive, we only need to validate once
            child.serialize(outbuf, lineLength, validate=False)
        if obj.useBegin:
            foldOneLine(outbuf, "{0}END:{1}".format(groupString, obj.name), lineLength)
    elif isinstance(obj, ContentLine):
        # Temporarily encode the line for the wire, emit it, then decode it
        # again so the in-memory object is left exactly as it was found.
        startedEncoded = obj.encoded
        if obj.behavior and not startedEncoded: obj.behavior.encode(obj)
        s=codecs.getwriter('utf-8')(six.StringIO()) #unfolded buffer
        if obj.group is not None:
            s.write(obj.group + '.')
        s.write(obj.name.upper())
        # Parameters are emitted sorted by name for deterministic output.
        keys = sorted(obj.params.keys())
        for key in keys:
            paramvals = obj.params[key]
            s.write(';' + key + '=' + ','.join(dquoteEscape(p) for p in paramvals))
        s.write(':' + obj.value)
        if obj.behavior and not startedEncoded: obj.behavior.decode(obj)
        foldOneLine(outbuf, s.getvalue(), lineLength)
    # When a caller-supplied buffer was used, return it; otherwise the string.
    return buf or outbuf.getvalue()
# Minimal vCalendar sample used by readComponents' doctest.
testVCalendar="""
BEGIN:VCALENDAR
BEGIN:VEVENT
SUMMARY;blah=hi!:Bastille Day Party
END:VEVENT
END:VCALENDAR"""
class Stack:
    """LIFO stack of components being built while parsing BEGIN/END pairs."""
    def __init__(self):
        self.stack = []
    def __len__(self):
        return len(self.stack)
    def top(self):
        """Return the topmost item without removing it, or None if empty."""
        return self.stack[-1] if self.stack else None
    def topName(self):
        """Return the name of the topmost item, or None if empty."""
        return self.stack[-1].name if self.stack else None
    def modifyTop(self, item):
        """Add item to the top component, creating one if the stack is empty."""
        top = self.top()
        if not top:
            top = Component()
            self.push(top)
        top.add(item)  # add sets behavior for item and children
    def push(self, obj):
        self.stack.append(obj)
    def pop(self):
        return self.stack.pop()
def readComponents(streamOrString, validate=False, transform=True,
                   findBegin=True, ignoreUnreadable=False,
                   allowQP=False):
    """Generate one Component at a time from a stream.
    >>> from six import StringIO
    >>> f = StringIO(testVCalendar)
    >>> cal=readComponents(f).next()
    >>> cal
    <VCALENDAR| [<VEVENT| [<SUMMARY{u'BLAH': [u'hi!']}Bastille Day Party>]>]>
    >>> cal.vevent.summary
    <SUMMARY{u'BLAH': [u'hi!']}Bastille Day Party>
    """
    if isinstance(streamOrString, basestring):
        stream = six.StringIO(streamOrString)
    else:
        stream = streamOrString
    try:
        stack = Stack()
        versionLine = None
        n = 0
        for line, n in getLogicalLines(stream, allowQP, findBegin):
            if ignoreUnreadable:
                try:
                    vline = textLineToContentLine(line, n)
                except VObjectError as e:
                    # NOTE(review): e.lineNumber only exists when a line
                    # number was supplied at raise time, and e.message is
                    # Python 2 only — verify before relying on either.
                    if e.lineNumber is not None:
                        msg = "Skipped line %(lineNumber)s, message: %(msg)s"
                    else:
                        msg = "Skipped a line, message: %(msg)s"
                    logger.error(msg % {'lineNumber' : e.lineNumber,
                                        'msg' : e.message})
                    continue
            else:
                vline = textLineToContentLine(line, n)
            if vline.name == "VERSION":
                # Remember the VERSION line so the finished component can
                # pick a version-specific behavior.
                versionLine = vline
                stack.modifyTop(vline)
            elif vline.name == "BEGIN":
                stack.push(Component(vline.value, group=vline.group))
            elif vline.name == "PROFILE":
                if not stack.top(): stack.push(Component())
                stack.top().setProfile(vline.value)
            elif vline.name == "END":
                if len(stack) == 0:
                    err = "Attempted to end the %s component, \
                         but it was never opened" % vline.value
                    raise ParseError(err, n)
                if vline.value.upper() == stack.topName(): #START matches END
                    if len(stack) == 1:
                        # Outermost component finished: attach behavior,
                        # optionally validate/transform, and yield it.
                        component=stack.pop()
                        if versionLine is not None:
                            component.setBehaviorFromVersionLine(versionLine)
                        else:
                            behavior = getBehavior(component.name)
                            if behavior:
                                component.setBehavior(behavior)
                        if validate: component.validate(raiseException=True)
                        if transform: component.transformChildrenToNative()
                        yield component #EXIT POINT
                    else: stack.modifyTop(stack.pop())
                else:
                    err = "%s component wasn't closed"
                    raise ParseError(err % stack.topName(), n)
            else: stack.modifyTop(vline) #not a START or END line
        if stack.top():
            # Leftover top-level content: a PROFILE-style (BEGIN-less)
            # component may legitimately end at EOF; a BEGIN-style may not.
            if stack.topName() is None:
                logger.warning("Top level component was never named")
            elif stack.top().useBegin:
                raise ParseError("Component %s was never closed" % (stack.topName()), n)
            yield stack.pop()
    except ParseError as e:
        # Attach the original input so callers can report it.
        e.input = streamOrString
        raise
def readOne(stream, validate=False, transform=True, findBegin=True,
            ignoreUnreadable=False, allowQP=False):
    """Return the first component from stream.

    Raises StopIteration when the stream contains no component.
    """
    # BUG FIX: this called the generator's ``.next()`` method, which only
    # exists on Python 2.  The ``next()`` builtin works on 2.6+ and 3.x.
    return next(readComponents(stream, validate, transform, findBegin,
                               ignoreUnreadable, allowQP))
#--------------------------- version registry ----------------------------------
# Maps an uppercased behavior name to a list of (id, behavior) pairs; the
# first pair in each list is the default version for that name.
__behaviorRegistry={}
def registerBehavior(behavior, name=None, default=False, id=None):
    """Register the given behavior.
    If default is True (or if this is the first version registered with this
    name), the version will be the default if no id is given.
    """
    if not name:
        name = behavior.name.upper()
    if id is None:
        id = behavior.versionString
    versions = __behaviorRegistry.get(name)
    if versions is None:
        # First registration for this name becomes the default.
        __behaviorRegistry[name] = [(id, behavior)]
    elif default:
        versions.insert(0, (id, behavior))
    else:
        versions.append((id, behavior))
def getBehavior(name, id=None):
    """Return a matching behavior if it exists, or None.
    If id is None (or no version matches id), return the default for name.
    """
    versions = __behaviorRegistry.get(name.upper())
    if versions is None:
        return None
    if id:
        for registeredId, behavior in versions:
            if registeredId == id:
                return behavior
    # No id given, or no exact match: fall back to the default (first) entry.
    return versions[0][1]
def newFromBehavior(name, id=None):
    """Given a name, return a behaviored ContentLine or Component."""
    name = name.upper()
    behavior = getBehavior(name, id)
    if behavior is None:
        raise VObjectError("No behavior found named %s" % name)
    # Behaviors know whether they describe a container or a single line.
    obj = Component(name) if behavior.isComponent else ContentLine(name, [], '')
    obj.behavior = behavior
    obj.isNative = False
    return obj
#--------------------------- Helper function -----------------------------------
def backslashEscape(s):
    """Escape backslashes, separators and line breaks for serialized text.

    Backslash is escaped first so later replacements are not double-escaped;
    all flavors of line break collapse to a literal ``\\n``.
    """
    for old, new in (("\\", "\\\\"), (";", "\\;"), (",", "\\,"),
                     ("\r\n", "\\n"), ("\n", "\\n"), ("\r", "\\n")):
        s = s.replace(old, new)
    return s
#------------------- Testing and running functions -----------------------------
if __name__ == '__main__':
    # Run the accompanying doctest-based test module when executed directly.
    import tests
    tests._test()
# NOTE: stray editorial remark ("this is dumb") commented out — it was not
# valid Python and sat between two concatenated copies of this module.
"""vobject module for reading vCard and vCalendar files."""
from __future__ import print_function
import copy
import re
import sys
import logging
import codecs
import six
#------------------------------------ Logging ----------------------------------
logger = logging.getLogger(__name__)
if not logging.getLogger().handlers:
    # Only install a handler when the root logger has none, so embedding
    # applications keep full control of logging configuration.
    handler = logging.StreamHandler()
    formatter = logging.Formatter('%(name)s %(levelname)s %(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)
logger.setLevel(logging.ERROR) # Log errors
DEBUG = False # Don't waste time on debug calls
#----------------------------------- Constants ---------------------------------
# Line-ending and folding characters used by the parsing/serializing code.
CR = '\r'
LF = '\n'
CRLF = CR + LF
SPACE = ' '
TAB = '\t'
SPACEORTAB = SPACE + TAB
#-------------------------------- Useful modules -------------------------------
# use doctest, it kills two birds with one stone and docstrings often become
# more readable to boot (see parseLine's docstring).
# use logging, then when debugging we can just set our verbosity.
# use epydoc syntax for documenting code, please document every class and non-
# trivial method (see http://epydoc.sourceforge.net/epytext.html
# and http://epydoc.sourceforge.net/fields.html). Also, please
# follow http://www.python.org/peps/pep-0257.html for docstrings.
#-------------------------------------------------------------------------------
#--------------------------------- Main classes --------------------------------
class VBase(object):
    """Base class for ContentLine and Component.
    @ivar behavior:
        The Behavior class associated with this object, which controls
        validation, transformations, and encoding.
    @ivar parentBehavior:
        The object's parent's behavior, or None if no behaviored parent exists.
    @ivar isNative:
        Boolean describing whether this component is a Native instance.
    @ivar group:
        An optional group prefix, should be used only to indicate sort order in
        vCards, according to RFC2426
    """
    def __init__(self, group=None, *args, **kwds):
        super(VBase, self).__init__(*args, **kwds)
        self.group = group
        self.behavior = None
        self.parentBehavior = None
        self.isNative = False

    def copy(self, copyit):
        """Copy group and behavior bookkeeping from copyit onto self."""
        self.group = copyit.group
        self.behavior = copyit.behavior
        self.parentBehavior = copyit.parentBehavior
        self.isNative = copyit.isNative

    def validate(self, *args, **kwds):
        """Call the behavior's validate method, or return True."""
        if self.behavior:
            return self.behavior.validate(self, *args, **kwds)
        else:
            return True

    def getChildren(self):
        """Return an iterable containing the contents of the object."""
        return []

    def clearBehavior(self, cascade=True):
        """Set behavior to None. Do for all descendants if cascading."""
        self.behavior = None
        if cascade:
            self.transformChildrenFromNative()

    def autoBehavior(self, cascade=False):
        """Set behavior if name is in self.parentBehavior.knownChildren.
        If cascade is True, unset behavior and parentBehavior for all
        descendants, then recalculate behavior and parentBehavior.
        """
        parentBehavior = self.parentBehavior
        if parentBehavior is not None:
            # knownChildren maps child names to tuples whose third entry
            # identifies the behavior version to use for that child.
            knownChildTup = parentBehavior.knownChildren.get(self.name, None)
            if knownChildTup is not None:
                behavior = getBehavior(self.name, knownChildTup[2])
                if behavior is not None:
                    self.setBehavior(behavior, cascade)
                    if isinstance(self, ContentLine) and self.encoded:
                        self.behavior.decode(self)
            elif isinstance(self, ContentLine):
                self.behavior = parentBehavior.defaultBehavior
                if self.encoded and self.behavior:
                    self.behavior.decode(self)

    def setBehavior(self, behavior, cascade=True):
        """Set behavior. If cascade is True, autoBehavior all descendants."""
        self.behavior = behavior
        if cascade:
            for obj in self.getChildren():
                obj.parentBehavior = behavior
                obj.autoBehavior(True)

    def transformToNative(self):
        """Transform this object into a custom VBase subclass.
        transformToNative should always return a representation of this object.
        It may do so by modifying self in place then returning self, or by
        creating a new object.
        """
        if self.isNative or not self.behavior or not self.behavior.hasNative:
            return self
        else:
            try:
                return self.behavior.transformToNative(self)
            except Exception as e:
                # wrap errors in transformation in a ParseError
                lineNumber = getattr(self, 'lineNumber', None)
                if isinstance(e, ParseError):
                    if lineNumber is not None:
                        e.lineNumber = lineNumber
                    raise
                else:
                    msg = "In transformToNative, unhandled exception: %s: %s"
                    msg = msg % (sys.exc_info()[0], sys.exc_info()[1])
                    new_error = ParseError(msg, lineNumber)
                    # BUG FIX: the original "raise (ParseError, new_error,
                    # tb)" raises a tuple rather than re-raising with the
                    # captured traceback; six.reraise is the portable form.
                    six.reraise(ParseError, new_error, sys.exc_info()[2])

    def transformFromNative(self):
        """Return self transformed into a ContentLine or Component if needed.
        May have side effects. If it does, transformFromNative and
        transformToNative MUST have perfectly inverse side effects. Allowing
        such side effects is convenient for objects whose transformations only
        change a few attributes.
        Note that it isn't always possible for transformFromNative to be a
        perfect inverse of transformToNative, in such cases transformFromNative
        should return a new object, not self after modifications.
        """
        if self.isNative and self.behavior and self.behavior.hasNative:
            try:
                return self.behavior.transformFromNative(self)
            except Exception as e:
                # wrap errors in transformation in a NativeError
                lineNumber = getattr(self, 'lineNumber', None)
                if isinstance(e, NativeError):
                    if lineNumber is not None:
                        e.lineNumber = lineNumber
                    raise
                else:
                    msg = "In transformFromNative, unhandled exception: %s: %s"
                    msg = msg % (sys.exc_info()[0], sys.exc_info()[1])
                    new_error = NativeError(msg, lineNumber)
                    # BUG FIX: same tuple-raise problem as transformToNative.
                    six.reraise(NativeError, new_error, sys.exc_info()[2])
        else:
            return self

    def transformChildrenToNative(self):
        """Recursively replace children with their native representation."""
        pass

    def transformChildrenFromNative(self, clearBehavior=True):
        """Recursively transform native children to vanilla representations."""
        pass

    def serialize(self, buf=None, lineLength=75, validate=True, behavior=None):
        """Serialize to buf if it exists, otherwise return a string.
        Use self.behavior.serialize if behavior exists.
        """
        if not behavior:
            behavior = self.behavior
        if behavior:
            if DEBUG:
                logger.debug("serializing %s with behavior" % self.name)
            return behavior.serialize(self, buf, lineLength, validate)
        else:
            if DEBUG:
                logger.debug("serializing %s without behavior" % self.name)
            return defaultSerialize(self, buf, lineLength)
def toVName(name, stripNum=0, upper=False):
    """
    Turn a Python name into an iCalendar style name, optionally uppercase and
    with characters stripped off.
    """
    result = name.upper() if upper else name
    if stripNum:
        result = result[:-stripNum]
    return result.replace('_', '-')
class ContentLine(VBase):
    """Holds one content line for formats like vCard and vCalendar.
    For example::
      <SUMMARY{u'param1' : [u'val1'], u'param2' : [u'val2']}Bastille Day Party>
    @ivar name:
        The uppercased name of the contentline.
    @ivar params:
        A dictionary of parameters and associated lists of values (the list may
        be empty for empty parameters).
    @ivar value:
        The value of the contentline.
    @ivar singletonparams:
        A list of parameters for which it's unclear if the string represents the
        parameter name or the parameter value. In vCard 2.1, "The value string
        can be specified alone in those cases where the value is unambiguous".
        This is crazy, but we have to deal with it.
    @ivar encoded:
        A boolean describing whether the data in the content line is encoded.
        Generally, text read from a serialized vCard or vCalendar should be
        considered encoded. Data added programmatically should not be encoded.
    @ivar lineNumber:
        An optional line number associated with the contentline.
    """
    def __init__(self, name, params, value, group=None,
                 encoded=False, isNative=False,
                 lineNumber=None, *args, **kwds):
        """Take output from parseLine, convert params list to dictionary."""
        # group is used as a positional argument to match parseLine's return
        super(ContentLine, self).__init__(group, *args, **kwds)
        self.name = name.upper()
        self.value = value
        self.encoded = encoded
        self.params = {}
        self.singletonparams = []
        self.isNative = isNative
        self.lineNumber = lineNumber
        def updateTable(x):
            # One-element lists are ambiguous (vCard 2.1 bare values);
            # keep them separate from named parameters.
            if len(x) == 1:
                self.singletonparams += x
            else:
                paramlist = self.params.setdefault(x[0].upper(), [])
                paramlist.extend(x[1:])
        # BUG FIX: this was map(updateTable, params); map() is lazy on
        # python 3, so the side effects never ran and params were dropped.
        for param in params:
            updateTable(param)
        qp = False
        if 'ENCODING' in self.params:
            if 'QUOTED-PRINTABLE' in self.params['ENCODING']:
                qp = True
                self.params['ENCODING'].remove('QUOTED-PRINTABLE')
                if 0 == len(self.params['ENCODING']):
                    del self.params['ENCODING']
        if 'QUOTED-PRINTABLE' in self.singletonparams:
            qp = True
            self.singletonparams.remove('QUOTED-PRINTABLE')
        if qp:
            # NOTE(review): .decode('quoted-printable') on text is a
            # python 2 codec path; this branch needs rework for python 3.
            self.value = six.u(self.value).decode('quoted-printable')
        # self.value should be unicode for iCalendar, but if quoted-printable
        # is used, or if the quoted-printable state machine is used, text may
        # be encoded
        if type(self.value) is str:
            self.value = six.u(self.value)

    @classmethod
    def duplicate(clz, copyit):
        """Return a new ContentLine copied from copyit."""
        newcopy = clz('', {}, '')
        newcopy.copy(copyit)
        return newcopy

    def copy(self, copyit):
        """Copy all line state (name, value, params, flags) from copyit."""
        super(ContentLine, self).copy(copyit)
        self.name = copyit.name
        self.value = copy.copy(copyit.value)
        # BUG FIX: this was "self.encoded = self.encoded", a no-op that
        # never copied the encoded flag from the source line.
        self.encoded = copyit.encoded
        self.params = copy.copy(copyit.params)
        for k, v in self.params.items():
            self.params[k] = copy.copy(v)
        self.singletonparams = copy.copy(copyit.singletonparams)
        self.lineNumber = copyit.lineNumber

    def __eq__(self, other):
        try:
            return (self.name == other.name) and (self.params == other.params) and (self.value == other.value)
        except Exception:
            return False

    def _getAttributeNames(self):
        """Return a list of attributes of the object.
        Python 2.6 will add __dir__ to customize what attributes are returned
        by dir, for now copy PyCrust so that IPython can accurately do
        completion.
        """
        keys = self.params.keys()
        params = [param + '_param' for param in keys]
        params.extend(param + '_paramlist' for param in keys)
        return params

    def __getattr__(self, name):
        """Make params accessible via self.foo_param or self.foo_paramlist.
        Underscores, legal in python variable names, are converted to dashes,
        which are legal in IANA tokens.
        """
        try:
            if name.endswith('_param'):
                return self.params[toVName(name, 6, True)][0]
            elif name.endswith('_paramlist'):
                return self.params[toVName(name, 10, True)]
            else:
                raise AttributeError(name)
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        """Make params accessible via self.foo_param or self.foo_paramlist.
        Underscores, legal in python variable names, are converted to dashes,
        which are legal in IANA tokens.
        """
        if name.endswith('_param'):
            if type(value) == list:
                self.params[toVName(name, 6, True)] = value
            else:
                self.params[toVName(name, 6, True)] = [value]
        elif name.endswith('_paramlist'):
            if type(value) == list:
                self.params[toVName(name, 10, True)] = value
            else:
                raise VObjectError("Parameter list set to a non-list")
        else:
            prop = getattr(self.__class__, name, None)
            if isinstance(prop, property):
                prop.fset(self, value)
            else:
                object.__setattr__(self, name, value)

    def __delattr__(self, name):
        try:
            if name.endswith('_param'):
                del self.params[toVName(name, 6, True)]
            elif name.endswith('_paramlist'):
                del self.params[toVName(name, 10, True)]
            else:
                object.__delattr__(self, name)
        except KeyError:
            raise AttributeError(name)

    def valueRepr(self):
        """Transform the representation of the value according to the
        behavior, if any."""
        v = self.value
        if self.behavior:
            v = self.behavior.valueRepr(self)
        return v

    def __str__(self):
        return "<%s%s%s>" % (self.name, self.params, self.valueRepr())

    def __repr__(self):
        return self.__str__().replace('\n', '\\n')

    def prettyPrint(self, level=0, tabwidth=3):
        """Print an indented representation of this line and its params."""
        pre = ' ' * level * tabwidth
        print(pre, self.name + ":", self.valueRepr())
        if self.params:
            lineKeys = self.params.keys()
            print(pre, "params for ", self.name + ':')
            for aKey in lineKeys:
                print(pre + ' ' * tabwidth, aKey, self.params[aKey])
class Component(VBase):
    """A complex property that can contain multiple ContentLines.
    For our purposes, a component must start with a BEGIN:xxxx line and end with
    END:xxxx, or have a PROFILE:xxx line if a top-level component.
    @ivar contents:
        A dictionary of lists of Component or ContentLine instances. The keys
        are the lowercased names of child ContentLines or Components.
        Note that BEGIN and END ContentLines are not included in contents.
    @ivar name:
        Uppercase string used to represent this Component, i.e VCARD if the
        serialized object starts with BEGIN:VCARD.
    @ivar useBegin:
        A boolean flag determining whether BEGIN: and END: lines should
        be serialized.
    """
    def __init__(self, name=None, *args, **kwds):
        super(Component, self).__init__(*args, **kwds)
        self.contents = {}
        if name:
            self.name = name.upper()
            self.useBegin = True
        else:
            self.name = ''
            self.useBegin = False
        self.autoBehavior()

    @classmethod
    def duplicate(clz, copyit):
        """Return a new Component that is a deep copy of copyit."""
        newcopy = clz()
        newcopy.copy(copyit)
        return newcopy

    def copy(self, copyit):
        """Deep-copy contents, name, and useBegin from copyit."""
        super(Component, self).copy(copyit)
        # deep copy of contents
        self.contents = {}
        for key, lvalue in copyit.contents.items():
            newvalue = []
            for value in lvalue:
                newitem = value.duplicate(value)
                newvalue.append(newitem)
            self.contents[key] = newvalue
        self.name = copyit.name
        self.useBegin = copyit.useBegin

    def setProfile(self, name):
        """Assign a PROFILE to this unnamed component.
        Used by vCard, not by vCalendar.
        """
        if self.name or self.useBegin:
            if self.name == name:
                return
            raise VObjectError("This component already has a PROFILE or uses BEGIN.")
        self.name = name.upper()

    def _getAttributeNames(self):
        """Return a list of attributes of the object.
        Python 2.6 will add __dir__ to customize what attributes are returned
        by dir, for now copy PyCrust so that IPython can accurately do
        completion.
        """
        # BUG FIX: dict.keys() is a view without extend() on python 3;
        # materialize it as a list first (behavior identical on python 2).
        names = list(self.contents.keys())
        names.extend(name + '_list' for name in self.contents.keys())
        return names

    def __getattr__(self, name):
        """For convenience, make self.contents directly accessible.
        Underscores, legal in python variable names, are converted to dashes,
        which are legal in IANA tokens.
        """
        # if the object is being re-created by pickle, self.contents may not
        # be set, don't get into an infinite loop over the issue
        if name == 'contents':
            return object.__getattribute__(self, name)
        try:
            if name.endswith('_list'):
                return self.contents[toVName(name, 5)]
            else:
                return self.contents[toVName(name)][0]
        except KeyError:
            raise AttributeError(name)

    # attribute names handled by normal object semantics rather than being
    # redirected into self.contents
    normal_attributes = ['contents', 'name', 'behavior', 'parentBehavior', 'group']

    def __setattr__(self, name, value):
        """For convenience, make self.contents directly accessible.
        Underscores, legal in python variable names, are converted to dashes,
        which are legal in IANA tokens.
        """
        if name not in self.normal_attributes and name.lower() == name:
            if type(value) == list:
                if name.endswith('_list'):
                    name = name[:-5]
                self.contents[toVName(name)] = value
            elif name.endswith('_list'):
                raise VObjectError("Component list set to a non-list")
            else:
                self.contents[toVName(name)] = [value]
        else:
            prop = getattr(self.__class__, name, None)
            if isinstance(prop, property):
                prop.fset(self, value)
            else:
                object.__setattr__(self, name, value)

    def __delattr__(self, name):
        try:
            if name not in self.normal_attributes and name.lower() == name:
                if name.endswith('_list'):
                    del self.contents[toVName(name, 5)]
                else:
                    del self.contents[toVName(name)]
            else:
                object.__delattr__(self, name)
        except KeyError:
            raise AttributeError(name)

    def getChildValue(self, childName, default=None, childNumber=0):
        """Return a child's value (the first, by default), or None."""
        child = self.contents.get(toVName(childName))
        if child is None:
            return default
        else:
            return child[childNumber].value

    def add(self, objOrName, group=None):
        """Add objOrName to contents, set behavior if it can be inferred.
        If objOrName is a string, create an empty component or line based on
        behavior. If no behavior is found for the object, add a ContentLine.
        group is an optional prefix to the name of the object (see
        RFC 2425).
        """
        if isinstance(objOrName, VBase):
            obj = objOrName
            if self.behavior:
                obj.parentBehavior = self.behavior
                obj.autoBehavior(True)
        else:
            name = objOrName.upper()
            try:
                id = self.behavior.knownChildren[name][2]
                behavior = getBehavior(name, id)
                if behavior.isComponent:
                    obj = Component(name)
                else:
                    obj = ContentLine(name, [], '', group)
                obj.parentBehavior = self.behavior
                obj.behavior = behavior
                obj = obj.transformToNative()
            except (KeyError, AttributeError):
                # no behavior info available; fall back to a plain line
                obj = ContentLine(objOrName, [], '', group)
            if obj.behavior is None and self.behavior is not None:
                if isinstance(obj, ContentLine):
                    obj.behavior = self.behavior.defaultBehavior
        self.contents.setdefault(obj.name.lower(), []).append(obj)
        return obj

    def remove(self, obj):
        """Remove obj from contents."""
        named = self.contents.get(obj.name.lower())
        if named:
            try:
                named.remove(obj)
                if len(named) == 0:
                    del self.contents[obj.name.lower()]
            except ValueError:
                pass

    def getChildren(self):
        """Return an iterable of all children."""
        for objList in self.contents.values():
            for obj in objList:
                yield obj

    def components(self):
        """Return an iterable of all Component children."""
        return (i for i in self.getChildren() if isinstance(i, Component))

    def lines(self):
        """Return an iterable of all ContentLine children."""
        return (i for i in self.getChildren() if isinstance(i, ContentLine))

    def sortChildKeys(self):
        """Return child keys with the behavior's sortFirst keys leading."""
        try:
            first = [s for s in self.behavior.sortFirst if s in self.contents]
        except Exception:
            first = []
        return first + sorted(k for k in self.contents.keys() if k not in first)

    def getSortedChildren(self):
        """Return all children, ordered according to sortChildKeys()."""
        return [obj for k in self.sortChildKeys() for obj in self.contents[k]]

    def setBehaviorFromVersionLine(self, versionLine):
        """Set behavior if one matches name, versionLine.value."""
        v = getBehavior(self.name, versionLine.value)
        if v:
            self.setBehavior(v)

    def transformChildrenToNative(self):
        """Recursively replace children with their native representation."""
        # sort to get dependency order right, like vtimezone before vevent
        # (range replaces py2-only xrange; identical behavior here)
        for childArray in (self.contents[k] for k in self.sortChildKeys()):
            for i in range(len(childArray)):
                childArray[i] = childArray[i].transformToNative()
                childArray[i].transformChildrenToNative()

    def transformChildrenFromNative(self, clearBehavior=True):
        """Recursively transform native children to vanilla representations."""
        for childArray in self.contents.values():
            for i in range(len(childArray)):
                childArray[i] = childArray[i].transformFromNative()
                childArray[i].transformChildrenFromNative(clearBehavior)
                if clearBehavior:
                    childArray[i].behavior = None
                    childArray[i].parentBehavior = None

    def __str__(self):
        if self.name:
            # BUG FIX: the original used ''.join(self.getSortedChildren()),
            # which raises TypeError because the children are objects, not
            # strings; interpolating the list matches the doctest output in
            # readComponents.
            return "<%s| %s>" % (six.u(self.name), self.getSortedChildren())
        else:
            return u'<*unnamed*| {}>'.format(self.getSortedChildren())

    def __repr__(self):
        return self.__str__()

    def prettyPrint(self, level=0, tabwidth=3):
        """Print an indented tree of this component and its children."""
        pre = ' ' * level * tabwidth
        print(pre, self.name)
        if isinstance(self, Component):
            for line in self.getChildren():
                line.prettyPrint(level + 1, tabwidth)
        print('')
class VObjectError(Exception):
    """Base error for parsing, validation, and transformation problems."""
    def __init__(self, msg, lineNumber=None):
        self.msg = msg
        if lineNumber is not None:
            self.lineNumber = lineNumber

    def __str__(self):
        lineNumber = getattr(self, 'lineNumber', None)
        if lineNumber is None:
            return repr(self.msg)
        return "At line %s: %s" % (lineNumber, self.msg)
class ParseError(VObjectError):
    """Raised when a stream cannot be parsed into lines or components."""
    pass
class ValidateError(VObjectError):
    """Raised when validation against a behavior fails."""
    pass
class NativeError(VObjectError):
    """Raised when transforming to/from a native representation fails."""
    pass
#-------------------------- Parsing functions ----------------------------------
# parseLine regular expressions
patterns = {}
# Note that underscore is not legal for names, it's included because
# Lotus Notes uses it
patterns['name'] = '[a-zA-Z0-9\-_]+'
patterns['safe_char'] = '[^";:,]'
patterns['qsafe_char'] = '[^"]'
# the combined Python string replacement and regex syntax is a little confusing;
# remember that %(foobar)s is replaced with patterns['foobar'], so for instance
# param_value is any number of safe_chars or any number of qsaf_chars surrounded
# by double quotes.
patterns['param_value'] = ' "%(qsafe_char)s * " | %(safe_char)s * ' % patterns
# get a tuple of two elements, one will be empty, the other will have the value
patterns['param_value_grouped'] = """
" ( %(qsafe_char)s * )" | ( %(safe_char)s + )
""" % patterns
# get a parameter and its values, without any saved groups
patterns['param'] = r"""
; (?: %(name)s ) # parameter name
(?:
(?: = (?: %(param_value)s ) )? # 0 or more parameter values, multiple
(?: , (?: %(param_value)s ) )* # parameters are comma separated
)*
""" % patterns
# get a parameter, saving groups for name and value (value still needs parsing)
patterns['params_grouped'] = r"""
; ( %(name)s )
(?: =
(
(?: (?: %(param_value)s ) )? # 0 or more parameter values, multiple
(?: , (?: %(param_value)s ) )* # parameters are comma separated
)
)?
""" % patterns
# get a full content line, break it up into group, name, parameters, and value
patterns['line'] = r"""
^ ((?P<group> %(name)s)\.)?(?P<name> %(name)s) # name group
(?P<params> (?: %(param)s )* ) # params group (may be empty)
: (?P<value> .* )$ # value group
""" % patterns
' "%(qsafe_char)s*" | %(safe_char)s* '
param_values_re = re.compile(patterns['param_value_grouped'], re.VERBOSE)
params_re = re.compile(patterns['params_grouped'], re.VERBOSE)
line_re = re.compile(patterns['line'], re.DOTALL | re.VERBOSE)
begin_re = re.compile('BEGIN', re.IGNORECASE)
def parseParams(string):
    """
    >>> parseParams(';ALTREP="http://www.wiz.org"')
    [['ALTREP', 'http://www.wiz.org']]
    >>> parseParams('')
    []
    >>> parseParams(';ALTREP="http://www.wiz.org;;",Blah,Foo;NEXT=Nope;BAR')
    [['ALTREP', 'http://www.wiz.org;;', 'Blah', 'Foo'], ['NEXT', 'Nope'], ['BAR']]
    """
    allParameters = []
    # each match is a (name, valuesString) pair
    for name, valuesString in params_re.findall(string):
        paramList = [name]
        # each value match is ('', value) or (value, '') depending on quoting
        for quoted, unquoted in param_values_re.findall(valuesString):
            paramList.append(quoted if quoted != '' else unquoted)
        allParameters.append(paramList)
    return allParameters
def parseLine(line, lineNumber=None):
    """
    >>> parseLine("BLAH:")
    ('BLAH', [], '', None)
    >>> parseLine("RDATE:VALUE=DATE:19970304,19970504,19970704,19970904")
    ('RDATE', [], 'VALUE=DATE:19970304,19970504,19970704,19970904', None)
    >>> parseLine('DESCRIPTION;ALTREP="http://www.wiz.org":The Fall 98 Wild Wizards Conference - - Las Vegas, NV, USA')
    ('DESCRIPTION', [['ALTREP', 'http://www.wiz.org']], 'The Fall 98 Wild Wizards Conference - - Las Vegas, NV, USA', None)
    >>> parseLine("EMAIL;PREF;INTERNET:john@nowhere.com")
    ('EMAIL', [['PREF'], ['INTERNET']], 'john@nowhere.com', None)
    >>> parseLine('EMAIL;TYPE="blah",hah;INTERNET="DIGI",DERIDOO:john@nowhere.com')
    ('EMAIL', [['TYPE', 'blah', 'hah'], ['INTERNET', 'DIGI', 'DERIDOO']], 'john@nowhere.com', None)
    >>> parseLine('item1.ADR;type=HOME;type=pref:;;Reeperbahn 116;Hamburg;;20359;')
    ('ADR', [['type', 'HOME'], ['type', 'pref']], ';;Reeperbahn 116;Hamburg;;20359;', 'item1')
    >>> parseLine(":")
    Traceback (most recent call last):
    ...
    ParseError: 'Failed to parse line: :'
    """
    match = line_re.match(line)
    if match is None:
        raise ParseError("Failed to parse line: %s" % line, lineNumber)
    # Underscores are replaced with dash to work around Lotus Notes
    name = match.group('name').replace('_', '-')
    params = parseParams(match.group('params'))
    return (name, params, match.group('value'), match.group('group'))
# logical line regular expressions
patterns['lineend'] = r'(?:\r\n|\r|\n|$)'
patterns['wrap'] = r'%(lineend)s [\t ]' % patterns
patterns['logicallines'] = r"""
(
(?: [^\r\n] | %(wrap)s )*
%(lineend)s
)
""" % patterns
patterns['wraporend'] = r'(%(wrap)s | %(lineend)s )' % patterns
wrap_re = re.compile(patterns['wraporend'], re.VERBOSE)
logical_lines_re = re.compile(patterns['logicallines'], re.VERBOSE)
testLines="""
Line 0 text
, Line 0 continued.
Line 1;encoding=quoted-printable:this is an evil=
evil=
format.
Line 2 is a new line, it does not start with whitespace.
"""
def getLogicalLines(fp, allowQP=True, findBegin=False):
    """
    Iterate through a stream, yielding one logical line at a time.
    Because many applications still use vCard 2.1, we have to deal with the
    quoted-printable encoding for long lines, as well as the vCard 3.0 and
    vCalendar line folding technique, a whitespace character at the start
    of the line.
    Quoted-printable data will be decoded in the Behavior decoding phase.
    >>> from six import StringIO
    >>> f=StringIO(testLines)
    >>> for n, l in enumerate(getLogicalLines(f)):
    ...     print("Line %s: %s" % (n, l[0]))
    ...
    Line 0: Line 0 text, Line 0 continued.
    Line 1: Line 1;encoding=quoted-printable:this is an evil=
    evil=
    format.
    Line 2: Line 2 is a new line, it does not start with whitespace.
    """
    if not allowQP:
        # Fast path: no quoted-printable soft breaks possible, so the whole
        # stream can be slurped and split with the logical-line regex.
        # NOTE(review): "bytes" shadows the builtin; this branch also relies
        # on the py2-only "unicode" name, so it needs rework for python 3.
        bytes = fp.read(-1)
        if len(bytes) > 0:
            if type(bytes[0]) == unicode:
                # already decoded text
                val = bytes
            elif not findBegin:
                val = bytes.decode('utf-8')
            else:
                # Guess the encoding: first one that decodes AND contains a
                # BEGIN marker wins.
                for encoding in 'utf-8', 'utf-16-LE', 'utf-16-BE', 'iso-8859-1':
                    try:
                        val = bytes.decode(encoding)
                        if begin_re.search(val) is not None:
                            break
                    except UnicodeDecodeError:
                        pass
                else:
                    raise ParseError('Could not find BEGIN when trying to determine encoding')
        else:
            val = bytes
        # strip off any UTF8 BOMs which Python's UTF8 decoder leaves
        val = val.lstrip( unicode( codecs.BOM_UTF8, "utf8" ) )
        lineNumber = 1
        for match in logical_lines_re.finditer(val):
            # remove fold markers (line break + leading whitespace); n counts
            # how many physical lines were joined
            line, n = wrap_re.subn('', match.group())
            if line != '':
                yield line, lineNumber
            lineNumber += n
    else:
        # Slow path: read line by line, unfolding wrapped lines and joining
        # quoted-printable soft line breaks (lines ending in '=').
        quotedPrintable=False
        newbuffer = six.StringIO
        logicalLine = newbuffer()
        lineNumber = 0
        lineStartNumber = 0
        while True:
            line = fp.readline()
            if line == '':
                break
            else:
                line = line.rstrip(CRLF)
                lineNumber += 1
            if line.rstrip() == '':
                # a blank line terminates the current logical line, if any
                if logicalLine.tell() > 0:
                    yield logicalLine.getvalue(), lineStartNumber
                lineStartNumber = lineNumber
                logicalLine = newbuffer()
                quotedPrintable=False
                continue
            if quotedPrintable and allowQP:
                # previous line ended with a quoted-printable soft break
                logicalLine.write('\n')
                logicalLine.write(line)
                quotedPrintable=False
            elif line[0] in SPACEORTAB:
                # folded continuation: append without the leading whitespace
                logicalLine.write(line[1:])
            elif logicalLine.tell() > 0:
                # a fresh physical line: emit the finished logical line first
                yield logicalLine.getvalue(), lineStartNumber
                lineStartNumber = lineNumber
                logicalLine = newbuffer()
                logicalLine.write(line)
            else:
                logicalLine = newbuffer()
                logicalLine.write(line)
            # hack to deal with the fact that vCard 2.1 allows parameters to be
            # encoded without a parameter name. False positives are unlikely, but
            # possible.
            val = logicalLine.getvalue()
            if val[-1]=='=' and val.lower().find('quoted-printable') >= 0:
                quotedPrintable=True
        if logicalLine.tell() > 0:
            yield logicalLine.getvalue(), lineStartNumber
def textLineToContentLine(text, n=None):
    """Parse one raw text line into an encoded ContentLine."""
    name, params, value, group = parseLine(text, n)
    return ContentLine(name, params, value, group, encoded=True, lineNumber=n)
def dquoteEscape(param):
    """Return param, or "param" if ',' or ';' or ':' is in param."""
    if '"' in param:
        raise VObjectError("Double quotes aren't allowed in parameter values.")
    needsQuoting = any(char in param for char in ',;:')
    return '"' + param + '"' if needsQuoting else param
def foldOneLine(outbuf, input, lineLength = 75):
    """Write input to outbuf folded at lineLength, always ending with CRLF.

    Folding takes care not to break multi-byte utf-8 sequences across
    lines.  Writes are attempted as utf-8 bytes first (py3-style byte
    buffers), falling back to plain string writes for py2 buffers.
    To-do: This all seems odd. Is it still needed, especially in python3?
    """
    def _write(text):
        # py3-style bytes write first, then fall back on py2 syntax
        try:
            outbuf.write(bytes(text, 'UTF-8'))
        except Exception:
            outbuf.write(text)
    if len(input) < lineLength:
        # Optimize for unfolded line case
        _write(input)
    else:
        # Look for valid utf8 range and write that out
        start = 0
        written = 0
        while written < len(input):
            # Start max length -1 chars on from where we are
            offset = start + lineLength - 1
            if offset >= len(input):
                _write(input[start:])
                written = len(input)
            else:
                # Step back while the char at the split point is a utf-8
                # continuation byte, so multi-byte sequences stay intact.
                # BUG FIX: the original compared the character itself (not
                # ord()) against 0x7F, and wrote the chunk below with
                # bytes(line) -- no encoding and no py2 fallback.
                while (ord(input[offset]) > 0x7F) and ((ord(input[offset]) & 0xC0) == 0x80):
                    offset -= 1
                _write(input[start:offset])
                _write("\r\n")
                written += offset - start
                start = offset
    _write("\r\n")
def defaultSerialize(obj, buf, lineLength):
    """Encode and fold obj and its children, write to buf or return a string.

    Used when no behavior supplies a serialize method.  Components emit
    BEGIN/END wrappers around their sorted children; a ContentLine is
    encoded, built unfolded in a temporary buffer, then folded into outbuf.
    """
    outbuf = buf or six.StringIO()
    if isinstance(obj, Component):
        if obj.group is None:
            groupString = ''
        else:
            groupString = obj.group + '.'
        if obj.useBegin:
            foldOneLine(outbuf, "{0}BEGIN:{1}".format(groupString, obj.name), lineLength)
        for child in obj.getSortedChildren():
            #validate is recursive, we only need to validate once
            child.serialize(outbuf, lineLength, validate=False)
        if obj.useBegin:
            foldOneLine(outbuf, "{0}END:{1}".format(groupString, obj.name), lineLength)
    elif isinstance(obj, ContentLine):
        # encode only lines that aren't already encoded, and decode again
        # afterwards so the in-memory object is left unchanged
        startedEncoded = obj.encoded
        if obj.behavior and not startedEncoded: obj.behavior.encode(obj)
        s=codecs.getwriter('utf-8')(six.StringIO()) #unfolded buffer
        if obj.group is not None:
            s.write(obj.group + '.')
        s.write(obj.name.upper())
        # params are serialized in sorted order for deterministic output
        keys = sorted(obj.params.keys())
        for key in keys:
            paramvals = obj.params[key]
            s.write(';' + key + '=' + ','.join(dquoteEscape(p) for p in paramvals))
        s.write(':' + obj.value)
        if obj.behavior and not startedEncoded: obj.behavior.decode(obj)
        foldOneLine(outbuf, s.getvalue(), lineLength)
    # if the caller supplied a buffer, return it; otherwise return the text
    return buf or outbuf.getvalue()
# Minimal vCalendar fixture used by the readComponents doctest below.
testVCalendar="""
BEGIN:VCALENDAR
BEGIN:VEVENT
SUMMARY;blah=hi!:Bastille Day Party
END:VEVENT
END:VCALENDAR"""
class Stack:
    """Stack of partially-built Components used while parsing BEGIN/END."""
    def __init__(self):
        self.stack = []

    def __len__(self):
        return len(self.stack)

    def top(self):
        """Return the top item without removing it, or None when empty."""
        return self.stack[-1] if self.stack else None

    def topName(self):
        """Return the name of the top item, or None when empty."""
        return self.stack[-1].name if self.stack else None

    def modifyTop(self, item):
        """Add item to the top component, creating one if the stack is empty."""
        receiver = self.top()
        if not receiver:
            receiver = Component()
            self.push(receiver)
        receiver.add(item)  # add sets behavior for item and children

    def push(self, obj):
        self.stack.append(obj)

    def pop(self):
        return self.stack.pop()
def readComponents(streamOrString, validate=False, transform=True,
                   findBegin=True, ignoreUnreadable=False,
                   allowQP=False):
    """Generate one Component at a time from a stream.
    >>> from six import StringIO
    >>> f = StringIO(testVCalendar)
    >>> cal=readComponents(f).next()
    >>> cal
    <VCALENDAR| [<VEVENT| [<SUMMARY{u'BLAH': [u'hi!']}Bastille Day Party>]>]>
    >>> cal.vevent.summary
    <SUMMARY{u'BLAH': [u'hi!']}Bastille Day Party>
    """
    if isinstance(streamOrString, basestring):
        stream = six.StringIO(streamOrString)
    else:
        stream = streamOrString
    try:
        stack = Stack()
        versionLine = None
        n = 0
        for line, n in getLogicalLines(stream, allowQP, findBegin):
            if ignoreUnreadable:
                try:
                    vline = textLineToContentLine(line, n)
                except VObjectError as e:
                    # BUG FIX: VObjectError only sets .lineNumber when one
                    # was supplied, so reading it directly could raise
                    # AttributeError; and .message is unreliable because
                    # VObjectError never calls Exception.__init__ -- use
                    # getattr and the .msg attribute it actually sets.
                    lineNumber = getattr(e, 'lineNumber', None)
                    if lineNumber is not None:
                        msg = "Skipped line %(lineNumber)s, message: %(msg)s"
                    else:
                        msg = "Skipped a line, message: %(msg)s"
                    logger.error(msg % {'lineNumber': lineNumber,
                                        'msg': e.msg})
                    continue
            else:
                vline = textLineToContentLine(line, n)
            if vline.name == "VERSION":
                # remember the VERSION line so the right behavior can be
                # chosen once the top-level component closes
                versionLine = vline
                stack.modifyTop(vline)
            elif vline.name == "BEGIN":
                stack.push(Component(vline.value, group=vline.group))
            elif vline.name == "PROFILE":
                if not stack.top():
                    stack.push(Component())
                stack.top().setProfile(vline.value)
            elif vline.name == "END":
                if len(stack) == 0:
                    err = "Attempted to end the %s component, \
but it was never opened" % vline.value
                    raise ParseError(err, n)
                if vline.value.upper() == stack.topName():  # START matches END
                    if len(stack) == 1:
                        # closing the top-level component: attach behavior,
                        # optionally validate/transform, and yield it
                        component = stack.pop()
                        if versionLine is not None:
                            component.setBehaviorFromVersionLine(versionLine)
                        else:
                            behavior = getBehavior(component.name)
                            if behavior:
                                component.setBehavior(behavior)
                        if validate:
                            component.validate(raiseException=True)
                        if transform:
                            component.transformChildrenToNative()
                        yield component  # EXIT POINT
                    else:
                        # nested component closed: fold it into its parent
                        stack.modifyTop(stack.pop())
                else:
                    err = "%s component wasn't closed"
                    raise ParseError(err % stack.topName(), n)
            else:
                stack.modifyTop(vline)  # not a START or END line
        if stack.top():
            if stack.topName() is None:
                logger.warning("Top level component was never named")
            elif stack.top().useBegin:
                raise ParseError("Component %s was never closed" % (stack.topName()), n)
            yield stack.pop()
    except ParseError as e:
        # attach the original input to the error for easier debugging
        e.input = streamOrString
        raise
def readOne(stream, validate=False, transform=True, findBegin=True,
            ignoreUnreadable=False, allowQP=False):
    """Return the first component from stream.

    Parameters are passed straight through to readComponents.
    """
    # BUG FIX: use the builtin next() (python 2.6+ and python 3) instead of
    # the python-2-only generator .next() method.
    return next(readComponents(stream, validate, transform, findBegin,
                               ignoreUnreadable, allowQP))
#--------------------------- version registry ----------------------------------
# Maps an uppercased name to a list of (versionId, behavior) pairs; the
# first pair in each list is the default for that name.
__behaviorRegistry = {}

def registerBehavior(behavior, name=None, default=False, id=None):
    """Register the given behavior.
    If default is True (or if this is the first version registered with this
    name), the version will be the default if no id is given.
    """
    key = name if name else behavior.name.upper()
    version = id if id is not None else behavior.versionString
    entries = __behaviorRegistry.get(key)
    if entries is None:
        __behaviorRegistry[key] = [(version, behavior)]
    elif default:
        entries.insert(0, (version, behavior))
    else:
        entries.append((version, behavior))

def getBehavior(name, id=None):
    """Return a matching behavior if it exists, or None.
    If id is None, return the default for name.
    """
    versions = __behaviorRegistry.get(name.upper())
    if versions is None:
        return None
    if id:
        for registeredId, behavior in versions:
            if registeredId == id:
                return behavior
    # no id given, or id not found: fall back to the default version
    return versions[0][1]
def newFromBehavior(name, id=None):
    """Given a name, return a behaviored ContentLine or Component."""
    key = name.upper()
    behavior = getBehavior(key, id)
    if behavior is None:
        raise VObjectError("No behavior found named %s" % key)
    obj = Component(key) if behavior.isComponent else ContentLine(key, [], '')
    obj.behavior = behavior
    obj.isNative = False
    return obj
#--------------------------- Helper function -----------------------------------
def backslashEscape(s):
s = s.replace("\\","\\\\").replace(";","\;").replace(",","\,")
return s.replace("\r\n", "\\n").replace("\n","\\n").replace("\r","\\n")
#------------------- Testing and running functions -----------------------------
if __name__ == '__main__':
    # Run the doctest-based test suite when this module is executed directly.
    import tests
    tests._test()
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Dataflow credentials and authentication."""
import datetime
import json
import logging
import os
import urllib2
from oauth2client.client import GoogleCredentials
from oauth2client.client import OAuth2Credentials
from apache_beam.utils import processes
from apache_beam.utils import retry
# When we are running in GCE, we can authenticate with VM credentials.
is_running_in_gce = False
# When we are running in GCE, this value is set based on worker startup
# information.
executing_project = None


def set_running_in_gce(worker_executing_project):
  """Record that this process is executing on a GCE VM.

  Flips the module-level is_running_in_gce flag so that later calls to
  get_service_credentials() use the VM metadata server, and remembers the
  project supplied by worker startup information.

  Args:
    worker_executing_project: The project running the workflow. This
      information comes from worker startup information.
  """
  global is_running_in_gce, executing_project
  is_running_in_gce = True
  executing_project = worker_executing_project
class AuthenticationException(retry.PermanentException):
  # Raised when credentials cannot be obtained. Subclassing
  # retry.PermanentException tells the retry decorator not to retry.
  pass
class GCEMetadataCredentials(OAuth2Credentials):
  """Credential object initialized using access token from GCE VM metadata."""

  def __init__(self, user_agent=None):
    """Create an instance of GCEMetadataCredentials.

    These credentials are generated by contacting the metadata server on a GCE
    VM instance.

    Args:
      user_agent: string, The HTTP User-Agent to provide for this application.
    """
    # All static OAuth2 fields are None: the access token is fetched from the
    # metadata server on refresh, and the expiry set in the past forces that
    # first refresh immediately.
    super(GCEMetadataCredentials, self).__init__(
        None,  # access_token
        None,  # client_id
        None,  # client_secret
        None,  # refresh_token
        datetime.datetime(2010, 1, 1),  # token_expiry, set to time in past.
        None,  # token_uri
        user_agent)

  @retry.with_exponential_backoff(
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def _refresh(self, http_request):
    # Capture the time before the HTTP round trip so the computed expiry
    # errs on the early side.
    refresh_time = datetime.datetime.now()
    metadata_root = os.environ.get(
        'GCE_METADATA_ROOT', 'metadata.google.internal')
    token_url = ('http://{}/computeMetadata/v1/instance/service-accounts/'
                 'default/token').format(metadata_root)
    # The Metadata-Flavor header is required; the metadata server rejects
    # requests without it.
    req = urllib2.Request(token_url, headers={'Metadata-Flavor': 'Google'})
    token_data = json.loads(urllib2.urlopen(req).read())
    self.access_token = token_data['access_token']
    self.token_expiry = (refresh_time +
                         datetime.timedelta(seconds=token_data['expires_in']))
class _GCloudWrapperCredentials(OAuth2Credentials):
  """Credentials class wrapping gcloud credentials via shell."""

  def __init__(self, user_agent, **kwds):
    # No static OAuth2 state; tokens are produced by the gcloud CLI on each
    # refresh.
    super(_GCloudWrapperCredentials, self).__init__(
        None, None, None, None, None, None, user_agent, **kwds)

  def _refresh(self, http_request):
    """Gets an access token using the gcloud client."""
    try:
      gcloud_process = processes.Popen(
          ['gcloud', 'auth', 'print-access-token'], stdout=processes.PIPE)
    except OSError as exn:
      # Popen raises OSError when the gcloud executable is not on PATH.
      logging.error('The gcloud tool was not found.', exc_info=True)
      raise AuthenticationException('The gcloud tool was not found: %s' % exn)
    output, _ = gcloud_process.communicate()
    self.access_token = output.strip()
def get_service_credentials():
  """Get credentials to access Google services.

  Returns:
    VM metadata credentials when running on a GCE worker; otherwise gcloud
    CLI credentials, falling back to application default credentials.
  """
  user_agent = 'beam-python-sdk/1.0'
  if is_running_in_gce:
    # We are currently running as a GCE taskrunner worker.
    #
    # TODO(ccy): It's not entirely clear if these credentials are thread-safe.
    # If so, we can cache these credentials to save the overhead of creating
    # them again.
    return GCEMetadataCredentials(user_agent=user_agent)
  else:
    client_scopes = [
        'https://www.googleapis.com/auth/bigquery',
        'https://www.googleapis.com/auth/cloud-platform',
        'https://www.googleapis.com/auth/devstorage.full_control',
        'https://www.googleapis.com/auth/userinfo.email',
        'https://www.googleapis.com/auth/datastore'
    ]
    try:
      credentials = _GCloudWrapperCredentials(user_agent)
      # Check if we are able to get an access token. If not fallback to
      # application default credentials.
      credentials.get_access_token()
      return credentials
    except AuthenticationException:
      logging.warning('Unable to find credentials from gcloud.')
      # Falling back to application default credentials.
      try:
        credentials = GoogleCredentials.get_application_default()
        credentials = credentials.create_scoped(client_scopes)
        logging.debug('Connecting using Google Application Default '
                      'Credentials.')
        return credentials
      except Exception:
        # Re-raise after logging: callers cannot proceed without credentials.
        logging.warning('Unable to find default credentials to use.')
        raise
This closes #2527
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Dataflow credentials and authentication."""
import datetime
import json
import logging
import os
import urllib2
from oauth2client.client import GoogleCredentials
from oauth2client.client import OAuth2Credentials
from apache_beam.utils import processes
from apache_beam.utils import retry
# When we are running in GCE, we can authenticate with VM credentials.
is_running_in_gce = False
# When we are running in GCE, this value is set based on worker startup
# information.
executing_project = None


def set_running_in_gce(worker_executing_project):
  """Record that this process is executing on a GCE VM.

  Enables metadata-server authentication in get_service_credentials() and
  stores the project supplied by worker startup information.

  Args:
    worker_executing_project: The project running the workflow. This
      information comes from worker startup information.
  """
  global is_running_in_gce, executing_project
  is_running_in_gce = True
  executing_project = worker_executing_project
class AuthenticationException(retry.PermanentException):
  # Raised when credentials cannot be obtained. Subclassing
  # retry.PermanentException tells the retry decorator not to retry.
  pass
class GCEMetadataCredentials(OAuth2Credentials):
  """Credential object initialized using access token from GCE VM metadata."""

  def __init__(self, user_agent=None):
    """Create an instance of GCEMetadataCredentials.

    These credentials are generated by contacting the metadata server on a GCE
    VM instance.

    Args:
      user_agent: string, The HTTP User-Agent to provide for this application.
    """
    # All static OAuth2 fields are None; the past expiry forces an immediate
    # refresh against the metadata server.
    super(GCEMetadataCredentials, self).__init__(
        None,  # access_token
        None,  # client_id
        None,  # client_secret
        None,  # refresh_token
        datetime.datetime(2010, 1, 1),  # token_expiry, set to time in past.
        None,  # token_uri
        user_agent)

  @retry.with_exponential_backoff(
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def _refresh(self, http_request):
    # Capture the time before the HTTP round trip so the computed expiry
    # errs on the early side.
    refresh_time = datetime.datetime.now()
    metadata_root = os.environ.get(
        'GCE_METADATA_ROOT', 'metadata.google.internal')
    token_url = ('http://{}/computeMetadata/v1/instance/service-accounts/'
                 'default/token').format(metadata_root)
    # The Metadata-Flavor header is required by the metadata server.
    req = urllib2.Request(token_url, headers={'Metadata-Flavor': 'Google'})
    token_data = json.loads(urllib2.urlopen(req).read())
    self.access_token = token_data['access_token']
    self.token_expiry = (refresh_time +
                         datetime.timedelta(seconds=token_data['expires_in']))
class _GCloudWrapperCredentials(OAuth2Credentials):
  """Credentials class wrapping gcloud credentials via shell."""

  def __init__(self, user_agent, **kwds):
    # No static OAuth2 state; tokens come from the gcloud CLI on refresh.
    super(_GCloudWrapperCredentials, self).__init__(
        None, None, None, None, None, None, user_agent, **kwds)

  def _refresh(self, http_request):
    """Gets an access token using the gcloud client."""
    try:
      gcloud_process = processes.Popen(
          ['gcloud', 'auth', 'print-access-token'], stdout=processes.PIPE)
    except OSError:
      # Popen raises OSError when gcloud is missing from PATH; raising
      # AuthenticationException lets get_service_credentials() fall back.
      message = 'gcloud tool not found so falling back to using ' +\
          'application default credentials'
      logging.warning(message)
      raise AuthenticationException(message)
    output, _ = gcloud_process.communicate()
    self.access_token = output.strip()
def get_service_credentials():
  """Get credentials to access Google services.

  Returns:
    VM metadata credentials when running on a GCE worker; otherwise gcloud
    CLI credentials, falling back to application default credentials.
  """
  user_agent = 'beam-python-sdk/1.0'
  if is_running_in_gce:
    # We are currently running as a GCE taskrunner worker.
    #
    # TODO(ccy): It's not entirely clear if these credentials are thread-safe.
    # If so, we can cache these credentials to save the overhead of creating
    # them again.
    return GCEMetadataCredentials(user_agent=user_agent)
  else:
    client_scopes = [
        'https://www.googleapis.com/auth/bigquery',
        'https://www.googleapis.com/auth/cloud-platform',
        'https://www.googleapis.com/auth/devstorage.full_control',
        'https://www.googleapis.com/auth/userinfo.email',
        'https://www.googleapis.com/auth/datastore'
    ]
    try:
      credentials = _GCloudWrapperCredentials(user_agent)
      # Check if we are able to get an access token. If not fallback to
      # application default credentials.
      credentials.get_access_token()
      return credentials
    except AuthenticationException:
      logging.warning('Unable to find credentials from gcloud.')
      # Falling back to application default credentials.
      try:
        credentials = GoogleCredentials.get_application_default()
        credentials = credentials.create_scoped(client_scopes)
        logging.debug('Connecting using Google Application Default '
                      'Credentials.')
        return credentials
      except Exception:
        # Re-raise after logging: callers cannot proceed without credentials.
        logging.warning('Unable to find default credentials to use.')
        raise
|
from cgum.basic import *
import cgum.expression as expression
# Mix-in implemented by all statement types
class Statement(object):
    def is_statement(self):
        # Marker predicate distinguishing statements from other AST nodes.
        return True

    def nearestStmt(self):
        # A statement is its own nearest enclosing statement.
        return self


# TODO: Understand this better
class StatementExpression(Statement, expression.Expression, Node):
    # AST node type code / label (GNU C statement-expression).
    CODE = "241800"
    LABEL = "StatementExpr"

    def __init__(self, pos, length, label, children):
        assert label is None
        super().__init__(pos, length, label, children)

    def expr(self):
        # The wrapped expression is the sole child.
        return self.child(0)


# For now, declarations are statements
class DeclarationList(Statement, Node):
    CODE = "350100"
    LABEL = "DeclList"

    def __init__(self, pos, length, label, children):
        assert label is None
        super().__init__(pos, length, label, children)

    def declarations(self):
        # Each child is one declaration in the list.
        return self.children()


# A declaration isn't quite a statement, but this is the best place for it,
# for now.
class Declaration(Node):
    CODE = "450100"
    LABEL = "Declaration"

    def __init__(self, pos, length, label, children):
        assert label is None
        assert len(children) == 1
        assert isinstance(children[0], DeclarationList)
        super().__init__(pos, length, label, children)

    def declarations(self):
        # Returns the single DeclarationList child.
        return self.child(0)
# Generic definition class
class Definition(Statement, Node):
    CODE = "450200"
    LABEL = "Definition"

    def __init__(self, pos, length, label, children):
        assert label is None
        assert len(children) == 1
        super().__init__(pos, length, label, children)

    def defined(self):
        # The defined entity is the sole child.
        return self.child(0)

    def to_s(self):
        return self.defined().to_s()


class Goto(Statement, Node):
    CODE = "280100"
    LABEL = "Goto"

    def __init__(self, pos, length, label, children):
        assert label is None
        assert len(children) == 1
        assert isinstance(children[0], GenericString)
        super().__init__(pos, length, label, children)

    def destination(self):
        # Target label, as a GenericString node.
        return self.child(0)

    def to_s(self):
        return "goto %s" % self.destination()


class Continue(Statement, Token):
    CODE = "280001"
    LABEL = "Continue"

    def to_s(self):
        return "continue"


# Used to specify the default switch case
class Default(Statement, Node):
    CODE = "270400"
    LABEL = "Default"

    def __init__(self, pos, length, label, children):
        assert label is None
        assert len(children) == 1
        super().__init__(pos, length, label, children)


class Case(Statement, Node):
    CODE = "270200"
    LABEL = "Case"

    def __init__(self, pos, length, label, children):
        assert len(children) == 2
        super().__init__(pos, length, label, children)

    def expr(self):
        # Case guard expression.
        return self.child(0)

    def stmt(self):
        # Statement executed for this case.
        return self.child(1)


class Switch(Statement, Node):
    CODE = "300200"
    LABEL = "Switch"

    def __init__(self, pos, length, label, children):
        assert label is None
        assert len(children) == 2
        assert isinstance(children[1], Block)
        super().__init__(pos, length, label, children)

    def expr(self):
        # Expression being switched on.
        return self.child(0)

    def block(self):
        # Compound body holding the case/default arms.
        return self.child(1)


class Break(Statement, Token):
    CODE = "280002"
    LABEL = "Break"

    def to_s(self):
        return "break"


class ExprStatement(Statement, Node):
    CODE = "260300"
    LABEL = "ExprStatement"

    def __init__(self, pos, length, label, children):
        assert label is None
        assert len(children) == 1
        super().__init__(pos, length, label, children)

    def expr(self):
        # The wrapped expression.
        return self.child(0)
class DoWhile(Statement, Node):
    CODE = "310200"
    LABEL = "DoWhile"

    def __init__(self, pos, length, label, children):
        assert label is None
        assert len(children) == 2
        super().__init__(pos, length, label, children)

    def condition(self):
        # do-while evaluates the condition after the body: child 1.
        return self.child(1)

    def do(self):
        return self.child(0)


class While(Statement, Node):
    CODE = "310100"
    LABEL = "While"

    def __init__(self, pos, length, label, children):
        assert label is None
        assert len(children) == 2
        super().__init__(pos, length, label, children)

    def condition(self):
        return self.child(0)

    def do(self):
        return self.child(1)


class For(Statement, Node):
    CODE = "310300"
    LABEL = "For"

    def __init__(self, pos, length, label, children):
        assert label is None, "for statement should have no label"
        assert len(children) in [3, 4], "for statement should have between 3 and 4 children (inclusive)"
        super().__init__(pos, length, label, children)

    def initialisation(self):
        return self.child(0)

    def condition(self):
        return self.child(1)

    def after(self):
        # The post-iteration expression is optional; the 3-child form omits it.
        children = self.children()
        if len(children) == 3:
            return None
        else:
            return self.child(2)

    def block(self):
        # Loop body is always the last child, in both forms.
        return self.children()[-1]
class ReturnExpr(Statement, Node):
    """A `return` statement carrying an expression."""
    CODE = "280200"
    LABEL = "ReturnExpr"

    def __init__(self, pos, length, label, children):
        assert label is None
        assert len(children) == 1
        super().__init__(pos, length, label, children)

    def expr(self):
        # The returned expression is the sole child.
        return self.child(0)

    def to_s(self):
        # FIX: the original read self.__expr, a name-mangled attribute that is
        # never assigned anywhere, so to_s() always raised AttributeError.
        # Use the expr() accessor instead.
        return "return %s" % self.expr().to_s()
class Return(Statement, Token):
    CODE = "280003"
    LABEL = "Return"

    def to_s(self):
        return "return"


# Todo: move to tokens package?
class IfToken(Token):
    # Token marking the `if` keyword; IfElse asserts it as its first child.
    CODE = "490100"
    LABEL = "IfToken"


class IfElse(Statement, Node):
    CODE = "300100"
    LABEL = "If"

    def __init__(self, pos, length, label, children):
        # 3 children: if-token, condition, then; an optional 4th is the else arm.
        assert len(children) >= 3 and len(children) <= 4
        assert isinstance(children[0], IfToken)
        super().__init__(pos, length, label, children)

    def condition(self):
        return self.child(1)

    def then(self):
        return self.child(2)

    def els(self):
        # Else branch, or None when absent.
        return self.child(3) if len(self.children()) == 4 else None


class Block(Node):
    CODE = "330000"
    LABEL = "Compound"

    def contents(self):
        # Statements contained in this compound block.
        return self.children()
added loop class
from cgum.basic import *
import cgum.expression as expression
# Mix-in implemented by all statement types
class Statement(object):
    def is_statement(self):
        # Marker predicate distinguishing statements from other AST nodes.
        return True

    def nearestStmt(self):
        # A statement is its own nearest enclosing statement.
        return self


class Loop(object):
    # Marker mix-in shared by the loop statements (DoWhile, While, For).
    pass


# TODO: Understand this better
class StatementExpression(Statement, expression.Expression, Node):
    CODE = "241800"
    LABEL = "StatementExpr"

    def __init__(self, pos, length, label, children):
        assert label is None
        super().__init__(pos, length, label, children)

    def expr(self):
        # The wrapped expression is the sole child.
        return self.child(0)


# For now, declarations are statements
class DeclarationList(Statement, Node):
    CODE = "350100"
    LABEL = "DeclList"

    def __init__(self, pos, length, label, children):
        assert label is None
        super().__init__(pos, length, label, children)

    def declarations(self):
        # Each child is one declaration in the list.
        return self.children()


# A declaration isn't quite a statement, but this is the best place for it,
# for now.
class Declaration(Node):
    CODE = "450100"
    LABEL = "Declaration"

    def __init__(self, pos, length, label, children):
        assert label is None
        assert len(children) == 1
        assert isinstance(children[0], DeclarationList)
        super().__init__(pos, length, label, children)

    def declarations(self):
        # Returns the single DeclarationList child.
        return self.child(0)
# Generic definition class
class Definition(Statement, Node):
    CODE = "450200"
    LABEL = "Definition"

    def __init__(self, pos, length, label, children):
        assert label is None
        assert len(children) == 1
        super().__init__(pos, length, label, children)

    def defined(self):
        # The defined entity is the sole child.
        return self.child(0)

    def to_s(self):
        return self.defined().to_s()


class Goto(Statement, Node):
    CODE = "280100"
    LABEL = "Goto"

    def __init__(self, pos, length, label, children):
        assert label is None
        assert len(children) == 1
        assert isinstance(children[0], GenericString)
        super().__init__(pos, length, label, children)

    def destination(self):
        # Target label, as a GenericString node.
        return self.child(0)

    def to_s(self):
        return "goto %s" % self.destination()


class Continue(Statement, Token):
    CODE = "280001"
    LABEL = "Continue"

    def to_s(self):
        return "continue"


# Used to specify the default switch case
class Default(Statement, Node):
    CODE = "270400"
    LABEL = "Default"

    def __init__(self, pos, length, label, children):
        assert label is None
        assert len(children) == 1
        super().__init__(pos, length, label, children)


class Case(Statement, Node):
    CODE = "270200"
    LABEL = "Case"

    def __init__(self, pos, length, label, children):
        assert len(children) == 2
        super().__init__(pos, length, label, children)

    def expr(self):
        # Case guard expression.
        return self.child(0)

    def stmt(self):
        # Statement executed for this case.
        return self.child(1)


class Switch(Statement, Node):
    CODE = "300200"
    LABEL = "Switch"

    def __init__(self, pos, length, label, children):
        assert label is None
        assert len(children) == 2
        assert isinstance(children[1], Block)
        super().__init__(pos, length, label, children)

    def expr(self):
        # Expression being switched on.
        return self.child(0)

    def block(self):
        # Compound body holding the case/default arms.
        return self.child(1)


class Break(Statement, Token):
    CODE = "280002"
    LABEL = "Break"

    def to_s(self):
        return "break"


class ExprStatement(Statement, Node):
    CODE = "260300"
    LABEL = "ExprStatement"

    def __init__(self, pos, length, label, children):
        assert label is None
        assert len(children) == 1
        super().__init__(pos, length, label, children)

    def expr(self):
        # The wrapped expression.
        return self.child(0)
class DoWhile(Loop, Statement, Node):
    CODE = "310200"
    LABEL = "DoWhile"

    def __init__(self, pos, length, label, children):
        assert label is None
        assert len(children) == 2
        super().__init__(pos, length, label, children)

    def condition(self):
        # do-while evaluates the condition after the body: child 1.
        return self.child(1)

    def do(self):
        return self.child(0)


class While(Loop, Statement, Node):
    CODE = "310100"
    LABEL = "While"

    def __init__(self, pos, length, label, children):
        assert label is None
        assert len(children) == 2
        super().__init__(pos, length, label, children)

    def condition(self):
        return self.child(0)

    def do(self):
        return self.child(1)


class For(Loop, Statement, Node):
    CODE = "310300"
    LABEL = "For"

    def __init__(self, pos, length, label, children):
        assert label is None, "for statement should have no label"
        assert len(children) in [3, 4], "for statement should have between 3 and 4 children (inclusive)"
        super().__init__(pos, length, label, children)

    def initialisation(self):
        return self.child(0)

    def condition(self):
        return self.child(1)

    def after(self):
        # The post-iteration expression is optional; the 3-child form omits it.
        children = self.children()
        if len(children) == 3:
            return None
        else:
            return self.child(2)

    def block(self):
        # Loop body is always the last child, in both forms.
        return self.children()[-1]
class ReturnExpr(Statement, Node):
    """A `return` statement carrying an expression."""
    CODE = "280200"
    LABEL = "ReturnExpr"

    def __init__(self, pos, length, label, children):
        assert label is None
        assert len(children) == 1
        super().__init__(pos, length, label, children)

    def expr(self):
        # The returned expression is the sole child.
        return self.child(0)

    def to_s(self):
        # FIX: the original read self.__expr, a name-mangled attribute that is
        # never assigned anywhere, so to_s() always raised AttributeError.
        # Use the expr() accessor instead.
        return "return %s" % self.expr().to_s()
class Return(Statement, Token):
    CODE = "280003"
    LABEL = "Return"

    def to_s(self):
        return "return"


# Todo: move to tokens package?
class IfToken(Token):
    # Token marking the `if` keyword; IfElse asserts it as its first child.
    CODE = "490100"
    LABEL = "IfToken"


class IfElse(Statement, Node):
    CODE = "300100"
    LABEL = "If"

    def __init__(self, pos, length, label, children):
        # 3 children: if-token, condition, then; an optional 4th is the else arm.
        assert len(children) >= 3 and len(children) <= 4
        assert isinstance(children[0], IfToken)
        super().__init__(pos, length, label, children)

    def condition(self):
        return self.child(1)

    def then(self):
        return self.child(2)

    def els(self):
        # Else branch, or None when absent.
        return self.child(3) if len(self.children()) == 4 else None


class Block(Node):
    CODE = "330000"
    LABEL = "Compound"

    def contents(self):
        # Statements contained in this compound block.
        return self.children()
|
"""
calculate and set camera values
for overscan in one camera setup
overscan is not uniform. It matches
image proportions
default overscan:
10 / 10 pixels :: left / right
###
w = 1920.0
h = 1440.0
w_osc = 1980
w_extra = w_osc - w
osc_factor = w / w_osc
h_osc = h / osc_factor
print '\\noriginal res: ', w, '*', h
print 'overscan res', w_osc, '*', h_osc
print 'overscan factor (uniform)', osc_factor
print 'extra pixels (width, height)', w_osc - w, h_osc - h
###
"""
def set_camera_post_scale(ratio):
    """Stub: compute the camera post-scale for *ratio*.

    Not implemented yet; currently always returns None.
    """
    return None
def set_overscan(pixels=10):
    # Stub: will apply a *pixels*-wide overscan to the camera; no-op for now.
    pass


def main():
    # Stub entry point; no behavior yet.
    pass
def test():
    """Reload and run the setoverscan test module."""
    import setoverscan_tests
    try:
        # Python 2: reload is a builtin.
        reload(setoverscan_tests)
    except NameError:
        # Python 3: the reload builtin is gone (NameError); use importlib.
        # FIX: the original used a bare `except:`, which also swallowed real
        # errors raised *by* reload itself; catch only the missing builtin.
        import importlib
        importlib.reload(setoverscan_tests)
    setoverscan_tests.main()
set overscan update
"""
calculate and set camera values
for overscan in one camera setup
overscan is not uniform. It matches
image proportions
default overscan:
10 / 10 pixels :: left / right
###
w = 1920.0
h = 1440.0
w_osc = 1980
w_extra = w_osc - w
osc_factor = w / w_osc
h_osc = h / osc_factor
print '\\noriginal res: ', w, '*', h
print 'overscan res', w_osc, '*', h_osc
print 'overscan factor (uniform)', osc_factor
print 'extra pixels (width, height)', w_osc - w, h_osc - h
###
"""
from fractions import Fraction
import pymel.core
def set_camera_post_scale(ratio):
    """Stub: compute the camera post-scale for *ratio*.

    Not implemented yet; currently always returns None.
    """
    return None
def set_osc_resolution(pixels):
    """Grow the Maya render resolution to include *pixels* of overscan.

    Adds pixels*2 to the height (pixels on each side) and scales the width to
    preserve the original image aspect ratio.

    Returns:
        (new_width, new_height) tuple as written to defaultResolution.
    """
    rendersettings = pymel.core.PyNode('defaultResolution')
    res_x = rendersettings.getAttr('width')
    res_y = rendersettings.getAttr('height')
    # Exact aspect ratio without float drift.
    # NOTE(review): Fraction requires integral arguments — assumes the
    # width/height attributes come back as ints; TODO confirm.
    image_ratio = Fraction(res_x, res_y)
    res_y_new = res_y+(pixels*2)
    res_x_new = float(res_y_new * image_ratio)
    rendersettings.setAttr('width', res_x_new)
    rendersettings.setAttr('height', res_y_new)
    return (res_x_new, res_y_new)
def main(**kwargs):
    """Apply overscan using the optional `pixels` keyword (default 10)."""
    pixels = kwargs.get('pixels', 10)
    set_osc_resolution(pixels=pixels)
def test():
    """Reload and run the setoverscan test module."""
    import setoverscan_tests
    try:
        # Python 2: reload is a builtin.
        reload(setoverscan_tests)
    except NameError:
        # Python 3: the reload builtin is gone (NameError); use importlib.
        # FIX: the original used a bare `except:`, which also swallowed real
        # errors raised *by* reload itself; catch only the missing builtin.
        import importlib
        importlib.reload(setoverscan_tests)
    setoverscan_tests.main()
|
import os
import sys
from .collection import Collection
from .exceptions import CollectionNotFound
from .task import Task
class Loader(object):
    """Discovers and loads task collections from a root directory."""

    def __init__(self, root=None):
        """
        Creates a loader object with search root directory of ``root``.

        If not given, ``root`` defaults to ``os.getcwd``.
        """
        self.root = root or os.getcwd()

    def update_sys_path(self):
        """Adds ``root`` to the first location of the system path."""
        parent = os.path.abspath(self.root)
        # If we want to auto-strip .py:
        # os.path.splitext(os.path.basename(name))[0]
        # TODO: copy over rest of path munging from fabric.main
        if parent not in sys.path:
            sys.path.insert(0, parent)

    def get_collections(self, name, collection):
        """
        Loads all Task objects from the module named ``name`` into
        ``collection`` and returns it.

        Raises CollectionNotFound when the module cannot be imported.
        """
        try:
            module = __import__(name)
            # list(...) keeps the emptiness check and iteration identical on
            # Python 2 (filter -> list) and Python 3 (filter -> iterator).
            candidates = list(filter(
                lambda x: isinstance(x[1], Task),
                vars(module).items()
            ))
            if not candidates:
                # Recurse downwards towards FS
                pass
            # task_name avoids shadowing the ``name`` parameter, which the
            # except clause below still needs.
            for task_name, task in candidates:
                collection.add_task(
                    name=task_name,
                    task=task,
                    aliases=task.aliases,
                    default=task.is_default
                )
            return collection
        except ImportError as e:
            # ``except X as e`` works on Python 2.6+ and 3.x (the original
            # ``except X, e`` form is Python-2-only).
            raise CollectionNotFound(name=name, root=self.root, error=e)

    def load_collection(self, name=None):
        """
        Load collection named ``name``.

        If not given, looks for a ``"tasks"`` collection by default.
        """
        if name is None:
            # TODO: make this configurable
            name = 'tasks'
        c = Collection()
        # FIX: the original called self.add_parent_to_path(), a method that
        # does not exist on this class; the path helper is update_sys_path().
        self.update_sys_path()
        # adding task candidates to collection
        collection = self.get_collections(name, c)
        return collection
Whitespace
import os
import sys
from .collection import Collection
from .exceptions import CollectionNotFound
from .task import Task
class Loader(object):
    """Discovers and loads task collections from a root directory."""

    def __init__(self, root=None):
        """
        Creates a loader object with search root directory of ``root``.

        If not given, ``root`` defaults to ``os.getcwd``.
        """
        self.root = root or os.getcwd()

    def update_sys_path(self):
        """Adds ``root`` to the first location of the system path."""
        parent = os.path.abspath(self.root)
        # If we want to auto-strip .py:
        # os.path.splitext(os.path.basename(name))[0]
        # TODO: copy over rest of path munging from fabric.main
        if parent not in sys.path:
            sys.path.insert(0, parent)

    def get_collections(self, name, collection):
        """
        Loads all Task objects from the module named ``name`` into
        ``collection`` and returns it.

        Raises CollectionNotFound when the module cannot be imported.
        """
        try:
            module = __import__(name)
            # list(...) keeps the emptiness check and iteration identical on
            # Python 2 (filter -> list) and Python 3 (filter -> iterator).
            candidates = list(filter(
                lambda x: isinstance(x[1], Task),
                vars(module).items()
            ))
            if not candidates:
                # Recurse downwards towards FS
                pass
            # task_name avoids shadowing the ``name`` parameter, which the
            # except clause below still needs.
            for task_name, task in candidates:
                collection.add_task(
                    name=task_name,
                    task=task,
                    aliases=task.aliases,
                    default=task.is_default
                )
            return collection
        except ImportError as e:
            # ``except X as e`` works on Python 2.6+ and 3.x (the original
            # ``except X, e`` form is Python-2-only).
            raise CollectionNotFound(name=name, root=self.root, error=e)

    def load_collection(self, name=None):
        """
        Load collection named ``name``.

        If not given, looks for a ``"tasks"`` collection by default.
        """
        if name is None:
            # TODO: make this configurable
            name = 'tasks'
        c = Collection()
        # FIX: the original called self.add_parent_to_path(), a method that
        # does not exist on this class; the path helper is update_sys_path().
        self.update_sys_path()
        # adding task candidates to collection
        collection = self.get_collections(name, c)
        return collection
|
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from api.models import Post, Comment, Image, Friending, Author, Node
from api.serializers import PostSerializer, CommentSerializer, ImageSerializer, AuthorSerializer, FriendingSerializer
from api.serializers import UserSerializer
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import status
from rest_framework.views import APIView
from django.http import Http404
from django.contrib.auth.models import User
from django.utils import timezone
from rest_framework import generics
from api.paginators import ListPaginator
from django.shortcuts import get_object_or_404
from django.conf import settings
from itertools import chain
from django.conf import settings
from rest_framework.reverse import reverse
from post.models import Notification
import json
import urllib2
import json
import base64
from rest_framework import HTTP_HEADER_ENCODING
# Create your views here.
@api_view(('GET',))
def api_root(request, format=None):
    # API index: hyperlinks to the top-level list endpoints.
    return Response({
        'posts': reverse('post-list', request=request, format=format),
        'author': reverse('author-list', request=request, format=format),
        'images': reverse('images', request=request, format=format),
        'friendrequest': reverse('friendrequest', request=request, format=format),
    })
'''
Paramters:
* Post_pk - the post whose privacy / visibility settings is being checked
* Author_id - author who is wants to view the post
Return:
* True if access is allowed, False otherwise
'''
def isAllowed(post_pk, author_id):
    # Look up the post; a missing post is surfaced as Post.DoesNotExist.
    try:
        post = Post.objects.get(id=post_pk)
    except:
        raise Post.DoesNotExist
    # Public posts are visible to everyone; short-circuit before any author
    # lookups.
    if post.visibility == Post.PUBLIC:
        return True
    privacy = post.visibility
    viewer = Author.objects.get(id=author_id)
    # if the post was created by the user allow access
    if viewer == post.author :
        return True
    # if it is a public post allow everypne access
    # NOTE(review): unreachable — the PUBLIC case already returned above.
    elif privacy == Post.PUBLIC:
        return True
    elif privacy == Post.SERVER_ONLY:
        print viewer.host
        print post.author.host
        # Server-only posts are visible to authors on the same host.
        if viewer.host == post.author.host:
            return True
        else:
            return False
    # checks if another post is being shared with you
    elif privacy == Post.OTHER_AUTHOR:
        user = User.objects.get(username=post.other_author)
        other_author = Author.objects.get(user=user)
        if other_author.id == author_id:
            return True
        else:
            return False
    # check if the user is in the friend list
    elif privacy == Post.FRIENDS or privacy == Post.FRIENDS_OF_FRIENDS:
        friend_pairs = Friending.objects.filter(author=post.author)
        friends = []
        for i in range(len(friend_pairs)):
            # make sure they are mutual friends
            backwards = Friending.objects.filter(author=friend_pairs[i].friend,friend=post.author)
            if len(backwards) > 0:
                friends.append(friend_pairs[i].friend)
        if viewer in friends:
            return True
        # check if the user is in the FoaF list
        elif privacy == Post.FRIENDS_OF_FRIENDS:
            for i in range(len(friends)):
                fofriend_pairs = Friending.objects.filter(author=friends[i])
                fofriends = []
                for j in range(len(fofriend_pairs)):
                    # make sure they are mutual friends
                    backwards = Friending.objects.filter(friend=friends[i],author=fofriend_pairs[j].friend)
                    if len(backwards) > 0:
                        fofriends.append(fofriend_pairs[j].friend)
                # NOTE(review): indentation was lost in this source; as
                # reconstructed, the else below returns False after checking
                # only the FIRST friend's circle, and the FRIENDS case falls
                # through returning None (falsy) when the viewer is not a
                # friend — both look unintended; TODO confirm against history.
                if viewer in fofriends:
                    return True
                # if not a friend return false
                else:
                    return False
    else:
        return False
'''
Parameters :
* author_id
Return:
* list of all friends (list of Author ids)
'''
def getAllFriends(author_id):
    # Return ids of authors sharing a mutual (two-way) friendship with
    # author_id.
    mutual = []
    # Rows where this author follows someone; values() yields dicts keyed by
    # 'friend__id'.
    following = Friending.objects.filter(author__id=author_id).values('friend__id')
    for row in following:
        candidate = row["friend__id"]
        # A friendship counts only when the other author follows back.
        reciprocal = Friending.objects.filter(author__id=candidate, friend__id=author_id)
        if len(reciprocal) > 0:
            mutual.append(candidate)
    return mutual
'''
Parameters :
* author_id
Return:
* list of all friends of friends (list of Author ids) - no duplicates
'''
def getAllFOAF(author_id):
    # Collect the deduplicated friends-of-friends ids for author_id.
    foaf = []
    for friend_id in getAllFriends(author_id):
        # Add each friend's friends, skipping anyone already collected.
        additions = set(getAllFriends(friend_id)) - set(foaf)
        foaf.extend(list(additions))
    return foaf
def getRemoteAuthorProfile(node_url, request):
    # Fetch a remote author's profile from a federated node: builds an
    # authenticated GET against the node's author endpoint using the
    # HTTP_REMOTE_USER header as the author id, and wraps the JSON response
    # in an AuthorSerializer.
    url = node_url + 'api/author/' + request.META.get("HTTP_REMOTE_USER")
    opener = urllib2.build_opener(urllib2.HTTPHandler)
    req = urllib2.Request(url)
    # SECURITY NOTE(review): peer credentials are hard-coded in source; they
    # should be moved to settings/environment configuration.
    credentials = { "http://project-c404.rhcloud.com/" : "team4:team4team4",\
        "http://disporia-cmput404.rhcloud.com/": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InRlYW00IiwidXNlcl9pZCI6MiwiZW1haWwiOiIiLCJleHAiOjE0NTg1OTE1Nzd9.WjbgA_s-cWtNHzURwAceZOYuD4RASsSqqFiwnY58FqQ",\
        "http://cmput404-team-4b.herokuapp.com/" : "teamgeneva@teamgeneva:teamgeneva"}
    print node_url
    print url
    # set credentials on request
    # Each peer node expects a different authorization scheme.
    if node_url == "http://project-c404.rhcloud.com/":
        creds = base64.b64encode(credentials[node_url])
        req.add_header("Authorization", "Basic " + creds)
    elif node_url == "http://disporia-cmput404.rhcloud.com/":
        creds = credentials[node_url]
        req.add_header("Authorization", "JWT " + creds)
    elif node_url == "http://cmput404-team-4b.herokuapp.com/":
        encodedValue = base64.b64encode(credentials[node_url])
        req.add_header("Authorization", "Basic " + encodedValue ) #Header, Value
    x = opener.open(req)
    y = x.read()
    author_serializer = AuthorSerializer(json.loads(y))
    print author_serializer.data
    return author_serializer
'''
Returns True if request.user is a Node
Returns False if request.user is an Author
'''
def getRemoteNode(user):
    # Return the Node record for *user*, or None when no Node matches (i.e.
    # the user is a local Author).
    try:
        return Node.objects.get(user=user)
    except Node.DoesNotExist:
        return None
class PostList(generics.GenericAPIView):
    '''
    Lists all Posts | Create a new Post / Update an existing post
    GET : http://service/api/posts/
    * Returns a list of all public posts on the server - most recent to least recent order
    POST : http://service/api/posts/
    * Creates a new post
    POST : http://service/api/posts/<post_pk>
    * Updates the post specified by the post_pk
    '''
    pagination_class = ListPaginator
    serializer_class = PostSerializer
    queryset = Post.objects.all()

    def get(self, request, format=None):
        # Only PUBLIC posts are listed, newest first.
        posts = Post.objects.filter(visibility='PUBLIC').order_by('-published')
        page = self.paginate_queryset(posts)
        serializer = PostSerializer(page, many=True)
        return self.get_paginated_response({"data": serializer.data, "query": "posts"})

    def post(self, request, post_pk=None, format=None):
        # ensure user is authenticated
        if (not request.user.is_authenticated()):
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        data = request.data
        '''
        Gets the author from the request
        '''
        try:
            author = Author.objects.get(user=request.user)
        except Author.DoesNotExist as e:
            return Response({"message":"Author does not exist / is not a local author"}, status=status.HTTP_400_BAD_REQUEST)
        # If its a remote node - not allowed to make posts
        if request.get_host() not in author.host:
            return Response({"message":"Only local authors can make posts"}, status=status.HTTP_403_FORBIDDEN)
        statusCode = status.HTTP_201_CREATED
        serializer = PostSerializer(data=data)
        '''
        Handles : EDIT Posts via POST method
        '''
        if post_pk != None:
            try:
                post = Post.objects.get(id=post_pk)
            except Post.DoesNotExist as e:
                return Response({"message": "Post does not exist"}, status=status.HTTP_404_NOT_FOUND)
            # only allow author of the post to modify it
            if author != post.author:
                return Response({"message":"Only the author of this post can make changes to it"}, status=status.HTTP_403_FORBIDDEN)
            # Editing an existing post returns 200 rather than 201.
            statusCode = status.HTTP_200_OK
            serializer = PostSerializer(post, data=data)
        if serializer.is_valid():
            print "DEBUG : API - views.py - PostList"
            # Stamp server-side fields before saving.
            serializer.validated_data["author"] = author
            serializer.validated_data["published"] = timezone.now()
            serializer.save()
            return Response(serializer.data, status=statusCode)
        else:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class PostDetail(generics.GenericAPIView):
    '''
    Gets a specific Post / Updates a Post / Deletes a Post
    GET : http://service/api/posts/<post_pk>
    * Returns the post with id post_pk
    PUT : http://service/api/posts/<post_pk>
    * Updates the post specified at post_pk
    DELETE : http://service/api/posts/<post_pk>
    * Deletes the post specified by the post_pk
    '''
    serializer_class = PostSerializer
    queryset = Post.objects.all()

    def get(self, request, pk, format=None):
        # Public posts need no authentication; other visibilities are checked
        # against the requesting node/author below.
        try:
            post = Post.objects.get(id=pk)
        except Post.DoesNotExist:
            return Response({"message":"Post does not exist"}, status=status.HTTP_404_NOT_FOUND)
        serializer = PostSerializer(post)
        if post.visibility == Post.PUBLIC:
            return Response(serializer.data, status=status.HTTP_200_OK)
        # Non-public post: must be authenticated.
        if not request.user.is_authenticated():
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        # Remote nodes may see anything except server-only/private posts.
        remoteNode = getRemoteNode(request.user)
        if remoteNode is not None:
            if post.visibility == Post.SERVER_ONLY or post.visibility == Post.ME_ONLY:
                return Response({"message": "This node & authors on this node are not allowed to see this post"}, status=status.HTTP_403_FORBIDDEN)
            return Response(serializer.data, status=status.HTTP_200_OK)
        try:
            author = Author.objects.get(user=request.user)
        except Author.DoesNotExist:
            return Response({"message": "Author does not exist"}, status=status.HTTP_403_FORBIDDEN)
        # BUG FIX: the original passed the undefined name `author_id`
        # (NameError at runtime); isAllowed expects the viewer's id.
        if isAllowed(pk, author.id):
            return Response(serializer.data, status=status.HTTP_200_OK)
        return Response({"message": "User is not allowed to see this post"}, status=status.HTTP_403_FORBIDDEN)

    def put(self, request, pk, format=None):
        # Full update of a post; only its author may do this.
        data = request.data
        if not request.user.is_authenticated():
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        try:
            post = Post.objects.get(id=pk)
        except Post.DoesNotExist:
            return Response({"message":"Post does not exist"}, status=status.HTTP_404_NOT_FOUND)
        try:
            loggedInAuthor = Author.objects.get(user=request.user)
        except Author.DoesNotExist:
            return Response({"message":"Author does not exist"}, status=status.HTTP_401_UNAUTHORIZED)
        if loggedInAuthor != post.author:
            return Response({"message": "User is not the author of this post & is not allowed to update this post"}, status=status.HTTP_403_FORBIDDEN)
        serializer = PostSerializer(post, data=data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_200_OK)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, pk, format=None):
        # Delete a post; only its author may do this.
        if not request.user.is_authenticated():
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        try:
            post = Post.objects.get(id=pk)
        except Post.DoesNotExist:
            return Response({"message":"Post does not exist"}, status=status.HTTP_404_NOT_FOUND)
        try:
            loggedInAuthor = Author.objects.get(user=request.user)
        except Author.DoesNotExist:
            return Response({"message":"Author does not exist"}, status=status.HTTP_401_UNAUTHORIZED)
        if loggedInAuthor != post.author:
            return Response({"message": "User is not the author of this post & is not allowed to delete this post"}, status=status.HTTP_403_FORBIDDEN)
        post.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class CommentList(generics.GenericAPIView):
    '''
    Lists all Comments for specific post / Create a new comment
    GET : http://service/api/posts/<post_pk>/comments/
    * Returns a list of all comments on the post specified by post_pk - most recent to least recent order
    POST : http://service/api/posts/<post_pk>/comments/
    * Creates a new comment attached to the post specified by post_pk
    Required fields for the body of a post are:
    "author" (the Author object making the post),
    "comment" (the comment you wish to make),
    "contentType" (plaintext or markdown),
    '''
    pagination_class = ListPaginator
    serializer_class = CommentSerializer
    queryset = Comment.objects.all()

    def get(self, request, post_pk, format=None):
        # Comments inherit the visibility rules of their parent post.
        try:
            post = Post.objects.get(id=post_pk)
        except Post.DoesNotExist:
            return Response({"message":"Post does not exist"}, status=status.HTTP_404_NOT_FOUND)
        comments = Comment.objects.filter(post=post_pk).order_by('-published')
        page = self.paginate_queryset(comments)
        serializer = CommentSerializer(page, many=True)
        paginatedResponse = self.get_paginated_response({"data": serializer.data, "query": "comments"})
        if post.visibility == Post.PUBLIC:
            return paginatedResponse
        if not request.user.is_authenticated():
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        remoteNode = getRemoteNode(request.user)
        if remoteNode is not None:
            # Remote nodes may not see server-only or private posts.
            if post.visibility == Post.SERVER_ONLY or post.visibility == Post.ME_ONLY:
                return Response({"message": "This node & authors on this node are not allowed to see this post"}, status=status.HTTP_403_FORBIDDEN)
            return paginatedResponse
        try:
            author = Author.objects.get(user=request.user)
        except Author.DoesNotExist:
            return Response({"message": "Author does not exist"}, status=status.HTTP_403_FORBIDDEN)
        if isAllowed(post_pk, author.id):
            return paginatedResponse
        return Response({"message": "User is not allowed to see this comment or it's corresponding post"}, status=status.HTTP_403_FORBIDDEN)

    def _save_comment(self, data, author, post_pk):
        # Validate and persist a comment by *author* on post *post_pk*;
        # shared by the remote-node and local-author paths of post().
        serializer = CommentSerializer(data=data)
        if serializer.is_valid():
            serializer.validated_data["author"] = author
            serializer.validated_data["published"] = timezone.now()
            serializer.validated_data["post"] = Post.objects.get(pk=post_pk)
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def post(self, request, post_pk, format=None):
        if not request.user.is_authenticated():
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        try:
            post = Post.objects.get(id=post_pk)
        except Post.DoesNotExist:
            return Response({"message":"Post does not exist"}, status=status.HTTP_404_NOT_FOUND)
        data = request.data
        remoteNode = getRemoteNode(request.user)
        if remoteNode is not None:
            # Remote node path: the commenting author lives on another node.
            if post.visibility == Post.SERVER_ONLY or post.visibility == Post.ME_ONLY:
                return Response({"message": "This node & authors on this node are not allowed to see this post & thus cannot comment"}, status=status.HTTP_403_FORBIDDEN)
            author_serializer = AuthorSerializer(data["author"])
            try:
                author = Author.objects.get(id=author_serializer.data["id"], host=remoteNode.url)
            except Author.DoesNotExist:
                # Unknown remote author: mirror it locally, copying over every
                # non-null field from the payload.
                # BUG FIX: the original wrote `author.key = ...`, which stored
                # every value under a literal attribute named "key"; setattr()
                # assigns the field the loop variable actually names.
                author = Author.objects.create(id=author_serializer.data["id"])
                for key in author_serializer.data.keys():
                    if author_serializer.data[key] is not None:
                        setattr(author, key, author_serializer.data[key])
                author.save()
            return self._save_comment(data, author, post_pk)
        # Local author path: author id comes from the request body.
        try:
            author = Author.objects.get(id=data["author"]["id"])
        except Author.DoesNotExist:
            return Response({"message": "Author does not exist"}, status=status.HTTP_403_FORBIDDEN)
        if isAllowed(post_pk, author.id):
            return self._save_comment(data, author, post_pk)
        return Response({"message": "User is not allowed to see this post/comment"}, status=status.HTTP_403_FORBIDDEN)
class CommentDetail(generics.GenericAPIView):
    '''
    Gets a specific Comment/ Updates a Comment / Deletes a Comment
    GET : http://service/api/posts/<post_pk>/comments/<comment_pk>
    * Returns the comment with id comment_pk correlating to the post specified by post_pk
    PUT : http://service/api/posts/<post_pk>/comments/<comment_pk>
    * Updates the comment specified at comment_pk
    DELETE : http://service/api/posts/<post_pk>/comments/<comment_pk>
    * Deletes the comment specified by the comment_pk
    '''
    serializer_class = CommentSerializer
    queryset = Comment.objects.all()

    def get(self, request, post_pk, comment_pk, format=None):
        # Visibility of a comment follows its parent post.
        try:
            post = Post.objects.get(id=post_pk)
        except Post.DoesNotExist:
            return Response({"message":"Post does not exist"}, status=status.HTTP_404_NOT_FOUND)
        try:
            comment = Comment.objects.get(id=comment_pk)
        except Comment.DoesNotExist:
            return Response({"message":"Comment does not exist"}, status=status.HTTP_404_NOT_FOUND)
        serializer = CommentSerializer(comment)
        if post.visibility == Post.PUBLIC:
            return Response(serializer.data, status=status.HTTP_200_OK)
        if not request.user.is_authenticated():
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        remoteNode = getRemoteNode(request.user)
        if remoteNode is not None:
            if post.visibility == Post.SERVER_ONLY or post.visibility == Post.ME_ONLY:
                return Response({"message": "This node & authors on this node are not allowed to see this post & thus cannnot see comments"}, status=status.HTTP_403_FORBIDDEN)
            return Response(serializer.data, status=status.HTTP_200_OK)
        try:
            author = Author.objects.get(user=request.user)
        except Author.DoesNotExist:
            return Response({"message": "Author does not exist"}, status=status.HTTP_403_FORBIDDEN)
        # BUG FIX: the original returned the undefined name `paginatedResponse`
        # (copy-paste from CommentList.get); return the serialized comment.
        if isAllowed(post_pk, author.id):
            return Response(serializer.data, status=status.HTTP_200_OK)
        return Response({"message": "User is not allowed to see this comment or it's corresponding post"}, status=status.HTTP_403_FORBIDDEN)

    def put(self, request, post_pk, comment_pk, format=None):
        # Update a comment; only its author (local or remote) may do so.
        data = request.data
        if not request.user.is_authenticated():
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        try:
            post = Post.objects.get(id=post_pk)
        except Post.DoesNotExist:
            return Response({"message":"Post does not exist"}, status=status.HTTP_404_NOT_FOUND)
        try:
            comment = Comment.objects.get(id=comment_pk)
        except Comment.DoesNotExist:
            return Response({"message":"Comment does not exist"}, status=status.HTTP_404_NOT_FOUND)
        remoteNode = getRemoteNode(request.user)
        if remoteNode is not None:
            try:
                author = Author.objects.get(id=data["author"]["id"])
            except Author.DoesNotExist:
                # BUG FIX: the original referenced the undefined name
                # `author_serializer` here (NameError); take the author fields
                # from the request payload instead.
                author = Author.objects.create(id=data["author"]["id"], displayname=data["author"]["displayname"], host=data["author"]["host"], github=data["author"]["github"])
                author.save()
        else:
            try:
                author = Author.objects.get(user=request.user)
            except Author.DoesNotExist:
                return Response({"message": "Author does not exist"}, status=status.HTTP_403_FORBIDDEN)
        # only allow the comment's author to modify it
        if author.id != comment.author.id:
            return Response({"message": "User is not the author of this comment & is not allowed to update this comment"}, status=status.HTTP_403_FORBIDDEN)
        serializer = CommentSerializer(comment, data=data)
        if serializer.is_valid():
            serializer.validated_data["published"] = timezone.now()
            serializer.save()
            return Response(serializer.data, status=status.HTTP_200_OK)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, post_pk, comment_pk, format=None):
        # Delete a comment; remote nodes are refused (no author accompanies them).
        if not request.user.is_authenticated():
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        try:
            post = Post.objects.get(id=post_pk)
        except Post.DoesNotExist:
            return Response({"message":"Post does not exist"}, status=status.HTTP_404_NOT_FOUND)
        try:
            comment = Comment.objects.get(id=comment_pk)
        except Comment.DoesNotExist:
            return Response({"message":"Comment does not exist"}, status=status.HTTP_404_NOT_FOUND)
        remoteNode = getRemoteNode(request.user)
        if remoteNode is not None:
            return Response({"message":"remote authors can't delete comments. no author is provided."}, status=status.HTTP_403_FORBIDDEN)
        try:
            author = Author.objects.get(user=request.user)
        except Author.DoesNotExist:
            return Response({"message":"Author does not exist"}, status=status.HTTP_401_UNAUTHORIZED)
        # only allow the comment's author to delete it
        if author.id != comment.author.id:
            return Response({"message": "User is not the author of this comment & is not allowed to delete this comment"}, status=status.HTTP_403_FORBIDDEN)
        comment.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class Images(generics.GenericAPIView):
    '''
    Lists all Images / Posts a new image
    GET : http://service/api/images/
    * Returns a list of all images on the server (not including profile pictures) - most recent to least recent order
    POST : http://service/api/images/
    * Creates a new image
    '''
    pagination_class = ListPaginator
    serializer_class = ImageSerializer
    queryset = Image.objects.all()

    def get(self, request, format=None):
        # List all images, newest first; authentication required.
        if not request.user.is_authenticated():
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        images = Image.objects.order_by('-upload_date')
        page = self.paginate_queryset(images)
        if page is not None:
            serializer = ImageSerializer(page, many=True)
            return self.get_paginated_response({"data":serializer.data, "query": "images"})
        # BUG FIX: the original fell off the end (implicit None -> HTTP 500)
        # when pagination was disabled; serve an unpaginated response instead.
        serializer = ImageSerializer(images, many=True)
        return Response({"data": serializer.data, "query": "images"}, status=status.HTTP_200_OK)

    def post(self, request, format=None):
        # Upload a new image owned by the logged-in author.
        if not request.user.is_authenticated():
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        serializer = ImageSerializer(data=request.data)
        if serializer.is_valid():
            serializer.validated_data["author"] = Author.objects.get(user=request.user)
            serializer.validated_data["upload_date"] = timezone.now()
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        # BUG FIX: the original built this error Response but never returned it.
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class AuthorList(generics.GenericAPIView):
    '''
    Lists all Authors / Posts a new Author
    GET : http://service/api/author/
    * Returns a list of authors on the server
    '''
    serializer_class = AuthorSerializer
    queryset = Author.objects.all()

    def get(self, request, format=None):
        # Enumerating authors requires an authenticated caller.
        if not request.user.is_authenticated():
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        serialized = AuthorSerializer(Author.objects.all(), many=True)
        return Response(serialized.data, status=status.HTTP_200_OK)
class AuthorTimeline(generics.GenericAPIView):
    '''
    Lists all Posts an author has made
    GET : http://service/api/author/<author_id>/posts
    * Returns a list of all posts on the server made by author specified by <author_id> - most recent to least recent order
    '''
    pagination_class = ListPaginator
    serializer_class = PostSerializer
    queryset = Post.objects.all()

    def get(self, request, author_pk=None, format=None):
        # Build the set of posts visible to the caller, optionally restricted
        # to a single author (author_pk). Anonymous callers see only PUBLIC.
        if request.user.is_authenticated():
            # get currently logged in user
            # BUG FIX: the original caught the bare name `DoesNotExist`, which
            # is undefined at module scope; qualify it with the model.
            try:
                viewer = Author.objects.get(user=request.user)
            except Author.DoesNotExist:
                return Response(status=status.HTTP_404_NOT_FOUND)
            if author_pk is not None:
                # 404 early if the requested author does not exist.
                try:
                    author = Author.objects.get(id=author_pk)
                except Author.DoesNotExist:
                    return Response(status=status.HTTP_404_NOT_FOUND)
            remoteNode = getRemoteNode(request.user)
            if remoteNode is not None:
                # Remote nodes see everything except server-only/private posts.
                visibilities = [Post.PUBLIC, Post.FRIENDS, Post.FRIENDS_OF_FRIENDS, Post.OTHER_AUTHOR]
                if author_pk is None:
                    resultPosts = Post.objects.filter(visibility__in=visibilities)
                else:
                    resultPosts = Post.objects.filter(visibility__in=visibilities, author__id=author_pk)
                page = self.paginate_queryset(resultPosts)
                serializer = PostSerializer(page, many=True)
                return self.get_paginated_response({"data": serializer.data, "query": "posts"})
            # posts visible to the currently authenticated local user
            if author_pk is None:
                # Own posts + public posts + friends' / foafs' posts.
                authorsPosts = Post.objects.filter(author=viewer)
                publicPosts = Post.objects.filter(visibility=Post.PUBLIC)
                friends = getAllFriends(viewer.id)
                friendsPosts = Post.objects.filter(author__id__in=friends, visibility__in=[Post.FRIENDS, Post.FRIENDS_OF_FRIENDS])
                foaf = getAllFOAF(viewer.id)
                # BUG FIX: the original passed a plain string to `visibility__in`,
                # which Django iterates character-by-character; use equality.
                foafPosts = Post.objects.filter(author__id__in=foaf, visibility=Post.FRIENDS_OF_FRIENDS)
                # combine all posts into one list w/o duplicates
                result = list(set(authorsPosts) | set(publicPosts) | set(friendsPosts) | set(foafPosts))
                # re-query so the union can be ordered most-recent-first
                resultPosts = Post.objects.filter(id__in=[post.id for post in result]).order_by('-published')
                page = self.paginate_queryset(resultPosts)
                serializer = PostSerializer(page, many=True)
                return self.get_paginated_response({"data": serializer.data, "query": "posts"})
            # author_pk provided: posts by that author visible to the viewer
            else:
                try:
                    viewee = Author.objects.get(id=author_pk)
                except Author.DoesNotExist:
                    return Response(status=status.HTTP_404_NOT_FOUND)
                if viewee.id == viewer.id:
                    # Viewing one's own timeline: no visibility filtering.
                    resultPosts = Post.objects.filter(author=viewee).order_by('-published')
                else:
                    friends = getAllFriends(viewee.id)
                    foaf = getAllFOAF(viewee.id)
                    friendsPosts = []
                    foafPosts = []
                    # friend/foaf posts only when the viewer holds that relation
                    if viewer.id in friends:
                        friendsPosts = Post.objects.filter(author=viewee, visibility__in=[Post.FRIENDS, Post.FRIENDS_OF_FRIENDS])
                    if viewer.id in foaf:
                        foafPosts = Post.objects.filter(author=viewee, visibility=Post.FRIENDS_OF_FRIENDS)
                    publicPosts = Post.objects.filter(author=viewee, visibility=Post.PUBLIC)
                    serverPosts = Post.objects.filter(author=viewee, visibility=Post.SERVER_ONLY)
                    otherAuthor = Post.objects.filter(author=viewee, visibility=Post.OTHER_AUTHOR, other_author=viewer)
                    # combine all posts into one list w/o duplicates
                    result = list(set(publicPosts) | set(friendsPosts) | set(foafPosts) | set(serverPosts) | set(otherAuthor))
                    resultPosts = Post.objects.filter(id__in=[post.id for post in result]).order_by('-published')
                page = self.paginate_queryset(resultPosts)
                if page is not None:
                    serializer = PostSerializer(page, many=True)
                    return self.get_paginated_response({"data": serializer.data, "query": "posts"})
        # anonymous viewers see public posts only
        else:
            posts = Post.objects.filter(visibility=Post.PUBLIC).order_by('-published')
            page = self.paginate_queryset(posts)
            if page is not None:
                serializer = PostSerializer(page, many=True)
                return self.get_paginated_response({"data": serializer.data, "query": "posts"})
        # Reached only when pagination yields no page (unpaginated fallback TODO).
        return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class AuthorDetail(generics.GenericAPIView):
    '''
    Gets Author / Updates Author via POST
    GET : http://service/api/author/<author_id>
    * Returns the author specified by author_id. This includes the author's id, github name, profile picture url, and host.
    POST : http://service/api/author/<author_id>
    * Updates the author specified by author_id
    '''
    serializer_class = AuthorSerializer
    queryset = Author.objects.all()

    def get(self, request, author_pk, format=None):
        if not request.user.is_authenticated():
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        try:
            author = Author.objects.get(id=author_pk)
        except Author.DoesNotExist:
            return Response({"message":"Author does not exist"}, status=status.HTTP_404_NOT_FOUND)
        serializer = AuthorSerializer(author)
        responseData = serializer.data
        # Friendship is mutual: include only friends where *both* directions of
        # the Friending relation exist.
        friendsList = []
        for person_pair in Friending.objects.filter(author=author):
            # .exists() asks the database directly instead of materializing the
            # reverse queryset just to take len() of it.
            if Friending.objects.filter(author=person_pair.friend, friend=author).exists():
                friendsList.append(person_pair.friend)
        responseData["friends"] = AuthorSerializer(friendsList, many=True).data
        # Remote authors have no local user; build their url from the raw id.
        if request.get_host() not in author.host:
            responseData["url"] = author.host + 'author/' + str(author.id)
        else:
            responseData["url"] = author.host + "author/" + author.user.username
        return Response(responseData, status=status.HTTP_200_OK)

    def post(self, request, author_pk=None, format=None):
        # Update a profile; only the profile's owner may do so.
        if not request.user.is_authenticated():
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        try:
            author = Author.objects.get(id=author_pk)
        except Author.DoesNotExist:
            return Response({"message":"Author does not exist"}, status=status.HTTP_404_NOT_FOUND)
        if request.user != author.user:
            return Response({"message":"only this author can make changes to their profile"},status=status.HTTP_403_FORBIDDEN)
        serializer = AuthorSerializer(author, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_200_OK)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class FriendingCheck(generics.GenericAPIView):
    '''
    Returns a list of an author's friends / Checks whether or not 2 authors are friends
    GET : http://service/api/friends/<author_id>
    * Returns the author specified by author_id's list of friends (by friend id)
    GET : http://service/api/friends/<author_id1>/<author_id2>
    * Returns the 2 author's ids & a boolean specifying if the 2 authors are friends or not.
    '''
    queryset = Friending.objects.all()
    serializer_class = FriendingSerializer

    def get(self, request, author_id1, author_id2=None, format=None):
        # ensure user is authenticated
        if (not request.user.is_authenticated()):
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        # returns whether or not author_id1 & author_id2 are friends or not
        if author_id2 != None:
            # Friendship is mutual: both (a1 -> a2) and (a2 -> a1) rows must
            # exist, so a combined result longer than 1 means "friends".
            aList = Friending.objects.filter(author__id=author_id1, friend__id=author_id2)
            bList = Friending.objects.filter(author__id=author_id2, friend__id=author_id1)
            result = list(chain(aList, bList))
            if len(result) > 1:
                friends = True
            else:
                friends = False
            return Response({'query':'friends', 'authors': [author_id1, author_id2], 'friends':friends}, status=status.HTTP_200_OK)
        # returns all friends of author_1
        else:
            # check if request is from remote node, if so handle it
            remoteNode = getRemoteNode(request.user)
            if remoteNode != None:
                # Remote users must query their own node for their friend list.
                return Response({"message":"This is a remote user on another node, to see their friends, use the api of the remote user's original node"}, status=status.HTTP_400_BAD_REQUEST)
                # author_serializer = getRemoteAuthorProfile(remoteNode.url, request)
                # # get remoteAuthor's Author object in our database (has id, displayname, host only - no user) if we already have it
                # # else, create a new author object w/o user
                # # author = remoteAuthor here
                # try:
                #     author = Author.objects.get(id=author_serializer.data["id"])
                # except Author.DoesNotExist as e:
                #     author = Author.objects.create(id=author_serializer.data["id"], displayname=author_serializer.data["displayname"], host=remoteNode.url)
                #     author.save()
            # local author - get from db
            else:
                author = Author.objects.get(user=request.user)
                author_id = author.id
                friendsList = getAllFriends(author_id)
                return Response({'query':'friends', 'authors': friendsList}, status=status.HTTP_200_OK)

    def post(self, request, author_id1, format=None):
        # Intersect a posted candidate list ("authors") with author_id1's
        # actual friends and return the matches.
        # ensure user is authenticated
        if (not request.user.is_authenticated()):
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        # check if request is from remote node, if so handle it
        remoteNode = getRemoteNode(request.user)
        if remoteNode != None:
            return Response({"message":"This is a remote user on another node, to use this service, use the api of the remote user's original node"}, status=status.HTTP_400_BAD_REQUEST)
            # author_serializer = getRemoteAuthorProfile(remoteNode.url, request)
            # # get remoteAuthor's Author object in our database (has id, displayname, host only - no user) if we already have it
            # # else, create a new author object w/o user
            # # author = remoteAuthor here
            # try:
            #     author = Author.objects.get(id=author_serializer.data["id"])
            # except Author.DoesNotExist as e:
            #     author = Author.objects.create(id=author_serializer.data["id"], displayname=author_serializer.data["displayname"], host=remoteNode.url)
            #     author.save()
        # local author - get from db
        else:
            try:
                author = Author.objects.get(id=author_id1)
            except Author.DoesNotExist as e:
                return Response({"message":"Author does not exist"}, status=status.HTTP_404_NOT_FOUND)
        data = request.data
        # list of uuid in string representation
        listOfPotentialFriendIds = data["authors"]
        listOfFriendIds = getAllFriends(author_id1)
        # convert list of uuid to strings so set-intersection matches the
        # string ids supplied in the request body
        for i in range(0, len(listOfFriendIds)):
            listOfFriendIds[i] = str(listOfFriendIds[i])
        resultList = list(set(listOfFriendIds) & set(listOfPotentialFriendIds))
        returnObj = { "query": "friends", "author": author_id1, "authors": resultList }
        return Response(returnObj, status=status.HTTP_200_OK)
class RequestList(generics.GenericAPIView):
    '''
    GET : http://service/api/.../<author_id>
    * Returns the ids of authors who follow (have sent a friend request to)
      the author specified by author_id.
    '''
    serializer_class = FriendingSerializer
    queryset = Friending.objects.all()

    def get(self, request, author_id1, format=None):
        if not request.user.is_authenticated():
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        if author_id1 is not None:
            # Followers are Friending rows whose target is author_id1.
            # BUG FIX: the original filtered on "friending__id", which is not a
            # field of Friending (the model is queried via "author"/"friend"
            # everywhere else in this module) and would raise FieldError.
            followerList = [row["author__id"] for row in Friending.objects.filter(friend__id=author_id1).values('author__id')]
            return Response({'query':'friending', 'followers':followerList}, status=status.HTTP_200_OK)
class FriendRequest(generics.GenericAPIView):
serializer_class = FriendingSerializer
queryset = Friending.objects.all()
def post(self, request, format=None):
# ensure user is authenticated
if (not request.user.is_authenticated()):
return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
data = request.data
if data == None:
return Response({"message": "no body given."}, status=status.HTTP_400_BAD_REQUEST)
# # check if request is from remote node, if so handle it
# remoteNode = getRemoteNode(request.user)
# if remoteNode != None:
# author_serializer = getRemoteAuthorProfile(remoteNode.url, request)
# # get remoteAuthor's Author object in our database (has id, displayname, host only - no user) if we already have it
# # else, create a new author object w/o user
# # author_of_request = remoteAuthor here
# try:
# author_of_request = Author.objects.get(id=author_serializer.data["id"])
# except Author.DoesNotExist as e:
# author_of_request = Author.objects.create(id=author_serializer.data["id"], displayname=author_serializer.data["displayname"], host=remoteNode.url)
# author_of_request.save()
# # local author - get from db
# else:
# author_of_request = Author.objects.get(user=request.user)
try:
author_req= request.data["author"]
friend_req = request.data["friend"]
except:
return Response({"message":"missing inputs"}, status=status.HTTP_400_BAD_REQUEST)
atLeastOneAuthorIsLocal = False
bothLocalAuthors = False
try:
author = Author.objects.get(id=author_req["id"])
# it's a local user
if request.get_host() in author.host: # author.user != None:
atLeastOneAuthorIsLocal = True
# else is remote author sending the request
except Author.DoesNotExist as e:
# not local author - create remote author w/o user
author = Author.objects.create(id=author_req["id"], displayname=author_req["displayname"], host=author_req["host"], github=author_req["github"])
author.save()
try:
friend = Author.objects.get(id=friend_req["id"])
# it's a local user
if request.get_host() in friend.host: # friend.user != None:
if atLeastOneAuthorIsLocal:
bothLocalAuthors = True
atLeastOneAuthorIsLocal = True
# if friend is remote user
else:
return Response({"message":"Friend is not an author on this node"}, status=status.HTTP_400_BAD_REQUEST)
except Author.DoesNotExist as e:
return Response({"message":"Friend is not an author on this node"}, status=status.HTTP_400_BAD_REQUEST)
if not atLeastOneAuthorIsLocal and not bothLocalAuthors: # both remote users - client error - shouldn't have to handle this
return Response({"message": "both are remote authors."}, status=status.HTTP_400_BAD_REQUEST)
# we don't handle local to remote here - done in javascript - shouldn't hit our api
# else if both are local or remote to local
# check if friendship already exists in db
try:
friendship = Friending.objects.get(author=author, friend=friend)
return Response({"message":"Relationship between author & friend already exists."}, status=status.HTTP_200_OK)
except Friending.DoesNotExist as e:
serializer = FriendingSerializer(data=data)
if serializer.is_valid():
serializer.validated_data["author"] = author
serializer.validated_data["friend"] = friend
serializer.save()
noti = Notification.objects.create(notificatee=Author.objects.get(id=friend_req["id"]), follower=Author.objects.get(id=author_req["id"]))
noti.save()
return Response(serializer.data, status=status.HTTP_200_OK)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, request_pk, format=None):
    """
    Unfriend: remove the Friending row from the logged-in author to the
    author identified by ``request_pk``.

    Returns 204 on success, 401 when unauthenticated or the session user
    has no Author, 400 when the target author does not exist, and 404
    when there is no friendship to delete.
    """
    # ensure user is authenticated
    if not request.user.is_authenticated():
        return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
    data = request.data
    if data == None:
        return Response({"message": "no body given."}, status=status.HTTP_400_BAD_REQUEST)
    # check if the friend exists
    try:
        unfriend = Author.objects.get(id=request_pk)
    except Author.DoesNotExist:
        return Response({"message":"Friend does not exist"}, status=status.HTTP_400_BAD_REQUEST)
    # check if the author exists
    try:
        loggedInAuthor = Author.objects.get(user=request.user)
    except Author.DoesNotExist:
        return Response({"message":"Author does not exist"}, status=status.HTTP_401_UNAUTHORIZED)
    # check if the friendship exists
    # BUG FIX: previously a broad `except Exception` only printed the error
    # and fell through to `friendship.delete()` with `friendship` unbound,
    # raising a NameError (HTTP 500). Return a 404 instead.
    try:
        friendship = Friending.objects.get(author=loggedInAuthor, friend=unfriend)
    except Friending.DoesNotExist:
        return Response({"message":"Friendship does not exist"}, status=status.HTTP_404_NOT_FOUND)
    # to unfriend simply delete the local row
    friendship.delete()
    return Response(status=status.HTTP_204_NO_CONTENT)
remove useless functions
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from api.models import Post, Comment, Image, Friending, Author, Node
from api.serializers import PostSerializer, CommentSerializer, ImageSerializer, AuthorSerializer, FriendingSerializer
from api.serializers import UserSerializer
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import status
from rest_framework.views import APIView
from django.http import Http404
from django.contrib.auth.models import User
from django.utils import timezone
from rest_framework import generics
from api.paginators import ListPaginator
from django.shortcuts import get_object_or_404
from django.conf import settings
from itertools import chain
from django.conf import settings
from rest_framework.reverse import reverse
from post.models import Notification
import json
import urllib2
import json
import base64
from rest_framework import HTTP_HEADER_ENCODING
# Create your views here.
@api_view(('GET',))
def api_root(request, format=None):
    """API entry point: map each top-level resource to its endpoint URL."""
    endpoints = {
        'posts': 'post-list',
        'author': 'author-list',
        'images': 'images',
        'friendrequest': 'friendrequest',
    }
    body = dict(
        (name, reverse(view_name, request=request, format=format))
        for name, view_name in endpoints.items()
    )
    return Response(body)
'''
Parameters:
* Post_pk - the post whose privacy / visibility settings are being checked
* Author_id - the author who wants to view the post
Return:
* True if access is allowed, False otherwise
'''
def isAllowed(post_pk, author_id):
    """
    Check whether the author identified by ``author_id`` may view the
    post identified by ``post_pk``.

    Returns True when access is allowed, False otherwise.
    Raises Post.DoesNotExist when the post cannot be looked up
    (callers catch this and answer 404).
    """
    try:
        post = Post.objects.get(id=post_pk)
    except Exception:
        # normalize any lookup failure to Post.DoesNotExist,
        # which the calling views catch
        raise Post.DoesNotExist
    # public posts are visible to everyone
    if post.visibility == Post.PUBLIC:
        return True
    viewer = Author.objects.get(id=author_id)
    # the post's own author can always see it
    if viewer == post.author:
        return True
    privacy = post.visibility
    if privacy == Post.SERVER_ONLY:
        # visible only to authors on the same host as the post's author
        return viewer.host == post.author.host
    if privacy == Post.OTHER_AUTHOR:
        # post explicitly shared with one other author (by username)
        user = User.objects.get(username=post.other_author)
        other_author = Author.objects.get(user=user)
        return other_author.id == author_id
    if privacy == Post.FRIENDS or privacy == Post.FRIENDS_OF_FRIENDS:
        # mutual (two-way) friends of the post's author may view
        friends = getAllFriends(post.author.id)
        if viewer.id in friends:
            return True
        # BUG FIX: the friends-of-friends check used to sit on an
        # `elif privacy == Post.FRIENDS_OF_FRIENDS` AFTER the combined
        # FRIENDS/FOAF branch above, making it unreachable; FOAF posts
        # were therefore never visible to friends-of-friends.
        if privacy == Post.FRIENDS_OF_FRIENDS:
            return viewer.id in getAllFOAF(post.author.id)
        return False
    # unknown / unhandled visibility: deny
    return False
'''
Parameters :
* author_id
Return:
* list of all friends (list of Author ids)
'''
def getAllFriends(author_id):
    """
    Return the ids of every author who is a mutual friend of
    ``author_id`` — i.e. both a Friending row from the author to the
    friend AND one from the friend back to the author exist.
    """
    mutual_ids = []
    # values() gives dicts, so pull the friend id out of each row
    following = Friending.objects.filter(author__id=author_id).values('friend__id')
    for row in following:
        candidate_id = row["friend__id"]
        # only count two-way relationships
        reverse_edge = Friending.objects.filter(author__id=candidate_id, friend__id=author_id)
        if len(reverse_edge) > 0:
            mutual_ids.append(candidate_id)
    return mutual_ids
'''
Parameters :
* author_id
Return:
* list of all friends of friends (list of Author ids) - no duplicates
'''
def getAllFOAF(author_id):
    """
    Return the ids of all friends-of-friends of ``author_id`` (the
    mutual friends of each of the author's mutual friends), with
    duplicates removed.
    """
    foaf_ids = []
    seen = set()
    for friend_id in getAllFriends(author_id):
        for candidate in getAllFriends(friend_id):
            if candidate not in seen:
                seen.add(candidate)
                foaf_ids.append(candidate)
    return foaf_ids
def getRemoteAuthorProfile(node_url, request):
    """
    Fetch the remote author profile named in the X-Remote-User header
    from the remote node at ``node_url`` and return an AuthorSerializer
    built from the JSON response.
    """
    url = node_url + 'api/author/' + request.META.get("HTTP_REMOTE_USER")
    opener = urllib2.build_opener(urllib2.HTTPHandler)
    req = urllib2.Request(url)
    # NOTE(security): credentials are hard-coded; they should be moved
    # into settings / environment configuration rather than source.
    credentials = { "http://project-c404.rhcloud.com/" : "team4:team4team4",\
    "http://disporia-cmput404.rhcloud.com/": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InRlYW00IiwidXNlcl9pZCI6MiwiZW1haWwiOiIiLCJleHAiOjE0NTg1OTE1Nzd9.WjbgA_s-cWtNHzURwAceZOYuD4RASsSqqFiwnY58FqQ"}
    # set the per-node auth header on the outgoing request
    if node_url == "http://project-c404.rhcloud.com/":
        creds = base64.b64encode(credentials[node_url])
        req.add_header("Authorization", "Basic " + creds)
    elif node_url == "http://disporia-cmput404.rhcloud.com/":
        creds = credentials[node_url]
        req.add_header("Authorization", "JWT " + creds)
    elif node_url == "":
        encodedValue = base64.b64encode("nodeHost4B@nodeHost4B:host4b")
        # BUG FIX: previously called request.add_header() on the Django
        # request object (no such method) instead of the urllib2 Request
        req.add_header("Authorization", "Basic " + encodedValue)  # Header, Value
    response = opener.open(req)
    body = response.read()
    author_serializer = AuthorSerializer(json.loads(body))
    return author_serializer
'''
Returns the Node object when the given user is a Node,
or None when no Node exists for the user (e.g. an Author).
'''
def getRemoteNode(user):
    """
    Look up the Node record associated with ``user``.
    Returns the Node when one exists, otherwise None.
    """
    try:
        return Node.objects.get(user=user)
    except Node.DoesNotExist:
        return None
# ref: http://stackoverflow.com/questions/16700968/check-existing-password-and-reset-password
# HASN'T BEEN QUITE TESTED OR IMPLEMENTED COMPLETELY YET
# ref: http://stackoverflow.com/questions/16700968/check-existing-password-and-reset-password
def postChangeUserPassword(request):
    """
    Change the logged-in user's password.

    Expects POST fields 'old_password', 'reset_password' and
    'new_password'; the two new values must match and the old password
    must verify against the stored hash.

    Returns 200 on success, 400 on missing/mismatched input, 401 when
    unauthenticated or the old password is wrong.
    """
    if not request.user.is_authenticated():
        return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
    if 'reset_password' not in request.POST:
        return Response({'message':'Missing reset_password'}, status=status.HTTP_400_BAD_REQUEST)
    old_password = request.POST.get('old_password', '').strip()
    reset_password = request.POST.get('reset_password', '').strip()
    new_password = request.POST.get('new_password', '').strip()
    # BUG FIX: the original condition used the bitwise '&' operator on
    # strings (TypeError at runtime); the intent was "all present and
    # the two new passwords match".
    if not (old_password and reset_password and reset_password == new_password):
        return Response({'message':'Passwords missing or do not match'}, status=status.HTTP_400_BAD_REQUEST)
    saveuser = User.objects.get(id=request.user.id)
    # BUG FIX: `user` was an undefined name; check against `saveuser`.
    if not saveuser.check_password(old_password):
        return Response({'message':'Old password incorrect'}, status=status.HTTP_401_UNAUTHORIZED)
    saveuser.set_password(reset_password)
    saveuser.save()
    return Response(status=status.HTTP_200_OK)
class PostList(generics.GenericAPIView):
    '''
    Lists all Posts | Create a new Post / Update an existing post
    GET : http://service/api/posts/
    * Returns a list of all public posts on the server - most recent to least recent order
    POST : http://service/api/posts/
    * Creates a new post
    POST : http://service/api/posts/<post_pk>
    * Updates the post specified by the post_pk
    '''
    pagination_class = ListPaginator
    serializer_class = PostSerializer
    queryset = Post.objects.all()

    def get(self, request, format=None):
        # public posts only, newest first; no authentication required
        posts = Post.objects.filter(visibility='PUBLIC').order_by('-published')
        page = self.paginate_queryset(posts)
        serializer = PostSerializer(page, many=True)
        return self.get_paginated_response({"data": serializer.data, "query": "posts"})

    def post(self, request, post_pk=None, format=None):
        # ensure user is authenticated
        if not request.user.is_authenticated():
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        data = request.data
        # resolve the posting author from the session user
        try:
            author = Author.objects.get(user=request.user)
        except Author.DoesNotExist:
            return Response({"message":"Author does not exist / is not a local author"}, status=status.HTTP_400_BAD_REQUEST)
        # remote nodes are not allowed to create posts here
        if request.get_host() not in author.host:
            return Response({"message":"Only local authors can make posts"}, status=status.HTTP_403_FORBIDDEN)
        statusCode = status.HTTP_201_CREATED
        serializer = PostSerializer(data=data)
        # POST with a pk edits an existing post instead of creating one
        if post_pk != None:
            try:
                post = Post.objects.get(id=post_pk)
            except Post.DoesNotExist:
                return Response({"message": "Post does not exist"}, status=status.HTTP_404_NOT_FOUND)
            # only allow the author of the post to modify it
            if author != post.author:
                return Response({"message":"Only the author of this post can make changes to it"}, status=status.HTTP_403_FORBIDDEN)
            statusCode = status.HTTP_200_OK
            serializer = PostSerializer(post, data=data)
        if serializer.is_valid():
            # removed leftover debug print
            serializer.validated_data["author"] = author
            # publish time is always stamped server-side
            serializer.validated_data["published"] = timezone.now()
            serializer.save()
            return Response(serializer.data, status=statusCode)
        else:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class PostDetail(generics.GenericAPIView):
    '''
    Gets a specific Post / Updates a Post / Deletes a Post
    GET : http://service/api/posts/<post_pk>
    * Returns the post with id post_pk
    PUT : http://service/api/posts/<post_pk>
    * Updates the post specified at post_pk
    DELETE : http://service/api/posts/<post_pk>
    * Deletes the post specified by the post_pk
    '''
    serializer_class = PostSerializer
    queryset = Post.objects.all()
    def get(self, request, pk, format=None):
        # Returns post if it's privacy setting is public - no need to be authenticated
        # returns 404 if post does not exist
        try:
            post = Post.objects.get(id=pk)
        except Post.DoesNotExist as e:
            return Response({"message":"Post does not exist"}, status=status.HTTP_404_NOT_FOUND)
        # public posts short-circuit: no auth, no visibility checks
        if post.visibility == Post.PUBLIC:
            serializer = PostSerializer(post)
            return Response(serializer.data, status=status.HTTP_200_OK)
        # if post is not public - ensure user is authenticated
        if (not request.user.is_authenticated()):
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        '''
        Gets the author from the request
        '''
        try:
            author = Author.objects.get(user=request.user)
        except Author.DoesNotExist as e:
            # no local Author for this user: check if it is a remote node
            remoteNode = getRemoteNode(request.user)
            # not a remote author & not a local author
            if remoteNode == None:
                return Response({"message":"Node not allowed"},status=status.HTTP_403_FORBIDDEN)
            # is a remote author - assume remote author is already authenticated by remote node
            # the remote author's id arrives in the X-Remote-User header
            author_id = request.META.get("HTTP_REMOTE_USER")
            if (isAllowed(pk, author_id)):
                serializer = PostSerializer(post)
                return Response(serializer.data, status=status.HTTP_200_OK)
            return Response({"message": "User is not allowed to see this post"}, status=status.HTTP_403_FORBIDDEN)
        # If its a local author - return the post
        # NOTE(review): local authors see any non-public post without an
        # isAllowed() check here — presumably intentional; verify.
        if request.get_host() in author.host:
            serializer = PostSerializer(post)
            return Response(serializer.data, status=status.HTTP_200_OK)
        # check if it is a remote node
        remoteNode = getRemoteNode(request.user)
        # not a remote author & not a local author
        if remoteNode == None:
            return Response({"message":"Node not allowed"},status=status.HTTP_403_FORBIDDEN)
        # is a remote author - assume remote author is already authenticated by remote node
        author_id = request.META.get("HTTP_REMOTE_USER")
        author_serializer = getRemoteAuthorProfile(remoteNode.url, request)
        # get remoteAuthor's Author object in our database (has id, displayname, host only - no user) if we already have it
        # else, create a new author object w/o user
        # author = remoteAuthor here
        try:
            author = Author.objects.get(id=author_serializer.data["id"])
        except Author.DoesNotExist as e:
            author = Author.objects.create(id=author_serializer.data["id"], displayname=author_serializer.data["displayname"], host=remoteNode.url)
            author.save()
        if (isAllowed(pk, author_id)):
            serializer = PostSerializer(post)
            return Response(serializer.data, status=status.HTTP_200_OK)
        return Response({"message": "User is not allowed to see this post"}, status=status.HTTP_403_FORBIDDEN)
    def put(self, request, pk, format=None):
        # Full update of a post; only the post's author may do this.
        data = request.data
        # ensure user is authenticated
        if (not request.user.is_authenticated()):
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        try:
            post = Post.objects.get(id=pk)
        except Post.DoesNotExist as e:
            return Response({"message":"Post does not exist"}, status=status.HTTP_404_NOT_FOUND)
        # only allow author of the post to modify it
        try:
            loggedInAuthor = Author.objects.get(user=request.user)
        except Author.DoesNotExist as e:
            return Response({"message":"Author does not exist"}, status=status.HTTP_401_UNAUTHORIZED)
        # only allow author of the post to modify it
        if loggedInAuthor != post.author:
            return Response({"message": "User is not the author of this post & is not allowed to update this post"}, status=status.HTTP_403_FORBIDDEN)
        # else logged in user is the author of the post
        serializer = PostSerializer(post, data=data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_200_OK)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    def delete(self, request, pk, format=None):
        # Delete a post; only the post's author may do this.
        # ensure user is authenticated
        if (not request.user.is_authenticated()):
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        try:
            post = Post.objects.get(id=pk)
        except Post.DoesNotExist as e:
            return Response({"message":"Post does not exist"}, status=status.HTTP_404_NOT_FOUND)
        try:
            loggedInAuthor = Author.objects.get(user=request.user)
        except Author.DoesNotExist as e:
            return Response({"message":"Author does not exist"}, status=status.HTTP_401_UNAUTHORIZED)
        # only allow author of the post to delete it
        if loggedInAuthor != post.author:
            return Response({"message": "User is not the author of this post & is not allowed to delete this post"}, status=status.HTTP_403_FORBIDDEN)
        # else if logged in user is author of the post, delete it
        post.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class CommentList(generics.GenericAPIView):
    '''
    Lists all Comments for specific post / Create a new comment
    GET : http://service/api/posts/<post_pk>/comments/
    * Returns a list of all comments on the post specified by post_pk - most recent to least recent order
    POST : http://service/api/posts/<post_pk>/comments/
    * Creates a new comment attached to the post specified by post_pk
    '''
    pagination_class = ListPaginator
    serializer_class = CommentSerializer
    queryset = Comment.objects.all()
    def get(self, request, post_pk, format=None):
        # Returns post if it's privacy setting is public - no need to be authenticated
        # returns 404 if post does not exist
        try:
            post = Post.objects.get(id=post_pk)
        except Post.DoesNotExist as e:
            return Response({"message":"Post does not exist"}, status=status.HTTP_404_NOT_FOUND)
        # comments on public posts are visible without authentication
        if post.visibility == Post.PUBLIC:
            comments = Comment.objects.filter(post=post_pk).order_by('-published')
            page = self.paginate_queryset(comments)
            serializer = CommentSerializer(page, many=True)
            return self.get_paginated_response({"data": serializer.data, "query": "comments"})
        # if post is not public - ensure user is authenticated
        if (not request.user.is_authenticated()):
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        # check if request is from remote node, if so handle it
        remoteNode = getRemoteNode(request.user)
        if remoteNode != None:
            author_serializer = getRemoteAuthorProfile(remoteNode.url, request)
            # get remoteAuthor's Author object in our database (has id, displayname, host only - no user) if we already have it
            # else, create a new author object w/o user
            # author = remoteAuthor here
            try:
                author = Author.objects.get(id=author_serializer.data["id"])
            except Author.DoesNotExist as e:
                author = Author.objects.create(id=author_serializer.data["id"], displayname=author_serializer.data["displayname"], host=remoteNode.url)
                author.save()
        # local author - get from db
        else:
            author = Author.objects.get(user=request.user)
        author_id = author.id
        try:
            # isAllowed raises Post.DoesNotExist when the post lookup fails
            if (isAllowed(post_pk, author_id)):
                comments = Comment.objects.filter(post=post_pk).order_by('-published')
                page = self.paginate_queryset(comments)
                serializer = CommentSerializer(page, many=True)
                return self.get_paginated_response({"data": serializer.data, "query": "comments"})
            else:
                return Response({"message": "User is not allowed to see this comment or it's corresponding post"}, status=status.HTTP_403_FORBIDDEN)
        except Post.DoesNotExist as e:
            return Response({"message":"Post does not exist"}, status=status.HTTP_404_NOT_FOUND)
    def post(self, request, post_pk, format=None):
        # Create a comment on post_pk; the commenter must be able to
        # view the post (local or remote author).
        # ensure user is authenticated
        if (not request.user.is_authenticated()):
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        data = request.data
        # check if request is from remote node, if so handle it
        remoteNode = getRemoteNode(request.user)
        if remoteNode != None:
            author_serializer = getRemoteAuthorProfile(remoteNode.url, request)
            # get remoteAuthor's Author object in our database (has id, displayname, host only - no user) if we already have it
            # else, create a new author object w/o user
            # author = remoteAuthor here
            try:
                author = Author.objects.get(id=author_serializer.data["id"])
            except Author.DoesNotExist as e:
                author = Author.objects.create(id=author_serializer.data["id"], displayname=author_serializer.data["displayname"], host=remoteNode.url)
                author.save()
        # local author - get from db
        else:
            author = Author.objects.get(user=request.user)
        author_id = author.id
        try:
            if (isAllowed(post_pk, author_id)):
                serializer = CommentSerializer(data=data)
                if serializer.is_valid():
                    print "DEBUG : API - views.py - CommentList"
                    # author, publish time and parent post are set
                    # server-side, never trusted from the payload
                    serializer.validated_data["author"] = author
                    serializer.validated_data["published"] = timezone.now()
                    serializer.validated_data["post"] = Post.objects.get(pk=post_pk)
                    serializer.save()
                    return Response(serializer.data, status=status.HTTP_201_CREATED)
                else:
                    return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
            else:
                return Response({"message": "User is not allowed to see this post/comment"}, status=status.HTTP_403_FORBIDDEN)
        except Post.DoesNotExist as e:
            return Response({"message":"Corresponding post does not exist"}, status=status.HTTP_404_NOT_FOUND)
class CommentDetail(generics.GenericAPIView):
    '''
    Gets a specific Comment/ Updates a Comment / Deletes a Comment
    GET : http://service/api/posts/<post_pk>/comments/<comment_pk>
    * Returns the comment with id comment_pk correlating to the post specified by post_pk
    PUT : http://service/api/posts/<post_pk>/comments/<comment_pk>
    * Updates the comment specified at comment_pk
    DELETE : http://service/api/posts/<post_pk>/comments/<comment_pk>
    * Deletes the comment specified by the comment_pk
    '''
    serializer_class = CommentSerializer
    queryset = Comment.objects.all()
    def get(self, request, post_pk, comment_pk, format=None):
        # ensure user is authenticated
        if (not request.user.is_authenticated()):
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        # check if request is from remote node, if so handle it
        remoteNode = getRemoteNode(request.user)
        if remoteNode != None:
            author_serializer = getRemoteAuthorProfile(remoteNode.url, request)
            # get remoteAuthor's Author object in our database (has id, displayname, host only - no user) if we already have it
            # else, create a new author object w/o user
            # author = remoteAuthor here
            try:
                author = Author.objects.get(id=author_serializer.data["id"])
            except Author.DoesNotExist as e:
                author = Author.objects.create(id=author_serializer.data["id"], displayname=author_serializer.data["displayname"], host=remoteNode.url)
                author.save()
        # local author - get from db
        else:
            author = Author.objects.get(user=request.user)
        author_id = author.id
        try:
            # visibility of a comment follows the visibility of its post
            if (isAllowed(post_pk, author_id)):
                comment = Comment.objects.get(id=comment_pk)
                serializer = CommentSerializer(comment)
                return Response(serializer.data, status=status.HTTP_200_OK)
            else:
                return Response({"message": "User is not allowed to see this comment or it's corresponding post"}, status=status.HTTP_403_FORBIDDEN)
        except Post.DoesNotExist as e:
            return Response({"message":"Post does not exist"}, status=status.HTTP_404_NOT_FOUND)
        except Comment.DoesNotExist as e:
            return Response({"message":"Comment does not exist"}, status=status.HTTP_404_NOT_FOUND)
    # need to fix
    def put(self, request, post_pk, comment_pk, format=None):
        # Update a comment; only the comment's author may do this.
        data = request.data
        # ensure user is authenticated
        if (not request.user.is_authenticated()):
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        try:
            post = Post.objects.get(id=post_pk)
        except Post.DoesNotExist as e:
            return Response({"message":"Post does not exist"}, status=status.HTTP_404_NOT_FOUND)
        try:
            comment = Comment.objects.get(id=comment_pk)
        except Comment.DoesNotExist as e:
            return Response({"message":"Comment does not exist"}, status=status.HTTP_404_NOT_FOUND)
        try:
            loggedInAuthor = Author.objects.get(user=request.user)
        except Author.DoesNotExist as e:
            return Response({"message":"Author does not exist"}, status=status.HTTP_401_UNAUTHORIZED)
        # only allow author of the comment to modify it
        if loggedInAuthor != comment.author:
            return Response({"message": "User is not the author of this comment & is not allowed to update this comment"}, status=status.HTTP_403_FORBIDDEN)
        serializer = CommentSerializer(comment, data=data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_200_OK)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    def delete(self, request, post_pk, comment_pk, format=None):
        # Delete a comment; only the comment's author may do this.
        # ensure user is authenticated
        if (not request.user.is_authenticated()):
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        try:
            post = Post.objects.get(id=post_pk)
        except Post.DoesNotExist as e:
            return Response({"message":"Post does not exist"}, status=status.HTTP_404_NOT_FOUND)
        try:
            comment = Comment.objects.get(id=comment_pk)
        except Comment.DoesNotExist as e:
            return Response({"message":"Comment does not exist"}, status=status.HTTP_404_NOT_FOUND)
        try:
            loggedInAuthor = Author.objects.get(user=request.user)
        except Author.DoesNotExist as e:
            return Response({"message":"Author does not exist"}, status=status.HTTP_401_UNAUTHORIZED)
        # only allow author of the comment to delete it
        if loggedInAuthor != comment.author:
            return Response({"message": "User is not the author of this comment & is not allowed to delete this comment"}, status=status.HTTP_403_FORBIDDEN)
        # else if logged in user is author of the comment, delete it
        comment.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class Images(generics.GenericAPIView):
    '''
    Lists all Images / Posts a new image
    GET : http://service/api/images/
    * Returns a list of all images on the server (not including profile pictures) - most recent to least recent order
    POST : http://service/api/images/
    * Creates a new image
    '''
    pagination_class = ListPaginator
    serializer_class = ImageSerializer
    queryset = Image.objects.all()

    def get(self, request, format=None):
        # ensure user is authenticated
        if not request.user.is_authenticated():
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        images = Image.objects.order_by('-upload_date')
        page = self.paginate_queryset(images)
        if page is not None:
            serializer = ImageSerializer(page, many=True)
            return self.get_paginated_response({"data": serializer.data, "query": "images"})
        # BUG FIX: previously fell off the end (implicit None → 500) when
        # pagination returned no page; return the unpaginated list instead
        serializer = ImageSerializer(images, many=True)
        return Response({"data": serializer.data, "query": "images"}, status=status.HTTP_200_OK)

    def post(self, request, format=None):
        # ensure user is authenticated
        if not request.user.is_authenticated():
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        serializer = ImageSerializer(data=request.data)
        if serializer.is_valid():
            # owner and upload time are stamped server-side
            serializer.validated_data["author"] = Author.objects.get(user=request.user)
            serializer.validated_data["upload_date"] = timezone.now()
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        else:
            # BUG FIX: the error Response was constructed but never
            # returned, so invalid input produced a 500 instead of a 400
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class AuthorList(generics.GenericAPIView):
    '''
    Lists all Authors
    GET : http://service/api/author/
    * Returns a list of authors on the server
    '''
    serializer_class = AuthorSerializer
    queryset = Author.objects.all()

    def get(self, request, format=None):
        # authentication required to enumerate authors
        if not request.user.is_authenticated():
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        everyone = AuthorSerializer(Author.objects.all(), many=True)
        return Response(everyone.data, status=status.HTTP_200_OK)
class AuthorTimeline(generics.GenericAPIView):
    '''
    Lists all Posts an author has made
    GET : http://service/api/author/<author_id>/posts
    * Returns a list of all posts on the server made by author specified by <author_id> - most recent to least recent order
    '''
    pagination_class = ListPaginator
    serializer_class = PostSerializer
    queryset = Post.objects.all()

    def get(self, request, author_pk=None, format=None):
        if request.user.is_authenticated():
            # get currently logged in user
            try:
                viewer = Author.objects.get(user=request.user)
            # BUG FIX: was `except DoesNotExist`, an undefined name that
            # would itself raise NameError when the lookup failed
            except Author.DoesNotExist:
                return Response(status=status.HTTP_404_NOT_FOUND)
            # posts that are visible to the currently authenticated user
            if author_pk == None:
                # get author's own posts
                authorsPosts = Post.objects.filter(author=viewer)
                # get public posts
                publicPosts = Post.objects.filter(visibility=Post.PUBLIC)
                # get friends posts
                friends = getAllFriends(viewer.id)
                friendsPosts = Post.objects.filter(author__id__in=friends, visibility__in=[Post.FRIENDS, Post.FRIENDS_OF_FRIENDS])
                # get foaf posts
                foaf = getAllFOAF(viewer.id)
                # BUG FIX: visibility__in was given a bare string, which
                # Django iterates character by character; use equality
                foafPosts = Post.objects.filter(author__id__in=foaf, visibility=Post.FRIENDS_OF_FRIENDS)
                # combine all posts into one list w/o duplicates
                result = list(set(authorsPosts) | set(publicPosts) | set(friendsPosts) | set(foafPosts))
                # put posts in order from most recent to least recent
                resultPosts = Post.objects.filter(id__in=[post.id for post in result]).order_by('-published')
                page = self.paginate_queryset(resultPosts)
                if page is not None:
                    serializer = PostSerializer(page, many=True)
                    return self.get_paginated_response({"data": serializer.data, "query": "posts"})
            # author pk is provided - all posts made by {AUTHOR_ID} visible to the currently authenticated user
            else:
                # ensure author exists
                try:
                    viewee = Author.objects.get(id=author_pk)
                except Author.DoesNotExist:
                    return Response(status=status.HTTP_404_NOT_FOUND)
                # if viewer is viewee, show all of their posts
                if viewee.id == viewer.id:
                    resultPosts = Post.objects.filter(author=viewee).order_by('-published')
                else:
                    # get all viewee's friends & foafs
                    friends = getAllFriends(viewee.id)
                    foaf = getAllFOAF(viewee.id)
                    friendsPosts = []
                    foafPosts = []
                    # if viewer is friends or foafs with viewee, get their posts
                    if viewer.id in friends:
                        friendsPosts = Post.objects.filter(author=viewee, visibility__in=[Post.FRIENDS, Post.FRIENDS_OF_FRIENDS])
                    if viewer.id in foaf:
                        foafPosts = Post.objects.filter(author=viewee, visibility=Post.FRIENDS_OF_FRIENDS)
                    # viewee's public posts
                    publicPosts = Post.objects.filter(author=viewee, visibility=Post.PUBLIC)
                    # combine all posts into one list w/o duplicates
                    result = list(set(publicPosts) | set(friendsPosts) | set(foafPosts))
                    # put posts in order from most recent to least recent
                    resultPosts = Post.objects.filter(id__in=[post.id for post in result]).order_by('-published')
                page = self.paginate_queryset(resultPosts)
                if page is not None:
                    serializer = PostSerializer(page, many=True)
                    return self.get_paginated_response({"data": serializer.data, "query": "posts"})
        # only show posts that are public - b/c user (viewer) is not logged in
        else:
            posts = Post.objects.filter(visibility=Post.PUBLIC).order_by('-published')
            page = self.paginate_queryset(posts)
            if page is not None:
                serializer = PostSerializer(page, many=True)
                return self.get_paginated_response({"data": serializer.data, "query": "posts"})
        # reached only when pagination yields no page (TODO: handle)
        return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class AuthorDetail(generics.GenericAPIView):
    '''
    Gets Author / Updates Author via POST
    GET : http://service/api/author/<author_id>
    * Returns the author specified by author_id. This includes the author's id, github name, profile picture url, and host.
    POST : http://service/api/author/<author_id>
    * Updates the author specified by author_id
    '''
    serializer_class = AuthorSerializer
    queryset = Author.objects.all()

    def get(self, request, author_pk, format=None):
        # ensure user is authenticated
        if not request.user.is_authenticated():
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        try:
            author = Author.objects.get(id=author_pk)
        except Author.DoesNotExist:
            return Response({"message":"Author does not exist"}, status=status.HTTP_404_NOT_FOUND)
        serializer = AuthorSerializer(author)
        responseData = serializer.data
        # attach the author's mutual friend list (both directions exist)
        friendsList = []
        aList = Friending.objects.filter(author=author)
        for person_pair in aList:
            # backwards check: only mutual friendships count
            if len(Friending.objects.filter(author=person_pair.friend, friend=author)) > 0:
                friendsList.append(person_pair.friend)
        serializer = AuthorSerializer(friendsList, many=True)
        responseData["friends"] = serializer.data
        # remote authors have no local user, so build the url from the id
        if request.get_host() not in author.host:
            responseData["url"] = author.host + 'author/' + str(author.id)
        else:
            responseData["url"] = author.host + "author/" + author.user.username
        return Response(responseData, status=status.HTTP_200_OK)

    def post(self, request, author_pk=None, format=None):
        # ensure user is authenticated
        if not request.user.is_authenticated():
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        try:
            author = Author.objects.get(id=author_pk)
        except Author.DoesNotExist:
            return Response({"message":"Author does not exist"}, status=status.HTTP_404_NOT_FOUND)
        # only the author themselves may edit their profile
        if request.user == author.user:
            # removed leftover debug prints ("1", "2", DEBUG)
            serializer = AuthorSerializer(author, data=request.data)
            if serializer.is_valid():
                serializer.save()
                return Response(serializer.data, status=status.HTTP_200_OK)
            else:
                return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        else:
            return Response({"message":"only this author can make changes to their profile"},status=status.HTTP_403_FORBIDDEN)
class FriendingCheck(generics.GenericAPIView):
    '''
    Returns a list of an author's friends / Checks whether or not 2 authors are friends

    GET : http://service/api/friends/<author_id>
        * Returns the author specified by author_id's list of friends (by friend id)
    GET : http://service/api/friends/<author_id1>/<author_id2>
        * Returns the 2 author's ids & a boolean specifying if the 2 authors are friends or not.
    '''
    queryset = Friending.objects.all()
    serializer_class = FriendingSerializer

    def get(self, request, author_id1, author_id2=None, format=None):
        """Friend query endpoint.

        With two ids: report whether the two authors are (mutually) friends.
        With one id: list all friends of the logged-in local author.
        """
        # ensure user is authenticated
        if not request.user.is_authenticated():
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        # returns whether or not author_id1 & author_id2 are friends or not
        if author_id2 is not None:
            # A mutual friendship is stored as one Friending row per
            # direction, so "friends" means more than one matching row
            # in total. count() avoids materializing the rows like
            # len(list(chain(...))) did.
            forward = Friending.objects.filter(author__id=author_id1, friend__id=author_id2)
            backward = Friending.objects.filter(author__id=author_id2, friend__id=author_id1)
            friends = (forward.count() + backward.count()) > 1
            return Response({'query':'friends', 'authors': [author_id1, author_id2], 'friends':friends}, status=status.HTTP_200_OK)
        # single id: list all friends of the requesting author.
        # Remote users must query their own node for their friend list.
        remoteNode = getRemoteNode(request.user)
        if remoteNode is not None:
            return Response({"message":"This is a remote user on another node, to see their friends, use the api of the remote user's original node"}, status=status.HTTP_400_BAD_REQUEST)
        author = Author.objects.get(user=request.user)
        friendsList = getAllFriends(author.id)
        return Response({'query':'friends', 'authors': friendsList}, status=status.HTTP_200_OK)

    def post(self, request, author_id1, format=None):
        """Given a candidate list of author ids in the body ("authors"),
        return the subset that are friends of author_id1."""
        # ensure user is authenticated
        if not request.user.is_authenticated():
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        # check if request is from remote node, if so reject it
        remoteNode = getRemoteNode(request.user)
        if remoteNode is not None:
            return Response({"message":"This is a remote user on another node, to use this service, use the api of the remote user's original node"}, status=status.HTTP_400_BAD_REQUEST)
        try:
            Author.objects.get(id=author_id1)
        except Author.DoesNotExist:
            return Response({"message":"Author does not exist"}, status=status.HTTP_404_NOT_FOUND)
        # list of uuids in string representation, supplied by the caller
        listOfPotentialFriendIds = request.data["authors"]
        # getAllFriends yields UUID objects; compare as strings
        listOfFriendIds = [str(friend_id) for friend_id in getAllFriends(author_id1)]
        resultList = list(set(listOfFriendIds) & set(listOfPotentialFriendIds))
        returnObj = { "query": "friends", "author": author_id1, "authors": resultList }
        return Response(returnObj, status=status.HTTP_200_OK)
class FriendRequest(generics.GenericAPIView):
    """Friend-request endpoint.

    POST   : create a Friending row between the body's author and friend.
    DELETE : remove the logged-in author's friendship with <request_pk>.
    """
    serializer_class = FriendingSerializer
    queryset = Friending.objects.all()

    def post(self, request, format=None):
        """Create a friendship from the request body's author/friend pair.

        The friend must be a local author on this node; the author may be
        local or remote (an unknown remote author gets a stub Author row
        created on first contact). A Notification is created for the
        friend on success.
        """
        # ensure user is authenticated
        if not request.user.is_authenticated():
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        data = request.data
        if data is None:
            return Response({"message": "no body given."}, status=status.HTTP_400_BAD_REQUEST)
        try:
            author_req = request.data["author"]
            friend_req = request.data["friend"]
        except KeyError:
            return Response({"message":"missing inputs"}, status=status.HTTP_400_BAD_REQUEST)
        atLeastOneAuthorIsLocal = False
        try:
            author = Author.objects.get(id=author_req["id"])
            # a local author's host contains this node's host name
            if request.get_host() in author.host:
                atLeastOneAuthorIsLocal = True
            # else: a known remote author is sending the request
        except Author.DoesNotExist:
            # unknown remote author - create a stub Author without a user
            author = Author.objects.create(id=author_req["id"], displayname=author_req["displayname"], host=author_req["host"])
            author.save()
        try:
            friend = Author.objects.get(id=friend_req["id"])
            if request.get_host() in friend.host:
                atLeastOneAuthorIsLocal = True
            else:
                # the friend must live on this node
                return Response({"message":"Friend is not an author on this node"}, status=status.HTTP_400_BAD_REQUEST)
        except Author.DoesNotExist:
            return Response({"message":"Friend is not an author on this node"}, status=status.HTTP_400_BAD_REQUEST)
        # both remote users - client error - shouldn't have to handle this.
        # (The old bothLocalAuthors flag was redundant: both-local implies
        # at least one local.)
        if not atLeastOneAuthorIsLocal:
            return Response({"message": "both are remote authors."}, status=status.HTTP_400_BAD_REQUEST)
        # we don't handle local-to-remote here - done in javascript
        # check if friendship already exists in db
        try:
            Friending.objects.get(author=author, friend=friend)
            return Response({"message":"Relationship between author & friend already exists."}, status=status.HTTP_200_OK)
        except Friending.DoesNotExist:
            serializer = FriendingSerializer(data=data)
            if serializer.is_valid():
                serializer.validated_data["author"] = author
                serializer.validated_data["friend"] = friend
                serializer.save()
                # notify the (local) friend; reuse the objects already
                # fetched instead of re-querying by id
                noti = Notification.objects.create(notificatee=friend, follower=author)
                noti.save()
                return Response(serializer.data, status=status.HTTP_200_OK)
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, request_pk, format=None):
        """Unfriend: delete the Friending row from the logged-in author
        to the author identified by request_pk.

        Fix: previously a missing friendship was caught by a broad
        ``except Exception`` that only printed, then execution fell
        through to ``friendship.delete()`` and crashed with a NameError.
        Now a missing friendship returns 404.
        """
        # ensure user is authenticated
        if not request.user.is_authenticated():
            return Response({'message':'Not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
        # check if the friend exists
        try:
            unfriend = Author.objects.get(id=request_pk)
        except Author.DoesNotExist:
            return Response({"message":"Friend does not exist"}, status=status.HTTP_400_BAD_REQUEST)
        # check if the author exists
        try:
            loggedInAuthor = Author.objects.get(user=request.user)
        except Author.DoesNotExist:
            return Response({"message":"Author does not exist"}, status=status.HTTP_401_UNAUTHORIZED)
        # check if the friendship exists
        try:
            friendship = Friending.objects.get(author=loggedInAuthor, friend=unfriend)
        except Friending.DoesNotExist:
            return Response({"message":"Friendship does not exist"}, status=status.HTTP_404_NOT_FOUND)
        # to unfriend simply do it locally
        friendship.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
|
#!/usr/bin/env python
from apps.webdriver_testing.pages import Page
import time
class EditorDialogs(Page):
    """Page object for the subtitle-editor modal dialogs.

    All selectors are CSS; interaction goes through the Page helpers
    (click_by_css, wait_for_element_present, ...).
    """
    _CONTINUE = 'div.unisubs-modal-lang a.unisubs-green-button'
    _DIALOG = 'div.unisubs-modal-widget-content'
    _WARNING = 'div.unisubs-modal-lang-content' #Resume editing

    #DIALOG TYPES
    _HOW_TO = 'div.unisubs-howtopanel'
    _TYPING = _DIALOG + '.unisubs-modal-widget-transcribe'
    _SYNCING = _DIALOG + '.unisubs-modal-widget-sync'
    _TITLE_DESCRIPTION = '.unisubs-help-heading' #NOT GOOD
    _REVIEW = _DIALOG + '.unisubs-model-widget-review'
    _GUIDELINES = 'div.unisubs-guidelinespanel'
    _GUIDELINE_TEXT = _GUIDELINES + ' div p'
    _HEADING = 'div.unisubs-modal-lang h3'
    _DONE = 'a.unisubs-done span'
    _CHECKBOX = 'span.goog-checkbox'
    # the checkbox element gains this class once it is ticked
    _CHECKBOX_CHECKED = 'span.goog-checkbox-checked'
    _CLOSE = 'span.unisubs-modal-widget-title-close'
    _CLOSE_LANG_MODAL = 'span.unisubs-modal-lang-title-close'
    _COMPLETED_DIALOG = 'div.unisubs-modal-completed'
    _SAVED_OK = 'div.unisubs-modal-completed a.unisubs-green-button'

    def wait_for_dialog_present(self):
        """Block until the editor dialog is displayed."""
        self.wait_for_element_present(self._DIALOG)

    def warning_dialog_title(self):
        """Return the title text of the resume-editing warning dialog."""
        self.wait_for_element_present(self._WARNING)
        title = self.get_text_by_css(self._WARNING + ' h3')
        return title

    def close_dialog(self):
        """Close the editor dialog via its title-bar close control."""
        self.logger.info('closing the dialog')
        self.click_by_css(self._CLOSE)

    def close_lang_dialog(self):
        """Close the language-selection dialog."""
        self.logger.info('closing the language dialog')
        self.click_by_css(self._CLOSE_LANG_MODAL)

    def continue_to_next_step(self):
        """Click Done to advance to the next editor step."""
        self.logger.info('clicking done to continue to the next step')
        self.click_by_css(self._DONE)

    def continue_past_help(self, skip=True):
        """Dismiss the how-to help panel if present.

        When *skip* is True, also tick the 'skip this in the future' box.
        """
        self.logger.info('Checking for help video, and continuing past')
        time.sleep(5)
        if self.is_element_present(self._HOW_TO):
            if skip:
                self.click_by_css(self._CHECKBOX)
            self.continue_to_next_step()

    def click_saved_ok(self):
        """Confirm the 'subtitles saved' dialog."""
        self.logger.info('clicking the subtitles saved confirmation box')
        self.wait_for_element_present(self._SAVED_OK, wait_time=10)
        self.click_by_css(self._SAVED_OK)

    def resume_dialog_ok(self):
        """Click the OK button in the resume-editing dialog."""
        self.logger.info('clicking OK in the resume dialog')
        self.wait_for_element_present(self._CONTINUE)
        elements_found = self.get_elements_list(self._CONTINUE)
        for el in elements_found:
            if el.text == 'OK':
                el.click()

    def click_dialog_continue(self):
        """Click the Continue button in the dialog."""
        self.logger.info('clicking Continue in the dialog')
        self.wait_for_element_present(self._CONTINUE)
        self.click_by_css(self._CONTINUE)

    def mark_subs_complete(self, complete=True):
        """Optionally tick the 'subtitles complete' checkbox, then confirm.

        Fix: only click the checkbox when it is not already checked -
        clicking a ticked goog-checkbox toggles it back off, which
        silently marked the subtitles incomplete.
        """
        self.logger.info('checking for the mark subs complete dialog')
        time.sleep(3)
        if self.is_element_visible(self._CHECKBOX):
            if complete == True:
                self.logger.info('Marking subs as complete')
                if not self.is_element_present(self._CHECKBOX_CHECKED):
                    self.click_by_css(self._CHECKBOX)
            self.logger.info('Click OK in the subs complete diaplog')
            self.click_by_css(self._SAVED_OK)

    def incomplete_alert_text(self):
        """Accept the 'subtitles incomplete' browser alert; return its text."""
        self.logger.info('Accepting the subs incomplete dialog')
        a = self.browser.switch_to_alert()
        alert_text = a.text
        a.accept()
        return alert_text
class CreateLanguageSelection(EditorDialogs):
    """Page object for the 'Create subtitles' language-selection dialog."""
    _DIALOG_NAME = 'Create subtitles'
    _ORIGINAL_LANG = 'select.original-language'
    _TRANSCRIBE_LANG = 'select.to-language'
    _SOURCE_LANG = 'select.from-language'

    def _is_lang_selection_dialog(self):
        """Return True when the visible dialog heading names this dialog."""
        self.wait_for_element_present(self._HEADING)
        if self._DIALOG_NAME in self.get_text_by_css(self._HEADING):
            return True

    def lang_selection_dialog_present(self):
        """Public presence check; logs, then delegates to the private
        helper instead of duplicating the heading check."""
        self.logger.info('checking for lang selection dialog')
        return self._is_lang_selection_dialog()

    def _set_video_language(self, language):
        """Choose the videos original language.

        Language should be the fully written language,
        ex: French, Canadian
        """
        self.logger.info('setting the primary audio language: %s' % language)
        self.select_option_by_text(self._ORIGINAL_LANG, language)

    def _set_new_language(self, language):
        """Choose the language that is being transcribed.

        Language should be the fully written language,
        ex: French, Canadian
        """
        self.logger.info('setting transcribe lang: %s' % language)
        self.select_option_by_text(self._TRANSCRIBE_LANG, language)

    def _set_translation_source(self, language):
        """Choose the language the translation is made from.

        Language should be the fully written language,
        ex: French, Canadian
        """
        self.logger.info('setting from lang: %s' % language)
        self.select_option_by_text(self._SOURCE_LANG, language)

    def _submit_choices(self):
        """Submit the dialog via the Continue button."""
        self.logger.info('submitting dialog choices')
        self.click_by_css(self._CONTINUE)

    def create_original_subs(self, video_language, new_language):
        """Drive the dialog to start original-language subtitles."""
        assert self._is_lang_selection_dialog()
        self._set_video_language(video_language)
        self._set_new_language(new_language)
        self._submit_choices()
# make sure the checkbox is not checked before checking it.
#!/usr/bin/env python
from apps.webdriver_testing.pages import Page
import time
class EditorDialogs(Page):
    """Page object for the subtitle-editor modal dialogs.

    All selectors are CSS; interaction goes through the Page helpers
    (click_by_css, wait_for_element_present, ...).
    """
    _CONTINUE = 'div.unisubs-modal-lang a.unisubs-green-button'
    _DIALOG = 'div.unisubs-modal-widget-content'
    _WARNING = 'div.unisubs-modal-lang-content' #Resume editing

    #DIALOG TYPES
    _HOW_TO = 'div.unisubs-howtopanel'
    _TYPING = _DIALOG + '.unisubs-modal-widget-transcribe'
    _SYNCING = _DIALOG + '.unisubs-modal-widget-sync'
    _TITLE_DESCRIPTION = '.unisubs-help-heading' #NOT GOOD
    _REVIEW = _DIALOG + '.unisubs-model-widget-review'
    _GUIDELINES = 'div.unisubs-guidelinespanel'
    _GUIDELINE_TEXT = _GUIDELINES + ' div p'
    _HEADING = 'div.unisubs-modal-lang h3'
    _DONE = 'a.unisubs-done span'
    _CHECKBOX = 'span.goog-checkbox'
    # the checkbox element gains this class once it is ticked
    _CHECKBOX_CHECKED = 'span.goog-checkbox-checked'
    _CLOSE = 'span.unisubs-modal-widget-title-close'
    _CLOSE_LANG_MODAL = 'span.unisubs-modal-lang-title-close'
    _COMPLETED_DIALOG = 'div.unisubs-modal-completed'
    _SAVED_OK = 'div.unisubs-modal-completed a.unisubs-green-button'

    def wait_for_dialog_present(self):
        """Block until the editor dialog is displayed."""
        self.wait_for_element_present(self._DIALOG)

    def warning_dialog_title(self):
        """Return the title text of the resume-editing warning dialog."""
        self.wait_for_element_present(self._WARNING)
        title = self.get_text_by_css(self._WARNING + ' h3')
        return title

    def close_dialog(self):
        """Close the editor dialog via its title-bar close control."""
        self.logger.info('closing the dialog')
        self.click_by_css(self._CLOSE)

    def close_lang_dialog(self):
        """Close the language-selection dialog."""
        self.logger.info('closing the language dialog')
        self.click_by_css(self._CLOSE_LANG_MODAL)

    def continue_to_next_step(self):
        """Click Done to advance to the next editor step."""
        self.logger.info('clicking done to continue to the next step')
        self.click_by_css(self._DONE)

    def continue_past_help(self, skip=True):
        """Dismiss the how-to help panel if present; when *skip* is True,
        also tick the 'skip this in the future' box."""
        self.logger.info('Checking for help video, and continuing past')
        # fixed sleep to let the help panel animate in - TODO confirm an
        # explicit wait cannot be used here
        time.sleep(5)
        if self.is_element_present(self._HOW_TO):
            if skip:
                self.click_by_css(self._CHECKBOX)
            self.continue_to_next_step()

    def click_saved_ok(self):
        """Confirm the 'subtitles saved' dialog."""
        self.logger.info('clicking the subtitles saved confirmation box')
        self.wait_for_element_present(self._SAVED_OK, wait_time=10)
        self.click_by_css(self._SAVED_OK)

    def resume_dialog_ok(self):
        """Click the button labelled 'OK' in the resume-editing dialog
        (several elements match the selector, so filter on text)."""
        self.logger.info('clicking OK in the resume dialog')
        self.wait_for_element_present(self._CONTINUE)
        elements_found = self.get_elements_list(self._CONTINUE)
        for el in elements_found:
            if el.text == 'OK':
                el.click()

    def click_dialog_continue(self):
        """Click the Continue button in the dialog."""
        self.logger.info('clicking Continue in the dialog')
        self.wait_for_element_present(self._CONTINUE)
        self.click_by_css(self._CONTINUE)

    def mark_subs_complete(self, complete=True):
        """Optionally tick the 'subtitles complete' checkbox, then confirm.

        Only clicks the checkbox when it is not already checked, because
        clicking a ticked goog-checkbox toggles it back off.
        """
        self.logger.info('checking for the mark subs complete dialog')
        time.sleep(3)
        if self.is_element_visible(self._CHECKBOX):
            if complete == True:
                self.logger.info('Marking subs as complete')
                if not self.is_element_present(self._CHECKBOX_CHECKED):
                    self.click_by_css(self._CHECKBOX)
            self.logger.info('Click OK in the subs complete diaplog')
            self.click_by_css(self._SAVED_OK)

    def incomplete_alert_text(self):
        """Accept the 'subtitles incomplete' browser alert; return its text."""
        self.logger.info('Accepting the subs incomplete dialog')
        a = self.browser.switch_to_alert()
        alert_text = a.text
        a.accept()
        return alert_text
class CreateLanguageSelection(EditorDialogs):
    """Page object for the 'Create subtitles' language-selection dialog."""
    _DIALOG_NAME = 'Create subtitles'
    _ORIGINAL_LANG = 'select.original-language'
    _TRANSCRIBE_LANG = 'select.to-language'
    _SOURCE_LANG = 'select.from-language'

    def _is_lang_selection_dialog(self):
        """Return True when the visible dialog heading names this dialog."""
        self.wait_for_element_present(self._HEADING)
        if self._DIALOG_NAME in self.get_text_by_css(self._HEADING):
            return True

    def lang_selection_dialog_present(self):
        """Public presence check; logs, then delegates to the private
        helper instead of duplicating the heading check."""
        self.logger.info('checking for lang selection dialog')
        return self._is_lang_selection_dialog()

    def _set_video_language(self, language):
        """Choose the videos original language.

        Language should be the fully written language,
        ex: French, Canadian
        """
        self.logger.info('setting the primary audio language: %s' % language)
        self.select_option_by_text(self._ORIGINAL_LANG, language)

    def _set_new_language(self, language):
        """Choose the language that is being transcribed.

        Language should be the fully written language,
        ex: French, Canadian
        """
        self.logger.info('setting transcribe lang: %s' % language)
        self.select_option_by_text(self._TRANSCRIBE_LANG, language)

    def _set_translation_source(self, language):
        """Choose the language the translation is made from.

        Language should be the fully written language,
        ex: French, Canadian
        """
        self.logger.info('setting from lang: %s' % language)
        self.select_option_by_text(self._SOURCE_LANG, language)

    def _submit_choices(self):
        """Submit the dialog via the Continue button."""
        self.logger.info('submitting dialog choices')
        self.click_by_css(self._CONTINUE)

    def create_original_subs(self, video_language, new_language):
        """Drive the dialog to start original-language subtitles."""
        assert self._is_lang_selection_dialog()
        self._set_video_language(video_language)
        self._set_new_language(new_language)
        self._submit_choices()
|
import spyral
import pygame as pygame
from weakref import ref as _wref
import math
# module-level registry of weak references to every live Sprite
_all_sprites = []
def _switch_scene():
    """Prune dead sprites and expire static blits on a scene switch.

    _expire_static() returns True, so the surviving (alive) sprites are
    kept; its side effect un-registers any cached static blit.
    """
    global _all_sprites
    _all_sprites = [s for s in _all_sprites if s() is not None and s()
        ._expire_static()]
class Sprite(object):
    """
    Analagous to Sprite in pygame, but with many more features. For more
    detail, read the FAQ. Important member variables are:

    | *position*, *pos* - (x,y) coordinates for the sprite. Supports
      subpixel positioning and is kept in sync with *x* and *y*
    | *x* - x coordinate for the sprite
    | *y* - y coordinate for the sprite
    | *anchor* - position that the *x* and *y* coordinates are relative
      to on the image. Supports special values 'topleft', 'topright',
      'bottomleft', 'bottomright', 'center', 'midtop', 'midbottom',
      'midleft', 'midright', or a tuple of offsets which are treated
      as relative to the top left of the image.
    | *layer* - a string representing the layer to draw on. It should be a
      layer which exists on the camera that is used for the group(s) the
      sprite belongs to; if it is not, it will be drawn on top
    | *image* - a pygame.surface.Surface to be drawn to the screen. The surface
      must, for now, have certain flags set. Use spyral.util.new_surface and
      spyral.util.load_image to get surfaces. One caveat is that once it is
      drawn to the camera, if the camera uses scaling, and the surface is
      changed, the display will not reflect this due to caching. If you must
      change a surface, copy it first.
    | *blend_flags* - blend flags for pygame.surface.Surface.blit(). See the
      pygame documentation for more information.
    | *visible* - whether or not to draw this sprite
    | *width*, *height*, *size* - width, height, and size of the image
      respectively. Read-only.
    | *group* - the group in which this sprite is contained. Read-only.
    | *scale* - a factor by which to scale the image by before drawing.
    """
    def __init__(self, group=None):
        """ Adds this sprite to any number of groups by default. """
        # register a weak reference so _switch_scene can prune us
        _all_sprites.append(_wref(self))
        self._age = 0            # frames drawn unchanged; used for static promotion
        self._static = False     # True once the camera caches us as a static blit
        self._image = None
        self._layer = '__default__'
        self._groups = []
        self._make_static = False
        self._pos = spyral.Vec2D(0, 0)
        self._blend_flags = 0
        self.visible = True
        self._anchor = 'topleft'
        self._offset = spyral.Vec2D(0, 0)
        self._scale = spyral.Vec2D(1.0, 1.0)
        self._scaled_image = None
        self._group = None
        self._angle = 0
        # cached result of scale+rotate applied to _image
        self._transform_image = None
        # center drift introduced by rotation (bounding box growth)
        self._transform_offset = spyral.Vec2D(0, 0)
        self.on_remove = spyral.Signal()
        if group is not None:
            group.add(self)

    def _set_static(self):
        # request promotion to a camera-cached static blit on next draw
        self._make_static = True
        self._static = True

    def _expire_static(self):
        # Expire static is part of the private API which must
        # be implemented by Sprites that wish to be static.
        if self._static:
            spyral.director.get_camera()._remove_static_blit(self)
        self._static = False
        self._age = 0
        # always True so _switch_scene can use this in a filter expression
        return True

    def _recalculate_offset(self):
        """Recompute the anchor offset from the scaled size and anchor name."""
        if self.image is None:
            return
        size = self._scale * self._image.get_size()
        w = size[0]
        h = size[1]
        a = self._anchor
        if a == 'topleft':
            offset = (0, 0)
        elif a == 'topright':
            offset = (w, 0)
        elif a == 'midtop':
            offset = (w / 2., 0)
        elif a == 'bottomleft':
            offset = (0, h)
        elif a == 'bottomright':
            offset = (w, h)
        elif a == 'midbottom':
            offset = (w / 2., h)
        elif a == 'midleft':
            offset = (0, h / 2.)
        elif a == 'midright':
            offset = (w, h / 2.)
        elif a == 'center':
            offset = (w / 2., h / 2.)
        else:
            # anchor is a raw (x, y) offset tuple
            offset = a
        self._offset = spyral.Vec2D(offset) - self._transform_offset

    def _recalculate_transforms(self):
        """Rebuild the cached scaled/rotated surface and the anchor offset."""
        source = self._image._surf
        # scale first
        if self._scale != (1.0, 1.0):
            new_size = self._scale * self._image.get_size()
            new_size = (int(new_size[0]), int(new_size[1]))
            source = pygame.transform.smoothscale(source, new_size, pygame.Surface(new_size, pygame.SRCALPHA))
        # rotate (the original comment said "flip", but this block rotates)
        if self._angle != 0:
            # _angle is in radians; pygame.transform.rotate expects degrees
            angle = 180.0 / math.pi * self._angle % 360
            old = spyral.Vec2D(source.get_rect().center)
            source = pygame.transform.rotate(source, angle).convert_alpha()
            new = source.get_rect().center
            # rotation grows the bounding box; remember the center shift
            self._transform_offset = old - new
        self._transform_image = source
        self._recalculate_offset()
        self._expire_static()

    def _get_pos(self):
        return self._pos

    def _set_pos(self, pos):
        if pos == self._pos:
            return
        self._pos = spyral.Vec2D(pos)
        self._expire_static()

    def _get_layer(self):
        return self._layer

    def _set_layer(self, layer):
        if layer == self._layer:
            return
        self._layer = layer
        self._expire_static()

    def _get_image(self):
        return self._image

    def _set_image(self, image):
        if self._image is image:
            return
        self._image = image
        self._recalculate_transforms()
        self._expire_static()

    def _get_x(self):
        return self._get_pos()[0]

    def _set_x(self, x):
        self._set_pos((x, self._get_y()))

    def _get_y(self):
        return self._get_pos()[1]

    def _set_y(self, y):
        self._set_pos((self._get_x(), y))

    def _get_anchor(self):
        return self._anchor

    def _set_anchor(self, anchor):
        if anchor == self._anchor:
            return
        self._anchor = anchor
        self._recalculate_offset()
        self._expire_static()

    def _get_width(self):
        # NOTE(review): wraps a scalar in Vec2D and returns None when no
        # transform image exists yet - verify callers expect this
        if self._transform_image:
            return spyral.Vec2D(self._transform_image.get_width())

    def _get_height(self):
        # NOTE(review): same scalar-in-Vec2D oddity as _get_width
        if self._transform_image:
            return spyral.Vec2D(self._transform_image.get_height())

    def _get_size(self):
        if self._transform_image:
            return spyral.Vec2D(self._transform_image.get_size())
        return spyral.Vec2D(0, 0)

    def _get_scale(self):
        return self._scale

    def _set_scale(self, scale):
        # a scalar scale is applied uniformly to both axes
        if isinstance(scale, (int, float)):
            scale = spyral.Vec2D(scale, scale)
        if self._scale == scale:
            return
        self._scale = spyral.Vec2D(scale)
        self._recalculate_transforms()
        self._expire_static()

    def _get_scale_x(self):
        return self._scale[0]

    def _get_scale_y(self):
        return self._scale[1]

    def _set_scale_x(self, x):
        self._set_scale((x, self._scale[1]))

    def _set_scale_y(self, y):
        self._set_scale((self._scale[0], y))

    def _get_group(self):
        return self._group

    def _set_group(self, group):
        # leaving the old group happens via Group.remove, which resets _group
        if self._group is not None:
            self._group.remove(self)
        group.add(self)

    def _get_angle(self):
        return self._angle

    def _set_angle(self, angle):
        if self._angle == angle:
            return
        self._angle = angle
        self._recalculate_transforms()

    # public attribute API, backed by the getter/setter pairs above
    position = property(_get_pos, _set_pos)
    pos = property(_get_pos, _set_pos)
    layer = property(_get_layer, _set_layer)
    image = property(_get_image, _set_image)
    x = property(_get_x, _set_x)
    y = property(_get_y, _set_y)
    anchor = property(_get_anchor, _set_anchor)
    scale = property(_get_scale, _set_scale)
    scale_x = property(_get_scale_x, _set_scale_x)
    scale_y = property(_get_scale_y, _set_scale_y)
    width = property(_get_width)
    height = property(_get_height)
    size = property(_get_size)
    group = property(_get_group, _set_group)
    angle = property(_get_angle, _set_angle)

    def get_rect(self):
        """Return the sprite's bounding Rect in world coordinates."""
        return spyral.Rect(
            (self._pos[0] - self._offset[0], self._pos[1] - self._offset[1]),
            self.size)

    def draw(self, camera):
        """Draw the sprite to *camera*; after 5 unchanged frames it is
        promoted to a camera-cached static blit and skipped thereafter."""
        if not self.visible:
            return
        if self._static:
            return
        if self._make_static or self._age > 4:
            camera._static_blit(self,
                                self._transform_image,
                                (self._pos[0] - self._offset[0],
                                 self._pos[1] - self._offset[1]),
                                self._layer,
                                self._blend_flags)
            self._make_static = False
            self._static = True
            return
        camera._blit(self._transform_image,
                     (self._pos[0] - self._offset[0],
                      self._pos[1] - self._offset[1]),
                     self._layer,
                     self._blend_flags)
        self._age += 1

    def update(self, *args):
        """ Called once per update tick. """
        pass

    def __del__(self):
        # make sure the camera drops any cached static blit for us
        spyral.director.get_camera()._remove_static_blit(self)
### Group classes ###
class Group(object):
    """ Behaves like sprite.Group in pygame. """

    def __init__(self, camera, *sprites):
        """
        Create a group and associate a camera with it. This is where all drawing
        will be sent.
        """
        self.camera = camera
        self._sprites = list(sprites)

    def draw(self):
        """ Draws all of its sprites to the group's camera. """
        target = self.camera
        for member in self._sprites:
            member.draw(target)

    def update(self, *args):
        """ Calls update on all of its Sprites. """
        for member in self._sprites:
            member.update(*args)

    def remove(self, *sprites):
        """ Removes Sprites from this Group. """
        for candidate in sprites:
            if candidate not in self._sprites:
                continue
            self._sprites.remove(candidate)
            candidate._group = None
            candidate._expire_static()
            candidate.on_remove.emit(self)

    def add(self, *sprites):
        """ Adds an object to its drawable list. """
        for candidate in sprites:
            if candidate in self._sprites:
                continue
            self._sprites.append(candidate)
            candidate._group = self

    def has(self, *sprites):
        """
        Return true if all sprites are contained in the group. Unlike
        pygame, this does not take an iterator for each argument, only sprites.
        """
        return all(candidate in self._sprites for candidate in sprites)

    def empty(self):
        """ Clears all sprites from the group. """
        for member in self._sprites:
            member._group = None
        self._sprites = []

    def sprites(self):
        """ Return a list of the sprites in this group. """
        return self._sprites[:]
# Add flip_x and flip_y to sprite
# Signed-off-by: Robert Deaton <eb00a885478926d5d594195591fb94a03acb1062@udel.edu>
import spyral
import pygame as pygame
from weakref import ref as _wref
import math
# module-level registry of weak references to every live Sprite
_all_sprites = []
def _switch_scene():
    """Prune dead sprites and expire static blits on a scene switch.

    _expire_static() returns True, so the surviving (alive) sprites are
    kept; its side effect un-registers any cached static blit.
    """
    global _all_sprites
    _all_sprites = [s for s in _all_sprites if s() is not None and s()
        ._expire_static()]
class Sprite(object):
"""
Analagous to Sprite in pygame, but with many more features. For more
detail, read the FAQ. Important member variables are:
| *position*, *pos* - (x,y) coordinates for the sprite. Supports
subpixel positioning and is kept in sync with *x* and *y*
| *x* - x coordinate for the sprite
| *y* - y coordinate for the sprite
| *anchor* - position that the *x* and *y* coordinates are relative
to on the image. Supports special values 'topleft', 'topright',
'bottomleft', 'bottomright', 'center', 'midtop', 'midbottom',
'midleft', 'midright', or a tuple of offsets which are treated
as relative to the top left of the image.
| *layer* - a string representing the layer to draw on. It should be a
layer which exists on the camera that is used for the group(s) the
sprite belongs to; if it is not, it will be drawn on top
| *image* - a pygame.surface.Surface to be drawn to the screen. The surface
must, for now, have certain flags set. Use spyral.util.new_surface and
spyral.util.load_image to get surfaces. One caveat is that once it is
drawn to the camera, if the camera uses scaling, and the surface is
changed, the display will not reflect this due to caching. If you must
change a surface, copy it first.
| *blend_flags* - blend flags for pygame.surface.Surface.blit(). See the
pygame documentation for more information.
| *visible* - whether or not to draw this sprite
| *width*, *height*, *size* - width, height, and size of the image
respectively. Read-only.
| *group* - the group in which this sprite is contained. Read-only.
| *scale* - a factor by which to scale the image by before drawing.
"""
def __init__(self, group=None):
""" Adds this sprite to any number of groups by default. """
_all_sprites.append(_wref(self))
self._age = 0
self._static = False
self._image = None
self._layer = '__default__'
self._groups = []
self._make_static = False
self._pos = spyral.Vec2D(0, 0)
self._blend_flags = 0
self.visible = True
self._anchor = 'topleft'
self._offset = spyral.Vec2D(0, 0)
self._scale = spyral.Vec2D(1.0, 1.0)
self._scaled_image = None
self._group = None
self._angle = 0
self._transform_image = None
self._transform_offset = spyral.Vec2D(0, 0)
self._flip_x = False
self._flip_y = False
self.on_remove = spyral.Signal()
if group is not None:
group.add(self)
def _set_static(self):
self._make_static = True
self._static = True
def _expire_static(self):
# Expire static is part of the private API which must
# be implemented by Sprites that wish to be static.
if self._static:
spyral.director.get_camera()._remove_static_blit(self)
self._static = False
self._age = 0
return True
def _recalculate_offset(self):
if self.image is None:
return
size = self._scale * self._image.get_size()
w = size[0]
h = size[1]
a = self._anchor
if a == 'topleft':
offset = (0, 0)
elif a == 'topright':
offset = (w, 0)
elif a == 'midtop':
offset = (w / 2., 0)
elif a == 'bottomleft':
offset = (0, h)
elif a == 'bottomright':
offset = (w, h)
elif a == 'midbottom':
offset = (w / 2., h)
elif a == 'midleft':
offset = (0, h / 2.)
elif a == 'midright':
offset = (w, h / 2.)
elif a == 'center':
offset = (w / 2., h / 2.)
else:
offset = a
self._offset = spyral.Vec2D(offset) - self._transform_offset
def _recalculate_transforms(self):
source = self._image._surf
# flip
source = pygame.transform.flip(source, self._flip_x, self._flip_y)
# scale
if self._scale != (1.0, 1.0):
new_size = self._scale * self._image.get_size()
new_size = (int(new_size[0]), int(new_size[1]))
source = pygame.transform.smoothscale(source, new_size, pygame.Surface(new_size, pygame.SRCALPHA))
# rotate
if self._angle != 0:
angle = 180.0 / math.pi * self._angle % 360
old = spyral.Vec2D(source.get_rect().center)
source = pygame.transform.rotate(source, angle).convert_alpha()
new = source.get_rect().center
self._transform_offset = old - new
self._transform_image = source
self._recalculate_offset()
self._expire_static()
def _get_pos(self):
return self._pos
def _set_pos(self, pos):
if pos == self._pos:
return
self._pos = spyral.Vec2D(pos)
self._expire_static()
def _get_layer(self):
return self._layer
def _set_layer(self, layer):
if layer == self._layer:
return
self._layer = layer
self._expire_static()
def _get_image(self):
return self._image
def _set_image(self, image):
if self._image is image:
return
self._image = image
self._recalculate_transforms()
self._expire_static()
def _get_x(self):
return self._get_pos()[0]
def _set_x(self, x):
self._set_pos((x, self._get_y()))
def _get_y(self):
return self._get_pos()[1]
def _set_y(self, y):
self._set_pos((self._get_x(), y))
def _get_anchor(self):
return self._anchor
def _set_anchor(self, anchor):
if anchor == self._anchor:
return
self._anchor = anchor
self._recalculate_offset()
self._expire_static()
def _get_width(self):
if self._transform_image:
return spyral.Vec2D(self._transform_image.get_width())
def _get_height(self):
if self._transform_image:
return spyral.Vec2D(self._transform_image.get_height())
def _get_size(self):
if self._transform_image:
return spyral.Vec2D(self._transform_image.get_size())
return spyral.Vec2D(0, 0)
def _get_scale(self):
return self._scale
def _set_scale(self, scale):
if isinstance(scale, (int, float)):
scale = spyral.Vec2D(scale, scale)
if self._scale == scale:
return
self._scale = spyral.Vec2D(scale)
self._recalculate_transforms()
self._expire_static()
def _get_scale_x(self):
return self._scale[0]
def _get_scale_y(self):
return self._scale[1]
def _set_scale_x(self, x):
self._set_scale((x, self._scale[1]))
def _set_scale_y(self, y):
self._set_scale((self._scale[0], y))
def _get_group(self):
return self._group
def _set_group(self, group):
if self._group is not None:
self._group.remove(self)
group.add(self)
def _get_angle(self):
return self._angle
def _set_angle(self, angle):
if self._angle == angle:
return
self._angle = angle
self._recalculate_transforms()
def _get_flip_x(self):
    # Whether the image is mirrored horizontally.
    return self._flip_x
def _set_flip_x(self, flip_x):
    """Toggle horizontal mirroring, rebuilding transforms on change."""
    if flip_x != self._flip_x:
        self._flip_x = flip_x
        self._recalculate_transforms()
def _get_flip_y(self):
    # Whether the image is mirrored vertically.
    return self._flip_y
def _set_flip_y(self, flip_y):
    """Toggle vertical mirroring, rebuilding transforms on change."""
    if flip_y != self._flip_y:
        self._flip_y = flip_y
        self._recalculate_transforms()
# Public attribute surface: each property funnels through the private
# getters/setters above so change detection and redraw bookkeeping run.
position = property(_get_pos, _set_pos)
pos = property(_get_pos, _set_pos)  # alias of `position`
layer = property(_get_layer, _set_layer)
image = property(_get_image, _set_image)
x = property(_get_x, _set_x)
y = property(_get_y, _set_y)
anchor = property(_get_anchor, _set_anchor)
scale = property(_get_scale, _set_scale)
scale_x = property(_get_scale_x, _set_scale_x)
scale_y = property(_get_scale_y, _set_scale_y)
width = property(_get_width)    # read-only
height = property(_get_height)  # read-only
size = property(_get_size)      # read-only
group = property(_get_group, _set_group)
angle = property(_get_angle, _set_angle)
flip_x = property(_get_flip_x, _set_flip_x)
flip_y = property(_get_flip_y, _set_flip_y)
def get_rect(self):
    """Return a spyral.Rect at the offset-adjusted position with our size."""
    left = self._pos[0] - self._offset[0]
    top = self._pos[1] - self._offset[1]
    return spyral.Rect((left, top), self.size)
def draw(self, camera):
    """Blit this sprite through *camera*, promoting it to a static blit
    once it has stayed unchanged for several consecutive draws."""
    if not self.visible:
        return
    if self._static:
        # Already handed off as a static blit; nothing to submit per-frame.
        return
    if self._make_static or self._age > 4:
        # Unchanged for more than 4 draws (or explicitly requested):
        # register a one-time static blit instead of re-blitting each frame.
        camera._static_blit(self,
                            self._transform_image,
                            (self._pos[0] - self._offset[0],
                             self._pos[1] - self._offset[1]),
                            self._layer,
                            self._blend_flags)
        self._make_static = False
        self._static = True
        return
    camera._blit(self._transform_image,
                 (self._pos[0] - self._offset[0],
                  self._pos[1] - self._offset[1]),
                 self._layer,
                 self._blend_flags)
    # Count consecutive dynamic draws toward static promotion above.
    self._age += 1
def update(self, *args):
    """ Called once per update tick; base implementation is a no-op for
    subclasses to override. """
    pass
def __del__(self):
    # Drop any static blit registered with the active camera so a collected
    # sprite does not linger on screen.
    # NOTE(review): __del__ timing is GC-dependent; confirm sprites are also
    # removed explicitly where prompt cleanup matters.
    spyral.director.get_camera()._remove_static_blit(self)
### Group classes ###
class Group(object):
    """ Behaves like sprite.Group in pygame. """

    def __init__(self, camera, *sprites):
        """
        Create a group and associate a camera with it. This is where all drawing
        will be sent.
        """
        self.camera = camera
        self._sprites = list(sprites)

    def draw(self):
        """ Draws all of its sprites to the group's camera. """
        cam = self.camera
        for sprite in self._sprites:
            sprite.draw(cam)

    def update(self, *args):
        """ Calls update on all of its Sprites. """
        for member in self._sprites:
            member.update(*args)

    def remove(self, *sprites):
        """ Removes Sprites from this Group. """
        for sprite in sprites:
            if sprite not in self._sprites:
                continue
            self._sprites.remove(sprite)
            sprite._group = None
            sprite._expire_static()
            sprite.on_remove.emit(self)

    def add(self, *sprites):
        """ Adds an object to its drawable list. """
        for sprite in sprites:
            if sprite in self._sprites:
                continue
            self._sprites.append(sprite)
            sprite._group = self

    def has(self, *sprites):
        """
        Return true if all sprites are contained in the group. Unlike
        pygame, this does not take an iterator for each argument, only sprites.
        """
        return all(sprite in self._sprites for sprite in sprites)

    def empty(self):
        """ Clears all sprites from the group. """
        for sprite in self._sprites:
            sprite._group = None
        self._sprites = []

    def sprites(self):
        """ Return a list of the sprites in this group. """
        return list(self._sprites)
|
from __future__ import absolute_import
from __future__ import unicode_literals
import openpyxl
import langcodes
from django import forms
from zipfile import ZipFile
from crispy_forms.helper import FormHelper
from crispy_forms import layout as crispy
from crispy_forms.bootstrap import StrictButton
from crispy_forms import bootstrap as twbscrispy
from django.utils.translation import ugettext as _, ugettext_lazy
from corehq.apps.app_manager.dbaccessors import get_available_versions_for_app
from corehq.apps.hqwebapp import crispy as hqcrispy
from corehq.apps.app_manager.dbaccessors import get_brief_apps_in_domain
from corehq.apps.translations.models import TransifexProject
from corehq.motech.utils import b64_aes_decrypt
class ConvertTranslationsForm(forms.Form):
    """Accepts an xls/xlsx/po/zip upload and validates its type/contents."""
    upload_file = forms.FileField(label="", required=True,
                                  help_text=ugettext_lazy("Upload a xls/xlsx/po/zip file"))

    def __init__(self, *args, **kwargs):
        super(ConvertTranslationsForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.form_method = 'post'
        self.helper.layout = crispy.Layout(
            hqcrispy.B3MultiField(
                "",
                crispy.Div(
                    crispy.Field(
                        'upload_file',
                        data_bind="value: file",
                    ),
                    css_class='col-sm-4'
                ),
            ),
            StrictButton(
                ugettext_lazy('Convert'),
                css_class='btn-primary',
                type='submit',
            ),
        )

    def clean_upload_file(self):
        """Validate the uploaded file by extension.

        xls/xlsx must contain 'source' and 'translation' columns; po files
        are accepted as-is; every member of a zip must itself be xls/xlsx/po.
        Raises forms.ValidationError otherwise.
        """
        uploaded_file = self.cleaned_data.get('upload_file')
        if uploaded_file:
            if uploaded_file.name.endswith(('.xls', '.xlsx')):
                workbook = openpyxl.load_workbook(uploaded_file)
                worksheet = workbook.worksheets[0]
                rows = [row for row in worksheet.iter_rows()]
                # guard against a completely empty sheet before reading headers
                headers = [cell.value for cell in rows[0]] if rows else []
                # ensure mandatory columns in the excel sheet
                if 'source' not in headers or 'translation' not in headers:
                    raise forms.ValidationError(_("Please ensure columns 'source' and 'translation' in the sheet"))
                return uploaded_file
            elif uploaded_file.name.endswith('.po'):
                return uploaded_file
            elif uploaded_file.name.endswith('.zip'):
                zipfile = ZipFile(uploaded_file)
                for fileinfo in zipfile.filelist:
                    # endswith accepts a tuple of suffixes: one check per member
                    if not fileinfo.filename.endswith(('.xls', '.xlsx', '.po')):
                        raise forms.ValidationError(
                            _('Unexpected file passed within zip. Please upload xls/xlsx/po files.'))
                return uploaded_file
        raise forms.ValidationError(_('Unexpected file passed. Please upload xls/xlsx/po/zip file.'))
class PullResourceForm(forms.Form):
    """Pick a Transifex project/language and optionally a single resource."""
    # label typo fixed: "Trasifex" -> "Transifex"
    transifex_project_slug = forms.ChoiceField(label=ugettext_lazy("Transifex project"), choices=())
    target_lang = forms.ChoiceField(label=ugettext_lazy("Target Language"),
                                    choices=langcodes.get_all_langs_for_select(),
                                    initial="en"
                                    )
    # ugettext_lazy (not eager ugettext) so the label is translated
    # per-request rather than once at import time
    resource_slug = forms.CharField(label=ugettext_lazy("Resource Slug"), required=False,
                                    help_text=ugettext_lazy("Leave blank to fetch full project")
                                    )

    def __init__(self, domain, *args, **kwargs):
        super(PullResourceForm, self).__init__(*args, **kwargs)
        self.domain = domain
        self.helper = FormHelper()
        self.helper.form_tag = False
        self.helper.label_class = 'col-sm-3 col-md-4 col-lg-2'
        self.helper.field_class = 'col-sm-4 col-md-5 col-lg-3'
        # only projects registered for this domain are offered
        projects = TransifexProject.objects.filter(domain=domain).all()
        if projects:
            self.fields['transifex_project_slug'].choices = (
                tuple((project.slug, project) for project in projects)
            )
        self.helper.layout = crispy.Layout(
            'transifex_project_slug',
            crispy.Field('target_lang', css_class="ko-select2"),
            'resource_slug',
            hqcrispy.FormActions(
                twbscrispy.StrictButton(
                    ugettext_lazy("Submit"),
                    type="submit",
                    css_class="btn-primary",
                )
            )
        )
class AppTranslationsForm(forms.Form):
    """Base form for Transifex app-translation operations.

    Subclasses set ``form_action`` (create/update/push/pull/backup/delete)
    and extend ``form_fields`` with the extra fields they need.
    """
    app_id = forms.ChoiceField(label=ugettext_lazy("Application"), choices=(), required=True)
    version = forms.IntegerField(label=ugettext_lazy("Application Version"), required=False,
                                 help_text=ugettext_lazy("Leave blank to use current saved state"))
    use_version_postfix = forms.MultipleChoiceField(
        choices=[
            ('yes', 'Track resources per version'),
        ],
        widget=forms.CheckboxSelectMultiple(),
        required=False,
        initial='no',
        help_text=ugettext_lazy("Check this if you want to maintain different resources separately for different "
                                "versions of the application. Leave it unchecked for continuous update to the same"
                                " set of resources")
    )
    # label typo fixed: "Trasifex" -> "Transifex"
    transifex_project_slug = forms.ChoiceField(label=ugettext_lazy("Transifex project"), choices=(),
                                               required=True)
    target_lang = forms.ChoiceField(label=ugettext_lazy("Translated Language"),
                                    choices=([(None, ugettext_lazy('Select Translated Language'))] +
                                             langcodes.get_all_langs_for_select()),
                                    required=False,
                                    )
    action = forms.CharField(widget=forms.HiddenInput)
    perform_translated_check = forms.BooleanField(
        label=ugettext_lazy("Confirm that resources are completely translated before performing request"),
        required=False,
        initial=True)

    def __init__(self, domain, *args, **kwargs):
        super(AppTranslationsForm, self).__init__(*args, **kwargs)
        self.domain = domain
        self.helper = FormHelper()
        self.helper.form_tag = False
        self.helper.label_class = 'col-sm-4 col-md-4 col-lg-3'
        self.helper.field_class = 'col-sm-6 col-md-6 col-lg-5'
        self.fields['app_id'].choices = tuple((app.id, app.name) for app in get_brief_apps_in_domain(domain))
        projects = TransifexProject.objects.filter(domain=domain).all()
        if projects:
            self.fields['transifex_project_slug'].choices = (
                tuple((project.slug, project) for project in projects)
            )
        form_fields = self.form_fields()
        # NOTE(review): the message is interpolated into a single-quoted JS
        # string; a translation containing a quote would break the onclick.
        form_fields.append(hqcrispy.Field(StrictButton(
            ugettext_lazy("Submit"),
            type="submit",
            css_class="btn btn-primary btn-lg disable-on-submit",
            onclick="return confirm('%s')" % ugettext_lazy("Please confirm that you want to proceed?")
        )))
        self.helper.layout = crispy.Layout(
            *form_fields
        )
        self.fields['action'].initial = self.form_action

    def form_fields(self):
        """Crispy fields common to every action; subclasses append to this."""
        return [
            hqcrispy.Field('app_id'),
            hqcrispy.Field('version'),
            hqcrispy.Field('use_version_postfix'),
            hqcrispy.Field('transifex_project_slug'),
            hqcrispy.Field('action')
        ]

    def clean(self):
        # ensure target lang when translation check requested during pull
        # to check for translation completion
        cleaned_data = super(AppTranslationsForm, self).clean()
        # use .get(): a field's key is absent from cleaned_data when that
        # field itself failed validation (avoids KeyError here)
        version = cleaned_data.get('version')
        if version:
            app_id = cleaned_data.get('app_id')
            available_versions = get_available_versions_for_app(self.domain, app_id)
            if version not in available_versions:
                self.add_error('version', ugettext_lazy('Version not available for app'))
        if (not cleaned_data.get('target_lang') and
                (cleaned_data.get('action') == "pull" and cleaned_data.get('perform_translated_check'))):
            self.add_error('target_lang', ugettext_lazy('Target lang required to confirm translation completion'))
        return cleaned_data

    @classmethod
    def form_for(cls, form_action):
        """Return the subclass handling *form_action* (None when unknown)."""
        if form_action == 'create':
            return CreateAppTranslationsForm
        elif form_action == 'update':
            return UpdateAppTranslationsForm
        elif form_action == 'push':
            return PushAppTranslationsForm
        elif form_action == 'pull':
            return PullAppTranslationsForm
        elif form_action == 'backup':
            return BackUpAppTranslationsForm
        elif form_action == 'delete':
            return DeleteAppTranslationsForm
class CreateAppTranslationsForm(AppTranslationsForm):
    """'create' action: also asks which language the app's text is in."""
    form_action = 'create'
    source_lang = forms.ChoiceField(label=ugettext_lazy("Source Language on Transifex"),
                                    choices=langcodes.get_all_langs_for_select(),
                                    initial="en"
                                    )
    def form_fields(self):
        # base fields plus the source-language selector
        form_fields = super(CreateAppTranslationsForm, self).form_fields()
        form_fields.append(hqcrispy.Field('source_lang', css_class="ko-select2"))
        return form_fields
class UpdateAppTranslationsForm(CreateAppTranslationsForm):
    """'update' action: same fields as create (inherits source_lang)."""
    form_action = 'update'
class PushAppTranslationsForm(AppTranslationsForm):
    """'push' action: send translations for a chosen target language."""
    form_action = 'push'
    def form_fields(self):
        form_fields = super(PushAppTranslationsForm, self).form_fields()
        form_fields.append(hqcrispy.Field('target_lang', css_class="ko-select2"))
        return form_fields
class PullAppTranslationsForm(AppTranslationsForm):
    """'pull' action: fetch translations, optionally locking the resources."""
    form_action = 'pull'
    lock_translations = forms.BooleanField(label=ugettext_lazy("Lock translations for resources that are being "
                                                               "pulled"),
                                           help_text=ugettext_lazy("Please note that this will lock the resource"
                                                                   " for all languages"),
                                           required=False,
                                           initial=False)
    def form_fields(self):
        form_fields = super(PullAppTranslationsForm, self).form_fields()
        form_fields.extend([
            hqcrispy.Field('target_lang', css_class="ko-select2"),
            hqcrispy.Field('lock_translations'),
            hqcrispy.Field('perform_translated_check'),
        ])
        return form_fields
class DeleteAppTranslationsForm(AppTranslationsForm):
    """'delete' action: optionally verify translations are complete first."""
    form_action = 'delete'
    def form_fields(self):
        form_fields = super(DeleteAppTranslationsForm, self).form_fields()
        form_fields.append(hqcrispy.Field('perform_translated_check'))
        return form_fields
class BackUpAppTranslationsForm(AppTranslationsForm):
    """'backup' action: uses only the base fields."""
    form_action = 'backup'
class TransifexOrganizationForm(forms.ModelForm):
    """Edit a Transifex organization, showing the API token decrypted."""
    def __init__(self, *args, **kwargs):
        super(TransifexOrganizationForm, self).__init__(*args, **kwargs)
        # the stored token is encrypted at rest; decrypt it for display.
        # NOTE(review): no Meta is visible here -- confirm model/fields are
        # declared elsewhere, otherwise Django rejects this ModelForm.
        self.initial['api_token'] = b64_aes_decrypt(self.instance.api_token)
Added more .ko-select2 classes so that all the select boxes align.
from __future__ import absolute_import
from __future__ import unicode_literals
import openpyxl
import langcodes
from django import forms
from zipfile import ZipFile
from crispy_forms.helper import FormHelper
from crispy_forms import layout as crispy
from crispy_forms.bootstrap import StrictButton
from crispy_forms import bootstrap as twbscrispy
from django.utils.translation import ugettext as _, ugettext_lazy
from corehq.apps.app_manager.dbaccessors import get_available_versions_for_app
from corehq.apps.hqwebapp import crispy as hqcrispy
from corehq.apps.app_manager.dbaccessors import get_brief_apps_in_domain
from corehq.apps.translations.models import TransifexProject
from corehq.motech.utils import b64_aes_decrypt
class ConvertTranslationsForm(forms.Form):
    """Accepts an xls/xlsx/po/zip upload and validates its type/contents."""
    upload_file = forms.FileField(label="", required=True,
                                  help_text=ugettext_lazy("Upload a xls/xlsx/po/zip file"))

    def __init__(self, *args, **kwargs):
        super(ConvertTranslationsForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.form_method = 'post'
        self.helper.layout = crispy.Layout(
            hqcrispy.B3MultiField(
                "",
                crispy.Div(
                    crispy.Field(
                        'upload_file',
                        data_bind="value: file",
                    ),
                    css_class='col-sm-4'
                ),
            ),
            StrictButton(
                ugettext_lazy('Convert'),
                css_class='btn-primary',
                type='submit',
            ),
        )

    def clean_upload_file(self):
        """Validate the uploaded file by extension.

        xls/xlsx must contain 'source' and 'translation' columns; po files
        are accepted as-is; every member of a zip must itself be xls/xlsx/po.
        Raises forms.ValidationError otherwise.
        """
        uploaded_file = self.cleaned_data.get('upload_file')
        if uploaded_file:
            if uploaded_file.name.endswith(('.xls', '.xlsx')):
                workbook = openpyxl.load_workbook(uploaded_file)
                worksheet = workbook.worksheets[0]
                rows = [row for row in worksheet.iter_rows()]
                # guard against a completely empty sheet before reading headers
                headers = [cell.value for cell in rows[0]] if rows else []
                # ensure mandatory columns in the excel sheet
                if 'source' not in headers or 'translation' not in headers:
                    raise forms.ValidationError(_("Please ensure columns 'source' and 'translation' in the sheet"))
                return uploaded_file
            elif uploaded_file.name.endswith('.po'):
                return uploaded_file
            elif uploaded_file.name.endswith('.zip'):
                zipfile = ZipFile(uploaded_file)
                for fileinfo in zipfile.filelist:
                    # endswith accepts a tuple of suffixes: one check per member
                    if not fileinfo.filename.endswith(('.xls', '.xlsx', '.po')):
                        raise forms.ValidationError(
                            _('Unexpected file passed within zip. Please upload xls/xlsx/po files.'))
                return uploaded_file
        raise forms.ValidationError(_('Unexpected file passed. Please upload xls/xlsx/po/zip file.'))
class PullResourceForm(forms.Form):
    """Pick a Transifex project/language and optionally a single resource."""
    # label typo fixed: "Trasifex" -> "Transifex"
    transifex_project_slug = forms.ChoiceField(label=ugettext_lazy("Transifex project"), choices=())
    target_lang = forms.ChoiceField(label=ugettext_lazy("Target Language"),
                                    choices=langcodes.get_all_langs_for_select(),
                                    initial="en"
                                    )
    # ugettext_lazy (not eager ugettext) so the label is translated
    # per-request rather than once at import time
    resource_slug = forms.CharField(label=ugettext_lazy("Resource Slug"), required=False,
                                    help_text=ugettext_lazy("Leave blank to fetch full project")
                                    )

    def __init__(self, domain, *args, **kwargs):
        super(PullResourceForm, self).__init__(*args, **kwargs)
        self.domain = domain
        self.helper = FormHelper()
        self.helper.form_tag = False
        self.helper.label_class = 'col-sm-3 col-md-4 col-lg-2'
        self.helper.field_class = 'col-sm-4 col-md-5 col-lg-3'
        # only projects registered for this domain are offered
        projects = TransifexProject.objects.filter(domain=domain).all()
        if projects:
            self.fields['transifex_project_slug'].choices = (
                tuple((project.slug, project) for project in projects)
            )
        self.helper.layout = crispy.Layout(
            crispy.Field('transifex_project_slug', css_class="ko-select2"),
            crispy.Field('target_lang', css_class="ko-select2"),
            'resource_slug',
            hqcrispy.FormActions(
                twbscrispy.StrictButton(
                    ugettext_lazy("Submit"),
                    type="submit",
                    css_class="btn-primary",
                )
            )
        )
class AppTranslationsForm(forms.Form):
    """Base form for Transifex app-translation operations.

    Subclasses set ``form_action`` (create/update/push/pull/backup/delete)
    and extend ``form_fields`` with the extra fields they need.
    """
    app_id = forms.ChoiceField(label=ugettext_lazy("Application"), choices=(), required=True)
    version = forms.IntegerField(label=ugettext_lazy("Application Version"), required=False,
                                 help_text=ugettext_lazy("Leave blank to use current saved state"))
    use_version_postfix = forms.MultipleChoiceField(
        choices=[
            ('yes', 'Track resources per version'),
        ],
        widget=forms.CheckboxSelectMultiple(),
        required=False,
        initial='no',
        help_text=ugettext_lazy("Check this if you want to maintain different resources separately for different "
                                "versions of the application. Leave it unchecked for continuous update to the same"
                                " set of resources")
    )
    # label typo fixed: "Trasifex" -> "Transifex"
    transifex_project_slug = forms.ChoiceField(label=ugettext_lazy("Transifex project"), choices=(),
                                               required=True)
    target_lang = forms.ChoiceField(label=ugettext_lazy("Translated Language"),
                                    choices=([(None, ugettext_lazy('Select Translated Language'))] +
                                             langcodes.get_all_langs_for_select()),
                                    required=False,
                                    )
    action = forms.CharField(widget=forms.HiddenInput)
    perform_translated_check = forms.BooleanField(
        label=ugettext_lazy("Confirm that resources are completely translated before performing request"),
        required=False,
        initial=True)

    def __init__(self, domain, *args, **kwargs):
        super(AppTranslationsForm, self).__init__(*args, **kwargs)
        self.domain = domain
        self.helper = FormHelper()
        self.helper.form_tag = False
        self.helper.label_class = 'col-sm-4 col-md-4 col-lg-3'
        self.helper.field_class = 'col-sm-6 col-md-6 col-lg-5'
        self.fields['app_id'].choices = tuple((app.id, app.name) for app in get_brief_apps_in_domain(domain))
        projects = TransifexProject.objects.filter(domain=domain).all()
        if projects:
            self.fields['transifex_project_slug'].choices = (
                tuple((project.slug, project) for project in projects)
            )
        form_fields = self.form_fields()
        # NOTE(review): the message is interpolated into a single-quoted JS
        # string; a translation containing a quote would break the onclick.
        form_fields.append(hqcrispy.Field(StrictButton(
            ugettext_lazy("Submit"),
            type="submit",
            css_class="btn btn-primary btn-lg disable-on-submit",
            onclick="return confirm('%s')" % ugettext_lazy("Please confirm that you want to proceed?")
        )))
        self.helper.layout = crispy.Layout(
            *form_fields
        )
        self.fields['action'].initial = self.form_action

    def form_fields(self):
        """Crispy fields common to every action; subclasses append to this."""
        return [
            hqcrispy.Field('app_id', css_class="ko-select2"),
            hqcrispy.Field('version'),
            hqcrispy.Field('use_version_postfix'),
            hqcrispy.Field('transifex_project_slug', css_class="ko-select2"),
            hqcrispy.Field('action')
        ]

    def clean(self):
        # ensure target lang when translation check requested during pull
        # to check for translation completion
        cleaned_data = super(AppTranslationsForm, self).clean()
        # use .get(): a field's key is absent from cleaned_data when that
        # field itself failed validation (avoids KeyError here)
        version = cleaned_data.get('version')
        if version:
            app_id = cleaned_data.get('app_id')
            available_versions = get_available_versions_for_app(self.domain, app_id)
            if version not in available_versions:
                self.add_error('version', ugettext_lazy('Version not available for app'))
        if (not cleaned_data.get('target_lang') and
                (cleaned_data.get('action') == "pull" and cleaned_data.get('perform_translated_check'))):
            self.add_error('target_lang', ugettext_lazy('Target lang required to confirm translation completion'))
        return cleaned_data

    @classmethod
    def form_for(cls, form_action):
        """Return the subclass handling *form_action* (None when unknown)."""
        if form_action == 'create':
            return CreateAppTranslationsForm
        elif form_action == 'update':
            return UpdateAppTranslationsForm
        elif form_action == 'push':
            return PushAppTranslationsForm
        elif form_action == 'pull':
            return PullAppTranslationsForm
        elif form_action == 'backup':
            return BackUpAppTranslationsForm
        elif form_action == 'delete':
            return DeleteAppTranslationsForm
class CreateAppTranslationsForm(AppTranslationsForm):
    """'create' action: also asks which language the app's text is in."""
    form_action = 'create'
    source_lang = forms.ChoiceField(label=ugettext_lazy("Source Language on Transifex"),
                                    choices=langcodes.get_all_langs_for_select(),
                                    initial="en"
                                    )
    def form_fields(self):
        # base fields plus the source-language selector
        form_fields = super(CreateAppTranslationsForm, self).form_fields()
        form_fields.append(hqcrispy.Field('source_lang', css_class="ko-select2"))
        return form_fields
class UpdateAppTranslationsForm(CreateAppTranslationsForm):
    """'update' action: same fields as create (inherits source_lang)."""
    form_action = 'update'
class PushAppTranslationsForm(AppTranslationsForm):
    """'push' action: send translations for a chosen target language."""
    form_action = 'push'
    def form_fields(self):
        form_fields = super(PushAppTranslationsForm, self).form_fields()
        form_fields.append(hqcrispy.Field('target_lang', css_class="ko-select2"))
        return form_fields
class PullAppTranslationsForm(AppTranslationsForm):
    """'pull' action: fetch translations, optionally locking the resources."""
    form_action = 'pull'
    lock_translations = forms.BooleanField(label=ugettext_lazy("Lock translations for resources that are being "
                                                               "pulled"),
                                           help_text=ugettext_lazy("Please note that this will lock the resource"
                                                                   " for all languages"),
                                           required=False,
                                           initial=False)
    def form_fields(self):
        form_fields = super(PullAppTranslationsForm, self).form_fields()
        form_fields.extend([
            hqcrispy.Field('target_lang', css_class="ko-select2"),
            hqcrispy.Field('lock_translations'),
            hqcrispy.Field('perform_translated_check'),
        ])
        return form_fields
class DeleteAppTranslationsForm(AppTranslationsForm):
    """'delete' action: optionally verify translations are complete first."""
    form_action = 'delete'
    def form_fields(self):
        form_fields = super(DeleteAppTranslationsForm, self).form_fields()
        form_fields.append(hqcrispy.Field('perform_translated_check'))
        return form_fields
class BackUpAppTranslationsForm(AppTranslationsForm):
    """'backup' action: uses only the base fields."""
    form_action = 'backup'
class TransifexOrganizationForm(forms.ModelForm):
    """Edit a Transifex organization, showing the API token decrypted."""
    def __init__(self, *args, **kwargs):
        super(TransifexOrganizationForm, self).__init__(*args, **kwargs)
        # the stored token is encrypted at rest; decrypt it for display.
        # NOTE(review): no Meta is visible here -- confirm model/fields are
        # declared elsewhere, otherwise Django rejects this ModelForm.
        self.initial['api_token'] = b64_aes_decrypt(self.instance.api_token)
|
# -*- coding: utf8 -*-
from tests.functional_tests import isolate, run_tuttle_file
import sqlite3
import shutil
from os import path, getcwd
class TestSQLiteResource():
    """Functional tests for the csv2sqlite processor.

    The commented-out cases below cover the SQLiteResource API and the plain
    sqlite processor; they are kept for reference.
    """
    # def test_parse_url(self):
    #     """A real resource should exist"""
    #     url = "sqlite://relative/path/to/sqlite_file/tables/mytable"
    #     res = SQLiteResource(url)
    #     assert res.db_file == "relative/path/to/sqlite_file"
    #     assert res.table == "mytable"
    #
    # def test_sqlite_file_does_not_exists(self):
    #     """Event the sqlite file does not exits"""
    #     url = "sqlite://unknonw_sqlite_file/tables/mytable"
    #     res = SQLiteResource(url)
    #     assert res.exists() == False
    #
    # @isolate(['tests.sqlite'])
    # def test_sqlite_table_does_not_exists(self):
    #     """The sqlite file exists but the tabble doesn't"""
    #     url = "sqlite://tests.sqlite/tables/unknown_test_table"
    #     res = SQLiteResource(url)
    #     assert res.exists() == False
    #
    # @isolate(['tests.sqlite'])
    # def test_sqlite_table_exists(self):
    #     """exists() should return True when the table exists"""
    #     url = "sqlite://tests.sqlite/tables/test_table"
    #     res = SQLiteResource(url)
    #     assert res.exists()
    #
    # @isolate(['tests.sqlite'])
    # def test_remove_table(self):
    #     """exists() should return True when the table exists"""
    #     url = "sqlite://tests.sqlite/tables/test_table"
    #     res = SQLiteResource(url)
    #     assert res.exists()
    #     res.remove()
    #     assert not res.exists()
    #
    # def test_sqlite_processor_should_be_availlable(self):
    #     """A project with an SQLite processor should be Ok"""
    #     project = "sqlite://db.sqlite/tables/my_table <- sqlite://db.sqlite/tables/my_table ! sqlite"
    #     pp = ProjectParser()
    #     pp.set_project(project)
    #     pp.read_line()
    #     process = pp.parse_dependencies_and_processor()
    #     assert process._processor.name == "sqlite"
    #
    # def test_sqlite_pre_check_ok_with_no_outputs(self):
    #     """Pre-check should work even if there are no outputs"""
    #     project = " <- sqlite://db.sqlite/tables/my_table ! sqlite"
    #     pp = ProjectParser()
    #     pp.set_project(project)
    #     pp.read_line()
    #     process = pp.parse_dependencies_and_processor()
    #     assert process._processor.name == "sqlite"
    #     process.pre_check()
    #
    # def test_sqlite_pre_check_ok_with_no_inputs(self):
    #     """Pre-check should work even if there are no inputs"""
    #     project = "sqlite://db.sqlite/tables/my_table <- ! sqlite"
    #     pp = ProjectParser()
    #     pp.set_project(project)
    #     pp.read_line()
    #     process = pp.parse_dependencies_and_processor()
    #     assert process._processor.name == "sqlite"
    #     process.pre_check()
    #
    # def test_sqlite_pre_check_should_fail_without_sqlite_resources(self):
    #     """Pre-check should fail if no SQLiteResources are specified either in inputs or outputs"""
    #     project = "<- ! sqlite"
    #     pp = ProjectParser()
    #     pp.set_project(project)
    #     pp.read_line()
    #     process = pp.parse_dependencies_and_processor()
    #     assert process._processor.name == "sqlite"
    #     try:
    #         process.pre_check()
    #         assert False, "Pre-check should not have allowed SQLIteProcessor without SQLiteResources"
    #     except SQLiteTuttleError:
    #         assert True

    @isolate(['test.csv'])
    def test_sqlite_processor(self):
        """A project with an SQLite processor should run the sql statements"""
        project = """sqlite://db.sqlite/tables/pop <- file://test.csv ! csv2sqlite
"""
        rcode, output = run_tuttle_file(project)
        assert rcode == 0, output
        with sqlite3.connect('db.sqlite') as db:
            cur = db.cursor()
            cur.execute("SELECT * FROM pop")
            expected = u"""Aruba,ABW,102911
Andorra,AND,79218
Afghanistan,AFG,30551674
Angola,AGO,21471618
Albania,ALB,2897366
Arab World,ARB,369762523
United Arab Emirates,ARE,9346129""".split("\n")
            for exp in expected:
                a_result = cur.next()
                assert a_result == tuple(exp.split(','))
            try:
                cur.next()
                assert False, "Detected an extra line on the table"
            except StopIteration:
                # Bugfix: the previous bare "except:" also swallowed the
                # AssertionError above, so the extra-line check could never
                # fail. Catch only the cursor-exhausted signal.
                pass

    @isolate(['bad_csv.csv'])
    def test_bad_csv__should_fail_with_csv_2sqlite(self):
        """ A csv without the good number of columns in one raw should make the process fail"""
        project = """sqlite://db.sqlite/tables/pop <- file://bad_csv.csv ! csv2sqlite
"""
        rcode, output = run_tuttle_file(project)
        assert rcode == 2, output
        assert "Wrong number of columns on line 4" in output

    def assertF(self, output, truc):
        # Helper kept for parity with the original suite: always fails and
        # surfaces *output* in the assertion message.
        assert False, output

    @isolate(['test_csv.py'])
    def test_text_file_should_fail_with_csv_2sqlite(self):
        """ A source file that is not a csv should make the process fail"""
        project = """sqlite://db.sqlite/tables/pop <- file://test_csv.py ! csv2sqlite
"""
        rcode, output = run_tuttle_file(project)
        assert rcode == 2, output
        error_text_found = ("Wrong" in output or
                            "Is this file a valid CSV file ?" in output)
        assert error_text_found, output

    @isolate(['tests.sqlite'])
    def test_binary_file_should_fail_with_csv_2sqlite(self):
        """ A binary file that is not a csv should make the process fail"""
        project = """sqlite://db.sqlite/tables/pop <- file://tests.sqlite ! csv2sqlite
"""
        rcode, output = run_tuttle_file(project)
        assert rcode == 2, output
        assert "Is this file a valid CSV file ?" in output, output
# @isolate(['tests.sqlite'])
# def test_sql_error_in_sqlite_processor(self):
# """ If an error occurs, tuttle should fail and output logs should trace the error"""
# project = """sqlite://tests.sqlite/tables/new_table <- sqlite://tests.sqlite/tables/test_table ! sqlite
# CREATE TABLE new_table AS SELECT * FROM test_table;
#
# NOT an SQL statement;
# """
# rcode, output = run_tuttle_file(project)
# assert rcode == 2
# error_log = open(join('.tuttle', 'processes', 'logs', 'sqlite_1_err')).read()
# assert error_log.find('near "NOT": syntax error') >= 0, error_log
#
Fixed the CSV tests to comply with the new CSV URL scheme.
# -*- coding: utf8 -*-
from tests.functional_tests import isolate, run_tuttle_file
import sqlite3
import shutil
from os import path, getcwd
class TestSQLiteResource():
    """Functional tests for the csv2sqlite processor (new-style csv URLs).

    The commented-out cases below cover the SQLiteResource API and the plain
    sqlite processor; they are kept for reference.
    """
    # def test_parse_url(self):
    #     """A real resource should exist"""
    #     url = "sqlite://relative/path/to/sqlite_file/tables/mytable"
    #     res = SQLiteResource(url)
    #     assert res.db_file == "relative/path/to/sqlite_file"
    #     assert res.table == "mytable"
    #
    # def test_sqlite_file_does_not_exists(self):
    #     """Event the sqlite file does not exits"""
    #     url = "sqlite://unknonw_sqlite_file/tables/mytable"
    #     res = SQLiteResource(url)
    #     assert res.exists() == False
    #
    # @isolate(['tests.sqlite'])
    # def test_sqlite_table_does_not_exists(self):
    #     """The sqlite file exists but the tabble doesn't"""
    #     url = "sqlite://tests.sqlite/tables/unknown_test_table"
    #     res = SQLiteResource(url)
    #     assert res.exists() == False
    #
    # @isolate(['tests.sqlite'])
    # def test_sqlite_table_exists(self):
    #     """exists() should return True when the table exists"""
    #     url = "sqlite://tests.sqlite/tables/test_table"
    #     res = SQLiteResource(url)
    #     assert res.exists()
    #
    # @isolate(['tests.sqlite'])
    # def test_remove_table(self):
    #     """exists() should return True when the table exists"""
    #     url = "sqlite://tests.sqlite/tables/test_table"
    #     res = SQLiteResource(url)
    #     assert res.exists()
    #     res.remove()
    #     assert not res.exists()
    #
    # def test_sqlite_processor_should_be_availlable(self):
    #     """A project with an SQLite processor should be Ok"""
    #     project = "sqlite://db.sqlite/tables/my_table <- sqlite://db.sqlite/tables/my_table ! sqlite"
    #     pp = ProjectParser()
    #     pp.set_project(project)
    #     pp.read_line()
    #     process = pp.parse_dependencies_and_processor()
    #     assert process._processor.name == "sqlite"
    #
    # def test_sqlite_pre_check_ok_with_no_outputs(self):
    #     """Pre-check should work even if there are no outputs"""
    #     project = " <- sqlite://db.sqlite/tables/my_table ! sqlite"
    #     pp = ProjectParser()
    #     pp.set_project(project)
    #     pp.read_line()
    #     process = pp.parse_dependencies_and_processor()
    #     assert process._processor.name == "sqlite"
    #     process.pre_check()
    #
    # def test_sqlite_pre_check_ok_with_no_inputs(self):
    #     """Pre-check should work even if there are no inputs"""
    #     project = "sqlite://db.sqlite/tables/my_table <- ! sqlite"
    #     pp = ProjectParser()
    #     pp.set_project(project)
    #     pp.read_line()
    #     process = pp.parse_dependencies_and_processor()
    #     assert process._processor.name == "sqlite"
    #     process.pre_check()
    #
    # def test_sqlite_pre_check_should_fail_without_sqlite_resources(self):
    #     """Pre-check should fail if no SQLiteResources are specified either in inputs or outputs"""
    #     project = "<- ! sqlite"
    #     pp = ProjectParser()
    #     pp.set_project(project)
    #     pp.read_line()
    #     process = pp.parse_dependencies_and_processor()
    #     assert process._processor.name == "sqlite"
    #     try:
    #         process.pre_check()
    #         assert False, "Pre-check should not have allowed SQLIteProcessor without SQLiteResources"
    #     except SQLiteTuttleError:
    #         assert True

    @isolate(['test.csv'])
    def test_sqlite_processor(self):
        """A project with an SQLite processor should run the sql statements"""
        project = """sqlite://db.sqlite/pop <- file://test.csv ! csv2sqlite
"""
        rcode, output = run_tuttle_file(project)
        assert rcode == 0, output
        with sqlite3.connect('db.sqlite') as db:
            cur = db.cursor()
            cur.execute("SELECT * FROM pop")
            expected = u"""Aruba,ABW,102911
Andorra,AND,79218
Afghanistan,AFG,30551674
Angola,AGO,21471618
Albania,ALB,2897366
Arab World,ARB,369762523
United Arab Emirates,ARE,9346129""".split("\n")
            for exp in expected:
                a_result = cur.next()
                assert a_result == tuple(exp.split(','))
            try:
                cur.next()
                assert False, "Detected an extra line on the table"
            except StopIteration:
                # Bugfix: the previous bare "except:" also swallowed the
                # AssertionError above, so the extra-line check could never
                # fail. Catch only the cursor-exhausted signal.
                pass

    @isolate(['bad_csv.csv'])
    def test_bad_csv__should_fail_with_csv_2sqlite(self):
        """ A csv without the good number of columns in one raw should make the process fail"""
        project = """sqlite://db.sqlite/pop <- file://bad_csv.csv ! csv2sqlite
"""
        rcode, output = run_tuttle_file(project)
        assert rcode == 2, output
        assert "Wrong number of columns on line 4" in output

    def assertF(self, output, truc):
        # Helper kept for parity with the original suite: always fails and
        # surfaces *output* in the assertion message.
        assert False, output

    @isolate(['test_csv.py'])
    def test_text_file_should_fail_with_csv_2sqlite(self):
        """ A source file that is not a csv should make the process fail"""
        project = """sqlite://db.sqlite/pop <- file://test_csv.py ! csv2sqlite
"""
        rcode, output = run_tuttle_file(project)
        assert rcode == 2, output
        error_text_found = ("Wrong" in output or
                            "Is this file a valid CSV file ?" in output)
        assert error_text_found, output

    @isolate(['tests.sqlite'])
    def test_binary_file_should_fail_with_csv_2sqlite(self):
        """ A binary file that is not a csv should make the process fail"""
        project = """sqlite://db.sqlite/pop <- file://tests.sqlite ! csv2sqlite
"""
        rcode, output = run_tuttle_file(project)
        assert rcode == 2, output
        assert "Is this file a valid CSV file ?" in output, output
# @isolate(['tests.sqlite'])
# def test_sql_error_in_sqlite_processor(self):
# """ If an error occurs, tuttle should fail and output logs should trace the error"""
# project = """sqlite://tests.sqlite/tables/new_table <- sqlite://tests.sqlite/tables/test_table ! sqlite
# CREATE TABLE new_table AS SELECT * FROM test_table;
#
# NOT an SQL statement;
# """
# rcode, output = run_tuttle_file(project)
# assert rcode == 2
# error_log = open(join('.tuttle', 'processes', 'logs', 'sqlite_1_err')).read()
# assert error_log.find('near "NOT": syntax error') >= 0, error_log
#
|
bb8b7aa6-2ead-11e5-84c6-7831c1d44c14
bb913917-2ead-11e5-96fe-7831c1d44c14
bb913917-2ead-11e5-96fe-7831c1d44c14 |
from orator.orm import model
from sea import current_app
from sea.contrib.extensions import cache as cache_ext
from sea.contrib.extensions.cache import default_key
def _related_caches_key(cls, id):
return '{}.related_caches.{}.{}.{}'.format(cls.__cache_version__,
cls.__module__,
cls.__name__, id)
def _model_caches_key(cache_version):
def wrapper(f, *args, **kwargs):
return '{}.{}'.format(cache_version, default_key(f, *args, **kwargs))
return wrapper
def _register_to_related_caches(f, id, cls, *args, **kwargs):
cache = current_app.extensions.cache
key = cache._backend.trans_key(_related_caches_key(cls, id))
redis = cache._backend._client
cached_key = cache._backend.trans_key(
f.make_cache_key(cls, *args, **kwargs))
redis.sadd(key, cached_key)
redis.expire(key, cache._backend.default_ttl)
return True
def _find_register(f, ins, cls, *args, **kwargs):
return _register_to_related_caches(f, args[0], cls, *args, **kwargs)
def _find_by_register(f, ins, cls, *args, **kwargs):
if ins is None or ins is cache_ext.CacheNone:
return True
return _register_to_related_caches(f, ins.id, cls, *args, **kwargs)
def _bulk_register_to_related_caches(cls, key_model_map):
cache = current_app.extensions.cache
redis = cache._backend._client
for cached_key, ins in key_model_map.items():
key = cache._backend.trans_key(_related_caches_key(cls, ins.id))
cached_key = cache._backend.trans_key(cached_key)
redis.sadd(key, cached_key)
redis.expire(key, cache._backend.default_ttl)
return True
def _clear_related_caches(instance):
cache = current_app.extensions.cache
key = cache._backend.trans_key(
_related_caches_key(instance.__class__, instance.id))
redis = cache._backend._client
related_caches = redis.smembers(key)
if related_caches:
redis.delete(*related_caches)
return True
def _id_is_list(cls, id, *args, **kwargs):
return isinstance(id, list)
class ModelMeta(model.MetaModel):
def __new__(mcls, name, bases, kws):
max_find_many_cache = kws.get('__max_find_many_cache__', 10)
cache_version = kws.get('__cache_version__', '1.0')
@classmethod
@cache_ext.cached(fallbacked=_find_register,
cache_key=_model_caches_key(cache_version),
unless=_id_is_list, cache_none=True)
def find(cls, id, columns=None):
if isinstance(id, list) and id and len(id) <= max_find_many_cache:
cache = current_app.extensions.cache
keymap = {i: find.__func__.make_cache_key(cls, i) for i in id}
rv = cache.get_many(keymap.values())
models = dict(zip(id, rv))
missids = [
i for i, m in models.items()
if m is None]
models = {
k: m for k, m in models.items()
if not (m is cache_ext.CacheNone or m is None)}
if not missids:
return cls().new_collection(models.values())
missed = super(cls, cls).find(missids, columns)
missed = {m.id: m for m in missed}
models.update(missed)
key_model_map = {keymap[i]: m for i, m in missed.items()}
cache.set_many(key_model_map)
_bulk_register_to_related_caches(cls, key_model_map)
return cls().new_collection(list(models.values()))
return super(cls, cls).find(id, columns)
@classmethod
@cache_ext.cached(fallbacked=_find_by_register,
cache_key=_model_caches_key(cache_version),
cache_none=True)
def find_by(cls, name, val, columns=None):
return super(cls, cls).find_by(name, val, columns)
kws.update({
'find': find,
'find_by': find_by,
'__cache_version__': cache_version,
})
return super().__new__(mcls, name, bases, kws)
def __init__(cls, name, bases, kws):
super().__init__(name, bases, kws)
cls.saved(_clear_related_caches)
cls.deleted(_clear_related_caches)
revert _related_caches_key
from orator.orm import model
from sea import current_app
from sea.contrib.extensions import cache as cache_ext
from sea.contrib.extensions.cache import default_key
def _related_caches_key(cls, id):
return 'related_caches.{}.{}.{}'.format(cls.__module__, cls.__name__, id)
def _model_caches_key(cache_version):
    """Return a cache-key function that prefixes sea's default key with
    *cache_version*, so bumping the version invalidates all old entries."""
    def make_key(f, *args, **kwargs):
        base = default_key(f, *args, **kwargs)
        return '{}.{}'.format(cache_version, base)
    return make_key
def _register_to_related_caches(f, id, cls, *args, **kwargs):
    """Record f's cache key in the per-row 'related caches' set so that it
    can be invalidated when the row identified by *id* is saved or deleted."""
    backend = current_app.extensions.cache._backend
    related_key = backend.trans_key(_related_caches_key(cls, id))
    member = backend.trans_key(f.make_cache_key(cls, *args, **kwargs))
    client = backend._client
    client.sadd(related_key, member)
    # the tracking set must not outlive the entries it tracks
    client.expire(related_key, backend.default_ttl)
    return True
def _find_register(f, ins, cls, *args, **kwargs):
    """`fallbacked` hook for find(): the looked-up id is the first positional arg."""
    looked_up_id = args[0]
    return _register_to_related_caches(f, looked_up_id, cls, *args, **kwargs)
def _find_by_register(f, ins, cls, *args, **kwargs):
    """`fallbacked` hook for find_by(): register under the found row's id."""
    if ins is cache_ext.CacheNone or ins is None:
        # no row found (or a cached miss) -- nothing to attach the key to
        return True
    return _register_to_related_caches(f, ins.id, cls, *args, **kwargs)
def _bulk_register_to_related_caches(cls, key_model_map):
    """Register many cache-key -> model-instance pairs in their per-row
    'related caches' sets (bulk counterpart of _register_to_related_caches)."""
    backend = current_app.extensions.cache._backend
    client = backend._client
    for raw_key, instance in key_model_map.items():
        related_key = backend.trans_key(_related_caches_key(cls, instance.id))
        client.sadd(related_key, backend.trans_key(raw_key))
        client.expire(related_key, backend.default_ttl)
    return True
def _clear_related_caches(instance):
    """Model saved/deleted hook: delete every cache entry tied to this row."""
    backend = current_app.extensions.cache._backend
    related_key = backend.trans_key(
        _related_caches_key(instance.__class__, instance.id))
    members = backend._client.smembers(related_key)
    if members:
        # drop the cached find()/find_by() results registered for this row
        backend._client.delete(*members)
    return True
def _id_is_list(cls, id, *args, **kwargs):
    # `unless` predicate for the cached find(): bypass the single-entry cache
    # when a list of ids is requested (bulk lookups are cached per-id inside
    # find() itself).
    return isinstance(id, list)
class ModelMeta(model.MetaModel):
    """Metaclass that wraps Orator's ``find``/``find_by`` with redis caching.

    Class bodies may tune behaviour via two optional attributes:
    ``__max_find_many_cache__`` -- max number of ids served from cache by a
    single ``find([...])`` call (default 10); ``__cache_version__`` -- prefix
    baked into every cache key so bumping it invalidates old entries.
    """
    def __new__(mcls, name, bases, kws):
        # per-class knobs, read from the class body with defaults; captured
        # by the closures below
        max_find_many_cache = kws.get('__max_find_many_cache__', 10)
        cache_version = kws.get('__cache_version__', '1.0')

        @classmethod
        @cache_ext.cached(fallbacked=_find_register,
                          cache_key=_model_caches_key(cache_version),
                          unless=_id_is_list, cache_none=True)
        def find(cls, id, columns=None):
            # Bulk path: small-enough list of ids -> try cache per id first.
            if isinstance(id, list) and id and len(id) <= max_find_many_cache:
                cache = current_app.extensions.cache
                # one cache key per requested id (find.__func__ unwraps the
                # classmethod to reach the decorator's make_cache_key)
                keymap = {i: find.__func__.make_cache_key(cls, i) for i in id}
                rv = cache.get_many(keymap.values())
                models = dict(zip(id, rv))
                # ids with no cache entry at all must be fetched from the DB;
                # CacheNone entries are known misses and are NOT re-fetched
                missids = [
                    i for i, m in models.items()
                    if m is None]
                models = {
                    k: m for k, m in models.items()
                    if not (m is cache_ext.CacheNone or m is None)}
                if not missids:
                    return cls().new_collection(models.values())
                missed = super(cls, cls).find(missids, columns)
                missed = {m.id: m for m in missed}
                models.update(missed)
                # cache the freshly-loaded rows and track them for invalidation
                key_model_map = {keymap[i]: m for i, m in missed.items()}
                cache.set_many(key_model_map)
                _bulk_register_to_related_caches(cls, key_model_map)
                return cls().new_collection(list(models.values()))
            # single id (or oversized list): delegate; the decorator caches it
            return super(cls, cls).find(id, columns)

        @classmethod
        @cache_ext.cached(fallbacked=_find_by_register,
                          cache_key=_model_caches_key(cache_version),
                          cache_none=True)
        def find_by(cls, name, val, columns=None):
            # plain delegation; caching/invalidation handled by the decorator
            return super(cls, cls).find_by(name, val, columns)

        # install the wrapped lookups (and the resolved version) on the class
        kws.update({
            'find': find,
            'find_by': find_by,
            '__cache_version__': cache_version,
        })
        return super().__new__(mcls, name, bases, kws)

    def __init__(cls, name, bases, kws):
        super().__init__(name, bases, kws)
        # invalidate related cache entries whenever a row changes
        cls.saved(_clear_related_caches)
        cls.deleted(_clear_related_caches)
|
import os
import logging
import socket
import select
import threading
import logging
import logging.config
import json
import cherrypy
import voltron
import voltron.http
from .api import *
from .plugin import *
from .api import *
log = logging.getLogger("core")
READ_MAX = 0xFFFF
class Server(object):
"""
Main server class instantiated by the debugger host. Responsible for
controlling the background thread that communicates with clients, and
handling requests forwarded from that thread.
"""
def __init__(self):
self.clients = []
self.d_thread = None
self.t_thread = None
self.h_thread = None
# pipes for controlling ServerThreads
self.d_exit_out, self.d_exit_in = os.pipe()
self.t_exit_out, self.t_exit_in = os.pipe()
def start(self):
listen = voltron.config['server']['listen']
if listen['domain']:
log.debug("Starting server thread for domain socket")
self.d_thread = ServerThread(self, self.clients, self.d_exit_out, voltron.env['sock'])
self.d_thread.start()
if listen['tcp']:
log.debug("Starting server thread for TCP socket")
self.t_thread = ServerThread(self, self.clients, self.t_exit_out, tuple(listen['tcp']))
self.t_thread.start()
if voltron.config['server']['listen']['http']:
log.debug("Starting server thread for HTTP server")
(host, port) = tuple(listen['http'])
voltron.http.app.server = self
self.h_thread = HTTPServerThread(self, self.clients, host, port)
self.h_thread.start()
def stop(self):
# terminate the server thread by writing some data to the exit pipe
log.debug("Stopping server threads")
if self.d_thread:
os.write(self.d_exit_in, chr(0))
self.d_thread.join(10)
if self.t_thread:
os.write(self.t_exit_in, chr(0))
self.t_thread.join(10)
if self.h_thread:
self.h_thread.stop()
def client_summary(self):
sums = []
for client in self.clients:
sums.append(str(client))
return sums
def handle_request(self, data, client=None):
req = None
res = None
#
# preprocess the request to make sure the data and environment are OK
#
# make sure we have a debugger, or we're gonna have a bad time
if voltron.debugger:
# parse incoming request with the top level APIRequest class so we can determine the request type
try:
req = APIRequest(data=data)
except Exception, e:
req = None
log.error("Exception raised while parsing API request: {} {}".format(type(e), e))
if req:
# instantiate the request class
try:
req = api_request(req.request, data=data)
except Exception, e:
log.error("Exception raised while creating API request: {} {}".format(type(e), e))
req = None
if not req:
res = APIPluginNotFoundErrorResponse()
else:
res = APIInvalidRequestErrorResponse()
else:
res = APIDebuggerNotPresentErrorResponse()
#
# validate and dispatch the request
#
if not res:
# dispatch the request and send the response
if req and req.request == 'wait':
# wait requests get handled in a background thread
t = threading.Thread(target=self.dispatch_request, args=[req, client])
t.start()
else:
# everything else is handled on the main thread
return self.dispatch_request(req, client)
else:
if client:
# already got an error response and we have a client, send it
client.send_response(str(res))
else:
return res
def dispatch_request(self, req, client=None):
"""
Dispatch a request object.
"""
log.debug("Dispatching request: {}".format(str(req)))
# make sure it's valid
res = None
try:
req.validate()
except MissingFieldError, e:
res = APIMissingFieldErrorResponse(str(e))
# dispatch the request
if not res:
try:
res = req.dispatch()
except Exception, e:
msg = "Exception raised while dispatching request: {}".format(e)
log.error(msg)
res = APIGenericErrorResponse(message=msg)
log.debug("Response: {}".format(str(res)))
# send the response
if client:
log.debug("Client was passed to dispatch_request() - sending response")
client.send_response(str(res))
else:
log.debug("Client was not passed to dispatch_request() - returning response")
return res
class ServerThread(threading.Thread):
"""
Background thread spun off by the Server class. Responsible for
accepting new client connections and communicating with existing clients.
Requests are received from clients and passed to the Server object, which
passes them off to the APIDispatcher to be fulfilled. Then the responses
returned (synchronously) are sent back to the requesting client.
"""
def __init__(self, server, clients, exit_pipe, sock):
threading.Thread.__init__(self)
self.server = server
self.clients = clients
self.exit_pipe = exit_pipe
self.sock = sock
def run(self):
# make sure there's no left over socket file
self.cleanup_socket()
# set up the server socket
serv = ServerSocket(self.sock)
# main event loop
running = True
while running:
# check server accept() socket, exit pipe, and client sockets for activity
rfds, _, _ = select.select([serv, self.exit_pipe] + self.clients, [], [])
# handle any ready sockets
for fd in rfds:
if fd == serv:
# accept a new client connection
client = serv.accept()
client.server = self.server
self.clients.append(client)
elif fd == self.exit_pipe:
# flush the exit pipe and break
os.read(self.exit_pipe, 1)
running = False
break
else:
# read the request from the client and dispatch it
data = None
try:
data = fd.recv_request()
self.server.handle_request(data, fd)
except Exception, e:
log.error("Exception raised while handling request: {} {}".format(type(e), str(e)))
self.purge_client(fd)
# clean up
for client in self.clients:
self.purge_client(client)
os.close(self.exit_pipe)
serv.close()
self.cleanup_socket()
def cleanup_socket(self):
if type(self.sock) == str:
try:
os.remove(self.sock)
except:
pass
def purge_client(self, client):
try:
client.close()
except:
pass
if client in self.clients:
self.clients.remove(client)
class HTTPServerThread(threading.Thread):
"""
Background thread to run the HTTP server.
"""
def __init__(self, server, clients, host="127.0.0.1", port=6969):
threading.Thread.__init__(self)
self.server = server
self.clients = clients
self.host = host
self.port = port
def run(self):
# register routes for all the API methods
voltron.http.register_http_api()
# configure the cherrypy server
cherrypy.config.update({
'log.screen': False,
'server.socket_port': self.port,
'server.socket_host': str(self.host)
})
# mount the main static dir
cherrypy.tree.mount(None, '/static', {'/' : {
'tools.staticdir.dir': os.path.join(os.path.dirname(__file__), 'web/static'),
'tools.staticdir.on': True,
'tools.staticdir.index': 'index.html'
}})
# graft the main flask app (see http.py) onto the cherry tree
cherrypy.tree.graft(voltron.http.app, '/')
# mount web plugins
plugins = voltron.plugin.pm.web_plugins
for name in plugins:
plugin_root = '/view/{}'.format(name)
static_path = '/view/{}/static'.format(name)
# mount app
if plugins[name].app:
# if there's an app object, mount it at the root
log.debug("Mounting app for web plugin '{}' on {}".format(name, plugin_root))
plugins[name].app.server = self.server
cherrypy.tree.graft(plugins[name].app, plugin_root)
else:
# if there's no plugin app, mount the static dir at the plugin's root instead
# neater for static-only apps (ie. javascript-based)
static_path = plugin_root
# mount static directory
directory = os.path.join(plugins[name]._dir, 'static')
if os.path.isdir(directory):
log.debug("Mounting static directory for web plugin '{}' on {}: {}".format(name, static_path, directory))
cherrypy.tree.mount(None, static_path, {'/' : {
'tools.staticdir.dir': directory,
'tools.staticdir.on': True,
'tools.staticdir.index': 'index.html'
}})
# make with the serving
cherrypy.engine.start()
cherrypy.engine.block()
def stop(self):
cherrypy.engine.exit()
class Client(object):
"""
Used by a client (ie. a view) to communicate with the server.
"""
def __init__(self):
"""
Initialise a new client
"""
self.sock = None
def connect(self):
"""
Connect to the server
"""
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.connect(voltron.env['sock'])
def send_request(self, request):
"""
Send a request to the server.
`request` is an APIRequest subclass.
Returns an APIResponse or subclass instance. If an error occurred, it
will be an APIErrorResponse, if the request was successful it will be
the plugin's specified response class if one exists, otherwise it will
be an APIResponse.
"""
# send the request data to the server
data = str(request)
log.debug("Sending request: {}".format(data))
res = self.sock.sendall(data)
if res != None:
log.error("Failed to send request: {}".format(request))
raise SocketDisconnected("socket closed")
# receive response data
data = self.sock.recv(READ_MAX)
if len(data) > 0:
log.debug('Client received message: ' + data)
try:
# parse the response data
generic_response = APIResponse(data=data)
# if there's an error, return an error response
if generic_response.is_error:
res = APIErrorResponse(data=data)
else:
# success; generate a proper response
plugin = voltron.plugin.pm.api_plugin_for_request(request.request)
if plugin and plugin.response_class:
# found a plugin for the request we sent, use its response type
res = plugin.response_class(data=data)
else:
# didn't find a plugin, just return the generic APIResponse we already generated
res = generic_response
except Exception as e:
log.error('Exception parsing message: ' + str(e))
log.error('Invalid message: ' + data)
else:
raise SocketDisconnected("socket closed")
return res
def create_request(self, request_type, *args, **kwargs):
"""
Create a request.
`request_type` is the request type (string). This is used to look up a
plugin, whose request class is instantiated and passed the remaining
arguments passed to this function.
"""
return api_request(request_type, *args, **kwargs)
def perform_request(self, request_type, *args, **kwargs):
"""
Create and send a request.
`request_type` is the request type (string). This is used to look up a
plugin, whose request class is instantiated and passed the remaining
arguments passed to this function.
"""
# create a request
req = api_request(request_type, *args, **kwargs)
# send it
res = self.send_request(req)
return res
class SocketDisconnected(Exception):
"""
Exception raised when a socket disconnects.
"""
pass
class BaseSocket(object):
"""
Base socket class from which ServerSocket and ClientSocket inherit.
"""
def fileno(self):
return self.sock.fileno()
def close(self):
self.sock.close()
def send(self, buf):
self.sock.sendall(buf)
class ServerSocket(BaseSocket):
"""
Server socket for accepting new client connections.
"""
def __init__(self, sock):
if type(sock) == str:
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
elif type(sock) == tuple:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.bind(sock)
self.sock.listen(1)
def accept(self):
pair = self.sock.accept()
if pair is not None:
sock, addr = pair
try:
return ClientSocket(sock)
except Exception as e:
log.error("Exception handling accept: " + str(e))
class ClientSocket(BaseSocket):
"""
Client socket for communicating with an individual client. Collected by
ServerThread.
"""
def __init__(self, sock):
self.sock = sock
def recv_request(self):
# read request from socket
data = self.sock.recv(READ_MAX).strip()
log.debug("Received request client -> server: {}".format(data))
if len(data) == 0:
raise SocketDisconnected()
return data
def send_response(self, response):
log.debug("Sending response server -> client: {}".format(response))
self.send(response)
Handle client disconnect
import os
import logging
import socket
import select
import threading
import logging
import logging.config
import json
import cherrypy
import voltron
import voltron.http
from .api import *
from .plugin import *
from .api import *
log = logging.getLogger("core")
READ_MAX = 0xFFFF
class Server(object):
    """
    Main server class instantiated by the debugger host. Responsible for
    controlling the background thread that communicates with clients, and
    handling requests forwarded from that thread.
    """
    def __init__(self):
        # connected ClientSocket instances, shared with the listener threads
        self.clients = []
        # one background thread per enabled transport
        self.d_thread = None  # UNIX domain socket listener
        self.t_thread = None  # TCP listener
        self.h_thread = None  # HTTP (cherrypy) server
        # pipes for controlling ServerThreads
        self.d_exit_out, self.d_exit_in = os.pipe()
        self.t_exit_out, self.t_exit_in = os.pipe()

    def start(self):
        """Start a listener thread for each transport enabled in the config."""
        listen = voltron.config['server']['listen']
        if listen['domain']:
            log.debug("Starting server thread for domain socket")
            self.d_thread = ServerThread(self, self.clients, self.d_exit_out, voltron.env['sock'])
            self.d_thread.start()
        if listen['tcp']:
            log.debug("Starting server thread for TCP socket")
            self.t_thread = ServerThread(self, self.clients, self.t_exit_out, tuple(listen['tcp']))
            self.t_thread.start()
        if voltron.config['server']['listen']['http']:
            log.debug("Starting server thread for HTTP server")
            (host, port) = tuple(listen['http'])
            voltron.http.app.server = self
            self.h_thread = HTTPServerThread(self, self.clients, host, port)
            self.h_thread.start()

    def stop(self):
        """Stop all listener threads (blocks up to 10s per socket thread)."""
        # terminate the server thread by writing some data to the exit pipe
        log.debug("Stopping server threads")
        if self.d_thread:
            os.write(self.d_exit_in, chr(0))  # chr(0) is a 1-byte py2 str
            self.d_thread.join(10)
        if self.t_thread:
            os.write(self.t_exit_in, chr(0))
            self.t_thread.join(10)
        if self.h_thread:
            self.h_thread.stop()

    def client_summary(self):
        """Return a list of str() descriptions of the connected clients."""
        sums = []
        for client in self.clients:
            sums.append(str(client))
        return sums

    def handle_request(self, data, client=None):
        """
        Parse and dispatch one raw request.

        If `client` is given, the response is sent to it; otherwise the
        response object is returned (except for 'wait' requests, which are
        dispatched on a background thread and return nothing here).
        """
        req = None
        res = None

        #
        # preprocess the request to make sure the data and environment are OK
        #

        # make sure we have a debugger, or we're gonna have a bad time
        if voltron.debugger:
            # parse incoming request with the top level APIRequest class so we can determine the request type
            try:
                req = APIRequest(data=data)
            except Exception, e:
                req = None
                log.error("Exception raised while parsing API request: {} {}".format(type(e), e))

            if req:
                # instantiate the request class
                try:
                    req = api_request(req.request, data=data)
                except Exception, e:
                    log.error("Exception raised while creating API request: {} {}".format(type(e), e))
                    req = None
                if not req:
                    res = APIPluginNotFoundErrorResponse()
            else:
                res = APIInvalidRequestErrorResponse()
        else:
            res = APIDebuggerNotPresentErrorResponse()

        #
        # validate and dispatch the request
        #

        if not res:
            # dispatch the request and send the response
            if req and req.request == 'wait':
                # wait requests get handled in a background thread
                t = threading.Thread(target=self.dispatch_request, args=[req, client])
                t.start()
            else:
                # everything else is handled on the main thread
                return self.dispatch_request(req, client)
        else:
            if client:
                # already got an error response and we have a client, send it
                try:
                    client.send_response(str(res))
                except socket.error:
                    # client went away before we could answer; nothing to do
                    log.error("Client closed before we could respond")
            else:
                return res

    def dispatch_request(self, req, client=None):
        """
        Dispatch a request object.
        """
        log.debug("Dispatching request: {}".format(str(req)))

        # make sure it's valid
        res = None
        try:
            req.validate()
        except MissingFieldError, e:
            res = APIMissingFieldErrorResponse(str(e))

        # dispatch the request
        if not res:
            try:
                res = req.dispatch()
            except Exception, e:
                # plugin blew up: wrap the error so the client still gets a reply
                msg = "Exception raised while dispatching request: {}".format(e)
                log.error(msg)
                res = APIGenericErrorResponse(message=msg)

        log.debug("Response: {}".format(str(res)))

        # send the response
        if client:
            log.debug("Client was passed to dispatch_request() - sending response")
            try:
                client.send_response(str(res))
            except socket.error:
                log.error("Client closed before we could respond")
        else:
            log.debug("Client was not passed to dispatch_request() - returning response")
            return res
class ServerThread(threading.Thread):
"""
Background thread spun off by the Server class. Responsible for
accepting new client connections and communicating with existing clients.
Requests are received from clients and passed to the Server object, which
passes them off to the APIDispatcher to be fulfilled. Then the responses
returned (synchronously) are sent back to the requesting client.
"""
def __init__(self, server, clients, exit_pipe, sock):
threading.Thread.__init__(self)
self.server = server
self.clients = clients
self.exit_pipe = exit_pipe
self.sock = sock
def run(self):
# make sure there's no left over socket file
self.cleanup_socket()
# set up the server socket
serv = ServerSocket(self.sock)
# main event loop
running = True
while running:
# check server accept() socket, exit pipe, and client sockets for activity
rfds, _, _ = select.select([serv, self.exit_pipe] + self.clients, [], [])
# handle any ready sockets
for fd in rfds:
if fd == serv:
# accept a new client connection
client = serv.accept()
client.server = self.server
self.clients.append(client)
elif fd == self.exit_pipe:
# flush the exit pipe and break
os.read(self.exit_pipe, 1)
running = False
break
else:
# read the request from the client and dispatch it
data = None
try:
data = fd.recv_request()
self.server.handle_request(data, fd)
except Exception, e:
log.error("Exception raised while handling request: {} {}".format(type(e), str(e)))
self.purge_client(fd)
# clean up
for client in self.clients:
self.purge_client(client)
os.close(self.exit_pipe)
serv.close()
self.cleanup_socket()
def cleanup_socket(self):
if type(self.sock) == str:
try:
os.remove(self.sock)
except:
pass
def purge_client(self, client):
try:
client.close()
except:
pass
if client in self.clients:
self.clients.remove(client)
class HTTPServerThread(threading.Thread):
    """
    Background thread to run the HTTP server.
    """
    def __init__(self, server, clients, host="127.0.0.1", port=6969):
        threading.Thread.__init__(self)
        self.server = server
        self.clients = clients
        self.host = host
        self.port = port

    def run(self):
        """Configure cherrypy, mount the flask app and web plugins, serve."""
        # register routes for all the API methods
        voltron.http.register_http_api()

        # configure the cherrypy server
        # NOTE: cherrypy.config.update() is process-global configuration
        cherrypy.config.update({
            'log.screen': False,
            'server.socket_port': self.port,
            'server.socket_host': str(self.host)
        })

        # mount the main static dir
        cherrypy.tree.mount(None, '/static', {'/' : {
            'tools.staticdir.dir': os.path.join(os.path.dirname(__file__), 'web/static'),
            'tools.staticdir.on': True,
            'tools.staticdir.index': 'index.html'
        }})

        # graft the main flask app (see http.py) onto the cherry tree
        cherrypy.tree.graft(voltron.http.app, '/')

        # mount web plugins
        plugins = voltron.plugin.pm.web_plugins
        for name in plugins:
            plugin_root = '/view/{}'.format(name)
            static_path = '/view/{}/static'.format(name)

            # mount app
            if plugins[name].app:
                # if there's an app object, mount it at the root
                log.debug("Mounting app for web plugin '{}' on {}".format(name, plugin_root))
                plugins[name].app.server = self.server
                cherrypy.tree.graft(plugins[name].app, plugin_root)
            else:
                # if there's no plugin app, mount the static dir at the plugin's root instead
                # neater for static-only apps (ie. javascript-based)
                static_path = plugin_root

            # mount static directory
            directory = os.path.join(plugins[name]._dir, 'static')
            if os.path.isdir(directory):
                log.debug("Mounting static directory for web plugin '{}' on {}: {}".format(name, static_path, directory))
                cherrypy.tree.mount(None, static_path, {'/' : {
                    'tools.staticdir.dir': directory,
                    'tools.staticdir.on': True,
                    'tools.staticdir.index': 'index.html'
                }})

        # make with the serving
        cherrypy.engine.start()
        cherrypy.engine.block()  # blocks until engine.exit() is called

    def stop(self):
        """Shut down the cherrypy engine, unblocking run()."""
        cherrypy.engine.exit()
class Client(object):
    """
    Used by a client (ie. a view) to communicate with the server.
    """
    def __init__(self):
        """
        Initialise a new client
        """
        self.sock = None

    def connect(self):
        """
        Connect to the server
        """
        # connects over the UNIX domain socket transport only
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.sock.connect(voltron.env['sock'])

    def send_request(self, request):
        """
        Send a request to the server.

        `request` is an APIRequest subclass.

        Returns an APIResponse or subclass instance. If an error occurred, it
        will be an APIErrorResponse, if the request was successful it will be
        the plugin's specified response class if one exists, otherwise it will
        be an APIResponse.

        NOTE(review): if parsing the server's reply raises, the error is
        logged and this returns None (res keeps sendall()'s None result).
        """
        # send the request data to the server
        data = str(request)
        log.debug("Sending request: {}".format(data))
        # socket.sendall() returns None on success, so this check is purely
        # defensive -- a failure would normally raise instead
        res = self.sock.sendall(data)
        if res != None:
            log.error("Failed to send request: {}".format(request))
            raise SocketDisconnected("socket closed")

        # receive response data
        # NOTE(review): single recv() -- assumes a whole response fits in
        # READ_MAX bytes; larger responses would be truncated. Confirm.
        data = self.sock.recv(READ_MAX)
        if len(data) > 0:
            log.debug('Client received message: ' + data)

            try:
                # parse the response data
                generic_response = APIResponse(data=data)

                # if there's an error, return an error response
                if generic_response.is_error:
                    res = APIErrorResponse(data=data)
                else:
                    # success; generate a proper response
                    plugin = voltron.plugin.pm.api_plugin_for_request(request.request)
                    if plugin and plugin.response_class:
                        # found a plugin for the request we sent, use its response type
                        res = plugin.response_class(data=data)
                    else:
                        # didn't find a plugin, just return the generic APIResponse we already generated
                        res = generic_response
            except Exception as e:
                log.error('Exception parsing message: ' + str(e))
                log.error('Invalid message: ' + data)
        else:
            # empty read means the server closed the connection
            raise SocketDisconnected("socket closed")

        return res

    def create_request(self, request_type, *args, **kwargs):
        """
        Create a request.

        `request_type` is the request type (string). This is used to look up a
        plugin, whose request class is instantiated and passed the remaining
        arguments passed to this function.
        """
        return api_request(request_type, *args, **kwargs)

    def perform_request(self, request_type, *args, **kwargs):
        """
        Create and send a request.

        `request_type` is the request type (string). This is used to look up a
        plugin, whose request class is instantiated and passed the remaining
        arguments passed to this function.
        """
        # create a request
        req = api_request(request_type, *args, **kwargs)

        # send it
        res = self.send_request(req)

        return res
class SocketDisconnected(Exception):
    """Raised when the remote end of a voltron socket has closed."""
class BaseSocket(object):
    """
    Common plumbing shared by ServerSocket and ClientSocket. Subclasses keep
    their underlying socket in ``self.sock``; these helpers delegate to it.
    """
    def fileno(self):
        # expose the raw fd so instances can be passed straight to select.select()
        return self.sock.fileno()

    def close(self):
        self.sock.close()

    def send(self, buf):
        # sendall() keeps writing until every byte of buf has been sent
        self.sock.sendall(buf)
class ServerSocket(BaseSocket):
    """
    Server socket for accepting new client connections.
    """
    def __init__(self, sock):
        # `sock` selects the address family: a string is a UNIX domain
        # socket path, a (host, port) tuple is a TCP endpoint.
        if type(sock) == str:
            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        elif type(sock) == tuple:
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        else:
            # BUGFIX: previously fell through with self.sock unset, producing
            # a confusing AttributeError on the bind() call below
            raise ValueError("sock must be a path (str) or a (host, port) tuple")
        self.sock.bind(sock)
        self.sock.listen(1)

    def accept(self):
        """Accept one pending connection; return a ClientSocket (or None on error)."""
        pair = self.sock.accept()
        if pair is not None:
            sock, addr = pair
            try:
                return ClientSocket(sock)
            except Exception as e:
                log.error("Exception handling accept: " + str(e))
class ClientSocket(BaseSocket):
    """
    Client socket for communicating with an individual client. Collected by
    ServerThread.
    """
    def __init__(self, sock):
        self.sock = sock

    def recv_request(self):
        """Read one request from the client; raise SocketDisconnected on EOF."""
        payload = self.sock.recv(READ_MAX).strip()
        log.debug("Received request client -> server: {}".format(payload))
        if not payload:
            # an empty read means the peer closed its end of the socket
            raise SocketDisconnected()
        return payload

    def send_response(self, response):
        """Write one response back to the client."""
        log.debug("Sending response server -> client: {}".format(response))
        self.send(response)
|
# -*- coding: utf-8 -*-
from rest_framework import generics
from api.serializers import EventListSerializers
from api.processors import get_approved_events
class EventListApi(generics.ListAPIView):
    # BUGFIX: "epproved" -> "approved"; DRF renders this docstring in the
    # browsable API / page meta, so the typo was user-visible.
    """ Lists approved Events, takes the following optional GET parameters:

    * limit
    * order
    * country_code
    * past
    """
    serializer_class = EventListSerializers

    def get_queryset(self):
        # Forward the optional query-string filters to the processor layer;
        # absent filters default to None (past defaults to False).
        params = {
            'limit': self.request.GET.get('limit', None),
            'order': self.request.GET.get('order', None),
            'country_code': self.request.GET.get('country_code', None),
            'past': self.request.GET.get('past', False)
        }
        return get_approved_events(**params)
fixed a typo in comment for API, which shows as option in meta on website
# -*- coding: utf-8 -*-
from rest_framework import generics
from api.serializers import EventListSerializers
from api.processors import get_approved_events
class EventListApi(generics.ListAPIView):
    """ Lists approved Events, takes the following optional GET parameters:

    * limit
    * order
    * country_code
    * past
    """
    serializer_class = EventListSerializers

    def get_queryset(self):
        """Collect the optional query-string filters and fetch matching events."""
        query = self.request.GET
        params = {
            'limit': query.get('limit', None),
            'order': query.get('order', None),
            'country_code': query.get('country_code', None),
            'past': query.get('past', False),
        }
        return get_approved_events(**params)
|
#!/bin/env python3
import argparse
import subprocess
import json
option_age = ""
option_owner = None
query_cache = {}
def query(*args):
    """Run `gerrit query` over ssh and return the parsed change records.

    Each record is also stored in query_cache keyed by its change id.
    """
    cmd = ("ssh openbmc.gerrit gerrit query " +
           "--format json --all-reviewers " +
           "--dependencies --current-patch-set -- '" +
           " ".join(args) + "'")
    raw = subprocess.getoutput(cmd)
    records = [json.loads(line) for line in raw.splitlines()]
    # the last output line is gerrit's stats record, not a change
    del records[-1]
    for record in records:
        query_cache[record['id']] = record
    return records
def changes():
    """Query all open, non-draft, non-blocked changes matching the options."""
    filters = "age:{}".format(option_age)
    if option_owner:
        filters += " ( {} )".format(option_owner)
    return query(filters,
                 "status:open", "-is:draft", "-label:Code-Review=-2",
                 "-project:openbmc/openbmc-test-automation")
def change_by_id(change_id):
    """Return the change record for *change_id*, preferring the local cache.

    Returns None when gerrit knows no such change.
    """
    cached = query_cache.get(change_id)
    if cached is not None:
        return cached
    results = query(change_id)
    return results[0] if results else None
# Gerrit username -> chat handle used when formatting review reminders.
# Users missing from this map fall back to "[user: name]" in map_username().
username_map = {
    'adamliyi': "@shyili",
    'amboar': "@arj",
    'anoo1': "@anoo",
    'bradbishop': "@bradleyb",
    'chinaridinesh': "@chinari",
    'dhruvibm': "@dhruvaraj",
    'dkodihal': "@dkodihal",
    'geissonator': "@andrewg",
    'gtmills': "@gmills",
    'jenkins-openbmc': "Jenkins",
    'JoshDKing': "@jdking",
    'mine260309': "@shyulei",
    'msbarth': "@msbarth",
    'mtritz': "@mtritz",
    'ojayanth': "@ojayanth",
    'ratagupt': "@ratagupt",
    'saqibkh': "@khansa",
    'shenki': "@jms",
    'spinler': "@spinler",
    'tomjoseph83': "@tomjoseph",
    'vishwabmc': "@vishwanath",
    'williamspatrick': "@iawillia",
}
def map_username(user, name):
return username_map.get(user, "[{}: {}]".format(user, name))
def map_approvals(approvals):
mapped = {}
for a in approvals:
approval_type = a['type']
approval_owner = map_username(a['by']['username'], a['by']['name'])
approval_score = int(a['value'])
if approval_type not in mapped:
mapped[approval_type] = {}
mapped[approval_type][approval_owner] = approval_score
return mapped
def map_reviewers(reviewers, owner):
    """Return mapped reviewer names, excluding the CI account and the owner.

    reviewers -- raw gerrit reviewer entries ('username' / 'name' keys)
    owner -- the already-mapped owner handle, excluded from the result

    The original computed unused locals (reviewer_name) and re-fetched
    dict keys it had already read; both redundancies are removed.
    """
    mapped = []
    for r in reviewers:
        # The CI account is not a human reviewer.
        if r['username'] == 'jenkins-openbmc':
            continue
        reviewer = map_username(r['username'], r['name'])
        if reviewer != owner:
            mapped.append(reviewer)
    return mapped
def reason(change):
    """Work out why a change is stalled and who should act next.

    Returns a tuple (message, people, dep): message is a str.format
    template, people fills its {0} placeholder and dep (a dependency
    change id, or None) fills {1}.
    """
    subject = change['subject']  # NOTE(review): unused; candidate for removal
    owner = map_username(change['owner']['username'], change['owner']['name'])
    if 'allReviewers' in change:
        reviewers = map_reviewers(change['allReviewers'], owner)
    else:
        reviewers = []
    if 'approvals' in change['currentPatchSet']:
        approvals = map_approvals(change['currentPatchSet']['approvals'])
    else:
        approvals = {}
    # Fewer than two human reviewers: the owner needs to add more.
    if len(reviewers) < 2:
        return ("{0} has added insufficient reviewers.", owner, None)
    if ('Verified' in approvals):
        verified = approvals['Verified']
        # Any negative Verified score is a failed verification.
        scores = list(filter(lambda x: verified[x] < 0, verified))
        if len(scores):
            return ("{0} should resolve verification failure.", owner, None)
    if ('Code-Review' not in approvals):
        return ("Missing code review by {0}.", ", ".join(reviewers), None)
    reviewed = approvals['Code-Review']
    rejected_by = list(filter(lambda x: reviewed[x] < 0, reviewed))
    if len(rejected_by):
        return ("{0} should resolve code review comments.", owner, None)
    # Need at least two positive reviews; chase the reviewers still missing.
    reviewed_by = list(filter(lambda x: reviewed[x] > 0, reviewed))
    if len(reviewed_by) < 2:
        return ("Missing code review by {0}.",
                ", ".join(set(reviewers) - set(reviewed_by)), None)
    if ('Verified' not in approvals):
        return ("May be missing Jenkins verification ({0}).", owner, None)
    # Out-of-date or unmerged dependencies block the merge.
    if ('dependsOn' in change) and (len(change['dependsOn'])):
        for dep in change['dependsOn']:
            if not dep['isCurrentPatchSet']:
                return ("Depends on out of date patch set {1} ({0}).",
                        owner, dep['id'])
            dep_info = change_by_id(dep['id'])
            if not dep_info:
                continue
            if dep_info['status'] != "MERGED":
                return ("Depends on unmerged patch set {1} ({0}).",
                        owner, dep['id'])
    # A +2 means ready to merge; otherwise it awaits a maintainer review.
    approved_by = list(filter(lambda x: reviewed[x] == 2, reviewed))
    if len(approved_by):
        return ("Ready for merge by {0}.", ", ".join(approved_by), None)
    else:
        return ("Awaiting merge review.", None, None)
def do_report(args):
    """Print a stall report for every open change."""
    for change in changes():
        print("{} - {}".format(change['url'], change['id']))
        print(change['subject'])
        message, people, dep = reason(change)
        print(message.format(people, dep))
        print("----")
# Command-line interface: global options plus a 'report' sub-command.
parser = argparse.ArgumentParser()
parser.add_argument('--age', help='Change age since last modified',
                    type=str, default="1d")
parser.add_argument('--owner', help='Change owner', type=str,
                    action='append')
subparsers = parser.add_subparsers()
report = subparsers.add_parser('report', help='Generate report')
report.set_defaults(func=do_report)

args = parser.parse_args()
if 'age' in args:
    option_age = args.age
if ('owner' in args) and args.owner:
    option_owner = " OR ".join("owner:" + owner for owner in args.owner)
if 'func' in args:
    args.func(args)
else:
    parser.print_help()
Change the 'people' value returned from 'reason' to an array.

This defers the array-to-string conversion to the caller of
'reason', in case it wants to perform a different conversion.
Signed-off-by: Patrick Williams <cbb7353e6d953ef360baf960c122346276c6e320@stwcx.xyz>
#!/bin/env python3
import argparse
import subprocess
import json
# Query filters, populated from the command-line arguments parsed below.
option_age = ""
option_owner = None
# Change id -> parsed change dict, filled in by query() as a lookup cache.
query_cache = {}
def query(*args):
    """Run a gerrit query over ssh and return the parsed change records.

    Joins *args into a single gerrit query, requests JSON output with
    reviewers, dependencies and the current patch set, and parses each
    output line as a JSON object.  The trailing stats record gerrit
    appends is dropped.  Every change returned is also stored in
    query_cache, keyed by its change id.
    """
    out = subprocess.getoutput("ssh openbmc.gerrit gerrit query " +
                               "--format json --all-reviewers " +
                               "--dependencies --current-patch-set -- '" +
                               " ".join(args) + "'")
    results = [json.loads(line) for line in out.splitlines()]
    # The last line is a {"type": "stats", ...} summary, not a change.
    # Guard against an empty response (e.g. ssh failure) before dropping it;
    # the original `del results[-1]` raised IndexError on no output.
    if results:
        del results[-1]
    for r in results:
        query_cache[r['id']] = r
    return results
def changes():
    """Query gerrit for the open changes covered by this report."""
    search = "age:{}".format(option_age)
    if option_owner:
        search += " ( {} )".format(option_owner)
    return query(search,
                 "status:open", "-is:draft", "-label:Code-Review=-2",
                 "-project:openbmc/openbmc-test-automation")
def change_by_id(change_id):
    """Return the change with the given id, or None when it cannot be found.

    Serves from query_cache when possible to avoid repeated gerrit queries.
    """
    cached = query_cache.get(change_id)
    if cached is not None:
        return cached
    results = query(change_id)
    return results[0] if results else None
# Gerrit account name -> chat handle used in the report output.
username_map = {
    'adamliyi': "@shyili",
    'amboar': "@arj",
    'anoo1': "@anoo",
    'bradbishop': "@bradleyb",
    'chinaridinesh': "@chinari",
    'dhruvibm': "@dhruvaraj",
    'dkodihal': "@dkodihal",
    'geissonator': "@andrewg",
    'gtmills': "@gmills",
    'jenkins-openbmc': "Jenkins",
    'JoshDKing': "@jdking",
    'mine260309': "@shyulei",
    'msbarth': "@msbarth",
    'mtritz': "@mtritz",
    'ojayanth': "@ojayanth",
    'ratagupt': "@ratagupt",
    'saqibkh': "@khansa",
    'shenki': "@jms",
    'spinler': "@spinler",
    'tomjoseph83': "@tomjoseph",
    'vishwabmc': "@vishwanath",
    'williamspatrick': "@iawillia",
}


def map_username(user, name):
    """Translate a gerrit username into a chat handle.

    Unknown users are rendered as "[user: name]" so they remain
    identifiable in the report.
    """
    if user in username_map:
        return username_map[user]
    return "[{}: {}]".format(user, name)


def map_approvals(approvals):
    """Group raw gerrit approval entries into {type: {reviewer: score}}."""
    grouped = {}
    for approval in approvals:
        scores = grouped.setdefault(approval['type'], {})
        reviewer = map_username(approval['by']['username'],
                                approval['by']['name'])
        scores[reviewer] = int(approval['value'])
    return grouped
def map_reviewers(reviewers, owner):
    """Return mapped reviewer names, excluding the CI account and the owner.

    reviewers -- raw gerrit reviewer entries ('username' / 'name' keys)
    owner -- the already-mapped owner handle, excluded from the result

    The original computed unused locals (reviewer_name) and re-fetched
    dict keys it had already read; both redundancies are removed.
    """
    mapped = []
    for r in reviewers:
        # The CI account is not a human reviewer.
        if r['username'] == 'jenkins-openbmc':
            continue
        reviewer = map_username(r['username'], r['name'])
        if reviewer != owner:
            mapped.append(reviewer)
    return mapped
def reason(change):
    """Work out why a change is stalled and who should act next.

    Returns a tuple (message, people, dep): message is a str.format
    template, people is a list/set of names that fills {0} (the caller
    joins it), and dep (a dependency change id, or None) fills {1}.
    """
    subject = change['subject']  # NOTE(review): unused; candidate for removal
    owner = map_username(change['owner']['username'], change['owner']['name'])
    if 'allReviewers' in change:
        reviewers = map_reviewers(change['allReviewers'], owner)
    else:
        reviewers = []
    if 'approvals' in change['currentPatchSet']:
        approvals = map_approvals(change['currentPatchSet']['approvals'])
    else:
        approvals = {}
    # Fewer than two human reviewers: the owner needs to add more.
    if len(reviewers) < 2:
        return ("{0} has added insufficient reviewers.", [owner], None)
    if ('Verified' in approvals):
        verified = approvals['Verified']
        # Any negative Verified score is a failed verification.
        scores = list(filter(lambda x: verified[x] < 0, verified))
        if len(scores):
            return ("{0} should resolve verification failure.", [owner], None)
    if ('Code-Review' not in approvals):
        return ("Missing code review by {0}.", reviewers, None)
    reviewed = approvals['Code-Review']
    rejected_by = list(filter(lambda x: reviewed[x] < 0, reviewed))
    if len(rejected_by):
        return ("{0} should resolve code review comments.", [owner], None)
    # Need at least two positive reviews; chase the reviewers still missing.
    reviewed_by = list(filter(lambda x: reviewed[x] > 0, reviewed))
    if len(reviewed_by) < 2:
        return ("Missing code review by {0}.",
                set(reviewers) - set(reviewed_by), None)
    if ('Verified' not in approvals):
        return ("May be missing Jenkins verification ({0}).", [owner], None)
    # Out-of-date or unmerged dependencies block the merge.
    if ('dependsOn' in change) and (len(change['dependsOn'])):
        for dep in change['dependsOn']:
            if not dep['isCurrentPatchSet']:
                return ("Depends on out of date patch set {1} ({0}).",
                        [owner], dep['id'])
            dep_info = change_by_id(dep['id'])
            if not dep_info:
                continue
            if dep_info['status'] != "MERGED":
                return ("Depends on unmerged patch set {1} ({0}).",
                        [owner], dep['id'])
    # A +2 means ready to merge; otherwise it awaits a maintainer review.
    approved_by = list(filter(lambda x: reviewed[x] == 2, reviewed))
    if len(approved_by):
        return ("Ready for merge by {0}.", approved_by, None)
    else:
        return ("Awaiting merge review.", [], None)
def do_report(args):
    """Print a stall report for every open change."""
    for change in changes():
        print("{} - {}".format(change['url'], change['id']))
        print(change['subject'])
        message, people, dep = reason(change)
        # 'people' is a list/set of names; render it as a comma list.
        print(message.format(", ".join(people), dep))
        print("----")
# Command-line interface: global options plus a 'report' sub-command.
parser = argparse.ArgumentParser()
parser.add_argument('--age', help='Change age since last modified',
                    type=str, default="1d")
parser.add_argument('--owner', help='Change owner', type=str,
                    action='append')
subparsers = parser.add_subparsers()
report = subparsers.add_parser('report', help='Generate report')
report.set_defaults(func=do_report)

args = parser.parse_args()
if 'age' in args:
    option_age = args.age
if ('owner' in args) and args.owner:
    option_owner = " OR ".join("owner:" + owner for owner in args.owner)
if 'func' in args:
    args.func(args)
else:
    parser.print_help()
|
from BeautifulSoup import BeautifulSoup
from cms.plugins.text.models import Text
from arkestra_utilities.modifier_pool import adjuster_pool
def get_placeholder_width(context, plugin):
    """
    Return the width of the placeholder in which a plugin finds itself.

    The width is taken from the template context, e.g.:

        {% with
            adjust_width=current_page.flags.no_page_title  # depends on some context variable
            width_adjuster="absolute"   # the adjustment will be to an absolute value
            width_adjustment=200        # the value in pixels
            image_border_reduction=8
            background_classes="background"
            background_adjuster="px"
            background_adjustment=32
        %}
            {% placeholder body %}
        {% endwith %}
    """
    # Fall back from "placeholder_width" to "width" to 100 (100 is for admin).
    raw_width = (context.get("placeholder_width")
                 or context.get("width")
                 or 100.0)
    placeholder_width = float(raw_width)
    # Let every registered placeholder_width adjuster modify the value.
    for adjuster_class in adjuster_pool.adjusters["placeholder_width"]:
        placeholder_width = adjuster_class().modify(context, placeholder_width)
    return placeholder_width
def get_plugin_ancestry(plugin):
    """
    Builds a list of plugins, from the instance downwards, but excluding
    the root plugin.

    Walks up the parent chain from *plugin* and returns the chain in
    root-first order (the root plugin itself is not included).

    The stray debug print statement has been removed.
    """
    plugins = []
    while plugin.parent:
        plugins.append(plugin)
        plugin = plugin.parent
    return reversed(plugins)
def calculate_container_width(context, instance, width, auto=False):
    """Compute the effective width available to *instance*, starting from
    *width* and applying every registered width adjuster in turn."""
    markers = {}
    # we could in theory have nested text/layout plugins, but in practice
    # probably never will - it's not necessary, given the inner row/column
    # capabilities of the semantic editor - so this list of plugins will usually just contain the plugin we're working on
    plugins = get_plugin_ancestry(instance)
    for plugin in plugins:
        # get the body field (i.e. output HTML) of the Text object this item is inserted into
        body = Text.objects.get(id=plugin.parent_id).body
        # soup it up
        soup = BeautifulSoup(''.join(body))
        # find the element with that id in the HTML
        target = soup.find(id="plugin_obj_"+str(plugin.id))
        # run plugin_width modifiers
        for cls in adjuster_pool.adjusters["plugin_width"]:
            inst = cls()
            width = inst.modify(context, target, width, auto)
        elements = reversed(target.findParents())  # get the tree of elements and reverse it
        # we start with the root (i.e. document)
        for element in elements:
            # run image_width modifiers
            # check for attributes that have a cumulative adjusting affect - we need to act each time we find one
            for cls in adjuster_pool.adjusters["image_width"]:
                inst = cls()
                width = inst.modify(context, element, width)
            # run mark_and_modify modifiers, to mark only
            # check for attributes that have an effect only once - act after the loop
            for cls in adjuster_pool.adjusters["mark_and_modify"]:
                inst = cls()
                markers = inst.mark(context, element, markers)
    # run mark_and_modify modifiers, to modify
    for cls in adjuster_pool.adjusters["mark_and_modify"]:
        inst = cls()
        width = inst.modify(context, markers, width)
    return width
Commented out a debug print statement.
from BeautifulSoup import BeautifulSoup
from cms.plugins.text.models import Text
from arkestra_utilities.modifier_pool import adjuster_pool
def get_placeholder_width(context, plugin):
    """
    Return the width of the placeholder in which a plugin finds itself.

    The width is taken from the template context, e.g.:

        {% with
            adjust_width=current_page.flags.no_page_title  # depends on some context variable
            width_adjuster="absolute"   # the adjustment will be to an absolute value
            width_adjustment=200        # the value in pixels
            image_border_reduction=8
            background_classes="background"
            background_adjuster="px"
            background_adjustment=32
        %}
            {% placeholder body %}
        {% endwith %}
    """
    # Fall back from "placeholder_width" to "width" to 100 (100 is for admin).
    raw_width = (context.get("placeholder_width")
                 or context.get("width")
                 or 100.0)
    placeholder_width = float(raw_width)
    # Let every registered placeholder_width adjuster modify the value.
    for adjuster_class in adjuster_pool.adjusters["placeholder_width"]:
        placeholder_width = adjuster_class().modify(context, placeholder_width)
    return placeholder_width
def get_plugin_ancestry(plugin):
    """
    Builds a list of plugins, from the instance downwards, but excluding
    the root plugin.

    Walks up the parent chain from *plugin* and returns the chain in
    root-first order (the root plugin itself is not included).

    The commented-out debug print has been deleted rather than kept.
    """
    plugins = []
    while plugin.parent:
        plugins.append(plugin)
        plugin = plugin.parent
    return reversed(plugins)
def calculate_container_width(context, instance, width, auto=False):
    """Compute the effective width available to *instance*, starting from
    *width* and applying every registered width adjuster in turn."""
    markers = {}
    # we could in theory have nested text/layout plugins, but in practice
    # probably never will - it's not necessary, given the inner row/column
    # capabilities of the semantic editor - so this list of plugins will usually just contain the plugin we're working on
    plugins = get_plugin_ancestry(instance)
    for plugin in plugins:
        # get the body field (i.e. output HTML) of the Text object this item is inserted into
        body = Text.objects.get(id=plugin.parent_id).body
        # soup it up
        soup = BeautifulSoup(''.join(body))
        # find the element with that id in the HTML
        target = soup.find(id="plugin_obj_"+str(plugin.id))
        # run plugin_width modifiers
        for cls in adjuster_pool.adjusters["plugin_width"]:
            inst = cls()
            width = inst.modify(context, target, width, auto)
        elements = reversed(target.findParents())  # get the tree of elements and reverse it
        # we start with the root (i.e. document)
        for element in elements:
            # run image_width modifiers
            # check for attributes that have a cumulative adjusting affect - we need to act each time we find one
            for cls in adjuster_pool.adjusters["image_width"]:
                inst = cls()
                width = inst.modify(context, element, width)
            # run mark_and_modify modifiers, to mark only
            # check for attributes that have an effect only once - act after the loop
            for cls in adjuster_pool.adjusters["mark_and_modify"]:
                inst = cls()
                markers = inst.mark(context, element, markers)
    # run mark_and_modify modifiers, to modify
    for cls in adjuster_pool.adjusters["mark_and_modify"]:
        inst = cls()
        width = inst.modify(context, markers, width)
    return width
|
import hashlib
import signal
from collections import Counter, defaultdict
from datetime import datetime, timedelta
from django.conf import settings
from corehq.util.metrics import metrics_counter, metrics_histogram_timer
from pillowtop.checkpoints.manager import KafkaPillowCheckpoint
from pillowtop.const import DEFAULT_PROCESSOR_CHUNK_SIZE
from pillowtop.exceptions import PillowConfigError
from pillowtop.logger import pillow_logging
from pillowtop.pillow.interface import ConstructedPillow
from pillowtop.processors import BulkPillowProcessor
from pillowtop.utils import ensure_document_exists, ensure_matched_revisions, bulk_fetch_changes_docs
from corehq.apps.change_feed.consumer.feed import (
KafkaChangeFeed,
KafkaCheckpointEventHandler,
)
from corehq.apps.change_feed.topics import LOCATION as LOCATION_TOPIC
from corehq.apps.domain.dbaccessors import get_domain_ids_by_names
from corehq.apps.userreports.const import KAFKA_TOPICS
from corehq.apps.userreports.data_source_providers import (
DynamicDataSourceProvider,
StaticDataSourceProvider,
)
from corehq.apps.userreports.exceptions import (
BadSpecError,
StaleRebuildError,
TableRebuildError,
UserReportsWarning,
)
from corehq.apps.userreports.models import AsyncIndicator
from corehq.apps.userreports.rebuild import (
get_table_diffs,
get_tables_rebuild_migrate,
migrate_tables,
)
from corehq.apps.userreports.specs import EvaluationContext
from corehq.apps.userreports.sql import get_metadata
from corehq.apps.userreports.tasks import rebuild_indicators
from corehq.apps.userreports.util import get_indicator_adapter
from corehq.sql_db.connections import connection_manager
from corehq.util.soft_assert import soft_assert
from corehq.util.timer import TimingContext
# How often a pillow re-checks its data source configs for changes.
REBUILD_CHECK_INTERVAL = 3 * 60 * 60  # in seconds
# Processing time (seconds) above which a single UCR doc is logged as slow.
LONG_UCR_LOGGING_THRESHOLD = 0.5
class WarmShutdown(object):
    # modified from https://stackoverflow.com/a/50174144
    shutting_down = False

    def __enter__(self):
        # Trap SIGTERM for the duration of the protected section,
        # remembering the previous handler so it can be restored.
        self.saved_handler = signal.signal(signal.SIGTERM, self.handler)

    def __exit__(self, exc_type, exc_value, traceback):
        # If a SIGTERM arrived while we were working (and nothing else
        # went wrong), honor it now before restoring the old handler.
        if exc_type is None and self.shutting_down:
            exit(0)
        signal.signal(signal.SIGTERM, self.saved_handler)

    def handler(self, signum, frame):
        # Only record the request; __exit__ acts on it.
        self.shutting_down = True
def time_ucr_process_change(method):
    """Decorator that logs a warning when a UCR doc is slow to process.

    Expects to wrap a method called as (self, domain, table, doc, ...):
    args[2] is the indicator table adapter and args[3] the document,
    which are used to build the slow-processing log message.

    Fix: the original did not preserve the wrapped method's metadata;
    functools.wraps is now applied.
    """
    from functools import wraps

    @wraps(method)
    def timed(*args, **kw):
        ts = datetime.now()
        result = method(*args, **kw)
        te = datetime.now()
        seconds = (te - ts).total_seconds()
        if seconds > LONG_UCR_LOGGING_THRESHOLD:
            table = args[2]
            doc = args[3]
            log_message = "UCR data source {} on doc_id {} took {} seconds to process".format(
                table.config._id, doc['_id'], seconds
            )
            pillow_logging.warning(log_message)
        return result

    return timed
def _filter_by_hash(configs, ucr_division):
ucr_start = ucr_division[0]
ucr_end = ucr_division[-1]
filtered_configs = []
for config in configs:
table_hash = hashlib.md5(config.table_id.encode('utf-8')).hexdigest()[0]
if ucr_start <= table_hash <= ucr_end:
filtered_configs.append(config)
return filtered_configs
def _filter_missing_domains(configs):
    """Return a list of configs whose domain exists on this environment.

    Only static configs are checked; dynamic configs always pass through.
    """
    domain_names = [config.domain for config in configs if config.is_static]
    existing = set(get_domain_ids_by_names(domain_names))
    kept = []
    for config in configs:
        if config.is_static and config.domain not in existing:
            continue
        kept.append(config)
    return kept
class ConfigurableReportTableManagerMixin(object):
    """Manages the set of UCR table adapters a processor writes to:
    bootstraps them from data source configs, filters which configs this
    process owns, and keeps them fresh as configs change."""

    def __init__(self, data_source_providers, ucr_division=None,
                 include_ucrs=None, exclude_ucrs=None, bootstrap_interval=REBUILD_CHECK_INTERVAL,
                 run_migrations=True):
        """Initializes the processor for UCRs

        Keyword Arguments:
        ucr_division -- two hexadecimal digits that are used to determine a subset of UCR
                        datasources to process. The second digit should be higher than the
                        first
        include_ucrs -- list of ucr 'table_ids' to be included in this processor
        exclude_ucrs -- list of ucr 'table_ids' to be excluded in this processor
        bootstrap_interval -- time in seconds when the pillow checks for any data source changes
        run_migrations -- If True, rebuild tables if the data source changes.
                          Otherwise, do not attempt to change database
        """
        self.bootstrapped = False
        self.last_bootstrapped = self.last_imported = datetime.utcnow()
        self.data_source_providers = data_source_providers
        self.ucr_division = ucr_division
        self.include_ucrs = include_ucrs
        self.exclude_ucrs = exclude_ucrs
        self.bootstrap_interval = bootstrap_interval
        self.run_migrations = run_migrations
        # The two selection mechanisms are mutually exclusive.
        if self.include_ucrs and self.ucr_division:
            raise PillowConfigError("You can't have include_ucrs and ucr_division")

    def get_all_configs(self):
        # All data source configs from every provider, unfiltered.
        return [
            source
            for provider in self.data_source_providers
            for source in provider.get_data_sources()
        ]

    def get_filtered_configs(self, configs=None):
        # Apply exclude/include/division filters, then drop configs whose
        # domain does not exist on this environment.
        configs = configs or self.get_all_configs()
        if self.exclude_ucrs:
            configs = [config for config in configs if config.table_id not in self.exclude_ucrs]
        if self.include_ucrs:
            configs = [config for config in configs if config.table_id in self.include_ucrs]
        elif self.ucr_division:
            configs = _filter_by_hash(configs, self.ucr_division)
        configs = _filter_missing_domains(configs)
        return configs

    def needs_bootstrap(self):
        # True on first use, or when the bootstrap interval has elapsed.
        return (
            not self.bootstrapped
            or datetime.utcnow() - self.last_bootstrapped > timedelta(seconds=self.bootstrap_interval)
        )

    def bootstrap_if_needed(self):
        if self.needs_bootstrap():
            self.bootstrap()
        else:
            # Cheap incremental refresh between full bootstraps.
            self._pull_in_new_and_modified_data_sources()

    def bootstrap(self, configs=None):
        # Rebuild the domain -> adapters map from scratch.
        configs = self.get_filtered_configs(configs)
        if not configs:
            pillow_logging.warning("UCR pillow has no configs to process")
        self.table_adapters_by_domain = defaultdict(list)
        for config in configs:
            self.table_adapters_by_domain[config.domain].append(
                self._get_indicator_adapter(config)
            )
        if self.run_migrations:
            self.rebuild_tables_if_necessary()
        self.bootstrapped = True
        self.last_bootstrapped = datetime.utcnow()

    def _get_indicator_adapter(self, config):
        return get_indicator_adapter(config, raise_errors=True, load_source='change_feed')

    def rebuild_tables_if_necessary(self):
        self._rebuild_sql_tables([
            adapter
            for adapter_list in self.table_adapters_by_domain.values()
            for adapter in adapter_list
        ])

    def _rebuild_sql_tables(self, adapters):
        # Group adapters by engine/table, diff against the live schema, and
        # rebuild or migrate each table as needed.
        tables_by_engine = defaultdict(dict)
        all_adapters = []
        for adapter in adapters:
            # Multi-adapter wrappers expose their children via all_adapters.
            if getattr(adapter, 'all_adapters', None):
                all_adapters.extend(adapter.all_adapters)
            else:
                all_adapters.append(adapter)
        for adapter in all_adapters:
            tables_by_engine[adapter.engine_id][adapter.get_table().name] = adapter
        _assert = soft_assert(notify_admins=True)
        _notify_rebuild = lambda msg, obj: _assert(False, msg, obj)
        for engine_id, table_map in tables_by_engine.items():
            table_names = list(table_map)
            engine = connection_manager.get_engine(engine_id)
            diffs = get_table_diffs(engine, table_names, get_metadata(engine_id))
            tables_to_act_on = get_tables_rebuild_migrate(diffs)
            for table_name in tables_to_act_on.rebuild:
                pillow_logging.debug("[rebuild] Rebuilding table: %s", table_name)
                sql_adapter = table_map[table_name]
                table_diffs = [diff for diff in diffs if diff.table_name == table_name]
                if not sql_adapter.config.is_static:
                    # Dynamic configs may be stale; notify instead of crashing.
                    try:
                        self.rebuild_table(sql_adapter, table_diffs)
                    except TableRebuildError as e:
                        _notify_rebuild(str(e), sql_adapter.config.to_json())
                else:
                    self.rebuild_table(sql_adapter, table_diffs)
            self.migrate_tables(engine, diffs, tables_to_act_on.migrate, table_map)

    def migrate_tables(self, engine, diffs, table_names, adapters_by_table):
        pillow_logging.debug("[rebuild] Application migrations to tables: %s", table_names)
        migration_diffs = [diff for diff in diffs if diff.table_name in table_names]
        # Calls the module-level migrate_tables helper (class scope does not
        # shadow it inside a method body).
        changes = migrate_tables(engine, migration_diffs)
        for table, diffs in changes.items():
            adapter = adapters_by_table[table]
            adapter.log_table_migrate(source='pillowtop', diffs=diffs)

    def rebuild_table(self, adapter, diffs=None):
        config = adapter.config
        if not config.is_static:
            # Guard against rebuilding from a stale in-memory config.
            latest_rev = config.get_db().get_rev(config._id)
            if config._rev != latest_rev:
                raise StaleRebuildError('Tried to rebuild a stale table ({})! Ignoring...'.format(config))
        if config.disable_destructive_rebuild and adapter.table_exists:
            # Destructive rebuilds are disabled; record the skip instead.
            diff_dicts = [diff.to_dict() for diff in diffs]
            adapter.log_table_rebuild_skipped(source='pillowtop', diffs=diff_dicts)
            return
        # Rebuild asynchronously via the task queue.
        rebuild_indicators.delay(adapter.config.get_id, source='pillowtop', engine_id=adapter.engine_id)

    def _pull_in_new_and_modified_data_sources(self):
        """
        Find any data sources that have been modified since the last time this was bootstrapped
        and update the in-memory references.
        """
        new_last_imported = datetime.utcnow()
        new_data_sources = [
            source
            for provider in self.data_source_providers
            for source in provider.get_data_sources_modified_since(self.last_imported)
        ]
        self._add_data_sources_to_table_adapters(new_data_sources)
        self.last_imported = new_last_imported

    def _add_data_sources_to_table_adapters(self, new_data_sources):
        for new_data_source in new_data_sources:
            pillow_logging.info(f'updating modified data source: {new_data_source.domain}: {new_data_source._id}')
            domain_adapters = self.table_adapters_by_domain[new_data_source.domain]
            # remove any previous adapters if they existed
            domain_adapters = [
                adapter for adapter in domain_adapters if adapter.config._id != new_data_source._id
            ]
            # add a new one
            domain_adapters.append(self._get_indicator_adapter(new_data_source))
            # update dictionary
            self.table_adapters_by_domain[new_data_source.domain] = domain_adapters
class ConfigurableReportPillowProcessor(ConfigurableReportTableManagerMixin, BulkPillowProcessor):
    """Generic processor for UCR.

    Reads from:
      - SQLLocation
      - Form data source
      - Case data source

    Writes to:
      - UCR database
    """

    # Cumulative per-domain processing time, reported in checkpoint_updated().
    domain_timing_context = Counter()

    @time_ucr_process_change
    def _save_doc_to_table(self, domain, table, doc, eval_context):
        # best effort will swallow errors in the table
        try:
            table.best_effort_save(doc, eval_context)
        except UserReportsWarning:
            # remove it until the next bootstrap call
            self.table_adapters_by_domain[domain].remove(table)

    def process_changes_chunk(self, changes):
        """
        Update UCR tables in bulk by breaking up changes per domain per UCR table.
        If an exception is raised in bulk operations of a set of changes,
        those changes are returned to pillow for serial reprocessing.
        """
        self.bootstrap_if_needed()
        # break up changes by domain
        changes_by_domain = defaultdict(list)
        for change in changes:
            # skip if no domain or no UCR tables in the domain
            if change.metadata.domain and change.metadata.domain in self.table_adapters_by_domain:
                changes_by_domain[change.metadata.domain].append(change)

        retry_changes = set()
        change_exceptions = []
        for domain, changes_chunk in changes_by_domain.items():
            # Defer SIGTERM while a domain's chunk is in flight.
            with WarmShutdown():
                failed, exceptions = self._process_chunk_for_domain(domain, changes_chunk)
                retry_changes.update(failed)
                change_exceptions.extend(exceptions)

        return retry_changes, change_exceptions

    def _process_chunk_for_domain(self, domain, changes_chunk):
        # Returns (changes to retry serially, [(change, exception), ...]).
        adapters = list(self.table_adapters_by_domain[domain])
        changes_by_id = {change.id: change for change in changes_chunk}
        to_delete_by_adapter = defaultdict(list)
        rows_to_save_by_adapter = defaultdict(list)
        async_configs_by_doc_id = defaultdict(list)
        to_update = {change for change in changes_chunk if not change.deleted}
        with self._metrics_timer('extract'):
            retry_changes, docs = bulk_fetch_changes_docs(to_update, domain)
        change_exceptions = []

        with self._metrics_timer('single_batch_transform'):
            for doc in docs:
                change = changes_by_id[doc['_id']]
                doc_subtype = change.metadata.document_subtype
                eval_context = EvaluationContext(doc)
                with self._metrics_timer('single_doc_transform'):
                    for adapter in adapters:
                        with self._metrics_timer('transform', adapter.config._id):
                            if adapter.config.filter(doc, eval_context):
                                if adapter.run_asynchronous:
                                    async_configs_by_doc_id[doc['_id']].append(adapter.config._id)
                                else:
                                    try:
                                        rows_to_save_by_adapter[adapter].extend(adapter.get_all_values(doc, eval_context))
                                    except Exception as e:
                                        change_exceptions.append((change, e))
                                eval_context.reset_iteration()
                            elif (doc_subtype is None
                                    or doc_subtype in adapter.config.get_case_type_or_xmlns_filter()):
                                # Delete if the subtype is unknown or
                                # if the subtype matches our filters, but the full filter no longer applies
                                to_delete_by_adapter[adapter].append(doc)

        with self._metrics_timer('single_batch_delete'):
            # bulk delete by adapter
            to_delete = [{'_id': c.id} for c in changes_chunk if c.deleted]
            for adapter in adapters:
                delete_docs = to_delete_by_adapter[adapter] + to_delete
                if not delete_docs:
                    continue
                with self._metrics_timer('delete', adapter.config._id):
                    try:
                        adapter.bulk_delete(delete_docs)
                    except Exception:
                        # Fall back to serial reprocessing of these changes.
                        delete_ids = [doc['_id'] for doc in delete_docs]
                        retry_changes.update([c for c in changes_chunk if c.id in delete_ids])

        with self._metrics_timer('single_batch_load'):
            # bulk update by adapter
            for adapter, rows in rows_to_save_by_adapter.items():
                with self._metrics_timer('load', adapter.config._id):
                    try:
                        adapter.save_rows(rows)
                    except Exception:
                        retry_changes.update(to_update)

        if async_configs_by_doc_id:
            with self._metrics_timer('async_config_load'):
                doc_type_by_id = {
                    _id: changes_by_id[_id].metadata.document_type
                    for _id in async_configs_by_doc_id.keys()
                }
                AsyncIndicator.bulk_update_records(async_configs_by_doc_id, domain, doc_type_by_id)

        return retry_changes, change_exceptions

    def _metrics_timer(self, step, config_id=None):
        # Histogram timer for one pipeline step; per-config tagging is
        # restricted to enterprise environments to bound tag cardinality.
        tags = {
            'action': step,
            'index': 'ucr',
        }
        if config_id and settings.ENTERPRISE_MODE:
            tags['config_id'] = config_id
        return metrics_histogram_timer(
            'commcare.change_feed.processor.timing',
            timing_buckets=(.03, .1, .3, 1, 3, 10), tags=tags
        )

    def process_change(self, change):
        # Serial (non-chunked) processing of a single change.
        self.bootstrap_if_needed()

        domain = change.metadata.domain
        if not domain or domain not in self.table_adapters_by_domain:
            # if no domain we won't save to any UCR table
            return

        if change.deleted:
            adapters = list(self.table_adapters_by_domain[domain])
            for table in adapters:
                table.delete({'_id': change.metadata.document_id})
            # NOTE(review): no early return here — a deleted change falls
            # through to get_document() below; confirm this is intended.

        async_tables = []
        doc = change.get_document()
        ensure_document_exists(change)
        ensure_matched_revisions(change, doc)

        if doc is None:
            return

        with TimingContext() as timer:
            eval_context = EvaluationContext(doc)
            # make copy to avoid modifying list during iteration
            adapters = list(self.table_adapters_by_domain[domain])
            doc_subtype = change.metadata.document_subtype
            for table in adapters:
                if table.config.filter(doc, eval_context):
                    if table.run_asynchronous:
                        async_tables.append(table.config._id)
                    else:
                        self._save_doc_to_table(domain, table, doc, eval_context)
                    eval_context.reset_iteration()
                elif (doc_subtype is None
                        or doc_subtype in table.config.get_case_type_or_xmlns_filter()):
                    table.delete(doc)

        if async_tables:
            AsyncIndicator.update_from_kafka_change(change, async_tables)

        self.domain_timing_context.update(**{
            domain: timer.duration
        })

    def checkpoint_updated(self):
        # Report the domains responsible for the top half of processing
        # time since the last checkpoint, then reset the counters.
        total_duration = sum(self.domain_timing_context.values())
        duration_seen = 0
        top_half_domains = {}
        for domain, duration in self.domain_timing_context.most_common():
            top_half_domains[domain] = duration
            duration_seen += duration
            if duration_seen >= total_duration // 2:
                break

        for domain, duration in top_half_domains.items():
            metrics_counter('commcare.change_feed.ucr_slow_log', duration, tags={
                'domain': domain
            })
        self.domain_timing_context.clear()
class ConfigurableReportKafkaPillow(ConstructedPillow):
    """Kafka-backed pillow wiring a ConfigurableReportPillowProcessor to a
    change feed and checkpoint, and delegating table management to it."""

    # todo; To remove after full rollout of https://github.com/dimagi/commcare-hq/pull/21329/
    def __init__(self, processor, pillow_name, topics, num_processes, process_num, retry_errors=False,
                 processor_chunk_size=0):
        change_feed = KafkaChangeFeed(
            topics, client_id=pillow_name, num_processes=num_processes, process_num=process_num
        )
        checkpoint = KafkaPillowCheckpoint(pillow_name, topics)
        event_handler = KafkaCheckpointEventHandler(
            checkpoint=checkpoint, checkpoint_frequency=1000, change_feed=change_feed,
            checkpoint_callback=processor
        )
        super(ConfigurableReportKafkaPillow, self).__init__(
            name=pillow_name,
            change_feed=change_feed,
            processor=processor,
            checkpoint=checkpoint,
            change_processed_event_handler=event_handler,
            processor_chunk_size=processor_chunk_size
        )
        # set by the superclass constructor
        assert self.processors is not None
        assert len(self.processors) == 1
        self._processor = self.processors[0]
        assert self._processor.bootstrapped is not None

        # retry errors defaults to False because there is not a solution to
        # distinguish between doc save errors and data source config errors
        self.retry_errors = retry_errors

    def bootstrap(self, configs=None):
        # Delegate to the processor's table manager.
        self._processor.bootstrap(configs)

    def rebuild_table(self, sql_adapter):
        # Delegate to the processor's table manager.
        self._processor.rebuild_table(sql_adapter)
def get_kafka_ucr_pillow(pillow_id='kafka-ucr-main', ucr_division=None,
                         include_ucrs=None, exclude_ucrs=None, topics=None,
                         num_processes=1, process_num=0,
                         processor_chunk_size=DEFAULT_PROCESSOR_CHUNK_SIZE, **kwargs):
    """UCR pillow that reads from all Kafka topics and writes data into the UCR database tables.

    Processes dynamic (database-defined) UCR data sources.

    Processors:
      - :py:class:`corehq.apps.userreports.pillow.ConfigurableReportPillowProcessor`
    """
    # todo; To remove after full rollout of https://github.com/dimagi/commcare-hq/pull/21329/
    topics = list(topics or KAFKA_TOPICS)
    ucr_processor = ConfigurableReportPillowProcessor(
        data_source_providers=[DynamicDataSourceProvider()],
        ucr_division=ucr_division,
        include_ucrs=include_ucrs,
        exclude_ucrs=exclude_ucrs,
        run_migrations=(process_num == 0),  # only first process runs migrations
    )
    return ConfigurableReportKafkaPillow(
        processor=ucr_processor,
        pillow_name=pillow_id,
        topics=topics,
        num_processes=num_processes,
        process_num=process_num,
        processor_chunk_size=processor_chunk_size,
    )
def get_kafka_ucr_static_pillow(pillow_id='kafka-ucr-static', ucr_division=None,
                                include_ucrs=None, exclude_ucrs=None, topics=None,
                                num_processes=1, process_num=0,
                                processor_chunk_size=DEFAULT_PROCESSOR_CHUNK_SIZE, **kwargs):
    """UCR pillow that reads from all Kafka topics and writes data into the UCR database tables.

    Only processes `static` UCR datasources (configuration lives in the codebase instead of
    the database).

    Processors:
      - :py:class:`corehq.apps.userreports.pillow.ConfigurableReportPillowProcessor`

    :param kwargs: ignored; accepted so all pillow factories share a signature
    """
    # todo; To remove after full rollout of https://github.com/dimagi/commcare-hq/pull/21329/
    # `list(...)` replaces the redundant `[t for t in topics]` copy loop; it still
    # defensively copies so a caller's list is never shared, and accepts tuples.
    topics = list(topics or KAFKA_TOPICS)
    return ConfigurableReportKafkaPillow(
        processor=ConfigurableReportPillowProcessor(
            data_source_providers=[StaticDataSourceProvider()],
            ucr_division=ucr_division,
            include_ucrs=include_ucrs,
            exclude_ucrs=exclude_ucrs,
            bootstrap_interval=7 * 24 * 60 * 60,  # 1 week
            run_migrations=(process_num == 0)  # only first process runs migrations
        ),
        pillow_name=pillow_id,
        topics=topics,
        num_processes=num_processes,
        process_num=process_num,
        # static configs live in code, so errors are doc-save errors, not config errors
        retry_errors=True,
        processor_chunk_size=processor_chunk_size,
    )
def get_location_pillow(pillow_id='location-ucr-pillow', include_ucrs=None,
                        num_processes=1, process_num=0, ucr_configs=None, **kwargs):
    """Processes updates to locations for UCR

    Note this is only applicable if a domain on the environment has `LOCATIONS_IN_UCR`
    flag enabled.

    Processors:
      - :py:func:`corehq.apps.userreports.pillow.ConfigurableReportPillowProcessor`
    """
    feed = KafkaChangeFeed(
        [LOCATION_TOPIC], client_id=pillow_id,
        num_processes=num_processes, process_num=process_num,
    )
    processor = ConfigurableReportPillowProcessor(
        data_source_providers=[DynamicDataSourceProvider('Location'), StaticDataSourceProvider('Location')],
        include_ucrs=include_ucrs,
    )
    if ucr_configs:
        processor.bootstrap(ucr_configs)
    location_checkpoint = KafkaPillowCheckpoint(pillow_id, [LOCATION_TOPIC])
    handler = KafkaCheckpointEventHandler(
        checkpoint=location_checkpoint,
        checkpoint_frequency=1000,
        change_feed=feed,
        checkpoint_callback=processor,
    )
    return ConstructedPillow(
        name=pillow_id,
        change_feed=feed,
        checkpoint=location_checkpoint,
        change_processed_event_handler=handler,
        processor=[processor],
    )
# Commit: Make "Rebuilding table" message show up in pillow logs
import hashlib
import signal
from collections import Counter, defaultdict
from datetime import datetime, timedelta
from django.conf import settings
from corehq.util.metrics import metrics_counter, metrics_histogram_timer
from pillowtop.checkpoints.manager import KafkaPillowCheckpoint
from pillowtop.const import DEFAULT_PROCESSOR_CHUNK_SIZE
from pillowtop.exceptions import PillowConfigError
from pillowtop.logger import pillow_logging
from pillowtop.pillow.interface import ConstructedPillow
from pillowtop.processors import BulkPillowProcessor
from pillowtop.utils import ensure_document_exists, ensure_matched_revisions, bulk_fetch_changes_docs
from corehq.apps.change_feed.consumer.feed import (
KafkaChangeFeed,
KafkaCheckpointEventHandler,
)
from corehq.apps.change_feed.topics import LOCATION as LOCATION_TOPIC
from corehq.apps.domain.dbaccessors import get_domain_ids_by_names
from corehq.apps.userreports.const import KAFKA_TOPICS
from corehq.apps.userreports.data_source_providers import (
DynamicDataSourceProvider,
StaticDataSourceProvider,
)
from corehq.apps.userreports.exceptions import (
BadSpecError,
StaleRebuildError,
TableRebuildError,
UserReportsWarning,
)
from corehq.apps.userreports.models import AsyncIndicator
from corehq.apps.userreports.rebuild import (
get_table_diffs,
get_tables_rebuild_migrate,
migrate_tables,
)
from corehq.apps.userreports.specs import EvaluationContext
from corehq.apps.userreports.sql import get_metadata
from corehq.apps.userreports.tasks import rebuild_indicators
from corehq.apps.userreports.util import get_indicator_adapter
from corehq.sql_db.connections import connection_manager
from corehq.util.soft_assert import soft_assert
from corehq.util.timer import TimingContext
REBUILD_CHECK_INTERVAL = 3 * 60 * 60  # in seconds
# seconds; a single doc taking longer than this to process triggers a warning log
LONG_UCR_LOGGING_THRESHOLD = 0.5
class WarmShutdown(object):
    """Defer SIGTERM handling until the current unit of work completes.

    Adapted from https://stackoverflow.com/a/50174144.
    """

    shutting_down = False  # flipped (per instance) by handler() when SIGTERM arrives

    def __enter__(self):
        # Install our deferred handler, remembering the previous one.
        self.current_handler = signal.signal(signal.SIGTERM, self.handler)

    def __exit__(self, exc_type, exc_value, traceback):
        # Honor a deferred shutdown request only on clean exit from the block.
        should_exit = self.shutting_down and exc_type is None
        if should_exit:
            exit(0)
        signal.signal(signal.SIGTERM, self.current_handler)

    def handler(self, signum, frame):
        # Record the request; the actual shutdown happens in __exit__.
        self.shutting_down = True
def time_ucr_process_change(method):
    """Decorator for UCR save methods: log a warning when one doc is slow.

    Expects the wrapped signature ``(self, domain, table, doc, eval_context)``
    so that ``args[2]`` is the table adapter and ``args[3]`` the document.
    """
    from functools import wraps  # local import keeps this change self-contained

    @wraps(method)  # fix: preserve the wrapped method's name/docstring for logging and debugging
    def timed(*args, **kw):
        ts = datetime.now()
        result = method(*args, **kw)
        te = datetime.now()
        seconds = (te - ts).total_seconds()
        if seconds > LONG_UCR_LOGGING_THRESHOLD:
            table = args[2]
            doc = args[3]
            log_message = "UCR data source {} on doc_id {} took {} seconds to process".format(
                table.config._id, doc['_id'], seconds
            )
            pillow_logging.warning(log_message)
        return result
    return timed
def _filter_by_hash(configs, ucr_division):
ucr_start = ucr_division[0]
ucr_end = ucr_division[-1]
filtered_configs = []
for config in configs:
table_hash = hashlib.md5(config.table_id.encode('utf-8')).hexdigest()[0]
if ucr_start <= table_hash <= ucr_end:
filtered_configs.append(config)
return filtered_configs
def _filter_missing_domains(configs):
    """Return a list of configs whose domain exists on this environment"""
    static_domain_names = [config.domain for config in configs if config.is_static]
    known_domains = set(get_domain_ids_by_names(static_domain_names))
    return [
        config for config in configs
        if not config.is_static or config.domain in known_domains
    ]
class ConfigurableReportTableManagerMixin(object):
    """Maintains the set of UCR table adapters a pillow processor writes to.

    Loads data source configs from the given providers, filters them
    (include/exclude lists, hash-based division, missing domains), keeps
    ``table_adapters_by_domain`` up to date, and triggers table
    rebuilds/migrations when configs change.
    """

    def __init__(self, data_source_providers, ucr_division=None,
                 include_ucrs=None, exclude_ucrs=None, bootstrap_interval=REBUILD_CHECK_INTERVAL,
                 run_migrations=True):
        """Initializes the processor for UCRs

        Keyword Arguments:
        ucr_division -- two hexadecimal digits that are used to determine a subset of UCR
                        datasources to process. The second digit should be higher than the
                        first
        include_ucrs -- list of ucr 'table_ids' to be included in this processor
        exclude_ucrs -- list of ucr 'table_ids' to be excluded in this processor
        bootstrap_interval -- time in seconds when the pillow checks for any data source changes
        run_migrations -- If True, rebuild tables if the data source changes.
                          Otherwise, do not attempt to change database
        """
        self.bootstrapped = False
        self.last_bootstrapped = self.last_imported = datetime.utcnow()
        self.data_source_providers = data_source_providers
        self.ucr_division = ucr_division
        self.include_ucrs = include_ucrs
        self.exclude_ucrs = exclude_ucrs
        self.bootstrap_interval = bootstrap_interval
        self.run_migrations = run_migrations
        # the two sharding mechanisms are mutually exclusive
        if self.include_ucrs and self.ucr_division:
            raise PillowConfigError("You can't have include_ucrs and ucr_division")

    def get_all_configs(self):
        """All data source configs from every configured provider."""
        return [
            source
            for provider in self.data_source_providers
            for source in provider.get_data_sources()
        ]

    def get_filtered_configs(self, configs=None):
        """Apply exclude/include/division and missing-domain filters to configs."""
        configs = configs or self.get_all_configs()

        if self.exclude_ucrs:
            configs = [config for config in configs if config.table_id not in self.exclude_ucrs]

        if self.include_ucrs:
            configs = [config for config in configs if config.table_id in self.include_ucrs]
        elif self.ucr_division:
            configs = _filter_by_hash(configs, self.ucr_division)

        configs = _filter_missing_domains(configs)

        return configs

    def needs_bootstrap(self):
        """True if never bootstrapped or the bootstrap interval has elapsed."""
        return (
            not self.bootstrapped
            or datetime.utcnow() - self.last_bootstrapped > timedelta(seconds=self.bootstrap_interval)
        )

    def bootstrap_if_needed(self):
        """Full re-bootstrap when due; otherwise just pull in modified data sources."""
        if self.needs_bootstrap():
            self.bootstrap()
        else:
            self._pull_in_new_and_modified_data_sources()

    def bootstrap(self, configs=None):
        """(Re)build the domain -> adapter map, optionally rebuilding tables."""
        configs = self.get_filtered_configs(configs)
        if not configs:
            pillow_logging.warning("UCR pillow has no configs to process")

        self.table_adapters_by_domain = defaultdict(list)
        for config in configs:
            self.table_adapters_by_domain[config.domain].append(
                self._get_indicator_adapter(config)
            )
        if self.run_migrations:
            self.rebuild_tables_if_necessary()
        self.bootstrapped = True
        self.last_bootstrapped = datetime.utcnow()

    def _get_indicator_adapter(self, config):
        return get_indicator_adapter(config, raise_errors=True, load_source='change_feed')

    def rebuild_tables_if_necessary(self):
        self._rebuild_sql_tables([
            adapter
            for adapter_list in self.table_adapters_by_domain.values()
            for adapter in adapter_list
        ])

    def _rebuild_sql_tables(self, adapters):
        """Diff each engine's tables against config metadata; rebuild or migrate as needed."""
        tables_by_engine = defaultdict(dict)
        all_adapters = []
        for adapter in adapters:
            # some adapters wrap several (e.g. multi-engine) sub-adapters
            if getattr(adapter, 'all_adapters', None):
                all_adapters.extend(adapter.all_adapters)
            else:
                all_adapters.append(adapter)
        for adapter in all_adapters:
            tables_by_engine[adapter.engine_id][adapter.get_table().name] = adapter

        _assert = soft_assert(notify_admins=True)
        _notify_rebuild = lambda msg, obj: _assert(False, msg, obj)

        for engine_id, table_map in tables_by_engine.items():
            table_names = list(table_map)
            engine = connection_manager.get_engine(engine_id)

            diffs = get_table_diffs(engine, table_names, get_metadata(engine_id))

            tables_to_act_on = get_tables_rebuild_migrate(diffs)
            for table_name in tables_to_act_on.rebuild:
                pillow_logging.info("[rebuild] Rebuilding table: %s", table_name)
                sql_adapter = table_map[table_name]
                table_diffs = [diff for diff in diffs if diff.table_name == table_name]
                if not sql_adapter.config.is_static:
                    try:
                        self.rebuild_table(sql_adapter, table_diffs)
                    except TableRebuildError as e:
                        # dynamic configs may be stale/broken; notify instead of crashing the pillow
                        _notify_rebuild(str(e), sql_adapter.config.to_json())
                else:
                    self.rebuild_table(sql_adapter, table_diffs)

            self.migrate_tables(engine, diffs, tables_to_act_on.migrate, table_map)

    def migrate_tables(self, engine, diffs, table_names, adapters_by_table):
        """Apply in-place schema migrations and record them on each adapter."""
        pillow_logging.debug("[rebuild] Application migrations to tables: %s", table_names)
        migration_diffs = [diff for diff in diffs if diff.table_name in table_names]
        changes = migrate_tables(engine, migration_diffs)
        for table, diffs in changes.items():
            adapter = adapters_by_table[table]
            adapter.log_table_migrate(source='pillowtop', diffs=diffs)

    def rebuild_table(self, adapter, diffs=None):
        """Kick off an async rebuild of the adapter's table (or log a skip)."""
        config = adapter.config
        if not config.is_static:
            # guard against rebuilding from a stale in-memory copy of the config
            latest_rev = config.get_db().get_rev(config._id)
            if config._rev != latest_rev:
                raise StaleRebuildError('Tried to rebuild a stale table ({})! Ignoring...'.format(config))
        if config.disable_destructive_rebuild and adapter.table_exists:
            diff_dicts = [diff.to_dict() for diff in diffs]
            adapter.log_table_rebuild_skipped(source='pillowtop', diffs=diff_dicts)
            return
        rebuild_indicators.delay(adapter.config.get_id, source='pillowtop', engine_id=adapter.engine_id)

    def _pull_in_new_and_modified_data_sources(self):
        """
        Find any data sources that have been modified since the last time this was bootstrapped
        and update the in-memory references.
        """
        new_last_imported = datetime.utcnow()
        new_data_sources = [
            source
            for provider in self.data_source_providers
            for source in provider.get_data_sources_modified_since(self.last_imported)
        ]
        self._add_data_sources_to_table_adapters(new_data_sources)
        self.last_imported = new_last_imported

    def _add_data_sources_to_table_adapters(self, new_data_sources):
        """Swap in fresh adapters for any modified data source configs."""
        for new_data_source in new_data_sources:
            pillow_logging.info(f'updating modified data source: {new_data_source.domain}: {new_data_source._id}')
            domain_adapters = self.table_adapters_by_domain[new_data_source.domain]
            # remove any previous adapters if they existed
            domain_adapters = [
                adapter for adapter in domain_adapters if adapter.config._id != new_data_source._id
            ]
            # add a new one
            domain_adapters.append(self._get_indicator_adapter(new_data_source))
            # update dictionary
            self.table_adapters_by_domain[new_data_source.domain] = domain_adapters
class ConfigurableReportPillowProcessor(ConfigurableReportTableManagerMixin, BulkPillowProcessor):
    """Generic processor for UCR.

    Reads from:
      - SQLLocation
      - Form data source
      - Case data source

    Writes to:
      - UCR database
    """

    # cumulative per-domain processing time since the last checkpoint;
    # reported and cleared in checkpoint_updated()
    domain_timing_context = Counter()

    @time_ucr_process_change
    def _save_doc_to_table(self, domain, table, doc, eval_context):
        # best effort will swallow errors in the table
        try:
            table.best_effort_save(doc, eval_context)
        except UserReportsWarning:
            # remove it until the next bootstrap call
            self.table_adapters_by_domain[domain].remove(table)

    def process_changes_chunk(self, changes):
        """
        Update UCR tables in bulk by breaking up changes per domain per UCR table.

        If an exception is raised in bulk operations of a set of changes,
        those changes are returned to pillow for serial reprocessing.
        """
        self.bootstrap_if_needed()
        # break up changes by domain
        changes_by_domain = defaultdict(list)
        for change in changes:
            # skip if no domain or no UCR tables in the domain
            if change.metadata.domain and change.metadata.domain in self.table_adapters_by_domain:
                changes_by_domain[change.metadata.domain].append(change)

        retry_changes = set()
        change_exceptions = []
        for domain, changes_chunk in changes_by_domain.items():
            # defer SIGTERM while a domain chunk is mid-flight
            with WarmShutdown():
                failed, exceptions = self._process_chunk_for_domain(domain, changes_chunk)
            retry_changes.update(failed)
            change_exceptions.extend(exceptions)

        return retry_changes, change_exceptions

    def _process_chunk_for_domain(self, domain, changes_chunk):
        """Transform, delete, and save one domain's changes across its adapters.

        Returns (changes to retry serially, [(change, exception), ...]).
        """
        adapters = list(self.table_adapters_by_domain[domain])
        changes_by_id = {change.id: change for change in changes_chunk}
        to_delete_by_adapter = defaultdict(list)
        rows_to_save_by_adapter = defaultdict(list)
        async_configs_by_doc_id = defaultdict(list)
        to_update = {change for change in changes_chunk if not change.deleted}
        with self._metrics_timer('extract'):
            retry_changes, docs = bulk_fetch_changes_docs(to_update, domain)
        change_exceptions = []

        with self._metrics_timer('single_batch_transform'):
            for doc in docs:
                change = changes_by_id[doc['_id']]
                doc_subtype = change.metadata.document_subtype
                eval_context = EvaluationContext(doc)
                with self._metrics_timer('single_doc_transform'):
                    for adapter in adapters:
                        with self._metrics_timer('transform', adapter.config._id):
                            if adapter.config.filter(doc, eval_context):
                                if adapter.run_asynchronous:
                                    async_configs_by_doc_id[doc['_id']].append(adapter.config._id)
                                else:
                                    try:
                                        rows_to_save_by_adapter[adapter].extend(adapter.get_all_values(doc, eval_context))
                                    except Exception as e:
                                        change_exceptions.append((change, e))
                                eval_context.reset_iteration()
                            elif (doc_subtype is None
                                    or doc_subtype in adapter.config.get_case_type_or_xmlns_filter()):
                                # Delete if the subtype is unknown or
                                # if the subtype matches our filters, but the full filter no longer applies
                                to_delete_by_adapter[adapter].append(doc)

        with self._metrics_timer('single_batch_delete'):
            # bulk delete by adapter
            to_delete = [{'_id': c.id} for c in changes_chunk if c.deleted]
            for adapter in adapters:
                delete_docs = to_delete_by_adapter[adapter] + to_delete
                if not delete_docs:
                    continue
                with self._metrics_timer('delete', adapter.config._id):
                    try:
                        adapter.bulk_delete(delete_docs)
                    except Exception:
                        # retry the whole affected set serially rather than losing deletes
                        delete_ids = [doc['_id'] for doc in delete_docs]
                        retry_changes.update([c for c in changes_chunk if c.id in delete_ids])

        with self._metrics_timer('single_batch_load'):
            # bulk update by adapter
            for adapter, rows in rows_to_save_by_adapter.items():
                with self._metrics_timer('load', adapter.config._id):
                    try:
                        adapter.save_rows(rows)
                    except Exception:
                        retry_changes.update(to_update)

        if async_configs_by_doc_id:
            with self._metrics_timer('async_config_load'):
                doc_type_by_id = {
                    _id: changes_by_id[_id].metadata.document_type
                    for _id in async_configs_by_doc_id.keys()
                }
                AsyncIndicator.bulk_update_records(async_configs_by_doc_id, domain, doc_type_by_id)

        return retry_changes, change_exceptions

    def _metrics_timer(self, step, config_id=None):
        """Histogram timer for one pipeline step, optionally tagged by config."""
        tags = {
            'action': step,
            'index': 'ucr',
        }
        # per-config tags only on enterprise to bound metric cardinality
        if config_id and settings.ENTERPRISE_MODE:
            tags['config_id'] = config_id
        return metrics_histogram_timer(
            'commcare.change_feed.processor.timing',
            timing_buckets=(.03, .1, .3, 1, 3, 10), tags=tags
        )

    def process_change(self, change):
        """Serial (non-chunked) processing of a single change."""
        self.bootstrap_if_needed()

        domain = change.metadata.domain
        if not domain or domain not in self.table_adapters_by_domain:
            # if no domain we won't save to any UCR table
            return

        if change.deleted:
            adapters = list(self.table_adapters_by_domain[domain])
            for table in adapters:
                table.delete({'_id': change.metadata.document_id})

        async_tables = []
        doc = change.get_document()
        ensure_document_exists(change)
        ensure_matched_revisions(change, doc)

        if doc is None:
            return

        with TimingContext() as timer:
            eval_context = EvaluationContext(doc)
            # make copy to avoid modifying list during iteration
            adapters = list(self.table_adapters_by_domain[domain])
            doc_subtype = change.metadata.document_subtype
            for table in adapters:
                if table.config.filter(doc, eval_context):
                    if table.run_asynchronous:
                        async_tables.append(table.config._id)
                    else:
                        self._save_doc_to_table(domain, table, doc, eval_context)
                    eval_context.reset_iteration()
                elif (doc_subtype is None
                        or doc_subtype in table.config.get_case_type_or_xmlns_filter()):
                    table.delete(doc)

            if async_tables:
                AsyncIndicator.update_from_kafka_change(change, async_tables)

        self.domain_timing_context.update(**{
            domain: timer.duration
        })

    def checkpoint_updated(self):
        """Report the slowest domains (covering >= half of total time) and reset timings."""
        total_duration = sum(self.domain_timing_context.values())
        duration_seen = 0
        top_half_domains = {}
        for domain, duration in self.domain_timing_context.most_common():
            top_half_domains[domain] = duration
            duration_seen += duration
            if duration_seen >= total_duration // 2:
                break

        for domain, duration in top_half_domains.items():
            metrics_counter('commcare.change_feed.ucr_slow_log', duration, tags={
                'domain': domain
            })
        self.domain_timing_context.clear()
class ConfigurableReportKafkaPillow(ConstructedPillow):
    """Kafka-backed pillow wrapping a single ConfigurableReportPillowProcessor."""
    # todo; To remove after full rollout of https://github.com/dimagi/commcare-hq/pull/21329/

    def __init__(self, processor, pillow_name, topics, num_processes, process_num, retry_errors=False,
                 processor_chunk_size=0):
        """Wire a Kafka change feed, checkpoint, and event handler around `processor`."""
        change_feed = KafkaChangeFeed(
            topics, client_id=pillow_name, num_processes=num_processes, process_num=process_num
        )
        checkpoint = KafkaPillowCheckpoint(pillow_name, topics)
        event_handler = KafkaCheckpointEventHandler(
            checkpoint=checkpoint, checkpoint_frequency=1000, change_feed=change_feed,
            checkpoint_callback=processor
        )
        super(ConfigurableReportKafkaPillow, self).__init__(
            name=pillow_name,
            change_feed=change_feed,
            processor=processor,
            checkpoint=checkpoint,
            change_processed_event_handler=event_handler,
            processor_chunk_size=processor_chunk_size
        )
        # set by the superclass constructor
        assert self.processors is not None
        assert len(self.processors) == 1
        self._processor = self.processors[0]
        assert self._processor.bootstrapped is not None

        # retry errors defaults to False because there is not a solution to
        # distinguish between doc save errors and data source config errors
        self.retry_errors = retry_errors

    def bootstrap(self, configs=None):
        """Delegate (re)loading of UCR data source configs to the wrapped processor."""
        self._processor.bootstrap(configs)

    def rebuild_table(self, sql_adapter):
        """Delegate a table rebuild for the given adapter to the wrapped processor."""
        self._processor.rebuild_table(sql_adapter)
def get_kafka_ucr_pillow(pillow_id='kafka-ucr-main', ucr_division=None,
                         include_ucrs=None, exclude_ucrs=None, topics=None,
                         num_processes=1, process_num=0,
                         processor_chunk_size=DEFAULT_PROCESSOR_CHUNK_SIZE, **kwargs):
    """UCR pillow that reads from all Kafka topics and writes data into the UCR database tables.

    Processors:
      - :py:class:`corehq.apps.userreports.pillow.ConfigurableReportPillowProcessor`

    :param pillow_id: checkpoint / consumer-group name for this pillow
    :param ucr_division: optional two-hex-digit range used to shard data sources
    :param include_ucrs: optional whitelist of UCR table_ids
    :param exclude_ucrs: optional blacklist of UCR table_ids
    :param topics: Kafka topics to consume; defaults to all UCR topics
    :param kwargs: ignored; accepted so all pillow factories share a signature
    """
    # todo; To remove after full rollout of https://github.com/dimagi/commcare-hq/pull/21329/
    # `list(...)` replaces the redundant `[t for t in topics]` copy loop; it still
    # defensively copies so a caller's list is never shared, and accepts tuples.
    topics = list(topics or KAFKA_TOPICS)
    return ConfigurableReportKafkaPillow(
        processor=ConfigurableReportPillowProcessor(
            data_source_providers=[DynamicDataSourceProvider()],
            ucr_division=ucr_division,
            include_ucrs=include_ucrs,
            exclude_ucrs=exclude_ucrs,
            run_migrations=(process_num == 0)  # only first process runs migrations
        ),
        pillow_name=pillow_id,
        topics=topics,
        num_processes=num_processes,
        process_num=process_num,
        processor_chunk_size=processor_chunk_size,
    )
def get_kafka_ucr_static_pillow(pillow_id='kafka-ucr-static', ucr_division=None,
                                include_ucrs=None, exclude_ucrs=None, topics=None,
                                num_processes=1, process_num=0,
                                processor_chunk_size=DEFAULT_PROCESSOR_CHUNK_SIZE, **kwargs):
    """UCR pillow that reads from all Kafka topics and writes data into the UCR database tables.

    Only processes `static` UCR datasources (configuration lives in the codebase instead of
    the database).

    Processors:
      - :py:class:`corehq.apps.userreports.pillow.ConfigurableReportPillowProcessor`

    :param kwargs: ignored; accepted so all pillow factories share a signature
    """
    # todo; To remove after full rollout of https://github.com/dimagi/commcare-hq/pull/21329/
    # `list(...)` replaces the redundant `[t for t in topics]` copy loop; it still
    # defensively copies so a caller's list is never shared, and accepts tuples.
    topics = list(topics or KAFKA_TOPICS)
    return ConfigurableReportKafkaPillow(
        processor=ConfigurableReportPillowProcessor(
            data_source_providers=[StaticDataSourceProvider()],
            ucr_division=ucr_division,
            include_ucrs=include_ucrs,
            exclude_ucrs=exclude_ucrs,
            bootstrap_interval=7 * 24 * 60 * 60,  # 1 week
            run_migrations=(process_num == 0)  # only first process runs migrations
        ),
        pillow_name=pillow_id,
        topics=topics,
        num_processes=num_processes,
        process_num=process_num,
        # static configs live in code, so errors are doc-save errors, not config errors
        retry_errors=True,
        processor_chunk_size=processor_chunk_size,
    )
def get_location_pillow(pillow_id='location-ucr-pillow', include_ucrs=None,
                        num_processes=1, process_num=0, ucr_configs=None, **kwargs):
    """Processes updates to locations for UCR

    Note this is only applicable if a domain on the environment has `LOCATIONS_IN_UCR`
    flag enabled.

    Processors:
      - :py:func:`corehq.apps.userreports.pillow.ConfigurableReportPillowProcessor`
    """
    feed = KafkaChangeFeed(
        [LOCATION_TOPIC], client_id=pillow_id,
        num_processes=num_processes, process_num=process_num,
    )
    processor = ConfigurableReportPillowProcessor(
        data_source_providers=[DynamicDataSourceProvider('Location'), StaticDataSourceProvider('Location')],
        include_ucrs=include_ucrs,
    )
    if ucr_configs:
        processor.bootstrap(ucr_configs)
    location_checkpoint = KafkaPillowCheckpoint(pillow_id, [LOCATION_TOPIC])
    handler = KafkaCheckpointEventHandler(
        checkpoint=location_checkpoint,
        checkpoint_frequency=1000,
        change_feed=feed,
        checkpoint_callback=processor,
    )
    return ConstructedPillow(
        name=pillow_id,
        change_feed=feed,
        checkpoint=location_checkpoint,
        change_processed_event_handler=handler,
        processor=[processor],
    )
|
import ddapp
import math
import textwrap
import drc as lcmdrc
import bot_core as lcmbotcore
import vtkAll as vtk
from ddapp import transformUtils
from ddapp import visualization as vis
from ddapp import objectmodel as om
from ddapp import lcmUtils
from ddapp import ik
from ddapp import cameraview
from ddapp import affordanceupdater
from ddapp import affordancemanager
from ddapp import segmentation
from ddapp import robotstate
from ddapp.debugVis import DebugData
from ddapp.utime import getUtime
from ddapp.ikplanner import ConstraintSet
import ddapp.tasks.robottasks as rt
from ddapp.ikparameters import IkParameters
import os
import functools
import numpy as np
import scipy.io
from ddapp.tasks.taskuserpanel import TaskUserPanel
class DrivingPlanner(object):
def __init__(self, ikServer, robotSystem):
    """Planner for vehicle driving tasks (wheel grasping, pedal work, leg moves).

    :param ikServer: MATLAB IK server wrapper; commands are queued on its taskQueue
    :param robotSystem: container exposing robot models, joint state, and the IK planner
    """
    self.ikServer = ikServer
    self.robotSystem = robotSystem
    # run the MATLAB-side driving planner bootstrap once the ik server finishes startup
    self.ikServer.connectStartupCompleted(self.initialize)
    self.steeringAngleDegrees = 0.0
    self.maxTurningRadius = 9.5
    # local driving-trajectory parameters (units not stated here -- presumably meters/radians; verify)
    self.trajectoryX = 0
    self.trajectoryY = 0.3
    self.trajectoryAngle = 0
    self.trajSegments = 25
    self.wheelDistance = 1.4
    # identity transform until a tag detection updates it
    self.tagToLocalTransform = transformUtils.transformFromPose([0,0,0],[1,0,0,0])
    self.commandStreamChannel = 'JOINT_POSITION_GOAL'
    # indices of left ankle pitch ('l_leg_aky') and left wrist yaw ('l_arm_lwy')
    # within the Drake pose joint ordering
    self.akyIdx = robotstate.getDrakePoseJointNames().index('l_leg_aky')
    self.lwyIdx = robotstate.getDrakePoseJointNames().index('l_arm_lwy')
    # [min, max] ankle positions; NaN until calibrated
    self.anklePositions = np.array([np.nan,np.nan])
    # per-joint limits from the teleop model, ordered like the Drake pose vector
    self.jointLimitsMin = np.array([self.robotSystem.teleopRobotModel.model.getJointLimits(jointName)[0] for jointName in robotstate.getDrakePoseJointNames()])
    self.jointLimitsMax = np.array([self.robotSystem.teleopRobotModel.model.getJointLimits(jointName)[1] for jointName in robotstate.getDrakePoseJointNames()])
    self.idleAngleSlack = 10
    self.fineGrainedThrottleTravel = 10
    self.steeringAngleOffset = 0
    self.throttlePublishChannel = 'THROTTLE_COMMAND_POSITION_GOAL'
    self.steeringPublishChannel = 'STEERING_COMMAND_POSITION_GOAL'
    self.addSubscribers()
    # wheel/wrist angles captured at grasp time; None until a grasp occurs
    self.graspWheelAngle = None
    self.graspWristAngle = None
    self.plans = []
def getInitCommands(self):
    """Return the MATLAB command strings that boot the driving planner (`dp`) on the ik server."""
    startup = textwrap.dedent('''
        % ------ driving planner startup ------
        addpath([getenv('DRC_BASE'), '/software/control/matlab/planners/driving_planner']);
        clear driving_planner_options;
        driving_planner_options.listen_to_lcm_flag = 0;
        driving_planner_options.qstar = q_nom;
        dp = drivingPlanner(s.robot, driving_planner_options);
        % ------ driving planner startup end ------
    ''')
    return [startup]
def addSubscribers(self):
    """Subscribe to throttle and steering command messages arriving over LCM."""
    lcmUtils.addSubscriber('THROTTLE_COMMAND', lcmdrc.trigger_finger_t, self.onThrottleCommand)
    lcmUtils.addSubscriber('STEERING_COMMAND', lcmdrc.driving_control_cmd_t, self.onSteeringCommand)
def initialize(self, ikServer, success):
    """Startup-completed callback: queue the driving planner bootstrap commands.

    Skipped when the ik server merely restarted (MATLAB state already exists).
    """
    if ikServer.restarted:
        return
    commands = self.getInitCommands()
    self.ikServer.taskQueue.addTask(functools.partial(self.ikServer.comm.sendCommandsAsync, commands))
    self.ikServer.taskQueue.start()
def applyProperties(self):
    """Push the panel-set tolerances/options onto the MATLAB driving planner object."""
    commands = [
        "dp.options.quat_tol = %r;" % self.quatTol,
        "dp.options.tol = %r;" % self.positionTol,
        "dp.options.seed_with_current = %r;" % self.seedWithCurrent,
    ]
    self.ikServer.taskQueue.addTask(functools.partial(self.ikServer.comm.sendCommandsAsync, commands))
    self.ikServer.taskQueue.start()
def updateWheelTransform(self, xyzquat):
    """Send the steering-wheel pose (xyz + quaternion) to the MATLAB planner."""
    startPose = self.getPlanningStartPose()
    commands = [
        "q0 = %s;" % ik.ConstraintBase.toColumnVectorString(startPose),
        "xyzquat = %s;" % ik.ConstraintBase.toColumnVectorString(xyzquat),
        "dp = dp.updateWheelTransform(xyzquat, q0);",
    ]
    self.ikServer.taskQueue.addTask(functools.partial(self.ikServer.comm.sendCommandsAsync, commands))
    self.ikServer.taskQueue.start()
def planSafe(self, speed=1):
    """Queue a MATLAB request for the 'safe' driving posture plan."""
    startPose = self.getPlanningStartPose()
    commands = [
        "clear options;",
        "options.speed = %r;" % speed,
        "dp.planSafe(options,%s);" % ik.ConstraintBase.toColumnVectorString(startPose),
    ]
    self.ikServer.taskQueue.addTask(functools.partial(self.ikServer.comm.sendCommandsAsync, commands))
    self.ikServer.taskQueue.start()
def planPreGrasp(self, depth=0.2, xyz_des=None, angle=0, speed=1, graspLocation='center', turnRadius=0.187):
    """Queue a MATLAB request to plan the pre-grasp pose near the steering wheel.

    :param xyz_des: optional desired hand position; forwarded to the planner when given
    """
    commands = []
    commands.append("clear options;")
    commands.append("options = struct('depth',{%r});" % depth)
    commands.append("options.turn_radius = %r;" % turnRadius)
    commands.append("options.graspLocation = '%s';" % graspLocation)
    commands.append("options.angle = %r;" % np.radians(angle))
    commands.append("options.speed = %r;" % speed)
    if xyz_des is not None:
        # bug fix: this was `commands.append("...", value)` -- list.append takes a single
        # argument, so any call with xyz_des raised TypeError; the intent was %-formatting
        # as used everywhere else in this class.
        commands.append("options.xyz_des = {%s};" % ik.ConstraintBase.toColumnVectorString(xyz_des))
    startPose = self.getPlanningStartPose()
    commands.append("dp.planPreGrasp(options, %s);" % ik.ConstraintBase.toColumnVectorString(startPose))
    self.ikServer.taskQueue.addTask(functools.partial(self.ikServer.comm.sendCommandsAsync, commands))
    self.ikServer.taskQueue.start()
def planTouch(self, depth=0, xyz_des=None, speed=1):
    """Queue a MATLAB request to touch at the given depth.

    Note: ``xyz_des`` is accepted but currently unused by this method.
    """
    startPose = self.getPlanningStartPose()
    commands = [
        "clear options;",
        "options = struct('depth',{%r});" % depth,
        "options.speed = %r;" % speed,
        "dp.planTouch(options, %s);" % ik.ConstraintBase.toColumnVectorString(startPose),
    ]
    self.ikServer.taskQueue.addTask(functools.partial(self.ikServer.comm.sendCommandsAsync, commands))
    self.ikServer.taskQueue.start()
def planRetract(self, depth=0.2, speed=1):
    """Queue a MATLAB request to retract the hand from the wheel.

    Consistency fix: ``speed`` was interpolated with %s while every sibling
    planner method uses %r; output is identical for the plain numbers passed
    here, but %r matches the class convention.
    """
    commands = []
    commands.append("clear options;")
    commands.append("options = struct('depth',{%r});" % depth)
    commands.append("options.speed = %r;" % speed)
    startPose = self.getPlanningStartPose()
    commands.append("dp.planRetract(options, %s);" % ik.ConstraintBase.toColumnVectorString(startPose))
    self.ikServer.taskQueue.addTask(functools.partial(self.ikServer.comm.sendCommandsAsync, commands))
    self.ikServer.taskQueue.start()
def planTurn(self, angle=0, speed=1):
    """Queue a MATLAB request for a steering turn of `angle` degrees (sent as radians)."""
    startPose = self.getPlanningStartPose()
    commands = [
        "clear options;",
        "options.turn_angle = %r;" % np.radians(angle),
        "options.speed = %r;" % speed,
        "options.use_raw_angle = 1;",
        "dp.planTurn(options,%s);" % ik.ConstraintBase.toColumnVectorString(startPose),
    ]
    self.ikServer.taskQueue.addTask(functools.partial(self.ikServer.comm.sendCommandsAsync, commands))
    self.ikServer.taskQueue.start()
def planSteeringWheelTurn(self, speed=1, knotPoints=20, turnRadius=.187, gazeTol=0.3):
    """Queue a MATLAB request for a steering-wheel turning trajectory."""
    startPose = self.getPlanningStartPose()
    commands = [
        "clear options;",
        "options.speed = %r;" % speed,
        "options.turn_radius = %r;" % turnRadius,
        "options.N = %r;" % knotPoints,
        "options.steering_gaze_tol = %r;" % gazeTol,
        "dp.planSteeringWheelTurn(options,%s);" % ik.ConstraintBase.toColumnVectorString(startPose),
    ]
    self.ikServer.taskQueue.addTask(functools.partial(self.ikServer.comm.sendCommandsAsync, commands))
    self.ikServer.taskQueue.start()
def planSeed(self):
    """Queue a MATLAB request for a plan seeded from the current pose."""
    startPose = self.getPlanningStartPose()
    commands = ["dp.planSeed(%s);" % ik.ConstraintBase.toColumnVectorString(startPose)]
    self.ikServer.taskQueue.addTask(functools.partial(self.ikServer.comm.sendCommandsAsync, commands))
    self.ikServer.taskQueue.start()
def getPlanningStartPose(self):
    """Current measured joint pose vector used as the planning start state."""
    jointController = self.robotSystem.robotStateJointController
    return jointController.q
# move left leg up a bit
def planLegUp(self):
    """Plan raising the left foot 5 cm straight up from its current pose.

    All joints except the left leg are locked to the current posture; the
    resulting end-pose plan is appended to self.plans and returned.
    """
    ikPlanner = self.robotSystem.ikPlanner
    startPose = self.getPlanningStartPose()
    startPoseName = 'q_start_foot'
    self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
    endPoseName = 'q_foot_end'
    lFoot2World = self.robotSystem.ikPlanner.getLinkFrameAtPose('l_foot', startPose)
    targetFrame = transformUtils.copyFrame(lFoot2World)
    targetFrame.PreMultiply()
    targetFrame.Translate([0.0,0.0, 0.05])
    footPoseConstraint = self.createLeftFootPoseConstraint(targetFrame)
    allButLeftLegPostureConstraint = self.createAllButLeftLegPostureConstraint(startPoseName)
    constraints = [allButLeftLegPostureConstraint]
    constraints.extend(footPoseConstraint)
    cs = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
    cs.ikParameters = IkParameters(maxDegreesPerSecond=10, usePointwise=False)
    # NOTE(review): seed/nominal use 'q_start' while the start pose above was
    # registered as 'q_start_foot' -- confirm a 'q_start' pose exists on the planner
    cs.seedPoseName = 'q_start'
    cs.nominalPoseName = 'q_start'
    endPose = cs.runIk()
    plan = cs.planEndPoseGoal()
    self.plans.append(plan)
    return plan
def planLegSwingIn(self):
    """Plan swinging the left leg into the vehicle, ending just above the pedal.

    Runs IK to an end pose above the 'left foot driving' frame, then adds a
    mid-trajectory constraint through the 'left foot pedal swing' frame
    (tspan 0.3) and plans a keyframe trajectory, which is stored and returned.
    """
    om.findObjectByName('left foot driving')
    ikPlanner = self.robotSystem.ikPlanner
    startPose = self.getPlanningStartPose()
    startPoseName = 'q_start_foot'
    self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
    endPoseName = 'q_foot_end'
    # end target: slightly behind and above the pedal frame
    legAbovePedalFrame = transformUtils.copyFrame(om.findObjectByName('left foot driving').transform)
    legAbovePedalFrame.PreMultiply()
    legAbovePedalFrame.Translate([-0.02, 0.0, 0.03])
    identityFrame = vtk.vtkTransform()  # NOTE(review): unused in this method
    legAbovePedalConstraint = self.createLeftFootPoseConstraint(legAbovePedalFrame, tspan=[1,1])
    allButLeftLegPostureConstraint = self.createAllButLeftLegPostureConstraint(startPoseName)
    constraints = [allButLeftLegPostureConstraint]
    constraints.extend(legAbovePedalConstraint)
    # seed/nominal from the stored 'driving' posture database entry
    seedPoseName = 'q_driving'
    seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'driving')
    self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
    cs = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
    cs.ikParameters = IkParameters(maxDegreesPerSecond=10, usePointwise=False)
    cs.seedPoseName = 'q_driving'
    cs.nominalPoseName = 'q_driving'
    endPose = cs.runIk()
    legSwingFrame = om.findObjectByName('left foot pedal swing').transform
    cs.constraints.extend(self.createLeftFootPoseConstraint(legSwingFrame, tspan=[0.3,0.3]))
    keyFramePlan = cs.runIkTraj()
    self.plans.append(keyFramePlan)
    return keyFramePlan
def planLegAbovePedal(self):
    """Plan the left foot to a hover pose just above/behind the pedal.

    Same target construction as planLegSwingIn (2 cm back / 3 cm up from the
    'left foot driving' frame) but planned as a simple end-pose goal with no
    mid-trajectory via-point.  Appends the plan to self.plans and returns it.
    """
    om.findObjectByName('left foot driving')
    ikPlanner = self.robotSystem.ikPlanner
    startPose = self.getPlanningStartPose()
    startPoseName = 'q_start_foot'
    self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
    endPoseName = 'q_foot_end'
    # target: slightly behind and above the pedal frame
    legAbovePedalFrame = transformUtils.copyFrame(om.findObjectByName('left foot driving').transform)
    legAbovePedalFrame.PreMultiply()
    legAbovePedalFrame.Translate([-0.02,0.0, 0.03])
    identityFrame = vtk.vtkTransform()  # NOTE(review): unused local
    legAbovePedalConstraint = self.createLeftFootPoseConstraint(legAbovePedalFrame, tspan=[1,1])
    allButLeftLegPostureConstraint = self.createAllButLeftLegPostureConstraint(startPoseName)
    constraints = [allButLeftLegPostureConstraint]
    constraints.extend(legAbovePedalConstraint)
    seedPoseName = 'q_driving'
    seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'driving')
    self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
    cs = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
    cs.ikParameters = IkParameters(maxDegreesPerSecond=10, usePointwise=False)
    cs.seedPoseName = 'q_driving'
    cs.nominalPoseName = 'q_driving'
    endPose = cs.runIk()
    plan = cs.planEndPoseGoal()
    self.plans.append(plan)
    return plan
def planLegSwingOut(self):
    """Plan the left-foot swing out, away from the pedal, back toward the
    'left foot start' pose.

    Mirror of planLegSwingIn: the end target is 5 cm above the 'left foot
    start' frame (tspan [1,1]) and the via-point through the 'left foot pedal
    swing' frame is placed late in the trajectory (tspan [0.7,0.7]).
    Appends the keyframe plan to self.plans and returns it.
    """
    om.findObjectByName('left foot driving')
    ikPlanner = self.robotSystem.ikPlanner
    startPose = self.getPlanningStartPose()
    startPoseName = 'q_start_foot'
    self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
    endPoseName = 'q_foot_end'
    # end target: 5 cm above the stored foot start frame
    legUpFrame = transformUtils.copyFrame(om.findObjectByName('left foot start').transform)
    legUpFrame.PreMultiply()
    legUpFrame.Translate([0.0,0.0, 0.05])
    identityFrame = vtk.vtkTransform()  # NOTE(review): unused local
    legUpConstraint = self.createLeftFootPoseConstraint(legUpFrame, tspan=[1,1])
    allButLeftLegPostureConstraint = self.createAllButLeftLegPostureConstraint(startPoseName)
    constraints = [allButLeftLegPostureConstraint]
    constraints.extend(legUpConstraint)
    seedPoseName = 'q_driving'
    seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'driving')
    self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
    cs = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
    cs.ikParameters = IkParameters(maxDegreesPerSecond=10, usePointwise=False)
    cs.seedPoseName = 'q_driving'
    cs.nominalPoseName = 'q_driving'
    endPose = cs.runIk()
    # late via-point through the pedal swing frame
    legSwingFrame = om.findObjectByName('left foot pedal swing').transform
    cs.constraints.extend(self.createLeftFootPoseConstraint(legSwingFrame, tspan=[0.7,0.7]))
    keyFramePlan = cs.runIkTraj()
    self.plans.append(keyFramePlan)
    return keyFramePlan
def planLegEgressStart(self):
    """Plan the left foot back down onto its stored 'left foot start' frame
    (no offset), as the first leg motion of egress.

    Planned as a simple end-pose goal with all joints except the left leg
    locked to the current posture; seeded/biased with the 'driving' posture.
    Appends the plan to self.plans and returns it.
    """
    om.findObjectByName('left foot driving')
    ikPlanner = self.robotSystem.ikPlanner
    startPose = self.getPlanningStartPose()
    startPoseName = 'q_start_foot'
    self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
    endPoseName = 'q_foot_end'
    legDownFrame = transformUtils.copyFrame(om.findObjectByName('left foot start').transform)
    identityFrame = vtk.vtkTransform()  # NOTE(review): unused local
    # no tspan given: constraint active over the whole trajectory
    legDownConstraint = self.createLeftFootPoseConstraint(legDownFrame)
    allButLeftLegPostureConstraint = self.createAllButLeftLegPostureConstraint(startPoseName)
    constraints = [allButLeftLegPostureConstraint]
    constraints.extend(legDownConstraint)
    seedPoseName = 'q_driving'
    seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'driving')
    self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
    cs = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
    cs.ikParameters = IkParameters(maxDegreesPerSecond=10, usePointwise=False)
    cs.seedPoseName = 'q_driving'
    cs.nominalPoseName = 'q_driving'
    endPose = cs.runIk()
    plan = cs.planEndPoseGoal()
    self.plans.append(plan)
    return plan
def planLegPedal(self):
    """Plan the left foot directly onto the 'left foot driving' (pedal) frame.

    Pins l_foot to the pedal frame with a position+orientation constraint
    (default time span), locks the rest of the body to the current posture,
    and plans an end-pose goal.  quasiStaticShrinkFactor=1 here, unlike the
    other leg plans.  Appends the plan to self.plans and returns it.
    """
    ikPlanner = self.robotSystem.ikPlanner
    startPose = self.getPlanningStartPose()
    startPoseName = 'q_start_foot'
    self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
    endPoseName = 'q_foot_end'
    lfootConstraintFrame = transformUtils.copyFrame(om.findObjectByName('left foot driving').transform)
    identityFrame = vtk.vtkTransform()
    lfootPositionOrientationConstraint = ikPlanner.createPositionOrientationConstraint('l_foot', lfootConstraintFrame, identityFrame)
    allButLeftLegPostureConstraint = self.createAllButLeftLegPostureConstraint(startPoseName)
    constraints = [allButLeftLegPostureConstraint]
    constraints.extend(lfootPositionOrientationConstraint)
    seedPoseName = 'q_driving'
    seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'driving')
    self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
    cs = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
    cs.ikParameters = IkParameters(quasiStaticShrinkFactor=1, maxDegreesPerSecond=10, usePointwise=False)
    cs.seedPoseName = 'q_driving'
    cs.nominalPoseName = 'q_driving'
    endPose = cs.runIk()
    keyFramePlan = cs.planEndPoseGoal()
    self.plans.append(keyFramePlan)
    return keyFramePlan
def planSteeringWheelReGrasp(self, useLineConstraint=False):
ikPlanner = self.robotSystem.ikPlanner
startPose = self.getPlanningStartPose()
self.wheelAngleBeforeReGrasp = self.getSteeringWheelAngle()
startPoseName = 'q_regrasp_start'
self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
endPoseName = 'q_regrasp_end'
handName = 'left'
handLinkName = 'l_hand'
maxMetersPerSecond = 0.1
retractDepth = 0.15
palmToHand = ikPlanner.getPalmToHandLink(handName)
palmToWorld = ikPlanner.newGraspToWorldFrame(startPose, handName, palmToHand)
finalTargetFrame = transformUtils.copyFrame(palmToWorld)
finalTargetFrame.PreMultiply()
finalTargetFrame.RotateY(180)
finalPoseConstraint = self.createLeftPalmPoseConstraints(finalTargetFrame, tspan=[1,1])
retractTargetFrame = transformUtils.copyFrame(palmToWorld)
retractTargetFrame.PreMultiply()
retractTargetFrame.Translate([0.0, -retractDepth, 0.0])
retractPoseConstraint = self.createLeftPalmPoseConstraints(retractTargetFrame, tspan=[0.25,0.25])
preGraspTargetFrame = transformUtils.copyFrame(retractTargetFrame)
preGraspTargetFrame.PreMultiply()
preGraspTargetFrame.RotateY(180)
preGraspPoseConstraint = self.createLeftPalmPoseConstraints(preGraspTargetFrame, tspan=[0.75, 0.75])
allButLeftArmPostureConstraint = self.createAllButLeftArmPostureConstraint(startPoseName)
lockedBaseConstraint = ikPlanner.createLockedBasePostureConstraint(startPoseName)
lockedRightArmConstraint = ikPlanner.createLockedRightArmPostureConstraint(startPoseName)
lockedTorsoConstraint = ikPlanner.createLockedTorsoPostureConstraint(startPoseName)
constraints = [allButLeftArmPostureConstraint]
# constraints = [lockedTorsoConstraint, lockedRightArmConstraint]
constraints.extend(finalPoseConstraint)
seedPoseName = 'q_regrasp_seed'
# seedPose = startPose
# seedPose[self.lwyIdx] = startPose[self.lwyIdx] + np.pi
seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'driving')
self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
constraintSet = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
constraintSet.ikParameters = IkParameters(quasiStaticShrinkFactor=10, usePointwise=False, maxDegreesPerSecond=60,
maxBodyTranslationSpeed=maxMetersPerSecond, rescaleBodyNames=[handLinkName], rescaleBodyPts=list(ikPlanner.getPalmPoint()))
constraintSet.seedPoseName = seedPoseName
constraintSet.nominalPoseName = seedPoseName
# for c in constraintSet.constraints:
# print c
# vis.updateFrame(palmToWorld, 'palm frame')
# vis.updateFrame(finalTargetFrame, 'target frame')
endPose = constraintSet.runIk()
# move on line constraint
motionVector = np.array(retractTargetFrame.GetPosition()) - np.array(palmToWorld.GetPosition())
motionTargetFrame = transformUtils.getTransformFromOriginAndNormal(np.array(retractTargetFrame.GetPosition()), motionVector)
# vis.updateFrame(motionTargetFrame,'motion frame')
# vis.updateFrame(targetFrame, 'target')
# vis.updateFrame(currentFrame, 'current')
p = ikPlanner.createLinePositionConstraint(handLinkName, palmToHand, motionTargetFrame, lineAxis=2, bounds=[-np.linalg.norm(motionVector)*1, 0], positionTolerance=0.001)
p.tspan = np.array([0.12,0.9])
# p_out = ikPlanner.createLinePositionConstraint(handLinkName, palmToHand, motionTargetFrame, lineAxis=2, bounds=[-np.linalg.norm(motionVector)*1, 0], positionTolerance=0.001)
# p_out.tspan = np.linspace(,1,5)
endPose = constraintSet.runIk()
constraintSet.constraints.extend(retractPoseConstraint)
constraintSet.constraints.extend(preGraspPoseConstraint)
if useLineConstraint:
constraintSet.constraints.append(p)
plan = constraintSet.runIkTraj()
else:
plan = constraintSet.runIkTraj()
self.plans.append(plan)
return plan
def createLeftFootPoseConstraint(self, targetFrame, tspan=None):
    """Create position and orientation constraints pinning l_foot to targetFrame.

    :param targetFrame: desired world pose for the l_foot link.
    :param tspan: two-element [start, end] active time window for the
        constraints; defaults to the whole trajectory ([-inf, inf]).
    :return: (positionConstraint, orientationConstraint) tuple.
    """
    # Fix: use a None sentinel instead of a mutable list default argument,
    # which is shared across calls.
    if tspan is None:
        tspan = [-np.inf, np.inf]
    positionConstraint, orientationConstraint = self.robotSystem.ikPlanner.createPositionOrientationConstraint('l_foot', targetFrame, vtk.vtkTransform())
    positionConstraint.tspan = tspan
    orientationConstraint.tspan = tspan
    return positionConstraint, orientationConstraint
def createLeftPalmPoseConstraints(self, targetFrame, tspan=None):
    """Left-hand convenience wrapper around createPalmPoseConstraints.

    :param targetFrame: desired world pose for the left palm.
    :param tspan: active time window; defaults to the whole trajectory.
    :return: (positionConstraint, orientationConstraint) tuple.
    """
    # Fix: None sentinel instead of a mutable list default; delegate to the
    # generic side-parameterized helper instead of duplicating its body.
    if tspan is None:
        tspan = [-np.inf, np.inf]
    return self.createPalmPoseConstraints('left', targetFrame, tspan=tspan)
def createPalmPoseConstraints(self, side, targetFrame, tspan=None):
    """Create palm grasp position/orientation constraints for one hand.

    :param side: 'left' or 'right' (passed through to the ik planner).
    :param targetFrame: desired world pose for the palm.
    :param tspan: active time window; defaults to the whole trajectory.
    :return: (positionConstraint, orientationConstraint) tuple.
    """
    # Fix: None sentinel instead of a mutable list default argument.
    if tspan is None:
        tspan = [-np.inf, np.inf]
    ikPlanner = self.robotSystem.ikPlanner
    positionConstraint, orientationConstraint = ikPlanner.createPositionOrientationGraspConstraints(side, targetFrame)
    positionConstraint.tspan = tspan
    orientationConstraint.tspan = tspan
    return positionConstraint, orientationConstraint
def createLeftHandPoseConstraintOnWheel(self, depth=0.12, tspan=None):
    """Constrain the left hand face to the steering wheel palm frame.

    :param depth: offset (m) along the palm frame's y axis from the wheel
        grasp frame.
    :param tspan: active time window; defaults to the whole trajectory.
    :return: (positionConstraint, orientationConstraint) tuple.
    """
    # Fix: None sentinel instead of a mutable list default argument.
    if tspan is None:
        tspan = [-np.inf, np.inf]
    targetFrame = self.getSteeringWheelPalmFrame()
    targetFrame.PreMultiply()
    targetFrame.Translate([0.0, depth, 0.0])
    positionConstraint, orientationConstraint = self.robotSystem.ikPlanner.createPositionOrientationConstraint('l_hand_face', targetFrame, vtk.vtkTransform())
    positionConstraint.tspan = tspan
    orientationConstraint.tspan = tspan
    return positionConstraint, orientationConstraint
def getSteeringWheelPalmFrame(self):
    """Return a copy of the 'Steering Wheel frame' transform re-oriented for
    the palm: pre-rotated 90 degrees about X, then -90 degrees about Z."""
    wheelFrame = om.findObjectByName('Steering Wheel frame').transform
    palmFrame = transformUtils.copyFrame(wheelFrame)
    palmFrame.PreMultiply()
    palmFrame.RotateX(90)
    palmFrame.PreMultiply()
    palmFrame.RotateZ(-90)
    return palmFrame
def planBarGrasp(self,depth=0.03, useLineConstraint=False):
    """Plan a right-hand reach to the 'right hand grab bar' affordance.

    :param depth: offset (m) applied along the bar frame's -y axis when
        placing the palm target.
    :param useLineConstraint: when True, also constrain the palm to move
        along the straight line from its current position to the target over
        the middle of the trajectory (tspan 0.2-0.8) and plan a keyframe
        trajectory; otherwise plan a simple end-pose goal.
    Appends the plan to self.plans and returns it.
    """
    ikPlanner = self.robotSystem.ikPlanner
    handSide = 'right'
    handLinkName = 'r_hand'
    startPose = self.getPlanningStartPose()
    startPoseName = 'q_grasp_start'
    self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
    endPoseName = 'q_end_grasp'
    palmToHand = ikPlanner.getPalmToHandLink(handSide)
    palmToWorld = transformUtils.copyFrame(ikPlanner.newGraspToWorldFrame(startPose, handSide, palmToHand))
    targetFrame = transformUtils.copyFrame(om.findObjectByName('right hand grab bar').transform)
    targetFrame.PreMultiply()
    targetFrame.Translate([0.0,-depth,0.0])
    finalPoseConstraints = self.createPalmPoseConstraints(handSide, targetFrame, tspan=[1,1])
    allButRightArmPostureConstraint = self.createAllButRightArmPostureConstraint(startPoseName)
    # seed/bias with the stored pre-grab posture for this hand
    seedPoseName = 'q_bar_grab'
    seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'bar_pre_grab', side=handSide)
    self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
    constraints = [allButRightArmPostureConstraint]
    constraints.extend(finalPoseConstraints)
    constraintSet = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
    constraintSet.ikParameters = IkParameters(quasiStaticShrinkFactor=10, usePointwise=False)
    constraintSet.seedPoseName = seedPoseName
    constraintSet.nominalPoseName = seedPoseName
    # move on line constraint
    motionVector = np.array(targetFrame.GetPosition()) - np.array(palmToWorld.GetPosition())
    motionTargetFrame = transformUtils.getTransformFromOriginAndNormal(np.array(targetFrame.GetPosition()), motionVector)
    # vis.updateFrame(motionTargetFrame,'motion frame')
    # vis.updateFrame(targetFrame, 'target')
    # vis.updateFrame(currentFrame, 'current')
    p = ikPlanner.createLinePositionConstraint(handLinkName, palmToHand, motionTargetFrame, lineAxis=2, bounds=[-np.linalg.norm(motionVector)*1, 0], positionTolerance=0.001)
    p.tspan = np.linspace(0.2,0.8,5)
    endPose = constraintSet.runIk()
    if useLineConstraint:
        constraintSet.constraints.append(p)
        plan = constraintSet.runIkTraj()
    else:
        plan = constraintSet.planEndPoseGoal()
    self.plans.append(plan)
    return plan
def planBarRetract(self, depth=0.3, useLineConstraint=False):
    """Plan the right hand retracting away from the grab bar.

    :param depth: retract distance (m) along the palm frame's -y axis.
    :param useLineConstraint: when True, constrain the palm to the straight
        retract line over the whole trajectory and plan a keyframe
        trajectory; otherwise plan a simple end-pose goal.
    Appends the plan to self.plans and returns it.

    Fix: the named local maxBodyTranslationSpeed was defined but never used;
    the IkParameters call hardcoded the same 0.3 value.  The named constant
    is now passed through, and the seed/nominal pose names reuse seedPoseName.
    """
    ikPlanner = self.robotSystem.ikPlanner
    handSide = 'right'
    handLinkName = 'r_hand'
    startPose = self.getPlanningStartPose()
    startPoseName = 'q_grasp_start'
    self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
    endPoseName = 'q_end_grasp'
    maxBodyTranslationSpeed = 0.3
    palmToHand = ikPlanner.getPalmToHandLink(handSide)
    palmToWorld = transformUtils.copyFrame(ikPlanner.newGraspToWorldFrame(startPose, handSide, palmToHand))
    # retract target: pull straight back along the palm's -y axis
    targetFrame = transformUtils.copyFrame(palmToWorld)
    targetFrame.PreMultiply()
    targetFrame.Translate([0.0,-depth,0.0])
    finalPoseConstraints = self.createPalmPoseConstraints(handSide, targetFrame, tspan=[1,1])
    allButRightArmPostureConstraint = self.createAllButRightArmPostureConstraint(startPoseName)
    seedPoseName = 'q_bar_grab'
    seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'bar_pre_grab', side=handSide)
    self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
    constraints = [allButRightArmPostureConstraint]
    constraints.extend(finalPoseConstraints)
    constraintSet = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
    constraintSet.ikParameters = IkParameters(quasiStaticShrinkFactor=10, usePointwise=False, maxBodyTranslationSpeed=maxBodyTranslationSpeed)
    constraintSet.seedPoseName = seedPoseName
    constraintSet.nominalPoseName = seedPoseName
    # move on line constraint
    motionVector = np.array(targetFrame.GetPosition()) - np.array(palmToWorld.GetPosition())
    motionTargetFrame = transformUtils.getTransformFromOriginAndNormal(np.array(targetFrame.GetPosition()), motionVector)
    p = ikPlanner.createLinePositionConstraint(handLinkName, palmToHand, motionTargetFrame, lineAxis=2, bounds=[-np.linalg.norm(motionVector)*1, 0.0], positionTolerance=0.02)
    p.tspan = np.linspace(0,1,5)
    endPose = constraintSet.runIk()
    if useLineConstraint:
        constraintSet.constraints.append(p)
        plan = constraintSet.runIkTraj()
    else:
        plan = constraintSet.planEndPoseGoal()
    self.plans.append(plan)
    return plan
def commitManipPlan(self):
    """Send the most recently generated plan to the manip planner for execution."""
    latestPlan = self.plans[-1]
    self.robotSystem.manipPlanner.commitManipPlan(latestPlan)
def createAllButLeftLegPostureConstraint(self, poseName):
    """Lock every joint except the left leg to its value in *poseName*."""
    # negative lookahead: all joint names NOT starting with 'l_leg'
    nonLeftLegJoints = robotstate.matchJoints('^(?!l_leg)')
    return self.robotSystem.ikPlanner.createPostureConstraint(poseName, nonLeftLegJoints)
def createAllButLeftArmPostureConstraint(self, poseName):
    """Lock every joint except the left arm to its value in *poseName*."""
    # negative lookahead: all joint names NOT starting with 'l_arm'
    nonLeftArmJoints = robotstate.matchJoints('^(?!l_arm)')
    return self.robotSystem.ikPlanner.createPostureConstraint(poseName, nonLeftArmJoints)
def createAllButRightArmPostureConstraint(self, poseName):
    """Lock every joint except the right arm to its value in *poseName*."""
    # negative lookahead: all joint names NOT starting with 'r_arm'
    nonRightArmJoints = robotstate.matchJoints('^(?!r_arm)')
    return self.robotSystem.ikPlanner.createPostureConstraint(poseName, nonRightArmJoints)
def computeDrivingTrajectories(self, steeringAngleDegrees, maxTurningRadius = 10, numTrajPoints = 50):
    """Predict the left/right wheel ground trajectories for a steering angle.

    The steering angle is mapped to a turning radius (near-zero angles are
    clamped to avoid division by zero), an arc of *numTrajPoints* points is
    swept around the turning center, and each point is offset laterally by
    half the wheel separation to produce per-wheel tracks.

    :return: (leftTraj, rightTraj) lists of 3D points (numTrajPoints-1 each).
    """
    angle = -steeringAngleDegrees
    if abs(angle) < 0.1:
        # avoid a singular (straight-line) turning radius
        angle = 1e-8
    turningRadius = 1.0 / (angle * (1 / (maxTurningRadius * 170.0)))
    turningCenter = np.asarray([0, turningRadius, 0])
    # sweep the arc, starting at -90 degrees relative to the turning center
    trajPoints = [turningCenter + turningRadius * np.asarray([math.cos(theta), math.sin(theta), 0])
                  for theta in (math.radians((40 / turningRadius) * i - 90) for i in range(numTrajPoints))]
    leftTraj = []
    rightTraj = []
    for pointA, pointB in zip(trajPoints, trajPoints[1:]):
        heading = pointB - pointA
        # unit vector perpendicular to the heading, in the ground plane
        lateral = np.cross(heading, [0, 0, 1])
        lateral = lateral / np.linalg.norm(lateral)
        offset = 0.5 * self.wheelDistance * lateral
        leftTraj.append(pointA - offset)
        rightTraj.append(pointA + offset)
    return leftTraj, rightTraj
def transformDrivingTrajectory(self, drivingTraj):
    """Map driving-trajectory points into the local frame.

    Builds a planar (XY translation + yaw) transform from the april-tag pose
    plus the user-configured trajectory offsets, then applies it to every
    point in *drivingTraj*.  Returns the transformed points as a list of
    numpy arrays.
    """
    transform = vtk.vtkTransform()
    z_axis = self.tagToLocalTransform.TransformVector([0, 0, 1])
    tag_origin = self.tagToLocalTransform.TransformPoint([0, 0, 0])
    z_norm = np.linalg.norm(z_axis[0:2])
    # yaw of the tag's z axis projected into the ground plane; fall back to
    # zero when the axis is (near) vertical and the projection degenerates
    if z_norm > 1e-6:
        z_axis_proj = z_axis[0:2] / z_norm
        angle = math.degrees(math.atan2(z_axis_proj[1], z_axis_proj[0]))
    else:
        angle = 0
    transform.Translate([tag_origin[0], tag_origin[1], 0])
    transform.RotateZ(self.trajectoryAngle + angle)
    transform.Translate([self.trajectoryX, self.trajectoryY, 0])
    return [np.asarray(transform.TransformPoint(point)) for point in drivingTraj]
def onThrottleCommand(self, msg):
    """Convert a throttle slider message into an l_leg_aky position goal.

    slider[0] is the coarse throttle travel, slider[1] fine adjustment
    (centered at 1/2), slider[3] is a deadman switch: below 1/2 the ankle is
    commanded to its joint-limit minimum.  Does nothing until the LOW/HIGH
    ankle positions have been initialized (anklePositions free of NaN).
    Publishes a joint_position_goal_t on self.throttlePublishChannel.
    """
    if np.isnan(self.anklePositions).any():
        # print 'you must initialize the LOW/HIGH ankle positions before streaming throttle commands'
        # print 'use the Capture Ankle Angle Low/High Buttons'
        return
    # slider 0 is the coarse grained slider, slider 1 is for fine grained adjustment
    slider = self.decodeThrottleMessage(msg)
    const = self.jointLimitsMin[self.akyIdx]
    ankleGoalPosition = const + slider[0]*self.coarseGrainedThrottleTravel + (slider[1]-1/2.0)*self.fineGrainedThrottleTravel
    ankleGoalPositionRadians = np.deg2rad(ankleGoalPosition)
    # trip the safety if slider[3] is < 1/2, emergency come off the throttle
    if slider[3] < 0.5:
        print 'Emergency stop, coming off the throttle'
        print "setting l_leg_aky to it's min value"
        # NOTE(review): jointLimitsMin is treated as degrees above (deg2rad is
        # applied) but assigned directly as radians here -- confirm the units
        ankleGoalPositionRadians = self.jointLimitsMin[self.akyIdx]
    msg = lcmdrc.joint_position_goal_t()
    msg.utime = getUtime()
    msg.joint_position = ankleGoalPositionRadians
    msg.joint_name = 'l_leg_aky'
    lcmUtils.publish(self.throttlePublishChannel, msg)
def onSteeringCommand(self, msg):
    """Translate a steering command into an l_arm_lwy joint position goal
    and publish it on the steering channel (wheel angle sign is inverted,
    then shifted by the calibration offset)."""
    goalAngle = -msg.steering_angle + self.steeringAngleOffset
    goalMsg = lcmdrc.joint_position_goal_t()
    goalMsg.utime = getUtime()
    goalMsg.joint_position = goalAngle
    goalMsg.joint_name = 'l_arm_lwy'
    lcmUtils.publish(self.steeringPublishChannel, goalMsg)
def decodeThrottleMessage(self,msg):
    """Unpack the four throttle sliders from *msg* into a float numpy array
    ordered [slider1, slider2, slider3, slider4]."""
    return np.array([msg.slider1, msg.slider2, msg.slider3, msg.slider4], dtype=float)
def captureRobotPoseFromStreaming(self):
    """Wait up to 1 second for a robot_state_t on the command stream channel
    and return it converted to a Drake pose vector, or None on timeout."""
    helper = lcmUtils.MessageResponseHelper(self.commandStreamChannel, lcmdrc.robot_state_t)
    msg = helper.waitForResponse(timeout=1000, keepAlive=False)
    if msg is None:
        print "Didn't receive a JOINT_POSITION_GOAL message"
        print "Are you streaming?"
        return None
    pose = robotstate.convertStateMessageToDrakePose(msg)
    return pose
def planCarEntryPose(self):
    """Plan from the current posture to the stored 'car_entry_new' posture
    from the driving posture database (feet not required on the ground)."""
    planner = self.robotSystem.ikPlanner
    currentPose = self.getPlanningStartPose()
    goalPose = planner.getMergedPostureFromDatabase(currentPose, 'driving', 'car_entry_new')
    self.addPlan(planner.computePostureGoal(currentPose, goalPose, feetOnGround=False))
def planArmsEgressStart(self):
    """Plan a staged arm motion into the egress-ready arm posture.

    Merges the pre-egress left-arm then right-arm postures into a mid pose,
    then the combined 'egress-arms' posture, and plans through all three
    waypoints at up to 60 deg/s with feet not required on the ground.
    """
    planner = self.robotSystem.ikPlanner
    currentPose = self.getPlanningStartPose()
    # stage the arms one side at a time before the combined egress posture
    midPose = planner.getMergedPostureFromDatabase(currentPose, 'driving', 'pre_egress_left_arm', side='left')
    midPose = planner.getMergedPostureFromDatabase(midPose, 'driving', 'pre_egress_right_arm', side='right')
    finalPose = planner.getMergedPostureFromDatabase(midPose, 'driving', 'egress-arms')
    plan = planner.computeMultiPostureGoal([currentPose, midPose, finalPose], feetOnGround=False,
                                           ikParameters=IkParameters(maxDegreesPerSecond=60))
    self.addPlan(plan)
def setSteeringWheelAndWristGraspAngles(self):
    """Capture the grasp calibration pair: the user-specified wheel angle
    (degrees, converted to radians) and the current wrist (lwy) joint angle."""
    self.graspWheelAngle = np.deg2rad(self.userSpecifiedGraspWheelAngleInDegrees)
    self.graspWristAngle = self.getPlanningStartPose()[self.lwyIdx]
def getSteeringWheelAngle(self):
    """Estimate the current steering wheel angle (radians).

    Assumes the wheel rotates one-to-one with the wrist (lwy) joint relative
    to the calibration captured at grasp time.  Returns 0 if the grasp
    calibration has not been captured yet.
    """
    if self.graspWristAngle is None or self.graspWheelAngle is None:
        # wrist/wheel grasp calibration not captured yet
        return 0
    wristAngleNow = self.getPlanningStartPose()[self.lwyIdx]
    return self.graspWheelAngle + wristAngleNow - self.graspWristAngle
def updateGraspOffsets(self):
    """Refresh the grasp calibration after a regrasp plan has executed.

    The wrist angle is re-read from the current pose, and the wheel angle is
    restored from the value cached before the regrasp, so subsequent
    getSteeringWheelAngle() calls stay consistent.
    """
    currentPose = self.getPlanningStartPose()
    self.graspWristAngle = currentPose[self.lwyIdx]
    self.graspWheelAngle = self.wheelAngleBeforeReGrasp
def printSteeringWheelAngleInDegrees(self):
print np.rad2deg(self.getSteeringWheelAngle())
def addPlan(self, plan):
    """Record *plan* as the most recent entry in this planner's plan history."""
    self.plans.append(plan)
class DrivingPlannerPanel(TaskUserPanel):
def __init__(self, robotSystem):
    """Build the driving task panel.

    Creates the DrivingPlanner, the panel's properties/buttons/tasks, the
    LCM subscriptions that drive trajectory updates, and two camera image
    views with affordance overlays.

    :param robotSystem: director robot system (supplies ikServer,
        robotStateModel, etc.)
    """
    TaskUserPanel.__init__(self, windowTitle='Driving Task')
    self.robotSystem = robotSystem
    self.drivingPlanner = DrivingPlanner(robotSystem.ikServer, robotSystem)
    self.addDefaultProperties()
    self.addButtons()
    self.addTasks()
    self.showTrajectory = False
    # steering commands redraw the trajectory; april-tag poses anchor it in local frame
    self.steeringSub = lcmUtils.addSubscriber('STEERING_COMMAND', lcmdrc.driving_control_cmd_t, self.onSteeringCommand)
    self.apriltagSub = lcmUtils.addSubscriber('APRIL_TAG_TO_CAMERA_LEFT', lcmbotcore.rigid_transform_t, self.onAprilTag)
    # right-chest and left camera views with plain black backgrounds
    self.imageView = cameraview.CameraImageView(cameraview.imageManager, 'CAMERACHEST_RIGHT', 'right image view')
    self.imageViewLeft = cameraview.CameraImageView(cameraview.imageManager, 'CAMERA_LEFT', 'left image view')
    self.imageView.view.orientationMarkerWidget().Off()
    self.imageView.view.backgroundRenderer().SetBackground([0,0,0])
    self.imageView.view.backgroundRenderer().SetBackground2([0,0,0])
    self.imageViewLeft.view.orientationMarkerWidget().Off()
    self.imageViewLeft.view.backgroundRenderer().SetBackground([0,0,0])
    self.imageViewLeft.view.backgroundRenderer().SetBackground2([0,0,0])
    # overlay affordances (and optionally the driving trajectory) in both views
    self.affordanceUpdater = affordanceupdater.AffordanceInCameraUpdater(segmentation.affordanceManager, self.imageView)
    self.affordanceUpdaterLeft = affordanceupdater.AffordanceInCameraUpdater(segmentation.affordanceManager, self.imageViewLeft)
    self.affordanceUpdater.prependImageName = True
    self.affordanceUpdaterLeft.prependImageName = True
    self.affordanceUpdater.timer.start()
    self.affordanceUpdaterLeft.timer.start()
    self.imageViewLayout.addWidget(self.imageView.view)
    self.imageViewLayout.addWidget(self.imageViewLeft.view)
    # redraw the trajectory whenever the robot state model changes
    self.callbackId = self.robotSystem.robotStateModel.connectModelChanged(self.onModelChanged)
def onModelChanged(self, unusedrobotstate):
    """Redraw the driving trajectory on robot-state changes, but only once a
    'Steering Wheel' affordance exists in the object model."""
    if om.findObjectByName('Steering Wheel') is not None:
        self.updateAndDrawTrajectory()
def onAprilTag(self, msg):
    """Update the cached april-tag-to-local transform from the image manager
    queue at the message timestamp, then redraw the trajectory."""
    tagToLocal = self.drivingPlanner.tagToLocalTransform
    cameraview.imageManager.queue.getTransform('april_tag_car_beam', 'local', msg.utime, tagToLocal)
    self.updateAndDrawTrajectory()
def addButtons(self):
    """Create the panel's manual buttons, wiring each label to its handler."""
    manualButtons = [
        ('Start', self.onStart),
        ('Update Wheel Location', self.onUpdateWheelLocation),
        ('Plan Safe', self.onPlanSafe),
        ('Plan Pre Grasp', self.onPlanPreGrasp),
        ('Plan Touch', self.onPlanTouch),
        ('Plan Retract', self.onPlanRetract),
        ('Plan Turn', self.onPlanTurn),
        ('Plan Wheel Re-Grasp', self.drivingPlanner.planSteeringWheelReGrasp),
        ('Plan Bar Grab', self.onPlanBarGrasp),
        ('Plan Bar Retract', self.onPlanBarRetract),
        # ('Plan Steering Wheel Turn', self.onPlanSteeringWheelTurn),
        # ('Plan Seed', self.drivingPlanner.planSeed),
        # ('Capture Ankle Angle Low', functools.partial(self.drivingPlanner.captureAnklePosition, 0)),
        # ('Capture Ankle Angle High', functools.partial(self.drivingPlanner.captureAnklePosition, 1)),
        ('Capture Wheel and Wrist grasp angles', self.drivingPlanner.setSteeringWheelAndWristGraspAngles),
        ('Print Steering Wheel Angle', self.drivingPlanner.printSteeringWheelAngleInDegrees),
    ]
    for label, handler in manualButtons:
        self.addManualButton(label, handler)
def addDefaultProperties(self):
    """Declare the panel's user-adjustable properties and sync them once.

    Fix: two addProperty statements ('Trajectory X Offset' and 'Trajectory
    Angle Offset') ended with a stray trailing comma, turning each statement
    into a useless one-element tuple expression; the commas are removed.
    """
    self.params.addProperty('PreGrasp/Retract Depth', 0.2, attributes=om.PropertyAttributes(singleStep=0.01, decimals=3))
    self.params.addProperty('Touch Depth', 0.0, attributes=om.PropertyAttributes(singleStep=0.01, decimals=3))
    self.params.addProperty('PreGrasp Angle', 0, attributes=om.PropertyAttributes(singleStep=10))
    self.params.addProperty('Turn Angle', 0, attributes=om.PropertyAttributes(singleStep=10))
    # self.params.addProperty('Steering Wheel Radius (meters)', 0.1873, attributes=om.PropertyAttributes(singleStep=0.01))
    # self.params.addProperty('Knot Points', 20, attributes=om.PropertyAttributes(singleStep=1))
    # self.params.addProperty('Gaze Constraint Tol', 0.3, attributes=om.PropertyAttributes(singleStep=0.1, decimals=2))
    self.params.addProperty('Position Constraint Tol', 0.0, attributes=om.PropertyAttributes(singleStep=0.01, decimals=2))
    self.params.addProperty('Quat Constraint Tol', 0.0, attributes=om.PropertyAttributes(singleStep=0.01, decimals=2))
    self.params.addProperty('Grasp Location', 0, attributes=om.PropertyAttributes(enumNames=['Center','Rim']))
    self.params.addProperty('Seed with current posture', 0, attributes=om.PropertyAttributes(enumNames=['False','True']))
    self.params.addProperty('Speed', 0.75, attributes=om.PropertyAttributes(singleStep=0.1, decimals=2))
    # self.params.addProperty('Throttle Idle Angle Slack', 10, attributes=om.PropertyAttributes(singleStep=1))
    self.params.addProperty('Coarse Grained Throttle Travel', 100, attributes=om.PropertyAttributes(singleStep=10))
    self.params.addProperty('Fine Grained Throttle Travel', 10, attributes=om.PropertyAttributes(singleStep=1))
    self.params.addProperty('Bar Grasp/Retract Depth', 0.1, attributes=om.PropertyAttributes(singleStep=0.01, decimals=2))
    self.params.addProperty('Steering Wheel Angle when Grasped', 0, attributes=om.PropertyAttributes(singleStep=10))
    self.params.addProperty('Turning Radius', 9.5, attributes=om.PropertyAttributes(singleStep=0.01, decimals=2))
    self.params.addProperty('Wheel Separation', 1.4, attributes=om.PropertyAttributes(singleStep=0.01, decimals=2))
    self.params.addProperty('Trajectory Segments', 25, attributes=om.PropertyAttributes(singleStep=1, decimals=0))
    self.params.addProperty('Trajectory X Offset', 0.0, attributes=om.PropertyAttributes(singleStep=0.01, decimals=2))
    self.params.addProperty('Trajectory Y Offset', 0.30, attributes=om.PropertyAttributes(singleStep=0.01, decimals=2))
    self.params.addProperty('Trajectory Angle Offset', 0.0, attributes=om.PropertyAttributes(singleStep=1, decimals=0))
    self.params.addProperty('Show Trajectory', False)
    self.params.addProperty('Show Driving/Regrasp Tasks',0, attributes=om.PropertyAttributes(enumNames=['Ingress','Regrasp', 'Egress']))
    self._syncProperties()
def _syncProperties(self):
    """Copy the panel's property values onto this panel and the underlying
    DrivingPlanner, then apply them.

    Also toggles visibility of the Left/RightDrivingTrajectory objects and
    registers them with the camera-overlay updaters when 'Show Trajectory'
    is enabled (only after __init__ has created affordanceUpdater).
    """
    self.preGraspDepth = self.params.getProperty('PreGrasp/Retract Depth')
    self.touchDepth = self.params.getProperty('Touch Depth')
    self.preGraspAngle = self.params.getProperty('PreGrasp Angle')
    self.turnAngle = self.params.getProperty('Turn Angle')
    self.speed = self.params.getProperty('Speed')
    # several values are hardcoded here rather than read from (commented-out) properties
    self.turnRadius = 0.18 #self.params.getProperty('Steering Wheel Radius (meters)')
    self.knotPoints = 20
    self.gazeTol = 0.3
    self.drivingPlanner.positionTol = 0.0
    self.drivingPlanner.quatTol = 0.0
    self.graspLocation = 'center'
    self.drivingPlanner.seedWithCurrent = self.params.getProperty('Seed with current posture')
    # self.drivingPlanner.throttleIdleAngleSlack = self.params.getProperty('Throttle Idle Angle Slack')
    self.drivingPlanner.fineGrainedThrottleTravel = self.params.getProperty('Fine Grained Throttle Travel')
    self.drivingPlanner.coarseGrainedThrottleTravel = self.params.getProperty('Coarse Grained Throttle Travel')
    self.barGraspDepth = self.params.getProperty('Bar Grasp/Retract Depth')
    self.drivingPlanner.maxTurningRadius = self.params.getProperty('Turning Radius')
    self.drivingPlanner.userSpecifiedGraspWheelAngleInDegrees = self.params.getProperty('Steering Wheel Angle when Grasped')
    self.drivingPlanner.trajSegments = self.params.getProperty('Trajectory Segments')
    self.drivingPlanner.wheelDistance = self.params.getProperty('Wheel Separation')
    self.showTrajectory = self.params.getProperty('Show Trajectory')
    self.drivingPlanner.trajectoryX = self.params.getProperty('Trajectory X Offset')
    self.drivingPlanner.trajectoryY = self.params.getProperty('Trajectory Y Offset')
    self.drivingPlanner.trajectoryAngle = self.params.getProperty('Trajectory Angle Offset')
    self.taskToShow = self.params.getProperty('Show Driving/Regrasp Tasks')
    # during __init__ this runs before the camera overlay updaters exist
    if hasattr(self, 'affordanceUpdater'):
        if self.showTrajectory:
            self.updateAndDrawTrajectory()
            leftTraj = om.findObjectByName('LeftDrivingTrajectory')
            rightTraj = om.findObjectByName('RightDrivingTrajectory')
            leftTraj.setProperty('Visible', self.showTrajectory)
            rightTraj.setProperty('Visible', self.showTrajectory)
            self.affordanceUpdater.extraObjects = [leftTraj, rightTraj]
            self.affordanceUpdaterLeft.extraObjects = [leftTraj, rightTraj]
        else:
            self.affordanceUpdater.extraObjects = []
            self.affordanceUpdaterLeft.extraObjects = []
    self.drivingPlanner.applyProperties()
def onSteeringCommand(self, msg):
    """Handle delta-steering driving commands: cache the commanded steering
    angle (converted to degrees) on the planner and redraw the trajectory.
    Other command types are ignored."""
    if msg.type != msg.TYPE_DRIVE_DELTA_STEERING:
        return
    self.drivingPlanner.steeringAngleDegrees = math.degrees(msg.steering_angle)
    self.updateAndDrawTrajectory()
def onStart(self):
    """Initialize the panel: sync the wheel affordance pose to the planner,
    then announce readiness on the console."""
    self.onUpdateWheelLocation()
    print('Driving Planner Ready')
def onUpdateWheelLocation(self):
    """Push the current 'Steering Wheel' affordance pose, flattened to a
    7-vector (xyz + quaternion), to the driving planner."""
    wheelFrame = om.findObjectByName('Steering Wheel').getChildFrame().transform
    xyzquat = np.concatenate(transformUtils.poseFromTransform(wheelFrame))
    self.drivingPlanner.updateWheelTransform(xyzquat)
def onPlanSafe(self):
    """Delegate to the driving planner's safe-posture plan."""
    planner = self.drivingPlanner
    planner.planSafe()
def onPlanPreGrasp(self, depth=None):
    """Plan the pre-grasp reach toward the steering wheel.

    The *depth* argument is accepted for interface compatibility but the
    panel's 'PreGrasp/Retract Depth' property (self.preGraspDepth) is used.
    Fix: syncs panel properties first, consistent with the other onPlan*
    handlers (onPlanTouch/Retract/Turn), so the plan always sees the latest
    property values.
    """
    self._syncProperties()
    self.drivingPlanner.planPreGrasp(depth=self.preGraspDepth, speed=self.speed, angle=self.preGraspAngle,
                                     graspLocation=self.graspLocation, turnRadius=self.turnRadius)
def onPlanTouch(self):
    """Sync panel properties, then plan the wheel touch at the configured
    depth and speed."""
    self._syncProperties()
    planner = self.drivingPlanner
    planner.planTouch(depth=self.touchDepth, speed=self.speed)
def onPlanRetract(self):
    """Sync panel properties, then plan the hand retract using the pre-grasp
    depth and configured speed."""
    self._syncProperties()
    planner = self.drivingPlanner
    planner.planRetract(depth=self.preGraspDepth, speed=self.speed)
def onPlanTurn(self):
    """Sync panel properties, then plan a wheel turn to the configured angle
    at the configured speed."""
    self._syncProperties()
    planner = self.drivingPlanner
    planner.planTurn(angle=self.turnAngle, speed=self.speed)
def onPlanSteeringWheelTurn(self):
    """Sync panel properties, then plan a steering-wheel turn with the
    configured speed, radius, knot points and gaze tolerance."""
    self._syncProperties()
    planner = self.drivingPlanner
    planner.planSteeringWheelTurn(speed=self.speed, turnRadius=self.turnRadius, knotPoints=self.knotPoints, gazeTol=self.gazeTol)
def onPropertyChanged(self, propertySet, propertyName):
    """Re-sync cached properties whenever a panel property changes, and
    rebuild the task tree if the 'Show Driving/Regrasp Tasks' selection
    changed.

    Fix: a stray planBarGrasp() call had been left at the end of this
    handler, which kicked off a bar-grasp plan on every property edit;
    bar-grasp planning is now only triggered from the explicit
    'Plan Bar Grab' button (onPlanBarGrasp).
    """
    previousTaskToShow = self.taskToShow
    self._syncProperties()
    if previousTaskToShow != self.taskToShow:
        self.addTasks()
def onPlanBarRetract(self):
    """Plan the bar retract at the configured bar depth, with the
    straight-line hand-motion constraint enabled."""
    planner = self.drivingPlanner
    planner.planBarRetract(depth=self.barGraspDepth, useLineConstraint=True)
def onPlanBarGrasp(self):
    """Plan the bar grasp at the configured bar depth, with the
    straight-line hand-motion constraint enabled."""
    planner = self.drivingPlanner
    planner.planBarGrasp(depth=self.barGraspDepth, useLineConstraint=True)
def setParamsPreGrasp1(self):
    """Preset: first pre-grasp approach depth (22 cm)."""
    self.params.setProperty('PreGrasp/Retract Depth', 0.22)
def setParamsPreGrasp2(self):
    """Preset: second, closer pre-grasp approach depth (10 cm)."""
    self.params.setProperty('PreGrasp/Retract Depth', 0.10)
def setParamsWheelRetract(self):
    """Preset: wheel retract depth (30 cm)."""
    self.params.setProperty('PreGrasp/Retract Depth', 0.3)
def setParamsBarRetract(self):
    """Preset: bar retract depth (30 cm)."""
    self.params.setProperty('Bar Grasp/Retract Depth', 0.3)
def setParamsBarGrasp(self):
    """Preset: bar grasp depth (3 cm)."""
    self.params.setProperty('Bar Grasp/Retract Depth', 0.03)
def addTasks(self):
    """Rebuild the task tree for the currently selected task set.

    taskToShow: 0 = Ingress, 1 = Regrasp, 2 = Egress; any other value just
    clears the tree.

    Fix: the egress branch previously tested `self.addEgressTasks() == 2`
    instead of `self.taskToShow == 2`, so the egress tasks were built as a
    side effect of the comparison on every unmatched selection (and the
    intended branch body never ran, since the call returns None).
    """
    self.taskTree.removeAllTasks()
    if self.taskToShow == 0:
        self.addIngressTasks()
    elif self.taskToShow == 1:
        self.addRegraspTasks()
    elif self.taskToShow == 2:
        self.addEgressTasks()
def addIngressTasks(self):
    """Build the ingress task sequence (enter the car and grasp the controls).

    Walks the operator through: prep/posing, grasping the steering wheel with the
    left hand, grasping the grab bar with the right hand, moving the left foot
    onto the pedal, and launching the driving drivers.
    """
    # some helpers
    self.folder = None
    def addTask(task, parent=None):
        # default parent is the most recently created folder
        parent = parent or self.folder
        self.taskTree.onAddTask(task, copy=False, parent=parent)
    def addFunc(func, name, parent=None):
        addTask(rt.CallbackTask(callback=func, name=name), parent=parent)
    def addFolder(name, parent=None):
        self.folder = self.taskTree.addGroup(name, parent=parent)
        return self.folder
    def addManipTaskMatlab(name, planFunc, userPrompt=False, parentFolder=None):
        # MATLAB-side plans: always require the operator to approve and commit.
        # NOTE(review): userPrompt and parentFolder are accepted but unused here.
        prevFolder = self.folder
        addFolder(name, prevFolder)
        addFunc(planFunc, 'plan')
        addTask(rt.UserPromptTask(name='approve manip plan', message='Please approve and commit manipulation plan.'))
        addTask(rt.UserPromptTask(name='wait for plan execution', message='Continue when plan finishes.'))
    def addManipTask(name, planFunc, userPrompt=False):
        prevFolder = self.folder
        addFolder(name, prevFolder)
        addFunc(planFunc, 'plan')
        if not userPrompt:
            addTask(rt.CheckPlanInfo(name='check manip plan info'))
        else:
            addTask(rt.UserPromptTask(name='approve manip plan', message='Please approve manipulation plan.'))
        # dp is bound below (before any of these callbacks run); closures bind late
        addFunc(dp.commitManipPlan, name='execute manip plan')
        addTask(rt.UserPromptTask(name='wait for plan execution', message='Continue when plan finishes.'))
    dp = self.drivingPlanner
    # --- prep ---
    prep = addFolder('Prep')
    addTask(rt.UserPromptTask(name="start streaming", message="Please start streaming"))
    addManipTask('car entry posture', self.drivingPlanner.planCarEntryPose, userPrompt=True)
    self.folder = prep
    addTask(rt.UserPromptTask(name="start April tag process", message="Enable April tag detection"))
    addTask(rt.UserPromptTask(name="spawn polaris model", message="launch egress planner and spawn polaris model"))
    addFunc(self.onStart, 'update wheel location')
    # --- grasp the steering wheel with the left hand ---
    graspWheel = addFolder('Grasp Steering Wheel')
    addTask(rt.UserPromptTask(name="approve open left hand", message="Check it is clear to open left hand"))
    addTask(rt.OpenHand(name='open left hand', side='Left'))
    addFunc(self.setParamsPreGrasp1, 'set params')
    addManipTaskMatlab('Pre Grasp 1', self.onPlanPreGrasp)
    self.folder = graspWheel
    addTask(rt.UserPromptTask(name="check alignment", message="Please ask field team for hand location relative to wheel, adjust wheel affordance if necessary"))
    addFunc(self.setParamsPreGrasp2, 'set params')
    addManipTaskMatlab('Pre Grasp 2', self.onPlanPreGrasp)
    self.folder = graspWheel
    addTask(rt.UserPromptTask(name="check alignment", message="Please make any manual adjustments if necessary"))
    addTask(rt.UserPromptTask(name="approve close left hand", message="Check clear to close left hand"))
    addTask(rt.CloseHand(name='close left hand', side='Left'))
    addTask(rt.UserPromptTask(name="set true steering wheel angle", message="Set true steering wheel angle in spin box"))
    addFunc(self.drivingPlanner.setSteeringWheelAndWristGraspAngles, 'capture true wheel angle and current wrist angle')
    # --- grasp the support bar with the right hand ---
    graspBar = addFolder('Grasp Bar')
    addTask(rt.UserPromptTask(name="approve open right hand", message="Check clear to open right hand"))
    addTask(rt.OpenHand(name='open right hand', side='Right'))
    addFunc(self.setParamsBarGrasp, 'set params')
    addManipTask('Bar Grasp', self.onPlanBarGrasp, userPrompt=True)
    self.folder = graspBar
    addTask(rt.UserPromptTask(name="check alignment and depth", message="Please check alignment and depth, make any manual adjustments"))
    addTask(rt.UserPromptTask(name="approve close right hand", message="Check ok to close right hand"))
    addTask(rt.CloseHand(name='close Right hand', side='Right'))
    # --- move the left foot onto the pedal ---
    footToDriving = addFolder('Foot to Driving Pose')
    addManipTask('Foot Up', self.drivingPlanner.planLegUp, userPrompt=True)
    self.folder = footToDriving
    addManipTask('Swing leg in', self.drivingPlanner.planLegSwingIn , userPrompt=True)
    self.folder = footToDriving
    addManipTask('Foot On Pedal', self.drivingPlanner.planLegPedal, userPrompt=True)
    # --- ready to drive ---
    driving = addFolder('Driving')
    addTask(rt.UserPromptTask(name="base side streaming", message="Please start base side streaming"))
    addTask(rt.UserPromptTask(name="launch drivers", message="Please launch throttle and steering drivers"))
    addTask(rt.UserPromptTask(name="switch to regrasp tasks", message="Switch to regrasp task set"))
def addEgressTasks(self):
    """Build the egress task sequence (release controls and get out of the car).

    Walks the operator through: moving the left foot off the pedal and out,
    releasing the steering wheel, releasing the grab bar, posing the arms for
    egress, and stopping base-side streaming.
    """
    # some helpers
    self.folder = None
    def addTask(task, parent=None):
        # default parent is the most recently created folder
        parent = parent or self.folder
        self.taskTree.onAddTask(task, copy=False, parent=parent)
    def addFunc(func, name, parent=None):
        addTask(rt.CallbackTask(callback=func, name=name), parent=parent)
    def addFolder(name, parent=None):
        self.folder = self.taskTree.addGroup(name, parent=parent)
        return self.folder
    def addManipTaskMatlab(name, planFunc, userPrompt=False, parentFolder=None):
        # MATLAB-side plans: always require the operator to approve and commit
        prevFolder = self.folder
        addFolder(name, prevFolder)
        addFunc(planFunc, 'plan')
        addTask(rt.UserPromptTask(name='approve manip plan', message='Please approve and commit manipulation plan.'))
        addTask(rt.UserPromptTask(name='wait for plan execution', message='Continue when plan finishes.'))
    def addManipTask(name, planFunc, userPrompt=False):
        prevFolder = self.folder
        addFolder(name, prevFolder)
        addFunc(planFunc, 'plan')
        if not userPrompt:
            addTask(rt.CheckPlanInfo(name='check manip plan info'))
        else:
            addTask(rt.UserPromptTask(name='approve manip plan', message='Please approve manipulation plan.'))
        # dp is bound below (before any of these callbacks run); closures bind late
        addFunc(dp.commitManipPlan, name='execute manip plan')
        addTask(rt.UserPromptTask(name='wait for plan execution', message='Continue when plan finishes.'))
    dp = self.drivingPlanner
    # --- leg out of the footwell ---
    footToEgress = addFolder('Foot to Egress Pose')
    addManipTask('Foot Off Pedal', self.drivingPlanner.planLegAbovePedal, userPrompt=True)
    self.folder = footToEgress
    addManipTask('Swing leg out', self.drivingPlanner.planLegSwingOut, userPrompt=True)
    self.folder = footToEgress
    addManipTask('Foot Down', self.drivingPlanner.planLegEgressStart, userPrompt=True)
    # --- release steering wheel ---
    ungraspWheel = addFolder('Ungrasp Steering Wheel')
    addTask(rt.UserPromptTask(name="approve open left hand", message="Check ok to open left hand"))
    addTask(rt.OpenHand(name='open left hand', side='Left'))
    addFunc(self.setParamsWheelRetract, 'set params')
    addManipTaskMatlab('Retract hand', self.onPlanRetract)
    self.folder = ungraspWheel
    addTask(rt.UserPromptTask(name="approve close left hand", message="Check ok to close left hand"))
    addTask(rt.CloseHand(name='close left hand', side='Left'))
    # --- release grab bar ---
    ungraspBar = addFolder('Ungrasp Bar')
    addTask(rt.UserPromptTask(name="approve open right hand", message="Check clear to open right hand"))
    # fixed: task label previously said 'open left hand' although it opens the right hand
    addTask(rt.OpenHand(name='open right hand', side='Right'))
    addFunc(self.setParamsBarRetract, 'set params')
    addManipTask('Retract hand', self.onPlanBarRetract, userPrompt=True)
    self.folder = ungraspBar
    addTask(rt.UserPromptTask(name="approve close right hand", message="Check ok to close right hand"))
    addTask(rt.CloseHand(name='close Right hand', side='Right'))
    # --- final posing and shutdown ---
    armsToEgressStart = addFolder('Arms to Egress Start')
    addManipTask('Arms To Egress Start', self.drivingPlanner.planArmsEgressStart, userPrompt=True)
    prep = addFolder('Stop Streaming')
    addTask(rt.UserPromptTask(name='stop streaming base side', message='stop streaming base side'))
def addRegraspTasks(self):
    """Build the regrasp task sequence: release and re-grasp the steering wheel mid-drive."""
    self.folder = None
    def addTask(task, parent=None):
        # default parent is the most recently created folder
        parent = parent or self.folder
        self.taskTree.onAddTask(task, copy=False, parent=parent)
    def addFunc(func, name, parent=None):
        addTask(rt.CallbackTask(callback=func, name=name), parent=parent)
    def addFolder(name, parent=None):
        self.folder = self.taskTree.addGroup(name, parent=parent)
        return self.folder
    def addManipTaskMatlab(name, planFunc, userPrompt=False, parentFolder=None):
        # NOTE(review): defined for parity with the other task builders but unused here
        prevFolder = self.folder
        addFolder(name, prevFolder)
        addFunc(planFunc, 'plan')
        addTask(rt.UserPromptTask(name='approve manip plan', message='Please approve and commit manipulation plan.'))
        addTask(rt.UserPromptTask(name='wait for plan execution', message='Continue when plan finishes.'))
    def addManipTask(name, planFunc, userPrompt=False):
        prevFolder = self.folder
        addFolder(name, prevFolder)
        addFunc(planFunc, 'plan')
        if not userPrompt:
            addTask(rt.CheckPlanInfo(name='check manip plan info'))
        else:
            addTask(rt.UserPromptTask(name='approve manip plan', message='Please approve manipulation plan.'))
        # dp is bound below (before any of these callbacks run); closures bind late
        addFunc(dp.commitManipPlan, name='execute manip plan')
        addTask(rt.UserPromptTask(name='wait for plan execution', message='Continue when plan finishes.'))
    dp = self.drivingPlanner
    regrasp = addFolder('Regrasp')
    addTask(rt.UserPromptTask(name="approve open left hand", message="Check ok to open left hand"))
    addTask(rt.OpenHand(name='open left hand', side='Left'))
    addManipTask('Plan Regrasp', self.drivingPlanner.planSteeringWheelReGrasp, userPrompt=True)
    self.folder = regrasp
    addTask(rt.UserPromptTask(name="approve close left hand", message="Check ok to close left hand"))
    addTask(rt.CloseHand(name='close left hand', side='Left'))
    addFunc(self.drivingPlanner.updateGraspOffsets, 'update steering wheel grasp offsets')
def updateAndDrawTrajectory(self):
    """Recompute left/right driving trajectories for the current steering angle and draw them."""
    planner = self.drivingPlanner
    wheelAngleDegrees = np.rad2deg(planner.getSteeringWheelAngle())
    leftTraj, rightTraj = planner.computeDrivingTrajectories(wheelAngleDegrees, planner.maxTurningRadius, planner.trajSegments + 1)
    self.drawDrivingTrajectory(planner.transformDrivingTrajectory(leftTraj), 'LeftDrivingTrajectory')
    self.drawDrivingTrajectory(planner.transformDrivingTrajectory(rightTraj), 'RightDrivingTrajectory')
def drawDrivingTrajectory(self, drivingTraj, name):
    """Draw a trajectory as a string of color-graded spheres; no-op unless showTrajectory is set."""
    if not self.showTrajectory:
        return
    debugData = DebugData()
    pointCount = len(drivingTraj)
    if pointCount > 1:
        for idx, point in enumerate(drivingTraj):
            # fade from red-ish to green-ish along the trajectory
            fraction = (pointCount - idx) / float(pointCount)
            debugData.addSphere(point, 0.05, [fraction, 1 - fraction, 1])
    vis.updatePolyData(debugData.getPolyData(), name)
    trajObj = om.findObjectByName(name)
    trajObj.setProperty('Color By', 1)
# TODO: call updateAndDrawTrajectory on a fixed timer, rather than on model changed
import ddapp
import math
import textwrap
import drc as lcmdrc
import bot_core as lcmbotcore
import vtkAll as vtk
from ddapp import transformUtils
from ddapp import visualization as vis
from ddapp import objectmodel as om
from ddapp import lcmUtils
from ddapp import ik
from ddapp import cameraview
from ddapp import affordanceupdater
from ddapp import affordancemanager
from ddapp import segmentation
from ddapp import robotstate
from ddapp.debugVis import DebugData
from ddapp.utime import getUtime
from ddapp.ikplanner import ConstraintSet
import ddapp.tasks.robottasks as rt
from ddapp.ikparameters import IkParameters
from ddapp.timercallback import TimerCallback
import os
import functools
import numpy as np
import scipy.io
from ddapp.tasks.taskuserpanel import TaskUserPanel
class DrivingPlanner(object):
def __init__(self, ikServer, robotSystem):
    """Planner for the vehicle driving task.

    Wraps a MATLAB-side drivingPlanner (reached through ikServer) for
    wheel/bar manipulation plans, and uses the local ikPlanner for leg plans.

    ikServer: MATLAB IK server wrapper; startup commands are queued once it
        signals startup completion.
    robotSystem: provides robot models, joint controllers and the ikPlanner.
    """
    self.ikServer = ikServer
    self.robotSystem = robotSystem
    # send the MATLAB driving-planner startup commands once the server is ready
    self.ikServer.connectStartupCompleted(self.initialize)
    self.steeringAngleDegrees = 0.0
    self.maxTurningRadius = 9.5
    self.trajectoryX = 0
    self.trajectoryY = 0.3
    self.trajectoryAngle = 0
    self.trajSegments = 25
    self.wheelDistance = 1.4
    # identity transform until a tag detection arrives -- TODO confirm updated elsewhere
    self.tagToLocalTransform = transformUtils.transformFromPose([0,0,0],[1,0,0,0])
    self.commandStreamChannel = 'JOINT_POSITION_GOAL'
    # indices of left ankle (aky) and left wrist (lwy) joints in the Drake pose vector
    self.akyIdx = robotstate.getDrakePoseJointNames().index('l_leg_aky')
    self.lwyIdx = robotstate.getDrakePoseJointNames().index('l_arm_lwy')
    # NaN until calibrated
    self.anklePositions = np.array([np.nan,np.nan])
    self.jointLimitsMin = np.array([self.robotSystem.teleopRobotModel.model.getJointLimits(jointName)[0] for jointName in robotstate.getDrakePoseJointNames()])
    self.jointLimitsMax = np.array([self.robotSystem.teleopRobotModel.model.getJointLimits(jointName)[1] for jointName in robotstate.getDrakePoseJointNames()])
    self.idleAngleSlack = 10
    self.fineGrainedThrottleTravel = 10
    self.steeringAngleOffset = 0
    self.throttlePublishChannel = 'THROTTLE_COMMAND_POSITION_GOAL'
    self.steeringPublishChannel = 'STEERING_COMMAND_POSITION_GOAL'
    self.addSubscribers()
    # grasp-time wheel/wrist angles; None until a grasp is captured
    self.graspWheelAngle = None
    self.graspWristAngle = None
    # history of every plan produced by this planner
    self.plans = []
def getInitCommands(self):
    """Return the MATLAB command strings that set up the server-side driving planner."""
    startupCommand = textwrap.dedent('''
        % ------ driving planner startup ------
        addpath([getenv('DRC_BASE'), '/software/control/matlab/planners/driving_planner']);
        clear driving_planner_options;
        driving_planner_options.listen_to_lcm_flag = 0;
        driving_planner_options.qstar = q_nom;
        dp = drivingPlanner(s.robot, driving_planner_options);
        % ------ driving planner startup end ------
        ''')
    return [startupCommand]
def addSubscribers(self):
    """Subscribe to the throttle and steering command channels."""
    subscriptions = [
        ('THROTTLE_COMMAND', lcmdrc.trigger_finger_t, self.onThrottleCommand),
        ('STEERING_COMMAND', lcmdrc.driving_control_cmd_t, self.onSteeringCommand),
    ]
    for channel, messageType, callback in subscriptions:
        lcmUtils.addSubscriber(channel, messageType, callback)
def initialize(self, ikServer, success):
    """Queue the MATLAB driving-planner startup commands once the ik server has started.

    Skipped on server restarts, since the startup commands already ran.
    """
    if ikServer.restarted:
        return
    startupCommands = self.getInitCommands()
    taskQueue = self.ikServer.taskQueue
    taskQueue.addTask(functools.partial(self.ikServer.comm.sendCommandsAsync, startupCommands))
    taskQueue.start()
# applies the properties to the driving planner object
def applyProperties(self):
    """Push the configurable tolerances and seeding flag to the MATLAB dp object."""
    commands = [
        "dp.options.quat_tol = %r;" % self.quatTol,
        "dp.options.tol = %r;" % self.positionTol,
        "dp.options.seed_with_current = %r;" % self.seedWithCurrent,
    ]
    taskQueue = self.ikServer.taskQueue
    taskQueue.addTask(functools.partial(self.ikServer.comm.sendCommandsAsync, commands))
    taskQueue.start()
def updateWheelTransform(self, xyzquat):
    """Send the detected steering-wheel pose (xyzquat) to the MATLAB planner."""
    toColVec = ik.ConstraintBase.toColumnVectorString
    startPose = self.getPlanningStartPose()
    commands = [
        "q0 = %s;" % toColVec(startPose),
        "xyzquat = %s;" % toColVec(xyzquat),
        "dp = dp.updateWheelTransform(xyzquat, q0);",
    ]
    taskQueue = self.ikServer.taskQueue
    taskQueue.addTask(functools.partial(self.ikServer.comm.sendCommandsAsync, commands))
    taskQueue.start()
def planSafe(self, speed=1):
    """Queue a MATLAB planSafe call from the current measured pose."""
    startPose = self.getPlanningStartPose()
    commands = [
        "clear options;",
        "options.speed = %r;" % speed,
        "dp.planSafe(options,%s);" % ik.ConstraintBase.toColumnVectorString(startPose),
    ]
    taskQueue = self.ikServer.taskQueue
    taskQueue.addTask(functools.partial(self.ikServer.comm.sendCommandsAsync, commands))
    taskQueue.start()
def planPreGrasp(self, depth=0.2, xyz_des=None, angle=0, speed=1, graspLocation='center', turnRadius=0.187):
    """Queue a MATLAB planPreGrasp call (approach the steering wheel).

    :param depth: approach/retract depth passed to the MATLAB options struct.
    :param xyz_des: optional desired hand position (column vector); omitted when None.
    :param angle: wheel angle in degrees (converted to radians for MATLAB).
    :param speed: plan execution speed scaling.
    :param graspLocation: where on the wheel to grasp (e.g. 'center').
    :param turnRadius: wheel radius used by the MATLAB planner.
    """
    commands = []
    commands.append("clear options;")
    commands.append("options = struct('depth',{%r});" % depth)
    commands.append("options.turn_radius = %r;" % turnRadius)
    commands.append("options.graspLocation = '%s';" % graspLocation)
    commands.append("options.angle = %r;" % np.radians(angle))
    commands.append("options.speed = %r;" % speed)
    if xyz_des is not None:
        # fixed: this used to pass the format string and the value as two arguments
        # to list.append (a TypeError at runtime); the '%' operator was missing
        commands.append("options.xyz_des = {%s};" % ik.ConstraintBase.toColumnVectorString(xyz_des))
    startPose = self.getPlanningStartPose()
    commands.append("dp.planPreGrasp(options, %s);" % ik.ConstraintBase.toColumnVectorString(startPose))
    self.ikServer.taskQueue.addTask(functools.partial(self.ikServer.comm.sendCommandsAsync, commands))
    self.ikServer.taskQueue.start()
def planTouch(self, depth=0, xyz_des=None, speed=1):
    """Queue a MATLAB planTouch call (touch the wheel) from the current pose."""
    startPose = self.getPlanningStartPose()
    commands = [
        "clear options;",
        "options = struct('depth',{%r});" % depth,
        "options.speed = %r;" % speed,
        "dp.planTouch(options, %s);" % ik.ConstraintBase.toColumnVectorString(startPose),
    ]
    taskQueue = self.ikServer.taskQueue
    taskQueue.addTask(functools.partial(self.ikServer.comm.sendCommandsAsync, commands))
    taskQueue.start()
def planRetract(self, depth=0.2, speed=1):
    """Queue a MATLAB planRetract call (pull the hand back off the wheel).

    :param depth: retract depth passed to the MATLAB options struct.
    :param speed: plan execution speed scaling.
    """
    commands = []
    commands.append("clear options;")
    commands.append("options = struct('depth',{%r});" % depth)
    # %r for consistency with every sibling plan method (was %s)
    commands.append("options.speed = %r;" % speed)
    startPose = self.getPlanningStartPose()
    commands.append("dp.planRetract(options, %s);" % ik.ConstraintBase.toColumnVectorString(startPose))
    self.ikServer.taskQueue.addTask(functools.partial(self.ikServer.comm.sendCommandsAsync, commands))
    self.ikServer.taskQueue.start()
def planTurn(self, angle=0, speed=1):
    """Queue a MATLAB planTurn call turning the wheel by `angle` degrees."""
    startPose = self.getPlanningStartPose()
    commands = [
        "clear options;",
        "options.turn_angle = %r;" % np.radians(angle),
        "options.speed = %r;" % speed,
        "options.use_raw_angle = 1;",
        "dp.planTurn(options,%s);" % ik.ConstraintBase.toColumnVectorString(startPose),
    ]
    taskQueue = self.ikServer.taskQueue
    taskQueue.addTask(functools.partial(self.ikServer.comm.sendCommandsAsync, commands))
    taskQueue.start()
def planSteeringWheelTurn(self, speed=1, knotPoints=20, turnRadius=.187, gazeTol=0.3):
    """Queue a MATLAB planSteeringWheelTurn call with the given trajectory parameters."""
    startPose = self.getPlanningStartPose()
    commands = [
        "clear options;",
        "options.speed = %r;" % speed,
        "options.turn_radius = %r;" % turnRadius,
        "options.N = %r;" % knotPoints,
        "options.steering_gaze_tol = %r;" % gazeTol,
        "dp.planSteeringWheelTurn(options,%s);" % ik.ConstraintBase.toColumnVectorString(startPose),
    ]
    taskQueue = self.ikServer.taskQueue
    taskQueue.addTask(functools.partial(self.ikServer.comm.sendCommandsAsync, commands))
    taskQueue.start()
def planSeed(self):
    """Queue a MATLAB planSeed call from the current measured pose."""
    startPose = self.getPlanningStartPose()
    commands = ["dp.planSeed(%s);" % ik.ConstraintBase.toColumnVectorString(startPose)]
    taskQueue = self.ikServer.taskQueue
    taskQueue.addTask(functools.partial(self.ikServer.comm.sendCommandsAsync, commands))
    taskQueue.start()
def getPlanningStartPose(self):
    """Return the current measured joint pose used to seed every plan."""
    jointController = self.robotSystem.robotStateJointController
    return jointController.q
# move left leg up a bit
def planLegUp(self):
    """Plan lifting the left foot 5 cm along its own z axis; every other joint locked."""
    ikPlanner = self.robotSystem.ikPlanner
    startPose = self.getPlanningStartPose()
    startPoseName = 'q_start_foot'
    self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
    endPoseName = 'q_foot_end'
    lFoot2World = self.robotSystem.ikPlanner.getLinkFrameAtPose('l_foot', startPose)
    targetFrame = transformUtils.copyFrame(lFoot2World)
    targetFrame.PreMultiply()
    # PreMultiply + Translate: offset is in the foot's local frame
    targetFrame.Translate([0.0,0.0, 0.05])
    footPoseConstraint = self.createLeftFootPoseConstraint(targetFrame)
    allButLeftLegPostureConstraint = self.createAllButLeftLegPostureConstraint(startPoseName)
    constraints = [allButLeftLegPostureConstraint]
    constraints.extend(footPoseConstraint)
    cs = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
    cs.ikParameters = IkParameters(maxDegreesPerSecond=10, usePointwise=False)
    # NOTE(review): seed/nominal name 'q_start' differs from the registered
    # 'q_start_foot' above -- confirm this pose name exists / is intended
    cs.seedPoseName = 'q_start'
    cs.nominalPoseName = 'q_start'
    endPose = cs.runIk()
    plan = cs.planEndPoseGoal()
    self.plans.append(plan)
    return plan
def planLegSwingIn(self):
    """Plan swinging the left leg into the car, ending just above the pedal.

    The trajectory passes through the 'left foot pedal swing' frame at 30% of
    its duration; all joints except the left leg are locked.
    """
    om.findObjectByName('left foot driving')  # NOTE(review): return value unused
    ikPlanner = self.robotSystem.ikPlanner
    startPose = self.getPlanningStartPose()
    startPoseName = 'q_start_foot'
    self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
    endPoseName = 'q_foot_end'
    legAbovePedalFrame = transformUtils.copyFrame(om.findObjectByName('left foot driving').transform)
    legAbovePedalFrame.PreMultiply()
    # hover slightly back/above the driving (pedal) frame, in its local coords
    legAbovePedalFrame.Translate([-0.02, 0.0, 0.03])
    identityFrame = vtk.vtkTransform()  # NOTE(review): unused
    legAbovePedalConstraint = self.createLeftFootPoseConstraint(legAbovePedalFrame, tspan=[1,1])
    allButLeftLegPostureConstraint = self.createAllButLeftLegPostureConstraint(startPoseName)
    constraints = [allButLeftLegPostureConstraint]
    constraints.extend(legAbovePedalConstraint)
    seedPoseName = 'q_driving'
    seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'driving')
    self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
    cs = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
    cs.ikParameters = IkParameters(maxDegreesPerSecond=10, usePointwise=False)
    cs.seedPoseName = 'q_driving'
    cs.nominalPoseName = 'q_driving'
    endPose = cs.runIk()
    # waypoint: pass through the swing frame at t=0.3 of the trajectory
    legSwingFrame = om.findObjectByName('left foot pedal swing').transform
    cs.constraints.extend(self.createLeftFootPoseConstraint(legSwingFrame, tspan=[0.3,0.3]))
    keyFramePlan = cs.runIkTraj()
    self.plans.append(keyFramePlan)
    return keyFramePlan
def planLegAbovePedal(self):
    """Plan moving the left foot to hover just above/behind the pedal (end-pose plan)."""
    om.findObjectByName('left foot driving')  # NOTE(review): return value unused
    ikPlanner = self.robotSystem.ikPlanner
    startPose = self.getPlanningStartPose()
    startPoseName = 'q_start_foot'
    self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
    endPoseName = 'q_foot_end'
    legAbovePedalFrame = transformUtils.copyFrame(om.findObjectByName('left foot driving').transform)
    legAbovePedalFrame.PreMultiply()
    # hover slightly back/above the driving (pedal) frame, in its local coords
    legAbovePedalFrame.Translate([-0.02,0.0, 0.03])
    identityFrame = vtk.vtkTransform()  # NOTE(review): unused
    legAbovePedalConstraint = self.createLeftFootPoseConstraint(legAbovePedalFrame, tspan=[1,1])
    allButLeftLegPostureConstraint = self.createAllButLeftLegPostureConstraint(startPoseName)
    constraints = [allButLeftLegPostureConstraint]
    constraints.extend(legAbovePedalConstraint)
    seedPoseName = 'q_driving'
    seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'driving')
    self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
    cs = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
    cs.ikParameters = IkParameters(maxDegreesPerSecond=10, usePointwise=False)
    cs.seedPoseName = 'q_driving'
    cs.nominalPoseName = 'q_driving'
    endPose = cs.runIk()
    plan = cs.planEndPoseGoal()
    self.plans.append(plan)
    return plan
def planLegSwingOut(self):
    """Plan swinging the left leg out of the car, ending 5 cm above the 'left foot start' frame.

    Passes through the 'left foot pedal swing' frame at 70% of the trajectory;
    all joints except the left leg are locked.
    """
    om.findObjectByName('left foot driving')  # NOTE(review): return value unused
    ikPlanner = self.robotSystem.ikPlanner
    startPose = self.getPlanningStartPose()
    startPoseName = 'q_start_foot'
    self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
    endPoseName = 'q_foot_end'
    legUpFrame = transformUtils.copyFrame(om.findObjectByName('left foot start').transform)
    legUpFrame.PreMultiply()
    # 5 cm above the start frame, in its local coords
    legUpFrame.Translate([0.0,0.0, 0.05])
    identityFrame = vtk.vtkTransform()  # NOTE(review): unused
    legUpConstraint = self.createLeftFootPoseConstraint(legUpFrame, tspan=[1,1])
    allButLeftLegPostureConstraint = self.createAllButLeftLegPostureConstraint(startPoseName)
    constraints = [allButLeftLegPostureConstraint]
    constraints.extend(legUpConstraint)
    seedPoseName = 'q_driving'
    seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'driving')
    self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
    cs = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
    cs.ikParameters = IkParameters(maxDegreesPerSecond=10, usePointwise=False)
    cs.seedPoseName = 'q_driving'
    cs.nominalPoseName = 'q_driving'
    endPose = cs.runIk()
    # waypoint: pass through the swing frame at t=0.7 of the trajectory
    legSwingFrame = om.findObjectByName('left foot pedal swing').transform
    cs.constraints.extend(self.createLeftFootPoseConstraint(legSwingFrame, tspan=[0.7,0.7]))
    keyFramePlan = cs.runIkTraj()
    self.plans.append(keyFramePlan)
    return keyFramePlan
def planLegEgressStart(self):
    """Plan placing the left foot back onto the 'left foot start' frame (end-pose plan)."""
    om.findObjectByName('left foot driving')  # NOTE(review): return value unused
    ikPlanner = self.robotSystem.ikPlanner
    startPose = self.getPlanningStartPose()
    startPoseName = 'q_start_foot'
    self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
    endPoseName = 'q_foot_end'
    legDownFrame = transformUtils.copyFrame(om.findObjectByName('left foot start').transform)
    identityFrame = vtk.vtkTransform()  # NOTE(review): unused
    legDownConstraint = self.createLeftFootPoseConstraint(legDownFrame)
    allButLeftLegPostureConstraint = self.createAllButLeftLegPostureConstraint(startPoseName)
    constraints = [allButLeftLegPostureConstraint]
    constraints.extend(legDownConstraint)
    seedPoseName = 'q_driving'
    seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'driving')
    self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
    cs = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
    cs.ikParameters = IkParameters(maxDegreesPerSecond=10, usePointwise=False)
    cs.seedPoseName = 'q_driving'
    cs.nominalPoseName = 'q_driving'
    endPose = cs.runIk()
    plan = cs.planEndPoseGoal()
    self.plans.append(plan)
    return plan
def planLegPedal(self):
    """Plan placing the left foot onto the pedal ('left foot driving' frame, end-pose plan)."""
    ikPlanner = self.robotSystem.ikPlanner
    startPose = self.getPlanningStartPose()
    startPoseName = 'q_start_foot'
    self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
    endPoseName = 'q_foot_end'
    lfootConstraintFrame = transformUtils.copyFrame(om.findObjectByName('left foot driving').transform)
    identityFrame = vtk.vtkTransform()
    lfootPositionOrientationConstraint = ikPlanner.createPositionOrientationConstraint('l_foot', lfootConstraintFrame, identityFrame)
    allButLeftLegPostureConstraint = self.createAllButLeftLegPostureConstraint(startPoseName)
    constraints = [allButLeftLegPostureConstraint]
    constraints.extend(lfootPositionOrientationConstraint)
    seedPoseName = 'q_driving'
    seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'driving')
    self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
    cs = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
    # quasiStaticShrinkFactor=1 differs from the other leg plans -- presumably
    # to keep the robot stable while loading the pedal; confirm
    cs.ikParameters = IkParameters(quasiStaticShrinkFactor=1, maxDegreesPerSecond=10, usePointwise=False)
    cs.seedPoseName = 'q_driving'
    cs.nominalPoseName = 'q_driving'
    endPose = cs.runIk()
    keyFramePlan = cs.planEndPoseGoal()
    self.plans.append(keyFramePlan)
    return keyFramePlan
def planSteeringWheelReGrasp(self, useLineConstraint=False):
    """Plan a left-hand regrasp of the steering wheel: retract, flip the palm 180deg, re-approach.

    Records the wheel angle before the regrasp in self.wheelAngleBeforeReGrasp.
    The final pose is the current palm pose rotated 180deg about its local Y;
    intermediate constraints place the palm at a retracted pose (t=0.25) and a
    flipped pre-grasp pose (t=0.75).
    """
    ikPlanner = self.robotSystem.ikPlanner
    startPose = self.getPlanningStartPose()
    self.wheelAngleBeforeReGrasp = self.getSteeringWheelAngle()
    startPoseName = 'q_regrasp_start'
    self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
    endPoseName = 'q_regrasp_end'
    handName = 'left'
    handLinkName = 'l_hand'
    maxMetersPerSecond = 0.1
    retractDepth = 0.15
    palmToHand = ikPlanner.getPalmToHandLink(handName)
    palmToWorld = ikPlanner.newGraspToWorldFrame(startPose, handName, palmToHand)
    # final pose: current palm pose flipped 180deg about its local Y axis
    finalTargetFrame = transformUtils.copyFrame(palmToWorld)
    finalTargetFrame.PreMultiply()
    finalTargetFrame.RotateY(180)
    finalPoseConstraint = self.createLeftPalmPoseConstraints(finalTargetFrame, tspan=[1,1])
    # waypoint at t=0.25: palm retracted from the wheel along local -Y
    retractTargetFrame = transformUtils.copyFrame(palmToWorld)
    retractTargetFrame.PreMultiply()
    retractTargetFrame.Translate([0.0, -retractDepth, 0.0])
    retractPoseConstraint = self.createLeftPalmPoseConstraints(retractTargetFrame, tspan=[0.25,0.25])
    # waypoint at t=0.75: retracted pose flipped 180deg (pre-grasp for the new grip)
    preGraspTargetFrame = transformUtils.copyFrame(retractTargetFrame)
    preGraspTargetFrame.PreMultiply()
    preGraspTargetFrame.RotateY(180)
    preGraspPoseConstraint = self.createLeftPalmPoseConstraints(preGraspTargetFrame, tspan=[0.75, 0.75])
    allButLeftArmPostureConstraint = self.createAllButLeftArmPostureConstraint(startPoseName)
    # NOTE(review): the three locked-* constraints below are built but unused
    # (superseded by allButLeftArmPostureConstraint; see the commented alternative)
    lockedBaseConstraint = ikPlanner.createLockedBasePostureConstraint(startPoseName)
    lockedRightArmConstraint = ikPlanner.createLockedRightArmPostureConstraint(startPoseName)
    lockedTorsoConstraint = ikPlanner.createLockedTorsoPostureConstraint(startPoseName)
    constraints = [allButLeftArmPostureConstraint]
    # constraints = [lockedTorsoConstraint, lockedRightArmConstraint]
    constraints.extend(finalPoseConstraint)
    seedPoseName = 'q_regrasp_seed'
    # seedPose = startPose
    # seedPose[self.lwyIdx] = startPose[self.lwyIdx] + np.pi
    seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'driving')
    self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
    constraintSet = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
    constraintSet.ikParameters = IkParameters(quasiStaticShrinkFactor=10, usePointwise=False, maxDegreesPerSecond=60,
                                              maxBodyTranslationSpeed=maxMetersPerSecond, rescaleBodyNames=[handLinkName], rescaleBodyPts=list(ikPlanner.getPalmPoint()))
    constraintSet.seedPoseName = seedPoseName
    constraintSet.nominalPoseName = seedPoseName
    # for c in constraintSet.constraints:
    #     print c
    # vis.updateFrame(palmToWorld, 'palm frame')
    # vis.updateFrame(finalTargetFrame, 'target frame')
    endPose = constraintSet.runIk()
    # move on line constraint
    motionVector = np.array(retractTargetFrame.GetPosition()) - np.array(palmToWorld.GetPosition())
    motionTargetFrame = transformUtils.getTransformFromOriginAndNormal(np.array(retractTargetFrame.GetPosition()), motionVector)
    # vis.updateFrame(motionTargetFrame,'motion frame')
    # vis.updateFrame(targetFrame, 'target')
    # vis.updateFrame(currentFrame, 'current')
    p = ikPlanner.createLinePositionConstraint(handLinkName, palmToHand, motionTargetFrame, lineAxis=2, bounds=[-np.linalg.norm(motionVector)*1, 0], positionTolerance=0.001)
    p.tspan = np.array([0.12,0.9])
    # p_out = ikPlanner.createLinePositionConstraint(handLinkName, palmToHand, motionTargetFrame, lineAxis=2, bounds=[-np.linalg.norm(motionVector)*1, 0], positionTolerance=0.001)
    # p_out.tspan = np.linspace(,1,5)
    # NOTE(review): runIk was already called above; this second call discards
    # the first result -- confirm whether both are needed
    endPose = constraintSet.runIk()
    constraintSet.constraints.extend(retractPoseConstraint)
    constraintSet.constraints.extend(preGraspPoseConstraint)
    # NOTE(review): both branches call runIkTraj identically; the only
    # difference is whether the line constraint is appended first
    if useLineConstraint:
        constraintSet.constraints.append(p)
        plan = constraintSet.runIkTraj()
    else:
        plan = constraintSet.runIkTraj()
    self.plans.append(plan)
    return plan
def createLeftFootPoseConstraint(self, targetFrame, tspan=None):
    """Return (position, orientation) constraints pinning l_foot to targetFrame.

    :param targetFrame: world-frame target transform for the l_foot link.
    :param tspan: [tStart, tEnd] the constraints are active over; defaults to
        the whole trajectory ([-inf, inf]). A fresh list is created per call
        (was a shared mutable default argument).
    """
    if tspan is None:
        tspan = [-np.inf, np.inf]
    positionConstraint, orientationConstraint = self.robotSystem.ikPlanner.createPositionOrientationConstraint('l_foot', targetFrame, vtk.vtkTransform())
    positionConstraint.tspan = tspan
    orientationConstraint.tspan = tspan
    return positionConstraint, orientationConstraint
def createLeftPalmPoseConstraints(self, targetFrame, tspan=None):
    """Return (position, orientation) grasp constraints for the left palm.

    Delegates to createPalmPoseConstraints with side='left' (the two methods
    previously duplicated each other). tspan defaults to the whole trajectory;
    None avoids the shared mutable default argument.
    """
    if tspan is None:
        tspan = [-np.inf, np.inf]
    return self.createPalmPoseConstraints('left', targetFrame, tspan)
def createPalmPoseConstraints(self, side, targetFrame, tspan=None):
    """Return (position, orientation) grasp constraints for the given hand.

    :param side: 'left' or 'right' hand.
    :param targetFrame: world-frame target transform for the palm.
    :param tspan: [tStart, tEnd] the constraints are active over; defaults to
        the whole trajectory ([-inf, inf]). A fresh list is created per call
        (was a shared mutable default argument).
    """
    if tspan is None:
        tspan = [-np.inf, np.inf]
    ikPlanner = self.robotSystem.ikPlanner
    positionConstraint, orientationConstraint = ikPlanner.createPositionOrientationGraspConstraints(side, targetFrame)
    positionConstraint.tspan = tspan
    orientationConstraint.tspan = tspan
    return positionConstraint, orientationConstraint
def createLeftHandPoseConstraintOnWheel(self, depth=0.12, tspan=None):
    """Return (position, orientation) constraints placing l_hand_face on the steering wheel.

    :param depth: offset along the palm frame's local Y axis (toward the wheel).
    :param tspan: [tStart, tEnd] the constraints are active over; defaults to
        the whole trajectory ([-inf, inf]). A fresh list is created per call
        (was a shared mutable default argument).
    """
    if tspan is None:
        tspan = [-np.inf, np.inf]
    targetFrame = self.getSteeringWheelPalmFrame()
    targetFrame.PreMultiply()
    targetFrame.Translate([0.0, depth, 0.0])
    positionConstraint, orientationConstraint = self.robotSystem.ikPlanner.createPositionOrientationConstraint('l_hand_face', targetFrame, vtk.vtkTransform())
    positionConstraint.tspan = tspan
    orientationConstraint.tspan = tspan
    return positionConstraint, orientationConstraint
def getSteeringWheelPalmFrame(self):
    """Return a palm-alignment frame derived from the 'Steering Wheel frame' affordance."""
    wheelObj = om.findObjectByName('Steering Wheel frame')
    palmFrame = transformUtils.copyFrame(wheelObj.transform)
    # reorient the wheel frame into palm orientation (local-frame rotations)
    palmFrame.PreMultiply()
    palmFrame.RotateX(90)
    palmFrame.PreMultiply()
    palmFrame.RotateZ(-90)
    return palmFrame
def planBarGrasp(self,depth=0.03, useLineConstraint=False):
    """Plan grasping the grab bar with the right hand.

    Targets the 'right hand grab bar' affordance frame, offset `depth` along
    its local -Y; all joints except the right arm are locked. With
    useLineConstraint the approach is constrained to a straight line and
    planned as a trajectory, otherwise an end-pose plan is returned.
    """
    ikPlanner = self.robotSystem.ikPlanner
    handSide = 'right'
    handLinkName = 'r_hand'
    startPose = self.getPlanningStartPose()
    startPoseName = 'q_grasp_start'
    self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
    endPoseName = 'q_end_grasp'
    palmToHand = ikPlanner.getPalmToHandLink(handSide)
    palmToWorld = transformUtils.copyFrame(ikPlanner.newGraspToWorldFrame(startPose, handSide, palmToHand))
    targetFrame = transformUtils.copyFrame(om.findObjectByName('right hand grab bar').transform)
    targetFrame.PreMultiply()
    # back off from the bar along its local -Y by the grasp depth
    targetFrame.Translate([0.0,-depth,0.0])
    finalPoseConstraints = self.createPalmPoseConstraints(handSide, targetFrame, tspan=[1,1])
    allButRightArmPostureConstraint = self.createAllButRightArmPostureConstraint(startPoseName)
    seedPoseName = 'q_bar_grab'
    seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'bar_pre_grab', side=handSide)
    self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
    constraints = [allButRightArmPostureConstraint]
    constraints.extend(finalPoseConstraints)
    constraintSet = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
    constraintSet.ikParameters = IkParameters(quasiStaticShrinkFactor=10, usePointwise=False)
    constraintSet.seedPoseName = seedPoseName
    constraintSet.nominalPoseName = seedPoseName
    # move on line constraint
    motionVector = np.array(targetFrame.GetPosition()) - np.array(palmToWorld.GetPosition())
    motionTargetFrame = transformUtils.getTransformFromOriginAndNormal(np.array(targetFrame.GetPosition()), motionVector)
    # vis.updateFrame(motionTargetFrame,'motion frame')
    # vis.updateFrame(targetFrame, 'target')
    # vis.updateFrame(currentFrame, 'current')
    p = ikPlanner.createLinePositionConstraint(handLinkName, palmToHand, motionTargetFrame, lineAxis=2, bounds=[-np.linalg.norm(motionVector)*1, 0], positionTolerance=0.001)
    # active over the middle of the trajectory only
    p.tspan = np.linspace(0.2,0.8,5)
    endPose = constraintSet.runIk()
    if useLineConstraint:
        constraintSet.constraints.append(p)
        plan = constraintSet.runIkTraj()
    else:
        plan = constraintSet.planEndPoseGoal()
    self.plans.append(plan)
    return plan
def planBarRetract(self, depth=0.3, useLineConstraint=False):
    """Plan pulling the right hand straight back off the grab bar.

    depth: retract distance in meters along the palm's -y axis.
    useLineConstraint: if True, constrain the palm to travel along the
        straight line from start to target while retracting.
    Returns the manipulation plan (also appended to self.plans).
    """
    ikPlanner = self.robotSystem.ikPlanner
    handSide = 'right'
    handLinkName = 'r_hand'
    startPose = self.getPlanningStartPose()
    startPoseName = 'q_grasp_start'
    ikPlanner.addPose(startPose, startPoseName)
    endPoseName = 'q_end_grasp'
    # Speed cap while the hand is near the bar (was an unused local shadowed
    # by a duplicated literal in the IkParameters call below).
    maxBodyTranslationSpeed = 0.3

    # Target frame: current palm frame translated straight back by `depth`.
    palmToHand = ikPlanner.getPalmToHandLink(handSide)
    palmToWorld = transformUtils.copyFrame(ikPlanner.newGraspToWorldFrame(startPose, handSide, palmToHand))
    targetFrame = transformUtils.copyFrame(palmToWorld)
    targetFrame.PreMultiply()
    targetFrame.Translate([0.0, -depth, 0.0])

    finalPoseConstraints = self.createPalmPoseConstraints(handSide, targetFrame, tspan=[1, 1])
    allButRightArmPostureConstraint = self.createAllButRightArmPostureConstraint(startPoseName)

    seedPoseName = 'q_bar_grab'
    seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'bar_pre_grab', side=handSide)
    ikPlanner.addPose(seedPose, seedPoseName)

    constraints = [allButRightArmPostureConstraint]
    constraints.extend(finalPoseConstraints)
    constraintSet = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
    constraintSet.ikParameters = IkParameters(quasiStaticShrinkFactor=10, usePointwise=False,
                                              maxBodyTranslationSpeed=maxBodyTranslationSpeed)
    # Use the named seed pose (was the same literal repeated twice).
    constraintSet.seedPoseName = seedPoseName
    constraintSet.nominalPoseName = seedPoseName

    # Optional move-on-line constraint along the retract direction.
    motionVector = np.array(targetFrame.GetPosition()) - np.array(palmToWorld.GetPosition())
    motionTargetFrame = transformUtils.getTransformFromOriginAndNormal(np.array(targetFrame.GetPosition()), motionVector)
    p = ikPlanner.createLinePositionConstraint(handLinkName, palmToHand, motionTargetFrame, lineAxis=2,
                                               bounds=[-np.linalg.norm(motionVector), 0.0], positionTolerance=0.02)
    p.tspan = np.linspace(0, 1, 5)

    endPose = constraintSet.runIk()
    if useLineConstraint:
        constraintSet.constraints.append(p)
        plan = constraintSet.runIkTraj()
    else:
        plan = constraintSet.planEndPoseGoal()

    self.plans.append(plan)
    return plan
def commitManipPlan(self):
    """Send the most recently computed plan to the manip planner for execution."""
    latestPlan = self.plans[-1]
    self.robotSystem.manipPlanner.commitManipPlan(latestPlan)
def createAllButLeftLegPostureConstraint(self, poseName):
    """Lock every joint except the left leg to the posture named `poseName`."""
    nonLeftLegJoints = robotstate.matchJoints('^(?!l_leg)')
    return self.robotSystem.ikPlanner.createPostureConstraint(poseName, nonLeftLegJoints)
def createAllButLeftArmPostureConstraint(self, poseName):
    """Lock every joint except the left arm to the posture named `poseName`."""
    nonLeftArmJoints = robotstate.matchJoints('^(?!l_arm)')
    return self.robotSystem.ikPlanner.createPostureConstraint(poseName, nonLeftArmJoints)
def createAllButRightArmPostureConstraint(self, poseName):
    """Lock every joint except the right arm to the posture named `poseName`."""
    nonRightArmJoints = robotstate.matchJoints('^(?!r_arm)')
    return self.robotSystem.ikPlanner.createPostureConstraint(poseName, nonRightArmJoints)
def computeDrivingTrajectories(self, steeringAngleDegrees, maxTurningRadius = 10, numTrajPoints = 50):
angle = -steeringAngleDegrees
if abs(angle) < 0.1:
angle = 1e-8
turningRadius = 1.0 / (angle * (1 / (maxTurningRadius * 170.0)))
turningCenter = [0, turningRadius, 0]
trajPoints = list()
for i in range(0, numTrajPoints):
theta = math.radians((40 / turningRadius) * i - 90)
trajPoint = np.asarray(turningCenter)+turningRadius*np.asarray([math.cos(theta), math.sin(theta), 0])
trajPoints.append(trajPoint)
leftTraj = list()
rightTraj = list()
for i in range(0, numTrajPoints - 1):
v1 = trajPoints[i + 1] - trajPoints[i]
v2 = np.cross(v1, [0, 0, 1])
v2 /= np.linalg.norm(v2)
leftTraj.append(trajPoints[i] - 0.5 * self.wheelDistance * v2)
rightTraj.append(trajPoints[i] + 0.5 * self.wheelDistance * v2)
return leftTraj, rightTraj
def transformDrivingTrajectory(self, drivingTraj):
    """Map trajectory points into the local frame via the April tag pose.

    The yaw is taken from the tag's z axis projected onto the ground plane,
    then the user-specified x/y/angle offsets are applied. Returns a new list
    of transformed points; the input list is not modified.
    """
    xform = vtk.vtkTransform()
    tagZAxis = self.tagToLocalTransform.TransformVector([0, 0, 1])
    tagOrigin = self.tagToLocalTransform.TransformPoint([0, 0, 0])

    planarNorm = np.linalg.norm(tagZAxis[0:2])
    if planarNorm > 1e-6:
        heading = tagZAxis[0:2] / planarNorm
        yaw = math.degrees(math.atan2(heading[1], heading[0]))
    else:
        # tag z axis is (nearly) vertical; no usable heading
        yaw = 0

    # vtkTransform concatenates: translate to the tag, rotate, then apply
    # the user offsets in the rotated frame.
    xform.Translate([tagOrigin[0], tagOrigin[1], 0])
    xform.RotateZ(self.trajectoryAngle + yaw)
    xform.Translate([self.trajectoryX, self.trajectoryY, 0])

    return [np.asarray(xform.TransformPoint(p)) for p in drivingTraj]
def onThrottleCommand(self, msg):
    # Stream a throttle (l_leg_aky) position goal derived from the slider
    # values in `msg`. Does nothing until the LOW/HIGH ankle positions have
    # been captured (anklePositions would still contain NaNs).
    if np.isnan(self.anklePositions).any():
        # print 'you must initialize the LOW/HIGH ankle positions before streaming throttle commands'
        # print 'use the Capture Ankle Angle Low/High Buttons'
        return

    # slider 0 is the coarse grained slider, slider 1 is for fine grained adjustment
    slider = self.decodeThrottleMessage(msg)
    const = self.jointLimitsMin[self.akyIdx]
    # Goal (degrees): joint minimum + coarse travel, plus a fine adjustment
    # centered at slider[1] == 0.5.
    ankleGoalPosition = const + slider[0]*self.coarseGrainedThrottleTravel + (slider[1]-1/2.0)*self.fineGrainedThrottleTravel
    ankleGoalPositionRadians = np.deg2rad(ankleGoalPosition)

    # trip the safety if slider[3] is < 1/2, emergency come off the throttle
    if slider[3] < 0.5:
        print 'Emergency stop, coming off the throttle'
        print "setting l_leg_aky to it's min value"
        # jointLimitsMin is assumed to already be in radians here — note the
        # non-emergency path converts degrees->radians above; TODO confirm units.
        ankleGoalPositionRadians = self.jointLimitsMin[self.akyIdx]

    msg = lcmdrc.joint_position_goal_t()
    msg.utime = getUtime()
    msg.joint_position = ankleGoalPositionRadians
    msg.joint_name = 'l_leg_aky'
    lcmUtils.publish(self.throttlePublishChannel, msg)
def onSteeringCommand(self, msg):
    """Convert an incoming steering command into an l_arm_lwy position goal
    (negated steering angle plus the stored wrist offset) and publish it."""
    goal = self.steeringAngleOffset - msg.steering_angle
    cmd = lcmdrc.joint_position_goal_t()
    cmd.utime = getUtime()
    cmd.joint_name = 'l_arm_lwy'
    cmd.joint_position = goal
    lcmUtils.publish(self.steeringPublishChannel, cmd)
def decodeThrottleMessage(self, msg):
    """Pack the four slider fields of a throttle message into a float array."""
    return np.array([msg.slider1, msg.slider2, msg.slider3, msg.slider4], dtype=float)
def captureRobotPoseFromStreaming(self):
helper = lcmUtils.MessageResponseHelper(self.commandStreamChannel, lcmdrc.robot_state_t)
msg = helper.waitForResponse(timeout=1000, keepAlive=False)
if msg is None:
print "Didn't receive a JOINT_POSITION_GOAL message"
print "Are you streaming?"
return None
pose = robotstate.convertStateMessageToDrakePose(msg)
return pose
def planCarEntryPose(self):
    """Plan from the current pose to the stored 'car_entry_new' posture."""
    planner = self.robotSystem.ikPlanner
    currentPose = self.getPlanningStartPose()
    goalPose = planner.getMergedPostureFromDatabase(currentPose, 'driving', 'car_entry_new')
    self.addPlan(planner.computePostureGoal(currentPose, goalPose, feetOnGround=False))
def planArmsEgressStart(self):
    """Plan a two-stage arm motion into the egress start configuration:
    first both pre-egress arm postures, then the combined 'egress-arms' pose."""
    planner = self.robotSystem.ikPlanner
    currentPose = self.getPlanningStartPose()
    midPose = planner.getMergedPostureFromDatabase(currentPose, 'driving', 'pre_egress_left_arm', side='left')
    midPose = planner.getMergedPostureFromDatabase(midPose, 'driving', 'pre_egress_right_arm', side='right')
    goalPose = planner.getMergedPostureFromDatabase(midPose, 'driving', 'egress-arms')
    plan = planner.computeMultiPostureGoal([currentPose, midPose, goalPose], feetOnGround=False,
                                           ikParameters=IkParameters(maxDegreesPerSecond=60))
    self.addPlan(plan)
def setSteeringWheelAndWristGraspAngles(self):
    """Record the grasp reference angles: the user-specified wheel angle (in
    radians) and the wrist yaw at the current planning pose."""
    self.graspWheelAngle = np.deg2rad(self.userSpecifiedGraspWheelAngleInDegrees)
    self.graspWristAngle = self.getPlanningStartPose()[self.lwyIdx]
def getSteeringWheelAngle(self):
    """Estimate the current steering wheel angle (radians).

    The estimate is the wheel angle at grasp time plus the wrist yaw change
    since then. Returns 0 if the grasp references were never captured."""
    if self.graspWheelAngle is None or self.graspWristAngle is None:
        # wrist/wheel grasp references haven't been initialized yet
        return 0
    currentWristAngle = self.getPlanningStartPose()[self.lwyIdx]
    return self.graspWheelAngle + currentWristAngle - self.graspWristAngle
# Called after a regrasp plan has executed: re-baseline the wrist/wheel grasp
# references so subsequent wheel-angle estimates use the new grip.
def updateGraspOffsets(self):
    self.graspWristAngle = self.getPlanningStartPose()[self.lwyIdx]
    self.graspWheelAngle = self.wheelAngleBeforeReGrasp
def printSteeringWheelAngleInDegrees(self):
    # Debug helper: print the current wheel-angle estimate converted to degrees.
    print np.rad2deg(self.getSteeringWheelAngle())
def addPlan(self, plan):
    """Append a newly computed manipulation plan to the plan history."""
    self.plans.append(plan)
class DrivingPlannerPanel(TaskUserPanel):
    """UI panel for the driving task.

    Owns a DrivingPlanner, exposes manual buttons and tunable properties for
    the ingress / driving / regrasp / egress phases, subscribes to steering
    and April-tag LCM messages, and overlays the predicted wheel trajectories
    on the camera images.
    """

    def __init__(self, robotSystem):
        TaskUserPanel.__init__(self, windowTitle='Driving Task')

        self.robotSystem = robotSystem
        self.drivingPlanner = DrivingPlanner(robotSystem.ikServer, robotSystem)
        self.addDefaultProperties()
        self.addButtons()
        self.addTasks()
        self.showTrajectory = False
        self.steeringSub = lcmUtils.addSubscriber('STEERING_COMMAND', lcmdrc.driving_control_cmd_t, self.onSteeringCommand)
        self.apriltagSub = lcmUtils.addSubscriber('APRIL_TAG_TO_CAMERA_LEFT', lcmbotcore.rigid_transform_t, self.onAprilTag)

        # Camera overlays: right chest camera and left head camera, with the
        # affordance models drawn on top of each image.
        self.imageView = cameraview.CameraImageView(cameraview.imageManager, 'CAMERACHEST_RIGHT', 'right image view')
        self.imageViewLeft = cameraview.CameraImageView(cameraview.imageManager, 'CAMERA_LEFT', 'left image view')
        self.imageView.view.orientationMarkerWidget().Off()
        self.imageView.view.backgroundRenderer().SetBackground([0,0,0])
        self.imageView.view.backgroundRenderer().SetBackground2([0,0,0])
        self.imageViewLeft.view.orientationMarkerWidget().Off()
        self.imageViewLeft.view.backgroundRenderer().SetBackground([0,0,0])
        self.imageViewLeft.view.backgroundRenderer().SetBackground2([0,0,0])
        self.affordanceUpdater = affordanceupdater.AffordanceInCameraUpdater(segmentation.affordanceManager, self.imageView)
        self.affordanceUpdaterLeft = affordanceupdater.AffordanceInCameraUpdater(segmentation.affordanceManager, self.imageViewLeft)
        self.affordanceUpdater.prependImageName = True
        self.affordanceUpdaterLeft.prependImageName = True
        self.affordanceUpdater.timer.start()
        self.affordanceUpdaterLeft.timer.start()
        self.imageViewLayout.addWidget(self.imageView.view)
        self.imageViewLayout.addWidget(self.imageViewLeft.view)

        # Redraw the predicted trajectory on a fixed 30 fps timer rather than
        # on every model/message change.
        self.timer = TimerCallback(targetFps=30)
        self.timer.callback = self.updateAndDrawTrajectory
        self.timer.start()

    def onAprilTag(self, msg):
        """Refresh the tag->local transform from an incoming April tag message.

        Drawing is handled by the fixed-rate timer started in __init__, so no
        explicit redraw happens here. (This resolves an unmerged conflict in
        the original source which had both variants.)
        """
        cameraview.imageManager.queue.getTransform('april_tag_car_beam', 'local', msg.utime, self.drivingPlanner.tagToLocalTransform)

    def addButtons(self):
        """Create the manual task-panel buttons."""
        self.addManualButton('Start', self.onStart)
        self.addManualButton('Update Wheel Location', self.onUpdateWheelLocation)
        self.addManualButton('Plan Safe', self.onPlanSafe)
        self.addManualButton('Plan Pre Grasp', self.onPlanPreGrasp)
        self.addManualButton('Plan Touch', self.onPlanTouch)
        self.addManualButton('Plan Retract', self.onPlanRetract)
        self.addManualButton('Plan Turn', self.onPlanTurn)
        self.addManualButton('Plan Wheel Re-Grasp', self.drivingPlanner.planSteeringWheelReGrasp)
        self.addManualButton('Plan Bar Grab', self.onPlanBarGrasp)
        self.addManualButton('Plan Bar Retract', self.onPlanBarRetract)
        # self.addManualButton('Plan Steering Wheel Turn', self.onPlanSteeringWheelTurn)
        # self.addManualButton('Plan Seed', self.drivingPlanner.planSeed)
        # self.addManualButton('Capture Ankle Angle Low', functools.partial(self.drivingPlanner.captureAnklePosition, 0))
        # self.addManualButton('Capture Ankle Angle High', functools.partial(self.drivingPlanner.captureAnklePosition, 1))
        self.addManualButton('Capture Wheel and Wrist grasp angles', self.drivingPlanner.setSteeringWheelAndWristGraspAngles)
        self.addManualButton('Print Steering Wheel Angle', self.drivingPlanner.printSteeringWheelAngleInDegrees)

    def addDefaultProperties(self):
        """Declare the tunable panel properties and push them to the planner."""
        self.params.addProperty('PreGrasp/Retract Depth', 0.2, attributes=om.PropertyAttributes(singleStep=0.01, decimals=3))
        self.params.addProperty('Touch Depth', 0.0, attributes=om.PropertyAttributes(singleStep=0.01, decimals=3))
        self.params.addProperty('PreGrasp Angle', 0, attributes=om.PropertyAttributes(singleStep=10))
        self.params.addProperty('Turn Angle', 0, attributes=om.PropertyAttributes(singleStep=10))
        # self.params.addProperty('Steering Wheel Radius (meters)', 0.1873, attributes=om.PropertyAttributes(singleStep=0.01))
        # self.params.addProperty('Knot Points', 20, attributes=om.PropertyAttributes(singleStep=1))
        # self.params.addProperty('Gaze Constraint Tol', 0.3, attributes=om.PropertyAttributes(singleStep=0.1, decimals=2))
        self.params.addProperty('Position Constraint Tol', 0.0, attributes=om.PropertyAttributes(singleStep=0.01, decimals=2))
        self.params.addProperty('Quat Constraint Tol', 0.0, attributes=om.PropertyAttributes(singleStep=0.01, decimals=2))
        self.params.addProperty('Grasp Location', 0, attributes=om.PropertyAttributes(enumNames=['Center','Rim']))
        self.params.addProperty('Seed with current posture', 0, attributes=om.PropertyAttributes(enumNames=['False','True']))
        self.params.addProperty('Speed', 0.75, attributes=om.PropertyAttributes(singleStep=0.1, decimals=2))
        # self.params.addProperty('Throttle Idle Angle Slack', 10, attributes=om.PropertyAttributes(singleStep=1))
        self.params.addProperty('Coarse Grained Throttle Travel', 100, attributes=om.PropertyAttributes(singleStep=10))
        self.params.addProperty('Fine Grained Throttle Travel', 10, attributes=om.PropertyAttributes(singleStep=1))
        self.params.addProperty('Bar Grasp/Retract Depth', 0.1, attributes=om.PropertyAttributes(singleStep=0.01, decimals=2))
        self.params.addProperty('Steering Wheel Angle when Grasped', 0, attributes=om.PropertyAttributes(singleStep=10))
        self.params.addProperty('Turning Radius', 9.5, attributes=om.PropertyAttributes(singleStep=0.01, decimals=2))
        self.params.addProperty('Wheel Separation', 1.4, attributes=om.PropertyAttributes(singleStep=0.01, decimals=2))
        self.params.addProperty('Trajectory Segments', 25, attributes=om.PropertyAttributes(singleStep=1, decimals=0))
        # (stray trailing commas removed from the next two statements)
        self.params.addProperty('Trajectory X Offset', 0.0, attributes=om.PropertyAttributes(singleStep=0.01, decimals=2))
        self.params.addProperty('Trajectory Y Offset', 0.30, attributes=om.PropertyAttributes(singleStep=0.01, decimals=2))
        self.params.addProperty('Trajectory Angle Offset', 0.0, attributes=om.PropertyAttributes(singleStep=1, decimals=0))
        self.params.addProperty('Show Trajectory', False)
        self.params.addProperty('Show Driving/Regrasp Tasks',0, attributes=om.PropertyAttributes(enumNames=['Ingress','Regrasp', 'Egress']))
        self._syncProperties()

    def _syncProperties(self):
        """Copy the panel property values onto this panel and the planner."""
        self.preGraspDepth = self.params.getProperty('PreGrasp/Retract Depth')
        self.touchDepth = self.params.getProperty('Touch Depth')
        self.preGraspAngle = self.params.getProperty('PreGrasp Angle')
        self.turnAngle = self.params.getProperty('Turn Angle')
        self.speed = self.params.getProperty('Speed')
        self.turnRadius = 0.18 #self.params.getProperty('Steering Wheel Radius (meters)')
        self.knotPoints = 20
        self.gazeTol = 0.3
        self.drivingPlanner.positionTol = 0.0
        self.drivingPlanner.quatTol = 0.0
        self.graspLocation = 'center'
        self.drivingPlanner.seedWithCurrent = self.params.getProperty('Seed with current posture')
        # self.drivingPlanner.throttleIdleAngleSlack = self.params.getProperty('Throttle Idle Angle Slack')
        self.drivingPlanner.fineGrainedThrottleTravel = self.params.getProperty('Fine Grained Throttle Travel')
        self.drivingPlanner.coarseGrainedThrottleTravel = self.params.getProperty('Coarse Grained Throttle Travel')
        self.barGraspDepth = self.params.getProperty('Bar Grasp/Retract Depth')
        self.drivingPlanner.maxTurningRadius = self.params.getProperty('Turning Radius')
        self.drivingPlanner.userSpecifiedGraspWheelAngleInDegrees = self.params.getProperty('Steering Wheel Angle when Grasped')
        self.drivingPlanner.trajSegments = self.params.getProperty('Trajectory Segments')
        self.drivingPlanner.wheelDistance = self.params.getProperty('Wheel Separation')
        self.showTrajectory = self.params.getProperty('Show Trajectory')
        self.drivingPlanner.trajectoryX = self.params.getProperty('Trajectory X Offset')
        self.drivingPlanner.trajectoryY = self.params.getProperty('Trajectory Y Offset')
        self.drivingPlanner.trajectoryAngle = self.params.getProperty('Trajectory Angle Offset')
        self.taskToShow = self.params.getProperty('Show Driving/Regrasp Tasks')
        # Toggle trajectory visibility in both camera overlays; guard on
        # affordanceUpdater because _syncProperties runs once before __init__
        # has created the updaters.
        if hasattr(self, 'affordanceUpdater'):
            leftTraj = om.findObjectByName('LeftDrivingTrajectory')
            rightTraj = om.findObjectByName('RightDrivingTrajectory')
            if leftTraj:
                leftTraj.setProperty('Visible', self.showTrajectory)
            if rightTraj:
                rightTraj.setProperty('Visible', self.showTrajectory)
            if self.showTrajectory:
                self.affordanceUpdater.extraObjects = [leftTraj, rightTraj]
                self.affordanceUpdaterLeft.extraObjects = [leftTraj, rightTraj]
            else:
                self.affordanceUpdater.extraObjects = []
                self.affordanceUpdaterLeft.extraObjects = []
        self.drivingPlanner.applyProperties()

    def onSteeringCommand(self, msg):
        """Track the commanded steering angle for trajectory prediction."""
        if msg.type == msg.TYPE_DRIVE_DELTA_STEERING:
            self.drivingPlanner.steeringAngleDegrees = math.degrees(msg.steering_angle)
            self.updateAndDrawTrajectory()

    def onStart(self):
        self.onUpdateWheelLocation()
        print('Driving Planner Ready')

    def onUpdateWheelLocation(self):
        """Push the steering wheel affordance pose to the planner."""
        f = om.findObjectByName('Steering Wheel').getChildFrame().transform
        xyzquat = transformUtils.poseFromTransform(f)
        xyzquat = np.concatenate(xyzquat)
        self.drivingPlanner.updateWheelTransform(xyzquat)

    def onPlanSafe(self):
        self.drivingPlanner.planSafe()

    def onPlanPreGrasp(self, depth=None):
        # NOTE(review): unlike the other onPlan* handlers this does not call
        # _syncProperties() first, and the `depth` argument is ignored in
        # favor of the panel property — confirm whether that is intentional.
        self.drivingPlanner.planPreGrasp(depth=self.preGraspDepth, speed=self.speed, angle=self.preGraspAngle,
            graspLocation=self.graspLocation, turnRadius=self.turnRadius)

    def onPlanTouch(self):
        self._syncProperties()
        self.drivingPlanner.planTouch(depth=self.touchDepth, speed=self.speed)

    def onPlanRetract(self):
        self._syncProperties()
        self.drivingPlanner.planRetract(depth=self.preGraspDepth, speed=self.speed)

    def onPlanTurn(self):
        self._syncProperties()
        self.drivingPlanner.planTurn(angle=self.turnAngle, speed=self.speed)

    def onPlanSteeringWheelTurn(self):
        self._syncProperties()
        self.drivingPlanner.planSteeringWheelTurn(speed=self.speed, turnRadius=self.turnRadius, knotPoints=self.knotPoints, gazeTol=self.gazeTol)

    def onPropertyChanged(self, propertySet, propertyName):
        """Re-sync properties; rebuild the task tree if the task set changed."""
        taskToShowOld = self.taskToShow
        self._syncProperties()
        if not taskToShowOld == self.taskToShow:
            self.addTasks()

    def onPlanBarRetract(self):
        self.drivingPlanner.planBarRetract(depth=self.barGraspDepth, useLineConstraint=True)

    def onPlanBarGrasp(self):
        self.drivingPlanner.planBarGrasp(depth=self.barGraspDepth, useLineConstraint=True)

    # Property presets used by the task-tree "set params" steps.
    def setParamsPreGrasp1(self):
        self.params.setProperty('PreGrasp/Retract Depth', 0.22)

    def setParamsPreGrasp2(self):
        self.params.setProperty('PreGrasp/Retract Depth', 0.10)

    def setParamsWheelRetract(self):
        self.params.setProperty('PreGrasp/Retract Depth', 0.3)

    def setParamsBarRetract(self):
        self.params.setProperty('Bar Grasp/Retract Depth', 0.3)

    def setParamsBarGrasp(self):
        self.params.setProperty('Bar Grasp/Retract Depth', 0.03)

    def addTasks(self):
        """Rebuild the task tree for the currently selected task set."""
        self.taskTree.removeAllTasks()
        if self.taskToShow == 0:
            self.addIngressTasks()
        elif self.taskToShow == 1:
            self.addRegraspTasks()
        elif self.taskToShow == 2:
            # Fixed: original read `elif self.addEgressTasks() == 2:` which ran
            # the egress builder as a side effect of the comparison.
            self.addEgressTasks()
        else:
            return

    def addIngressTasks(self):
        """Populate the task tree with the car-ingress task sequence."""
        # some helpers
        self.folder = None

        def addTask(task, parent=None):
            parent = parent or self.folder
            self.taskTree.onAddTask(task, copy=False, parent=parent)

        def addFunc(func, name, parent=None):
            addTask(rt.CallbackTask(callback=func, name=name), parent=parent)

        def addFolder(name, parent=None):
            self.folder = self.taskTree.addGroup(name, parent=parent)
            return self.folder

        # Matlab-planned steps: user approves and commits the plan manually.
        def addManipTaskMatlab(name, planFunc, userPrompt=False, parentFolder=None):
            prevFolder = self.folder
            addFolder(name, prevFolder)
            addFunc(planFunc, 'plan')
            addTask(rt.UserPromptTask(name='approve manip plan', message='Please approve and commit manipulation plan.'))
            addTask(rt.UserPromptTask(name='wait for plan execution', message='Continue when plan finishes.'))

        def addManipTask(name, planFunc, userPrompt=False):
            prevFolder = self.folder
            addFolder(name, prevFolder)
            addFunc(planFunc, 'plan')
            if not userPrompt:
                addTask(rt.CheckPlanInfo(name='check manip plan info'))
            else:
                addTask(rt.UserPromptTask(name='approve manip plan', message='Please approve manipulation plan.'))
            addFunc(dp.commitManipPlan, name='execute manip plan')
            addTask(rt.UserPromptTask(name='wait for plan execution', message='Continue when plan finishes.'))

        dp = self.drivingPlanner

        prep = addFolder('Prep')
        addTask(rt.UserPromptTask(name="start streaming", message="Please start streaming"))
        addManipTask('car entry posture', self.drivingPlanner.planCarEntryPose, userPrompt=True)
        self.folder = prep
        addTask(rt.UserPromptTask(name="start April tag process", message="Enable April tag detection"))
        addTask(rt.UserPromptTask(name="spawn polaris model", message="launch egress planner and spawn polaris model"))
        addFunc(self.onStart, 'update wheel location')

        graspWheel = addFolder('Grasp Steering Wheel')
        addTask(rt.UserPromptTask(name="approve open left hand", message="Check it is clear to open left hand"))
        addTask(rt.OpenHand(name='open left hand', side='Left'))
        addFunc(self.setParamsPreGrasp1, 'set params')
        addManipTaskMatlab('Pre Grasp 1', self.onPlanPreGrasp)
        self.folder = graspWheel
        addTask(rt.UserPromptTask(name="check alignment", message="Please ask field team for hand location relative to wheel, adjust wheel affordance if necessary"))
        addFunc(self.setParamsPreGrasp2, 'set params')
        addManipTaskMatlab('Pre Grasp 2', self.onPlanPreGrasp)
        self.folder = graspWheel
        addTask(rt.UserPromptTask(name="check alignment", message="Please make any manual adjustments if necessary"))
        addTask(rt.UserPromptTask(name="approve close left hand", message="Check clear to close left hand"))
        addTask(rt.CloseHand(name='close left hand', side='Left'))
        addTask(rt.UserPromptTask(name="set true steering wheel angle", message="Set true steering wheel angle in spin box"))
        addFunc(self.drivingPlanner.setSteeringWheelAndWristGraspAngles, 'capture true wheel angle and current wrist angle')

        graspBar = addFolder('Grasp Bar')
        addTask(rt.UserPromptTask(name="approve open right hand", message="Check clear to open right hand"))
        addTask(rt.OpenHand(name='open right hand', side='Right'))
        addFunc(self.setParamsBarGrasp, 'set params')
        addManipTask('Bar Grasp', self.onPlanBarGrasp, userPrompt=True)
        self.folder = graspBar
        addTask(rt.UserPromptTask(name="check alignment and depth", message="Please check alignment and depth, make any manual adjustments"))
        addTask(rt.UserPromptTask(name="approve close right hand", message="Check ok to close right hand"))
        addTask(rt.CloseHand(name='close Right hand', side='Right'))

        footToDriving = addFolder('Foot to Driving Pose')
        addManipTask('Foot Up', self.drivingPlanner.planLegUp, userPrompt=True)
        self.folder = footToDriving
        addManipTask('Swing leg in', self.drivingPlanner.planLegSwingIn , userPrompt=True)
        self.folder = footToDriving
        addManipTask('Foot On Pedal', self.drivingPlanner.planLegPedal, userPrompt=True)

        driving = addFolder('Driving')
        addTask(rt.UserPromptTask(name="base side streaming", message="Please start base side streaming"))
        addTask(rt.UserPromptTask(name="launch drivers", message="Please launch throttle and steering drivers"))
        addTask(rt.UserPromptTask(name="switch to regrasp tasks", message="Switch to regrasp task set"))

    def addEgressTasks(self):
        """Populate the task tree with the car-egress task sequence."""
        # some helpers
        self.folder = None

        def addTask(task, parent=None):
            parent = parent or self.folder
            self.taskTree.onAddTask(task, copy=False, parent=parent)

        def addFunc(func, name, parent=None):
            addTask(rt.CallbackTask(callback=func, name=name), parent=parent)

        def addFolder(name, parent=None):
            self.folder = self.taskTree.addGroup(name, parent=parent)
            return self.folder

        def addManipTaskMatlab(name, planFunc, userPrompt=False, parentFolder=None):
            prevFolder = self.folder
            addFolder(name, prevFolder)
            addFunc(planFunc, 'plan')
            addTask(rt.UserPromptTask(name='approve manip plan', message='Please approve and commit manipulation plan.'))
            addTask(rt.UserPromptTask(name='wait for plan execution', message='Continue when plan finishes.'))

        def addManipTask(name, planFunc, userPrompt=False):
            prevFolder = self.folder
            addFolder(name, prevFolder)
            addFunc(planFunc, 'plan')
            if not userPrompt:
                addTask(rt.CheckPlanInfo(name='check manip plan info'))
            else:
                addTask(rt.UserPromptTask(name='approve manip plan', message='Please approve manipulation plan.'))
            addFunc(dp.commitManipPlan, name='execute manip plan')
            addTask(rt.UserPromptTask(name='wait for plan execution', message='Continue when plan finishes.'))

        dp = self.drivingPlanner

        footToEgress = addFolder('Foot to Egress Pose')
        addManipTask('Foot Off Pedal', self.drivingPlanner.planLegAbovePedal, userPrompt=True)
        self.folder = footToEgress
        addManipTask('Swing leg out', self.drivingPlanner.planLegSwingOut , userPrompt=True)
        self.folder = footToEgress
        addManipTask('Foot Down', self.drivingPlanner.planLegEgressStart, userPrompt=True)

        ungraspWheel = addFolder('Ungrasp Steering Wheel')
        addTask(rt.UserPromptTask(name="approve open left hand", message="Check ok to open left hand"))
        addTask(rt.OpenHand(name='open left hand', side='Left'))
        addFunc(self.setParamsWheelRetract, 'set params')
        addManipTaskMatlab('Retract hand', self.onPlanRetract)
        self.folder = ungraspWheel
        addTask(rt.UserPromptTask(name="approve close left hand", message="Check ok to close left hand"))
        addTask(rt.CloseHand(name='close left hand', side='Left'))

        ungraspBar = addFolder('Ungrasp Bar')
        addTask(rt.UserPromptTask(name="approve open right hand", message="Check clear to open right hand"))
        # Fixed copy-paste bug: task name said 'open left hand' for the right side.
        addTask(rt.OpenHand(name='open right hand', side='Right'))
        addFunc(self.setParamsBarRetract, 'set params')
        addManipTask('Retract hand', self.onPlanBarRetract, userPrompt=True)
        self.folder = ungraspBar
        addTask(rt.UserPromptTask(name="approve close right hand", message="Check ok to close right hand"))
        addTask(rt.CloseHand(name='close Right hand', side='Right'))

        armsToEgressStart = addFolder('Arms to Egress Start')
        addManipTask('Arms To Egress Start', self.drivingPlanner.planArmsEgressStart, userPrompt=True)

        prep = addFolder('Stop Streaming')
        addTask(rt.UserPromptTask(name='stop streaming base side', message='stop streaming base side'))

    def addRegraspTasks(self):
        """Populate the task tree with the steering-wheel regrasp sequence."""
        self.folder = None

        def addTask(task, parent=None):
            parent = parent or self.folder
            self.taskTree.onAddTask(task, copy=False, parent=parent)

        def addFunc(func, name, parent=None):
            addTask(rt.CallbackTask(callback=func, name=name), parent=parent)

        def addFolder(name, parent=None):
            self.folder = self.taskTree.addGroup(name, parent=parent)
            return self.folder

        def addManipTaskMatlab(name, planFunc, userPrompt=False, parentFolder=None):
            prevFolder = self.folder
            addFolder(name, prevFolder)
            addFunc(planFunc, 'plan')
            addTask(rt.UserPromptTask(name='approve manip plan', message='Please approve and commit manipulation plan.'))
            addTask(rt.UserPromptTask(name='wait for plan execution', message='Continue when plan finishes.'))

        def addManipTask(name, planFunc, userPrompt=False):
            prevFolder = self.folder
            addFolder(name, prevFolder)
            addFunc(planFunc, 'plan')
            if not userPrompt:
                addTask(rt.CheckPlanInfo(name='check manip plan info'))
            else:
                addTask(rt.UserPromptTask(name='approve manip plan', message='Please approve manipulation plan.'))
            addFunc(dp.commitManipPlan, name='execute manip plan')
            addTask(rt.UserPromptTask(name='wait for plan execution', message='Continue when plan finishes.'))

        dp = self.drivingPlanner

        regrasp = addFolder('Regrasp')
        addTask(rt.UserPromptTask(name="approve open left hand", message="Check ok to open left hand"))
        addTask(rt.OpenHand(name='open left hand', side='Left'))
        addManipTask('Plan Regrasp', self.drivingPlanner.planSteeringWheelReGrasp, userPrompt=True)
        self.folder = regrasp
        addTask(rt.UserPromptTask(name="approve close left hand", message="Check ok to close left hand"))
        addTask(rt.CloseHand(name='close left hand', side='Left'))
        addFunc(self.drivingPlanner.updateGraspOffsets, 'update steering wheel grasp offsets')

    def updateAndDrawTrajectory(self):
        """Recompute and redraw the predicted wheel trajectories (no-op when
        the trajectory display is off or the wheel affordance is missing)."""
        if not self.showTrajectory or om.findObjectByName('Steering Wheel') is None:
            return
        steeringAngleDegrees = np.rad2deg(self.drivingPlanner.getSteeringWheelAngle())
        leftTraj, rightTraj = self.drivingPlanner.computeDrivingTrajectories(steeringAngleDegrees, self.drivingPlanner.maxTurningRadius, self.drivingPlanner.trajSegments + 1)
        self.drawDrivingTrajectory(self.drivingPlanner.transformDrivingTrajectory(leftTraj), 'LeftDrivingTrajectory')
        self.drawDrivingTrajectory(self.drivingPlanner.transformDrivingTrajectory(rightTraj), 'RightDrivingTrajectory')

    def drawDrivingTrajectory(self, drivingTraj, name):
        """Render one wheel trajectory as a chain of colored spheres."""
        if not self.showTrajectory:
            return
        d = DebugData()
        numTrajPoints = len(drivingTraj)
        if numTrajPoints > 1:
            for i in range(0, numTrajPoints):
                # fade from red (near) to green (far) along the trajectory
                rgb = [(numTrajPoints - i) / float(numTrajPoints), 1 - (numTrajPoints - i) / float(numTrajPoints), 1]
                d.addSphere(drivingTraj[i], 0.05, rgb)
        vis.updatePolyData(d.getPolyData(), name)
        obj = om.findObjectByName(name)
        obj.setProperty('Color By', 1)
|
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import time
from exchangelib.attachments import FileAttachment
from exchangelib.folders import Inbox
from exchangelib.items import Message, ReplyToItem
from exchangelib.queryset import DoesNotExist
from exchangelib.version import EXCHANGE_2010_SP2
from ..common import get_random_string
from .test_basics import CommonItemTest
class MessagesTest(CommonItemTest):
    # Just test one of the Message-type folders
    TEST_FOLDER = 'inbox'  # account attribute name of the folder under test
    FOLDER_CLASS = Inbox
    ITEM_CLASS = Message
    # Seconds to wait for a sent message to arrive back in the inbox before
    # skipping the test (see get_incoming_message).
    INCOMING_MESSAGE_TIMEOUT = 60
def get_incoming_message(self, subject):
    """Poll the inbox until a message with `subject` arrives.

    Skips the current test if nothing shows up within INCOMING_MESSAGE_TIMEOUT
    seconds.
    """
    deadline = time.monotonic() + self.INCOMING_MESSAGE_TIMEOUT
    while True:
        if time.monotonic() > deadline:
            self.skipTest(f'Too bad. Gave up in {self.id()} waiting for the incoming message to show up')
        try:
            return self.account.inbox.get(subject=subject)
        except DoesNotExist:
            time.sleep(5)
def test_send(self):
    """Sending an unsaved Message clears its ids and leaves no local copy."""
    msg = self.get_test_item()
    msg.folder = None
    msg.send()
    self.assertIsNone(msg.id)
    self.assertIsNone(msg.changekey)
    matching = self.test_folder.filter(categories__contains=msg.categories)
    self.assertEqual(matching.count(), 0)
def test_send_pre_2013(self):
    """Exercise the pre-Exchange-2013 fallback for attachments and send-only mode."""
    msg = self.get_test_item()
    msg.attach(FileAttachment(name='file_attachment', content=b'file_attachment'))
    # Downgrade the server version to force the fallback code path
    self.account.version.build = EXCHANGE_2010_SP2
    msg.send(save_copy=False)
    self.assertIsNone(msg.id)
    self.assertIsNone(msg.changekey)
def test_send_no_copy(self):
    """send(save_copy=False) clears the item ids without keeping a copy."""
    msg = self.get_test_item()
    msg.send(save_copy=False)
    self.assertIsNone(msg.id)
    self.assertIsNone(msg.changekey)
def test_send_and_save(self):
    """send_and_save() works for new items and for already-saved items."""
    msg = self.get_test_item()
    msg.send_and_save()
    self.assertIsNone(msg.id)
    self.assertIsNone(msg.changekey)
    time.sleep(5)  # Requests are supposed to be transactional, but apparently not...
    # The sent item may be followed by an automatic message with the same category
    found = self.test_folder.filter(categories__contains=msg.categories).count()
    self.assertGreaterEqual(found, 1)

    # The update path: save first, then send_and_save (makes little sense but works)
    msg = self.get_test_item()
    msg.save()
    msg.send_and_save()
    time.sleep(5)  # Requests are supposed to be transactional, but apparently not...
    found = self.test_folder.filter(categories__contains=msg.categories).count()
    self.assertGreaterEqual(found, 1)
def test_send_draft(self):
    """A saved draft can be sent; afterwards it points at the sent folder."""
    msg = self.get_test_item()
    msg.folder = self.account.drafts
    msg.is_draft = True
    msg.save()  # Save a draft
    msg.send()  # Send the draft
    self.assertIsNone(msg.id)
    self.assertIsNone(msg.changekey)
    self.assertEqual(msg.folder, self.account.sent)
    self.assertEqual(self.test_folder.filter(categories__contains=msg.categories).count(), 0)
def test_send_and_copy_to_folder(self):
    """send(copy_to_folder=...) stores a copy in the requested folder."""
    msg = self.get_test_item()
    msg.send(save_copy=True, copy_to_folder=self.account.sent)
    self.assertIsNone(msg.id)
    self.assertIsNone(msg.changekey)
    self.assertEqual(msg.folder, self.account.sent)
    time.sleep(5)  # Requests are supposed to be transactional, but apparently not...
    copies = self.account.sent.filter(categories__contains=msg.categories)
    self.assertEqual(copies.count(), 1)
    def test_bulk_send(self):
        """Bulk-send saved items; invalid argument combinations must raise."""
        with self.assertRaises(AttributeError):
            # copy_to_folder requires save_copy=True
            self.account.bulk_send(ids=[], save_copy=False, copy_to_folder=self.account.trash)
        item = self.get_test_item()
        item.save()
        for res in self.account.bulk_send(ids=[item]):
            self.assertEqual(res, True)
        time.sleep(10) # Requests are supposed to be transactional, but apparently not...
        # By default, sent items are placed in the sent folder
        self.assertEqual(self.account.sent.filter(categories__contains=item.categories).count(), 1)
    def test_reply(self):
        # Test that we can reply to a Message item. EWS only allows items that have been sent to receive a reply
        item = self.get_test_item()
        item.folder = None
        item.send() # get_test_item() sets the to_recipients to the test account
        # Wait for the message to arrive in our own inbox, then reply to it
        sent_item = self.get_incoming_message(item.subject)
        new_subject = (f'Re: {sent_item.subject}')[:255]  # subject is limited to 255 chars
        sent_item.reply(subject=new_subject, body='Hello reply', to_recipients=[item.author])
        self.assertEqual(self.account.sent.filter(subject=new_subject).count(), 1)
    def test_create_reply(self):
        # Test that we can save a reply without sending it
        item = self.get_test_item(folder=None)
        item.folder = None
        item.send()
        sent_item = self.get_incoming_message(item.subject)
        new_subject = (f'Re: {sent_item.subject}')[:255]
        # A reply with neither recipients nor an author to infer them from must fail
        with self.assertRaises(ValueError) as e:
            tmp = sent_item.author
            sent_item.author = None
            try:
                sent_item.create_reply(subject=new_subject, body='Hello reply').save(self.account.drafts)
            finally:
                # Restore the author even if the expected error is raised
                sent_item.author = tmp
        self.assertEqual(e.exception.args[0], "'to_recipients' must be set when message has no 'author'")
        sent_item.create_reply(subject=new_subject, body='Hello reply', to_recipients=[item.author])\
            .save(self.account.drafts)
        self.assertEqual(self.account.drafts.filter(subject=new_subject).count(), 1)
        # Test with no to_recipients
        sent_item.create_reply(subject=new_subject, body='Hello reply')\
            .save(self.account.drafts)
        self.assertEqual(self.account.drafts.filter(subject=new_subject).count(), 2)
    def test_reply_all(self):
        """Reply-all to a received message; also check account type validation."""
        with self.assertRaises(TypeError) as e:
            ReplyToItem(account='XXX')
        self.assertEqual(e.exception.args[0], "'account' 'XXX' must be of type <class 'exchangelib.account.Account'>")
        # Test that we can reply-all a Message item. EWS only allows items that have been sent to receive a reply
        item = self.get_test_item(folder=None)
        item.folder = None
        item.send()
        sent_item = self.get_incoming_message(item.subject)
        new_subject = (f'Re: {sent_item.subject}')[:255]
        sent_item.reply_all(subject=new_subject, body='Hello reply')
        self.assertEqual(self.account.sent.filter(subject=new_subject).count(), 1)
    def test_forward(self):
        # Test that we can forward a Message item. EWS only allows items that have been sent to receive a reply
        item = self.get_test_item(folder=None)
        item.folder = None
        item.send()
        sent_item = self.get_incoming_message(item.subject)
        new_subject = (f'Re: {sent_item.subject}')[:255]
        sent_item.forward(subject=new_subject, body='Hello reply', to_recipients=[item.author])
        # Forwarded messages land in the sent folder
        self.assertEqual(self.account.sent.filter(subject=new_subject).count(), 1)
    def test_create_forward(self):
        # Test that we can forward a Message item. EWS only allows items that have been sent to receive a reply
        item = self.get_test_item(folder=None)
        item.folder = None
        item.send()
        sent_item = self.get_incoming_message(item.subject)
        new_subject = (f'Re: {sent_item.subject}')[:255]
        forward_item = sent_item.create_forward(subject=new_subject, body='Hello reply', to_recipients=[item.author])
        # copy_to_folder without save_copy=True is an invalid combination
        with self.assertRaises(AttributeError) as e:
            forward_item.send(save_copy=False, copy_to_folder=self.account.sent)
        self.assertEqual(e.exception.args[0], "'save_copy' must be True when 'copy_to_folder' is set")
        forward_item.send()
        self.assertEqual(self.account.sent.filter(subject=new_subject).count(), 1)
    def test_mark_as_junk(self):
        # Test that we can mark a Message item as junk and non-junk, and that the message goes to the junk folder and
        # back to the inbox.
        item = self.get_test_item().save()
        # is_junk=False with move_item=False is a no-op move-wise
        item.mark_as_junk(is_junk=False, move_item=False)
        self.assertEqual(item.folder, self.test_folder)
        self.assertEqual(self.test_folder.get(categories__contains=self.categories).id, item.id)
        item.mark_as_junk(is_junk=True, move_item=False)
        self.assertEqual(item.folder, self.test_folder)
        self.assertEqual(self.test_folder.get(categories__contains=self.categories).id, item.id)
        # move_item=True actually relocates the message
        item.mark_as_junk(is_junk=True, move_item=True)
        self.assertEqual(item.folder, self.account.junk)
        self.assertEqual(self.account.junk.get(categories__contains=self.categories).id, item.id)
        item.mark_as_junk(is_junk=False, move_item=True)
        self.assertEqual(item.folder, self.account.inbox)
        self.assertEqual(self.account.inbox.get(categories__contains=self.categories).id, item.id)
    def test_mime_content(self):
        # Tests the 'mime_content' field
        subject = get_random_string(16)
        # Build a raw MIME message with the stdlib and hand it to EWS verbatim
        msg = MIMEMultipart()
        msg['From'] = self.account.primary_smtp_address
        msg['To'] = self.account.primary_smtp_address
        msg['Subject'] = subject
        body = 'MIME test mail'
        msg.attach(MIMEText(body, 'plain', _charset='utf-8'))
        mime_content = msg.as_bytes()
        self.ITEM_CLASS(
            folder=self.test_folder,
            to_recipients=[self.account.primary_smtp_address],
            mime_content=mime_content,
            categories=self.categories,
        ).save()
        # The body parsed by the server must round-trip the MIME text part
        self.assertEqual(self.test_folder.get(subject=subject).body, body)
Fix test: clear the item's folder in test_send_no_copy so send-only mode cannot leave a saved copy, and drop the stale comment copied from test_send_pre_2013.
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import time
from exchangelib.attachments import FileAttachment
from exchangelib.folders import Inbox
from exchangelib.items import Message, ReplyToItem
from exchangelib.queryset import DoesNotExist
from exchangelib.version import EXCHANGE_2010_SP2
from ..common import get_random_string
from .test_basics import CommonItemTest
class MessagesTest(CommonItemTest):
    """Live round-trip tests for Message items: send, save, reply, forward, junk-marking.

    All tests run against a real Exchange account (self.account); messages are
    sent to the account's own address and polled back from the inbox.
    """
    # Just test one of the Message-type folders
    TEST_FOLDER = 'inbox'
    FOLDER_CLASS = Inbox
    ITEM_CLASS = Message
    # Seconds to wait for a message sent to ourselves to show up in the inbox
    INCOMING_MESSAGE_TIMEOUT = 60

    def get_incoming_message(self, subject):
        """Poll the inbox for a message with *subject*; skip the test on timeout."""
        t1 = time.monotonic()
        while True:
            t2 = time.monotonic()
            if t2 - t1 > self.INCOMING_MESSAGE_TIMEOUT:
                self.skipTest(f'Too bad. Gave up in {self.id()} waiting for the incoming message to show up')
            try:
                return self.account.inbox.get(subject=subject)
            except DoesNotExist:
                time.sleep(5)

    def test_send(self):
        # Test that we can send (only) Message items
        item = self.get_test_item()
        item.folder = None
        item.send()
        # A sent-but-unsaved item has no server identity
        self.assertIsNone(item.id)
        self.assertIsNone(item.changekey)
        self.assertEqual(self.test_folder.filter(categories__contains=item.categories).count(), 0)

    def test_send_pre_2013(self):
        # Test < Exchange 2013 fallback for attachments and send-only mode
        item = self.get_test_item()
        item.attach(FileAttachment(name='file_attachment', content=b'file_attachment'))
        # Fake an old server version to force the fallback code path
        self.account.version.build = EXCHANGE_2010_SP2
        item.send(save_copy=False)
        self.assertIsNone(item.id)
        self.assertIsNone(item.changekey)

    def test_send_no_copy(self):
        # Send-only mode must not leave a saved copy anywhere
        item = self.get_test_item()
        item.folder = None
        item.send(save_copy=False)
        self.assertIsNone(item.id)
        self.assertIsNone(item.changekey)

    def test_send_and_save(self):
        # Test that we can send_and_save Message items
        item = self.get_test_item()
        item.send_and_save()
        self.assertIsNone(item.id)
        self.assertIsNone(item.changekey)
        time.sleep(5) # Requests are supposed to be transactional, but apparently not...
        # Also, the sent item may be followed by an automatic message with the same category
        self.assertGreaterEqual(self.test_folder.filter(categories__contains=item.categories).count(), 1)
        # Test update, although it makes little sense
        item = self.get_test_item()
        item.save()
        item.send_and_save()
        time.sleep(5) # Requests are supposed to be transactional, but apparently not...
        # Also, the sent item may be followed by an automatic message with the same category
        self.assertGreaterEqual(self.test_folder.filter(categories__contains=item.categories).count(), 1)

    def test_send_draft(self):
        # Save an item as a draft, then send it; it must end up in the sent folder only
        item = self.get_test_item()
        item.folder = self.account.drafts
        item.is_draft = True
        item.save() # Save a draft
        item.send() # Send the draft
        self.assertIsNone(item.id)
        self.assertIsNone(item.changekey)
        self.assertEqual(item.folder, self.account.sent)
        self.assertEqual(self.test_folder.filter(categories__contains=item.categories).count(), 0)

    def test_send_and_copy_to_folder(self):
        item = self.get_test_item()
        item.send(save_copy=True, copy_to_folder=self.account.sent) # Send the draft and save to the sent folder
        self.assertIsNone(item.id)
        self.assertIsNone(item.changekey)
        self.assertEqual(item.folder, self.account.sent)
        time.sleep(5) # Requests are supposed to be transactional, but apparently not...
        self.assertEqual(self.account.sent.filter(categories__contains=item.categories).count(), 1)

    def test_bulk_send(self):
        # copy_to_folder requires save_copy=True
        with self.assertRaises(AttributeError):
            self.account.bulk_send(ids=[], save_copy=False, copy_to_folder=self.account.trash)
        item = self.get_test_item()
        item.save()
        for res in self.account.bulk_send(ids=[item]):
            self.assertEqual(res, True)
        time.sleep(10) # Requests are supposed to be transactional, but apparently not...
        # By default, sent items are placed in the sent folder
        self.assertEqual(self.account.sent.filter(categories__contains=item.categories).count(), 1)

    def test_reply(self):
        # Test that we can reply to a Message item. EWS only allows items that have been sent to receive a reply
        item = self.get_test_item()
        item.folder = None
        item.send() # get_test_item() sets the to_recipients to the test account
        sent_item = self.get_incoming_message(item.subject)
        new_subject = (f'Re: {sent_item.subject}')[:255]  # subject is limited to 255 chars
        sent_item.reply(subject=new_subject, body='Hello reply', to_recipients=[item.author])
        self.assertEqual(self.account.sent.filter(subject=new_subject).count(), 1)

    def test_create_reply(self):
        # Test that we can save a reply without sending it
        item = self.get_test_item(folder=None)
        item.folder = None
        item.send()
        sent_item = self.get_incoming_message(item.subject)
        new_subject = (f'Re: {sent_item.subject}')[:255]
        # A reply with neither recipients nor an author to infer them from must fail
        with self.assertRaises(ValueError) as e:
            tmp = sent_item.author
            sent_item.author = None
            try:
                sent_item.create_reply(subject=new_subject, body='Hello reply').save(self.account.drafts)
            finally:
                # Restore the author even when the expected error is raised
                sent_item.author = tmp
        self.assertEqual(e.exception.args[0], "'to_recipients' must be set when message has no 'author'")
        sent_item.create_reply(subject=new_subject, body='Hello reply', to_recipients=[item.author])\
            .save(self.account.drafts)
        self.assertEqual(self.account.drafts.filter(subject=new_subject).count(), 1)
        # Test with no to_recipients
        sent_item.create_reply(subject=new_subject, body='Hello reply')\
            .save(self.account.drafts)
        self.assertEqual(self.account.drafts.filter(subject=new_subject).count(), 2)

    def test_reply_all(self):
        with self.assertRaises(TypeError) as e:
            ReplyToItem(account='XXX')
        self.assertEqual(e.exception.args[0], "'account' 'XXX' must be of type <class 'exchangelib.account.Account'>")
        # Test that we can reply-all a Message item. EWS only allows items that have been sent to receive a reply
        item = self.get_test_item(folder=None)
        item.folder = None
        item.send()
        sent_item = self.get_incoming_message(item.subject)
        new_subject = (f'Re: {sent_item.subject}')[:255]
        sent_item.reply_all(subject=new_subject, body='Hello reply')
        self.assertEqual(self.account.sent.filter(subject=new_subject).count(), 1)

    def test_forward(self):
        # Test that we can forward a Message item. EWS only allows items that have been sent to receive a reply
        item = self.get_test_item(folder=None)
        item.folder = None
        item.send()
        sent_item = self.get_incoming_message(item.subject)
        new_subject = (f'Re: {sent_item.subject}')[:255]
        sent_item.forward(subject=new_subject, body='Hello reply', to_recipients=[item.author])
        self.assertEqual(self.account.sent.filter(subject=new_subject).count(), 1)

    def test_create_forward(self):
        # Test that we can forward a Message item. EWS only allows items that have been sent to receive a reply
        item = self.get_test_item(folder=None)
        item.folder = None
        item.send()
        sent_item = self.get_incoming_message(item.subject)
        new_subject = (f'Re: {sent_item.subject}')[:255]
        forward_item = sent_item.create_forward(subject=new_subject, body='Hello reply', to_recipients=[item.author])
        # copy_to_folder without save_copy=True is an invalid combination
        with self.assertRaises(AttributeError) as e:
            forward_item.send(save_copy=False, copy_to_folder=self.account.sent)
        self.assertEqual(e.exception.args[0], "'save_copy' must be True when 'copy_to_folder' is set")
        forward_item.send()
        self.assertEqual(self.account.sent.filter(subject=new_subject).count(), 1)

    def test_mark_as_junk(self):
        # Test that we can mark a Message item as junk and non-junk, and that the message goes to the junk folder and
        # back to the inbox.
        item = self.get_test_item().save()
        item.mark_as_junk(is_junk=False, move_item=False)
        self.assertEqual(item.folder, self.test_folder)
        self.assertEqual(self.test_folder.get(categories__contains=self.categories).id, item.id)
        item.mark_as_junk(is_junk=True, move_item=False)
        self.assertEqual(item.folder, self.test_folder)
        self.assertEqual(self.test_folder.get(categories__contains=self.categories).id, item.id)
        # move_item=True actually relocates the message
        item.mark_as_junk(is_junk=True, move_item=True)
        self.assertEqual(item.folder, self.account.junk)
        self.assertEqual(self.account.junk.get(categories__contains=self.categories).id, item.id)
        item.mark_as_junk(is_junk=False, move_item=True)
        self.assertEqual(item.folder, self.account.inbox)
        self.assertEqual(self.account.inbox.get(categories__contains=self.categories).id, item.id)

    def test_mime_content(self):
        # Tests the 'mime_content' field
        subject = get_random_string(16)
        # Build a raw MIME message with the stdlib and hand it to EWS verbatim
        msg = MIMEMultipart()
        msg['From'] = self.account.primary_smtp_address
        msg['To'] = self.account.primary_smtp_address
        msg['Subject'] = subject
        body = 'MIME test mail'
        msg.attach(MIMEText(body, 'plain', _charset='utf-8'))
        mime_content = msg.as_bytes()
        self.ITEM_CLASS(
            folder=self.test_folder,
            to_recipients=[self.account.primary_smtp_address],
            mime_content=mime_content,
            categories=self.categories,
        ).save()
        # The body parsed by the server must round-trip the MIME text part
        self.assertEqual(self.test_folder.get(subject=subject).body, body)
|
b732439c-2ead-11e5-a7be-7831c1d44c14
b73ae2c0-2ead-11e5-9a57-7831c1d44c14
b73ae2c0-2ead-11e5-9a57-7831c1d44c14 |
from wikidataintegrator import wdi_core
import pprint
from collections import defaultdict
from itertools import combinations

# Find Wikidata combination-therapy items (P31 = Q1304270) that share the exact
# same set of parts (P527) — i.e. duplicate items that should be merged.

# get existing combinations:
query_str = """SELECT ?item ?itemLabel (GROUP_CONCAT(?part; separator=";") as ?f) WHERE {
?item wdt:P527 ?part .
?item wdt:P31 wd:Q1304270 .
SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
} GROUP BY ?item ?itemLabel"""
results = wdi_core.WDItemEngine.execute_sparql_query(query_str)['results']['bindings']
# QID -> frozenset of part QIDs
combo_qid = {x['item']['value'].replace("http://www.wikidata.org/entity/", ""): frozenset([y.replace("http://www.wikidata.org/entity/", "") for y in x['f']['value'].split(";")]) for x in results}
# Inverting collapses duplicate part-sets; the group scan below finds them explicitly.
qid_combo = {v: k for k, v in combo_qid.items()}
#assert len(combo_qid) == len(qid_combo)

# Group QIDs by part-set and report every set claimed by more than one item.
# O(n) instead of the previous O(n^2) pairwise scan, which also printed every
# duplicate pair twice (once per orientation).
groups = defaultdict(list)
for qid, combo in combo_qid.items():
    groups[combo].append(qid)
for combo, qids in groups.items():
    if len(qids) > 1:
        for qid1, qid2 in combinations(sorted(qids), 2):
            print(qid1, combo, ":", qid2, combo)
The issue with duplicate combination-therapy items can now be fixed with an adaptation to the script that identifies where the assertion issue is. Now this can be fixed automatically.
from wikidataintegrator import wdi_core, wdi_login
import os

# Merge duplicate Wikidata combination-therapy items (P31 = Q1304270) that
# share the exact same set of parts (P527).

# get existing combinations:
query_str = """SELECT ?item ?itemLabel (GROUP_CONCAT(?part; separator=";") as ?f) WHERE {
?item wdt:P527 ?part .
?item wdt:P31 wd:Q1304270 .
SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
} GROUP BY ?item ?itemLabel"""
results = wdi_core.WDItemEngine.execute_sparql_query(query_str)['results']['bindings']
combo_qid = {x['item']['value'].replace("http://www.wikidata.org/entity/", ""): frozenset([y.replace("http://www.wikidata.org/entity/", "") for y in x['f']['value'].split(";")]) for x in results}
qid_combo = {v: k for k, v in combo_qid.items()}
#assert len(combo_qid) == len(qid_combo)

print("Logging in...")
if "WDUSER" in os.environ and "WDPASS" in os.environ:
    WDUSER = os.environ['WDUSER']
    WDPASS = os.environ['WDPASS']
else:
    raise ValueError("WDUSER and WDPASS must be specified in local.py or as environment variables")
login = wdi_login.WDLogin(WDUSER, WDPASS)

solved = []
for qid1 in combo_qid.keys():
    if qid1 in solved:
        continue
    for qid2 in combo_qid.keys():
        if qid1 != qid2:
            if combo_qid[qid1] == combo_qid[qid2]:
                print(qid1, combo_qid[qid1], ":", qid2, combo_qid[qid2])
                # BUGFIX: the original set source/target identically in both
                # branches, so the comparison was dead and merge direction was
                # arbitrary. Per Wikidata convention we merge the newer item
                # (higher numeric QID) into the older one, keeping the old QID.
                if int(qid1[1:]) > int(qid2[1:]):
                    source, target = qid1, qid2
                else:
                    source, target = qid2, qid1
                wdi_core.WDItemEngine.merge_items(source, target, login)
                solved.append(qid1)
                solved.append(qid2)
|
'''
This script runs on @loboris fork of MicroPython for the ESP32
The fork runs MicroPython as an RTOS process and wraps interesting display and mqtt modules.
This script displays mqtt messages to the TFT Featherwing using @loboris display module
This takes advantage of a C implementation of MQTT that reuns in the background as a separate freeRTOS task.
The MQTT broker is running on an EC2 instance.
Note that multiple mqtt clients can be created.
The mqtt topic is in a separate file called topic
MQTT messages are json in the form of:
{"text":["The rain in spain {RED} falls mainly on the {GREEN} plain", "Now is the time for all good {BLUE}men {WHITE}to come to the aid of their country"]}
Note you have to explicity unsubscribe - it retains subscriptions through power down somehow
'''
import network, utime
import display
from machine import Pin, I2C, RTC, random
import json
import ure as re
from config import ssid, pw, mqtt_aws_host
# Read the per-device MQTT client id and topic from files on the flash FS
with open('mqtt_id', 'r') as f:
    mqtt_id = f.read().strip()
with open('topic', 'r') as f:
    topic = f.read().strip()
print("mqtt_id =", mqtt_id)
print("host =", mqtt_aws_host)
print("topic =", topic)
# Initialize the ILI9341 TFT Featherwing (240x320, pin numbers are board wiring)
tft = display.TFT()
tft.init(tft.ILI9341, width=240, height=320, miso=19, mosi=18, clk=5, cs=15, dc=33, bgr=True)
utime.sleep(1)
tft.clear()
tft.text(10, 10, "Hello Steve", random(0xFFFFFF))
#pin15 = Pin(15, Pin.OUT) #will need to find another pin since this is cs pin
# Matches inline color tags like {RED}; used by display_text() and wrap()
regex= re.compile('{(.*?)}')
#s = "jkdsfl{RED}fkjsdflds{GREEN}jlklfjsl{PINK}lkdsjflkdsjfl"
def display_text(s, n, tag=None, h=0):
    """Render string *s* at row *n* on the TFT, honoring inline {COLOR} tags.

    Recurses once per tag: each call draws the text up to the next tag in the
    color named by *tag*, then continues at horizontal pixel offset *h*.
    """
    # the following two things can only happen the first time a string is processed
    if s and s[0] != '{': # deal with strings with no pos 0 tag (they may be tags elsewhere in the string)
        s = '{WHITE}' + s
    if tag is None:
        z = regex.search(s)
        tag = z.group(0)
    col = tag[1:-1].upper()
    col = col if col else 'WHITE' # {} was used for white, which produces col = '', so need to do this check
    # NOTE(review): col can no longer be '' after the line above — this branch is dead
    if col == '':
        col = 'WHITE'
    if col == 'GREY':
        col = 'LIGHTGREY'  # the display driver names this color LIGHTGREY
    s = s[len(tag):]
    z = regex.search(s)
    if z is None:
        # No further tags: draw the remainder in the current color and stop
        print("No more tags")
        print("col=",col)
        print("text=",s)
        tft.text(h, n, s, getattr(tft, col))
        return
    tag2 = z.group(0)
    pos = s.find(tag2) # will be non-zero
    print("col=",col)
    print("text=",s[:pos])
    tft.text(h, n, s[:pos], getattr(tft, col))
    # Advance by the pixel width of what was just drawn, then recurse
    h+=tft.textWidth(s[:pos])
    return display_text(s[pos:], n, tag2, h)
def wrap(text, lim):
    """Greedy word-wrap *text* to at most *lim* visible characters per line.

    {COLOR} tags are zero-width for wrapping purposes. BUGFIX: the most recent
    tag is now re-emitted at the start of each continuation line, so a color
    started on one line survives the line break (previously the continuation
    line silently reverted to the default color).
    """
    tag_re = re.compile('{(.*?)}')  # same pattern as the module-level regex
    lines = []
    pos = 0
    line = []
    last_tag = None  # most recently seen color tag, if any
    for word in text.split():
        ln = len(word)
        z = tag_re.search(word)
        if z:
            last_tag = z.group(0)
            ln -= len(last_tag)  # the tag occupies no screen columns
        if pos + ln < lim + 1:
            line.append(word)
            pos += ln + 1
        else:
            lines.append(' '.join(line))
            # Carry the active color onto the new line unless the word
            # already starts with its own tag
            if last_tag and word[0] != '{':
                line = [last_tag + word]
            else:
                line = [word]
            pos = ln
    lines.append(' '.join(line))
    return lines
# Vertical advance per rendered text line, in pixels
line_height = tft.fontSize()[1]
# Display height in pixels; rendering stops when the next line would overflow
MAX_HEIGHT = 320
max_chars_line = 30 #240/tft.fontSize()[0] # note that there is hidden markup that are treated like words
def conncb(task):
    # Callback fired by the background MQTT task when the broker connection is up.
    print("[%s] Connected" % task)
#def disconncb(task):
# print("[{}] Disconnected".format(task))
def subscb(task):
    # Callback fired by the background MQTT task when a subscription is confirmed.
    print("[%s] Subscribed" % task)
#def pubcb(pub):
# print("[{}] Published: {}".format(pub[0], pub[1]))
def datacb(msg):
    """MQTT data callback: parse a JSON payload and render its 'text' lines on the TFT.

    *msg* is a (task, topic, payload) triple. Expected payload shape:
    {"text": [line, ...], "bullets": bool} — a '#' prefix once meant bold and
    '*' a starred line (the blit code for both is commented out).
    """
    print("[{}] Data arrived - topic: {}, message:{}".format(msg[0], msg[1], msg[2]))
    try:
        zz = json.loads(msg[2])
    except:
        # NOTE(review): bare except — a malformed payload is silently dropped
        zz = {}
    #msg = zz.get('message', '')
    # NOTE(review): t is computed but never used below
    t = "{}".format(utime.strftime("%c", utime.localtime()))
    bullets = zz.get('bullets', True)
    tft.clear()
    n = line_height #20
    for item in zz.get('text',['No text']):
        if not item.strip():
            # Blank entry: leave an empty line
            n+=line_height
            continue
        #font.set_bold(False)
        n+=4 if bullets else 0 # makes multi-line bullets more separated from prev and next bullet
        if n+line_height > MAX_HEIGHT:
            break  # no room for another line on the display
        if item[0] == '#':
            item=item[1:]
            #font.set_bold(True)
        if item[0] == '*':
            item=item[1:]
            #foo.blit(star, (2,n+7))
        elif bullets:
            #foo.blit(bullet_surface, (7,n+13)) #(4,n+13)
            pass
        # neither a star in front of item or a bullet
        else:
            #max_chars_line+= 1
            pass
        print("item=",item)
        lines = wrap(item, max_chars_line) # if line is just whitespace it returns []
        print("lines=",lines)
        for line in lines:
            display_text(line, n)
            n+=line_height
#############################
# Bring up Wi-Fi, sync the clock via NTP, then subscribe and idle forever.
wlan = network.WLAN(network.STA_IF)
wlan.active(True)
if not wlan.isconnected():
    print('connecting to network...')
    wlan.connect(ssid, pw)
    while not wlan.isconnected():
        pass  # busy-wait until associated
print('network config:', wlan.ifconfig())
utime.sleep(5)
rtc = RTC()
print("synchronize time with NTP server ...")
# limit the time waiting since sometimes never connects
rtc.ntp_sync(server="pool.ntp.org")
for k in range(10):
    if rtc.synced():
        break
    utime.sleep_ms(100)
else:
    # for/else: only reached when the loop above never broke
    print("Could not synchronize with ntp")
print("Time set to: {}".format(utime.strftime("%c", utime.localtime())))
# The MQTT client runs as a separate freeRTOS task in the background
mqttc = network.mqtt(mqtt_id, mqtt_aws_host)
utime.sleep(1)
mqttc.config(subscribed_cb=subscb, connected_cb=conncb, data_cb=datacb)
mqttc.subscribe(topic)
cur_time = utime.time()
# Idle loop: print a heartbeat timestamp every 10 minutes
while 1:
    t = utime.time()
    if t > cur_time + 600:
        print(utime.strftime("%c", utime.localtime()))
        cur_time = t
    utime.sleep(1)
iot_esp32_tft.py: the main change is that when an item wraps onto more than one line on the TFT, the most recent color tag is carried over to the following line.
'''
This script runs on @loboris fork of MicroPython for the ESP32
The fork runs MicroPython as an RTOS process and wraps interesting display and mqtt modules.
This script displays mqtt messages to the TFT Featherwing using @loboris display module
This takes advantage of a C implementation of MQTT that reuns in the background as a separate freeRTOS task.
The MQTT broker is running on an EC2 instance.
Note that multiple mqtt clients can be created.
The mqtt topic is in a separate file called topic
MQTT messages are json in the form of:
{"text":["The rain in spain {RED} falls mainly on the {GREEN} plain", "Now is the time for all good {BLUE}men {WHITE}to come to the aid of their country"]}
Note you have to explicity unsubscribe - it retains subscriptions through power down somehow
'''
import network, utime
import display
from machine import Pin, I2C, RTC, random
import json
import ure as re
from config import ssid, pw, mqtt_aws_host
# Read the per-device MQTT client id and topic from files on the flash FS
with open('mqtt_id', 'r') as f:
    mqtt_id = f.read().strip()
with open('topic', 'r') as f:
    topic = f.read().strip()
print("mqtt_id =", mqtt_id)
print("host =", mqtt_aws_host)
print("topic =", topic)
# Initialize the ILI9341 TFT Featherwing (240x320, pin numbers are board wiring)
tft = display.TFT()
tft.init(tft.ILI9341, width=240, height=320, miso=19, mosi=18, clk=5, cs=15, dc=33, bgr=True)
utime.sleep(1)
tft.clear()
tft.text(10, 10, "Hello Steve", random(0xFFFFFF))
#pin15 = Pin(15, Pin.OUT) #will need to find another pin since this is cs pin
# Matches inline color tags like {RED}; used by display_text() and wrap()
regex= re.compile('{(.*?)}')
#s = "jkdsfl{RED}fkjsdflds{GREEN}jlklfjsl{PINK}lkdsjflkdsjfl"
def display_text(s, n, tag=None, h=0):
    """Render string *s* at row *n* on the TFT, honoring inline {COLOR} tags.

    Recurses once per tag: each call draws the text up to the next tag in the
    color named by *tag*, then continues at horizontal pixel offset *h*.
    """
    # the following two things can only happen the first time a string is processed
    if s and s[0] != '{': # deal with strings with no pos 0 tag (they may be tags elsewhere in the string)
        s = '{WHITE}' + s
    if tag is None:
        z = regex.search(s)
        tag = z.group(0)
    col = tag[1:-1].upper()
    # {} was used for white, which produces col = '', so default it here.
    # (The original also had a second `if col == '':` check after this line,
    # which was unreachable and has been removed.)
    col = col if col else 'WHITE'
    if col == 'GREY':
        col = 'LIGHTGREY'  # the display driver names this color LIGHTGREY
    s = s[len(tag):]
    z = regex.search(s)
    if z is None:
        # No further tags: draw the remainder in the current color and stop
        print("No more tags")
        print("col=",col)
        print("text=",s)
        tft.text(h, n, s, getattr(tft, col))
        return
    tag2 = z.group(0)
    pos = s.find(tag2) # will be non-zero
    print("col=",col)
    print("text=",s[:pos])
    tft.text(h, n, s[:pos], getattr(tft, col))
    # Advance by the pixel width of what was just drawn, then recurse
    h += tft.textWidth(s[:pos])
    return display_text(s[pos:], n, tag2, h)
def wrap(text,lim):
    """Greedy word-wrap *text* to at most *lim* visible characters per line.

    {COLOR} tags count as zero-width, and the most recent tag is re-emitted
    at the start of each continuation line so the color survives the break.
    """
    # a little tricky to deal with {RED} since can be at beginning or end of a regular word
    lines = []
    pos = 0
    line = []
    last_tag = None
    for word in text.split():
        ln = len(word)
        z = regex.search(word)
        if z:
            last_tag = z.group(0)
            ln-= len(last_tag)  # the tag occupies no screen columns
        if pos+ln < lim+1:
            line.append(word)
            pos+= ln+1
        else:
            lines.append(' '.join(line))
            # Carry the active color onto the new line unless the word
            # already starts with its own tag
            if last_tag and word[0]!='{':
                line = [last_tag+word]
            else:
                line=[word]
            pos = ln
    lines.append(' '.join(line))
    return lines
# Vertical advance per rendered text line, in pixels
line_height = tft.fontSize()[1]
# Display height in pixels; rendering stops when the next line would overflow
MAX_HEIGHT = 320
max_chars_line = 30 #240/tft.fontSize()[0] # note that there is hidden markup that are treated like words
def conncb(task):
    # Callback fired by the background MQTT task when the broker connection is up.
    print("[%s] Connected" % task)
def disconncb(task):
    # Callback fired by the background MQTT task when the broker connection drops.
    print("[%s] Disconnected" % task)
def subscb(task):
    # Callback fired by the background MQTT task when a subscription is confirmed.
    print("[%s] Subscribed" % task)
#def pubcb(pub):
# print("[{}] Published: {}".format(pub[0], pub[1]))
def datacb(msg):
    """MQTT data callback: parse a JSON payload and render its 'text' lines on the TFT.

    *msg* is a (task, topic, payload) triple. Expected payload shape:
    {"text": [line, ...], "bullets": bool} — a '#' prefix once meant bold and
    '*' a starred line (the blit code for both is commented out).
    """
    print("[{}] Data arrived - topic: {}, message:{}".format(msg[0], msg[1], msg[2]))
    try:
        zz = json.loads(msg[2])
    except:
        # NOTE(review): bare except — a malformed payload is silently dropped
        zz = {}
    #msg = zz.get('message', '')
    # NOTE(review): t is computed but never used below
    t = "{}".format(utime.strftime("%c", utime.localtime()))
    bullets = zz.get('bullets', True)
    tft.clear()
    n = line_height #20
    for item in zz.get('text',['No text']):
        if not item.strip():
            # Blank entry: leave an empty line
            n+=line_height
            continue
        #font.set_bold(False)
        n+=4 if bullets else 0 # makes multi-line bullets more separated from prev and next bullet
        if n+line_height > MAX_HEIGHT:
            break  # no room for another line on the display
        if item[0] == '#':
            item=item[1:]
            #font.set_bold(True)
        if item[0] == '*':
            item=item[1:]
            #foo.blit(star, (2,n+7))
        elif bullets:
            #foo.blit(bullet_surface, (7,n+13)) #(4,n+13)
            pass
        # neither a star in front of item or a bullet
        else:
            #max_chars_line+= 1
            pass
        print("item=",item)
        lines = wrap(item, max_chars_line) # if line is just whitespace it returns []
        print("lines=",lines)
        for line in lines:
            display_text(line, n)
            n+=line_height
#############################
# Bring up Wi-Fi, sync the clock via NTP, then subscribe and idle forever.
wlan = network.WLAN(network.STA_IF)
wlan.active(True)
if not wlan.isconnected():
    print('connecting to network...')
    wlan.connect(ssid, pw)
    while not wlan.isconnected():
        pass  # busy-wait until associated
print('network config:', wlan.ifconfig())
utime.sleep(5)
rtc = RTC()
print("synchronize time with NTP server ...")
# limit the time waiting since sometimes never connects
rtc.ntp_sync(server="pool.ntp.org")
for k in range(10):
    if rtc.synced():
        break
    utime.sleep_ms(100)
else:
    # for/else: only reached when the loop above never broke
    print("Could not synchronize with ntp")
print("Time set to: {}".format(utime.strftime("%c", utime.localtime())))
# The MQTT client runs as a separate freeRTOS task in the background
mqttc = network.mqtt(mqtt_id, mqtt_aws_host, connected_cb=conncb)
utime.sleep(1)
mqttc.config(subscribed_cb=subscb, disconnected_cb=disconncb, data_cb=datacb)
mqttc.subscribe(topic)
cur_time = utime.time()
# Idle loop: print a heartbeat timestamp every 10 minutes
while 1:
    t = utime.time()
    if t > cur_time + 600:
        print(utime.strftime("%c", utime.localtime()))
        cur_time = t
    utime.sleep(1)
|
import collections
import datetime
import re
from collections import defaultdict
from operator import itemgetter
import simplejson as json
import yaml
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.http import JsonResponse
from django.utils import timezone, dateformat
from django.utils.decorators import method_decorator
from django.views import View
from django.views.decorators.csrf import csrf_exempt
import api.models as models
import directions.models as directions
import users.models as users
from api.to_astm import get_iss_astm
from appconf.manager import SettingManager
from barcodes.views import tubes
from clients.models import CardBase, Individual, Card
from directory.models import AutoAdd, Fractions, ParaclinicInputGroups, ParaclinicInputField
from laboratory import settings
from laboratory.decorators import group_required
from podrazdeleniya.models import Podrazdeleniya
from results.views import result_normal
from rmis_integration.client import Client
from slog import models as slog
from slog.models import Log
from statistics_tickets.models import VisitPurpose, ResultOfTreatment, StatisticsTicket, Outcomes, \
ExcludePurposes
def translit(locallangstring):
    """Transliterate a Russian (Cyrillic) string into Latin characters.

    Characters without a mapping (Latin letters, digits, punctuation) pass
    through unchanged.

    :param locallangstring: original string
    :return: transliterated copy of locallangstring
    """
    conversion = {
        u'\u0410': 'A', u'\u0430': 'a',
        u'\u0411': 'B', u'\u0431': 'b',
        u'\u0412': 'V', u'\u0432': 'v',
        u'\u0413': 'G', u'\u0433': 'g',
        u'\u0414': 'D', u'\u0434': 'd',
        u'\u0415': 'E', u'\u0435': 'e',
        u'\u0401': 'Yo', u'\u0451': 'yo',
        u'\u0416': 'Zh', u'\u0436': 'zh',
        u'\u0417': 'Z', u'\u0437': 'z',
        u'\u0418': 'I', u'\u0438': 'i',
        u'\u0419': 'Y', u'\u0439': 'y',
        u'\u041a': 'K', u'\u043a': 'k',
        u'\u041b': 'L', u'\u043b': 'l',
        u'\u041c': 'M', u'\u043c': 'm',
        u'\u041d': 'N', u'\u043d': 'n',
        u'\u041e': 'O', u'\u043e': 'o',
        u'\u041f': 'P', u'\u043f': 'p',
        u'\u0420': 'R', u'\u0440': 'r',
        u'\u0421': 'S', u'\u0441': 's',
        u'\u0422': 'T', u'\u0442': 't',
        u'\u0423': 'U', u'\u0443': 'u',
        u'\u0424': 'F', u'\u0444': 'f',
        u'\u0425': 'H', u'\u0445': 'h',
        u'\u0426': 'Ts', u'\u0446': 'ts',
        u'\u0427': 'Ch', u'\u0447': 'ch',
        u'\u0428': 'Sh', u'\u0448': 'sh',
        u'\u0429': 'Sch', u'\u0449': 'sch',
        u'\u042a': '', u'\u044a': '',
        u'\u042b': 'Y', u'\u044b': 'y',
        u'\u042c': '', u'\u044c': '',
        u'\u042d': 'E', u'\u044d': 'e',
        u'\u042e': 'Yu', u'\u044e': 'yu',
        u'\u042f': 'Ya', u'\u044f': 'ya',
    }
    # dict.get leaves the table untouched; the original used setdefault, which
    # needlessly inserted every unmapped character into the dict on lookup.
    return ''.join(conversion.get(c, c) for c in locallangstring)
@csrf_exempt
def send(request):
    """
    Save analyzer (Sysmex) results posted by an external ASTM bridge.

    The payload arrives as a YAML document in the "result" parameter (POST or
    GET), keyed by tube/direction pk, plus an application "key" used to
    authenticate the sending app. Matching fractions are resolved via
    RelationFractionASTM and written as Result rows.

    :param request: Django request carrying "result" (YAML) and "key"
    :return: JsonResponse {"ok": bool, ...}
    """
    result = {"ok": False}
    try:
        if request.method == "POST":
            # SECURITY NOTE(review): yaml.load without an explicit Loader can
            # construct arbitrary Python objects from untrusted input —
            # consider yaml.safe_load here.
            resdict = yaml.load(request.POST["result"])
            appkey = request.POST.get("key", "")
        else:
            resdict = yaml.load(request.GET["result"])
            appkey = request.GET.get("key", "")
        astm_user = users.DoctorProfile.objects.filter(user__username="astm").first()
        resdict["pk"] = int(resdict.get("pk", -111))  # -111 marks a missing pk
        if "LYMPH%" in resdict["result"]:
            resdict["orders"] = {}
        dpk = -1
        if "bydirection" in request.POST or "bydirection" in request.GET:
            # pk refers to a direction, not a tube; map it to the first
            # unconfirmed tube registration of that direction
            dpk = resdict["pk"]
            # presumably 4600000000000 is a barcode prefix that is stripped
            # before use — TODO confirm against the barcode scheme
            if dpk >= 4600000000000:
                dpk -= 4600000000000
                dpk //= 10
            tubes(request, direction_implict_id=dpk)
            if directions.TubesRegistration.objects.filter(issledovaniya__napravleniye__pk=dpk,
                                                           issledovaniya__doc_confirmation__isnull=True).exists():
                resdict["pk"] = directions.TubesRegistration.objects.filter(
                    issledovaniya__napravleniye__pk=dpk, issledovaniya__doc_confirmation__isnull=True).order_by(
                    "pk").first().pk
            else:
                resdict["pk"] = False
        result["A"] = appkey
        # Proceed only for a known, active application and an existing tube
        if resdict["pk"] and models.Application.objects.filter(key=appkey).exists() and models.Application.objects.get(
                key=appkey).active and directions.TubesRegistration.objects.filter(pk=resdict["pk"]).exists():
            tubei = directions.TubesRegistration.objects.get(pk=resdict["pk"])
            direction = tubei.issledovaniya_set.first().napravleniye
            for key in resdict["result"].keys():
                if models.RelationFractionASTM.objects.filter(astm_field=key).exists():
                    fractionRels = models.RelationFractionASTM.objects.filter(astm_field=key)
                    for fractionRel in fractionRels:
                        fraction = fractionRel.fraction
                        if directions.Issledovaniya.objects.filter(napravleniye=direction,
                                                                   research=fraction.research,
                                                                   doc_confirmation__isnull=True).exists():
                            issled = directions.Issledovaniya.objects.filter(napravleniye=direction,
                                                                             research=fraction.research,
                                                                             doc_confirmation__isnull=True).order_by(
                                "pk")[0]
                            if directions.Result.objects.filter(issledovaniye=issled,
                                                                fraction=fraction).exists():  # If a result for this fraction already exists
                                fraction_result = directions.Result.objects.get(issledovaniye=issled,
                                                                                fraction__pk=fraction.pk)  # fetch the existing result from the DB
                            else:
                                fraction_result = directions.Result(issledovaniye=issled,
                                                                    fraction=fraction)  # create a new result
                            fraction_result.value = str(resdict["result"][key]).strip()  # set the value
                            if fraction_result.value.isdigit():
                                # Normalize bare integers to one decimal place
                                fraction_result.value = "%s.0" % fraction_result.value
                            # NOTE(review): this import shadows the module-level re
                            import re
                            # NOTE(review): the unescaped '.' matches any char;
                            # "\d+\.\d+" was probably intended
                            find = re.findall("\d+.\d+", fraction_result.value)
                            if len(find) > 0:
                                # Apply the configured unit multiplier to the numeric part
                                val = float(find[0]) * fractionRel.get_multiplier_display()
                                if fractionRel.full_round:
                                    val = round(val)
                                fraction_result.value = fraction_result.value.replace(find[0], str(val))
                            fraction_result.iteration = 1  # set the iteration
                            ref = fractionRel.default_ref
                            if ref:
                                # Copy reference-range metadata onto the result
                                fraction_result.ref_title = ref.title
                                fraction_result.ref_about = ref.about
                                fraction_result.ref_m = ref.m
                                fraction_result.ref_f = ref.f
                            fraction_result.save()  # save
                            issled.api_app = models.Application.objects.get(key=appkey)
                            issled.save()
                            fraction_result.get_ref(re_save=True)
                            fraction_result.issledovaniye.doc_save = astm_user  # who saved it
                            # NOTE(review): this import is unused — timezone.now() is used below
                            from datetime import datetime
                            fraction_result.issledovaniye.time_save = timezone.now()  # save timestamp
                            fraction_result.issledovaniye.save()
            slog.Log(key=appkey, type=22, body=json.dumps(resdict), user=None).save()
            result["ok"] = True
        elif not directions.TubesRegistration.objects.filter(pk=resdict["pk"]).exists():
            if dpk > -1:
                resdict["pk"] = dpk
            slog.Log(key=resdict["pk"], type=23, body=json.dumps(resdict), user=None).save()
    except Exception as e:
        # NOTE(review): broad catch converts any failure into an error payload
        result = {"ok": False, "Exception": True, "MSG": str(e)}
    return JsonResponse(result)
@csrf_exempt
def endpoint(request):
    """Integration endpoint for ASTM lab analyzers.

    The payload arrives as JSON in the "result" POST/GET parameter, together
    with an application API "key".  message_type "R" stores analyzer results
    for a direction (or tube), message_type "Q" answers a work-list query for
    the given pks.  Always returns {"answer": ..., "body": ...} as JSON and
    writes an audit log entry (type 6000).
    """
    result = {"answer": False, "body": ""}
    data = json.loads(request.POST.get("result", request.GET.get("result", "{}")))
    api_key = request.POST.get("key", request.GET.get("key", ""))
    message_type = data.get("message_type", "C")
    pk_s = str(data.get("pk", ""))
    pk = -1 if not pk_s.isdigit() else int(pk_s)
    data["app_name"] = "API key is incorrect"
    if models.Application.objects.filter(key=api_key).exists():
        # Saved results are attributed to the service profile "astm";
        # fall back to the first staff profile when it does not exist.
        astm_user = users.DoctorProfile.objects.filter(user__username="astm").first()
        if astm_user is None:
            astm_user = users.DoctorProfile.objects.filter(user__is_staff=True).order_by("pk").first()
        app = models.Application.objects.get(key=api_key)
        if app.active:
            data["app_name"] = app.name
            if message_type == "R":  # analyzer sends results
                if pk != -1:
                    dw = app.direction_work
                    if pk >= 4600000000000:
                        # NOTE(review): values above 4600000000000 look like scanned
                        # barcodes with a "460" prefix and a trailing check digit;
                        # strip both to recover the direction pk — confirm against
                        # the barcode generator.
                        pk -= 4600000000000
                        pk //= 10
                        dw = True
                    if dw:
                        # pk addresses the direction itself
                        direction = directions.Napravleniya.objects.filter(pk=pk).first()
                    else:
                        # pk addresses a tube; resolve the owning direction
                        direction = directions.Napravleniya.objects.filter(issledovaniya__tubes__pk=pk).first()
                    oks = []
                    if direction:
                        results = data.get("result", {})
                        for key in results:
                            ok = False
                            # An application-specific field mapping wins over a global one.
                            q = models.RelationFractionASTM.objects.filter(astm_field=key)
                            if q.filter(application_api=app).exists():
                                q = q.filter(application_api=app)
                                ok = True
                            elif q.filter(application_api__isnull=True).exists():
                                q = q.filter(application_api__isnull=True)
                                ok = True
                            if ok:
                                for fraction_rel in q:
                                    save_state = []
                                    issleds = []
                                    for issled in directions.Issledovaniya.objects.filter(napravleniye=direction,
                                                                                          research=fraction_rel.fraction.research,
                                                                                          doc_confirmation__isnull=True):
                                        # Reuse an existing Result row for this fraction or create a new one.
                                        if directions.Result.objects.filter(issledovaniye=issled,
                                                                            fraction=fraction_rel.fraction).exists():
                                            fraction_result = directions.Result.objects.get(issledovaniye=issled,
                                                                                            fraction=fraction_rel.fraction)
                                        else:
                                            fraction_result = directions.Result(issledovaniye=issled,
                                                                                fraction=fraction_rel.fraction)
                                        fraction_result.value = str(results[key]).strip()
                                        import re
                                        # NOTE(review): the unescaped dot in "\d+.\d+" matches ANY
                                        # character, not just a decimal point — probably intended
                                        # to be r"\d+\.\d+"; confirm before changing.
                                        find = re.findall("\d+.\d+", fraction_result.value)
                                        if len(find) > 0:
                                            # Apply the configured multiplier to every numeric fragment found.
                                            val_str = fraction_result.value
                                            for f in find:
                                                val = app.truncate(float(f) * fraction_rel.get_multiplier_display())
                                                val_str = val_str.replace(f, str(val))
                                            fraction_result.value = val_str
                                        fraction_result.iteration = 1
                                        ref = fraction_rel.default_ref
                                        if ref:
                                            fraction_result.ref_title = ref.title
                                            fraction_result.ref_about = ref.about
                                            fraction_result.ref_m = ref.m
                                            fraction_result.ref_f = ref.f
                                        fraction_result.save()
                                        issled.api_app = app
                                        issled.save()
                                        fraction_result.get_ref(re_save=True)
                                        fraction_result.issledovaniye.doc_save = astm_user
                                        fraction_result.issledovaniye.time_save = timezone.now()
                                        fraction_result.issledovaniye.save()
                                        save_state.append({"fraction": fraction_result.fraction.title,
                                                           "value": fraction_result.value})
                                        issleds.append({"pk": issled.pk, "title": issled.research.title})
                                    slog.Log(key=json.dumps({"direction": direction.pk, "issleds": str(issleds)}),
                                             type=22, body=json.dumps(save_state), user=None).save()
                            oks.append(ok)
                    result["body"] = "{} {} {} {}".format(dw, pk, json.dumps(oks), direction is not None)
                else:
                    result["body"] = "pk '{}' is not exists".format(pk_s)
            elif message_type == "Q":  # analyzer queries its work list
                result["answer"] = True
                pks = [int(x) for x in data.get("query", [])]
                researches = defaultdict(list)
                for row in app.get_issledovaniya(pks):
                    k = row["pk"]
                    i = row["iss"]
                    for fraction in Fractions.objects.filter(research=i.research,
                                                             hide=False):
                        # Prefer the application-specific mapping; skip fractions with no mapping at all.
                        rel = models.RelationFractionASTM.objects.filter(fraction=fraction, application_api=app)
                        if not rel.exists():
                            rel = models.RelationFractionASTM.objects.filter(fraction=fraction)
                            if not rel.exists():
                                continue
                        rel = rel[0]
                        researches[k].append(rel.astm_field)
                result["body"] = researches
            else:
                pass
        else:
            data["app_name"] = "API app banned"
            result["body"] = "API app banned"
    else:
        result["body"] = "API key is incorrect"
    slog.Log(key=pk, type=6000, body=json.dumps(data), user=None).save()
    return JsonResponse(result)
@login_required
def departments(request):
    """List departments (GET) or, for authorized users, update/insert them.

    GET returns all departments plus the allowed department types; type 3
    (paraclinic) is offered only when the paraclinic module is enabled.
    POST body: {"type": "update"|"insert", "data": [{"pk", "title", "type"}]}.
    Returns {"ok": bool, "message": str} for edits; rows with an empty title
    are silently skipped.
    """
    from podrazdeleniya.models import Podrazdeleniya
    can_edit = request.user.is_superuser or request.user.doctorprofile.has_group(
        'Создание и редактирование пользователей')
    if request.method == "GET":
        # Hoisted out of the comprehension: one settings lookup instead of one per type.
        paraclinic_enabled = SettingManager.get("paraclinic_module", default='false', default_type='b')
        return JsonResponse(
            {"departments": [{"pk": x.pk, "title": x.get_title(), "type": str(x.p_type), "updated": False} for
                             x in Podrazdeleniya.objects.all().order_by("pk")],
             "can_edit": can_edit,
             "types": [{"pk": str(x[0]), "title": x[1]} for x in Podrazdeleniya.TYPES if
                       paraclinic_enabled or x[0] != 3]})
    elif can_edit:
        ok = False
        message = ""
        try:
            req = json.loads(request.body)
            data_type = req.get("type", "update")
            rows = req.get("data", [])
            if data_type == "update":
                for row in rows:
                    title = row["title"].strip()
                    if len(title) > 0:
                        department = Podrazdeleniya.objects.get(pk=row["pk"])
                        department.title = title
                        department.p_type = int(row["type"])
                        department.save()
                        ok = True
            elif data_type == "insert":
                for row in rows:
                    title = row["title"].strip()
                    if len(title) > 0:
                        Podrazdeleniya(title=title, p_type=int(row["type"])).save()
                        ok = True
        except Exception as e:
            # BUG FIX: the original used try/finally with a return inside finally,
            # which silently swallowed every exception and never filled "message".
            message = str(e)
        return JsonResponse({"ok": ok, "message": message})
    # BUG FIX: JsonResponse(0) raises TypeError for non-dict payloads
    # unless safe=False is passed.
    return JsonResponse(0, safe=False)
@login_required
def bases(request):
    """Return every card base together with its financing sources."""
    from clients.models import CardBase
    bases_list = []
    for base in CardBase.objects.all():
        fin_sources = [
            {"pk": fin.pk, "title": fin.title, "default_diagnos": fin.default_diagnos}
            for fin in directions.IstochnikiFinansirovaniya.objects.filter(base=base)
        ]
        bases_list.append({
            "pk": base.pk,
            "title": base.title,
            "code": base.short_title,
            "hide": base.hide,
            "history_number": base.history_number,
            "fin_sources": fin_sources,
        })
    return JsonResponse({"bases": bases_list})
class ResearchesTemplates(View):
    """Assignment templates visible to the current user."""

    def get(self, request):
        from django.db.models import Q
        # Global templates plus those bound to this doctor or their department.
        profile = request.user.doctorprofile
        visible = users.AssignmentTemplates.objects.filter(
            Q(doc__isnull=True, podrazdeleniye__isnull=True)
            | Q(doc=profile)
            | Q(podrazdeleniye=profile.podrazdeleniye))
        templates = [
            {"values": [link.research.pk for link in users.AssignmentResearches.objects.filter(template=template)],
             "pk": template.pk,
             "title": template.title,
             "for_current_user": template.doc is not None,
             "for_users_department": template.podrazdeleniye is not None}
            for template in visible
        ]
        return JsonResponse({"templates": templates})

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        return super(self.__class__, self).dispatch(request, *args, **kwargs)
from directory.models import Researches as DResearches
class Researches(View):
    """Non-hidden researches grouped by owning department."""

    def get(self, request):
        deps = defaultdict(list)
        for research in DResearches.objects.filter(hide=False).order_by("title"):
            # Cross-links: researches auto-added with / adding this one.
            auto_added = [link.b.pk for link in AutoAdd.objects.filter(a=research)]
            added_to = [link.a.pk for link in AutoAdd.objects.filter(b=research)]
            deps[research.podrazdeleniye.pk].append({
                "pk": research.pk,
                "onlywith": research.onlywith.pk if research.onlywith else -1,
                "department_pk": research.podrazdeleniye.pk,
                "title": research.get_title(),
                "full_title": research.title,
                "comment_variants": research.comment_variants.get_variants() if research.comment_variants else [],
                "autoadd": auto_added,
                "addto": added_to,
                "code": research.code,
                "type": str(research.podrazdeleniye.p_type),
            })
        return JsonResponse({"researches": deps})

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        return super(self.__class__, self).dispatch(request, *args, **kwargs)
def current_user_info(request):
    """Basic profile data for the logged-in user (or defaults when anonymous)."""
    ret = {"auth": request.user.is_authenticated, "doc_pk": -1, "username": "", "fio": "",
           "department": {"pk": -1, "title": ""}, "groups": []}
    if ret["auth"]:
        profile = request.user.doctorprofile
        ret.update({
            "username": request.user.username,
            "fio": profile.fio,
            "groups": list(request.user.groups.values_list('name', flat=True)),
            "doc_pk": profile.pk,
            "department": {"pk": profile.podrazdeleniye.pk,
                           "title": profile.podrazdeleniye.title},
        })
    return JsonResponse(ret)
@login_required
def directive_from(request):
    """Departments with their attending physicians, ordered by title."""
    from users.models import DoctorProfile
    data = []
    for department in Podrazdeleniya.objects.filter(p_type=Podrazdeleniya.DEPARTMENT).order_by('title'):
        doctors = DoctorProfile.objects.filter(
            podrazdeleniye=department,
            user__groups__name="Лечащий врач").order_by("fio")
        data.append({
            "pk": department.pk,
            "title": department.title,
            "docs": [{"pk": doc.pk, "fio": doc.fio} for doc in doctors],
        })
    return JsonResponse({"data": data})
@login_required
def patients_search_card(request):
    """Search patient cards within one card base.

    The "query" string is dispatched by format:
      * 3 Cyrillic letters + 8 digits -> FIO initials + birth date (ddmmyyyy);
      * words, optionally followed by dd.mm.yyyy -> family/name/patronymic search;
      * bare digits -> card number;
      * "card_pk:<n>" -> direct card lookup by primary key.
    For RMIS-backed bases, missing individuals may be imported on the fly and
    found ones re-synchronized.  Returns {"results": [...]}.
    """
    objects = []
    data = []
    d = json.loads(request.body)
    card_type = CardBase.objects.get(pk=d['type'])
    query = d['query'].strip()
    # initials + birth date, e.g. "ивп01021990"
    p = re.compile(r'[а-яё]{3}[0-9]{8}', re.IGNORECASE)
    # "Family [Name [Patronymic [dd.mm.yyyy]]]" (А-я is the Cyrillic letter range)
    p2 = re.compile(r'^([А-яЕё]+)( ([А-яЕё]+)( ([А-яЕё]*)( ([0-9]{2}\.[0-9]{2}\.[0-9]{4}))?)?)?$')
    # bare number -> card number
    p3 = re.compile(r'[0-9]{1,15}')
    # explicit card primary key
    p4 = re.compile(r'card_pk:\d+')
    pat_bd = re.compile(r"\d{4}-\d{2}-\d{2}")
    if re.search(p, query.lower()):
        initials = query[0:3].upper()
        # ddmmyyyy -> yyyy-mm-dd
        btday = query[7:11] + "-" + query[5:7] + "-" + query[3:5]
        if not pat_bd.match(btday):
            return JsonResponse([], safe=False)
        try:
            objects = Individual.objects.filter(family__startswith=initials[0], name__startswith=initials[1],
                                                patronymic__startswith=initials[2], birthday=btday,
                                                card__base=card_type)
            if card_type.is_rmis and len(objects) == 0:
                # Nothing local: query RMIS by the three initial letters + birth date.
                c = Client()
                objects = c.patients.import_individual_to_base(
                    {"surname": query[0] + "%", "name": query[1] + "%", "patrName": query[2] + "%", "birthDate": btday},
                    fio=True)
        except ValidationError:
            objects = []
    elif re.search(p2, query):
        split = str(query).split()
        n = p = ""
        f = split[0]
        rmis_req = {"surname": f + "%"}
        if len(split) > 1:
            n = split[1]
            rmis_req["name"] = n + "%"
        if len(split) > 2:
            p = split[2]
            rmis_req["patrName"] = p + "%"
        if len(split) > 3:
            btday = split[3].split(".")
            btday = btday[2] + "-" + btday[1] + "-" + btday[0]
            rmis_req["birthDate"] = btday
        objects = Individual.objects.filter(family__istartswith=f, name__istartswith=n,
                                            patronymic__istartswith=p, card__base=card_type)[:10]
        if len(split) > 3:
            # A birth date narrows the search further.
            objects = Individual.objects.filter(family__istartswith=f, name__istartswith=n,
                                                patronymic__istartswith=p, card__base=card_type,
                                                birthday=datetime.datetime.strptime(split[3], "%d.%m.%Y").date())[:10]
        if card_type.is_rmis and (len(objects) == 0 or (len(split) < 4 and len(objects) < 10)):
            # Top up local matches from RMIS to at most 10 entries.
            objects = list(objects)
            try:
                c = Client()
                objects += c.patients.import_individual_to_base(rmis_req, fio=True, limit=10 - len(objects))
            except ConnectionError:
                pass
    if re.search(p3, query) or card_type.is_rmis:
        resync = True
        if len(list(objects)) == 0:
            resync = False
            try:
                objects = Individual.objects.filter(card__number=query.upper(), card__is_archive=False,
                                                    card__base=card_type)
            except ValueError:
                pass
            # NOTE(review): a 16-character query here is presumably an ENP/policy
            # number understood by the RMIS import — confirm.
            if card_type.is_rmis and len(objects) == 0 and len(query) == 16:
                c = Client()
                objects = c.patients.import_individual_to_base(query)
        else:
            resync = True
        if resync and card_type.is_rmis:
            # Refresh already-known individuals from RMIS.
            c = Client()
            for o in objects:
                o.sync_with_rmis(c=c)
    if re.search(p4, query):
        cards = Card.objects.filter(pk=int(query.split(":")[1]))
    else:
        cards = Card.objects.filter(base=card_type, individual__in=objects, is_archive=False)
    if re.match(p3, query):
        cards = cards.filter(number=query)
    for row in cards.prefetch_related("individual").distinct():
        data.append({"type_title": card_type.title,
                     "num": row.number,
                     "family": row.individual.family,
                     "name": row.individual.name,
                     "twoname": row.individual.patronymic,
                     "birthday": row.individual.bd(),
                     "age": row.individual.age_s(),
                     "sex": row.individual.sex,
                     "individual_pk": row.individual.pk,
                     "pk": row.pk})
    return JsonResponse({"results": data})
@login_required
def patients_search_individual(request):
    """Search individuals (not bound to a card base), capped at 25 results.

    Query formats mirror patients_search_card: initials + ddmmyyyy birth date,
    FIO words (optionally with dd.mm.yyyy), or "individual_pk:<n>".
    """
    objects = []
    data = []
    d = json.loads(request.body)
    query = d['query'].strip()
    # initials + birth date, e.g. "ивп01021990"
    p = re.compile(r'[а-яё]{3}[0-9]{8}', re.IGNORECASE)
    # "Family [Name [Patronymic [dd.mm.yyyy]]]"
    p2 = re.compile(r'^([А-яЕё]+)( ([А-яЕё]+)( ([А-яЕё]*)( ([0-9]{2}\.[0-9]{2}\.[0-9]{4}))?)?)?$')
    p4 = re.compile(r'individual_pk:\d+')
    pat_bd = re.compile(r"\d{4}-\d{2}-\d{2}")
    if re.search(p, query.lower()):
        initials = query[0:3].upper()
        # ddmmyyyy -> yyyy-mm-dd
        btday = query[7:11] + "-" + query[5:7] + "-" + query[3:5]
        if not pat_bd.match(btday):
            return JsonResponse([], safe=False)
        try:
            objects = Individual.objects.filter(family__startswith=initials[0], name__startswith=initials[1],
                                                patronymic__startswith=initials[2], birthday=btday)
        except ValidationError:
            objects = []
    elif re.search(p2, query):
        split = str(query).split()
        n = p = ""
        f = split[0]
        # NOTE(review): rmis_req is built but never used in this view
        # (unlike patients_search_card) — possibly leftover copy-paste.
        rmis_req = {"surname": f + "%"}
        if len(split) > 1:
            n = split[1]
            rmis_req["name"] = n + "%"
        if len(split) > 2:
            p = split[2]
            rmis_req["patrName"] = p + "%"
        if len(split) > 3:
            btday = split[3].split(".")
            btday = btday[2] + "-" + btday[1] + "-" + btday[0]
            rmis_req["birthDate"] = btday
        objects = Individual.objects.filter(family__istartswith=f, name__istartswith=n, patronymic__istartswith=p)
        if len(split) > 3:
            objects = Individual.objects.filter(family__istartswith=f, name__istartswith=n,
                                                patronymic__istartswith=p,
                                                birthday=datetime.datetime.strptime(split[3], "%d.%m.%Y").date())
    if re.search(p4, query):
        objects = Individual.objects.filter(pk=int(query.split(":")[1]))
    # NOTE(review): if no branch matched (or ValidationError fired), "objects"
    # is a plain list and .distinct() below would raise AttributeError — confirm
    # queries always match one of the patterns.
    n = 0
    for row in objects.distinct().order_by("family", "name", "patronymic", "birthday"):
        n += 1
        data.append({"family": row.family,
                     "name": row.name,
                     "patronymic": row.patronymic,
                     "birthday": row.bd(),
                     "age": row.age_s(),
                     "sex": row.sex,
                     "pk": row.pk})
        if n == 25:
            break
    return JsonResponse({"results": data})
@login_required
@group_required("Лечащий врач", "Оператор лечащего врача")
def directions_generate(request):
    """Create directions for the selected researches of a patient card."""
    result = {"ok": False, "directions": [], "message": ""}
    if request.method == "POST":
        body = json.loads(request.body)
        rc = directions.Napravleniya.gen_napravleniya_by_issledovaniya(
            body.get("card_pk"),
            body.get("diagnos"),
            body.get("fin_source"),
            body.get("history_num"),
            body.get("ofname_pk"),
            request.user.doctorprofile,
            body.get("researches"),
            body.get("comments"))
        result["ok"] = rc["r"]
        result["directions"] = json.loads(rc["list_id"])
        result["message"] = rc.get("message", "")
    return JsonResponse(result)
@login_required
def directions_history(request):
    """Directions of a patient (or of the current doctor) for a date range.

    Body: {"patient": pk, "type": requested status filter, "date_from", "date_to"
    as dd.mm.yyyy}.  type 4 means "mine, any status"; type 3 also passes every
    status through.  The overall direction status is the minimum of its
    examination statuses.
    """
    import datetime
    res = {"directions": []}
    request_data = json.loads(request.body)
    pk = request_data.get("patient", -1)
    req_status = request_data.get("type", 4)
    date_start = request_data["date_from"].split(".")
    date_end = request_data["date_to"].split(".")
    date_start = datetime.date(int(date_start[2]), int(date_start[1]), int(date_start[0]))
    # End date is exclusive: add one day to cover the whole "date_to" day.
    date_end = datetime.date(int(date_end[2]), int(date_end[1]), int(date_end[0])) + datetime.timedelta(days=1)
    try:
        if pk >= 0 or req_status == 4:
            if req_status != 4:
                rows = directions.Napravleniya.objects.filter(data_sozdaniya__range=(date_start, date_end),
                                                              client__pk=pk).order_by(
                    "-data_sozdaniya").prefetch_related()
            else:
                # "Mine": created by or assigned to the current doctor.
                rows = directions.Napravleniya.objects.filter(Q(data_sozdaniya__range=(date_start, date_end),
                                                                doc_who_create=request.user.doctorprofile)
                                                              | Q(data_sozdaniya__range=(date_start, date_end),
                                                                  doc=request.user.doctorprofile)).order_by(
                    "-data_sozdaniya")
            for napr in rows.values("pk", "data_sozdaniya", "cancel"):
                iss_list = directions.Issledovaniya.objects.filter(napravleniye__pk=napr["pk"]).prefetch_related(
                    "tubes", "research", "research__podrazdeleniye")
                if not iss_list.exists():
                    continue
                status = 2  # 0 - issued; 1 - material received by the lab; 2 - result confirmed; -1 - cancelled
                has_conf = False
                researches_list = []
                researches_pks = []
                for v in iss_list:
                    researches_list.append(v.research.title)
                    researches_pks.append(v.research.pk)
                    iss_status = 1
                    if not v.doc_confirmation and not v.doc_save and not v.deferred:
                        iss_status = 1
                        if v.tubes.count() == 0:
                            iss_status = 0
                        else:
                            # Any tube not yet received drops the status to "issued".
                            for t in v.tubes.all():
                                if not t.time_recive:
                                    iss_status = 0
                    elif v.doc_confirmation or v.deferred:
                        iss_status = 2
                    if v.doc_confirmation and not has_conf:
                        has_conf = True
                    status = min(iss_status, status)
                # "Confirmed" only when at least one examination really is.
                if status == 2 and not has_conf:
                    status = 1
                if req_status in [3, 4] or req_status == status:
                    res["directions"].append(
                        {"pk": napr["pk"], "status": -1 if status == 0 and napr["cancel"] else status,
                         "researches": ' | '.join(researches_list),
                         "researches_pks": researches_pks,
                         "date": str(dateformat.format(napr["data_sozdaniya"].date(), settings.DATE_FORMAT_SHORT)),
                         "lab": iss_list[0].research.get_podrazdeleniye().title, "cancel": napr["cancel"],
                         "checked": False})
    except (ValueError, IndexError) as e:
        res["message"] = str(e)
    return JsonResponse(res)
@login_required
def directions_cancel(request):
    """Toggle the cancel flag of a direction.

    Body: {"pk": direction pk}.  Returns {"cancel": <new flag>}; the flag
    stays False when the direction does not exist.
    """
    response = {"cancel": False}
    request_data = json.loads(request.body)
    pk = request_data.get("pk", -1)
    # Single query via first() instead of the original exists() + get() pair.
    direction = directions.Napravleniya.objects.filter(pk=pk).first()
    if direction is not None:
        direction.cancel = not direction.cancel
        direction.save()
        response["cancel"] = direction.cancel
    return JsonResponse(response)
@login_required
def researches_params(request):
    """Selectable parameters (input groups or fractions) for given researches."""
    response = {"researches": []}
    request_data = json.loads(request.body)
    pks = request_data.get("pks", [])
    for research in DResearches.objects.filter(pk__in=pks):
        if research.is_paraclinic:
            # Paraclinic researches expose their titled input groups.
            params = [{"pk": group.pk, "title": group.title}
                      for group in ParaclinicInputGroups.objects.filter(research=research)
                      .exclude(title="").order_by("order")]
        else:
            # Lab researches expose their fractions.
            params = [{"pk": fraction.pk, "title": fraction.title}
                      for fraction in Fractions.objects.filter(research=research).order_by("sort_weight")]
        response["researches"].append({"pk": research.pk, "title": research.title,
                                       "short_title": research.get_title(),
                                       "params": params, "is_paraclinic": research.is_paraclinic,
                                       "selected_params": []})
    return JsonResponse(response)
@login_required
@group_required("Оператор", "Конструктор: Параклинические (описательные) исследования")
def researches_by_department(request):
    """All researches belonging to one department (empty for department -1)."""
    response = {"researches": []}
    request_data = json.loads(request.body)
    department_pk = int(request_data["department"])
    if department_pk != -1:
        queryset = DResearches.objects.filter(podrazdeleniye__pk=department_pk).order_by("title")
        response["researches"] = [{
            "pk": research.pk,
            "title": research.title,
            "short_title": research.short_title,
            "preparation": research.preparation,
            "hide": research.hide,
            "code": research.code,
        } for research in queryset]
    return JsonResponse(response)
@login_required
@group_required("Оператор", "Конструктор: Параклинические (описательные) исследования")
def researches_update(request):
    """Create or update a research with its paraclinic input groups and fields.

    Body: {"pk": -1 to create (else existing pk), "department", "title",
    "short_title", "code", "info", "hide", "groups": [{"pk", "title",
    "show_title", "order", "hide", "fields": [...]}, ...]}.
    pk == -1 inside a group/field likewise means "create".  Returns {"ok": bool}.
    """
    response = {"ok": False}
    request_data = json.loads(request.body)
    pk = request_data.get("pk", -2)
    if pk > -2:
        department_pk = request_data.get("department")
        title = request_data.get("title").strip()
        short_title = request_data.get("short_title").strip()
        code = request_data.get("code").strip()
        info = request_data.get("info").strip()
        hide = request_data.get("hide")
        groups = request_data.get("groups")
        if len(title) > 0 and Podrazdeleniya.objects.filter(pk=department_pk).exists():
            department = Podrazdeleniya.objects.filter(pk=department_pk)[0]
            res = None
            if pk == -1:
                # Department type 3 marks a paraclinic (descriptive) research.
                res = DResearches(title=title, short_title=short_title, podrazdeleniye=department, code=code,
                                  is_paraclinic=department.p_type == 3, paraclinic_info=info, hide=hide)
            elif DResearches.objects.filter(pk=pk).exists():
                res = DResearches.objects.filter(pk=pk)[0]
                res.title = title
                res.short_title = short_title
                res.podrazdeleniye = department
                res.code = code
                res.is_paraclinic = department.p_type == 3
                res.paraclinic_info = info
                res.hide = hide
            if res:
                res.save()
                for group in groups:
                    g = None
                    pk = group["pk"]
                    if pk == -1:
                        g = ParaclinicInputGroups(title=group["title"],
                                                  show_title=group["show_title"],
                                                  research=res,
                                                  order=group["order"],
                                                  hide=group["hide"])
                    elif ParaclinicInputGroups.objects.filter(pk=pk).exists():
                        g = ParaclinicInputGroups.objects.get(pk=pk)
                        g.title = group["title"]
                        g.show_title = group["show_title"]
                        g.research = res
                        g.order = group["order"]
                        g.hide = group["hide"]
                    if g:
                        g.save()
                        for field in group["fields"]:
                            f = None
                            pk = field["pk"]
                            if pk == -1:
                                f = ParaclinicInputField(title=field["title"],
                                                         group=g,
                                                         order=field["order"],
                                                         lines=field["lines"],
                                                         hide=field["hide"],
                                                         default_value=field["default"],
                                                         input_templates=json.dumps(field["values_to_input"]))
                            elif ParaclinicInputField.objects.filter(pk=pk).exists():
                                f = ParaclinicInputField.objects.get(pk=pk)
                                f.title = field["title"]
                                f.group = g
                                f.order = field["order"]
                                f.lines = field["lines"]
                                f.hide = field["hide"]
                                f.default_value = field["default"]
                                f.input_templates = json.dumps(field["values_to_input"])
                            if f:
                                f.save()
                response["ok"] = True
    # NOTE(review): "pk" is reassigned inside the group/field loops above, so the
    # log key here is the pk of the LAST processed group/field, not the research
    # pk from the request — confirm whether that is intended.
    slog.Log(key=pk, type=10000, body=json.dumps(request_data), user=request.user.doctorprofile).save()
    return JsonResponse(response)
@login_required
@group_required("Оператор", "Конструктор: Параклинические (описательные) исследования")
def researches_details(request):
    """Full editable description of one research, including its input groups/fields."""
    response = {"pk": -1, "department": -1, "title": '', "short_title": '', "code": '', "info": '', "hide": False,
                "groups": []}
    request_data = json.loads(request.body)
    pk = request_data.get("pk")
    research = DResearches.objects.filter(pk=pk).first()
    if research is not None:
        response["pk"] = research.pk
        response["department"] = research.podrazdeleniye.pk
        response["title"] = research.title
        response["short_title"] = research.short_title
        response["code"] = research.code
        response["info"] = research.paraclinic_info or ""
        response["hide"] = research.hide
        for group in ParaclinicInputGroups.objects.filter(research__pk=pk).order_by("order"):
            fields = [{
                "pk": field.pk,
                "order": field.order,
                "lines": field.lines,
                "title": field.title,
                "default": field.default_value,
                "hide": field.hide,
                "values_to_input": json.loads(field.input_templates),
                "new_value": ""
            } for field in ParaclinicInputField.objects.filter(group=group).order_by("order")]
            response["groups"].append({"pk": group.pk, "order": group.order, "title": group.title,
                                       "show_title": group.show_title, "hide": group.hide,
                                       "fields": fields})
    return JsonResponse(response)
@login_required
@group_required("Оператор", "Конструктор: Параклинические (описательные) исследования")
def paraclinic_details(request):
    """Input groups and fields of one paraclinic research (read-only view)."""
    response = {"groups": []}
    request_data = json.loads(request.body)
    pk = request_data.get("pk")
    for group in ParaclinicInputGroups.objects.filter(research__pk=pk).order_by("order"):
        fields = [{
            "pk": field.pk,
            "order": field.order,
            "lines": field.lines,
            "title": field.title,
            "default": field.default_value,
            "hide": field.hide,
            "values_to_input": json.loads(field.input_templates)
        } for field in ParaclinicInputField.objects.filter(group=group).order_by("order")]
        response["groups"].append({"pk": group.pk, "order": group.order, "title": group.title,
                                   "show_title": group.show_title, "hide": group.hide,
                                   "fields": fields})
    return JsonResponse(response)
@login_required
def directions_results(request):
    """Results of one direction for display/printing.

    Body: {"pk": direction pk}.  Collects every examination of the direction,
    its fraction results with reference ranges, expands microbiology culture
    tables (render_type != 0), and substitutes "отложен" placeholders for
    deferred, unconfirmed examinations.
    """
    result = {"ok": False,
              "direction": {"pk": -1, "doc": "", "date": ""},
              "client": {},
              "full": False}
    request_data = json.loads(request.body)
    pk = request_data.get("pk", -1)
    if directions.Napravleniya.objects.filter(pk=pk).exists():
        napr = directions.Napravleniya.objects.get(pk=pk)
        # The direction's display date is the save date shared by most examinations.
        dates = {}
        for iss in directions.Issledovaniya.objects.filter(napravleniye=napr, time_save__isnull=False):
            if iss.time_save:
                dt = str(dateformat.format(iss.time_save, settings.DATE_FORMAT))
                if dt not in dates.keys():
                    dates[dt] = 0
                dates[dt] += 1
        import operator
        maxdate = ""
        if dates != {}:
            maxdate = max(dates.items(), key=operator.itemgetter(1))[0]
        iss_list = directions.Issledovaniya.objects.filter(napravleniye=napr)
        t = 0
        if not iss_list.filter(doc_confirmation__isnull=True).exists() or iss_list.filter(deferred=False).exists():
            result["direction"]["pk"] = napr.pk
            result["full"] = False
            result["ok"] = True
            if iss_list.filter(doc_confirmation__isnull=False).exists():
                result["direction"]["doc"] = iss_list.filter(doc_confirmation__isnull=False)[
                    0].doc_confirmation.get_fio()
                if iss_list.filter(doc_confirmation__isnull=True, deferred=False).exists():
                    result["direction"]["doc"] = result["direction"]["doc"] + " (выполнено не полностью)"
                else:
                    result["full"] = True
            else:
                result["direction"]["doc"] = "Не подтверждено"
            result["direction"]["date"] = maxdate
            result["client"]["sex"] = napr.client.individual.sex
            result["client"]["fio"] = napr.client.individual.fio()
            result["client"]["age"] = napr.client.individual.age_s(direction=napr)
            result["client"]["cardnum"] = napr.client.number_with_type()
            result["client"]["dr"] = napr.client.individual.bd()
            result["results"] = collections.OrderedDict()
            isses = []
            for issledovaniye in iss_list.order_by("tubes__id", "research__sort_weight"):
                # The tubes join can duplicate rows; keep each examination once.
                if issledovaniye.pk in isses:
                    continue
                isses.append(issledovaniye.pk)
                t += 1
                # Composite sort key: position, direction group, weight, research pk.
                kint = "%s_%s_%s_%s" % (t,
                                        "-1" if not issledovaniye.research.direction else issledovaniye.research.direction.pk,
                                        issledovaniye.research.sort_weight,
                                        issledovaniye.research.pk)
                result["results"][kint] = {"title": issledovaniye.research.title,
                                           "fractions": collections.OrderedDict(),
                                           "sort": issledovaniye.research.sort_weight,
                                           "tube_time_get": ""}
                if not issledovaniye.deferred or issledovaniye.doc_confirmation:
                    for isstube in issledovaniye.tubes.all():
                        if isstube.time_get:
                            result["results"][kint]["tube_time_get"] = str(
                                dateformat.format(isstube.time_get, settings.DATE_FORMAT))
                            break
                    results = directions.Result.objects.filter(issledovaniye=issledovaniye).order_by(
                        "fraction__sort_weight")  # fetch stored results from the database
                    n = 0
                    # NOTE(review): "pk" (the request parameter) is reused below as the
                    # fraction sort key — confirm nothing later relies on the original pk.
                    for res in results:  # iterate over stored results
                        pk = res.fraction.sort_weight
                        if not pk or pk <= 0:
                            pk = res.fraction.pk
                        if res.fraction.render_type == 0:
                            # Plain scalar result with reference ranges.
                            if pk not in result["results"][kint]["fractions"].keys():
                                result["results"][kint]["fractions"][pk] = {}
                            result["results"][kint]["fractions"][pk]["result"] = result_normal(res.value)
                            result["results"][kint]["fractions"][pk]["title"] = res.fraction.title
                            result["results"][kint]["fractions"][pk]["units"] = res.fraction.units
                            refs = res.get_ref(full=True)
                            ref_m = refs["m"]
                            ref_f = refs["f"]
                            if isinstance(ref_m, str):
                                ref_m = json.loads(ref_m)
                            if isinstance(ref_f, str):
                                ref_f = json.loads(ref_f)
                            result["results"][kint]["fractions"][pk]["ref_m"] = ref_m
                            result["results"][kint]["fractions"][pk]["ref_f"] = ref_f
                        else:
                            # Microbiology culture table stored as JSON in the value.
                            try:
                                tmp_results = json.loads("{}" if not res.value else res.value).get("rows", {})
                            except Exception:
                                tmp_results = {}
                            n = 0
                            for row in tmp_results.values():
                                n += 1
                                tmp_pk = "%d_%d" % (pk, n)
                                if tmp_pk not in result["results"][kint]["fractions"].keys():
                                    result["results"][kint]["fractions"][tmp_pk] = {}
                                result["results"][kint]["fractions"][tmp_pk]["title"] = "Выделенная культура"
                                result["results"][kint]["fractions"][tmp_pk]["result"] = row["title"]
                                result["results"][kint]["fractions"][tmp_pk]["ref_m"] = {}
                                result["results"][kint]["fractions"][tmp_pk]["ref_f"] = {}
                                result["results"][kint]["fractions"][tmp_pk]["units"] = ""
                                for subrow in row["rows"].values():
                                    if "null" in subrow["value"]:
                                        continue
                                    n += 1
                                    tmp_pk = "%d_%d" % (pk, n)
                                    if tmp_pk not in result["results"][kint]["fractions"].keys():
                                        result["results"][kint]["fractions"][tmp_pk] = {}
                                    result["results"][kint]["fractions"][tmp_pk]["title"] = subrow["title"]
                                    result["results"][kint]["fractions"][tmp_pk]["result"] = subrow["value"]
                                    result["results"][kint]["fractions"][tmp_pk]["ref_m"] = {}
                                    result["results"][kint]["fractions"][tmp_pk]["ref_f"] = {}
                                    result["results"][kint]["fractions"][tmp_pk]["units"] = ""
                                # Trailing legend row for antibiotic sensitivity codes.
                                n += 1
                                tmp_pk = "%d_%d" % (pk, n)
                                if tmp_pk not in result["results"][kint]["fractions"].keys():
                                    result["results"][kint]["fractions"][tmp_pk] = {}
                                result["results"][kint]["fractions"][tmp_pk][
                                    "title"] = "S - чувствителен; R - резистентен; I - промежуточная чувствительность;"
                                result["results"][kint]["fractions"][tmp_pk]["result"] = ""
                                result["results"][kint]["fractions"][tmp_pk]["ref_m"] = {}
                                result["results"][kint]["fractions"][tmp_pk]["ref_f"] = {}
                                result["results"][kint]["fractions"][tmp_pk]["units"] = ""
                    if issledovaniye.lab_comment and issledovaniye.lab_comment != "":
                        # Lab comment appended as one extra pseudo-fraction row.
                        n += 1
                        tmp_pk = "%d_%d" % (pk, n)
                        if tmp_pk not in result["results"][kint]["fractions"].keys():
                            result["results"][kint]["fractions"][tmp_pk] = {}
                        result["results"][kint]["fractions"][tmp_pk]["title"] = "Комментарий"
                        result["results"][kint]["fractions"][tmp_pk]["result"] = issledovaniye.lab_comment.replace("\n",
                                                                                                                  "<br/>")
                        result["results"][kint]["fractions"][tmp_pk]["ref_m"] = {}
                        result["results"][kint]["fractions"][tmp_pk]["ref_f"] = {}
                        result["results"][kint]["fractions"][tmp_pk]["units"] = ""
                else:
                    # Deferred and unconfirmed: show placeholders for every fraction.
                    fr_list = Fractions.objects.filter(research=issledovaniye.research)
                    for fr in fr_list:
                        pk = fr.sort_weight
                        if not pk or pk <= 0:
                            pk = fr.pk
                        if pk not in result["results"][kint]["fractions"].keys():
                            result["results"][kint]["fractions"][pk] = {}
                        result["results"][kint]["fractions"][pk]["result"] = "отложен"  # value
                        result["results"][kint]["fractions"][pk][
                            "title"] = fr.title  # fraction title
                        result["results"][kint]["fractions"][pk][
                            "units"] = fr.units  # units of measurement
                        ref_m = {"": ""}  # fr.ref_m
                        ref_f = {"": ""}  # fr.ref_f
                        if not isinstance(ref_m, str):
                            ref_m = json.loads(ref_m)
                        if not isinstance(ref_f, str):
                            ref_f = json.loads(ref_f)
                        result["results"][kint]["fractions"][pk]["ref_m"] = ref_m  # male reference ranges
                        result["results"][kint]["fractions"][pk]["ref_f"] = ref_f  # female reference ranges
    return JsonResponse(result)
@group_required("Оформление статталонов")
def statistics_tickets_types(request):
    """Reference dictionaries for the statistics-ticket form."""
    def serialize(manager):
        # Visible entries only, in pk order.
        return [{"pk": item.pk, "title": item.title}
                for item in manager.filter(hide=False).order_by("pk")]

    return JsonResponse({
        "visit": serialize(VisitPurpose.objects),
        "result": serialize(ResultOfTreatment.objects),
        "outcome": serialize(Outcomes.objects),
        "exclude": serialize(ExcludePurposes.objects),
    })
@group_required("Оформление статталонов")
def statistics_tickets_send(request):
    """Save a new statistics ticket from the submitted form data."""
    rd = json.loads(request.body)
    ticket = StatisticsTicket(
        card=Card.objects.get(pk=rd["card_pk"]),
        purpose=VisitPurpose.objects.get(pk=rd["visit"]),
        result=ResultOfTreatment.objects.get(pk=rd["result"]),
        info=rd["info"].strip(),
        first_time=rd["first_time"],
        primary_visit=rd["primary_visit"],
        dispensary_registration=int(rd["disp"]),
        doctor=request.user.doctorprofile,
        outcome=Outcomes.objects.filter(pk=rd["outcome"]).first(),
        dispensary_exclude_purpose=ExcludePurposes.objects.filter(pk=rd["exclude"]).first(),
        dispensary_diagnos=rd["disp_diagnos"])
    ticket.save()
    # Audit trail of the raw form payload.
    Log(key="", type=7000, body=json.dumps(rd), user=request.user.doctorprofile).save()
    return JsonResponse({"ok": True})
@group_required("Оформление статталонов")
def statistics_tickets_get(request):
    """Tickets created by the current doctor on the requested day (dd.mm.yyyy)."""
    response = {"data": []}
    request_data = json.loads(request.body)
    parts = request_data["date"].split(".")
    day_start = datetime.date(int(parts[2]), int(parts[1]), int(parts[0]))
    day_end = day_start + datetime.timedelta(1)
    counter = 0
    rows = StatisticsTicket.objects.filter(
        doctor=request.user.doctorprofile,
        date__range=(day_start, day_end,)).order_by('pk')
    for row in rows:
        # Invalidated tickets keep their place but get no running number.
        if not row.invalid_ticket:
            counter += 1
        disp = (row.get_dispensary_registration_display()
                + (" (" + row.dispensary_diagnos + ")" if row.dispensary_diagnos != "" else "")
                + (" (" + row.dispensary_exclude_purpose.title + ")" if row.dispensary_exclude_purpose else ""))
        response["data"].append({
            "pk": row.pk,
            "n": '' if row.invalid_ticket else counter,
            "patinet": row.card.individual.fio(full=True),
            "card": row.card.number_with_type(),
            "purpose": row.purpose.title if row.purpose else "",
            "first_time": row.first_time,
            "primary": row.primary_visit,
            "info": row.info,
            "disp": disp,
            "result": row.result.title if row.result else "",
            "outcome": row.outcome.title if row.outcome else "",
            "invalid": row.invalid_ticket,
            "can_invalidate": row.can_invalidate()
        })
    return JsonResponse(response)
@group_required("Оформление статталонов")
def statistics_tickets_invalidate(request):
response = {"ok": False, "message": ""}
request_data = json.loads(request.body)
if StatisticsTicket.objects.filter(doctor=request.user.doctorprofile, pk=request_data.get("pk", -1)).exists():
if StatisticsTicket.objects.get(pk=request_data["pk"]).can_invalidate():
StatisticsTicket.objects.filter(pk=request_data["pk"]).update(
invalid_ticket=request_data.get("invalid", False))
response["ok"] = True
Log(key=str(request_data["pk"]), type=7001, body=json.dumps(request_data.get("invalid", False)),
user=request.user.doctorprofile).save()
else:
response["message"] = "Время на отмену или возврат истекло"
return JsonResponse(response)
@group_required("Врач параклиники")
def directions_paraclinic_form(request):
import time
response = {"ok": False, "message": ""}
request_data = json.loads(request.body)
pk = request_data.get("pk", -1)
if pk >= 4600000000000:
pk -= 4600000000000
pk //= 10
add_f = {}
add_fr = {}
if not request.user.is_superuser:
add_f = dict(issledovaniya__research__podrazdeleniye=request.user.doctorprofile.podrazdeleniye)
add_fr = dict(research__podrazdeleniye=request.user.doctorprofile.podrazdeleniye)
if directions.Napravleniya.objects.filter(pk=pk, issledovaniya__research__is_paraclinic=True, **add_f).exists():
response["ok"] = True
d = directions.Napravleniya.objects.filter(pk=pk, issledovaniya__research__is_paraclinic=True, **add_f).distinct()[0]
response["patient"] = {
"fio_age": d.client.individual.fio(full=True),
"card": d.client.number_with_type(),
"doc": d.doc.get_fio(dots=True) + ", " + d.doc.podrazdeleniye.title
}
response["direction"] = {
"pk": d.pk,
"date": timezone.localtime(d.data_sozdaniya).strftime('%d.%m.%Y'),
"diagnos": d.diagnos,
"fin_source": d.istochnik_f.title
}
response["researches"] = []
for i in directions.Issledovaniya.objects.filter(napravleniye=d, research__is_paraclinic=True, **add_fr):
ctp = int(0 if not i.time_confirmation else int(
time.mktime(i.time_confirmation.timetuple()))) + 8 * 60 * 60
ctime = int(time.time())
cdid = -1 if not i.doc_confirmation else i.doc_confirmation.pk
rt = SettingManager.get("lab_reset_confirm_time_min") * 60
iss = {
"pk": i.pk,
"research": {
"title": i.research.title,
"groups": []
},
"saved": i.time_save is not None,
"confirmed": i.time_confirmation is not None,
"allow_reset_confirm": ((
ctime - ctp < rt and cdid == request.user.doctorprofile.pk) or request.user.is_superuser or "Сброс подтверждений результатов" in [
str(x) for x in
request.user.groups.all()]) and i.time_confirmation is not None,
}
for group in ParaclinicInputGroups.objects.filter(research=i.research).order_by("order"):
g = {"pk": group.pk, "order": group.order, "title": group.title, "show_title": group.show_title,
"hide": group.hide, "fields": []}
for field in ParaclinicInputField.objects.filter(group=group).order_by("order"):
g["fields"].append({
"pk": field.pk,
"order": field.order,
"lines": field.lines,
"title": field.title,
"hide": field.hide,
"values_to_input": json.loads(field.input_templates),
"value": field.default_value if not directions.ParaclinicResult.objects.filter(
issledovaniye=i, field=field).exists() else
directions.ParaclinicResult.objects.filter(issledovaniye=i, field=field)[0].value,
})
iss["research"]["groups"].append(g)
response["researches"].append(iss)
else:
response["message"] = "Направление не найдено"
return JsonResponse(response)
def delete_keys_from_dict(dict_del, lst_keys):
    """Recursively remove every key in ``lst_keys`` from ``dict_del`` in place.

    Nested dicts and dicts contained in list values are processed
    recursively; missing keys are ignored.

    :param dict_del: dictionary to strip (mutated in place)
    :param lst_keys: iterable of keys to delete at every nesting level
    :return: the same ``dict_del`` object, for call chaining
    """
    for k in lst_keys:
        # pop with a default ignores absent keys (the original used
        # try/del/except KeyError).
        dict_del.pop(k, None)
    for v in dict_del.values():
        if isinstance(v, dict):
            delete_keys_from_dict(v, lst_keys)
        elif isinstance(v, list):
            for item in v:
                # BUGFIX: the original recursed into every list element and
                # raised TypeError on non-dict items (strings, numbers).
                if isinstance(item, dict):
                    delete_keys_from_dict(item, lst_keys)
    return dict_del
@group_required("Врач параклиники")
def directions_paraclinic_result(request):
response = {"ok": False, "message": ""}
request_data = json.loads(request.body).get("data", {})
pk = request_data.get("pk", -1)
with_confirm = json.loads(request.body).get("with_confirm", False)
if directions.Issledovaniya.objects.filter(pk=pk, time_confirmation__isnull=True,
research__podrazdeleniye=request.user.doctorprofile.podrazdeleniye).exists():
iss = directions.Issledovaniya.objects.get(pk=pk)
for group in request_data["research"]["groups"]:
for field in group["fields"]:
if not ParaclinicInputField.objects.filter(pk=field["pk"]).exists():
continue
f = ParaclinicInputField.objects.get(pk=field["pk"])
if not directions.ParaclinicResult.objects.filter(issledovaniye=iss, field=f).exists():
f_result = directions.ParaclinicResult(issledovaniye=iss, field=f, value="")
else:
f_result = directions.ParaclinicResult.objects.filter(issledovaniye=iss, field=f)[0]
f_result.value = field["value"]
f_result.save()
iss.doc_save = request.user.doctorprofile
iss.time_save = timezone.now()
if with_confirm:
iss.doc_confirmation = request.user.doctorprofile
iss.time_confirmation = timezone.now()
iss.save()
response["ok"] = True
slog.Log(key=pk, type=13, body=json.dumps(delete_keys_from_dict(request_data,
["hide", "confirmed", "allow_reset_confirm",
"values_to_input", "show_title", "order",
"show_title", "lines", "saved", "pk"])),
user=request.user.doctorprofile).save()
return JsonResponse(response)
@group_required("Врач параклиники")
def directions_paraclinic_confirm(request):
response = {"ok": False, "message": ""}
request_data = json.loads(request.body)
pk = request_data.get("iss_pk", -1)
if directions.Issledovaniya.objects.filter(pk=pk, time_confirmation__isnull=True,
research__podrazdeleniye=request.user.doctorprofile.podrazdeleniye).exists():
iss = directions.Issledovaniya.objects.get(pk=pk)
t = timezone.now()
if not iss.napravleniye.visit_who_mark or not iss.napravleniye.visit_date:
iss.napravleniye.visit_who_mark = request.user.doctorprofile
iss.napravleniye.visit_date = t
iss.napravleniye.save()
iss.doc_confirmation = request.user.doctorprofile
iss.time_confirmation = t
iss.save()
response["ok"] = True
slog.Log(key=pk, type=14, body=json.dumps(request_data), user=request.user.doctorprofile).save()
return JsonResponse(response)
@group_required("Врач параклиники", "Сброс подтверждений результатов")
def directions_paraclinic_confirm_reset(request):
response = {"ok": False, "message": ""}
request_data = json.loads(request.body)
pk = request_data.get("iss_pk", -1)
if directions.Issledovaniya.objects.filter(pk=pk).exists():
iss = directions.Issledovaniya.objects.get(pk=pk)
import time
ctp = int(
0 if not iss.time_confirmation else int(time.mktime(iss.time_confirmation.timetuple()))) + 8 * 60 * 60
ctime = int(time.time())
cdid = -1 if not iss.doc_confirmation else iss.doc_confirmation.pk
if (ctime - ctp < SettingManager.get(
"lab_reset_confirm_time_min") * 60 and cdid == request.user.doctorprofile.pk) or request.user.is_superuser or "Сброс подтверждений результатов" in [
str(x) for x in request.user.groups.all()]:
predoc = {"fio": iss.doc_confirmation.get_fio(), "pk": iss.doc_confirmation.pk,
"direction": iss.napravleniye.pk}
iss.doc_confirmation = iss.time_confirmation = None
iss.save()
if iss.napravleniye.result_rmis_send:
c = Client()
c.directions.delete_services(iss.napravleniye, request.user.doctorprofile)
response["ok"] = True
slog.Log(key=pk, type=24, body=json.dumps(predoc), user=request.user.doctorprofile).save()
else:
response["message"] = "Сброс подтверждения разрешен в течении %s минут" % (
str(SettingManager.get("lab_reset_confirm_time_min")))
return JsonResponse(response)
@group_required("Врач параклиники")
def directions_paraclinic_history(request):
response = {"directions": []}
request_data = json.loads(request.body)
date_start = request_data["date"].split(".")
date_start = datetime.date(int(date_start[2]), int(date_start[1]), int(date_start[0]))
date_end = date_start + datetime.timedelta(1)
has_dirs = []
for direction in directions.\
Napravleniya.objects.filter(Q(issledovaniya__doc_save=request.user.doctorprofile) |
Q(issledovaniya__doc_confirmation=request.user.doctorprofile)) \
.filter(Q(issledovaniya__time_confirmation__range=(date_start, date_end)) |
Q(issledovaniya__time_save__range=(date_start, date_end)))\
.order_by("-issledovaniya__time_save", "-issledovaniya__time_confirmation"):
if direction.pk in has_dirs:
continue
has_dirs.append(direction.pk)
d = {
"pk": direction.pk,
"date": timezone.localtime(direction.data_sozdaniya).strftime('%d.%m.%Y'),
"patient": direction.client.individual.fio(full=True, direction=direction),
"card": direction.client.number_with_type(),
"iss": [],
"all_confirmed": True,
"all_saved": True
}
for i in directions.Issledovaniya.objects.filter(napravleniye=direction).order_by("pk"):
iss = {"title": i.research.title,
"saved": i.time_save is not None,
"confirmed": i.time_confirmation is not None}
d["iss"].append(iss)
if not iss["saved"]:
d["all_saved"] = False
if not iss["confirmed"]:
d["all_confirmed"] = False
response["directions"].append(d)
return JsonResponse(response)
@group_required("Врач параклиники", "Посещения по направлениям")
def directions_services(request):
response = {"ok": False, "message": ""}
request_data = json.loads(request.body)
pk = request_data.get("pk", -1)
if pk >= 4600000000000:
pk -= 4600000000000
pk //= 10
if directions.Napravleniya.objects.filter(pk=pk, issledovaniya__research__is_paraclinic=True).exists():
n = directions.Napravleniya.objects.filter(pk=pk, issledovaniya__research__is_paraclinic=True)[0]
response["ok"] = True
researches = []
for i in directions.Issledovaniya.objects.filter(napravleniye=n):
researches.append({"title": i.research.title,
"department": i.research.podrazdeleniye.get_title()})
response["direction_data"] = {
"date": n.data_sozdaniya.strftime('%d.%m.%Y'),
"client": n.client.individual.fio(full=True),
"card": n.client.number_with_type(),
"diagnos": n.diagnos,
"doc": "{}, {}".format(n.doc.get_fio(), n.doc.podrazdeleniye.title),
"visit_who_mark": "" if not n.visit_who_mark else "{}, {}".format(n.visit_who_mark.get_fio(), n.visit_who_mark.podrazdeleniye.title),
"fin_source": "{} - {}".format(n.istochnik_f.base.title, n.istochnik_f.title)
}
response["researches"] = researches
response["loaded_pk"] = pk
response["visit_status"] = n.visit_date is not None
response["visit_date"] = "" if not n.visit_date else timezone.localtime(n.visit_date).strftime('%d.%m.%Y %X')
else:
response["message"] = "Направление не найдено"
return JsonResponse(response)
@group_required("Врач параклиники", "Посещения по направлениям")
def directions_mark_visit(request):
response = {"ok": False, "message": ""}
request_data = json.loads(request.body)
pk = request_data.get("pk", -1)
if directions.Napravleniya.objects.filter(pk=pk, issledovaniya__research__is_paraclinic=True, visit_date__isnull=True).exists():
n = directions.Napravleniya.objects.filter(pk=pk, issledovaniya__research__is_paraclinic=True)[0]
response["ok"] = True
n.visit_date = timezone.now()
n.visit_who_mark = request.user.doctorprofile
n.save()
response["visit_status"] = n.visit_date is not None
response["visit_date"] = timezone.localtime(n.visit_date).strftime('%d.%m.%Y %X')
slog.Log(key=pk, type=5001, body=json.dumps({"Посещение": "да", "Дата и время": response["visit_date"]}), user=request.user.doctorprofile).save()
else:
response["message"] = "Направление не найдено"
return JsonResponse(response)
@group_required("Врач параклиники", "Посещения по направлениям")
def directions_visit_journal(request):
response = {"data": []}
request_data = json.loads(request.body)
date_start = request_data["date"].split(".")
date_start = datetime.date(int(date_start[2]), int(date_start[1]), int(date_start[0]))
date_end = date_start + datetime.timedelta(1)
for v in directions.Napravleniya.objects.filter(visit_date__range=(date_start, date_end,), visit_who_mark=request.user.doctorprofile).order_by("-visit_date"):
response["data"].append({
"pk": v.pk,
"client": v.client.individual.fio(full=True),
"card": v.client.number_with_type(),
"datetime": timezone.localtime(v.visit_date).strftime('%d.%m.%Y %X')
})
return JsonResponse(response)
@login_required
def directions_last_result(request):
    """Return the most recent confirmed result of a research for a patient.

    Request JSON: {"individual": Individual pk, "research": Researches pk}.
    """
    body = json.loads(request.body)
    latest = directions.Issledovaniya.objects.filter(
        napravleniye__client__individual__pk=body.get("individual", -1),
        research__pk=body.get("research", -1),
        time_confirmation__isnull=False).order_by("-time_confirmation").first()
    if latest is None:
        return JsonResponse({"ok": False, "data": {}})
    return JsonResponse({
        "ok": True,
        "data": {"direction": latest.napravleniye.pk,
                 "datetime": timezone.localtime(latest.time_confirmation).strftime('%d.%m.%Y')}
    })
@login_required
def directions_results_report(request):
    """Collect confirmed results for the requested patient and parameters.

    Request JSON: ``individual`` (Individual pk), ``params`` — a list of
    ``{"pk": ..., "is_paraclinic": bool}`` (pk is a ParaclinicInputGroups pk
    or a Fractions pk), ``date_start``/``date_end`` in dd.mm.yyyy (end date
    inclusive).  Rows are sorted by research, order, param pk and, within
    those, newest first.  Logs the request as type 20000.
    """
    data = []
    request_data = json.loads(request.body)
    individual_pk = request_data.get("individual", -1)
    slog.Log(key=str(individual_pk), type=20000, body=json.dumps(request_data), user=request.user.doctorprofile).save()
    params = request_data.get("params", [])
    date_start = request_data["date_start"].split(".")
    date_end = request_data["date_end"].split(".")
    date_start = datetime.date(int(date_start[2]), int(date_start[1]), int(date_start[0]))
    # +1 day makes the end date inclusive for the __range filters below.
    date_end = datetime.date(int(date_end[2]), int(date_end[1]), int(date_end[0])) + datetime.timedelta(days=1)
    if Individual.objects.filter(pk=individual_pk).exists():
        # BUGFIX: the original bound the patient to ``i`` and then shadowed
        # it with the paraclinic loop variable, so the laboratory branch
        # could filter by an Issledovaniya object instead of the patient.
        individual = Individual.objects.get(pk=individual_pk)
        for param in params:
            ppk = param["pk"]
            if param["is_paraclinic"]:
                if ParaclinicInputGroups.objects.filter(pk=ppk).exists():
                    g = ParaclinicInputGroups.objects.get(pk=ppk)
                    # BUGFIX: restrict to the requested patient (the original
                    # queried every patient's examinations).
                    # TODO confirm: the date range is still not applied here,
                    # unlike the laboratory branch below.
                    for i in directions.Issledovaniya.objects.filter(research__paraclinicinputgroups=g,
                                                                     napravleniye__client__individual=individual,
                                                                     time_confirmation__isnull=False):
                        res = []
                        for r in directions.ParaclinicResult.objects.filter(field__group=g,
                                                                            issledovaniye=i).order_by("field__order"):
                            if r.value == "":
                                continue
                            res.append((r.field.title + ": " if r.field.title != "" else "") + r.value)
                        if len(res) == 0:
                            continue
                        paramdata = {"research": i.research.pk,
                                     "pk": ppk,
                                     "order": g.order,
                                     "date": timezone.localtime(i.time_confirmation).strftime('%d.%m.%Y'),
                                     "timestamp": int(timezone.localtime(i.time_confirmation).timestamp()),
                                     "value": "; ".join(res),
                                     "is_norm": "normal",
                                     "not_norm_dir": "",
                                     "delta": 0,
                                     "active_ref": {},
                                     "direction": i.napravleniye.pk}
                        data.append(paramdata)
            else:
                if Fractions.objects.filter(pk=ppk).exists():
                    f = Fractions.objects.get(pk=ppk)
                    for r in directions.Result.objects.filter(issledovaniye__napravleniye__client__individual=individual,
                                                              fraction=f,
                                                              issledovaniye__time_confirmation__range=(date_start, date_end)):
                        if r.value == "":
                            continue
                        is_norm = r.get_is_norm()
                        not_norm_dir = ""
                        delta = ""
                        active_ref = r.calc_normal(fromsave=False, only_ref=True)
                        # Numeric values with a two-sided reference range get
                        # a direction marker and the distance to the range.
                        if "r" in active_ref and re.match(r"^\d+(\.\d+)?$", r.value.replace(",", ".").strip()):
                            x = float(r.value.replace(",", ".").strip())
                            spl = r.calc_normal(fromsave=False, only_ref=True, raw_ref=False)
                            if (isinstance(spl, list) or isinstance(spl, tuple)) and len(spl) == 2:
                                if spl[0] >= x:
                                    not_norm_dir = "down"
                                    nx = spl[0] - x
                                    n10 = spl[0] * 0.2
                                    # Within 20% of the boundary: "near" flag.
                                    if nx <= n10:
                                        not_norm_dir = "n_down"
                                        delta = nx
                                elif spl[1] <= x:
                                    not_norm_dir = "up"
                                    nx = x - spl[1]
                                    n10 = spl[1] * 0.2
                                    if nx <= n10:
                                        not_norm_dir = "n_up"
                                        delta = nx
                        paramdata = {"research": f.research.pk,
                                     "pk": ppk,
                                     "order": f.sort_weight,
                                     "date": timezone.localtime(r.issledovaniye.time_confirmation).strftime('%d.%m.%Y'),
                                     "timestamp": int(timezone.localtime(r.issledovaniye.time_confirmation).timestamp()),
                                     "value": r.value,
                                     "is_norm": is_norm,
                                     "not_norm_dir": not_norm_dir,
                                     "delta": delta,
                                     "active_ref": active_ref,
                                     "direction": r.issledovaniye.napravleniye.pk}
                        data.append(paramdata)
    # Stable sorts, applied minor-to-major key.
    data.sort(key=itemgetter("timestamp"), reverse=True)
    data.sort(key=itemgetter("pk"))
    data.sort(key=itemgetter("order"))
    data.sort(key=itemgetter("research"))
    return JsonResponse({"data": data})
2.14
+ Отчётный поиск результатов
import collections
import datetime
import re
from collections import defaultdict
from operator import itemgetter
import simplejson as json
import yaml
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.http import JsonResponse
from django.utils import timezone, dateformat
from django.utils.decorators import method_decorator
from django.views import View
from django.views.decorators.csrf import csrf_exempt
import api.models as models
import directions.models as directions
import users.models as users
from api.to_astm import get_iss_astm
from appconf.manager import SettingManager
from barcodes.views import tubes
from clients.models import CardBase, Individual, Card
from directory.models import AutoAdd, Fractions, ParaclinicInputGroups, ParaclinicInputField
from laboratory import settings
from laboratory.decorators import group_required
from podrazdeleniya.models import Podrazdeleniya
from results.views import result_normal
from rmis_integration.client import Client
from slog import models as slog
from slog.models import Log
from statistics_tickets.models import VisitPurpose, ResultOfTreatment, StatisticsTicket, Outcomes, \
ExcludePurposes
def translit(locallangstring):
    """Transliterate Cyrillic characters in *locallangstring* to Latin.

    Characters without a mapping (Latin letters, digits, punctuation) pass
    through unchanged.

    :param locallangstring: original string (may mix Cyrillic and other chars)
    :return: transliterated string
    """
    conversion = {
        u'\u0410': 'A', u'\u0430': 'a',
        u'\u0411': 'B', u'\u0431': 'b',
        u'\u0412': 'V', u'\u0432': 'v',
        u'\u0413': 'G', u'\u0433': 'g',
        u'\u0414': 'D', u'\u0434': 'd',
        u'\u0415': 'E', u'\u0435': 'e',
        u'\u0401': 'Yo', u'\u0451': 'yo',
        u'\u0416': 'Zh', u'\u0436': 'zh',
        u'\u0417': 'Z', u'\u0437': 'z',
        u'\u0418': 'I', u'\u0438': 'i',
        u'\u0419': 'Y', u'\u0439': 'y',
        u'\u041a': 'K', u'\u043a': 'k',
        u'\u041b': 'L', u'\u043b': 'l',
        u'\u041c': 'M', u'\u043c': 'm',
        u'\u041d': 'N', u'\u043d': 'n',
        u'\u041e': 'O', u'\u043e': 'o',
        u'\u041f': 'P', u'\u043f': 'p',
        u'\u0420': 'R', u'\u0440': 'r',
        u'\u0421': 'S', u'\u0441': 's',
        u'\u0422': 'T', u'\u0442': 't',
        u'\u0423': 'U', u'\u0443': 'u',
        u'\u0424': 'F', u'\u0444': 'f',
        u'\u0425': 'H', u'\u0445': 'h',
        u'\u0426': 'Ts', u'\u0446': 'ts',
        u'\u0427': 'Ch', u'\u0447': 'ch',
        u'\u0428': 'Sh', u'\u0448': 'sh',
        u'\u0429': 'Sch', u'\u0449': 'sch',
        u'\u042a': '', u'\u044a': '',
        u'\u042b': 'Y', u'\u044b': 'y',
        u'\u042c': '', u'\u044c': '',
        u'\u042d': 'E', u'\u044d': 'e',
        u'\u042e': 'Yu', u'\u044e': 'yu',
        u'\u042f': 'Ya', u'\u044f': 'ya',
    }
    # dict.get keeps the table read-only; the original used setdefault,
    # which inserted every unmapped character into the mapping on lookup.
    return ''.join(conversion.get(c, c) for c in locallangstring)
@csrf_exempt
def send(request):
    """
    Save analyzer (Sysmex/ASTM) results into a direction's examinations.

    Accepts ``result`` (YAML mapping with ``pk`` and a ``result`` dict of
    ASTM field -> value) plus ``key`` (application API key) via POST or GET.
    With ``bydirection`` present, ``pk`` is a direction number (optionally
    barcode-prefixed) that is resolved to an unconfirmed tube.  Any
    exception is reported back as {"ok": False, "Exception": True, ...}.
    :param request:
    :return: JsonResponse with at least {"ok": bool}
    """
    result = {"ok": False}
    try:
        if request.method == "POST":
            # NOTE(review): yaml.load without SafeLoader can construct
            # arbitrary objects — unsafe if the API key ever leaks; confirm
            # and switch to yaml.safe_load.
            resdict = yaml.load(request.POST["result"])
            appkey = request.POST.get("key", "")
        else:
            resdict = yaml.load(request.GET["result"])
            appkey = request.GET.get("key", "")
        astm_user = users.DoctorProfile.objects.filter(user__username="astm").first()
        resdict["pk"] = int(resdict.get("pk", -111))
        if "LYMPH%" in resdict["result"]:
            resdict["orders"] = {}
        dpk = -1
        if "bydirection" in request.POST or "bydirection" in request.GET:
            dpk = resdict["pk"]
            # Strip barcode prefix and trailing check digit from scans.
            if dpk >= 4600000000000:
                dpk -= 4600000000000
                dpk //= 10
            tubes(request, direction_implict_id=dpk)
            # Resolve the direction to its first unconfirmed tube.
            if directions.TubesRegistration.objects.filter(issledovaniya__napravleniye__pk=dpk,
                                                           issledovaniya__doc_confirmation__isnull=True).exists():
                resdict["pk"] = directions.TubesRegistration.objects.filter(
                    issledovaniya__napravleniye__pk=dpk, issledovaniya__doc_confirmation__isnull=True).order_by(
                    "pk").first().pk
            else:
                resdict["pk"] = False
        result["A"] = appkey
        if resdict["pk"] and models.Application.objects.filter(key=appkey).exists() and models.Application.objects.get(
                key=appkey).active and directions.TubesRegistration.objects.filter(pk=resdict["pk"]).exists():
            tubei = directions.TubesRegistration.objects.get(pk=resdict["pk"])
            direction = tubei.issledovaniya_set.first().napravleniye
            for key in resdict["result"].keys():
                if models.RelationFractionASTM.objects.filter(astm_field=key).exists():
                    fractionRels = models.RelationFractionASTM.objects.filter(astm_field=key)
                    for fractionRel in fractionRels:
                        fraction = fractionRel.fraction
                        if directions.Issledovaniya.objects.filter(napravleniye=direction,
                                                                   research=fraction.research,
                                                                   doc_confirmation__isnull=True).exists():
                            issled = directions.Issledovaniya.objects.filter(napravleniye=direction,
                                                                             research=fraction.research,
                                                                             doc_confirmation__isnull=True).order_by(
                                "pk")[0]
                            if directions.Result.objects.filter(issledovaniye=issled,
                                                                fraction=fraction).exists():  # a result for this fraction already exists
                                fraction_result = directions.Result.objects.get(issledovaniye=issled,
                                                                                fraction__pk=fraction.pk)  # fetch it from the DB
                            else:
                                fraction_result = directions.Result(issledovaniye=issled,
                                                                    fraction=fraction)  # create a new result
                            fraction_result.value = str(resdict["result"][key]).strip()  # set the value
                            # Normalize bare integers to a decimal form.
                            if fraction_result.value.isdigit():
                                fraction_result.value = "%s.0" % fraction_result.value
                            import re
                            find = re.findall("\d+.\d+", fraction_result.value)
                            if len(find) > 0:
                                # Apply the configured unit multiplier to the
                                # first numeric token found.
                                val = float(find[0]) * fractionRel.get_multiplier_display()
                                if fractionRel.full_round:
                                    val = round(val)
                                fraction_result.value = fraction_result.value.replace(find[0], str(val))
                            fraction_result.iteration = 1  # set the iteration
                            ref = fractionRel.default_ref
                            if ref:
                                fraction_result.ref_title = ref.title
                                fraction_result.ref_about = ref.about
                                fraction_result.ref_m = ref.m
                                fraction_result.ref_f = ref.f
                            fraction_result.save()  # persist the result
                            issled.api_app = models.Application.objects.get(key=appkey)
                            issled.save()
                            fraction_result.get_ref(re_save=True)
                            fraction_result.issledovaniye.doc_save = astm_user  # who saved
                            from datetime import datetime
                            fraction_result.issledovaniye.time_save = timezone.now()  # when saved
                            fraction_result.issledovaniye.save()
            slog.Log(key=appkey, type=22, body=json.dumps(resdict), user=None).save()
            result["ok"] = True
        elif not directions.TubesRegistration.objects.filter(pk=resdict["pk"]).exists():
            # Unknown tube: log the rejected payload (type 23) keyed by the
            # original direction pk when one was supplied.
            if dpk > -1:
                resdict["pk"] = dpk
            slog.Log(key=resdict["pk"], type=23, body=json.dumps(resdict), user=None).save()
    except Exception as e:
        result = {"ok": False, "Exception": True, "MSG": str(e)}
    return JsonResponse(result)
@csrf_exempt
def endpoint(request):
    """Generic analyzer integration endpoint (keyed by application API key).

    ``result`` (JSON via POST or GET) carries ``message_type``:
      * "R" — store results for a direction (``pk`` is a direction number,
        a barcode value, or a tube pk depending on ``app.direction_work``);
      * "Q" — query which ASTM fields are expected for the given pks;
      * anything else — no-op (only logged).
    Every call is logged as type 6000.
    """
    result = {"answer": False, "body": ""}
    data = json.loads(request.POST.get("result", request.GET.get("result", "{}")))
    api_key = request.POST.get("key", request.GET.get("key", ""))
    message_type = data.get("message_type", "C")
    pk_s = str(data.get("pk", ""))
    pk = -1 if not pk_s.isdigit() else int(pk_s)
    data["app_name"] = "API key is incorrect"
    if models.Application.objects.filter(key=api_key).exists():
        # Results are attributed to the service "astm" profile, falling back
        # to the first staff profile.
        astm_user = users.DoctorProfile.objects.filter(user__username="astm").first()
        if astm_user is None:
            astm_user = users.DoctorProfile.objects.filter(user__is_staff=True).order_by("pk").first()
        app = models.Application.objects.get(key=api_key)
        if app.active:
            data["app_name"] = app.name
            if message_type == "R":
                if pk != -1:
                    dw = app.direction_work
                    # Barcode values always address a direction directly.
                    if pk >= 4600000000000:
                        pk -= 4600000000000
                        pk //= 10
                        dw = True
                    if dw:
                        direction = directions.Napravleniya.objects.filter(pk=pk).first()
                    else:
                        direction = directions.Napravleniya.objects.filter(issledovaniya__tubes__pk=pk).first()
                    oks = []
                    if direction:
                        results = data.get("result", {})
                        for key in results:
                            ok = False
                            # Prefer app-specific field mappings; fall back
                            # to global (application_api is NULL) ones.
                            q = models.RelationFractionASTM.objects.filter(astm_field=key)
                            if q.filter(application_api=app).exists():
                                q = q.filter(application_api=app)
                                ok = True
                            elif q.filter(application_api__isnull=True).exists():
                                q = q.filter(application_api__isnull=True)
                                ok = True
                            if ok:
                                for fraction_rel in q:
                                    save_state = []
                                    issleds = []
                                    for issled in directions.Issledovaniya.objects.filter(napravleniye=direction,
                                                                                          research=fraction_rel.fraction.research,
                                                                                          doc_confirmation__isnull=True):
                                        # Upsert the result row for this fraction.
                                        if directions.Result.objects.filter(issledovaniye=issled,
                                                                            fraction=fraction_rel.fraction).exists():
                                            fraction_result = directions.Result.objects.get(issledovaniye=issled,
                                                                                            fraction=fraction_rel.fraction)
                                        else:
                                            fraction_result = directions.Result(issledovaniye=issled,
                                                                                fraction=fraction_rel.fraction)
                                        fraction_result.value = str(results[key]).strip()
                                        import re
                                        find = re.findall("\d+.\d+", fraction_result.value)
                                        if len(find) > 0:
                                            # Apply the unit multiplier to every
                                            # numeric token in the value.
                                            val_str = fraction_result.value
                                            for f in find:
                                                val = app.truncate(float(f) * fraction_rel.get_multiplier_display())
                                                val_str = val_str.replace(f, str(val))
                                            fraction_result.value = val_str
                                        fraction_result.iteration = 1
                                        ref = fraction_rel.default_ref
                                        if ref:
                                            fraction_result.ref_title = ref.title
                                            fraction_result.ref_about = ref.about
                                            fraction_result.ref_m = ref.m
                                            fraction_result.ref_f = ref.f
                                        fraction_result.save()
                                        issled.api_app = app
                                        issled.save()
                                        fraction_result.get_ref(re_save=True)
                                        fraction_result.issledovaniye.doc_save = astm_user
                                        fraction_result.issledovaniye.time_save = timezone.now()
                                        fraction_result.issledovaniye.save()
                                        save_state.append({"fraction": fraction_result.fraction.title,
                                                           "value": fraction_result.value})
                                        issleds.append({"pk": issled.pk, "title": issled.research.title})
                                    slog.Log(key=json.dumps({"direction": direction.pk, "issleds": str(issleds)}),
                                             type=22, body=json.dumps(save_state), user=None).save()
                            oks.append(ok)
                    result["body"] = "{} {} {} {}".format(dw, pk, json.dumps(oks), direction is not None)
                else:
                    result["body"] = "pk '{}' is not exists".format(pk_s)
            elif message_type == "Q":
                result["answer"] = True
                pks = [int(x) for x in data.get("query", [])]
                researches = defaultdict(list)
                for row in app.get_issledovaniya(pks):
                    k = row["pk"]
                    i = row["iss"]
                    for fraction in Fractions.objects.filter(research=i.research,
                                                             hide=False):
                        # App-specific mapping first, then the global one.
                        rel = models.RelationFractionASTM.objects.filter(fraction=fraction, application_api=app)
                        if not rel.exists():
                            rel = models.RelationFractionASTM.objects.filter(fraction=fraction)
                        if not rel.exists():
                            continue
                        rel = rel[0]
                        researches[k].append(rel.astm_field)
                result["body"] = researches
            else:
                pass
        else:
            data["app_name"] = "API app banned"
            result["body"] = "API app banned"
    else:
        result["body"] = "API key is incorrect"
    slog.Log(key=pk, type=6000, body=json.dumps(data), user=None).save()
    return JsonResponse(result)
@login_required
def departments(request):
    """List departments (GET) or bulk update/insert them (POST).

    GET returns every department plus the department types the current
    configuration offers.  POST (JSON body {"type": "update"|"insert",
    "data": [rows]}) is allowed only for superusers or members of the
    user-management group; returns {"ok": bool, "message": str}.
    """
    from podrazdeleniya.models import Podrazdeleniya
    can_edit = request.user.is_superuser or request.user.doctorprofile.has_group(
        'Создание и редактирование пользователей')
    if request.method == "GET":
        return JsonResponse(
            {"departments": [{"pk": x.pk, "title": x.get_title(), "type": str(x.p_type), "updated": False} for
                             x in Podrazdeleniya.objects.all().order_by("pk")],
             "can_edit": can_edit,
             # Type 3 (paraclinic) is offered only when the module is enabled.
             "types": [{"pk": str(x[0]), "title": x[1]} for x in Podrazdeleniya.TYPES if
                       SettingManager.get("paraclinic_module", default='false', default_type='b') or x[0] != 3]})
    if can_edit:
        ok = False
        message = ""
        try:
            req = json.loads(request.body)
            data_type = req.get("type", "update")
            rows = req.get("data", [])
            if data_type == "update":
                for row in rows:
                    title = row["title"].strip()
                    # Rows with an empty title are silently skipped.
                    if len(title) > 0:
                        department = Podrazdeleniya.objects.get(pk=row["pk"])
                        department.title = title
                        department.p_type = int(row["type"])
                        department.save()
                        ok = True
            elif data_type == "insert":
                for row in rows:
                    title = row["title"].strip()
                    if len(title) > 0:
                        department = Podrazdeleniya(title=title, p_type=int(row["type"]))
                        department.save()
                        ok = True
        except Exception as e:
            # BUGFIX: the original returned from ``finally``, which silently
            # swallowed every exception; report the failure reason instead.
            message = str(e)
        return JsonResponse({"ok": ok, "message": message})
    # BUGFIX: the original returned JsonResponse(0), which raises TypeError
    # (non-dict payload without safe=False); return a well-formed refusal.
    return JsonResponse({"ok": False, "message": ""})
@login_required
def bases(request):
    """List card bases together with their financing sources."""
    from clients.models import CardBase
    payload = []
    for base in CardBase.objects.all():
        sources = directions.IstochnikiFinansirovaniya.objects.filter(base=base)
        payload.append({
            "pk": base.pk,
            "title": base.title,
            "code": base.short_title,
            "hide": base.hide,
            "history_number": base.history_number,
            "fin_sources": [{"pk": s.pk, "title": s.title, "default_diagnos": s.default_diagnos}
                            for s in sources],
        })
    return JsonResponse({"bases": payload})
class ResearchesTemplates(View):
    """Assignment templates visible to the current user.

    A template matches when it is global (bound to neither a doctor nor a
    department), bound to the requesting doctor, or bound to the doctor's
    department.
    """

    def get(self, request):
        from django.db.models import Q
        templates = []
        for t in users.AssignmentTemplates.objects.filter(Q(doc__isnull=True, podrazdeleniye__isnull=True) |
                                                          Q(doc=request.user.doctorprofile) |
                                                          Q(podrazdeleniye=request.user.doctorprofile.podrazdeleniye)):
            templates.append({"values": [x.research.pk for x in users.AssignmentResearches.objects.filter(template=t)],
                              "pk": t.pk,
                              "title": t.title,
                              "for_current_user": t.doc is not None,
                              "for_users_department": t.podrazdeleniye is not None})
        return JsonResponse({"templates": templates})

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        # BUGFIX: super(self.__class__, self) recurses infinitely if this view
        # is ever subclassed; name the class explicitly.
        return super(ResearchesTemplates, self).dispatch(request, *args, **kwargs)
from directory.models import Researches as DResearches
class Researches(View):
    """All visible researches grouped by department pk."""

    def get(self, request):
        deps = defaultdict(list)
        for r in DResearches.objects.filter(hide=False).order_by("title"):
            # Cross-research auto-assignment links (both directions).
            autoadd = [x.b.pk for x in AutoAdd.objects.filter(a=r)]
            addto = [x.a.pk for x in AutoAdd.objects.filter(b=r)]
            deps[r.podrazdeleniye.pk].append(
                {"pk": r.pk,
                 "onlywith": -1 if not r.onlywith else r.onlywith.pk,
                 "department_pk": r.podrazdeleniye.pk,
                 "title": r.get_title(),
                 "full_title": r.title,
                 "comment_variants": [] if not r.comment_variants else r.comment_variants.get_variants(),
                 "autoadd": autoadd,
                 "addto": addto,
                 "code": r.code,
                 "type": str(r.podrazdeleniye.p_type)
                 })
        return JsonResponse({"researches": deps})

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        # BUGFIX: super(self.__class__, self) recurses infinitely if this view
        # is ever subclassed; name the class explicitly.
        return super(Researches, self).dispatch(request, *args, **kwargs)
def current_user_info(request):
    """Describe the requesting user: profile, department and group names."""
    info = {"auth": request.user.is_authenticated, "doc_pk": -1, "username": "", "fio": "",
            "department": {"pk": -1, "title": ""}, "groups": []}
    if info["auth"]:
        profile = request.user.doctorprofile
        info["username"] = request.user.username
        info["fio"] = profile.fio
        info["groups"] = list(request.user.groups.values_list('name', flat=True))
        info["doc_pk"] = profile.pk
        info["department"] = {"pk": profile.podrazdeleniye.pk,
                              "title": profile.podrazdeleniye.title}
    return JsonResponse(info)
@login_required
def directive_from(request):
    """List departments with the attending physicians of each."""
    from users.models import DoctorProfile
    payload = []
    for dep in Podrazdeleniya.objects.filter(p_type=Podrazdeleniya.DEPARTMENT).order_by('title'):
        docs = DoctorProfile.objects.filter(podrazdeleniye=dep,
                                            user__groups__name="Лечащий врач").order_by("fio")
        payload.append({
            "pk": dep.pk,
            "title": dep.title,
            "docs": [{"pk": doc.pk, "fio": doc.fio} for doc in docs],
        })
    return JsonResponse({"data": payload})
@login_required
def patients_search_card(request):
    """Search patient cards of a given card base by a free-form query.

    Supported query shapes (request JSON: {"type": CardBase pk, "query"}):
      * three Cyrillic initials + ddmmyyyy (e.g. "ивп01021990");
      * "Family [Name [Patronymic [dd.mm.yyyy]]]";
      * a bare card number;
      * "card_pk:<pk>" for a direct card lookup.
    For RMIS-backed bases, missing patients are imported and found ones
    re-synchronized.
    """
    objects = []
    data = []
    d = json.loads(request.body)
    card_type = CardBase.objects.get(pk=d['type'])
    query = d['query'].strip()
    # Query-shape detectors (initials+date, FIO, digits, card_pk, ISO date).
    p = re.compile(r'[а-яё]{3}[0-9]{8}', re.IGNORECASE)
    p2 = re.compile(r'^([А-яЕё]+)( ([А-яЕё]+)( ([А-яЕё]*)( ([0-9]{2}\.[0-9]{2}\.[0-9]{4}))?)?)?$')
    p3 = re.compile(r'[0-9]{1,15}')
    p4 = re.compile(r'card_pk:\d+')
    pat_bd = re.compile(r"\d{4}-\d{2}-\d{2}")
    if re.search(p, query.lower()):
        initials = query[0:3].upper()
        # Rearrange ddmmyyyy into ISO yyyy-mm-dd.
        btday = query[7:11] + "-" + query[5:7] + "-" + query[3:5]
        if not pat_bd.match(btday):
            return JsonResponse([], safe=False)
        try:
            objects = Individual.objects.filter(family__startswith=initials[0], name__startswith=initials[1],
                                                patronymic__startswith=initials[2], birthday=btday,
                                                card__base=card_type)
            if card_type.is_rmis and len(objects) == 0:
                c = Client()
                objects = c.patients.import_individual_to_base(
                    {"surname": query[0] + "%", "name": query[1] + "%", "patrName": query[2] + "%", "birthDate": btday},
                    fio=True)
        except ValidationError:
            objects = []
    elif re.search(p2, query):
        split = str(query).split()
        n = p = ""
        f = split[0]
        rmis_req = {"surname": f + "%"}
        if len(split) > 1:
            n = split[1]
            rmis_req["name"] = n + "%"
        if len(split) > 2:
            p = split[2]
            rmis_req["patrName"] = p + "%"
        if len(split) > 3:
            btday = split[3].split(".")
            btday = btday[2] + "-" + btday[1] + "-" + btday[0]
            rmis_req["birthDate"] = btday
        objects = Individual.objects.filter(family__istartswith=f, name__istartswith=n,
                                            patronymic__istartswith=p, card__base=card_type)[:10]
        # With a birthday present, redo the query with the exact date.
        if len(split) > 3:
            objects = Individual.objects.filter(family__istartswith=f, name__istartswith=n,
                                                patronymic__istartswith=p, card__base=card_type,
                                                birthday=datetime.datetime.strptime(split[3], "%d.%m.%Y").date())[:10]
        # Top up from RMIS when fewer than 10 local matches were found.
        if card_type.is_rmis and (len(objects) == 0 or (len(split) < 4 and len(objects) < 10)):
            objects = list(objects)
            try:
                c = Client()
                objects += c.patients.import_individual_to_base(rmis_req, fio=True, limit=10 - len(objects))
            except ConnectionError:
                pass
    if re.search(p3, query) or card_type.is_rmis:
        resync = True
        if len(list(objects)) == 0:
            # Nothing found yet: try the query as a card number, then as an
            # RMIS identifier (16 characters).
            resync = False
            try:
                objects = Individual.objects.filter(card__number=query.upper(), card__is_archive=False,
                                                    card__base=card_type)
            except ValueError:
                pass
            if card_type.is_rmis and len(objects) == 0 and len(query) == 16:
                c = Client()
                objects = c.patients.import_individual_to_base(query)
        else:
            resync = True
        if resync and card_type.is_rmis:
            c = Client()
            for o in objects:
                o.sync_with_rmis(c=c)
    if re.search(p4, query):
        cards = Card.objects.filter(pk=int(query.split(":")[1]))
    else:
        cards = Card.objects.filter(base=card_type, individual__in=objects, is_archive=False)
    # A purely numeric query additionally narrows by exact card number.
    if re.match(p3, query):
        cards = cards.filter(number=query)
    for row in cards.prefetch_related("individual").distinct():
        data.append({"type_title": card_type.title,
                     "num": row.number,
                     "family": row.individual.family,
                     "name": row.individual.name,
                     "twoname": row.individual.patronymic,
                     "birthday": row.individual.bd(),
                     "age": row.individual.age_s(),
                     "sex": row.individual.sex,
                     "individual_pk": row.individual.pk,
                     "pk": row.pk})
    return JsonResponse({"results": data})
@login_required
def patients_search_individual(request):
    """Search Individual records by one of three query shapes.

    JSON body: ``query`` — either "АБВ01012000"-style (3 initials + DDMMYYYY
    birth date), a free-text "Family [Name [Patronymic [dd.mm.yyyy]]]" string,
    or an explicit "individual_pk:<pk>" selector.

    Returns {"results": [...]} with at most 25 matches ordered by FIO and
    birthday.
    """
    data = []
    d = json.loads(request.body)
    query = d['query'].strip()
    # Three initial letters followed by the birth date as DDMMYYYY.
    p = re.compile(r'[а-яё]{3}[0-9]{8}', re.IGNORECASE)
    # Free-text FIO with an optional dd.mm.yyyy birth date tail.
    p2 = re.compile(r'^([А-яЕё]+)( ([А-яЕё]+)( ([А-яЕё]*)( ([0-9]{2}\.[0-9]{2}\.[0-9]{4}))?)?)?$')
    p4 = re.compile(r'individual_pk:\d+')
    pat_bd = re.compile(r"\d{4}-\d{2}-\d{2}")
    # BUGFIX: start from an empty queryset instead of a plain list so that
    # .distinct()/.order_by() below never raise AttributeError when no
    # pattern matched the query.
    objects = Individual.objects.none()
    if re.search(p, query.lower()):
        initials = query[0:3].upper()
        # Rebuild an ISO date (YYYY-MM-DD) from the DDMMYYYY tail.
        btday = query[7:11] + "-" + query[5:7] + "-" + query[3:5]
        if not pat_bd.match(btday):
            return JsonResponse([], safe=False)
        try:
            objects = Individual.objects.filter(family__startswith=initials[0], name__startswith=initials[1],
                                                patronymic__startswith=initials[2], birthday=btday)
        except ValidationError:
            objects = Individual.objects.none()
    elif re.search(p2, query):
        split = str(query).split()
        name = patronymic = ""
        family = split[0]
        if len(split) > 1:
            name = split[1]
        if len(split) > 2:
            patronymic = split[2]
        objects = Individual.objects.filter(family__istartswith=family, name__istartswith=name,
                                            patronymic__istartswith=patronymic)
        if len(split) > 3:
            # A fourth token is the exact birth date, dd.mm.yyyy.
            objects = objects.filter(birthday=datetime.datetime.strptime(split[3], "%d.%m.%Y").date())
    if re.search(p4, query):
        # Explicit pk lookup overrides any other match.
        objects = Individual.objects.filter(pk=int(query.split(":")[1]))
    # Cap the result set at 25 rows (the original counted manually and broke).
    for row in objects.distinct().order_by("family", "name", "patronymic", "birthday")[:25]:
        data.append({"family": row.family,
                     "name": row.name,
                     "patronymic": row.patronymic,
                     "birthday": row.bd(),
                     "age": row.age_s(),
                     "sex": row.sex,
                     "pk": row.pk})
    return JsonResponse({"results": data})
@login_required
@group_required("Лечащий врач", "Оператор лечащего врача")
def directions_generate(request):
    """Generate directions for the requested researches (POST only).

    Delegates to ``Napravleniya.gen_napravleniya_by_issledovaniya`` and
    returns {"ok": bool, "directions": [pk, ...], "message": str}.
    """
    result = {"ok": False, "directions": [], "message": ""}
    if request.method == "POST":
        payload = json.loads(request.body)
        rc = directions.Napravleniya.gen_napravleniya_by_issledovaniya(
            payload.get("card_pk"),
            payload.get("diagnos"),
            payload.get("fin_source"),
            payload.get("history_num"),
            payload.get("ofname_pk"),
            request.user.doctorprofile,
            payload.get("researches"),
            payload.get("comments"),
        )
        result["ok"] = rc["r"]
        result["directions"] = json.loads(rc["list_id"])
        if "message" in rc:
            result["message"] = rc["message"]
    return JsonResponse(result)
@login_required
def directions_history(request):
    """Return the directions history for one patient or the current doctor.

    JSON body: ``patient`` (card pk; -1 when not filtering by patient),
    ``type`` (status filter; 4 = everything related to the current doctor),
    ``date_from`` / ``date_to`` as dd.mm.yyyy strings.
    """
    import datetime
    res = {"directions": []}
    request_data = json.loads(request.body)
    pk = request_data.get("patient", -1)
    req_status = request_data.get("type", 4)
    # Parse the dd.mm.yyyy bounds; the end date is made exclusive (+1 day).
    date_start = request_data["date_from"].split(".")
    date_end = request_data["date_to"].split(".")
    date_start = datetime.date(int(date_start[2]), int(date_start[1]), int(date_start[0]))
    date_end = datetime.date(int(date_end[2]), int(date_end[1]), int(date_end[0])) + datetime.timedelta(days=1)
    try:
        if pk >= 0 or req_status == 4:
            if req_status != 4:
                # Directions of one specific patient within the range.
                rows = directions.Napravleniya.objects.filter(data_sozdaniya__range=(date_start, date_end),
                                                              client__pk=pk).order_by(
                    "-data_sozdaniya").prefetch_related()
            else:
                # All directions created by or assigned to the current doctor.
                rows = directions.Napravleniya.objects.filter(Q(data_sozdaniya__range=(date_start, date_end),
                                                                doc_who_create=request.user.doctorprofile)
                                                              | Q(data_sozdaniya__range=(date_start, date_end),
                                                                  doc=request.user.doctorprofile)).order_by(
                    "-data_sozdaniya")
            for napr in rows.values("pk", "data_sozdaniya", "cancel"):
                iss_list = directions.Issledovaniya.objects.filter(napravleniye__pk=napr["pk"]).prefetch_related(
                    "tubes", "research", "research__podrazdeleniye")
                if not iss_list.exists():
                    continue
                status = 2  # 0 - issued. 1 - material received by lab. 2 - result confirmed. -1 - cancelled
                has_conf = False
                researches_list = []
                researches_pks = []
                for v in iss_list:
                    researches_list.append(v.research.title)
                    researches_pks.append(v.research.pk)
                    iss_status = 1
                    if not v.doc_confirmation and not v.doc_save and not v.deferred:
                        iss_status = 1
                        if v.tubes.count() == 0:
                            # No tubes registered yet — still only issued.
                            iss_status = 0
                        else:
                            for t in v.tubes.all():
                                if not t.time_recive:
                                    iss_status = 0
                    elif v.doc_confirmation or v.deferred:
                        iss_status = 2
                        if v.doc_confirmation and not has_conf:
                            has_conf = True
                    # Direction status is the minimum over its issledovaniya.
                    status = min(iss_status, status)
                if status == 2 and not has_conf:
                    # Everything deferred but nothing actually confirmed.
                    status = 1
                if req_status in [3, 4] or req_status == status:
                    res["directions"].append(
                        {"pk": napr["pk"], "status": -1 if status == 0 and napr["cancel"] else status,
                         "researches": ' | '.join(researches_list),
                         "researches_pks": researches_pks,
                         "date": str(dateformat.format(napr["data_sozdaniya"].date(), settings.DATE_FORMAT_SHORT)),
                         "lab": iss_list[0].research.get_podrazdeleniye().title, "cancel": napr["cancel"],
                         "checked": False})
    except (ValueError, IndexError) as e:
        # Malformed dates or indices in the request payload.
        res["message"] = str(e)
    return JsonResponse(res)
@login_required
def directions_cancel(request):
    """Toggle the cancel flag of one direction; returns the new flag value."""
    response = {"cancel": False}
    pk = json.loads(request.body).get("pk", -1)
    direction = directions.Napravleniya.objects.filter(pk=pk).first()
    if direction is not None:
        direction.cancel = not direction.cancel
        direction.save()
        response["cancel"] = direction.cancel
    return JsonResponse(response)
@login_required
def researches_params(request):
    """List the selectable parameters of each requested research.

    For paraclinic researches the parameters are the non-empty input groups;
    for lab researches they are the fractions, ordered by sort weight.
    """
    response = {"researches": []}
    pks = json.loads(request.body).get("pks", [])
    for research in DResearches.objects.filter(pk__in=pks):
        if research.is_paraclinic:
            items = ParaclinicInputGroups.objects.filter(research=research).exclude(title="").order_by("order")
        else:
            items = Fractions.objects.filter(research=research).order_by("sort_weight")
        response["researches"].append({
            "pk": research.pk,
            "title": research.title,
            "short_title": research.get_title(),
            "params": [{"pk": item.pk, "title": item.title} for item in items],
            "is_paraclinic": research.is_paraclinic,
            "selected_params": [],
        })
    return JsonResponse(response)
@login_required
@group_required("Оператор", "Конструктор: Параклинические (описательные) исследования")
def researches_by_department(request):
    """List all researches of one department (-1 yields an empty list)."""
    response = {"researches": []}
    department_pk = int(json.loads(request.body)["department"])
    if department_pk != -1:
        response["researches"] = [
            {
                "pk": research.pk,
                "title": research.title,
                "short_title": research.short_title,
                "preparation": research.preparation,
                "hide": research.hide,
                "code": research.code,
            }
            for research in DResearches.objects.filter(podrazdeleniye__pk=department_pk).order_by("title")
        ]
    return JsonResponse(response)
@login_required
@group_required("Оператор", "Конструктор: Параклинические (описательные) исследования")
def researches_update(request):
    """Create or update a research together with its paraclinic input
    groups and fields.

    JSON body: ``pk`` (-1 = create a new research, otherwise an existing
    research pk), ``department``, ``title``, ``short_title``, ``code``,
    ``info``, ``hide`` and ``groups`` (each group carries its ``fields``;
    a ``pk`` of -1 inside a group/field means "create").
    Returns {"ok": bool}.
    """
    response = {"ok": False}
    request_data = json.loads(request.body)
    pk = request_data.get("pk", -2)
    if pk > -2:
        department_pk = request_data.get("department")
        title = request_data.get("title").strip()
        short_title = request_data.get("short_title").strip()
        code = request_data.get("code").strip()
        info = request_data.get("info").strip()
        hide = request_data.get("hide")
        groups = request_data.get("groups")
        if len(title) > 0 and Podrazdeleniya.objects.filter(pk=department_pk).exists():
            department = Podrazdeleniya.objects.filter(pk=department_pk)[0]
            res = None
            if pk == -1:
                # New research; the paraclinic flag follows the department type.
                res = DResearches(title=title, short_title=short_title, podrazdeleniye=department, code=code,
                                  is_paraclinic=department.p_type == 3, paraclinic_info=info, hide=hide)
            elif DResearches.objects.filter(pk=pk).exists():
                res = DResearches.objects.filter(pk=pk)[0]
                res.title = title
                res.short_title = short_title
                res.podrazdeleniye = department
                res.code = code
                res.is_paraclinic = department.p_type == 3
                res.paraclinic_info = info
                res.hide = hide
            if res:
                res.save()
                for group in groups:
                    g = _researches_update_group(group, res)
                    if g:
                        for field in group["fields"]:
                            _researches_update_field(field, g)
                response["ok"] = True
        # BUGFIX: the original reused the name ``pk`` for group and field pks
        # inside the loops, so this log entry was keyed by the last field pk
        # instead of the research pk from the request.
        slog.Log(key=pk, type=10000, body=json.dumps(request_data), user=request.user.doctorprofile).save()
    return JsonResponse(response)


def _researches_update_group(group, res):
    """Create or update one ParaclinicInputGroups row of ``res``.

    Returns the saved group, or None when the supplied pk is unknown.
    """
    g = None
    group_pk = group["pk"]
    if group_pk == -1:
        g = ParaclinicInputGroups(title=group["title"],
                                  show_title=group["show_title"],
                                  research=res,
                                  order=group["order"],
                                  hide=group["hide"])
    elif ParaclinicInputGroups.objects.filter(pk=group_pk).exists():
        g = ParaclinicInputGroups.objects.get(pk=group_pk)
        g.title = group["title"]
        g.show_title = group["show_title"]
        g.research = res
        g.order = group["order"]
        g.hide = group["hide"]
    if g:
        g.save()
    return g


def _researches_update_field(field, g):
    """Create or update one ParaclinicInputField row inside group ``g``."""
    f = None
    field_pk = field["pk"]
    if field_pk == -1:
        f = ParaclinicInputField(title=field["title"],
                                 group=g,
                                 order=field["order"],
                                 lines=field["lines"],
                                 hide=field["hide"],
                                 default_value=field["default"],
                                 input_templates=json.dumps(field["values_to_input"]))
    elif ParaclinicInputField.objects.filter(pk=field_pk).exists():
        f = ParaclinicInputField.objects.get(pk=field_pk)
        f.title = field["title"]
        f.group = g
        f.order = field["order"]
        f.lines = field["lines"]
        f.hide = field["hide"]
        f.default_value = field["default"]
        f.input_templates = json.dumps(field["values_to_input"])
    if f:
        f.save()
@login_required
@group_required("Оператор", "Конструктор: Параклинические (описательные) исследования")
def researches_details(request):
    """Return one research with its full paraclinic group/field structure."""
    response = {"pk": -1, "department": -1, "title": '', "short_title": '', "code": '', "info": '', "hide": False,
                "groups": []}
    pk = json.loads(request.body).get("pk")
    if DResearches.objects.filter(pk=pk).exists():
        res = DResearches.objects.filter(pk=pk)[0]
        response["pk"] = res.pk
        response["department"] = res.podrazdeleniye.pk
        response["title"] = res.title
        response["short_title"] = res.short_title
        response["code"] = res.code
        response["info"] = res.paraclinic_info or ""
        response["hide"] = res.hide
        for group in ParaclinicInputGroups.objects.filter(research__pk=pk).order_by("order"):
            fields = [
                {
                    "pk": field.pk,
                    "order": field.order,
                    "lines": field.lines,
                    "title": field.title,
                    "default": field.default_value,
                    "hide": field.hide,
                    "values_to_input": json.loads(field.input_templates),
                    "new_value": ""
                }
                for field in ParaclinicInputField.objects.filter(group=group).order_by("order")
            ]
            response["groups"].append({"pk": group.pk, "order": group.order, "title": group.title,
                                       "show_title": group.show_title, "hide": group.hide,
                                       "fields": fields})
    return JsonResponse(response)
@login_required
@group_required("Оператор", "Конструктор: Параклинические (описательные) исследования")
def paraclinic_details(request):
    """Return only the paraclinic group/field structure of one research."""
    response = {"groups": []}
    pk = json.loads(request.body).get("pk")
    for group in ParaclinicInputGroups.objects.filter(research__pk=pk).order_by("order"):
        entry = {"pk": group.pk, "order": group.order, "title": group.title, "show_title": group.show_title,
                 "hide": group.hide, "fields": []}
        for field in ParaclinicInputField.objects.filter(group=group).order_by("order"):
            entry["fields"].append({
                "pk": field.pk,
                "order": field.order,
                "lines": field.lines,
                "title": field.title,
                "default": field.default_value,
                "hide": field.hide,
                "values_to_input": json.loads(field.input_templates)
            })
        response["groups"].append(entry)
    return JsonResponse(response)
@login_required
def directions_results(request):
    """Return the full result sheet for one direction (napravleniye).

    JSON body: ``pk`` — direction pk. The response carries direction
    metadata, client info, and an ordered ``results`` mapping; keys are
    built so the client renders items in tube/direction/sort-weight order.
    """
    result = {"ok": False,
              "direction": {"pk": -1, "doc": "", "date": ""},
              "client": {},
              "full": False}
    request_data = json.loads(request.body)
    pk = request_data.get("pk", -1)
    if directions.Napravleniya.objects.filter(pk=pk).exists():
        napr = directions.Napravleniya.objects.get(pk=pk)
        # Use the most frequent save date across issledovaniya as the sheet date.
        dates = {}
        for iss in directions.Issledovaniya.objects.filter(napravleniye=napr, time_save__isnull=False):
            if iss.time_save:
                dt = str(dateformat.format(iss.time_save, settings.DATE_FORMAT))
                if dt not in dates.keys():
                    dates[dt] = 0
                dates[dt] += 1
        import operator
        maxdate = ""
        if dates != {}:
            maxdate = max(dates.items(), key=operator.itemgetter(1))[0]
        iss_list = directions.Issledovaniya.objects.filter(napravleniye=napr)
        t = 0
        # Render when everything is confirmed, or at least one non-deferred item exists.
        if not iss_list.filter(doc_confirmation__isnull=True).exists() or iss_list.filter(deferred=False).exists():
            result["direction"]["pk"] = napr.pk
            result["full"] = False
            result["ok"] = True
            if iss_list.filter(doc_confirmation__isnull=False).exists():
                result["direction"]["doc"] = iss_list.filter(doc_confirmation__isnull=False)[
                    0].doc_confirmation.get_fio()
                if iss_list.filter(doc_confirmation__isnull=True, deferred=False).exists():
                    # Partially done marker (user-facing Russian string kept verbatim).
                    result["direction"]["doc"] = result["direction"]["doc"] + " (выполнено не полностью)"
                else:
                    result["full"] = True
            else:
                result["direction"]["doc"] = "Не подтверждено"
            result["direction"]["date"] = maxdate
            result["client"]["sex"] = napr.client.individual.sex
            result["client"]["fio"] = napr.client.individual.fio()
            result["client"]["age"] = napr.client.individual.age_s(direction=napr)
            result["client"]["cardnum"] = napr.client.number_with_type()
            result["client"]["dr"] = napr.client.individual.bd()
            result["results"] = collections.OrderedDict()
            isses = []
            for issledovaniye in iss_list.order_by("tubes__id", "research__sort_weight"):
                if issledovaniye.pk in isses:
                    # The tube join can duplicate rows; keep each issledovaniye once.
                    continue
                isses.append(issledovaniye.pk)
                t += 1
                # Composite key: position, research direction pk, sort weight, research pk.
                kint = "%s_%s_%s_%s" % (t,
                                        "-1" if not issledovaniye.research.direction else issledovaniye.research.direction.pk,
                                        issledovaniye.research.sort_weight,
                                        issledovaniye.research.pk)
                result["results"][kint] = {"title": issledovaniye.research.title,
                                           "fractions": collections.OrderedDict(),
                                           "sort": issledovaniye.research.sort_weight,
                                           "tube_time_get": ""}
                if not issledovaniye.deferred or issledovaniye.doc_confirmation:
                    # Time of the first tube that was actually collected.
                    for isstube in issledovaniye.tubes.all():
                        if isstube.time_get:
                            result["results"][kint]["tube_time_get"] = str(
                                dateformat.format(isstube.time_get, settings.DATE_FORMAT))
                            break
                    results = directions.Result.objects.filter(issledovaniye=issledovaniye).order_by(
                        "fraction__sort_weight")  # fetch stored results from the database
                    n = 0
                    for res in results:  # iterate over stored results
                        # NOTE: ``pk`` is reused here as the fraction sort key.
                        pk = res.fraction.sort_weight
                        if not pk or pk <= 0:
                            pk = res.fraction.pk
                        if res.fraction.render_type == 0:
                            # Plain fraction: value, units and reference ranges.
                            if pk not in result["results"][kint]["fractions"].keys():
                                result["results"][kint]["fractions"][pk] = {}
                            result["results"][kint]["fractions"][pk]["result"] = result_normal(res.value)
                            result["results"][kint]["fractions"][pk]["title"] = res.fraction.title
                            result["results"][kint]["fractions"][pk]["units"] = res.fraction.units
                            refs = res.get_ref(full=True)
                            ref_m = refs["m"]
                            ref_f = refs["f"]
                            if isinstance(ref_m, str):
                                ref_m = json.loads(ref_m)
                            if isinstance(ref_f, str):
                                ref_f = json.loads(ref_f)
                            result["results"][kint]["fractions"][pk]["ref_m"] = ref_m
                            result["results"][kint]["fractions"][pk]["ref_f"] = ref_f
                        else:
                            # Tabular result (e.g. bacterial culture): value is a JSON blob of rows.
                            try:
                                tmp_results = json.loads("{}" if not res.value else res.value).get("rows", {})
                            except Exception:
                                tmp_results = {}
                            n = 0
                            for row in tmp_results.values():
                                n += 1
                                tmp_pk = "%d_%d" % (pk, n)
                                if tmp_pk not in result["results"][kint]["fractions"].keys():
                                    result["results"][kint]["fractions"][tmp_pk] = {}
                                result["results"][kint]["fractions"][tmp_pk]["title"] = "Выделенная культура"
                                result["results"][kint]["fractions"][tmp_pk]["result"] = row["title"]
                                result["results"][kint]["fractions"][tmp_pk]["ref_m"] = {}
                                result["results"][kint]["fractions"][tmp_pk]["ref_f"] = {}
                                result["results"][kint]["fractions"][tmp_pk]["units"] = ""
                                # Antibiotic sensitivity sub-rows of the culture.
                                for subrow in row["rows"].values():
                                    if "null" in subrow["value"]:
                                        continue
                                    n += 1
                                    tmp_pk = "%d_%d" % (pk, n)
                                    if tmp_pk not in result["results"][kint]["fractions"].keys():
                                        result["results"][kint]["fractions"][tmp_pk] = {}
                                    result["results"][kint]["fractions"][tmp_pk]["title"] = subrow["title"]
                                    result["results"][kint]["fractions"][tmp_pk]["result"] = subrow["value"]
                                    result["results"][kint]["fractions"][tmp_pk]["ref_m"] = {}
                                    result["results"][kint]["fractions"][tmp_pk]["ref_f"] = {}
                                    result["results"][kint]["fractions"][tmp_pk]["units"] = ""
                                # Legend row for the S/R/I sensitivity codes.
                                n += 1
                                tmp_pk = "%d_%d" % (pk, n)
                                if tmp_pk not in result["results"][kint]["fractions"].keys():
                                    result["results"][kint]["fractions"][tmp_pk] = {}
                                result["results"][kint]["fractions"][tmp_pk][
                                    "title"] = "S - чувствителен; R - резистентен; I - промежуточная чувствительность;"
                                result["results"][kint]["fractions"][tmp_pk]["result"] = ""
                                result["results"][kint]["fractions"][tmp_pk]["ref_m"] = {}
                                result["results"][kint]["fractions"][tmp_pk]["ref_f"] = {}
                                result["results"][kint]["fractions"][tmp_pk]["units"] = ""
                    # Append the lab comment as one extra pseudo-fraction row.
                    if issledovaniye.lab_comment and issledovaniye.lab_comment != "":
                        n += 1
                        tmp_pk = "%d_%d" % (pk, n)
                        if tmp_pk not in result["results"][kint]["fractions"].keys():
                            result["results"][kint]["fractions"][tmp_pk] = {}
                        result["results"][kint]["fractions"][tmp_pk]["title"] = "Комментарий"
                        result["results"][kint]["fractions"][tmp_pk]["result"] = issledovaniye.lab_comment.replace("\n",
                                                                                                                  "<br/>")
                        result["results"][kint]["fractions"][tmp_pk]["ref_m"] = {}
                        result["results"][kint]["fractions"][tmp_pk]["ref_f"] = {}
                        result["results"][kint]["fractions"][tmp_pk]["units"] = ""
                else:
                    # Deferred and unconfirmed: render every fraction as postponed.
                    fr_list = Fractions.objects.filter(research=issledovaniye.research)
                    for fr in fr_list:
                        pk = fr.sort_weight
                        if not pk or pk <= 0:
                            pk = fr.pk
                        if pk not in result["results"][kint]["fractions"].keys():
                            result["results"][kint]["fractions"][pk] = {}
                        result["results"][kint]["fractions"][pk]["result"] = "отложен"  # value
                        result["results"][kint]["fractions"][pk][
                            "title"] = fr.title  # fraction title
                        result["results"][kint]["fractions"][pk][
                            "units"] = fr.units  # units of measurement
                        ref_m = {"": ""}  # fr.ref_m
                        ref_f = {"": ""}  # fr.ref_f
                        if not isinstance(ref_m, str):
                            ref_m = json.loads(ref_m)
                        if not isinstance(ref_f, str):
                            ref_f = json.loads(ref_f)
                        result["results"][kint]["fractions"][pk]["ref_m"] = ref_m  # male reference range
                        result["results"][kint]["fractions"][pk]["ref_f"] = ref_f  # female reference range
    return JsonResponse(result)
@group_required("Оформление статталонов")
def statistics_tickets_types(request):
    """Return the reference dictionaries used by the statistics-ticket form."""
    def as_options(manager):
        # Visible entries only, ordered by pk.
        return [{"pk": x.pk, "title": x.title} for x in manager.filter(hide=False).order_by("pk")]

    result = {"visit": as_options(VisitPurpose.objects),
              "result": as_options(ResultOfTreatment.objects),
              "outcome": as_options(Outcomes.objects),
              "exclude": as_options(ExcludePurposes.objects)}
    return JsonResponse(result)
@group_required("Оформление статталонов")
def statistics_tickets_send(request):
    """Create one statistics ticket from the posted form and log it."""
    response = {"ok": True}
    rd = json.loads(request.body)
    ticket_kwargs = dict(
        card=Card.objects.get(pk=rd["card_pk"]),
        purpose=VisitPurpose.objects.get(pk=rd["visit"]),
        result=ResultOfTreatment.objects.get(pk=rd["result"]),
        info=rd["info"].strip(),
        first_time=rd["first_time"],
        primary_visit=rd["primary_visit"],
        dispensary_registration=int(rd["disp"]),
        doctor=request.user.doctorprofile,
        # Optional references: .first() yields None when absent.
        outcome=Outcomes.objects.filter(pk=rd["outcome"]).first(),
        dispensary_exclude_purpose=ExcludePurposes.objects.filter(pk=rd["exclude"]).first(),
        dispensary_diagnos=rd["disp_diagnos"],
    )
    StatisticsTicket(**ticket_kwargs).save()
    Log(key="", type=7000, body=json.dumps(rd), user=request.user.doctorprofile).save()
    return JsonResponse(response)
@group_required("Оформление статталонов")
def statistics_tickets_get(request):
    """Return the current doctor's statistics tickets for one day (dd.mm.yyyy)."""
    response = {"data": []}
    request_data = json.loads(request.body)
    parts = request_data["date"].split(".")
    date_start = datetime.date(int(parts[2]), int(parts[1]), int(parts[0]))
    date_end = date_start + datetime.timedelta(1)
    n = 0
    rows = StatisticsTicket.objects.filter(doctor=request.user.doctorprofile,
                                           date__range=(date_start, date_end,)).order_by('pk')
    for row in rows:
        # Only valid tickets get a running number.
        if not row.invalid_ticket:
            n += 1
        disp = row.get_dispensary_registration_display()
        if row.dispensary_diagnos != "":
            disp += " (" + row.dispensary_diagnos + ")"
        if row.dispensary_exclude_purpose:
            disp += " (" + row.dispensary_exclude_purpose.title + ")"
        response["data"].append({
            "pk": row.pk,
            "n": n if not row.invalid_ticket else '',
            "patinet": row.card.individual.fio(full=True),  # key spelling kept for API compatibility
            "card": row.card.number_with_type(),
            "purpose": row.purpose.title if row.purpose else "",
            "first_time": row.first_time,
            "primary": row.primary_visit,
            "info": row.info,
            "disp": disp,
            "result": row.result.title if row.result else "",
            "outcome": row.outcome.title if row.outcome else "",
            "invalid": row.invalid_ticket,
            "can_invalidate": row.can_invalidate()
        })
    return JsonResponse(response)
@group_required("Оформление статталонов")
def statistics_tickets_invalidate(request):
    """Flip the invalid flag on one of the current doctor's tickets.

    Only allowed while ``can_invalidate()`` is still true for the ticket.
    """
    response = {"ok": False, "message": ""}
    request_data = json.loads(request.body)
    owns_ticket = StatisticsTicket.objects.filter(doctor=request.user.doctorprofile,
                                                  pk=request_data.get("pk", -1)).exists()
    if owns_ticket:
        if not StatisticsTicket.objects.get(pk=request_data["pk"]).can_invalidate():
            response["message"] = "Время на отмену или возврат истекло"
        else:
            StatisticsTicket.objects.filter(pk=request_data["pk"]).update(
                invalid_ticket=request_data.get("invalid", False))
            response["ok"] = True
            Log(key=str(request_data["pk"]), type=7001, body=json.dumps(request_data.get("invalid", False)),
                user=request.user.doctorprofile).save()
    return JsonResponse(response)
@group_required("Врач параклиники")
def directions_paraclinic_form(request):
    """Build the paraclinic result-entry form for one direction.

    JSON body: ``pk`` — direction pk or a scanned barcode value.
    Returns patient/direction headers plus, per issledovaniye, the input
    groups and fields with current or default values.
    """
    import time
    response = {"ok": False, "message": ""}
    request_data = json.loads(request.body)
    pk = request_data.get("pk", -1)
    if pk >= 4600000000000:
        # Looks like a scanned barcode: strip the 460... prefix and the
        # trailing check digit — presumably EAN-13-style; TODO confirm.
        pk -= 4600000000000
        pk //= 10
    add_f = {}
    add_fr = {}
    if not request.user.is_superuser:
        # Non-superusers only see directions of their own department.
        add_f = dict(issledovaniya__research__podrazdeleniye=request.user.doctorprofile.podrazdeleniye)
        add_fr = dict(research__podrazdeleniye=request.user.doctorprofile.podrazdeleniye)
    if directions.Napravleniya.objects.filter(pk=pk, issledovaniya__research__is_paraclinic=True, **add_f).exists():
        response["ok"] = True
        d = directions.Napravleniya.objects.filter(pk=pk, issledovaniya__research__is_paraclinic=True, **add_f).distinct()[0]
        response["patient"] = {
            "fio_age": d.client.individual.fio(full=True),
            "card": d.client.number_with_type(),
            "doc": d.doc.get_fio(dots=True) + ", " + d.doc.podrazdeleniye.title
        }
        response["direction"] = {
            "pk": d.pk,
            "date": timezone.localtime(d.data_sozdaniya).strftime('%d.%m.%Y'),
            "diagnos": d.diagnos,
            "fin_source": d.istochnik_f.title
        }
        response["researches"] = []
        for i in directions.Issledovaniya.objects.filter(napravleniye=d, research__is_paraclinic=True, **add_fr):
            # Confirmation time as epoch seconds; the +8h offset presumably
            # converts the naive timestamp to local time (UTC+8) — TODO confirm.
            ctp = int(0 if not i.time_confirmation else int(
                time.mktime(i.time_confirmation.timetuple()))) + 8 * 60 * 60
            ctime = int(time.time())
            cdid = -1 if not i.doc_confirmation else i.doc_confirmation.pk
            # Confirmation-reset window, in seconds.
            rt = SettingManager.get("lab_reset_confirm_time_min") * 60
            iss = {
                "pk": i.pk,
                "research": {
                    "title": i.research.title,
                    "groups": []
                },
                "saved": i.time_save is not None,
                "confirmed": i.time_confirmation is not None,
                # Reset allowed for: the confirming doctor within the window,
                # a superuser, or a member of the override group.
                "allow_reset_confirm": ((
                    ctime - ctp < rt and cdid == request.user.doctorprofile.pk) or request.user.is_superuser or "Сброс подтверждений результатов" in [
                    str(x) for x in
                    request.user.groups.all()]) and i.time_confirmation is not None,
            }
            for group in ParaclinicInputGroups.objects.filter(research=i.research).order_by("order"):
                g = {"pk": group.pk, "order": group.order, "title": group.title, "show_title": group.show_title,
                     "hide": group.hide, "fields": []}
                for field in ParaclinicInputField.objects.filter(group=group).order_by("order"):
                    g["fields"].append({
                        "pk": field.pk,
                        "order": field.order,
                        "lines": field.lines,
                        "title": field.title,
                        "hide": field.hide,
                        "values_to_input": json.loads(field.input_templates),
                        # Previously saved value if any, otherwise the field default.
                        "value": field.default_value if not directions.ParaclinicResult.objects.filter(
                            issledovaniye=i, field=field).exists() else
                        directions.ParaclinicResult.objects.filter(issledovaniye=i, field=field)[0].value,
                    })
                iss["research"]["groups"].append(g)
            response["researches"].append(iss)
    else:
        response["message"] = "Направление не найдено"
    return JsonResponse(response)
def delete_keys_from_dict(dict_del, lst_keys):
    """Recursively remove every key in ``lst_keys`` from ``dict_del``, in place.

    Descends into dict values and into dicts contained in list values.
    Returns ``dict_del`` for call-chaining convenience.

    :param dict_del: dictionary to strip (mutated in place)
    :param lst_keys: iterable of keys to remove at every nesting level
    """
    for k in lst_keys:
        # pop with default replaces the try/del/except-KeyError dance.
        dict_del.pop(k, None)
    for v in dict_del.values():
        if isinstance(v, dict):
            delete_keys_from_dict(v, lst_keys)
        elif isinstance(v, list):
            for item in v:
                # BUGFIX: only recurse into dict elements; the original
                # recursed into every element and raised TypeError on
                # strings/numbers inside lists.
                if isinstance(item, dict):
                    delete_keys_from_dict(item, lst_keys)
    return dict_del
@group_required("Врач параклиники")
def directions_paraclinic_result(request):
    """Save (and optionally confirm) entered paraclinic field values.

    JSON body: ``data`` — the issledovaniye structure as produced by
    ``directions_paraclinic_form`` (with edited ``value``s), plus
    ``with_confirm`` to also confirm in the same request. Only unconfirmed
    issledovaniya of the doctor's own department are writable.
    """
    response = {"ok": False, "message": ""}
    request_data = json.loads(request.body).get("data", {})
    pk = request_data.get("pk", -1)
    with_confirm = json.loads(request.body).get("with_confirm", False)
    if directions.Issledovaniya.objects.filter(pk=pk, time_confirmation__isnull=True,
                                               research__podrazdeleniye=request.user.doctorprofile.podrazdeleniye).exists():
        iss = directions.Issledovaniya.objects.get(pk=pk)
        for group in request_data["research"]["groups"]:
            for field in group["fields"]:
                if not ParaclinicInputField.objects.filter(pk=field["pk"]).exists():
                    # Ignore fields deleted since the form was loaded.
                    continue
                f = ParaclinicInputField.objects.get(pk=field["pk"])
                # Upsert the stored value for this field.
                if not directions.ParaclinicResult.objects.filter(issledovaniye=iss, field=f).exists():
                    f_result = directions.ParaclinicResult(issledovaniye=iss, field=f, value="")
                else:
                    f_result = directions.ParaclinicResult.objects.filter(issledovaniye=iss, field=f)[0]
                f_result.value = field["value"]
                f_result.save()
        iss.doc_save = request.user.doctorprofile
        iss.time_save = timezone.now()
        if with_confirm:
            iss.doc_confirmation = request.user.doctorprofile
            iss.time_confirmation = timezone.now()
            # First confirmation also marks the visit on the direction.
            if not iss.napravleniye.visit_who_mark or not iss.napravleniye.visit_date:
                iss.napravleniye.visit_who_mark = request.user.doctorprofile
                iss.napravleniye.visit_date = timezone.now()
                iss.napravleniye.save()
        iss.save()
        response["ok"] = True
        # Log a stripped copy of the payload (presentation-only keys removed).
        slog.Log(key=pk, type=13, body=json.dumps(delete_keys_from_dict(request_data,
                                                                        ["hide", "confirmed", "allow_reset_confirm",
                                                                         "values_to_input", "show_title", "order",
                                                                         "show_title", "lines", "saved", "pk"])),
                 user=request.user.doctorprofile).save()
    return JsonResponse(response)
@group_required("Врач параклиники")
def directions_paraclinic_confirm(request):
    """Confirm one unconfirmed issledovaniye of the doctor's own department.

    Also marks the visit on the parent direction when it has no visit yet.
    """
    response = {"ok": False, "message": ""}
    request_data = json.loads(request.body)
    pk = request_data.get("iss_pk", -1)
    editable = directions.Issledovaniya.objects.filter(
        pk=pk, time_confirmation__isnull=True,
        research__podrazdeleniye=request.user.doctorprofile.podrazdeleniye)
    if editable.exists():
        iss = directions.Issledovaniya.objects.get(pk=pk)
        now = timezone.now()
        if not iss.napravleniye.visit_who_mark or not iss.napravleniye.visit_date:
            iss.napravleniye.visit_who_mark = request.user.doctorprofile
            iss.napravleniye.visit_date = now
            iss.napravleniye.save()
        iss.doc_confirmation = request.user.doctorprofile
        iss.time_confirmation = now
        iss.save()
        response["ok"] = True
        slog.Log(key=pk, type=14, body=json.dumps(request_data), user=request.user.doctorprofile).save()
    return JsonResponse(response)
@group_required("Врач параклиники", "Сброс подтверждений результатов")
def directions_paraclinic_confirm_reset(request):
    """Withdraw a confirmation from one issledovaniye.

    Allowed within the configured time window for the confirming doctor, or
    at any time for superusers and members of the override group. If the
    result was already exported to RMIS, the exported services are removed.
    """
    response = {"ok": False, "message": ""}
    request_data = json.loads(request.body)
    pk = request_data.get("iss_pk", -1)
    if directions.Issledovaniya.objects.filter(pk=pk).exists():
        iss = directions.Issledovaniya.objects.get(pk=pk)
        import time
        # Confirmation time as epoch seconds; the +8h offset presumably
        # converts the naive timestamp to local time (UTC+8) — TODO confirm.
        ctp = int(
            0 if not iss.time_confirmation else int(time.mktime(iss.time_confirmation.timetuple()))) + 8 * 60 * 60
        ctime = int(time.time())
        cdid = -1 if not iss.doc_confirmation else iss.doc_confirmation.pk
        # Same doctor within the window, superuser, or override-group member.
        if (ctime - ctp < SettingManager.get(
                "lab_reset_confirm_time_min") * 60 and cdid == request.user.doctorprofile.pk) or request.user.is_superuser or "Сброс подтверждений результатов" in [
                str(x) for x in request.user.groups.all()]:
            # Remember who had confirmed, for the audit log.
            predoc = {"fio": iss.doc_confirmation.get_fio(), "pk": iss.doc_confirmation.pk,
                      "direction": iss.napravleniye.pk}
            iss.doc_confirmation = iss.time_confirmation = None
            iss.save()
            if iss.napravleniye.result_rmis_send:
                # Already exported — delete the services on the RMIS side too.
                c = Client()
                c.directions.delete_services(iss.napravleniye, request.user.doctorprofile)
            response["ok"] = True
            slog.Log(key=pk, type=24, body=json.dumps(predoc), user=request.user.doctorprofile).save()
        else:
            response["message"] = "Сброс подтверждения разрешен в течении %s минут" % (
                str(SettingManager.get("lab_reset_confirm_time_min")))
    return JsonResponse(response)
@group_required("Врач параклиники")
def directions_paraclinic_history(request):
    """List the directions the current doctor saved or confirmed on one day.

    JSON body: ``date`` as dd.mm.yyyy. Each entry carries per-issledovaniye
    saved/confirmed flags plus aggregate all_saved/all_confirmed flags.
    """
    response = {"directions": []}
    request_data = json.loads(request.body)
    date_start = request_data["date"].split(".")
    date_start = datetime.date(int(date_start[2]), int(date_start[1]), int(date_start[0]))
    # End of the day window, exclusive.
    date_end = date_start + datetime.timedelta(1)
    has_dirs = []
    for direction in directions.\
            Napravleniya.objects.filter(Q(issledovaniya__doc_save=request.user.doctorprofile) |
                                        Q(issledovaniya__doc_confirmation=request.user.doctorprofile)) \
            .filter(Q(issledovaniya__time_confirmation__range=(date_start, date_end)) |
                    Q(issledovaniya__time_save__range=(date_start, date_end)))\
            .order_by("-issledovaniya__time_save", "-issledovaniya__time_confirmation"):
        if direction.pk in has_dirs:
            # The issledovaniya join can yield the same direction repeatedly.
            continue
        has_dirs.append(direction.pk)
        d = {
            "pk": direction.pk,
            "date": timezone.localtime(direction.data_sozdaniya).strftime('%d.%m.%Y'),
            "patient": direction.client.individual.fio(full=True, direction=direction),
            "card": direction.client.number_with_type(),
            "iss": [],
            "all_confirmed": True,
            "all_saved": True
        }
        for i in directions.Issledovaniya.objects.filter(napravleniye=direction).order_by("pk"):
            iss = {"title": i.research.title,
                   "saved": i.time_save is not None,
                   "confirmed": i.time_confirmation is not None}
            d["iss"].append(iss)
            if not iss["saved"]:
                d["all_saved"] = False
            if not iss["confirmed"]:
                d["all_confirmed"] = False
        response["directions"].append(d)
    return JsonResponse(response)
@group_required("Врач параклиники", "Посещения по направлениям")
def directions_services(request):
    """Return the services (researches) and header data of one paraclinic direction.

    JSON body: ``pk`` — direction pk or a scanned barcode value.
    """
    response = {"ok": False, "message": ""}
    request_data = json.loads(request.body)
    pk = request_data.get("pk", -1)
    if pk >= 4600000000000:
        # Barcode value: strip the 460... prefix and the trailing check digit.
        pk -= 4600000000000
        pk //= 10
    matches = directions.Napravleniya.objects.filter(pk=pk, issledovaniya__research__is_paraclinic=True)
    if not matches.exists():
        response["message"] = "Направление не найдено"
    else:
        n = matches[0]
        response["ok"] = True
        researches = [
            {"title": i.research.title,
             "department": i.research.podrazdeleniye.get_title()}
            for i in directions.Issledovaniya.objects.filter(napravleniye=n)
        ]
        visit_mark = ""
        if n.visit_who_mark:
            visit_mark = "{}, {}".format(n.visit_who_mark.get_fio(), n.visit_who_mark.podrazdeleniye.title)
        response["direction_data"] = {
            "date": n.data_sozdaniya.strftime('%d.%m.%Y'),
            "client": n.client.individual.fio(full=True),
            "card": n.client.number_with_type(),
            "diagnos": n.diagnos,
            "doc": "{}, {}".format(n.doc.get_fio(), n.doc.podrazdeleniye.title),
            "visit_who_mark": visit_mark,
            "fin_source": "{} - {}".format(n.istochnik_f.base.title, n.istochnik_f.title)
        }
        response["researches"] = researches
        response["loaded_pk"] = pk
        response["visit_status"] = n.visit_date is not None
        response["visit_date"] = "" if not n.visit_date else timezone.localtime(n.visit_date).strftime('%d.%m.%Y %X')
    return JsonResponse(response)
@group_required("Врач параклиники", "Посещения по направлениям")
def directions_mark_visit(request):
    """Mark a paraclinic direction as visited now, by the current doctor.

    Only directions without a visit date can be marked; the mark is logged.
    """
    response = {"ok": False, "message": ""}
    request_data = json.loads(request.body)
    pk = request_data.get("pk", -1)
    unvisited = directions.Napravleniya.objects.filter(pk=pk, issledovaniya__research__is_paraclinic=True,
                                                       visit_date__isnull=True)
    if not unvisited.exists():
        response["message"] = "Направление не найдено"
    else:
        n = directions.Napravleniya.objects.filter(pk=pk, issledovaniya__research__is_paraclinic=True)[0]
        response["ok"] = True
        n.visit_date = timezone.now()
        n.visit_who_mark = request.user.doctorprofile
        n.save()
        response["visit_status"] = n.visit_date is not None
        response["visit_date"] = timezone.localtime(n.visit_date).strftime('%d.%m.%Y %X')
        slog.Log(key=pk, type=5001,
                 body=json.dumps({"Посещение": "да", "Дата и время": response["visit_date"]}),
                 user=request.user.doctorprofile).save()
    return JsonResponse(response)
@group_required("Врач параклиники", "Посещения по направлениям")
def directions_visit_journal(request):
    """List the visits the current doctor marked on one day (dd.mm.yyyy)."""
    response = {"data": []}
    request_data = json.loads(request.body)
    parts = request_data["date"].split(".")
    date_start = datetime.date(int(parts[2]), int(parts[1]), int(parts[0]))
    date_end = date_start + datetime.timedelta(1)
    visits = directions.Napravleniya.objects.filter(visit_date__range=(date_start, date_end,),
                                                    visit_who_mark=request.user.doctorprofile).order_by("-visit_date")
    response["data"] = [
        {
            "pk": v.pk,
            "client": v.client.individual.fio(full=True),
            "card": v.client.number_with_type(),
            "datetime": timezone.localtime(v.visit_date).strftime('%d.%m.%Y %X')
        }
        for v in visits
    ]
    return JsonResponse(response)
@login_required
def directions_last_result(request):
    """Return the most recent confirmed result of one research for one patient.

    Expects a JSON body with "individual" (Individual pk) and "research"
    (research pk). Returns {"ok": bool, "data": {direction, datetime}} where
    data is filled only when a confirmed Issledovaniya exists.
    """
    response = {"ok": False, "data": {}}
    request_data = json.loads(request.body)
    individual = request_data.get("individual", -1)
    research = request_data.get("research", -1)
    # Newest confirmed study first.
    i = directions.Issledovaniya.objects.filter(napravleniye__client__individual__pk=individual,
                                                research__pk=research,
                                                time_confirmation__isnull=False).order_by("-time_confirmation")
    if i.exists():
        response["ok"] = True
        response["data"] = {"direction": i[0].napravleniye.pk, "datetime": timezone.localtime(i[0].time_confirmation).strftime('%d.%m.%Y')}
    return JsonResponse(response)
@login_required
def directions_results_report(request):
    """Build a flat results report for one patient over a date range.

    Request body (JSON):
        individual: pk of the Individual to report on
        params: list of {"pk": ..., "is_paraclinic": bool}; pk is a
            ParaclinicInputGroups id when is_paraclinic, else a Fractions id
        date_start / date_end: "dd.mm.yyyy" inclusive bounds

    Returns JsonResponse {"data": [...]} sorted by research, group order,
    param pk and (descending) confirmation timestamp.
    """
    import re
    data = []
    request_data = json.loads(request.body)
    individual_pk = request_data.get("individual", -1)
    slog.Log(key=str(individual_pk), type=20000, body=json.dumps(request_data), user=request.user.doctorprofile).save()
    params = request_data.get("params", [])
    date_start = request_data["date_start"].split(".")
    date_end = request_data["date_end"].split(".")
    date_start = datetime.date(int(date_start[2]), int(date_start[1]), int(date_start[0]))
    # +1 day so the end date is inclusive when used in __range lookups.
    date_end = datetime.date(int(date_end[2]), int(date_end[1]), int(date_end[0])) + datetime.timedelta(days=1)
    if Individual.objects.filter(pk=individual_pk).exists():
        individual = Individual.objects.get(pk=individual_pk)
        for param in params:
            ppk = param["pk"]
            if param["is_paraclinic"]:
                if ParaclinicInputGroups.objects.filter(pk=ppk).exists():
                    g = ParaclinicInputGroups.objects.get(pk=ppk)
                    # BUG FIX: the loop variable here used to be named `i`,
                    # shadowing the Individual fetched above, so every later
                    # Fractions query filtered by a stale Issledovaniya object
                    # instead of the patient. Also restrict paraclinic results
                    # to the requested individual and date range, matching the
                    # Fractions branch below.
                    for iss in directions.Issledovaniya.objects.filter(research__paraclinicinputgroups=g,
                                                                       napravleniye__client__individual=individual,
                                                                       time_confirmation__isnull=False,
                                                                       time_confirmation__range=(date_start, date_end)):
                        res = []
                        for r in directions.ParaclinicResult.objects.filter(field__group=g,
                                                                            issledovaniye=iss).order_by("field__order"):
                            if r.value == "":
                                continue
                            res.append((r.field.title + ": " if r.field.title != "" else "") + r.value)
                        if not res:
                            continue
                        data.append({"research": iss.research.pk,
                                     "pk": ppk,
                                     "order": g.order,
                                     "date": timezone.localtime(iss.time_confirmation).strftime('%d.%m.%Y'),
                                     "timestamp": int(timezone.localtime(iss.time_confirmation).timestamp()),
                                     "value": "; ".join(res),
                                     "is_norm": "normal",
                                     "not_norm_dir": "",
                                     "delta": 0,
                                     "active_ref": {},
                                     "direction": iss.napravleniye.pk})
            else:
                if Fractions.objects.filter(pk=ppk).exists():
                    f = Fractions.objects.get(pk=ppk)
                    for r in directions.Result.objects.filter(issledovaniye__napravleniye__client__individual=individual,
                                                              fraction=f,
                                                              issledovaniye__time_confirmation__range=(date_start, date_end)):
                        if r.value == "":
                            continue
                        is_norm = r.get_is_norm()
                        not_norm_dir = ""
                        delta = ""
                        active_ref = r.calc_normal(fromsave=False, only_ref=True)
                        # Direction/delta only make sense for plain numeric
                        # values with a two-sided reference range.
                        if "r" in active_ref and re.match(r"^\d+(\.\d+)?$", r.value.replace(",", ".").strip()):
                            x = float(r.value.replace(",", ".").strip())
                            spl = r.calc_normal(fromsave=False, only_ref=True, raw_ref=False)
                            if isinstance(spl, (list, tuple)) and len(spl) == 2:
                                if spl[0] >= x:
                                    not_norm_dir = "down"
                                    nx = spl[0] - x
                                    # Within 20% of the bound counts as "near".
                                    if nx <= spl[0] * 0.2:
                                        not_norm_dir = "n_down"
                                    delta = nx
                                elif spl[1] <= x:
                                    not_norm_dir = "up"
                                    nx = x - spl[1]
                                    if nx <= spl[1] * 0.2:
                                        not_norm_dir = "n_up"
                                    delta = nx
                        data.append({"research": f.research.pk,
                                     "pk": ppk,
                                     "order": f.sort_weight,
                                     "date": timezone.localtime(r.issledovaniye.time_confirmation).strftime('%d.%m.%Y'),
                                     "timestamp": int(timezone.localtime(r.issledovaniye.time_confirmation).timestamp()),
                                     "value": r.value,
                                     "is_norm": is_norm,
                                     "not_norm_dir": not_norm_dir,
                                     "delta": delta,
                                     "active_ref": active_ref,
                                     "direction": r.issledovaniye.napravleniye.pk})
    # Stable multi-key sort: newest first within pk, grouped by order, then research.
    data.sort(key=itemgetter("timestamp"), reverse=True)
    data.sort(key=itemgetter("pk"))
    data.sort(key=itemgetter("order"))
    data.sort(key=itemgetter("research"))
    return JsonResponse({"data": data})
|
#! /usr/bin/env python
from MFT import MFTEnumerator
import array
import re
import logging
import datetime
import argparse
from jinja2 import Template
from BinaryParser import Mmap
from MFT import Cache
from MFT import ATTR_TYPE
from MFT import MREF
from MFT import MSEQNO
from MFT import IndexRootHeader
from MFT import Attribute
from MFT import FilenameAttribute
from MFT import StandardInformationFieldDoesNotExist
# Printable characters extracted as "strings"; regex metacharacters are
# pre-escaped so the set can be dropped straight into a character class.
ASCII_BYTE = " !\"#\$%&\'\(\)\*\+,-\./0123456789:;<=>\?@ABCDEFGHIJKLMNOPQRSTUVWXYZ\[\]\^_`abcdefghijklmnopqrstuvwxyz\{\|\}\\\~"


def ascii_strings(buf, n=4):
    """Yield each run of at least `n` printable ASCII characters in `buf`."""
    pattern = re.compile("([%s]{%d,})" % (ASCII_BYTE, n))
    for hit in pattern.finditer(buf):
        run = hit.group()
        if isinstance(run, array.array):
            yield run.tostring().decode("ascii")
        else:
            yield run.decode("ascii")
def unicode_strings(buf, n=4):
    """Yield runs of at least `n` UTF-16LE printable-ASCII characters in `buf`.

    Matches sequences of (ascii byte, NUL) pairs — the little-endian UTF-16
    encoding of basic ASCII text. Hits that fail to decode are skipped.
    """
    # BUG FIX: the minimum run length was hard-coded as {4,}, silently
    # ignoring the `n` parameter; interpolate it like ascii_strings() does.
    reg = b"((?:[%s]\x00){%d,})" % (ASCII_BYTE, n)
    ascii_re = re.compile(reg)
    for match in ascii_re.finditer(buf):
        try:
            if isinstance(match.group(), array.array):
                yield match.group().tostring().decode("utf-16")
            else:
                yield match.group().decode("utf-16")
        except UnicodeDecodeError:
            pass
def get_flags(flags):
    """Return the human-readable names of every flag bit set in `flags`."""
    return [name for bit, name in Attribute.FLAGS.items() if flags & bit]
def create_safe_datetime(fn):
    """Invoke `fn` and return its result, mapping ValueError to the Unix epoch."""
    fallback = datetime.datetime(1970, 1, 1, 0, 0, 0)
    try:
        result = fn()
    except ValueError:
        # Malformed on-disk timestamps raise ValueError; substitute the epoch.
        return fallback
    return result
def create_safe_timeline_entry(fn, type_, source, path):
    """Build one timeline dict; `fn` is a timestamp accessor guarded against ValueError."""
    entry = {"timestamp": create_safe_datetime(fn)}
    entry["type"] = type_
    entry["source"] = source
    entry["path"] = path
    return entry
def create_safe_timeline_entries(attr, source, path):
    """Expand one attribute into its four timestamp entries
    (birthed / accessed / modified / changed)."""
    accessors = [
        (attr.created_time, "birthed"),
        (attr.accessed_time, "accessed"),
        (attr.modified_time, "modified"),
        (attr.changed_time, "changed"),
    ]
    return [create_safe_timeline_entry(fn, kind, source, path)
            for fn, kind in accessors]
def get_timeline_entries(record):
    """Collect timestamp entries for `record` from $SI, every $FN attribute,
    and any resident INDX root entries (allocated and slack), sorted by time.

    Returns an empty list when the record lacks standard or filename info.
    """
    entries = []
    si = record.standard_information()
    if si is None:
        return entries
    fn = record.filename_information()
    if fn is None:
        return entries
    filename = fn.filename()
    entries.extend(create_safe_timeline_entries(si, "$SI", filename))
    # One set of entries per FILENAME_INFORMATION attribute on the record.
    for b in record.attributes():
        if b.type() != ATTR_TYPE.FILENAME_INFORMATION:
            continue
        attr = FilenameAttribute(b.value(), 0, record)
        attr_filename = attr.filename()
        entries.extend(create_safe_timeline_entries(attr, "$FN", attr_filename))
    indxroot = record.attribute(ATTR_TYPE.INDEX_ROOT)
    # Only resident INDX roots are walked here.
    if indxroot and indxroot.non_resident() == 0:
        irh = IndexRootHeader(indxroot.value(), 0, False)
        for e in irh.node_header().entries():
            fn = e.filename_information()
            fn_filename = fn.filename()
            entries.extend(create_safe_timeline_entries(fn, "INDX", fn_filename))
        for e in irh.node_header().slack_entries():
            fn = e.filename_information()
            fn_filename = fn.filename()
            entries.extend(create_safe_timeline_entries(fn, "slack-INDX", fn_filename))
    return sorted(entries, key=lambda x: x["timestamp"])
def make_filename_information_model(attr):
    """Convert a FilenameAttribute into a plain dict for template rendering.

    Returns None when `attr` is None so callers/templates can test absence.
    """
    if attr is None:
        return None
    return {
        # filename_type() indexes into the four NTFS filename namespaces.
        "type": ["POSIX", "WIN32", "DOS 8.3", "WIN32 + DOS 8.3"][attr.filename_type()],
        "name": str(attr.filename()),
        "flags": get_flags(attr.flags()),
        "logical_size": attr.logical_size(),
        "physical_size": attr.physical_size(),
        "modified": create_safe_datetime(attr.modified_time),
        "accessed": create_safe_datetime(attr.accessed_time),
        "changed": create_safe_datetime(attr.changed_time),
        "created": create_safe_datetime(attr.created_time),
        "parent_ref": MREF(attr.mft_parent_reference()),
        "parent_seq": MSEQNO(attr.mft_parent_reference()),
    }
def make_standard_information_model(attr):
    """Convert a StandardInformation attribute into a plain dict.

    Returns None when `attr` is None. owner_id / security_id /
    quota_charged / usn exist only in the longer $SI layout, so they are
    added opportunistically and omitted on shorter records.
    """
    # (Removed a 12-line block of commented-out dead code that duplicated
    # the None handling with hard-coded epoch defaults.)
    if attr is None:
        return None
    ret = {
        "created": create_safe_datetime(attr.created_time),
        "modified": create_safe_datetime(attr.modified_time),
        "changed": create_safe_datetime(attr.changed_time),
        "accessed": create_safe_datetime(attr.accessed_time),
        "flags": get_flags(attr.attributes())
    }
    # Since the fields are sequential, we can handle an exception half way
    # through here and then ignore the remaining items — no per-field
    # try/except needed.
    try:
        ret["owner_id"] = attr.owner_id()
        ret["security_id"] = attr.security_id()
        ret["quota_charged"] = attr.quota_charged()
        ret["usn"] = attr.usn()
    except StandardInformationFieldDoesNotExist:
        pass
    return ret
def make_attribute_model(attr):
    """Summarize a raw MFT attribute (type, flags, sizes, data runs) as a dict."""
    ret = {
        "type": Attribute.TYPES[attr.type()],
        "name": attr.name(),
        "flags": get_flags(attr.flags()),
        "is_resident": attr.non_resident() == 0,
        "data_size": 0,
        "allocated_size": 0,
        "value_size": 0,
        "runs": [],
    }
    if attr.non_resident() > 0:
        # Non-resident: sizes come from the header, content from data runs.
        ret["data_size"] = attr.data_size()
        ret["allocated_size"] = attr.allocated_size()
        if attr.allocated_size() > 0:
            for (offset, length) in attr.runlist().runs():
                ret["runs"].append({
                    "offset": offset,
                    "length": length,
                })
    else:
        # Resident: the value is stored inline in the record.
        ret["value_size"] = attr.value_length()
    return ret
def make_model(record, path):
    """Build the full dict model for one MFT record, ready for rendering.

    Includes $SI/$FN summaries, every attribute, resident INDX root entries
    (allocated and slack), a sorted timeline, and lazy string-extraction
    generators over the record's active and slack byte ranges.
    """
    active_data = record.active_data()
    slack_data = record.slack_data()
    model = {
        "magic": record.magic(),
        "path": path,
        "inode": record.inode,
        "is_active": record.is_active(),
        "is_directory": record.is_directory(),
        "size": 0,  # updated below
        "standard_information": make_standard_information_model(record.standard_information()),
        "filename_information": make_filename_information_model(record.filename_information()),
        "owner_id": 0,  # updated below
        "security_id": 0,  # updated below
        "quota_charged": 0,  # updated below
        "usn": 0,  # updated below
        "filenames": [],
        "attributes": [],
        "indx_entries": [],
        "slack_indx_entries": [],
        "timeline": get_timeline_entries(record),
        # NOTE: these are generators; they are consumed once by the template.
        "active_ascii_strings": ascii_strings(active_data),
        "active_unicode_strings": unicode_strings(active_data),
        "slack_ascii_strings": ascii_strings(slack_data),
        "slack_unicode_strings": unicode_strings(slack_data),
    }
    if not record.is_directory():
        # Prefer the data attribute's size; fall back to $FN's logical size.
        data_attr = record.data_attribute()
        if data_attr and data_attr.non_resident() > 0:
            model["size"] = data_attr.data_size()
        elif record.filename_information() is not None:
            model["size"] = record.filename_information().logical_size()
        else:
            model["size"] = 0
    for b in record.attributes():
        if b.type() != ATTR_TYPE.FILENAME_INFORMATION:
            continue
        attr = FilenameAttribute(b.value(), 0, record)
        model["filenames"].append(make_filename_information_model(attr))
    for b in record.attributes():
        model["attributes"].append(make_attribute_model(b))
    indxroot = record.attribute(ATTR_TYPE.INDEX_ROOT)
    if indxroot and indxroot.non_resident() == 0:
        # Resident INDX root: collect both allocated and slack entries.
        irh = IndexRootHeader(indxroot.value(), 0, False)
        for e in irh.node_header().entries():
            m = make_filename_information_model(e.filename_information())
            m["inode"] = MREF(e.mft_reference())
            m["sequence_num"] = MSEQNO(e.mft_reference())
            model["indx_entries"].append(m)
        for e in irh.node_header().slack_entries():
            m = make_filename_information_model(e.filename_information())
            m["inode"] = MREF(e.mft_reference())
            m["sequence_num"] = MSEQNO(e.mft_reference())
            model["slack_indx_entries"].append(m)
    return model
def format_record(record, path):
    """Render the human-readable report for `record` at `path`.

    Builds the dict model via make_model() and feeds it to a Jinja2 template.
    The template text below is program output — do not reformat it.
    """
    template = Template(
        """\
MFT Record: {{ record.inode }}
Path: {{ record.path }}
Metadata:
Active: {{ record.is_active }}
{% if record.is_directory %}\
Type: directory\
{% else %}\
Type: file\
{% endif %}
Flags: {{ record.standard_information.flags|join(', ') }}
$SI Modified: {{ record.standard_information.modified }}
$SI Accessed: {{ record.standard_information.accessed }}
$SI Changed: {{ record.standard_information.changed }}
$SI Birthed: {{ record.standard_information.created }}
Owner ID: {{ record.standard_information.owner_id }}
Security ID: {{ record.standard_information.security_id }}
Quota charged: {{ record.standard_information.quota_charged }}
USN: {{ record.standard_information.usn }}
Filenames: \
{% for filename in record.filenames %}
Type: {{ filename.type }}
Name: {{ filename.name }}
Flags: {{ filename.flags|join(', ') }}
Logical size: {{ filename.logical_size }}
Physical size: {{ filename.physical_size }}
Modified: {{ filename.modified }}
Accessed: {{ filename.accessed }}
Changed: {{ filename.changed }}
Birthed: {{ filename.created }}
Parent reference: {{ filename.parent_ref }}
Parent sequence number: {{ filename.parent_seq }}\
{% endfor %}
Attributes: \
{% for attribute in record.attributes %}
Type: {{ attribute.type }}
Name: {{ attribute.name }}
Flags: {{ attribute.flags|join(', ') }}
Resident: {{ attribute.is_resident }}
Data size: {{ attribute.data_size }}
Allocated size: {{ attribute.allocated_size }}
Value size: {{ attribute.value_size }} \
{% if attribute.runs %}
Data runs: {% for run in attribute.runs %}
Offset (clusters): {{ run.offset }} Length (clusters): {{ run.length }} \
{% endfor %}\
{% endif %}\
{% endfor %}
INDX root entries:\
{% if not record.indx_entries %}\
<none>\
{% endif %}\
{% for indx in record.indx_entries %}
Name: {{ indx.filename }}
Size: {{ indx.size }}
Modified: {{ indx.modified }}
Accessed: {{ indx.accessed }}
Changed: {{ indx.changed }}
Birthed: {{ indx.created }}
Reference: {{ indx.inode }}
Sequence number: {{ indx.sequence_num }}\
{% endfor %}
INDX root slack entries:\
{% if not record.slack_indx_entries %}\
<none>\
{% endif %}\
{% for indx in record.slack_indx_entries %}
Name: {{ indx.filename }}
Size: {{ indx.size }}
Modified: {{ indx.modified }}
Accessed: {{ indx.accessed }}
Changed: {{ indx.changed }}
Birthed: {{ indx.created }}
Reference: {{ indx.inode }}
Sequence number: {{ indx.sequence_num }}\
{% endfor %}
Timeline:
{% for entry in record.timeline %}\
{{ "%-30s%-12s%-8s%s"|format(entry.timestamp, entry.type, entry.source, entry.path) }}
{% endfor %}\
Active strings:
ASCII strings:
{% for string in record.active_ascii_strings %}\
{{ string }}
{% endfor %}\
Unicode strings:
{% for string in record.active_unicode_strings %}\
{{ string }}
{% endfor %}\
Slack strings:
ASCII strings:
{% for string in record.slack_ascii_strings %}\
{{ string }}
{% endfor %}\
Unicode strings:
{% for string in record.slack_unicode_strings %}\
{{ string }}
{% endfor %}\
""")
    # The template variable `record` is actually the flattened model dict.
    return template.render(record=make_model(record, path))
def print_indx_info(record, path):
    """Render `record` with format_record() and write it to stdout."""
    # Parenthesized so the statement is valid under both Python 2 and 3
    # (identical behavior in Python 2: printing a single value).
    print(format_record(record, path))
def main():
    """CLI entry point: look up one MFT record by number or path and print it."""
    parser = argparse.ArgumentParser(description='Inspect '
                                     'a given MFT file record.')
    parser.add_argument('-a', action="store", metavar="cache_size", type=int,
                        dest="cache_size", default=1024,
                        help="Size of cache.")
    # NOTE(review): nargs=1 makes an explicit -p yield a one-element list,
    # which would break the string concatenation below — confirm intent.
    parser.add_argument('-p', action="store", metavar="prefix",
                        nargs=1, dest="prefix", default="\\.",
                        help="Prefix paths with `prefix` rather than \\.\\")
    parser.add_argument('-v', action="store_true", dest="verbose",
                        help="Print debugging information")
    parser.add_argument('mft', action="store",
                        help="Path to MFT")
    parser.add_argument('record_or_path', action="store",
                        help="MFT record or file path to inspect")
    results = parser.parse_args()

    if results.verbose:
        logging.basicConfig(level=logging.DEBUG)

    with Mmap(results.mft) as buf:
        record_cache = Cache(results.cache_size)
        path_cache = Cache(results.cache_size)
        enum = MFTEnumerator(buf,
                             record_cache=record_cache,
                             path_cache=path_cache)
        # BUG FIX: previously int() parsing AND record lookup/printing shared
        # one try/except ValueError, so a ValueError raised while handling a
        # valid record number silently fell through to the path-lookup branch.
        # Decide the lookup mode first, then act outside the except clause.
        try:
            record_num = int(results.record_or_path)
            should_use_inode = True
        except ValueError:
            should_use_inode = False
        if should_use_inode:
            record = enum.get_record(record_num)
            path = results.prefix + enum.get_path(record)
            print_indx_info(record, path)
        else:
            path = results.record_or_path
            record = enum.get_record_by_path(path)
            print_indx_info(record, results.prefix + path)


if __name__ == "__main__":
    main()
get_file_info: fix bug handling inode specification
#! /usr/bin/env python
from MFT import MFTEnumerator
import array
import re
import logging
import datetime
import argparse
from jinja2 import Template
from BinaryParser import Mmap
from MFT import Cache
from MFT import ATTR_TYPE
from MFT import MREF
from MFT import MSEQNO
from MFT import IndexRootHeader
from MFT import Attribute
from MFT import FilenameAttribute
from MFT import StandardInformationFieldDoesNotExist
# Character set treated as printable ASCII when carving strings; regex
# metacharacters are pre-escaped for direct use inside a character class.
ASCII_BYTE = " !\"#\$%&\'\(\)\*\+,-\./0123456789:;<=>\?@ABCDEFGHIJKLMNOPQRSTUVWXYZ\[\]\^_`abcdefghijklmnopqrstuvwxyz\{\|\}\\\~"


def ascii_strings(buf, n=4):
    """Yield every run of `n` or more printable ASCII characters in `buf`."""
    matcher = re.compile("([%s]{%d,})" % (ASCII_BYTE, n))
    for found in matcher.finditer(buf):
        chunk = found.group()
        if isinstance(chunk, array.array):
            yield chunk.tostring().decode("ascii")
        else:
            yield chunk.decode("ascii")
def unicode_strings(buf, n=4):
    """Yield runs of at least `n` UTF-16LE printable-ASCII characters in `buf`.

    Matches sequences of (ascii byte, NUL) pairs — the little-endian UTF-16
    encoding of basic ASCII text. Hits that fail to decode are skipped.
    """
    # BUG FIX: the minimum run length was hard-coded as {4,}, silently
    # ignoring the `n` parameter; interpolate it like ascii_strings() does.
    reg = b"((?:[%s]\x00){%d,})" % (ASCII_BYTE, n)
    ascii_re = re.compile(reg)
    for match in ascii_re.finditer(buf):
        try:
            if isinstance(match.group(), array.array):
                yield match.group().tostring().decode("utf-16")
            else:
                yield match.group().decode("utf-16")
        except UnicodeDecodeError:
            pass
def get_flags(flags):
    """Translate the bitmask `flags` into a list of readable flag names."""
    names = []
    for bit, label in Attribute.FLAGS.items():
        if flags & bit:
            names.append(label)
    return names
def create_safe_datetime(fn):
    """Return `fn()`, substituting the Unix epoch when it raises ValueError."""
    try:
        value = fn()
    except ValueError:
        # Corrupt on-disk timestamps surface as ValueError; use the epoch.
        value = datetime.datetime(1970, 1, 1, 0, 0, 0)
    return value
def create_safe_timeline_entry(fn, type_, source, path):
    """Return one timeline dict; `fn` is a timestamp accessor guarded against ValueError."""
    return dict(
        timestamp=create_safe_datetime(fn),
        type=type_,
        source=source,
        path=path,
    )
def create_safe_timeline_entries(attr, source, path):
    """Expand one attribute into its four timestamp entries
    (birthed / accessed / modified / changed)."""
    pairs = (
        (attr.created_time, "birthed"),
        (attr.accessed_time, "accessed"),
        (attr.modified_time, "modified"),
        (attr.changed_time, "changed"),
    )
    return [create_safe_timeline_entry(getter, label, source, path)
            for getter, label in pairs]
def get_timeline_entries(record):
    """Collect timestamp entries for `record` from $SI, every $FN attribute,
    and any resident INDX root entries (allocated and slack), sorted by time.

    Returns an empty list when the record lacks standard or filename info.
    """
    entries = []
    si = record.standard_information()
    if si is None:
        return entries
    fn = record.filename_information()
    if fn is None:
        return entries
    filename = fn.filename()
    entries.extend(create_safe_timeline_entries(si, "$SI", filename))
    # One set of entries per FILENAME_INFORMATION attribute on the record.
    for b in record.attributes():
        if b.type() != ATTR_TYPE.FILENAME_INFORMATION:
            continue
        attr = FilenameAttribute(b.value(), 0, record)
        attr_filename = attr.filename()
        entries.extend(create_safe_timeline_entries(attr, "$FN", attr_filename))
    indxroot = record.attribute(ATTR_TYPE.INDEX_ROOT)
    # Only resident INDX roots are walked here.
    if indxroot and indxroot.non_resident() == 0:
        irh = IndexRootHeader(indxroot.value(), 0, False)
        for e in irh.node_header().entries():
            fn = e.filename_information()
            fn_filename = fn.filename()
            entries.extend(create_safe_timeline_entries(fn, "INDX", fn_filename))
        for e in irh.node_header().slack_entries():
            fn = e.filename_information()
            fn_filename = fn.filename()
            entries.extend(create_safe_timeline_entries(fn, "slack-INDX", fn_filename))
    return sorted(entries, key=lambda x: x["timestamp"])
def make_filename_information_model(attr):
    """Convert a FilenameAttribute into a plain dict for template rendering.

    Returns None when `attr` is None so callers/templates can test absence.
    """
    if attr is None:
        return None
    return {
        # filename_type() indexes into the four NTFS filename namespaces.
        "type": ["POSIX", "WIN32", "DOS 8.3", "WIN32 + DOS 8.3"][attr.filename_type()],
        "name": str(attr.filename()),
        "flags": get_flags(attr.flags()),
        "logical_size": attr.logical_size(),
        "physical_size": attr.physical_size(),
        "modified": create_safe_datetime(attr.modified_time),
        "accessed": create_safe_datetime(attr.accessed_time),
        "changed": create_safe_datetime(attr.changed_time),
        "created": create_safe_datetime(attr.created_time),
        "parent_ref": MREF(attr.mft_parent_reference()),
        "parent_seq": MSEQNO(attr.mft_parent_reference()),
    }
def make_standard_information_model(attr):
    """Convert a StandardInformation attribute into a plain dict.

    Returns None when `attr` is None. owner_id / security_id /
    quota_charged / usn exist only in the longer $SI layout, so they are
    added opportunistically and omitted on shorter records.
    """
    # (Removed a 12-line block of commented-out dead code that duplicated
    # the None handling with hard-coded epoch defaults.)
    if attr is None:
        return None
    ret = {
        "created": create_safe_datetime(attr.created_time),
        "modified": create_safe_datetime(attr.modified_time),
        "changed": create_safe_datetime(attr.changed_time),
        "accessed": create_safe_datetime(attr.accessed_time),
        "flags": get_flags(attr.attributes())
    }
    # Since the fields are sequential, we can handle an exception half way
    # through here and then ignore the remaining items — no per-field
    # try/except needed.
    try:
        ret["owner_id"] = attr.owner_id()
        ret["security_id"] = attr.security_id()
        ret["quota_charged"] = attr.quota_charged()
        ret["usn"] = attr.usn()
    except StandardInformationFieldDoesNotExist:
        pass
    return ret
def make_attribute_model(attr):
    """Summarize a raw MFT attribute (type, flags, sizes, data runs) as a dict."""
    ret = {
        "type": Attribute.TYPES[attr.type()],
        "name": attr.name(),
        "flags": get_flags(attr.flags()),
        "is_resident": attr.non_resident() == 0,
        "data_size": 0,
        "allocated_size": 0,
        "value_size": 0,
        "runs": [],
    }
    if attr.non_resident() > 0:
        # Non-resident: sizes come from the header, content from data runs.
        ret["data_size"] = attr.data_size()
        ret["allocated_size"] = attr.allocated_size()
        if attr.allocated_size() > 0:
            for (offset, length) in attr.runlist().runs():
                ret["runs"].append({
                    "offset": offset,
                    "length": length,
                })
    else:
        # Resident: the value is stored inline in the record.
        ret["value_size"] = attr.value_length()
    return ret
def make_model(record, path):
    """Build the full dict model for one MFT record, ready for rendering.

    Includes $SI/$FN summaries, every attribute, resident INDX root entries
    (allocated and slack), a sorted timeline, and lazy string-extraction
    generators over the record's active and slack byte ranges.
    """
    active_data = record.active_data()
    slack_data = record.slack_data()
    model = {
        "magic": record.magic(),
        "path": path,
        "inode": record.inode,
        "is_active": record.is_active(),
        "is_directory": record.is_directory(),
        "size": 0,  # updated below
        "standard_information": make_standard_information_model(record.standard_information()),
        "filename_information": make_filename_information_model(record.filename_information()),
        "owner_id": 0,  # updated below
        "security_id": 0,  # updated below
        "quota_charged": 0,  # updated below
        "usn": 0,  # updated below
        "filenames": [],
        "attributes": [],
        "indx_entries": [],
        "slack_indx_entries": [],
        "timeline": get_timeline_entries(record),
        # NOTE: these are generators; they are consumed once by the template.
        "active_ascii_strings": ascii_strings(active_data),
        "active_unicode_strings": unicode_strings(active_data),
        "slack_ascii_strings": ascii_strings(slack_data),
        "slack_unicode_strings": unicode_strings(slack_data),
    }
    if not record.is_directory():
        # Prefer the data attribute's size; fall back to $FN's logical size.
        data_attr = record.data_attribute()
        if data_attr and data_attr.non_resident() > 0:
            model["size"] = data_attr.data_size()
        elif record.filename_information() is not None:
            model["size"] = record.filename_information().logical_size()
        else:
            model["size"] = 0
    for b in record.attributes():
        if b.type() != ATTR_TYPE.FILENAME_INFORMATION:
            continue
        attr = FilenameAttribute(b.value(), 0, record)
        model["filenames"].append(make_filename_information_model(attr))
    for b in record.attributes():
        model["attributes"].append(make_attribute_model(b))
    indxroot = record.attribute(ATTR_TYPE.INDEX_ROOT)
    if indxroot and indxroot.non_resident() == 0:
        # Resident INDX root: collect both allocated and slack entries.
        irh = IndexRootHeader(indxroot.value(), 0, False)
        for e in irh.node_header().entries():
            m = make_filename_information_model(e.filename_information())
            m["inode"] = MREF(e.mft_reference())
            m["sequence_num"] = MSEQNO(e.mft_reference())
            model["indx_entries"].append(m)
        for e in irh.node_header().slack_entries():
            m = make_filename_information_model(e.filename_information())
            m["inode"] = MREF(e.mft_reference())
            m["sequence_num"] = MSEQNO(e.mft_reference())
            model["slack_indx_entries"].append(m)
    return model
def format_record(record, path):
    """Render the human-readable report for `record` at `path`.

    Builds the dict model via make_model() and feeds it to a Jinja2 template.
    The template text below is program output — do not reformat it.
    """
    template = Template(
        """\
MFT Record: {{ record.inode }}
Path: {{ record.path }}
Metadata:
Active: {{ record.is_active }}
{% if record.is_directory %}\
Type: directory\
{% else %}\
Type: file\
{% endif %}
Flags: {{ record.standard_information.flags|join(', ') }}
$SI Modified: {{ record.standard_information.modified }}
$SI Accessed: {{ record.standard_information.accessed }}
$SI Changed: {{ record.standard_information.changed }}
$SI Birthed: {{ record.standard_information.created }}
Owner ID: {{ record.standard_information.owner_id }}
Security ID: {{ record.standard_information.security_id }}
Quota charged: {{ record.standard_information.quota_charged }}
USN: {{ record.standard_information.usn }}
Filenames: \
{% for filename in record.filenames %}
Type: {{ filename.type }}
Name: {{ filename.name }}
Flags: {{ filename.flags|join(', ') }}
Logical size: {{ filename.logical_size }}
Physical size: {{ filename.physical_size }}
Modified: {{ filename.modified }}
Accessed: {{ filename.accessed }}
Changed: {{ filename.changed }}
Birthed: {{ filename.created }}
Parent reference: {{ filename.parent_ref }}
Parent sequence number: {{ filename.parent_seq }}\
{% endfor %}
Attributes: \
{% for attribute in record.attributes %}
Type: {{ attribute.type }}
Name: {{ attribute.name }}
Flags: {{ attribute.flags|join(', ') }}
Resident: {{ attribute.is_resident }}
Data size: {{ attribute.data_size }}
Allocated size: {{ attribute.allocated_size }}
Value size: {{ attribute.value_size }} \
{% if attribute.runs %}
Data runs: {% for run in attribute.runs %}
Offset (clusters): {{ run.offset }} Length (clusters): {{ run.length }} \
{% endfor %}\
{% endif %}\
{% endfor %}
INDX root entries:\
{% if not record.indx_entries %}\
<none>\
{% endif %}\
{% for indx in record.indx_entries %}
Name: {{ indx.filename }}
Size: {{ indx.size }}
Modified: {{ indx.modified }}
Accessed: {{ indx.accessed }}
Changed: {{ indx.changed }}
Birthed: {{ indx.created }}
Reference: {{ indx.inode }}
Sequence number: {{ indx.sequence_num }}\
{% endfor %}
INDX root slack entries:\
{% if not record.slack_indx_entries %}\
<none>\
{% endif %}\
{% for indx in record.slack_indx_entries %}
Name: {{ indx.filename }}
Size: {{ indx.size }}
Modified: {{ indx.modified }}
Accessed: {{ indx.accessed }}
Changed: {{ indx.changed }}
Birthed: {{ indx.created }}
Reference: {{ indx.inode }}
Sequence number: {{ indx.sequence_num }}\
{% endfor %}
Timeline:
{% for entry in record.timeline %}\
{{ "%-30s%-12s%-8s%s"|format(entry.timestamp, entry.type, entry.source, entry.path) }}
{% endfor %}\
Active strings:
ASCII strings:
{% for string in record.active_ascii_strings %}\
{{ string }}
{% endfor %}\
Unicode strings:
{% for string in record.active_unicode_strings %}\
{{ string }}
{% endfor %}\
Slack strings:
ASCII strings:
{% for string in record.slack_ascii_strings %}\
{{ string }}
{% endfor %}\
Unicode strings:
{% for string in record.slack_unicode_strings %}\
{{ string }}
{% endfor %}\
""")
    # The template variable `record` is actually the flattened model dict.
    return template.render(record=make_model(record, path))
def print_indx_info(record, path):
    """Render `record` with format_record() and write it to stdout."""
    # Parenthesized so the statement is valid under both Python 2 and 3
    # (identical behavior in Python 2: printing a single value).
    print(format_record(record, path))
def main():
    """CLI entry point: look up one MFT record by number or path and print it."""
    parser = argparse.ArgumentParser(description='Inspect '
                                     'a given MFT file record.')
    parser.add_argument('-a', action="store", metavar="cache_size", type=int,
                        dest="cache_size", default=1024,
                        help="Size of cache.")
    # NOTE(review): nargs=1 makes an explicit -p yield a one-element list,
    # which would break the string concatenation below — confirm intent.
    parser.add_argument('-p', action="store", metavar="prefix",
                        nargs=1, dest="prefix", default="\\.",
                        help="Prefix paths with `prefix` rather than \\.\\")
    parser.add_argument('-v', action="store_true", dest="verbose",
                        help="Print debugging information")
    parser.add_argument('mft', action="store",
                        help="Path to MFT")
    parser.add_argument('record_or_path', action="store",
                        help="MFT record or file path to inspect")
    results = parser.parse_args()
    if results.verbose:
        logging.basicConfig(level=logging.DEBUG)
    with Mmap(results.mft) as buf:
        record_cache = Cache(results.cache_size)
        path_cache = Cache(results.cache_size)
        enum = MFTEnumerator(buf,
                             record_cache=record_cache,
                             path_cache=path_cache)
        # Decide the lookup mode up front so a ValueError raised while
        # handling a valid record number cannot fall into the path branch.
        should_use_inode = False
        try:
            record_num = int(results.record_or_path)
            should_use_inode = True
        except ValueError:
            should_use_inode = False
        if should_use_inode:
            record = enum.get_record(record_num)
            path = results.prefix + enum.get_path(record)
            print_indx_info(record, path)
        else:
            path = results.record_or_path
            record = enum.get_record_by_path(path)
            print_indx_info(record, results.prefix + path)


if __name__ == "__main__":
    main()
|
from django.contrib.auth import login
from django.shortcuts import render, get_object_or_404
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
from cfp.models import PaperApplication
from cfp.choices import TALK_DURATIONS
from talks.models import Talk
from django.db import IntegrityError
from django.http import JsonResponse, Http404
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from .decorators import require_ticket_holder
from .models import Vote, VoteToken
def authenticate_by_vote_token(request, vote_token):
    """Log in the user owning `vote_token`; raise Http404 for unknown tokens."""
    try:
        token = VoteToken.objects.get(ticket_code=vote_token)
    except VoteToken.DoesNotExist:
        raise Http404()
    user = token.user
    # login() requires a backend attribute because authenticate() was bypassed.
    user.backend = settings.AUTHENTICATION_BACKENDS[0]
    login(request, user)
def voting(request, vote_token=None):
    """Render the talk-voting page, optionally authenticating via `vote_token`."""
    if vote_token:
        authenticate_by_vote_token(request, vote_token)
    already_picked = [t.application_id for t in Talk.objects.all()]
    # NOTE(review): exclude() with two conditions removes only applications
    # that are BOTH already picked AND flagged exclude=True; if the intent is
    # to drop either kind, this needs two chained exclude() calls — confirm.
    applications = PaperApplication.objects.filter(
        cfp_id=settings.ACTIVE_CFP_ID, duration=TALK_DURATIONS.MIN_25
    ).exclude(
        id__in=already_picked, exclude=True
    ).order_by('title')
    if request.user.is_authenticated() and request.user.is_ticket_holder():
        # Include boolean attribute to check if the user alerady voted for this talk
        votes = Vote.objects.filter(user=request.user,
                                    application_id__in=[x.pk for x in applications])\
                            .values_list('application_id', flat=True)
        for application in applications:
            application.voted = False
            if application.pk in votes:
                application.voted = True
    return render(request, 'voting/voting.html', {
        'applications': applications,
        'voting_enabled': settings.VOTING_ENABLED
    })
@login_required
@require_ticket_holder
@require_POST
@csrf_exempt
def add_vote(request, application_id):
    """Record the current user's vote for one application; 404 on unknown id."""
    application = get_object_or_404(PaperApplication, id=application_id)
    try:
        Vote.objects.create(application=application, user=request.user)
    except IntegrityError:
        # A duplicate (application, user) vote raises IntegrityError.
        return JsonResponse(
            data={"error": "You already voted for this talk.", "message": None})
    return JsonResponse(
        data={"error": None, "message": "Vote saved."})
@login_required
@require_ticket_holder
@require_POST
@csrf_exempt
def remove_vote(request, application_id):
    """Delete the current user's vote for the given application; 404 if absent."""
    get_object_or_404(Vote, application_id=application_id, user=request.user).delete()
    return JsonResponse(data={"message": "Vote deleted."})
Allow only ticket holders to vote
from django.contrib.auth import login
from django.shortcuts import render, get_object_or_404
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
from cfp.models import PaperApplication
from cfp.choices import TALK_DURATIONS
from talks.models import Talk
from django.db import IntegrityError
from django.http import JsonResponse, Http404, HttpResponseForbidden
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from .decorators import require_ticket_holder
from .models import Vote, VoteToken
def authenticate_by_vote_token(request, vote_token):
    """Authenticate the request's session as the owner of `vote_token`.

    Raises Http404 when no VoteToken matches.
    """
    try:
        user = VoteToken.objects.get(ticket_code=vote_token).user
        # login() needs a backend set because authenticate() was not used.
        user.backend = settings.AUTHENTICATION_BACKENDS[0]
        login(request, user)
    except VoteToken.DoesNotExist:
        raise Http404()
def voting(request, vote_token=None):
    """Render the talk-voting page; only ticket holders may participate.

    When `vote_token` is supplied, the matching user is logged in first.
    Non-ticket-holders (including anonymous visitors) receive a 403.
    """
    if vote_token:
        authenticate_by_vote_token(request, vote_token)
    # BUG FIX: anonymous users have no is_ticket_holder() method, so the bare
    # check raised AttributeError; require authentication first so anonymous
    # visitors get a clean 403 instead of a server error.
    if not (request.user.is_authenticated() and request.user.is_ticket_holder()):
        return HttpResponseForbidden("You need to be a ticket holder to participate in user voting.")
    already_picked = [t.application_id for t in Talk.objects.all()]
    # NOTE(review): exclude() with two conditions removes only applications
    # that are BOTH already picked AND flagged exclude=True — confirm intent.
    applications = PaperApplication.objects.filter(
        cfp_id=settings.ACTIVE_CFP_ID, duration=TALK_DURATIONS.MIN_25
    ).exclude(
        id__in=already_picked, exclude=True
    ).order_by('title')
    if request.user.is_authenticated() and request.user.is_ticket_holder():
        # Flag each application with whether this user already voted for it.
        votes = Vote.objects.filter(user=request.user,
                                    application_id__in=[x.pk for x in applications])\
                            .values_list('application_id', flat=True)
        for application in applications:
            application.voted = application.pk in votes
    return render(request, 'voting/voting.html', {
        'applications': applications,
        'voting_enabled': settings.VOTING_ENABLED
    })
@login_required
@require_ticket_holder
@require_POST
@csrf_exempt
def add_vote(request, application_id):
    """Create a Vote by the current user for the given application.

    Returns JSON with either a success message or an "already voted" error
    (a duplicate vote surfaces as IntegrityError).
    """
    application = get_object_or_404(PaperApplication, id=application_id)
    try:
        Vote.objects.create(
            application=application,
            user=request.user)
        return JsonResponse(
            data={"error": None, "message": "Vote saved."})
    except IntegrityError:
        return JsonResponse(
            data={"error": "You already voted for this talk.", "message": None})
@login_required
@require_ticket_holder
@require_POST
@csrf_exempt
def remove_vote(request, application_id):
    """Delete the current user's vote for the given application; 404 if absent."""
    vote = get_object_or_404(Vote, application_id=application_id, user=request.user)
    vote.delete()
    return JsonResponse(
        data={"message": "Vote deleted."})
|
import os
import tempfile
from wsgiref.util import FileWrapper
from django.conf import settings
from django.utils.translation import ugettext as _
from django.urls import reverse
from couchexport.models import Format
from dimagi.utils.django.email import send_HTML_email
from dimagi.utils.web import get_url_base
from soil import DownloadBase, CachedDownload, FileDownload, MultipleTaskDownload, BlobDownload
from soil.exceptions import TaskFailedError
from soil.progress import get_task_status
from corehq.util.view_utils import absolute_reverse
from corehq.blobs import CODES, get_blob_db
from corehq.util.files import safe_filename_header
from zipfile import ZipFile
def expose_cached_download(payload, expiry, file_extension, mimetype=None,
                           content_disposition=None, download_id=None,
                           extras=None):
    """
    Expose a cache download object.

    Creates a CachedDownload holding *payload* (suffix taken from
    *file_extension*) and persists it for *expiry* seconds. Returns the
    download reference.
    """
    ref = CachedDownload.create(payload, expiry, mimetype=mimetype,
                                content_disposition=content_disposition,
                                download_id=download_id, extras=extras,
                                suffix=file_extension)
    ref.save(expiry)
    return ref
def expose_file_download(path, expiry, **kwargs):
    """
    Expose a file download object that potentially uses the external drive

    Extra keyword arguments (e.g. use_transfer, mimetype) are forwarded to
    FileDownload. Returns the saved download reference.
    """
    ref = FileDownload(path, **kwargs)
    ref.save(expiry)
    return ref
def expose_blob_download(
        identifier,
        expiry,
        mimetype='text/plain',
        content_disposition=None,
        download_id=None):
    """
    Expose a blob object for download

    Note: this only registers the download reference; the caller is expected
    to put the actual bytes into the blob db separately (see TODO below).
    """
    # TODO add file parameter and refactor blob_db.put(...) into this method
    ref = BlobDownload(
        identifier,
        mimetype=mimetype,
        content_disposition=content_disposition,
        download_id=download_id,
    )
    ref.save(expiry)
    return ref
def get_download_context(download_id, message=None, require_result=False):
    """Build the polling/template context for a soil download.

    :param require_result: If set to True, is_ready will not be set to True unless result is
    also available. If check_state=False, this is ignored.
    :raises TaskFailedError: when the underlying task failed; the wrapped
        exception's class name is forwarded so callers can match on it.
    """
    download_data = DownloadBase.get(download_id)
    if download_data is None:
        # Unknown id: fall back to a stub so the context can still render.
        download_data = DownloadBase(download_id=download_id)
    task = download_data.task
    task_status = get_task_status(
        task, is_multiple_download_task=isinstance(download_data, MultipleTaskDownload))
    if task_status.failed():
        # Celery replaces exceptions with a wrapped one that we can't directly import
        # so I think our best choice is to match off the name, even though that's hacky
        exception_name = (task.result.__class__.__name__
                          if isinstance(task.result, Exception) else None)
        raise TaskFailedError(task_status.error, exception_name=exception_name)
    if require_result:
        is_ready = task_status.success() and task_status.result is not None
    else:
        is_ready = task_status.success()
    return {
        'result': task_status.result,
        'error': task_status.error,
        'is_ready': is_ready,
        'is_alive': True,  # TODO: Fix this
        'progress': task_status.progress._asdict(),
        'download_id': download_id,
        'allow_dropbox_sync': isinstance(download_data, FileDownload) and download_data.use_transfer,
        # NOTE(review): download_data can no longer be None here (stubbed
        # above), so the None check is redundant but harmless.
        'has_file': download_data is not None and download_data.has_file,
        'custom_message': message,
    }
def process_email_request(domain, download_id, email_address):
    """Email the user a link to a completed export download.

    Includes a Dropbox-upload link only when the download supports dropbox
    sync; a failed task simply omits that link rather than erroring here.
    """
    dropbox_url = absolute_reverse('dropbox_upload', args=(download_id,))
    download_url = "{}?get_file".format(absolute_reverse('retrieve_download', args=(download_id,)))
    try:
        allow_dropbox_sync = get_download_context(download_id).get('allow_dropbox_sync', False)
    except TaskFailedError:
        # Best effort: no dropbox link if we can't read the task state.
        allow_dropbox_sync = False
    dropbox_message = ''
    if allow_dropbox_sync:
        dropbox_message = _('<br/><br/>You can also upload your data to Dropbox with the link below:<br/>'
                            '{}').format(dropbox_url)
    email_body = _('Your CommCare export for {} is ready! Click on the link below to download your requested data:'
                   '<br/>{}{}').format(domain, download_url, dropbox_message)
    send_HTML_email(_('CommCare Export Complete'), email_address, email_body)
def get_task(task_id):
    """Return the celery AsyncResult for *task_id*.

    NOTE(review): celery.task.base is a legacy import path; newer celery
    exposes AsyncResult from celery.result — confirm the installed version.
    """
    from celery.task.base import Task
    return Task.AsyncResult(task_id)
def get_download_file_path(use_transfer, filename):
    """Return a filesystem path to write a download file to.

    With *use_transfer*, the file lives on the shared transfer drive under
    *filename*; otherwise a fresh temp file path is returned.
    """
    if use_transfer:
        fpath = os.path.join(settings.SHARED_DRIVE_CONF.transfer_dir, filename)
    else:
        # mkstemp returns an open OS-level file descriptor; close it so it
        # isn't leaked — callers only ever use the path.
        fd, fpath = tempfile.mkstemp()
        os.close(fd)
    return fpath
def expose_download(use_transfer, file_path, filename, download_id, file_type):
    """Register *file_path* as a downloadable file under *download_id*.

    Uses a FileDownload when the shared transfer drive is in use, otherwise
    caches the file contents. Download links expire after one hour.
    """
    common_kwargs = {
        'mimetype': Format.from_format(file_type).mimetype,
        'content_disposition': 'attachment; filename="{fname}"'.format(fname=filename),
        'download_id': download_id,
        'expiry': (1 * 60 * 60),
    }
    if use_transfer:
        expose_file_download(
            file_path,
            use_transfer=use_transfer,
            **common_kwargs
        )
    else:
        # Ownership of the open handle passes to the cached download payload.
        expose_cached_download(
            FileWrapper(open(file_path, 'rb')),
            file_extension=file_type,
            **common_kwargs
        )
def expose_zipped_blob_download(data_path, filename, format, domain):
    """Expose zipped file content as a blob download
    :param data_path: Path to data file. Will be deleted.
    :param filename: File name.
    :param format: `couchexport.models.Format` constant.
    :param domain: Domain name.
    :returns: A link to download the file.
    """
    try:
        # mkstemp returns an open OS-level fd; close it so it isn't leaked.
        # ZipFile reopens the file by path below.
        fd, zip_temp_path = tempfile.mkstemp(".zip")
        os.close(fd)
        with ZipFile(zip_temp_path, 'w') as zip_file_:
            zip_file_.write(data_path, filename)
    finally:
        # The source data file is always consumed, even if zipping fails.
        os.remove(data_path)
    try:
        expiry_mins = 60 * 24
        file_format = Format.from_format(format)
        file_name_header = safe_filename_header(filename, file_format.extension)
        ref = expose_blob_download(
            filename,
            expiry=expiry_mins * 60,
            mimetype=file_format.mimetype,
            content_disposition=file_name_header
        )
        with open(zip_temp_path, 'rb') as file_:
            get_blob_db().put(
                file_,
                domain=domain,
                parent_id=domain,
                type_code=CODES.tempfile,
                key=ref.download_id,
                # NOTE(review): blob timeout is passed in minutes while the
                # download ref expiry above is seconds — confirm units match
                # the blob db API.
                timeout=expiry_mins
            )
    finally:
        os.remove(zip_temp_path)
    return "%s%s?%s" % (
        get_url_base(),
        reverse('retrieve_download', kwargs={'download_id': ref.download_id}),
        "get_file"  # download immediately rather than rendering page
    )
Add _is_alive for soil downloads
import datetime
import os
import tempfile
from wsgiref.util import FileWrapper
from zipfile import ZipFile
from django.conf import settings
from django.urls import reverse
from django.utils.translation import ugettext as _
from couchexport.models import Format
from dimagi.utils.django.email import send_HTML_email
from dimagi.utils.web import get_url_base
from soil import (
BlobDownload,
CachedDownload,
DownloadBase,
FileDownload,
MultipleTaskDownload,
)
from soil.exceptions import TaskFailedError
from soil.progress import get_task_status
from corehq.blobs import CODES, get_blob_db
from corehq.celery_monitoring.heartbeat import (
Heartbeat,
HeartbeatNeverRecorded,
)
from corehq.util.files import safe_filename_header
from corehq.util.view_utils import absolute_reverse
def expose_cached_download(payload, expiry, file_extension, mimetype=None,
                           content_disposition=None, download_id=None,
                           extras=None):
    """Create, persist, and return a cached download holding *payload*.

    The payload is stored for *expiry* seconds with *file_extension* used
    as the stored file's suffix.
    """
    download = CachedDownload.create(
        payload,
        expiry,
        mimetype=mimetype,
        content_disposition=content_disposition,
        download_id=download_id,
        extras=extras,
        suffix=file_extension,
    )
    download.save(expiry)
    return download
def expose_file_download(path, expiry, **kwargs):
    """Create, persist, and return a FileDownload for *path*.

    May reference the external/shared drive; extra keyword arguments are
    forwarded to FileDownload unchanged.
    """
    download = FileDownload(path, **kwargs)
    download.save(expiry)
    return download
def expose_blob_download(
        identifier,
        expiry,
        mimetype='text/plain',
        content_disposition=None,
        download_id=None):
    """
    Expose a blob object for download

    Only registers the download reference; the caller must put the actual
    bytes into the blob db separately (see TODO).
    """
    # TODO add file parameter and refactor blob_db.put(...) into this method
    ref = BlobDownload(
        identifier,
        mimetype=mimetype,
        content_disposition=content_disposition,
        download_id=download_id,
    )
    ref.save(expiry)
    return ref
def get_download_context(download_id, message=None, require_result=False):
    """Build the polling/template context for a soil download.

    :param require_result: If set to True, is_ready will not be set to True unless result is
    also available. If check_state=False, this is ignored.
    :raises TaskFailedError: when the underlying task failed; the wrapped
        exception's class name is forwarded so callers can match on it.
    """
    download_data = DownloadBase.get(download_id)
    if download_data is None:
        # Unknown id: fall back to a stub so the context can still render.
        download_data = DownloadBase(download_id=download_id)
    task = download_data.task
    task_status = get_task_status(
        task, is_multiple_download_task=isinstance(download_data, MultipleTaskDownload))
    if task_status.failed():
        # Celery replaces exceptions with a wrapped one that we can't directly import
        # so I think our best choice is to match off the name, even though that's hacky
        exception_name = (task.result.__class__.__name__
                          if isinstance(task.result, Exception) else None)
        raise TaskFailedError(task_status.error, exception_name=exception_name)
    if require_result:
        is_ready = task_status.success() and task_status.result is not None
    else:
        is_ready = task_status.success()
    return {
        'result': task_status.result,
        'error': task_status.error,
        'is_ready': is_ready,
        # Heartbeat-based liveness of the celery workers (see _is_alive).
        'is_alive': _is_alive(),
        'progress': task_status.progress._asdict(),
        'download_id': download_id,
        'allow_dropbox_sync': isinstance(download_data, FileDownload) and download_data.use_transfer,
        # NOTE(review): download_data can no longer be None here (stubbed
        # above), so the None check is redundant but harmless.
        'has_file': download_data is not None and download_data.has_file,
        'custom_message': message,
    }
def _is_alive():
    """Report whether the periodic celery queue looks healthy.

    Healthy means its heartbeat has been recorded and the queue has not
    been blocked for more than five minutes.
    """
    queue_name = getattr(settings, 'CELERY_PERIODIC_QUEUE', 'celery')
    try:
        blockage = Heartbeat(queue_name).get_blockage_duration()
    except HeartbeatNeverRecorded:
        return False
    return blockage <= datetime.timedelta(minutes=5)
def process_email_request(domain, download_id, email_address):
    """Email the user a link to a completed export download.

    Includes a Dropbox-upload link only when the download supports dropbox
    sync; a failed task simply omits that link rather than erroring here.
    """
    dropbox_url = absolute_reverse('dropbox_upload', args=(download_id,))
    download_url = "{}?get_file".format(absolute_reverse('retrieve_download', args=(download_id,)))
    try:
        allow_dropbox_sync = get_download_context(download_id).get('allow_dropbox_sync', False)
    except TaskFailedError:
        # Best effort: no dropbox link if we can't read the task state.
        allow_dropbox_sync = False
    dropbox_message = ''
    if allow_dropbox_sync:
        dropbox_message = _('<br/><br/>You can also upload your data to Dropbox with the link below:<br/>'
                            '{}').format(dropbox_url)
    email_body = _('Your CommCare export for {} is ready! Click on the link below to download your requested data:'
                   '<br/>{}{}').format(domain, download_url, dropbox_message)
    send_HTML_email(_('CommCare Export Complete'), email_address, email_body)
def get_task(task_id):
    """Return the celery AsyncResult for *task_id*.

    NOTE(review): celery.task.base is a legacy import path; newer celery
    exposes AsyncResult from celery.result — confirm the installed version.
    """
    from celery.task.base import Task
    return Task.AsyncResult(task_id)
def get_download_file_path(use_transfer, filename):
    """Return a filesystem path to write a download file to.

    With *use_transfer*, the file lives on the shared transfer drive under
    *filename*; otherwise a fresh temp file path is returned.
    """
    if use_transfer:
        fpath = os.path.join(settings.SHARED_DRIVE_CONF.transfer_dir, filename)
    else:
        # mkstemp returns an open OS-level file descriptor; close it so it
        # isn't leaked — callers only ever use the path.
        fd, fpath = tempfile.mkstemp()
        os.close(fd)
    return fpath
def expose_download(use_transfer, file_path, filename, download_id, file_type):
    """Register *file_path* as a downloadable file under *download_id*.

    Uses a FileDownload when the shared transfer drive is in use, otherwise
    caches the file contents. Download links expire after one hour.
    """
    common_kwargs = {
        'mimetype': Format.from_format(file_type).mimetype,
        'content_disposition': 'attachment; filename="{fname}"'.format(fname=filename),
        'download_id': download_id,
        'expiry': (1 * 60 * 60),
    }
    if use_transfer:
        expose_file_download(
            file_path,
            use_transfer=use_transfer,
            **common_kwargs
        )
    else:
        # Ownership of the open handle passes to the cached download payload.
        expose_cached_download(
            FileWrapper(open(file_path, 'rb')),
            file_extension=file_type,
            **common_kwargs
        )
def expose_zipped_blob_download(data_path, filename, format, domain):
    """Expose zipped file content as a blob download
    :param data_path: Path to data file. Will be deleted.
    :param filename: File name.
    :param format: `couchexport.models.Format` constant.
    :param domain: Domain name.
    :returns: A link to download the file.
    """
    try:
        # mkstemp returns an open OS-level fd; close it so it isn't leaked.
        # ZipFile reopens the file by path below.
        fd, zip_temp_path = tempfile.mkstemp(".zip")
        os.close(fd)
        with ZipFile(zip_temp_path, 'w') as zip_file_:
            zip_file_.write(data_path, filename)
    finally:
        # The source data file is always consumed, even if zipping fails.
        os.remove(data_path)
    try:
        expiry_mins = 60 * 24
        file_format = Format.from_format(format)
        file_name_header = safe_filename_header(filename, file_format.extension)
        ref = expose_blob_download(
            filename,
            expiry=expiry_mins * 60,
            mimetype=file_format.mimetype,
            content_disposition=file_name_header
        )
        with open(zip_temp_path, 'rb') as file_:
            get_blob_db().put(
                file_,
                domain=domain,
                parent_id=domain,
                type_code=CODES.tempfile,
                key=ref.download_id,
                # NOTE(review): blob timeout is passed in minutes while the
                # download ref expiry above is seconds — confirm units match
                # the blob db API.
                timeout=expiry_mins
            )
    finally:
        os.remove(zip_temp_path)
    return "%s%s?%s" % (
        get_url_base(),
        reverse('retrieve_download', kwargs={'download_id': ref.download_id}),
        "get_file"  # download immediately rather than rendering page
    )
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import unittest2
from mom.builtins import is_bytes
from mom.codec import bytes_to_long
from mom.security.random import \
generate_random_hex_string, generate_random_ulong_between, \
generate_random_bits, generate_random_ulong_atmost, \
generate_random_ulong_exactly, ALPHANUMERIC, \
ASCII_PRINTABLE, ALPHA, LOWERCASE_ALPHANUMERIC, \
LOWERCASE_ALPHA, DIGITS, generate_random_password, \
generate_random_sequence, calculate_entropy
class Test_generate_random_bits(unittest2.TestCase):
    """Tests for generate_random_bits: random bytes, range, and argument checks."""

    def test_range(self):
        # A 4-bit sample must decode to an integer in [0, 2**4).
        for i in range(999):
            n_bits = 4
            value = bytes_to_long(generate_random_bits(n_bits))
            self.assertTrue(value >= 0 and value < (2L ** n_bits))

    def test_uniqueness(self):
        # The likelihood of recurrence should be tiny if a large enough
        # bit size is chosen.
        self.assertNotEqual(generate_random_bits(64), generate_random_bits(64))

    def test_ValueError_when_0_bits(self):
        self.assertRaises(ValueError, generate_random_bits, 0)

    def test_TypeError_when_invalid_argument(self):
        # Booleans are deliberately rejected even though bool is an int subclass.
        self.assertRaises(TypeError, generate_random_bits, None)
        self.assertRaises(TypeError, generate_random_bits, {})
        self.assertRaises(TypeError, generate_random_bits, object)
        self.assertRaises(TypeError, generate_random_bits, True)
        self.assertRaises(TypeError, generate_random_bits, "")
class Test_generate_random_ulong_exactly(unittest2.TestCase):
    """Tests for generate_random_ulong_exactly: value has exactly n_bits bits."""

    def test_range(self):
        for i in range(128):
            n_bits = i + 1
            for j in range(128):
                x = generate_random_ulong_exactly(n_bits)
                # Ensure high bit is set
                #self.assertTrue(x & (2L ** (n_bits - 1)))
                self.assertTrue(x >= (2L ** (n_bits - 1)) and
                                x < (2L ** n_bits), "huh? x=%d" % x)

    def test_ValueError_when_0_bits(self):
        self.assertRaises(ValueError, generate_random_ulong_exactly, 0)

    def test_TypeError_when_invalid_argument(self):
        self.assertRaises(TypeError, generate_random_ulong_exactly, None)
        self.assertRaises(TypeError, generate_random_ulong_exactly, {})
        self.assertRaises(TypeError, generate_random_ulong_exactly, object)
        self.assertRaises(TypeError, generate_random_ulong_exactly, True)
        self.assertRaises(TypeError, generate_random_ulong_exactly, "")
class Test_generate_random_ulong_atmost(unittest2.TestCase):
    """Tests for generate_random_ulong_atmost: value fits in at most n_bits bits."""

    def test_range(self):
        for i in range(128):
            n_bits = i + 1
            for j in range(128):
                x = generate_random_ulong_atmost(n_bits)
                self.assertTrue(x >= 0 and x < (2L ** n_bits),
                                "huh? x=%d" % x)

    def test_ValueError_when_0_bits(self):
        self.assertRaises(ValueError, generate_random_ulong_atmost, 0)

    def test_TypeError_when_invalid_argument(self):
        self.assertRaises(TypeError, generate_random_ulong_atmost, None)
        self.assertRaises(TypeError, generate_random_ulong_atmost, {})
        self.assertRaises(TypeError, generate_random_ulong_atmost, object)
        self.assertRaises(TypeError, generate_random_ulong_atmost, True)
        self.assertRaises(TypeError, generate_random_ulong_atmost, "")
class Test_generate_random_hex_string(unittest2.TestCase):
    """Tests for generate_random_hex_string: length, uniqueness, type, and errors."""

    def test_length(self):
        default_length = 8
        self.assertEqual(len(generate_random_hex_string()), default_length,
                         "Length does not match "\
                         "default expected length of %d." % default_length)
        self.assertEqual(len(generate_random_hex_string(length=10)), 10,
                         "Length does not match expected length.")

    def test_uniqueness(self):
        # The likelihood of recurrence should be tiny if a large enough
        # length is chosen.
        self.assertNotEqual(generate_random_hex_string(),
                            generate_random_hex_string(),
                            "Not unique.")

    def test_is_string(self):
        self.assertTrue(is_bytes(generate_random_hex_string()),
                        "Not a bytestring.")

    def test_TypeError_if_invalid_length_type(self):
        self.assertRaises(TypeError, generate_random_hex_string, None)
        self.assertRaises(TypeError, generate_random_hex_string, "")

    def test_raises_ValueError_if_invalid_length(self):
        # NOTE(review): the length-33 case is asserted twice; one line is
        # redundant. Booleans are also rejected as invalid lengths here.
        self.assertRaises(ValueError, generate_random_hex_string, 33)
        self.assertRaises(ValueError, generate_random_hex_string, 0)
        self.assertRaises(ValueError, generate_random_hex_string, -1)
        self.assertRaises(ValueError, generate_random_hex_string, 33)
        self.assertRaises(ValueError, generate_random_hex_string, True)
        self.assertRaises(ValueError, generate_random_hex_string, False)
class Test_generate_random_ulong_between(unittest2.TestCase):
    """Tests for generate_random_ulong_between: half-open range [low, high)."""

    def test_range(self):
        low, high = 1, 10
        for x in range(1000):
            value = generate_random_ulong_between(low, high)
            self.assertTrue(value >= low and value < high)

    def test_ValueError_when_low_greater_than_high(self):
        low, high = 4, 3
        self.assertRaises(ValueError, generate_random_ulong_between, low, high)

    def test_TypeError_when_invalid_argument(self):
        self.assertRaises(TypeError, generate_random_ulong_between, None, None)
        self.assertRaises(TypeError, generate_random_ulong_between, {}, {})
        self.assertRaises(TypeError, generate_random_ulong_between, object, object)
        self.assertRaises(TypeError, generate_random_ulong_between, True, True)
        self.assertRaises(TypeError, generate_random_ulong_between, "", "")
class Test_generate_random_password(unittest2.TestCase):
    """Tests for generate_random_password length-per-entropy and uniqueness.

    The expected lengths are the number of symbols from each pool needed to
    reach 64 and 1024 bits of entropy respectively (pools ordered smallest
    to largest, so lengths shrink as the pool grows).
    """

    def test_random_password_length(self):
        symbol_sets = [
            DIGITS,
            LOWERCASE_ALPHA,
            LOWERCASE_ALPHANUMERIC,
            ALPHA,
            ALPHANUMERIC,
            ASCII_PRINTABLE,
        ]
        # Expected lengths for 64 bits of entropy, one per pool above.
        lengths_64 = [
            20,
            14,
            13,
            12,
            11,
            10
        ]
        # Expected lengths for 1024 bits of entropy, one per pool above.
        lengths_1024 = [
            309,
            218,
            199,
            180,
            172,
            157,
        ]
        for length, symbols in zip(lengths_64, symbol_sets):
            for i in range(10):
                self.assertEqual(len(generate_random_password(64, symbols)),
                                 length)
        for length, symbols in zip(lengths_1024, symbol_sets):
            for i in range(10):
                self.assertEqual(len(generate_random_password(1024, symbols)),
                                 length)

    def test_uniqueness(self):
        # For a decent enough entropy.
        for i in range(100):
            self.assertNotEqual(generate_random_password(64),
                                generate_random_password(64))
class Test_generate_random_sequence(unittest2.TestCase):
    """Tests for generate_random_sequence argument validation."""

    def test_raises_TypeError_when_length_is_not_integer(self):
        self.assertRaises(TypeError, generate_random_sequence, None, ALPHA)

    def test_raises_TypeError_when_pool_is_None(self):
        self.assertRaises(TypeError, generate_random_sequence, 6, None)

    def test_raises_ValueError_when_length_is_not_positive(self):
        self.assertRaises(ValueError, generate_random_sequence, 0, ALPHA)
        self.assertRaises(ValueError, generate_random_sequence, -1, ALPHA)
class Test_calculate_entropy(unittest2.TestCase):
    """Tests that calculate_entropy meets the bit targets for known lengths.

    For each symbol pool, the listed length is the password length expected
    to provide at least 64 / 1024 / 128 bits of entropy respectively.
    """

    def test_entropy(self):
        symbol_sets = [
            DIGITS,
            LOWERCASE_ALPHA,
            LOWERCASE_ALPHANUMERIC,
            ALPHA,
            ALPHANUMERIC,
            ASCII_PRINTABLE,
        ]
        lengths_64 = [
            20,
            14,
            13,
            12,
            11,
            10
        ]
        lengths_1024 = [
            309,
            218,
            199,
            180,
            172,
            157,
        ]
        lengths_128 = [
            39,
            28,
            25,
            23,
            22,
            20,
        ]
        for length, symbols in zip(lengths_64, symbol_sets):
            for i in range(10):
                self.assertTrue(calculate_entropy(length, symbols) >= 64)
        for length, symbols in zip(lengths_1024, symbol_sets):
            for i in range(10):
                self.assertTrue(calculate_entropy(length, symbols) >= 1024)
        for length, symbols in zip(lengths_128, symbol_sets):
            for i in range(10):
                self.assertTrue(calculate_entropy(length, symbols) >= 128)
``generate_random_string`` tests
Signed-off-by: Gora Khargosh <a2078c57e3ac12c6dfb97b7c2c4e6d6d7db7e92f@gmail.com>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import unittest2
from mom.builtins import is_bytes, is_bytes_or_unicode
from mom.codec import bytes_to_long
from mom.security.random import \
generate_random_hex_string, generate_random_ulong_between, \
generate_random_bits, generate_random_ulong_atmost, \
generate_random_ulong_exactly, ALPHANUMERIC, \
ASCII_PRINTABLE, ALPHA, LOWERCASE_ALPHANUMERIC, \
LOWERCASE_ALPHA, DIGITS, generate_random_password, \
generate_random_sequence, calculate_entropy, generate_random_string
class Test_generate_random_bits(unittest2.TestCase):
    """Tests for generate_random_bits: random bytes, range, and argument checks."""

    def test_range(self):
        # A 4-bit sample must decode to an integer in [0, 2**4).
        for i in range(999):
            n_bits = 4
            value = bytes_to_long(generate_random_bits(n_bits))
            self.assertTrue(value >= 0 and value < (2L ** n_bits))

    def test_uniqueness(self):
        # The likelihood of recurrence should be tiny if a large enough
        # bit size is chosen.
        self.assertNotEqual(generate_random_bits(64), generate_random_bits(64))

    def test_ValueError_when_0_bits(self):
        self.assertRaises(ValueError, generate_random_bits, 0)

    def test_TypeError_when_invalid_argument(self):
        # Booleans are deliberately rejected even though bool is an int subclass.
        self.assertRaises(TypeError, generate_random_bits, None)
        self.assertRaises(TypeError, generate_random_bits, {})
        self.assertRaises(TypeError, generate_random_bits, object)
        self.assertRaises(TypeError, generate_random_bits, True)
        self.assertRaises(TypeError, generate_random_bits, "")
class Test_generate_random_ulong_exactly(unittest2.TestCase):
    """Tests for generate_random_ulong_exactly: value has exactly n_bits bits."""

    def test_range(self):
        for i in range(128):
            n_bits = i + 1
            for j in range(128):
                x = generate_random_ulong_exactly(n_bits)
                # Ensure high bit is set
                #self.assertTrue(x & (2L ** (n_bits - 1)))
                self.assertTrue(x >= (2L ** (n_bits - 1)) and
                                x < (2L ** n_bits), "huh? x=%d" % x)

    def test_ValueError_when_0_bits(self):
        self.assertRaises(ValueError, generate_random_ulong_exactly, 0)

    def test_TypeError_when_invalid_argument(self):
        self.assertRaises(TypeError, generate_random_ulong_exactly, None)
        self.assertRaises(TypeError, generate_random_ulong_exactly, {})
        self.assertRaises(TypeError, generate_random_ulong_exactly, object)
        self.assertRaises(TypeError, generate_random_ulong_exactly, True)
        self.assertRaises(TypeError, generate_random_ulong_exactly, "")
class Test_generate_random_ulong_atmost(unittest2.TestCase):
    """Tests for generate_random_ulong_atmost: value fits in at most n_bits bits."""

    def test_range(self):
        for i in range(128):
            n_bits = i + 1
            for j in range(128):
                x = generate_random_ulong_atmost(n_bits)
                self.assertTrue(x >= 0 and x < (2L ** n_bits),
                                "huh? x=%d" % x)

    def test_ValueError_when_0_bits(self):
        self.assertRaises(ValueError, generate_random_ulong_atmost, 0)

    def test_TypeError_when_invalid_argument(self):
        self.assertRaises(TypeError, generate_random_ulong_atmost, None)
        self.assertRaises(TypeError, generate_random_ulong_atmost, {})
        self.assertRaises(TypeError, generate_random_ulong_atmost, object)
        self.assertRaises(TypeError, generate_random_ulong_atmost, True)
        self.assertRaises(TypeError, generate_random_ulong_atmost, "")
class Test_generate_random_hex_string(unittest2.TestCase):
    """Tests for generate_random_hex_string: length, uniqueness, type, and errors."""

    def test_length(self):
        default_length = 8
        self.assertEqual(len(generate_random_hex_string()), default_length,
                         "Length does not match "\
                         "default expected length of %d." % default_length)
        self.assertEqual(len(generate_random_hex_string(length=10)), 10,
                         "Length does not match expected length.")

    def test_uniqueness(self):
        # The likelihood of recurrence should be tiny if a large enough
        # length is chosen.
        self.assertNotEqual(generate_random_hex_string(),
                            generate_random_hex_string(),
                            "Not unique.")

    def test_is_string(self):
        self.assertTrue(is_bytes(generate_random_hex_string()),
                        "Not a bytestring.")

    def test_TypeError_if_invalid_length_type(self):
        self.assertRaises(TypeError, generate_random_hex_string, None)
        self.assertRaises(TypeError, generate_random_hex_string, "")

    def test_raises_ValueError_if_invalid_length(self):
        # NOTE(review): the length-33 case is asserted twice; one line is
        # redundant. Booleans are also rejected as invalid lengths here.
        self.assertRaises(ValueError, generate_random_hex_string, 33)
        self.assertRaises(ValueError, generate_random_hex_string, 0)
        self.assertRaises(ValueError, generate_random_hex_string, -1)
        self.assertRaises(ValueError, generate_random_hex_string, 33)
        self.assertRaises(ValueError, generate_random_hex_string, True)
        self.assertRaises(ValueError, generate_random_hex_string, False)
class Test_generate_random_ulong_between(unittest2.TestCase):
    """Tests for generate_random_ulong_between: half-open range [low, high)."""

    def test_range(self):
        low, high = 1, 10
        for x in range(1000):
            value = generate_random_ulong_between(low, high)
            self.assertTrue(value >= low and value < high)

    def test_ValueError_when_low_greater_than_high(self):
        low, high = 4, 3
        self.assertRaises(ValueError, generate_random_ulong_between, low, high)

    def test_TypeError_when_invalid_argument(self):
        self.assertRaises(TypeError, generate_random_ulong_between, None, None)
        self.assertRaises(TypeError, generate_random_ulong_between, {}, {})
        self.assertRaises(TypeError, generate_random_ulong_between, object, object)
        self.assertRaises(TypeError, generate_random_ulong_between, True, True)
        self.assertRaises(TypeError, generate_random_ulong_between, "", "")
class Test_generate_random_string(unittest2.TestCase):
    """Tests for generate_random_string: exact length, string type, uniqueness."""

    def test_random_string_length(self):
        for i in range(10):
            self.assertEqual(len(generate_random_string(64)),
                             len(generate_random_string(64)))
            self.assertEqual(len(generate_random_string(64)), 64)

    def test_is_string(self):
        self.assertTrue(is_bytes_or_unicode(generate_random_string(64)))

    def test_uniqueness(self):
        # For a decent enough entropy.
        for i in range(10):
            self.assertNotEqual(generate_random_string(64),
                                generate_random_string(64))
class Test_generate_random_password(unittest2.TestCase):
    """Tests for generate_random_password length-per-entropy and uniqueness.

    The expected lengths are the number of symbols from each pool needed to
    reach 64 and 1024 bits of entropy respectively (pools ordered smallest
    to largest, so lengths shrink as the pool grows).
    """

    def test_random_password_length(self):
        symbol_sets = [
            DIGITS,
            LOWERCASE_ALPHA,
            LOWERCASE_ALPHANUMERIC,
            ALPHA,
            ALPHANUMERIC,
            ASCII_PRINTABLE,
        ]
        # Expected lengths for 64 bits of entropy, one per pool above.
        lengths_64 = [
            20,
            14,
            13,
            12,
            11,
            10
        ]
        # Expected lengths for 1024 bits of entropy, one per pool above.
        lengths_1024 = [
            309,
            218,
            199,
            180,
            172,
            157,
        ]
        for length, symbols in zip(lengths_64, symbol_sets):
            for i in range(10):
                self.assertEqual(len(generate_random_password(64, symbols)),
                                 length)
        for length, symbols in zip(lengths_1024, symbol_sets):
            for i in range(10):
                self.assertEqual(len(generate_random_password(1024, symbols)),
                                 length)

    def test_uniqueness(self):
        # For a decent enough entropy.
        for i in range(10):
            self.assertNotEqual(generate_random_password(64),
                                generate_random_password(64))
class Test_generate_random_sequence(unittest2.TestCase):
    """Tests for generate_random_sequence argument validation."""

    def test_raises_TypeError_when_length_is_not_integer(self):
        self.assertRaises(TypeError, generate_random_sequence, None, ALPHA)

    def test_raises_TypeError_when_pool_is_None(self):
        self.assertRaises(TypeError, generate_random_sequence, 6, None)

    def test_raises_ValueError_when_length_is_not_positive(self):
        self.assertRaises(ValueError, generate_random_sequence, 0, ALPHA)
        self.assertRaises(ValueError, generate_random_sequence, -1, ALPHA)
class Test_calculate_entropy(unittest2.TestCase):
    """Tests that calculate_entropy meets the bit targets for known lengths.

    For each symbol pool, the listed length is the password length expected
    to provide at least 64 / 1024 / 128 bits of entropy respectively.
    """

    def test_entropy(self):
        symbol_sets = [
            DIGITS,
            LOWERCASE_ALPHA,
            LOWERCASE_ALPHANUMERIC,
            ALPHA,
            ALPHANUMERIC,
            ASCII_PRINTABLE,
        ]
        lengths_64 = [
            20,
            14,
            13,
            12,
            11,
            10
        ]
        lengths_1024 = [
            309,
            218,
            199,
            180,
            172,
            157,
        ]
        lengths_128 = [
            39,
            28,
            25,
            23,
            22,
            20,
        ]
        for length, symbols in zip(lengths_64, symbol_sets):
            for i in range(10):
                self.assertTrue(calculate_entropy(length, symbols) >= 64)
        for length, symbols in zip(lengths_1024, symbol_sets):
            for i in range(10):
                self.assertTrue(calculate_entropy(length, symbols) >= 1024)
        for length, symbols in zip(lengths_128, symbol_sets):
            for i in range(10):
                self.assertTrue(calculate_entropy(length, symbols) >= 128)
|
e3aa3402-2ead-11e5-8e94-7831c1d44c14
e3b0d635-2ead-11e5-a040-7831c1d44c14
e3b0d635-2ead-11e5-a040-7831c1d44c14 |
from datetime import datetime
import json
from ipuz.exceptions import IPUZException
from ipuz.structures import (
validate_crosswordvalue,
validate_groupspec,
validate_stylespec,
)
# Fields every ipuz puzzle must contain, regardless of kind.
IPUZ_MANDATORY_FIELDS = (
    "version",
    "kind",
)
# Metadata fields a puzzle may contain; no per-field validation unless
# listed in IPUZ_FIELD_VALIDATORS below.
IPUZ_OPTIONAL_FIELDS = (
    "copyright",
    "publisher",
    "publication",
    "url",
    "uniqueid",
    "title",
    "intro",
    "explanation",
    "annotation",
    "author",
    "editor",
    "date",
    "notes",
    "difficulty",
    "origin",
    "block",
    "empty",
    "styles",
)
# Additional mandatory fields per PuzzleKind URI; matched by substring in
# read() so versioned kind URIs (e.g. ...#v2) still match.
IPUZ_PUZZLEKIND_MANDATORY_FIELDS = {
    "http://ipuz.org/crossword": (
        "dimensions",
        "puzzle",
    ),
    "http://ipuz.org/sudoku": (
        "puzzle",
    ),
    "http://ipuz.org/block": (
        "dimensions",
    ),
    "http://ipuz.org/wordsearch": (
        "dimensions",
    ),
}
def validate_dimensions(field_name, field_data):
    """Validate a Dimensions value: "width" and "height" must both be
    present and at least one."""
    for dimension in ("width", "height"):
        if dimension not in field_data:
            raise IPUZException(
                "Mandatory field {} of dimensions is missing".format(dimension)
            )
        if field_data[dimension] < 1:
            raise IPUZException(
                "Field {} of dimensions is less than one".format(dimension)
            )
def validate_date(field_name, field_data):
    """Validate that the date string is in US MM/DD/YYYY format."""
    try:
        datetime.strptime(field_data, '%m/%d/%Y')
    except ValueError:
        raise IPUZException("Invalid date format: {}".format(field_data))
def validate_styles(field_name, field_data):
    """Validate each StyleSpec value in the styles mapping (keys are the
    style names; only the specs themselves are validated)."""
    for _, stylespec in field_data.items():
        validate_stylespec(stylespec)
def validate_crosswordvalues(field_name, field_data):
    """Validate a grid of CrosswordValues: a list of rows, each a list whose
    elements all pass validate_crosswordvalue."""
    if type(field_data) is not list or any(type(e) is not list for e in field_data):
        raise IPUZException("Invalid {} value found".format(field_name))
    for line in field_data:
        for element in line:
            if not validate_crosswordvalue(element):
                raise IPUZException("Invalid CrosswordValue in {} element found".format(field_name))
def validate_saved(field_name, field_data):
    # "saved" is the solver's current fill; same grid rules as "solution".
    validate_crosswordvalues("saved", field_data)
def validate_solution(field_name, field_data):
    # The solution grid must be a valid CrosswordValues grid.
    validate_crosswordvalues("solution", field_data)
def validate_zones(field_name, field_data):
    """Validate that zones is a list of valid GroupSpec values."""
    if type(field_data) is not list:
        raise IPUZException("Invalid zones value found")
    for element in field_data:
        if not validate_groupspec(element):
            raise IPUZException("Invalid GroupSpec in zones element found")
def validate_showenumerations(field_name, field_data):
    """Validate that "showenumerations" is a JSON boolean."""
    if not isinstance(field_data, bool):
        raise IPUZException("Invalid showenumerations value found")
def validate_clueplacement(field_name, field_data):
    """Validate that clueplacement is null, "before", "after", or "blocks"."""
    allowed = (None, "before", "after", "blocks")
    if field_data not in allowed:
        raise IPUZException("Invalid clueplacement value found")
def validate_answer(field_name, field_data):
    # NOTE(review): `unicode` makes this Python 2 only; under Python 3 this
    # raises NameError rather than IPUZException.
    if type(field_data) not in [str, unicode]:
        raise IPUZException("Invalid answer value found")
def validate_answers(field_name, field_data):
    """Validate that answers is a non-empty list of string answers; any
    element failing validate_answer is reported as an answers error."""
    if type(field_data) is not list or not field_data:
        raise IPUZException("Invalid answers value found")
    for element in field_data:
        try:
            validate_answer(field_name, element)
        except IPUZException:
            raise IPUZException("Invalid answers value found")
def validate_charset(field_name, field_data):
    # A sudoku charset must be a string of exactly nine symbols.
    # NOTE(review): `unicode` makes this Python 2 only.
    if type(field_data) not in [str, unicode] or (len(field_data) != 9):
        raise IPUZException("Invalid charset value found")
def validate_displaycharset(field_name, field_data):
    """Validate that "displaycharset" is a JSON boolean."""
    if not isinstance(field_data, bool):
        raise IPUZException("Invalid displaycharset value found")
def validate_boxes(field_name, field_data):
    """Validate that "boxes" is a JSON boolean."""
    if not isinstance(field_data, bool):
        raise IPUZException("Invalid boxes value found")
def validate_showoperators(field_name, field_data):
    """Validate that "showoperators" is a JSON boolean."""
    if not isinstance(field_data, bool):
        raise IPUZException("Invalid showoperators value found")
def validate_cageborder(field_name, field_data):
    """Validate that cageborder is "thick" or "dashed"."""
    allowed = ("thick", "dashed")
    if field_data not in allowed:
        raise IPUZException("Invalid cageborder value found")
def validate_useall(field_name, field_data):
    """Validate that "useall" is a JSON boolean."""
    if not isinstance(field_data, bool):
        raise IPUZException("Invalid useall value found")
# Validators for fields common to all puzzle kinds.
IPUZ_FIELD_VALIDATORS = {
    "dimensions": validate_dimensions,
    "date": validate_date,
    "styles": validate_styles,
}
# Per-kind validators, keyed by field name.
IPUZ_CROSSWORD_VALIDATORS = {
    "saved": validate_saved,
    "solution": validate_solution,
    "zones": validate_zones,
    "showenumerations": validate_showenumerations,
    "clueplacement": validate_clueplacement,
    "answer": validate_answer,
    "answers": validate_answers,
}
IPUZ_SUDOKU_VALIDATORS = {
    "charset": validate_charset,
    "displaycharset": validate_displaycharset,
    "boxes": validate_boxes,
    "showoperators": validate_showoperators,
    "cageborder": validate_cageborder,
}
IPUZ_WORDSEARCH_VALIDATORS = {
    "useall": validate_useall,
}
# Dispatch table from PuzzleKind URI to its field validators; block puzzles
# have no kind-specific fields.
IPUZ_PUZZLEKIND_VALIDATORS = {
    "http://ipuz.org/crossword": IPUZ_CROSSWORD_VALIDATORS,
    "http://ipuz.org/sudoku": IPUZ_SUDOKU_VALIDATORS,
    "http://ipuz.org/block": {},
    "http://ipuz.org/wordsearch": IPUZ_WORDSEARCH_VALIDATORS,
}
def read(data):
    """Parse an ipuz string (bare JSON or JSONP-wrapped) into a dict.

    Validates mandatory fields, per-kind mandatory fields, and all fields
    with registered validators. Raises IPUZException on any violation.
    """
    if data.endswith(')'):
        # Strip a JSONP wrapper like ipuz(...). NOTE(review): assumes a '('
        # exists when the string ends with ')'; otherwise index() raises
        # ValueError — confirm inputs are always well-formed wrappers.
        data = data[data.index('(') + 1:-1]
    try:
        json_data = json.loads(data)
    except ValueError:
        raise IPUZException("No valid JSON could be found")
    for field in IPUZ_MANDATORY_FIELDS:
        if field not in json_data:
            raise IPUZException("Mandatory field {} is missing".format(field))
    for kind in json_data["kind"]:
        # Substring match so versioned kind URIs (e.g. "...#v2") still hit
        # the base URI's rules.
        for official_kind, fields in IPUZ_PUZZLEKIND_MANDATORY_FIELDS.items():
            if official_kind not in kind:
                continue
            for field in fields:
                if field not in json_data:
                    raise IPUZException("Mandatory field {} is missing".format(field))
            for field, value in json_data.items():
                if field in IPUZ_PUZZLEKIND_VALIDATORS[official_kind]:
                    IPUZ_PUZZLEKIND_VALIDATORS[official_kind][field](field, value)
    for field, value in json_data.items():
        if field in IPUZ_FIELD_VALIDATORS:
            IPUZ_FIELD_VALIDATORS[field](field, value)
    return json_data
def write(data, callback_name=None, json_only=False):
    """Serialize *data* to an ipuz string.

    Returns bare JSON when *json_only* is true; otherwise wraps the JSON in
    a JSONP callback (default name "ipuz").
    """
    payload = json.dumps(data)
    if json_only:
        return payload
    wrapper = "ipuz" if callback_name is None else callback_name
    return "%s(%s)" % (wrapper, payload)
Refactored similar boolean validation functions
from datetime import datetime
import json
from ipuz.exceptions import IPUZException
from ipuz.structures import (
validate_crosswordvalue,
validate_groupspec,
validate_stylespec,
)
# Top-level fields every ipuz file must carry, whatever its kind.
IPUZ_MANDATORY_FIELDS = (
    "version",
    "kind",
)
# Top-level fields that may appear in any ipuz file but are never required.
IPUZ_OPTIONAL_FIELDS = (
    "copyright",
    "publisher",
    "publication",
    "url",
    "uniqueid",
    "title",
    "intro",
    "explanation",
    "annotation",
    "author",
    "editor",
    "date",
    "notes",
    "difficulty",
    "origin",
    "block",
    "empty",
    "styles",
)
# Extra fields that become mandatory once a puzzle declares the given kind.
IPUZ_PUZZLEKIND_MANDATORY_FIELDS = {
    "http://ipuz.org/crossword": (
        "dimensions",
        "puzzle",
    ),
    "http://ipuz.org/sudoku": (
        "puzzle",
    ),
    "http://ipuz.org/block": (
        "dimensions",
    ),
    "http://ipuz.org/wordsearch": (
        "dimensions",
    ),
}
def validate_dimensions(field_name, field_data):
    """Require positive "width" and "height" members in a Dimensions dict."""
    for axis in ("width", "height"):
        if axis not in field_data:
            raise IPUZException(
                "Mandatory field {} of dimensions is missing".format(axis)
            )
        if field_data[axis] < 1:
            raise IPUZException(
                "Field {} of dimensions is less than one".format(axis)
            )
def validate_date(field_name, field_data):
    """Accept only dates written as MM/DD/YYYY."""
    expected_format = '%m/%d/%Y'
    try:
        datetime.strptime(field_data, expected_format)
    except ValueError:
        raise IPUZException("Invalid date format: {}".format(field_data))
def validate_styles(field_name, field_data):
    """Validate every StyleSpec value in the styles mapping."""
    for stylespec in field_data.values():
        validate_stylespec(stylespec)
def validate_crosswordvalues(field_name, field_data):
    """Validate a two-dimensional grid of CrosswordValue cells."""
    rows_ok = type(field_data) is list and all(
        type(row) is list for row in field_data)
    if not rows_ok:
        raise IPUZException("Invalid {} value found".format(field_name))
    for row in field_data:
        for cell in row:
            if not validate_crosswordvalue(cell):
                raise IPUZException(
                    "Invalid CrosswordValue in {} element found".format(field_name))
def validate_saved(field_name, field_data):
    """Check the "saved" grid as a CrosswordValue matrix."""
    validate_crosswordvalues("saved", field_data)
def validate_solution(field_name, field_data):
    """Check the "solution" grid as a CrosswordValue matrix."""
    validate_crosswordvalues("solution", field_data)
def validate_zones(field_name, field_data):
    """Require zones to be a list of valid GroupSpecs."""
    if type(field_data) is not list:
        raise IPUZException("Invalid zones value found")
    for groupspec in field_data:
        if not validate_groupspec(groupspec):
            raise IPUZException("Invalid GroupSpec in zones element found")
def validate_showenumerations(field_name, field_data):
    """The "showenumerations" field must be a boolean."""
    if not isinstance(field_data, bool):
        raise IPUZException("Invalid showenumerations value found")
def validate_clueplacement(field_name, field_data):
    """Allow only the defined clue placement values (or None)."""
    allowed = (None, "before", "after", "blocks")
    if field_data not in allowed:
        raise IPUZException("Invalid clueplacement value found")
def validate_answer(field_name, field_data):
    """An answer must be a byte or unicode string (Python 2 module)."""
    if type(field_data) not in (str, unicode):
        raise IPUZException("Invalid answer value found")
def validate_answers(field_name, field_data):
    """Require a non-empty list where every element is a valid answer."""
    if type(field_data) is not list or not field_data:
        raise IPUZException("Invalid answers value found")
    for answer in field_data:
        try:
            validate_answer(field_name, answer)
        except IPUZException:
            # Re-raise with the list-level message rather than the element's.
            raise IPUZException("Invalid answers value found")
def validate_charset(field_name, field_data):
    """A Sudoku charset must be a string of exactly nine characters."""
    if type(field_data) not in (str, unicode) or len(field_data) != 9:
        raise IPUZException("Invalid charset value found")
def validate_cageborder(field_name, field_data):
    """Only "thick" and "dashed" cage borders are defined."""
    if field_data not in ("thick", "dashed"):
        raise IPUZException("Invalid cageborder value found")
def validate_bool(field_name, field_data):
    """Shared validator for boolean-valued fields; message names the field."""
    if not isinstance(field_data, bool):
        raise IPUZException("Invalid {} value found".format(field_name))
# Validators applied to every puzzle regardless of its kind.
IPUZ_FIELD_VALIDATORS = {
    "dimensions": validate_dimensions,
    "date": validate_date,
    "styles": validate_styles,
}
# Field validators applied only to http://ipuz.org/crossword puzzles.
IPUZ_CROSSWORD_VALIDATORS = {
    "saved": validate_saved,
    "solution": validate_solution,
    "zones": validate_zones,
    # Consistency with the validate_bool refactor: for this key validate_bool
    # raises the identical "Invalid showenumerations value found" message, so
    # behavior is unchanged.
    "showenumerations": validate_bool,
    "clueplacement": validate_clueplacement,
    "answer": validate_answer,
    "answers": validate_answers,
}
# Field validators applied only to http://ipuz.org/sudoku puzzles; the plain
# boolean fields share the generic validate_bool helper.
IPUZ_SUDOKU_VALIDATORS = {
    "charset": validate_charset,
    "displaycharset": validate_bool,
    "boxes": validate_bool,
    "showoperators": validate_bool,
    "cageborder": validate_cageborder,
}
# Field validators applied only to http://ipuz.org/wordsearch puzzles.
IPUZ_WORDSEARCH_VALIDATORS = {
    "useall": validate_bool,
}
# Dispatch table: puzzle-kind URI -> that kind's field validators.
# "block" puzzles define no kind-specific fields.
IPUZ_PUZZLEKIND_VALIDATORS = {
    "http://ipuz.org/crossword": IPUZ_CROSSWORD_VALIDATORS,
    "http://ipuz.org/sudoku": IPUZ_SUDOKU_VALIDATORS,
    "http://ipuz.org/block": {},
    "http://ipuz.org/wordsearch": IPUZ_WORDSEARCH_VALIDATORS,
}
def read(data):
    """Parse ipuz *data* (plain JSON or a JSONP wrapper) and validate it.

    Returns the decoded puzzle dict; raises IPUZException when the JSON is
    malformed or any mandatory/typed field check fails.
    """
    if data.endswith(')'):
        # Unwrap a JSONP payload of the form callback(<json>).
        data = data[data.index('(') + 1:-1]
    try:
        puzzle = json.loads(data)
    except ValueError:
        raise IPUZException("No valid JSON could be found")
    for key in IPUZ_MANDATORY_FIELDS:
        if key not in puzzle:
            raise IPUZException("Mandatory field {} is missing".format(key))
    for kind in puzzle["kind"]:
        for official_kind, required in IPUZ_PUZZLEKIND_MANDATORY_FIELDS.items():
            if official_kind not in kind:
                continue
            for key in required:
                if key not in puzzle:
                    raise IPUZException(
                        "Mandatory field {} is missing".format(key))
            validators = IPUZ_PUZZLEKIND_VALIDATORS[official_kind]
            for key, value in puzzle.items():
                if key in validators:
                    validators[key](key, value)
    for key, value in puzzle.items():
        if key in IPUZ_FIELD_VALIDATORS:
            IPUZ_FIELD_VALIDATORS[key](key, value)
    return puzzle
def write(data, callback_name=None, json_only=False):
    """Serialize *data* as ipuz output.

    Returns bare JSON when *json_only* is true; otherwise a JSONP string
    wrapped in *callback_name* (defaulting to "ipuz").
    """
    json_string = json.dumps(data)
    if json_only:
        return json_string
    wrapper = "ipuz" if callback_name is None else callback_name
    return wrapper + '(' + json_string + ')'
|
##########################################################################
###Main view file for the request routes
###Currently supports search, view and submit of assistance and donation
###requests.
###Depends on const.py and auth.py
##########################################################################
from flask import Flask, request, redirect, url_for, abort, send_file, g
from datetime import datetime
from os import environ
from functools import wraps
import json
import base64
import logging
import time
from bson import json_util
from logging.handlers import RotatingFileHandler
from logging import Formatter
#constants file in this directory
from const import RequestType, ResponseType, RequestActions, RequestStatus
from aafrequest import AAFRequest, AAFSearch, InvalidActionException
from ldap import GetUserById, IsAdminGroupDn, LdapError
from voluptuous.error import MultipleInvalid
from database import MongoConnection
from flask_pymongo import PyMongo
import config
#move this to init script - stest up the base app object
app = Flask(__name__)
handler = RotatingFileHandler(config.LOG_LOCATION, maxBytes=10000, backupCount=1)
handler.setLevel(logging.INFO)
handler.setFormatter(Formatter(
'%(asctime)s %(levelname)s: %(message)s '
'[in %(pathname)s:%(lineno)d]'
))
app.logger.addHandler(handler)
app.config['MONGO_HOST'] = config.MONGO_HOST
app.config['MONGO_PORT'] = config.MONGO_PORT
app.config['MONGO_DBNAME'] = config.MONGO_DBNAME
mongo = PyMongo(app)
from flask import g
#decorator for creating callbacks to be executed after the response is generated
def after_this_request(f):
    """Register *f* on the request-global callback list and return it.

    NOTE(review): nothing in this module visibly drains
    g.after_request_callbacks -- confirm a hook elsewhere invokes them.
    """
    callbacks = getattr(g, 'after_request_callbacks', None)
    if callbacks is None:
        callbacks = []
        g.after_request_callbacks = callbacks
    callbacks.append(f)
    return f
#check request type from the path
def IsValidRequest(request_type):
    """Return True when request_type is one of the supported request kinds.

    Idiom fix: the original if/else returning True/False is collapsed into
    returning the boolean expression directly.
    """
    return request_type in (RequestType.ASSISTANCE, RequestType.DONATION)
def GetCurUserId():
    # The 'Uid' header carries the caller's id (presumably injected by an
    # upstream auth layer -- see check_auth_header); None when absent.
    return request.headers.get('Uid')
#prepare the response for the user
def GetResponseJson(response_status, results):
    """Serialize a {status, result} envelope using BSON-aware strict JSON."""
    return json_util.dumps({"status" : response_status, "result" : results}, json_options=json_util.STRICT_JSON_OPTIONS)
def IsUserAdmin():
    # Delegates the admin decision to the LDAP helper, using the caller's
    # 'Memberof' header as supplied by the auth layer.
    return IsAdminGroupDn(request.headers.get('Memberof'))
@app.before_request
def check_auth_header():
    """Reject any non-OPTIONS request that lacks the 'Uid' auth header."""
    # FIX: this per-request trace was logged at ERROR level, polluting the
    # error log on every hit; DEBUG is the appropriate severity.
    app.logger.debug(request.path)
    if request.method != 'OPTIONS' and 'Uid' not in request.headers:
        abort(401)
@app.after_request
def set_user_headers(response):
    """Echo the caller's id and admin status back in the response headers."""
    # FIX: leftover trace logging demoted from ERROR to DEBUG.
    app.logger.debug(request.headers.get('Memberof'))
    response.headers['Uid'] = GetCurUserId()
    # str() makes the header value explicit rather than relying on the
    # header container to coerce the boolean.
    response.headers['IsAdmin'] = str(IsUserAdmin())
    return response
# Smoke-test endpoint for the root path -- slated for removal.
@app.route('/')
def hello():
    """Return a trivial success envelope to confirm the service is up."""
    return GetResponseJson(ResponseType.SUCCESS, "Hello World!")
#Search method - query string can contain any of the attributes of a request
#If _id is previded as a search param, redirects to /request_type/id
@app.route('/request/<request_type>/search', methods=['POST'])
def search_requests(request_type):
    """Search requests of the given type; non-admins only see their own."""
    if not IsValidRequest(request_type):
        return GetResponseJson(ResponseType.ERROR, "invalid request - type")
    per_page = request.args.get('perPage')
    # NOTE(review): perPage is passed through as None when absent -- confirm
    # AAFSearch.Search applies its own default.
    page_num = request.args.get('pageNumber') or 1
    criteria = request.json if request.json else {}
    if not IsUserAdmin():
        # Scope the query to the caller's own requests.
        criteria['createdBy'] = GetCurUserId()
    conn = MongoConnection(mongo.db)
    search_results = AAFSearch.Search(conn, request_type, criteria,
                                      per_page, page_num)
    return GetResponseJson(ResponseType.SUCCESS, search_results)
#view method for requests - takes MongoDB id and returns the dict result from Mongo
@app.route('/request/<request_type>', methods=['POST'])
@app.route('/request/<request_type>/<request_id>', methods=['GET', 'POST'])
def get_upd_request(request_type, request_id=None):
    """Fetch (GET) or create/update (POST) a single request document."""
    user_id = int(GetCurUserId())
    if not IsValidRequest(request_type):
        return GetResponseJson(ResponseType.ERROR, "invalid request type")
    conn = MongoConnection(mongo.db)
    aaf_request = AAFRequest(conn, request_type, request_id)
    if request_id and not aaf_request.IsExistingRequest():
        return abort(404)
    # Boolean simplification of the original compound condition: non-admins
    # may only act on existing requests they created themselves.
    elif (not IsUserAdmin() and aaf_request.IsExistingRequest()
            and not aaf_request.IsUserCreator(user_id)):
        return abort(403)
    if request.method == 'POST':
        if not request.json:
            # BUG FIX: the original built this error response but never
            # returned it, so the view returned None and Flask raised a 500.
            return GetResponseJson(ResponseType.ERROR,
                                   "invalid request - no json recieved")
        try:
            aaf_request.Update(user_id, request.json)
            return GetResponseJson(ResponseType.SUCCESS, aaf_request.request_id)
        except MultipleInvalid as ex:
            return GetResponseJson(ResponseType.ERROR, str(ex))
    return GetResponseJson(ResponseType.SUCCESS, aaf_request.request_details)
@app.route('/request/<request_type>/<request_id>/<action>', methods=['POST'])
def request_action(request_type, request_id, action):
    """Apply a named workflow action to an existing request for the caller."""
    if not IsValidRequest(request_type):
        return GetResponseJson(ResponseType.ERROR, "invalid request")
    user_id = int(GetCurUserId())
    admin_flag = IsUserAdmin()
    aaf_request = AAFRequest(MongoConnection(mongo.db), request_type, request_id)
    try:
        aaf_request.PerformAction(action, user_id, admin_flag)
    except InvalidActionException as ex:
        return GetResponseJson(ResponseType.ERROR, str(ex))
    return GetResponseJson(ResponseType.SUCCESS, aaf_request.request_details)
@app.route('/request/<request_type>/<request_id>/document', methods=['GET'])
def get_request_docs(request_type, request_id):
    """Placeholder document-listing endpoint.

    BUG FIX: the view accepted no arguments although the route declares two
    URL parameters, so every call raised TypeError and the body referenced
    undefined names.
    """
    return ('type: %s - id: %s - get_docs' % (request_type, request_id))
@app.route('/api/request/<request_type>/<request_id>/document', methods=['POST'])
@app.route('/api/request/<request_type>/<request_id>/document/<document_id>', methods=['GET', 'DELETE'])
def document(request_type, request_id, document_id=None):
    """Upload (POST), fetch (GET) or delete (DELETE) request documents."""
    user_id = int(GetCurUserId())
    if not IsValidRequest(request_type):
        return GetResponseJson(ResponseType.ERROR, "invalid request")
    conn = MongoConnection(mongo.db)
    aaf_request = AAFRequest(conn, request_type, request_id)
    if request.method == 'POST':
        if not request.json:
            return GetResponseJson(ResponseType.ERROR, 'No file data recieved')
        payload = request.json  # renamed from `input`, which shadowed the builtin
        # A single document arrives as a dict; normalise to a list.
        if isinstance(payload, dict):
            payload = [payload]
        results = [
            aaf_request.UploadDocument(user_id, doc['fileName'],
                                       doc['base64String'], doc['description'])
            for doc in payload
        ]
        return GetResponseJson(ResponseType.SUCCESS, results)
    if request.method == 'DELETE':
        if not document_id:
            abort(404)
        aaf_request.DeleteDocument(user_id, document_id)
        return GetResponseJson(ResponseType.SUCCESS,
                               'File %s deleted.' % (document_id))
    # GET
    if not document_id:
        # BUG FIX: the original fell off the end here, returning None and
        # producing a 500; a missing id is a 404.
        abort(404)
    doc = aaf_request.GetDocument(document_id)
    if doc is None:
        abort(404)
    return GetResponseJson(ResponseType.SUCCESS, doc)
#returns current user info from ldap
@app.route('/userinfo', methods=['GET'])
@app.route('/userinfo/', methods=['GET'])
@app.route('/userinfo/<user_id>', methods=['GET'])
def curr_user_details(user_id=None):
    """Return LDAP details for user_id (default: the caller) plus admin flag."""
    try:
        if user_id:
            user_details = GetUserById(user_id)
        else:
            user_details = GetUserById(GetCurUserId())
        # FIX: include the admin flag so clients need no second call.
        user_details['IsAdmin'] = IsUserAdmin()
        return GetResponseJson(ResponseType.SUCCESS, user_details)
    except LdapError as ex:
        return GetResponseJson(ResponseType.ERROR, str(ex))
@app.errorhandler(500)
def server_error(e):
    """Return a generic JSON error envelope for unhandled exceptions."""
    message = "Unexpected server error, please see app logs for additional details."
    return GetResponseJson(ResponseType.ERROR, message)
#run the app, needs to be moved to init file
if __name__ == '__main__':
    # NOTE(review): the fallback bind address is a hard-coded private IP;
    # consider sourcing it from config alongside the Mongo settings.
    HOST = environ.get('SERVER_HOST', '192.168.250.133')
    try:
        PORT = int(environ.get('SERVER_PORT', '5555'))
    except ValueError:
        # Non-numeric SERVER_PORT falls back to the default.
        PORT = 5555
    app.run(HOST, PORT)
Fixed the user info endpoint: the response now also includes the caller's IsAdmin flag.
##########################################################################
###Main view file for the request routes
###Currently supports search, view and submit of assistance and donation
###requests.
###Depends on const.py and auth.py
##########################################################################
from flask import Flask, request, redirect, url_for, abort, send_file, g
from datetime import datetime
from os import environ
from functools import wraps
import json
import base64
import logging
import time
from bson import json_util
from logging.handlers import RotatingFileHandler
from logging import Formatter
#constants file in this directory
from const import RequestType, ResponseType, RequestActions, RequestStatus
from aafrequest import AAFRequest, AAFSearch, InvalidActionException
from ldap import GetUserById, IsAdminGroupDn, LdapError
from voluptuous.error import MultipleInvalid
from database import MongoConnection
from flask_pymongo import PyMongo
import config
#move this to init script - stest up the base app object
app = Flask(__name__)
handler = RotatingFileHandler(config.LOG_LOCATION, maxBytes=10000, backupCount=1)
handler.setLevel(logging.INFO)
handler.setFormatter(Formatter(
'%(asctime)s %(levelname)s: %(message)s '
'[in %(pathname)s:%(lineno)d]'
))
app.logger.addHandler(handler)
app.config['MONGO_HOST'] = config.MONGO_HOST
app.config['MONGO_PORT'] = config.MONGO_PORT
app.config['MONGO_DBNAME'] = config.MONGO_DBNAME
mongo = PyMongo(app)
from flask import g
#decorator for creating callbacks to be executed after the response is generated
def after_this_request(f):
    """Register *f* on the request-global callback list and return it.

    NOTE(review): nothing in this module visibly drains
    g.after_request_callbacks -- confirm a hook elsewhere invokes them.
    """
    callbacks = getattr(g, 'after_request_callbacks', None)
    if callbacks is None:
        callbacks = []
        g.after_request_callbacks = callbacks
    callbacks.append(f)
    return f
#check request type from the path
def IsValidRequest(request_type):
    """Return True when request_type is one of the supported request kinds.

    Idiom fix: the original if/else returning True/False is collapsed into
    returning the boolean expression directly.
    """
    return request_type in (RequestType.ASSISTANCE, RequestType.DONATION)
def GetCurUserId():
    # The 'Uid' header carries the caller's id (presumably injected by an
    # upstream auth layer -- see check_auth_header); None when absent.
    return request.headers.get('Uid')
#prepare the response for the user
def GetResponseJson(response_status, results):
    """Serialize a {status, result} envelope using BSON-aware strict JSON."""
    return json_util.dumps({"status" : response_status, "result" : results}, json_options=json_util.STRICT_JSON_OPTIONS)
def IsUserAdmin():
    # Delegates the admin decision to the LDAP helper, using the caller's
    # 'Memberof' header as supplied by the auth layer.
    return IsAdminGroupDn(request.headers.get('Memberof'))
@app.before_request
def check_auth_header():
    """Reject any non-OPTIONS request that lacks the 'Uid' auth header."""
    # FIX: this per-request trace was logged at ERROR level, polluting the
    # error log on every hit; DEBUG is the appropriate severity.
    app.logger.debug(request.path)
    if request.method != 'OPTIONS' and 'Uid' not in request.headers:
        abort(401)
@app.after_request
def set_user_headers(response):
    """Echo the caller's id and admin status back in the response headers."""
    # FIX: leftover trace logging demoted from ERROR to DEBUG.
    app.logger.debug(request.headers.get('Memberof'))
    response.headers['Uid'] = GetCurUserId()
    # str() makes the header value explicit rather than relying on the
    # header container to coerce the boolean.
    response.headers['IsAdmin'] = str(IsUserAdmin())
    return response
# Smoke-test endpoint for the root path -- slated for removal.
@app.route('/')
def hello():
    """Return a trivial success envelope to confirm the service is up."""
    return GetResponseJson(ResponseType.SUCCESS, "Hello World!")
#Search method - query string can contain any of the attributes of a request
#If _id is previded as a search param, redirects to /request_type/id
@app.route('/request/<request_type>/search', methods=['POST'])
def search_requests(request_type):
    """Search requests of the given type; non-admins only see their own."""
    if not IsValidRequest(request_type):
        return GetResponseJson(ResponseType.ERROR, "invalid request - type")
    per_page = request.args.get('perPage')
    # NOTE(review): perPage is passed through as None when absent -- confirm
    # AAFSearch.Search applies its own default.
    page_num = request.args.get('pageNumber') or 1
    criteria = request.json if request.json else {}
    if not IsUserAdmin():
        # Scope the query to the caller's own requests.
        criteria['createdBy'] = GetCurUserId()
    conn = MongoConnection(mongo.db)
    search_results = AAFSearch.Search(conn, request_type, criteria,
                                      per_page, page_num)
    return GetResponseJson(ResponseType.SUCCESS, search_results)
#view method for requests - takes MongoDB id and returns the dict result from Mongo
@app.route('/request/<request_type>', methods=['POST'])
@app.route('/request/<request_type>/<request_id>', methods=['GET', 'POST'])
def get_upd_request(request_type, request_id=None):
    """Fetch (GET) or create/update (POST) a single request document."""
    user_id = int(GetCurUserId())
    if not IsValidRequest(request_type):
        return GetResponseJson(ResponseType.ERROR, "invalid request type")
    conn = MongoConnection(mongo.db)
    aaf_request = AAFRequest(conn, request_type, request_id)
    if request_id and not aaf_request.IsExistingRequest():
        return abort(404)
    # Boolean simplification of the original compound condition: non-admins
    # may only act on existing requests they created themselves.
    elif (not IsUserAdmin() and aaf_request.IsExistingRequest()
            and not aaf_request.IsUserCreator(user_id)):
        return abort(403)
    if request.method == 'POST':
        if not request.json:
            # BUG FIX: the original built this error response but never
            # returned it, so the view returned None and Flask raised a 500.
            return GetResponseJson(ResponseType.ERROR,
                                   "invalid request - no json recieved")
        try:
            aaf_request.Update(user_id, request.json)
            return GetResponseJson(ResponseType.SUCCESS, aaf_request.request_id)
        except MultipleInvalid as ex:
            return GetResponseJson(ResponseType.ERROR, str(ex))
    return GetResponseJson(ResponseType.SUCCESS, aaf_request.request_details)
@app.route('/request/<request_type>/<request_id>/<action>', methods=['POST'])
def request_action(request_type, request_id, action):
    """Apply a named workflow action to an existing request for the caller."""
    if not IsValidRequest(request_type):
        return GetResponseJson(ResponseType.ERROR, "invalid request")
    user_id = int(GetCurUserId())
    admin_flag = IsUserAdmin()
    aaf_request = AAFRequest(MongoConnection(mongo.db), request_type, request_id)
    try:
        aaf_request.PerformAction(action, user_id, admin_flag)
    except InvalidActionException as ex:
        return GetResponseJson(ResponseType.ERROR, str(ex))
    return GetResponseJson(ResponseType.SUCCESS, aaf_request.request_details)
@app.route('/request/<request_type>/<request_id>/document', methods=['GET'])
def get_request_docs(request_type, request_id):
    """Placeholder document-listing endpoint.

    BUG FIX: the view accepted no arguments although the route declares two
    URL parameters, so every call raised TypeError and the body referenced
    undefined names.
    """
    return ('type: %s - id: %s - get_docs' % (request_type, request_id))
@app.route('/api/request/<request_type>/<request_id>/document', methods=['POST'])
@app.route('/api/request/<request_type>/<request_id>/document/<document_id>', methods=['GET', 'DELETE'])
def document(request_type, request_id, document_id=None):
    """Upload (POST), fetch (GET) or delete (DELETE) request documents."""
    user_id = int(GetCurUserId())
    if not IsValidRequest(request_type):
        return GetResponseJson(ResponseType.ERROR, "invalid request")
    conn = MongoConnection(mongo.db)
    aaf_request = AAFRequest(conn, request_type, request_id)
    if request.method == 'POST':
        if not request.json:
            return GetResponseJson(ResponseType.ERROR, 'No file data recieved')
        payload = request.json  # renamed from `input`, which shadowed the builtin
        # A single document arrives as a dict; normalise to a list.
        if isinstance(payload, dict):
            payload = [payload]
        results = [
            aaf_request.UploadDocument(user_id, doc['fileName'],
                                       doc['base64String'], doc['description'])
            for doc in payload
        ]
        return GetResponseJson(ResponseType.SUCCESS, results)
    if request.method == 'DELETE':
        if not document_id:
            abort(404)
        aaf_request.DeleteDocument(user_id, document_id)
        return GetResponseJson(ResponseType.SUCCESS,
                               'File %s deleted.' % (document_id))
    # GET
    if not document_id:
        # BUG FIX: the original fell off the end here, returning None and
        # producing a 500; a missing id is a 404.
        abort(404)
    doc = aaf_request.GetDocument(document_id)
    if doc is None:
        abort(404)
    return GetResponseJson(ResponseType.SUCCESS, doc)
#returns current user info from ldap
@app.route('/userinfo', methods=['GET'])
@app.route('/userinfo/', methods=['GET'])
@app.route('/userinfo/<user_id>', methods=['GET'])
def curr_user_details(user_id=None):
    """Return LDAP details for user_id (default: the caller) plus admin flag."""
    try:
        target = user_id if user_id else GetCurUserId()
        user_details = GetUserById(target)
        # Include the admin flag so clients need no second call.
        user_details['IsAdmin'] = IsUserAdmin()
        return GetResponseJson(ResponseType.SUCCESS, user_details)
    except LdapError as ex:
        return GetResponseJson(ResponseType.ERROR, str(ex))
@app.errorhandler(500)
def server_error(e):
    """Return a generic JSON error envelope for unhandled exceptions."""
    message = "Unexpected server error, please see app logs for additional details."
    return GetResponseJson(ResponseType.ERROR, message)
#run the app, needs to be moved to init file
if __name__ == '__main__':
    # NOTE(review): the fallback bind address is a hard-coded private IP;
    # consider sourcing it from config alongside the Mongo settings.
    HOST = environ.get('SERVER_HOST', '192.168.250.133')
    try:
        PORT = int(environ.get('SERVER_PORT', '5555'))
    except ValueError:
        # Non-numeric SERVER_PORT falls back to the default.
        PORT = 5555
    app.run(HOST, PORT)
|
#! /usr/bin/env python
from MFT import MFTEnumerator
import array
import re
import logging
import datetime
import argparse
from jinja2 import Template
from BinaryParser import Mmap
from MFT import Cache
from MFT import ATTR_TYPE
from MFT import MREF
from MFT import MSEQNO
from MFT import IndexRootHeader
from MFT import Attribute
from MFT import FilenameAttribute
from MFT import StandardInformationFieldDoesNotExist
ASCII_BYTE = " !\"#\$%&\'\(\)\*\+,-\./0123456789:;<=>\?@ABCDEFGHIJKLMNOPQRSTUVWXYZ\[\]\^_`abcdefghijklmnopqrstuvwxyz\{\|\}\\\~"
def ascii_strings(buf, n=4):
    """Yield each run of at least *n* printable-ASCII characters in *buf*."""
    pattern = re.compile("([%s]{%d,})" % (ASCII_BYTE, n))
    for match in pattern.finditer(buf):
        run = match.group()
        if isinstance(run, array.array):
            run = run.tostring()
        yield run.decode("ascii")
def unicode_strings(buf, n=4):
    """Yield runs of at least *n* UTF-16LE-encoded ASCII characters in *buf*.

    BUG FIX: the minimum run length was hard-coded to 4 in the pattern; it
    now honours the *n* parameter like ascii_strings does.
    """
    reg = b"((?:[%s]\x00){%d,})" % (ASCII_BYTE, n)
    ascii_re = re.compile(reg)
    for match in ascii_re.finditer(buf):
        try:
            if isinstance(match.group(), array.array):
                yield match.group().tostring().decode("utf-16")
            else:
                yield match.group().decode("utf-16")
        except UnicodeDecodeError:
            # Skip byte runs that are not valid UTF-16.
            pass
def get_flags(flags):
    """Return the readable names of the attribute flag bits set in *flags*."""
    return [name for bit, name in Attribute.FLAGS.items() if flags & bit]
def create_safe_datetime(fn):
    """Return fn(), or the Unix epoch when the value raises ValueError."""
    fallback = datetime.datetime(1970, 1, 1, 0, 0, 0)
    try:
        return fn()
    except ValueError:
        return fallback
def create_safe_timeline_entry(fn, type_, source, path):
    """Build one timeline row, guarding the timestamp accessor *fn*."""
    entry = {"type": type_, "source": source, "path": path}
    entry["timestamp"] = create_safe_datetime(fn)
    return entry
def create_safe_timeline_entries(attr, source, path):
    """Expand one attribute into its four timestamp timeline rows."""
    accessors = (
        (attr.created_time, "birthed"),
        (attr.accessed_time, "accessed"),
        (attr.modified_time, "modified"),
        (attr.changed_time, "changed"),
    )
    return [create_safe_timeline_entry(fn, kind, source, path)
            for fn, kind in accessors]
def get_timeline_entries(record):
    """Collect timeline entries from $SI, $FN attributes and INDX structures,
    sorted by timestamp.

    BUG FIX: the original returned early when $SI or $FN was missing, which
    also discarded the timestamps carried by the other attribute types; now
    the $SI entries are simply skipped and collection continues.
    """
    entries = []
    si = record.standard_information()
    fn = record.filename_information()
    if si is not None and fn is not None:
        entries.extend(create_safe_timeline_entries(si, "$SI", fn.filename()))
    for b in record.attributes():
        if b.type() != ATTR_TYPE.FILENAME_INFORMATION:
            continue
        attr = FilenameAttribute(b.value(), 0, record)
        entries.extend(create_safe_timeline_entries(attr, "$FN", attr.filename()))
    indxroot = record.attribute(ATTR_TYPE.INDEX_ROOT)
    if indxroot and indxroot.non_resident() == 0:
        # Resident INDEX_ROOT: walk both live and slack directory entries.
        irh = IndexRootHeader(indxroot.value(), 0, False)
        for e in irh.node_header().entries():
            fn_attr = e.filename_information()
            entries.extend(
                create_safe_timeline_entries(fn_attr, "INDX", fn_attr.filename()))
        for e in irh.node_header().slack_entries():
            fn_attr = e.filename_information()
            entries.extend(
                create_safe_timeline_entries(fn_attr, "slack-INDX",
                                             fn_attr.filename()))
    return sorted(entries, key=lambda x: x["timestamp"])
def make_filename_information_model(attr):
    """Map a filename attribute to a plain dict; passes None through."""
    if attr is None:
        return None
    return {
        # filename_type() indexes into the four defined name namespaces.
        "type": ["POSIX", "WIN32", "DOS 8.3", "WIN32 + DOS 8.3"][attr.filename_type()],
        "name": attr.filename(),
        "flags": get_flags(attr.flags()),
        "logical_size": attr.logical_size(),
        "physical_size": attr.physical_size(),
        "modified": create_safe_datetime(attr.modified_time),
        "accessed": create_safe_datetime(attr.accessed_time),
        "changed": create_safe_datetime(attr.changed_time),
        "created": create_safe_datetime(attr.created_time),
        "parent_ref": MREF(attr.mft_parent_reference()),
        "parent_seq": MSEQNO(attr.mft_parent_reference()),
    }
def make_standard_information_model(attr):
    """Map a standard-information attribute to a plain dict; passes None
    through.

    Cleanup: removed the large block of commented-out default-value code.
    """
    if attr is None:
        return None
    ret = {
        "created": create_safe_datetime(attr.created_time),
        "modified": create_safe_datetime(attr.modified_time),
        "changed": create_safe_datetime(attr.changed_time),
        "accessed": create_safe_datetime(attr.accessed_time),
        "flags": get_flags(attr.attributes())
    }
    # since the fields are sequential, we can handle an exception half way through here
    # and then ignore the remaining items. Dont have to worry about individual try/catches
    try:
        ret["owner_id"] = attr.owner_id()
        ret["security_id"] = attr.security_id()
        ret["quota_charged"] = attr.quota_charged()
        ret["usn"] = attr.usn()
    except StandardInformationFieldDoesNotExist:
        pass
    return ret
def make_attribute_model(attr):
    """Map a generic MFT attribute to a plain dict, including data runs."""
    model = {
        "type": Attribute.TYPES[attr.type()],
        "name": attr.name(),
        "flags": get_flags(attr.flags()),
        "is_resident": attr.non_resident() == 0,
        "data_size": 0,
        "allocated_size": 0,
        "value_size": 0,
        "runs": [],
    }
    if attr.non_resident() > 0:
        model["data_size"] = attr.data_size()
        model["allocated_size"] = attr.allocated_size()
        if attr.allocated_size() > 0:
            model["runs"] = [{"offset": offset, "length": length}
                             for offset, length in attr.runlist().runs()]
    else:
        model["value_size"] = attr.value_length()
    return model
def make_model(record, path):
    """Build the full template model (a plain dict) for one MFT record."""
    active_data = record.active_data()
    slack_data = record.slack_data()
    model = {
        "magic": record.magic(),
        "path": path,
        "inode": record.inode,
        "is_active": record.is_active(),
        "is_directory": record.is_directory(),
        "size": 0,  # updated below
        "standard_information": make_standard_information_model(record.standard_information()),
        "filename_information": make_filename_information_model(record.filename_information()),
        "owner_id": 0,  # updated below
        "security_id": 0,  # updated below
        "quota_charged": 0,  # updated below
        "usn": 0,  # updated below
        "filenames": [],
        "attributes": [],
        "indx_entries": [],
        "slack_indx_entries": [],
        "timeline": get_timeline_entries(record),
        # Generators: strings are extracted lazily as the template iterates.
        "active_ascii_strings": ascii_strings(active_data),
        "active_unicode_strings": unicode_strings(active_data),
        "slack_ascii_strings": ascii_strings(slack_data),
        "slack_unicode_strings": unicode_strings(slack_data),
    }
    if not record.is_directory():
        # Prefer the data attribute's size; fall back to $FN's logical size.
        data_attr = record.data_attribute()
        if data_attr and data_attr.non_resident() > 0:
            model["size"] = data_attr.data_size()
        elif record.filename_information() is not None:
            model["size"] = record.filename_information().logical_size()
        else:
            model["size"] = 0
    for b in record.attributes():
        if b.type() != ATTR_TYPE.FILENAME_INFORMATION:
            continue
        attr = FilenameAttribute(b.value(), 0, record)
        model["filenames"].append(make_filename_information_model(attr))
    for b in record.attributes():
        model["attributes"].append(make_attribute_model(b))
    indxroot = record.attribute(ATTR_TYPE.INDEX_ROOT)
    if indxroot and indxroot.non_resident() == 0:
        # Resident INDEX_ROOT: collect both live and slack directory entries.
        irh = IndexRootHeader(indxroot.value(), 0, False)
        for e in irh.node_header().entries():
            m = make_filename_information_model(e.filename_information())
            m["inode"] = MREF(e.mft_reference())
            m["sequence_num"] = MSEQNO(e.mft_reference())
            model["indx_entries"].append(m)
        for e in irh.node_header().slack_entries():
            m = make_filename_information_model(e.filename_information())
            m["inode"] = MREF(e.mft_reference())
            m["sequence_num"] = MSEQNO(e.mft_reference())
            model["slack_indx_entries"].append(m)
    return model
def format_record(record, path):
    """Render the full text report for *record* via the inline Jinja2 template.

    The template text is flush-left on purpose: its whitespace is emitted
    verbatim in the report.
    """
    template = Template(
        """\
MFT Record: {{ record.inode }}
Path: {{ record.path }}
Metadata:
Active: {{ record.is_active }}
{% if record.is_directory %}\
Type: directory\
{% else %}\
Type: file\
{% endif %}
Flags: {{ record.standard_information.flags|join(', ') }}
$SI Modified: {{ record.standard_information.modified }}
$SI Accessed: {{ record.standard_information.accessed }}
$SI Changed: {{ record.standard_information.changed }}
$SI Birthed: {{ record.standard_information.created }}
Owner ID: {{ record.standard_information.owner_id }}
Security ID: {{ record.standard_information.security_id }}
Quota charged: {{ record.standard_information.quota_charged }}
USN: {{ record.standard_information.usn }}
Filenames: \
{% for filename in record.filenames %}
Type: {{ filename.type }}
Name: {{ filename.name }}
Flags: {{ filename.flags|join(', ') }}
Logical size: {{ filename.logical_size }}
Physical size: {{ filename.physical_size }}
Modified: {{ filename.modified }}
Accessed: {{ filename.accessed }}
Changed: {{ filename.changed }}
Birthed: {{ filename.created }}
Parent reference: {{ filename.parent_ref }}
Parent sequence number: {{ filename.parent_seq }}\
{% endfor %}
Attributes: \
{% for attribute in record.attributes %}
Type: {{ attribute.type }}
Name: {{ attribute.name }}
Flags: {{ attribute.flags|join(', ') }}
Resident: {{ attribute.is_resident }}
Data size: {{ attribute.data_size }}
Allocated size: {{ attribute.allocated_size }}
Value size: {{ attribute.value_size }} \
{% if attribute.runs %}
Data runs: {% for run in attribute.runs %}
Offset (clusters): {{ run.offset }} Length (clusters): {{ run.length }} \
{% endfor %}\
{% endif %}\
{% endfor %}
INDX root entries:\
{% if not record.indx_entries %}\
<none>\
{% endif %}\
{% for indx in record.indx_entries %}
Name: {{ indx.filename }}
Size: {{ indx.size }}
Modified: {{ indx.modified }}
Accessed: {{ indx.accessed }}
Changed: {{ indx.changed }}
Birthed: {{ indx.created }}
Reference: {{ indx.inode }}
Sequence number: {{ indx.sequence_num }}\
{% endfor %}
INDX root slack entries:\
{% if not record.slack_indx_entries %}\
<none>\
{% endif %}\
{% for indx in record.slack_indx_entries %}
Name: {{ indx.filename }}
Size: {{ indx.size }}
Modified: {{ indx.modified }}
Accessed: {{ indx.accessed }}
Changed: {{ indx.changed }}
Birthed: {{ indx.created }}
Reference: {{ indx.inode }}
Sequence number: {{ indx.sequence_num }}\
{% endfor %}
Timeline:
{% for entry in record.timeline %}\
{{ "%-30s%-12s%-8s%s"|format(entry.timestamp, entry.type, entry.source, entry.path) }}
{% endfor %}\
Active strings:
ASCII strings:
{% for string in record.active_ascii_strings %}\
{{ string }}
{% endfor %}\
Unicode strings:
{% for string in record.active_unicode_strings %}\
{{ string }}
{% endfor %}\
Slack strings:
ASCII strings:
{% for string in record.slack_ascii_strings %}\
{{ string }}
{% endfor %}\
Unicode strings:
{% for string in record.slack_unicode_strings %}\
{{ string }}
{% endfor %}\
""")
    return template.render(record=make_model(record, path))
def print_indx_info(record, path):
    """Write the rendered record report to stdout (Python 2 print statement)."""
    print format_record(record, path)
def main():
    """Parse command-line arguments and inspect one MFT record by inode or path."""
    parser = argparse.ArgumentParser(description='Inspect '
                                     'a given MFT file record.')
    parser.add_argument('-a', action="store", metavar="cache_size", type=int,
                        dest="cache_size", default=1024,
                        help="Size of cache.")
    # BUG FIX: nargs=1 made argparse store a one-element list when -p was
    # given, which broke the later string concatenation with results.prefix;
    # a plain scalar option matches the string default.
    parser.add_argument('-p', action="store", metavar="prefix",
                        dest="prefix", default="\\.",
                        help="Prefix paths with `prefix` rather than \\.\\")
    parser.add_argument('-v', action="store_true", dest="verbose",
                        help="Print debugging information")
    parser.add_argument('mft', action="store",
                        help="Path to MFT")
    parser.add_argument('record_or_path', action="store",
                        help="MFT record or file path to inspect")
    results = parser.parse_args()
    if results.verbose:
        logging.basicConfig(level=logging.DEBUG)
    with Mmap(results.mft) as buf:
        record_cache = Cache(results.cache_size)
        path_cache = Cache(results.cache_size)
        enum = MFTEnumerator(buf,
                             record_cache=record_cache,
                             path_cache=path_cache)
        # A numeric argument addresses the record by inode; otherwise it is
        # treated as a file path inside the volume.
        try:
            record_num = int(results.record_or_path)
            should_use_inode = True
        except ValueError:
            should_use_inode = False
        if should_use_inode:
            record = enum.get_record(record_num)
            path = results.prefix + enum.get_path(record)
            print_indx_info(record, path)
        else:
            path = results.record_or_path
            record = enum.get_record_by_path(path)
            print_indx_info(record, results.prefix + path)
# Script entry point.
if __name__ == "__main__":
    main()
Other attribute types may have timestamps even if $SI is None.
If you return right away when si is None, you lose the ability to create the timeline at the end.
#! /usr/bin/env python
from MFT import MFTEnumerator
import array
import re
import logging
import datetime
import argparse
from jinja2 import Template
from BinaryParser import Mmap
from MFT import Cache
from MFT import ATTR_TYPE
from MFT import MREF
from MFT import MSEQNO
from MFT import IndexRootHeader
from MFT import Attribute
from MFT import FilenameAttribute
from MFT import StandardInformationFieldDoesNotExist
# Characters considered "printable ASCII" when scanning for strings.
# Regex metacharacters are escaped so the whole set can be embedded
# directly inside a regex character class (see ascii_strings/unicode_strings).
ASCII_BYTE = " !\"#\$%&\'\(\)\*\+,-\./0123456789:;<=>\?@ABCDEFGHIJKLMNOPQRSTUVWXYZ\[\]\^_`abcdefghijklmnopqrstuvwxyz\{\|\}\\\~"
def ascii_strings(buf, n=4):
    """Yield printable-ASCII runs of at least `n` characters found in `buf`."""
    pattern = re.compile("([%s]{%d,})" % (ASCII_BYTE, n))
    for match in pattern.finditer(buf):
        group = match.group()
        # Mmap-backed buffers may hand back array.array slices; convert first.
        if isinstance(group, array.array):
            yield group.tostring().decode("ascii")
        else:
            yield group.decode("ascii")
def unicode_strings(buf, n=4):
    """Yield UTF-16LE strings of at least `n` characters found in `buf`.

    Matches runs of printable-ASCII characters each followed by a NUL byte,
    i.e. the little-endian UTF-16 encoding of ASCII text.
    """
    # BUG FIX: the minimum run length was hard-coded to 4, silently ignoring
    # the caller's `n` (compare ascii_strings, which does use it).
    reg = b"((?:[%s]\x00){%d,})" % (ASCII_BYTE, n)
    ascii_re = re.compile(reg)
    for match in ascii_re.finditer(buf):
        try:
            if isinstance(match.group(), array.array):
                yield match.group().tostring().decode("utf-16")
            else:
                yield match.group().decode("utf-16")
        except UnicodeDecodeError:
            # Skip byte runs that only looked like UTF-16 text.
            pass
def get_flags(flags):
    """Return the readable names of every attribute flag set in `flags`."""
    return [name for flag, name in Attribute.FLAGS.items() if flags & flag]
def create_safe_datetime(fn):
    """Invoke `fn` and return its datetime, substituting the Unix epoch
    when the stored timestamp is invalid (ValueError)."""
    try:
        result = fn()
    except ValueError:
        return datetime.datetime(1970, 1, 1, 0, 0, 0)
    return result
def create_safe_timeline_entry(fn, type_, source, path):
    """Build one timeline entry dict, substituting the epoch for invalid timestamps."""
    entry = {}
    entry["timestamp"] = create_safe_datetime(fn)
    entry["type"] = type_
    entry["source"] = source
    entry["path"] = path
    return entry
def create_safe_timeline_entries(attr, source, path):
    """Build the four standard timeline entries (birth/access/modify/change) for `attr`."""
    timestamp_kinds = [
        (attr.created_time, "birthed"),
        (attr.accessed_time, "accessed"),
        (attr.modified_time, "modified"),
        (attr.changed_time, "changed"),
    ]
    return [create_safe_timeline_entry(getter, kind, source, path)
            for getter, kind in timestamp_kinds]
def get_timeline_entries(record):
    """Collect all timestamp entries for `record` ($SI, $FN, INDX root),
    sorted by timestamp.

    Entries from other attribute types are gathered even when
    $STANDARD_INFORMATION is missing, so the timeline can still be built.
    """
    entries = []
    si = record.standard_information()
    fn = record.filename_information()
    # BUG FIX: a record may lack a $FILE_NAME attribute entirely; fall back
    # to an empty path instead of crashing on None.filename().
    filename = fn.filename() if fn else ""
    if si:
        entries.extend(create_safe_timeline_entries(si, "$SI", filename))

    # One $FN entry set per FILENAME_INFORMATION attribute on the record.
    for b in record.attributes():
        if b.type() != ATTR_TYPE.FILENAME_INFORMATION:
            continue
        attr = FilenameAttribute(b.value(), 0, record)
        attr_filename = attr.filename()
        entries.extend(create_safe_timeline_entries(attr, "$FN", attr_filename))

    # Resident INDX root: both allocated and slack directory entries.
    indxroot = record.attribute(ATTR_TYPE.INDEX_ROOT)
    if indxroot and indxroot.non_resident() == 0:
        irh = IndexRootHeader(indxroot.value(), 0, False)
        for e in irh.node_header().entries():
            fn = e.filename_information()
            fn_filename = fn.filename()
            entries.extend(create_safe_timeline_entries(fn, "INDX", fn_filename))
        for e in irh.node_header().slack_entries():
            fn = e.filename_information()
            fn_filename = fn.filename()
            entries.extend(create_safe_timeline_entries(fn, "slack-INDX", fn_filename))

    return sorted(entries, key=lambda x: x["timestamp"])
def make_filename_information_model(attr):
    """Build a plain-dict view of a $FILE_NAME attribute, or None when absent."""
    if attr is None:
        return None
    return {
        # filename_type() indexes this list: 0=POSIX, 1=WIN32, 2=DOS 8.3, 3=both.
        "type": ["POSIX", "WIN32", "DOS 8.3", "WIN32 + DOS 8.3"][attr.filename_type()],
        "name": attr.filename(),
        "flags": get_flags(attr.flags()),
        "logical_size": attr.logical_size(),
        "physical_size": attr.physical_size(),
        # Timestamps fall back to the Unix epoch when the on-disk value is invalid.
        "modified": create_safe_datetime(attr.modified_time),
        "accessed": create_safe_datetime(attr.accessed_time),
        "changed": create_safe_datetime(attr.changed_time),
        "created": create_safe_datetime(attr.created_time),
        # Parent directory reference split into record number and sequence number.
        "parent_ref": MREF(attr.mft_parent_reference()),
        "parent_seq": MSEQNO(attr.mft_parent_reference()),
    }
def make_standard_information_model(attr):
    """Build a plain-dict view of a $STANDARD_INFORMATION attribute.

    Returns None when the attribute is absent.
    """
    if attr is None:
        return None
    ret = {
        "created": create_safe_datetime(attr.created_time),
        "modified": create_safe_datetime(attr.modified_time),
        "changed": create_safe_datetime(attr.changed_time),
        "accessed": create_safe_datetime(attr.accessed_time),
        "flags": get_flags(attr.attributes())
    }
    # The optional fields are laid out sequentially, so one try/except is
    # enough: once one field is missing, the remaining ones are too, and we
    # simply omit them from the model.
    try:
        ret["owner_id"] = attr.owner_id()
        ret["security_id"] = attr.security_id()
        ret["quota_charged"] = attr.quota_charged()
        ret["usn"] = attr.usn()
    except StandardInformationFieldDoesNotExist:
        pass
    return ret
def make_attribute_model(attr):
    """Build a plain-dict view of a generic MFT attribute."""
    model = {
        "type": Attribute.TYPES[attr.type()],
        "name": attr.name(),
        "flags": get_flags(attr.flags()),
        "is_resident": attr.non_resident() == 0,
        "data_size": 0,
        "allocated_size": 0,
        "value_size": 0,
        "runs": [],
    }
    if attr.non_resident() > 0:
        # Non-resident: sizes come from the attribute header, content from runs.
        model["data_size"] = attr.data_size()
        model["allocated_size"] = attr.allocated_size()
        if attr.allocated_size() > 0:
            model["runs"] = [{"offset": run_offset, "length": run_length}
                             for (run_offset, run_length) in attr.runlist().runs()]
    else:
        # Resident: the value lives inside the record itself.
        model["value_size"] = attr.value_length()
    return model
def make_model(record, path):
    """Build the full template model (plain dict) for an MFT record at `path`."""
    active_data = record.active_data()
    slack_data = record.slack_data()
    model = {
        "magic": record.magic(),
        "path": path,
        "inode": record.inode,
        "is_active": record.is_active(),
        "is_directory": record.is_directory(),
        "size": 0,  # updated below
        "standard_information": make_standard_information_model(record.standard_information()),
        "filename_information": make_filename_information_model(record.filename_information()),
        "owner_id": 0,  # updated below
        "security_id": 0,  # updated below
        "quota_charged": 0,  # updated below
        "usn": 0,  # updated below
        "filenames": [],
        "attributes": [],
        "indx_entries": [],
        "slack_indx_entries": [],
        "timeline": get_timeline_entries(record),
        # These four are generators; they are consumed (once) when the
        # template iterates over them in format_record().
        "active_ascii_strings": ascii_strings(active_data),
        "active_unicode_strings": unicode_strings(active_data),
        "slack_ascii_strings": ascii_strings(slack_data),
        "slack_unicode_strings": unicode_strings(slack_data),
    }
    if not record.is_directory():
        data_attr = record.data_attribute()
        if data_attr and data_attr.non_resident() > 0:
            # Non-resident $DATA: the authoritative size lives there.
            model["size"] = data_attr.data_size()
        elif record.filename_information() is not None:
            # Fall back to the logical size recorded in $FILE_NAME.
            model["size"] = record.filename_information().logical_size()
        else:
            model["size"] = 0
    # One entry per $FILE_NAME attribute (a record may carry several).
    for b in record.attributes():
        if b.type() != ATTR_TYPE.FILENAME_INFORMATION:
            continue
        attr = FilenameAttribute(b.value(), 0, record)
        model["filenames"].append(make_filename_information_model(attr))
    for b in record.attributes():
        model["attributes"].append(make_attribute_model(b))
    # Resident INDX root: collect allocated and slack directory entries,
    # augmenting each filename model with its MFT reference parts.
    indxroot = record.attribute(ATTR_TYPE.INDEX_ROOT)
    if indxroot and indxroot.non_resident() == 0:
        irh = IndexRootHeader(indxroot.value(), 0, False)
        for e in irh.node_header().entries():
            m = make_filename_information_model(e.filename_information())
            m["inode"] = MREF(e.mft_reference())
            m["sequence_num"] = MSEQNO(e.mft_reference())
            model["indx_entries"].append(m)
        for e in irh.node_header().slack_entries():
            m = make_filename_information_model(e.filename_information())
            m["inode"] = MREF(e.mft_reference())
            m["sequence_num"] = MSEQNO(e.mft_reference())
            model["slack_indx_entries"].append(m)
    return model
def format_record(record, path):
    """Render a human-readable report for `record` via a Jinja2 template.

    BUG FIX: the INDX sections referenced `indx.filename` and `indx.size`,
    which are not keys produced by make_filename_information_model (it emits
    `name` and `logical_size`), so those fields always rendered blank.
    They now reference the actual model keys.
    """
    template = Template(
"""\
MFT Record: {{ record.inode }}
Path: {{ record.path }}
Metadata:
Active: {{ record.is_active }}
{% if record.is_directory %}\
Type: directory\
{% else %}\
Type: file\
{% endif %}
Flags: {{ record.standard_information.flags|join(', ') }}
$SI Modified: {{ record.standard_information.modified }}
$SI Accessed: {{ record.standard_information.accessed }}
$SI Changed: {{ record.standard_information.changed }}
$SI Birthed: {{ record.standard_information.created }}
Owner ID: {{ record.standard_information.owner_id }}
Security ID: {{ record.standard_information.security_id }}
Quota charged: {{ record.standard_information.quota_charged }}
USN: {{ record.standard_information.usn }}
Filenames: \
{% for filename in record.filenames %}
Type: {{ filename.type }}
Name: {{ filename.name }}
Flags: {{ filename.flags|join(', ') }}
Logical size: {{ filename.logical_size }}
Physical size: {{ filename.physical_size }}
Modified: {{ filename.modified }}
Accessed: {{ filename.accessed }}
Changed: {{ filename.changed }}
Birthed: {{ filename.created }}
Parent reference: {{ filename.parent_ref }}
Parent sequence number: {{ filename.parent_seq }}\
{% endfor %}
Attributes: \
{% for attribute in record.attributes %}
Type: {{ attribute.type }}
Name: {{ attribute.name }}
Flags: {{ attribute.flags|join(', ') }}
Resident: {{ attribute.is_resident }}
Data size: {{ attribute.data_size }}
Allocated size: {{ attribute.allocated_size }}
Value size: {{ attribute.value_size }} \
{% if attribute.runs %}
Data runs: {% for run in attribute.runs %}
Offset (clusters): {{ run.offset }} Length (clusters): {{ run.length }} \
{% endfor %}\
{% endif %}\
{% endfor %}
INDX root entries:\
{% if not record.indx_entries %}\
<none>\
{% endif %}\
{% for indx in record.indx_entries %}
Name: {{ indx.name }}
Size: {{ indx.logical_size }}
Modified: {{ indx.modified }}
Accessed: {{ indx.accessed }}
Changed: {{ indx.changed }}
Birthed: {{ indx.created }}
Reference: {{ indx.inode }}
Sequence number: {{ indx.sequence_num }}\
{% endfor %}
INDX root slack entries:\
{% if not record.slack_indx_entries %}\
<none>\
{% endif %}\
{% for indx in record.slack_indx_entries %}
Name: {{ indx.name }}
Size: {{ indx.logical_size }}
Modified: {{ indx.modified }}
Accessed: {{ indx.accessed }}
Changed: {{ indx.changed }}
Birthed: {{ indx.created }}
Reference: {{ indx.inode }}
Sequence number: {{ indx.sequence_num }}\
{% endfor %}
Timeline:
{% for entry in record.timeline %}\
{{ "%-30s%-12s%-8s%s"|format(entry.timestamp, entry.type, entry.source, entry.path) }}
{% endfor %}\
Active strings:
ASCII strings:
{% for string in record.active_ascii_strings %}\
{{ string }}
{% endfor %}\
Unicode strings:
{% for string in record.active_unicode_strings %}\
{{ string }}
{% endfor %}\
Slack strings:
ASCII strings:
{% for string in record.slack_ascii_strings %}\
{{ string }}
{% endfor %}\
Unicode strings:
{% for string in record.slack_unicode_strings %}\
{{ string }}
{% endfor %}\
""")
    return template.render(record=make_model(record, path))
def print_indx_info(record, path):
    """Render `record` (located at `path`) and write the report to stdout."""
    report = format_record(record, path)
    print(report)
def main():
    """Parse CLI arguments and print details of the requested MFT record."""
    parser = argparse.ArgumentParser(description='Inspect '
                                     'a given MFT file record.')
    parser.add_argument('-a', action="store", metavar="cache_size", type=int,
                        dest="cache_size", default=1024,
                        help="Size of cache.")
    # BUG FIX: `nargs=1` made argparse store a one-element *list* when -p was
    # supplied (while the default stayed a string), so the
    # `results.prefix + path` concatenations below raised TypeError.
    parser.add_argument('-p', action="store", metavar="prefix",
                        dest="prefix", default="\\.",
                        help="Prefix paths with `prefix` rather than \\.\\")
    parser.add_argument('-v', action="store_true", dest="verbose",
                        help="Print debugging information")
    parser.add_argument('mft', action="store",
                        help="Path to MFT")
    parser.add_argument('record_or_path', action="store",
                        help="MFT record or file path to inspect")
    results = parser.parse_args()

    if results.verbose:
        logging.basicConfig(level=logging.DEBUG)

    with Mmap(results.mft) as buf:
        record_cache = Cache(results.cache_size)
        path_cache = Cache(results.cache_size)
        enum = MFTEnumerator(buf,
                             record_cache=record_cache,
                             path_cache=path_cache)

        # If the positional argument parses as an integer, treat it as an
        # inode/record number; otherwise treat it as a file path.
        should_use_inode = False
        try:
            record_num = int(results.record_or_path)
            should_use_inode = True
        except ValueError:
            should_use_inode = False

        if should_use_inode:
            record = enum.get_record(record_num)
            path = results.prefix + enum.get_path(record)
            print_indx_info(record, path)
        else:
            path = results.record_or_path
            record = enum.get_record_by_path(path)
            print_indx_info(record, results.prefix + path)
# Script entry point: only run the CLI when executed directly, not on import.
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
#coding: utf-8
import json
import os
import difflib
import subfield
from datetime import date
from django.core.management import setup_environ
from django.core import exceptions
try:
from scielomanager import settings
except ImportError:
BASE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__),'..'))
from sys import path
path.append(BASE_PATH)
import settings
setup_environ(settings)
from journalmanager.models import *
class JournalImport:
    # One-shot importer that loads journals, publishers and sponsors from a
    # JSON dump into the journalmanager models.

    def __init__(self):
        self._publishers_pool = []  # [{"id": ..., "match_string": ...}] of publishers seen so far
        self._sponsors_pool = []    # [{"id": ..., "match_string": ...}] of sponsors seen so far
        self._summary = {}          # counter name -> occurrences; see charge_summary()
def iso_format(self, dates, string='-'):
day = dates[6:8]
if day == "00":
day = "01"
month = dates[4:6]
if month == "00":
month = "01"
dateformated = "%s-%s-%s" % (dates[0:4],month,day)
return dateformated
def charge_summary(self, attribute):
"""
Function: charge_summary
Carrega com +1 cada atributo passado para o metodo, se o attributo nao existir ele e criado.
"""
if not self._summary.has_key(attribute):
self._summary[attribute] = 0
self._summary[attribute] += 1
def have_similar_publishers(self, match_string):
"""
Function: have_similar_publishers
Identifica se existe instituicao ja registrada com o mesmo nome, com o objetivo de filtrar
instituticoes duplicadas.
Retorna o id da instituicao se houver uma cadastrada com o mesmo nome, caso contrario Retorna
False.
"""
publisher_id=""
if len(self._publishers_pool) > 0:
for inst in self._publishers_pool:
if inst["match_string"] == match_string:
publisher_id = inst["id"]
break
else:
publisher_id = False
else:
publisher_id = False
return publisher_id
def have_similar_sponsors(self, match_string):
"""
Function: have_similar_sponsors
Identifica se existe instituicao ja registrada com o mesmo nome, com o objetivo de filtrar
instituticoes duplicadas.
Retorna o id da instituicao se houver uma cadastrada com o mesmo nome, caso contrario Retorna
False.
"""
sponsor_id=""
if len(self._sponsors_pool) > 0:
for inst in self._sponsors_pool:
if inst["match_string"] == match_string:
sponsor_id = inst["id"]
break
else:
sponsor_id = False
else:
sponsor_id = False
return sponsor_id
def load_publisher(self, collection, record):
    """Save (or merge into an existing) Publisher from `record`.

    Returns a one-element list with the Publisher, or [] when the record
    has no publisher field (tag 480).
    """
    if '480' not in record:
        return []

    publisher = Publisher()
    publisher.name = record['480'][0]
    publisher.address = " ".join(record['63'])

    match_string = publisher.name
    similar_key = self.have_similar_publishers(match_string)
    # `is not False` instead of `!= False`: the pool stores ids, and an id
    # of 0 would compare equal to False.
    if similar_key is not False:
        # Duplicate name: append the address to the already-saved publisher.
        loaded_publisher = Publisher.objects.get(id=similar_key)
        loaded_publisher.address += "\n" + publisher.address
        loaded_publisher.save()
        self.charge_summary("publishers_duplication_fix")
    else:
        publisher.save(force_insert=True)
        publisher.collections.add(collection)
        self.charge_summary("publishers")
        loaded_publisher = publisher
    # NOTE(review): on the duplicate path `publisher.id` is None (unsaved);
    # kept as-is because lookups stop at the first (valid) pool entry.
    self._publishers_pool.append({"id": publisher.id, "match_string": match_string})
    return [loaded_publisher]
def load_sponsor(self, collection, record):
    """Save (or reuse an existing) Sponsor from `record`.

    Returns a one-element list with the Sponsor, or [] when the record has
    no sponsor field (tag 140).
    """
    if '140' not in record:
        return []

    sponsor = Sponsor()
    sponsor.name = record['140'][0]
    match_string = sponsor.name.strip()
    similar_key = self.have_similar_sponsors(match_string)
    # `is not False` instead of `!= False`: the pool stores ids, and an id
    # of 0 would compare equal to False.
    if similar_key is not False:
        loaded_sponsor = Sponsor.objects.get(id=similar_key)
        self.charge_summary("sponsors_duplication_fix")
    else:
        sponsor.save(force_insert=True)
        sponsor.collections.add(collection)
        self.charge_summary("sponsors")
        loaded_sponsor = sponsor
    # NOTE(review): on the duplicate path `sponsor.id` is None (unsaved);
    # kept as-is because lookups stop at the first (valid) pool entry.
    self._sponsors_pool.append({"id": sponsor.id, "match_string": match_string.strip()})
    return [loaded_sponsor]
def load_studyarea(self, journal, areas):
    """Attach one JournalStudyArea per area name to `journal`."""
    for area_name in areas:
        area = JournalStudyArea()
        area.study_area = area_name
        journal.journalstudyarea_set.add(area)
        self.charge_summary("studyarea")
def load_textlanguage(self, journal, langs):
    """Link each ISO language code in `langs` to `journal`, creating the
    Language rows on demand."""
    from sectionimport import LANG_DICT as lang_dict
    for code in langs:
        language, _created = Language.objects.get_or_create(
            iso_code=code, name=lang_dict.get(code, '###NOT FOUND###'))
        journal.languages.add(language)
        self.charge_summary("language_%s" % code)
def load_mission(self, journal, missions):
    """Attach a JournalMission (description plus optional language) for
    every mission field in `missions`."""
    from sectionimport import LANG_DICT as lang_dict
    for i in missions:
        parsed_subfields = subfield.CompositeField(subfield.expand(i))
        mission = JournalMission()
        try:
            language = Language.objects.get_or_create(
                iso_code=parsed_subfields['l'],
                name=lang_dict.get(parsed_subfields['l'], '###NOT FOUND###'))[0]
            mission.language = language
        except Exception:
            # Best effort: a mission without a resolvable language ('l'
            # subfield) is still imported. Narrowed from a bare `except:`
            # so KeyboardInterrupt/SystemExit are no longer swallowed.
            pass
        mission.description = parsed_subfields['_']
        journal.journalmission_set.add(mission)
        self.charge_summary("mission")
def load_historic(self, journal, historicals):
    """Create JournalPublicationEvents from the historical status fields.

    Returns False as soon as one event fails validation, True otherwise.
    """
    # ISIS status code -> publication status.
    # BUG FIX: 's' was misspelled 'duspended' (the newer copy of this
    # importer spells it 'suspended'), so suspended journals always fell
    # back to 'inprogress' or failed validation.
    trans_pub_status = {'c': 'current',
                        'd': 'deceased',
                        's': 'suspended',
                        '?': 'inprogress',
                        }
    lifecycles = {}
    for i in historicals:
        expanded = subfield.expand(i)
        parsed_subfields = dict(expanded)
        # Subfields a/b and c/d each hold a (date, status) pair; either may
        # be absent, which only bumps an error counter.
        try:
            lifecycles[self.iso_format(parsed_subfields['a'])] = parsed_subfields['b']
        except KeyError:
            self.charge_summary("history_error_field")
        try:
            lifecycles[self.iso_format(parsed_subfields['c'])] = parsed_subfields['d']
        except KeyError:
            self.charge_summary("history_error_field")
    for cyclekey, cyclevalue in iter(sorted(lifecycles.iteritems())):
        try:
            journalhist = JournalPublicationEvents()
            journalhist.created_at = cyclekey
            journalhist.status = trans_pub_status.get(cyclevalue.lower(), 'inprogress')
            journalhist.journal = journal
            journalhist.changed_by_id = 1
            journalhist.save()
            # Save twice: the first save() auto-fills created_at, so
            # overwrite with the real historical date and save again.
            journalhist.created_at = cyclekey
            journalhist.save()
            self.charge_summary("publication_events")
        except exceptions.ValidationError:
            self.charge_summary("publications_events_error_data")
            return False
    return True
def load_title(self, journal, titles, category):
    """Attach one JournalTitle of `category` for every title string."""
    for title_text in titles:
        title = JournalTitle()
        title.title = title_text
        title.category = category
        journal.journaltitle_set.add(title)
        self.charge_summary("title")
def load_use_license(self):
    """Fetch (or create) the placeholder use license applied to every journal."""
    license_obj, _created = UseLicense.objects.get_or_create(license_code='###PLACEBO###')
    return license_obj
def load_journal(self, collection, loaded_publisher, loaded_sponsor, record):
    """Create and save a Journal from `record`, linking its publisher,
    sponsor, languages, study areas, mission, history and titles.

    Returns the saved Journal instance.
    """
    issn_type = ""
    print_issn = ""
    electronic_issn = ""
    use_license = self.load_use_license()
    journal = Journal()

    # Field 35 says which kind of ISSN field 935 holds; field 400 carries
    # the other ISSN when the two differ.
    if record['35'][0] == "PRINT":
        issn_type = "print"
        print_issn = record['935'][0]
        if record['935'][0] != record['400'][0]:
            electronic_issn = record['400'][0]
    else:
        issn_type = "electronic"
        electronic_issn = record['935'][0]
        if record['935'][0] != record['400'][0]:
            print_issn = record['400'][0]

    journal.title = record['100'][0]
    journal.short_title = record['150'][0]
    journal.acronym = record['930'][0]
    journal.scielo_issn = issn_type
    journal.print_issn = print_issn
    journal.eletronic_issn = electronic_issn
    journal.use_license = use_license
    journal.subject_descriptors = '\n'.join(record['440']).lower()
    if '450' in record:
        journal.index_coverage = '\n'.join(record['450']).lower()

    # Optional single-value fields: (record tag, Journal attribute).
    # Replaces a long has_key if-chain; assignment order is preserved.
    optional_fields = [
        ('301', 'init_year'),
        ('302', 'init_vol'),
        ('303', 'init_num'),
        ('304', 'final_year'),
        ('305', 'final_vol'),
        ('306', 'final_num'),
        ('380', 'frequency'),
        ('50', 'pub_status'),
        ('340', 'alphabet'),
        ('430', 'classification'),
        ('20', 'national_code'),
        ('117', 'editorial_standard'),
        ('85', 'ctrl_vocabulary'),
        ('5', 'literature_type'),
        ('6', 'treatment_level'),
        ('330', 'pub_level'),
        ('37', 'secs_code'),
    ]
    for tag, attr_name in optional_fields:
        if tag in record:
            setattr(journal, attr_name, record[tag][0])

    journal.pub_status_changed_by_id = 1
    journal.creator_id = 1
    journal.save(force_insert=True)
    journal.collections.add(collection)
    self.charge_summary("journals")

    # NOTE(review): loaded_publisher/loaded_sponsor are the *lists* returned
    # by load_publisher/load_sponsor ([] or [obj]); assigned as-is, assuming
    # the related descriptors accept a list -- verify against the models.
    journal.publisher = loaded_publisher
    journal.sponsor = loaded_sponsor

    if '350' in record:
        self.load_textlanguage(journal, record['350'])
    if '441' in record:
        self.load_studyarea(journal, record['441'])
    if '901' in record:
        self.load_mission(journal, record['901'])
    # historic - JournalPublicationEvents
    if '51' in record:
        self.load_historic(journal, record['51'])
    # Titles: alternates (421/150/151) and parallel titles (230).
    if '421' in record:
        self.load_title(journal, record['421'], 'other')
    if '150' in record:
        self.load_title(journal, record['150'], 'other')
    if '151' in record:
        self.load_title(journal, record['151'], 'other')
    if '230' in record:
        self.load_title(journal, record['230'], 'paralleltitle')
    return journal
def run_import(self, json_file, collection):
    """Import every record from the JSON dump at path `json_file` into
    `collection`."""
    # BUG FIX: the file handle was opened and never closed (and shadowed the
    # `json_file` path argument); use a context manager and json.load.
    with open(json_file, 'r') as fp:
        json_parsed = json.load(fp)

    for record in json_parsed:
        loaded_publisher = self.load_publisher(collection, record)
        loaded_sponsor = self.load_sponsor(collection, record)
        self.load_journal(collection, loaded_publisher, loaded_sponsor, record)

    # Cleaning data: drop publication events stamped with the current
    # month/year (the auto-filled dates from the first save in
    # load_historic), keeping only the back-dated historical ones.
    # NOTE(review): this also removes legitimate events genuinely dated in
    # the current month -- confirm this is acceptable.
    JournalPublicationEvents.objects.filter(
        created_at__month=date.today().month,
        created_at__year=date.today().year).delete()
def get_summary(self):
"""
Function: get_summary
Retorna o resumo de carga de registros
"""
return self._summary
tk261
#!/usr/bin/env python
#coding: utf-8
import json
import os
import difflib
import subfield
from datetime import date
from django.core.management import setup_environ
from django.core import exceptions
try:
from scielomanager import settings
except ImportError:
BASE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__),'..'))
from sys import path
path.append(BASE_PATH)
import settings
setup_environ(settings)
from journalmanager.models import *
class JournalImport:
    # One-shot importer that loads journals, publishers and sponsors from a
    # JSON dump into the journalmanager models.

    def __init__(self):
        self._publishers_pool = []  # [{"id": ..., "match_string": ...}] of publishers seen so far
        self._sponsors_pool = []    # [{"id": ..., "match_string": ...}] of sponsors seen so far
        self._summary = {}          # counter name -> occurrences; see charge_summary()
        # ISIS status code -> journal publication status (shared by
        # load_historic and load_journal).
        self.trans_pub_status = {'c':'current',
                                 'd':'deceased',
                                 's':'suspended',
                                 '?':'inprogress',
                                 }
def iso_format(self, dates, string='-'):
day = dates[6:8]
if day == "00":
day = "01"
month = dates[4:6]
if month == "00":
month = "01"
dateformated = "%s-%s-%s" % (dates[0:4],month,day)
return dateformated
def charge_summary(self, attribute):
"""
Function: charge_summary
Carrega com +1 cada atributo passado para o metodo, se o attributo nao existir ele e criado.
"""
if not self._summary.has_key(attribute):
self._summary[attribute] = 0
self._summary[attribute] += 1
def have_similar_publishers(self, match_string):
"""
Function: have_similar_publishers
Identifica se existe instituicao ja registrada com o mesmo nome, com o objetivo de filtrar
instituticoes duplicadas.
Retorna o id da instituicao se houver uma cadastrada com o mesmo nome, caso contrario Retorna
False.
"""
publisher_id=""
if len(self._publishers_pool) > 0:
for inst in self._publishers_pool:
if inst["match_string"] == match_string:
publisher_id = inst["id"]
break
else:
publisher_id = False
else:
publisher_id = False
return publisher_id
def have_similar_sponsors(self, match_string):
"""
Function: have_similar_sponsors
Identifica se existe instituicao ja registrada com o mesmo nome, com o objetivo de filtrar
instituticoes duplicadas.
Retorna o id da instituicao se houver uma cadastrada com o mesmo nome, caso contrario Retorna
False.
"""
sponsor_id=""
if len(self._sponsors_pool) > 0:
for inst in self._sponsors_pool:
if inst["match_string"] == match_string:
sponsor_id = inst["id"]
break
else:
sponsor_id = False
else:
sponsor_id = False
return sponsor_id
def load_publisher(self, collection, record):
    """Save (or merge into an existing) Publisher from `record`.

    Returns a one-element list with the Publisher, or [] when the record
    has no publisher field (tag 480).
    """
    if '480' not in record:
        return []

    publisher = Publisher()
    publisher.name = record['480'][0]
    publisher.address = " ".join(record['63'])

    match_string = publisher.name
    similar_key = self.have_similar_publishers(match_string)
    # `is not False` instead of `!= False`: the pool stores ids, and an id
    # of 0 would compare equal to False.
    if similar_key is not False:
        # Duplicate name: append the address to the already-saved publisher.
        loaded_publisher = Publisher.objects.get(id=similar_key)
        loaded_publisher.address += "\n" + publisher.address
        loaded_publisher.save()
        self.charge_summary("publishers_duplication_fix")
    else:
        publisher.save(force_insert=True)
        publisher.collections.add(collection)
        self.charge_summary("publishers")
        loaded_publisher = publisher
    # NOTE(review): on the duplicate path `publisher.id` is None (unsaved);
    # kept as-is because lookups stop at the first (valid) pool entry.
    self._publishers_pool.append({"id": publisher.id, "match_string": match_string})
    return [loaded_publisher]
def load_sponsor(self, collection, record):
    """Save (or reuse an existing) Sponsor from `record`.

    Returns a one-element list with the Sponsor, or [] when the record has
    no sponsor field (tag 140).
    """
    if '140' not in record:
        return []

    sponsor = Sponsor()
    sponsor.name = record['140'][0]
    match_string = sponsor.name.strip()
    similar_key = self.have_similar_sponsors(match_string)
    # `is not False` instead of `!= False`: the pool stores ids, and an id
    # of 0 would compare equal to False.
    if similar_key is not False:
        loaded_sponsor = Sponsor.objects.get(id=similar_key)
        self.charge_summary("sponsors_duplication_fix")
    else:
        sponsor.save(force_insert=True)
        sponsor.collections.add(collection)
        self.charge_summary("sponsors")
        loaded_sponsor = sponsor
    # NOTE(review): on the duplicate path `sponsor.id` is None (unsaved);
    # kept as-is because lookups stop at the first (valid) pool entry.
    self._sponsors_pool.append({"id": sponsor.id, "match_string": match_string.strip()})
    return [loaded_sponsor]
def load_studyarea(self, journal, areas):
    """Attach one JournalStudyArea per area name to `journal`."""
    for area_name in areas:
        area = JournalStudyArea()
        area.study_area = area_name
        journal.journalstudyarea_set.add(area)
        self.charge_summary("studyarea")
def load_textlanguage(self, journal, langs):
    """Link each ISO language code in `langs` to `journal`, creating the
    Language rows on demand."""
    from sectionimport import LANG_DICT as lang_dict
    for code in langs:
        language, _created = Language.objects.get_or_create(
            iso_code=code, name=lang_dict.get(code, '###NOT FOUND###'))
        journal.languages.add(language)
        self.charge_summary("language_%s" % code)
def load_mission(self, journal, missions):
    """Attach a JournalMission (description plus optional language) for
    every mission field in `missions`."""
    from sectionimport import LANG_DICT as lang_dict
    for i in missions:
        parsed_subfields = subfield.CompositeField(subfield.expand(i))
        mission = JournalMission()
        try:
            language = Language.objects.get_or_create(
                iso_code=parsed_subfields['l'],
                name=lang_dict.get(parsed_subfields['l'], '###NOT FOUND###'))[0]
            mission.language = language
        except Exception:
            # Best effort: a mission without a resolvable language ('l'
            # subfield) is still imported. Narrowed from a bare `except:`
            # so KeyboardInterrupt/SystemExit are no longer swallowed.
            pass
        mission.description = parsed_subfields['_']
        journal.journalmission_set.add(mission)
        self.charge_summary("mission")
def load_historic(self, journal, historicals):
    """Create JournalPublicationEvents from the historical status fields.

    Returns False as soon as one event fails validation, True otherwise.
    (Removed the unused `import operator`.)
    """
    lifecycles = {}
    for i in historicals:
        expanded = subfield.expand(i)
        parsed_subfields = dict(expanded)
        # Subfields a/b and c/d each hold a (date, status) pair; either may
        # be absent, which only bumps an error counter.
        try:
            lifecycles[self.iso_format(parsed_subfields['a'])] = parsed_subfields['b']
        except KeyError:
            self.charge_summary("history_error_field")
        try:
            lifecycles[self.iso_format(parsed_subfields['c'])] = parsed_subfields['d']
        except KeyError:
            self.charge_summary("history_error_field")
    for cyclekey, cyclevalue in iter(sorted(lifecycles.iteritems())):
        try:
            journalhist = JournalPublicationEvents()
            journalhist.created_at = cyclekey
            journalhist.status = self.trans_pub_status.get(cyclevalue.lower(), 'inprogress')
            journalhist.journal = journal
            journalhist.changed_by_id = 1
            journalhist.save()
            # Save twice: the first save() auto-fills created_at, so
            # overwrite with the real historical date and save again.
            journalhist.created_at = cyclekey
            journalhist.save()
            self.charge_summary("publication_events")
        except exceptions.ValidationError:
            self.charge_summary("publications_events_error_data")
            return False
    return True
def get_last_status(self, historicals):
    """Return the status code of the chronologically last lifecycle entry
    found in the historical status fields.

    (Removed the unused `import operator`.)
    NOTE(review): raises IndexError when no entry parses at all -- confirm
    callers only pass records that contain field 51.
    """
    lifecycles = {}
    for i in historicals:
        expanded = subfield.expand(i)
        parsed_subfields = dict(expanded)
        try:
            lifecycles[self.iso_format(parsed_subfields['a'])] = parsed_subfields['b']
        except KeyError:
            self.charge_summary("history_error_field")
        try:
            lifecycles[self.iso_format(parsed_subfields['c'])] = parsed_subfields['d']
        except KeyError:
            self.charge_summary("history_error_field")
    # Sorted by ISO date key; the last pair's value is the latest status.
    return sorted(lifecycles.iteritems())[-1][1]
def load_title(self, journal, titles, category):
    """Attach one JournalTitle of `category` for every title string."""
    for title_text in titles:
        title = JournalTitle()
        title.title = title_text
        title.category = category
        journal.journaltitle_set.add(title)
        self.charge_summary("title")
def load_use_license(self):
    """Fetch (or create) the placeholder use license applied to every journal."""
    license_obj, _created = UseLicense.objects.get_or_create(license_code='###PLACEBO###')
    return license_obj
def load_journal(self, collection, loaded_publisher, loaded_sponsor, record):
    """Create and save a Journal from `record`, linking its publisher,
    sponsor, languages, study areas, mission, history and titles.

    Returns the saved Journal instance. (Replaced Python-2-only
    record.has_key(...) checks with the `in` operator.)
    """
    issn_type = ""
    print_issn = ""
    electronic_issn = ""
    use_license = self.load_use_license()
    journal = Journal()

    # Field 35 says which kind of ISSN field 935 holds; field 400 carries
    # the other ISSN when the two differ.
    if record['35'][0] == "PRINT":
        issn_type = "print"
        print_issn = record['935'][0]
        if record['935'][0] != record['400'][0]:
            electronic_issn = record['400'][0]
    else:
        issn_type = "electronic"
        electronic_issn = record['935'][0]
        if record['935'][0] != record['400'][0]:
            print_issn = record['400'][0]

    journal.title = record['100'][0]
    journal.short_title = record['150'][0]
    journal.acronym = record['930'][0]
    journal.scielo_issn = issn_type
    journal.print_issn = print_issn
    journal.eletronic_issn = electronic_issn
    journal.use_license = use_license
    journal.subject_descriptors = '\n'.join(record['440']).lower()
    if '450' in record:
        journal.index_coverage = '\n'.join(record['450']).lower()
    # Text Language
    if '301' in record:
        journal.init_year = record['301'][0]
    if '302' in record:
        journal.init_vol = record['302'][0]
    if '303' in record:
        journal.init_num = record['303'][0]
    if '304' in record:
        journal.final_year = record['304'][0]
    if '305' in record:
        journal.final_vol = record['305'][0]
    if '306' in record:
        journal.final_num = record['306'][0]
    if '380' in record:
        journal.frequency = record['380'][0]
    # Current status = latest entry in the historical field 51.
    if '51' in record:
        journal.pub_status = self.trans_pub_status.get(
            self.get_last_status(record['51']).lower(), 'inprogress')
    if '340' in record:
        journal.alphabet = record['340'][0]
    if '430' in record:
        journal.classification = record['430'][0]
    if '20' in record:
        journal.national_code = record['20'][0]
    if '117' in record:
        journal.editorial_standard = record['117'][0]
    if '85' in record:
        journal.ctrl_vocabulary = record['85'][0]
    if '5' in record:
        journal.literature_type = record['5'][0]
    if '6' in record:
        journal.treatment_level = record['6'][0]
    if '330' in record:
        journal.pub_level = record['330'][0]
    if '37' in record:
        journal.secs_code = record['37'][0]

    journal.pub_status_changed_by_id = 1
    journal.creator_id = 1
    journal.save(force_insert=True)
    journal.collections.add(collection)
    self.charge_summary("journals")

    # NOTE(review): loaded_publisher/loaded_sponsor are the *lists* returned
    # by load_publisher/load_sponsor ([] or [obj]); assigned as-is, assuming
    # the related descriptors accept a list -- verify against the models.
    journal.publisher = loaded_publisher
    journal.sponsor = loaded_sponsor

    if '350' in record:
        self.load_textlanguage(journal, record['350'])
    if '441' in record:
        self.load_studyarea(journal, record['441'])
    if '901' in record:
        self.load_mission(journal, record['901'])
    # historic - JournalPublicationEvents
    if '51' in record:
        self.load_historic(journal, record['51'])
    # Titles: alternates (421/150/151) and parallel titles (230).
    if '421' in record:
        self.load_title(journal, record['421'], 'other')
    if '150' in record:
        self.load_title(journal, record['150'], 'other')
    if '151' in record:
        self.load_title(journal, record['151'], 'other')
    if '230' in record:
        self.load_title(journal, record['230'], 'paralleltitle')
    return journal
def run_import(self, json_file, collection):
    """Import every record from the JSON dump at path `json_file` into
    `collection`."""
    # BUG FIX: the file handle was opened and never closed (and shadowed the
    # `json_file` path argument); use a context manager and json.load.
    with open(json_file, 'r') as fp:
        json_parsed = json.load(fp)

    for record in json_parsed:
        loaded_publisher = self.load_publisher(collection, record)
        loaded_sponsor = self.load_sponsor(collection, record)
        self.load_journal(collection, loaded_publisher, loaded_sponsor, record)

    # Cleaning data: drop publication events stamped with the current
    # month/year (the auto-filled dates from the first save in
    # load_historic), keeping only the back-dated historical ones.
    # NOTE(review): this also removes legitimate events genuinely dated in
    # the current month -- confirm this is acceptable.
    JournalPublicationEvents.objects.filter(
        created_at__month=date.today().month,
        created_at__year=date.today().year).delete()
def get_summary(self):
"""
Function: get_summary
Retorna o resumo de carga de registros
"""
return self._summary
|
########
# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
import json
import os
import uuid
from fabric import api as fabric_api
from fabric import context_managers as fabric_context_managers
from influxdb import InfluxDBClient
import jinja2
import retrying
import sh
from cosmo_tester.framework import util
from cosmo_tester.framework import git_helper
REMOTE_PRIVATE_KEY_PATH = '/etc/cloudify/key.pem'
REMOTE_OPENSTACK_CONFIG_PATH = '/etc/cloudify/openstack_config.json'
MANAGER_BLUEPRINTS_REPO_URL = 'https://github.com/cloudify-cosmo/cloudify-manager-blueprints.git' # noqa
class _CloudifyManager(object):
    """A single provisioned Cloudify manager VM.

    Bundles the manager's public/private addresses, its REST client, and
    helpers for SSH access, CLI profile selection, service verification and
    VM deletion through OpenStack.
    """
    def __init__(self,
                 index,
                 public_ip_address,
                 private_ip_address,
                 rest_client,
                 ssh_key,
                 cfy,
                 attributes,
                 logger):
        self.index = index
        self.ip_address = public_ip_address
        self.private_ip_address = private_ip_address
        self.client = rest_client
        # Flipped to True by delete() once the VM is confirmed gone.
        self.deleted = False
        self._ssh_key = ssh_key
        self._cfy = cfy
        self._attributes = attributes
        self._logger = logger
        self._openstack = util.create_openstack_client()
        self.influxdb_client = InfluxDBClient(public_ip_address, 8086,
                                              'root', 'root', 'cloudify')
    @property
    def remote_private_key_path(self):
        """Returns the private key path on the manager."""
        return REMOTE_PRIVATE_KEY_PATH
    @contextmanager
    def ssh(self, **kwargs):
        """Context manager yielding fabric's api bound to this manager."""
        with fabric_context_managers.settings(
                host_string=self.ip_address,
                user=self._attributes.centos7_username,
                key_filename=self._ssh_key.private_key_path,
                **kwargs):
            yield fabric_api
    def __str__(self):
        return 'Cloudify manager [{}:{}]'.format(self.index, self.ip_address)
    @retrying.retry(stop_max_attempt_number=3, wait_fixed=3000)
    def use(self):
        """Point the local CLI profile at this manager (retried 3 times)."""
        self._cfy.profiles.use('{0} -u {1} -p {2} -t {3}'.format(
            self.ip_address,
            self._attributes.cloudify_username,
            self._attributes.cloudify_password,
            self._attributes.cloudify_tenant).split())
    @property
    def server_id(self):
        """Returns this server's Id from the terraform outputs."""
        key = 'server_id_{}'.format(self.index)
        return self._attributes[key]
    def delete(self):
        """Deletes this manager's VM from the OpenStack environment."""
        self._logger.info('Deleting server.. [id=%s]', self.server_id)
        self._openstack.compute.delete_server(self.server_id)
        self._wait_for_server_to_be_deleted()
        self.deleted = True
    @retrying.retry(stop_max_attempt_number=12, wait_fixed=5000)
    def _wait_for_server_to_be_deleted(self):
        """Poll (via retry) until the server no longer appears in OpenStack."""
        self._logger.info('Waiting for server to terminate..')
        servers = [x for x in self._openstack.compute.servers()
                   if x.id == self.server_id]
        if servers:
            self._logger.info('- server.status = %s', servers[0].status)
        # Retry decorator re-invokes on AssertionError until the list is empty.
        assert len(servers) == 0
        self._logger.info('Server terminated!')
    @retrying.retry(stop_max_attempt_number=24, wait_fixed=5000)
    def verify_services_are_running(self):
        """Assert every service instance reported by the manager is running."""
        self._logger.info('Verifying all services are running on manager%d..',
                          self.index)
        status = self.client.manager.get_status()
        for service in status['services']:
            for instance in service['instances']:
                assert instance['SubState'] == 'running', \
                    'service {0} is in {1} state'.format(
                        service['display_name'], instance['SubState'])
class _ManagerConfig(object):
def __init__(self):
self.image_name = None
self.upload_plugins = None
class CloudifyCluster(object):
    """Provisions a cluster of Cloudify manager VMs on OpenStack.

    Infrastructure is created with terraform. Concrete subclasses decide how
    each manager comes up: from a prebuilt image (ImageBasedCloudifyCluster)
    or by bootstrapping the simple manager blueprint
    (BootstrapBasedCloudifyCluster).
    """
    __metaclass__ = ABCMeta
    def __init__(self,
                 cfy,
                 ssh_key,
                 tmpdir,
                 attributes,
                 logger,
                 number_of_managers=1):
        super(CloudifyCluster, self).__init__()
        self._logger = logger
        self._attributes = attributes
        self._tmpdir = tmpdir
        self._ssh_key = ssh_key
        self._cfy = cfy
        self._number_of_managers = number_of_managers
        self._terraform = util.sh_bake(sh.terraform)
        self._terraform_inputs_file = self._tmpdir / 'terraform-vars.json'
        self._managers = None
        # Optional hook: invoked with the managers list after the VMs exist
        # but before bootstrapping and plugin upload.
        self.preconfigure_callback = None
        self._managers_config = [self._get_default_manager_config()
                                 for _ in range(number_of_managers)]
    def _get_default_manager_config(self):
        """Return the default per-manager configuration object."""
        config = _ManagerConfig()
        config.image_name = self._get_latest_manager_image_name()
        config.upload_plugins = True
        return config
    def _bootstrap_managers(self):
        """Subclass hook run after VM creation; no-op by default."""
        pass
    @abstractmethod
    def _get_latest_manager_image_name(self):
        """Returns the image name for the manager's VM."""
        pass
    @staticmethod
    def create_image_based(
            cfy, ssh_key, tmpdir, attributes, logger, number_of_managers=1,
            create=True):
        """Creates an image based Cloudify manager.

        :param create: Determines whether to actually create the environment
         in this invocation. If set to False, create() should be invoked in
         order to create the environment. Setting it to False allows to
         change the servers configuration using the servers_config property
         before calling create().
        """
        cluster = ImageBasedCloudifyCluster(
            cfy,
            ssh_key,
            tmpdir,
            attributes,
            logger,
            number_of_managers=number_of_managers)
        if create:
            cluster.create()
        return cluster
    @staticmethod
    def create_bootstrap_based(cfy, ssh_key, tmpdir, attributes, logger,
                               preconfigure_callback=None):
        """Bootstraps a Cloudify manager using simple manager blueprint."""
        cluster = BootstrapBasedCloudifyCluster(cfy,
                                                ssh_key,
                                                tmpdir,
                                                attributes,
                                                logger)
        logger.info('Bootstrapping cloudify manager using simple '
                    'manager blueprint..')
        if preconfigure_callback:
            cluster.preconfigure_callback = preconfigure_callback
        cluster.create()
        return cluster
    def _get_server_flavor(self):
        """Flavor used for the manager VMs; subclasses may override."""
        return self._attributes.medium_flavor_name
    @property
    def managers(self):
        """Returns a list containing the managers in the cluster."""
        if not self._managers:
            raise RuntimeError('_managers is not set')
        return self._managers
    @property
    def managers_config(self):
        """Returns a list containing a manager configuration obj per manager
        to be created."""
        return self._managers_config
    def create(self):
        """Creates the OpenStack infrastructure for a Cloudify manager.

        The openstack credentials file and private key file for SSHing
        to provisioned VMs are uploaded to the server. On any failure the
        partially created infrastructure is destroyed before re-raising."""
        self._logger.info('Creating an image based cloudify cluster '
                          '[number_of_managers=%d]', self._number_of_managers)
        openstack_config_file = self._tmpdir / 'openstack_config.json'
        openstack_config_file.write_text(json.dumps({
            'username': os.environ['OS_USERNAME'],
            'password': os.environ['OS_PASSWORD'],
            'tenant_name': os.environ.get('OS_TENANT_NAME',
                                          os.environ['OS_PROJECT_NAME']),
            'auth_url': os.environ['OS_AUTH_URL']
        }, indent=2))
        # Render the terraform template with one server entry per manager.
        terraform_template_file = self._tmpdir / 'openstack-vm.tf'
        input_file = util.get_resource_path(
            'terraform/openstack-vm.tf.template')
        with open(input_file, 'r') as f:
            terraform_template = f.read()
        output = jinja2.Template(terraform_template).render({
            'servers': self.managers_config
        })
        terraform_template_file.write_text(output)
        self._terraform_inputs_file.write_text(json.dumps({
            'resource_suffix': str(uuid.uuid4()),
            'public_key_path': self._ssh_key.public_key_path,
            'private_key_path': self._ssh_key.private_key_path,
            'flavor': self._get_server_flavor()
        }, indent=2))
        try:
            with self._tmpdir:
                self._terraform.apply(['-var-file',
                                       self._terraform_inputs_file])
                outputs = util.AttributesDict(
                    {k: v['value'] for k, v in json.loads(
                        self._terraform.output(
                            ['-json']).stdout).items()})
            self._attributes.update(outputs)
            self._create_managers_list(outputs)
            if self.preconfigure_callback:
                self.preconfigure_callback(self.managers)
            self._bootstrap_managers()
            for manager in self.managers:
                manager.verify_services_are_running()
            for i, manager in enumerate(self._managers):
                manager.use()
                self._upload_necessary_files_to_manager(manager,
                                                        openstack_config_file)
                if self.managers_config[i].upload_plugins:
                    self._upload_plugin_to_manager(
                        manager, 'openstack_centos_core')
            self._logger.info('Cloudify cluster successfully created!')
        except Exception as e:
            self._logger.error(
                'Error creating image based cloudify cluster: %s', e)
            try:
                self.destroy()
            except sh.ErrorReturnCode as ex:
                self._logger.error('Error on terraform destroy: %s', ex)
            raise
    @retrying.retry(stop_max_attempt_number=3, wait_fixed=3000)
    def _upload_plugin_to_manager(self, manager, plugin_name):
        """Upload the named plugin wagon to the given manager.

        If the upload fails, any plugin that *appeared* on the manager
        during the failed attempt is deleted before re-raising, so a
        retried upload does not collide with a half-registered plugin.
        """
        plugins_list = util.get_plugin_wagon_urls()
        plugin_wagon = [
            x['wgn_url'] for x in plugins_list
            if x['name'] == plugin_name]
        if len(plugin_wagon) != 1:
            self._logger.error(
                '%s plugin wagon not found in:%s%s',
                plugin_name,
                os.linesep,
                json.dumps(plugins_list, indent=2))
            raise RuntimeError(
                '{} plugin not found in wagons list'.format(plugin_name))
        self._logger.info('Uploading %s plugin [%s] to %s..',
                          plugin_name,
                          plugin_wagon[0],
                          manager)
        # we keep this because plugin upload may fail but the manager
        # will contain the uploaded plugin which is in some corrupted state.
        plugins_ids_before_upload = [
            x.id for x in manager.client.plugins.list()]
        try:
            manager.client.plugins.upload(plugin_wagon[0])
            self._cfy.plugins.list()
        except Exception as cce:
            self._logger.error('Error on plugin upload: %s', cce)
            current_plugins_ids = [x.id for x in manager.client.plugins.list()]
            # BUGFIX: the freshly-added (corrupted) plugin is the one present
            # now but absent before the upload attempt — i.e. the set
            # *difference*. The previous code used intersection(), which
            # selects a pre-existing plugin and would delete the wrong one.
            new_plugin_id = list(set(current_plugins_ids) -
                                 set(plugins_ids_before_upload))
            if new_plugin_id:
                self._logger.info(
                    'Removing plugin after upload plugin failure: %s',
                    new_plugin_id[0])
                manager.client.plugins.delete(new_plugin_id[0])
            raise
    def _upload_necessary_files_to_manager(self,
                                           manager,
                                           openstack_config_file):
        """Copy the openstack credentials and SSH key onto the manager."""
        self._logger.info('Uploading necessary files to %s', manager)
        with manager.ssh() as fabric_ssh:
            fabric_ssh.put(openstack_config_file,
                           REMOTE_OPENSTACK_CONFIG_PATH,
                           use_sudo=True)
            fabric_ssh.put(self._ssh_key.private_key_path,
                           REMOTE_PRIVATE_KEY_PATH,
                           use_sudo=True)
            # Key must be owned by root:cfyuser and read-only for the group.
            fabric_ssh.sudo('chown root:cfyuser {key_file}'.format(
                key_file=REMOTE_PRIVATE_KEY_PATH,
            ))
            fabric_ssh.sudo('chmod 440 {key_file}'.format(
                key_file=REMOTE_PRIVATE_KEY_PATH,
            ))
    def destroy(self):
        """Destroys the OpenStack infrastructure."""
        self._logger.info('Destroying cloudify cluster..')
        with self._tmpdir:
            self._terraform.destroy(
                ['-var-file', self._terraform_inputs_file, '-force'])
    def _create_managers_list(self, outputs):
        """Build _CloudifyManager wrappers from the terraform outputs."""
        self._managers = []
        for i in range(self._number_of_managers):
            public_ip_address = outputs['public_ip_address_{}'.format(i)]
            private_ip_address = outputs['private_ip_address_{}'.format(i)]
            rest_client = util.create_rest_client(
                public_ip_address,
                username=self._attributes.cloudify_username,
                password=self._attributes.cloudify_password,
                tenant=self._attributes.cloudify_tenant)
            self._managers.append(_CloudifyManager(i,
                                                   public_ip_address,
                                                   private_ip_address,
                                                   rest_client,
                                                   self._ssh_key,
                                                   self._cfy,
                                                   self._attributes,
                                                   self._logger))
class ImageBasedCloudifyCluster(CloudifyCluster):
    """Starts a manager from a prebuilt image on OpenStack."""
    def _get_latest_manager_image_name(self):
        """Derive the manager image name from the installed CLI version.

        E.g. CLI version "4.0.0-m15" maps to image
        "cloudify-manager-premium-4.0m15".
        """
        raw_version = util.get_cli_version()
        version = raw_version.replace('-', '').replace('0.0', '0')
        prefix = self._attributes.cloudify_manager_image_name_prefix
        return '{}-{}'.format(prefix, version)
class BootstrapBasedCloudifyCluster(CloudifyCluster):
    """
    Bootstraps a Cloudify manager using simple manager blueprint.
    """
    def __init__(self, *args, **kwargs):
        super(BootstrapBasedCloudifyCluster, self).__init__(*args, **kwargs)
        # URL of the manager resources package fed into the bootstrap inputs.
        self._manager_resources_package = \
            util.get_manager_resources_package_url()
        # Both populated during _bootstrap_managers().
        self._manager_blueprints_path = None
        self._inputs_file = None
    def _get_server_flavor(self):
        # Bootstrapping needs a larger flavor than the image-based flow.
        return self._attributes.large_flavor_name
    def _get_latest_manager_image_name(self):
        """Bootstrap starts from a plain CentOS 7 image, not a manager image."""
        return self._attributes.centos7_image_name
    def _bootstrap_managers(self):
        """Clone the blueprints repo, write the inputs file and bootstrap."""
        super(BootstrapBasedCloudifyCluster, self)._bootstrap_managers()
        self._clone_manager_blueprints()
        self._create_inputs_file()
        self._bootstrap_manager()
    def _clone_manager_blueprints(self):
        """Clone cloudify-manager-blueprints into the temp dir."""
        self._manager_blueprints_path = git_helper.clone(
            MANAGER_BLUEPRINTS_REPO_URL,
            str(self._tmpdir))
    def _create_inputs_file(self):
        """Write the bootstrap inputs JSON for the first (only) manager."""
        self._inputs_file = self._tmpdir / 'inputs.json'
        bootstrap_inputs = json.dumps({
            'public_ip': self.managers[0].ip_address,
            'private_ip': self.managers[0].private_ip_address,
            'ssh_user': self._attributes.centos7_username,
            'ssh_key_filename': self._ssh_key.private_key_path,
            'admin_username': self._attributes.cloudify_username,
            'admin_password': self._attributes.cloudify_password,
            'manager_resources_package': self._manager_resources_package},
            indent=2)
        self._logger.info(
            'Bootstrap inputs:%s%s', os.linesep, bootstrap_inputs)
        self._inputs_file.write_text(bootstrap_inputs)
    def _bootstrap_manager(self):
        """Run `cfy bootstrap` with the simple manager blueprint."""
        manager_blueprint_path = \
            self._manager_blueprints_path / 'simple-manager-blueprint.yaml'
        self._cfy.bootstrap([manager_blueprint_path, '-i', self._inputs_file])
Fix 4.0.0-4.0.1 test compatibility (#529)
########
# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
import json
import os
import uuid
from fabric import api as fabric_api
from fabric import context_managers as fabric_context_managers
from influxdb import InfluxDBClient
import jinja2
import retrying
import sh
from cosmo_tester.framework import util
from cosmo_tester.framework import git_helper
REMOTE_PRIVATE_KEY_PATH = '/etc/cloudify/key.pem'
REMOTE_OPENSTACK_CONFIG_PATH = '/etc/cloudify/openstack_config.json'
MANAGER_BLUEPRINTS_REPO_URL = 'https://github.com/cloudify-cosmo/cloudify-manager-blueprints.git' # noqa
class _CloudifyManager(object):
    """A single provisioned Cloudify manager VM.

    Bundles the manager's public/private addresses, its REST client, its
    per-manager config, and helpers for SSH access, CLI profile selection,
    service verification and VM deletion through OpenStack.
    """
    def __init__(self,
                 index,
                 public_ip_address,
                 private_ip_address,
                 rest_client,
                 ssh_key,
                 cfy,
                 attributes,
                 logger,
                 config):
        self.index = index
        self.ip_address = public_ip_address
        self.private_ip_address = private_ip_address
        self.client = rest_client
        # Flipped to True by delete() once the VM is confirmed gone.
        self.deleted = False
        self._ssh_key = ssh_key
        self._cfy = cfy
        self._attributes = attributes
        self._logger = logger
        self._openstack = util.create_openstack_client()
        self.influxdb_client = InfluxDBClient(public_ip_address, 8086,
                                              'root', 'root', 'cloudify')
        # The _ManagerConfig this manager was created from (e.g. is_4_0).
        self.config = config
    @property
    def remote_private_key_path(self):
        """Returns the private key path on the manager."""
        return REMOTE_PRIVATE_KEY_PATH
    @contextmanager
    def ssh(self, **kwargs):
        """Context manager yielding fabric's api bound to this manager."""
        with fabric_context_managers.settings(
                host_string=self.ip_address,
                user=self._attributes.centos7_username,
                key_filename=self._ssh_key.private_key_path,
                **kwargs):
            yield fabric_api
    def __str__(self):
        return 'Cloudify manager [{}:{}]'.format(self.index, self.ip_address)
    @retrying.retry(stop_max_attempt_number=3, wait_fixed=3000)
    def use(self):
        """Point the local CLI profile at this manager (retried 3 times)."""
        self._cfy.profiles.use('{0} -u {1} -p {2} -t {3}'.format(
            self.ip_address,
            self._attributes.cloudify_username,
            self._attributes.cloudify_password,
            self._attributes.cloudify_tenant).split())
    @property
    def server_id(self):
        """Returns this server's Id from the terraform outputs."""
        key = 'server_id_{}'.format(self.index)
        return self._attributes[key]
    def delete(self):
        """Deletes this manager's VM from the OpenStack environment."""
        self._logger.info('Deleting server.. [id=%s]', self.server_id)
        self._openstack.compute.delete_server(self.server_id)
        self._wait_for_server_to_be_deleted()
        self.deleted = True
    @retrying.retry(stop_max_attempt_number=12, wait_fixed=5000)
    def _wait_for_server_to_be_deleted(self):
        """Poll (via retry) until the server no longer appears in OpenStack."""
        self._logger.info('Waiting for server to terminate..')
        servers = [x for x in self._openstack.compute.servers()
                   if x.id == self.server_id]
        if servers:
            self._logger.info('- server.status = %s', servers[0].status)
        # Retry decorator re-invokes on AssertionError until the list is empty.
        assert len(servers) == 0
        self._logger.info('Server terminated!')
    @retrying.retry(stop_max_attempt_number=24, wait_fixed=5000)
    def verify_services_are_running(self):
        """Assert every service instance reported by the manager is running."""
        self._logger.info('Verifying all services are running on manager%d..',
                          self.index)
        status = self.client.manager.get_status()
        for service in status['services']:
            for instance in service['instances']:
                assert instance['SubState'] == 'running', \
                    'service {0} is in {1} state'.format(
                        service['display_name'], instance['SubState'])
class _ManagerConfig(object):
def __init__(self):
self.image_name = None
self.upload_plugins = True
@property
def is_4_0(self):
# This is a temporary measure. We will probably have subclasses for it
return self.image_name.endswith('4.0')
class CloudifyCluster(object):
    """Provisions a cluster of Cloudify manager VMs on OpenStack.

    Infrastructure is created with terraform. Concrete subclasses decide how
    each manager comes up: from a prebuilt image (ImageBasedCloudifyCluster)
    or by bootstrapping the simple manager blueprint
    (BootstrapBasedCloudifyCluster).
    """
    __metaclass__ = ABCMeta
    def __init__(self,
                 cfy,
                 ssh_key,
                 tmpdir,
                 attributes,
                 logger,
                 number_of_managers=1):
        super(CloudifyCluster, self).__init__()
        self._logger = logger
        self._attributes = attributes
        self._tmpdir = tmpdir
        self._ssh_key = ssh_key
        self._cfy = cfy
        self._number_of_managers = number_of_managers
        self._terraform = util.sh_bake(sh.terraform)
        self._terraform_inputs_file = self._tmpdir / 'terraform-vars.json'
        self._managers = None
        # Optional hook: invoked with the managers list after the VMs exist
        # but before bootstrapping and plugin upload.
        self.preconfigure_callback = None
        self._managers_config = [self._get_default_manager_config()
                                 for _ in range(number_of_managers)]
    def _get_default_manager_config(self):
        """Return the default per-manager configuration object."""
        config = _ManagerConfig()
        config.image_name = self._get_latest_manager_image_name()
        return config
    def _bootstrap_managers(self):
        """Subclass hook run after VM creation; no-op by default."""
        pass
    @abstractmethod
    def _get_latest_manager_image_name(self):
        """Returns the image name for the manager's VM."""
        pass
    @staticmethod
    def create_image_based(
            cfy, ssh_key, tmpdir, attributes, logger, number_of_managers=1,
            create=True):
        """Creates an image based Cloudify manager.

        :param create: Determines whether to actually create the environment
         in this invocation. If set to False, create() should be invoked in
         order to create the environment. Setting it to False allows to
         change the servers configuration using the servers_config property
         before calling create().
        """
        cluster = ImageBasedCloudifyCluster(
            cfy,
            ssh_key,
            tmpdir,
            attributes,
            logger,
            number_of_managers=number_of_managers)
        if create:
            cluster.create()
        return cluster
    @staticmethod
    def create_bootstrap_based(cfy, ssh_key, tmpdir, attributes, logger,
                               preconfigure_callback=None):
        """Bootstraps a Cloudify manager using simple manager blueprint."""
        cluster = BootstrapBasedCloudifyCluster(cfy,
                                                ssh_key,
                                                tmpdir,
                                                attributes,
                                                logger)
        logger.info('Bootstrapping cloudify manager using simple '
                    'manager blueprint..')
        if preconfigure_callback:
            cluster.preconfigure_callback = preconfigure_callback
        cluster.create()
        return cluster
    def _get_server_flavor(self):
        """Flavor used for the manager VMs; subclasses may override."""
        return self._attributes.medium_flavor_name
    @property
    def managers(self):
        """Returns a list containing the managers in the cluster."""
        if not self._managers:
            raise RuntimeError('_managers is not set')
        return self._managers
    @property
    def managers_config(self):
        """Returns a list containing a manager configuration obj per manager
        to be created."""
        return self._managers_config
    def create(self):
        """Creates the OpenStack infrastructure for a Cloudify manager.

        The openstack credentials file and private key file for SSHing
        to provisioned VMs are uploaded to the server. On any failure the
        partially created infrastructure is destroyed before re-raising."""
        self._logger.info('Creating an image based cloudify cluster '
                          '[number_of_managers=%d]', self._number_of_managers)
        openstack_config_file = self._tmpdir / 'openstack_config.json'
        openstack_config_file.write_text(json.dumps({
            'username': os.environ['OS_USERNAME'],
            'password': os.environ['OS_PASSWORD'],
            'tenant_name': os.environ.get('OS_TENANT_NAME',
                                          os.environ['OS_PROJECT_NAME']),
            'auth_url': os.environ['OS_AUTH_URL']
        }, indent=2))
        # Render the terraform template with one server entry per manager.
        terraform_template_file = self._tmpdir / 'openstack-vm.tf'
        input_file = util.get_resource_path(
            'terraform/openstack-vm.tf.template')
        with open(input_file, 'r') as f:
            terraform_template = f.read()
        output = jinja2.Template(terraform_template).render({
            'servers': self.managers_config
        })
        terraform_template_file.write_text(output)
        self._terraform_inputs_file.write_text(json.dumps({
            'resource_suffix': str(uuid.uuid4()),
            'public_key_path': self._ssh_key.public_key_path,
            'private_key_path': self._ssh_key.private_key_path,
            'flavor': self._get_server_flavor()
        }, indent=2))
        try:
            with self._tmpdir:
                self._terraform.apply(['-var-file',
                                       self._terraform_inputs_file])
                outputs = util.AttributesDict(
                    {k: v['value'] for k, v in json.loads(
                        self._terraform.output(
                            ['-json']).stdout).items()})
            self._attributes.update(outputs)
            self._create_managers_list(outputs)
            if self.preconfigure_callback:
                self.preconfigure_callback(self.managers)
            self._bootstrap_managers()
            for manager in self.managers:
                manager.verify_services_are_running()
            for i, manager in enumerate(self._managers):
                manager.use()
                self._upload_necessary_files_to_manager(manager,
                                                        openstack_config_file)
                if self.managers_config[i].upload_plugins:
                    self._upload_plugin_to_manager(
                        manager, 'openstack_centos_core')
            self._logger.info('Cloudify cluster successfully created!')
        except Exception as e:
            self._logger.error(
                'Error creating image based cloudify cluster: %s', e)
            try:
                self.destroy()
            except sh.ErrorReturnCode as ex:
                self._logger.error('Error on terraform destroy: %s', ex)
            raise
    @retrying.retry(stop_max_attempt_number=3, wait_fixed=3000)
    def _upload_plugin_to_manager(self, manager, plugin_name):
        """Upload the named plugin wagon to the given manager.

        If the upload fails, any plugin that *appeared* on the manager
        during the failed attempt is deleted before re-raising, so a
        retried upload does not collide with a half-registered plugin.
        """
        plugins_list = util.get_plugin_wagon_urls()
        plugin_wagon = [
            x['wgn_url'] for x in plugins_list
            if x['name'] == plugin_name]
        if len(plugin_wagon) != 1:
            self._logger.error(
                '%s plugin wagon not found in:%s%s',
                plugin_name,
                os.linesep,
                json.dumps(plugins_list, indent=2))
            raise RuntimeError(
                '{} plugin not found in wagons list'.format(plugin_name))
        self._logger.info('Uploading %s plugin [%s] to %s..',
                          plugin_name,
                          plugin_wagon[0],
                          manager)
        # we keep this because plugin upload may fail but the manager
        # will contain the uploaded plugin which is in some corrupted state.
        plugins_ids_before_upload = [
            x.id for x in manager.client.plugins.list()]
        try:
            manager.client.plugins.upload(plugin_wagon[0])
            self._cfy.plugins.list()
        except Exception as cce:
            self._logger.error('Error on plugin upload: %s', cce)
            current_plugins_ids = [x.id for x in manager.client.plugins.list()]
            # BUGFIX: the freshly-added (corrupted) plugin is the one present
            # now but absent before the upload attempt — i.e. the set
            # *difference*. The previous code used intersection(), which
            # selects a pre-existing plugin and would delete the wrong one.
            new_plugin_id = list(set(current_plugins_ids) -
                                 set(plugins_ids_before_upload))
            if new_plugin_id:
                self._logger.info(
                    'Removing plugin after upload plugin failure: %s',
                    new_plugin_id[0])
                manager.client.plugins.delete(new_plugin_id[0])
            raise
    def _upload_necessary_files_to_manager(self,
                                           manager,
                                           openstack_config_file):
        """Copy the openstack credentials and SSH key onto the manager.

        4.0 managers keep the openstack config under /root and have no
        cfyuser, hence the is_4_0 branches.
        """
        self._logger.info('Uploading necessary files to %s', manager)
        with manager.ssh() as fabric_ssh:
            if manager.config.is_4_0:
                openstack_json_path = '/root/openstack_config.json'
            else:
                openstack_json_path = REMOTE_OPENSTACK_CONFIG_PATH
            fabric_ssh.put(openstack_config_file,
                           openstack_json_path,
                           use_sudo=True)
            fabric_ssh.put(self._ssh_key.private_key_path,
                           REMOTE_PRIVATE_KEY_PATH,
                           use_sudo=True)
            if not manager.config.is_4_0:
                fabric_ssh.sudo('chown root:cfyuser {key_file}'.format(
                    key_file=REMOTE_PRIVATE_KEY_PATH,
                ))
            fabric_ssh.sudo('chmod 440 {key_file}'.format(
                key_file=REMOTE_PRIVATE_KEY_PATH,
            ))
    def destroy(self):
        """Destroys the OpenStack infrastructure."""
        self._logger.info('Destroying cloudify cluster..')
        with self._tmpdir:
            self._terraform.destroy(
                ['-var-file', self._terraform_inputs_file, '-force'])
    def _create_managers_list(self, outputs):
        """Build _CloudifyManager wrappers from the terraform outputs."""
        self._managers = []
        for i in range(self._number_of_managers):
            public_ip_address = outputs['public_ip_address_{}'.format(i)]
            private_ip_address = outputs['private_ip_address_{}'.format(i)]
            rest_client = util.create_rest_client(
                public_ip_address,
                username=self._attributes.cloudify_username,
                password=self._attributes.cloudify_password,
                tenant=self._attributes.cloudify_tenant)
            self._managers.append(_CloudifyManager(i,
                                                   public_ip_address,
                                                   private_ip_address,
                                                   rest_client,
                                                   self._ssh_key,
                                                   self._cfy,
                                                   self._attributes,
                                                   self._logger,
                                                   self.managers_config[i]))
class ImageBasedCloudifyCluster(CloudifyCluster):
    """Starts a manager from a prebuilt image on OpenStack."""
    def _get_latest_manager_image_name(self):
        """Derive the manager image name from the installed CLI version.

        E.g. CLI version "4.0.0-m15" maps to image
        "cloudify-manager-premium-4.0m15".
        """
        raw_version = util.get_cli_version()
        version = raw_version.replace('-', '').replace('0.0', '0')
        prefix = self._attributes.cloudify_manager_image_name_prefix
        return '{}-{}'.format(prefix, version)
class BootstrapBasedCloudifyCluster(CloudifyCluster):
    """
    Bootstraps a Cloudify manager using simple manager blueprint.
    """
    def __init__(self, *args, **kwargs):
        super(BootstrapBasedCloudifyCluster, self).__init__(*args, **kwargs)
        # URL of the manager resources package fed into the bootstrap inputs.
        self._manager_resources_package = \
            util.get_manager_resources_package_url()
        # Both populated during _bootstrap_managers().
        self._manager_blueprints_path = None
        self._inputs_file = None
    def _get_server_flavor(self):
        # Bootstrapping needs a larger flavor than the image-based flow.
        return self._attributes.large_flavor_name
    def _get_latest_manager_image_name(self):
        """Bootstrap starts from a plain CentOS 7 image, not a manager image."""
        return self._attributes.centos7_image_name
    def _bootstrap_managers(self):
        """Clone the blueprints repo, write the inputs file and bootstrap."""
        super(BootstrapBasedCloudifyCluster, self)._bootstrap_managers()
        self._clone_manager_blueprints()
        self._create_inputs_file()
        self._bootstrap_manager()
    def _clone_manager_blueprints(self):
        """Clone cloudify-manager-blueprints into the temp dir."""
        self._manager_blueprints_path = git_helper.clone(
            MANAGER_BLUEPRINTS_REPO_URL,
            str(self._tmpdir))
    def _create_inputs_file(self):
        """Write the bootstrap inputs JSON for the first (only) manager."""
        self._inputs_file = self._tmpdir / 'inputs.json'
        bootstrap_inputs = json.dumps({
            'public_ip': self.managers[0].ip_address,
            'private_ip': self.managers[0].private_ip_address,
            'ssh_user': self._attributes.centos7_username,
            'ssh_key_filename': self._ssh_key.private_key_path,
            'admin_username': self._attributes.cloudify_username,
            'admin_password': self._attributes.cloudify_password,
            'manager_resources_package': self._manager_resources_package},
            indent=2)
        self._logger.info(
            'Bootstrap inputs:%s%s', os.linesep, bootstrap_inputs)
        self._inputs_file.write_text(bootstrap_inputs)
    def _bootstrap_manager(self):
        """Run `cfy bootstrap` with the simple manager blueprint."""
        manager_blueprint_path = \
            self._manager_blueprints_path / 'simple-manager-blueprint.yaml'
        self._cfy.bootstrap([manager_blueprint_path, '-i', self._inputs_file])
|
ddfc6b4c-2ead-11e5-a854-7831c1d44c14
de02a870-2ead-11e5-a5ca-7831c1d44c14
de02a870-2ead-11e5-a5ca-7831c1d44c14 |
# -*- coding: utf-8 -*-
from nose.tools import assert_equal
from openfisca_core import legislations
from openfisca_dummy_country import DummyTaxBenefitSystem
def test_multiple_xml_based_tax_benefit_system():
    """Dummy XML legislation should compact to the expected tax rates."""
    tbs = DummyTaxBenefitSystem()
    legislation_json = tbs.get_legislation()
    assert legislation_json is not None
    assert isinstance(legislation_json, dict), legislation_json
    dated_json = legislations.generate_dated_legislation_json(
        legislation_json, '2012-01-01')
    assert isinstance(dated_json, dict), legislation_json
    compact = legislations.compact_dated_node_json(dated_json)
    assert_equal(compact.impot.taux, 0.3)
    assert_equal(compact.contribution_sociale.activite.crds.activite.taux,
                 0.005)
fixup! Adapt test_param to new dummy package version
# -*- coding: utf-8 -*-
from nose.tools import assert_equal
from openfisca_core import legislations
from openfisca_dummy_country import DummyTaxBenefitSystem
def test_multiple_xml_based_tax_benefit_system():
    """Dummy XML legislation should compact to the expected tax rates."""
    tbs = DummyTaxBenefitSystem()
    legislation_json = tbs.get_legislation()
    assert legislation_json is not None
    assert isinstance(legislation_json, dict), legislation_json
    dated_json = legislations.generate_dated_legislation_json(
        legislation_json, '2012-01-01')
    assert isinstance(dated_json, dict), legislation_json
    compact = legislations.compact_dated_node_json(dated_json)
    assert_equal(compact.impot.taux, 0.3)
    assert_equal(compact.contribution_sociale.crds.activite.taux, 0.005)
|
__version__ = '0.2017.12.31.1639'
RELEASE: Bump version for pypi to 0.2018.01.03.2133
__version__ = '0.2018.01.03.2133'
|
# Create your views here.
from django.http import HttpResponse
from django.http import HttpResponseNotAllowed, HttpResponseBadRequest, HttpResponseForbidden
from django.utils import simplejson
from django.contrib.auth.models import User
from api import models
from api import utils
from api import permissions
import datetime
import logging
import urllib2
import math
# NOTE(review): hardcoded Google Places API key committed to source — move it
# to settings/an environment variable and rotate the key; a committed secret
# must be treated as compromised.
__PlacesKey__ = 'AIzaSyBLHdzA-5F9DCllQbLmataclCyVp8MSXok'
def login(request):
    """Authenticate a user/device pair and hand out an API token.

    POST params: username, password, device_type, device_manufacture,
    device_os, os_version, device_id. Returns JSON ``{'auth_token': ...}``
    on success, a 400 with an ``error`` key on failure, and 405 for
    non-POST requests.
    """
    if request.method == 'POST':
        username = request.POST.get('username', None)
        password = request.POST.get('password', None)
        device_type = request.POST.get('device_type', None)
        device_manufacture = request.POST.get('device_manufacture', None)
        device_os = request.POST.get('device_os', None)
        os_version = request.POST.get('os_version', None)
        device_id = request.POST.get('device_id', None)
        logging.info('Login request from user %s', username)
        logging.info('device_type %s', device_type)
        logging.info('device_manufacture %s', device_manufacture)
        logging.info('device_os %s', device_os)
        logging.info('os_version %s', os_version)
        logging.info('device_id %s', device_id)
        # device_manufacture is deliberately not part of the required set.
        if not username or not password or not device_id or not device_type or not device_os or not os_version:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
        try:
            user = User.objects.get(username=username)
            if not user.check_password(password):
                return HttpResponseBadRequest(simplejson.dumps({'error': 'Username and password not matching'}))
        except User.DoesNotExist:
            # Same message as a wrong password so the response does not
            # reveal which usernames exist.
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Username and password not matching'}))
        try:
            device_info = models.DeviceInfo.objects.filter(device_id=device_id).get()
        except models.DeviceInfo.DoesNotExist:
            device_info = models.DeviceInfo(device_id=device_id, device_manufacture=device_manufacture, device_os=device_os,
                                            device_type=device_type, os_version=os_version, device_owner=user)
            device_info.save()
        # Generate a token that is unique across all TokenAuthModel rows.
        auth_string = utils.tokenGenerator(size=16)
        while models.TokenAuthModel.objects.filter(token=auth_string).count():
            auth_string = utils.tokenGenerator(size=16)
        # NOTE(review): the "expiring" date is set to *now*, so tokens are
        # born expired unless expiry is extended/ignored elsewhere — confirm
        # the intended token lifetime.
        expire_date = datetime.datetime.now()
        try:
            # One token per device: refresh the existing row and re-home the
            # device to the authenticating user.
            auth_token = models.TokenAuthModel.objects.filter(device=device_info).get()
            auth_token.expiring_date = expire_date
            auth_token.token = auth_string
            auth_token.user = user
            device_info.device_owner = user
            device_info.save()
            auth_token.save()
        except models.TokenAuthModel.DoesNotExist:
            auth_token = models.TokenAuthModel(user=user, device=device_info, token=auth_string,
                                               expiring_date=expire_date)
            auth_token.save()
        return HttpResponse(simplejson.dumps({'auth_token': auth_string}))
    # BUGFIX: this view only accepts POST; the 405 response must advertise
    # the permitted methods, not the rejected one.
    return HttpResponseNotAllowed(['POST'])
def register(request):
    """Create a new user account.

    POST params: username, email, password. Returns a 400 with an ``error``
    key on validation failure, an empty-payload 200 on success, and 405 for
    non-POST requests.
    """
    if request.method == 'POST':
        username = request.POST.get('username', None)
        email = request.POST.get('email', None)
        password = request.POST.get('password', None)
        logging.info('User %s is trying to register with email %s', username, email)
        # BUGFIX: username was missing from the completeness check, which
        # allowed User.objects.create_user(None, ...) further down.
        if not username or not email or not password:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
        if not utils.validateEmail(email):
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Invalid email'}))
        if len(password) < 4:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Password too short'}))
        if User.objects.filter(email=email).count():
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Email already used'}))
        if User.objects.filter(username=username).count():
            return HttpResponseBadRequest(simplejson.dumps({'error': 'User already registered'}))
        new_user = User.objects.create_user(username, email, password)
        new_user.save()
        return HttpResponse(simplejson.dumps({'empty': 'empty'}))
    # BUGFIX: only POST is accepted; advertise the permitted method.
    return HttpResponseNotAllowed(['POST'])
@permissions.is_logged_in
def getUserInfo(request):
    """Return basic profile data (email, full name, username) for the
    user that owns the supplied auth token."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['GET'])
    token = request.POST.get('auth_token', None)
    owner = models.TokenAuthModel.objects.filter(token=token).get().user
    payload = {
        'email': owner.email,
        'full_name': owner.first_name,
        'username': owner.username,
    }
    return HttpResponse(simplejson.dumps(payload))
@permissions.is_logged_in
def getFullUserInfo(request):
    """Return the full profile (name, email, birthday, sex, status,
    username) of the token's owner, lazily creating an empty
    ExtraInfoForUser row when none exists yet."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['GET'])
    token = request.POST.get('auth_token', None)
    owner = models.TokenAuthModel.objects.filter(token=token).get().user
    try:
        extra = models.ExtraInfoForUser.objects.filter(user=owner).get()
    except models.ExtraInfoForUser.DoesNotExist:
        # First access for this user: create an empty profile record.
        extra = models.ExtraInfoForUser(user=owner)
        extra.save()
    payload = {
        'full_name': owner.first_name,
        'email': owner.email,
        'birthday': extra.birthday,
        'sex': extra.sex,
        'status': extra.status,
        'username': owner.username,
    }
    return HttpResponse(simplejson.dumps(payload))
@permissions.is_logged_in
def updateUserInfo(request):
    """Update the profile (name, email, birthday, sex, status) of the
    user that owns the supplied auth token.

    Rejects incomplete data, malformed emails, and emails already used
    by a different account.
    """
    if request.method == 'POST':
        token = request.POST.get('auth_token', None)
        first_name = request.POST.get('full_name', None)
        email = request.POST.get('email', None)
        birthday = request.POST.get('birthday', None)
        sex = request.POST.get('sex', None)
        status = request.POST.get('status', None)
        auth_token = models.TokenAuthModel.objects.filter(token=token).get()
        user = auth_token.user
        users = User.objects.filter(email=email)
        if first_name is None or email is None or birthday is None or sex is None or status is None:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
        if not utils.validateEmail(email):
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Invalid email'}))
        # The new email may only belong to the requesting user themself.
        if (users.count() == 1 and users.get() != user) or users.count() > 1:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Email already used'}))
        try:
            personalInfo = models.ExtraInfoForUser.objects.filter(user=user).get()
        # Bug fix: the 'modesl' typo here raised NameError, crashing the
        # view for any user without an ExtraInfoForUser row.
        except models.ExtraInfoForUser.DoesNotExist:
            personalInfo = models.ExtraInfoForUser(user=user, sex=sex, birthday=birthday, status=status)
            personalInfo.save()
        user.first_name = first_name
        personalInfo.status = status
        personalInfo.sex = sex
        personalInfo.birthday = birthday
        personalInfo.save()
        user.email = email
        user.save()
        return HttpResponse(simplejson.dumps({'empty': 'empty'}))
    return HttpResponseNotAllowed(['GET'])
@permissions.is_logged_in
def getSecretQuestion(request):
    """Return the stored secret question/answer pair of the token's
    owner, creating an empty ExtraInfoForUser row when none exists."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['GET'])
    token = request.POST.get('auth_token', None)
    owner = models.TokenAuthModel.objects.filter(token=token).get().user
    try:
        extra = models.ExtraInfoForUser.objects.filter(user=owner).get()
    except models.ExtraInfoForUser.DoesNotExist:
        extra = models.ExtraInfoForUser(user=owner)
        extra.save()
    payload = {'secret_question': extra.secret_question,
               'secret_answer': extra.secret_answer}
    return HttpResponse(simplejson.dumps(payload))
@permissions.is_logged_in
def updateSecretQuestion(request):
    """Set the secret question/answer pair of the token's owner after
    re-verifying their current password.

    Returns 400 on incomplete data or wrong password, an empty JSON
    object on success, 405 for non-POST requests.
    """
    if request.method == 'POST':
        token = request.POST.get('auth_token', None)
        secret_question = request.POST.get('secret_question', None)
        secret_answer = request.POST.get('secret_answer', None)
        password = request.POST.get('password', None)
        # Security fix: the whole POST body (including the plaintext
        # password) was logged at ERROR level; never log credentials.
        auth_token = models.TokenAuthModel.objects.filter(token=token).get()
        user = auth_token.user
        if secret_question is None or secret_answer is None or password is None:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
        if not user.check_password(password):
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Wrong password'}))
        try:
            personalInfo = models.ExtraInfoForUser.objects.filter(user=user).get()
        except models.ExtraInfoForUser.DoesNotExist:
            personalInfo = models.ExtraInfoForUser(user=user)
            personalInfo.save()
        personalInfo.secret_question = secret_question
        personalInfo.secret_answer = secret_answer
        personalInfo.save()
        return HttpResponse(simplejson.dumps({'empty': 'empty'}))
    return HttpResponseNotAllowed(['GET'])
@permissions.is_logged_in
def updatePassword(request):
    """Change the password of the token's owner after verifying the
    current password."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['GET'])
    token = request.POST.get('auth_token', None)
    new_password = request.POST.get('new_password', None)
    current_password = request.POST.get('password', None)
    if current_password is None or new_password is None:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
    owner = models.TokenAuthModel.objects.filter(token=token).get().user
    if not owner.check_password(current_password):
        return HttpResponseBadRequest(simplejson.dumps({'error': 'Wrong password'}))
    owner.set_password(new_password)
    owner.save()
    return HttpResponse(simplejson.dumps({'empty': 'empty'}))
@permissions.is_logged_in
def checkProfileCompletion(request):
    """Report whether the token's owner has filled in the secret
    question, secret answer, birthday and sex profile fields."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['GET'])
    token = request.POST.get('auth_token', None)
    auth_token = models.TokenAuthModel.objects.filter(token=token).get()
    try:
        extra_info = models.ExtraInfoForUser.objects.filter(user=auth_token.user).get()
    except models.ExtraInfoForUser.DoesNotExist:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'No data'}))
    required = (extra_info.secret_question, extra_info.secret_answer,
                extra_info.birthday, extra_info.sex)
    if any(field == '' for field in required):
        return HttpResponseBadRequest(simplejson.dumps({'error': 'No data'}))
    return HttpResponse(simplejson.dumps({'empty': 'empty'}))
def getSecretQuestionForRecovery(request):
    """Return the secret question for *username* so the password
    recovery flow can challenge the user.  Unauthenticated by design."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['GET'])
    username = request.POST.get('username', None)
    if username is None:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
    try:
        account = User.objects.filter(username=username).get()
    except User.DoesNotExist:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'User does not exists'}))
    try:
        extra = models.ExtraInfoForUser.objects.filter(user=account).get()
    except models.ExtraInfoForUser.DoesNotExist:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'User does not have security questions'}))
    # All three recovery fields must be populated for the flow to work.
    if '' in (extra.secret_answer, extra.secret_question, extra.birthday):
        return HttpResponseBadRequest(simplejson.dumps({'error': 'User does not have security questions'}))
    return HttpResponse(simplejson.dumps({'secret_question': extra.secret_question}))
def getRecoveryTempToken(request):
    """Validate the secret answer and birthday for *username* and hand
    out a short-lived (30 second) password-recovery token."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['GET'])
    username = request.POST.get('username', None)
    answer = request.POST.get('answer', None)
    birthday = request.POST.get('birthday', None)
    if username is None or answer is None or birthday is None:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
    try:
        account = User.objects.filter(username=username).get()
    except User.DoesNotExist:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'User dose not exists'}))
    try:
        extra = models.ExtraInfoForUser.objects.filter(user=account).get()
    except models.ExtraInfoForUser.DoesNotExist:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'User dose not have security questions'}))
    if '' in (extra.secret_answer, extra.secret_question, extra.birthday):
        return HttpResponseBadRequest(simplejson.dumps({'error': 'User dose not have security questions'}))
    if extra.secret_answer != answer or extra.birthday != birthday:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'Wrong answer'}))
    # Both challenge values matched: issue a token valid for 30 seconds.
    tmp_token = utils.tokenGenerator(size=10)
    expires = datetime.datetime.now() + datetime.timedelta(seconds=30)
    models.RecoveryTokens(token=tmp_token, user=account, expiringDate=expires).save()
    return HttpResponse(simplejson.dumps({'tmp_token': tmp_token}))
def updatePasswordAfterRecovery(request):
    """Set a new password for *user* using a short-lived recovery token
    issued by getRecoveryTempToken.

    Returns 400 on incomplete or invalid data, an empty JSON object on
    success, and 405 for non-POST requests.
    """
    if request.method == 'POST':
        token = request.POST.get('tmp_token', None)
        new_password = request.POST.get('new_password', None)
        username = request.POST.get('user', None)
        if token is None or new_password is None or username is None:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
        try:
            user = User.objects.filter(username=username).get()
            tmp_auth = models.RecoveryTokens.objects.filter(token=token, user=user).get()
            # Bug fix: the 30-second expiry stored on the token was never
            # enforced; reject tokens past their expiringDate.
            # NOTE(review): assumes expiringDate is a naive local datetime
            # like datetime.datetime.now() — confirm the model field.
            if tmp_auth.expiringDate < datetime.datetime.now():
                return HttpResponseBadRequest(simplejson.dumps({'error': 'Wrong data'}))
            user.set_password(new_password)
            user.save()
            # Recovery tokens are one-time: discard after a successful reset.
            tmp_auth.delete()
        except (models.RecoveryTokens.DoesNotExist, User.DoesNotExist):
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Wrong data'}))
        return HttpResponse(simplejson.dumps({'empty': 'empty'}))
    # Bug fix: non-POST requests previously fell through and returned None,
    # which crashes Django's response handling.
    return HttpResponseNotAllowed(['GET'])
def _getPlaceDetails(place_id):
    """Fetch the Google Places detail record for *place_id* (a place
    reference string); returns None when no id is given."""
    if not place_id:
        return None
    url = ('https://maps.googleapis.com/maps/api/place/details/json'
           '?reference=' + place_id + '&sensor=false&key=' + __PlacesKey__)
    raw = urllib2.urlopen(url).read()
    return simplejson.loads(raw)['result']
@permissions.is_logged_in
def getPlaces(request):
    """Return nearby venues (Google Places bars/night clubs plus locally
    stored places) for the POSTed latitude/longitude/radius."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['GET'])
    nearby = _getPlaces(request)
    return HttpResponse(simplejson.dumps({'list': nearby}))
def _getPlaces(request):
    """Collect venues around the POSTed (latitude, longitude) within
    *radius* metres.

    Combines two sources:
      * a Google Places nearby search restricted to bar/night_club types
        (marked 'source': 'False' in the result dicts), and
      * rows from models.LocalPlaces whose coordinates fall inside the
        bounding box of the radius (marked 'source': 'True').

    Returns a list of place dicts, or None when any parameter is missing.
    """
    radius = request.POST.get('radius', None)
    latitude = request.POST.get('latitude', None)
    longitude = request.POST.get('longitude', None)
    if not radius or not latitude or not longitude:
        return None
    logging.info("Lon %s, Lat %s, Radious %s",longitude,latitude,radius)
    # Google Places nearby search; parameters are still raw POST strings here.
    url = 'https://maps.googleapis.com/maps/api/place/search/json?location=' + latitude + ',' + longitude + '&radius=' + radius + '&types=bar|night_club&name=&sensor=false&key='+ __PlacesKey__
    json = urllib2.urlopen(url).read()
    data = simplejson.loads(json)
    logging.info("Google Places: %s",data)
    to_return = []
    for d in data['results']:
        to_return.append({'id': d['id'],'reference':d['reference'], 'image_url': d['icon'], 'source': 'False', 'type': d['types'][0], 'name': d['name'], 'description': '', 'address': d['vicinity'], 'lon': d['geometry']['location']['lng'], 'lat': d['geometry']['location']['lat']})
    # Convert the metric radius into angular deltas: R is the Earth radius
    # in metres; longitude spacing shrinks by cos(latitude).
    R=6378137.0
    radius = (float)(radius) * 1.0
    dLat = radius/R
    lon_coef = math.pi*((float)(latitude))/180.0
    lon_coef = math.cos(lon_coef)
    dLon = radius/(R*lon_coef)
    logging.info("%s %s",dLat,dLon)
    # Bounding box in degrees around the query point.
    lat_range = (float(latitude)-dLat * 180/math.pi, float(latitude)+dLat * 180/math.pi)
    lon_range = (float(longitude)-dLon * 180/math.pi, float(longitude)+dLon * 180/math.pi)
    # Pre-filter on latitude in the database, then check longitude in Python.
    local_places = models.LocalPlaces.objects.filter(lat__range=lat_range)
    for obj in local_places:
        if float(obj.lon) >= lon_range[0] and float(obj.lon) <= lon_range[1]:
            to_return.append({'id': obj.id, 'reference':obj.id,'image_url': 'http://naperville-webdesign.net/wp-content/uploads/2012/12/home-icon-hi.png', 'source': 'True', 'type': obj.type, 'name': obj.name, 'description': obj.description, 'address': obj.address, 'lon': obj.lon, 'lat': obj.lat})
    return to_return
@permissions.is_logged_in
def getEvents(request):
    """List events happening at the places near the POSTed coordinates.

    For each nearby place (see _getPlaces) the events attached to it are
    filtered: closed events and events whose day-of-month differs from
    today's by more than one are skipped.  Coordinates come from either
    the local place row or the Google Places details lookup; events
    without positive resolvable coordinates are dropped.
    """
    if request.method == 'POST':
        places = _getPlaces(request)
        to_return = []
        for place in places:
            # NOTE(review): both branches run the identical query — the
            # two sources presumably should be filtered differently.
            if place['source'] != "False":
                events = models.Event.objects.filter(place_id=place['id']).all()
            else:
                events = models.Event.objects.filter(place_id=place['id']).all()
            logging.info("Events Count : %d %s", events.count(),place['id'])
            for event in events:
                if event.status == "Closed":
                    logging.info("Exited because event is closed")
                    continue
                # Day-of-month comparison only; ignores month/year boundaries.
                currentDate = datetime.datetime.now()
                timeDelta = event.date.day - currentDate.day
                if timeDelta > 1 or timeDelta < -1:
                    logging.info("Exited because event has not the good date")
                    continue
                try:
                    if event.local != "False":
                        # Locally stored place: coordinates come from the DB row.
                        if not event.place_id:
                            lon=lat=0
                        else:
                            place_tmp = models.LocalPlaces.objects.filter(id=event.place_id).get()
                            lon = place_tmp.lon
                            lat = place_tmp.lat
                    else:
                        # Google place: resolve coordinates via the details API.
                        if not event.place_id:
                            lon=lat=0
                        else:
                            logging.info('Place id %s:', event.place_id)
                            place_tmp = _getPlaceDetails(event.reference)
                            logging.info('data :%s',place)
                            lon = place_tmp['geometry']['location']['lng']
                            lat = place_tmp['geometry']['location']['lat']
                except models.LocalPlaces.DoesNotExist:
                    lon = lat = 0;
                if lon > 0 and lat > 0:
                    to_return.append({'id': event.id, 'name': event.name,'address':_convertToAddress(lon,lat), 'description': event.description, 'start_time': event.start_time, 'end_time': event.end_time, 'type': place['type'],'lon': lon, 'lat': lat})
        return HttpResponse(simplejson.dumps({'list': to_return}))
    return HttpResponseNotAllowed(['GET'])
@permissions.is_logged_in
def getIntrestsList(request):
    """List every interest together with whether the token's owner has
    selected it."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['GET'])
    token = request.POST.get('auth_token', None)
    owner = models.TokenAuthModel.objects.filter(token=token).get().user
    entries = []
    for interest in models.Intrests.objects.all():
        chosen = bool(models.UserIntrest.objects.filter(user=owner, intrest=interest).count())
        entries.append({'name': interest.name,
                        'description': interest.description,
                        'selected': chosen,
                        'id': interest.id})
    return HttpResponse(simplejson.dumps({'list': entries}))
@permissions.is_logged_in
def updateUserIntrest(request):
    """Toggle an interest for the token's owner: delete the link if it
    exists, create it otherwise."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['GET'])
    token = request.POST.get('auth_token', None)
    intrest = request.POST.get('intrest', None)
    if intrest is None:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
    owner = models.TokenAuthModel.objects.filter(token=token).get().user
    try:
        intrest_model = models.Intrests.objects.filter(id=intrest).get()
    except models.Intrests.DoesNotExist:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'Intrest dose not exist'}))
    try:
        link = models.UserIntrest.objects.filter(user=owner, intrest=intrest_model).get()
        link.delete()
    except models.UserIntrest.DoesNotExist:
        models.UserIntrest(user=owner, intrest=intrest_model).save()
    return HttpResponse(simplejson.dumps({'empty': 'empty'}))
def addIntrest(request):
    """Create an interest from GET parameters ``name`` and ``description``.

    NOTE(review): unauthenticated, GET-based mutation — presumably a
    development helper; confirm before exposing publicly.
    """
    name = request.GET.get('name', '')
    description = request.GET.get('description', '')
    models.Intrests(name=name, description=description).save()
    return HttpResponse(simplejson.dumps({'empty': 'empty'}))
@permissions.is_logged_in
def addChatRoomMessage(request):
    """Post a chat message from the token's owner into an event's chat
    room, timestamped with the current time."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['GET'])
    token = request.POST.get('auth_token', None)
    message = request.POST.get('message', None)
    event = request.POST.get('event_id', None)
    if message is None or event is None:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
    auth_token = models.TokenAuthModel.objects.filter(token=token).get()
    try:
        event_models = models.Event.objects.filter(id=event).get()
    except models.Event.DoesNotExist:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'Event dose not exist'}))
    models.EventChatRoom(user=auth_token.user,
                         message=message,
                         event=event_models,
                         date=datetime.datetime.now()).save()
    return HttpResponse(simplejson.dumps({'empty': 'empty'}))
@permissions.is_logged_in
def getChatRoomMessage(request):
    """Return an event's chat messages, newest first."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['GET'])
    event = request.POST.get('event_id', None)
    if event is None:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
    try:
        event_models = models.Event.objects.filter(id=event).get()
    except models.Event.DoesNotExist:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'Event dose not exist'}))
    history = [
        {'date': msg.date.strftime('%d/%m/%y %H:%M:%S'),
         'message': msg.message,
         'user': msg.user.username}
        for msg in models.EventChatRoom.objects.filter(event=event_models).order_by('-date')
    ]
    return HttpResponse(simplejson.dumps({'list': history}))
@permissions.is_logged_in
def getFullEventInfo(request):
    """Return the full event record plus resolved place details.

    Place name/coordinates/address come either from the local place row
    (event.local != "False") or from the Google Places details API.
    Events whose coordinates cannot be resolved to positive values fall
    through to the 405 response (preserved historical behaviour).
    """
    if request.method == 'POST':
        id = request.POST.get('id', None)
        if id is None:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
        try:
            event = models.Event.objects.filter(id=id).get()
            try:
                if event.local != "False":
                    # Locally stored place: everything comes from the DB row.
                    if not event.place_id:
                        lon=lat=0
                    else:
                        place_tmp = models.LocalPlaces.objects.filter(id=event.place_id).get()
                        lon = place_tmp.lon
                        lat = place_tmp.lat
                        name = place_tmp.name
                        address = place_tmp.address
                        type = place_tmp.type
                        place_description = place_tmp.description
                else:
                    # Google place: resolve details via the Places API.
                    if not event.place_id:
                        lon=lat=0
                    else:
                        logging.info('Place id %s:', event.place_id)
                        place_tmp = _getPlaceDetails(event.reference)
                        name = place_tmp['name']
                        place_description = ''
                        lon = place_tmp['geometry']['location']['lng']
                        lat = place_tmp['geometry']['location']['lat']
                        logging.info("Place details %s",place_tmp)
                        address = _convertToAddress(lon,lat)
                        type = place_tmp['types'][0]
            except models.LocalPlaces.DoesNotExist:
                lon = lat = 0
            if lon > 0 and lat > 0:
                myDict = {'name': event.name, 'close': event.status, 'description': event.description, 'price': event.price, 'start_time': event.start_time, 'end_time': event.end_time,
                          'age_average':event.age_average, 'female_ratio':event.female_ratio, 'stars':event.stars, 'single_ratio':event.single_ratio, 'headcount':event.headcount, 'lon':lon,
                          'lat':lat,'place_name':name,'place_description':place_description,'place_address':address,
                          'type':type}
                return HttpResponse(simplejson.dumps(myDict))
        # Bug fix: the outer handler caught LocalPlaces.DoesNotExist, so a
        # missing Event escaped as an unhandled Event.DoesNotExist (500).
        except models.Event.DoesNotExist:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Object does not exists'}))
    return HttpResponseNotAllowed(['GET'])
@permissions.is_logged_in
def getEventInfo(request):
    """Return the editable fields of an event to its creator.

    Only the user that created the event may read it through this
    endpoint; everyone else gets a 400 'Forbidden to edit' error.
    """
    if request.method == 'POST':
        id = request.POST.get('id', None)
        token = request.POST.get('auth_token', None)
        auth_token = models.TokenAuthModel.objects.filter(token=token).get()
        if not id:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
        try:
            event = models.Event.objects.filter(id=id).get()
            if event.creator_id != auth_token.user:
                return HttpResponseBadRequest(simplejson.dumps({'error': 'Forbidden to edit'}))
            return HttpResponse(simplejson.dumps({'name': event.name, 'close': event.status, 'description': event.description, 'price': event.price, 'start_time': event.start_time, 'end_time': event.end_time}))
        # Bug fix: the handler caught LocalPlaces.DoesNotExist, so a missing
        # Event raised an unhandled Event.DoesNotExist (HTTP 500).
        except models.Event.DoesNotExist:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Object does not exists'}))
    return HttpResponseNotAllowed(['GET'])
@permissions.is_logged_in
def saveEventInfo(request):
    """Create a new event (no ``id`` POSTed) or update an existing one.

    Only the creator may update an event.  Returns the event id on
    success and a JSON error with status 400 otherwise.
    """
    if request.method == 'POST':
        name = request.POST.get('name', None)
        description = request.POST.get('description', None)
        start_time = request.POST.get('start_time', None)
        end_time = request.POST.get('end_time', None)
        price = request.POST.get('price', None)
        id = request.POST.get('id', None)
        token = request.POST.get('auth_token', None)
        auth_token = models.TokenAuthModel.objects.filter(token=token).get()
        if not name or not description or not start_time or not end_time or not price:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
        if not id:
            # Create path: stamp the event with today's date and the caller.
            event = models.Event(name=name, description=description,
                                 date=datetime.datetime.now(),
                                 start_time=start_time, end_time=end_time,
                                 price=price, creator_id=auth_token.user)
            event.save()
            return HttpResponse(simplejson.dumps({'id': event.id}))
        try:
            event = models.Event.objects.filter(id=id).get()
            # Ownership check before persisting any change.
            if event.creator_id != auth_token.user:
                return HttpResponseBadRequest(simplejson.dumps({'error': 'Forbidden to edit'}))
            event.name = name
            event.description = description
            event.start_time = start_time
            event.date = datetime.datetime.now()
            event.end_time = end_time
            event.price = price
            event.save()
            return HttpResponse(simplejson.dumps({'id': event.id}))
        # Bug fix: previously caught LocalPlaces.DoesNotExist, letting a
        # missing Event escape as an unhandled exception (HTTP 500).
        except models.Event.DoesNotExist:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Object does not exists'}))
    return HttpResponseNotAllowed(['GET'])
@permissions.is_logged_in
def saveEventPlace(request):
    """Attach a place to an event.

    ``is_local`` is the string "True" when the place is a local DB row;
    otherwise ``place_id``/``place_reference`` are Google Places
    identifiers stored directly on the event.
    """
    if request.method == 'POST':
        place_id = request.POST.get('place_id', None)
        place_reference = request.POST.get('place_reference', None)
        event_id = request.POST.get('event_id', None)
        is_local = request.POST.get('is_local', None)
        token = request.POST.get('auth_token', None)
        auth_token = models.TokenAuthModel.objects.filter(token=token).get()
        if not place_id or not event_id or not is_local:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
        if is_local == "True":
            try:
                place = models.LocalPlaces.objects.filter(id=place_id).get()
            except models.LocalPlaces.DoesNotExist:
                return HttpResponseBadRequest(simplejson.dumps({'error': 'Object does not exists'}))
        try:
            event = models.Event.objects.filter(id=event_id).get()
            if event.creator_id != auth_token.user:
                return HttpResponseBadRequest(simplejson.dumps({'error': 'Forbidden to edit'}))
            event.local = is_local
            if is_local == "True":
                event.place_id = place.id
            else:
                event.place_id = place_id
                event.reference = place_reference
            event.save()
            # Bug fix: the success response was sent as HttpResponseBadRequest
            # (HTTP 400); a successful save now answers 200.
            return HttpResponse(simplejson.dumps({'id': event.id}))
        # Bug fix: catch the exception the Event lookup actually raises.
        except models.Event.DoesNotExist:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Object does not exists'}))
    return HttpResponseNotAllowed(['GET'])
@permissions.is_logged_in
def getPersonalEvents(request):
    """List the events created by the token's owner.

    Coordinates are resolved from the local place row or the Google
    Places details API; events without positive resolvable coordinates
    are omitted from the result.
    """
    if request.method == 'POST':
        token = request.POST.get('auth_token', None)
        auth_token = models.TokenAuthModel.objects.filter(token=token).get()
        personalEvents = models.Event.objects.filter(creator_id=auth_token.user)
        to_return = []
        for event in personalEvents:
            try:
                if event.local != "False":
                    # Locally stored place: coordinates come from the DB row.
                    if not event.place_id:
                        lon=lat=0
                    else:
                        place = models.LocalPlaces.objects.filter(id=event.place_id).get()
                        lon = place.lon
                        lat = place.lat
                else:
                    # Google place: coordinates come from the details API.
                    if not event.place_id:
                        lon=lat=0
                    else:
                        logging.info('Place id %s:', event.place_id)
                        place = _getPlaceDetails(event.reference)
                        logging.info('data :%s',place)
                        lon = place['geometry']['location']['lng']
                        lat = place['geometry']['location']['lat']
            except models.LocalPlaces.DoesNotExist:
                lon = lat = 0;
            if lon > 0 and lat > 0:
                to_return.append({'id': event.id, 'name': event.name, 'description': event.description, 'start_time': event.start_time, 'end_time': event.end_time, 'lon': lon, 'lat': lat})
        return HttpResponse(simplejson.dumps({'list': to_return}))
    return HttpResponseNotAllowed(['GET'])
def _convertToAddress(lon, lat):
    """Reverse-geocode (lon, lat) via the Google geocode API and return
    the first formatted address."""
    url = 'http://maps.googleapis.com/maps/api/geocode/json?latlng=%s,%s&sensor=false' % (lat, lon)
    raw = urllib2.urlopen(url).read()
    data = simplejson.loads(raw)
    logging.info("Address %s", data)
    return data['results'][0]['formatted_address']
@permissions.is_logged_in
def getCurrentAddress(request):
    """Reverse-geocode POSTed longitude/latitude into a street address."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['GET'])
    lon = request.POST.get('longitude', None)
    lat = request.POST.get('latitude', None)
    if not lon or not lat:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
    return HttpResponse(simplejson.dumps({'address': _convertToAddress(lon, lat)}))
@permissions.is_logged_in
def addLocalPlace(request):
    """Create a local place (no ``id`` POSTed) or update an existing
    one, geocoding its coordinates into a street address."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['GET'])
    name = request.POST.get('name', None)
    description = request.POST.get('description', None)
    place_type = request.POST.get('type', None)
    lon = request.POST.get('longitude', None)
    lat = request.POST.get('latitude', None)
    place_id = request.POST.get('id', None)
    if not name or not description or not place_type or not lon or not lat:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
    if not place_id:
        # Create path.
        place = models.LocalPlaces(name=name, description=description,
                                   lon=lon, lat=lat, type=place_type,
                                   address=_convertToAddress(lon, lat))
        place.save()
        return HttpResponse(simplejson.dumps({'id': place.id}))
    # Update path.
    try:
        place = models.LocalPlaces.objects.filter(id=place_id).get()
    except models.LocalPlaces.DoesNotExist:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'Object does not exists'}))
    place.name = name
    place.description = description
    place.type = place_type
    place.lon = lon
    place.lat = lat
    place.address = _convertToAddress(lon, lat)
    place.save()
    return HttpResponse(simplejson.dumps({'id': place.id}))
@permissions.is_logged_in
def getLocalPlaceInfo(request):
    """Return the stored attributes of one local place."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['GET'])
    place_id = request.POST.get('id', None)
    if not place_id:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
    try:
        place = models.LocalPlaces.objects.filter(id=place_id).get()
    except models.LocalPlaces.DoesNotExist:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'Object does not exists'}))
    details = {'name': place.name, 'description': place.description,
               'type': place.type, 'lon': place.lon, 'lat': place.lat,
               'address': place.address}
    return HttpResponse(simplejson.dumps(details))
@permissions.is_logged_in
def saveEventIntrests(request):
    """Toggle an interest tag on an event: remove the link if present,
    create it otherwise."""
    if request.method == 'POST':
        event_id = request.POST.get('event_id', None)
        intrest = request.POST.get('intrest_id', None)
        if intrest is None or not event_id:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
        try:
            intrest_model = models.Intrests.objects.filter(id=intrest).get()
        except models.Intrests.DoesNotExist:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Intrest dose not exist'}))
        try:
            event_model = models.Event.objects.filter(id=event_id).get()
        # Bug fix: this caught Intrests.DoesNotExist for an Event lookup,
        # so a missing event crashed with an unhandled Event.DoesNotExist.
        except models.Event.DoesNotExist:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Event dose not exist'}))
        try:
            event_intrest = models.EventIntrests.objects.filter(event=event_model, intrest=intrest_model).get()
            event_intrest.delete()
        except models.EventIntrests.DoesNotExist:
            event_intrest = models.EventIntrests(event=event_model, intrest=intrest_model)
            event_intrest.save()
        return HttpResponse(simplejson.dumps({'empty': 'empty'}))
    return HttpResponseNotAllowed(['GET'])
@permissions.is_logged_in
def getEventIntrests(request):
    """List every interest together with whether it is attached to the
    given event."""
    if request.method == 'POST':
        event_id = request.POST.get('event_id', None)
        if not event_id:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
        try:
            event_model = models.Event.objects.filter(id=event_id).get()
        # Bug fix: this caught Intrests.DoesNotExist for an Event lookup,
        # so a missing event crashed with an unhandled Event.DoesNotExist.
        except models.Event.DoesNotExist:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Event dose not exist'}))
        intrests = models.Intrests.objects.all()
        toReturn = []
        for i in intrests:
            isSelected = bool(models.EventIntrests.objects.filter(event=event_model, intrest=i).count())
            toReturn.append({'name': i.name, 'description': i.description, 'selected': isSelected, 'id': i.id})
        return HttpResponse(simplejson.dumps({'list': toReturn}))
    return HttpResponseNotAllowed(['GET'])
@permissions.is_logged_in
def closeEvent(request):
    """Toggle an event's status between 'Closed' and open (empty
    string).  Only the event's creator may toggle it."""
    if request.method == 'POST':
        event_id = request.POST.get('event_id', None)
        token = request.POST.get('auth_token', None)
        auth_token = models.TokenAuthModel.objects.filter(token=token).get()
        if not event_id:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
        try:
            event = models.Event.objects.filter(id=event_id).get()
            if event.creator_id != auth_token.user:
                return HttpResponseBadRequest(simplejson.dumps({'error': 'Forbidden to edit'}))
            event.status = '' if event.status == 'Closed' else 'Closed'
            event.save()
            # Bug fix: success was returned with HTTP 400; use 200.
            return HttpResponse(simplejson.dumps({'id': event.id}))
        # Bug fix: catch the exception the Event lookup actually raises
        # (LocalPlaces.DoesNotExist could never occur here).
        except models.Event.DoesNotExist:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Object does not exists'}))
    return HttpResponseNotAllowed(['GET'])
@permissions.is_logged_in
def getStatus(request):
    """Report an event's status: 'Closed' as stored, anything else as
    'Open'."""
    if request.method == 'POST':
        event_id = request.POST.get('event_id', None)
        if not event_id:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
        try:
            event = models.Event.objects.filter(id=event_id).get()
            if event.status != "Closed":
                status = "Open"
            else:
                status = event.status
            # Bug fix: the status was returned with HTTP 400; use 200.
            return HttpResponse(simplejson.dumps({'status': status}))
        # Bug fix: catch the exception the Event lookup actually raises.
        except models.Event.DoesNotExist:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Object does not exists'}))
    return HttpResponseNotAllowed(['GET'])
@permissions.is_logged_in
def deleteEvent(request):
    """Delete an event.  Only the event's creator may delete it."""
    if request.method == 'POST':
        event_id = request.POST.get('event_id', None)
        token = request.POST.get('auth_token', None)
        auth_token = models.TokenAuthModel.objects.filter(token=token).get()
        if not event_id:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
        try:
            event = models.Event.objects.filter(id=event_id).get()
            if event.creator_id != auth_token.user:
                return HttpResponseBadRequest(simplejson.dumps({'error': 'Forbidden to edit'}))
            # Capture the id before the row disappears from the database.
            deleted_id = event.id
            event.delete()
            # Bug fix: success was returned with HTTP 400; use 200.
            return HttpResponse(simplejson.dumps({'id': deleted_id}))
        # Bug fix: catch the exception the Event lookup actually raises.
        except models.Event.DoesNotExist:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Object does not exists'}))
    return HttpResponseNotAllowed(['GET'])
@permissions.is_logged_in
def checkin(request):
    """Subscribe the token's user to an event and refresh the event's
    aggregate statistics."""
    if request.method == 'POST':
        token = request.POST.get('auth_token', None)
        user = models.TokenAuthModel.objects.filter(token=token).get().user
        event_id = request.POST.get('event_id', None)
        try:
            event = models.Event.objects.filter(id=event_id).get()
        # Robustness fix: an unknown/missing event id previously raised an
        # unhandled DoesNotExist and produced a 500 response.
        except models.Event.DoesNotExist:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Event dose not exist'}))
        # Check in only if not already checked in: duplicate Subscription
        # rows would skew the statistics recomputed by _recompute().
        if not models.Subscription.objects.filter(user=user, event=event).count():
            models.Subscription(user=user, event=event).save()
            _recompute(event)
        return HttpResponse(simplejson.dumps({'empty': 'empty'}))
    return HttpResponseNotAllowed(['GET'])
@permissions.is_logged_in
def checkout(request):
    """Unsubscribe the token's user from an event and refresh the
    event's aggregate statistics."""
    if request.method == 'POST':
        token = request.POST.get('auth_token', None)
        user = models.TokenAuthModel.objects.filter(token=token).get().user
        event_id = request.POST.get('event_id', None)
        try:
            event = models.Event.objects.filter(id=event_id).get()
            subscription = models.Subscription.objects.filter(user=user, event=event).get()
        # Robustness fix: a missing event or subscription previously raised
        # an unhandled DoesNotExist and produced a 500 response.
        except (models.Event.DoesNotExist, models.Subscription.DoesNotExist):
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Wrong data'}))
        subscription.delete()
        _recompute(event)
        return HttpResponse(simplejson.dumps({'empty': 'empty'}))
    return HttpResponseNotAllowed(['GET'])
def _recompute(event):
    """Recompute aggregate statistics (headcount, age average, female
    and single ratios) for *event* from its subscriptions and save it.

    Only subscribers with an 8-character birthday (YYYYMMDD string) are
    counted.
    """
    event.headcount = 0
    event.age_average = 0
    event.female_ratio = 0
    event.single_ratio = 0
    now = datetime.datetime.now()
    for s in models.Subscription.objects.filter(event=event).all():
        # Bug fix: filter(...) returns a QuerySet, so info.birthday raised
        # AttributeError; fetch the single row and skip users without one.
        try:
            info = models.ExtraInfoForUser.objects.filter(user=s.user).get()
        except models.ExtraInfoForUser.DoesNotExist:
            continue
        # Bug fix: a None birthday crashed len(); treat it as unknown.
        if not info.birthday or len(info.birthday) != 8:
            continue
        event.headcount += 1
        year = int(info.birthday[:4])
        month = int(info.birthday[4:6])
        day = int(info.birthday[6:8])
        # Age = full years elapsed since the birthday.
        event.age_average += now.year - year - (now.month < month or (now.month == month and now.day < day))
        event.female_ratio += info.sex == "2"
        event.single_ratio += info.status == "2"
    if event.headcount > 0:
        # NOTE(review): under Python 2 these are integer divisions, so the
        # ratios floor to 0 or 1 — confirm whether float ratios are wanted.
        event.age_average /= event.headcount
        event.female_ratio /= event.headcount
        event.single_ratio /= event.headcount
    event.save()
@permissions.is_logged_in
def star(request):
    """Record the caller's star rating on their subscription and refresh
    the event's average rating over all rated subscriptions."""
    if request.method == 'POST':
        token = request.POST.get('auth_token', None)
        user = models.TokenAuthModel.objects.filter(token=token).get().user
        event_id = request.POST.get('event_id', None)
        stars = request.POST.get('stars', None)
        try:
            event = models.Event.objects.filter(id=event_id).get()
            subscription = models.Subscription.objects.filter(user=user, event=event).get()
        # Robustness fix: a missing event or subscription previously raised
        # an unhandled DoesNotExist and produced a 500 response.
        except (models.Event.DoesNotExist, models.Subscription.DoesNotExist):
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Wrong data'}))
        subscription.stars = stars
        subscription.save()
        # Recalculate the event's average, ignoring unrated subscriptions.
        total = 0
        count = 0
        for s in models.Subscription.objects.filter(event=event).all():
            if not s.stars:
                continue
            total += int(s.stars)
            count += 1
        event.stars = (total / count) if count > 0 else 0
        event.save()
        return HttpResponse(simplejson.dumps({'empty': 'empty'}))
    return HttpResponseNotAllowed(['GET'])
# TODO: check in only if not already checked in
# Create your views here.
from django.http import HttpResponse
from django.http import HttpResponseNotAllowed, HttpResponseBadRequest, HttpResponseForbidden
from django.utils import simplejson
from django.contrib.auth.models import User
from api import models
from api import utils
from api import permissions
import datetime
import logging
import urllib2
import math
__PlacesKey__ = 'AIzaSyBLHdzA-5F9DCllQbLmataclCyVp8MSXok'
def login(request):
    """Authenticate a username/password pair and bind a device to an auth token.

    POST params: username, password, device_type, device_manufacture,
    device_os, os_version, device_id.
    Returns {'auth_token': ...} on success, 400 with an error message on
    bad/missing data, 405 for non-POST.
    """
    if request.method == 'POST':
        username = request.POST.get('username', None)
        password = request.POST.get('password', None)
        device_type = request.POST.get('device_type', None)
        device_manufacture = request.POST.get('device_manufacture', None)
        device_os = request.POST.get('device_os', None)
        os_version = request.POST.get('os_version', None)
        device_id = request.POST.get('device_id', None)
        logging.info('Login request from user %s', username)
        logging.info('device_type %s', device_type)
        logging.info('device_manufacture %s', device_manufacture)
        logging.info('device_os %s', device_os)
        logging.info('os_version %s', os_version)
        logging.info('device_id %s', device_id)
        # device_manufacture is the only device field not required here —
        # presumably optional; confirm against the client.
        if not username or not password or not device_id or not device_type or not device_os or not os_version:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
        try:
            user = User.objects.get(username=username)
            if not user.check_password(password):
                return HttpResponseBadRequest(simplejson.dumps({'error': 'Username and password not matching'}))
        except User.DoesNotExist:
            # Same message as a wrong password so usernames cannot be probed.
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Username and password not matching'}))
        try:
            device_info = models.DeviceInfo.objects.filter(device_id=device_id).get()
        except models.DeviceInfo.DoesNotExist:
            # First login from this device: register it, owned by this user.
            device_info = models.DeviceInfo(device_id=device_id, device_manufacture=device_manufacture, device_os=device_os,
                                            device_type=device_type, os_version=os_version, device_owner=user)
            device_info.save()
        #Generate token and save it (loop until the random token is unique)
        auth_string = utils.tokenGenerator(size=16)
        while models.TokenAuthModel.objects.filter(token=auth_string).count():
            auth_string = utils.tokenGenerator(size=16)
        # NOTE(review): expiry is set to *now*, i.e. the token looks expired
        # the moment it is issued — confirm whether expiring_date is actually
        # enforced anywhere.
        expire_date = datetime.datetime.now()
        try:
            # One token per device: re-use the row, re-assign it to this user.
            auth_token = models.TokenAuthModel.objects.filter(device=device_info).get()
            auth_token.expiring_date = expire_date
            auth_token.token = auth_string
            auth_token.user = user
            device_info.device_owner = user
            device_info.save()
            auth_token.save()
        except models.TokenAuthModel.DoesNotExist:
            auth_token = models.TokenAuthModel(user=user, device=device_info, token=auth_string,
                                               expiring_date=expire_date)
            auth_token.save()
        return HttpResponse(simplejson.dumps({'auth_token': auth_string}))
    return HttpResponseNotAllowed(['GET'])
def register(request):
    """Create a new Django auth user.

    POST params: username, email, password.
    Fix: *username* was never validated — a missing username previously
    reached User.objects.create_user(None, ...) unchecked.
    """
    if request.method == 'POST':
        username = request.POST.get('username', None)
        email = request.POST.get('email', None)
        password = request.POST.get('password', None)
        logging.info('User %s is trying to register with email %s', username, email)
        if not username or not email or not password:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
        if not utils.validateEmail(email):
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Invalid email'}))
        if len(password) < 4:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Password too short'}))
        if User.objects.filter(email=email).count():
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Email already used'}))
        if User.objects.filter(username=username).count():
            return HttpResponseBadRequest(simplejson.dumps({'error': 'User already registered'}))
        new_user = User.objects.create_user(username, email, password)
        new_user.save()
        return HttpResponse(simplejson.dumps({'empty': 'empty'}))
    return HttpResponseNotAllowed(['GET'])
@permissions.is_logged_in
def getUserInfo(request):
    """Return the basic profile (email, full name, username) of the token's owner."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['GET'])
    token = request.POST.get('auth_token', None)
    owner = models.TokenAuthModel.objects.filter(token=token).get().user
    payload = {'email': owner.email, 'full_name': owner.first_name, 'username': owner.username}
    return HttpResponse(simplejson.dumps(payload))
@permissions.is_logged_in
def getFullUserInfo(request):
    """Return account data plus extra profile fields for the token's owner,
    creating an empty ExtraInfoForUser row on first access."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['GET'])
    token = request.POST.get('auth_token', None)
    owner = models.TokenAuthModel.objects.filter(token=token).get().user
    try:
        extra = models.ExtraInfoForUser.objects.filter(user=owner).get()
    except models.ExtraInfoForUser.DoesNotExist:
        extra = models.ExtraInfoForUser(user=owner)
        extra.save()
    payload = {'full_name': owner.first_name, 'email': owner.email, 'birthday': extra.birthday, 'sex': extra.sex, 'status': extra.status, 'username': owner.username}
    return HttpResponse(simplejson.dumps(payload))
@permissions.is_logged_in
def updateUserInfo(request):
    """Update the caller's name, email and extra profile fields.

    POST params: auth_token, full_name, email, birthday, sex, status.
    Fix: the except clause referenced a misspelled module name (``modesl``),
    which raised NameError whenever the user had no ExtraInfoForUser row yet.
    """
    if request.method == 'POST':
        token = request.POST.get('auth_token', None)
        first_name = request.POST.get('full_name', None)
        email = request.POST.get('email', None)
        birthday = request.POST.get('birthday', None)
        sex = request.POST.get('sex', None)
        status = request.POST.get('status', None)
        auth_token = models.TokenAuthModel.objects.filter(token=token).get()
        user = auth_token.user
        users = User.objects.filter(email=email)
        if first_name is None or email is None or birthday is None or sex is None or status is None:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
        if not utils.validateEmail(email):
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Invalid email'}))
        # Reject the email if it belongs to any account other than the caller's.
        if (users.count() == 1 and users.get() != user) or users.count() > 1:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Email already used'}))
        try:
            personalInfo = models.ExtraInfoForUser.objects.filter(user=user).get()
        except models.ExtraInfoForUser.DoesNotExist:
            personalInfo = models.ExtraInfoForUser(user=user, sex=sex, birthday=birthday, status=status)
            personalInfo.save()
        user.first_name = first_name
        personalInfo.status = status
        personalInfo.sex = sex
        personalInfo.birthday = birthday
        personalInfo.save()
        user.email = email
        user.save()
        return HttpResponse(simplejson.dumps({'empty': 'empty'}))
    return HttpResponseNotAllowed(['GET'])
@permissions.is_logged_in
def getSecretQuestion(request):
    """Return the caller's secret question and answer, creating an empty
    ExtraInfoForUser row on first access."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['GET'])
    token = request.POST.get('auth_token', None)
    owner = models.TokenAuthModel.objects.filter(token=token).get().user
    try:
        extra = models.ExtraInfoForUser.objects.filter(user=owner).get()
    except models.ExtraInfoForUser.DoesNotExist:
        extra = models.ExtraInfoForUser(user=owner)
        extra.save()
    return HttpResponse(simplejson.dumps({'secret_question': extra.secret_question, 'secret_answer': extra.secret_answer}))
@permissions.is_logged_in
def updateSecretQuestion(request):
    """Set the caller's secret question/answer after re-checking the password.

    POST params: auth_token, secret_question, secret_answer, password.
    Fix: the view logged the entire request.POST at ERROR level, which wrote
    the user's plaintext password into the logs — the log line was removed.
    """
    if request.method == 'POST':
        token = request.POST.get('auth_token', None)
        secret_question = request.POST.get('secret_question', None)
        secret_answer = request.POST.get('secret_answer', None)
        password = request.POST.get('password', None)
        auth_token = models.TokenAuthModel.objects.filter(token=token).get()
        user = auth_token.user
        if secret_question is None or secret_answer is None or password is None:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
        if not user.check_password(password):
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Wrong password'}))
        try:
            personalInfo = models.ExtraInfoForUser.objects.filter(user=user).get()
        except models.ExtraInfoForUser.DoesNotExist:
            personalInfo = models.ExtraInfoForUser(user=user)
            personalInfo.save()
        personalInfo.secret_question = secret_question
        personalInfo.secret_answer = secret_answer
        personalInfo.save()
        return HttpResponse(simplejson.dumps({'empty': 'empty'}))
    return HttpResponseNotAllowed(['GET'])
@permissions.is_logged_in
def updatePassword(request):
    """Change the caller's password after verifying the current one.

    POST params: auth_token, password (current), new_password.
    Fix: the new password was accepted with any length, while register()
    requires at least 4 characters — enforce the same minimum here.
    """
    if request.method == 'POST':
        token = request.POST.get('auth_token', None)
        new_password = request.POST.get('new_password', None)
        password = request.POST.get('password', None)
        if password is None or new_password is None:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
        # Same policy as register().
        if len(new_password) < 4:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Password too short'}))
        auth_token = models.TokenAuthModel.objects.filter(token=token).get()
        user = auth_token.user
        if not user.check_password(password):
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Wrong password'}))
        user.set_password(new_password)
        user.save()
        return HttpResponse(simplejson.dumps({'empty': 'empty'}))
    return HttpResponseNotAllowed(['GET'])
@permissions.is_logged_in
def checkProfileCompletion(request):
    """Report whether the caller's extra profile is fully filled in.

    Responds 400 {'error': 'No data'} when the ExtraInfoForUser row is
    missing or any of secret question/answer, birthday or sex is blank.
    """
    if request.method != 'POST':
        return HttpResponseNotAllowed(['GET'])
    token = request.POST.get('auth_token', None)
    auth_token = models.TokenAuthModel.objects.filter(token=token).get()
    try:
        extra = models.ExtraInfoForUser.objects.filter(user=auth_token.user).get()
    except models.ExtraInfoForUser.DoesNotExist:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'No data'}))
    incomplete = (extra.secret_question == '' or extra.secret_answer == ''
                  or extra.birthday == '' or extra.sex == '')
    if incomplete:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'No data'}))
    return HttpResponse(simplejson.dumps({'empty': 'empty'}))
def getSecretQuestionForRecovery(request):
    """Return the stored secret question for *username* during password
    recovery (no authentication — recovery happens while logged out)."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['GET'])
    username = request.POST.get('username', None)
    if username is None:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
    try:
        account = User.objects.filter(username=username).get()
    except User.DoesNotExist:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'User does not exists'}))
    try:
        extra = models.ExtraInfoForUser.objects.filter(user=account).get()
    except models.ExtraInfoForUser.DoesNotExist:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'User does not have security questions'}))
    if extra.secret_answer == '' or extra.secret_question == '' or extra.birthday == '':
        return HttpResponseBadRequest(simplejson.dumps({'error': 'User does not have security questions'}))
    return HttpResponse(simplejson.dumps({'secret_question': extra.secret_question}))
def getRecoveryTempToken(request):
    """Issue a short-lived (30 s) recovery token when the posted secret
    answer and birthday match the user's stored profile."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['GET'])
    username = request.POST.get('username', None)
    answer = request.POST.get('answer', None)
    birthday = request.POST.get('birthday', None)
    if username is None or answer is None or birthday is None:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
    try:
        account = User.objects.filter(username=username).get()
    except User.DoesNotExist:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'User dose not exists'}))
    try:
        extra = models.ExtraInfoForUser.objects.filter(user=account).get()
    except models.ExtraInfoForUser.DoesNotExist:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'User dose not have security questions'}))
    if extra.secret_answer == '' or extra.secret_question == '' or extra.birthday == '':
        return HttpResponseBadRequest(simplejson.dumps({'error': 'User dose not have security questions'}))
    if extra.secret_answer != answer or extra.birthday != birthday:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'Wrong answer'}))
    # Persist a random token valid for 30 seconds.
    tmp_token = utils.tokenGenerator(size=10)
    recovery = models.RecoveryTokens(token=tmp_token, user=account,
                                     expiringDate=datetime.datetime.now() + datetime.timedelta(seconds=30))
    recovery.save()
    return HttpResponse(simplejson.dumps({'tmp_token': tmp_token}))
def updatePasswordAfterRecovery(request):
    """Set a new password using a temporary recovery token.

    POST params: tmp_token, new_password, user (username).
    Fixes:
    - non-POST requests previously fell off the end of the function and
      returned None (HTTP 500); they now get HttpResponseNotAllowed like
      every other view in this module;
    - the token's expiringDate was stored but never checked, and a token
      stayed valid forever — expired tokens are now rejected and a used
      token is deleted (one-shot).
    """
    if request.method == 'POST':
        token = request.POST.get('tmp_token', None)
        new_password = request.POST.get('new_password', None)
        username = request.POST.get('user', None)
        if token is None or new_password is None or username is None:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
        try:
            user = User.objects.filter(username=username).get()
            tmp_auth = models.RecoveryTokens.objects.filter(token=token, user=user).get()
            # NOTE(review): assumes expiringDate is a naive datetime like
            # datetime.datetime.now() — confirm if USE_TZ is enabled.
            if tmp_auth.expiringDate < datetime.datetime.now():
                tmp_auth.delete()
                return HttpResponseBadRequest(simplejson.dumps({'error': 'Wrong data'}))
            user.set_password(new_password)
            user.save()
            tmp_auth.delete()  # recovery tokens are single-use
        except (models.RecoveryTokens.DoesNotExist, User.DoesNotExist):
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Wrong data'}))
        return HttpResponse(simplejson.dumps({'empty': 'empty'}))
    return HttpResponseNotAllowed(['GET'])
def _getPlaceDetails(place_id):
    """Fetch full place details from the Google Places API.

    Returns the decoded 'result' object, or None when no id is given.
    """
    if not place_id:
        return None
    url = ('https://maps.googleapis.com/maps/api/place/details/json?reference='
           + place_id + '&sensor=false&key=' + __PlacesKey__)
    raw = urllib2.urlopen(url).read()
    return simplejson.loads(raw)['result']
@permissions.is_logged_in
def getPlaces(request):
    """Return the bars/night clubs (Google + local) near the posted location."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['GET'])
    nearby = _getPlaces(request)
    return HttpResponse(simplejson.dumps({'list': nearby}))
def _getPlaces(request):
    """Collect bars/night clubs near (latitude, longitude) within *radius*
    metres: Google Places results plus rows from the local LocalPlaces table.

    Returns a list of place dicts, or None when a required POST param is
    missing.  Google entries carry 'source': 'False', local ones 'True'.
    """
    radius = request.POST.get('radius', None)
    latitude = request.POST.get('latitude', None)
    longitude = request.POST.get('longitude', None)
    if not radius or not latitude or not longitude:
        return None
    logging.info("Lon %s, Lat %s, Radious %s",longitude,latitude,radius)
    url = 'https://maps.googleapis.com/maps/api/place/search/json?location=' + latitude + ',' + longitude + '&radius=' + radius + '&types=bar|night_club&name=&sensor=false&key='+ __PlacesKey__
    json = urllib2.urlopen(url).read()
    data = simplejson.loads(json)
    logging.info("Google Places: %s",data)
    to_return = []
    for d in data['results']:
        to_return.append({'id': d['id'],'reference':d['reference'], 'image_url': d['icon'], 'source': 'False', 'type': d['types'][0], 'name': d['name'], 'description': '', 'address': d['vicinity'], 'lon': d['geometry']['location']['lng'], 'lat': d['geometry']['location']['lat']})
    # Convert the metric radius into angular offsets (radians) so local
    # places can be pre-selected with a latitude bounding box.
    # R = Earth radius in metres.
    R=6378137.0
    radius = (float)(radius) * 1.0
    dLat = radius/R
    # Longitude window must be widened by 1/cos(latitude).
    lon_coef = math.pi*((float)(latitude))/180.0
    lon_coef = math.cos(lon_coef)
    dLon = radius/(R*lon_coef)
    logging.info("%s %s",dLat,dLon)
    # Back to degrees for the DB comparison.
    lat_range = (float(latitude)-dLat * 180/math.pi, float(latitude)+dLat * 180/math.pi)
    lon_range = (float(longitude)-dLon * 180/math.pi, float(longitude)+dLon * 180/math.pi)
    # DB filters on latitude only; longitude is checked in Python below.
    local_places = models.LocalPlaces.objects.filter(lat__range=lat_range)
    for obj in local_places:
        if float(obj.lon) >= lon_range[0] and float(obj.lon) <= lon_range[1]:
            to_return.append({'id': obj.id, 'reference':obj.id,'image_url': 'http://naperville-webdesign.net/wp-content/uploads/2012/12/home-icon-hi.png', 'source': 'True', 'type': obj.type, 'name': obj.name, 'description': obj.description, 'address': obj.address, 'lon': obj.lon, 'lat': obj.lat})
    return to_return
@permissions.is_logged_in
def getEvents(request):
    """Return open, date-relevant events at the places near the caller.

    Fixes:
    - both branches of the place['source'] test ran the identical query,
      so the dead branch was collapsed into a single filter call;
    - the Google place-details log line printed the loop variable
      ``place`` instead of the freshly fetched ``place_tmp``.
    """
    if request.method == 'POST':
        places = _getPlaces(request)
        to_return = []
        for place in places:
            # Local and Google places are both keyed by place['id'] here.
            events = models.Event.objects.filter(place_id=place['id']).all()
            logging.info("Events Count : %d %s", events.count(), place['id'])
            for event in events:
                if event.status == "Closed":
                    logging.info("Exited because event is closed")
                    continue
                # Only surface events within one day of today.
                # NOTE(review): comparing .day alone misbehaves across month
                # boundaries — confirm this is the intended window.
                currentDate = datetime.datetime.now()
                timeDelta = event.date.day - currentDate.day
                if timeDelta > 1 or timeDelta < -1:
                    logging.info("Exited because event has not the good date")
                    continue
                try:
                    if event.local != "False":
                        # Place stored in our own LocalPlaces table.
                        if not event.place_id:
                            lon = lat = 0
                        else:
                            place_tmp = models.LocalPlaces.objects.filter(id=event.place_id).get()
                            lon = place_tmp.lon
                            lat = place_tmp.lat
                    else:
                        # Place resolved through the Google Places API.
                        if not event.place_id:
                            lon = lat = 0
                        else:
                            logging.info('Place id %s:', event.place_id)
                            place_tmp = _getPlaceDetails(event.reference)
                            logging.info('data :%s', place_tmp)
                            lon = place_tmp['geometry']['location']['lng']
                            lat = place_tmp['geometry']['location']['lat']
                except models.LocalPlaces.DoesNotExist:
                    lon = lat = 0
                if lon > 0 and lat > 0:
                    to_return.append({'id': event.id, 'name': event.name, 'address': _convertToAddress(lon, lat), 'description': event.description, 'start_time': event.start_time, 'end_time': event.end_time, 'type': place['type'], 'lon': lon, 'lat': lat})
        return HttpResponse(simplejson.dumps({'list': to_return}))
    return HttpResponseNotAllowed(['GET'])
@permissions.is_logged_in
def getIntrestsList(request):
    """List every interest with a flag telling whether the caller selected it."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['GET'])
    token = request.POST.get('auth_token', None)
    user = models.TokenAuthModel.objects.filter(token=token).get().user
    entries = []
    for item in models.Intrests.objects.all():
        chosen = bool(models.UserIntrest.objects.filter(user=user, intrest=item).count())
        entries.append({'name': item.name, 'description': item.description, 'selected': chosen, 'id': item.id})
    return HttpResponse(simplejson.dumps({'list': entries}))
@permissions.is_logged_in
def updateUserIntrest(request):
    """Toggle one interest for the caller: delete the link when it exists,
    create it otherwise."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['GET'])
    token = request.POST.get('auth_token', None)
    intrest = request.POST.get('intrest', None)
    if intrest is None:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
    user = models.TokenAuthModel.objects.filter(token=token).get().user
    try:
        intrest_model = models.Intrests.objects.filter(id=intrest).get()
    except models.Intrests.DoesNotExist:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'Intrest dose not exist'}))
    try:
        link = models.UserIntrest.objects.filter(user=user, intrest=intrest_model).get()
        link.delete()
    except models.UserIntrest.DoesNotExist:
        models.UserIntrest(user=user, intrest=intrest_model).save()
    return HttpResponse(simplejson.dumps({'empty': 'empty'}))
def addIntrest(request):
    # Create an interest from GET parameters.
    # NOTE(review): unauthenticated and mutates state via GET — presumably a
    # development/seed helper; confirm before exposing publicly.
    models.Intrests(name=request.GET.get('name', ''), description=request.GET.get('description', '')).save()
    return HttpResponse(simplejson.dumps({'empty': 'empty'}))
@permissions.is_logged_in
def addChatRoomMessage(request):
    """Post a chat message, authored by the token's owner, to an event's room."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['GET'])
    token = request.POST.get('auth_token', None)
    message = request.POST.get('message', None)
    event = request.POST.get('event_id', None)
    if message is None or event is None:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
    auth_token = models.TokenAuthModel.objects.filter(token=token).get()
    try:
        event_models = models.Event.objects.filter(id=event).get()
    except models.Event.DoesNotExist:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'Event dose not exist'}))
    entry = models.EventChatRoom(user=auth_token.user, message=message, event=event_models, date=datetime.datetime.now())
    entry.save()
    return HttpResponse(simplejson.dumps({'empty': 'empty'}))
@permissions.is_logged_in
def getChatRoomMessage(request):
    """Return all chat messages of an event, newest first."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['GET'])
    event = request.POST.get('event_id', None)
    if event is None:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
    try:
        event_models = models.Event.objects.filter(id=event).get()
    except models.Event.DoesNotExist:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'Event dose not exist'}))
    history = [
        {'date': msg.date.strftime('%d/%m/%y %H:%M:%S'), 'message': msg.message, 'user': msg.user.username}
        for msg in models.EventChatRoom.objects.filter(event=event_models).order_by('-date')
    ]
    return HttpResponse(simplejson.dumps({'list': history}))
@permissions.is_logged_in
def getFullEventInfo(request):
    """Return the complete public record of one event, including its place.

    Fixes:
    - the outer except caught LocalPlaces.DoesNotExist while the event
      lookup raises Event.DoesNotExist, so an unknown event id crashed
      with HTTP 500 instead of returning the intended error;
    - an unresolvable place (lon/lat <= 0) previously fell through to the
      HttpResponseNotAllowed branch; it now reports the missing object.
    """
    if request.method == 'POST':
        event_id = request.POST.get('id', None)
        if event_id is None:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
        try:
            event = models.Event.objects.filter(id=event_id).get()
        except models.Event.DoesNotExist:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Object does not exists'}))
        try:
            if event.local != "False":
                # Place stored in our own LocalPlaces table.
                if not event.place_id:
                    lon = lat = 0
                else:
                    place_tmp = models.LocalPlaces.objects.filter(id=event.place_id).get()
                    lon = place_tmp.lon
                    lat = place_tmp.lat
                    name = place_tmp.name
                    address = place_tmp.address
                    place_type = place_tmp.type
                    place_description = place_tmp.description
            else:
                # Place resolved through the Google Places API.
                if not event.place_id:
                    lon = lat = 0
                else:
                    logging.info('Place id %s:', event.place_id)
                    place_tmp = _getPlaceDetails(event.reference)
                    name = place_tmp['name']
                    place_description = ''
                    lon = place_tmp['geometry']['location']['lng']
                    lat = place_tmp['geometry']['location']['lat']
                    logging.info("Place details %s", place_tmp)
                    address = _convertToAddress(lon, lat)
                    place_type = place_tmp['types'][0]
        except models.LocalPlaces.DoesNotExist:
            lon = lat = 0
        if lon > 0 and lat > 0:
            myDict = {'name': event.name, 'close': event.status, 'description': event.description, 'price': event.price, 'start_time': event.start_time, 'end_time': event.end_time,
                      'age_average': event.age_average, 'female_ratio': event.female_ratio, 'stars': event.stars, 'single_ratio': event.single_ratio, 'headcount': event.headcount, 'lon': lon,
                      'lat': lat, 'place_name': name, 'place_description': place_description, 'place_address': address,
                      'type': place_type}
            return HttpResponse(simplejson.dumps(myDict))
        return HttpResponseBadRequest(simplejson.dumps({'error': 'Object does not exists'}))
    return HttpResponseNotAllowed(['GET'])
@permissions.is_logged_in
def getEventInfo(request):
    """Return an event's editable fields to its creator.

    Fix: the except clause caught models.LocalPlaces.DoesNotExist although
    the lookup raises models.Event.DoesNotExist, so an unknown id crashed
    with HTTP 500 instead of the intended error response.
    """
    if request.method == 'POST':
        id = request.POST.get('id', None)
        token = request.POST.get('auth_token', None)
        auth_token = models.TokenAuthModel.objects.filter(token=token).get()
        if not id:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
        try:
            event = models.Event.objects.filter(id=id).get()
            # Only the creator may read the editable fields.
            if event.creator_id != auth_token.user:
                return HttpResponseBadRequest(simplejson.dumps({'error': 'Forbidden to edit'}))
            return HttpResponse(simplejson.dumps({'name': event.name, 'close': event.status, 'description': event.description, 'price': event.price, 'start_time': event.start_time, 'end_time': event.end_time}))
        except models.Event.DoesNotExist:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Object does not exists'}))
    return HttpResponseNotAllowed(['GET'])
@permissions.is_logged_in
def saveEventInfo(request):
    """Create a new event or update an existing one owned by the caller.

    POST params: name, description, start_time, end_time, price, auth_token,
    and optionally id (present = update).
    Fix: the update path's except clause caught LocalPlaces.DoesNotExist
    although the lookup raises Event.DoesNotExist, so an unknown id crashed
    with HTTP 500.
    """
    if request.method == 'POST':
        name = request.POST.get('name', None)
        description = request.POST.get('description', None)
        start_time = request.POST.get('start_time', None)
        end_time = request.POST.get('end_time', None)
        price = request.POST.get('price', None)
        id = request.POST.get('id', None)
        token = request.POST.get('auth_token', None)
        auth_token = models.TokenAuthModel.objects.filter(token=token).get()
        if not name or not description or not start_time or not end_time or not price:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
        if not id:
            # Creation path.
            event = models.Event(name=name, description=description,
                                 date=datetime.datetime.now(),
                                 start_time=start_time, end_time=end_time, price=price, creator_id=auth_token.user)
            event.save()
            return HttpResponse(simplejson.dumps({'id': event.id}))
        try:
            event = models.Event.objects.filter(id=id).get()
            # Only the creator may edit.
            if event.creator_id != auth_token.user:
                return HttpResponseBadRequest(simplejson.dumps({'error': 'Forbidden to edit'}))
            event.name = name
            event.description = description
            event.start_time = start_time
            event.date = datetime.datetime.now()
            event.end_time = end_time
            event.price = price
            event.save()
            return HttpResponse(simplejson.dumps({'id': event.id}))
        except models.Event.DoesNotExist:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Object does not exists'}))
    return HttpResponseNotAllowed(['GET'])
@permissions.is_logged_in
def saveEventPlace(request):
    """Attach a place (local or Google) to an event owned by the caller.

    Fixes:
    - the success path returned HttpResponseBadRequest (HTTP 400) with the
      event id; it now returns a plain HttpResponse;
    - the second except caught LocalPlaces.DoesNotExist although the lookup
      raises Event.DoesNotExist, so an unknown event id crashed with 500.
    """
    if request.method == 'POST':
        place_id = request.POST.get('place_id', None)
        place_reference = request.POST.get('place_reference', None)
        event_id = request.POST.get('event_id', None)
        is_local = request.POST.get('is_local', None)
        token = request.POST.get('auth_token', None)
        auth_token = models.TokenAuthModel.objects.filter(token=token).get()
        if not place_id or not event_id or not is_local:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
        if is_local == "True":
            # Local places must exist before being attached.
            try:
                place = models.LocalPlaces.objects.filter(id=place_id).get()
            except models.LocalPlaces.DoesNotExist:
                return HttpResponseBadRequest(simplejson.dumps({'error': 'Object does not exists'}))
        try:
            event = models.Event.objects.filter(id=event_id).get()
            if event.creator_id != auth_token.user:
                return HttpResponseBadRequest(simplejson.dumps({'error': 'Forbidden to edit'}))
            event.local = is_local
            if is_local == "True":
                event.place_id = place.id
            else:
                # Google place: store the id and the details reference.
                event.place_id = place_id
                event.reference = place_reference
            event.save()
            return HttpResponse(simplejson.dumps({'id': event.id}))
        except models.Event.DoesNotExist:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Object does not exists'}))
    return HttpResponseNotAllowed(['GET'])
@permissions.is_logged_in
def getPersonalEvents(request):
    """Return the events created by the token's owner, with coordinates
    resolved from either the local place table or the Google Places API.

    Events whose place cannot be resolved (lon/lat <= 0) are silently
    skipped.
    """
    if request.method == 'POST':
        token = request.POST.get('auth_token', None)
        auth_token = models.TokenAuthModel.objects.filter(token=token).get()
        personalEvents = models.Event.objects.filter(creator_id=auth_token.user)
        to_return = []
        for event in personalEvents:
            try:
                if event.local != "False":
                    # Place stored in our own LocalPlaces table.
                    if not event.place_id:
                        lon=lat=0
                    else:
                        place = models.LocalPlaces.objects.filter(id=event.place_id).get()
                        lon = place.lon
                        lat = place.lat
                else:
                    # Place fetched from Google via the stored reference.
                    if not event.place_id:
                        lon=lat=0
                    else:
                        logging.info('Place id %s:', event.place_id)
                        place = _getPlaceDetails(event.reference)
                        logging.info('data :%s',place)
                        lon = place['geometry']['location']['lng']
                        lat = place['geometry']['location']['lat']
            except models.LocalPlaces.DoesNotExist:
                lon = lat = 0;
            if lon > 0 and lat > 0:
                to_return.append({'id': event.id, 'name': event.name, 'description': event.description, 'start_time': event.start_time, 'end_time': event.end_time, 'lon': lon, 'lat': lat})
        return HttpResponse(simplejson.dumps({'list': to_return}))
    return HttpResponseNotAllowed(['GET'])
def _convertToAddress(lon, lat):
    """Reverse-geocode (lat, lon) via the Google Geocoding API and return
    the first formatted address."""
    url = 'http://maps.googleapis.com/maps/api/geocode/json?latlng=%s,%s&sensor=false' % (lat,lon)
    payload = simplejson.loads(urllib2.urlopen(url).read())
    logging.info("Address %s", payload)
    return payload['results'][0]['formatted_address']
@permissions.is_logged_in
def getCurrentAddress(request):
    """Return the street address for the posted longitude/latitude."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['GET'])
    lon = request.POST.get('longitude', None)
    lat = request.POST.get('latitude', None)
    if not lon or not lat:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
    return HttpResponse(simplejson.dumps({'address': _convertToAddress(lon, lat)}))
@permissions.is_logged_in
def addLocalPlace(request):
    """Create or update a locally stored place.

    Without an 'id' POST param a new LocalPlaces row is created; with one,
    the existing row is updated.  The address is resolved by reverse
    geocoding the posted coordinates.
    """
    if request.method != 'POST':
        return HttpResponseNotAllowed(['GET'])
    name = request.POST.get('name', None)
    description = request.POST.get('description', None)
    place_type = request.POST.get('type', None)
    lon = request.POST.get('longitude', None)
    lat = request.POST.get('latitude', None)
    place_id = request.POST.get('id', None)
    if not name or not description or not place_type or not lon or not lat:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
    if not place_id:
        place = models.LocalPlaces(name=name, description=description, lon=lon, lat=lat, type=place_type, address=_convertToAddress(lon, lat))
        place.save()
        return HttpResponse(simplejson.dumps({'id': place.id}))
    try:
        place = models.LocalPlaces.objects.filter(id=place_id).get()
    except models.LocalPlaces.DoesNotExist:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'Object does not exists'}))
    place.name = name
    place.description = description
    place.type = place_type
    place.lon = lon
    place.lat = lat
    place.address = _convertToAddress(lon, lat)
    place.save()
    return HttpResponse(simplejson.dumps({'id': place.id}))
@permissions.is_logged_in
def getLocalPlaceInfo(request):
    """Return the stored attributes of one LocalPlaces row."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['GET'])
    place_id = request.POST.get('id', None)
    if not place_id:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
    try:
        place = models.LocalPlaces.objects.filter(id=place_id).get()
    except models.LocalPlaces.DoesNotExist:
        return HttpResponseBadRequest(simplejson.dumps({'error': 'Object does not exists'}))
    return HttpResponse(simplejson.dumps({'name': place.name, 'description': place.description, 'type': place.type, 'lon': place.lon, 'lat': place.lat, 'address': place.address}))
@permissions.is_logged_in
def saveEventIntrests(request):
    """Toggle one interest tag on an event (delete if present, else create).

    Fix: the Event lookup's except clause caught models.Intrests.DoesNotExist
    instead of models.Event.DoesNotExist, so an unknown event id crashed
    with HTTP 500 instead of returning the intended error.
    """
    if request.method == 'POST':
        event_id = request.POST.get('event_id', None)
        intrest = request.POST.get('intrest_id', None)
        if intrest is None or not event_id:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
        try:
            intrest_model = models.Intrests.objects.filter(id=intrest).get()
        except models.Intrests.DoesNotExist:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Intrest dose not exist'}))
        try:
            event_model = models.Event.objects.filter(id=event_id).get()
        except models.Event.DoesNotExist:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Event dose not exist'}))
        try:
            event_intrest = models.EventIntrests.objects.filter(event=event_model, intrest=intrest_model).get()
            event_intrest.delete()
        except models.EventIntrests.DoesNotExist:
            event_intrest = models.EventIntrests(event=event_model, intrest=intrest_model)
            event_intrest.save()
        return HttpResponse(simplejson.dumps({'empty': 'empty'}))
    return HttpResponseNotAllowed(['GET'])
@permissions.is_logged_in
def getEventIntrests(request):
    """List every interest with a flag telling whether the event carries it.

    Fix: the except clause caught models.Intrests.DoesNotExist although the
    lookup raises models.Event.DoesNotExist, so an unknown event id crashed
    with HTTP 500 instead of the intended error.
    """
    if request.method == 'POST':
        event_id = request.POST.get('event_id', None)
        if not event_id:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
        try:
            event_model = models.Event.objects.filter(id=event_id).get()
        except models.Event.DoesNotExist:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Event dose not exist'}))
        intrests = models.Intrests.objects.all()
        toReturn = []
        for i in intrests:
            isSelected = False
            if models.EventIntrests.objects.filter(event=event_model, intrest=i).count():
                isSelected = True
            toReturn.append({'name': i.name, 'description': i.description, 'selected': isSelected, 'id': i.id})
        return HttpResponse(simplejson.dumps({'list': toReturn}))
    return HttpResponseNotAllowed(['GET'])
@permissions.is_logged_in
def closeEvent(request):
    """Toggle an event between 'Closed' and open ('') — creator only.

    Fixes:
    - the success path returned HttpResponseBadRequest (HTTP 400) with the
      event id; it now returns a plain HttpResponse;
    - the except clause caught LocalPlaces.DoesNotExist although the lookup
      raises Event.DoesNotExist, so an unknown id crashed with 500.
    """
    if request.method == 'POST':
        event_id = request.POST.get('event_id', None)
        token = request.POST.get('auth_token', None)
        auth_token = models.TokenAuthModel.objects.filter(token=token).get()
        if not event_id:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
        try:
            event = models.Event.objects.filter(id=event_id).get()
            if event.creator_id != auth_token.user:
                return HttpResponseBadRequest(simplejson.dumps({'error': 'Forbidden to edit'}))
            # Flip the status.
            if event.status == 'Closed':
                event.status = ''
            else:
                event.status = 'Closed'
            event.save()
            return HttpResponse(simplejson.dumps({'id': event.id}))
        except models.Event.DoesNotExist:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Object does not exists'}))
    return HttpResponseNotAllowed(['GET'])
@permissions.is_logged_in
def getStatus(request):
    """Return 'Open' or 'Closed' for an event.

    Fixes:
    - the success path returned HttpResponseBadRequest (HTTP 400); it now
      returns a plain HttpResponse;
    - the except clause caught LocalPlaces.DoesNotExist although the lookup
      raises Event.DoesNotExist, so an unknown id crashed with 500.
    """
    if request.method == 'POST':
        event_id = request.POST.get('event_id', None)
        if not event_id:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
        try:
            event = models.Event.objects.filter(id=event_id).get()
            # Any status other than "Closed" is reported as "Open".
            if event.status != "Closed":
                status = "Open"
            else:
                status = event.status
            return HttpResponse(simplejson.dumps({'status': status}))
        except models.Event.DoesNotExist:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Object does not exists'}))
    return HttpResponseNotAllowed(['GET'])
@permissions.is_logged_in
def deleteEvent(request):
    """Delete an event — creator only.

    Fixes:
    - the success path returned HttpResponseBadRequest (HTTP 400); it now
      returns a plain HttpResponse;
    - event.id was read *after* delete() (Django clears the pk on delete);
      the id is captured first;
    - the except clause caught LocalPlaces.DoesNotExist although the lookup
      raises Event.DoesNotExist, so an unknown id crashed with 500.
    """
    if request.method == 'POST':
        event_id = request.POST.get('event_id', None)
        token = request.POST.get('auth_token', None)
        auth_token = models.TokenAuthModel.objects.filter(token=token).get()
        if not event_id:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Incomplete data'}))
        try:
            event = models.Event.objects.filter(id=event_id).get()
            if event.creator_id != auth_token.user:
                return HttpResponseBadRequest(simplejson.dumps({'error': 'Forbidden to edit'}))
            deleted_id = event.id  # capture before delete clears the pk
            event.delete()
            return HttpResponse(simplejson.dumps({'id': deleted_id}))
        except models.Event.DoesNotExist:
            return HttpResponseBadRequest(simplejson.dumps({'error': 'Object does not exists'}))
    return HttpResponseNotAllowed(['GET'])
@permissions.is_logged_in
def checkin(request):
    """Subscribe the caller to an event (idempotent) and refresh its stats."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['GET'])
    token = request.POST.get('auth_token', None)
    user = models.TokenAuthModel.objects.filter(token=token).get().user
    event_id = request.POST.get('event_id', None)
    event = models.Event.objects.filter(id=event_id).get()
    # Only create the subscription when it does not already exist.
    try:
        models.Subscription.objects.filter(user=user, event=event).get()
    except models.Subscription.DoesNotExist:
        models.Subscription(user=user, event=event).save()
    _recompute(event)
    return HttpResponse(simplejson.dumps({'empty': 'empty'}))
@permissions.is_logged_in
def checkout(request):
    """Unsubscribe the authenticated user from an event.

    POST params:
        auth_token: token identifying the requesting user.
        event_id: primary key of the Event to leave.
    """
    if request.method == 'POST':
        token = request.POST.get('auth_token', None)
        user = models.TokenAuthModel.objects.get(token=token).user
        event_id = request.POST.get('event_id', None)
        event = models.Event.objects.get(id=event_id)
        try:
            subscription = models.Subscription.objects.get(user=user, event=event)
            subscription.delete()
        except models.Subscription.DoesNotExist:
            # Robustness: checking out while not checked in is a no-op instead
            # of an unhandled DoesNotExist -> HTTP 500.
            pass
        _recompute(event)
        return HttpResponse(simplejson.dumps({'empty': 'empty'}))
    # Bug fix: only POST is handled, so advertise POST as the allowed method.
    return HttpResponseNotAllowed(['POST'])
def _recompute(event):
    """Recalculate an event's aggregate attendee statistics and save it.

    Recomputes headcount, average age, and the female/single ratios from
    the event's current subscriptions. Attendees without a parseable
    8-digit (YYYYMMDD) birthday are skipped.
    """
    event.headcount = 0
    event.age_average = 0
    event.female_ratio = 0
    event.single_ratio = 0
    now = datetime.datetime.now()
    for s in models.Subscription.objects.filter(event=event).all():
        # Bug fix: filter() returns a queryset, not a row, so the original
        # ``info.birthday`` raised AttributeError; fetch the single record
        # and skip attendees without extra info.
        try:
            info = models.ExtraInfoForUser.objects.get(user=s.user)
        except models.ExtraInfoForUser.DoesNotExist:
            continue
        if len(info.birthday) != 8:
            continue
        event.headcount += 1
        year = int(info.birthday[:4])
        month = int(info.birthday[4:6])
        day = int(info.birthday[6:8])
        # Whole years elapsed since the birthday (subtract 1 if the birthday
        # has not yet occurred this year).
        event.age_average += now.year - year - (now.month < month or (now.month == month and now.day < day))
        # NOTE(review): "2" presumably encodes female / single respectively
        # in ExtraInfoForUser -- confirm against the model definition.
        event.female_ratio += info.sex == "2"
        event.single_ratio += info.status == "2"
    if event.headcount > 0:
        # float() guards against Python 2 integer division truncating the
        # ratios (counts of attendees) to 0.
        event.age_average = float(event.age_average) / event.headcount
        event.female_ratio = float(event.female_ratio) / event.headcount
        event.single_ratio = float(event.single_ratio) / event.headcount
    event.save()
@permissions.is_logged_in
def star(request):
    """Record the user's star rating for an event and update the average.

    POST params:
        auth_token: token identifying the requesting user.
        event_id: primary key of the Event being rated.
        stars: the rating value to store on the user's subscription.
    """
    if request.method == 'POST':
        token = request.POST.get('auth_token', None)
        user = models.TokenAuthModel.objects.get(token=token).user
        event_id = request.POST.get('event_id', None)
        event = models.Event.objects.get(id=event_id)
        stars = request.POST.get('stars', None)
        subscription = models.Subscription.objects.get(user=user, event=event)
        subscription.stars = stars
        subscription.save()
        # Recalculate the event's average rating over all rated subscriptions.
        total = 0
        count = 0
        for s in models.Subscription.objects.filter(event=event).all():
            if not s.stars:
                continue
            total += int(s.stars)
            count += 1
        # NOTE(review): integer division truncates the average under Python 2
        # semantics -- confirm whether Event.stars is meant to be integral.
        event.stars = (total / count) if (count > 0) else 0
        event.save()
        return HttpResponse(simplejson.dumps({'empty': 'empty'}))
    # Bug fix: only POST is handled, so advertise POST as the allowed method.
    return HttpResponseNotAllowed(['POST'])
|
from numpy import inner, zeros, inf, finfo
from numpy.linalg import norm
from math import sqrt
from .utils import make_system
__all__ = ['minres']
def minres(A, b, x0=None, shift=0.0, tol=1e-5, maxiter=None,
           M=None, callback=None, show=False, check=False):
    """
    Use MINimum RESidual iteration to solve Ax=b

    MINRES minimizes norm(A*x - b) for a real symmetric matrix A.  Unlike
    the Conjugate Gradient method, A can be indefinite or singular.

    If shift != 0 then the method solves (A - shift*I)x = b

    Parameters
    ----------
    A : {sparse matrix, ndarray, LinearOperator}
        The real symmetric N-by-N matrix of the linear system
        Alternatively, ``A`` can be a linear operator which can
        produce ``Ax`` using, e.g.,
        ``scipy.sparse.linalg.LinearOperator``.
    b : ndarray
        Right hand side of the linear system. Has shape (N,) or (N,1).

    Returns
    -------
    x : ndarray
        The converged solution.
    info : integer
        Provides convergence information:
            0  : successful exit
            >0 : convergence to tolerance not achieved, number of iterations
            <0 : illegal input or breakdown

    Other Parameters
    ----------------
    x0 : ndarray
        Starting guess for the solution.
    shift : float
        Value to apply to the system ``(A - shift * I)x = b``. Default is 0.
    tol : float
        Tolerance to achieve. The algorithm terminates when the relative
        residual is below `tol`.
    maxiter : integer
        Maximum number of iterations.  Iteration will stop after maxiter
        steps even if the specified tolerance has not been achieved.
    M : {sparse matrix, ndarray, LinearOperator}
        Preconditioner for A.  The preconditioner should approximate the
        inverse of A.  Effective preconditioning dramatically improves the
        rate of convergence, which implies that fewer iterations are needed
        to reach a given error tolerance.
    callback : function
        User-supplied function to call after each iteration.  It is called
        as callback(xk), where xk is the current solution vector.
    show : bool
        If ``True``, print out a summary and metrics related to the solution
        during iterations. Default is ``False``.
    check : bool
        If ``True``, run additional input validation to check that `A` and
        `M` (if specified) are symmetric. Default is ``False``.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import minres
    >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
    >>> A = A + A.T
    >>> b = np.array([2, 4, -1], dtype=float)
    >>> x, exitCode = minres(A, b)
    >>> print(exitCode)            # 0 indicates successful convergence
    0
    >>> np.allclose(A.dot(x), b)
    True

    References
    ----------
    Solution of sparse indefinite systems of linear equations,
        C. C. Paige and M. A. Saunders (1975),
        SIAM J. Numer. Anal. 12(4), pp. 617-629.
        https://web.stanford.edu/group/SOL/software/minres/

    This file is a translation of the following MATLAB implementation:
        https://web.stanford.edu/group/SOL/software/minres/minres-matlab.zip

    """
    A, M, x, b, postprocess = make_system(A, M, x0, b)
    matvec = A.matvec
    psolve = M.matvec
    first = 'Enter minres. '
    last = 'Exit minres. '
    n = A.shape[0]
    if maxiter is None:
        maxiter = 5 * n
    msg = [' beta2 = 0. If M = I, b and x are eigenvectors ',    # -1
           ' beta1 = 0. The exact solution is x0 ',              # 0
           ' A solution to Ax = b was found, given rtol ',       # 1
           ' A least-squares solution was found, given rtol ',   # 2
           ' Reasonable accuracy achieved, given eps ',          # 3
           ' x has converged to an eigenvector ',                # 4
           ' acond has exceeded 0.1/eps ',                       # 5
           ' The iteration limit was reached ',                  # 6
           ' A does not define a symmetric matrix ',             # 7
           ' M does not define a symmetric matrix ',             # 8
           ' M does not define a pos-def preconditioner ']       # 9
    if show:
        print(first + 'Solution of symmetric Ax = b')
        print(first + 'n = %3g shift = %23.14e' % (n,shift))
        print(first + 'itnlim = %3g rtol = %11.2e' % (maxiter,tol))
        print()
    istop = 0
    itn = 0
    Anorm = 0
    Acond = 0
    rnorm = 0
    ynorm = 0
    xtype = x.dtype
    eps = finfo(xtype).eps
    # Set up y and v for the first Lanczos vector v1.
    # y = beta1 P' v1, where P = C**(-1).
    # v is really P' v1.
    r1 = b - A*x
    y = psolve(r1)
    beta1 = inner(r1, y)
    if beta1 < 0:
        raise ValueError('indefinite preconditioner')
    elif beta1 == 0:
        return (postprocess(x), 0)
    bnorm = norm(b)
    if bnorm == 0:
        x = b
        return (postprocess(x), 0)
    beta1 = sqrt(beta1)
    if check:
        # are these too strict?
        # see if A is symmetric
        w = matvec(y)
        r2 = matvec(w)
        s = inner(w,w)
        t = inner(y,r2)
        z = abs(s - t)
        epsa = (s + eps) * eps**(1.0/3.0)
        if z > epsa:
            raise ValueError('non-symmetric matrix')
        # see if M is symmetric
        r2 = psolve(y)
        s = inner(y,y)
        t = inner(r1,r2)
        z = abs(s - t)
        epsa = (s + eps) * eps**(1.0/3.0)
        if z > epsa:
            raise ValueError('non-symmetric preconditioner')
    # Initialize other quantities
    oldb = 0
    beta = beta1
    dbar = 0
    epsln = 0
    qrnorm = beta1
    phibar = beta1
    rhs1 = beta1
    rhs2 = 0
    tnorm2 = 0
    gmax = 0
    gmin = finfo(xtype).max
    cs = -1
    sn = 0
    w = zeros(n, dtype=xtype)
    w2 = zeros(n, dtype=xtype)
    r2 = r1
    if show:
        print()
        print()
        print(' Itn x(1) Compatible LS norm(A) cond(A) gbar/|A|')
    while itn < maxiter:
        itn += 1
        s = 1.0/beta
        v = s*y
        y = matvec(v)
        y = y - shift * v
        if itn >= 2:
            y = y - (beta/oldb)*r1
        alfa = inner(v,y)
        y = y - (alfa/beta)*r2
        r1 = r2
        r2 = y
        y = psolve(r2)
        oldb = beta
        beta = inner(r2,y)
        if beta < 0:
            raise ValueError('non-symmetric matrix')
        beta = sqrt(beta)
        tnorm2 += alfa**2 + oldb**2 + beta**2
        if itn == 1:
            if beta/beta1 <= 10*eps:
                istop = -1  # Terminate later
        # Apply previous rotation Qk-1 to get
        #   [deltak epslnk+1] = [cs  sn][dbark    0   ]
        #   [gbar k dbar k+1]   [sn -cs][alfak betak+1].
        oldeps = epsln
        delta = cs * dbar + sn * alfa      # delta1 = 0 deltak
        gbar = sn * dbar - cs * alfa       # gbar 1 = alfa1 gbar k
        epsln = sn * beta                  # epsln2 = 0 epslnk+1
        dbar = - cs * beta                 # dbar 2 = beta2 dbar k+1
        root = norm([gbar, dbar])
        Arnorm = phibar * root
        # Compute the next plane rotation Qk
        gamma = norm([gbar, beta])         # gammak
        gamma = max(gamma, eps)
        cs = gbar / gamma                  # ck
        sn = beta / gamma                  # sk
        phi = cs * phibar                  # phik
        phibar = sn * phibar               # phibark+1
        # Update x.
        denom = 1.0/gamma
        w1 = w2
        w2 = w
        w = (v - oldeps*w1 - delta*w2) * denom
        x = x + phi*w
        # Go round again.
        gmax = max(gmax, gamma)
        gmin = min(gmin, gamma)
        z = rhs1 / gamma
        rhs1 = rhs2 - delta*z
        rhs2 = - epsln*z
        # Estimate various norms and test for convergence.
        Anorm = sqrt(tnorm2)
        ynorm = norm(x)
        epsa = Anorm * eps
        epsx = Anorm * ynorm * eps
        epsr = Anorm * ynorm * tol
        diag = gbar
        if diag == 0:
            diag = epsa
        qrnorm = phibar
        rnorm = qrnorm
        if ynorm == 0 or Anorm == 0:
            test1 = inf
        else:
            test1 = rnorm / (Anorm*ynorm)  # ||r|| / (||A|| ||x||)
        if Anorm == 0:
            test2 = inf
        else:
            test2 = root / Anorm           # ||Ar|| / (||A|| ||r||)
        # Estimate cond(A).
        # In this version we look at the diagonals of R in the
        # factorization of the lower Hessenberg matrix, Q * H = R,
        # where H is the tridiagonal matrix from Lanczos with one
        # extra row, beta(k+1) e_k^T.
        Acond = gmax/gmin
        # See if any of the stopping criteria are satisfied.
        # In rare cases, istop is already -1 from above (Abar = const*I).
        if istop == 0:
            t1 = 1 + test1                 # These tests work if tol < eps
            t2 = 1 + test2
            if t2 <= 1:
                istop = 2
            if t1 <= 1:
                istop = 1
            if itn >= maxiter:
                istop = 6
            if Acond >= 0.1/eps:
                istop = 4
            if epsx >= beta1:
                istop = 3
            # if rnorm <= epsx : istop = 2
            # if rnorm <= epsr : istop = 1
            if test2 <= tol:
                istop = 2
            if test1 <= tol:
                istop = 1
        # See if it is time to print something.
        prnt = False
        if n <= 40:
            prnt = True
        if itn <= 10:
            prnt = True
        if itn >= maxiter-10:
            prnt = True
        if itn % 10 == 0:
            prnt = True
        if qrnorm <= 10*epsx:
            prnt = True
        if qrnorm <= 10*epsr:
            prnt = True
        if Acond <= 1e-2/eps:
            prnt = True
        if istop != 0:
            prnt = True
        if show and prnt:
            str1 = '%6g %12.5e %10.3e' % (itn, x[0], test1)
            str2 = ' %10.3e' % (test2,)
            str3 = ' %8.1e %8.1e %8.1e' % (Anorm, Acond, gbar/Anorm)
            print(str1 + str2 + str3)
            if itn % 10 == 0:
                print()
        if callback is not None:
            callback(x)
        if istop != 0:
            break  # TODO check this
    if show:
        print()
        print(last + ' istop = %3g itn =%5g' % (istop,itn))
        print(last + ' Anorm = %12.4e Acond = %12.4e' % (Anorm,Acond))
        print(last + ' rnorm = %12.4e ynorm = %12.4e' % (rnorm,ynorm))
        print(last + ' Arnorm = %12.4e' % (Arnorm,))
        print(last + msg[istop+1])
    if istop == 6:
        info = maxiter
    else:
        info = 0
    return (postprocess(x),info)
if __name__ == '__main__':
    # Ad-hoc demo / smoke test; not part of the public API.
    from numpy import arange
    from scipy.sparse import spdiags
    n = 10
    residuals = []
    def cb(x):
        # Record the residual norm after each iteration; ``A`` and ``b`` are
        # taken from the enclosing module scope.
        residuals.append(norm(b - A*x))
    # A = poisson((10,),format='csr')
    A = spdiags([arange(1,n+1,dtype=float)], [0], n, n, format='csr')
    M = spdiags([1.0/arange(1,n+1,dtype=float)], [0], n, n, format='csr')
    A.psolve = M.matvec
    # NOTE(review): b is all zeros, so minres returns x = b immediately --
    # presumably a placeholder right-hand side for experiments.
    b = zeros(A.shape[0])
    x = minres(A,b,tol=1e-12,maxiter=None,callback=cb)
    # x = cg(A,b,x0=b,tol=1e-12,maxiter=None,callback=cb)[0]
DOC: Add missing params to minres docstring.
from numpy import inner, zeros, inf, finfo
from numpy.linalg import norm
from math import sqrt
from .utils import make_system
__all__ = ['minres']
def minres(A, b, x0=None, shift=0.0, tol=1e-5, maxiter=None,
           M=None, callback=None, show=False, check=False):
    """
    Use MINimum RESidual iteration to solve Ax=b

    MINRES minimizes norm(A*x - b) for a real symmetric matrix A.  Unlike
    the Conjugate Gradient method, A can be indefinite or singular.

    If shift != 0 then the method solves (A - shift*I)x = b

    Parameters
    ----------
    A : {sparse matrix, ndarray, LinearOperator}
        The real symmetric N-by-N matrix of the linear system
        Alternatively, ``A`` can be a linear operator which can
        produce ``Ax`` using, e.g.,
        ``scipy.sparse.linalg.LinearOperator``.
    b : ndarray
        Right hand side of the linear system. Has shape (N,) or (N,1).

    Returns
    -------
    x : ndarray
        The converged solution.
    info : integer
        Provides convergence information:
            0  : successful exit
            >0 : convergence to tolerance not achieved, number of iterations
            <0 : illegal input or breakdown

    Other Parameters
    ----------------
    x0 : ndarray
        Starting guess for the solution.
    shift : float
        Value to apply to the system ``(A - shift * I)x = b``. Default is 0.
    tol : float
        Tolerance to achieve. The algorithm terminates when the relative
        residual is below `tol`.
    maxiter : integer
        Maximum number of iterations.  Iteration will stop after maxiter
        steps even if the specified tolerance has not been achieved.
    M : {sparse matrix, ndarray, LinearOperator}
        Preconditioner for A.  The preconditioner should approximate the
        inverse of A.  Effective preconditioning dramatically improves the
        rate of convergence, which implies that fewer iterations are needed
        to reach a given error tolerance.
    callback : function
        User-supplied function to call after each iteration.  It is called
        as callback(xk), where xk is the current solution vector.
    show : bool
        If ``True``, print out a summary and metrics related to the solution
        during iterations. Default is ``False``.
    check : bool
        If ``True``, run additional input validation to check that `A` and
        `M` (if specified) are symmetric. Default is ``False``.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import minres
    >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
    >>> A = A + A.T
    >>> b = np.array([2, 4, -1], dtype=float)
    >>> x, exitCode = minres(A, b)
    >>> print(exitCode)            # 0 indicates successful convergence
    0
    >>> np.allclose(A.dot(x), b)
    True

    References
    ----------
    Solution of sparse indefinite systems of linear equations,
        C. C. Paige and M. A. Saunders (1975),
        SIAM J. Numer. Anal. 12(4), pp. 617-629.
        https://web.stanford.edu/group/SOL/software/minres/

    This file is a translation of the following MATLAB implementation:
        https://web.stanford.edu/group/SOL/software/minres/minres-matlab.zip

    """
    A, M, x, b, postprocess = make_system(A, M, x0, b)
    matvec = A.matvec
    psolve = M.matvec
    first = 'Enter minres. '
    last = 'Exit minres. '
    n = A.shape[0]
    if maxiter is None:
        maxiter = 5 * n
    # Human-readable explanations of the exit condition, indexed by istop+1.
    msg = [' beta2 = 0. If M = I, b and x are eigenvectors ',    # -1
           ' beta1 = 0. The exact solution is x0 ',              # 0
           ' A solution to Ax = b was found, given rtol ',       # 1
           ' A least-squares solution was found, given rtol ',   # 2
           ' Reasonable accuracy achieved, given eps ',          # 3
           ' x has converged to an eigenvector ',                # 4
           ' acond has exceeded 0.1/eps ',                       # 5
           ' The iteration limit was reached ',                  # 6
           ' A does not define a symmetric matrix ',             # 7
           ' M does not define a symmetric matrix ',             # 8
           ' M does not define a pos-def preconditioner ']       # 9
    if show:
        print(first + 'Solution of symmetric Ax = b')
        print(first + 'n = %3g shift = %23.14e' % (n,shift))
        print(first + 'itnlim = %3g rtol = %11.2e' % (maxiter,tol))
        print()
    istop = 0
    itn = 0
    Anorm = 0
    Acond = 0
    rnorm = 0
    ynorm = 0
    xtype = x.dtype
    eps = finfo(xtype).eps
    # Set up y and v for the first Lanczos vector v1.
    # y = beta1 P' v1, where P = C**(-1).
    # v is really P' v1.
    r1 = b - A*x
    y = psolve(r1)
    # beta1 = r1' M^{-1} r1; it must be >= 0 when M is positive definite.
    beta1 = inner(r1, y)
    if beta1 < 0:
        raise ValueError('indefinite preconditioner')
    elif beta1 == 0:
        return (postprocess(x), 0)
    bnorm = norm(b)
    if bnorm == 0:
        # b = 0 => the exact solution is the zero vector (returned via x = b).
        x = b
        return (postprocess(x), 0)
    beta1 = sqrt(beta1)
    # Optional symmetry checks for A and M, enabled via ``check=True``.
    if check:
        # are these too strict?
        # see if A is symmetric
        w = matvec(y)
        r2 = matvec(w)
        s = inner(w,w)
        t = inner(y,r2)
        z = abs(s - t)
        epsa = (s + eps) * eps**(1.0/3.0)
        if z > epsa:
            raise ValueError('non-symmetric matrix')
        # see if M is symmetric
        r2 = psolve(y)
        s = inner(y,y)
        t = inner(r1,r2)
        z = abs(s - t)
        epsa = (s + eps) * eps**(1.0/3.0)
        if z > epsa:
            raise ValueError('non-symmetric preconditioner')
    # Initialize other quantities
    oldb = 0
    beta = beta1
    dbar = 0
    epsln = 0
    qrnorm = beta1
    phibar = beta1
    rhs1 = beta1
    rhs2 = 0
    tnorm2 = 0
    gmax = 0
    gmin = finfo(xtype).max
    cs = -1
    sn = 0
    w = zeros(n, dtype=xtype)
    w2 = zeros(n, dtype=xtype)
    r2 = r1
    if show:
        print()
        print()
        print(' Itn x(1) Compatible LS norm(A) cond(A) gbar/|A|')
    # Main Lanczos iteration with plane rotations (Paige & Saunders 1975).
    while itn < maxiter:
        itn += 1
        s = 1.0/beta
        v = s*y
        y = matvec(v)
        y = y - shift * v
        if itn >= 2:
            y = y - (beta/oldb)*r1
        alfa = inner(v,y)
        y = y - (alfa/beta)*r2
        r1 = r2
        r2 = y
        y = psolve(r2)
        oldb = beta
        beta = inner(r2,y)
        if beta < 0:
            raise ValueError('non-symmetric matrix')
        beta = sqrt(beta)
        tnorm2 += alfa**2 + oldb**2 + beta**2
        if itn == 1:
            if beta/beta1 <= 10*eps:
                istop = -1  # Terminate later
        # Apply previous rotation Qk-1 to get
        #   [deltak epslnk+1] = [cs  sn][dbark    0   ]
        #   [gbar k dbar k+1]   [sn -cs][alfak betak+1].
        oldeps = epsln
        delta = cs * dbar + sn * alfa      # delta1 = 0 deltak
        gbar = sn * dbar - cs * alfa       # gbar 1 = alfa1 gbar k
        epsln = sn * beta                  # epsln2 = 0 epslnk+1
        dbar = - cs * beta                 # dbar 2 = beta2 dbar k+1
        root = norm([gbar, dbar])
        Arnorm = phibar * root
        # Compute the next plane rotation Qk
        gamma = norm([gbar, beta])         # gammak
        gamma = max(gamma, eps)
        cs = gbar / gamma                  # ck
        sn = beta / gamma                  # sk
        phi = cs * phibar                  # phik
        phibar = sn * phibar               # phibark+1
        # Update x.
        denom = 1.0/gamma
        w1 = w2
        w2 = w
        w = (v - oldeps*w1 - delta*w2) * denom
        x = x + phi*w
        # Go round again.
        gmax = max(gmax, gamma)
        gmin = min(gmin, gamma)
        z = rhs1 / gamma
        rhs1 = rhs2 - delta*z
        rhs2 = - epsln*z
        # Estimate various norms and test for convergence.
        Anorm = sqrt(tnorm2)
        ynorm = norm(x)
        epsa = Anorm * eps
        epsx = Anorm * ynorm * eps
        epsr = Anorm * ynorm * tol
        diag = gbar
        if diag == 0:
            diag = epsa
        qrnorm = phibar
        rnorm = qrnorm
        if ynorm == 0 or Anorm == 0:
            test1 = inf
        else:
            test1 = rnorm / (Anorm*ynorm)  # ||r|| / (||A|| ||x||)
        if Anorm == 0:
            test2 = inf
        else:
            test2 = root / Anorm           # ||Ar|| / (||A|| ||r||)
        # Estimate cond(A).
        # In this version we look at the diagonals of R in the
        # factorization of the lower Hessenberg matrix, Q * H = R,
        # where H is the tridiagonal matrix from Lanczos with one
        # extra row, beta(k+1) e_k^T.
        Acond = gmax/gmin
        # See if any of the stopping criteria are satisfied.
        # In rare cases, istop is already -1 from above (Abar = const*I).
        if istop == 0:
            t1 = 1 + test1                 # These tests work if tol < eps
            t2 = 1 + test2
            if t2 <= 1:
                istop = 2
            if t1 <= 1:
                istop = 1
            if itn >= maxiter:
                istop = 6
            if Acond >= 0.1/eps:
                istop = 4
            if epsx >= beta1:
                istop = 3
            # if rnorm <= epsx : istop = 2
            # if rnorm <= epsr : istop = 1
            if test2 <= tol:
                istop = 2
            if test1 <= tol:
                istop = 1
        # See if it is time to print something.
        prnt = False
        if n <= 40:
            prnt = True
        if itn <= 10:
            prnt = True
        if itn >= maxiter-10:
            prnt = True
        if itn % 10 == 0:
            prnt = True
        if qrnorm <= 10*epsx:
            prnt = True
        if qrnorm <= 10*epsr:
            prnt = True
        if Acond <= 1e-2/eps:
            prnt = True
        if istop != 0:
            prnt = True
        if show and prnt:
            str1 = '%6g %12.5e %10.3e' % (itn, x[0], test1)
            str2 = ' %10.3e' % (test2,)
            str3 = ' %8.1e %8.1e %8.1e' % (Anorm, Acond, gbar/Anorm)
            print(str1 + str2 + str3)
            if itn % 10 == 0:
                print()
        if callback is not None:
            callback(x)
        if istop != 0:
            break  # TODO check this
    if show:
        print()
        print(last + ' istop = %3g itn =%5g' % (istop,itn))
        print(last + ' Anorm = %12.4e Acond = %12.4e' % (Anorm,Acond))
        print(last + ' rnorm = %12.4e ynorm = %12.4e' % (rnorm,ynorm))
        print(last + ' Arnorm = %12.4e' % (Arnorm,))
        print(last + msg[istop+1])
    if istop == 6:
        info = maxiter
    else:
        info = 0
    return (postprocess(x),info)
if __name__ == '__main__':
    # Ad-hoc demo / smoke test; not part of the public API.
    from numpy import arange
    from scipy.sparse import spdiags
    n = 10
    residuals = []
    def cb(x):
        # Record the residual norm after each iteration; ``A`` and ``b`` are
        # taken from the enclosing module scope.
        residuals.append(norm(b - A*x))
    # A = poisson((10,),format='csr')
    A = spdiags([arange(1,n+1,dtype=float)], [0], n, n, format='csr')
    M = spdiags([1.0/arange(1,n+1,dtype=float)], [0], n, n, format='csr')
    A.psolve = M.matvec
    # NOTE(review): b is all zeros, so minres returns x = b immediately --
    # presumably a placeholder right-hand side for experiments.
    b = zeros(A.shape[0])
    x = minres(A,b,tol=1e-12,maxiter=None,callback=cb)
    # x = cg(A,b,x0=b,tol=1e-12,maxiter=None,callback=cb)[0]
|
"""Checkouts application middleware"""
import logging
import time
from .statsdproxy import statsd
logger = logging.getLogger('analytics')
class Analytics():
    """Tracks request details useful for analysis of usage patterns.

    To ensure that the name of the logged in user can be accessed, this
    middleware should come after Django's built-in AuthenticationMiddleware
    in the project settings.
    """

    def is_monitor_agent(self, request):
        """Returns True if this request is related to a known monitoring agent."""
        # Bug fix: this is called as ``self.is_monitor_agent(request)``, so it
        # must accept ``self``; previously the request was bound to ``self``
        # and every call raised TypeError.
        keywords = [
            'UptimeRobot',  # Nifty free service
        ]
        useragent = request.META.get('HTTP_USER_AGENT', None)
        if useragent is None:
            return False  # Can't recognize a blank useragent
        for word in keywords:
            if word in useragent:
                return True
        return False

    def collect_request_details(self, request):
        """Gathers information of interest from the request and returns a dictionary."""
        # Use the REMOTE_ADDR if we have it. If not, Nginx is configured to
        # set both X-Real-IP and X-Forwarded-For; pick one of those instead.
        client_ip = request.META['REMOTE_ADDR']
        if client_ip == '':
            client_ip = request.META['HTTP_X_REAL_IP']
        context = {
            'ip': client_ip,
            'method': request.method,
            'path': request.path,
        }
        context['useragent'] = request.META.get('HTTP_USER_AGENT', 'None')
        if hasattr(request, 'user') and request.user.is_authenticated():
            context['user'] = request.user.username
        else:
            context['user'] = 'anonymous'
        return context

    def process_request(self, request):
        """Captures the current time and saves it to the request object."""
        if self.is_monitor_agent(request):
            return  # No metrics
        request._analytics_start_time = time.time()
        statsd.incr('request')

    def process_response(self, request, response):
        """Organizes info from each request/response and saves it to a log."""
        if self.is_monitor_agent(request):
            return response  # No metrics, no logging
        context = self.collect_request_details(request)
        context['status'] = response.status_code
        context['bytes'] = len(response.content)
        if hasattr(request, '_analytics_start_time'):
            elapsed = (time.time() - request._analytics_start_time) * 1000.0
            context['elapsed'] = elapsed
            statsd.timing('response.elapsed', elapsed)
        else:
            # process_request never ran (e.g. short-circuited); flag it.
            context['elapsed'] = -1.0
        template = "client=%(user)s@%(ip)s method=%(method)s path=%(path)s real=%(elapsed).0fms status=%(status)s bytes=%(bytes)s useragent=\"%(useragent)s\""
        logger.info(template % context)
        return response
Instance method requires 'self'
Associated with issue #19
"""Checkouts application middleware"""
import logging
import time
from .statsdproxy import statsd
logger = logging.getLogger('analytics')
class Analytics():
    """Tracks request details useful for analysis of usage patterns.

    To ensure that the name of the logged in user can be accessed, this
    middleware should come after Django's built-in AuthenticationMiddleware
    in the project settings.
    """

    def is_monitor_agent(self, request):
        """Return True when the request comes from a known monitoring agent."""
        agent = request.META.get('HTTP_USER_AGENT', None)
        if agent is None:
            return False  # A blank user agent cannot be recognized.
        markers = [
            'UptimeRobot',  # Nifty free service
        ]
        return any(marker in agent for marker in markers)

    def collect_request_details(self, request):
        """Gather information of interest from the request into a dictionary."""
        # Prefer REMOTE_ADDR; when it arrives empty, Nginx is configured to
        # supply X-Real-IP / X-Forwarded-For, so fall back to X-Real-IP.
        ip_address = request.META['REMOTE_ADDR']
        if ip_address == '':
            ip_address = request.META['HTTP_X_REAL_IP']
        details = {
            'ip': ip_address,
            'method': request.method,
            'path': request.path,
            'useragent': request.META.get('HTTP_USER_AGENT', 'None'),
        }
        authenticated = hasattr(request, 'user') and request.user.is_authenticated()
        details['user'] = request.user.username if authenticated else 'anonymous'
        return details

    def process_request(self, request):
        """Stamp the request with the current time and count it."""
        if self.is_monitor_agent(request):
            return  # Monitoring traffic produces no metrics.
        request._analytics_start_time = time.time()
        statsd.incr('request')

    def process_response(self, request, response):
        """Assemble per-request info and write it to the analytics log."""
        if self.is_monitor_agent(request):
            return response  # Monitoring traffic: no metrics, no logging.
        details = self.collect_request_details(request)
        details['status'] = response.status_code
        details['bytes'] = len(response.content)
        started = getattr(request, '_analytics_start_time', None)
        if started is None:
            details['elapsed'] = -1.0
        else:
            elapsed = (time.time() - started) * 1000.0
            details['elapsed'] = elapsed
            statsd.timing('response.elapsed', elapsed)
        template = "client=%(user)s@%(ip)s method=%(method)s path=%(path)s real=%(elapsed).0fms status=%(status)s bytes=%(bytes)s useragent=\"%(useragent)s\""
        logger.info(template % details)
        return response
|
d8670dfd-2ead-11e5-9091-7831c1d44c14
d86cd891-2ead-11e5-a713-7831c1d44c14
d86cd891-2ead-11e5-a713-7831c1d44c14 |
import sys
import os
import unittest
from unittest import mock
from scrapy.item import Item, Field
from scrapy.utils.misc import arg_to_iter, create_instance, load_object, set_environ, walk_modules
__doctests__ = ['scrapy.utils.misc']
class UtilsMiscTestCase(unittest.TestCase):
    """Unit tests for the helpers in ``scrapy.utils.misc``."""

    def test_load_object_class(self):
        """load_object returns a class unchanged and resolves dotted paths."""
        obj = load_object(Field)
        self.assertIs(obj, Field)
        obj = load_object('scrapy.item.Field')
        self.assertIs(obj, Field)

    def test_load_object_function(self):
        """load_object returns a function unchanged and resolves dotted paths."""
        obj = load_object(load_object)
        self.assertIs(obj, load_object)
        obj = load_object('scrapy.utils.misc.load_object')
        self.assertIs(obj, load_object)

    def test_load_object_exceptions(self):
        """load_object raises for missing modules, missing names, bad types."""
        self.assertRaises(ImportError, load_object, 'nomodule999.mod.function')
        self.assertRaises(NameError, load_object, 'scrapy.utils.misc.load_object999')
        self.assertRaises(TypeError, load_object, dict())

    def test_walk_modules(self):
        """walk_modules yields a package and all of its submodules, recursively."""
        mods = walk_modules('tests.test_utils_misc.test_walk_modules')
        expected = [
            'tests.test_utils_misc.test_walk_modules',
            'tests.test_utils_misc.test_walk_modules.mod',
            'tests.test_utils_misc.test_walk_modules.mod.mod0',
            'tests.test_utils_misc.test_walk_modules.mod1',
        ]
        self.assertEqual({m.__name__ for m in mods}, set(expected))
        mods = walk_modules('tests.test_utils_misc.test_walk_modules.mod')
        expected = [
            'tests.test_utils_misc.test_walk_modules.mod',
            'tests.test_utils_misc.test_walk_modules.mod.mod0',
        ]
        self.assertEqual({m.__name__ for m in mods}, set(expected))
        mods = walk_modules('tests.test_utils_misc.test_walk_modules.mod1')
        expected = [
            'tests.test_utils_misc.test_walk_modules.mod1',
        ]
        self.assertEqual({m.__name__ for m in mods}, set(expected))
        self.assertRaises(ImportError, walk_modules, 'nomodule999')

    def test_walk_modules_egg(self):
        """walk_modules also traverses packages imported from a .egg on sys.path."""
        egg = os.path.join(os.path.dirname(__file__), 'test.egg')
        sys.path.append(egg)
        try:
            mods = walk_modules('testegg')
            expected = [
                'testegg.spiders',
                'testegg.spiders.a',
                'testegg.spiders.b',
                'testegg'
            ]
            self.assertEqual({m.__name__ for m in mods}, set(expected))
        finally:
            # Always restore sys.path, even if the assertions fail.
            sys.path.remove(egg)

    def test_arg_to_iter(self):
        """arg_to_iter wraps scalars (and dict/Item instances) but passes iterables through."""
        class TestItem(Item):
            name = Field()
        assert hasattr(arg_to_iter(None), '__iter__')
        assert hasattr(arg_to_iter(100), '__iter__')
        assert hasattr(arg_to_iter('lala'), '__iter__')
        assert hasattr(arg_to_iter([1, 2, 3]), '__iter__')
        assert hasattr(arg_to_iter(c for c in 'abcd'), '__iter__')
        self.assertEqual(list(arg_to_iter(None)), [])
        self.assertEqual(list(arg_to_iter('lala')), ['lala'])
        self.assertEqual(list(arg_to_iter(100)), [100])
        self.assertEqual(list(arg_to_iter(c for c in 'abc')), ['a', 'b', 'c'])
        self.assertEqual(list(arg_to_iter([1, 2, 3])), [1, 2, 3])
        self.assertEqual(list(arg_to_iter({'a': 1})), [{'a': 1}])
        self.assertEqual(list(arg_to_iter(TestItem(name="john"))), [TestItem(name="john")])

    def test_create_instance(self):
        """create_instance picks from_crawler, then from_settings, then __init__."""
        settings = mock.MagicMock()
        crawler = mock.MagicMock(spec_set=['settings'])
        args = (True, 100.)
        kwargs = {'key': 'val'}

        # NOTE: the ``mock`` parameter deliberately shadows the mock module
        # inside these helpers; only the passed-in MagicMock is used.
        def _test_with_settings(mock, settings):
            create_instance(mock, settings, None, *args, **kwargs)
            if hasattr(mock, 'from_crawler'):
                self.assertEqual(mock.from_crawler.call_count, 0)
            if hasattr(mock, 'from_settings'):
                mock.from_settings.assert_called_once_with(settings, *args,
                                                           **kwargs)
                self.assertEqual(mock.call_count, 0)
            else:
                mock.assert_called_once_with(*args, **kwargs)

        def _test_with_crawler(mock, settings, crawler):
            create_instance(mock, settings, crawler, *args, **kwargs)
            if hasattr(mock, 'from_crawler'):
                mock.from_crawler.assert_called_once_with(crawler, *args,
                                                          **kwargs)
                if hasattr(mock, 'from_settings'):
                    self.assertEqual(mock.from_settings.call_count, 0)
                self.assertEqual(mock.call_count, 0)
            elif hasattr(mock, 'from_settings'):
                mock.from_settings.assert_called_once_with(settings, *args,
                                                           **kwargs)
                self.assertEqual(mock.call_count, 0)
            else:
                mock.assert_called_once_with(*args, **kwargs)

        # Check usage of correct constructor using four mocks:
        #   1. with no alternative constructors
        #   2. with from_settings() constructor
        #   3. with from_crawler() constructor
        #   4. with from_settings() and from_crawler() constructor
        spec_sets = (
            ['__qualname__'],
            ['__qualname__', 'from_settings'],
            ['__qualname__', 'from_crawler'],
            ['__qualname__', 'from_settings', 'from_crawler'],
        )
        for specs in spec_sets:
            m = mock.MagicMock(spec_set=specs)
            _test_with_settings(m, settings)
            m.reset_mock()
            _test_with_crawler(m, settings, crawler)

        # Check adoption of crawler settings
        m = mock.MagicMock(spec_set=['__qualname__', 'from_settings'])
        create_instance(m, None, crawler, *args, **kwargs)
        m.from_settings.assert_called_once_with(crawler.settings, *args,
                                                **kwargs)
        # Neither settings nor crawler supplied -> ValueError.
        with self.assertRaises(ValueError):
            create_instance(m, None, None)
        # A constructor that returns None -> TypeError.
        m.from_settings.return_value = None
        with self.assertRaises(TypeError):
            create_instance(m, settings, None)

    def test_set_environ(self):
        """set_environ sets a variable inside the context and restores it after."""
        assert os.environ.get('some_test_environ') is None
        with set_environ(some_test_environ='test_value'):
            assert os.environ.get('some_test_environ') == 'test_value'
        assert os.environ.get('some_test_environ') is None
        os.environ['some_test_environ'] = 'test'
        assert os.environ.get('some_test_environ') == 'test'
        with set_environ(some_test_environ='test_value'):
            assert os.environ.get('some_test_environ') == 'test_value'
        assert os.environ.get('some_test_environ') == 'test'
if __name__ == "__main__":
    # Allow running this test module directly without a test runner.
    unittest.main()
Add test for rel_has_nofollow
import sys
import os
import unittest
from unittest import mock
from scrapy.item import Item, Field
from scrapy.utils.misc import arg_to_iter, create_instance, load_object, rel_has_nofollow, set_environ, walk_modules
__doctests__ = ['scrapy.utils.misc']
class UtilsMiscTestCase(unittest.TestCase):
def test_load_object_class(self):
obj = load_object(Field)
self.assertIs(obj, Field)
obj = load_object('scrapy.item.Field')
self.assertIs(obj, Field)
def test_load_object_function(self):
obj = load_object(load_object)
self.assertIs(obj, load_object)
obj = load_object('scrapy.utils.misc.load_object')
self.assertIs(obj, load_object)
def test_load_object_exceptions(self):
self.assertRaises(ImportError, load_object, 'nomodule999.mod.function')
self.assertRaises(NameError, load_object, 'scrapy.utils.misc.load_object999')
self.assertRaises(TypeError, load_object, dict())
def test_walk_modules(self):
mods = walk_modules('tests.test_utils_misc.test_walk_modules')
expected = [
'tests.test_utils_misc.test_walk_modules',
'tests.test_utils_misc.test_walk_modules.mod',
'tests.test_utils_misc.test_walk_modules.mod.mod0',
'tests.test_utils_misc.test_walk_modules.mod1',
]
self.assertEqual({m.__name__ for m in mods}, set(expected))
mods = walk_modules('tests.test_utils_misc.test_walk_modules.mod')
expected = [
'tests.test_utils_misc.test_walk_modules.mod',
'tests.test_utils_misc.test_walk_modules.mod.mod0',
]
self.assertEqual({m.__name__ for m in mods}, set(expected))
mods = walk_modules('tests.test_utils_misc.test_walk_modules.mod1')
expected = [
'tests.test_utils_misc.test_walk_modules.mod1',
]
self.assertEqual({m.__name__ for m in mods}, set(expected))
self.assertRaises(ImportError, walk_modules, 'nomodule999')
def test_walk_modules_egg(self):
egg = os.path.join(os.path.dirname(__file__), 'test.egg')
sys.path.append(egg)
try:
mods = walk_modules('testegg')
expected = [
'testegg.spiders',
'testegg.spiders.a',
'testegg.spiders.b',
'testegg'
]
self.assertEqual({m.__name__ for m in mods}, set(expected))
finally:
sys.path.remove(egg)
def test_arg_to_iter(self):
class TestItem(Item):
name = Field()
assert hasattr(arg_to_iter(None), '__iter__')
assert hasattr(arg_to_iter(100), '__iter__')
assert hasattr(arg_to_iter('lala'), '__iter__')
assert hasattr(arg_to_iter([1, 2, 3]), '__iter__')
assert hasattr(arg_to_iter(c for c in 'abcd'), '__iter__')
self.assertEqual(list(arg_to_iter(None)), [])
self.assertEqual(list(arg_to_iter('lala')), ['lala'])
self.assertEqual(list(arg_to_iter(100)), [100])
self.assertEqual(list(arg_to_iter(c for c in 'abc')), ['a', 'b', 'c'])
self.assertEqual(list(arg_to_iter([1, 2, 3])), [1, 2, 3])
self.assertEqual(list(arg_to_iter({'a': 1})), [{'a': 1}])
self.assertEqual(list(arg_to_iter(TestItem(name="john"))), [TestItem(name="john")])
def test_create_instance(self):
    """create_instance() must prefer from_crawler() over from_settings() over
    the plain constructor, and must adopt the crawler's settings when the
    explicit settings argument is None."""
    settings = mock.MagicMock()
    crawler = mock.MagicMock(spec_set=['settings'])
    args = (True, 100.)
    kwargs = {'key': 'val'}

    def _test_with_settings(mock, settings):
        # Without a crawler: from_settings (if present) wins and the bare
        # constructor is never invoked.
        create_instance(mock, settings, None, *args, **kwargs)
        if hasattr(mock, 'from_crawler'):
            self.assertEqual(mock.from_crawler.call_count, 0)
        if hasattr(mock, 'from_settings'):
            mock.from_settings.assert_called_once_with(settings, *args,
                                                       **kwargs)
            self.assertEqual(mock.call_count, 0)
        else:
            mock.assert_called_once_with(*args, **kwargs)

    def _test_with_crawler(mock, settings, crawler):
        # With a crawler: from_crawler takes priority over from_settings,
        # which in turn takes priority over the bare constructor.
        create_instance(mock, settings, crawler, *args, **kwargs)
        if hasattr(mock, 'from_crawler'):
            mock.from_crawler.assert_called_once_with(crawler, *args,
                                                      **kwargs)
            if hasattr(mock, 'from_settings'):
                self.assertEqual(mock.from_settings.call_count, 0)
            self.assertEqual(mock.call_count, 0)
        elif hasattr(mock, 'from_settings'):
            mock.from_settings.assert_called_once_with(settings, *args,
                                                       **kwargs)
            self.assertEqual(mock.call_count, 0)
        else:
            mock.assert_called_once_with(*args, **kwargs)

    # Check usage of correct constructor using four mocks:
    #   1. with no alternative constructors
    #   2. with from_settings() constructor
    #   3. with from_crawler() constructor
    #   4. with from_settings() and from_crawler() constructor
    spec_sets = (
        ['__qualname__'],
        ['__qualname__', 'from_settings'],
        ['__qualname__', 'from_crawler'],
        ['__qualname__', 'from_settings', 'from_crawler'],
    )
    for specs in spec_sets:
        m = mock.MagicMock(spec_set=specs)
        _test_with_settings(m, settings)
        m.reset_mock()
        _test_with_crawler(m, settings, crawler)

    # Check adoption of crawler settings when settings is None.
    m = mock.MagicMock(spec_set=['__qualname__', 'from_settings'])
    create_instance(m, None, crawler, *args, **kwargs)
    m.from_settings.assert_called_once_with(crawler.settings, *args,
                                            **kwargs)

    # Neither settings nor crawler supplied -> ValueError.
    with self.assertRaises(ValueError):
        create_instance(m, None, None)

    # A constructor that returns None is rejected with TypeError.
    m.from_settings.return_value = None
    with self.assertRaises(TypeError):
        create_instance(m, settings, None)
def test_set_environ(self):
    """set_environ() applies the variable inside the block and restores the
    previous state (absent or previous value) when the block exits."""
    key = 'some_test_environ'
    self.assertIsNone(os.environ.get(key))
    with set_environ(some_test_environ='test_value'):
        self.assertEqual(os.environ.get(key), 'test_value')
    # The variable did not exist before, so it must be gone again.
    self.assertIsNone(os.environ.get(key))

    os.environ[key] = 'test'
    self.assertEqual(os.environ.get(key), 'test')
    with set_environ(some_test_environ='test_value'):
        self.assertEqual(os.environ.get(key), 'test_value')
    # A pre-existing value must be restored, not deleted.
    self.assertEqual(os.environ.get(key), 'test')
def test_rel_has_nofollow(self):
    """rel_has_nofollow() detects 'nofollow' in space- or comma-separated rel values."""
    # Fixes two defects:
    #  * `asert` typos -- the original lines were NameErrors at runtime.
    #  * a stray `assert os.environ.get('some_test_environ') is None`
    #    copy-pasted from test_set_environ, which made this test fail
    #    depending on test execution order (that variable is left set).
    assert rel_has_nofollow('ugc nofollow') == True
    assert rel_has_nofollow('ugc,nofollow') == True
    assert rel_has_nofollow('ugc') == False
# Allow running this test module directly: `python <module>.py`.
if __name__ == "__main__":
    unittest.main()
|
__version__ = '0.2018.08.29.1434'
# RELEASE: Bump version to 0.2018.09.18.1655
__version__ = '0.2018.09.18.1655'
|
import re
import openpyxl
import sys
import os
# a regular expression for a formatted option description where the strike is an integer
OPTION_SHEET_PATTERN_INT = re.compile(r'^\w+\s\w+\s\d{2}-\d{2}-\d{2}\s\w+$')
# a regular expression for a formatted option description where the strike is a float
OPTION_SHEET_PATTERN_FLOAT= re.compile(r'^\w+\s\w+\s\d{2}-\d{2}-\d{2}\s\w+\.\w+$')
# a regular expression pattern for the stock sheet
STOCK_SHEET_PATTERN =re.compile(r'^\w+\s\w+\s\w+$')
# a regular expression for a formatted option description where the strike is an integer
OPTION_DESCRIPTION_PATTERN_INT= re.compile(r'^\w+\s\w+\s\d{2}/\d{2}/\d{2}\s\w+$')
# a regular expression for a formatted option description where the strike is a float
OPTION_DESCRIPTION_PATTERN_FLOAT = re.compile(r'^\w+\s\w+\s\d{2}/\d{2}/\d{2}\s\w+\.\w+$')
# a regular expression to designate whether an option description is a Call
CALL_DESIGNATION_PATTERN = re.compile(r'[C]\d+')
# a regular expression to designate whether an option description is a Put
PUT_DESIGNATION_PATTERN = re.compile(r'[P]\d+')
# USEFUL INFORMATION FROM THE 'Treasury Rates.xlsx' file
# NOTE(review): the workbook is loaded at import time (file I/O on every
# import) and get_sheet_by_name() is deprecated in openpyxl -- consider
# wb['Rates'] and lazy loading.
TREASURY_WORKBOOK_PATH = '{}/{}/{}/{}'.format(os.path.abspath(os.pardir), 'company_data','sample', 'Week_Day Treasury Rates.xlsx')
TREASURY_WORKSHEET= openpyxl.load_workbook(TREASURY_WORKBOOK_PATH, data_only=True).get_sheet_by_name('Rates')
TOTAL_TREASURY_SHEET_ROWS = TREASURY_WORKSHEET.max_row
# 1-based column indices into the 'Rates' worksheet
DATE_COLUMN= 2
THREE_MONTH_COLUMN= 7
SIX_MONTH_COLUMN= 8
TWELVE_MONTH_COLUMN= 9
# added constants for VIX_calculation.py
import re
import openpyxl
import sys
import os
# a regular expression for a formatted option description where the strike is an integer
OPTION_SHEET_PATTERN_INT = re.compile(r'^\w+\s\w+\s\d{2}-\d{2}-\d{2}\s\w+$')
# a regular expression for a formatted option description where the strike is a float
OPTION_SHEET_PATTERN_FLOAT= re.compile(r'^\w+\s\w+\s\d{2}-\d{2}-\d{2}\s\w+\.\w+$')
# a regular expression pattern for the stock sheet
STOCK_SHEET_PATTERN =re.compile(r'^\w+\s\w+\s\w+$')
# a regular expression for a formatted option description where the strike is an integer
OPTION_DESCRIPTION_PATTERN_INT= re.compile(r'^\w+\s\w+\s\d{2}/\d{2}/\d{2}\s\w+$')
# a regular expression for a formatted option description where the strike is a float
OPTION_DESCRIPTION_PATTERN_FLOAT = re.compile(r'^\w+\s\w+\s\d{2}/\d{2}/\d{2}\s\w+\.\w+$')
# a regular expression to designate whether an option description is a Call
CALL_DESIGNATION_PATTERN = re.compile(r'[C]\d+')
# a regular expression to designate whether an option description is a Put
PUT_DESIGNATION_PATTERN = re.compile(r'[P]\d+')
# USEFUL INFORMATION FROM THE 'Treasury Rates.xlsx' file
# NOTE(review): the workbook is loaded at import time (file I/O on every
# import) and get_sheet_by_name() is deprecated in openpyxl -- consider
# wb['Rates'] and lazy loading.
TREASURY_WORKBOOK_PATH = '{}/{}/{}/{}'.format(os.path.abspath(os.pardir), 'company_data','sample', 'Week_Day Treasury Rates.xlsx')
TREASURY_WORKSHEET= openpyxl.load_workbook(TREASURY_WORKBOOK_PATH, data_only=True).get_sheet_by_name('Rates')
TOTAL_TREASURY_SHEET_ROWS = TREASURY_WORKSHEET.max_row
# first worksheet row containing data (row 1 is the header)
TREASURY_DATA_START_ROW = 2
# 1-based column indices into the 'Rates' worksheet
DATE_COLUMN= 2
THREE_MONTH_COLUMN= 7
SIX_MONTH_COLUMN= 8
TWELVE_MONTH_COLUMN= 9
# total number of minutes in a 365 day year
MINUTES_PER_YEAR= 525600
# total number of minutes in a 30 day period
MINUTES_PER_MONTH= 43200
|
import json
from django.http import Http404
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from api.serializers import PostSerializer, CommentSerializer, AuthorSerializer, AllAuthorSerializer
from feed.models import Post, Author, Comment, Friend, CommentAuthor, ForeignHost
import requests
import urllib2
class public_posts(APIView):
    """
    List every post whose visibility is PUBLIC (no authentication required).
    """
    def get(self, request, format=None):
        visible = Post.objects.filter(visibility="PUBLIC")
        payload = {
            "query": "posts",
            "count": len(visible),
            "size": "",
            "next": "",
            "previous": "http://previouspageurlhere",
            "posts": PostSerializer(visible, many=True).data,
        }
        return Response(payload)
class post_detail(APIView):
    """
    Retrieve a single post by primary key.
    """
    authentication_classes = (SessionAuthentication, BasicAuthentication)
    permission_classes = (IsAuthenticated,)

    def get_object(self, pk):
        # Translate a missing row into an HTTP 404 instead of a server error.
        try:
            return Post.objects.get(pk=pk)
        except Post.DoesNotExist:
            raise Http404

    def get(self, request, pk, format=None):
        return Response(PostSerializer(self.get_object(pk)).data)
class post_comments(APIView):
    """
    List all comments for a single post.
    """
    authentication_classes = (SessionAuthentication, BasicAuthentication)
    permission_classes = (IsAuthenticated,)

    def get(self, request, pk, format=None):
        # GET: all comments attached to post `pk`.
        post_object = Post.objects.get(id=pk)
        # NOTE(review): `filter(id=post_object)` filters comments by their own
        # id, while the create path below stores the post as `post_id=...`;
        # this looks like it should be `post_id=post_object` -- confirm
        # against the Comment model before changing.
        comments = Comment.objects.filter(id=post_object)
        serializer = CommentSerializer(comments, many=True)
        return Response({"query": "comments", "count": len(comments), "size": "10", "next": "http://nextpageurlhere",
                         "previous": "http://previouspageurlhere", "comments": serializer.data})

    def post(self, request, pk, format=None):
        # POST: create a comment (plus a denormalized author record) on post `pk`.
        comment = request.data.get('comment')
        author_object = request.data.get('author')
        author_name = author_object['displayName']
        published = request.data.get('published')
        contentType = request.data.get('contentType')
        post_object = Post.objects.get(id=pk)
        new_comment_author = CommentAuthor(id=author_object['id'], host=author_object['host'], displayName=author_name,
                                           url=author_object['url'], github=author_object['github'])
        new_comment_author.save()
        new_comment = Comment(author=new_comment_author, post_id=post_object,
                              comment=comment, published=published,
                              author_name=author_name, contentType=contentType)
        new_comment.save()
        return Response({})
class author_posts(APIView):
"""
List all posts.
"""
authentication_classes = (SessionAuthentication, BasicAuthentication)
permission_classes = (IsAuthenticated,)
def get(self, request, pk, format=None):
author_object = Author.objects.get(id=pk)
asker_host = request.META.get("HTTP_HOST")
try:
asker_object = Author.objects.get(email=request.user)
asker_id = str(asker_object.id)
except:
asker_id = str(request.GET.get('id'))
public_posts = Post.objects.filter(author=author_object, visibility="PUBLIC")
return_posts = public_posts
# the asker is the user itself, return everything
if (pk == asker_id):
all_posts = Post.objects.filter(author=author_object)
return_posts = all_posts
# if the asker is a friend
friend_to_author = Friend.objects.filter(follower_id=pk, followed_id=asker_id)
author_to_friend = Friend.objects.filter(follower_id=asker_id, followed_id=pk)
if (len(friend_to_author) == 1) and (len(author_to_friend) == 1):
#then they are friends, because the relationship is mutual
friend_posts = Post.objects.filter(author=author_object, visibility="FRIENDS")
return_posts = return_posts | friend_posts
# if the asker is on our server, and a friend
if (len(Author.objects.filter(id=asker_id)) > 0) and (len(friend_to_author) == 1) and (len(author_to_friend) == 1):
server_friends_posts = Post.objects.filter(author=author_object, visibility="SERVERONLY")
return_posts = return_posts | server_friends_posts
# TODO: Look at FOAF stuff
# asker_id is person A
# as ditto, we need to ask person A's host who A is friends with
# fetch list of A's friends
url = "http://" + asker_host + "/api/friends/" + asker_id
req = urllib2.Request(url)
foreign_hosts = ForeignHost.objects.filter()
for host in foreign_hosts:
# if the sender host, which is a clipped version of the full host path, is part of it, then that host
# is the correct one we're looking for
if asker_host in host.url:
base64string = base64.encodestring('%s:%s' % (host.username, host.password)).replace('\n', '')
req.add_header("Authorization", "Basic %s" % base64string)
response = urllib2.urlopen(req).read()
loaded = json.loads(response)
print loaded
serializer = PostSerializer(return_posts, many=True)
return Response({"query": "posts", "count": len(return_posts), "size": "10", "next": "http://nextpageurlhere",
"previous": "http://previouspageurlhere", "posts": serializer.data})
class author_comments(APIView):
    """
    List every comment written by the given author.
    """
    authentication_classes = (SessionAuthentication, BasicAuthentication)
    permission_classes = (IsAuthenticated,)

    def get(self, request, pk, format=None):
        writer = Author.objects.get(id=pk)
        authored = Comment.objects.filter(author=writer)
        payload = {
            "query": "comments",
            "count": len(authored),
            "size": "10",
            "next": "http://nextpageurlhere",
            "previous": "http://previouspageurlhere",
            "comments": CommentSerializer(authored, many=True).data,
        }
        return Response(payload)
class author_list(APIView):
    """
    List every author approved by an admin.
    """
    authentication_classes = (SessionAuthentication, BasicAuthentication)
    permission_classes = (IsAuthenticated,)

    def get(self, request, format=None):
        approved = Author.objects.filter(admin_auth=True)
        return Response({"authors": AllAuthorSerializer(approved, many=True).data})
class author_detail(APIView):
    """
    Return the full profile of the requested author.
    """
    authentication_classes = (SessionAuthentication, BasicAuthentication)
    permission_classes = (IsAuthenticated,)

    def get(self, request, pk, format=None):
        profile = Author.objects.get(id=pk)
        payload = {
            "query": "author",
            "count": "1",
            "size": "10",
            "next": "http://nextpageurlhere",
            "previous": "http://previouspageurlhere",
            "author": AuthorSerializer(profile).data,
        }
        return Response(payload)
class check_friends(APIView):
    """
    returns who are friends
    """
    authentication_classes = (SessionAuthentication, BasicAuthentication)
    permission_classes = (IsAuthenticated,)

    # returns
    def get(self, request, pk, format=None):
        # GET: list all mutual friends of author `pk`.
        author_id = pk
        friends = []
        following = Friend.objects.filter(follower_id=author_id)
        for i in following:
            # A follow counts as a friendship only when it is reciprocated.
            tmp = Friend.objects.filter(follower_id=i.followed_id, followed_id=i.follower_id)
            if len(tmp) > 0:
                friends.append(i.followed_id)
        packet = {"query": "friends",
                  "authors": friends}
        return Response(packet)

    def post(self, request, pk, format=None):
        # POST: given candidate ids in request.data["authors"], return the
        # subset that are mutual friends of `pk`.
        author_id = pk
        # NOTE(review): the loop below rebinds `author_id`, shadowing the
        # value assigned from pk above -- harmless here, but confusing.
        possible_friends = request.data.get('authors')
        confirmed_friends = []
        for author_id in possible_friends:
            author_one_to_two = Friend.objects.filter(follower_id=pk, followed_id=author_id)
            author_two_to_one = Friend.objects.filter(follower_id=author_id, followed_id=pk)
            if (len(author_one_to_two) > 0) and (len(author_two_to_one) > 0):
                confirmed_friends.append(author_id)
        packet = {"query": "friends",
                  "author": pk,
                  "authors": confirmed_friends}
        return Response(packet)
class check_mutual_friend(APIView):
    """
    Report whether two authors are mutual friends.
    """
    authentication_classes = (SessionAuthentication, BasicAuthentication)
    permission_classes = (IsAuthenticated,)

    def get(self, request, pk1, pk2, format=None):
        one_follows_two = Friend.objects.filter(follower_id=pk1, followed_id=pk2)
        two_follows_one = Friend.objects.filter(follower_id=pk2, followed_id=pk1)
        # Friendship requires the follow relation in both directions.
        mutual = (len(one_follows_two) > 0) and (len(two_follows_one) > 0)
        return Response({"query": "friends",
                         "authors": [pk1, pk2],
                         "friends": mutual})
class friend_request(APIView):
"""
Make a friend (follow) request, if we have already followed the person who is requesting
to become friends with us, they are actually responding to our friend request
"""
authentication_classes = (SessionAuthentication, BasicAuthentication)
permission_classes = (IsAuthenticated,)
def post(self, request, format=None):
try:
author = json.loads(request.data.get("author"))
friend = json.loads(request.data.get("friend"))
except:
author = request.data.get("author")
friend = request.data.get("friend")
author_host = author.get("host")
friend_host = friend.get("host")
friend_to_author = Friend.objects.filter(follower_id=friend["id"], followed_id=author["id"])
author_to_friend = Friend.objects.filter(follower_id=author["id"], followed_id=friend["id"])
# checks what kind of relationship the two have, intimate or otherwise
# are they following me?
#print len(friend_to_author)
# am I followign them
#print len(author_to_friend)
if (len(friend_to_author) == 1) and (len(author_to_friend) == 1):
print "you're an idiot you're already friends"
elif (len(friend_to_author) == 1) and (len(author_to_friend) == 0):
new_friend_object = Friend(follower_id=author["id"], followed_id=friend["id"],
follower_host=author_host, followed_host=friend_host)
new_friend_object.save()
# WE ARE NOW FRIENDS
elif (len(friend_to_author) == 0) and (len(author_to_friend) == 1):
pass
elif (len(friend_to_author) == 0) and (len(author_to_friend) == 0):
new_friend_object = Friend(follower_id=author["id"], followed_id=friend["id"], follower_host=author_host,
followed_host=friend_host)
new_friend_object.save()
# CHECK THE USERS, IF FRIEND IS NOT OUR SERVER, WE ARE SENDING A REQUEST OFF SERVER
if 'ditto-test' not in friend_host:
try:
url = friend_host + 'api/friendrequest'
packet = {"query":"friendrequest", "author":author, "friend":friend }
foreign_host = ForeignHost.objects.get(url=friend_host)
if foreign_host.username != 'null':
r = requests.post(url, json=packet)
else:
r = requests.post(url, json=packet, auth=(foreign_host.username, foreign_host.password))
except Exception as e:
print e
pass
return Response()
# checking foaf stuff5
import json
from django.http import Http404
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from api.serializers import PostSerializer, CommentSerializer, AuthorSerializer, AllAuthorSerializer
from feed.models import Post, Author, Comment, Friend, CommentAuthor, ForeignHost
import requests
import urllib2
class public_posts(APIView):
    """
    List all posts.
    """
    # Unauthenticated endpoint: only PUBLIC posts are exposed.
    def get(self, request, format=None):
        posts = Post.objects.filter(visibility="PUBLIC")
        serializer = PostSerializer(posts, many=True)
        return Response({"query": "posts", "count": len(posts), "size": "", "next": "",
                         "previous": "http://previouspageurlhere", "posts": serializer.data})
class post_detail(APIView):
    """
    Retrieve a single post.
    """
    authentication_classes = (SessionAuthentication, BasicAuthentication)
    permission_classes = (IsAuthenticated,)

    def get_object(self, pk):
        # Map a missing row to HTTP 404 rather than a server error.
        try:
            return Post.objects.get(pk=pk)
        except Post.DoesNotExist:
            raise Http404

    def get(self, request, pk, format=None):
        post = self.get_object(pk)
        serializer = PostSerializer(post)
        return Response(serializer.data)
class post_comments(APIView):
    """
    List all comments for a single post.
    """
    authentication_classes = (SessionAuthentication, BasicAuthentication)
    permission_classes = (IsAuthenticated,)

    def get(self, request, pk, format=None):
        # GET: all comments attached to post `pk`.
        post_object = Post.objects.get(id=pk)
        # NOTE(review): `filter(id=post_object)` filters comments by their own
        # id, while the create path below stores the post as `post_id=...`;
        # this looks like it should be `post_id=post_object` -- confirm.
        comments = Comment.objects.filter(id=post_object)
        serializer = CommentSerializer(comments, many=True)
        return Response({"query": "comments", "count": len(comments), "size": "10", "next": "http://nextpageurlhere",
                         "previous": "http://previouspageurlhere", "comments": serializer.data})

    def post(self, request, pk, format=None):
        # POST: create a comment (plus a denormalized author record) on post `pk`.
        comment = request.data.get('comment')
        author_object = request.data.get('author')
        author_name = author_object['displayName']
        published = request.data.get('published')
        contentType = request.data.get('contentType')
        post_object = Post.objects.get(id=pk)
        new_comment_author = CommentAuthor(id=author_object['id'], host=author_object['host'], displayName=author_name,
                                           url=author_object['url'], github=author_object['github'])
        new_comment_author.save()
        new_comment = Comment(author=new_comment_author, post_id=post_object,
                              comment=comment, published=published,
                              author_name=author_name, contentType=contentType)
        new_comment.save()
        return Response({})
class author_posts(APIView):
    """
    List all posts.
    """
    authentication_classes = (SessionAuthentication, BasicAuthentication)
    permission_classes = (IsAuthenticated,)

    def get(self, request, pk, format=None):
        # Returns author `pk`'s posts filtered by what the asker may see:
        # PUBLIC always, everything for the author themself, FRIENDS for
        # mutual friends, SERVERONLY additionally for local friends.
        author_object = Author.objects.get(id=pk)
        asker_host = request.META.get("HTTP_HOST")
        try:
            asker_object = Author.objects.get(email=request.user)
            asker_id = str(asker_object.id)
        except:
            # Asker is not a local author; fall back to the id query param.
            asker_id = str(request.GET.get('id'))
        public_posts = Post.objects.filter(author=author_object, visibility="PUBLIC")
        return_posts = public_posts
        # the asker is the user itself, return everything
        if (pk == asker_id):
            all_posts = Post.objects.filter(author=author_object)
            return_posts = all_posts
        # if the asker is a friend
        friend_to_author = Friend.objects.filter(follower_id=pk, followed_id=asker_id)
        author_to_friend = Friend.objects.filter(follower_id=asker_id, followed_id=pk)
        if (len(friend_to_author) == 1) and (len(author_to_friend) == 1):
            # then they are friends, because the relationship is mutual
            friend_posts = Post.objects.filter(author=author_object, visibility="FRIENDS")
            return_posts = return_posts | friend_posts
        # if the asker is on our server, and a friend
        if (len(Author.objects.filter(id=asker_id)) > 0) and (len(friend_to_author) == 1) and (len(author_to_friend) == 1):
            server_friends_posts = Post.objects.filter(author=author_object, visibility="SERVERONLY")
            return_posts = return_posts | server_friends_posts
        # TODO: Look at FOAF stuff
        # asker_id is person A
        # as ditto, we need to ask person A's host who A is friends with
        # fetch list of A's friends
        url = "http://" + asker_host + "/api/friends/" + asker_id
        req = urllib2.Request(url)
        foreign_hosts = ForeignHost.objects.filter()
        for host in foreign_hosts:
            # if the sender host, which is a clipped version of the full host path, is part of it, then that host
            # is the correct one we're looking for
            print asker_host
            print host.url
            if asker_host in host.url:
                # NOTE(review): `base64` is never imported in this module, so
                # this line raises NameError at runtime -- add `import base64`.
                base64string = base64.encodestring('%s:%s' % (host.username, host.password)).replace('\n', '')
                req.add_header("Authorization", "Basic %s" % base64string)
        response = urllib2.urlopen(req).read()
        loaded = json.loads(response)
        print loaded
        serializer = PostSerializer(return_posts, many=True)
        return Response({"query": "posts", "count": len(return_posts), "size": "10", "next": "http://nextpageurlhere",
                         "previous": "http://previouspageurlhere", "posts": serializer.data})
class author_comments(APIView):
    """
    List all comments from a specific author
    """
    authentication_classes = (SessionAuthentication, BasicAuthentication)
    permission_classes = (IsAuthenticated,)

    def get(self, request, pk, format=None):
        author_object = Author.objects.get(id=pk)
        comments = Comment.objects.filter(author=author_object)
        serializer = CommentSerializer(comments, many=True)
        return Response({"query": "comments", "count": len(comments), "size": "10", "next": "http://nextpageurlhere",
                         "previous": "http://previouspageurlhere", "comments": serializer.data})
class author_list(APIView):
    """
    List all authors
    """
    authentication_classes = (SessionAuthentication, BasicAuthentication)
    permission_classes = (IsAuthenticated,)

    def get(self, request, format=None):
        # Only authors approved by an admin are listed.
        authors = Author.objects.filter(admin_auth=True)
        serializer = AllAuthorSerializer(authors, many=True)
        return Response({"authors": serializer.data})
class author_detail(APIView):
    """
    List all information on provided author
    """
    authentication_classes = (SessionAuthentication, BasicAuthentication)
    permission_classes = (IsAuthenticated,)

    def get(self, request, pk, format=None):
        author_object = Author.objects.get(id=pk)
        serializer = AuthorSerializer(author_object)
        return Response({"query": "author", "count": "1", "size": "10", "next": "http://nextpageurlhere",
                         "previous": "http://previouspageurlhere", "author": serializer.data})
class check_friends(APIView):
    """
    returns who are friends
    """
    authentication_classes = (SessionAuthentication, BasicAuthentication)
    permission_classes = (IsAuthenticated,)

    # returns
    def get(self, request, pk, format=None):
        # GET: list all mutual friends of author `pk`.
        author_id = pk
        friends = []
        following = Friend.objects.filter(follower_id=author_id)
        for i in following:
            # A follow counts as a friendship only when it is reciprocated.
            tmp = Friend.objects.filter(follower_id=i.followed_id, followed_id=i.follower_id)
            if len(tmp) > 0:
                friends.append(i.followed_id)
        packet = {"query": "friends",
                  "authors": friends}
        return Response(packet)

    def post(self, request, pk, format=None):
        # POST: given candidate ids in request.data["authors"], return the
        # subset that are mutual friends of `pk`.
        author_id = pk
        possible_friends = request.data.get('authors')
        confirmed_friends = []
        for author_id in possible_friends:
            author_one_to_two = Friend.objects.filter(follower_id=pk, followed_id=author_id)
            author_two_to_one = Friend.objects.filter(follower_id=author_id, followed_id=pk)
            if (len(author_one_to_two) > 0) and (len(author_two_to_one) > 0):
                confirmed_friends.append(author_id)
        packet = {"query": "friends",
                  "author": pk,
                  "authors": confirmed_friends}
        return Response(packet)
class check_mutual_friend(APIView):
    """
    Return JSON with True or False if friends
    """
    authentication_classes = (SessionAuthentication, BasicAuthentication)
    permission_classes = (IsAuthenticated,)

    def get(self, request, pk1, pk2, format=None):
        # Friendship requires the follow relation in both directions.
        author_one_to_two = Friend.objects.filter(follower_id=pk1, followed_id=pk2)
        author_two_to_one = Friend.objects.filter(follower_id=pk2, followed_id=pk1)
        packet = {"query": "friends",
                  "authors": [pk1, pk2],
                  "friends": False}
        if (len(author_one_to_two) > 0) and (len(author_two_to_one) > 0):
            packet["friends"] = True
        return Response(packet)
class friend_request(APIView):
    """
    Make a friend (follow) request, if we have already followed the person who is requesting
    to become friends with us, they are actually responding to our friend request
    """
    authentication_classes = (SessionAuthentication, BasicAuthentication)
    permission_classes = (IsAuthenticated,)

    def post(self, request, format=None):
        # The author/friend payloads may arrive JSON-encoded or already parsed.
        try:
            author = json.loads(request.data.get("author"))
            friend = json.loads(request.data.get("friend"))
        except:
            author = request.data.get("author")
            friend = request.data.get("friend")
        author_host = author.get("host")
        friend_host = friend.get("host")
        friend_to_author = Friend.objects.filter(follower_id=friend["id"], followed_id=author["id"])
        author_to_friend = Friend.objects.filter(follower_id=author["id"], followed_id=friend["id"])
        # checks what kind of relationship the two have, intimate or otherwise
        # are they following me?
        #print len(friend_to_author)
        # am I followign them
        #print len(author_to_friend)
        if (len(friend_to_author) == 1) and (len(author_to_friend) == 1):
            print "you're an idiot you're already friends"
        elif (len(friend_to_author) == 1) and (len(author_to_friend) == 0):
            # they follow us already: following back completes the friendship
            new_friend_object = Friend(follower_id=author["id"], followed_id=friend["id"],
                                       follower_host=author_host, followed_host=friend_host)
            new_friend_object.save()
            # WE ARE NOW FRIENDS
        elif (len(friend_to_author) == 0) and (len(author_to_friend) == 1):
            # we already follow them; nothing more to do until they reciprocate
            pass
        elif (len(friend_to_author) == 0) and (len(author_to_friend) == 0):
            new_friend_object = Friend(follower_id=author["id"], followed_id=friend["id"], follower_host=author_host,
                                       followed_host=friend_host)
            new_friend_object.save()
        # CHECK THE USERS, IF FRIEND IS NOT OUR SERVER, WE ARE SENDING A REQUEST OFF SERVER
        if 'ditto-test' not in friend_host:
            try:
                url = friend_host + 'api/friendrequest'
                packet = {"query":"friendrequest", "author":author, "friend":friend }
                foreign_host = ForeignHost.objects.get(url=friend_host)
                # NOTE(review): these two branches look inverted -- the
                # authenticated call is made when username IS 'null' (i.e. no
                # credentials) and the unauthenticated one when real
                # credentials exist. Confirm and swap.
                if foreign_host.username != 'null':
                    r = requests.post(url, json=packet)
                else:
                    r = requests.post(url, json=packet, auth=(foreign_host.username, foreign_host.password))
            except Exception as e:
                print e
                pass
        return Response()
|
from vsmlib.vocabulary import Vocabulary_cooccurrence, Vocabulary_simple, Vocabulary
import vsmlib.matrix
import numpy as np
import scipy
from scipy import sparse
import scipy.sparse.linalg
import math
from matplotlib import pyplot as plt
import os
import brewer2mpl
import tables
import json
from .misc.formathelper import bcolors
from .misc.deprecated import deprecated
from .misc.data import save_json, load_json, detect_archive_format_and_open
def normed(v):
    """Return *v* scaled to unit Euclidean (L2) length."""
    return np.divide(v, np.linalg.norm(v))
class Model(object):
    """Base class for vector space models: a matrix of word vectors plus a
    vocabulary mapping words to row ids.  Subclasses provide cmp_vectors()
    and the loading logic."""

    def __init__(self):
        self.provenance = ""  # free-text history of how the model was built
        self.name = ""        # human-readable model identifier
        self.metadata = {}

    def get_x_label(self, i):
        # Label for matrix dimension i; here dimensions correspond to words.
        return self.vocabulary.get_word_by_id(i)

    def get_most_informative_columns(self, rows, width):
        """Return the ids of the `width` columns with the largest absolute
        summed (row-normalized) mass over `rows`, most informative first."""
        xdim = rows.shape[1]
        scores = np.zeros(xdim)
        for i in range(rows.shape[0]):
            row = rows[i] / np.linalg.norm(rows[i])
            for j in range(len(row)):
                scores[j] += row[j]
        scores = abs(scores)
        tops = np.argsort(scores)
        return list(reversed(tops[-width:]))

    def filter_rows(self, ids_of_interest):
        """Stack the rows with the given ids into one dense 2-D array,
        silently skipping negative (out-of-vocabulary) ids."""
        # return (cooccurrence[1].todense()[:width])
        xdim = self.matrix.shape[1]
        dense = np.empty([0, xdim])
        # dense=np.empty([0,width])
        for i in ids_of_interest:
            if i < 0:
                continue
            # Sparse rows are densified so everything can be vstacked.
            if sparse.issparse(self.matrix):
                row = self.matrix[i].todense()
            else:
                row = self.matrix[i]
            row = np.asarray(row)
            row = np.reshape(row, (xdim))
            # dense=np.vstack([dense,row[:width]])
            dense = np.vstack([dense, row])
        return (dense)

    def filter_submatrix(self, lst_words_initial, width):
        """Return (rows, submatrix, labels) for the in-vocabulary words of
        `lst_words_initial`, restricted to the `width` most informative
        columns."""
        words_of_interest = [
            w for w in lst_words_initial if self.vocabulary.get_id(w) >= 0]
        ids_of_interest = [self.vocabulary.get_id(
            w) for w in words_of_interest]
        rows = self.filter_rows(ids_of_interest)
        # xdim = rows.shape[1]
        # max_width = 25
        # width=min(xdim,max_width)
        vert = None  # np.empty((rows.shape[0],0))
        cols = self.get_most_informative_columns(rows, width)
        for i in cols:
            if vert is None:
                vert = (rows[:, i])
            else:
                vert = np.vstack([vert, rows[:, i]])
        labels = [self.get_x_label(i) for i in cols]
        return rows, vert.T, labels

    def get_most_similar_vectors(self, u, cnt=10):
        """Return (row_id, score) pairs for the `cnt` rows most similar to
        `u` under the subclass's cmp_vectors(), best first."""
        scores = np.zeros(self.matrix.shape[0], dtype=np.float32)
        for i in range(self.matrix.shape[0]):
            scores[i] = self.cmp_vectors(u, self.matrix[i])
        ids = np.argsort(scores)[::-1]
        ids = ids[:cnt]
        return zip(ids, scores[ids])

    def get_most_similar_words(self, w, cnt=10):
        """Like get_most_similar_vectors(), but accepts a word or a raw
        vector and returns [word, score] pairs."""
        if isinstance(w, str):
            vec = self.matrix[self.vocabulary.get_id(w)]
        else:
            vec = w
        rows = self.get_most_similar_vectors(vec, cnt)
        results = []
        for i in rows:
            results.append([self.vocabulary.get_word_by_id(i[0]), i[1]])
        return results

    def get_row(self, w):
        """Return the vector for word `w`; raises for out-of-vocabulary words."""
        i = self.vocabulary.get_id(w)
        if i < 0:
            raise Exception('word do not exist', w)
            # return None
        row = self.matrix[i]
        return row

    def cmp_rows(self, id1, id2):
        # Similarity of two rows given by id.
        r1 = self.matrix[id1]
        r2 = self.matrix[id2]
        return self.cmp_vectors(r1, r2)

    def cmp_words(self, w1, w2):
        # Similarity of two words; 0 when either is out of vocabulary.
        id1 = self.vocabulary.get_id(w1)
        id2 = self.vocabulary.get_id(w2)
        if (id1 < 0) or (id2 < 0):
            return 0
        return self.cmp_rows(id1, id2)

    def load_props(self, path):
        """Load <path>/props.json into self.props ({} when the file is missing)."""
        try:
            with open(os.path.join(path, "props.json"), "r") as myfile:
                str_props = myfile.read()
                self.props = json.loads(str_props)
        except FileNotFoundError:
            print(bcolors.FAIL + "props.json not found" + bcolors.ENDC)
            self.props = {}
            # exit(-1)

    def load_provenance(self, path):
        """Best-effort load of <path>/provenance.txt, then the props file."""
        try:
            with open(os.path.join(path, "provenance.txt"), "r") as myfile:
                self.provenance = myfile.read()
        except FileNotFoundError:
            print("provenance not found")
        self.load_props(path)
def normalize(m):
    """L2-normalize each row of a CSR sparse matrix *m* in place.

    Fixes an off-by-one: the original looped over ``range(m.shape[0] - 1)``
    and left the last row unnormalized (row i spans
    ``data[indptr[i]:indptr[i + 1]]`` and indptr has shape[0] + 1 entries).
    Rows whose norm is 0 (no stored non-zeros) are left untouched to avoid
    0/0 NaNs.
    """
    for i in range(m.shape[0]):
        row = m.data[m.indptr[i]:m.indptr[i + 1]]
        norm = np.linalg.norm(row)
        if norm > 0:
            row /= norm  # slice is a view, so this edits m.data in place
class Model_explicit(Model):
    """Explicit (sparse co-occurrence) vector space model."""

    def __init__(self):
        # Fix: the original never called Model.__init__, so `self.name` did
        # not exist yet and `self.name += ...` raised AttributeError.
        super().__init__()
        self.name += "explicit_"

    def cmp_vectors(self, r1, r2):
        """Cosine similarity between two sparse row vectors (0 when undefined)."""
        c = r1.dot(r2.T) / (np.linalg.norm(r1.data) * np.linalg.norm(r2.data))
        c = c[0, 0]
        if math.isnan(c):
            return 0
        return c

    def load_from_hdf5(self, path):
        """Load a CSR co-occurrence matrix stored as row_ptr/col_ind/data arrays."""
        self.load_provenance(path)
        f = tables.open_file(os.path.join(path, 'cooccurrence_csr.h5p'), 'r')
        row_ptr = np.nan_to_num(f.root.row_ptr.read())
        col_ind = np.nan_to_num(f.root.col_ind.read())
        data = np.nan_to_num(f.root.data.read())
        dim = row_ptr.shape[0] - 1  # CSR: indptr has one more entry than rows
        self.matrix = scipy.sparse.csr_matrix(
            (data, col_ind, row_ptr), shape=(dim, dim), dtype=np.float32)
        f.close()
        self.vocabulary = Vocabulary_cooccurrence()
        self.vocabulary.load(path)
        self.name += os.path.basename(os.path.normpath(path))

    def load(self, path):
        """Load provenance, vocabulary and the CSR matrix from `path`."""
        self.load_provenance(path)
        self.vocabulary = Vocabulary_cooccurrence()
        self.vocabulary.load(path)
        self.name += os.path.basename(os.path.normpath(path))
        self.matrix = vsmlib.matrix.load_matrix_csr(path, verbose=True)

    def clip_negatives(self):
        """Zero out all negative entries in place and drop them from storage."""
        self.matrix.data.clip(0, out=self.matrix.data)
        self.matrix.eliminate_zeros()
        self.name += "_pos"
        self.provenance += "\ntransform : clip negative"

    def normalize(self):
        """L2-normalize matrix rows in place (module-level normalize())."""
        normalize(self.matrix)
        self.name += "_normalized"
        self.provenance += "\ntransform : normalize"
        self.normalized = True
class ModelDense(Model):
    """Model backed by a dense numpy matrix of embeddings."""

    def cmp_vectors(self, r1, r2):
        # Cosine similarity of two dense vectors; 0 when undefined (NaN).
        c = normed(r1) @ normed(r2)
        if math.isnan(c):
            return 0
        return c

    def save_matr_to_hdf5(self, path):
        """Write self.matrix to <path>/vectors.h5p as a 'vectors' carray."""
        f = tables.open_file(os.path.join(path, 'vectors.h5p'), 'w')
        atom = tables.Atom.from_dtype(self.matrix.dtype)
        ds = f.create_carray(f.root, 'vectors', atom, self.matrix.shape)
        ds[:] = self.matrix
        ds.flush()
        f.close()

    def load_hdf5(self, path):
        """Read self.matrix back from <path>/vectors.h5p."""
        f = tables.open_file(os.path.join(path, 'vectors.h5p'), 'r')
        self.matrix = f.root.vectors.read()
        f.close()

    def save_to_dir(self, path):
        """Persist vocabulary, matrix and metadata under `path` (created if missing)."""
        if not os.path.exists(path):
            os.makedirs(path)
        self.vocabulary.save_to_dir(path)
        # self.matrix.tofile(os.path.join(path,"vectors.bin"))
        # np.save(os.path.join(path, "vectors.npy"), self.matrix)
        self.save_matr_to_hdf5(path)
        save_json(self.metadata, os.path.join(path, "metadata.json"))

    def load_with_alpha(self, path, power=0.6, verbose=False):
        """Load SVD factors (left vectors + sigma) and build the embedding
        as left @ diag(sigma ** power)."""
        self.load_provenance(path)
        f = tables.open_file(os.path.join(path, 'vectors.h5p'), 'r')
        # left = np.nan_to_num(f.root.vectors.read())
        left = f.root.vectors.read()
        sigma = f.root.sigma.read()
        if verbose:
            print("loaded left singulat vectors and sigma")
        sigma = np.power(sigma, power)
        self.matrix = np.dot(left, np.diag(sigma))
        if verbose:
            print("computed the product")
        self.props["pow_sigma"] = power
        self.props["size_dimensions"] = self.matrix.shape[1]
        f.close()
        self.vocabulary = Vocabulary_simple()
        self.vocabulary.load(path)
        self.name += os.path.basename(os.path.normpath(path)) + "_a" + str(power)

    def load_from_dir(self, path):
        """Load matrix (vectors.npy), vocabulary and provenance from `path`."""
        self.matrix = np.load(os.path.join(path, "vectors.npy"))
        # self.load_with_alpha(0.6)
        self.vocabulary = Vocabulary_simple()
        self.vocabulary.load(path)
        self.name += os.path.basename(os.path.normpath(path))
        self.load_provenance(path)

    def normalize(self):
        """L2-normalize every row in place (all-zero rows are left untouched)."""
        nrm = np.linalg.norm(self.matrix, axis=1)
        nrm[nrm == 0] = 1  # avoid division by zero for empty rows
        self.matrix /= nrm[:, np.newaxis]
        self.name += "_normalized"
        self.provenance += "\ntransform : normalized"
        self.props["normalized"] = True

    def load_from_text(self, path):
        """Parse a word2vec-style text file: an optional `count dim` header
        line followed by one `word v1 v2 ...` line per word."""
        i = 0
        # self.name+="_"+os.path.basename(os.path.normpath(path))
        self.vocabulary = vsmlib.vocabulary.Vocabulary()
        rows = []
        header = False
        with detect_archive_format_and_open(path) as f:
            for line in f:
                tokens = line.split()
                # Exactly two tokens on the first line = word2vec header.
                if i == 0 and len(tokens) == 2:
                    header = True
                    cnt_words = int(tokens[0])
                    size_embedding = int(tokens[1])
                    continue
                # word = tokens[0].decode('ascii',errors="ignore")
                # word = tokens[0].decode('UTF-8', errors="ignore")
                word = tokens[0]
                self.vocabulary.dic_words_ids[word] = i
                self.vocabulary.lst_words.append(word)
                str_vec = tokens[1:]
                row = np.zeros(len(str_vec), dtype=np.float32)
                for j in range(len(str_vec)):
                    row[j] = float(str_vec[j])
                rows.append(row)
                i += 1
        # Sanity-check the parsed data against the header, if one was present.
        if header:
            assert cnt_words == len(rows)
        self.matrix = np.vstack(rows)
        if header:
            assert size_embedding == self.matrix.shape[1]
        self.vocabulary.lst_frequencies = np.zeros(len(self.vocabulary.lst_words))
        # self.name += "_{}".format(len(rows[0]))
class ModelNumbered(ModelDense):
    """Dense model whose dimensions carry no word labels - columns are just numbered."""
    def get_x_label(self, i):
        """Column label is simply the column index."""
        return i
    def viz_wordlist(self, wl, colored=False, show_legend=False):
        """Bar-plot the unit-normalized vector of each word in *wl*.

        colored: use a qualitative palette instead of translucent black.
        show_legend: add a matplotlib legend labeled with the words.
        """
        colors = brewer2mpl.get_map('Set2', 'qualitative', 8).mpl_colors
        cnt = 0
        for i in wl:
            row = self.get_row(i)
            row = row / np.linalg.norm(row)
            if colored:
                # cycle through the palette so more than 8 words no longer IndexError
                plt.bar(range(0, len(row)), row, color=colors[cnt % len(colors)], linewidth=0, alpha=0.6, label=i)
            else:
                plt.bar(range(0, len(row)), row, color="black", linewidth=0, alpha=1 / len(wl), label=i)
            cnt += 1
        if show_legend:
            plt.legend()
class Model_Levi(ModelNumbered):
    """Loader for embeddings produced by Levi & Goldberg's hyperwords toolkit."""
    def load_from_dir(self, path):
        """Load sgns.contexts.npy and the word list from *path*.

        Bug fix: the vocabulary was previously read from a hard-coded
        machine-local path (/home/blackbird/...); it is now derived from
        *path* like the matrix itself.
        """
        self.name = "Levi_" + os.path.basename(os.path.normpath(path))
        self.matrix = np.load(os.path.join(path, "sgns.contexts.npy"))
        self.vocabulary = vsmlib.vocabulary.Vocabulary_simple()
        self.vocabulary.dir_root = path
        self.vocabulary.load_list_from_sorted_file(
            os.path.join(path, "sgns.words.vocab"))
        self.vocabulary.dic_words_ids = {}
        for i, w in enumerate(self.vocabulary.lst_words):
            self.vocabulary.dic_words_ids[w] = i
class Model_svd_scipy(ModelNumbered):
    """Truncated-SVD projection of another model's matrix."""
    def __init__(self, original, cnt_singular_vectors, power):
        """Factor original.matrix with sparse SVD; keep U * diag(sigma ** power)."""
        # which: LM SM LA SA BE
        u_left, singular_values, _vt = scipy.sparse.linalg.svds(
            original.matrix, k=cnt_singular_vectors, which='LM')
        self.sigma = singular_values
        weighted = np.power(singular_values, power)
        self.matrix = np.dot(u_left, np.diag(weighted))
        self.vocabulary = original.vocabulary
        suffix = "\napplied scipy.linal.svd, {} singular vectors, sigma in the power of {}".format(
            cnt_singular_vectors, power)
        self.provenance = original.provenance + suffix
        self.name = original.name + "_svd_{}_C{}".format(cnt_singular_vectors, power)
class Model_w2v(ModelNumbered):
    """Reader for binary word2vec ``vectors.bin`` files."""
    @staticmethod
    def load_word(f):
        """Read bytes from *f* up to (and consuming) the next space.

        Bug fix: previously looped forever at EOF because read(1) returning
        b'' never equals b' '; now EOF terminates the word as well.
        """
        result = b''
        while True:
            w = f.read(1)
            if not w or w == b' ':
                break
            result += w
        return result
    def load_from_file(self, filename):
        """Parse a word2vec binary file: a "<rows> <dim>" header line, then
        one "<word> <dim float32s>" record per row."""
        self.vocabulary = Vocabulary()
        # with-statement ensures the handle is closed even if parsing fails
        with open(filename, "rb") as f:
            header = f.readline().split()
            cnt_rows = int(header[0])
            size_row = int(header[1])
            self.name += "_{}".format(size_row)
            self.matrix = np.zeros((cnt_rows, size_row), dtype=np.float32)
            print("cnt rows = {}, size row = {}".format(cnt_rows, size_row))
            for i in range(cnt_rows):
                word = Model_w2v.load_word(f).decode(
                    'UTF-8', errors="ignore").strip()
                self.vocabulary.dic_words_ids[word] = i
                self.vocabulary.lst_words.append(word)
                s_row = f.read(size_row * 4)
                # np.frombuffer replaces the deprecated np.fromstring
                row = np.frombuffer(s_row, dtype=np.float32)
                self.matrix[i] = row
    def load_from_dir(self, path):
        """Load vectors.bin plus provenance metadata from directory *path*."""
        self.name += "w2v_" + os.path.basename(os.path.normpath(path))
        self.load_from_file(os.path.join(path, "vectors.bin"))
        self.load_provenance(path)
@deprecated
class Model_glove(ModelNumbered):
    """Legacy loader for GloVe embeddings stored as gzipped text."""
    def __init__(self):
        self.name = "glove"
    def load_from_dir(self, path):
        """Scan *path* for .gz files and load each one as text embeddings."""
        self.name = "glove_" + os.path.basename(os.path.normpath(path))
        for entry in os.listdir(path):
            if entry.endswith(".gz"):
                print("this is Glove")
                self.load_from_text(os.path.join(path, entry))
def load_from_dir(path):
    """Detect the embedding format stored under *path*, load it and return a model.

    Detection order: sparse explicit (HDF5), sparse explicit (binary),
    word2vec binary, Levi npy, dense npy, vsmlib HDF5, then plain-text
    files (.gz/.bz/.txt).  Raises RuntimeError if nothing matches.
    """
    if os.path.isfile(os.path.join(path, "cooccurrence_csr.h5p")):
        print("this is sparse explicit in hdf5")
        model = vsmlib.Model_explicit()
        model.load_from_hdf5(path)
        return model
    if os.path.isfile(os.path.join(path, "bigrams.data.bin")):
        print("this is sparse explicit")
        model = vsmlib.Model_explicit()
        model.load(path)
        return model
    if os.path.isfile(os.path.join(path, "vectors.bin")):
        print("this is w2v")
        model = vsmlib.Model_w2v()
        model.load_from_dir(path)
        return model
    if os.path.isfile(os.path.join(path, "sgns.words.npy")):
        model = Model_Levi()
        model.load_from_dir(path)
        print("this is Levi ")
        return model
    if os.path.isfile(os.path.join(path, "vectors.npy")):
        model = vsmlib.ModelNumbered()
        model.load_from_dir(path)
        print("this is dense ")
        return model
    if os.path.isfile(os.path.join(path, "vectors.h5p")):
        model = vsmlib.ModelNumbered()
        model.load_hdf5(path)
        print("this is vsmlib format ")
        return model
    # fallback: first compressed/plain text file wins
    model = ModelNumbered()
    for fname in os.listdir(path):
        if fname.endswith((".gz", ".bz", ".txt")):
            print("this is text")
            model.load_from_text(os.path.join(path, fname))
            return model
    raise RuntimeError("can not detect embeddings format")
import logging
from vsmlib.vocabulary import Vocabulary_cooccurrence, Vocabulary_simple, Vocabulary
import vsmlib.matrix
import numpy as np
import scipy
from scipy import sparse
import scipy.sparse.linalg
import math
from matplotlib import pyplot as plt
import os
import brewer2mpl
import tables
import json
import logging
from .misc.formathelper import bcolors
from .misc.deprecated import deprecated
from .misc.data import save_json, load_json, detect_archive_format_and_open
logger = logging.getLogger(__name__)
def normed(v):
    """Return *v* scaled to unit L2 norm.

    A zero vector is returned unchanged instead of dividing by zero,
    which previously produced a NaN array and a RuntimeWarning.
    """
    norm = np.linalg.norm(v)
    if norm == 0:
        return v
    return v / norm
class Model(object):
    """Base class for vector-space models: a matrix of row vectors plus a
    vocabulary mapping words to row ids.  Subclasses supply cmp_vectors
    and set self.matrix / self.vocabulary when loading."""
    def __init__(self):
        self.provenance = ""
        self.name = ""
        self.metadata = {}
    def get_x_label(self, i):
        """Return the label for column *i* (the word with that id)."""
        return self.vocabulary.get_word_by_id(i)
    def get_most_informative_columns(self, rows, width):
        """Return ids of the *width* columns with the largest absolute summed
        mass over the (row-normalized) *rows*, most informative first."""
        xdim = rows.shape[1]
        scores = np.zeros(xdim)
        for i in range(rows.shape[0]):
            row = rows[i] / np.linalg.norm(rows[i])
            for j in range(len(row)):
                scores[j] += row[j]
        scores = abs(scores)
        # argsort is ascending: take the last *width* ids and reverse them
        tops = np.argsort(scores)
        return list(reversed(tops[-width:]))
    def filter_rows(self, ids_of_interest):
        """Stack the rows with the given ids into a dense 2-D array;
        negative (unknown-word) ids are skipped."""
        # return (cooccurrence[1].todense()[:width])
        xdim = self.matrix.shape[1]
        dense = np.empty([0, xdim])
        # dense=np.empty([0,width])
        for i in ids_of_interest:
            if i < 0:
                continue
            if sparse.issparse(self.matrix):
                row = self.matrix[i].todense()
            else:
                row = self.matrix[i]
            row = np.asarray(row)
            row = np.reshape(row, (xdim))
            # dense=np.vstack([dense,row[:width]])
            dense = np.vstack([dense, row])
        return (dense)
    def filter_submatrix(self, lst_words_initial, width):
        """Return (rows, submatrix, labels): the rows for the known words of
        *lst_words_initial*, restricted to the *width* most informative
        columns, plus those columns' labels."""
        words_of_interest = [
            w for w in lst_words_initial if self.vocabulary.get_id(w) >= 0]
        ids_of_interest = [self.vocabulary.get_id(
            w) for w in words_of_interest]
        rows = self.filter_rows(ids_of_interest)
        # xdim = rows.shape[1]
        # max_width = 25
        # width=min(xdim,max_width)
        vert = None # np.empty((rows.shape[0],0))
        cols = self.get_most_informative_columns(rows, width)
        for i in cols:
            if vert is None:
                vert = (rows[:, i])
            else:
                vert = np.vstack([vert, rows[:, i]])
        labels = [self.get_x_label(i) for i in cols]
        return rows, vert.T, labels
    def get_most_similar_vectors(self, u, cnt=10):
        """Return (id, score) pairs for the *cnt* rows most similar to *u*,
        best first (brute-force scan of the whole matrix)."""
        scores = np.zeros(self.matrix.shape[0], dtype=np.float32)
        for i in range(self.matrix.shape[0]):
            scores[i] = self.cmp_vectors(u, self.matrix[i])
        ids = np.argsort(scores)[::-1]
        ids = ids[:cnt]
        return zip(ids, scores[ids])
    def get_most_similar_words(self, w, cnt=10):
        """Like get_most_similar_vectors but accepts a word or a vector and
        returns [word, score] pairs."""
        if isinstance(w, str):
            vec = self.matrix[self.vocabulary.get_id(w)]
        else:
            vec = w
        rows = self.get_most_similar_vectors(vec, cnt)
        results = []
        for i in rows:
            results.append([self.vocabulary.get_word_by_id(i[0]), i[1]])
        return results
    def get_row(self, w):
        """Return the row vector for word *w*; raises for unknown words."""
        i = self.vocabulary.get_id(w)
        if i < 0:
            raise Exception('word do not exist', w)
            # return None
        row = self.matrix[i]
        return row
    def cmp_rows(self, id1, id2):
        """Similarity of the rows with ids *id1* and *id2*."""
        r1 = self.matrix[id1]
        r2 = self.matrix[id2]
        return self.cmp_vectors(r1, r2)
    def cmp_words(self, w1, w2):
        """Similarity of two words; 0 if either is out of vocabulary."""
        id1 = self.vocabulary.get_id(w1)
        id2 = self.vocabulary.get_id(w2)
        if (id1 < 0) or (id2 < 0):
            return 0
        return self.cmp_rows(id1, id2)
    def load_props(self, path):
        """Load <path>/props.json into self.props; empty dict when absent."""
        try:
            with open(os.path.join(path, "props.json"), "r") as myfile:
                str_props = myfile.read()
                self.props = json.loads(str_props)
        except FileNotFoundError:
            logger.warning("props.json not found")
            self.props = {}
            # exit(-1)
    def load_provenance(self, path):
        """Load <path>/provenance.txt (if present), then the props file."""
        try:
            with open(os.path.join(path, "provenance.txt"), "r") as myfile:
                self.provenance = myfile.read()
        except FileNotFoundError:
            logger.warning("provenance not found")
        self.load_props(path)
def normalize(m):
    """L2-normalize every row of a CSR matrix *m* in place.

    Bug fixes: the loop previously ran over range(m.shape[0] - 1) and so
    never normalized the last row (indptr has shape[0] + 1 entries, one
    slice per row), and a zero row caused a division by zero.
    """
    for i in range(m.shape[0]):
        start, end = m.indptr[i], m.indptr[i + 1]
        norm = np.linalg.norm(m.data[start:end])
        if norm > 0:
            m.data[start:end] /= norm
class Model_explicit(Model):
    """Sparse explicit (co-occurrence count) model backed by a CSR matrix."""
    def __init__(self):
        # Bug fix: the base __init__ was never invoked, so self.name did not
        # exist and "self.name +=" raised AttributeError on construction.
        super(Model_explicit, self).__init__()
        self.name += "explicit_"
    def cmp_vectors(self, r1, r2):
        """Cosine similarity between two sparse row vectors; 0 when NaN."""
        c = r1.dot(r2.T) / (np.linalg.norm(r1.data) * np.linalg.norm(r2.data))
        c = c[0, 0]
        if math.isnan(c):
            return 0
        return c
    def load_from_hdf5(self, path):
        """Load a CSR co-occurrence matrix stored as row_ptr/col_ind/data
        arrays in <path>/cooccurrence_csr.h5p, plus the vocabulary."""
        self.load_provenance(path)
        f = tables.open_file(os.path.join(path, 'cooccurrence_csr.h5p'), 'r')
        row_ptr = np.nan_to_num(f.root.row_ptr.read())
        col_ind = np.nan_to_num(f.root.col_ind.read())
        data = np.nan_to_num(f.root.data.read())
        # indptr has one more entry than there are rows
        dim = row_ptr.shape[0] - 1
        self.matrix = scipy.sparse.csr_matrix(
            (data, col_ind, row_ptr), shape=(dim, dim), dtype=np.float32)
        f.close()
        self.vocabulary = Vocabulary_cooccurrence()
        self.vocabulary.load(path)
        self.name += os.path.basename(os.path.normpath(path))
    def load(self, path):
        """Load matrix and vocabulary from vsmlib's binary sparse format."""
        self.load_provenance(path)
        self.vocabulary = Vocabulary_cooccurrence()
        self.vocabulary.load(path)
        self.name += os.path.basename(os.path.normpath(path))
        self.matrix = vsmlib.matrix.load_matrix_csr(path, verbose=True)
    def clip_negatives(self):
        """Zero out negative entries in place and drop explicit zeros."""
        self.matrix.data.clip(0, out=self.matrix.data)
        self.matrix.eliminate_zeros()
        self.name += "_pos"
        self.provenance += "\ntransform : clip negative"
    def normalize(self):
        """L2-normalize each row of the sparse matrix (module-level helper)."""
        normalize(self.matrix)
        self.name += "_normalized"
        self.provenance += "\ntransform : normalize"
        self.normalized = True
class ModelDense(Model):
    """Model whose vectors live in a dense numpy matrix (one row per word)."""
    def cmp_vectors(self, r1, r2):
        """Cosine similarity of two dense vectors; 0 when the result is NaN."""
        c = normed(r1) @ normed(r2)
        if math.isnan(c):
            return 0
        return c
    def save_matr_to_hdf5(self, path):
        """Write self.matrix to <path>/vectors.h5p as carray 'vectors'."""
        f = tables.open_file(os.path.join(path, 'vectors.h5p'), 'w')
        atom = tables.Atom.from_dtype(self.matrix.dtype)
        ds = f.create_carray(f.root, 'vectors', atom, self.matrix.shape)
        ds[:] = self.matrix
        ds.flush()
        f.close()
    def load_hdf5(self, path):
        """Read the embedding matrix back from <path>/vectors.h5p."""
        f = tables.open_file(os.path.join(path, 'vectors.h5p'), 'r')
        self.matrix = f.root.vectors.read()
        f.close()
    def save_to_dir(self, path):
        """Persist vocabulary, matrix (HDF5) and metadata under *path*."""
        if not os.path.exists(path):
            os.makedirs(path)
        self.vocabulary.save_to_dir(path)
        # self.matrix.tofile(os.path.join(path,"vectors.bin"))
        # np.save(os.path.join(path, "vectors.npy"), self.matrix)
        self.save_matr_to_hdf5(path)
        save_json(self.metadata, os.path.join(path, "metadata.json"))
    def load_with_alpha(self, path, power=0.6):
        """Load SVD factors (left vectors + sigma) from vectors.h5p and set
        self.matrix = U * diag(sigma ** power)."""
        self.load_provenance(path)
        f = tables.open_file(os.path.join(path, 'vectors.h5p'), 'r')
        # left = np.nan_to_num(f.root.vectors.read())
        left = f.root.vectors.read()
        sigma = f.root.sigma.read()
        logger.info("loaded left singular vectors and sigma")
        sigma = np.power(sigma, power)
        self.matrix = np.dot(left, np.diag(sigma))
        logger.info("computed the product")
        self.props["pow_sigma"] = power
        self.props["size_dimensions"] = self.matrix.shape[1]
        f.close()
        self.vocabulary = Vocabulary_simple()
        self.vocabulary.load(path)
        self.name += os.path.basename(os.path.normpath(path)) + "_a" + str(power)
    def load_from_dir(self, path):
        """Load a dense model saved as vectors.npy plus vocabulary/provenance."""
        self.matrix = np.load(os.path.join(path, "vectors.npy"))
        # self.load_with_alpha(0.6)
        self.vocabulary = Vocabulary_simple()
        self.vocabulary.load(path)
        self.name += os.path.basename(os.path.normpath(path))
        self.load_provenance(path)
    def normalize(self):
        """L2-normalize every row in place; zero rows are left untouched."""
        nrm = np.linalg.norm(self.matrix, axis=1)
        # avoid division by zero for all-zero rows
        nrm[nrm == 0] = 1
        self.matrix /= nrm[:, np.newaxis]
        self.name += "_normalized"
        self.provenance += "\ntransform : normalized"
        self.props["normalized"] = True
    def load_from_text(self, path):
        """Parse word2vec/GloVe-style text embeddings: optional "<count> <dim>"
        header line, then one "<word> <v1> ... <vn>" record per line."""
        i = 0
        # self.name+="_"+os.path.basename(os.path.normpath(path))
        self.vocabulary = vsmlib.vocabulary.Vocabulary()
        rows = []
        header = False
        with detect_archive_format_and_open(path) as f:
            for line in f:
                tokens = line.split()
                # a 2-token first line is treated as a header; NOTE(review): a
                # 1-dimensional embedding file would be misparsed here
                if i == 0 and len(tokens) == 2:
                    header = True
                    cnt_words = int(tokens[0])
                    size_embedding = int(tokens[1])
                    continue
                # word = tokens[0].decode('ascii',errors="ignore")
                # word = tokens[0].decode('UTF-8', errors="ignore")
                word = tokens[0]
                self.vocabulary.dic_words_ids[word] = i
                self.vocabulary.lst_words.append(word)
                str_vec = tokens[1:]
                row = np.zeros(len(str_vec), dtype=np.float32)
                for j in range(len(str_vec)):
                    row[j] = float(str_vec[j])
                rows.append(row)
                i += 1
        if header:
            assert cnt_words == len(rows)
        self.matrix = np.vstack(rows)
        if header:
            assert size_embedding == self.matrix.shape[1]
        # frequencies are unknown for plain-text input; fill with zeros
        self.vocabulary.lst_frequencies = np.zeros(len(self.vocabulary.lst_words))
        # self.name += "_{}".format(len(rows[0]))
class ModelNumbered(ModelDense):
    """Dense model whose dimensions carry no word labels - columns are just numbered."""
    def get_x_label(self, i):
        """Column label is simply the column index."""
        return i
    def viz_wordlist(self, wl, colored=False, show_legend=False):
        """Bar-plot the unit-normalized vector of each word in *wl*.

        colored: use a qualitative palette instead of translucent black.
        show_legend: add a matplotlib legend labeled with the words.
        """
        colors = brewer2mpl.get_map('Set2', 'qualitative', 8).mpl_colors
        cnt = 0
        for i in wl:
            row = self.get_row(i)
            row = row / np.linalg.norm(row)
            if colored:
                # cycle through the palette so more than 8 words no longer IndexError
                plt.bar(range(0, len(row)), row, color=colors[cnt % len(colors)], linewidth=0, alpha=0.6, label=i)
            else:
                plt.bar(range(0, len(row)), row, color="black", linewidth=0, alpha=1 / len(wl), label=i)
            cnt += 1
        if show_legend:
            plt.legend()
class Model_Levi(ModelNumbered):
    """Loader for embeddings produced by Levi & Goldberg's hyperwords toolkit."""
    def load_from_dir(self, path):
        """Load sgns.contexts.npy and the word list from *path*.

        Bug fix: the vocabulary was previously read from a hard-coded
        machine-local path (/home/blackbird/...); it is now derived from
        *path* like the matrix itself.
        """
        self.name = "Levi_" + os.path.basename(os.path.normpath(path))
        self.matrix = np.load(os.path.join(path, "sgns.contexts.npy"))
        self.vocabulary = vsmlib.vocabulary.Vocabulary_simple()
        self.vocabulary.dir_root = path
        self.vocabulary.load_list_from_sorted_file(
            os.path.join(path, "sgns.words.vocab"))
        self.vocabulary.dic_words_ids = {}
        for i, w in enumerate(self.vocabulary.lst_words):
            self.vocabulary.dic_words_ids[w] = i
class Model_svd_scipy(ModelNumbered):
    """Truncated-SVD projection of another model's matrix."""
    def __init__(self, original, cnt_singular_vectors, power):
        """Factor original.matrix with sparse SVD; keep U * diag(sigma ** power)."""
        # which: LM SM LA SA BE
        u_left, singular_values, _vt = scipy.sparse.linalg.svds(
            original.matrix, k=cnt_singular_vectors, which='LM')
        self.sigma = singular_values
        weighted = np.power(singular_values, power)
        self.matrix = np.dot(u_left, np.diag(weighted))
        self.vocabulary = original.vocabulary
        suffix = "\napplied scipy.linal.svd, {} singular vectors, sigma in the power of {}".format(
            cnt_singular_vectors, power)
        self.provenance = original.provenance + suffix
        self.name = original.name + "_svd_{}_C{}".format(cnt_singular_vectors, power)
class Model_w2v(ModelNumbered):
    """Reader for binary word2vec ``vectors.bin`` files."""
    @staticmethod
    def load_word(f):
        """Read bytes from *f* up to (and consuming) the next space.

        Bug fix: previously looped forever at EOF because read(1) returning
        b'' never equals b' '; now EOF terminates the word as well.
        """
        result = b''
        while True:
            w = f.read(1)
            if not w or w == b' ':
                break
            result += w
        return result
    def load_from_file(self, filename):
        """Parse a word2vec binary file: a "<rows> <dim>" header line, then
        one "<word> <dim float32s>" record per row."""
        self.vocabulary = Vocabulary()
        # with-statement ensures the handle is closed even if parsing fails
        with open(filename, "rb") as f:
            header = f.readline().split()
            cnt_rows = int(header[0])
            size_row = int(header[1])
            self.name += "_{}".format(size_row)
            self.matrix = np.zeros((cnt_rows, size_row), dtype=np.float32)
            # lazy %-style args avoid formatting when DEBUG is off
            logger.debug("cnt rows = %s, size row = %s", cnt_rows, size_row)
            for i in range(cnt_rows):
                word = Model_w2v.load_word(f).decode(
                    'UTF-8', errors="ignore").strip()
                self.vocabulary.dic_words_ids[word] = i
                self.vocabulary.lst_words.append(word)
                s_row = f.read(size_row * 4)
                # np.frombuffer replaces the deprecated np.fromstring
                row = np.frombuffer(s_row, dtype=np.float32)
                self.matrix[i] = row
    def load_from_dir(self, path):
        """Load vectors.bin plus provenance metadata from directory *path*."""
        self.name += "w2v_" + os.path.basename(os.path.normpath(path))
        self.load_from_file(os.path.join(path, "vectors.bin"))
        self.load_provenance(path)
@deprecated
class Model_glove(ModelNumbered):
    """Legacy loader for GloVe embeddings stored as gzipped text."""
    def __init__(self):
        self.name = "glove"
    def load_from_dir(self, path):
        """Scan *path* for .gz files and load each one as text embeddings."""
        self.name = "glove_" + os.path.basename(os.path.normpath(path))
        for entry in os.listdir(path):
            if entry.endswith(".gz"):
                logger.info("this is Glove")
                self.load_from_text(os.path.join(path, entry))
def load_from_dir(path):
    """Detect the embedding format stored under *path*, load it and return a model.

    Detection order: sparse explicit (HDF5), sparse explicit (binary),
    word2vec binary, Levi npy, dense npy, vsmlib HDF5, then plain-text
    files (.gz/.bz/.txt).  Raises RuntimeError if nothing matches.
    """
    if os.path.isfile(os.path.join(path, "cooccurrence_csr.h5p")):
        logger.info("this is sparse explicit in hdf5")
        m = vsmlib.Model_explicit()
        m.load_from_hdf5(path)
        return m
    if os.path.isfile(os.path.join(path, "bigrams.data.bin")):
        logger.info("this is sparse explicit")
        m = vsmlib.Model_explicit()
        m.load(path)
        return m
    if os.path.isfile(os.path.join(path, "vectors.bin")):
        logger.info("this is w2v")
        m = vsmlib.Model_w2v()
        m.load_from_dir(path)
        return m
    if os.path.isfile(os.path.join(path, "sgns.words.npy")):
        m = Model_Levi()
        m.load_from_dir(path)
        logger.info("this is Levi")
        return m
    if os.path.isfile(os.path.join(path, "vectors.npy")):
        m = vsmlib.ModelNumbered()
        m.load_from_dir(path)
        logger.info("detected is dense ")
        return m
    if os.path.isfile(os.path.join(path, "vectors.h5p")):
        m = vsmlib.ModelNumbered()
        m.load_hdf5(path)
        logger.info("detected vsmlib format ")
        return m
    for f in os.listdir(path):
        if f.endswith(".gz") or f.endswith(".bz") or f.endswith(".txt"):
            # Bug fix: logger.info(path, "detected as text") passed *path* as
            # the format string, making logging's %-formatting fail; use lazy
            # %-style arguments instead.
            logger.info("%s detected as text", path)
            m = ModelNumbered()
            m.load_from_text(os.path.join(path, f))
            return m
    raise RuntimeError("can not detect embeddings format")
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import os
import re
from lib.controller.action import action
from lib.controller.checks import checkSqlInjection
from lib.controller.checks import checkDynParam
from lib.controller.checks import checkStability
from lib.controller.checks import checkString
from lib.controller.checks import checkRegexp
from lib.controller.checks import checkConnection
from lib.controller.checks import checkNullConnection
from lib.controller.checks import checkWaf
from lib.controller.checks import heuristicCheckSqlInjection
from lib.controller.checks import identifyWaf
from lib.core.agent import agent
from lib.core.common import extractRegexResult
from lib.core.common import getFilteredPageContent
from lib.core.common import getPublicTypeMembers
from lib.core.common import getUnicode
from lib.core.common import hashDBRetrieve
from lib.core.common import hashDBWrite
from lib.core.common import intersect
from lib.core.common import parseTargetUrl
from lib.core.common import randomStr
from lib.core.common import readInput
from lib.core.common import safeCSValue
from lib.core.common import showHttpErrorCodes
from lib.core.common import urlencode
from lib.core.common import urldecode
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.enums import CONTENT_TYPE
from lib.core.enums import HASHDB_KEYS
from lib.core.enums import HEURISTIC_TEST
from lib.core.enums import HTTPMETHOD
from lib.core.enums import PAYLOAD
from lib.core.enums import PLACE
from lib.core.exception import SqlmapBaseException
from lib.core.exception import SqlmapNoneDataException
from lib.core.exception import SqlmapNotVulnerableException
from lib.core.exception import SqlmapSilentQuitException
from lib.core.exception import SqlmapValueException
from lib.core.exception import SqlmapUserQuitException
from lib.core.settings import ASP_NET_CONTROL_REGEX
from lib.core.settings import DEFAULT_GET_POST_DELIMITER
from lib.core.settings import EMPTY_FORM_FIELDS_REGEX
from lib.core.settings import IGNORE_PARAMETERS
from lib.core.settings import LOW_TEXT_PERCENT
from lib.core.settings import HOST_ALIASES
from lib.core.settings import REFERER_ALIASES
from lib.core.settings import USER_AGENT_ALIASES
from lib.core.target import initTargetEnv
from lib.core.target import setupTargetEnv
from thirdparty.pagerank.pagerank import get_pagerank
def _selectInjection():
    """
    Selection function for injection place, parameters and type.
    """
    points = {}
    # merge injections that share (place, parameter, ptype) into one entry;
    # non-'data' attributes keep the first truthy value, 'data' is unioned
    for injection in kb.injections:
        place = injection.place
        parameter = injection.parameter
        ptype = injection.ptype
        point = (place, parameter, ptype)
        if point not in points:
            points[point] = injection
        else:
            for key in points[point].keys():
                if key != 'data':
                    points[point][key] = points[point][key] or injection[key]
            points[point]['data'].update(injection['data'])
    if len(points) == 1:
        # single distinct point: no need to ask the user
        kb.injection = kb.injections[0]
    elif len(points) > 1:
        message = "there were multiple injection points, please select "
        message += "the one to use for following injections:\n"
        # reuse of the name: from here on 'points' is a list of already-listed
        # (place, parameter, ptype) tuples used for de-duplicating the menu
        points = []
        for i in xrange(0, len(kb.injections)):
            place = kb.injections[i].place
            parameter = kb.injections[i].parameter
            ptype = kb.injections[i].ptype
            point = (place, parameter, ptype)
            if point not in points:
                points.append(point)
                ptype = PAYLOAD.PARAMETER[ptype] if isinstance(ptype, int) else ptype
                message += "[%d] place: %s, parameter: " % (i, place)
                message += "%s, type: %s" % (parameter, ptype)
                if i == 0:
                    message += " (default)"
                message += "\n"
        message += "[q] Quit"
        select = readInput(message, default="0")
        # NOTE(review): an empty answer with no default would IndexError on
        # select[0]; here readInput's default "0" prevents that
        if select.isdigit() and int(select) < len(kb.injections) and int(select) >= 0:
            index = int(select)
        elif select[0] in ("Q", "q"):
            raise SqlmapUserQuitException
        else:
            errMsg = "invalid choice"
            raise SqlmapValueException(errMsg)
        kb.injection = kb.injections[index]
def _formatInjection(inj):
    """Render one injection object as the human-readable multi-line block
    shown in sqlmap's results output."""
    data = "Place: %s\n" % inj.place
    data += "Parameter: %s\n" % inj.parameter
    # one section per recorded technique (stype)
    for stype, sdata in inj.data.items():
        title = sdata.title
        vector = sdata.vector
        comment = sdata.comment
        payload = agent.adjustLateValues(sdata.payload)
        if inj.place == PLACE.CUSTOM_HEADER:
            # custom-header payloads are stored as "<name>,<value>"; show the value
            payload = payload.split(',', 1)[1]
        if stype == PAYLOAD.TECHNIQUE.UNION:
            # recover the real UNION column count from the payload and patch
            # the "N to M columns" wording in the title accordingly
            count = re.sub(r"(?i)(\(.+\))|(\blimit[^A-Za-z]+)", "", sdata.payload).count(',') + 1
            title = re.sub(r"\d+ to \d+", str(count), title)
            vector = agent.forgeUnionQuery("[QUERY]", vector[0], vector[1], vector[2], None, None, vector[5], vector[6])
            if count == 1:
                title = title.replace("columns", "column")
        elif comment:
            vector = "%s%s" % (vector, comment)
        data += " Type: %s\n" % PAYLOAD.SQLINJECTION[stype]
        data += " Title: %s\n" % title
        data += " Payload: %s\n" % urldecode(payload, unsafe="&")
        # the conditional applies to the whole expression: the vector line is
        # only appended at verbosity > 1, otherwise just a blank line
        data += " Vector: %s\n\n" % vector if conf.verbose > 1 else "\n"
    return data
def _showInjections():
    """Dump all confirmed injection points to the user (or the API consumer)."""
    header = "sqlmap identified the following injection points with "
    header += "a total of %d HTTP(s) requests" % kb.testQueryCount
    if hasattr(conf, "api"):
        # API mode: emit the raw injection objects instead of formatted text
        conf.dumper.string("", kb.injections, content_type=CONTENT_TYPE.TECHNIQUES)
    else:
        # de-duplicate identical formatted blocks via the set
        data = "".join(set(_formatInjection(injection) for injection in kb.injections)).rstrip("\n")
        conf.dumper.string(header, data)
    if conf.tamper:
        warnMsg = "changes made by tampering scripts are not "
        warnMsg += "included in shown payload content(s)"
        logger.warn(warnMsg)
    if conf.hpp:
        warnMsg = "changes made by HTTP parameter pollution are not "
        warnMsg += "included in shown payload content(s)"
        logger.warn(warnMsg)
def _randomFillBlankFields(value):
    """Optionally pad empty form fields in *value* with random strings."""
    retVal = value
    if extractRegexResult(EMPTY_FORM_FIELDS_REGEX, value):
        message = "do you want to fill blank fields with random values? [Y/n] "
        choice = readInput(message, default="Y")
        if not choice or choice[0] in ("y", "Y"):
            for match in re.finditer(EMPTY_FORM_FIELDS_REGEX, retVal):
                item = match.group("result")
                # skip session-like parameters and ASP.NET controls
                if any(_ in item for _ in IGNORE_PARAMETERS) or re.search(ASP_NET_CONTROL_REGEX, item):
                    continue
                if item[-1] == DEFAULT_GET_POST_DELIMITER:
                    retVal = retVal.replace(item, "%s%s%s" % (item[:-1], randomStr(), DEFAULT_GET_POST_DELIMITER))
                else:
                    retVal = retVal.replace(item, "%s%s" % (item, randomStr()))
    return retVal
def _saveToHashDB():
    """Merge this run's findings into the session hashdb so later runs can resume."""
    # union new injections with previously stored ones, keyed by
    # (place, parameter, ptype); per-key 'data' payloads are merged
    injections = hashDBRetrieve(HASHDB_KEYS.KB_INJECTIONS, True) or []
    injections.extend(_ for _ in kb.injections if _ and _.place is not None and _.parameter is not None)
    _ = dict()
    for injection in injections:
        key = (injection.place, injection.parameter, injection.ptype)
        if key not in _:
            _[key] = injection
        else:
            _[key].data.update(injection.data)
    hashDBWrite(HASHDB_KEYS.KB_INJECTIONS, _.values(), True)
    # persist the union of absolute file paths discovered so far
    _ = hashDBRetrieve(HASHDB_KEYS.KB_ABS_FILE_PATHS, True) or set()
    _.update(kb.absFilePaths)
    hashDBWrite(HASHDB_KEYS.KB_ABS_FILE_PATHS, _, True)
    # chars/dynamic markings are written only once per session
    if not hashDBRetrieve(HASHDB_KEYS.KB_CHARS):
        hashDBWrite(HASHDB_KEYS.KB_CHARS, kb.chars, True)
    if not hashDBRetrieve(HASHDB_KEYS.KB_DYNAMIC_MARKINGS):
        hashDBWrite(HASHDB_KEYS.KB_DYNAMIC_MARKINGS, kb.dynamicMarkings, True)
def _saveToResultsFile():
    """Append confirmed injection points (or a blank record) to the CSV results file."""
    if not conf.resultsFP:
        return
    results = {}
    # map technique value -> technique name (e.g. for the one-letter summary)
    techniques = dict((value, name) for name, value in getPublicTypeMembers(PAYLOAD.TECHNIQUE))
    for inj in kb.injections:
        if inj.place is None or inj.parameter is None:
            continue
        results.setdefault((inj.place, inj.parameter), []).extend(inj.data.keys())
    for (place, parameter), value in results.items():
        line = "%s,%s,%s,%s%s" % (safeCSValue(kb.originalUrls.get(conf.url) or conf.url), place, parameter, "".join(techniques[x][0].upper() for x in sorted(value)), os.linesep)
        conf.resultsFP.writelines(line)
    if not results:
        conf.resultsFP.writelines("%s,,,%s" % (conf.url, os.linesep))
def start():
"""
This function calls a function that performs checks on both URL
stability and all GET, POST, Cookie and User-Agent parameters to
check if they are dynamic and SQL injection affected
"""
if conf.direct:
initTargetEnv()
setupTargetEnv()
action()
return True
if conf.url and not any((conf.forms, conf.crawlDepth)):
kb.targets.add((conf.url, conf.method, conf.data, conf.cookie))
if conf.configFile and not kb.targets:
errMsg = "you did not edit the configuration file properly, set "
errMsg += "the target URL, list of targets or google dork"
logger.error(errMsg)
return False
if kb.targets and len(kb.targets) > 1:
infoMsg = "sqlmap got a total of %d targets" % len(kb.targets)
logger.info(infoMsg)
hostCount = 0
for targetUrl, targetMethod, targetData, targetCookie in kb.targets:
try:
conf.url = targetUrl
conf.method = targetMethod
conf.data = targetData
conf.cookie = targetCookie
initTargetEnv()
parseTargetUrl()
testSqlInj = False
if PLACE.GET in conf.parameters and not any([conf.data, conf.testParameter]):
for parameter in re.findall(r"([^=]+)=([^%s]+%s?|\Z)" % (conf.pDel or DEFAULT_GET_POST_DELIMITER, conf.pDel or DEFAULT_GET_POST_DELIMITER), conf.parameters[PLACE.GET]):
paramKey = (conf.hostname, conf.path, PLACE.GET, parameter[0])
if paramKey not in kb.testedParams:
testSqlInj = True
break
else:
paramKey = (conf.hostname, conf.path, None, None)
if paramKey not in kb.testedParams:
testSqlInj = True
testSqlInj &= conf.hostname not in kb.vulnHosts
if not testSqlInj:
infoMsg = "skipping '%s'" % targetUrl
logger.info(infoMsg)
continue
if conf.multipleTargets:
hostCount += 1
if conf.forms:
message = "[#%d] form:\n%s %s" % (hostCount, conf.method or HTTPMETHOD.GET, targetUrl)
else:
message = "URL %d:\n%s %s%s" % (hostCount, conf.method or HTTPMETHOD.GET, targetUrl, " (PageRank: %s)" % get_pagerank(targetUrl) if conf.googleDork and conf.pageRank else "")
if conf.cookie:
message += "\nCookie: %s" % conf.cookie
if conf.data is not None:
message += "\nPOST data: %s" % urlencode(conf.data) if conf.data else ""
if conf.forms:
if conf.method == HTTPMETHOD.GET and targetUrl.find("?") == -1:
continue
message += "\ndo you want to test this form? [Y/n/q] "
test = readInput(message, default="Y")
if not test or test[0] in ("y", "Y"):
if conf.method == HTTPMETHOD.POST:
message = "Edit POST data [default: %s]%s: " % (urlencode(conf.data) if conf.data else "None", " (Warning: blank fields detected)" if conf.data and extractRegexResult(EMPTY_FORM_FIELDS_REGEX, conf.data) else "")
conf.data = readInput(message, default=conf.data)
conf.data = _randomFillBlankFields(conf.data)
conf.data = urldecode(conf.data) if conf.data and urlencode(DEFAULT_GET_POST_DELIMITER, None) not in conf.data else conf.data
elif conf.method == HTTPMETHOD.GET:
if targetUrl.find("?") > -1:
firstPart = targetUrl[:targetUrl.find("?")]
secondPart = targetUrl[targetUrl.find("?") + 1:]
message = "Edit GET data [default: %s]: " % secondPart
test = readInput(message, default=secondPart)
test = _randomFillBlankFields(test)
conf.url = "%s?%s" % (firstPart, test)
parseTargetUrl()
elif test[0] in ("n", "N"):
continue
elif test[0] in ("q", "Q"):
break
else:
message += "\ndo you want to test this URL? [Y/n/q]"
test = readInput(message, default="Y")
if not test or test[0] in ("y", "Y"):
pass
elif test[0] in ("n", "N"):
continue
elif test[0] in ("q", "Q"):
break
infoMsg = "testing URL '%s'" % targetUrl
logger.info(infoMsg)
setupTargetEnv()
if not checkConnection(suppressOutput=conf.forms) or not checkString() or not checkRegexp():
continue
if conf.checkWaf:
checkWaf()
if conf.identifyWaf:
identifyWaf()
if conf.nullConnection:
checkNullConnection()
if (len(kb.injections) == 0 or (len(kb.injections) == 1 and kb.injections[0].place is None)) \
and (kb.injection.place is None or kb.injection.parameter is None):
if not any((conf.string, conf.notString, conf.regexp)) and PAYLOAD.TECHNIQUE.BOOLEAN in conf.tech:
# NOTE: this is not needed anymore, leaving only to display
# a warning message to the user in case the page is not stable
checkStability()
# Do a little prioritization reorder of a testable parameter list
parameters = conf.parameters.keys()
# Order of testing list (first to last)
orderList = (PLACE.CUSTOM_POST, PLACE.CUSTOM_HEADER, PLACE.URI, PLACE.POST, PLACE.GET)
for place in orderList[::-1]:
if place in parameters:
parameters.remove(place)
parameters.insert(0, place)
proceed = True
for place in parameters:
# Test User-Agent and Referer headers only if
# --level >= 3
skip = (place == PLACE.USER_AGENT and conf.level < 3)
skip |= (place == PLACE.REFERER and conf.level < 3)
# Test Host header only if
# --level >= 5
skip |= (place == PLACE.HOST and conf.level < 5)
# Test Cookie header only if --level >= 2
skip |= (place == PLACE.COOKIE and conf.level < 2)
skip |= (place == PLACE.USER_AGENT and intersect(USER_AGENT_ALIASES, conf.skip, True) not in ([], None))
skip |= (place == PLACE.REFERER and intersect(REFERER_ALIASES, conf.skip, True) not in ([], None))
skip |= (place == PLACE.COOKIE and intersect(PLACE.COOKIE, conf.skip, True) not in ([], None))
skip &= not (place == PLACE.USER_AGENT and intersect(USER_AGENT_ALIASES, conf.testParameter, True))
skip &= not (place == PLACE.REFERER and intersect(REFERER_ALIASES, conf.testParameter, True))
skip &= not (place == PLACE.HOST and intersect(HOST_ALIASES, conf.testParameter, True))
skip &= not (place == PLACE.COOKIE and intersect((PLACE.COOKIE,), conf.testParameter, True))
if skip:
continue
if place not in conf.paramDict:
continue
paramDict = conf.paramDict[place]
for parameter, value in paramDict.items():
if not proceed:
break
kb.vainRun = False
testSqlInj = True
paramKey = (conf.hostname, conf.path, place, parameter)
if paramKey in kb.testedParams:
testSqlInj = False
infoMsg = "skipping previously processed %s parameter '%s'" % (place, parameter)
logger.info(infoMsg)
elif parameter in conf.testParameter:
pass
elif parameter == conf.rParam:
testSqlInj = False
infoMsg = "skipping randomizing %s parameter '%s'" % (place, parameter)
logger.info(infoMsg)
elif parameter in conf.skip:
testSqlInj = False
infoMsg = "skipping %s parameter '%s'" % (place, parameter)
logger.info(infoMsg)
# Ignore session-like parameters for --level < 4
elif conf.level < 4 and parameter.upper() in IGNORE_PARAMETERS:
testSqlInj = False
infoMsg = "ignoring %s parameter '%s'" % (place, parameter)
logger.info(infoMsg)
elif PAYLOAD.TECHNIQUE.BOOLEAN in conf.tech:
check = checkDynParam(place, parameter, value)
if not check:
warnMsg = "%s parameter '%s' does not appear dynamic" % (place, parameter)
logger.warn(warnMsg)
else:
infoMsg = "%s parameter '%s' is dynamic" % (place, parameter)
logger.info(infoMsg)
kb.testedParams.add(paramKey)
if testSqlInj:
check = heuristicCheckSqlInjection(place, parameter)
if check != HEURISTIC_TEST.POSITIVE:
if conf.smart or (kb.ignoreCasted and check == HEURISTIC_TEST.CASTED):
infoMsg = "skipping %s parameter '%s'" % (place, parameter)
logger.info(infoMsg)
continue
infoMsg = "testing for SQL injection on %s " % place
infoMsg += "parameter '%s'" % parameter
logger.info(infoMsg)
injection = checkSqlInjection(place, parameter, value)
proceed = not kb.endDetection
if injection is not None and injection.place is not None:
kb.injections.append(injection)
# In case when user wants to end detection phase (Ctrl+C)
if not proceed:
break
msg = "%s parameter '%s' " % (injection.place, injection.parameter)
msg += "is vulnerable. Do you want to keep testing the others (if any)? [y/N] "
test = readInput(msg, default="N")
if test[0] not in ("y", "Y"):
proceed = False
paramKey = (conf.hostname, conf.path, None, None)
kb.testedParams.add(paramKey)
else:
warnMsg = "%s parameter '%s' is not " % (place, parameter)
warnMsg += "injectable"
logger.warn(warnMsg)
if len(kb.injections) == 0 or (len(kb.injections) == 1 and kb.injections[0].place is None):
if kb.vainRun and not conf.multipleTargets:
errMsg = "no parameter(s) found for testing in the provided data "
errMsg += "(e.g. GET parameter 'id' in 'www.site.com/index.php?id=1')"
raise SqlmapNoneDataException(errMsg)
else:
errMsg = "all tested parameters appear to be not injectable."
if conf.level < 5 or conf.risk < 3:
errMsg += " Try to increase '--level'/'--risk' values "
errMsg += "to perform more tests."
if isinstance(conf.tech, list) and len(conf.tech) < 5:
errMsg += " Rerun without providing the option '--technique'."
if not conf.textOnly and kb.originalPage:
percent = (100.0 * len(getFilteredPageContent(kb.originalPage)) / len(kb.originalPage))
if kb.dynamicMarkings:
errMsg += " You can give it a go with the switch '--text-only' "
errMsg += "if the target page has a low percentage "
errMsg += "of textual content (~%.2f%% of " % percent
errMsg += "page content is text)."
elif percent < LOW_TEXT_PERCENT and not kb.errorIsNone:
errMsg += " Please retry with the switch '--text-only' "
errMsg += "(along with --technique=BU) as this case "
errMsg += "looks like a perfect candidate "
errMsg += "(low textual content along with inability "
errMsg += "of comparison engine to detect at least "
errMsg += "one dynamic parameter)."
if kb.heuristicTest == HEURISTIC_TEST.POSITIVE:
errMsg += " As heuristic test turned out positive you are "
errMsg += "strongly advised to continue on with the tests. "
errMsg += "Please, consider usage of tampering scripts as "
errMsg += "your target might filter the queries."
if not conf.string and not conf.notString and not conf.regexp:
errMsg += " Also, you can try to rerun by providing "
errMsg += "either a valid value for option '--string' "
errMsg += "(or '--regexp')"
elif conf.string:
errMsg += " Also, you can try to rerun by providing a "
errMsg += "valid value for option '--string' as perhaps the string you "
errMsg += "have choosen does not match "
errMsg += "exclusively True responses"
elif conf.regexp:
errMsg += " Also, you can try to rerun by providing a "
errMsg += "valid value for option '--regexp' as perhaps the regular "
errMsg += "expression that you have choosen "
errMsg += "does not match exclusively True responses"
raise SqlmapNotVulnerableException(errMsg)
else:
# Flush the flag
kb.testMode = False
_saveToResultsFile()
_saveToHashDB()
_showInjections()
_selectInjection()
if kb.injection.place is not None and kb.injection.parameter is not None:
if conf.multipleTargets:
message = "do you want to exploit this SQL injection? [Y/n] "
exploit = readInput(message, default="Y")
condition = not exploit or exploit[0] in ("y", "Y")
else:
condition = True
if condition:
action()
except KeyboardInterrupt:
if conf.multipleTargets:
warnMsg = "user aborted in multiple target mode"
logger.warn(warnMsg)
message = "do you want to skip to the next target in list? [Y/n/q]"
test = readInput(message, default="Y")
if not test or test[0] in ("y", "Y"):
pass
elif test[0] in ("n", "N"):
return False
elif test[0] in ("q", "Q"):
raise SqlmapUserQuitException
else:
raise
except SqlmapUserQuitException:
raise
except SqlmapSilentQuitException:
raise
except SqlmapBaseException, e:
e = getUnicode(e)
if conf.multipleTargets:
e += ", skipping to the next %s" % ("form" if conf.forms else "URL")
logger.error(e)
else:
logger.critical(e)
return False
finally:
showHttpErrorCodes()
if kb.maxConnectionsFlag:
warnMsg = "it appears that the target "
warnMsg += "has a maximum connections "
warnMsg += "constraint"
logger.warn(warnMsg)
if kb.dataOutputFlag and not conf.multipleTargets:
logger.info("fetched data logged to text files under '%s'" % conf.outputPath)
if conf.multipleTargets and conf.resultsFilename:
infoMsg = "you can find results of scanning in multiple targets "
infoMsg += "mode inside the CSV file '%s'" % conf.resultsFilename
logger.info(infoMsg)
return True
# Cosmetic fix  (stray commit-message line from the dataset join; kept as a comment so the file parses)
#!/usr/bin/env python
"""
Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import os
import re
from lib.controller.action import action
from lib.controller.checks import checkSqlInjection
from lib.controller.checks import checkDynParam
from lib.controller.checks import checkStability
from lib.controller.checks import checkString
from lib.controller.checks import checkRegexp
from lib.controller.checks import checkConnection
from lib.controller.checks import checkNullConnection
from lib.controller.checks import checkWaf
from lib.controller.checks import heuristicCheckSqlInjection
from lib.controller.checks import identifyWaf
from lib.core.agent import agent
from lib.core.common import extractRegexResult
from lib.core.common import getFilteredPageContent
from lib.core.common import getPublicTypeMembers
from lib.core.common import getUnicode
from lib.core.common import hashDBRetrieve
from lib.core.common import hashDBWrite
from lib.core.common import intersect
from lib.core.common import parseTargetUrl
from lib.core.common import randomStr
from lib.core.common import readInput
from lib.core.common import safeCSValue
from lib.core.common import showHttpErrorCodes
from lib.core.common import urlencode
from lib.core.common import urldecode
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.enums import CONTENT_TYPE
from lib.core.enums import HASHDB_KEYS
from lib.core.enums import HEURISTIC_TEST
from lib.core.enums import HTTPMETHOD
from lib.core.enums import PAYLOAD
from lib.core.enums import PLACE
from lib.core.exception import SqlmapBaseException
from lib.core.exception import SqlmapNoneDataException
from lib.core.exception import SqlmapNotVulnerableException
from lib.core.exception import SqlmapSilentQuitException
from lib.core.exception import SqlmapValueException
from lib.core.exception import SqlmapUserQuitException
from lib.core.settings import ASP_NET_CONTROL_REGEX
from lib.core.settings import DEFAULT_GET_POST_DELIMITER
from lib.core.settings import EMPTY_FORM_FIELDS_REGEX
from lib.core.settings import IGNORE_PARAMETERS
from lib.core.settings import LOW_TEXT_PERCENT
from lib.core.settings import HOST_ALIASES
from lib.core.settings import REFERER_ALIASES
from lib.core.settings import USER_AGENT_ALIASES
from lib.core.target import initTargetEnv
from lib.core.target import setupTargetEnv
from thirdparty.pagerank.pagerank import get_pagerank
def _selectInjection():
    """
    Selection function for injection place, parameters and type.

    Collapses kb.injections down to distinct (place, parameter, ptype)
    points; when more than one distinct point remains, prompts the user
    to pick the one used for all subsequent exploitation.
    """
    points = {}

    # First pass: merge duplicate injection entries that target the same
    # (place, parameter, ptype) point
    for injection in kb.injections:
        place = injection.place
        parameter = injection.parameter
        ptype = injection.ptype
        point = (place, parameter, ptype)

        if point not in points:
            points[point] = injection
        else:
            # Keep first non-empty value for every attribute except 'data',
            # whose per-technique entries are merged instead
            for key in points[point].keys():
                if key != 'data':
                    points[point][key] = points[point][key] or injection[key]
            points[point]['data'].update(injection['data'])

    if len(points) == 1:
        kb.injection = kb.injections[0]
    elif len(points) > 1:
        message = "there were multiple injection points, please select "
        message += "the one to use for following injections:\n"

        # Reused as a plain list here: tracks which points were already
        # listed so the menu shows each distinct point only once
        points = []

        for i in xrange(0, len(kb.injections)):
            place = kb.injections[i].place
            parameter = kb.injections[i].parameter
            ptype = kb.injections[i].ptype
            point = (place, parameter, ptype)

            if point not in points:
                points.append(point)
                # ptype may be a numeric enum value; map it to its label
                ptype = PAYLOAD.PARAMETER[ptype] if isinstance(ptype, int) else ptype
                message += "[%d] place: %s, parameter: " % (i, place)
                message += "%s, type: %s" % (parameter, ptype)

                if i == 0:
                    message += " (default)"

                message += "\n"

        message += "[q] Quit"
        select = readInput(message, default="0")

        if select.isdigit() and int(select) < len(kb.injections) and int(select) >= 0:
            index = int(select)
        elif select[0] in ("Q", "q"):
            raise SqlmapUserQuitException
        else:
            errMsg = "invalid choice"
            raise SqlmapValueException(errMsg)

        kb.injection = kb.injections[index]
def _formatInjection(inj):
    """Return a printable multi-line description of one injection point,
    listing every confirmed technique with its title, payload and
    (verbose mode only) vector."""
    chunks = ["Place: %s\n" % inj.place, "Parameter: %s\n" % inj.parameter]

    for technique, details in inj.data.items():
        title = details.title
        vector = details.vector
        comment = details.comment
        payload = agent.adjustLateValues(details.payload)

        if inj.place == PLACE.CUSTOM_HEADER:
            # header payloads are stored as "name,value" -- display only the value
            payload = payload.split(',', 1)[1]

        if technique == PAYLOAD.TECHNIQUE.UNION:
            # derive the real column count from the payload (ignoring
            # parenthesized expressions and LIMIT clauses) and patch the title
            count = re.sub(r"(?i)(\(.+\))|(\blimit[^A-Za-z]+)", "", details.payload).count(',') + 1
            title = re.sub(r"\d+ to \d+", str(count), title)
            vector = agent.forgeUnionQuery("[QUERY]", vector[0], vector[1], vector[2], None, None, vector[5], vector[6])
            if count == 1:
                title = title.replace("columns", "column")
        elif comment:
            vector = "%s%s" % (vector, comment)

        chunks.append(" Type: %s\n" % PAYLOAD.SQLINJECTION[technique])
        chunks.append(" Title: %s\n" % title)
        chunks.append(" Payload: %s\n" % urldecode(payload, unsafe="&", plusspace=(inj.place == PLACE.POST and kb.postSpaceToPlus)))
        chunks.append(" Vector: %s\n\n" % vector if conf.verbose > 1 else "\n")

    return "".join(chunks)
def _showInjections():
    """Report all confirmed injection points through the configured dumper,
    warning when the shown payloads differ from what was actually sent."""
    header = "sqlmap identified the following injection points with "
    header += "a total of %d HTTP(s) requests" % kb.testQueryCount

    if hasattr(conf, "api"):
        # API consumers receive the raw injection objects, not formatted text
        conf.dumper.string("", kb.injections, content_type=CONTENT_TYPE.TECHNIQUES)
    else:
        unique = set(_formatInjection(injection) for injection in kb.injections)
        conf.dumper.string(header, "".join(unique).rstrip("\n"))

    if conf.tamper:
        logger.warn("changes made by tampering scripts are not "
                    "included in shown payload content(s)")

    if conf.hpp:
        logger.warn("changes made by HTTP parameter pollution are not "
                    "included in shown payload content(s)")
def _randomFillBlankFields(value):
    """Offer to populate blank form fields in *value* with random strings,
    returning the (possibly) filled-in request data."""
    if not extractRegexResult(EMPTY_FORM_FIELDS_REGEX, value):
        return value

    message = "do you want to fill blank fields with random values? [Y/n] "
    choice = readInput(message, default="Y")
    if choice and choice[0] not in ("y", "Y"):
        return value

    filled = value
    for match in re.finditer(EMPTY_FORM_FIELDS_REGEX, filled):
        field = match.group("result")
        # leave session-like fields and ASP.NET control fields untouched
        if any(token in field for token in IGNORE_PARAMETERS) or re.search(ASP_NET_CONTROL_REGEX, field):
            continue
        if field[-1] == DEFAULT_GET_POST_DELIMITER:
            replacement = "%s%s%s" % (field[:-1], randomStr(), DEFAULT_GET_POST_DELIMITER)
        else:
            replacement = "%s%s" % (field, randomStr())
        filled = filled.replace(field, replacement)

    return filled
def _saveToHashDB():
    """Persist the current findings (injections, absolute file paths,
    comparison chars, dynamic markings) into the session hash database."""
    injections = hashDBRetrieve(HASHDB_KEYS.KB_INJECTIONS, True) or []
    injections.extend(entry for entry in kb.injections
                      if entry and entry.place is not None and entry.parameter is not None)

    # Deduplicate by (place, parameter, ptype), merging technique data of duplicates
    unique = dict()
    for injection in injections:
        key = (injection.place, injection.parameter, injection.ptype)
        if key in unique:
            unique[key].data.update(injection.data)
        else:
            unique[key] = injection
    hashDBWrite(HASHDB_KEYS.KB_INJECTIONS, unique.values(), True)

    paths = hashDBRetrieve(HASHDB_KEYS.KB_ABS_FILE_PATHS, True) or set()
    paths.update(kb.absFilePaths)
    hashDBWrite(HASHDB_KEYS.KB_ABS_FILE_PATHS, paths, True)

    # These two are written once per session only
    if not hashDBRetrieve(HASHDB_KEYS.KB_CHARS):
        hashDBWrite(HASHDB_KEYS.KB_CHARS, kb.chars, True)

    if not hashDBRetrieve(HASHDB_KEYS.KB_DYNAMIC_MARKINGS):
        hashDBWrite(HASHDB_KEYS.KB_DYNAMIC_MARKINGS, kb.dynamicMarkings, True)
def _saveToResultsFile():
    """Append confirmed injection points for the current target to the CSV
    results file (--results-file / multiple-targets mode).

    Writes one row per (place, parameter) pair with the used techniques
    encoded as their upper-cased initial letters; when nothing was found, a
    placeholder row with empty columns is written so the target still shows
    up in the report.
    """
    if not conf.resultsFP:
        return

    # Map technique enum value -> technique name (e.g. value of BOOLEAN -> "BOOLEAN")
    techniques = dict(map(lambda x: (x[1], x[0]), getPublicTypeMembers(PAYLOAD.TECHNIQUE)))

    results = {}
    for inj in kb.injections:
        if inj.place is None or inj.parameter is None:
            continue
        results.setdefault((inj.place, inj.parameter), []).extend(inj.data.keys())

    # CSV-escape the URL once; prefer the pre-rewrite original URL when known
    url = safeCSValue(kb.originalUrls.get(conf.url) or conf.url)

    for (place, parameter), used in results.items():
        line = "%s,%s,%s,%s%s" % (url, place, parameter,
                                  "".join(map(lambda x: techniques[x][0].upper(), sorted(used))),
                                  os.linesep)
        conf.resultsFP.writelines(line)

    if not results:
        # BUGFIX: this branch previously wrote the raw conf.url without CSV
        # escaping and without the kb.originalUrls mapping, unlike the
        # populated branch above -- a URL containing a comma corrupted the CSV
        line = "%s,,,%s" % (url, os.linesep)
        conf.resultsFP.writelines(line)
def start():
    """
    This function calls a function that performs checks on both URL
    stability and all GET, POST, Cookie and User-Agent parameters to
    check if they are dynamic and SQL injection affected
    """
    # Direct database connection (-d) skips the whole detection phase
    if conf.direct:
        initTargetEnv()
        setupTargetEnv()
        action()
        return True

    if conf.url and not any((conf.forms, conf.crawlDepth)):
        kb.targets.add((conf.url, conf.method, conf.data, conf.cookie))

    if conf.configFile and not kb.targets:
        errMsg = "you did not edit the configuration file properly, set "
        errMsg += "the target URL, list of targets or google dork"
        logger.error(errMsg)
        return False

    if kb.targets and len(kb.targets) > 1:
        infoMsg = "sqlmap got a total of %d targets" % len(kb.targets)
        logger.info(infoMsg)

    hostCount = 0

    for targetUrl, targetMethod, targetData, targetCookie in kb.targets:
        try:
            conf.url = targetUrl
            conf.method = targetMethod
            conf.data = targetData
            conf.cookie = targetCookie

            initTargetEnv()
            parseTargetUrl()

            # Decide whether this target still has untested parameters
            testSqlInj = False

            if PLACE.GET in conf.parameters and not any([conf.data, conf.testParameter]):
                for parameter in re.findall(r"([^=]+)=([^%s]+%s?|\Z)" % (conf.pDel or DEFAULT_GET_POST_DELIMITER, conf.pDel or DEFAULT_GET_POST_DELIMITER), conf.parameters[PLACE.GET]):
                    paramKey = (conf.hostname, conf.path, PLACE.GET, parameter[0])

                    if paramKey not in kb.testedParams:
                        testSqlInj = True
                        break
            else:
                paramKey = (conf.hostname, conf.path, None, None)

                if paramKey not in kb.testedParams:
                    testSqlInj = True

            # Hosts already proven vulnerable are not tested again
            testSqlInj &= conf.hostname not in kb.vulnHosts

            if not testSqlInj:
                infoMsg = "skipping '%s'" % targetUrl
                logger.info(infoMsg)
                continue

            if conf.multipleTargets:
                hostCount += 1

                if conf.forms:
                    message = "[#%d] form:\n%s %s" % (hostCount, conf.method or HTTPMETHOD.GET, targetUrl)
                else:
                    message = "URL %d:\n%s %s%s" % (hostCount, conf.method or HTTPMETHOD.GET, targetUrl, " (PageRank: %s)" % get_pagerank(targetUrl) if conf.googleDork and conf.pageRank else "")

                if conf.cookie:
                    message += "\nCookie: %s" % conf.cookie

                if conf.data is not None:
                    message += "\nPOST data: %s" % urlencode(conf.data) if conf.data else ""

                if conf.forms:
                    # GET forms without a query string carry nothing testable
                    if conf.method == HTTPMETHOD.GET and targetUrl.find("?") == -1:
                        continue

                    message += "\ndo you want to test this form? [Y/n/q] "
                    test = readInput(message, default="Y")

                    if not test or test[0] in ("y", "Y"):
                        if conf.method == HTTPMETHOD.POST:
                            # Let the user edit (and optionally auto-fill) the POST body
                            message = "Edit POST data [default: %s]%s: " % (urlencode(conf.data) if conf.data else "None", " (Warning: blank fields detected)" if conf.data and extractRegexResult(EMPTY_FORM_FIELDS_REGEX, conf.data) else "")
                            conf.data = readInput(message, default=conf.data)
                            conf.data = _randomFillBlankFields(conf.data)
                            conf.data = urldecode(conf.data) if conf.data and urlencode(DEFAULT_GET_POST_DELIMITER, None) not in conf.data else conf.data

                        elif conf.method == HTTPMETHOD.GET:
                            if targetUrl.find("?") > -1:
                                firstPart = targetUrl[:targetUrl.find("?")]
                                secondPart = targetUrl[targetUrl.find("?") + 1:]
                                message = "Edit GET data [default: %s]: " % secondPart
                                test = readInput(message, default=secondPart)
                                test = _randomFillBlankFields(test)
                                conf.url = "%s?%s" % (firstPart, test)
                                parseTargetUrl()

                    elif test[0] in ("n", "N"):
                        continue
                    elif test[0] in ("q", "Q"):
                        break

                else:
                    message += "\ndo you want to test this URL? [Y/n/q]"
                    test = readInput(message, default="Y")

                    if not test or test[0] in ("y", "Y"):
                        pass
                    elif test[0] in ("n", "N"):
                        continue
                    elif test[0] in ("q", "Q"):
                        break

                    infoMsg = "testing URL '%s'" % targetUrl
                    logger.info(infoMsg)

            setupTargetEnv()

            if not checkConnection(suppressOutput=conf.forms) or not checkString() or not checkRegexp():
                continue

            if conf.checkWaf:
                checkWaf()

            if conf.identifyWaf:
                identifyWaf()

            if conf.nullConnection:
                checkNullConnection()

            # Run the detection phase only when no injection was found yet
            if (len(kb.injections) == 0 or (len(kb.injections) == 1 and kb.injections[0].place is None)) \
                and (kb.injection.place is None or kb.injection.parameter is None):

                if not any((conf.string, conf.notString, conf.regexp)) and PAYLOAD.TECHNIQUE.BOOLEAN in conf.tech:
                    # NOTE: this is not needed anymore, leaving only to display
                    # a warning message to the user in case the page is not stable
                    checkStability()

                # Do a little prioritization reorder of a testable parameter list
                parameters = conf.parameters.keys()

                # Order of testing list (first to last)
                orderList = (PLACE.CUSTOM_POST, PLACE.CUSTOM_HEADER, PLACE.URI, PLACE.POST, PLACE.GET)

                for place in orderList[::-1]:
                    if place in parameters:
                        parameters.remove(place)
                        parameters.insert(0, place)

                proceed = True

                for place in parameters:
                    # Test User-Agent and Referer headers only if
                    # --level >= 3
                    skip = (place == PLACE.USER_AGENT and conf.level < 3)
                    skip |= (place == PLACE.REFERER and conf.level < 3)

                    # Test Host header only if
                    # --level >= 5
                    skip |= (place == PLACE.HOST and conf.level < 5)

                    # Test Cookie header only if --level >= 2
                    skip |= (place == PLACE.COOKIE and conf.level < 2)

                    # Honor --skip for header places...
                    skip |= (place == PLACE.USER_AGENT and intersect(USER_AGENT_ALIASES, conf.skip, True) not in ([], None))
                    skip |= (place == PLACE.REFERER and intersect(REFERER_ALIASES, conf.skip, True) not in ([], None))
                    skip |= (place == PLACE.COOKIE and intersect(PLACE.COOKIE, conf.skip, True) not in ([], None))

                    # ...but an explicit -p request overrides any skip decision
                    skip &= not (place == PLACE.USER_AGENT and intersect(USER_AGENT_ALIASES, conf.testParameter, True))
                    skip &= not (place == PLACE.REFERER and intersect(REFERER_ALIASES, conf.testParameter, True))
                    skip &= not (place == PLACE.HOST and intersect(HOST_ALIASES, conf.testParameter, True))
                    skip &= not (place == PLACE.COOKIE and intersect((PLACE.COOKIE,), conf.testParameter, True))

                    if skip:
                        continue

                    if place not in conf.paramDict:
                        continue

                    paramDict = conf.paramDict[place]

                    for parameter, value in paramDict.items():
                        if not proceed:
                            break

                        kb.vainRun = False
                        testSqlInj = True
                        paramKey = (conf.hostname, conf.path, place, parameter)

                        if paramKey in kb.testedParams:
                            testSqlInj = False

                            infoMsg = "skipping previously processed %s parameter '%s'" % (place, parameter)
                            logger.info(infoMsg)

                        elif parameter in conf.testParameter:
                            pass

                        elif parameter == conf.rParam:
                            testSqlInj = False

                            infoMsg = "skipping randomizing %s parameter '%s'" % (place, parameter)
                            logger.info(infoMsg)

                        elif parameter in conf.skip:
                            testSqlInj = False

                            infoMsg = "skipping %s parameter '%s'" % (place, parameter)
                            logger.info(infoMsg)

                        # Ignore session-like parameters for --level < 4
                        elif conf.level < 4 and parameter.upper() in IGNORE_PARAMETERS:
                            testSqlInj = False

                            infoMsg = "ignoring %s parameter '%s'" % (place, parameter)
                            logger.info(infoMsg)

                        elif PAYLOAD.TECHNIQUE.BOOLEAN in conf.tech:
                            check = checkDynParam(place, parameter, value)

                            if not check:
                                warnMsg = "%s parameter '%s' does not appear dynamic" % (place, parameter)
                                logger.warn(warnMsg)
                            else:
                                infoMsg = "%s parameter '%s' is dynamic" % (place, parameter)
                                logger.info(infoMsg)

                        kb.testedParams.add(paramKey)

                        if testSqlInj:
                            check = heuristicCheckSqlInjection(place, parameter)

                            if check != HEURISTIC_TEST.POSITIVE:
                                # --smart (or casted parameters) skip non-promising parameters
                                if conf.smart or (kb.ignoreCasted and check == HEURISTIC_TEST.CASTED):
                                    infoMsg = "skipping %s parameter '%s'" % (place, parameter)
                                    logger.info(infoMsg)
                                    continue

                            infoMsg = "testing for SQL injection on %s " % place
                            infoMsg += "parameter '%s'" % parameter
                            logger.info(infoMsg)

                            injection = checkSqlInjection(place, parameter, value)
                            proceed = not kb.endDetection

                            if injection is not None and injection.place is not None:
                                kb.injections.append(injection)

                                # In case when user wants to end detection phase (Ctrl+C)
                                if not proceed:
                                    break

                                msg = "%s parameter '%s' " % (injection.place, injection.parameter)
                                msg += "is vulnerable. Do you want to keep testing the others (if any)? [y/N] "
                                test = readInput(msg, default="N")

                                if test[0] not in ("y", "Y"):
                                    proceed = False
                                    # Mark the whole target as processed
                                    paramKey = (conf.hostname, conf.path, None, None)
                                    kb.testedParams.add(paramKey)
                            else:
                                warnMsg = "%s parameter '%s' is not " % (place, parameter)
                                warnMsg += "injectable"
                                logger.warn(warnMsg)

                if len(kb.injections) == 0 or (len(kb.injections) == 1 and kb.injections[0].place is None):
                    if kb.vainRun and not conf.multipleTargets:
                        errMsg = "no parameter(s) found for testing in the provided data "
                        errMsg += "(e.g. GET parameter 'id' in 'www.site.com/index.php?id=1')"
                        raise SqlmapNoneDataException(errMsg)
                    else:
                        # Build a single advisory message with every applicable hint
                        errMsg = "all tested parameters appear to be not injectable."

                        if conf.level < 5 or conf.risk < 3:
                            errMsg += " Try to increase '--level'/'--risk' values "
                            errMsg += "to perform more tests."

                        if isinstance(conf.tech, list) and len(conf.tech) < 5:
                            errMsg += " Rerun without providing the option '--technique'."

                        if not conf.textOnly and kb.originalPage:
                            percent = (100.0 * len(getFilteredPageContent(kb.originalPage)) / len(kb.originalPage))

                            if kb.dynamicMarkings:
                                errMsg += " You can give it a go with the switch '--text-only' "
                                errMsg += "if the target page has a low percentage "
                                errMsg += "of textual content (~%.2f%% of " % percent
                                errMsg += "page content is text)."
                            elif percent < LOW_TEXT_PERCENT and not kb.errorIsNone:
                                errMsg += " Please retry with the switch '--text-only' "
                                errMsg += "(along with --technique=BU) as this case "
                                errMsg += "looks like a perfect candidate "
                                errMsg += "(low textual content along with inability "
                                errMsg += "of comparison engine to detect at least "
                                errMsg += "one dynamic parameter)."

                        if kb.heuristicTest == HEURISTIC_TEST.POSITIVE:
                            errMsg += " As heuristic test turned out positive you are "
                            errMsg += "strongly advised to continue on with the tests. "
                            errMsg += "Please, consider usage of tampering scripts as "
                            errMsg += "your target might filter the queries."

                        if not conf.string and not conf.notString and not conf.regexp:
                            errMsg += " Also, you can try to rerun by providing "
                            errMsg += "either a valid value for option '--string' "
                            errMsg += "(or '--regexp')"
                        elif conf.string:
                            errMsg += " Also, you can try to rerun by providing a "
                            errMsg += "valid value for option '--string' as perhaps the string you "
                            errMsg += "have choosen does not match "
                            errMsg += "exclusively True responses"
                        elif conf.regexp:
                            errMsg += " Also, you can try to rerun by providing a "
                            errMsg += "valid value for option '--regexp' as perhaps the regular "
                            errMsg += "expression that you have choosen "
                            errMsg += "does not match exclusively True responses"

                        raise SqlmapNotVulnerableException(errMsg)
                else:
                    # Flush the flag
                    kb.testMode = False

                    _saveToResultsFile()
                    _saveToHashDB()
                    _showInjections()
                    _selectInjection()

            if kb.injection.place is not None and kb.injection.parameter is not None:
                if conf.multipleTargets:
                    message = "do you want to exploit this SQL injection? [Y/n] "
                    exploit = readInput(message, default="Y")
                    condition = not exploit or exploit[0] in ("y", "Y")
                else:
                    condition = True

                if condition:
                    action()

        except KeyboardInterrupt:
            if conf.multipleTargets:
                warnMsg = "user aborted in multiple target mode"
                logger.warn(warnMsg)

                message = "do you want to skip to the next target in list? [Y/n/q]"
                test = readInput(message, default="Y")

                if not test or test[0] in ("y", "Y"):
                    pass
                elif test[0] in ("n", "N"):
                    return False
                elif test[0] in ("q", "Q"):
                    raise SqlmapUserQuitException
            else:
                raise

        except SqlmapUserQuitException:
            raise

        except SqlmapSilentQuitException:
            raise

        except SqlmapBaseException, e:
            e = getUnicode(e)

            # In multiple-target mode a per-target failure is not fatal
            if conf.multipleTargets:
                e += ", skipping to the next %s" % ("form" if conf.forms else "URL")
                logger.error(e)
            else:
                logger.critical(e)
                return False

        finally:
            showHttpErrorCodes()

    if kb.maxConnectionsFlag:
        warnMsg = "it appears that the target "
        warnMsg += "has a maximum connections "
        warnMsg += "constraint"
        logger.warn(warnMsg)

    if kb.dataOutputFlag and not conf.multipleTargets:
        logger.info("fetched data logged to text files under '%s'" % conf.outputPath)

    if conf.multipleTargets and conf.resultsFilename:
        infoMsg = "you can find results of scanning in multiple targets "
        infoMsg += "mode inside the CSV file '%s'" % conf.resultsFilename
        logger.info(infoMsg)

    return True
|
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy.lib.six import string_types, exec_
import sys
import keyword
import re
import inspect
import types
import warnings
from scipy.misc import doccer
from ._distr_params import distcont, distdiscrete
from scipy.special import (comb, chndtr, gammaln, hyp0f1,
entr, kl_div)
# for root finding for discrete distribution ppf, and max likelihood estimation
from scipy import optimize
# for functions of continuous distributions (e.g. moments, entropy, cdf)
from scipy import integrate
# to approximate the pdf of a continuous distribution given its cdf
from scipy.misc import derivative
from numpy import (arange, putmask, ravel, take, ones, sum, shape,
product, reshape, zeros, floor, logical_and, log, sqrt, exp,
ndarray)
from numpy import (place, any, argsort, argmax, vectorize,
asarray, nan, inf, isinf, NINF, empty)
import numpy as np
import numpy.random as mtrand
from ._constants import _EPS, _XMAX
# Python 2 exposes `new.instancemethod` for binding a plain function to an
# instance; on Python 3 the `new` module is gone, so provide a shim on top of
# types.MethodType with the same 3-argument call signature.
try:
    from new import instancemethod
except ImportError:
    # Python 3
    def instancemethod(func, obj, cls):
        # `cls` is accepted only for signature compatibility; MethodType
        # needs just the function and the object it binds to
        return types.MethodType(func, obj)
# These are the docstring parts used for substitution in specific
# distribution docstrings

# Section headers shared by every generated distribution docstring
docheaders = {'methods': """\nMethods\n-------\n""",
              'parameters': """\nParameters\n---------\n""",
              'notes': """\nNotes\n-----\n""",
              'examples': """\nExamples\n--------\n"""}

# Per-method docstring templates; %(shapes)s is substituted with the
# distribution's shape parameter names at class-creation time
_doc_rvs = """\
rvs(%(shapes)s, loc=0, scale=1, size=1)
Random variates.
"""
_doc_pdf = """\
pdf(x, %(shapes)s, loc=0, scale=1)
Probability density function.
"""
_doc_logpdf = """\
logpdf(x, %(shapes)s, loc=0, scale=1)
Log of the probability density function.
"""
_doc_pmf = """\
pmf(x, %(shapes)s, loc=0, scale=1)
Probability mass function.
"""
_doc_logpmf = """\
logpmf(x, %(shapes)s, loc=0, scale=1)
Log of the probability mass function.
"""
_doc_cdf = """\
cdf(x, %(shapes)s, loc=0, scale=1)
Cumulative density function.
"""
_doc_logcdf = """\
logcdf(x, %(shapes)s, loc=0, scale=1)
Log of the cumulative density function.
"""
_doc_sf = """\
sf(x, %(shapes)s, loc=0, scale=1)
Survival function (1-cdf --- sometimes more accurate).
"""
_doc_logsf = """\
logsf(x, %(shapes)s, loc=0, scale=1)
Log of the survival function.
"""
_doc_ppf = """\
ppf(q, %(shapes)s, loc=0, scale=1)
Percent point function (inverse of cdf --- percentiles).
"""
_doc_isf = """\
isf(q, %(shapes)s, loc=0, scale=1)
Inverse survival function (inverse of sf).
"""
_doc_moment = """\
moment(n, %(shapes)s, loc=0, scale=1)
Non-central moment of order n
"""
_doc_stats = """\
stats(%(shapes)s, loc=0, scale=1, moments='mv')
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = """\
entropy(%(shapes)s, loc=0, scale=1)
(Differential) entropy of the RV.
"""
_doc_fit = """\
fit(data, %(shapes)s, loc=0, scale=1)
Parameter estimates for generic data.
"""
_doc_expect = """\
expect(func, %(shapes)s, loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = """\
expect(func, %(shapes)s, loc=0, lb=None, ub=None, conditional=False)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = """\
median(%(shapes)s, loc=0, scale=1)
Median of the distribution.
"""
_doc_mean = """\
mean(%(shapes)s, loc=0, scale=1)
Mean of the distribution.
"""
_doc_var = """\
var(%(shapes)s, loc=0, scale=1)
Variance of the distribution.
"""
_doc_std = """\
std(%(shapes)s, loc=0, scale=1)
Standard deviation of the distribution.
"""
_doc_interval = """\
interval(alpha, %(shapes)s, loc=0, scale=1)
Endpoints of the range that contains alpha percent of the distribution
"""
# Concatenation of every continuous-distribution method template, in the
# order they appear under the "Methods" header
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
                           _doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
                           _doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
                           _doc_stats, _doc_entropy, _doc_fit,
                           _doc_expect, _doc_median,
                           _doc_mean, _doc_var, _doc_std, _doc_interval])
# Note that the two lines for %(shapes) are searched for and replaced in
# rv_continuous and rv_discrete - update there if the exact string changes
_doc_default_callparams = """
Parameters
----------
x : array_like
quantiles
q : array_like
lower or upper tail probability
%(shapes)s : array_like
shape parameters
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
size : int or tuple of ints, optional
shape of random variates (default computed from input arguments )
moments : str, optional
composed of letters ['mvsk'] specifying which moments to compute where
'm' = mean, 'v' = variance, 's' = (Fisher's) skew and
'k' = (Fisher's) kurtosis.
Default is 'mv'.
"""
# Intro paragraph for continuous distributions; the discrete variant is
# derived from it further below via str.replace
_doc_default_longsummary = """\
Continuous random variables are defined from a standard form and may
require some shape parameters to complete its specification. Any
optional keyword parameters can be passed to the methods of the RV
object as given below:
"""
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
# Shared Examples section; %(set_vals_stmt)s holds per-distribution setup code
_doc_default_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate a few first moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability density function (``pdf``):
>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s), 100)
>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s),
... 'r-', lw=5, alpha=0.6, label='%(name)s pdf')
Alternatively, freeze the distribution and display the frozen pdf:
>>> rv = %(name)s(%(shapes)s)
>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
Check accuracy of ``cdf`` and ``ppf``:
>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s)
>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
And compare the histogram:
>>> ax.hist(r, normed=True, histtype='stepfilled', alpha=0.2)
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
"""
# Full default docstring and the variant used when a distribution supplies
# its own Notes section
_doc_default = ''.join([_doc_default_longsummary,
                        _doc_allmethods,
                        _doc_default_callparams,
                        _doc_default_frozen_note,
                        _doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
                                     _doc_allmethods,
                                     _doc_default_callparams,
                                     _doc_default_frozen_note])
# Substitution dictionary consumed by doccer when building the docstrings of
# concrete continuous distributions
docdict = {
    'rvs': _doc_rvs,
    'pdf': _doc_pdf,
    'logpdf': _doc_logpdf,
    'cdf': _doc_cdf,
    'logcdf': _doc_logcdf,
    'sf': _doc_sf,
    'logsf': _doc_logsf,
    'ppf': _doc_ppf,
    'isf': _doc_isf,
    'stats': _doc_stats,
    'entropy': _doc_entropy,
    'fit': _doc_fit,
    'moment': _doc_moment,
    'expect': _doc_expect,
    'interval': _doc_interval,
    'mean': _doc_mean,
    'std': _doc_std,
    'var': _doc_var,
    'median': _doc_median,
    'allmethods': _doc_allmethods,
    'callparams': _doc_default_callparams,
    'longsummary': _doc_default_longsummary,
    'frozennote': _doc_default_frozen_note,
    'example': _doc_default_example,
    'default': _doc_default,
    'before_notes': _doc_default_before_notes
}

# Reuse common content between continuous and discrete docs, change some
# minor bits.
docdict_discrete = docdict.copy()

docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
                     'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',
                     'mean', 'var', 'std', 'interval']
# Discrete distributions have no scale parameter -- strip it from the shared
# method templates; density-related entries do not apply either
for obj in _doc_disc_methods:
    docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
docdict_discrete.pop('pdf')
docdict_discrete.pop('logpdf')

_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods])
docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods

docdict_discrete['longsummary'] = _doc_default_longsummary.replace(
    'Continuous', 'Discrete')
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape and
location parameters returning a "frozen" discrete RV object:
rv = %(name)s(%(shapes)s, loc=0)
- Frozen RV object with the same methods but holding the given shape and
location fixed.
"""
docdict_discrete['frozennote'] = _doc_default_frozen_note

_doc_default_discrete_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate a few first moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability mass function (``pmf``):
>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s))
>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf')
>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5)
Alternatively, freeze the distribution and display the frozen ``pmf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,
... label='frozen pmf')
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
Check accuracy of ``cdf`` and ``ppf``:
>>> prob = %(name)s.cdf(x, %(shapes)s)
>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
"""
docdict_discrete['example'] = _doc_default_discrete_example

_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],
                                     docdict_discrete['allmethods'],
                                     docdict_discrete['callparams'],
                                     docdict_discrete['frozennote']])
docdict_discrete['before_notes'] = _doc_default_before_notes

_doc_default_disc = ''.join([docdict_discrete['longsummary'],
                             docdict_discrete['allmethods'],
                             docdict_discrete['frozennote'],
                             docdict_discrete['example']])
docdict_discrete['default'] = _doc_default_disc

# clean up all the separate docstring elements, we do not need them anymore
for obj in [s for s in dir() if s.startswith('_doc_')]:
    exec('del ' + obj)
del obj

try:
    del s
except NameError:
    # in Python 3, loop variables are not visible after the loop
    pass
def _moment(data, n, mu=None):
if mu is None:
mu = data.mean()
return ((data - mu)**n).mean()
def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
if (n == 0):
return 1.0
elif (n == 1):
if mu is None:
val = moment_func(1, *args)
else:
val = mu
elif (n == 2):
if mu2 is None or mu is None:
val = moment_func(2, *args)
else:
val = mu2 + mu*mu
elif (n == 3):
if g1 is None or mu2 is None or mu is None:
val = moment_func(3, *args)
else:
mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment
val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment
elif (n == 4):
if g1 is None or g2 is None or mu2 is None or mu is None:
val = moment_func(4, *args)
else:
mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment
mu3 = g1*np.power(mu2, 1.5) # 3rd central moment
val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu
else:
val = moment_func(n, *args)
return val
def _skew(data):
"""
skew is third central moment / variance**(1.5)
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m3 = ((data - mu)**3).mean()
return m3 / np.power(m2, 1.5)
def _kurtosis(data):
"""
kurtosis is fourth central moment / variance**2 - 3
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m4 = ((data - mu)**4).mean()
return m4 / m2**2 - 3
# Frozen RV class
class rv_frozen(object):
    """Distribution with the shape, location and scale arguments bound.
    Stores ``args``/``kwds`` once at construction time and forwards each
    statistical method to a private copy of the underlying distribution,
    appending the stored arguments to every call, so e.g. ``rv.pdf(x)``
    is ``dist.pdf(x, *args, **kwds)``.
    """
    def __init__(self, dist, *args, **kwds):
        self.args = args
        self.kwds = kwds
        # work on a private copy, rebuilt from the original ctor parameters
        self.dist = dist.__class__(**dist._ctor_param)
        # a, b may be set in _argcheck, depending on *args, **kwds. Ouch.
        shapes, _, _ = self.dist._parse_args(*args, **kwds)
        self.dist._argcheck(*shapes)
    def pdf(self, x):
        # raises AttributeError for a frozen discrete distribution
        return self.dist.pdf(x, *self.args, **self.kwds)
    def logpdf(self, x):
        return self.dist.logpdf(x, *self.args, **self.kwds)
    def cdf(self, x):
        return self.dist.cdf(x, *self.args, **self.kwds)
    def logcdf(self, x):
        return self.dist.logcdf(x, *self.args, **self.kwds)
    def ppf(self, q):
        return self.dist.ppf(q, *self.args, **self.kwds)
    def isf(self, q):
        return self.dist.isf(q, *self.args, **self.kwds)
    def rvs(self, size=None):
        # merge the requested size into the stored keywords
        keywords = dict(self.kwds, size=size)
        return self.dist.rvs(*self.args, **keywords)
    def sf(self, x):
        return self.dist.sf(x, *self.args, **self.kwds)
    def logsf(self, x):
        return self.dist.logsf(x, *self.args, **self.kwds)
    def stats(self, moments='mv'):
        keywords = dict(self.kwds, moments=moments)
        return self.dist.stats(*self.args, **keywords)
    def median(self):
        return self.dist.median(*self.args, **self.kwds)
    def mean(self):
        return self.dist.mean(*self.args, **self.kwds)
    def var(self):
        return self.dist.var(*self.args, **self.kwds)
    def std(self):
        return self.dist.std(*self.args, **self.kwds)
    def moment(self, n):
        return self.dist.moment(n, *self.args, **self.kwds)
    def entropy(self):
        return self.dist.entropy(*self.args, **self.kwds)
    def pmf(self, k):
        return self.dist.pmf(k, *self.args, **self.kwds)
    def logpmf(self, k):
        return self.dist.logpmf(k, *self.args, **self.kwds)
    def interval(self, alpha):
        return self.dist.interval(alpha, *self.args, **self.kwds)
def valarray(shape, value=nan, typecode=None):
    """Return an ndarray of the given shape filled with `value`.
    The dtype follows numpy's promotion of bool * value unless an
    explicit `typecode` is supplied.
    """
    filled = ones(shape, dtype=bool) * value
    if typecode is not None:
        filled = filled.astype(typecode)
    if isinstance(filled, ndarray):
        return filled
    return asarray(filled)
def _lazywhere(cond, arrays, f, fillvalue=None, f2=None):
    """
    np.where(cond, x, fillvalue) always evaluates x even where cond is False.
    This one only evaluates f(arr1[cond], arr2[cond], ...).
    For example,
    >>> a, b = np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])
    >>> def f(a, b):
    ...     return a*b
    >>> _lazywhere(a > 2, (a, b), f, np.nan)
    array([ nan,  nan,  21.,  32.])
    Notice it assumes that all `arrays` are of the same shape, or can be
    broadcasted together.
    """
    # exactly one of fillvalue / f2 may be supplied
    if fillvalue is None:
        if f2 is None:
            raise ValueError("One of (fillvalue, f2) must be given.")
        fillvalue = np.nan
    elif f2 is not None:
        raise ValueError("Only one of (fillvalue, f2) can be given.")
    arrays = np.broadcast_arrays(*arrays)
    selected = tuple(np.extract(cond, arr) for arr in arrays)
    out = valarray(shape(arrays[0]), value=fillvalue)
    np.place(out, cond, f(*selected))
    if f2 is not None:
        complement = tuple(np.extract(~cond, arr) for arr in arrays)
        np.place(out, ~cond, f2(*complement))
    return out
# This should be rewritten
def argsreduce(cond, *args):
    """Return the sequence of ravel(args[i]) where ravel(condition) is
    True in 1D.
    Examples
    --------
    >>> import numpy as np
    >>> rand = np.random.random_sample
    >>> A = rand((4, 5))
    >>> B = 2
    >>> C = rand((1, 5))
    >>> cond = np.ones(A.shape)
    >>> [A1, B1, C1] = argsreduce(cond, A, B, C)
    >>> B1.shape
    (20,)
    >>> cond[2,:] = 0
    >>> [A2, B2, C2] = argsreduce(cond, A, B, C)
    >>> B2.shape
    (15,)
    """
    promoted = np.atleast_1d(*args)
    if not isinstance(promoted, list):
        # a single argument comes back as a bare array
        promoted = [promoted]
    # multiplying by (cond == cond), an all-True array, broadcasts each
    # argument up to cond's shape before extraction
    all_true = (cond == cond)
    return [np.extract(cond, arr * all_true) for arr in promoted]
parse_arg_template = """
def _parse_args(self, %(shape_arg_str)s %(locscale_in)s):
return (%(shape_arg_str)s), %(locscale_out)s
def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None):
return (%(shape_arg_str)s), %(locscale_out)s, size
def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'):
return (%(shape_arg_str)s), %(locscale_out)s, moments
"""
# Both the continuous and discrete distributions depend on ncx2.
# I think the function name ncx2 is an abbreviation for noncentral chi squared.
def _ncx2_log_pdf(x, df, nc):
a = asarray(df/2.0)
fac = -nc/2.0 - x/2.0 + (a-1)*log(x) - a*log(2) - gammaln(a)
return fac + np.nan_to_num(log(hyp0f1(a, nc * x/4.0)))
def _ncx2_pdf(x, df, nc):
    """Noncentral chi-square pdf: exponentiate the log-density."""
    log_density = _ncx2_log_pdf(x, df, nc)
    return np.exp(log_density)
def _ncx2_cdf(x, df, nc):
    # noncentral chi-square cdf, delegated to scipy.special.chndtr
    return chndtr(x, df, nc)
class rv_generic(object):
    """Class which encapsulates common functionality between rv_discrete
    and rv_continuous.
    """
    def __init__(self):
        super(rv_generic, self).__init__()
        # figure out if _stats signature has 'moments' keyword
        sign = inspect.getargspec(self._stats)
        self._stats_has_moments = ((sign[2] is not None) or
                                   ('moments' in sign[0]))
    def _construct_argparser(
            self, meths_to_inspect, locscale_in, locscale_out):
        """Construct the parser for the shape arguments.
        Generates the argument-parsing functions dynamically and attaches
        them to the instance.
        Is supposed to be called in __init__ of a class for each distribution.
        If self.shapes is a non-empty string, interprets it as a
        comma-separated list of shape parameters.
        Otherwise inspects the call signatures of `meths_to_inspect`
        and constructs the argument-parsing functions from these.
        In this case also sets `shapes` and `numargs`.
        """
        if self.shapes:
            # sanitize the user-supplied shapes
            if not isinstance(self.shapes, string_types):
                raise TypeError('shapes must be a string.')
            shapes = self.shapes.replace(',', ' ').split()
            for field in shapes:
                if keyword.iskeyword(field):
                    raise SyntaxError('keywords cannot be used as shapes.')
                if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field):
                    raise SyntaxError(
                        'shapes must be valid python identifiers')
        else:
            # find out the call signatures (_pdf, _cdf etc), deduce shape
            # arguments
            shapes_list = []
            for meth in meths_to_inspect:
                shapes_args = inspect.getargspec(meth)
                shapes_list.append(shapes_args.args)
                # *args or **kwargs are not allowed w/automatic shapes
                # (generic methods have 'self, x' only)
                if len(shapes_args.args) > 2:
                    if shapes_args.varargs is not None:
                        raise TypeError(
                            '*args are not allowed w/out explicit shapes')
                    if shapes_args.keywords is not None:
                        raise TypeError(
                            '**kwds are not allowed w/out explicit shapes')
                    if shapes_args.defaults is not None:
                        raise TypeError('defaults are not allowed for shapes')
            # the overridden method with the longest signature wins
            shapes = max(shapes_list, key=lambda x: len(x))
            shapes = shapes[2:]  # remove self, x,
            # make sure the signatures are consistent
            # (generic methods have 'self, x' only)
            for item in shapes_list:
                if len(item) > 2 and item[2:] != shapes:
                    raise TypeError('Shape arguments are inconsistent.')
        # have the arguments, construct the method from template
        shapes_str = ', '.join(shapes) + ', ' if shapes else ''  # NB: not None
        dct = dict(shape_arg_str=shapes_str,
                   locscale_in=locscale_in,
                   locscale_out=locscale_out,
                   )
        ns = {}
        exec_(parse_arg_template % dct, ns)
        # NB: attach to the instance, not class
        for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:
            setattr(self, name,
                    instancemethod(ns[name], self, self.__class__)
                    )
        self.shapes = ', '.join(shapes) if shapes else None
        if not hasattr(self, 'numargs'):
            # allows more general subclassing with *args
            self.numargs = len(shapes)
    def _construct_doc(self, docdict, shapes_vals=None):
        """Construct the instance docstring with string substitutions."""
        tempdict = docdict.copy()
        tempdict['name'] = self.name or 'distname'
        tempdict['shapes'] = self.shapes or ''
        if shapes_vals is None:
            shapes_vals = ()
        vals = ', '.join(str(_) for _ in shapes_vals)
        tempdict['vals'] = vals
        if self.shapes:
            tempdict['set_vals_stmt'] = '>>> %s = %s' % (self.shapes, vals)
        else:
            tempdict['set_vals_stmt'] = ''
        if self.shapes is None:
            # remove shapes from call parameters if there are none
            for item in ['callparams', 'default', 'before_notes']:
                tempdict[item] = tempdict[item].replace(
                    "\n%(shapes)s : array_like\n shape parameters", "")
        # two substitution passes: the template itself contains templates
        for i in range(2):
            if self.shapes is None:
                # necessary because we use %(shapes)s in two forms (w w/o ", ")
                self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
            self.__doc__ = doccer.docformat(self.__doc__, tempdict)
        # correct for empty shapes
        self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')')
    def freeze(self, *args, **kwds):
        """Freeze the distribution for the given arguments.
        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution.  Should include all
            the non-optional arguments, may include ``loc`` and ``scale``.
        Returns
        -------
        rv_frozen : rv_frozen instance
            The frozen distribution.
        """
        return rv_frozen(self, *args, **kwds)
    def __call__(self, *args, **kwds):
        # calling the distribution instance is the same as freezing it
        return self.freeze(*args, **kwds)
    # The actual calculation functions (no basic checking need be done)
    # If these are defined, the others won't be looked at.
    # Otherwise, the other set can be defined.
    def _stats(self, *args, **kwds):
        # default: no analytic moments known; stats() falls back to _munp
        return None, None, None, None
    # Central moments
    def _munp(self, n, *args):
        # Silence floating point warnings from integration.
        olderr = np.seterr(all='ignore')
        vals = self.generic_moment(n, *args)
        np.seterr(**olderr)
        return vals
    ## These are the methods you must define (standard form functions)
    ## NB: generic _pdf, _logpdf, _cdf are different for
    ## rv_continuous and rv_discrete hence are defined in there
    def _argcheck(self, *args):
        """Default check for correct values on args and keywords.
        Returns condition array of 1's where arguments are correct and
         0's where they are not.
        """
        cond = 1
        for arg in args:
            cond = logical_and(cond, (asarray(arg) > 0))
        return cond
    ##(return 1-d using self._size to get number)
    def _rvs(self, *args):
        ## Use basic inverse cdf algorithm for RV generation as default.
        U = mtrand.sample(self._size)
        Y = self._ppf(U, *args)
        return Y
    def _logcdf(self, x, *args):
        return log(self._cdf(x, *args))
    def _sf(self, x, *args):
        # survival function: complement of the cdf
        return 1.0-self._cdf(x, *args)
    def _logsf(self, x, *args):
        return log(self._sf(x, *args))
    def _ppf(self, q, *args):
        return self._ppfvec(q, *args)
    def _isf(self, q, *args):
        return self._ppf(1.0-q, *args)  # use correct _ppf for subclasses
    # These are actually called, and should not be overwritten if you
    # want to keep error checking.
    def rvs(self, *args, **kwds):
        """
        Random variates of given type.
        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).
        scale : array_like, optional
            Scale parameter (default=1).
        size : int or tuple of ints, optional
            Defining number of random variates (default=1).
        Returns
        -------
        rvs : ndarray or scalar
            Random variates of given `size`.
        """
        # 'discrete' is passed by rv_discrete.rvs to request integer output
        discrete = kwds.pop('discrete', None)
        args, loc, scale, size = self._parse_args_rvs(*args, **kwds)
        cond = logical_and(self._argcheck(*args), (scale >= 0))
        if not np.all(cond):
            raise ValueError("Domain error in arguments.")
        # self._size is total size of all output values
        self._size = product(size, axis=0)
        if self._size is not None and self._size > 1:
            size = np.array(size, ndmin=1)
        # scale == 0 is a degenerate point mass at loc
        if np.all(scale == 0):
            return loc*ones(size, 'd')
        vals = self._rvs(*args)
        if self._size is not None:
            vals = reshape(vals, size)
        vals = vals * scale + loc
        # Cast to int if discrete
        if discrete:
            if np.isscalar(vals):
                vals = int(vals)
            else:
                vals = vals.astype(int)
        return vals
    def stats(self, *args, **kwds):
        """
        Some statistics of the given RV
        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional (discrete RVs only)
            scale parameter (default=1)
        moments : str, optional
            composed of letters ['mvsk'] defining which moments to compute:
            'm' = mean,
            'v' = variance,
            's' = (Fisher's) skew,
            'k' = (Fisher's) kurtosis.
            (default='mv')
        Returns
        -------
        stats : sequence
            of requested moments.
        """
        args, loc, scale, moments = self._parse_args_stats(*args, **kwds)
        # scale = 1 by construction for discrete RVs
        loc, scale = map(asarray, (loc, scale))
        args = tuple(map(asarray, args))
        cond = self._argcheck(*args) & (scale > 0) & (loc == loc)
        output = []
        default = valarray(shape(cond), self.badvalue)
        # Use only entries that are valid in calculation
        if any(cond):
            goodargs = argsreduce(cond, *(args+(scale, loc)))
            scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
            if self._stats_has_moments:
                mu, mu2, g1, g2 = self._stats(*goodargs,
                                              **{'moments': moments})
            else:
                mu, mu2, g1, g2 = self._stats(*goodargs)
            if g1 is None:
                mu3 = None
            else:
                if mu2 is None:
                    mu2 = self._munp(2, *goodargs)
                # (mu2**1.5) breaks down for nan and inf
                mu3 = g1 * np.power(mu2, 1.5)
            if 'm' in moments:
                if mu is None:
                    mu = self._munp(1, *goodargs)
                out0 = default.copy()
                place(out0, cond, mu * scale + loc)
                output.append(out0)
            if 'v' in moments:
                if mu2 is None:
                    mu2p = self._munp(2, *goodargs)
                    if mu is None:
                        mu = self._munp(1, *goodargs)
                    # variance from the first two non-central moments
                    mu2 = mu2p - mu * mu
                # NOTE(review): if _stats returned mu=None with mu2 set and
                # 'm' was not requested, np.isinf(None) here would raise --
                # presumably no built-in distribution does that; confirm.
                if np.isinf(mu):
                    #if mean is inf then var is also inf
                    mu2 = np.inf
                out0 = default.copy()
                place(out0, cond, mu2 * scale * scale)
                output.append(out0)
            if 's' in moments:
                if g1 is None:
                    mu3p = self._munp(3, *goodargs)
                    if mu is None:
                        mu = self._munp(1, *goodargs)
                    if mu2 is None:
                        mu2p = self._munp(2, *goodargs)
                        mu2 = mu2p - mu * mu
                    # central third moment from non-central moments
                    mu3 = mu3p - 3 * mu * mu2 - mu**3
                    g1 = mu3 / np.power(mu2, 1.5)
                out0 = default.copy()
                place(out0, cond, g1)
                output.append(out0)
            if 'k' in moments:
                if g2 is None:
                    mu4p = self._munp(4, *goodargs)
                    if mu is None:
                        mu = self._munp(1, *goodargs)
                    if mu2 is None:
                        mu2p = self._munp(2, *goodargs)
                        mu2 = mu2p - mu * mu
                    if mu3 is None:
                        mu3p = self._munp(3, *goodargs)
                        mu3 = mu3p - 3 * mu * mu2 - mu**3
                    # central fourth moment, then excess kurtosis
                    mu4 = mu4p - 4 * mu * mu3 - 6 * mu * mu * mu2 - mu**4
                    g2 = mu4 / mu2**2.0 - 3.0
                out0 = default.copy()
                place(out0, cond, g2)
                output.append(out0)
        else:  # no valid args
            output = []
            for _ in moments:
                out0 = default.copy()
                output.append(out0)
        if len(output) == 1:
            return output[0]
        else:
            return tuple(output)
    def entropy(self, *args, **kwds):
        """
        Differential entropy of the RV.
        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).
        scale : array_like, optional (continuous distributions only).
            Scale parameter (default=1).
        Notes
        -----
        Entropy is defined base `e`:
        >>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))
        >>> np.allclose(drv.entropy(), np.log(2.0))
        True
        """
        args, loc, scale = self._parse_args(*args, **kwds)
        # NB: for discrete distributions scale=1 by construction in _parse_args
        args = tuple(map(asarray, args))
        cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
        output = zeros(shape(cond0), 'd')
        place(output, (1-cond0), self.badvalue)
        goodargs = argsreduce(cond0, *args)
        # I don't know when or why vecentropy got broken when numargs == 0
        # 09.08.2013: is this still relevant? cf check_vecentropy test
        # in tests/test_continuous_basic.py
        if self.numargs == 0:
            place(output, cond0, self._entropy() + log(scale))
        else:
            place(output, cond0, self.vecentropy(*goodargs) + log(scale))
        return output
    def moment(self, n, *args, **kwds):
        """
        n'th order non-central moment of distribution.
        Parameters
        ----------
        n : int, n>=1
            Order of moment.
        arg1, arg2, arg3,... : float
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        kwds : keyword arguments, optional
            These can include "loc" and "scale", as well as other keyword
            arguments relevant for a given distribution.
        """
        args, loc, scale = self._parse_args(*args, **kwds)
        if not (self._argcheck(*args) and (scale > 0)):
            return nan
        if (floor(n) != n):
            raise ValueError("Moment must be an integer.")
        if (n < 0):
            raise ValueError("Moment must be positive.")
        mu, mu2, g1, g2 = None, None, None, None
        if (n > 0) and (n < 5):
            # request only the statistics needed for the n-th moment
            if self._stats_has_moments:
                mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'vk'}[n]}
            else:
                mdict = {}
            mu, mu2, g1, g2 = self._stats(*args, **mdict)
        val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)
        # Convert to transformed  X = L + S*Y
        # E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n)
        if loc == 0:
            return scale**n * val
        else:
            result = 0
            fac = float(scale) / float(loc)
            for k in range(n):
                valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)
                result += comb(n, k, exact=True)*(fac**k) * valk
            result += fac**n * val
            return result * loc**n
    def median(self, *args, **kwds):
        """
        Median of the distribution.
        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            Location parameter, Default is 0.
        scale : array_like, optional
            Scale parameter, Default is 1.
        Returns
        -------
        median : float
            The median of the distribution.
        See Also
        --------
        stats.distributions.rv_discrete.ppf
            Inverse of the CDF
        """
        # the median is the 50% quantile
        return self.ppf(0.5, *args, **kwds)
    def mean(self, *args, **kwds):
        """
        Mean of the distribution
        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)
        Returns
        -------
        mean : float
            the mean of the distribution
        """
        kwds['moments'] = 'm'
        res = self.stats(*args, **kwds)
        # unwrap 0-d arrays to plain scalars
        if isinstance(res, ndarray) and res.ndim == 0:
            return res[()]
        return res
    def var(self, *args, **kwds):
        """
        Variance of the distribution
        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)
        Returns
        -------
        var : float
            the variance of the distribution
        """
        kwds['moments'] = 'v'
        res = self.stats(*args, **kwds)
        # unwrap 0-d arrays to plain scalars
        if isinstance(res, ndarray) and res.ndim == 0:
            return res[()]
        return res
    def std(self, *args, **kwds):
        """
        Standard deviation of the distribution.
        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)
        Returns
        -------
        std : float
            standard deviation of the distribution
        """
        kwds['moments'] = 'v'
        res = sqrt(self.stats(*args, **kwds))
        return res
    def interval(self, alpha, *args, **kwds):
        """
        Confidence interval with equal areas around the median.
        Parameters
        ----------
        alpha : array_like of float
            Probability that an rv will be drawn from the returned range.
            Each value should be in the range [0, 1].
        arg1, arg2, ... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            location parameter, Default is 0.
        scale : array_like, optional
            scale parameter, Default is 1.
        Returns
        -------
        a, b : ndarray of float
            end-points of range that contain ``100 * alpha %`` of the rv's
            possible values.
        """
        alpha = asarray(alpha)
        if any((alpha > 1) | (alpha < 0)):
            raise ValueError("alpha must be between 0 and 1 inclusive")
        # split the leftover probability equally between the two tails
        q1 = (1.0-alpha)/2
        q2 = (1.0+alpha)/2
        a = self.ppf(q1, *args, **kwds)
        b = self.ppf(q2, *args, **kwds)
        return a, b
## continuous random variables: implement maybe later
##
## hf --- Hazard Function (PDF / SF)
## chf --- Cumulative hazard function (-log(SF))
## psf --- Probability sparsity function (reciprocal of the pdf) in
## units of percent-point-function (as a function of q).
## Also, the derivative of the percent-point function.
class rv_continuous(rv_generic):
"""
A generic continuous random variable class meant for subclassing.
`rv_continuous` is a base class to construct specific distribution classes
and instances from for continuous random variables. It cannot be used
directly as a distribution.
Parameters
----------
momtype : int, optional
The type of generic moment calculation to use: 0 for pdf, 1 (default)
for ppf.
a : float, optional
Lower bound of the support of the distribution, default is minus
infinity.
b : float, optional
Upper bound of the support of the distribution, default is plus
infinity.
xtol : float, optional
The tolerance for fixed point calculation for generic ppf.
badvalue : object, optional
The value in a result arrays that indicates a value that for which
some argument restriction is violated, default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the two shape arguments for all
its methods.
extradoc : str, optional, deprecated
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
Methods
-------
rvs(<shape(s)>, loc=0, scale=1, size=1)
random variates
pdf(x, <shape(s)>, loc=0, scale=1)
probability density function
logpdf(x, <shape(s)>, loc=0, scale=1)
log of the probability density function
cdf(x, <shape(s)>, loc=0, scale=1)
cumulative density function
logcdf(x, <shape(s)>, loc=0, scale=1)
log of the cumulative density function
sf(x, <shape(s)>, loc=0, scale=1)
survival function (1-cdf --- sometimes more accurate)
logsf(x, <shape(s)>, loc=0, scale=1)
log of the survival function
ppf(q, <shape(s)>, loc=0, scale=1)
percent point function (inverse of cdf --- quantiles)
isf(q, <shape(s)>, loc=0, scale=1)
inverse survival function (inverse of sf)
moment(n, <shape(s)>, loc=0, scale=1)
non-central n-th moment of the distribution. May not work for array
arguments.
stats(<shape(s)>, loc=0, scale=1, moments='mv')
mean('m'), variance('v'), skew('s'), and/or kurtosis('k')
entropy(<shape(s)>, loc=0, scale=1)
(differential) entropy of the RV.
fit(data, <shape(s)>, loc=0, scale=1)
Parameter estimates for generic data
expect(func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds)
Expected value of a function with respect to the distribution.
Additional kwd arguments passed to integrate.quad
median(<shape(s)>, loc=0, scale=1)
Median of the distribution.
mean(<shape(s)>, loc=0, scale=1)
Mean of the distribution.
std(<shape(s)>, loc=0, scale=1)
Standard deviation of the distribution.
var(<shape(s)>, loc=0, scale=1)
Variance of the distribution.
interval(alpha, <shape(s)>, loc=0, scale=1)
Interval that with `alpha` percent probability contains a random
realization of this distribution.
__call__(<shape(s)>, loc=0, scale=1)
Calling a distribution instance creates a frozen RV object with the
same methods but holding the given shape, location, and scale fixed.
See Notes section.
**Parameters for Methods**
x : array_like
quantiles
q : array_like
lower or upper tail probability
<shape(s)> : array_like
shape parameters
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
size : int or tuple of ints, optional
shape of random variates (default computed from input arguments )
moments : string, optional
composed of letters ['mvsk'] specifying which moments to compute where
'm' = mean, 'v' = variance, 's' = (Fisher's) skew and
'k' = (Fisher's) kurtosis. (default='mv')
n : int
order of moment to calculate in method moments
Notes
-----
**Methods that can be overwritten by subclasses**
::
_rvs
_pdf
_cdf
_sf
_ppf
_isf
_stats
_munp
_entropy
_argcheck
There are additional (internal and private) generic methods that can
be useful for cross-checking and for debugging, but might work in all
cases when directly called.
**Frozen Distribution**
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = generic(<shape(s)>, loc=0, scale=1)
frozen RV object with the same methods but holding the given shape,
location, and scale fixed
**Subclassing**
New random variables can be defined by subclassing rv_continuous class
and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized
to location 0 and scale 1) which will be given clean arguments (in between
a and b) and passing the argument check method.
If positive argument checking is not correct for your RV
then you will also need to re-define the ``_argcheck`` method.
Correct, but potentially slow defaults exist for the remaining
methods but for speed and/or accuracy you can over-ride::
_logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf
Rarely would you override ``_isf``, ``_sf`` or ``_logsf``, but you could.
Statistics are computed using numerical integration by default.
For speed you can redefine this using ``_stats``:
- take shape parameters and return mu, mu2, g1, g2
- If you can't compute one of these, return it as None
- Can also be defined with a keyword argument ``moments=<str>``,
where <str> is a string composed of 'm', 'v', 's',
and/or 'k'. Only the components appearing in string
should be computed and returned in the order 'm', 'v',
's', or 'k' with missing values returned as None.
Alternatively, you can override ``_munp``, which takes n and shape
parameters and returns the nth non-central moment of the distribution.
A note on ``shapes``: subclasses need not specify them explicitly. In this
case, the `shapes` will be automatically deduced from the signatures of the
overridden methods.
If, for some reason, you prefer to avoid relying on introspection, you can
specify ``shapes`` explicitly as an argument to the instance constructor.
Examples
--------
To create a new Gaussian distribution, we would do the following::
class gaussian_gen(rv_continuous):
"Gaussian distribution"
def _pdf(self, x):
...
...
"""
def __init__(self, momtype=1, a=None, b=None, xtol=1e-14,
badvalue=None, name=None, longname=None,
shapes=None, extradoc=None):
super(rv_continuous, self).__init__()
# save the ctor parameters, cf generic freeze
self._ctor_param = dict(
momtype=momtype, a=a, b=b, xtol=xtol,
badvalue=badvalue, name=name, longname=longname,
shapes=shapes, extradoc=extradoc)
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.name = name
self.a = a
self.b = b
if a is None:
self.a = -inf
if b is None:
self.b = inf
self.xtol = xtol
self._size = 1
self.moment_type = momtype
self.shapes = shapes
self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf],
locscale_in='loc=0, scale=1',
locscale_out='loc, scale')
# nin correction
self._ppfvec = vectorize(self._ppf_single, otypes='d')
self._ppfvec.nin = self.numargs + 1
self.vecentropy = vectorize(self._entropy, otypes='d')
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self._cdfvec.nin = self.numargs + 1
# backwards compat. these were removed in 0.14.0, put back but
# deprecated in 0.14.1:
self.vecfunc = np.deprecate(self._ppfvec, "vecfunc")
self.veccdf = np.deprecate(self._cdfvec, "veccdf")
self.extradoc = extradoc
if momtype == 0:
self.generic_moment = vectorize(self._mom0_sc, otypes='d')
else:
self.generic_moment = vectorize(self._mom1_sc, otypes='d')
# Because of the *args argument of _mom0_sc, vectorize cannot count the
# number of arguments correctly.
self.generic_moment.nin = self.numargs + 1
if longname is None:
if name[0] in ['aeiouAEIOU']:
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc)
else:
dct = dict(distcont)
self._construct_doc(docdict, dct.get(self.name))
def _construct_default_doc(self, longname=None, extradoc=None):
"""Construct instance docstring from the default template."""
if longname is None:
longname = 'A'
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s continuous random variable.' % longname,
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc(docdict)
def _ppf_to_solve(self, x, q, *args):
return self.cdf(*(x, )+args)-q
def _ppf_single(self, q, *args):
left = right = None
if self.a > -np.inf:
left = self.a
if self.b < np.inf:
right = self.b
factor = 10.
if not left: # i.e. self.a = -inf
left = -1.*factor
while self._ppf_to_solve(left, q, *args) > 0.:
right = left
left *= factor
# left is now such that cdf(left) < q
if not right: # i.e. self.b = inf
right = factor
while self._ppf_to_solve(right, q, *args) < 0.:
left = right
right *= factor
# right is now such that cdf(right) > q
return optimize.brentq(self._ppf_to_solve,
left, right, args=(q,)+args, xtol=self.xtol)
# moment from definition
def _mom_integ0(self, x, m, *args):
return x**m * self.pdf(x, *args)
def _mom0_sc(self, m, *args):
return integrate.quad(self._mom_integ0, self.a, self.b,
args=(m,)+args)[0]
# moment calculated using ppf
def _mom_integ1(self, q, m, *args):
return (self.ppf(q, *args))**m
def _mom1_sc(self, m, *args):
return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]
    def _pdf(self, x, *args):
        # default density: 5-point numerical derivative of the cdf
        return derivative(self._cdf, x, dx=1e-5, args=args, order=5)
## Could also define any of these
def _logpdf(self, x, *args):
return log(self._pdf(x, *args))
def _cdf_single(self, x, *args):
return integrate.quad(self._pdf, self.a, x, args=args)[0]
    def _cdf(self, x, *args):
        # vectorized wrapper around the single-point numerical cdf
        return self._cdfvec(x, *args)
## generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined
## in rv_generic
    def pdf(self, x, *args, **kwds):
        """
        Probability density function at x of the given RV.
        Parameters
        ----------
        x : array_like
            quantiles
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)
        Returns
        -------
        pdf : ndarray
            Probability density function evaluated at x
        """
        args, loc, scale = self._parse_args(*args, **kwds)
        x, loc, scale = map(asarray, (x, loc, scale))
        args = tuple(map(asarray, args))
        # standardize: evaluate the shape-only _pdf at (x - loc) / scale
        x = asarray((x-loc)*1.0/scale)
        cond0 = self._argcheck(*args) & (scale > 0)
        cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
        cond = cond0 & cond1
        output = zeros(shape(cond), 'd')
        # invalid arguments (or NaN input) are flagged with badvalue
        putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
        if any(cond):
            goodargs = argsreduce(cond, *((x,)+args+(scale,)))
            scale, goodargs = goodargs[-1], goodargs[:-1]
            # divide by scale: change-of-variables factor for the density
            place(output, cond, self._pdf(*goodargs) / scale)
        if output.ndim == 0:
            return output[()]
        return output
def logpdf(self, x, *args, **kwds):
    """
    Log of the probability density function at x of the given RV.

    This uses a more numerically accurate calculation if available.

    Parameters
    ----------
    x : array_like
        quantiles
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information)
    loc : array_like, optional
        location parameter (default=0)
    scale : array_like, optional
        scale parameter (default=1)

    Returns
    -------
    logpdf : array_like
        Log of the probability density function evaluated at x
    """
    args, loc, scale = self._parse_args(*args, **kwds)
    x, loc, scale = map(asarray, (x, loc, scale))
    args = tuple(map(asarray, args))
    # Standardize to the distribution's canonical coordinates.
    x = asarray((x-loc)*1.0/scale)
    cond0 = self._argcheck(*args) & (scale > 0)
    cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
    cond = cond0 & cond1
    # Fill with -inf: log of the zero density outside the support.
    output = empty(shape(cond), 'd')
    output.fill(NINF)
    putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
    if any(cond):
        goodargs = argsreduce(cond, *((x,)+args+(scale,)))
        scale, goodargs = goodargs[-1], goodargs[:-1]
        # log of the change-of-variables factor: subtract log(scale).
        place(output, cond, self._logpdf(*goodargs) - log(scale))
    if output.ndim == 0:
        return output[()]
    return output
def cdf(self, x, *args, **kwds):
    """
    Cumulative distribution function of the given RV.

    Parameters
    ----------
    x : array_like
        quantiles
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information)
    loc : array_like, optional
        location parameter (default=0)
    scale : array_like, optional
        scale parameter (default=1)

    Returns
    -------
    cdf : ndarray
        Cumulative distribution function evaluated at `x`
    """
    args, loc, scale = self._parse_args(*args, **kwds)
    x, loc, scale = map(asarray, (x, loc, scale))
    args = tuple(map(asarray, args))
    # Standardize to the distribution's canonical coordinates.
    x = (x-loc)*1.0/scale
    cond0 = self._argcheck(*args) & (scale > 0)
    # cond1: strictly inside the support -> compute _cdf.
    cond1 = (scale > 0) & (x > self.a) & (x < self.b)
    # cond2: at/above the upper bound -> cdf is exactly 1.
    cond2 = (x >= self.b) & cond0
    cond = cond0 & cond1
    output = zeros(shape(cond), 'd')
    place(output, (1-cond0)+np.isnan(x), self.badvalue)
    place(output, cond2, 1.0)
    if any(cond):  # call only if at least 1 entry
        goodargs = argsreduce(cond, *((x,)+args))
        place(output, cond, self._cdf(*goodargs))
    if output.ndim == 0:
        return output[()]
    return output
def logcdf(self, x, *args, **kwds):
    """
    Log of the cumulative distribution function at x of the given RV.

    Parameters
    ----------
    x : array_like
        quantiles
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information)
    loc : array_like, optional
        location parameter (default=0)
    scale : array_like, optional
        scale parameter (default=1)

    Returns
    -------
    logcdf : array_like
        Log of the cumulative distribution function evaluated at x
    """
    args, loc, scale = self._parse_args(*args, **kwds)
    x, loc, scale = map(asarray, (x, loc, scale))
    args = tuple(map(asarray, args))
    x = (x-loc)*1.0/scale
    cond0 = self._argcheck(*args) & (scale > 0)
    cond1 = (scale > 0) & (x > self.a) & (x < self.b)
    # At/above the upper bound the cdf is 1, so log-cdf is 0.
    cond2 = (x >= self.b) & cond0
    cond = cond0 & cond1
    # Fill with -inf: log of cdf == 0 below the support.
    output = empty(shape(cond), 'd')
    output.fill(NINF)
    # (cond1 == cond1) broadcasts the mask to the output shape.
    place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue)
    place(output, cond2, 0.0)
    if any(cond):  # call only if at least 1 entry
        goodargs = argsreduce(cond, *((x,)+args))
        place(output, cond, self._logcdf(*goodargs))
    if output.ndim == 0:
        return output[()]
    return output
def sf(self, x, *args, **kwds):
    """
    Survival function (1-cdf) at x of the given RV.

    Parameters
    ----------
    x : array_like
        quantiles
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information)
    loc : array_like, optional
        location parameter (default=0)
    scale : array_like, optional
        scale parameter (default=1)

    Returns
    -------
    sf : array_like
        Survival function evaluated at x
    """
    args, loc, scale = self._parse_args(*args, **kwds)
    x, loc, scale = map(asarray, (x, loc, scale))
    args = tuple(map(asarray, args))
    x = (x-loc)*1.0/scale
    cond0 = self._argcheck(*args) & (scale > 0)
    cond1 = (scale > 0) & (x > self.a) & (x < self.b)
    # At/below the lower bound the survival function is exactly 1.
    cond2 = cond0 & (x <= self.a)
    cond = cond0 & cond1
    output = zeros(shape(cond), 'd')
    place(output, (1-cond0)+np.isnan(x), self.badvalue)
    place(output, cond2, 1.0)
    if any(cond):
        goodargs = argsreduce(cond, *((x,)+args))
        place(output, cond, self._sf(*goodargs))
    if output.ndim == 0:
        return output[()]
    return output
def logsf(self, x, *args, **kwds):
    """
    Log of the survival function of the given RV.

    Returns the log of the "survival function," defined as (1 - `cdf`),
    evaluated at `x`.

    Parameters
    ----------
    x : array_like
        quantiles
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information)
    loc : array_like, optional
        location parameter (default=0)
    scale : array_like, optional
        scale parameter (default=1)

    Returns
    -------
    logsf : ndarray
        Log of the survival function evaluated at `x`.
    """
    args, loc, scale = self._parse_args(*args, **kwds)
    x, loc, scale = map(asarray, (x, loc, scale))
    args = tuple(map(asarray, args))
    x = (x-loc)*1.0/scale
    cond0 = self._argcheck(*args) & (scale > 0)
    cond1 = (scale > 0) & (x > self.a) & (x < self.b)
    # At/below the lower bound sf == 1, so log-sf is 0.
    cond2 = cond0 & (x <= self.a)
    cond = cond0 & cond1
    # Fill with -inf: log of sf == 0 above the support.
    output = empty(shape(cond), 'd')
    output.fill(NINF)
    place(output, (1-cond0)+np.isnan(x), self.badvalue)
    place(output, cond2, 0.0)
    if any(cond):
        goodargs = argsreduce(cond, *((x,)+args))
        place(output, cond, self._logsf(*goodargs))
    if output.ndim == 0:
        return output[()]
    return output
def ppf(self, q, *args, **kwds):
    """
    Percent point function (inverse of cdf) at q of the given RV.

    Parameters
    ----------
    q : array_like
        lower tail probability
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information)
    loc : array_like, optional
        location parameter (default=0)
    scale : array_like, optional
        scale parameter (default=1)

    Returns
    -------
    x : array_like
        quantile corresponding to the lower tail probability q.
    """
    args, loc, scale = self._parse_args(*args, **kwds)
    q, loc, scale = map(asarray, (q, loc, scale))
    args = tuple(map(asarray, args))
    # (loc == loc) is False for NaN loc, filtering those entries out.
    cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
    cond1 = (0 < q) & (q < 1)
    cond2 = cond0 & (q == 0)   # q == 0 -> lower support endpoint
    cond3 = cond0 & (q == 1)   # q == 1 -> upper support endpoint
    cond = cond0 & cond1
    output = valarray(shape(cond), value=self.badvalue)

    # Endpoints of the support mapped back to the (loc, scale) coordinates.
    lower_bound = self.a * scale + loc
    upper_bound = self.b * scale + loc
    place(output, cond2, argsreduce(cond2, lower_bound)[0])
    place(output, cond3, argsreduce(cond3, upper_bound)[0])
    if any(cond):  # call only if at least 1 entry
        goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
        scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
        # Undo the standardization on the way out.
        place(output, cond, self._ppf(*goodargs) * scale + loc)
    if output.ndim == 0:
        return output[()]
    return output
def isf(self, q, *args, **kwds):
    """
    Inverse survival function at q of the given RV.

    Parameters
    ----------
    q : array_like
        upper tail probability
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information)
    loc : array_like, optional
        location parameter (default=0)
    scale : array_like, optional
        scale parameter (default=1)

    Returns
    -------
    x : ndarray or scalar
        Quantile corresponding to the upper tail probability q.
    """
    args, loc, scale = self._parse_args(*args, **kwds)
    q, loc, scale = map(asarray, (q, loc, scale))
    args = tuple(map(asarray, args))
    # (loc == loc) is False for NaN loc, filtering those entries out.
    cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
    cond1 = (0 < q) & (q < 1)
    cond2 = cond0 & (q == 1)   # sf == 1 at the lower support endpoint
    cond3 = cond0 & (q == 0)   # sf == 0 at the upper support endpoint
    cond = cond0 & cond1
    output = valarray(shape(cond), value=self.badvalue)

    lower_bound = self.a * scale + loc
    upper_bound = self.b * scale + loc
    place(output, cond2, argsreduce(cond2, lower_bound)[0])
    place(output, cond3, argsreduce(cond3, upper_bound)[0])
    if any(cond):
        goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
        scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
        # Undo the standardization on the way out.
        place(output, cond, self._isf(*goodargs) * scale + loc)
    if output.ndim == 0:
        return output[()]
    return output
def _nnlf(self, x, *args):
return -sum(self._logpdf(x, *args), axis=0)
def nnlf(self, theta, x):
    """Return the negative loglikelihood function.

    Notes
    -----
    This is ``-sum(log pdf(x, theta), axis=0)`` where theta are the
    parameters (including loc and scale).
    """
    try:
        loc, scale = theta[-2], theta[-1]
        args = tuple(theta[:-2])
    except IndexError:
        raise ValueError("Not enough input arguments.")
    if not self._argcheck(*args) or scale <= 0:
        return inf
    x = asarray((x - loc) / scale)
    # Any observation outside the support makes the likelihood zero.
    out_of_support = (x <= self.a) | (self.b <= x)
    if any(out_of_support):
        return inf
    # N*log(scale) accounts for the Jacobian of the standardization.
    return self._nnlf(x, *args) + len(x) * log(scale)
def _penalized_nnlf(self, theta, x):
    ''' Return negative loglikelihood function,
    i.e., - sum (log pdf(x, theta), axis=0)
    where theta are the parameters (including loc and scale).

    Unlike ``nnlf``, out-of-support observations do not make the result
    infinite; they are dropped and charged a large finite penalty instead,
    which keeps the objective usable by numerical optimizers in ``fit``.
    '''
    try:
        loc = theta[-2]
        scale = theta[-1]
        args = tuple(theta[:-2])
    except IndexError:
        raise ValueError("Not enough input arguments.")
    if not self._argcheck(*args) or scale <= 0:
        return inf
    x = asarray((x-loc) / scale)
    # Penalty weight: roughly the log of the largest representable float.
    loginf = log(_XMAX)
    if np.isneginf(self.a).all() and np.isinf(self.b).all():
        # Unbounded support: no observation can fall outside it.
        Nbad = 0
    else:
        cond0 = (x <= self.a) | (self.b <= x)
        Nbad = sum(cond0)
        if Nbad > 0:
            # Drop out-of-support points; the penalty term below accounts
            # for them instead of returning inf outright.
            x = argsreduce(~cond0, x)[0]
    N = len(x)
    return self._nnlf(x, *args) + N*log(scale) + Nbad * 100.0 * loginf
# return starting point for fit (shape arguments + loc + scale)
def _fitstart(self, data, args=None):
if args is None:
args = (1.0,)*self.numargs
return args + self.fit_loc_scale(data, *args)
# Return the (possibly reduced) function to optimize in order to find MLE
# estimates for the .fit method
def _reduce_func(self, args, kwds):
    """Build the objective for ``.fit``, honoring fixed parameters.

    Keywords ``f0``..``f(n-1)`` fix shape parameters; ``floc`` and
    ``fscale`` fix location and scale.  Returns ``(x0, func, restore,
    args)``: the free-parameter starting vector, the function to
    minimize, a callable that re-inserts fixed values into a full
    parameter vector (or None when nothing is fixed), and the full
    parameter list.
    """
    args = list(args)
    Nargs = len(args)
    fixedn = []
    index = list(range(Nargs))
    # args is shapes + [loc, scale], so the last two names are floc/fscale.
    names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']
    x0 = []
    for n, key in zip(index, names):
        if key in kwds:
            fixedn.append(n)
            args[n] = kwds[key]
        else:
            x0.append(args[n])
    if len(fixedn) == 0:
        # Nothing fixed: optimize the penalized NLL over all parameters.
        func = self._penalized_nnlf
        restore = None
    else:
        if len(fixedn) == len(index):
            raise ValueError(
                "All parameters fixed. There is nothing to optimize.")

        def restore(args, theta):
            # Replace with theta for all numbers not in fixedn
            # This allows the non-fixed values to vary, but
            # we still call self.nnlf with all parameters.
            i = 0
            for n in range(Nargs):
                if n not in fixedn:
                    args[n] = theta[i]
                    i += 1
            return args

        def func(theta, x):
            # args[:] copies so the captured full vector is not mutated.
            newtheta = restore(args[:], theta)
            return self._penalized_nnlf(newtheta, x)

    return x0, func, restore, args
def fit(self, data, *args, **kwds):
    """
    Return MLEs for shape, location, and scale parameters from data.

    MLE stands for Maximum Likelihood Estimate.  Starting estimates for
    the fit are given by input arguments; for any arguments not provided
    with starting estimates, ``self._fitstart(data)`` is called to generate
    such.

    One can hold some parameters fixed to specific values by passing in
    keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)
    and ``floc`` and ``fscale`` (for location and scale parameters,
    respectively).

    Parameters
    ----------
    data : array_like
        Data to use in calculating the MLEs.
    args : floats, optional
        Starting value(s) for any shape-characterizing arguments (those not
        provided will be determined by a call to ``_fitstart(data)``).
        No default value.
    kwds : floats, optional
        Starting values for the location and scale parameters; no default.
        Special keyword arguments are recognized as holding certain
        parameters fixed:

        f0...fn : hold respective shape parameters fixed.
        floc : hold location parameter fixed to specified value.
        fscale : hold scale parameter fixed to specified value.
        optimizer : The optimizer to use.  The optimizer must take func,
                    and starting position as the first two arguments,
                    plus args (for extra arguments to pass to the
                    function to be optimized) and disp=0 to suppress
                    output as keyword arguments.

    Returns
    -------
    shape, loc, scale : tuple of floats
        MLEs for any shape statistics, followed by those for location and
        scale.

    Notes
    -----
    This fit is computed by maximizing a log-likelihood function, with
    penalty applied for samples outside of range of the distribution. The
    returned answer is not guaranteed to be the globally optimal MLE, it
    may only be locally optimal, or the optimization may fail altogether.
    """
    Narg = len(args)
    if Narg > self.numargs:
        raise TypeError("Too many input arguments.")

    start = [None]*2
    if (Narg < self.numargs) or not ('loc' in kwds and
                                     'scale' in kwds):
        # get distribution specific starting locations
        start = self._fitstart(data)
        args += start[Narg:-2]
    loc = kwds.get('loc', start[-2])
    scale = kwds.get('scale', start[-1])
    args += (loc, scale)
    # Split parameters into free (x0) and fixed; func optimizes the free ones.
    x0, func, restore, args = self._reduce_func(args, kwds)

    optimizer = kwds.get('optimizer', optimize.fmin)
    # convert string to function in scipy.optimize
    if not callable(optimizer) and isinstance(optimizer, string_types):
        if not optimizer.startswith('fmin_'):
            optimizer = "fmin_"+optimizer
        if optimizer == 'fmin_':
            optimizer = 'fmin'
        try:
            optimizer = getattr(optimize, optimizer)
        except AttributeError:
            raise ValueError("%s is not a valid optimizer" % optimizer)
    vals = optimizer(func, x0, args=(ravel(data),), disp=0)
    if restore is not None:
        # Re-insert the fixed parameters into the optimized vector.
        vals = restore(args, vals)
    vals = tuple(vals)
    return vals
def fit_loc_scale(self, data, *args):
    """
    Estimate loc and scale parameters from data using 1st and 2nd moments.

    Parameters
    ----------
    data : array_like
        Data to fit.
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information).

    Returns
    -------
    Lhat : float
        Estimated location parameter for the data.
    Shat : float
        Estimated scale parameter for the data.
    """
    # Theoretical mean and variance for the standard distribution.
    mu, mu2 = self.stats(*args, moments='mv')
    arr = asarray(data)
    muhat = arr.mean()
    mu2hat = arr.var()
    # Match the sample moments to the theoretical ones.
    Shat = sqrt(mu2hat / mu2)
    Lhat = muhat - Shat * mu
    # Fall back to defaults when the moment estimates are degenerate.
    if not np.isfinite(Lhat):
        Lhat = 0
    if not (np.isfinite(Shat) and Shat > 0):
        Shat = 1
    return Lhat, Shat
@np.deprecate
def est_loc_scale(self, data, *args):
    """Deprecated alias; use ``self.fit_loc_scale(data)`` instead.
    """
    return self.fit_loc_scale(data, *args)
def _entropy(self, *args):
    """Generic differential entropy: integrate entr(pdf(x)) over the support."""
    def integ(x):
        val = self._pdf(x, *args)
        return entr(val)

    # upper limit is often inf, so suppress warnings when integrating
    olderr = np.seterr(over='ignore')
    h = integrate.quad(integ, self.a, self.b)[0]
    np.seterr(**olderr)

    if not np.isnan(h):
        return h
    else:
        # try with different limits if integration problems
        # (replace an infinite bound with the 1e-10 / 1-1e-10 quantile).
        low, upp = self.ppf([1e-10, 1. - 1e-10], *args)
        if np.isinf(self.b):
            upper = upp
        else:
            upper = self.b
        if np.isinf(self.a):
            lower = low
        else:
            lower = self.a
        return integrate.quad(integ, lower, upper)[0]
def entropy(self, *args, **kwds):
    """
    Differential entropy of the RV.

    Parameters
    ----------
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information).
    loc : array_like, optional
        Location parameter (default=0).
    scale : array_like, optional
        Scale parameter (default=1).
    """
    args, loc, scale = self._parse_args(*args, **kwds)
    args = tuple(map(asarray, args))
    # (loc == loc) is False for NaN loc, filtering those entries out.
    cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
    output = zeros(shape(cond0), 'd')
    place(output, (1-cond0), self.badvalue)
    goodargs = argsreduce(cond0, *args)
    # np.vectorize doesn't work when numargs == 0 in numpy 1.5.1
    if self.numargs == 0:
        # Entropy of the scaled RV is that of the standard one plus
        # log(scale); loc does not affect differential entropy.
        place(output, cond0, self._entropy() + log(scale))
    else:
        place(output, cond0, self.vecentropy(*goodargs) + log(scale))
    return output
def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
           conditional=False, **kwds):
    """Calculate expected value of a function with respect to the
    distribution.

    The expected value of a function ``f(x)`` with respect to a
    distribution ``dist`` is defined as::

                ubound
        E[x] = Integral(f(x) * dist.pdf(x))
                lbound

    Parameters
    ----------
    func : callable, optional
        Function for which integral is calculated. Takes only one argument.
        The default is the identity mapping f(x) = x.
    args : tuple, optional
        Argument (parameters) of the distribution.
    lb, ub : scalar, optional
        Lower and upper bound for integration. default is set to the
        support of the distribution.
    conditional : bool, optional
        If True, the integral is corrected by the conditional probability
        of the integration interval. The return value is the expectation
        of the function, conditional on being in the given interval.
        Default is False.

    Additional keyword arguments are passed to the integration routine.

    Returns
    -------
    expect : float
        The calculated expected value.

    Notes
    -----
    The integration behavior of this function is inherited from
    `integrate.quad`.
    """
    lockwds = {'loc': loc,
               'scale': scale}
    self._argcheck(*args)
    # Integrand: f(x) * pdf(x); default f is the identity.
    if func is None:
        def fun(x, *args):
            return x * self.pdf(x, *args, **lockwds)
    else:
        def fun(x, *args):
            return func(x) * self.pdf(x, *args, **lockwds)
    # Default integration bounds: the support, shifted/scaled.
    if lb is None:
        lb = loc + self.a * scale
    if ub is None:
        ub = loc + self.b * scale
    if conditional:
        # Probability mass of [lb, ub], used to renormalize the result.
        invfac = (self.sf(lb, *args, **lockwds)
                  - self.sf(ub, *args, **lockwds))
    else:
        invfac = 1.0
    kwds['args'] = args
    # Silence floating point warnings from integration.
    olderr = np.seterr(all='ignore')
    vals = integrate.quad(fun, lb, ub, **kwds)[0] / invfac
    np.seterr(**olderr)
    return vals
## Handlers for generic case where xk and pk are given
## The _drv prefix probably means discrete random variable.
def _drv_pmf(self, xk, *args):
try:
return self.P[xk]
except KeyError:
return 0.0
def _drv_cdf(self, xk, *args):
indx = argmax((self.xk > xk), axis=-1)-1
return self.F[self.xk[indx]]
def _drv_ppf(self, q, *args):
indx = argmax((self.qvals >= q), axis=-1)
return self.Finv[self.qvals[indx]]
def _drv_nonzero(self, k, *args):
return 1
def _drv_moment(self, n, *args):
n = asarray(n)
return sum(self.xk**n[np.newaxis, ...] * self.pk, axis=0)
def _drv_moment_gen(self, t, *args):
t = asarray(t)
return sum(exp(self.xk * t[np.newaxis, ...]) * self.pk, axis=0)
def _drv2_moment(self, n, *args):
    """Non-central moment of discrete distribution.

    Sums ``pos**n * pmf(pos)`` over the lattice, walking upward from
    max(0, a) and, for supports extending below zero, downward from
    -inc, stopping when the terms fall below ``moment_tol`` past a
    safety limit (handles infinite supports).
    """
    # many changes, originally not even a return
    tot = 0.0
    diff = 1e100
    # pos = self.a
    pos = max(0.0, 1.0*self.a)
    count = 0
    # handle cases with infinite support
    ulimit = max(1000, (min(self.b, 1000) + max(self.a, -1000))/2.0)
    llimit = min(-1000, (min(self.b, 1000) + max(self.a, -1000))/2.0)

    # Accumulate upward; stop only after both the hard limit is passed
    # and the terms are below tolerance.
    while (pos <= self.b) and ((pos <= ulimit) or
                               (diff > self.moment_tol)):
        diff = np.power(pos, n) * self.pmf(pos, *args)
        # use pmf because _pmf does not check support in randint and there
        # might be problems ? with correct self.a, self.b at this stage
        tot += diff
        pos += self.inc
        count += 1

    if self.a < 0:  # handle case when self.a = -inf
        # Accumulate downward over the negative part of the support.
        diff = 1e100
        pos = -self.inc
        while (pos >= self.a) and ((pos >= llimit) or
                                   (diff > self.moment_tol)):
            diff = np.power(pos, n) * self.pmf(pos, *args)
            # using pmf instead of _pmf, see above
            tot += diff
            pos -= self.inc
            count += 1
    return tot
def _drv2_ppfsingle(self, q, *args):  # Use basic bisection algorithm
    """Scalar ppf: bracket q with cdf values, then bisect on the lattice."""
    b = self.b
    a = self.a
    if isinf(b):            # Be sure ending point is > q
        b = int(max(100*q, 10))
        while 1:
            if b >= self.b:
                # NOTE(review): unreachable when self.b == inf, since
                # b >= inf is never true -- appears to be dead code.
                qb = 1.0
                break
            qb = self._cdf(b, *args)
            if (qb < q):
                b += 10
            else:
                break
    else:
        qb = 1.0
    if isinf(a):            # be sure starting point < q
        a = int(min(-100*q, -10))
        while 1:
            if a <= self.a:
                # NOTE(review): unreachable when self.a == -inf (a <= -inf
                # is never true); also assigns qb, not qa -- confirm
                # intent if this branch ever becomes reachable.
                qb = 0.0
                break
            qa = self._cdf(a, *args)
            if (qa > q):
                a -= 10
            else:
                break
    else:
        qa = self._cdf(a, *args)

    # Bisect until the bracket collapses to adjacent lattice points.
    while 1:
        if (qa == q):
            return a
        if (qb == q):
            return b
        if b <= a+1:
            # testcase: return wrong number at lower index
            # python -c "from scipy.stats import zipf;print zipf.ppf(0.01, 2)" wrong
            # python -c "from scipy.stats import zipf;print zipf.ppf([0.01, 0.61, 0.77, 0.83], 2)"
            # python -c "from scipy.stats import logser;print logser.ppf([0.1, 0.66, 0.86, 0.93], 0.6)"
            if qa > q:
                return a
            else:
                return b
        c = int((a+b)/2.0)
        qc = self._cdf(c, *args)
        if (qc < q):
            if a != c:
                a = c
            else:
                # Bracket failed to shrink -- avoid spinning forever.
                raise RuntimeError('updating stopped, endless loop')
            qa = qc
        elif (qc > q):
            if b != c:
                b = c
            else:
                raise RuntimeError('updating stopped, endless loop')
            qb = qc
        else:
            return c
def entropy(pk, qk=None, base=None):
    """Calculate the entropy of a distribution for given probability values.

    If only probabilities `pk` are given, the entropy is calculated as
    ``S = -sum(pk * log(pk), axis=0)``.

    If `qk` is not None, then compute the Kullback-Leibler divergence
    ``S = sum(pk * log(pk / qk), axis=0)``.

    This routine will normalize `pk` and `qk` if they don't sum to 1.

    Parameters
    ----------
    pk : sequence
        Defines the (discrete) distribution. ``pk[i]`` is the (possibly
        unnormalized) probability of event ``i``.
    qk : sequence, optional
        Sequence against which the relative entropy is computed. Should be in
        the same format as `pk`.
    base : float, optional
        The logarithmic base to use, defaults to ``e`` (natural logarithm).

    Returns
    -------
    S : float
        The calculated entropy.
    """
    pk = asarray(pk)
    # Normalize so the probabilities sum to one.
    pk = 1.0 * pk / sum(pk, axis=0)
    if qk is not None:
        qk = asarray(qk)
        if len(qk) != len(pk):
            raise ValueError("qk and pk must have same length.")
        qk = 1.0 * qk / sum(qk, axis=0)
        # Relative entropy (KL divergence) term-by-term.
        vec = kl_div(pk, qk)
    else:
        # Shannon entropy term-by-term: -p*log(p), with entr(0) == 0.
        vec = entr(pk)
    S = sum(vec, axis=0)
    return S if base is None else S / log(base)
# Must over-ride one of _pmf or _cdf or pass in
# x_k, p(x_k) lists in initialization
class rv_discrete(rv_generic):
"""
A generic discrete random variable class meant for subclassing.
`rv_discrete` is a base class to construct specific distribution classes
and instances for discrete random variables. It can also be used
to construct an arbitrary distribution defined by a list of support
points and the corresponding probabilities.
Parameters
----------
a : float, optional
Lower bound of the support of the distribution, default: 0
b : float, optional
Upper bound of the support of the distribution, default: plus infinity
moment_tol : float, optional
The tolerance for the generic calculation of moments
values : tuple of two array_like
(xk, pk) where xk are points (integers) with positive probability pk
with sum(pk) = 1
inc : integer
increment for the support of the distribution, default: 1
other values have not been tested
badvalue : object, optional
The value in (masked) arrays that indicates a value that should be
ignored.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the first two arguments for all
its methods.
extradoc : str, optional
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
Methods
-------
generic.rvs(<shape(s)>, loc=0, size=1)
random variates
generic.pmf(x, <shape(s)>, loc=0)
probability mass function
logpmf(x, <shape(s)>, loc=0)
log of the probability density function
generic.cdf(x, <shape(s)>, loc=0)
cumulative density function
generic.logcdf(x, <shape(s)>, loc=0)
log of the cumulative density function
generic.sf(x, <shape(s)>, loc=0)
survival function (1-cdf --- sometimes more accurate)
generic.logsf(x, <shape(s)>, loc=0, scale=1)
log of the survival function
generic.ppf(q, <shape(s)>, loc=0)
percent point function (inverse of cdf --- percentiles)
generic.isf(q, <shape(s)>, loc=0)
inverse survival function (inverse of sf)
generic.moment(n, <shape(s)>, loc=0)
non-central n-th moment of the distribution. May not work for array
arguments.
generic.stats(<shape(s)>, loc=0, moments='mv')
mean('m', axis=0), variance('v'), skew('s'), and/or kurtosis('k')
generic.entropy(<shape(s)>, loc=0)
entropy of the RV
generic.expect(func=None, args=(), loc=0, lb=None, ub=None,
conditional=False)
Expected value of a function with respect to the distribution.
Additional kwd arguments passed to integrate.quad
generic.median(<shape(s)>, loc=0)
Median of the distribution.
generic.mean(<shape(s)>, loc=0)
Mean of the distribution.
generic.std(<shape(s)>, loc=0)
Standard deviation of the distribution.
generic.var(<shape(s)>, loc=0)
Variance of the distribution.
generic.interval(alpha, <shape(s)>, loc=0)
Interval that with `alpha` percent probability contains a random
realization of this distribution.
generic(<shape(s)>, loc=0)
calling a distribution instance returns a frozen distribution
Notes
-----
You can construct an arbitrary discrete rv where ``P{X=xk} = pk``
by passing to the rv_discrete initialization method (through the
values=keyword) a tuple of sequences (xk, pk) which describes only those
values of X (xk) that occur with nonzero probability (pk).
To create a new discrete distribution, we would do the following::
class poisson_gen(rv_discrete):
# "Poisson distribution"
def _pmf(self, k, mu):
...
and create an instance::
poisson = poisson_gen(name="poisson",
longname='A Poisson')
The docstring can be created from a template.
Alternatively, the object may be called (as a function) to fix the shape
and location parameters returning a "frozen" discrete RV object::
myrv = generic(<shape(s)>, loc=0)
- frozen RV object with the same methods but holding the given
shape and location fixed.
A note on ``shapes``: subclasses need not specify them explicitly. In this
case, the `shapes` will be automatically deduced from the signatures of the
overridden methods.
If, for some reason, you prefer to avoid relying on introspection, you can
specify ``shapes`` explicitly as an argument to the instance constructor.
Examples
--------
Custom made discrete distribution:
>>> from scipy import stats
>>> xk = np.arange(7)
>>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2)
>>> custm = stats.rv_discrete(name='custm', values=(xk, pk))
>>>
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
>>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r')
>>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4)
>>> plt.show()
Random number generation:
>>> R = custm.rvs(size=100)
"""
def __init__(self, a=0, b=inf, name=None, badvalue=None,
             moment_tol=1e-8, values=None, inc=1, longname=None,
             shapes=None, extradoc=None):
    """Initialize a discrete distribution.

    Either tabulated ``values=(xk, pk)`` are supplied (table-driven
    pmf/cdf/ppf lookups are bound as instance methods), or the subclass
    provides ``_pmf``/``_cdf`` and generic vectorized machinery is set up.
    """
    super(rv_discrete, self).__init__()

    # Keep the constructor arguments so the instance can be re-created
    # with identical parameters (cf generic freeze).
    self._ctor_param = dict(
        a=a, b=b, name=name, badvalue=badvalue,
        moment_tol=moment_tol, values=values, inc=inc,
        longname=longname, shapes=shapes, extradoc=extradoc)

    if badvalue is None:
        badvalue = nan
    if name is None:
        name = 'Distribution'
    self.badvalue = badvalue
    self.a = a
    self.b = b
    self.name = name
    self.moment_tol = moment_tol
    self.inc = inc
    self._cdfvec = vectorize(self._cdf_single, otypes='d')
    self.return_integers = 1
    self.vecentropy = vectorize(self._entropy)
    self.shapes = shapes
    self.extradoc = extradoc

    if values is not None:
        # Explicit (xk, pk) support: sort it, build the pmf/cdf/inverse-cdf
        # lookup tables, and bind the table-driven _drv_* implementations.
        self.xk, self.pk = values
        self.return_integers = 0
        indx = argsort(ravel(self.xk))
        self.xk = take(ravel(self.xk), indx, 0)
        self.pk = take(ravel(self.pk), indx, 0)
        self.a = self.xk[0]
        self.b = self.xk[-1]
        self.P = dict(zip(self.xk, self.pk))
        self.qvals = np.cumsum(self.pk, axis=0)
        self.F = dict(zip(self.xk, self.qvals))
        decreasing_keys = sorted(self.F.keys(), reverse=True)
        self.Finv = dict((self.F[k], k) for k in decreasing_keys)
        self._ppf = instancemethod(vectorize(_drv_ppf, otypes='d'),
                                   self, rv_discrete)
        self._pmf = instancemethod(vectorize(_drv_pmf, otypes='d'),
                                   self, rv_discrete)
        self._cdf = instancemethod(vectorize(_drv_cdf, otypes='d'),
                                   self, rv_discrete)
        self._nonzero = instancemethod(_drv_nonzero, self, rv_discrete)
        self.generic_moment = instancemethod(_drv_moment,
                                             self, rv_discrete)
        self.moment_gen = instancemethod(_drv_moment_gen,
                                         self, rv_discrete)
        self._construct_argparser(meths_to_inspect=[_drv_pmf],
                                  locscale_in='loc=0',
                                  # scale=1 for discrete RVs
                                  locscale_out='loc, 1')
    else:
        self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf],
                                  locscale_in='loc=0',
                                  # scale=1 for discrete RVs
                                  locscale_out='loc, 1')

        # nin correction needs to be after we know numargs
        # correct nin for generic moment vectorization
        _vec_generic_moment = vectorize(_drv2_moment, otypes='d')
        _vec_generic_moment.nin = self.numargs + 2
        self.generic_moment = instancemethod(_vec_generic_moment,
                                             self, rv_discrete)
        # backwards compat. was removed in 0.14.0, put back but
        # deprecated in 0.14.1:
        self.vec_generic_moment = np.deprecate(_vec_generic_moment,
                                               "vec_generic_moment",
                                               "generic_moment")
        # correct nin for ppf vectorization
        _vppf = vectorize(_drv2_ppfsingle, otypes='d')
        _vppf.nin = self.numargs + 2  # +1 is for self
        self._ppfvec = instancemethod(_vppf,
                                      self, rv_discrete)

    # now that self.numargs is defined, we can adjust nin
    self._cdfvec.nin = self.numargs + 1

    # generate docstring for subclass instances
    if longname is None:
        # FIX: membership must be tested against the string of vowels,
        # not a one-element list ['aeiouAEIOU'] (a single character is
        # never equal to that whole string, so "An " was never chosen).
        if name[0] in 'aeiouAEIOU':
            hstr = "An "
        else:
            hstr = "A "
        longname = hstr + name

    if sys.flags.optimize < 2:
        # Skip adding docstrings if interpreter is run with -OO
        if self.__doc__ is None:
            self._construct_default_doc(longname=longname,
                                        extradoc=extradoc)
        else:
            dct = dict(distdiscrete)
            self._construct_doc(docdict_discrete, dct.get(self.name))

        # discrete RVs do not have the scale parameter, remove it
        self.__doc__ = self.__doc__.replace(
            '\n scale : array_like, '
            'optional\n scale parameter (default=1)', '')
def _construct_default_doc(self, longname=None, extradoc=None):
    """Build the default instance docstring from the rv_discrete template."""
    if extradoc is None:
        extradoc = ''
    if extradoc.startswith('\n\n'):
        # Drop the leading blank pair to avoid a double blank line.
        extradoc = extradoc[2:]
    parts = ['%s discrete random variable.' % longname,
             '\n\n%(before_notes)s\n', docheaders['notes'],
             extradoc, '\n%(example)s']
    self.__doc__ = ''.join(parts)
    self._construct_doc(docdict_discrete)
def _nonzero(self, k, *args):
return floor(k) == k
def _pmf(self, k, *args):
return self._cdf(k, *args) - self._cdf(k-1, *args)
def _logpmf(self, k, *args):
return log(self._pmf(k, *args))
def _cdf_single(self, k, *args):
m = arange(int(self.a), k+1)
return sum(self._pmf(m, *args), axis=0)
def _cdf(self, x, *args):
k = floor(x)
return self._cdfvec(k, *args)
# generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic
def rvs(self, *args, **kwargs):
    """
    Random variates of given type.

    Parameters
    ----------
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information).
    loc : array_like, optional
        Location parameter (default=0).
    size : int or tuple of ints, optional
        Defining number of random variates (default=1).  Note that `size`
        has to be given as keyword, not as positional argument.

    Returns
    -------
    rvs : ndarray or scalar
        Random variates of given `size`.
    """
    # Route through the shared rv_generic implementation, flagging the
    # discrete code path so results are integer-valued.
    kwargs['discrete'] = True
    return super(rv_discrete, self).rvs(*args, **kwargs)
def pmf(self, k, *args, **kwds):
    """
    Probability mass function at k of the given RV.

    Parameters
    ----------
    k : array_like
        quantiles
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information)
    loc : array_like, optional
        Location parameter (default=0).

    Returns
    -------
    pmf : array_like
        Probability mass function evaluated at k
    """
    # Discrete RVs have no scale; _parse_args still yields one, ignore it.
    args, loc, _ = self._parse_args(*args, **kwds)
    k, loc = map(asarray, (k, loc))
    args = tuple(map(asarray, args))
    # Shift to the distribution's canonical coordinates.
    k = asarray((k-loc))
    cond0 = self._argcheck(*args)
    # Valid points: inside [a, b] and on the lattice (_nonzero).
    cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args)
    cond = cond0 & cond1
    output = zeros(shape(cond), 'd')
    place(output, (1-cond0) + np.isnan(k), self.badvalue)
    if any(cond):
        goodargs = argsreduce(cond, *((k,)+args))
        # Clip to [0, 1] to guard against tiny numerical excursions.
        place(output, cond, np.clip(self._pmf(*goodargs), 0, 1))
    if output.ndim == 0:
        return output[()]
    return output
def logpmf(self, k, *args, **kwds):
    """
    Log of the probability mass function at k of the given RV.

    Parameters
    ----------
    k : array_like
        Quantiles.
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information).
    loc : array_like, optional
        Location parameter. Default is 0.

    Returns
    -------
    logpmf : array_like
        Log of the probability mass function evaluated at k.
    """
    # Discrete RVs have no scale; _parse_args still yields one, ignore it.
    args, loc, _ = self._parse_args(*args, **kwds)
    k, loc = map(asarray, (k, loc))
    args = tuple(map(asarray, args))
    k = asarray((k-loc))
    cond0 = self._argcheck(*args)
    cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args)
    cond = cond0 & cond1
    # Fill with -inf: log of the zero mass off the support/lattice.
    output = empty(shape(cond), 'd')
    output.fill(NINF)
    place(output, (1-cond0) + np.isnan(k), self.badvalue)
    if any(cond):
        goodargs = argsreduce(cond, *((k,)+args))
        place(output, cond, self._logpmf(*goodargs))
    if output.ndim == 0:
        return output[()]
    return output
def cdf(self, k, *args, **kwds):
    """
    Cumulative distribution function of the given RV.

    Parameters
    ----------
    k : array_like, int
        Quantiles.
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information).
    loc : array_like, optional
        Location parameter (default=0).

    Returns
    -------
    cdf : ndarray
        Cumulative distribution function evaluated at `k`.

    """
    args, loc, _ = self._parse_args(*args, **kwds)
    k, loc = map(asarray, (k, loc))
    args = tuple(map(asarray, args))
    # Shift to the standardized distribution (loc = 0).
    k = asarray((k-loc))
    cond0 = self._argcheck(*args)  # valid shape parameters
    cond1 = (k >= self.a) & (k < self.b)  # inside the support
    cond2 = (k >= self.b)  # at/above the upper end -> cdf is exactly 1
    cond = cond0 & cond1
    output = zeros(shape(cond), 'd')
    place(output, (1-cond0) + np.isnan(k), self.badvalue)
    # (cond0 == cond0) is an all-True mask used only for broadcasting.
    place(output, cond2*(cond0 == cond0), 1.0)
    if any(cond):
        goodargs = argsreduce(cond, *((k,)+args))
        # Clip guards against rounding noise outside [0, 1].
        place(output, cond, np.clip(self._cdf(*goodargs), 0, 1))
    if output.ndim == 0:
        # 0-d array -> plain scalar
        return output[()]
    return output
def logcdf(self, k, *args, **kwds):
    """
    Log of the cumulative distribution function at k of the given RV.

    Parameters
    ----------
    k : array_like, int
        Quantiles.
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information).
    loc : array_like, optional
        Location parameter (default=0).

    Returns
    -------
    logcdf : array_like
        Log of the cumulative distribution function evaluated at k.

    """
    args, loc, _ = self._parse_args(*args, **kwds)
    k, loc = map(asarray, (k, loc))
    args = tuple(map(asarray, args))
    # Shift to the standardized distribution (loc = 0).
    k = asarray((k-loc))
    cond0 = self._argcheck(*args)  # valid shape parameters
    cond1 = (k >= self.a) & (k < self.b)  # inside the support
    cond2 = (k >= self.b)  # at/above the upper end -> log(cdf) = log(1) = 0
    cond = cond0 & cond1
    # Start from -inf (log of zero probability).
    output = empty(shape(cond), 'd')
    output.fill(NINF)
    place(output, (1-cond0) + np.isnan(k), self.badvalue)
    # (cond0 == cond0) is an all-True mask used only for broadcasting.
    place(output, cond2*(cond0 == cond0), 0.0)
    if any(cond):
        goodargs = argsreduce(cond, *((k,)+args))
        place(output, cond, self._logcdf(*goodargs))
    if output.ndim == 0:
        # 0-d array -> plain scalar
        return output[()]
    return output
def sf(self, k, *args, **kwds):
    """
    Survival function (1-cdf) at k of the given RV.

    Parameters
    ----------
    k : array_like
        Quantiles.
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information).
    loc : array_like, optional
        Location parameter (default=0).

    Returns
    -------
    sf : array_like
        Survival function evaluated at k.

    """
    args, loc, _ = self._parse_args(*args, **kwds)
    k, loc = map(asarray, (k, loc))
    args = tuple(map(asarray, args))
    # Shift to the standardized distribution (loc = 0).
    k = asarray(k-loc)
    cond0 = self._argcheck(*args)  # valid shape parameters
    cond1 = (k >= self.a) & (k <= self.b)  # inside the support
    cond2 = (k < self.a) & cond0  # below the support -> sf is exactly 1
    cond = cond0 & cond1
    output = zeros(shape(cond), 'd')
    place(output, (1-cond0) + np.isnan(k), self.badvalue)
    place(output, cond2, 1.0)
    if any(cond):
        goodargs = argsreduce(cond, *((k,)+args))
        # Clip guards against rounding noise outside [0, 1].
        place(output, cond, np.clip(self._sf(*goodargs), 0, 1))
    if output.ndim == 0:
        # 0-d array -> plain scalar
        return output[()]
    return output
def logsf(self, k, *args, **kwds):
    """
    Log of the survival function of the given RV.

    Returns the log of the "survival function," defined as ``1 - cdf``,
    evaluated at `k`.

    Parameters
    ----------
    k : array_like
        Quantiles.
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information).
    loc : array_like, optional
        Location parameter (default=0).

    Returns
    -------
    logsf : ndarray
        Log of the survival function evaluated at `k`.

    """
    args, loc, _ = self._parse_args(*args, **kwds)
    k, loc = map(asarray, (k, loc))
    args = tuple(map(asarray, args))
    # Shift to the standardized distribution (loc = 0).
    k = asarray(k-loc)
    cond0 = self._argcheck(*args)  # valid shape parameters
    cond1 = (k >= self.a) & (k <= self.b)  # inside the support
    cond2 = (k < self.a) & cond0  # below the support -> log(sf) = log(1) = 0
    cond = cond0 & cond1
    # Start from -inf (log of zero probability).
    output = empty(shape(cond), 'd')
    output.fill(NINF)
    place(output, (1-cond0) + np.isnan(k), self.badvalue)
    place(output, cond2, 0.0)
    if any(cond):
        goodargs = argsreduce(cond, *((k,)+args))
        place(output, cond, self._logsf(*goodargs))
    if output.ndim == 0:
        # 0-d array -> plain scalar
        return output[()]
    return output
def ppf(self, q, *args, **kwds):
    """
    Percent point function (inverse of cdf) at q of the given RV.

    Parameters
    ----------
    q : array_like
        Lower tail probability.
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information).
    loc : array_like, optional
        Location parameter (default=0).
    scale : array_like, optional
        Scale parameter (default=1).

    Returns
    -------
    k : array_like
        Quantile corresponding to the lower tail probability, q.

    """
    args, loc, _ = self._parse_args(*args, **kwds)
    q, loc = map(asarray, (q, loc))
    args = tuple(map(asarray, args))
    # (loc == loc) filters out nan locations.
    cond0 = self._argcheck(*args) & (loc == loc)
    cond1 = (q > 0) & (q < 1)
    cond2 = (q == 1) & cond0  # q == 1 maps to the upper support bound b
    cond = cond0 & cond1
    # Initialize everything to badvalue; valid entries get overwritten.
    output = valarray(shape(cond), value=self.badvalue, typecode='d')
    # output type 'd' to handle nin and inf
    # q == 0 maps to a-1 (one step below the support's lower bound).
    place(output, (q == 0)*(cond == cond), self.a-1)
    place(output, cond2, self.b)
    if any(cond):
        goodargs = argsreduce(cond, *((q,)+args+(loc,)))
        # loc rides along through argsreduce so it stays aligned with q.
        loc, goodargs = goodargs[-1], goodargs[:-1]
        place(output, cond, self._ppf(*goodargs) + loc)
    if output.ndim == 0:
        # 0-d array -> plain scalar
        return output[()]
    return output
def isf(self, q, *args, **kwds):
    """
    Inverse survival function (1-sf) at q of the given RV.

    Parameters
    ----------
    q : array_like
        Upper tail probability.
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information).
    loc : array_like, optional
        Location parameter (default=0).

    Returns
    -------
    k : ndarray or scalar
        Quantile corresponding to the upper tail probability, q.

    """
    args, loc, _ = self._parse_args(*args, **kwds)
    q, loc = map(asarray, (q, loc))
    args = tuple(map(asarray, args))
    # (loc == loc) filters out nan locations.
    cond0 = self._argcheck(*args) & (loc == loc)
    cond1 = (q > 0) & (q < 1)
    cond2 = (q == 1) & cond0  # q == 1 maps to a-1 (below the support)
    cond = cond0 & cond1

    # same problem as with ppf; copied from ppf and changed
    output = valarray(shape(cond), value=self.badvalue, typecode='d')
    # output type 'd' to handle nin and inf
    # Boundary cases are mirrored relative to ppf: q == 0 -> b, q == 1 -> a-1.
    place(output, (q == 0)*(cond == cond), self.b)
    place(output, cond2, self.a-1)

    # call place only if at least 1 valid argument
    if any(cond):
        goodargs = argsreduce(cond, *((q,)+args+(loc,)))
        loc, goodargs = goodargs[-1], goodargs[:-1]
        # PB same as ticket 766
        place(output, cond, self._isf(*goodargs) + loc)

    if output.ndim == 0:
        # 0-d array -> plain scalar
        return output[()]
    return output
def _entropy(self, *args):
    # Entropy of a discrete RV.  When the distribution was built from an
    # explicit (xk, pk) table, use the stored probabilities directly.
    if hasattr(self, 'pk'):
        return entropy(self.pk)
    else:
        # Otherwise sum entr(pmf(k)) over the integers, working outward
        # symmetrically from the (integer-truncated) mean until the
        # combined contribution of a +/-k pair drops below _EPS.
        mu = int(self.stats(*args, **{'moments': 'm'}))
        val = self.pmf(mu, *args)
        ent = entr(val)
        k = 1
        term = 1.0
        while (abs(term) > _EPS):
            val = self.pmf(mu+k, *args)
            term = entr(val)
            val = self.pmf(mu-k, *args)
            term += entr(val)
            k += 1
            ent += term
        return ent
def expect(self, func=None, args=(), loc=0, lb=None, ub=None,
           conditional=False):
    """
    Calculate expected value of a function with respect to the distribution
    for discrete distribution.

    Parameters
    ----------
    fn : function (default: identity mapping)
        Function for which sum is calculated. Takes only one argument.
    args : tuple
        argument (parameters) of the distribution
    lb, ub : numbers, optional
        lower and upper bound for integration, default is set to the
        support of the distribution, lb and ub are inclusive (ul<=k<=ub)
    conditional : bool, optional
        Default is False.
        If true then the expectation is corrected by the conditional
        probability of the integration interval. The return value is the
        expectation of the function, conditional on being in the given
        interval (k such that ul<=k<=ub).

    Returns
    -------
    expect : float
        Expected value.

    Notes
    -----
    * function is not vectorized
    * accuracy: uses self.moment_tol as stopping criterium
      for heavy tailed distribution e.g. zipf(4), accuracy for
      mean, variance in example is only 1e-5,
      increasing precision (moment_tol) makes zipf very slow
    * suppnmin=100 internal parameter for minimum number of points to
      evaluate could be added as keyword parameter, to evaluate functions
      with non-monotonic shapes, points include integers in (-suppnmin,
      suppnmin)
    * uses maxcount=1000 limits the number of points that are evaluated
      to break loop for infinite sums
      (a maximum of suppnmin+1000 positive plus suppnmin+1000 negative
      integers are evaluated)

    """
    # moment_tol = 1e-12 # increase compared to self.moment_tol,
    # too slow for only small gain in precision for zipf

    # avoid endless loop with unbound integral, eg. var of zipf(2)
    maxcount = 1000
    suppnmin = 100  # minimum number of points to evaluate (+ and -)

    if func is None:
        def fun(x):
            # loc and args from outer scope
            return (x+loc)*self._pmf(x, *args)
    else:
        def fun(x):
            # loc and args from outer scope
            return func(x+loc)*self._pmf(x, *args)
    # used pmf because _pmf does not check support in randint and there
    # might be problems(?) with correct self.a, self.b at this stage maybe
    # not anymore, seems to work now with _pmf

    self._argcheck(*args)  # (re)generate scalar self.a and self.b
    # Convert caller-supplied bounds to the standardized (loc=0) scale.
    if lb is None:
        lb = (self.a)
    else:
        lb = lb - loc   # convert bound for standardized distribution
    if ub is None:
        ub = (self.b)
    else:
        ub = ub - loc   # convert bound for standardized distribution
    if conditional:
        if np.isposinf(ub)[()]:
            # work around bug: stats.poisson.sf(stats.poisson.b, 2) is nan
            invfac = 1 - self.cdf(lb-1, *args)
        else:
            invfac = 1 - self.cdf(lb-1, *args) - self.sf(ub, *args)
    else:
        invfac = 1.0

    tot = 0.0
    # Sum a central window covering the bulk of the mass first, then
    # extend term by term beyond it (below) until terms are negligible.
    low, upp = self._ppf(0.001, *args), self._ppf(0.999, *args)
    low = max(min(-suppnmin, low), lb)
    upp = min(max(suppnmin, upp), ub)
    supp = np.arange(low, upp+1, self.inc)  # check limits
    tot = np.sum(fun(supp))
    diff = 1e100
    pos = upp + self.inc
    count = 0

    # handle cases with infinite support
    while (pos <= ub) and (diff > self.moment_tol) and count <= maxcount:
        diff = fun(pos)
        tot += diff
        pos += self.inc
        count += 1

    if self.a < 0:  # handle case when self.a = -inf
        diff = 1e100
        pos = low - self.inc
        while ((pos >= lb) and (diff > self.moment_tol) and
               count <= maxcount):
            diff = fun(pos)
            tot += diff
            pos -= self.inc
            count += 1

    if count > maxcount:
        warnings.warn('expect(): sum did not converge', RuntimeWarning)
    return tot/invfac
def get_distribution_names(namespace_pairs, rv_base_class):
    """
    Collect names of statistical distributions and their generators.

    Parameters
    ----------
    namespace_pairs : sequence
        A snapshot of (name, value) pairs in the namespace of a module.
    rv_base_class : class
        The base class of random variable generator classes in a module.

    Returns
    -------
    distn_names : list of strings
        Names of the statistical distributions.
    distn_gen_names : list of strings
        Names of the generators of the statistical distributions.  Note
        that these are not simply the names of the statistical
        distributions, with a _gen suffix added.

    """
    instance_names = []
    generator_names = []
    for ident, obj in namespace_pairs:
        if ident.startswith('_'):
            # private names are never part of the public API
            continue
        if ident.endswith('_gen') and issubclass(obj, rv_base_class):
            generator_names.append(ident)
        if isinstance(obj, rv_base_class):
            instance_names.append(ident)
    return instance_names, generator_names
DOC: surround methods in class docstrings with backticks.
Fixes #3915.
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy.lib.six import string_types, exec_
import sys
import keyword
import re
import inspect
import types
import warnings
from scipy.misc import doccer
from ._distr_params import distcont, distdiscrete
from scipy.special import (comb, chndtr, gammaln, hyp0f1,
entr, kl_div)
# for root finding for discrete distribution ppf, and max likelihood estimation
from scipy import optimize
# for functions of continuous distributions (e.g. moments, entropy, cdf)
from scipy import integrate
# to approximate the pdf of a continuous distribution given its cdf
from scipy.misc import derivative
from numpy import (arange, putmask, ravel, take, ones, sum, shape,
product, reshape, zeros, floor, logical_and, log, sqrt, exp,
ndarray)
from numpy import (place, any, argsort, argmax, vectorize,
asarray, nan, inf, isinf, NINF, empty)
import numpy as np
import numpy.random as mtrand
from ._constants import _EPS, _XMAX
# Compatibility shim: bind a plain function to an instance.
try:
    from new import instancemethod
except ImportError:
    # Python 3: the `new` module is gone and types.MethodType no longer
    # takes a class argument, so wrap it (cls is accepted but ignored).
    def instancemethod(func, obj, cls):
        return types.MethodType(func, obj)
# These are the docstring parts used for substitution in specific
# distribution docstrings
docheaders = {'methods': """\nMethods\n-------\n""",
'parameters': """\nParameters\n---------\n""",
'notes': """\nNotes\n-----\n""",
'examples': """\nExamples\n--------\n"""}
_doc_rvs = """\
``rvs(%(shapes)s, loc=0, scale=1, size=1)``
Random variates.
"""
_doc_pdf = """\
``pdf(x, %(shapes)s, loc=0, scale=1)``
Probability density function.
"""
_doc_logpdf = """\
``logpdf(x, %(shapes)s, loc=0, scale=1)``
Log of the probability density function.
"""
_doc_pmf = """\
``pmf(x, %(shapes)s, loc=0, scale=1)``
Probability mass function.
"""
_doc_logpmf = """\
``logpmf(x, %(shapes)s, loc=0, scale=1)``
Log of the probability mass function.
"""
_doc_cdf = """\
``cdf(x, %(shapes)s, loc=0, scale=1)``
Cumulative density function.
"""
_doc_logcdf = """\
``logcdf(x, %(shapes)s, loc=0, scale=1)``
Log of the cumulative density function.
"""
_doc_sf = """\
``sf(x, %(shapes)s, loc=0, scale=1)``
Survival function (1-cdf --- sometimes more accurate).
"""
_doc_logsf = """\
``logsf(x, %(shapes)s, loc=0, scale=1)``
Log of the survival function.
"""
_doc_ppf = """\
``ppf(q, %(shapes)s, loc=0, scale=1)``
Percent point function (inverse of cdf --- percentiles).
"""
_doc_isf = """\
``isf(q, %(shapes)s, loc=0, scale=1)``
Inverse survival function (inverse of sf).
"""
_doc_moment = """\
``moment(n, %(shapes)s, loc=0, scale=1)``
Non-central moment of order n
"""
_doc_stats = """\
``stats(%(shapes)s, loc=0, scale=1, moments='mv')``
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = """\
``entropy(%(shapes)s, loc=0, scale=1)``
(Differential) entropy of the RV.
"""
_doc_fit = """\
``fit(data, %(shapes)s, loc=0, scale=1)``
Parameter estimates for generic data.
"""
_doc_expect = """\
``expect(func, %(shapes)s, loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)``
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = """\
``expect(func, %(shapes)s, loc=0, lb=None, ub=None, conditional=False)``
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = """\
``median(%(shapes)s, loc=0, scale=1)``
Median of the distribution.
"""
_doc_mean = """\
``mean(%(shapes)s, loc=0, scale=1)``
Mean of the distribution.
"""
_doc_var = """\
``var(%(shapes)s, loc=0, scale=1)``
Variance of the distribution.
"""
_doc_std = """\
``std(%(shapes)s, loc=0, scale=1)``
Standard deviation of the distribution.
"""
_doc_interval = """\
``interval(alpha, %(shapes)s, loc=0, scale=1)``
Endpoints of the range that contains alpha percent of the distribution
"""
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
_doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
_doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
_doc_stats, _doc_entropy, _doc_fit,
_doc_expect, _doc_median,
_doc_mean, _doc_var, _doc_std, _doc_interval])
# Note that the two lines for %(shapes) are searched for and replaced in
# rv_continuous and rv_discrete - update there if the exact string changes
_doc_default_callparams = """
Parameters
----------
x : array_like
quantiles
q : array_like
lower or upper tail probability
%(shapes)s : array_like
shape parameters
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
size : int or tuple of ints, optional
shape of random variates (default computed from input arguments )
moments : str, optional
composed of letters ['mvsk'] specifying which moments to compute where
'm' = mean, 'v' = variance, 's' = (Fisher's) skew and
'k' = (Fisher's) kurtosis.
Default is 'mv'.
"""
_doc_default_longsummary = """\
Continuous random variables are defined from a standard form and may
require some shape parameters to complete its specification. Any
optional keyword parameters can be passed to the methods of the RV
object as given below:
"""
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
_doc_default_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate a few first moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability density function (``pdf``):
>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s), 100)
>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s),
... 'r-', lw=5, alpha=0.6, label='%(name)s pdf')
Alternatively, freeze the distribution and display the frozen pdf:
>>> rv = %(name)s(%(shapes)s)
>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
Check accuracy of ``cdf`` and ``ppf``:
>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s)
>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
And compare the histogram:
>>> ax.hist(r, normed=True, histtype='stepfilled', alpha=0.2)
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
"""
_doc_default = ''.join([_doc_default_longsummary,
_doc_allmethods,
_doc_default_callparams,
_doc_default_frozen_note,
_doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
_doc_allmethods,
_doc_default_callparams,
_doc_default_frozen_note])
# Lookup used by rv_continuous subclasses when assembling an instance
# docstring from the individual template fragments above.
docdict = {
    'rvs': _doc_rvs,
    'pdf': _doc_pdf,
    'logpdf': _doc_logpdf,
    'cdf': _doc_cdf,
    'logcdf': _doc_logcdf,
    'sf': _doc_sf,
    'logsf': _doc_logsf,
    'ppf': _doc_ppf,
    'isf': _doc_isf,
    'stats': _doc_stats,
    'entropy': _doc_entropy,
    'fit': _doc_fit,
    'moment': _doc_moment,
    'expect': _doc_expect,
    'interval': _doc_interval,
    'mean': _doc_mean,
    'std': _doc_std,
    'var': _doc_var,
    'median': _doc_median,
    'allmethods': _doc_allmethods,
    'callparams': _doc_default_callparams,
    'longsummary': _doc_default_longsummary,
    'frozennote': _doc_default_frozen_note,
    'example': _doc_default_example,
    'default': _doc_default,
    'before_notes': _doc_default_before_notes
}
# Reuse common content between continuous and discrete docs, change some
# minor bits.
docdict_discrete = docdict.copy()

docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
                     'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',
                     'mean', 'var', 'std', 'interval']
# Discrete distributions have no scale parameter, so strip it from the
# signatures shown in the docs.
for obj in _doc_disc_methods:
    docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
docdict_discrete.pop('pdf')
docdict_discrete.pop('logpdf')

_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods])
docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods

docdict_discrete['longsummary'] = _doc_default_longsummary.replace(
    'Continuous', 'Discrete')
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape and
location parameters returning a "frozen" discrete RV object:

rv = %(name)s(%(shapes)s, loc=0)
    - Frozen RV object with the same methods but holding the given shape and
      location fixed.
"""
docdict_discrete['frozennote'] = _doc_default_frozen_note

# Discrete variant of the usage example (pmf/vlines instead of pdf/plot).
_doc_default_discrete_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)

Calculate a few first moments:

%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')

Display the probability mass function (``pmf``):

>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s),
...               %(name)s.ppf(0.99, %(shapes)s))
>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf')
>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5)

Alternatively, freeze the distribution and display the frozen ``pmf``:

>>> rv = %(name)s(%(shapes)s)
>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,
...           label='frozen pmf')
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()

Check accuracy of ``cdf`` and ``ppf``:

>>> prob = %(name)s.cdf(x, %(shapes)s)
>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s))
True

Generate random numbers:

>>> r = %(name)s.rvs(%(shapes)s, size=1000)

"""
docdict_discrete['example'] = _doc_default_discrete_example

_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],
                                     docdict_discrete['allmethods'],
                                     docdict_discrete['callparams'],
                                     docdict_discrete['frozennote']])
docdict_discrete['before_notes'] = _doc_default_before_notes

_doc_default_disc = ''.join([docdict_discrete['longsummary'],
                             docdict_discrete['allmethods'],
                             docdict_discrete['frozennote'],
                             docdict_discrete['example']])
docdict_discrete['default'] = _doc_default_disc

# clean up all the separate docstring elements, we do not need them anymore
for obj in [s for s in dir() if s.startswith('_doc_')]:
    exec('del ' + obj)
del obj
try:
    del s
except NameError:
    # in Python 3, loop variables are not visible after the loop
    pass
def _moment(data, n, mu=None):
if mu is None:
mu = data.mean()
return ((data - mu)**n).mean()
def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
if (n == 0):
return 1.0
elif (n == 1):
if mu is None:
val = moment_func(1, *args)
else:
val = mu
elif (n == 2):
if mu2 is None or mu is None:
val = moment_func(2, *args)
else:
val = mu2 + mu*mu
elif (n == 3):
if g1 is None or mu2 is None or mu is None:
val = moment_func(3, *args)
else:
mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment
val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment
elif (n == 4):
if g1 is None or g2 is None or mu2 is None or mu is None:
val = moment_func(4, *args)
else:
mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment
mu3 = g1*np.power(mu2, 1.5) # 3rd central moment
val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu
else:
val = moment_func(n, *args)
return val
def _skew(data):
"""
skew is third central moment / variance**(1.5)
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m3 = ((data - mu)**3).mean()
return m3 / np.power(m2, 1.5)
def _kurtosis(data):
"""
kurtosis is fourth central moment / variance**2 - 3
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m4 = ((data - mu)**4).mean()
return m4 / m2**2 - 3
# Frozen RV class
class rv_frozen(object):
    """A distribution with its shape, location and scale fixed.

    Stores the arguments it was created with and forwards every
    statistical method to the underlying distribution using them.
    """

    def __init__(self, dist, *args, **kwds):
        self.args = args
        self.kwds = kwds

        # Work on a private copy of the distribution: a and b may be
        # (re)set by _argcheck depending on the frozen parameters.
        self.dist = dist.__class__(**dist._ctor_param)
        shapes, _, _ = self.dist._parse_args(*args, **kwds)
        self.dist._argcheck(*shapes)

    def pdf(self, x):
        # Raises AttributeError for a frozen discrete distribution.
        return self.dist.pdf(x, *self.args, **self.kwds)

    def logpdf(self, x):
        return self.dist.logpdf(x, *self.args, **self.kwds)

    def cdf(self, x):
        return self.dist.cdf(x, *self.args, **self.kwds)

    def logcdf(self, x):
        return self.dist.logcdf(x, *self.args, **self.kwds)

    def ppf(self, q):
        return self.dist.ppf(q, *self.args, **self.kwds)

    def isf(self, q):
        return self.dist.isf(q, *self.args, **self.kwds)

    def rvs(self, size=None):
        # Merge the frozen keywords with the requested sample size.
        return self.dist.rvs(*self.args, **dict(self.kwds, size=size))

    def sf(self, x):
        return self.dist.sf(x, *self.args, **self.kwds)

    def logsf(self, x):
        return self.dist.logsf(x, *self.args, **self.kwds)

    def stats(self, moments='mv'):
        return self.dist.stats(*self.args, **dict(self.kwds, moments=moments))

    def median(self):
        return self.dist.median(*self.args, **self.kwds)

    def mean(self):
        return self.dist.mean(*self.args, **self.kwds)

    def var(self):
        return self.dist.var(*self.args, **self.kwds)

    def std(self):
        return self.dist.std(*self.args, **self.kwds)

    def moment(self, n):
        return self.dist.moment(n, *self.args, **self.kwds)

    def entropy(self):
        return self.dist.entropy(*self.args, **self.kwds)

    def pmf(self, k):
        return self.dist.pmf(k, *self.args, **self.kwds)

    def logpmf(self, k):
        return self.dist.logpmf(k, *self.args, **self.kwds)

    def interval(self, alpha):
        return self.dist.interval(alpha, *self.args, **self.kwds)
def valarray(shape, value=nan, typecode=None):
    """Return an array of the given shape filled entirely with `value`,
    optionally cast to `typecode`."""
    # Multiplying a boolean ones-array by `value` broadcasts the fill
    # and lets the result dtype follow `value` (e.g. float for nan).
    filled = ones(shape, dtype=bool) * value
    if typecode is not None:
        filled = filled.astype(typecode)
    return filled if isinstance(filled, ndarray) else asarray(filled)
def _lazywhere(cond, arrays, f, fillvalue=None, f2=None):
    """
    np.where(cond, x, fillvalue) always evaluates x even where cond is False.
    This one only evaluates f(arr1[cond], arr2[cond], ...).

    Exactly one of `fillvalue` and `f2` may be given; positions where
    ``cond`` is False receive either the fill value or ``f2(...)``.

    For example,

    >>> a, b = np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])
    >>> def f(a, b):
    ...     return a*b
    >>> _lazywhere(a > 2, (a, b), f, np.nan)
    array([ nan,  nan,  21.,  32.])

    Notice it assumes that all `arrays` are of the same shape, or can be
    broadcasted together.
    """
    if fillvalue is None:
        if f2 is None:
            raise ValueError("One of (fillvalue, f2) must be given.")
        fillvalue = np.nan
    elif f2 is not None:
        raise ValueError("Only one of (fillvalue, f2) can be given.")

    broadcast = np.broadcast_arrays(*arrays)
    selected = tuple(np.extract(cond, arr) for arr in broadcast)
    out = valarray(shape(broadcast[0]), value=fillvalue)
    np.place(out, cond, f(*selected))
    if f2 is not None:
        complement = tuple(np.extract(~cond, arr) for arr in broadcast)
        np.place(out, ~cond, f2(*complement))

    return out
# This should be rewritten
def argsreduce(cond, *args):
    """Return the sequence of ravel(args[i]) where ravel(condition) is
    True in 1D.

    Examples
    --------
    >>> import numpy as np
    >>> rand = np.random.random_sample
    >>> A = rand((4, 5))
    >>> B = 2
    >>> C = rand((1, 5))
    >>> cond = np.ones(A.shape)
    >>> [A1, B1, C1] = argsreduce(cond, A, B, C)
    >>> B1.shape
    (20,)
    >>> cond[2,:] = 0
    >>> [A2, B2, C2] = argsreduce(cond, A, B, C)
    >>> B2.shape
    (15,)

    """
    arrays = np.atleast_1d(*args)
    # atleast_1d returns a bare array (not a list) for a single argument.
    if not isinstance(arrays, list):
        arrays = [arrays, ]
    # All-True mask of cond's shape: multiplying by it broadcasts scalars
    # and smaller arrays up to cond's shape before extraction.
    full_mask = (cond == cond)
    return [np.extract(cond, arr * full_mask) for arr in arrays]
parse_arg_template = """
def _parse_args(self, %(shape_arg_str)s %(locscale_in)s):
return (%(shape_arg_str)s), %(locscale_out)s
def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None):
return (%(shape_arg_str)s), %(locscale_out)s, size
def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'):
return (%(shape_arg_str)s), %(locscale_out)s, moments
"""
# Both the continuous and discrete distributions depend on ncx2.
# I think the function name ncx2 is an abbreviation for noncentral chi squared.
def _ncx2_log_pdf(x, df, nc):
a = asarray(df/2.0)
fac = -nc/2.0 - x/2.0 + (a-1)*log(x) - a*log(2) - gammaln(a)
return fac + np.nan_to_num(log(hyp0f1(a, nc * x/4.0)))
def _ncx2_pdf(x, df, nc):
    """Noncentral chi-square density, obtained by exponentiating the
    numerically safer log-density."""
    log_density = _ncx2_log_pdf(x, df, nc)
    return np.exp(log_density)
def _ncx2_cdf(x, df, nc):
    # Noncentral chi-square cdf, delegated to the compiled special
    # function chndtr.
    return chndtr(x, df, nc)
class rv_generic(object):
"""Class which encapsulates common functionality between rv_discrete
and rv_continuous.
"""
def __init__(self):
    super(rv_generic, self).__init__()

    # figure out if _stats signature has 'moments' keyword, either
    # explicitly or via **kwds (sign[2] is the varkw name, sign[0] the
    # positional argument names); stats() uses this to decide whether
    # it may pass moments= through to _stats.
    sign = inspect.getargspec(self._stats)
    self._stats_has_moments = ((sign[2] is not None) or
                               ('moments' in sign[0]))
def _construct_argparser(
        self, meths_to_inspect, locscale_in, locscale_out):
    """Construct the parser for the shape arguments.

    Generates the argument-parsing functions dynamically (by exec'ing
    `parse_arg_template`) and attaches them to the instance.
    Is supposed to be called in __init__ of a class for each distribution.

    If self.shapes is a non-empty string, interprets it as a
    comma-separated list of shape parameters.

    Otherwise inspects the call signatures of `meths_to_inspect`
    and constructs the argument-parsing functions from these.
    In this case also sets `shapes` and `numargs`.
    """
    if self.shapes:
        # sanitize the user-supplied shapes
        if not isinstance(self.shapes, string_types):
            raise TypeError('shapes must be a string.')

        shapes = self.shapes.replace(',', ' ').split()

        # Each shape name becomes a parameter in exec'd source, so it
        # must be a legal, non-keyword Python identifier.
        for field in shapes:
            if keyword.iskeyword(field):
                raise SyntaxError('keywords cannot be used as shapes.')
            if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field):
                raise SyntaxError(
                    'shapes must be valid python identifiers')
    else:
        # find out the call signatures (_pdf, _cdf etc), deduce shape
        # arguments
        shapes_list = []
        for meth in meths_to_inspect:
            shapes_args = inspect.getargspec(meth)
            shapes_list.append(shapes_args.args)

            # *args or **kwargs are not allowed w/automatic shapes
            # (generic methods have 'self, x' only)
            if len(shapes_args.args) > 2:
                if shapes_args.varargs is not None:
                    raise TypeError(
                        '*args are not allowed w/out explicit shapes')
                if shapes_args.keywords is not None:
                    raise TypeError(
                        '**kwds are not allowed w/out explicit shapes')
                if shapes_args.defaults is not None:
                    raise TypeError('defaults are not allowed for shapes')

        # The method with the longest signature defines the shapes.
        shapes = max(shapes_list, key=lambda x: len(x))
        shapes = shapes[2:]  # remove self, x,

        # make sure the signatures are consistent
        # (generic methods have 'self, x' only)
        for item in shapes_list:
            if len(item) > 2 and item[2:] != shapes:
                raise TypeError('Shape arguments are inconsistent.')

    # have the arguments, construct the method from template
    shapes_str = ', '.join(shapes) + ', ' if shapes else ''  # NB: not None
    dct = dict(shape_arg_str=shapes_str,
               locscale_in=locscale_in,
               locscale_out=locscale_out,
               )
    ns = {}
    exec_(parse_arg_template % dct, ns)
    # NB: attach to the instance, not class
    for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:
        setattr(self, name,
                instancemethod(ns[name], self, self.__class__)
                )

    self.shapes = ', '.join(shapes) if shapes else None
    if not hasattr(self, 'numargs'):
        # allows more general subclassing with *args
        self.numargs = len(shapes)
def _construct_doc(self, docdict, shapes_vals=None):
    """Construct the instance docstring with string substitutions."""
    tempdict = docdict.copy()
    tempdict['name'] = self.name or 'distname'
    tempdict['shapes'] = self.shapes or ''

    if shapes_vals is None:
        shapes_vals = ()
    # Example shape values shown in the generated doctest examples.
    vals = ', '.join(str(_) for _ in shapes_vals)
    tempdict['vals'] = vals

    if self.shapes:
        tempdict['set_vals_stmt'] = '>>> %s = %s' % (self.shapes, vals)
    else:
        tempdict['set_vals_stmt'] = ''

    if self.shapes is None:
        # remove shapes from call parameters if there are none
        for item in ['callparams', 'default', 'before_notes']:
            tempdict[item] = tempdict[item].replace(
                "\n%(shapes)s : array_like\n    shape parameters", "")
    for i in range(2):
        if self.shapes is None:
            # necessary because we use %(shapes)s in two forms (w w/o ", ")
            self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
        self.__doc__ = doccer.docformat(self.__doc__, tempdict)

    # correct for empty shapes
    self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')')
def freeze(self, *args, **kwds):
    """Freeze the distribution for the given arguments.

    Parameters
    ----------
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution.  Should include all
        the non-optional arguments, may include ``loc`` and ``scale``.

    Returns
    -------
    rv_frozen : rv_frozen instance
        The frozen distribution.

    """
    return rv_frozen(self, *args, **kwds)
def __call__(self, *args, **kwds):
    # Calling a distribution instance freezes it (see `freeze`).
    return self.freeze(*args, **kwds)
# The actual calculation functions (no basic checking need be done)
# If these are defined, the others won't be looked at.
# Otherwise, the other set can be defined.
def _stats(self, *args, **kwds):
    # Default: no moments are known analytically (mean, variance, skew,
    # kurtosis all None); callers then fall back to computing them.
    return None, None, None, None
# Non-central moments (mu_n' = E[X**n]); note the historical label here
# said "Central moments", but _munp values are used as non-central
# moments by _moment_from_stats.
def _munp(self, n, *args):
    # Default n-th moment, computed numerically via generic_moment.
    # Silence floating point warnings from integration.
    olderr = np.seterr(all='ignore')
    vals = self.generic_moment(n, *args)
    np.seterr(**olderr)
    return vals
## These are the methods you must define (standard form functions)
## NB: generic _pdf, _logpdf, _cdf are different for
## rv_continuous and rv_discrete hence are defined in there
def _argcheck(self, *args):
"""Default check for correct values on args and keywords.
Returns condition array of 1's where arguments are correct and
0's where they are not.
"""
cond = 1
for arg in args:
cond = logical_and(cond, (asarray(arg) > 0))
return cond
##(return 1-d using self._size to get number)
def _rvs(self, *args):
    ## Use basic inverse cdf algorithm for RV generation as default.
    # Draw uniform variates and map them through the percent point
    # function (which may itself be numerical).
    U = mtrand.sample(self._size)
    Y = self._ppf(U, *args)
    return Y
def _logcdf(self, x, *args):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x, *args)
def _logsf(self, x, *args):
return log(self._sf(x, *args))
def _ppf(self, q, *args):
return self._ppfvec(q, *args)
def _isf(self, q, *args):
return self._ppf(1.0-q, *args) # use correct _ppf for subclasses
    # These are actually called, and should not be overwritten if you
    # want to keep error checking.
    def rvs(self, *args, **kwds):
        """
        Random variates of given type.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).
        scale : array_like, optional
            Scale parameter (default=1).
        size : int or tuple of ints, optional
            Defining number of random variates (default=1).

        Returns
        -------
        rvs : ndarray or scalar
            Random variates of given `size`.

        """
        # 'discrete' is an internal keyword (popped before arg parsing) used
        # by discrete subclasses so samples can be cast to int after scaling.
        discrete = kwds.pop('discrete', None)
        args, loc, scale, size = self._parse_args_rvs(*args, **kwds)
        cond = logical_and(self._argcheck(*args), (scale >= 0))
        if not np.all(cond):
            raise ValueError("Domain error in arguments.")

        # self._size is total size of all output values
        self._size = product(size, axis=0)
        if self._size is not None and self._size > 1:
            size = np.array(size, ndmin=1)

        if np.all(scale == 0):
            # degenerate case: all probability mass sits at `loc`
            return loc*ones(size, 'd')

        # sample in standardized form, then shift/scale below
        vals = self._rvs(*args)
        if self._size is not None:
            vals = reshape(vals, size)

        vals = vals * scale + loc

        # Cast to int if discrete
        if discrete:
            if np.isscalar(vals):
                vals = int(vals)
            else:
                vals = vals.astype(int)

        return vals
    def stats(self, *args, **kwds):
        """
        Some statistics of the given RV

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional (discrete RVs only)
            scale parameter (default=1)
        moments : str, optional
            composed of letters ['mvsk'] defining which moments to compute:
            'm' = mean,
            'v' = variance,
            's' = (Fisher's) skew,
            'k' = (Fisher's) kurtosis.
            (default='mv')

        Returns
        -------
        stats : sequence
            of requested moments.

        """
        args, loc, scale, moments = self._parse_args_stats(*args, **kwds)
        # scale = 1 by construction for discrete RVs
        loc, scale = map(asarray, (loc, scale))
        args = tuple(map(asarray, args))
        # valid entries: shape args pass _argcheck, scale > 0, loc not NaN
        cond = self._argcheck(*args) & (scale > 0) & (loc == loc)

        output = []
        default = valarray(shape(cond), self.badvalue)

        # Use only entries that are valid in calculation
        if any(cond):
            goodargs = argsreduce(cond, *(args+(scale, loc)))
            scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]

            if self._stats_has_moments:
                # _stats supports a 'moments' kwarg; request only what we need
                mu, mu2, g1, g2 = self._stats(*goodargs,
                                              **{'moments': moments})
            else:
                mu, mu2, g1, g2 = self._stats(*goodargs)
            if g1 is None:
                mu3 = None
            else:
                if mu2 is None:
                    mu2 = self._munp(2, *goodargs)
                # (mu2**1.5) breaks down for nan and inf
                mu3 = g1 * np.power(mu2, 1.5)

            if 'm' in moments:
                if mu is None:
                    mu = self._munp(1, *goodargs)
                out0 = default.copy()
                # shift/scale transform: E[scale*Y + loc]
                place(out0, cond, mu * scale + loc)
                output.append(out0)

            if 'v' in moments:
                if mu2 is None:
                    # central 2nd moment from raw moments: mu2 = E[Y^2] - mu^2
                    mu2p = self._munp(2, *goodargs)
                    if mu is None:
                        mu = self._munp(1, *goodargs)
                    mu2 = mu2p - mu * mu
                    if np.isinf(mu):
                        #if mean is inf then var is also inf
                        mu2 = np.inf
                out0 = default.copy()
                place(out0, cond, mu2 * scale * scale)
                output.append(out0)

            if 's' in moments:
                if g1 is None:
                    # skew from raw moments via the central 3rd moment
                    mu3p = self._munp(3, *goodargs)
                    if mu is None:
                        mu = self._munp(1, *goodargs)
                    if mu2 is None:
                        mu2p = self._munp(2, *goodargs)
                        mu2 = mu2p - mu * mu
                    mu3 = mu3p - 3 * mu * mu2 - mu**3
                    g1 = mu3 / np.power(mu2, 1.5)
                out0 = default.copy()
                place(out0, cond, g1)
                output.append(out0)

            if 'k' in moments:
                if g2 is None:
                    # excess kurtosis from raw moments via the central 4th moment
                    mu4p = self._munp(4, *goodargs)
                    if mu is None:
                        mu = self._munp(1, *goodargs)
                    if mu2 is None:
                        mu2p = self._munp(2, *goodargs)
                        mu2 = mu2p - mu * mu
                    if mu3 is None:
                        mu3p = self._munp(3, *goodargs)
                        mu3 = mu3p - 3 * mu * mu2 - mu**3
                    mu4 = mu4p - 4 * mu * mu3 - 6 * mu * mu * mu2 - mu**4
                    g2 = mu4 / mu2**2.0 - 3.0
                out0 = default.copy()
                place(out0, cond, g2)
                output.append(out0)
        else:  # no valid args
            output = []
            for _ in moments:
                out0 = default.copy()
                output.append(out0)

        if len(output) == 1:
            return output[0]
        else:
            return tuple(output)
    def entropy(self, *args, **kwds):
        """
        Differential entropy of the RV.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).
        scale : array_like, optional (continuous distributions only).
            Scale parameter (default=1).

        Notes
        -----
        Entropy is defined base `e`:

        >>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))
        >>> np.allclose(drv.entropy(), np.log(2.0))
        True

        """
        args, loc, scale = self._parse_args(*args, **kwds)
        # NB: for discrete distributions scale=1 by construction in _parse_args
        args = tuple(map(asarray, args))
        cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
        output = zeros(shape(cond0), 'd')
        # invalid parameter combinations produce badvalue
        place(output, (1-cond0), self.badvalue)
        goodargs = argsreduce(cond0, *args)
        # I don't know when or why vecentropy got broken when numargs == 0
        # 09.08.2013: is this still relevant? cf check_vecentropy test
        # in tests/test_continuous_basic.py
        if self.numargs == 0:
            place(output, cond0, self._entropy() + log(scale))
        else:
            # loc/scale family: H(scale*X + loc) = H(X) + log(scale)
            place(output, cond0, self.vecentropy(*goodargs) + log(scale))
        return output
    def moment(self, n, *args, **kwds):
        """
        n'th order non-central moment of distribution.

        Parameters
        ----------
        n : int, n>=1
            Order of moment.
        arg1, arg2, arg3,... : float
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        kwds : keyword arguments, optional
            These can include "loc" and "scale", as well as other keyword
            arguments relevant for a given distribution.

        Raises
        ------
        ValueError
            If `n` is not a non-negative integer.

        """
        args, loc, scale = self._parse_args(*args, **kwds)
        if not (self._argcheck(*args) and (scale > 0)):
            return nan
        if (floor(n) != n):
            raise ValueError("Moment must be an integer.")
        if (n < 0):
            raise ValueError("Moment must be positive.")
        mu, mu2, g1, g2 = None, None, None, None
        if (n > 0) and (n < 5):
            # low-order moments may be available cheaply from _stats;
            # request only the pieces needed to reconstruct E[Y^n]
            if self._stats_has_moments:
                mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'vk'}[n]}
            else:
                mdict = {}
            mu, mu2, g1, g2 = self._stats(*args, **mdict)
        # moment of the standardized variable Y
        val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)

        # Convert to transformed  X = L + S*Y
        # E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n)
        if loc == 0:
            return scale**n * val
        else:
            result = 0
            fac = float(scale) / float(loc)
            # binomial expansion of (L + S*Y)^n, term by term
            for k in range(n):
                valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)
                result += comb(n, k, exact=True)*(fac**k) * valk
            result += fac**n * val
            return result * loc**n
def median(self, *args, **kwds):
"""
Median of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter, Default is 0.
scale : array_like, optional
Scale parameter, Default is 1.
Returns
-------
median : float
The median of the distribution.
See Also
--------
stats.distributions.rv_discrete.ppf
Inverse of the CDF
"""
return self.ppf(0.5, *args, **kwds)
def mean(self, *args, **kwds):
"""
Mean of the distribution
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
mean : float
the mean of the distribution
"""
kwds['moments'] = 'm'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def var(self, *args, **kwds):
"""
Variance of the distribution
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
var : float
the variance of the distribution
"""
kwds['moments'] = 'v'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def std(self, *args, **kwds):
"""
Standard deviation of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
std : float
standard deviation of the distribution
"""
kwds['moments'] = 'v'
res = sqrt(self.stats(*args, **kwds))
return res
def interval(self, alpha, *args, **kwds):
"""
Confidence interval with equal areas around the median.
Parameters
----------
alpha : array_like of float
Probability that an rv will be drawn from the returned range.
Each value should be in the range [0, 1].
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter, Default is 0.
scale : array_like, optional
scale parameter, Default is 1.
Returns
-------
a, b : ndarray of float
end-points of range that contain ``100 * alpha %`` of the rv's
possible values.
"""
alpha = asarray(alpha)
if any((alpha > 1) | (alpha < 0)):
raise ValueError("alpha must be between 0 and 1 inclusive")
q1 = (1.0-alpha)/2
q2 = (1.0+alpha)/2
a = self.ppf(q1, *args, **kwds)
b = self.ppf(q2, *args, **kwds)
return a, b
## continuous random variables: implement maybe later
##
## hf --- Hazard Function (PDF / SF)
## chf --- Cumulative hazard function (-log(SF))
## psf --- Probability sparsity function (reciprocal of the pdf) in
## units of percent-point-function (as a function of q).
## Also, the derivative of the percent-point function.
class rv_continuous(rv_generic):
"""
A generic continuous random variable class meant for subclassing.
`rv_continuous` is a base class to construct specific distribution classes
and instances from for continuous random variables. It cannot be used
directly as a distribution.
Parameters
----------
momtype : int, optional
The type of generic moment calculation to use: 0 for pdf, 1 (default)
for ppf.
a : float, optional
Lower bound of the support of the distribution, default is minus
infinity.
b : float, optional
Upper bound of the support of the distribution, default is plus
infinity.
xtol : float, optional
The tolerance for fixed point calculation for generic ppf.
badvalue : object, optional
The value in a result arrays that indicates a value that for which
some argument restriction is violated, default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the two shape arguments for all
its methods.
extradoc : str, optional, deprecated
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
Methods
-------
``rvs(<shape(s)>, loc=0, scale=1, size=1)``
random variates
``pdf(x, <shape(s)>, loc=0, scale=1)``
probability density function
``logpdf(x, <shape(s)>, loc=0, scale=1)``
log of the probability density function
``cdf(x, <shape(s)>, loc=0, scale=1)``
cumulative density function
``logcdf(x, <shape(s)>, loc=0, scale=1)``
log of the cumulative density function
``sf(x, <shape(s)>, loc=0, scale=1)``
survival function (1-cdf --- sometimes more accurate)
``logsf(x, <shape(s)>, loc=0, scale=1)``
log of the survival function
``ppf(q, <shape(s)>, loc=0, scale=1)``
percent point function (inverse of cdf --- quantiles)
``isf(q, <shape(s)>, loc=0, scale=1)``
inverse survival function (inverse of sf)
``moment(n, <shape(s)>, loc=0, scale=1)``
non-central n-th moment of the distribution. May not work for array
arguments.
``stats(<shape(s)>, loc=0, scale=1, moments='mv')``
mean('m'), variance('v'), skew('s'), and/or kurtosis('k')
``entropy(<shape(s)>, loc=0, scale=1)``
(differential) entropy of the RV.
``fit(data, <shape(s)>, loc=0, scale=1)``
Parameter estimates for generic data
``expect(func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds)``
Expected value of a function with respect to the distribution.
Additional kwd arguments passed to integrate.quad
``median(<shape(s)>, loc=0, scale=1)``
Median of the distribution.
``mean(<shape(s)>, loc=0, scale=1)``
Mean of the distribution.
``std(<shape(s)>, loc=0, scale=1)``
Standard deviation of the distribution.
``var(<shape(s)>, loc=0, scale=1)``
Variance of the distribution.
``interval(alpha, <shape(s)>, loc=0, scale=1)``
Interval that with `alpha` percent probability contains a random
realization of this distribution.
``__call__(<shape(s)>, loc=0, scale=1)``
Calling a distribution instance creates a frozen RV object with the
same methods but holding the given shape, location, and scale fixed.
See Notes section.
**Parameters for Methods**
x : array_like
quantiles
q : array_like
lower or upper tail probability
<shape(s)> : array_like
shape parameters
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
size : int or tuple of ints, optional
shape of random variates (default computed from input arguments )
moments : string, optional
composed of letters ['mvsk'] specifying which moments to compute where
'm' = mean, 'v' = variance, 's' = (Fisher's) skew and
'k' = (Fisher's) kurtosis. (default='mv')
n : int
order of moment to calculate in method moments
Notes
-----
**Methods that can be overwritten by subclasses**
::
_rvs
_pdf
_cdf
_sf
_ppf
_isf
_stats
_munp
_entropy
_argcheck
There are additional (internal and private) generic methods that can
be useful for cross-checking and for debugging, but might work in all
cases when directly called.
**Frozen Distribution**
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = generic(<shape(s)>, loc=0, scale=1)
frozen RV object with the same methods but holding the given shape,
location, and scale fixed
**Subclassing**
New random variables can be defined by subclassing rv_continuous class
and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized
to location 0 and scale 1) which will be given clean arguments (in between
a and b) and passing the argument check method.
If positive argument checking is not correct for your RV
then you will also need to re-define the ``_argcheck`` method.
Correct, but potentially slow defaults exist for the remaining
methods but for speed and/or accuracy you can over-ride::
_logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf
Rarely would you override ``_isf``, ``_sf`` or ``_logsf``, but you could.
Statistics are computed using numerical integration by default.
For speed you can redefine this using ``_stats``:
- take shape parameters and return mu, mu2, g1, g2
- If you can't compute one of these, return it as None
- Can also be defined with a keyword argument ``moments=<str>``,
where <str> is a string composed of 'm', 'v', 's',
and/or 'k'. Only the components appearing in string
should be computed and returned in the order 'm', 'v',
's', or 'k' with missing values returned as None.
Alternatively, you can override ``_munp``, which takes n and shape
parameters and returns the nth non-central moment of the distribution.
A note on ``shapes``: subclasses need not specify them explicitly. In this
case, the `shapes` will be automatically deduced from the signatures of the
overridden methods.
If, for some reason, you prefer to avoid relying on introspection, you can
specify ``shapes`` explicitly as an argument to the instance constructor.
Examples
--------
To create a new Gaussian distribution, we would do the following::
class gaussian_gen(rv_continuous):
"Gaussian distribution"
def _pdf(self, x):
...
...
"""
def __init__(self, momtype=1, a=None, b=None, xtol=1e-14,
badvalue=None, name=None, longname=None,
shapes=None, extradoc=None):
super(rv_continuous, self).__init__()
# save the ctor parameters, cf generic freeze
self._ctor_param = dict(
momtype=momtype, a=a, b=b, xtol=xtol,
badvalue=badvalue, name=name, longname=longname,
shapes=shapes, extradoc=extradoc)
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.name = name
self.a = a
self.b = b
if a is None:
self.a = -inf
if b is None:
self.b = inf
self.xtol = xtol
self._size = 1
self.moment_type = momtype
self.shapes = shapes
self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf],
locscale_in='loc=0, scale=1',
locscale_out='loc, scale')
# nin correction
self._ppfvec = vectorize(self._ppf_single, otypes='d')
self._ppfvec.nin = self.numargs + 1
self.vecentropy = vectorize(self._entropy, otypes='d')
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self._cdfvec.nin = self.numargs + 1
# backwards compat. these were removed in 0.14.0, put back but
# deprecated in 0.14.1:
self.vecfunc = np.deprecate(self._ppfvec, "vecfunc")
self.veccdf = np.deprecate(self._cdfvec, "veccdf")
self.extradoc = extradoc
if momtype == 0:
self.generic_moment = vectorize(self._mom0_sc, otypes='d')
else:
self.generic_moment = vectorize(self._mom1_sc, otypes='d')
# Because of the *args argument of _mom0_sc, vectorize cannot count the
# number of arguments correctly.
self.generic_moment.nin = self.numargs + 1
if longname is None:
if name[0] in ['aeiouAEIOU']:
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc)
else:
dct = dict(distcont)
self._construct_doc(docdict, dct.get(self.name))
def _construct_default_doc(self, longname=None, extradoc=None):
"""Construct instance docstring from the default template."""
if longname is None:
longname = 'A'
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s continuous random variable.' % longname,
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc(docdict)
def _ppf_to_solve(self, x, q, *args):
return self.cdf(*(x, )+args)-q
def _ppf_single(self, q, *args):
left = right = None
if self.a > -np.inf:
left = self.a
if self.b < np.inf:
right = self.b
factor = 10.
if not left: # i.e. self.a = -inf
left = -1.*factor
while self._ppf_to_solve(left, q, *args) > 0.:
right = left
left *= factor
# left is now such that cdf(left) < q
if not right: # i.e. self.b = inf
right = factor
while self._ppf_to_solve(right, q, *args) < 0.:
left = right
right *= factor
# right is now such that cdf(right) > q
return optimize.brentq(self._ppf_to_solve,
left, right, args=(q,)+args, xtol=self.xtol)
# moment from definition
def _mom_integ0(self, x, m, *args):
return x**m * self.pdf(x, *args)
def _mom0_sc(self, m, *args):
return integrate.quad(self._mom_integ0, self.a, self.b,
args=(m,)+args)[0]
# moment calculated using ppf
def _mom_integ1(self, q, m, *args):
return (self.ppf(q, *args))**m
def _mom1_sc(self, m, *args):
return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]
def _pdf(self, x, *args):
return derivative(self._cdf, x, dx=1e-5, args=args, order=5)
## Could also define any of these
def _logpdf(self, x, *args):
return log(self._pdf(x, *args))
def _cdf_single(self, x, *args):
return integrate.quad(self._pdf, self.a, x, args=args)[0]
def _cdf(self, x, *args):
return self._cdfvec(x, *args)
    ## generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined
    ## in rv_generic
    def pdf(self, x, *args, **kwds):
        """
        Probability density function at x of the given RV.

        Parameters
        ----------
        x : array_like
            quantiles
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        pdf : ndarray
            Probability density function evaluated at x

        """
        args, loc, scale = self._parse_args(*args, **kwds)
        x, loc, scale = map(asarray, (x, loc, scale))
        args = tuple(map(asarray, args))
        # standardize: y = (x - loc) / scale
        x = asarray((x-loc)*1.0/scale)
        # cond0: valid shape params and scale; cond1: x inside the support
        cond0 = self._argcheck(*args) & (scale > 0)
        cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
        cond = cond0 & cond1
        output = zeros(shape(cond), 'd')
        # invalid parameters or NaN input -> badvalue; outside support stays 0
        putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
        if any(cond):
            goodargs = argsreduce(cond, *((x,)+args+(scale,)))
            scale, goodargs = goodargs[-1], goodargs[:-1]
            # density of X = loc + scale*Y picks up a 1/scale factor
            place(output, cond, self._pdf(*goodargs) / scale)
        if output.ndim == 0:
            return output[()]
        return output
    def logpdf(self, x, *args, **kwds):
        """
        Log of the probability density function at x of the given RV.

        This uses a more numerically accurate calculation if available.

        Parameters
        ----------
        x : array_like
            quantiles
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        logpdf : array_like
            Log of the probability density function evaluated at x

        """
        args, loc, scale = self._parse_args(*args, **kwds)
        x, loc, scale = map(asarray, (x, loc, scale))
        args = tuple(map(asarray, args))
        # standardize: y = (x - loc) / scale
        x = asarray((x-loc)*1.0/scale)
        cond0 = self._argcheck(*args) & (scale > 0)
        cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
        cond = cond0 & cond1
        # outside the support the density is 0, so log-density is -inf
        output = empty(shape(cond), 'd')
        output.fill(NINF)
        putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
        if any(cond):
            goodargs = argsreduce(cond, *((x,)+args+(scale,)))
            scale, goodargs = goodargs[-1], goodargs[:-1]
            # log of the 1/scale density factor becomes a -log(scale) term
            place(output, cond, self._logpdf(*goodargs) - log(scale))
        if output.ndim == 0:
            return output[()]
        return output
    def cdf(self, x, *args, **kwds):
        """
        Cumulative distribution function of the given RV.

        Parameters
        ----------
        x : array_like
            quantiles
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        cdf : ndarray
            Cumulative distribution function evaluated at `x`

        """
        args, loc, scale = self._parse_args(*args, **kwds)
        x, loc, scale = map(asarray, (x, loc, scale))
        args = tuple(map(asarray, args))
        # standardize: y = (x - loc) / scale
        x = (x-loc)*1.0/scale
        cond0 = self._argcheck(*args) & (scale > 0)
        # cond1: strictly inside the support; cond2: at/above the upper bound
        cond1 = (scale > 0) & (x > self.a) & (x < self.b)
        cond2 = (x >= self.b) & cond0
        cond = cond0 & cond1
        output = zeros(shape(cond), 'd')
        place(output, (1-cond0)+np.isnan(x), self.badvalue)
        # at or beyond the upper support bound the CDF is exactly 1
        place(output, cond2, 1.0)
        if any(cond):  # call only if at least 1 entry
            goodargs = argsreduce(cond, *((x,)+args))
            place(output, cond, self._cdf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output
    def logcdf(self, x, *args, **kwds):
        """
        Log of the cumulative distribution function at x of the given RV.

        Parameters
        ----------
        x : array_like
            quantiles
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        logcdf : array_like
            Log of the cumulative distribution function evaluated at x

        """
        args, loc, scale = self._parse_args(*args, **kwds)
        x, loc, scale = map(asarray, (x, loc, scale))
        args = tuple(map(asarray, args))
        # standardize: y = (x - loc) / scale
        x = (x-loc)*1.0/scale
        cond0 = self._argcheck(*args) & (scale > 0)
        # cond1: strictly inside the support; cond2: at/above the upper bound
        cond1 = (scale > 0) & (x > self.a) & (x < self.b)
        cond2 = (x >= self.b) & cond0
        cond = cond0 & cond1
        # below the support the CDF is 0, so the log-CDF defaults to -inf
        output = empty(shape(cond), 'd')
        output.fill(NINF)
        # (cond1 == cond1) appears to be a broadcasting helper (all-True
        # array of the right shape) -- NOTE(review): confirm intent
        place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue)
        # at or beyond the upper bound: log(1) == 0
        place(output, cond2, 0.0)
        if any(cond):  # call only if at least 1 entry
            goodargs = argsreduce(cond, *((x,)+args))
            place(output, cond, self._logcdf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output
    def sf(self, x, *args, **kwds):
        """
        Survival function (1-cdf) at x of the given RV.

        Parameters
        ----------
        x : array_like
            quantiles
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        sf : array_like
            Survival function evaluated at x

        """
        args, loc, scale = self._parse_args(*args, **kwds)
        x, loc, scale = map(asarray, (x, loc, scale))
        args = tuple(map(asarray, args))
        # standardize: y = (x - loc) / scale
        x = (x-loc)*1.0/scale
        cond0 = self._argcheck(*args) & (scale > 0)
        # cond1: strictly inside the support; cond2: at/below the lower bound
        cond1 = (scale > 0) & (x > self.a) & (x < self.b)
        cond2 = cond0 & (x <= self.a)
        cond = cond0 & cond1
        output = zeros(shape(cond), 'd')
        place(output, (1-cond0)+np.isnan(x), self.badvalue)
        # at or below the lower support bound the SF is exactly 1
        place(output, cond2, 1.0)
        if any(cond):
            goodargs = argsreduce(cond, *((x,)+args))
            place(output, cond, self._sf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output
    def logsf(self, x, *args, **kwds):
        """
        Log of the survival function of the given RV.

        Returns the log of the "survival function," defined as (1 - `cdf`),
        evaluated at `x`.

        Parameters
        ----------
        x : array_like
            quantiles
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        logsf : ndarray
            Log of the survival function evaluated at `x`.

        """
        args, loc, scale = self._parse_args(*args, **kwds)
        x, loc, scale = map(asarray, (x, loc, scale))
        args = tuple(map(asarray, args))
        # standardize: y = (x - loc) / scale
        x = (x-loc)*1.0/scale
        cond0 = self._argcheck(*args) & (scale > 0)
        # cond1: strictly inside the support; cond2: at/below the lower bound
        cond1 = (scale > 0) & (x > self.a) & (x < self.b)
        cond2 = cond0 & (x <= self.a)
        cond = cond0 & cond1
        # above the support the SF is 0, so the log-SF defaults to -inf
        output = empty(shape(cond), 'd')
        output.fill(NINF)
        place(output, (1-cond0)+np.isnan(x), self.badvalue)
        # at or below the lower bound: log(1) == 0
        place(output, cond2, 0.0)
        if any(cond):
            goodargs = argsreduce(cond, *((x,)+args))
            place(output, cond, self._logsf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output
    def ppf(self, q, *args, **kwds):
        """
        Percent point function (inverse of cdf) at q of the given RV.

        Parameters
        ----------
        q : array_like
            lower tail probability
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        x : array_like
            quantile corresponding to the lower tail probability q.

        """
        args, loc, scale = self._parse_args(*args, **kwds)
        q, loc, scale = map(asarray, (q, loc, scale))
        args = tuple(map(asarray, args))
        # cond0: valid params (loc == loc filters NaN); cond1: q in (0, 1)
        cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
        cond1 = (0 < q) & (q < 1)
        # boundary probabilities map directly to the support end-points
        cond2 = cond0 & (q == 0)
        cond3 = cond0 & (q == 1)
        cond = cond0 & cond1
        output = valarray(shape(cond), value=self.badvalue)

        lower_bound = self.a * scale + loc
        upper_bound = self.b * scale + loc
        place(output, cond2, argsreduce(cond2, lower_bound)[0])
        place(output, cond3, argsreduce(cond3, upper_bound)[0])

        if any(cond):  # call only if at least 1 entry
            goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
            scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
            # transform the standardized quantile back to loc/scale units
            place(output, cond, self._ppf(*goodargs) * scale + loc)
        if output.ndim == 0:
            return output[()]
        return output
    def isf(self, q, *args, **kwds):
        """
        Inverse survival function at q of the given RV.

        Parameters
        ----------
        q : array_like
            upper tail probability
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        x : ndarray or scalar
            Quantile corresponding to the upper tail probability q.

        """
        args, loc, scale = self._parse_args(*args, **kwds)
        q, loc, scale = map(asarray, (q, loc, scale))
        args = tuple(map(asarray, args))
        # cond0: valid params (loc == loc filters NaN); cond1: q in (0, 1)
        cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
        cond1 = (0 < q) & (q < 1)
        # boundary tail probabilities: isf(1) is the lower support end,
        # isf(0) is the upper support end (reversed relative to ppf)
        cond2 = cond0 & (q == 1)
        cond3 = cond0 & (q == 0)
        cond = cond0 & cond1
        output = valarray(shape(cond), value=self.badvalue)

        lower_bound = self.a * scale + loc
        upper_bound = self.b * scale + loc
        place(output, cond2, argsreduce(cond2, lower_bound)[0])
        place(output, cond3, argsreduce(cond3, upper_bound)[0])

        if any(cond):
            goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
            scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
            # transform the standardized quantile back to loc/scale units
            place(output, cond, self._isf(*goodargs) * scale + loc)
        if output.ndim == 0:
            return output[()]
        return output
def _nnlf(self, x, *args):
return -sum(self._logpdf(x, *args), axis=0)
    def nnlf(self, theta, x):
        '''Return negative loglikelihood function

        Notes
        -----
        This is ``-sum(log pdf(x, theta), axis=0)`` where theta are the
        parameters (including loc and scale).
        '''
        try:
            # theta layout: (shape parameters..., loc, scale)
            loc = theta[-2]
            scale = theta[-1]
            args = tuple(theta[:-2])
        except IndexError:
            raise ValueError("Not enough input arguments.")
        if not self._argcheck(*args) or scale <= 0:
            return inf
        # standardize the data
        x = asarray((x-loc) / scale)
        # any observation outside the support gives zero likelihood
        cond0 = (x <= self.a) | (self.b <= x)
        if (any(cond0)):
            return inf
        else:
            N = len(x)
            # N*log(scale) accounts for the Jacobian of the rescaling
            return self._nnlf(x, *args) + N * log(scale)
    def _penalized_nnlf(self, theta, x):
        ''' Return negative loglikelihood function,
        i.e., - sum (log pdf(x, theta), axis=0)
        where theta are the parameters (including loc and scale)

        Unlike `nnlf`, observations outside the support do not make the
        result infinite; they are dropped from the likelihood and charged
        a large finite penalty instead, which keeps the objective usable
        by numerical optimizers in `fit`.
        '''
        try:
            # theta layout: (shape parameters..., loc, scale)
            loc = theta[-2]
            scale = theta[-1]
            args = tuple(theta[:-2])
        except IndexError:
            raise ValueError("Not enough input arguments.")
        if not self._argcheck(*args) or scale <= 0:
            return inf
        # standardize the data
        x = asarray((x-loc) / scale)

        # per-point penalty scale: log of the largest finite float
        loginf = log(_XMAX)

        if np.isneginf(self.a).all() and np.isinf(self.b).all():
            # unbounded support: no observation can fall outside it
            Nbad = 0
        else:
            # count and remove out-of-support observations
            cond0 = (x <= self.a) | (self.b <= x)
            Nbad = sum(cond0)
            if Nbad > 0:
                x = argsreduce(~cond0, x)[0]

        N = len(x)
        return self._nnlf(x, *args) + N*log(scale) + Nbad * 100.0 * loginf
# return starting point for fit (shape arguments + loc + scale)
def _fitstart(self, data, args=None):
if args is None:
args = (1.0,)*self.numargs
return args + self.fit_loc_scale(data, *args)
    # Return the (possibly reduced) function to optimize in order to find MLE
    # estimates for the .fit method
    def _reduce_func(self, args, kwds):
        """Build the objective for `fit`, honoring fixed parameters.

        Parameters
        ----------
        args : sequence
            Full starting parameter vector (shape args, then loc, scale).
        kwds : dict
            May contain ``f0``..``fn``, ``floc``, ``fscale`` to pin
            individual parameters to fixed values.

        Returns
        -------
        x0 : list
            Starting values for the free (non-fixed) parameters only.
        func : callable
            Objective of (theta, x) over the free parameters.
        restore : callable or None
            Maps free-parameter vector back into the full vector; None
            when no parameter is fixed.
        args : list
            Full parameter list with fixed values substituted in.
        """
        args = list(args)
        Nargs = len(args)
        fixedn = []
        index = list(range(Nargs))
        # keyword names aligned with parameter positions: f0..fn, floc, fscale
        names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']
        x0 = []
        for n, key in zip(index, names):
            if key in kwds:
                # parameter n is held fixed at the user-supplied value
                fixedn.append(n)
                args[n] = kwds[key]
            else:
                x0.append(args[n])

        if len(fixedn) == 0:
            func = self._penalized_nnlf
            restore = None
        else:
            if len(fixedn) == len(index):
                raise ValueError(
                    "All parameters fixed. There is nothing to optimize.")

            def restore(args, theta):
                # Replace with theta for all numbers not in fixedn
                # This allows the non-fixed values to vary, but
                # we still call self.nnlf with all parameters.
                i = 0
                for n in range(Nargs):
                    if n not in fixedn:
                        args[n] = theta[i]
                        i += 1
                return args

            def func(theta, x):
                newtheta = restore(args[:], theta)
                return self._penalized_nnlf(newtheta, x)

        return x0, func, restore, args
def fit(self, data, *args, **kwds):
    """
    Return MLEs for shape, location, and scale parameters from data.

    MLE stands for Maximum Likelihood Estimate.  Starting estimates for
    the fit are given by input arguments; for any arguments not provided
    with starting estimates, ``self._fitstart(data)`` is called to generate
    such.

    One can hold some parameters fixed to specific values by passing in
    keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)
    and ``floc`` and ``fscale`` (for location and scale parameters,
    respectively).

    Parameters
    ----------
    data : array_like
        Data to use in calculating the MLEs.
    args : floats, optional
        Starting value(s) for any shape-characterizing arguments (those not
        provided will be determined by a call to ``_fitstart(data)``).
        No default value.
    kwds : floats, optional
        Starting values for the location and scale parameters; no default.
        Special keyword arguments are recognized as holding certain
        parameters fixed:

        f0...fn : hold respective shape parameters fixed.

        floc : hold location parameter fixed to specified value.

        fscale : hold scale parameter fixed to specified value.

        optimizer : The optimizer to use.  The optimizer must take func,
                    and starting position as the first two arguments,
                    plus args (for extra arguments to pass to the
                    function to be optimized) and disp=0 to suppress
                    output as keyword arguments.

    Returns
    -------
    shape, loc, scale : tuple of floats
        MLEs for any shape statistics, followed by those for location and
        scale.

    Notes
    -----
    This fit is computed by maximizing a log-likelihood function, with
    penalty applied for samples outside of range of the distribution. The
    returned answer is not guaranteed to be the globally optimal MLE, it
    may only be locally optimal, or the optimization may fail altogether.
    """
    Narg = len(args)
    if Narg > self.numargs:
        raise TypeError("Too many input arguments.")

    # start holds (loc, scale) placeholders; replaced by _fitstart output
    # whenever any starting value is missing.
    start = [None]*2
    if (Narg < self.numargs) or not ('loc' in kwds and
                                     'scale' in kwds):
        # get distribution specific starting locations
        start = self._fitstart(data)
        args += start[Narg:-2]
    loc = kwds.get('loc', start[-2])
    scale = kwds.get('scale', start[-1])
    args += (loc, scale)
    x0, func, restore, args = self._reduce_func(args, kwds)

    optimizer = kwds.get('optimizer', optimize.fmin)
    # convert string to function in scipy.optimize
    # (string_types is the py2/py3 compat alias for str/basestring)
    if not callable(optimizer) and isinstance(optimizer, string_types):
        if not optimizer.startswith('fmin_'):
            optimizer = "fmin_"+optimizer
        if optimizer == 'fmin_':
            optimizer = 'fmin'
        try:
            optimizer = getattr(optimize, optimizer)
        except AttributeError:
            raise ValueError("%s is not a valid optimizer" % optimizer)
    vals = optimizer(func, x0, args=(ravel(data),), disp=0)
    if restore is not None:
        # Merge the optimized free values back with the fixed ones.
        vals = restore(args, vals)
    vals = tuple(vals)
    return vals
def fit_loc_scale(self, data, *args):
    """
    Estimate loc and scale parameters from data using 1st and 2nd moments.

    Parameters
    ----------
    data : array_like
        Data to fit.
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information).

    Returns
    -------
    Lhat : float
        Estimated location parameter for the data.
    Shat : float
        Estimated scale parameter for the data.
    """
    # Match the sample's first two moments against the standardized
    # distribution's mean and variance.
    mu, mu2 = self.stats(*args, moments='mv')
    sample = asarray(data)
    sample_mean = sample.mean()
    sample_var = sample.var()
    Shat = sqrt(sample_var / mu2)
    Lhat = sample_mean - Shat*mu
    # Fall back to the identity transform when moment matching fails.
    if not np.isfinite(Lhat):
        Lhat = 0
    if not (np.isfinite(Shat) and (0 < Shat)):
        Shat = 1
    return Lhat, Shat
@np.deprecate
def est_loc_scale(self, data, *args):
    """This function is deprecated, use self.fit_loc_scale(data) instead.
    """
    # Backwards-compatible alias; np.deprecate emits the warning.
    return self.fit_loc_scale(data, *args)
def _entropy(self, *args):
    """Differential entropy by numerically integrating entr(pdf)."""
    def integ(x):
        val = self._pdf(x, *args)
        return entr(val)

    # upper limit is often inf, so suppress warnings when integrating
    olderr = np.seterr(over='ignore')
    h = integrate.quad(integ, self.a, self.b)[0]
    np.seterr(**olderr)

    if not np.isnan(h):
        return h
    else:
        # try with different limits if integration problems:
        # restrict infinite bounds to the central (1 - 2e-10) quantile range.
        low, upp = self.ppf([1e-10, 1. - 1e-10], *args)
        if np.isinf(self.b):
            upper = upp
        else:
            upper = self.b
        if np.isinf(self.a):
            lower = low
        else:
            lower = self.a
        return integrate.quad(integ, lower, upper)[0]
def entropy(self, *args, **kwds):
    """
    Differential entropy of the RV.

    Parameters
    ----------
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information).
    loc : array_like, optional
        Location parameter (default=0).
    scale : array_like, optional
        Scale parameter (default=1).
    """
    args, loc, scale = self._parse_args(*args, **kwds)
    args = tuple(map(asarray, args))
    # (loc == loc) is False exactly where loc is nan.
    cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
    output = zeros(shape(cond0), 'd')
    # Invalid parameter combinations get badvalue.
    place(output, (1-cond0), self.badvalue)
    goodargs = argsreduce(cond0, *args)
    # np.vectorize doesn't work when numargs == 0 in numpy 1.5.1
    if self.numargs == 0:
        place(output, cond0, self._entropy() + log(scale))
    else:
        place(output, cond0, self.vecentropy(*goodargs) + log(scale))
    return output
def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
           conditional=False, **kwds):
    """Calculate expected value of a function with respect to the
    distribution.

    The expected value of a function ``f(x)`` with respect to a
    distribution ``dist`` is defined as::

                ubound
        E[x] = Integral(f(x) * dist.pdf(x))
                lbound

    Parameters
    ----------
    func : callable, optional
        Function for which integral is calculated. Takes only one argument.
        The default is the identity mapping f(x) = x.
    args : tuple, optional
        Argument (parameters) of the distribution.
    lb, ub : scalar, optional
        Lower and upper bound for integration. default is set to the
        support of the distribution.
    conditional : bool, optional
        If True, the integral is corrected by the conditional probability
        of the integration interval. The return value is the expectation
        of the function, conditional on being in the given interval.
        Default is False.

    Additional keyword arguments are passed to the integration routine.

    Returns
    -------
    expect : float
        The calculated expected value.

    Notes
    -----
    The integration behavior of this function is inherited from
    `integrate.quad`.
    """
    lockwds = {'loc': loc,
               'scale': scale}
    self._argcheck(*args)
    # Integrand is f(x) * pdf(x); default f is the identity.
    if func is None:
        def fun(x, *args):
            return x * self.pdf(x, *args, **lockwds)
    else:
        def fun(x, *args):
            return func(x) * self.pdf(x, *args, **lockwds)
    # Default bounds: the loc/scale-transformed support.
    if lb is None:
        lb = loc + self.a * scale
    if ub is None:
        ub = loc + self.b * scale
    if conditional:
        # Probability mass of [lb, ub] via the survival function.
        invfac = (self.sf(lb, *args, **lockwds)
                  - self.sf(ub, *args, **lockwds))
    else:
        invfac = 1.0
    kwds['args'] = args
    # Silence floating point warnings from integration.
    olderr = np.seterr(all='ignore')
    vals = integrate.quad(fun, lb, ub, **kwds)[0] / invfac
    np.seterr(**olderr)
    return vals
## Handlers for generic case where xk and pk are given
## The _drv prefix probably means discrete random variable.
def _drv_pmf(self, xk, *args):
try:
return self.P[xk]
except KeyError:
return 0.0
def _drv_cdf(self, xk, *args):
indx = argmax((self.xk > xk), axis=-1)-1
return self.F[self.xk[indx]]
def _drv_ppf(self, q, *args):
indx = argmax((self.qvals >= q), axis=-1)
return self.Finv[self.qvals[indx]]
def _drv_nonzero(self, k, *args):
    # Every explicitly listed support point carries nonzero probability.
    return 1
def _drv_moment(self, n, *args):
n = asarray(n)
return sum(self.xk**n[np.newaxis, ...] * self.pk, axis=0)
def _drv_moment_gen(self, t, *args):
t = asarray(t)
return sum(exp(self.xk * t[np.newaxis, ...]) * self.pk, axis=0)
def _drv2_moment(self, n, *args):
    """Non-central moment of discrete distribution."""
    # many changes, originally not even a return
    tot = 0.0
    diff = 1e100
    # pos = self.a
    pos = max(0.0, 1.0*self.a)
    count = 0
    # handle cases with infinite support:
    # cap the summation at roughly +/-1000 once the terms are below
    # moment_tol.
    ulimit = max(1000, (min(self.b, 1000) + max(self.a, -1000))/2.0)
    llimit = min(-1000, (min(self.b, 1000) + max(self.a, -1000))/2.0)

    # Sum upward from max(0, a).
    while (pos <= self.b) and ((pos <= ulimit) or
                               (diff > self.moment_tol)):
        diff = np.power(pos, n) * self.pmf(pos, *args)
        # use pmf because _pmf does not check support in randint and there
        # might be problems ? with correct self.a, self.b at this stage
        tot += diff
        pos += self.inc
        count += 1

    if self.a < 0:  # handle case when self.a = -inf
        # Sum downward over the negative part of the support.
        diff = 1e100
        pos = -self.inc
        while (pos >= self.a) and ((pos >= llimit) or
                                   (diff > self.moment_tol)):
            diff = np.power(pos, n) * self.pmf(pos, *args)
            # using pmf instead of _pmf, see above
            tot += diff
            pos -= self.inc
            count += 1
    return tot
def _drv2_ppfsingle(self, q, *args):  # Use basic bisection algorithm
    """Scalar ppf: invert the cdf by bisection over an integer support."""
    b = self.b
    a = self.a
    if isinf(b):            # Be sure ending point is > q
        b = int(max(100*q, 10))
        while 1:
            if b >= self.b:
                qb = 1.0
                break
            qb = self._cdf(b, *args)
            if (qb < q):
                b += 10
            else:
                break
    else:
        qb = 1.0
    if isinf(a):    # be sure starting point < q
        a = int(min(-100*q, -10))
        while 1:
            if a <= self.a:
                # NOTE(review): assigning qb here looks like it was meant
                # to be qa (lower-bracket cdf value) -- confirm upstream.
                qb = 0.0
                break
            qa = self._cdf(a, *args)
            if (qa > q):
                a -= 10
            else:
                break
    else:
        qa = self._cdf(a, *args)

    # Bisect [a, b] until the cdf brackets collapse onto q.
    while 1:
        if (qa == q):
            return a
        if (qb == q):
            return b
        if b <= a+1:
            # testcase: return wrong number at lower index
            # python -c "from scipy.stats import zipf;print zipf.ppf(0.01, 2)" wrong
            # python -c "from scipy.stats import zipf;print zipf.ppf([0.01, 0.61, 0.77, 0.83], 2)"
            # python -c "from scipy.stats import logser;print logser.ppf([0.1, 0.66, 0.86, 0.93], 0.6)"
            if qa > q:
                return a
            else:
                return b
        c = int((a+b)/2.0)
        qc = self._cdf(c, *args)
        if (qc < q):
            if a != c:
                a = c
            else:
                raise RuntimeError('updating stopped, endless loop')
            qa = qc
        elif (qc > q):
            if b != c:
                b = c
            else:
                raise RuntimeError('updating stopped, endless loop')
            qb = qc
        else:
            return c
def entropy(pk, qk=None, base=None):
    """Calculate the entropy of a distribution for given probability values.

    If only probabilities `pk` are given, the entropy is calculated as
    ``S = -sum(pk * log(pk), axis=0)``.

    If `qk` is not None, then compute the Kullback-Leibler divergence
    ``S = sum(pk * log(pk / qk), axis=0)``.

    This routine will normalize `pk` and `qk` if they don't sum to 1.

    Parameters
    ----------
    pk : sequence
        Defines the (discrete) distribution. ``pk[i]`` is the (possibly
        unnormalized) probability of event ``i``.
    qk : sequence, optional
        Sequence against which the relative entropy is computed. Should be in
        the same format as `pk`.
    base : float, optional
        The logarithmic base to use, defaults to ``e`` (natural logarithm).

    Returns
    -------
    S : float
        The calculated entropy.
    """
    probs = 1.0 * asarray(pk)
    probs = probs / sum(probs, axis=0)
    if qk is None:
        terms = entr(probs)
    else:
        ref = asarray(qk)
        if len(ref) != len(probs):
            raise ValueError("qk and pk must have same length.")
        ref = 1.0 * ref / sum(ref, axis=0)
        terms = kl_div(probs, ref)
    S = sum(terms, axis=0)
    if base is not None:
        S /= log(base)
    return S
# Must over-ride one of _pmf or _cdf or pass in
# x_k, p(x_k) lists in initialization
class rv_discrete(rv_generic):
    """
    A generic discrete random variable class meant for subclassing.

    `rv_discrete` is a base class to construct specific distribution classes
    and instances from for discrete random variables. rv_discrete can be used
    to construct an arbitrary distribution with defined by a list of support
    points and the corresponding probabilities.

    Parameters
    ----------
    a : float, optional
        Lower bound of the support of the distribution, default: 0
    b : float, optional
        Upper bound of the support of the distribution, default: plus infinity
    moment_tol : float, optional
        The tolerance for the generic calculation of moments
    values : tuple of two array_like
        (xk, pk) where xk are points (integers) with positive probability pk
        with sum(pk) = 1
    inc : integer
        increment for the support of the distribution, default: 1
        other values have not been tested
    badvalue : object, optional
        The value in (masked) arrays that indicates a value that should be
        ignored.
    name : str, optional
        The name of the instance. This string is used to construct the default
        example for distributions.
    longname : str, optional
        This string is used as part of the first line of the docstring returned
        when a subclass has no docstring of its own. Note: `longname` exists
        for backwards compatibility, do not use for new subclasses.
    shapes : str, optional
        The shape of the distribution. For example ``"m, n"`` for a
        distribution that takes two integers as the first two arguments for all
        its methods.
    extradoc : str, optional
        This string is used as the last part of the docstring returned when a
        subclass has no docstring of its own. Note: `extradoc` exists for
        backwards compatibility, do not use for new subclasses.

    Methods
    -------
    ``generic.rvs(<shape(s)>, loc=0, size=1)``
        random variates
    ``generic.pmf(x, <shape(s)>, loc=0)``
        probability mass function
    ``logpmf(x, <shape(s)>, loc=0)``
        log of the probability density function
    ``generic.cdf(x, <shape(s)>, loc=0)``
        cumulative density function
    ``generic.logcdf(x, <shape(s)>, loc=0)``
        log of the cumulative density function
    ``generic.sf(x, <shape(s)>, loc=0)``
        survival function (1-cdf --- sometimes more accurate)
    ``generic.logsf(x, <shape(s)>, loc=0, scale=1)``
        log of the survival function
    ``generic.ppf(q, <shape(s)>, loc=0)``
        percent point function (inverse of cdf --- percentiles)
    ``generic.isf(q, <shape(s)>, loc=0)``
        inverse survival function (inverse of sf)
    ``generic.moment(n, <shape(s)>, loc=0)``
        non-central n-th moment of the distribution.  May not work for array
        arguments.
    ``generic.stats(<shape(s)>, loc=0, moments='mv')``
        mean('m', axis=0), variance('v'), skew('s'), and/or kurtosis('k')
    ``generic.entropy(<shape(s)>, loc=0)``
        entropy of the RV
    ``generic.expect(func=None, args=(), loc=0, lb=None, ub=None,
        conditional=False)``
        Expected value of a function with respect to the distribution.
        Additional kwd arguments passed to integrate.quad
    ``generic.median(<shape(s)>, loc=0)``
        Median of the distribution.
    ``generic.mean(<shape(s)>, loc=0)``
        Mean of the distribution.
    ``generic.std(<shape(s)>, loc=0)``
        Standard deviation of the distribution.
    ``generic.var(<shape(s)>, loc=0)``
        Variance of the distribution.
    ``generic.interval(alpha, <shape(s)>, loc=0)``
        Interval that with `alpha` percent probability contains a random
        realization of this distribution.
    ``generic(<shape(s)>, loc=0)``
        calling a distribution instance returns a frozen distribution

    Notes
    -----
    You can construct an arbitrary discrete rv where ``P{X=xk} = pk``
    by passing to the rv_discrete initialization method (through the
    values=keyword) a tuple of sequences (xk, pk) which describes only those
    values of X (xk) that occur with nonzero probability (pk).

    To create a new discrete distribution, we would do the following::

        class poisson_gen(rv_discrete):
            # "Poisson distribution"
            def _pmf(self, k, mu):
                ...

    and create an instance::

        poisson = poisson_gen(name="poisson",
                              longname='A Poisson')

    The docstring can be created from a template.

    Alternatively, the object may be called (as a function) to fix the shape
    and location parameters returning a "frozen" discrete RV object::

        myrv = generic(<shape(s)>, loc=0)
            - frozen RV object with the same methods but holding the given
              shape and location fixed.

    A note on ``shapes``: subclasses need not specify them explicitly. In this
    case, the `shapes` will be automatically deduced from the signatures of the
    overridden methods.
    If, for some reason, you prefer to avoid relying on introspection, you can
    specify ``shapes`` explicitly as an argument to the instance constructor.

    Examples
    --------
    Custom made discrete distribution:

    >>> from scipy import stats
    >>> xk = np.arange(7)
    >>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2)
    >>> custm = stats.rv_discrete(name='custm', values=(xk, pk))
    >>>
    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots(1, 1)
    >>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r')
    >>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4)
    >>> plt.show()

    Random number generation:

    >>> R = custm.rvs(size=100)
    """

    def __init__(self, a=0, b=inf, name=None, badvalue=None,
                 moment_tol=1e-8, values=None, inc=1, longname=None,
                 shapes=None, extradoc=None):
        super(rv_discrete, self).__init__()

        # cf generic freeze: keep the constructor arguments so the
        # instance can be re-created later.
        self._ctor_param = dict(
            a=a, b=b, name=name, badvalue=badvalue,
            moment_tol=moment_tol, values=values, inc=inc,
            longname=longname, shapes=shapes, extradoc=extradoc)

        if badvalue is None:
            badvalue = nan
        if name is None:
            name = 'Distribution'
        self.badvalue = badvalue
        self.a = a
        self.b = b
        self.name = name
        self.moment_tol = moment_tol
        self.inc = inc
        self._cdfvec = vectorize(self._cdf_single, otypes='d')
        self.return_integers = 1
        self.vecentropy = vectorize(self._entropy)
        self.shapes = shapes
        self.extradoc = extradoc

        if values is not None:
            # Explicit (xk, pk) support: sort the support points and build
            # lookup tables for pmf (P), cdf (F) and inverse cdf (Finv),
            # then bind the _drv_* module helpers as instance methods.
            self.xk, self.pk = values
            self.return_integers = 0
            indx = argsort(ravel(self.xk))
            self.xk = take(ravel(self.xk), indx, 0)
            self.pk = take(ravel(self.pk), indx, 0)
            self.a = self.xk[0]
            self.b = self.xk[-1]
            self.P = dict(zip(self.xk, self.pk))
            self.qvals = np.cumsum(self.pk, axis=0)
            self.F = dict(zip(self.xk, self.qvals))
            decreasing_keys = sorted(self.F.keys(), reverse=True)
            self.Finv = dict((self.F[k], k) for k in decreasing_keys)
            self._ppf = instancemethod(vectorize(_drv_ppf, otypes='d'),
                                       self, rv_discrete)
            self._pmf = instancemethod(vectorize(_drv_pmf, otypes='d'),
                                       self, rv_discrete)
            self._cdf = instancemethod(vectorize(_drv_cdf, otypes='d'),
                                       self, rv_discrete)
            self._nonzero = instancemethod(_drv_nonzero, self, rv_discrete)
            self.generic_moment = instancemethod(_drv_moment,
                                                 self, rv_discrete)
            self.moment_gen = instancemethod(_drv_moment_gen,
                                             self, rv_discrete)
            self._construct_argparser(meths_to_inspect=[_drv_pmf],
                                      locscale_in='loc=0',
                                      # scale=1 for discrete RVs
                                      locscale_out='loc, 1')
        else:
            self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf],
                                      locscale_in='loc=0',
                                      # scale=1 for discrete RVs
                                      locscale_out='loc, 1')

            # nin correction needs to be after we know numargs
            # correct nin for generic moment vectorization
            _vec_generic_moment = vectorize(_drv2_moment, otypes='d')
            _vec_generic_moment.nin = self.numargs + 2
            self.generic_moment = instancemethod(_vec_generic_moment,
                                                 self, rv_discrete)

            # backwards compat. was removed in 0.14.0, put back but
            # deprecated in 0.14.1:
            self.vec_generic_moment = np.deprecate(_vec_generic_moment,
                                                   "vec_generic_moment",
                                                   "generic_moment")

            # correct nin for ppf vectorization
            _vppf = vectorize(_drv2_ppfsingle, otypes='d')
            _vppf.nin = self.numargs + 2  # +1 is for self
            self._ppfvec = instancemethod(_vppf,
                                          self, rv_discrete)

        # now that self.numargs is defined, we can adjust nin
        self._cdfvec.nin = self.numargs + 1

        # generate docstring for subclass instances
        if longname is None:
            # NOTE(review): ['aeiouAEIOU'] is a one-element list, so this
            # membership test is always False ("An" is never chosen) --
            # likely meant `name[0] in 'aeiouAEIOU'`; confirm upstream.
            if name[0] in ['aeiouAEIOU']:
                hstr = "An "
            else:
                hstr = "A "
            longname = hstr + name

        if sys.flags.optimize < 2:
            # Skip adding docstrings if interpreter is run with -OO
            if self.__doc__ is None:
                self._construct_default_doc(longname=longname,
                                            extradoc=extradoc)
            else:
                dct = dict(distdiscrete)
                self._construct_doc(docdict_discrete, dct.get(self.name))

            # discrete RV do not have the scale parameter, remove it
            self.__doc__ = self.__doc__.replace(
                '\n scale : array_like, '
                'optional\n scale parameter (default=1)', '')

    def _construct_default_doc(self, longname=None, extradoc=None):
        """Construct instance docstring from the rv_discrete template."""
        if extradoc is None:
            extradoc = ''
        if extradoc.startswith('\n\n'):
            extradoc = extradoc[2:]
        self.__doc__ = ''.join(['%s discrete random variable.' % longname,
                                '\n\n%(before_notes)s\n', docheaders['notes'],
                                extradoc, '\n%(example)s'])
        self._construct_doc(docdict_discrete)

    def _nonzero(self, k, *args):
        # By default the support consists of the integers.
        return floor(k) == k

    def _pmf(self, k, *args):
        # Generic pmf as a difference of consecutive cdf values.
        return self._cdf(k, *args) - self._cdf(k-1, *args)

    def _logpmf(self, k, *args):
        return log(self._pmf(k, *args))

    def _cdf_single(self, k, *args):
        # Brute-force scalar cdf: sum the pmf over the support up to k.
        m = arange(int(self.a), k+1)
        return sum(self._pmf(m, *args), axis=0)

    def _cdf(self, x, *args):
        # Vectorized cdf on the integer part of x.
        k = floor(x)
        return self._cdfvec(k, *args)

    # generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic

    def rvs(self, *args, **kwargs):
        """
        Random variates of given type.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).
        size : int or tuple of ints, optional
            Defining number of random variates (default=1).  Note that `size`
            has to be given as keyword, not as positional argument.

        Returns
        -------
        rvs : ndarray or scalar
            Random variates of given `size`.
        """
        kwargs['discrete'] = True
        return super(rv_discrete, self).rvs(*args, **kwargs)

    def pmf(self, k, *args, **kwds):
        """
        Probability mass function at k of the given RV.

        Parameters
        ----------
        k : array_like
            quantiles
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        pmf : array_like
            Probability mass function evaluated at k
        """
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        k = asarray((k-loc))
        cond0 = self._argcheck(*args)
        cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args)
        cond = cond0 & cond1
        output = zeros(shape(cond), 'd')
        # Invalid parameters or nan quantiles get badvalue.
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        if any(cond):
            goodargs = argsreduce(cond, *((k,)+args))
            place(output, cond, np.clip(self._pmf(*goodargs), 0, 1))
        if output.ndim == 0:
            return output[()]
        return output

    def logpmf(self, k, *args, **kwds):
        """
        Log of the probability mass function at k of the given RV.

        Parameters
        ----------
        k : array_like
            Quantiles.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter. Default is 0.

        Returns
        -------
        logpmf : array_like
            Log of the probability mass function evaluated at k.
        """
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        k = asarray((k-loc))
        cond0 = self._argcheck(*args)
        cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args)
        cond = cond0 & cond1
        # Outside the support log-pmf is -inf.
        output = empty(shape(cond), 'd')
        output.fill(NINF)
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        if any(cond):
            goodargs = argsreduce(cond, *((k,)+args))
            place(output, cond, self._logpmf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output

    def cdf(self, k, *args, **kwds):
        """
        Cumulative distribution function of the given RV.

        Parameters
        ----------
        k : array_like, int
            Quantiles.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        cdf : ndarray
            Cumulative distribution function evaluated at `k`.
        """
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        k = asarray((k-loc))
        cond0 = self._argcheck(*args)
        cond1 = (k >= self.a) & (k < self.b)
        cond2 = (k >= self.b)
        cond = cond0 & cond1
        output = zeros(shape(cond), 'd')
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        # Above the upper support bound the cdf is identically 1.
        # NOTE(review): (cond0 == cond0) is an all-True mask, so this is
        # effectively just cond2 -- confirm intent upstream.
        place(output, cond2*(cond0 == cond0), 1.0)
        if any(cond):
            goodargs = argsreduce(cond, *((k,)+args))
            place(output, cond, np.clip(self._cdf(*goodargs), 0, 1))
        if output.ndim == 0:
            return output[()]
        return output

    def logcdf(self, k, *args, **kwds):
        """
        Log of the cumulative distribution function at k of the given RV

        Parameters
        ----------
        k : array_like, int
            Quantiles.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        logcdf : array_like
            Log of the cumulative distribution function evaluated at k.
        """
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        k = asarray((k-loc))
        cond0 = self._argcheck(*args)
        cond1 = (k >= self.a) & (k < self.b)
        cond2 = (k >= self.b)
        cond = cond0 & cond1
        output = empty(shape(cond), 'd')
        output.fill(NINF)
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        # log(1) = 0 above the upper support bound.
        place(output, cond2*(cond0 == cond0), 0.0)
        if any(cond):
            goodargs = argsreduce(cond, *((k,)+args))
            place(output, cond, self._logcdf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output

    def sf(self, k, *args, **kwds):
        """
        Survival function (1-cdf) at k of the given RV.

        Parameters
        ----------
        k : array_like
            Quantiles.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        sf : array_like
            Survival function evaluated at k.
        """
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        k = asarray(k-loc)
        cond0 = self._argcheck(*args)
        cond1 = (k >= self.a) & (k <= self.b)
        cond2 = (k < self.a) & cond0
        cond = cond0 & cond1
        output = zeros(shape(cond), 'd')
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        # Below the lower support bound the survival function is 1.
        place(output, cond2, 1.0)
        if any(cond):
            goodargs = argsreduce(cond, *((k,)+args))
            place(output, cond, np.clip(self._sf(*goodargs), 0, 1))
        if output.ndim == 0:
            return output[()]
        return output

    def logsf(self, k, *args, **kwds):
        """
        Log of the survival function of the given RV.

        Returns the log of the "survival function," defined as ``1 - cdf``,
        evaluated at `k`.

        Parameters
        ----------
        k : array_like
            Quantiles.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        logsf : ndarray
            Log of the survival function evaluated at `k`.
        """
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        k = asarray(k-loc)
        cond0 = self._argcheck(*args)
        cond1 = (k >= self.a) & (k <= self.b)
        cond2 = (k < self.a) & cond0
        cond = cond0 & cond1
        output = empty(shape(cond), 'd')
        output.fill(NINF)
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        # log(1) = 0 below the lower support bound.
        place(output, cond2, 0.0)
        if any(cond):
            goodargs = argsreduce(cond, *((k,)+args))
            place(output, cond, self._logsf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output

    def ppf(self, q, *args, **kwds):
        """
        Percent point function (inverse of cdf) at q of the given RV

        Parameters
        ----------
        q : array_like
            Lower tail probability.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).
        scale : array_like, optional
            Scale parameter (default=1).

        Returns
        -------
        k : array_like
            Quantile corresponding to the lower tail probability, q.
        """
        args, loc, _ = self._parse_args(*args, **kwds)
        q, loc = map(asarray, (q, loc))
        args = tuple(map(asarray, args))
        cond0 = self._argcheck(*args) & (loc == loc)
        cond1 = (q > 0) & (q < 1)
        cond2 = (q == 1) & cond0
        cond = cond0 & cond1
        output = valarray(shape(cond), value=self.badvalue, typecode='d')
        # output type 'd' to handle nin and inf
        place(output, (q == 0)*(cond == cond), self.a-1)
        place(output, cond2, self.b)
        if any(cond):
            # loc travels with the good arguments so it is reduced in step.
            goodargs = argsreduce(cond, *((q,)+args+(loc,)))
            loc, goodargs = goodargs[-1], goodargs[:-1]
            place(output, cond, self._ppf(*goodargs) + loc)

        if output.ndim == 0:
            return output[()]
        return output

    def isf(self, q, *args, **kwds):
        """
        Inverse survival function (1-sf) at q of the given RV.

        Parameters
        ----------
        q : array_like
            Upper tail probability.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        k : ndarray or scalar
            Quantile corresponding to the upper tail probability, q.
        """
        args, loc, _ = self._parse_args(*args, **kwds)
        q, loc = map(asarray, (q, loc))
        args = tuple(map(asarray, args))
        cond0 = self._argcheck(*args) & (loc == loc)
        cond1 = (q > 0) & (q < 1)
        cond2 = (q == 1) & cond0
        cond = cond0 & cond1

        # same problem as with ppf; copied from ppf and changed
        output = valarray(shape(cond), value=self.badvalue, typecode='d')
        # output type 'd' to handle nin and inf
        place(output, (q == 0)*(cond == cond), self.b)
        place(output, cond2, self.a-1)

        # call place only if at least 1 valid argument
        if any(cond):
            goodargs = argsreduce(cond, *((q,)+args+(loc,)))
            loc, goodargs = goodargs[-1], goodargs[:-1]
            # PB same as ticket 766
            place(output, cond, self._isf(*goodargs) + loc)

        if output.ndim == 0:
            return output[()]
        return output

    def _entropy(self, *args):
        # Explicit-values distribution: entropy directly from the pk table.
        if hasattr(self, 'pk'):
            return entropy(self.pk)
        else:
            # Otherwise sum entr(pmf) outward from the mean until the
            # terms fall below machine precision.
            mu = int(self.stats(*args, **{'moments': 'm'}))
            val = self.pmf(mu, *args)
            ent = entr(val)
            k = 1
            term = 1.0
            while (abs(term) > _EPS):
                val = self.pmf(mu+k, *args)
                term = entr(val)
                val = self.pmf(mu-k, *args)
                term += entr(val)
                k += 1
                ent += term
            return ent

    def expect(self, func=None, args=(), loc=0, lb=None, ub=None,
               conditional=False):
        """
        Calculate expected value of a function with respect to the distribution
        for discrete distribution

        Parameters
        ----------
        fn : function (default: identity mapping)
            Function for which sum is calculated. Takes only one argument.
        args : tuple
            argument (parameters) of the distribution
        lb, ub : numbers, optional
            lower and upper bound for integration, default is set to the
            support of the distribution, lb and ub are inclusive (ul<=k<=ub)
        conditional : bool, optional
            Default is False.
            If true then the expectation is corrected by the conditional
            probability of the integration interval. The return value is the
            expectation of the function, conditional on being in the given
            interval (k such that ul<=k<=ub).

        Returns
        -------
        expect : float
            Expected value.

        Notes
        -----
        * function is not vectorized
        * accuracy: uses self.moment_tol as stopping criterium
          for heavy tailed distribution e.g. zipf(4), accuracy for
          mean, variance in example is only 1e-5,
          increasing precision (moment_tol) makes zipf very slow
        * suppnmin=100 internal parameter for minimum number of points to
          evaluate could be added as keyword parameter, to evaluate functions
          with non-monotonic shapes, points include integers in (-suppnmin,
          suppnmin)
        * uses maxcount=1000 limits the number of points that are evaluated
          to break loop for infinite sums
          (a maximum of suppnmin+1000 positive plus suppnmin+1000 negative
          integers are evaluated)
        """
        # moment_tol = 1e-12 # increase compared to self.moment_tol,
        # too slow for only small gain in precision for zipf

        # avoid endless loop with unbound integral, eg. var of zipf(2)
        maxcount = 1000
        suppnmin = 100  # minimum number of points to evaluate (+ and -)

        if func is None:
            def fun(x):
                # loc and args from outer scope
                return (x+loc)*self._pmf(x, *args)
        else:
            def fun(x):
                # loc and args from outer scope
                return func(x+loc)*self._pmf(x, *args)

        # used pmf because _pmf does not check support in randint and there
        # might be problems(?) with correct self.a, self.b at this stage maybe
        # not anymore, seems to work now with _pmf
        self._argcheck(*args)  # (re)generate scalar self.a and self.b
        if lb is None:
            lb = (self.a)
        else:
            lb = lb - loc   # convert bound for standardized distribution
        if ub is None:
            ub = (self.b)
        else:
            ub = ub - loc   # convert bound for standardized distribution
        if conditional:
            if np.isposinf(ub)[()]:
                # work around bug: stats.poisson.sf(stats.poisson.b, 2) is nan
                invfac = 1 - self.cdf(lb-1, *args)
            else:
                invfac = 1 - self.cdf(lb-1, *args) - self.sf(ub, *args)
        else:
            invfac = 1.0

        # Sum over the central part of the support first, then extend
        # outward term by term until convergence or maxcount.
        tot = 0.0
        low, upp = self._ppf(0.001, *args), self._ppf(0.999, *args)
        low = max(min(-suppnmin, low), lb)
        upp = min(max(suppnmin, upp), ub)
        supp = np.arange(low, upp+1, self.inc)  # check limits
        tot = np.sum(fun(supp))
        diff = 1e100
        pos = upp + self.inc
        count = 0

        # handle cases with infinite support
        while (pos <= ub) and (diff > self.moment_tol) and count <= maxcount:
            diff = fun(pos)
            tot += diff
            pos += self.inc
            count += 1

        if self.a < 0:  # handle case when self.a = -inf
            diff = 1e100
            pos = low - self.inc
            while ((pos >= lb) and (diff > self.moment_tol) and
                   count <= maxcount):
                diff = fun(pos)
                tot += diff
                pos -= self.inc
                count += 1
        if count > maxcount:
            warnings.warn('expect(): sum did not converge', RuntimeWarning)
        return tot/invfac
def get_distribution_names(namespace_pairs, rv_base_class):
    """
    Collect names of statistical distributions and their generators.

    Parameters
    ----------
    namespace_pairs : sequence
        A snapshot of (name, value) pairs in the namespace of a module.
    rv_base_class : class
        The base class of random variable generator classes in a module.

    Returns
    -------
    distn_names : list of strings
        Names of the statistical distributions.
    distn_gen_names : list of strings
        Names of the generators of the statistical distributions.
        Note that these are not simply the names of the statistical
        distributions, with a _gen suffix added.

    """
    distn_names = []
    distn_gen_names = []
    for name, value in namespace_pairs:
        # Private names are never part of the public distribution set.
        if name.startswith('_'):
            continue
        # Fix: issubclass() raises TypeError when `value` is not a class,
        # so guard with isinstance(value, type) before the subclass check.
        if (name.endswith('_gen') and isinstance(value, type)
                and issubclass(value, rv_base_class)):
            distn_gen_names.append(name)
        if isinstance(value, rv_base_class):
            distn_names.append(name)
    return distn_names, distn_gen_names
|
b699c8c2-2ead-11e5-8aa3-7831c1d44c14
b6b16fb0-2ead-11e5-b2f8-7831c1d44c14
b6b16fb0-2ead-11e5-b2f8-7831c1d44c14 |
'''
Scratchpad for test-based development.
LICENSING
-------------------------------------------------
hypergolix: A python Golix client.
Copyright (C) 2016 Muterra, Inc.
Contributors
------------
Nick Badger
badg@muterra.io | badg@nickbadger.com | nickbadger.com
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the
Free Software Foundation, Inc.,
51 Franklin Street,
Fifth Floor,
Boston, MA 02110-1301 USA
------------------------------------------------------
'''
import unittest
import collections
import logging
import tempfile
import sys
import os
import time
import multiprocessing
from hypergolix._daemonize import daemonize
from hypergolix._daemonize import _SUPPORTED_PLATFORM
from hypergolix._daemonize import _acquire_pidfile
from hypergolix._daemonize import _write_pid
from hypergolix._daemonize import _fratricidal_fork
from hypergolix._daemonize import _filial_usurpation
from hypergolix._daemonize import _make_range_tuples
from hypergolix._daemonize import _autoclose_files
from hypergolix._daemonize import _flush_stds
from hypergolix._daemonize import _redirect_stds
# ###############################################
# "Paragon of adequacy" test fixtures
# ###############################################
# Nothing to see here
# These are not the droids you are looking for
# etc
def test_daemon(pid_file, token, response_q, ct_exit):
    ''' Helper process body: daemonize, echo the token back to the
    parent through the queue so it can verify we ran, then block until
    cleared to exit.
    '''
    daemonize(pid_file)
    # Brief pause, just because.
    time.sleep(.14)
    # Report back to the parent with the token it handed us.
    response_q.put(token)
    # Stay alive until the test signals that we may die.
    ct_exit.wait(timeout=60)
# ###############################################
# Testing
# ###############################################
class Deamonizing_test(unittest.TestCase):
    ''' Test suite for the daemonization helpers in
    hypergolix._daemonize.

    NOTE(review): this revision fixes several crashes in the original:
    + test_autoclose_fs indexed fps[14] although only 14 files exist
      (indices 0-13), and then mixed up list indices with raw file
      descriptors
    + self.subtest -> self.subTest
    + os._exit() requires an explicit status code
    + os.getsid() requires a pid argument (0 = calling process)
    + pidfile could be unbound in the child's finally block in
      test_acquire_file, masking the original error with a NameError
    '''

    def test_make_ranges(self):
        ''' Test making appropriate ranges for file auto-closure.
        Platform-independent.
        '''
        # This would be better done with hypothesis, but that can come later.
        argsets = []
        expected_results = []

        # No exclusions: one contiguous range.
        argsets.append(
            (0, 5, [])
        )
        expected_results.append(
            [
                (0, 5),
            ]
        )

        # Exclusions below the start are ignored.
        argsets.append(
            (3, 10, [1, 2])
        )
        expected_results.append(
            [
                (3, 10),
            ]
        )

        # A single interior exclusion splits the range in two.
        argsets.append(
            (3, 7, [4,])
        )
        expected_results.append(
            [
                (3, 4),
                (5, 7),
            ]
        )

        # Adjacent exclusions collapse into a single gap.
        argsets.append(
            (3, 14, [4, 5, 10])
        )
        expected_results.append(
            [
                (3, 4),
                (6, 10),
                (11, 14),
            ]
        )

        # Everything excluded: no ranges at all.
        argsets.append(
            (1, 3, [1, 2, 3])
        )
        expected_results.append(
            [
            ]
        )

        for argset, expected_result in zip(argsets, expected_results):
            with self.subTest(argset):
                actual_result = _make_range_tuples(*argset)
                self.assertEqual(actual_result, expected_result)

    def test_flush_stds(self):
        ''' Test flushing stds. Platform-independent.
        '''
        # Should this do any kind of verification or summat?
        _flush_stds()

    def test_redirect_stds(self):
        ''' Test redirecting stds. Platform-independent.
        '''
        # Keep references to the original std streams while we shuffle
        # the underlying descriptors (unused, but cheap insurance).
        stdin = sys.stdin
        stdout = sys.stdout
        stderr = sys.stderr

        # Get some file descriptors to use to cache stds
        with tempfile.NamedTemporaryFile() as stdin_tmp, \
                tempfile.NamedTemporaryFile() as stdout_tmp, \
                tempfile.NamedTemporaryFile() as stderr_tmp:
            stdin_fd = stdin_tmp.fileno()
            stdout_fd = stdout_tmp.fileno()
            stderr_fd = stderr_tmp.fileno()
            os.dup2(0, stdin_fd)
            os.dup2(1, stdout_fd)
            os.dup2(2, stderr_fd)

            # Perform the actual tests
            with tempfile.TemporaryDirectory() as dirname:
                try:
                    with self.subTest('Separate streams'):
                        _redirect_stds(
                            dirname + '/stdin.txt',
                            dirname + '/stdout.txt',
                            dirname + '/stderr.txt'
                        )

                    with self.subTest('Shared streams'):
                        _redirect_stds(
                            dirname + '/stdin2.txt',
                            dirname + '/stdshr.txt',
                            dirname + '/stdshr.txt'
                        )

                    with self.subTest('Combined streams'):
                        _redirect_stds(
                            dirname + '/stdcomb.txt',
                            dirname + '/stdcomb.txt',
                            dirname + '/stdcomb.txt'
                        )

                # Restore our original stdin, stdout, stderr. Do this
                # before dir cleanup or we'll get cleanup errors.
                finally:
                    os.dup2(stdin_fd, 0)
                    os.dup2(stdout_fd, 1)
                    os.dup2(stderr_fd, 2)

    def test_write_pid(self):
        ''' Test that writing the pid to the pidfile worked. Platform-
        specific.
        '''
        pid = str(os.getpid())

        # Test new file
        with tempfile.TemporaryFile('w+') as fp:
            _write_pid(fp)
            fp.seek(0)
            self.assertEqual(fp.read(), pid + '\n')

        # Test existing file
        with tempfile.TemporaryFile('w+') as fp:
            fp.write('hello world, overwrite me!')
            _write_pid(fp)
            fp.seek(0)
            self.assertEqual(fp.read(), pid + '\n')

    @unittest.skipIf(not _SUPPORTED_PLATFORM, 'Unsupported platform.')
    def test_autoclose_fs(self):
        ''' Test auto-closing files. Platform-specific.
        '''
        num_files = 14
        fps = []
        for __ in range(num_files):
            fps.append(tempfile.TemporaryFile())

        try:
            # Fix: valid indices run 0..num_files-1, so 14 was out of
            # range. Also keep the list indices and the raw descriptors
            # in separate variables; the original reused `kept` for both.
            kept_idx = [0, 7, 13]
            # NOTE(review): assumes _autoclose_files takes raw file
            # descriptors in `shielded` (matching the fileno() call
            # below) -- confirm against hypergolix._daemonize.
            kept = [fps[ii].fileno() for ii in kept_idx]
            _autoclose_files(shielded=kept)

            for ii in range(num_files):
                if ii in kept_idx:
                    with self.subTest('Persistent: ' + str(ii)):
                        # Make sure all kept files were, in fact, kept
                        self.assertFalse(fps[ii].closed)
                else:
                    with self.subTest('Removed: ' + str(ii)):
                        # Make sure all other files were, in fact, removed
                        self.assertTrue(fps[ii].closed)

            # Do it again with no files shielded from closure.
            _autoclose_files()
            for keeper in kept_idx:
                # Fix: self.subtest -> self.subTest.
                with self.subTest('Cleanup: ' + str(keeper)):
                    self.assertTrue(fps[keeper].closed)

        # Clean up any unsuccessful tests. Note idempotency of fp.close().
        finally:
            for fp in fps:
                fp.close()

    @unittest.skipIf(not _SUPPORTED_PLATFORM, 'Unsupported platform.')
    def test_acquire_file(self):
        ''' Test that locking the pidfile worked. Platform-specific.
        '''
        mp_ctx = multiprocessing.get_context('spawn')
        ctx1 = mp_ctx.Event()
        ctx2 = mp_ctx.Event()
        ct_exit = mp_ctx.Event()
        # parent_conn, child_conn = mp_ctx.Pipe()

        with tempfile.TemporaryDirectory() as dirname:
            fpath = dirname + '/testpid.txt'
            pid = os.fork()

            # Parent process execution
            if pid != 0:
                ctx1.wait(timeout=1)
                self.assertTrue(os.path.exists(fpath))
                # A second acquisition attempt must abort.
                with self.assertRaises(SystemExit):
                    _acquire_pidfile(fpath)
                ctx2.set()

            # Child process execution
            else:
                # Fix: pre-bind pidfile so a failed acquisition doesn't
                # turn into a NameError in the finally block.
                pidfile = None
                try:
                    pidfile = _acquire_pidfile(fpath)
                    ctx1.set()
                    ctx2.wait(timeout=60)
                finally:
                    # Manually close the pidfile.
                    if pidfile is not None:
                        pidfile.close()
                    # Tell the parent it's safe to close.
                    ct_exit.set()
                    # Exit child without cleanup. Fix: os._exit requires
                    # an explicit exit status.
                    os._exit(0)

            # This will only happen in the parent process, due to os._exit
            ct_exit.wait(timeout=60)

    @unittest.skipIf(not _SUPPORTED_PLATFORM, 'Unsupported platform.')
    def test_frat_fork(self):
        ''' Test "fratricidal" (okay, parricidal) forking (fork and
        parent dies). Platform-specific.
        '''
        mp_ctx = multiprocessing.get_context('spawn')
        pid_q = mp_ctx.Queue()
        ct_exit = mp_ctx.Event()

        inter_pid = os.fork()

        # This is the root parent.
        if inter_pid != 0:
            child_pid = pid_q.get(timeout=5)
            ct_exit.set()
            self.assertNotEqual(inter_pid, child_pid)
            # Make sure the intermediate process is dead.
            with self.assertRaises(OSError):
                # Send it a signal to check existence (os.kill is badly named)
                os.kill(inter_pid, 0)

        # This is the intermediate.
        else:
            try:
                # Fork again, killing the intermediate.
                _fratricidal_fork()
                my_pid = os.getpid()
                pid_q.put(my_pid)
                ct_exit.wait(timeout=5)
            finally:
                # Exit without cleanup. Fix: os._exit requires an
                # explicit exit status.
                os._exit(0)

    @unittest.skipIf(not _SUPPORTED_PLATFORM, 'Unsupported platform.')
    def test_filial_usurp(self):
        ''' Test decoupling child from parent environment. Platform-
        specific.
        '''
        mp_ctx = multiprocessing.get_context('spawn')
        umask = 0o027
        chdir = os.path.abspath('/')

        prop_q = mp_ctx.Queue()
        ct_exit = mp_ctx.Event()

        child_pid = os.fork()

        # This is the parent.
        if child_pid != 0:
            try:
                # Fix: os.getsid requires a pid; 0 = calling process.
                my_sid = os.getsid(0)
                child_sid = prop_q.get(timeout=5)
                child_umask = prop_q.get(timeout=5)
                child_wdir = prop_q.get(timeout=5)

                self.assertNotEqual(my_sid, child_sid)
                self.assertEqual(child_umask, umask)
                self.assertEqual(child_wdir, chdir)

            finally:
                ct_exit.set()

        # This is the child.
        else:
            try:
                _filial_usurpation(chdir, umask)
                # Get our session id (0 = calling process)
                sid = os.getsid(0)
                # reset umask and get our set one.
                umask = os.umask(0)
                # Get our working directory.
                wdir = os.path.abspath(os.getcwd())
                # Update parent
                prop_q.put(sid)
                prop_q.put(umask)
                prop_q.put(wdir)
                # Wait for the signal and then exit
                ct_exit.wait(timeout=5)
            finally:
                # Fix: os._exit requires an explicit exit status.
                os._exit(0)

    @unittest.skipIf(not _SUPPORTED_PLATFORM, 'Unsupported platform.')
    def test_daemonize(self):
        ''' Test daemonization. Platform-specific.
        '''
        mp_ctx = multiprocessing.get_context('spawn')

        with tempfile.TemporaryDirectory() as dirname:
            pid_file = dirname + '/testpid.pid'
            token = 2718282

            response_q = mp_ctx.Queue()
            ct_exit = mp_ctx.Event()

            p = mp_ctx.Process(target=test_daemon, args=(
                pid_file,
                token,
                response_q,
                ct_exit
            ))
            p.start()

            try:
                # Wait for the daemon to respond with the token we gave it
                parrotted_token = response_q.get(timeout=5)
                # Make sure the token matches
                self.assertEqual(parrotted_token, token)
                # Make sure the pid file exists
                self.assertTrue(os.path.exists(pid_file))

            finally:
                # Let the deamon die
                ct_exit.set()

            # Now hold off just a moment and then make sure the pid is
            # cleaned up successfully.
            time.sleep(1)
            self.assertFalse(os.path.exists(pid_file))
if __name__ == "__main__":
    # Configure hypergolix logging defaults before running the suite.
    from hypergolix import logutils
    logutils.autoconfig()
    # from hypergolix.utils import TraceLogger
    # with TraceLogger(interval=10):
    #     unittest.main()
    unittest.main()
Testing difficulties...
'''
Scratchpad for test-based development.
LICENSING
-------------------------------------------------
hypergolix: A python Golix client.
Copyright (C) 2016 Muterra, Inc.
Contributors
------------
Nick Badger
badg@muterra.io | badg@nickbadger.com | nickbadger.com
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the
Free Software Foundation, Inc.,
51 Franklin Street,
Fifth Floor,
Boston, MA 02110-1301 USA
------------------------------------------------------
'''
import unittest
import collections
import logging
import tempfile
import sys
import os
import time
import multiprocessing
from hypergolix._daemonize import daemonize
from hypergolix._daemonize import _SUPPORTED_PLATFORM
from hypergolix._daemonize import _acquire_pidfile
from hypergolix._daemonize import _write_pid
from hypergolix._daemonize import _fratricidal_fork
from hypergolix._daemonize import _filial_usurpation
from hypergolix._daemonize import _make_range_tuples
from hypergolix._daemonize import _autoclose_files
from hypergolix._daemonize import _flush_stds
from hypergolix._daemonize import _redirect_stds
# ###############################################
# "Paragon of adequacy" test fixtures
# ###############################################
# Nothing to see here
# These are not the droids you are looking for
# etc
def test_daemon(pid_file, token, response_q, ct_exit):
    ''' Helper process body: daemonize, report the token back to the
    parent (proving that we actually ran), then block until the test
    clears us to exit.
    '''
    daemonize(pid_file)
    # Short pause, just because.
    time.sleep(.14)
    # Hand the token back to the parent for verification.
    response_q.put(token)
    # Remain alive until the parent signals completion.
    ct_exit.wait(timeout=60)
# ###############################################
# Testing
# ###############################################
class Deamonizing_test(unittest.TestCase):
    ''' Test suite for the daemonization helpers in
    hypergolix._daemonize.

    NOTE(review): this revision fixes several crashes in the original:
    + self.subtest -> self.subTest
    + os.getsid() requires a pid argument (0 = calling process)
    + test_autoclose_fs mixed up list indices with raw file descriptors
    + pidfile could be unbound in the child's finally block in
      test_acquire_file, masking the original error with a NameError
    '''

    def test_make_ranges(self):
        ''' Test making appropriate ranges for file auto-closure.
        Platform-independent.
        '''
        # This would be better done with hypothesis, but that can come later.
        argsets = []
        expected_results = []

        # No exclusions: one contiguous range.
        argsets.append(
            (0, 5, [])
        )
        expected_results.append(
            [
                (0, 5),
            ]
        )

        # Exclusions below the start are ignored.
        argsets.append(
            (3, 10, [1, 2])
        )
        expected_results.append(
            [
                (3, 10),
            ]
        )

        # A single interior exclusion splits the range in two.
        argsets.append(
            (3, 7, [4,])
        )
        expected_results.append(
            [
                (3, 4),
                (5, 7),
            ]
        )

        # Adjacent exclusions collapse into a single gap.
        argsets.append(
            (3, 14, [4, 5, 10])
        )
        expected_results.append(
            [
                (3, 4),
                (6, 10),
                (11, 14),
            ]
        )

        # Everything excluded: no ranges at all.
        argsets.append(
            (1, 3, [1, 2, 3])
        )
        expected_results.append(
            [
            ]
        )

        for argset, expected_result in zip(argsets, expected_results):
            with self.subTest(argset):
                actual_result = _make_range_tuples(*argset)
                self.assertEqual(actual_result, expected_result)

    def test_flush_stds(self):
        ''' Test flushing stds. Platform-independent.
        '''
        # Should this do any kind of verification or summat?
        _flush_stds()

    def test_redirect_stds(self):
        ''' Test redirecting stds. Platform-independent.
        '''
        # Keep references to the original std streams while we shuffle
        # the underlying descriptors (unused, but cheap insurance).
        stdin = sys.stdin
        stdout = sys.stdout
        stderr = sys.stderr

        # Get some file descriptors to use to cache stds
        with tempfile.NamedTemporaryFile() as stdin_tmp, \
                tempfile.NamedTemporaryFile() as stdout_tmp, \
                tempfile.NamedTemporaryFile() as stderr_tmp:
            stdin_fd = stdin_tmp.fileno()
            stdout_fd = stdout_tmp.fileno()
            stderr_fd = stderr_tmp.fileno()
            os.dup2(0, stdin_fd)
            os.dup2(1, stdout_fd)
            os.dup2(2, stderr_fd)

            # Perform the actual tests
            with tempfile.TemporaryDirectory() as dirname:
                try:
                    with self.subTest('Separate streams'):
                        _redirect_stds(
                            dirname + '/stdin.txt',
                            dirname + '/stdout.txt',
                            dirname + '/stderr.txt'
                        )

                    with self.subTest('Shared streams'):
                        _redirect_stds(
                            dirname + '/stdin2.txt',
                            dirname + '/stdshr.txt',
                            dirname + '/stdshr.txt'
                        )

                    with self.subTest('Combined streams'):
                        _redirect_stds(
                            dirname + '/stdcomb.txt',
                            dirname + '/stdcomb.txt',
                            dirname + '/stdcomb.txt'
                        )

                # Restore our original stdin, stdout, stderr. Do this
                # before dir cleanup or we'll get cleanup errors.
                finally:
                    os.dup2(stdin_fd, 0)
                    os.dup2(stdout_fd, 1)
                    os.dup2(stderr_fd, 2)

    def test_write_pid(self):
        ''' Test that writing the pid to the pidfile worked. Platform-
        specific.
        '''
        pid = str(os.getpid())

        # Test new file
        with tempfile.TemporaryFile('w+') as fp:
            _write_pid(fp)
            fp.seek(0)
            self.assertEqual(fp.read(), pid + '\n')

        # Test existing file
        with tempfile.TemporaryFile('w+') as fp:
            fp.write('hello world, overwrite me!')
            _write_pid(fp)
            fp.seek(0)
            self.assertEqual(fp.read(), pid + '\n')

    @unittest.skipIf(not _SUPPORTED_PLATFORM, 'Unsupported platform.')
    def test_autoclose_fs(self):
        ''' Test auto-closing files. Platform-specific.
        '''
        num_files = 14
        fps = []
        for __ in range(num_files):
            fps.append(tempfile.TemporaryFile())

        try:
            # Fix: keep the list indices and the raw descriptors in
            # separate variables; the original reused `kept` for both,
            # so the membership / cleanup checks below compared indices
            # against file descriptors.
            kept_idx = [0, 7, 13]
            # NOTE(review): assumes _autoclose_files takes raw file
            # descriptors in `shielded` (matching the fileno() call
            # below) -- confirm against hypergolix._daemonize.
            kept = [fps[ii].fileno() for ii in kept_idx]
            _autoclose_files(shielded=kept)

            for ii in range(num_files):
                if ii in kept_idx:
                    with self.subTest('Persistent: ' + str(ii)):
                        # Make sure all kept files were, in fact, kept
                        self.assertFalse(fps[ii].closed)
                else:
                    with self.subTest('Removed: ' + str(ii)):
                        # Make sure all other files were, in fact, removed
                        self.assertTrue(fps[ii].closed)

            # Do it again with no files shielded from closure.
            _autoclose_files()
            for keeper in kept_idx:
                # Fix: self.subtest -> self.subTest.
                with self.subTest('Cleanup: ' + str(keeper)):
                    self.assertTrue(fps[keeper].closed)

        # Clean up any unsuccessful tests. Note idempotency of fp.close().
        finally:
            for fp in fps:
                fp.close()

    @unittest.skipIf(not _SUPPORTED_PLATFORM, 'Unsupported platform.')
    def test_acquire_file(self):
        ''' Test that locking the pidfile worked. Platform-specific.
        '''
        mp_ctx = multiprocessing.get_context('spawn')
        ctx1 = mp_ctx.Event()
        ctx2 = mp_ctx.Event()
        ct_exit = mp_ctx.Event()
        # parent_conn, child_conn = mp_ctx.Pipe()

        with tempfile.TemporaryDirectory() as dirname:
            fpath = dirname + '/testpid.txt'
            pid = os.fork()

            # Parent process execution
            if pid != 0:
                ctx1.wait(timeout=1)
                self.assertTrue(os.path.exists(fpath))
                # A second acquisition attempt must abort.
                with self.assertRaises(SystemExit):
                    _acquire_pidfile(fpath)
                ctx2.set()

            # Child process execution
            else:
                # Fix: pre-bind pidfile so a failed acquisition doesn't
                # turn into a NameError in the finally block.
                pidfile = None
                try:
                    pidfile = _acquire_pidfile(fpath)
                    ctx1.set()
                    ctx2.wait(timeout=60)
                finally:
                    # Manually close the pidfile.
                    if pidfile is not None:
                        pidfile.close()
                    # Tell the parent it's safe to close.
                    ct_exit.set()
                    # Exit child without cleanup.
                    os._exit(0)

            # This will only happen in the parent process, due to os._exit
            ct_exit.wait(timeout=60)

    @unittest.skipIf(not _SUPPORTED_PLATFORM, 'Unsupported platform.')
    def test_frat_fork(self):
        ''' Test "fratricidal" (okay, parricidal) forking (fork and
        parent dies). Platform-specific.
        '''
        mp_ctx = multiprocessing.get_context('spawn')
        pid_q = mp_ctx.Queue()
        ct_exit = mp_ctx.Event()

        inter_pid = os.fork()

        # This is the root parent.
        if inter_pid != 0:
            child_pid = pid_q.get(timeout=5)
            ct_exit.set()
            self.assertNotEqual(inter_pid, child_pid)
            # Make sure the intermediate process is dead.
            with self.assertRaises(OSError):
                # Send it a signal to check existence (os.kill is badly named)
                os.kill(inter_pid, 0)

        # This is the intermediate.
        else:
            try:
                # Fork again, killing the intermediate.
                _fratricidal_fork()
                my_pid = os.getpid()
                pid_q.put(my_pid)
                ct_exit.wait(timeout=5)
            finally:
                # Exit without cleanup.
                os._exit(0)

    @unittest.skipIf(not _SUPPORTED_PLATFORM, 'Unsupported platform.')
    def test_filial_usurp(self):
        ''' Test decoupling child from parent environment. Platform-
        specific.
        '''
        mp_ctx = multiprocessing.get_context('spawn')
        umask = 0o027
        chdir = os.path.abspath('/')

        prop_q = mp_ctx.Queue()
        ct_exit = mp_ctx.Event()

        child_pid = os.fork()

        # This is the parent.
        if child_pid != 0:
            try:
                # Fix: os.getsid requires a pid; 0 = calling process.
                my_sid = os.getsid(0)
                child_sid = prop_q.get(timeout=5)
                child_umask = prop_q.get(timeout=5)
                child_wdir = prop_q.get(timeout=5)

                self.assertNotEqual(my_sid, child_sid)
                self.assertEqual(child_umask, umask)
                self.assertEqual(child_wdir, chdir)

            finally:
                ct_exit.set()

        # This is the child.
        else:
            try:
                _filial_usurpation(chdir, umask)
                # Get our session id (0 = calling process)
                sid = os.getsid(0)
                # reset umask and get our set one.
                umask = os.umask(0)
                # Get our working directory.
                wdir = os.path.abspath(os.getcwd())
                # Update parent
                prop_q.put(sid)
                prop_q.put(umask)
                prop_q.put(wdir)
                # Wait for the signal and then exit
                ct_exit.wait(timeout=5)
            finally:
                os._exit(0)

    @unittest.skipIf(not _SUPPORTED_PLATFORM, 'Unsupported platform.')
    def test_daemonize(self):
        ''' Test daemonization. Platform-specific.
        '''
        mp_ctx = multiprocessing.get_context('spawn')

        with tempfile.TemporaryDirectory() as dirname:
            pid_file = dirname + '/testpid.pid'
            token = 2718282

            response_q = mp_ctx.Queue()
            ct_exit = mp_ctx.Event()

            p = mp_ctx.Process(target=test_daemon, args=(
                pid_file,
                token,
                response_q,
                ct_exit
            ))
            p.start()

            try:
                # Wait for the daemon to respond with the token we gave it
                parrotted_token = response_q.get(timeout=5)
                # Make sure the token matches
                self.assertEqual(parrotted_token, token)
                # Make sure the pid file exists
                self.assertTrue(os.path.exists(pid_file))

            finally:
                # Let the deamon die
                ct_exit.set()

            # Now hold off just a moment and then make sure the pid is
            # cleaned up successfully.
            time.sleep(1)
            self.assertFalse(os.path.exists(pid_file))
if __name__ == "__main__":
    # Configure hypergolix logging defaults before running the suite.
    from hypergolix import logutils
    logutils.autoconfig()
    # from hypergolix.utils import TraceLogger
    # with TraceLogger(interval=10):
    #     unittest.main()
    unittest.main()
import json
import re
import time
from decimal import Decimal
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import View
from django.db.models import Q
import login.models as login_models
import api.parser
from api import actions
from api import parser
from api.helpers.http import ModHttpResponse
from api.error import APIError
from rest_framework.views import APIView
from dataedit.models import Table as DBTable
from rest_framework import status
from django.http import Http404
import sqlalchemy as sqla
import geoalchemy2 # Although this import seems unused is has to be here
def api_exception(f):
    """Decorator for API handlers: translate known exceptions into JSON
    error responses instead of letting them propagate as server errors.

    :param f: view callable to wrap.
    :return: wrapped callable with the same signature.
    """
    from functools import wraps

    @wraps(f)
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except actions.APIError as e:
            # NOTE(review): assumes the project's APIError defines
            # .message and .status -- confirm against api.error.
            return JsonResponse({'reason': e.message},
                                status=e.status)
        except KeyError as e:
            # Fix: a KeyError instance is not JSON-serializable; report
            # the missing key as a string instead.
            return JsonResponse({'reason': str(e)}, status=400)
    return wrapper
def permission_wrapper(permission, f):
    # Factory: wrap view `f` so it requires at least `permission` on the
    # table addressed by the request's schema/table kwargs; raises
    # PermissionDenied otherwise.
    def wrapper(caller, request, *args, **kwargs):
        schema = kwargs.get('schema')
        table = kwargs.get('table')
        # NOTE(review): is_anonymous is accessed as a property here but
        # *called* as a method elsewhere in this file (Table.put) -- one
        # of the two is wrong for any given Django version; confirm.
        if request.user.is_anonymous or request.user.get_table_permission_level(
                DBTable.load(schema, table)) < permission:
            raise PermissionDenied
        else:
            return f(caller, request,*args, **kwargs)
    return wrapper
def require_write_permission(f):
    # Shortcut decorator: enforce WRITE permission on the target table.
    return permission_wrapper(login_models.WRITE_PERM, f)
def require_delete_permission(f):
    # Shortcut decorator: enforce DELETE permission on the target table.
    return permission_wrapper(login_models.DELETE_PERM, f)
def require_admin_permission(f):
    # Shortcut decorator: enforce ADMIN permission on the target table.
    return permission_wrapper(login_models.ADMIN_PERM, f)
def conjunction(clauses):
    """Wrap a list of filter clauses into a single AND operator node."""
    node = dict(type='operator', operator='AND', operands=clauses)
    return node
class Table(APIView):
    """
    Handels the creation of tables and serves information on existing tables
    """
    @api_exception
    def get(self, request, schema, table):
        """
        Returns a dictionary that describes the DDL-make-up of this table.
        Fields are:

        * name : Name of the table,
        * schema: Name of the schema,
        * columns : as specified in :meth:`api.actions.describe_columns`
        * indexes : as specified in :meth:`api.actions.describe_indexes`
        * constraints: as specified in
            :meth:`api.actions.describe_constraints`

        :param request:
        :return:
        """
        schema, table = actions.get_table_name(schema, table, restrict_schemas=False)
        return JsonResponse({
            'schema': schema,
            'name': table,
            'columns': actions.describe_columns(schema, table),
            # NOTE(review): docstring advertises "indexes" but the key is
            # "indexed" -- confirm which spelling API clients rely on.
            'indexed': actions.describe_indexes(schema, table),
            'constraints': actions.describe_constraints(schema, table)
        })

    @api_exception
    def post(self, request, schema, table):
        """
        Changes properties of tables and table columns

        :param request:
        :param schema:
        :param table:
        :return:
        """
        # Structural changes are restricted to the open sandbox schemas.
        if schema not in ['model_draft', 'sandbox', 'test']:
            raise PermissionDenied
        # Schemas prefixed with '_' hold internal metadata.
        if schema.startswith('_'):
            raise PermissionDenied
        json_data = request.data

        if 'column' in json_data['type']:
            column_definition = api.parser.parse_scolumnd_from_columnd(schema, table, json_data['name'], json_data)
            result = actions.queue_column_change(schema, table, column_definition)
            return ModHttpResponse(result)
        elif 'constraint' in json_data['type']:
            # Input has nothing to do with DDL from Postgres.
            # Input is completely different.
            # Using actions.parse_sconstd_from_constd is not applicable
            # dict.get() returns None, if key does not exist
            constraint_definition = {
                'action': json_data['action'],  # {ADD, DROP}
                'constraint_type': json_data.get('constraint_type'),  # {FOREIGN KEY, PRIMARY KEY, UNIQUE, CHECK}
                'constraint_name': json_data.get('constraint_name'),  # {myForeignKey, myUniqueConstraint}
                'constraint_parameter': json_data.get('constraint_parameter'),
                # Things in Brackets, e.g. name of column
                'reference_table': json_data.get('reference_table'),
                'reference_column': json_data.get('reference_column')
            }
            result = actions.queue_constraint_change(schema, table, constraint_definition)
            return ModHttpResponse(result)
        else:
            return ModHttpResponse(actions.get_response_dict(False, 400, 'type not recognised'))

    @api_exception
    def put(self, request, schema, table):
        """
        Every request to unsave http methods have to contain a "csrftoken".
        This token is used to deny cross site reference forwarding.
        In every request the header had to contain "X-CSRFToken" with the actual csrftoken.
        The token can be requested at / and will be returned as cookie.

        :param request:
        :return:
        """
        if schema not in ['model_draft', 'sandbox', 'test']:
            raise PermissionDenied
        if schema.startswith('_'):
            raise PermissionDenied
        # NOTE(review): is_anonymous is *called* here but accessed as a
        # property in permission_wrapper -- confirm the Django version
        # in use supports the form chosen.
        if request.user.is_anonymous():
            raise PermissionDenied
        if actions.has_table(dict(schema=schema, table=table),{}):
            raise APIError('Table already exists')
        json_data = request.data['query']
        constraint_definitions = []
        column_definitions = []

        for constraint_definiton in json_data.get('constraints',[]):
            constraint_definiton.update({"action": "ADD",
                                         "c_table": table,
                                         "c_schema": schema})
            constraint_definitions.append(constraint_definiton)

        if 'columns' not in json_data:
            raise actions.APIError("Table contains no columns")
        for column_definition in json_data['columns']:
            column_definition.update({"c_table": table,
                                      "c_schema": schema})
            column_definitions.append(column_definition)

        result = actions.table_create(schema, table, column_definitions, constraint_definitions)
        # The creating user becomes admin of the new table.
        perm, _ = login_models.UserPermission.objects.get_or_create(table=DBTable.load(schema, table),
                                                                    holder=request.user)
        perm.level = login_models.ADMIN_PERM
        perm.save()
        request.user.save()
        return JsonResponse(result, status=status.HTTP_201_CREATED)

    @api_exception
    @require_delete_permission
    def delete(self, request, schema, table):
        # Drop the table together with its _edit/_insert/_delete meta
        # tables in the meta schema.
        # NOTE(review): identifiers are interpolated directly into DDL
        # strings; this relies on actions.get_table_name validating the
        # names -- confirm that validation exists.
        schema, table = actions.get_table_name(schema, table)
        meta_schema = actions.get_meta_schema_name(schema)

        edit_table = actions.get_edit_table_name(schema, table)
        actions._get_engine().execute(
            'DROP TABLE {schema}.{table} CASCADE;'.format(schema=meta_schema,
                                                          table=edit_table))

        edit_table = actions.get_insert_table_name(schema, table)
        actions._get_engine().execute(
            'DROP TABLE {schema}.{table} CASCADE;'.format(schema=meta_schema,
                                                          table=edit_table))

        edit_table = actions.get_delete_table_name(schema, table)
        actions._get_engine().execute(
            'DROP TABLE {schema}.{table} CASCADE;'.format(schema=meta_schema,
                                                          table=edit_table))

        actions._get_engine().execute(
            'DROP TABLE {schema}.{table} CASCADE;'.format(schema=schema,
                                                          table=table))
        return JsonResponse({}, status=status.HTTP_200_OK)
class Index(APIView):
    # Placeholder resource: index manipulation is not implemented yet.
    def get(self, request):
        pass

    def post(self, request):
        pass

    def put(self, request):
        pass
class Column(APIView):
    @api_exception
    def get(self, request, schema, table, column=None):
        # Describe all columns of the table, or only the named column.
        schema, table = actions.get_table_name(schema, table, restrict_schemas=False)
        response = actions.describe_columns(schema, table)
        if column:
            try:
                response = response[column]
            except KeyError:
                raise actions.APIError('The column specified is not part of '
                                       'this table.')
        return JsonResponse(response)

    @api_exception
    @require_write_permission
    def post(self, request, schema, table, column):
        # Alter an existing column according to the supplied query.
        schema, table = actions.get_table_name(schema, table)
        response = actions.column_alter(request.data['query'], {}, schema, table, column)
        return JsonResponse(response)

    @api_exception
    @require_write_permission
    def put(self, request, schema, table, column):
        # Add a new column to the table.
        schema, table = actions.get_table_name(schema, table)
        actions.column_add(schema, table, column, request.data['query'])
        return JsonResponse({}, status=201)
class Fields(APIView):
    def get(self, request, schema, table, id, column=None):
        # Return the raw value of one field, addressed by row id + column.
        schema, table = actions.get_table_name(schema, table, restrict_schemas=False)
        # NOTE(review): column defaults to None but is passed straight to
        # is_pg_qual below -- confirm parser handles None gracefully.
        if not parser.is_pg_qual(table) or not parser.is_pg_qual(schema) or not parser.is_pg_qual(id) or not parser.is_pg_qual(column):
            return ModHttpResponse({"error": "Bad Request", "http_status": 400})

        returnValue = actions.getValue(schema, table, column, id);
        # 404 when the value is absent, 200 (with the value) otherwise.
        return HttpResponse(returnValue if returnValue is not None else "", status= (404 if returnValue is None else 200))

    def post(self, request):
        pass

    def put(self, request):
        pass
class Rows(APIView):
@api_exception
def get(self, request, schema, table, row_id=None):
schema, table = actions.get_table_name(schema, table, restrict_schemas=False)
columns = request.GET.getlist('column')
where = request.GET.get('where')
if row_id and where:
raise actions.APIError('Where clauses and row id are not allowed in the same query')
orderby = request.GET.getlist('orderby')
if row_id and orderby:
raise actions.APIError('Order by clauses and row id are not allowed in the same query')
limit = request.GET.get('limit')
if row_id and limit:
raise actions.APIError('Limit by clauses and row id are not allowed in the same query')
offset = request.GET.get('offset')
if row_id and offset:
raise actions.APIError('Order by clauses and row id are not allowed in the same query')
if offset is not None and not offset.isdigit():
raise actions.APIError("Offset must be integer")
if limit is not None and not limit.isdigit():
raise actions.APIError("Limit must be integer")
if not all(parser.is_pg_qual(c) for c in columns):
raise actions.APIError("Columns are no postgres qualifiers")
if not all(parser.is_pg_qual(c) for c in orderby):
raise actions.APIError("Columns in groupby-clause are no postgres qualifiers")
# OPERATORS could be EQUALS, GREATER, LOWER, NOTEQUAL, NOTGREATER, NOTLOWER
# CONNECTORS could be AND, OR
# If you connect two values with an +, it will convert the + to a space. Whatever.
where_clauses = self.__read_where_clause(where)
if row_id:
clause = {'operands': [
{'type': 'column',
'column': 'id'},
row_id
],
'operator': 'EQUALS',
'type': 'operator'}
if where_clauses:
where_clauses = conjunction(clause, where_clauses)
else:
where_clauses = clause
# TODO: Validate where_clauses. Should not be vulnerable
data = {'schema': schema,
'table': table,
'columns': columns,
'where': where_clauses,
'orderby': orderby,
'limit': limit,
'offset': offset
}
return_obj = self.__get_rows(request, data)
# Extract column names from description
cols = [col[0] for col in return_obj['description']]
dict_list = [dict(zip(cols,row)) for row in return_obj['data']]
if row_id:
if dict_list:
dict_list = dict_list[0]
else:
raise Http404
# TODO: Figure out what JsonResponse does different.
return JsonResponse(dict_list, safe=False)
    @api_exception
    @require_write_permission
    def post(self, request, schema, table, row_id=None, action=None):
        # Update rows (default), or insert a new one when action == 'new'.
        schema, table = actions.get_table_name(schema, table)
        column_data = request.data['query']
        status_code = status.HTTP_200_OK
        if row_id:
            response = self.__update_rows(request, schema, table, column_data, row_id)
        else:
            if action=='new':
                response = self.__insert_row(request, schema, table, column_data, row_id)
                status_code=status.HTTP_201_CREATED
            else:
                # No id and no 'new' action: bulk update via where-clause.
                response = self.__update_rows(request, schema, table, column_data, None)
        # Persist the queued changes to the actual table.
        actions.apply_changes(schema, table)
        return JsonResponse(response, status=status_code)
    @api_exception
    @require_write_permission
    def put(self, request, schema, table, row_id=None, action=None):
        # Idempotent upsert of a single row addressed by id: update when
        # the id exists, insert (201) otherwise.
        if action:
            raise APIError('This request type (PUT) is not supported. The '
                           '\'new\' statement is only possible in POST requests.')
        schema, table = actions.get_table_name(schema, table)
        if not row_id:
            return JsonResponse(actions._response_error('This methods requires an id'),
                                status=status.HTTP_400_BAD_REQUEST)
        column_data = request.data['query']
        if row_id and column_data.get('id', int(row_id)) != int(row_id):
            raise actions.APIError(
                'Id in URL and query do not match. Ids may not change.',
                status=status.HTTP_409_CONFLICT)
        engine = actions._get_engine()
        # check whether id is already in use
        # NOTE(review): schema/table/id are interpolated into raw SQL;
        # relies on get_table_name validation and the int() conversion
        # above -- confirm.
        exists = engine.execute('select count(*) '
                                'from {schema}.{table} '
                                'where id = {id};'.format(schema=schema,
                                                          table=table,
                                                          id=row_id)).first()[0] > 0 if row_id else False
        if exists:
            response = self.__update_rows(request, schema, table, column_data, row_id)
            actions.apply_changes(schema, table)
            return JsonResponse(response)
        else:
            result = self.__insert_row(request, schema, table, column_data, row_id)
            actions.apply_changes(schema, table)
            return JsonResponse(result, status=status.HTTP_201_CREATED)
    @require_delete_permission
    def delete(self, request, table, schema, row_id=None):
        # Delete the row addressed by *row_id*, or all rows matching ?where=.
        # NOTE(review): (table, schema) parameter order is swapped relative to
        # the other handlers; this only works because the URLconf passes
        # keyword arguments. Also not wrapped in @api_exception, so APIErrors
        # propagate uncaught — confirm whether that is intended.
        schema, table = actions.get_table_name(schema, table)
        result = self.__delete_rows(request, schema, table, row_id)
        actions.apply_changes(schema, table)
        return JsonResponse(result)
    @actions.load_cursor
    def __delete_rows(self, request, schema, table, row_id=None):
        """
        Queue a delete of the rows selected by the optional ``?where=``
        GET parameter and/or *row_id*.
        """
        where = request.GET.get('where')
        query = {
            'schema': schema,
            'table': table,
            'where': self.__read_where_clause(where),
        }
        # cursor_id is injected into request.data by @actions.load_cursor.
        context = {'cursor_id': request.data['cursor_id'],
                   'user': request.user}
        if row_id:
            # id = <row_id>, ANDed with any parsed where clause.
            clause = {
                'operator': '=',
                'operands': [
                    actions._load_value(row_id),
                    {
                        'type': 'column',
                        'column': 'id'
                    }],
                'type': 'operator'
            }
            if query['where']:
                clause = conjunction([clause, query['where']])
            query['where'] = clause
        return actions.data_delete(query, context)
def __read_where_clause(self, where):
where_expression = '^(?P<first>[\w\d_\.]+)\s*(?P<operator>' \
+ '|'.join(parser.sql_operators) \
+ ')\s*(?P<second>(?![>=]).+)$'
where_clauses = []
if where:
where_splitted = re.findall(where_expression, where)
where_clauses = conjunction([{'operands': [{
'type': 'column',
'column': match[0]},match[2]],
'operator': match[1],
'type': 'operator'} for match in where_splitted])
return where_clauses
    @actions.load_cursor
    def __insert_row(self, request, schema, table, row, row_id=None):
        """
        Queue an insert of *row* (a dict, or a list of dicts for bulk
        insert). If *row_id* is given it must match any 'id' in the payload.
        """
        if row_id and row.get('id', int(row_id)) != int(row_id):
            return actions._response_error('The id given in the query does not '
                                           'match the id given in the url')
        if row_id:
            row['id'] = row_id
        context = {'cursor_id': request.data['cursor_id'],
                   'user': request.user}
        query = {
            'schema': schema,
            'table': table,
            'values': [row] if isinstance(row, dict) else row
        }
        if not row_id:
            # Let the database assign the id and report it back to the caller.
            query['returning'] = ['id']
        result = actions.data_insert(query, context)
        return result
    @actions.load_cursor
    def __update_rows(self, request, schema, table, row, row_id=None):
        """
        Queue an update setting *row*'s values on the rows selected by the
        ``?where=`` GET parameter and/or *row_id*.
        """
        context = {'cursor_id': request.data['cursor_id'],
                   'user': request.user}
        where = request.GET.get('where')
        query = {
            'schema': schema,
            'table': table,
            'where': self.__read_where_clause(where),
            'values': row
        }
        if row_id:
            # id = <row_id>, ANDed with any parsed where clause.
            clause = {
                'operator': '=',
                'operands': [
                    actions._load_value(row_id),
                    {
                        'type': 'column',
                        'column': 'id'
                    }],
                'type': 'operator'
            }
            if query['where']:
                clause = conjunction([clause, query['where']])
            query['where'] = clause
        return actions.data_update(query, context)
@actions.load_cursor
def __get_rows(self, request, data):
table = actions._get_table(data['schema'], table=data['table'])
params = {}
params_count = 0
columns = data.get('columns')
if not columns:
query = table.select()
else:
columns = [getattr(table.c, c) for c in columns]
query = sqla.select(columns=columns)
where_clauses = data.get('where')
if where_clauses:
query = query.where(parser.parse_condition(where_clauses))
orderby = data.get('orderby')
if orderby:
query = query.order_by(orderby)
limit = data.get('limit')
if limit and limit.isdigit():
query = query.limit(int(limit))
offset = data.get('offset')
if offset and offset.isdigit():
query = query.offset(int(offset))
cursor = actions._load_cursor(request.data['cursor_id'])
actions._execute_sqla(query, cursor)
class Session(APIView):
    def get(self, request, length=1):
        # NOTE(review): 'resonse' looks like a typo for 'response', the
        # session value is returned directly rather than wrapped in an
        # HttpResponse, and *length* is unused. Confirm whether this
        # endpoint is actually live before fixing the key.
        return request.session['resonse']
def date_handler(obj):
    """
    JSON ``default`` serializer hook.

    Decimals become floats, objects exposing ``isoformat`` (dates and
    datetimes) are rendered as ISO-8601 strings, and anything else falls
    back to ``str(obj)``.
    """
    if isinstance(obj, Decimal):
        return float(obj)
    return obj.isoformat() if hasattr(obj, 'isoformat') else str(obj)
# Create your views here.
def create_ajax_handler(func, allow_cors=False):
    """
    Implements a mapper from api pages to the corresponding functions in
    api/actions.py
    :param func: The callable ``(query, context) -> data`` to expose
    :param allow_cors: If True, anonymous requests get a permissive
        Access-Control-Allow-Origin header
    :return: A JSON-Response that contains a dictionary with the
        corresponding response stored in *content*
    """
    class AJAX_View(APIView):
        @api_exception
        def post(self, request):
            response = JsonResponse(self.execute(request))
            if allow_cors and request.user.is_anonymous:
                response['Access-Control-Allow-Origin'] = '*'
            return response
        @actions.load_cursor
        def execute(self, request):
            content = request.data
            context = {'user': request.user,
                       'cursor_id': request.data['cursor_id']}
            query = content.get('query', ['{}'])
            try:
                if isinstance(query, list):
                    query = query[0]
                if isinstance(query, str):
                    query = json.loads(query)
            except Exception:
                # Narrowed from a bare `except:` that would also swallow
                # KeyboardInterrupt/SystemExit.
                raise APIError('Your query is not properly formated.')
            data = func(query, context)
            # This must be done in order to clean the structure of non-serializable
            # objects (e.g. datetime)
            response_data = json.loads(json.dumps(data, default=date_handler))
            return {'content': response_data,
                    'cursor_id': context['cursor_id']}
    return AJAX_View.as_view()
def stream(data):
    """
    TODO: Implement streaming of large datasets

    Yields each item JSON-roundtripped (normalising dates/decimals via
    date_handler), sleeping one second between items.
    Removed the unused ``chunck`` local and the index-based loop.
    :param data: sequence of JSON-serializable items
    :return: generator over the cleaned items
    """
    for item in data:
        yield json.loads(json.dumps(item, default=date_handler))
        time.sleep(1)
def get_users(request):
    """Return a JSON list of user names similar to or starting with ?name=."""
    search = request.GET['name']
    matches = login_models.myuser.objects.filter(
        Q(name__trigram_similar=search) | Q(name__istartswith=search))
    return JsonResponse([u.name for u in matches], safe=False)
def get_groups(request):
    """Return a JSON list of group names similar to or starting with ?name=."""
    search = request.GET['name']
    groups = login_models.Group.objects.filter(
        Q(name__trigram_similar=search) | Q(name__istartswith=search))
    return JsonResponse([g.name for g in groups], safe=False)
Simplify cors handling
import json
import re
import time
from decimal import Decimal
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import View
from django.db.models import Q
import login.models as login_models
import api.parser
from api import actions
from api import parser
from api.helpers.http import ModHttpResponse
from api.error import APIError
from rest_framework.views import APIView
from dataedit.models import Table as DBTable
from rest_framework import status
from django.http import Http404
import sqlalchemy as sqla
import geoalchemy2 # Although this import seems unused is has to be here
def cors(allow):
    """
    Decorator factory: when *allow* is true, attach permissive CORS
    headers to the wrapped view's response; otherwise pass it through
    untouched.
    """
    def doublewrapper(f):
        def wrapper(*args, **kwargs):
            response = f(*args, **kwargs)
            if not allow:
                return response
            response['Access-Control-Allow-Origin'] = '*'
            response["Access-Control-Allow-Methods"] = 'POST'
            response["Access-Control-Allow-Headers"] = "Content-Type"
            return response
        return wrapper
    return doublewrapper
def api_exception(f):
    """
    View decorator mapping API-level failures onto JSON error responses:
    actions.APIError -> its own status, missing-key errors -> 400.
    """
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except actions.APIError as e:
            return JsonResponse({'reason': e.message},
                                status=e.status)
        except KeyError as e:
            # Fixed: a bare KeyError instance is not JSON-serializable,
            # which made the error handler itself crash with a TypeError.
            return JsonResponse({'reason': str(e)}, status=400)
    return wrapper
def permission_wrapper(permission, f):
    """
    Wrap view method *f* so the caller needs at least *permission* on the
    table addressed by the 'schema'/'table' keyword arguments.
    Anonymous users are always rejected with PermissionDenied.
    """
    def wrapper(caller, request, *args, **kwargs):
        schema = kwargs.get('schema')
        table = kwargs.get('table')
        # NOTE(review): is_anonymous is used as a property here but called
        # as a method in Table.put — confirm the Django version in use.
        if request.user.is_anonymous or request.user.get_table_permission_level(
                DBTable.load(schema, table)) < permission:
            raise PermissionDenied
        else:
            return f(caller, request,*args, **kwargs)
    return wrapper
# Convenience decorators binding permission_wrapper to the three
# permission levels defined in login.models.
def require_write_permission(f):
    # Caller needs at least WRITE permission on the addressed table.
    return permission_wrapper(login_models.WRITE_PERM, f)
def require_delete_permission(f):
    # Caller needs at least DELETE permission on the addressed table.
    return permission_wrapper(login_models.DELETE_PERM, f)
def require_admin_permission(f):
    # Caller needs ADMIN permission on the addressed table.
    return permission_wrapper(login_models.ADMIN_PERM, f)
def conjunction(clauses):
    """Build an AND node over the given list of clauses for the query AST."""
    return dict(type='operator', operator='AND', operands=clauses)
class Table(APIView):
    """
    Handles the creation of tables and serves information on existing tables
    """
    @api_exception
    def get(self, request, schema, table):
        """
        Returns a dictionary that describes the DDL-make-up of this table.
        Fields are:

        * name : Name of the table,
        * schema: Name of the schema,
        * columns : as specified in :meth:`api.actions.describe_columns`
        * indexes : as specified in :meth:`api.actions.describe_indexes`
        * constraints: as specified in
          :meth:`api.actions.describe_constraints`

        :param request:
        :return:
        """
        schema, table = actions.get_table_name(schema, table, restrict_schemas=False)
        return JsonResponse({
            'schema': schema,
            'name': table,
            'columns': actions.describe_columns(schema, table),
            # NOTE(review): key is 'indexed' although it holds the index
            # description — check clients before renaming.
            'indexed': actions.describe_indexes(schema, table),
            'constraints': actions.describe_constraints(schema, table)
        })
    @api_exception
    def post(self, request, schema, table):
        """
        Changes properties of tables and table columns.
        Only the open schemas (model_draft, sandbox, test) may be altered.

        :param request: payload in request.data: 'type' selects column vs
            constraint changes; the rest describes the change
        :param schema:
        :param table:
        :return: ModHttpResponse wrapping the queued change result
        """
        if schema not in ['model_draft', 'sandbox', 'test']:
            raise PermissionDenied
        if schema.startswith('_'):
            raise PermissionDenied
        json_data = request.data
        if 'column' in json_data['type']:
            column_definition = api.parser.parse_scolumnd_from_columnd(schema, table, json_data['name'], json_data)
            result = actions.queue_column_change(schema, table, column_definition)
            return ModHttpResponse(result)
        elif 'constraint' in json_data['type']:
            # Input has nothing to do with DDL from Postgres.
            # Input is completely different.
            # Using actions.parse_sconstd_from_constd is not applicable
            # dict.get() returns None, if key does not exist
            constraint_definition = {
                'action': json_data['action'],  # {ADD, DROP}
                'constraint_type': json_data.get('constraint_type'),  # {FOREIGN KEY, PRIMARY KEY, UNIQUE, CHECK}
                'constraint_name': json_data.get('constraint_name'),  # {myForeignKey, myUniqueConstraint}
                'constraint_parameter': json_data.get('constraint_parameter'),
                # Things in Brackets, e.g. name of column
                'reference_table': json_data.get('reference_table'),
                'reference_column': json_data.get('reference_column')
            }
            result = actions.queue_constraint_change(schema, table, constraint_definition)
            return ModHttpResponse(result)
        else:
            return ModHttpResponse(actions.get_response_dict(False, 400, 'type not recognised'))
    @api_exception
    def put(self, request, schema, table):
        """
        Create a new table from the column/constraint definitions in
        ``request.data['query']`` and grant the creator ADMIN permission.

        Every request to unsafe http methods has to contain a "csrftoken".
        This token is used to deny cross site reference forwarding.
        In every request the header has to contain "X-CSRFToken" with the
        actual csrftoken. The token can be requested at / and will be
        returned as cookie.

        :param request:
        :return: JsonResponse with the creation result (201)
        """
        if schema not in ['model_draft', 'sandbox', 'test']:
            raise PermissionDenied
        if schema.startswith('_'):
            raise PermissionDenied
        # NOTE(review): is_anonymous is called here but used as a property
        # in permission_wrapper — confirm the Django version in use.
        if request.user.is_anonymous():
            raise PermissionDenied
        if actions.has_table(dict(schema=schema, table=table),{}):
            raise APIError('Table already exists')
        json_data = request.data['query']
        constraint_definitions = []
        column_definitions = []
        for constraint_definiton in json_data.get('constraints',[]):
            constraint_definiton.update({"action": "ADD",
                                         "c_table": table,
                                         "c_schema": schema})
            constraint_definitions.append(constraint_definiton)
        if 'columns' not in json_data:
            raise actions.APIError("Table contains no columns")
        for column_definition in json_data['columns']:
            column_definition.update({"c_table": table,
                                      "c_schema": schema})
            column_definitions.append(column_definition)
        result = actions.table_create(schema, table, column_definitions, constraint_definitions)
        # The creating user becomes the table admin.
        perm, _ = login_models.UserPermission.objects.get_or_create(table=DBTable.load(schema, table),
                                                                    holder=request.user)
        perm.level = login_models.ADMIN_PERM
        perm.save()
        request.user.save()
        return JsonResponse(result, status=status.HTTP_201_CREATED)
    @api_exception
    @require_delete_permission
    def delete(self, request, schema, table):
        """
        Drop the table together with its three edit-tracking tables
        (edit/insert/delete) in the corresponding meta schema.
        """
        schema, table = actions.get_table_name(schema, table)
        meta_schema = actions.get_meta_schema_name(schema)
        edit_table = actions.get_edit_table_name(schema, table)
        actions._get_engine().execute(
            'DROP TABLE {schema}.{table} CASCADE;'.format(schema=meta_schema,
                                                          table=edit_table))
        edit_table = actions.get_insert_table_name(schema, table)
        actions._get_engine().execute(
            'DROP TABLE {schema}.{table} CASCADE;'.format(schema=meta_schema,
                                                          table=edit_table))
        edit_table = actions.get_delete_table_name(schema, table)
        actions._get_engine().execute(
            'DROP TABLE {schema}.{table} CASCADE;'.format(schema=meta_schema,
                                                          table=edit_table))
        # Finally drop the data table itself.
        actions._get_engine().execute(
            'DROP TABLE {schema}.{table} CASCADE;'.format(schema=schema,
                                                          table=table))
        return JsonResponse({}, status=status.HTTP_200_OK)
class Index(APIView):
    """Stub endpoint for table indexes; handlers are not implemented yet."""
    def get(self, request):
        pass
    def post(self, request):
        pass
    def put(self, request):
        pass
class Column(APIView):
    """Read and modify the column definitions of a table."""
    @api_exception
    def get(self, request, schema, table, column=None):
        """
        Describe all columns of the table, or a single one when *column*
        is given (APIError if it does not exist).
        """
        schema, table = actions.get_table_name(schema, table, restrict_schemas=False)
        response = actions.describe_columns(schema, table)
        if column:
            try:
                response = response[column]
            except KeyError:
                raise actions.APIError('The column specified is not part of '
                                       'this table.')
        return JsonResponse(response)
    @api_exception
    @require_write_permission
    def post(self, request, schema, table, column):
        """Alter an existing column according to request.data['query']."""
        schema, table = actions.get_table_name(schema, table)
        response = actions.column_alter(request.data['query'], {}, schema, table, column)
        return JsonResponse(response)
    @api_exception
    @require_write_permission
    def put(self, request, schema, table, column):
        """Add a new column described by request.data['query'] (201)."""
        schema, table = actions.get_table_name(schema, table)
        actions.column_add(schema, table, column, request.data['query'])
        return JsonResponse({}, status=201)
class Fields(APIView):
    """Read access to a single field (cell) of a table row."""
    def get(self, request, schema, table, id, column=None):
        """
        Return the raw value of *column* in the row addressed by *id*;
        empty 404 response when the value is NULL or absent.
        """
        schema, table = actions.get_table_name(schema, table, restrict_schemas=False)
        # Validate every identifier before it reaches SQL construction.
        if not all(parser.is_pg_qual(v) for v in (table, schema, id, column)):
            return ModHttpResponse({"error": "Bad Request", "http_status": 400})
        # Removed the stray trailing semicolon and untangled the nested
        # conditional-expression response construction.
        value = actions.getValue(schema, table, column, id)
        if value is None:
            return HttpResponse("", status=404)
        return HttpResponse(value, status=200)
    def post(self, request):
        pass
    def put(self, request):
        pass
class Rows(APIView):
    """
    Row-level REST access to a table: GET reads rows (filtered by ?where=,
    ?orderby=, ?limit=, ?offset= or addressed by a row id), POST inserts or
    updates, PUT upserts by id and DELETE removes rows. Write operations go
    through the api.actions edit queue and are applied via
    actions.apply_changes.
    """
    @api_exception
    def get(self, request, schema, table, row_id=None):
        """
        Return matching rows as a JSON list, or a single JSON object when
        *row_id* is given (404 if that row does not exist).
        """
        schema, table = actions.get_table_name(schema, table, restrict_schemas=False)
        columns = request.GET.getlist('column')
        where = request.GET.get('where')
        if row_id and where:
            raise actions.APIError('Where clauses and row id are not allowed in the same query')
        orderby = request.GET.getlist('orderby')
        if row_id and orderby:
            raise actions.APIError('Order by clauses and row id are not allowed in the same query')
        limit = request.GET.get('limit')
        if row_id and limit:
            raise actions.APIError('Limit by clauses and row id are not allowed in the same query')
        offset = request.GET.get('offset')
        if row_id and offset:
            # Fixed copy-pasted message that wrongly said "Order by".
            raise actions.APIError('Offset clauses and row id are not allowed in the same query')
        if offset is not None and not offset.isdigit():
            raise actions.APIError("Offset must be integer")
        if limit is not None and not limit.isdigit():
            raise actions.APIError("Limit must be integer")
        if not all(parser.is_pg_qual(c) for c in columns):
            raise actions.APIError("Columns are no postgres qualifiers")
        if not all(parser.is_pg_qual(c) for c in orderby):
            raise actions.APIError("Columns in groupby-clause are no postgres qualifiers")
        # OPERATORS could be EQUALS, GREATER, LOWER, NOTEQUAL, NOTGREATER, NOTLOWER
        # CONNECTORS could be AND, OR
        # If you connect two values with an +, it will convert the + to a space. Whatever.
        where_clauses = self.__read_where_clause(where)
        if row_id:
            # Restrict to the single requested row.
            clause = {'operands': [
                {'type': 'column',
                 'column': 'id'},
                row_id
            ],
                'operator': 'EQUALS',
                'type': 'operator'}
            if where_clauses:
                # Fixed: conjunction() takes a single list argument; the old
                # two-argument call raised a TypeError (the branch was dead
                # because row_id+where is rejected above, but keep it correct).
                where_clauses = conjunction([clause, where_clauses])
            else:
                where_clauses = clause
        # TODO: Validate where_clauses. Should not be vulnerable
        data = {'schema': schema,
                'table': table,
                'columns': columns,
                'where': where_clauses,
                'orderby': orderby,
                'limit': limit,
                'offset': offset
                }
        return_obj = self.__get_rows(request, data)
        # Extract column names from description
        cols = [col[0] for col in return_obj['description']]
        dict_list = [dict(zip(cols, row)) for row in return_obj['data']]
        if row_id:
            if dict_list:
                dict_list = dict_list[0]
            else:
                raise Http404
        # TODO: Figure out what JsonResponse does different.
        return JsonResponse(dict_list, safe=False)
    @api_exception
    @require_write_permission
    def post(self, request, schema, table, row_id=None, action=None):
        """
        Update the row addressed by *row_id*, insert the payload when
        ``action == 'new'`` (201), or bulk-update rows matching ?where=.
        """
        schema, table = actions.get_table_name(schema, table)
        column_data = request.data['query']
        status_code = status.HTTP_200_OK
        if row_id:
            response = self.__update_rows(request, schema, table, column_data, row_id)
        else:
            if action == 'new':
                response = self.__insert_row(request, schema, table, column_data, row_id)
                status_code = status.HTTP_201_CREATED
            else:
                # No id and no 'new': bulk update restricted only by ?where=.
                response = self.__update_rows(request, schema, table, column_data, None)
        actions.apply_changes(schema, table)
        return JsonResponse(response, status=status_code)
    @api_exception
    @require_write_permission
    def put(self, request, schema, table, row_id=None, action=None):
        """
        Upsert the row addressed by *row_id*: update when it exists,
        insert (201) otherwise. Requires an id; 'new' is POST-only.
        """
        if action:
            raise APIError('This request type (PUT) is not supported. The '
                           '\'new\' statement is only possible in POST requests.')
        schema, table = actions.get_table_name(schema, table)
        if not row_id:
            return JsonResponse(actions._response_error('This methods requires an id'),
                                status=status.HTTP_400_BAD_REQUEST)
        column_data = request.data['query']
        if column_data.get('id', int(row_id)) != int(row_id):
            raise actions.APIError(
                'Id in URL and query do not match. Ids may not change.',
                status=status.HTTP_409_CONFLICT)
        engine = actions._get_engine()
        # Check whether the id is already in use. row_id comes from the URL
        # (untrusted); coerce to int before interpolating into the SQL string.
        exists = engine.execute('select count(*) '
                                'from {schema}.{table} '
                                'where id = {id};'.format(schema=schema,
                                                          table=table,
                                                          id=int(row_id))).first()[0] > 0
        if exists:
            response = self.__update_rows(request, schema, table, column_data, row_id)
            actions.apply_changes(schema, table)
            return JsonResponse(response)
        else:
            result = self.__insert_row(request, schema, table, column_data, row_id)
            actions.apply_changes(schema, table)
            return JsonResponse(result, status=status.HTTP_201_CREATED)
    @require_delete_permission
    def delete(self, request, table, schema, row_id=None):
        """
        Delete the row addressed by *row_id*, or all rows matching ?where=.
        NOTE: the (table, schema) parameter order is kept as-is because the
        URLconf passes keyword arguments.
        """
        schema, table = actions.get_table_name(schema, table)
        result = self.__delete_rows(request, schema, table, row_id)
        actions.apply_changes(schema, table)
        return JsonResponse(result)
    @actions.load_cursor
    def __delete_rows(self, request, schema, table, row_id=None):
        """Queue a delete of rows selected by ?where= and/or *row_id*."""
        where = request.GET.get('where')
        query = {
            'schema': schema,
            'table': table,
            'where': self.__read_where_clause(where),
        }
        context = {'cursor_id': request.data['cursor_id'],
                   'user': request.user}
        if row_id:
            # id = <row_id>, ANDed with any parsed where clause.
            clause = {
                'operator': '=',
                'operands': [
                    actions._load_value(row_id),
                    {
                        'type': 'column',
                        'column': 'id'
                    }],
                'type': 'operator'
            }
            if query['where']:
                clause = conjunction([clause, query['where']])
            query['where'] = clause
        return actions.data_delete(query, context)
    def __read_where_clause(self, where):
        """
        Parse a ?where= string ("<column> <operator> <value>") into a
        condition tree for api.actions. Returns [] when *where* is empty.
        """
        # Raw strings fix the invalid escape sequences (\w, \s, ...) that
        # trigger DeprecationWarnings in non-raw literals.
        where_expression = r'^(?P<first>[\w\d_\.]+)\s*(?P<operator>' \
                           + '|'.join(parser.sql_operators) \
                           + r')\s*(?P<second>(?![>=]).+)$'
        where_clauses = []
        if where:
            where_splitted = re.findall(where_expression, where)
            where_clauses = conjunction([{'operands': [{
                'type': 'column',
                'column': match[0]}, match[2]],
                'operator': match[1],
                'type': 'operator'} for match in where_splitted])
        return where_clauses
    @actions.load_cursor
    def __insert_row(self, request, schema, table, row, row_id=None):
        """Queue an insert of *row* (dict or list of dicts)."""
        if row_id and row.get('id', int(row_id)) != int(row_id):
            return actions._response_error('The id given in the query does not '
                                           'match the id given in the url')
        if row_id:
            row['id'] = row_id
        context = {'cursor_id': request.data['cursor_id'],
                   'user': request.user}
        query = {
            'schema': schema,
            'table': table,
            'values': [row] if isinstance(row, dict) else row
        }
        if not row_id:
            # Let the database assign the id and return it to the caller.
            query['returning'] = ['id']
        result = actions.data_insert(query, context)
        return result
    @actions.load_cursor
    def __update_rows(self, request, schema, table, row, row_id=None):
        """Queue an update of *row* on rows selected by ?where= / *row_id*."""
        context = {'cursor_id': request.data['cursor_id'],
                   'user': request.user}
        where = request.GET.get('where')
        query = {
            'schema': schema,
            'table': table,
            'where': self.__read_where_clause(where),
            'values': row
        }
        if row_id:
            clause = {
                'operator': '=',
                'operands': [
                    actions._load_value(row_id),
                    {
                        'type': 'column',
                        'column': 'id'
                    }],
                'type': 'operator'
            }
            if query['where']:
                clause = conjunction([clause, query['where']])
            query['where'] = clause
        return actions.data_update(query, context)
    @actions.load_cursor
    def __get_rows(self, request, data):
        """
        Build a SELECT from the *data* description and execute it on the
        request's cursor. Returns None; results are presumably collected by
        the @actions.load_cursor wrapper — TODO confirm.
        """
        # Removed dead locals `params` / `params_count`.
        table = actions._get_table(data['schema'], table=data['table'])
        columns = data.get('columns')
        if not columns:
            query = table.select()
        else:
            columns = [getattr(table.c, c) for c in columns]
            query = sqla.select(columns=columns)
        where_clauses = data.get('where')
        if where_clauses:
            query = query.where(parser.parse_condition(where_clauses))
        orderby = data.get('orderby')
        if orderby:
            query = query.order_by(orderby)
        limit = data.get('limit')
        if limit and limit.isdigit():
            query = query.limit(int(limit))
        offset = data.get('offset')
        if offset and offset.isdigit():
            query = query.offset(int(offset))
        cursor = actions._load_cursor(request.data['cursor_id'])
        actions._execute_sqla(query, cursor)
class Session(APIView):
    def get(self, request, length=1):
        # NOTE(review): 'resonse' looks like a typo for 'response', the
        # session value is returned directly rather than wrapped in an
        # HttpResponse, and *length* is unused. Confirm whether this
        # endpoint is actually live before fixing the key.
        return request.session['resonse']
def date_handler(obj):
    """
    ``default`` hook for :func:`json.dumps`.

    Decimal values are converted to floats; objects carrying an
    ``isoformat`` method (dates, datetimes) are serialized through it;
    everything else falls back to ``str``.
    """
    if isinstance(obj, Decimal):
        return float(obj)
    if hasattr(obj, 'isoformat'):
        return obj.isoformat()
    return str(obj)
# Create your views here.
def create_ajax_handler(func, allow_cors=False):
"""
Implements a mapper from api pages to the corresponding functions in
api/actions.py
:param func: The name of the callable function
:return: A JSON-Response that contains a dictionary with the corresponding response stored in *content*
"""
class AJAX_View(APIView):
@cors(allow_cors)
@api_exception
def options(self, request, *args, **kwargs):
response = HttpResponse()
return response
@cors(allow_cors)
@api_exception
def post(self, request):
response = JsonResponse(self.execute(request))
if allow_cors and request.user.is_anonymous:
response['Access-Control-Allow-Origin'] = '*'
return response
@actions.load_cursor
def execute(self, request):
content = request.data
context = {'user': request.user,
'cursor_id': request.data['cursor_id']}
query = content.get('query', ['{}'])
try:
if isinstance(query, list):
query = query[0]
if isinstance(query, str):
query = json.loads(query)
except:
raise APIError('Your query is not properly formated.')
data = func(query, context)
# This must be done in order to clean the structure of non-serializable
# objects (e.g. datetime)
response_data = json.loads(json.dumps(data, default=date_handler))
return {'content': response_data,
'cursor_id': context['cursor_id']}
return AJAX_View.as_view()
def stream(data):
    """
    TODO: Implement streaming of large datasets

    Yields each item JSON-roundtripped (normalising dates/decimals via
    date_handler), sleeping one second between items.
    Removed the unused ``chunck`` local and the index-based loop.
    :param data: sequence of JSON-serializable items
    :return: generator over the cleaned items
    """
    for item in data:
        yield json.loads(json.dumps(item, default=date_handler))
        time.sleep(1)
def get_users(request):
    """Return a JSON list of user names similar to or starting with ?name=."""
    pattern = request.GET['name']
    matched = login_models.myuser.objects.filter(
        Q(name__trigram_similar=pattern) | Q(name__istartswith=pattern))
    return JsonResponse([u.name for u in matched], safe=False)
def get_groups(request):
    """Return a JSON list of group names similar to or starting with ?name=."""
    pattern = request.GET['name']
    matched_groups = login_models.Group.objects.filter(
        Q(name__trigram_similar=pattern) | Q(name__istartswith=pattern))
    return JsonResponse([g.name for g in matched_groups], safe=False)
|
import argparse
import ast
from fnmatch import fnmatch, fnmatchcase
import os.path
import pkgutil
import re
import string
import sys
from vulture import lines
from vulture import noqa
from vulture import utils
__version__ = "1.6"
# Confidence assigned to reported items unless a more specific value is given.
DEFAULT_CONFIDENCE = 60
# Variable names that are never reported as unused.
IGNORED_VARIABLE_NAMES = {"object", "self"}
# Item type -> error code, used for reports and `# noqa: Vxxx` comments.
ERROR_CODES = {
    "attribute": "V101",
    "class": "V102",
    "function": "V103",
    "import": "V104",
    "method": "V105",
    "property": "V106",
    "variable": "V107",
    "unreachable_code": "V201",
}
def _get_unused_items(defined_items, used_names):
unused_items = [
item for item in set(defined_items) if item.name not in used_names
]
unused_items.sort(key=lambda item: item.name.lower())
return unused_items
def _is_special_name(name):
return name.startswith("__") and name.endswith("__")
def _match(name, patterns):
return any(fnmatchcase(name, pattern) for pattern in patterns)
def _is_test_file(filename):
return any(
fnmatch(os.path.basename(filename), pattern)
for pattern in ["test*.py", "*_test.py", "*-test.py"]
)
def _ignore_class(filename, class_name):
    """Ignore classes whose name contains "Test" inside test files."""
    return "Test" in class_name and _is_test_file(filename)
def _ignore_import(filename, import_name):
"""
Ignore star-imported names since we can't detect whether they are used.
Ignore imports from __init__.py files since they're commonly used to
collect objects from a package.
"""
return os.path.basename(filename) == "__init__.py" or import_name == "*"
def _ignore_function(filename, function_name):
    """Ignore ``test_``-prefixed functions inside test files."""
    return _is_test_file(filename) and function_name.startswith("test_")
def _ignore_method(filename, method_name):
    """Ignore dunder methods everywhere, plus test methods in test files."""
    if _is_special_name(method_name):
        return True
    return method_name.startswith("test_") and _is_test_file(filename)
def _ignore_variable(filename, varname):
    """
    Ignore _ (Python idiom), _x (pylint convention) and
    __x__ (special variable or method), but not __x.
    """
    if varname in IGNORED_VARIABLE_NAMES or _is_special_name(varname):
        return True
    return varname.startswith("_") and not varname.startswith("__")
class Item:
    """
    Hold the name, type and location of defined code.
    Items are hashable and compare equal on (filename, first_lineno, name).
    """
    __slots__ = (
        "name",
        "typ",
        "filename",
        "first_lineno",
        "last_lineno",
        "message",
        "confidence",
    )
    def __init__(
        self,
        name,
        typ,
        filename,
        first_lineno,
        last_lineno,
        message="",
        confidence=DEFAULT_CONFIDENCE,
    ):
        """
        :param name: identifier of the definition
        :param typ: kind of definition ('function', 'class', ...)
        :param filename: file the definition lives in
        :param first_lineno: first line of the definition
        :param last_lineno: last line of the definition
        :param message: report text; defaults to "unused <typ> '<name>'"
        :param confidence: how sure we are the item is dead (0-100)
        """
        self.name = name
        self.typ = typ
        self.filename = filename
        self.first_lineno = first_lineno
        self.last_lineno = last_lineno
        # f-string replaces the dated `.format(**locals())` idiom.
        self.message = message or f"unused {typ} '{name}'"
        self.confidence = confidence
    @property
    def size(self):
        """Number of source lines the definition spans."""
        assert self.last_lineno >= self.first_lineno
        return self.last_lineno - self.first_lineno + 1
    def get_report(self, add_size=False):
        """One-line report, optionally including the size in lines."""
        if add_size:
            line_format = "line" if self.size == 1 else "lines"
            size_report = f", {self.size:d} {line_format}"
        else:
            size_report = ""
        return "{}:{:d}: {} ({}% confidence{})".format(
            utils.format_path(self.filename),
            self.first_lineno,
            self.message,
            self.confidence,
            size_report,
        )
    def get_whitelist_string(self):
        """Line suitable for a whitelist file that silences this item."""
        filename = utils.format_path(self.filename)
        if self.typ == "unreachable_code":
            return "# {} ({}:{})".format(
                self.message, filename, self.first_lineno
            )
        else:
            prefix = ""
            if self.typ in ["attribute", "method", "property"]:
                prefix = "_."
            return "{}{} # unused {} ({}:{:d})".format(
                prefix, self.name, self.typ, filename, self.first_lineno
            )
    def _tuple(self):
        # Identity used for both __eq__ and __hash__.
        return (self.filename, self.first_lineno, self.name)
    def __repr__(self):
        return repr(self.name)
    def __eq__(self, other):
        # Fixed: comparing against a non-Item previously raised
        # AttributeError; returning NotImplemented lets Python fall back
        # to the other operand's comparison (or to identity).
        if not isinstance(other, Item):
            return NotImplemented
        return self._tuple() == other._tuple()
    def __hash__(self):
        return hash(self._tuple())
class Vulture(ast.NodeVisitor):
"""Find dead code."""
    def __init__(
        self, verbose=False, ignore_names=None, ignore_decorators=None
    ):
        """
        :param verbose: log definition/usage events while scanning
        :param ignore_names: fnmatch patterns for names never reported
        :param ignore_decorators: fnmatch patterns for decorators whose
            definitions are skipped
        """
        self.verbose = verbose
        def get_list(typ):
            return utils.LoggingList(typ, self.verbose)
        def get_set(typ):
            return utils.LoggingSet(typ, self.verbose)
        # Definitions found while visiting the AST, grouped by kind.
        self.defined_attrs = get_list("attribute")
        self.defined_classes = get_list("class")
        self.defined_funcs = get_list("function")
        self.defined_imports = get_list("import")
        self.defined_methods = get_list("method")
        self.defined_props = get_list("property")
        self.defined_vars = get_list("variable")
        self.unreachable_code = get_list("unreachable_code")
        # Usages: attribute accesses and plain name references.
        self.used_attrs = get_set("attribute")
        self.used_names = get_set("name")
        self.ignore_names = ignore_names or []
        self.ignore_decorators = ignore_decorators or []
        # Per-file scanning state.
        self.filename = ""
        self.code = []
        self.found_dead_code_or_error = False
    def scan(self, code, filename=""):
        """
        Parse *code* and record its definitions and usages.
        Syntax errors and null-byte ValueErrors are reported to stderr and
        flagged in self.found_dead_code_or_error instead of raising.
        """
        self.code = code.splitlines()
        self.noqa_lines = noqa.parse_noqa(self.code)
        self.filename = filename
        try:
            node = ast.parse(code, filename=self.filename)
        except SyntaxError as err:
            text = f' at "{err.text.strip()}"' if err.text else ""
            print(
                "{}:{:d}: {}{}".format(
                    utils.format_path(filename), err.lineno, err.msg, text
                ),
                file=sys.stderr,
            )
            self.found_dead_code_or_error = True
        except ValueError as err:
            # ValueError is raised if source contains null bytes.
            print(
                '{}: invalid source code "{}"'.format(
                    utils.format_path(filename), err
                ),
                file=sys.stderr,
            )
            self.found_dead_code_or_error = True
        else:
            self.visit(node)
    def scavenge(self, paths, exclude=None):
        """
        Scan every module found under *paths* (skipping files matching the
        *exclude* patterns), then scan the bundled whitelist for each
        imported module so commonly-used names are not reported.
        """
        def prepare_pattern(pattern):
            # Patterns without glob characters match as substrings.
            if not any(char in pattern for char in ["*", "?", "["]):
                pattern = "*{pattern}*".format(**locals())
            return pattern
        exclude = [prepare_pattern(pattern) for pattern in (exclude or [])]
        def exclude_file(name):
            return any(fnmatch(name, pattern) for pattern in exclude)
        for module in utils.get_modules(paths):
            if exclude_file(module):
                self._log("Excluded:", module)
                continue
            self._log("Scanning:", module)
            try:
                module_string = utils.read_file(module)
            except utils.VultureInputException as err:  # noqa: F841
                print(
                    "Error: Could not read file {module} - {err}\n"
                    "Try to change the encoding to UTF-8.".format(**locals()),
                    file=sys.stderr,
                )
                self.found_dead_code_or_error = True
            else:
                self.scan(module_string, filename=module)
        unique_imports = {item.name for item in self.defined_imports}
        for import_name in unique_imports:
            path = os.path.join("whitelists", import_name) + "_whitelist.py"
            if exclude_file(path):
                self._log("Excluded whitelist:", path)
            else:
                try:
                    module_data = pkgutil.get_data("vulture", path)
                    self._log("Included whitelist:", path)
                except OSError:
                    # Most imported modules don't have a whitelist.
                    continue
                module_string = module_data.decode("utf-8")
                self.scan(module_string, filename=path)
def get_unused_code(self, min_confidence=0, sort_by_size=False):
"""
Return ordered list of unused Item objects.
"""
if not 0 <= min_confidence <= 100:
raise ValueError("min_confidence must be between 0 and 100.")
def by_name(item):
return (item.filename.lower(), item.first_lineno)
def by_size(item):
return (item.size,) + by_name(item)
unused_code = (
self.unused_attrs
+ self.unused_classes
+ self.unused_funcs
+ self.unused_imports
+ self.unused_methods
+ self.unused_props
+ self.unused_vars
+ self.unreachable_code
)
confidently_unused = [
obj for obj in unused_code if obj.confidence >= min_confidence
]
return sorted(
confidently_unused, key=by_size if sort_by_size else by_name
)
    def report(
        self, min_confidence=0, sort_by_size=False, make_whitelist=False
    ):
        """
        Print ordered list of Item objects to stdout.
        With *make_whitelist*, emit whitelist lines instead of reports.
        Returns self.found_dead_code_or_error, which becomes True as soon
        as at least one item is printed (or an earlier scan failed).
        """
        for item in self.get_unused_code(
            min_confidence=min_confidence, sort_by_size=sort_by_size
        ):
            print(
                item.get_whitelist_string()
                if make_whitelist
                else item.get_report(add_size=sort_by_size)
            )
            # Reporting anything marks the run as "dead code found".
            self.found_dead_code_or_error = True
        return self.found_dead_code_or_error
    # Each unused_* property diffs its defined_* list against the recorded
    # usages; see _get_unused_items. Methods/properties/attributes are only
    # matched against attribute accesses, the rest also against plain names.
    @property
    def unused_classes(self):
        return _get_unused_items(
            self.defined_classes, self.used_attrs | self.used_names
        )
    @property
    def unused_funcs(self):
        return _get_unused_items(
            self.defined_funcs, self.used_attrs | self.used_names
        )
    @property
    def unused_imports(self):
        return _get_unused_items(
            self.defined_imports, self.used_names | self.used_attrs
        )
    @property
    def unused_methods(self):
        return _get_unused_items(self.defined_methods, self.used_attrs)
    @property
    def unused_props(self):
        return _get_unused_items(self.defined_props, self.used_attrs)
    @property
    def unused_vars(self):
        return _get_unused_items(
            self.defined_vars, self.used_attrs | self.used_names
        )
    @property
    def unused_attrs(self):
        return _get_unused_items(self.defined_attrs, self.used_attrs)
def _log(self, *args):
if self.verbose:
print(*args)
    def _add_aliases(self, node):
        """
        We delegate to this method instead of using visit_alias() to have
        access to line numbers and to filter imports from __future__.
        """
        assert isinstance(node, (ast.Import, ast.ImportFrom))
        for name_and_alias in node.names:
            # Store only top-level module name ("os.path" -> "os").
            # We can't easily detect when "os.path" is used.
            name = name_and_alias.name.partition(".")[0]
            alias = name_and_alias.asname
            self._define(
                self.defined_imports,
                alias or name,
                node,
                confidence=90,
                ignore=_ignore_import,
            )
            if alias is not None:
                # "import x as y": the original name x counts as used.
                self.used_names.add(name_and_alias.name)
def _handle_conditional_node(self, node, name):
if utils.condition_is_always_false(node.test):
self._define(
self.unreachable_code,
name,
node,
last_node=node.body
if isinstance(node, ast.IfExp)
else node.body[-1],
message="unsatisfiable '{name}' condition".format(**locals()),
confidence=100,
)
elif utils.condition_is_always_true(node.test):
else_body = node.orelse
if name == "ternary":
self._define(
self.unreachable_code,
name,
else_body,
message="unreachable 'else' expression",
confidence=100,
)
elif else_body:
self._define(
self.unreachable_code,
"else",
else_body[0],
last_node=else_body[-1],
message="unreachable 'else' block",
confidence=100,
)
elif name == "if":
# Redundant if-condition without else block.
self._define(
self.unreachable_code,
name,
node,
message="redundant if-condition".format(**locals()),
confidence=100,
)
    def _handle_string(self, s):
        """
        Parse variable names in format strings:
        '%(my_var)s' % locals()
        '{my_var}'.format(**locals())
        f'{my_var}'
        """
        # Old format strings.
        self.used_names |= set(re.findall(r"\%\((\w+)\)", s))
        def is_identifier(name):
            # NOTE(review): re.match only anchors at the start, so e.g.
            # "a-b" still passes this check and is recorded whole —
            # presumably harmless for marking names as used; verify.
            return bool(re.match(r"[a-zA-Z_][a-zA-Z0-9_]*", name))
        # New format strings.
        parser = string.Formatter()
        try:
            names = [name for _, name, _, _ in parser.parse(s) if name]
        except ValueError:
            # Invalid format string.
            names = []
        for field_name in names:
            # Remove brackets and contents: "a[0][b].c[d].e" -> "a.c.e".
            # "a.b.c" -> name = "a", attributes = ["b", "c"]
            name_and_attrs = re.sub(r"\[\w*\]", "", field_name).split(".")
            name = name_and_attrs[0]
            if is_identifier(name):
                self.used_names.add(name)
            for attr in name_and_attrs[1:]:
                if is_identifier(attr):
                    self.used_attrs.add(attr)
    def _define(
        self,
        collection,
        name,
        first_node,
        last_node=None,
        message="",
        confidence=DEFAULT_CONFIDENCE,
        ignore=None,
    ):
        def ignored(lineno):
            # Closure reads `typ`, which is assigned below before the
            # first call to ignored().
            return (
                (ignore and ignore(self.filename, name))
                or _match(name, self.ignore_names)
                or noqa.ignore_line(self.noqa_lines, lineno, ERROR_CODES[typ])
            )
        last_node = last_node or first_node
        typ = collection.typ
        first_lineno = lines.get_first_line_number(first_node)
        if ignored(first_lineno):
            self._log('Ignoring {typ} "{name}"'.format(**locals()))
        else:
            last_lineno = lines.get_last_line_number(last_node)
            collection.append(
                Item(
                    name,
                    typ,
                    self.filename,
                    first_lineno,
                    last_lineno,
                    message=message,
                    confidence=confidence,
                )
            )
    def _define_variable(self, name, node, confidence=DEFAULT_CONFIDENCE):
        # Convenience wrapper for recording variable definitions.
        self._define(
            self.defined_vars,
            name,
            node,
            confidence=confidence,
            ignore=_ignore_variable,
        )
    def visit_arg(self, node):
        """Function argument"""
        self._define_variable(node.arg, node, confidence=100)
    def visit_AsyncFunctionDef(self, node):
        # Async functions are handled exactly like regular ones.
        return self.visit_FunctionDef(node)
    def visit_Attribute(self, node):
        # "x.attr = ..." defines the attribute; "x.attr" uses it.
        if isinstance(node.ctx, ast.Store):
            self._define(self.defined_attrs, node.attr, node)
        elif isinstance(node.ctx, ast.Load):
            self.used_attrs.add(node.attr)
    def visit_ClassDef(self, node):
        for decorator in node.decorator_list:
            if _match(
                utils.get_decorator_name(decorator), self.ignore_decorators
            ):
                self._log(
                    'Ignoring class "{}" (decorator whitelisted)'.format(
                        node.name
                    )
                )
                break
        else:
            # No whitelisted decorator found: record the class definition.
            self._define(
                self.defined_classes, node.name, node, ignore=_ignore_class
            )
    def visit_FunctionDef(self, node):
        # Classify the def as property, method or plain function based on
        # its decorators and whether the first argument is "self".
        decorator_names = [
            utils.get_decorator_name(decorator)
            for decorator in node.decorator_list
        ]
        first_arg = node.args.args[0].arg if node.args.args else None
        if "@property" in decorator_names:
            typ = "property"
        elif (
            "@staticmethod" in decorator_names
            or "@classmethod" in decorator_names
            or first_arg == "self"
        ):
            typ = "method"
        else:
            typ = "function"
        if any(
            _match(name, self.ignore_decorators) for name in decorator_names
        ):
            self._log(
                'Ignoring {} "{}" (decorator whitelisted)'.format(
                    typ, node.name
                )
            )
        elif typ == "property":
            self._define(self.defined_props, node.name, node)
        elif typ == "method":
            self._define(
                self.defined_methods, node.name, node, ignore=_ignore_method
            )
        else:
            self._define(
                self.defined_funcs, node.name, node, ignore=_ignore_function
            )
    def visit_If(self, node):
        self._handle_conditional_node(node, "if")
    def visit_IfExp(self, node):
        self._handle_conditional_node(node, "ternary")
    def visit_Import(self, node):
        self._add_aliases(node)
    def visit_ImportFrom(self, node):
        # "from __future__ import ..." is mandatory syntax, never dead code.
        if node.module != "__future__":
            self._add_aliases(node)
    def visit_Name(self, node):
        if (
            isinstance(node.ctx, ast.Load)
            and node.id not in IGNORED_VARIABLE_NAMES
        ):
            self.used_names.add(node.id)
        elif isinstance(node.ctx, (ast.Param, ast.Store)):
            self._define_variable(node.id, node)
    # The proper string visitor is selected at class-creation time:
    # string literals are ast.Str before Python 3.8 and ast.Constant after.
    if sys.version_info < (3, 8):
        def visit_Str(self, node):
            self._handle_string(node.s)
    else:
        def visit_Constant(self, node):
            if isinstance(node.value, str):
                self._handle_string(node.value)
    def visit_While(self, node):
        self._handle_conditional_node(node, "while")
    def visit(self, node):
        # Dispatch to a specific visit_* method if one exists, then always
        # walk the children via generic_visit().
        method = "visit_" + node.__class__.__name__
        visitor = getattr(self, method, None)
        if self.verbose:
            lineno = getattr(node, "lineno", 1)
            line = self.code[lineno - 1] if self.code else ""
            self._log(lineno, ast.dump(node), line)
        if visitor:
            visitor(node)
        return self.generic_visit(node)
    def _handle_ast_list(self, ast_list):
        """
        Find unreachable nodes in the given sequence of ast nodes.
        """
        for index, node in enumerate(ast_list):
            if isinstance(
                node, (ast.Break, ast.Continue, ast.Raise, ast.Return)
            ):
                try:
                    first_unreachable_node = ast_list[index + 1]
                except IndexError:
                    continue
                class_name = node.__class__.__name__.lower()
                self._define(
                    self.unreachable_code,
                    class_name,
                    first_unreachable_node,
                    last_node=ast_list[-1],
                    message="unreachable code after '{class_name}'".format(
                        **locals()
                    ),
                    confidence=100,
                )
                # Report at most one unreachable chunk per statement list.
                return
    def generic_visit(self, node):
        """Called if no explicit visitor function exists for a node."""
        for _, value in ast.iter_fields(node):
            if isinstance(value, list):
                self._handle_ast_list(value)
                for item in value:
                    if isinstance(item, ast.AST):
                        self.visit(item)
            elif isinstance(value, ast.AST):
                self.visit(value)
def _parse_args():
    """Build and evaluate the argparse command line for the vulture CLI."""
    def csv(exclude):
        # argparse "type" callback: split comma-separated option values.
        return exclude.split(",")
    usage = "%(prog)s [options] PATH [PATH ...]"
    version = f"vulture {__version__}"
    glob_help = "Patterns may contain glob wildcards (*, ?, [abc], [!abc])."
    parser = argparse.ArgumentParser(prog="vulture", usage=usage)
    parser.add_argument(
        "paths",
        nargs="+",
        metavar="PATH",
        help="Paths may be Python files or directories. For each directory"
        " Vulture analyzes all contained *.py files.",
    )
    parser.add_argument(
        "--exclude",
        metavar="PATTERNS",
        type=csv,
        help="Comma-separated list of paths to ignore (e.g.,"
        ' "*settings.py,docs/*.py"). {glob_help} A PATTERN without glob'
        " wildcards is treated as *PATTERN*.".format(**locals()),
    )
    parser.add_argument(
        "--ignore-decorators",
        metavar="PATTERNS",
        type=csv,
        help="Comma-separated list of decorators. Functions and classes using"
        ' these decorators are ignored (e.g., "@app.route,@require_*").'
        " {glob_help}".format(**locals()),
    )
    parser.add_argument(
        "--ignore-names",
        metavar="PATTERNS",
        type=csv,
        default=None,
        help='Comma-separated list of names to ignore (e.g., "visit_*,do_*").'
        " {glob_help}".format(**locals()),
    )
    parser.add_argument(
        "--make-whitelist",
        action="store_true",
        help="Report unused code in a format that can be added to a"
        " whitelist module.",
    )
    parser.add_argument(
        "--min-confidence",
        type=int,
        default=0,
        help="Minimum confidence (between 0 and 100) for code to be"
        " reported as unused.",
    )
    parser.add_argument(
        "--sort-by-size",
        action="store_true",
        help="Sort unused functions and classes by their lines of code.",
    )
    parser.add_argument("-v", "--verbose", action="store_true")
    parser.add_argument("--version", action="version", version=version)
    return parser.parse_args()
def main():
    """Command-line entry point: scan the given paths and exit with a
    truthy status when dead code (or a scan error) was found."""
    args = _parse_args()
    finder = Vulture(
        verbose=args.verbose,
        ignore_names=args.ignore_names,
        ignore_decorators=args.ignore_decorators,
    )
    finder.scavenge(args.paths, exclude=args.exclude)
    exit_code = finder.report(
        min_confidence=args.min_confidence,
        sort_by_size=args.sort_by_size,
        make_whitelist=args.make_whitelist,
    )
    sys.exit(exit_code)
Use f-strings more consistently.
import argparse
import ast
from fnmatch import fnmatch, fnmatchcase
import os.path
import pkgutil
import re
import string
import sys
from vulture import lines
from vulture import noqa
from vulture import utils
__version__ = "1.6"
# Confidence given to a definition when no more specific value is assigned.
DEFAULT_CONFIDENCE = 60
# Names that are never reported as unused and never counted as uses.
IGNORED_VARIABLE_NAMES = {"object", "self"}
# Item type -> error code honored by "# noqa: Vxxx" comments.
ERROR_CODES = {
    "attribute": "V101",
    "class": "V102",
    "function": "V103",
    "import": "V104",
    "method": "V105",
    "property": "V106",
    "variable": "V107",
    "unreachable_code": "V201",
}
def _get_unused_items(defined_items, used_names):
unused_items = [
item for item in set(defined_items) if item.name not in used_names
]
unused_items.sort(key=lambda item: item.name.lower())
return unused_items
def _is_special_name(name):
return name.startswith("__") and name.endswith("__")
def _match(name, patterns):
return any(fnmatchcase(name, pattern) for pattern in patterns)
def _is_test_file(filename):
return any(
fnmatch(os.path.basename(filename), pattern)
for pattern in ["test*.py", "*_test.py", "*-test.py"]
)
def _ignore_class(filename, class_name):
    """Ignore classes whose name contains "Test" when defined in test files."""
    if not _is_test_file(filename):
        return False
    return "Test" in class_name
def _ignore_import(filename, import_name):
"""
Ignore star-imported names since we can't detect whether they are used.
Ignore imports from __init__.py files since they're commonly used to
collect objects from a package.
"""
return os.path.basename(filename) == "__init__.py" or import_name == "*"
def _ignore_function(filename, function_name):
    """Ignore test_* functions when they live in test files."""
    return _is_test_file(filename) and function_name.startswith("test_")
def _ignore_method(filename, method_name):
    """Ignore dunder methods everywhere and test_* methods in test files."""
    if _is_special_name(method_name):
        return True
    return method_name.startswith("test_") and _is_test_file(filename)
def _ignore_variable(filename, varname):
    """
    Ignore _ (Python idiom), _x (pylint convention) and
    __x__ (special variable or method), but not __x.
    """
    if varname in IGNORED_VARIABLE_NAMES:
        return True
    if _is_special_name(varname):
        return True
    return varname.startswith("_") and not varname.startswith("__")
class Item:
    """
    Hold the name, type and location of a definition found in the scanned
    code, together with Vulture's confidence that it is unused.
    """

    __slots__ = (
        "name",
        "typ",
        "filename",
        "first_lineno",
        "last_lineno",
        "message",
        "confidence",
    )

    def __init__(
        self,
        name,
        typ,
        filename,
        first_lineno,
        last_lineno,
        message="",
        confidence=DEFAULT_CONFIDENCE,
    ):
        self.name = name
        self.typ = typ
        self.filename = filename
        self.first_lineno = first_lineno
        self.last_lineno = last_lineno
        self.message = message or f"unused {typ} '{name}'"
        self.confidence = confidence

    @property
    def size(self):
        """Number of source lines the definition spans."""
        assert self.last_lineno >= self.first_lineno
        return self.last_lineno - self.first_lineno + 1

    def get_report(self, add_size=False):
        """Return a one-line human-readable report for this item."""
        if add_size:
            line_format = "line" if self.size == 1 else "lines"
            size_report = f", {self.size:d} {line_format}"
        else:
            size_report = ""
        return "{}:{:d}: {} ({}% confidence{})".format(
            utils.format_path(self.filename),
            self.first_lineno,
            self.message,
            self.confidence,
            size_report,
        )

    def get_whitelist_string(self):
        """Return a line suitable for inclusion in a whitelist module."""
        filename = utils.format_path(self.filename)
        if self.typ == "unreachable_code":
            # Bug fix: the location used to contain the literal text
            # "(unknown)" even though `filename` was computed (and left
            # unused) right above.
            return f"# {self.message} ({filename}:{self.first_lineno})"
        else:
            prefix = ""
            if self.typ in ["attribute", "method", "property"]:
                prefix = "_."
            return "{}{} # unused {} ({}:{:d})".format(
                prefix, self.name, self.typ, filename, self.first_lineno
            )

    def _tuple(self):
        # Identity used for both equality and hashing.
        return (self.filename, self.first_lineno, self.name)

    def __repr__(self):
        return repr(self.name)

    def __eq__(self, other):
        # Return NotImplemented for foreign types instead of raising
        # AttributeError on the missing _tuple() method.
        if not isinstance(other, Item):
            return NotImplemented
        return self._tuple() == other._tuple()

    def __hash__(self):
        return hash(self._tuple())
class Vulture(ast.NodeVisitor):
    """Find dead code."""
    def __init__(
        self, verbose=False, ignore_names=None, ignore_decorators=None
    ):
        self.verbose = verbose
        def get_list(typ):
            return utils.LoggingList(typ, self.verbose)
        def get_set(typ):
            return utils.LoggingSet(typ, self.verbose)
        # Definitions found while walking the AST, one collection per kind.
        self.defined_attrs = get_list("attribute")
        self.defined_classes = get_list("class")
        self.defined_funcs = get_list("function")
        self.defined_imports = get_list("import")
        self.defined_methods = get_list("method")
        self.defined_props = get_list("property")
        self.defined_vars = get_list("variable")
        self.unreachable_code = get_list("unreachable_code")
        # Names and attributes seen in load context.
        self.used_attrs = get_set("attribute")
        self.used_names = get_set("name")
        self.ignore_names = ignore_names or []
        self.ignore_decorators = ignore_decorators or []
        self.filename = ""
        self.code = []
        self.found_dead_code_or_error = False
    def scan(self, code, filename=""):
        # Parse one module's source and feed it through the visitor.
        self.code = code.splitlines()
        self.noqa_lines = noqa.parse_noqa(self.code)
        self.filename = filename
        try:
            node = ast.parse(code, filename=self.filename)
        except SyntaxError as err:
            text = f' at "{err.text.strip()}"' if err.text else ""
            print(
                f"{utils.format_path(filename)}:{err.lineno}: {err.msg}{text}",
                file=sys.stderr,
            )
            self.found_dead_code_or_error = True
        except ValueError as err:
            # ValueError is raised if source contains null bytes.
            print(
                f'{utils.format_path(filename)}: invalid source code "{err}"',
                file=sys.stderr,
            )
            self.found_dead_code_or_error = True
        else:
            self.visit(node)
    def scavenge(self, paths, exclude=None):
        # Scan all modules under `paths`, then any bundled whitelists for
        # the modules that were imported.
        def prepare_pattern(pattern):
            # A pattern without glob characters matches as a substring.
            if not any(char in pattern for char in ["*", "?", "["]):
                pattern = f"*{pattern}*"
            return pattern
        exclude = [prepare_pattern(pattern) for pattern in (exclude or [])]
        def exclude_file(name):
            return any(fnmatch(name, pattern) for pattern in exclude)
        for module in utils.get_modules(paths):
            if exclude_file(module):
                self._log("Excluded:", module)
                continue
            self._log("Scanning:", module)
            try:
                module_string = utils.read_file(module)
            except utils.VultureInputException as err:  # noqa: F841
                print(
                    f"Error: Could not read file {module} - {err}\n"
                    f"Try to change the encoding to UTF-8.",
                    file=sys.stderr,
                )
                self.found_dead_code_or_error = True
            else:
                self.scan(module_string, filename=module)
        unique_imports = {item.name for item in self.defined_imports}
        for import_name in unique_imports:
            path = os.path.join("whitelists", import_name) + "_whitelist.py"
            if exclude_file(path):
                self._log("Excluded whitelist:", path)
            else:
                try:
                    module_data = pkgutil.get_data("vulture", path)
                    self._log("Included whitelist:", path)
                except OSError:
                    # Most imported modules don't have a whitelist.
                    continue
                module_string = module_data.decode("utf-8")
                self.scan(module_string, filename=path)
    def get_unused_code(self, min_confidence=0, sort_by_size=False):
        """
        Return ordered list of unused Item objects.
        """
        if not 0 <= min_confidence <= 100:
            raise ValueError("min_confidence must be between 0 and 100.")
        def by_name(item):
            return (item.filename.lower(), item.first_lineno)
        def by_size(item):
            return (item.size,) + by_name(item)
        unused_code = (
            self.unused_attrs
            + self.unused_classes
            + self.unused_funcs
            + self.unused_imports
            + self.unused_methods
            + self.unused_props
            + self.unused_vars
            + self.unreachable_code
        )
        confidently_unused = [
            obj for obj in unused_code if obj.confidence >= min_confidence
        ]
        return sorted(
            confidently_unused, key=by_size if sort_by_size else by_name
        )
    def report(
        self, min_confidence=0, sort_by_size=False, make_whitelist=False
    ):
        """
        Print ordered list of Item objects to stdout.
        """
        for item in self.get_unused_code(
            min_confidence=min_confidence, sort_by_size=sort_by_size
        ):
            print(
                item.get_whitelist_string()
                if make_whitelist
                else item.get_report(add_size=sort_by_size)
            )
            # Any printed item means dead code was found.
            self.found_dead_code_or_error = True
        return self.found_dead_code_or_error
    @property
    def unused_classes(self):
        # Classes may be referenced by bare name or via attribute access.
        return _get_unused_items(
            self.defined_classes, self.used_attrs | self.used_names
        )
    @property
    def unused_funcs(self):
        return _get_unused_items(
            self.defined_funcs, self.used_attrs | self.used_names
        )
    @property
    def unused_imports(self):
        return _get_unused_items(
            self.defined_imports, self.used_names | self.used_attrs
        )
    @property
    def unused_methods(self):
        # Methods are only ever used through attribute access.
        return _get_unused_items(self.defined_methods, self.used_attrs)
    @property
    def unused_props(self):
        return _get_unused_items(self.defined_props, self.used_attrs)
    @property
    def unused_vars(self):
        return _get_unused_items(
            self.defined_vars, self.used_attrs | self.used_names
        )
    @property
    def unused_attrs(self):
        return _get_unused_items(self.defined_attrs, self.used_attrs)
    def _log(self, *args):
        # Print only when running in verbose mode.
        if self.verbose:
            print(*args)
    def _add_aliases(self, node):
        """
        We delegate to this method instead of using visit_alias() to have
        access to line numbers and to filter imports from __future__.
        """
        assert isinstance(node, (ast.Import, ast.ImportFrom))
        for name_and_alias in node.names:
            # Store only top-level module name ("os.path" -> "os").
            # We can't easily detect when "os.path" is used.
            name = name_and_alias.name.partition(".")[0]
            alias = name_and_alias.asname
            self._define(
                self.defined_imports,
                alias or name,
                node,
                confidence=90,
                ignore=_ignore_import,
            )
            if alias is not None:
                # "import foo as bar" implicitly marks "foo" as used.
                self.used_names.add(name_and_alias.name)
    def _handle_conditional_node(self, node, name):
        # Flag unreachable branches of conditions that are statically
        # always false or always true.
        if utils.condition_is_always_false(node.test):
            self._define(
                self.unreachable_code,
                name,
                node,
                last_node=node.body
                if isinstance(node, ast.IfExp)
                else node.body[-1],
                message=f"unsatisfiable '{name}' condition",
                confidence=100,
            )
        elif utils.condition_is_always_true(node.test):
            else_body = node.orelse
            if name == "ternary":
                self._define(
                    self.unreachable_code,
                    name,
                    else_body,
                    message="unreachable 'else' expression",
                    confidence=100,
                )
            elif else_body:
                self._define(
                    self.unreachable_code,
                    "else",
                    else_body[0],
                    last_node=else_body[-1],
                    message="unreachable 'else' block",
                    confidence=100,
                )
            elif name == "if":
                # Redundant if-condition without else block.
                self._define(
                    self.unreachable_code,
                    name,
                    node,
                    message="redundant if-condition",
                    confidence=100,
                )
    def _handle_string(self, s):
        """
        Parse variable names in format strings:
        '%(my_var)s' % locals()
        '{my_var}'.format(**locals())
        f'{my_var}'
        """
        # Old format strings.
        self.used_names |= set(re.findall(r"\%\((\w+)\)", s))
        def is_identifier(name):
            # NOTE(review): re.match only anchors at the start of the
            # string; non-identifier suffixes still pass — verify intent.
            return bool(re.match(r"[a-zA-Z_][a-zA-Z0-9_]*", name))
        # New format strings.
        parser = string.Formatter()
        try:
            names = [name for _, name, _, _ in parser.parse(s) if name]
        except ValueError:
            # Invalid format string.
            names = []
        for field_name in names:
            # Remove brackets and contents: "a[0][b].c[d].e" -> "a.c.e".
            # "a.b.c" -> name = "a", attributes = ["b", "c"]
            name_and_attrs = re.sub(r"\[\w*\]", "", field_name).split(".")
            name = name_and_attrs[0]
            if is_identifier(name):
                self.used_names.add(name)
            for attr in name_and_attrs[1:]:
                if is_identifier(attr):
                    self.used_attrs.add(attr)
    def _define(
        self,
        collection,
        name,
        first_node,
        last_node=None,
        message="",
        confidence=DEFAULT_CONFIDENCE,
        ignore=None,
    ):
        def ignored(lineno):
            # Closure reads `typ`, which is assigned below before the
            # first call to ignored().
            return (
                (ignore and ignore(self.filename, name))
                or _match(name, self.ignore_names)
                or noqa.ignore_line(self.noqa_lines, lineno, ERROR_CODES[typ])
            )
        last_node = last_node or first_node
        typ = collection.typ
        first_lineno = lines.get_first_line_number(first_node)
        if ignored(first_lineno):
            self._log(f'Ignoring {typ} "{name}"')
        else:
            last_lineno = lines.get_last_line_number(last_node)
            collection.append(
                Item(
                    name,
                    typ,
                    self.filename,
                    first_lineno,
                    last_lineno,
                    message=message,
                    confidence=confidence,
                )
            )
    def _define_variable(self, name, node, confidence=DEFAULT_CONFIDENCE):
        # Convenience wrapper for recording variable definitions.
        self._define(
            self.defined_vars,
            name,
            node,
            confidence=confidence,
            ignore=_ignore_variable,
        )
    def visit_arg(self, node):
        """Function argument"""
        self._define_variable(node.arg, node, confidence=100)
    def visit_AsyncFunctionDef(self, node):
        # Async functions are handled exactly like regular ones.
        return self.visit_FunctionDef(node)
    def visit_Attribute(self, node):
        # "x.attr = ..." defines the attribute; "x.attr" uses it.
        if isinstance(node.ctx, ast.Store):
            self._define(self.defined_attrs, node.attr, node)
        elif isinstance(node.ctx, ast.Load):
            self.used_attrs.add(node.attr)
    def visit_ClassDef(self, node):
        for decorator in node.decorator_list:
            if _match(
                utils.get_decorator_name(decorator), self.ignore_decorators
            ):
                self._log(
                    f'Ignoring class "{node.name}" (decorator whitelisted)'
                )
                break
        else:
            # No whitelisted decorator found: record the class definition.
            self._define(
                self.defined_classes, node.name, node, ignore=_ignore_class
            )
    def visit_FunctionDef(self, node):
        # Classify the def as property, method or plain function based on
        # its decorators and whether the first argument is "self".
        decorator_names = [
            utils.get_decorator_name(decorator)
            for decorator in node.decorator_list
        ]
        first_arg = node.args.args[0].arg if node.args.args else None
        if "@property" in decorator_names:
            typ = "property"
        elif (
            "@staticmethod" in decorator_names
            or "@classmethod" in decorator_names
            or first_arg == "self"
        ):
            typ = "method"
        else:
            typ = "function"
        if any(
            _match(name, self.ignore_decorators) for name in decorator_names
        ):
            self._log(f'Ignoring {typ} "{node.name}" (decorator whitelisted)')
        elif typ == "property":
            self._define(self.defined_props, node.name, node)
        elif typ == "method":
            self._define(
                self.defined_methods, node.name, node, ignore=_ignore_method
            )
        else:
            self._define(
                self.defined_funcs, node.name, node, ignore=_ignore_function
            )
    def visit_If(self, node):
        self._handle_conditional_node(node, "if")
    def visit_IfExp(self, node):
        self._handle_conditional_node(node, "ternary")
    def visit_Import(self, node):
        self._add_aliases(node)
    def visit_ImportFrom(self, node):
        # "from __future__ import ..." is mandatory syntax, never dead code.
        if node.module != "__future__":
            self._add_aliases(node)
    def visit_Name(self, node):
        if (
            isinstance(node.ctx, ast.Load)
            and node.id not in IGNORED_VARIABLE_NAMES
        ):
            self.used_names.add(node.id)
        elif isinstance(node.ctx, (ast.Param, ast.Store)):
            self._define_variable(node.id, node)
    # The proper string visitor is selected at class-creation time:
    # string literals are ast.Str before Python 3.8 and ast.Constant after.
    if sys.version_info < (3, 8):
        def visit_Str(self, node):
            self._handle_string(node.s)
    else:
        def visit_Constant(self, node):
            if isinstance(node.value, str):
                self._handle_string(node.value)
    def visit_While(self, node):
        self._handle_conditional_node(node, "while")
    def visit(self, node):
        # Dispatch to a specific visit_* method if one exists, then always
        # walk the children via generic_visit().
        method = "visit_" + node.__class__.__name__
        visitor = getattr(self, method, None)
        if self.verbose:
            lineno = getattr(node, "lineno", 1)
            line = self.code[lineno - 1] if self.code else ""
            self._log(lineno, ast.dump(node), line)
        if visitor:
            visitor(node)
        return self.generic_visit(node)
    def _handle_ast_list(self, ast_list):
        """
        Find unreachable nodes in the given sequence of ast nodes.
        """
        for index, node in enumerate(ast_list):
            if isinstance(
                node, (ast.Break, ast.Continue, ast.Raise, ast.Return)
            ):
                try:
                    first_unreachable_node = ast_list[index + 1]
                except IndexError:
                    continue
                class_name = node.__class__.__name__.lower()
                self._define(
                    self.unreachable_code,
                    class_name,
                    first_unreachable_node,
                    last_node=ast_list[-1],
                    message=f"unreachable code after '{class_name}'",
                    confidence=100,
                )
                # Report at most one unreachable chunk per statement list.
                return
    def generic_visit(self, node):
        """Called if no explicit visitor function exists for a node."""
        for _, value in ast.iter_fields(node):
            if isinstance(value, list):
                self._handle_ast_list(value)
                for item in value:
                    if isinstance(item, ast.AST):
                        self.visit(item)
            elif isinstance(value, ast.AST):
                self.visit(value)
def _parse_args():
    """Build and evaluate the argparse command line for the vulture CLI."""
    def csv(exclude):
        # argparse "type" callback: split comma-separated option values.
        return exclude.split(",")
    usage = "%(prog)s [options] PATH [PATH ...]"
    version = f"vulture {__version__}"
    glob_help = "Patterns may contain glob wildcards (*, ?, [abc], [!abc])."
    parser = argparse.ArgumentParser(prog="vulture", usage=usage)
    parser.add_argument(
        "paths",
        nargs="+",
        metavar="PATH",
        help="Paths may be Python files or directories. For each directory"
        " Vulture analyzes all contained *.py files.",
    )
    # Only the fragments interpolating {glob_help} need an f-prefix; the
    # rest were f-strings without placeholders (flake8/ruff F541).
    parser.add_argument(
        "--exclude",
        metavar="PATTERNS",
        type=csv,
        help="Comma-separated list of paths to ignore (e.g.,"
        f' "*settings.py,docs/*.py"). {glob_help} A PATTERN without glob'
        " wildcards is treated as *PATTERN*.",
    )
    parser.add_argument(
        "--ignore-decorators",
        metavar="PATTERNS",
        type=csv,
        help="Comma-separated list of decorators. Functions and classes using"
        ' these decorators are ignored (e.g., "@app.route,@require_*").'
        f" {glob_help}",
    )
    parser.add_argument(
        "--ignore-names",
        metavar="PATTERNS",
        type=csv,
        default=None,
        help='Comma-separated list of names to ignore (e.g., "visit_*,do_*").'
        f" {glob_help}",
    )
    parser.add_argument(
        "--make-whitelist",
        action="store_true",
        help="Report unused code in a format that can be added to a"
        " whitelist module.",
    )
    parser.add_argument(
        "--min-confidence",
        type=int,
        default=0,
        help="Minimum confidence (between 0 and 100) for code to be"
        " reported as unused.",
    )
    parser.add_argument(
        "--sort-by-size",
        action="store_true",
        help="Sort unused functions and classes by their lines of code.",
    )
    parser.add_argument("-v", "--verbose", action="store_true")
    parser.add_argument("--version", action="version", version=version)
    return parser.parse_args()
def main():
    """Command-line entry point: scan paths, report, and exit."""
    args = _parse_args()
    vulture = Vulture(
        verbose=args.verbose,
        ignore_names=args.ignore_names,
        ignore_decorators=args.ignore_decorators,
    )
    vulture.scavenge(args.paths, exclude=args.exclude)
    # report() returns True when anything was printed (or a scan error
    # occurred); sys.exit(True) maps to exit status 1.
    sys.exit(
        vulture.report(
            min_confidence=args.min_confidence,
            sort_by_size=args.sort_by_size,
            make_whitelist=args.make_whitelist,
        )
    )
|
# Copyright 2012-2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, pickle, re
from .. import build
from .. import dependencies
from .. import mesonlib
from .. import mlog
from .. import compilers
import json
import subprocess
from ..mesonlib import MesonException
from ..mesonlib import get_compiler_for_source, classify_unity_sources
from ..compilers import CompilerArgs
from collections import OrderedDict
import shlex
class CleanTrees:
    """
    Custom-target output directories that must be removed explicitly,
    because on Linux `ninja clean` only deletes directories that are empty.
    """
    def __init__(self, build_dir, trees):
        # Build root plus the directory trees to delete on clean.
        self.build_dir, self.trees = build_dir, trees
class InstallData:
    """Serialisable description of everything an install step must do."""
    def __init__(self, source_dir, build_dir, prefix, strip_bin, mesonintrospect):
        self.source_dir = source_dir
        self.build_dir = build_dir
        self.prefix = prefix
        self.strip_bin = strip_bin
        self.mesonintrospect = mesonintrospect
        # Collections filled in later by the backend.
        self.targets = []
        self.headers = []
        self.man = []
        self.data = []
        self.po_package_name = ''
        self.po = []
        self.install_scripts = []
        self.install_subdirs = []
class ExecutableSerialisation:
    """Picklable description of one wrapped-executable invocation."""
    def __init__(self, name, fname, cmd_args, env, is_cross, exe_wrapper,
                 workdir, extra_paths, capture):
        # Note: the wrapper is stored as `exe_runner`, not `exe_wrapper`.
        (self.name, self.fname, self.cmd_args, self.env,
         self.is_cross, self.exe_runner, self.workdir,
         self.extra_paths, self.capture) = (name, fname, cmd_args, env,
                                            is_cross, exe_wrapper, workdir,
                                            extra_paths, capture)
class TestSerialisation:
    """Picklable description of a single test to be run by the harness."""
    def __init__(self, name, suite, fname, is_cross_built, exe_wrapper, is_parallel, cmd_args, env,
                 should_fail, timeout, workdir, extra_paths):
        # Note: the wrapper is stored as `exe_runner`, not `exe_wrapper`.
        (self.name, self.suite, self.fname, self.is_cross_built,
         self.exe_runner, self.is_parallel, self.cmd_args, self.env,
         self.should_fail, self.timeout, self.workdir,
         self.extra_paths) = (name, suite, fname, is_cross_built,
                              exe_wrapper, is_parallel, cmd_args, env,
                              should_fail, timeout, workdir, extra_paths)
class OptionProxy:
    """Lightweight stand-in exposing just an option's name and value."""
    def __init__(self, name, value):
        self.name, self.value = name, value
class OptionOverrideProxy:
    """Behave like an option mapping while transparently substituting
    overridden values for selected options."""
    def __init__(self, overrides, options):
        self.overrides = overrides
        self.options = options
    def __getitem__(self, option_name):
        base_opt = self.options[option_name]
        if option_name not in self.overrides:
            return base_opt
        # Validate the override through the base option before exposing it.
        validated = base_opt.validate_value(self.overrides[option_name])
        return OptionProxy(base_opt.name, validated)
# This class contains the basic functionality that is needed by all backends.
# Feel free to move stuff in and out of it as you see fit.
class Backend:
    def __init__(self, build):
        self.build = build
        self.environment = build.environment
        # NOTE(review): populated elsewhere; appears to track targets the
        # backend has already generated — confirm against subclasses.
        self.processed_targets = {}
        # Relative path leading from the build dir back to the source dir.
        self.build_to_src = os.path.relpath(self.environment.get_source_dir(),
                                            self.environment.get_build_dir())
        # Eagerly create every target's private scratch directory.
        for t in self.build.targets:
            priv_dirname = self.get_target_private_dir_abs(t)
            os.makedirs(priv_dirname, exist_ok=True)
    def get_target_filename(self, t):
        # Output filename of a target, relative to the build root.
        if isinstance(t, build.CustomTarget):
            if len(t.get_outputs()) != 1:
                mlog.warning('custom_target {!r} has more than one output! '
                             'Using the first one.'.format(t.name))
            filename = t.get_outputs()[0]
        else:
            assert(isinstance(t, build.BuildTarget))
            filename = t.get_filename()
        return os.path.join(self.get_target_dir(t), filename)
    def get_target_filename_abs(self, target):
        # Absolute variant of get_target_filename().
        return os.path.join(self.environment.get_build_dir(), self.get_target_filename(target))
    def get_option_for_target(self, option_name, target):
        # Per-target option overrides win over the global builtin option.
        if option_name in target.option_overrides:
            override = target.option_overrides[option_name]
            return self.environment.coredata.validate_option_value(option_name, override)
        return self.environment.coredata.get_builtin_option(option_name)
    def get_target_filename_for_linking(self, target):
        # On some platforms (msvc for instance), the file that is used for
        # dynamic linking is not the same as the dynamic library itself. This
        # file is called an import library, and we want to link against that.
        # On all other platforms, we link to the library directly.
        if isinstance(target, build.SharedLibrary):
            link_lib = target.get_import_filename() or target.get_filename()
            return os.path.join(self.get_target_dir(target), link_lib)
        elif isinstance(target, build.StaticLibrary):
            return os.path.join(self.get_target_dir(target), target.get_filename())
        elif isinstance(target, build.Executable):
            if target.import_filename:
                return os.path.join(self.get_target_dir(target), target.get_import_filename())
            else:
                # NOTE(review): executables without an import file yield
                # None rather than raising — callers must handle this.
                return None
        raise AssertionError('BUG: Tried to link to {!r} which is not linkable'.format(target))
    def get_target_dir(self, target):
        # 'mirror' layout mirrors the source tree; any other layout puts
        # everything into a single 'meson-out' directory.
        if self.environment.coredata.get_builtin_option('layout') == 'mirror':
            dirname = target.get_subdir()
        else:
            dirname = 'meson-out'
        return dirname
    def get_target_dir_relative_to(self, t, o):
        '''Get a target dir relative to another target's directory'''
        target_dir = os.path.join(self.environment.get_build_dir(), self.get_target_dir(t))
        othert_dir = os.path.join(self.environment.get_build_dir(), self.get_target_dir(o))
        return os.path.relpath(target_dir, othert_dir)
    def get_target_source_dir(self, target):
        # if target dir is empty, avoid extraneous trailing / from os.path.join()
        target_dir = self.get_target_dir(target)
        if target_dir:
            return os.path.join(self.build_to_src, target_dir)
        return self.build_to_src
    def get_target_private_dir(self, target):
        # Per-target scratch dir (target dir + basename + type suffix),
        # relative to the build root.
        dirname = os.path.join(self.get_target_dir(target), target.get_basename() + target.type_suffix())
        return dirname
    def get_target_private_dir_abs(self, target):
        # Absolute variant of get_target_private_dir().
        dirname = os.path.join(self.environment.get_build_dir(), self.get_target_private_dir(target))
        return dirname
    def get_target_generated_dir(self, target, gensrc, src):
        """
        Takes a BuildTarget, a generator source (CustomTarget or GeneratedList),
        and a generated source filename.
        Returns the full path of the generated source relative to the build root
        """
        # CustomTarget generators output to the build dir of the CustomTarget
        if isinstance(gensrc, (build.CustomTarget, build.CustomTargetIndex)):
            return os.path.join(self.get_target_dir(gensrc), src)
        # GeneratedList generators output to the private build directory of the
        # target that the GeneratedList is used in
        return os.path.join(self.get_target_private_dir(target), src)
    def get_unity_source_file(self, target, suffix):
        # Unity file is named "<target name>-unity.<language suffix>".
        osrc = target.name + '-unity.' + suffix
        return mesonlib.File.from_built_file(self.get_target_private_dir(target), osrc)
def generate_unity_files(self, target, unity_src):
abs_files = []
result = []
compsrcs = classify_unity_sources(target.compilers.values(), unity_src)
def init_language_file(suffix):
unity_src = self.get_unity_source_file(target, suffix)
outfileabs = unity_src.absolute_path(self.environment.get_source_dir(),
self.environment.get_build_dir())
outfileabs_tmp = outfileabs + '.tmp'
abs_files.append(outfileabs)
outfileabs_tmp_dir = os.path.dirname(outfileabs_tmp)
if not os.path.exists(outfileabs_tmp_dir):
os.makedirs(outfileabs_tmp_dir)
result.append(unity_src)
return open(outfileabs_tmp, 'w')
# For each language, generate a unity source file and return the list
for comp, srcs in compsrcs.items():
with init_language_file(comp.get_default_suffix()) as ofile:
for src in srcs:
ofile.write('#include<%s>\n' % src)
[mesonlib.replace_if_different(x, x + '.tmp') for x in abs_files]
return result
def relpath(self, todir, fromdir):
    '''Relative path from *fromdir* to *todir*, both interpreted as
    build-root-relative (anchored under a dummy prefix so neither is
    treated as cwd-relative).'''
    prefix = 'dummyprefixdir'
    return os.path.relpath(os.path.join(prefix, todir),
                           os.path.join(prefix, fromdir))
def flatten_object_list(self, target, proj_dir_to_build_root=''):
    '''Return all object files of *target* as build-root-relative paths,
    optionally prefixed with *proj_dir_to_build_root*.'''
    obj_list = []
    for obj in target.get_objects():
        if isinstance(obj, str):
            # Bare string: a path relative to the target's source subdir.
            obj_list.append(os.path.join(proj_dir_to_build_root,
                                         self.build_to_src,
                                         target.get_subdir(), obj))
        elif isinstance(obj, mesonlib.File):
            obj_list.append(obj.rel_to_builddir(self.build_to_src))
        elif isinstance(obj, build.ExtractedObjects):
            obj_list.extend(self.determine_ext_objs(target, obj, proj_dir_to_build_root))
        else:
            raise MesonException('Unknown data type in object list.')
    return obj_list
def serialize_executable(self, exe, cmd_args, workdir, env=None,
                         extra_paths=None, capture=None):
    '''
    Pickle everything needed to run *exe* with *cmd_args* into a uniquely
    named ExecutableSerialisation file in the scratch dir and return its path.

    BUG FIX: `env` previously defaulted to a mutable `{}` shared across all
    calls; a None sentinel preserves behaviour without the shared-state trap.
    '''
    import hashlib
    if env is None:
        env = {}
    if extra_paths is None:
        # The callee didn't check if we needed extra paths, so check it here
        if mesonlib.is_windows() or mesonlib.is_cygwin():
            extra_paths = self.determine_windows_extra_paths(exe, [])
        else:
            extra_paths = []
    # Can't just use exe.name here; it will likely be run more than once
    if isinstance(exe, (dependencies.ExternalProgram,
                        build.BuildTarget, build.CustomTarget)):
        basename = exe.name
    else:
        basename = os.path.basename(exe)
    # Take a digest of the cmd args, env, workdir, and capture. This avoids
    # collisions and also makes the name deterministic over regenerations
    # which avoids a rebuild by Ninja because the cmdline stays the same.
    data = bytes(str(sorted(env.items())) + str(cmd_args) + str(workdir) + str(capture),
                 encoding='utf-8')
    digest = hashlib.sha1(data).hexdigest()
    scratch_file = 'meson_exe_{0}_{1}.dat'.format(basename, digest)
    exe_data = os.path.join(self.environment.get_scratch_dir(), scratch_file)
    with open(exe_data, 'wb') as f:
        if isinstance(exe, dependencies.ExternalProgram):
            exe_cmd = exe.get_command()
            exe_needs_wrapper = False
        elif isinstance(exe, (build.BuildTarget, build.CustomTarget)):
            exe_cmd = [self.get_target_filename_abs(exe)]
            exe_needs_wrapper = exe.is_cross
        else:
            exe_cmd = [exe]
            exe_needs_wrapper = False
        # A wrapper is only needed when the exe is cross-built and the cross
        # environment both needs a cross compiler and declares an exe wrapper.
        is_cross_built = exe_needs_wrapper and \
            self.environment.is_cross_build() and \
            self.environment.cross_info.need_cross_compiler() and \
            self.environment.cross_info.need_exe_wrapper()
        if is_cross_built:
            exe_wrapper = self.environment.cross_info.config['binaries'].get('exe_wrapper', None)
        else:
            exe_wrapper = None
        es = ExecutableSerialisation(basename, exe_cmd, cmd_args, env,
                                     is_cross_built, exe_wrapper, workdir,
                                     extra_paths, capture)
        pickle.dump(es, f)
    return exe_data
def serialize_tests(self):
    '''Pickle test and benchmark setups into the scratch dir; return both paths.'''
    scratch = self.environment.get_scratch_dir()
    test_data = os.path.join(scratch, 'meson_test_setup.dat')
    with open(test_data, 'wb') as datafile:
        self.write_test_file(datafile)
    benchmark_data = os.path.join(scratch, 'meson_benchmark_setup.dat')
    with open(benchmark_data, 'wb') as datafile:
        self.write_benchmark_file(datafile)
    return test_data, benchmark_data
def determine_linker(self, target):
    '''
    If we're building a static library, there is only one static linker.
    Otherwise, we query the target for the dynamic linker.
    '''
    if isinstance(target, build.StaticLibrary):
        if target.is_cross:
            return self.build.static_cross_linker
        return self.build.static_linker
    linker = target.get_clike_dynamic_linker()
    if not linker:
        raise MesonException("Couldn't determine linker for target {!r}".format(target.name))
    return linker
def rpaths_for_bundled_shared_libraries(self, target):
    # Return build-root-relative rpath entries for external libraries that
    # were specified as a single absolute path inside the source tree.
    paths = []
    for dep in target.external_deps:
        if isinstance(dep, dependencies.ExternalLibrary):
            la = dep.link_args
            if len(la) == 1 and os.path.isabs(la[0]):
                # The only link argument is an absolute path to a library file.
                libpath = la[0]
                if libpath.startswith(('/usr/lib', '/lib')):
                    # No point in adding system paths.
                    continue
                if os.path.splitext(libpath)[1] not in ['.dll', '.lib', '.so']:
                    # Not a recognised shared/import library extension; skip.
                    continue
                absdir = os.path.dirname(libpath)
                # Strip the source-dir prefix to get a source-relative path.
                # NOTE(review): assumes absdir lives under the source dir --
                # the assert below enforces that the slice produced a
                # relative path.
                rel_to_src = absdir[len(self.environment.get_source_dir()) + 1:]
                assert(not os.path.isabs(rel_to_src))
                paths.append(os.path.join(self.build_to_src, rel_to_src))
    return paths
def determine_rpath_dirs(self, target):
    '''Return the deduplicated, order-preserving list of rpath dirs for *target*:
    the dirs of all link dependencies, then bundled external library dirs.'''
    result = []
    for ld in target.get_all_link_deps():
        if ld is target:
            # A target does not need an rpath entry for itself.
            continue
        prospective = self.get_target_dir(ld)
        if prospective not in result:
            result.append(prospective)
    for rp in self.rpaths_for_bundled_shared_libraries(target):
        if rp not in result:
            result.append(rp)
    return result
def object_filename_from_source(self, target, source, is_unity):
    # Map a source File to the object-file name it compiles to, or None for
    # files (e.g. headers) that produce no object.
    assert isinstance(source, mesonlib.File)
    build_dir = self.environment.get_build_dir()
    rel_src = source.rel_to_builddir(self.build_to_src)
    if (not self.environment.is_source(rel_src) or
        self.environment.is_header(rel_src)) and not is_unity:
        return None
    # foo.vala files compile down to foo.c and then foo.c.o, not foo.vala.o
    if rel_src.endswith(('.vala', '.gs')):
        # See description in generate_vala_compile for this logic.
        if source.is_built:
            if os.path.isabs(rel_src):
                rel_src = rel_src[len(build_dir) + 1:]
            rel_src = os.path.relpath(rel_src, self.get_target_private_dir(target))
        else:
            rel_src = os.path.basename(rel_src)
        # NOTE(review): [:-5] assumes a 5-char '.vala' suffix; for '.gs'
        # files it strips two extra characters -- verify against upstream.
        if is_unity:
            return 'meson-generated_' + rel_src[:-5] + '.c.' + self.environment.get_object_suffix()
        # A meson- prefixed directory is reserved; hopefully no-one creates a file name with such a weird prefix.
        source = 'meson-generated_' + rel_src[:-5] + '.c'
    elif source.is_built:
        if os.path.isabs(rel_src):
            rel_src = rel_src[len(build_dir) + 1:]
        targetdir = self.get_target_private_dir(target)
        # A meson- prefixed directory is reserved; hopefully no-one creates a file name with such a weird prefix.
        source = 'meson-generated_' + os.path.relpath(rel_src, targetdir)
    else:
        if os.path.isabs(rel_src):
            # Not from the source directory; hopefully this doesn't conflict with user's source files.
            source = os.path.basename(rel_src)
        else:
            source = os.path.relpath(os.path.join(build_dir, rel_src),
                                     os.path.join(self.environment.get_source_dir(), target.get_subdir()))
    # Flatten any directory separators into the object file name.
    return source.replace('/', '_').replace('\\', '_') + '.' + self.environment.get_object_suffix()
def determine_ext_objs(self, target, extobj, proj_dir_to_build_root):
    # Resolve an ExtractedObjects reference into a list of concrete object
    # file paths under the source target's private dir.
    result = []
    targetdir = self.get_target_private_dir(extobj.target)
    # With unity builds, there's just one object that contains all the
    # sources, and we only support extracting all the objects in this mode,
    # so just return that.
    if self.is_unity(target):
        comp = get_compiler_for_source(extobj.target.compilers.values(),
                                       extobj.srclist[0])
        # There is a potential conflict here, but it is unlikely that
        # anyone both enables unity builds and has a file called foo-unity.cpp.
        osrc = self.get_unity_source_file(extobj.target,
                                          comp.get_default_suffix())
        objname = self.object_filename_from_source(extobj.target, osrc, True)
        objname = objname.replace('/', '_').replace('\\', '_')
        objpath = os.path.join(proj_dir_to_build_root, targetdir, objname)
        return [objpath]
    for osrc in extobj.srclist:
        # object_filename_from_source returns None for non-compiling files
        # (e.g. headers); skip those.
        objname = self.object_filename_from_source(extobj.target, osrc, False)
        if objname:
            objpath = os.path.join(proj_dir_to_build_root, targetdir, objname)
            result.append(objpath)
    return result
def get_pch_include_args(self, compiler, target):
    '''Return args for using the target's precompiled headers with *compiler*,
    or an empty list if the target has none this compiler can consume.'''
    pchpath = self.get_target_private_dir(target)
    includeargs = compiler.get_include_args(pchpath, False)
    use_args = []
    for lang in ('c', 'cpp'):
        pch = target.get_pch(lang)
        if pch and compiler.can_compile(pch[-1]):
            use_args += compiler.get_pch_use_args(pchpath, pch[0])
    if use_args:
        return includeargs + use_args
    return []
@staticmethod
def escape_extra_args(compiler, args):
    '''Escape -D/ /D define arguments for Windows compilers; no-op elsewhere.'''
    # No extra escaping/quoting needed when not running on Windows
    if not mesonlib.is_windows():
        return args
    escaped = []
    is_msvc = compiler.get_id() == 'msvc'
    for arg in args:
        # Compiler-specific escaping is needed for -D args but not for any others
        if arg.startswith('-D') or arg.startswith('/D'):
            if is_msvc:
                # MSVC needs escaping when a -D argument ends in \ or \";
                # without it the next character gets eaten.
                if arg.endswith('\\'):
                    arg += '\\'
                elif arg.endswith('\\"'):
                    arg = arg[:-2] + '\\\\"'
            else:
                # MinGW GCC needs all backslashes in defines to be doubly-escaped
                # FIXME: Not sure about Cygwin or Clang
                arg = arg.replace('\\', '\\\\')
        escaped.append(arg)
    return escaped
def generate_basic_compiler_args(self, target, compiler, no_warn_args=False):
    # Build the base compile argument list for *target* with *compiler*.
    # The layering order below IS the override contract: later additions
    # take precedence over earlier ones.
    # Create an empty commands list, and start adding arguments from
    # various sources in the order in which they must override each other
    # starting from hard-coded defaults followed by build options and so on.
    commands = CompilerArgs(compiler)
    copt_proxy = OptionOverrideProxy(target.option_overrides, self.environment.coredata.compiler_options)
    # First, the trivial ones that are impossible to override.
    #
    # Add -nostdinc/-nostdinc++ if needed; can't be overridden
    commands += self.get_cross_stdlib_args(target, compiler)
    # Add things like /NOLOGO or -pipe; usually can't be overridden
    commands += compiler.get_always_args()
    # Only add warning-flags by default if the buildtype enables it, and if
    # we weren't explicitly asked to not emit warnings (for Vala, f.ex)
    if no_warn_args:
        commands += compiler.get_no_warn_args()
    elif self.get_option_for_target('buildtype', target) != 'plain':
        commands += compiler.get_warn_args(self.get_option_for_target('warning_level', target))
    # Add -Werror if werror=true is set in the build options set on the
    # command-line or default_options inside project(). This only sets the
    # action to be done for warnings if/when they are emitted, so it's ok
    # to set it after get_no_warn_args() or get_warn_args().
    if self.get_option_for_target('werror', target):
        commands += compiler.get_werror_args()
    # Add compile args for c_* or cpp_* build options set on the
    # command-line or default_options inside project().
    commands += compiler.get_option_compile_args(copt_proxy)
    # Add buildtype args: optimization level, debugging, etc.
    commands += compiler.get_buildtype_args(self.get_option_for_target('buildtype', target))
    # Add compile args added using add_project_arguments()
    commands += self.build.get_project_args(compiler, target.subproject)
    # Add compile args added using add_global_arguments()
    # These override per-project arguments
    commands += self.build.get_global_args(compiler)
    if not target.is_cross:
        # Compile args added from the env: CFLAGS/CXXFLAGS, etc. We want these
        # to override all the defaults, but not the per-target compile args.
        commands += self.environment.coredata.external_args[compiler.get_language()]
    # Always set -fPIC for shared libraries
    if isinstance(target, build.SharedLibrary):
        commands += compiler.get_pic_args()
    # Set -fPIC for static libraries by default unless explicitly disabled
    if isinstance(target, build.StaticLibrary) and target.pic:
        commands += compiler.get_pic_args()
    # Add compile args needed to find external dependencies. Link args are
    # added while generating the link command.
    # NOTE: We must preserve the order in which external deps are
    # specified, so we reverse the list before iterating over it.
    for dep in reversed(target.get_external_deps()):
        if not dep.found():
            continue
        if compiler.language == 'vala':
            if isinstance(dep, dependencies.PkgConfigDependency):
                if dep.name == 'glib-2.0' and dep.version_reqs is not None:
                    for req in dep.version_reqs:
                        if req.startswith(('>=', '==')):
                            commands += ['--target-glib', req[2:]]
                            break
                commands += ['--pkg', dep.name]
            elif isinstance(dep, dependencies.ExternalLibrary):
                commands += dep.get_link_args('vala')
        else:
            commands += dep.get_compile_args()
        # Qt needs -fPIC for executables
        # XXX: We should move to -fPIC for all executables
        if isinstance(target, build.Executable):
            commands += dep.get_exe_args(compiler)
        # For 'automagic' deps: Boost and GTest. Also dependency('threads').
        # pkg-config puts the thread flags itself via `Cflags:`
        if dep.need_threads():
            commands += compiler.thread_flags(self.environment)
    # Fortran requires extra include directives.
    if compiler.language == 'fortran':
        for lt in target.link_targets:
            priv_dir = os.path.join(self.get_target_dir(lt), lt.get_basename() + lt.type_suffix())
            incflag = compiler.get_include_args(priv_dir, False)
            commands += incflag
    return commands
def build_target_link_arguments(self, compiler, deps):
    '''Return linker arguments linking against each library target in *deps*.'''
    args = []
    for dep in deps:
        if not dep.is_linkable_target():
            raise RuntimeError('Tried to link with a non-library target "%s".' % dep.get_basename())
        fname = self.get_target_filename_for_linking(dep)
        if not fname:
            # e.g. an executable with no import library; nothing to link to.
            continue
        if isinstance(compiler, (compilers.LLVMDCompiler, compilers.DmdDCompiler)):
            fname = '-L' + fname
        args.append(fname)
    return args
def determine_windows_extra_paths(self, target, extra_bdeps):
    '''On Windows there is no such thing as an rpath.
    We must determine all locations of DLLs that this exe
    links to and return them so they can be used in unit
    tests.'''
    result = []
    # External deps
    for deppath in self.rpaths_for_bundled_shared_libraries(target):
        result.append(os.path.normpath(os.path.join(self.environment.get_build_dir(), deppath)))
    prospectives = []
    if isinstance(target, build.Executable):
        prospectives = target.get_transitive_link_deps()
    for bdep in extra_bdeps:
        prospectives += bdep.get_transitive_link_deps()
    # Internal deps
    for ld in prospectives:
        if ld in ('', '.'):
            continue
        dirseg = os.path.join(self.environment.get_build_dir(), self.get_target_dir(ld))
        if dirseg not in result:
            result.append(dirseg)
    return result
def write_benchmark_file(self, datafile):
    '''Serialise all benchmark definitions into *datafile*.'''
    self.write_test_serialisation(self.build.get_benchmarks(), datafile)
def write_test_file(self, datafile):
    '''Serialise all test definitions into *datafile*.'''
    self.write_test_serialisation(self.build.get_tests(), datafile)
def write_test_serialisation(self, tests, datafile):
    # Convert each test into a picklable TestSerialisation and dump the
    # whole list into *datafile* (opened in binary mode by the caller).
    arr = []
    for t in tests:
        exe = t.get_exe()
        if isinstance(exe, dependencies.ExternalProgram):
            cmd = exe.get_command()
        else:
            # Built target: run it via its absolute output path.
            cmd = [os.path.join(self.environment.get_build_dir(), self.get_target_filename(t.get_exe()))]
        is_cross = self.environment.is_cross_build() and \
            self.environment.cross_info.need_cross_compiler() and \
            self.environment.cross_info.need_exe_wrapper()
        if isinstance(exe, build.BuildTarget):
            # Natively-built targets in a cross build don't need a wrapper.
            is_cross = is_cross and exe.is_cross
        if isinstance(exe, dependencies.ExternalProgram):
            # E.g. an external verifier or simulator program run on a generated executable.
            # Can always be run.
            is_cross = False
        if is_cross:
            exe_wrapper = self.environment.cross_info.config['binaries'].get('exe_wrapper', None)
        else:
            exe_wrapper = None
        if mesonlib.is_windows() or mesonlib.is_cygwin():
            extra_paths = self.determine_windows_extra_paths(exe, [])
        else:
            extra_paths = []
        cmd_args = []
        for a in t.cmd_args:
            if hasattr(a, 'held_object'):
                # Unwrap interpreter holder objects.
                a = a.held_object
            if isinstance(a, mesonlib.File):
                a = os.path.join(self.environment.get_build_dir(), a.rel_to_builddir(self.build_to_src))
                cmd_args.append(a)
            elif isinstance(a, str):
                cmd_args.append(a)
            elif isinstance(a, build.Target):
                cmd_args.append(self.get_target_filename(a))
            else:
                raise MesonException('Bad object in test command.')
        ts = TestSerialisation(t.get_name(), t.suite, cmd, is_cross, exe_wrapper,
                               t.is_parallel, cmd_args, t.env, t.should_fail,
                               t.timeout, t.workdir, extra_paths)
        arr.append(ts)
    pickle.dump(arr, datafile)
def generate_depmf_install(self, d):
    '''Write depmf.json into the build dir and register it for installation
    in InstallData *d* (no-op when no manifest name was configured).'''
    if self.build.dep_manifest_name is None:
        return
    ifilename = os.path.join(self.environment.get_build_dir(), 'depmf.json')
    ofilename = os.path.join(self.environment.get_prefix(), self.build.dep_manifest_name)
    mfobj = {'type': 'dependency manifest',
             'version': '1.0',
             'projects': self.build.dep_manifest}
    with open(ifilename, 'w') as f:
        f.write(json.dumps(mfobj))
    # Copy file from, to, and with mode unchanged
    d.data.append([ifilename, ofilename, None])
def get_regen_filelist(self):
    '''List of all files whose alteration means that the build
    definition needs to be regenerated.'''
    deps = [os.path.join(self.build_to_src, df)
            for df in self.interpreter.get_build_def_files()]
    if self.environment.is_cross_build():
        deps.append(os.path.join(self.build_to_src,
                                 self.environment.coredata.cross_file))
    deps.append('meson-private/coredata.dat')
    src_root = self.environment.get_source_dir()
    if os.path.exists(os.path.join(src_root, 'meson_options.txt')):
        deps.append(os.path.join(self.build_to_src, 'meson_options.txt'))
    # Subproject option files also trigger a regen when changed.
    for sp in self.build.subprojects.keys():
        if os.path.isfile(os.path.join(src_root, sp, 'meson_options.txt')):
            deps.append(os.path.join(self.build_to_src, sp, 'meson_options.txt'))
    return deps
def exe_object_to_cmd_array(self, exe):
    '''Return the command array to run *exe* (a BuildTarget or ExternalProgram),
    raising if a cross-built target cannot be executed on this machine.'''
    unrunnable_cross = self.environment.is_cross_build() and \
        self.environment.cross_info.need_exe_wrapper() and \
        isinstance(exe, build.BuildTarget) and exe.is_cross
    if unrunnable_cross and 'exe_wrapper' not in self.environment.cross_info.config['binaries']:
        msg = ('Can not use target %s as a generator because it is cross-built\n'
               'and no exe wrapper is defined. You might want to set it to native instead.') % exe.name
        raise MesonException(msg)
    if isinstance(exe, build.BuildTarget):
        return [os.path.join(self.environment.get_build_dir(), self.get_target_filename(exe))]
    return exe.get_command()
def replace_extra_args(self, args, genlist):
    '''Expand each @EXTRA_ARGS@ placeholder in *args* with *genlist*'s extra args.'''
    final_args = []
    for arg in args:
        if arg == '@EXTRA_ARGS@':
            final_args.extend(genlist.get_extra_args())
        else:
            final_args.append(arg)
    return final_args
def replace_outputs(self, args, private_dir, output_list):
    '''Replace every @OUTPUTn@ placeholder in *args* with the path of the
    n-th entry of *output_list* inside *private_dir*.'''
    # FIX: use a raw string for the pattern; '\d' in a plain string literal
    # is an invalid escape sequence (DeprecationWarning, slated to become a
    # SyntaxError in future Python versions).
    regex = re.compile(r'@OUTPUT(\d+)@')
    newargs = []
    for arg in args:
        m = regex.search(arg)
        # Loop because a single argument may contain several placeholders.
        while m is not None:
            index = int(m.group(1))
            src = '@OUTPUT%d@' % index
            arg = arg.replace(src, os.path.join(private_dir, output_list[index]))
            m = regex.search(arg)
        newargs.append(arg)
    return newargs
def get_build_by_default_targets(self):
    '''Return an OrderedDict of all targets that must be built by default.'''
    result = OrderedDict()
    # Get all build and custom targets that must be built by default
    for name, t in self.build.get_targets().items():
        if t.build_by_default or t.install or t.build_always:
            result[name] = t
    # Get all targets used as test executables and arguments. These must
    # also be built by default. XXX: Sometime in the future these should be
    # built only before running tests.
    for t in self.build.get_tests():
        exe = getattr(t.exe, 'held_object', t.exe)
        if isinstance(exe, (build.CustomTarget, build.BuildTarget)):
            result[exe.get_id()] = exe
        for arg in t.cmd_args:
            arg = getattr(arg, 'held_object', arg)
            if isinstance(arg, (build.CustomTarget, build.BuildTarget)):
                result[arg.get_id()] = arg
    return result
def get_custom_target_provided_libraries(self, target):
    '''Return library files produced by CustomTarget generated sources of *target*.'''
    libs = []
    for gen in target.get_generated_sources():
        if not isinstance(gen, build.CustomTarget):
            continue
        for out in gen.get_outputs():
            if self.environment.is_library(out):
                libs.append(os.path.join(self.get_target_dir(gen), out))
    return libs
def is_unity(self, target):
    '''Whether *target* should be compiled as a unity build.'''
    optval = self.get_option_for_target('unity', target)
    # 'subprojects' enables unity only for targets belonging to a subproject.
    return optval == 'on' or (optval == 'subprojects' and target.subproject != '')
def get_custom_target_sources(self, target):
    '''
    Custom target sources can be of various object types; strings, File,
    BuildTarget, even other CustomTargets.
    Returns the path to them relative to the build root directory.
    '''
    srcs = []
    for i in target.get_sources():
        if hasattr(i, 'held_object'):
            # Unwrap interpreter holder objects.
            i = i.held_object
        if isinstance(i, str):
            # Plain string: a file in the target's source subdir.
            fname = [os.path.join(self.build_to_src, target.subdir, i)]
        elif isinstance(i, build.BuildTarget):
            fname = [self.get_target_filename(i)]
        elif isinstance(i, build.CustomTarget):
            # A custom target may have several outputs; take all of them.
            fname = [os.path.join(self.get_target_dir(i), p) for p in i.get_outputs()]
        elif isinstance(i, build.GeneratedList):
            fname = [os.path.join(self.get_target_private_dir(target), p) for p in i.get_outputs()]
        else:
            # Assumed to be a mesonlib.File.
            fname = [i.rel_to_builddir(self.build_to_src)]
        if target.absolute_paths:
            fname = [os.path.join(self.environment.get_build_dir(), f) for f in fname]
        srcs += fname
    return srcs
def get_custom_target_depend_files(self, target, absolute_paths=False):
    '''Return the target's depend_files as build-root-relative paths
    (or absolute paths when *absolute_paths* is True).'''
    deps = []
    for dep in target.depend_files:
        if isinstance(dep, mesonlib.File):
            if absolute_paths:
                deps.append(dep.absolute_path(self.environment.get_source_dir(),
                                              self.environment.get_build_dir()))
            else:
                deps.append(dep.rel_to_builddir(self.build_to_src))
        elif absolute_paths:
            deps.append(os.path.join(self.environment.get_source_dir(), target.subdir, dep))
        else:
            deps.append(os.path.join(self.build_to_src, target.subdir, dep))
    return deps
def eval_custom_target_command(self, target, absolute_outputs=False):
    '''
    Evaluate a custom target's command list, returning (inputs, outputs, cmd)
    with template strings (@SOURCE_ROOT@, @BUILD_ROOT@, @DEPFILE@, ...)
    substituted.

    We want the outputs to be absolute only when using the VS backend.
    XXX: Maybe allow the vs backend to use relative paths too?
    '''
    source_root = self.build_to_src
    build_root = '.'
    outdir = self.get_target_dir(target)
    if absolute_outputs:
        source_root = self.environment.get_source_dir()
        # BUG FIX: build_root was previously assigned get_source_dir(),
        # making @BUILD_ROOT@ expand to the source directory when absolute
        # outputs were requested.
        build_root = self.environment.get_build_dir()
        outdir = os.path.join(self.environment.get_build_dir(), outdir)
    outputs = []
    for i in target.get_outputs():
        outputs.append(os.path.join(outdir, i))
    inputs = self.get_custom_target_sources(target)
    # Evaluate the command list
    cmd = []
    for i in target.command:
        if isinstance(i, build.Executable):
            cmd += self.exe_object_to_cmd_array(i)
            continue
        elif isinstance(i, build.CustomTarget):
            # GIR scanner will attempt to execute this binary but
            # it assumes that it is in path, so always give it a full path.
            tmp = i.get_outputs()[0]
            i = os.path.join(self.get_target_dir(i), tmp)
        elif isinstance(i, mesonlib.File):
            i = i.rel_to_builddir(self.build_to_src)
            if target.absolute_paths:
                i = os.path.join(self.environment.get_build_dir(), i)
        # FIXME: str types are blindly added ignoring 'target.absolute_paths'
        # because we can't know if they refer to a file or just a string
        elif not isinstance(i, str):
            err_msg = 'Argument {0} is of unknown type {1}'
            raise RuntimeError(err_msg.format(str(i), str(type(i))))
        elif '@SOURCE_ROOT@' in i:
            i = i.replace('@SOURCE_ROOT@', source_root)
        elif '@BUILD_ROOT@' in i:
            i = i.replace('@BUILD_ROOT@', build_root)
        elif '@DEPFILE@' in i:
            if target.depfile is None:
                msg = 'Custom target {!r} has @DEPFILE@ but no depfile ' \
                      'keyword argument.'.format(target.name)
                raise MesonException(msg)
            dfilename = os.path.join(outdir, target.depfile)
            i = i.replace('@DEPFILE@', dfilename)
        elif '@PRIVATE_OUTDIR_' in i:
            # FIX: raw string for the regex -- '\s' is an invalid escape
            # sequence in a plain string literal.
            match = re.search(r'@PRIVATE_OUTDIR_(ABS_)?([^/\s*]*)@', i)
            if not match:
                msg = 'Custom target {!r} has an invalid argument {!r}' \
                      ''.format(target.name, i)
                raise MesonException(msg)
            source = match.group(0)
            if match.group(1) is None and not target.absolute_paths:
                lead_dir = ''
            else:
                lead_dir = self.environment.get_build_dir()
            i = i.replace(source, os.path.join(lead_dir, outdir))
        cmd.append(i)
    # Substitute the rest of the template strings
    values = mesonlib.get_filenames_templates_dict(inputs, outputs)
    cmd = mesonlib.substitute_values(cmd, values)
    # This should not be necessary but removing it breaks
    # building GStreamer on Windows. The underlying issue
    # is problems with quoting backslashes on Windows
    # which is the seventh circle of hell. The downside is
    # that this breaks custom targets whose command lines
    # have backslashes. If you try to fix this be sure to
    # check that it does not break GST.
    #
    # The bug causes file paths such as c:\foo to get escaped
    # into c:\\foo.
    #
    # Unfortunately we have not been able to come up with an
    # isolated test case for this so unless you manage to come up
    # with one, the only way is to test the building with Gst's
    # setup. Note this in your MR or ping us and we will get it
    # fixed.
    #
    # https://github.com/mesonbuild/meson/pull/737
    cmd = [i.replace('\\', '/') for i in cmd]
    return inputs, outputs, cmd
def run_postconf_scripts(self):
    '''Run every registered postconf script with MESON_* variables exported.'''
    introspect_cmd = ' '.join(shlex.quote(x)
                              for x in self.environment.get_build_command() + ['introspect'])
    child_env = os.environ.copy()
    child_env.update({'MESON_SOURCE_ROOT': self.environment.get_source_dir(),
                      'MESON_BUILD_ROOT': self.environment.get_build_dir(),
                      'MESONINTROSPECT': introspect_cmd,
                      })
    for script in self.build.postconf_scripts:
        subprocess.check_call(script['exe'] + script['args'], env=child_env)
backends: Only add pch args that are appropriate for the compiler
Currently we try both C and C++ when determining which PCH files to
include. The problem with this approach is that if there are no C or C++
files (only headers) and the target has both C and C++ sources then the
PCHs will be passed to the wrong compiler.
The solution is less code: we already have the compiler, and the compiler
knows what language it is, so we don't need to walk both C and C++.
Fixes #3068
# Copyright 2012-2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, pickle, re
from .. import build
from .. import dependencies
from .. import mesonlib
from .. import mlog
from .. import compilers
import json
import subprocess
from ..mesonlib import MesonException
from ..mesonlib import get_compiler_for_source, classify_unity_sources
from ..compilers import CompilerArgs
from collections import OrderedDict
import shlex
class CleanTrees:
    '''
    Directories outputted by custom targets that have to be manually cleaned
    because on Linux `ninja clean` only deletes empty directories.
    '''
    def __init__(self, build_dir, trees):
        # build_dir: the build root; trees: directory paths to delete on clean.
        self.build_dir = build_dir
        self.trees = trees
class InstallData:
    # Plain data holder aggregating everything the install step needs.
    # Filled in by the backends and pickled for the install script.
    def __init__(self, source_dir, build_dir, prefix, strip_bin, mesonintrospect):
        self.source_dir = source_dir    # source tree root
        self.build_dir = build_dir      # build tree root
        self.prefix = prefix            # installation prefix
        self.strip_bin = strip_bin      # strip command for binaries
        # Collections appended to by the backend while generating install rules:
        self.targets = []
        self.headers = []
        self.man = []
        self.data = []
        self.po_package_name = ''
        self.po = []
        self.install_scripts = []
        self.install_subdirs = []
        self.mesonintrospect = mesonintrospect  # command to run introspection
class ExecutableSerialisation:
    # Picklable description of a wrapped executable invocation; written by
    # Backend.serialize_executable() and executed later out of process.
    def __init__(self, name, fname, cmd_args, env, is_cross, exe_wrapper,
                 workdir, extra_paths, capture):
        self.name = name
        self.fname = fname
        self.cmd_args = cmd_args
        self.env = env
        self.is_cross = is_cross
        # Note: the exe_wrapper parameter is stored as 'exe_runner'.
        self.exe_runner = exe_wrapper
        self.workdir = workdir
        self.extra_paths = extra_paths
        self.capture = capture
class TestSerialisation:
    # Picklable description of a single test; written by
    # Backend.write_test_serialisation() and consumed by the test runner.
    def __init__(self, name, suite, fname, is_cross_built, exe_wrapper, is_parallel, cmd_args, env,
                 should_fail, timeout, workdir, extra_paths):
        self.name = name
        self.suite = suite
        self.fname = fname
        self.is_cross_built = is_cross_built
        # Note: the exe_wrapper parameter is stored as 'exe_runner'.
        self.exe_runner = exe_wrapper
        self.is_parallel = is_parallel
        self.cmd_args = cmd_args
        self.env = env
        self.should_fail = should_fail
        self.timeout = timeout
        self.workdir = workdir
        self.extra_paths = extra_paths
class OptionProxy:
    # Lightweight stand-in for a user option carrying an overridden value;
    # returned by OptionOverrideProxy.__getitem__ for overridden keys.
    def __init__(self, name, value):
        self.name = name
        self.value = value
class OptionOverrideProxy:
    '''Mimic an option list but transparently override
    selected option values.'''
    def __init__(self, overrides, options):
        # overrides: mapping of option name -> raw override value;
        # options: the underlying option mapping being proxied.
        self.overrides = overrides
        self.options = options
    def __getitem__(self, option_name):
        base_opt = self.options[option_name]
        try:
            raw = self.overrides[option_name]
        except KeyError:
            # Not overridden: hand back the real option object.
            return base_opt
        return OptionProxy(base_opt.name, base_opt.validate_value(raw))
# This class contains the basic functionality that is needed by all backends.
# Feel free to move stuff in and out of it as you see fit.
class Backend:
def __init__(self, build):
    # Cache the Build object and its environment, and precompute the
    # relative path from the build dir back to the source dir.
    self.build = build
    self.environment = build.environment
    self.processed_targets = {}
    self.build_to_src = os.path.relpath(self.environment.get_source_dir(),
                                        self.environment.get_build_dir())
    # Eagerly create every target's private scratch directory so later
    # steps can write into it without checking.
    for t in self.build.targets:
        priv_dirname = self.get_target_private_dir_abs(t)
        os.makedirs(priv_dirname, exist_ok=True)
def get_target_filename(self, t):
    '''Return the primary output filename of *t*, relative to the build root.'''
    if isinstance(t, build.CustomTarget):
        outputs = t.get_outputs()
        if len(outputs) != 1:
            mlog.warning('custom_target {!r} has more than one output! '
                         'Using the first one.'.format(t.name))
        filename = outputs[0]
    else:
        assert(isinstance(t, build.BuildTarget))
        filename = t.get_filename()
    return os.path.join(self.get_target_dir(t), filename)
def get_target_filename_abs(self, target):
    '''Absolute path of the target's primary output file.'''
    return os.path.join(self.environment.get_build_dir(),
                        self.get_target_filename(target))
def get_option_for_target(self, option_name, target):
    '''Look up a builtin option value, honouring per-target option overrides.'''
    try:
        override = target.option_overrides[option_name]
    except KeyError:
        return self.environment.coredata.get_builtin_option(option_name)
    # Overrides come in raw (string) form and must be validated.
    return self.environment.coredata.validate_option_value(option_name, override)
def get_target_filename_for_linking(self, target):
    '''Return the file to pass to the linker for *target*.

    On some platforms (msvc for instance), the file that is used for
    dynamic linking is not the same as the dynamic library itself: an
    import library. Link against that where one exists; on all other
    platforms link to the library directly.'''
    outdir = self.get_target_dir(target)
    if isinstance(target, build.SharedLibrary):
        return os.path.join(outdir, target.get_import_filename() or target.get_filename())
    if isinstance(target, build.StaticLibrary):
        return os.path.join(outdir, target.get_filename())
    if isinstance(target, build.Executable):
        if target.import_filename:
            return os.path.join(outdir, target.get_import_filename())
        # An executable without an import library cannot be linked against.
        return None
    raise AssertionError('BUG: Tried to link to {!r} which is not linkable'.format(target))
def get_target_dir(self, target):
    '''Directory (relative to the build root) that the target's outputs go into.'''
    if self.environment.coredata.get_builtin_option('layout') == 'mirror':
        # Mirror layout: outputs mirror the source subdirectory structure.
        return target.get_subdir()
    # Flat layout: everything goes into a single meson-out directory.
    return 'meson-out'
def get_target_dir_relative_to(self, t, o):
    '''Get a target dir relative to another target's directory.'''
    build_dir = self.environment.get_build_dir()
    target_dir = os.path.join(build_dir, self.get_target_dir(t))
    other_dir = os.path.join(build_dir, self.get_target_dir(o))
    return os.path.relpath(target_dir, other_dir)
def get_target_source_dir(self, target):
    '''Source directory of *target*, relative to the build directory.'''
    target_dir = self.get_target_dir(target)
    # If target dir is empty, avoid an extraneous trailing / from os.path.join().
    if not target_dir:
        return self.build_to_src
    return os.path.join(self.build_to_src, target_dir)
def get_target_private_dir(self, target):
    '''Return the target's private scratch directory, relative to the build root.'''
    suffixed = target.get_basename() + target.type_suffix()
    return os.path.join(self.get_target_dir(target), suffixed)
def get_target_private_dir_abs(self, target):
    '''Absolute-path variant of get_target_private_dir().'''
    return os.path.join(self.environment.get_build_dir(),
                        self.get_target_private_dir(target))
def get_target_generated_dir(self, target, gensrc, src):
    """
    Takes a BuildTarget, a generator source (CustomTarget or GeneratedList),
    and a generated source filename.
    Returns the full path of the generated source relative to the build root
    """
    # CustomTarget generators output to the build dir of the CustomTarget
    if isinstance(gensrc, (build.CustomTarget, build.CustomTargetIndex)):
        return os.path.join(self.get_target_dir(gensrc), src)
    # GeneratedList generators output to the private build directory of the
    # target that the GeneratedList is used in
    # (i.e. the consuming target, not the generator).
    return os.path.join(self.get_target_private_dir(target), src)
def get_unity_source_file(self, target, suffix):
    # Return the File for the target's per-language unity source
    # ('<name>-unity.<suffix>') inside the target's private directory.
    osrc = target.name + '-unity.' + suffix
    return mesonlib.File.from_built_file(self.get_target_private_dir(target), osrc)
def generate_unity_files(self, target, unity_src):
    # Write one unity source per language that #includes every source of
    # that language, and return the generated mesonlib.File objects.
    abs_files = []
    result = []
    compsrcs = classify_unity_sources(target.compilers.values(), unity_src)
    def init_language_file(suffix):
        # Open the '.tmp' unity file for one language for writing, recording
        # the final path in abs_files/result as a side effect.
        unity_src = self.get_unity_source_file(target, suffix)
        outfileabs = unity_src.absolute_path(self.environment.get_source_dir(),
                                             self.environment.get_build_dir())
        outfileabs_tmp = outfileabs + '.tmp'
        abs_files.append(outfileabs)
        outfileabs_tmp_dir = os.path.dirname(outfileabs_tmp)
        if not os.path.exists(outfileabs_tmp_dir):
            os.makedirs(outfileabs_tmp_dir)
        result.append(unity_src)
        return open(outfileabs_tmp, 'w')
    # For each language, generate a unity source file and return the list
    for comp, srcs in compsrcs.items():
        with init_language_file(comp.get_default_suffix()) as ofile:
            for src in srcs:
                ofile.write('#include<%s>\n' % src)
    # Only replace the real unity file when contents changed, so unchanged
    # unity sources do not trigger spurious rebuilds.
    [mesonlib.replace_if_different(x, x + '.tmp') for x in abs_files]
    return result
def relpath(self, todir, fromdir):
return os.path.relpath(os.path.join('dummyprefixdir', todir),
os.path.join('dummyprefixdir', fromdir))
def flatten_object_list(self, target, proj_dir_to_build_root=''):
obj_list = []
for obj in target.get_objects():
if isinstance(obj, str):
o = os.path.join(proj_dir_to_build_root,
self.build_to_src, target.get_subdir(), obj)
obj_list.append(o)
elif isinstance(obj, mesonlib.File):
obj_list.append(obj.rel_to_builddir(self.build_to_src))
elif isinstance(obj, build.ExtractedObjects):
obj_list += self.determine_ext_objs(target, obj, proj_dir_to_build_root)
else:
raise MesonException('Unknown data type in object list.')
return obj_list
def serialize_executable(self, exe, cmd_args, workdir, env={},
extra_paths=None, capture=None):
import hashlib
if extra_paths is None:
# The callee didn't check if we needed extra paths, so check it here
if mesonlib.is_windows() or mesonlib.is_cygwin():
extra_paths = self.determine_windows_extra_paths(exe, [])
else:
extra_paths = []
# Can't just use exe.name here; it will likely be run more than once
if isinstance(exe, (dependencies.ExternalProgram,
build.BuildTarget, build.CustomTarget)):
basename = exe.name
else:
basename = os.path.basename(exe)
# Take a digest of the cmd args, env, workdir, and capture. This avoids
# collisions and also makes the name deterministic over regenerations
# which avoids a rebuild by Ninja because the cmdline stays the same.
data = bytes(str(sorted(env.items())) + str(cmd_args) + str(workdir) + str(capture),
encoding='utf-8')
digest = hashlib.sha1(data).hexdigest()
scratch_file = 'meson_exe_{0}_{1}.dat'.format(basename, digest)
exe_data = os.path.join(self.environment.get_scratch_dir(), scratch_file)
with open(exe_data, 'wb') as f:
if isinstance(exe, dependencies.ExternalProgram):
exe_cmd = exe.get_command()
exe_needs_wrapper = False
elif isinstance(exe, (build.BuildTarget, build.CustomTarget)):
exe_cmd = [self.get_target_filename_abs(exe)]
exe_needs_wrapper = exe.is_cross
else:
exe_cmd = [exe]
exe_needs_wrapper = False
is_cross_built = exe_needs_wrapper and \
self.environment.is_cross_build() and \
self.environment.cross_info.need_cross_compiler() and \
self.environment.cross_info.need_exe_wrapper()
if is_cross_built:
exe_wrapper = self.environment.cross_info.config['binaries'].get('exe_wrapper', None)
else:
exe_wrapper = None
es = ExecutableSerialisation(basename, exe_cmd, cmd_args, env,
is_cross_built, exe_wrapper, workdir,
extra_paths, capture)
pickle.dump(es, f)
return exe_data
def serialize_tests(self):
test_data = os.path.join(self.environment.get_scratch_dir(), 'meson_test_setup.dat')
with open(test_data, 'wb') as datafile:
self.write_test_file(datafile)
benchmark_data = os.path.join(self.environment.get_scratch_dir(), 'meson_benchmark_setup.dat')
with open(benchmark_data, 'wb') as datafile:
self.write_benchmark_file(datafile)
return test_data, benchmark_data
def determine_linker(self, target):
'''
If we're building a static library, there is only one static linker.
Otherwise, we query the target for the dynamic linker.
'''
if isinstance(target, build.StaticLibrary):
if target.is_cross:
return self.build.static_cross_linker
else:
return self.build.static_linker
l = target.get_clike_dynamic_linker()
if not l:
m = "Couldn't determine linker for target {!r}"
raise MesonException(m.format(target.name))
return l
def rpaths_for_bundled_shared_libraries(self, target):
paths = []
for dep in target.external_deps:
if isinstance(dep, dependencies.ExternalLibrary):
la = dep.link_args
if len(la) == 1 and os.path.isabs(la[0]):
# The only link argument is an absolute path to a library file.
libpath = la[0]
if libpath.startswith(('/usr/lib', '/lib')):
# No point in adding system paths.
continue
if os.path.splitext(libpath)[1] not in ['.dll', '.lib', '.so']:
continue
absdir = os.path.dirname(libpath)
rel_to_src = absdir[len(self.environment.get_source_dir()) + 1:]
assert(not os.path.isabs(rel_to_src))
paths.append(os.path.join(self.build_to_src, rel_to_src))
return paths
def determine_rpath_dirs(self, target):
link_deps = target.get_all_link_deps()
result = []
for ld in link_deps:
if ld is target:
continue
prospective = self.get_target_dir(ld)
if prospective not in result:
result.append(prospective)
for rp in self.rpaths_for_bundled_shared_libraries(target):
if rp not in result:
result += [rp]
return result
    def object_filename_from_source(self, target, source, is_unity):
        """Map a source mesonlib.File to the object file name it compiles to.

        Returns None for files that produce no object (non-sources/headers)
        unless unity-building. Built sources get a 'meson-generated_' prefix
        so they cannot collide with objects from same-named source-tree
        files; path separators are flattened to '_' at the end so every
        object lands in a single directory.
        """
        assert isinstance(source, mesonlib.File)
        build_dir = self.environment.get_build_dir()
        rel_src = source.rel_to_builddir(self.build_to_src)
        if (not self.environment.is_source(rel_src) or
            self.environment.is_header(rel_src)) and not is_unity:
            return None
        # foo.vala files compile down to foo.c and then foo.c.o, not foo.vala.o
        if rel_src.endswith(('.vala', '.gs')):
            # See description in generate_vala_compile for this logic.
            if source.is_built:
                if os.path.isabs(rel_src):
                    # Strip the build-dir prefix to get a build-relative path.
                    rel_src = rel_src[len(build_dir) + 1:]
                rel_src = os.path.relpath(rel_src, self.get_target_private_dir(target))
            else:
                rel_src = os.path.basename(rel_src)
            # NOTE(review): [:-5] strips len('.vala') chars; for '.gs' sources
            # this also removes two stem characters — confirm this matches the
            # naming used by generate_vala_compile.
            if is_unity:
                return 'meson-generated_' + rel_src[:-5] + '.c.' + self.environment.get_object_suffix()
            # A meson- prefixed directory is reserved; hopefully no-one creates a file name with such a weird prefix.
            source = 'meson-generated_' + rel_src[:-5] + '.c'
        elif source.is_built:
            if os.path.isabs(rel_src):
                rel_src = rel_src[len(build_dir) + 1:]
            targetdir = self.get_target_private_dir(target)
            # A meson- prefixed directory is reserved; hopefully no-one creates a file name with such a weird prefix.
            source = 'meson-generated_' + os.path.relpath(rel_src, targetdir)
        else:
            if os.path.isabs(rel_src):
                # Not from the source directory; hopefully this doesn't conflict with user's source files.
                source = os.path.basename(rel_src)
            else:
                source = os.path.relpath(os.path.join(build_dir, rel_src),
                                         os.path.join(self.environment.get_source_dir(), target.get_subdir()))
        return source.replace('/', '_').replace('\\', '_') + '.' + self.environment.get_object_suffix()
    def determine_ext_objs(self, target, extobj, proj_dir_to_build_root):
        """Resolve an ExtractedObjects entry to the object file paths it covers."""
        result = []
        targetdir = self.get_target_private_dir(extobj.target)
        # With unity builds, there's just one object that contains all the
        # sources, and we only support extracting all the objects in this mode,
        # so just return that.
        if self.is_unity(target):
            comp = get_compiler_for_source(extobj.target.compilers.values(),
                                           extobj.srclist[0])
            # There is a potential conflict here, but it is unlikely that
            # anyone both enables unity builds and has a file called foo-unity.cpp.
            osrc = self.get_unity_source_file(extobj.target,
                                              comp.get_default_suffix())
            objname = self.object_filename_from_source(extobj.target, osrc, True)
            objname = objname.replace('/', '_').replace('\\', '_')
            objpath = os.path.join(proj_dir_to_build_root, targetdir, objname)
            return [objpath]
        # Non-unity: one object path per extracted source file.
        for osrc in extobj.srclist:
            objname = self.object_filename_from_source(extobj.target, osrc, False)
            if objname:
                objpath = os.path.join(proj_dir_to_build_root, targetdir, objname)
                result.append(objpath)
        return result
def get_pch_include_args(self, compiler, target):
args = []
pchpath = self.get_target_private_dir(target)
includeargs = compiler.get_include_args(pchpath, False)
p = target.get_pch(compiler.get_language())
if p:
args += compiler.get_pch_use_args(pchpath, p[0])
return includeargs + args
    @staticmethod
    def escape_extra_args(compiler, args):
        """Escape backslashes in -D//D preprocessor defines for Windows compilers.

        Non-Windows platforms need no escaping. MSVC only needs help when a
        define ends in a backslash; MinGW GCC needs every backslash in a
        define doubled. Non-define arguments pass through unchanged.
        """
        # No extra escaping/quoting needed when not running on Windows
        if not mesonlib.is_windows():
            return args
        extra_args = []
        # Compiler-specific escaping is needed for -D args but not for any others
        if compiler.get_id() == 'msvc':
            # MSVC needs escaping when a -D argument ends in \ or \"
            for arg in args:
                if arg.startswith('-D') or arg.startswith('/D'):
                    # Without extra escaping for these two, the next character
                    # gets eaten
                    if arg.endswith('\\'):
                        arg += '\\'
                    elif arg.endswith('\\"'):
                        arg = arg[:-2] + '\\\\"'
                extra_args.append(arg)
        else:
            # MinGW GCC needs all backslashes in defines to be doubly-escaped
            # FIXME: Not sure about Cygwin or Clang
            for arg in args:
                if arg.startswith('-D') or arg.startswith('/D'):
                    arg = arg.replace('\\', '\\\\')
                extra_args.append(arg)
        return extra_args
    def generate_basic_compiler_args(self, target, compiler, no_warn_args=False):
        """Assemble the base compile-argument list for *target* with *compiler*.

        Arguments are appended in a deliberate override order (CompilerArgs
        handles dedup/override semantics): always-args, warnings, option
        args, buildtype args, project args, global args, environment args,
        PIC flags, and finally per-dependency compile args. Do not reorder.
        """
        # Create an empty commands list, and start adding arguments from
        # various sources in the order in which they must override each other
        # starting from hard-coded defaults followed by build options and so on.
        commands = CompilerArgs(compiler)
        copt_proxy = OptionOverrideProxy(target.option_overrides, self.environment.coredata.compiler_options)
        # First, the trivial ones that are impossible to override.
        #
        # Add -nostdinc/-nostdinc++ if needed; can't be overridden
        commands += self.get_cross_stdlib_args(target, compiler)
        # Add things like /NOLOGO or -pipe; usually can't be overridden
        commands += compiler.get_always_args()
        # Only add warning-flags by default if the buildtype enables it, and if
        # we weren't explicitly asked to not emit warnings (for Vala, f.ex)
        if no_warn_args:
            commands += compiler.get_no_warn_args()
        elif self.get_option_for_target('buildtype', target) != 'plain':
            commands += compiler.get_warn_args(self.get_option_for_target('warning_level', target))
        # Add -Werror if werror=true is set in the build options set on the
        # command-line or default_options inside project(). This only sets the
        # action to be done for warnings if/when they are emitted, so it's ok
        # to set it after get_no_warn_args() or get_warn_args().
        if self.get_option_for_target('werror', target):
            commands += compiler.get_werror_args()
        # Add compile args for c_* or cpp_* build options set on the
        # command-line or default_options inside project().
        commands += compiler.get_option_compile_args(copt_proxy)
        # Add buildtype args: optimization level, debugging, etc.
        commands += compiler.get_buildtype_args(self.get_option_for_target('buildtype', target))
        # Add compile args added using add_project_arguments()
        commands += self.build.get_project_args(compiler, target.subproject)
        # Add compile args added using add_global_arguments()
        # These override per-project arguments
        commands += self.build.get_global_args(compiler)
        if not target.is_cross:
            # Compile args added from the env: CFLAGS/CXXFLAGS, etc. We want these
            # to override all the defaults, but not the per-target compile args.
            commands += self.environment.coredata.external_args[compiler.get_language()]
        # Always set -fPIC for shared libraries
        if isinstance(target, build.SharedLibrary):
            commands += compiler.get_pic_args()
        # Set -fPIC for static libraries by default unless explicitly disabled
        if isinstance(target, build.StaticLibrary) and target.pic:
            commands += compiler.get_pic_args()
        # Add compile args needed to find external dependencies. Link args are
        # added while generating the link command.
        # NOTE: We must preserve the order in which external deps are
        # specified, so we reverse the list before iterating over it.
        for dep in reversed(target.get_external_deps()):
            if not dep.found():
                continue
            if compiler.language == 'vala':
                if isinstance(dep, dependencies.PkgConfigDependency):
                    if dep.name == 'glib-2.0' and dep.version_reqs is not None:
                        for req in dep.version_reqs:
                            if req.startswith(('>=', '==')):
                                commands += ['--target-glib', req[2:]]
                                break
                    commands += ['--pkg', dep.name]
                elif isinstance(dep, dependencies.ExternalLibrary):
                    commands += dep.get_link_args('vala')
            else:
                commands += dep.get_compile_args()
            # Qt needs -fPIC for executables
            # XXX: We should move to -fPIC for all executables
            if isinstance(target, build.Executable):
                commands += dep.get_exe_args(compiler)
            # For 'automagic' deps: Boost and GTest. Also dependency('threads').
            # pkg-config puts the thread flags itself via `Cflags:`
            if dep.need_threads():
                commands += compiler.thread_flags(self.environment)
        # Fortran requires extra include directives.
        if compiler.language == 'fortran':
            for lt in target.link_targets:
                priv_dir = os.path.join(self.get_target_dir(lt), lt.get_basename() + lt.type_suffix())
                incflag = compiler.get_include_args(priv_dir, False)
                commands += incflag
        return commands
def build_target_link_arguments(self, compiler, deps):
args = []
for d in deps:
if not (d.is_linkable_target()):
raise RuntimeError('Tried to link with a non-library target "%s".' % d.get_basename())
d_arg = self.get_target_filename_for_linking(d)
if not d_arg:
continue
if isinstance(compiler, (compilers.LLVMDCompiler, compilers.DmdDCompiler)):
d_arg = '-L' + d_arg
args.append(d_arg)
return args
def determine_windows_extra_paths(self, target, extra_bdeps):
'''On Windows there is no such thing as an rpath.
We must determine all locations of DLLs that this exe
links to and return them so they can be used in unit
tests.'''
result = []
prospectives = []
if isinstance(target, build.Executable):
prospectives = target.get_transitive_link_deps()
# External deps
for deppath in self.rpaths_for_bundled_shared_libraries(target):
result.append(os.path.normpath(os.path.join(self.environment.get_build_dir(), deppath)))
for bdep in extra_bdeps:
prospectives += bdep.get_transitive_link_deps()
# Internal deps
for ld in prospectives:
if ld == '' or ld == '.':
continue
dirseg = os.path.join(self.environment.get_build_dir(), self.get_target_dir(ld))
if dirseg not in result:
result.append(dirseg)
return result
def write_benchmark_file(self, datafile):
self.write_test_serialisation(self.build.get_benchmarks(), datafile)
def write_test_file(self, datafile):
self.write_test_serialisation(self.build.get_tests(), datafile)
    def write_test_serialisation(self, tests, datafile):
        """Pickle a list of TestSerialisation objects for *tests* into *datafile*.

        For each test this resolves the executable command, whether a cross
        exe wrapper is required, extra DLL search paths on Windows/Cygwin,
        and the fully-resolved command-line arguments.
        """
        arr = []
        for t in tests:
            exe = t.get_exe()
            if isinstance(exe, dependencies.ExternalProgram):
                cmd = exe.get_command()
            else:
                cmd = [os.path.join(self.environment.get_build_dir(), self.get_target_filename(t.get_exe()))]
            is_cross = self.environment.is_cross_build() and \
                self.environment.cross_info.need_cross_compiler() and \
                self.environment.cross_info.need_exe_wrapper()
            if isinstance(exe, build.BuildTarget):
                # A natively-built target in a cross build needs no wrapper.
                is_cross = is_cross and exe.is_cross
            if isinstance(exe, dependencies.ExternalProgram):
                # E.g. an external verifier or simulator program run on a generated executable.
                # Can always be run.
                is_cross = False
            if is_cross:
                exe_wrapper = self.environment.cross_info.config['binaries'].get('exe_wrapper', None)
            else:
                exe_wrapper = None
            if mesonlib.is_windows() or mesonlib.is_cygwin():
                extra_paths = self.determine_windows_extra_paths(exe, [])
            else:
                extra_paths = []
            # Resolve every test argument to a concrete string.
            cmd_args = []
            for a in t.cmd_args:
                if hasattr(a, 'held_object'):
                    a = a.held_object
                if isinstance(a, mesonlib.File):
                    a = os.path.join(self.environment.get_build_dir(), a.rel_to_builddir(self.build_to_src))
                    cmd_args.append(a)
                elif isinstance(a, str):
                    cmd_args.append(a)
                elif isinstance(a, build.Target):
                    cmd_args.append(self.get_target_filename(a))
                else:
                    raise MesonException('Bad object in test command.')
            ts = TestSerialisation(t.get_name(), t.suite, cmd, is_cross, exe_wrapper,
                                   t.is_parallel, cmd_args, t.env, t.should_fail,
                                   t.timeout, t.workdir, extra_paths)
            arr.append(ts)
        pickle.dump(arr, datafile)
def generate_depmf_install(self, d):
if self.build.dep_manifest_name is None:
return
ifilename = os.path.join(self.environment.get_build_dir(), 'depmf.json')
ofilename = os.path.join(self.environment.get_prefix(), self.build.dep_manifest_name)
mfobj = {'type': 'dependency manifest', 'version': '1.0', 'projects': self.build.dep_manifest}
with open(ifilename, 'w') as f:
f.write(json.dumps(mfobj))
# Copy file from, to, and with mode unchanged
d.data.append([ifilename, ofilename, None])
def get_regen_filelist(self):
'''List of all files whose alteration means that the build
definition needs to be regenerated.'''
deps = [os.path.join(self.build_to_src, df)
for df in self.interpreter.get_build_def_files()]
if self.environment.is_cross_build():
deps.append(os.path.join(self.build_to_src,
self.environment.coredata.cross_file))
deps.append('meson-private/coredata.dat')
if os.path.exists(os.path.join(self.environment.get_source_dir(), 'meson_options.txt')):
deps.append(os.path.join(self.build_to_src, 'meson_options.txt'))
for sp in self.build.subprojects.keys():
fname = os.path.join(self.environment.get_source_dir(), sp, 'meson_options.txt')
if os.path.isfile(fname):
deps.append(os.path.join(self.build_to_src, sp, 'meson_options.txt'))
return deps
def exe_object_to_cmd_array(self, exe):
if self.environment.is_cross_build() and \
self.environment.cross_info.need_exe_wrapper() and \
isinstance(exe, build.BuildTarget) and exe.is_cross:
if 'exe_wrapper' not in self.environment.cross_info.config['binaries']:
s = 'Can not use target %s as a generator because it is cross-built\n'
s += 'and no exe wrapper is defined. You might want to set it to native instead.'
s = s % exe.name
raise MesonException(s)
if isinstance(exe, build.BuildTarget):
exe_arr = [os.path.join(self.environment.get_build_dir(), self.get_target_filename(exe))]
else:
exe_arr = exe.get_command()
return exe_arr
def replace_extra_args(self, args, genlist):
final_args = []
for a in args:
if a == '@EXTRA_ARGS@':
final_args += genlist.get_extra_args()
else:
final_args.append(a)
return final_args
def replace_outputs(self, args, private_dir, output_list):
newargs = []
regex = re.compile('@OUTPUT(\d+)@')
for arg in args:
m = regex.search(arg)
while m is not None:
index = int(m.group(1))
src = '@OUTPUT%d@' % index
arg = arg.replace(src, os.path.join(private_dir, output_list[index]))
m = regex.search(arg)
newargs.append(arg)
return newargs
def get_build_by_default_targets(self):
result = OrderedDict()
# Get all build and custom targets that must be built by default
for name, t in self.build.get_targets().items():
if t.build_by_default or t.install or t.build_always:
result[name] = t
# Get all targets used as test executables and arguments. These must
# also be built by default. XXX: Sometime in the future these should be
# built only before running tests.
for t in self.build.get_tests():
exe = t.exe
if hasattr(exe, 'held_object'):
exe = exe.held_object
if isinstance(exe, (build.CustomTarget, build.BuildTarget)):
result[exe.get_id()] = exe
for arg in t.cmd_args:
if hasattr(arg, 'held_object'):
arg = arg.held_object
if not isinstance(arg, (build.CustomTarget, build.BuildTarget)):
continue
result[arg.get_id()] = arg
return result
def get_custom_target_provided_libraries(self, target):
libs = []
for t in target.get_generated_sources():
if not isinstance(t, build.CustomTarget):
continue
for f in t.get_outputs():
if self.environment.is_library(f):
libs.append(os.path.join(self.get_target_dir(t), f))
return libs
def is_unity(self, target):
optval = self.get_option_for_target('unity', target)
if optval == 'on' or (optval == 'subprojects' and target.subproject != ''):
return True
return False
def get_custom_target_sources(self, target):
'''
Custom target sources can be of various object types; strings, File,
BuildTarget, even other CustomTargets.
Returns the path to them relative to the build root directory.
'''
srcs = []
for i in target.get_sources():
if hasattr(i, 'held_object'):
i = i.held_object
if isinstance(i, str):
fname = [os.path.join(self.build_to_src, target.subdir, i)]
elif isinstance(i, build.BuildTarget):
fname = [self.get_target_filename(i)]
elif isinstance(i, build.CustomTarget):
fname = [os.path.join(self.get_target_dir(i), p) for p in i.get_outputs()]
elif isinstance(i, build.GeneratedList):
fname = [os.path.join(self.get_target_private_dir(target), p) for p in i.get_outputs()]
else:
fname = [i.rel_to_builddir(self.build_to_src)]
if target.absolute_paths:
fname = [os.path.join(self.environment.get_build_dir(), f) for f in fname]
srcs += fname
return srcs
def get_custom_target_depend_files(self, target, absolute_paths=False):
deps = []
for i in target.depend_files:
if isinstance(i, mesonlib.File):
if absolute_paths:
deps.append(i.absolute_path(self.environment.get_source_dir(),
self.environment.get_build_dir()))
else:
deps.append(i.rel_to_builddir(self.build_to_src))
else:
if absolute_paths:
deps.append(os.path.join(self.environment.get_source_dir(), target.subdir, i))
else:
deps.append(os.path.join(self.build_to_src, target.subdir, i))
return deps
    def eval_custom_target_command(self, target, absolute_outputs=False):
        """Resolve a custom target's inputs, outputs and command line.

        Returns (inputs, outputs, cmd) with paths relative to the build root,
        or absolute when *absolute_outputs* is set (used by the VS backend).
        Template strings such as @SOURCE_ROOT@, @BUILD_ROOT@, @DEPFILE@ and
        @PRIVATE_OUTDIR_*@ are substituted here.
        """
        # We want the outputs to be absolute only when using the VS backend
        # XXX: Maybe allow the vs backend to use relative paths too?
        source_root = self.build_to_src
        build_root = '.'
        outdir = self.get_target_dir(target)
        if absolute_outputs:
            source_root = self.environment.get_source_dir()
            build_root = self.environment.get_source_dir()
            outdir = os.path.join(self.environment.get_build_dir(), outdir)
        outputs = []
        for i in target.get_outputs():
            outputs.append(os.path.join(outdir, i))
        inputs = self.get_custom_target_sources(target)
        # Evaluate the command list
        cmd = []
        for i in target.command:
            if isinstance(i, build.Executable):
                cmd += self.exe_object_to_cmd_array(i)
                continue
            elif isinstance(i, build.CustomTarget):
                # GIR scanner will attempt to execute this binary but
                # it assumes that it is in path, so always give it a full path.
                tmp = i.get_outputs()[0]
                i = os.path.join(self.get_target_dir(i), tmp)
            elif isinstance(i, mesonlib.File):
                i = i.rel_to_builddir(self.build_to_src)
                if target.absolute_paths:
                    i = os.path.join(self.environment.get_build_dir(), i)
            # FIXME: str types are blindly added ignoring 'target.absolute_paths'
            # because we can't know if they refer to a file or just a string
            elif not isinstance(i, str):
                err_msg = 'Argument {0} is of unknown type {1}'
                raise RuntimeError(err_msg.format(str(i), str(type(i))))
            elif '@SOURCE_ROOT@' in i:
                i = i.replace('@SOURCE_ROOT@', source_root)
            elif '@BUILD_ROOT@' in i:
                i = i.replace('@BUILD_ROOT@', build_root)
            elif '@DEPFILE@' in i:
                if target.depfile is None:
                    msg = 'Custom target {!r} has @DEPFILE@ but no depfile ' \
                          'keyword argument.'.format(target.name)
                    raise MesonException(msg)
                dfilename = os.path.join(outdir, target.depfile)
                i = i.replace('@DEPFILE@', dfilename)
            elif '@PRIVATE_OUTDIR_' in i:
                match = re.search('@PRIVATE_OUTDIR_(ABS_)?([^/\s*]*)@', i)
                if not match:
                    msg = 'Custom target {!r} has an invalid argument {!r}' \
                          ''.format(target.name, i)
                    raise MesonException(msg)
                source = match.group(0)
                # ABS_ variants (or absolute_paths targets) expand to an
                # absolute path; otherwise relative to the build root.
                if match.group(1) is None and not target.absolute_paths:
                    lead_dir = ''
                else:
                    lead_dir = self.environment.get_build_dir()
                i = i.replace(source, os.path.join(lead_dir, outdir))
            cmd.append(i)
        # Substitute the rest of the template strings
        values = mesonlib.get_filenames_templates_dict(inputs, outputs)
        cmd = mesonlib.substitute_values(cmd, values)
        # This should not be necessary but removing it breaks
        # building GStreamer on Windows. The underlying issue
        # is problems with quoting backslashes on Windows
        # which is the seventh circle of hell. The downside is
        # that this breaks custom targets whose command lines
        # have backslashes. If you try to fix this be sure to
        # check that it does not break GST.
        #
        # The bug causes file paths such as c:\foo to get escaped
        # into c:\\foo.
        #
        # Unfortunately we have not been able to come up with an
        # isolated test case for this so unless you manage to come up
        # with one, the only way is to test the building with Gst's
        # setup. Note this in your MR or ping us and we will get it
        # fixed.
        #
        # https://github.com/mesonbuild/meson/pull/737
        cmd = [i.replace('\\', '/') for i in cmd]
        return inputs, outputs, cmd
def run_postconf_scripts(self):
env = {'MESON_SOURCE_ROOT': self.environment.get_source_dir(),
'MESON_BUILD_ROOT': self.environment.get_build_dir(),
'MESONINTROSPECT': ' '.join([shlex.quote(x) for x in self.environment.get_build_command() + ['introspect']]),
}
child_env = os.environ.copy()
child_env.update(env)
for s in self.build.postconf_scripts:
cmd = s['exe'] + s['args']
subprocess.check_call(cmd, env=child_env)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__all__ = ['crawtext']
'''Crawtext.
Usage:
crawtext_4.py <project> crawl <query> [--repeat]
crawtext_4.py <project> discover <query> [--file=<filename> | --key=<bing_api_key> | --file=<filename> --key=<bing_api_key>] [--repeat]
crawtext_4.py <project> start <query>
crawtext_4.py <project> stop
crawtext_4.py (-h | --help)
crawtext_4.py --version
Options:
--file Complete path of the sourcefile.
--key Bing API Key for Search.
--repeat Scheduled task for every monday @ 5:30.
-h --help Show usage and Options.
--version Show versions.
'''
from os.path import exists
import sys
import requests
import json
import re
#import threading
#import Queue
import pymongo
from pymongo import MongoClient
from pymongo import errors
from bs4 import BeautifulSoup as bs
from urlparse import urlparse
from random import choice
#from pygoose import *
from tld import get_tld
import datetime
import __future__
from docopt import docopt
from abpy import Filter
#from scheduler import *
from database import Database
# File extensions (and media types) that should never be crawled.
unwanted_extensions = ['css','js','gif','asp', 'GIF','jpeg','JPEG','jpg','JPG','pdf','PDF','ico','ICO','png','PNG','dtd','DTD', 'mp4', 'mp3', 'mov', 'zip','bz2', 'gz', ]
# FIX: file() is a Python-2-only builtin removed in Python 3; open() is the
# equivalent in both versions.
adblock = Filter(open('easylist.txt'))
class Page(object):
    '''Page factory: fetch one url, validate the response, and extract its
    content, outlinks and metadata.'''
    def __init__(self, url, query):
        self.url = url
        self.query = query
        self.status = None
        self.error_type = None
        self.info = {}
        self.outlinks = None
    def check(self):
        '''Bool: check the format of the next url compared to curr url'''
        if self.url is None or len(self.url) <= 1 or self.url == "\n":
            self.error_type = "Url is empty"
            return False
        elif (( self.url.split('.')[-1] in unwanted_extensions ) and ( len( adblock.match(self.url) ) > 0 ) ):
            self.error_type="Url has not a proprer extension or page is an advertissement"
            return False
        else:
            return True
    def request(self):
        '''Bool request a webpage: return boolean and update src'''
        try:
            requests.adapters.DEFAULT_RETRIES = 2
            user_agents = [u'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1', u'Mozilla/5.0 (Windows NT 6.1; rv:15.0) Gecko/20120716 Firefox/15.0a2', u'Mozilla/5.0 (compatible; MSIE 10.6; Windows NT 6.1; Trident/5.0; InfoPath.2; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 2.0.50727) 3gpp-gba UNTRUSTED/1.0', u'Opera/9.80 (Windows NT 6.1; U; es-ES) Presto/2.9.181 Version/12.00']
            headers = {'User-Agent': choice(user_agents),}
            # FIX: the original proxies dict repeated the "https" key, so only
            # the last entry ever took effect; keep that effective proxy.
            proxies = {"https": '88.165.134.24:3128'}
            self.req = requests.get((self.url), headers=headers, allow_redirects=True, proxies=proxies, timeout=5)
            try:
                self.src = self.req.text
                return True
            except Exception as e:
                self.error_type = "Request answer was not understood %s" %e
                return False
            # FIX: removed an unreachable try/else clause ("Not relevant"):
            # both branches above return, so the else body could never run.
        except requests.exceptions.MissingSchema:
            self.error_type = "Incorrect url %s" %self.url
            return False
        except Exception as e:
            self.error_type = str(e)
            return False
    def control(self):
        '''Bool control the result of request return a boolean'''
        # Content-type is not html
        if 'text/html' not in self.req.headers['content-type']:
            self.error_type="Content type is not TEXT/HTML"
            return False
        # Error on resource or on server
        elif self.req.status_code in range(400,520):
            self.error_type="Connexion error"
            return False
        # NOTE(review): redirect detection was disabled in the original
        # (commented-out history/status 3xx check); redirects pass through.
        else:
            return True
    def extract(self):
        '''Dict extract content and info of webpage return boolean and self.info'''
        try:
            # NOTE(review): Goose is never imported (the pygoose import at
            # the top of the file is commented out), so this raises NameError
            # which is caught below and stored in error_type. Restore the
            # Goose import to enable content extraction.
            g = Goose()
            article = g.extract(raw_html=self.src)
            # Keep only webpages relevant to the query.
            if self.filter() is True:
                self.outlinks = set([self.clean_url(url=e.attrs['href']) for e in bs(self.src).find_all('a', {'href': True})])
                self.info = {
                    "url":self.url,
                    "domain": get_tld(self.url),
                    "outlinks": list(self.outlinks),
                    "backlinks":[n for n in self.outlinks if n == self.url],
                    "texte": article.cleaned_text,
                    "title": article.title,
                    "meta_description":bs(article.meta_description).text
                }
                return self.info
            # Irrelevant pages fall through and return None (falsy).
        except Exception as e:
            self.error_type = str(e)
            return False
    def filter(self):
        '''Bool Decide if page is relevant and match the correct query. Reformat the query properly: supports AND, OR and space'''
        if 'OR' in self.query:
            for each in self.query.split('OR'):
                query4re = each.lower().replace(' ', '.*')
                if re.search(query4re, self.src, re.IGNORECASE) or re.search(query4re, self.url, re.IGNORECASE):
                    return True
            # FIX: return an explicit False instead of implicitly returning None.
            return False
        elif 'AND' in self.query:
            # FIX: the query is lowercased first, so the original
            # .replace(' AND ', '.*') never matched and the regex required a
            # literal "and" in the page; replace the lowercased operator.
            query4re = self.query.lower().replace(' and ', '.*').replace(' ', '.*')
            return bool(re.search(query4re, self.src, re.IGNORECASE) or re.search(query4re, self.url, re.IGNORECASE))
        else:
            query4re = self.query.lower().replace(' ', '.*')
            return bool(re.search(query4re, self.src, re.IGNORECASE) or re.search(query4re, self.url, re.IGNORECASE))
    def bad_status(self):
        '''create a msg_log {"url":self.url, "error_code": self.req.status_code, "error_type": self.error_type, "status": False}'''
        try:
            if self.req.status_code is not None and self.error_type is not None:
                return {"url":self.url, "error_code": self.req.status_code, "type": self.error_type, "status": False}
            elif self.req is None and self.error_type is not None:
                return {"url":self.url, "error_code": None, "type": self.error_type, "status": False}
            elif self.req is not None and self.error_type is None:
                return {"url":self.url, "error_code": None, "type": self.req.status_code, "status": False}
            else:
                return {"url":self.url,"status": False}
        except Exception as e:
            # self.req may not exist at all when the request itself failed;
            # the resulting AttributeError lands here.
            print(e)
            return {"url":self.url, "error_code": "Request Error", "type": self.error_type, "status": False}
    def clean_url(self, url):
        ''' utility to normalize url and discard unwanted extension : return a url or None'''
        #ref tld: http://mxr.mozilla.org/mozilla-central/source/netwerk/dns/effective_tld_names.dat?raw=1
        # FIX: the original test (`url not in [...] or url not in 'javascript'`)
        # combined a reversed substring check with `or`, so it was almost
        # always True; reject placeholders and javascript: links explicitly.
        if url in ("#", "/", None, "\n", "") or url.startswith('javascript'):
            return None
        self.netloc = urlparse(self.url).netloc
        uid = urlparse(url)
        # If next_url is relative, take the previous url's netloc.
        if uid.netloc == "":
            if len(uid.path) <= 1:
                return None
            elif (uid.path[0] != "/" and self.netloc[-1] != "/"):
                return "http://" + self.netloc + "/" + uid.path
            else:
                return "http://" + self.netloc + uid.path
        return url
class Discovery():
'''special method to produces seeds url and send it to sources'''
def __init__(self, db_name, query, path=None, api_key=None):
#constitution de la base
db = Database(db_name)
db.create_tables()
self.seeds = []
self.path = path
self.key = api_key
if query is not None:
if self.path is not None:
self.get_local()
if query is not None:
self.get_bing()
self.send_to_sources(db, query)
self.send_to_queue(db)
def send_to_sources(self, db, query):
for n in self.seeds:
#first send to queue
db.sources.insert({"url":n, "crawl_date": datetime.datetime.today(), "mode":"discovery"} for n in self.seeds if n is not None)
# p = Page(n, query)
# if p.check() and p.request() and p.control() and p.extract() and p.filter():
# #send it to results
# if url not in db.results
# db.results.insert(p.info)
# #send next urls to queue
# for url in p.outlinks:
# if url is not None or url not in db.queue.distinct("url"):
# db.queue.insert({"url":url})
# else:
# #problematic sources are automatically sent to log
# db.log.insert(p.bad_status())
#Todo: integrate it into mail report
# print "Nb de sources", db.sources.count()
# print "Nb urls en traitement", db.queue.count()
# print "nb erreur", db.log.count()
return db
def send_to_queue(self, db):
sources_queue = [{"url":url, "date": datetime.datetime.today()} for url in db.sources.distinct("url") if url not in db.queue.distinct("url")]
if len(sources_queue) != 0:
db.queue.insert(sources_queue)
return db
def get_bing(self):
''' Method to extract results from BING API (Limited to 5000 req/month). '''
print "Searching on Bing"
try:
r = requests.get(
'https://api.datamarket.azure.com/Bing/Search/v1/Web',
params={
'$format' : 'json',
'$top' : 50,
'Query' : '\'%s\'' % self.query,
},
auth=(self.key, self.key)
)
for e in r.json()['d']['results']:
self.seeds.append(e['Url'])
self.seeds = list(set(self.seeds))
print len(self.seeds), results
return True
except:
self.error_type = "Error fetching results from BING API, check your credentials. May exceed the 5000req/month limit "
print self.error_type
return False
def get_local(self):
    """Load seed urls from the text file at self.path (one url per line).

    Returns True on success, False when the file cannot be read.
    """
    print("Harvesting the sources you gave him")
    try:
        # BUG FIX: close the file deterministically and strip trailing
        # newlines so seeds do not carry '\n' into the url checks downstream.
        with open(self.path) as fh:
            for line in fh:
                url = line.strip()
                if url:
                    self.seeds.append(url)
        self.seeds = list(set(self.seeds))
        return True
    except Exception:
        self.error_type = "Error fetching results from file %s. Check if file exists" % self.path
        return False
class Sourcing():
    '''Producer step: push every known source url into the processing queue.'''
    def __init__(self, db_name):
        '''Insert urls from the sources collection into the queue, deduplicated.'''
        db = Database(db_name)
        db.create_tables()
        already_queued = db.queue.distinct("url")
        pending = [{"url": u, "date": datetime.datetime.today()}
                   for u in db.sources.distinct("url") if u not in already_queued]
        if len(pending) != 0:
            db.queue.insert(pending)
def crawler(docopt_args):
    """Main consumer: pop urls from the queue, store pages in results or log.

    docopt_args: parsed command-line dict providing '<project>' and '<query>'.
    """
    start = datetime.datetime.now()
    db_name = docopt_args['<project>']
    query = docopt_args['<query>']
    db = Database(db_name)
    db.create_tables()
    print(db.queue.count())
    # BUG FIX: the original tested `db.queue.count > 0`, comparing the bound
    # method object itself (always truthy in Python 2) instead of calling it.
    while db.queue.count() > 0:
        print("beginning crawl")
        print(db.sources.count())
        print(db.queue.count())
        for url in db.queue.distinct("url"):
            # Skip urls already processed (stored in results or logged).
            if url not in db.results.find({"url": url}) or url not in db.log.find({"url": url}):
                p = Page(url, query)
                if p.check() and p.request() and p.control() and p.extract():
                    db.results.insert(p.info)
                    if p.outlinks is not None:
                        try:
                            for n_url in p.outlinks:
                                if n_url not in db.queue.find({"url": n_url}):
                                    if n_url not in db.results.find({"url": n_url}) or n_url not in db.log.find({"url": n_url}):
                                        db.queue.insert({"url": n_url})
                        # BUG FIX: `except pymongo.errors` named the module,
                        # which raises TypeError once an exception occurs;
                        # catch the package's base exception class instead.
                        except pymongo.errors.PyMongoError:
                            db.log.insert({"url": url, "error_type": "pymongo error inserting outlinks", "status": False})
                else:
                    # Problematic pages are sent to the log with their status.
                    db.log.insert(p.bad_status())
            db.queue.remove({"url": url})
            if db.queue.count() == 0:
                break
        if db.queue.count() == 0:
            break
    end = datetime.datetime.now()
    elapsed = end - start
    print("crawl finished, %i results and %i sources are stored in Mongo Database: %s in %s" % (db.results.count(), db.sources.count(), db_name, elapsed))
def crawtext(docopt_args):
    '''Dispatch the parsed command line (discover / crawl / stop / start).'''
    if docopt_args['discover'] is True:
        print("discovery")
        Discovery(db_name=docopt_args['<project>'],
                  query=docopt_args['<query>'],
                  path=docopt_args['--file'],
                  api_key=docopt_args['--key'])
        Sourcing(db_name=docopt_args['<project>'])
        crawler(docopt_args)
    elif docopt_args['crawl'] is True:
        Sourcing(db_name=docopt_args['<project>'])
        crawler(docopt_args)
    elif docopt_args['stop']:
        print("Process is stopped")
        return
    elif docopt_args['start']:
        # 'start' currently just bootstraps the sources db with a default
        # Bing search (built-in api key) based on the project query.
        Discovery(db_name=docopt_args['<project>'],
                  query=docopt_args['<query>'],
                  api_key="J8zQNrEwAJ2u3VcMykpouyPf4nvA6Wre1019v/dIT0o")
        Sourcing(db_name=docopt_args['<project>'])
        print("DB Sources is created with a first search on BING based on your project name")
        return
    else:
        print("No command supplied, please check command line usage and options.")
        return sys.exit()
# Script entry point: parse the command line with docopt and dispatch.
if __name__ == "__main__":
    args = docopt(__doc__)
    crawtext(args)
    sys.exit()
Fix bug with pip deactivation of autocompletion.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Crawtext.
Usage:
crawtext_4.py <project> crawl <query> [--repeat]
crawtext_4.py <project> discover <query> [--file=<filename> | --key=<bing_api_key> | --file=<filename> --key=<bing_api_key>] [--repeat]
crawtext_4.py <project> start <query>
crawtext_4.py <project> stop
crawtext_4.py (-h | --help)
crawtext_4.py --version
Options:
--file Complete path of the sourcefile.
--key Bing API Key for Search.
--repeat Scheduled task for every monday @ 5:30.
-h --help Show usage and Options.
--version Show versions.
'''
#__all__ = ['crawtext']
#from __future__ import print_function
from os.path import exists
import sys
import requests
import json
import re
#import threading
#import Queue
import pymongo
from pymongo import MongoClient
from pymongo import errors
from bs4 import BeautifulSoup as bs
from urlparse import urlparse
from random import choice
#from pygoose import *
from tld import get_tld
import datetime
import __future__
from docopt import docopt
from abpy import Filter
#from scheduler import *
from database import Database
# File extensions that are never crawled (binary/media/asset resources).
unwanted_extensions = ['css','js','gif','asp', 'GIF','jpeg','JPEG','jpg','JPG','pdf','PDF','ico','ICO','png','PNG','dtd','DTD', 'mp4', 'mp3', 'mov', 'zip','bz2', 'gz', ]
# EasyList-based ad filter used to reject advertisement urls.
# NOTE(review): `file()` is Python 2 only, and the handle is never closed.
adblock = Filter(file('easylist.txt'))
class Page(object):
    '''Page factory: fetch one url, validate the response and extract its
    text content plus outgoing links.'''

    def __init__(self, url, query):
        """url: page address; query: search expression used by filter()."""
        self.url = url
        self.query = query
        self.status = None
        self.error_type = None
        self.info = {}
        self.outlinks = None

    def check(self):
        '''Bool: pre-filter the url before any network access.'''
        if self.url is None or len(self.url) <= 1 or self.url == "\n":
            self.error_type = "Url is empty"
            return False
        # BUG FIX: the original used `and`, rejecting a url only when it had
        # BOTH an unwanted extension AND an adblock match; the error message
        # documents either condition, which is what is enforced here.
        elif (self.url.split('.')[-1] in unwanted_extensions) or (len(adblock.match(self.url)) > 0):
            self.error_type = "Url has not a proprer extension or page is an advertissement"
            return False
        else:
            return True

    def request(self):
        '''Bool: download the page; on success store the body in self.src.'''
        try:
            requests.adapters.DEFAULT_RETRIES = 2
            # Rotate between a few desktop user agents to look less bot-like.
            user_agents = [u'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1', u'Mozilla/5.0 (Windows NT 6.1; rv:15.0) Gecko/20120716 Firefox/15.0a2', u'Mozilla/5.0 (compatible; MSIE 10.6; Windows NT 6.1; Trident/5.0; InfoPath.2; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 2.0.50727) 3gpp-gba UNTRUSTED/1.0', u'Opera/9.80 (Windows NT 6.1; U; es-ES) Presto/2.9.181 Version/12.00']
            headers = {'User-Agent': choice(user_agents),}
            # NOTE: the original dict literal repeated the "https" key, so
            # only this last proxy was ever used; the dead entry is dropped.
            proxies = {"https": '88.165.134.24:3128', }
            self.req = requests.get(self.url, headers=headers, allow_redirects=True, proxies=proxies, timeout=5)
            # The unreachable `else: "Not relevant"` branch of the original
            # inner try (both paths returned first) has been removed.
            try:
                self.src = self.req.text
                return True
            except Exception as e:
                self.error_type = "Request answer was not understood %s" % e
                return False
        except requests.exceptions.MissingSchema:
            self.error_type = "Incorrect url %s" % self.url
            return False
        except Exception as e:
            self.error_type = str(e)
            return False

    def control(self):
        '''Bool: validate the HTTP response (content type and status code).'''
        if 'text/html' not in self.req.headers['content-type']:
            self.error_type = "Content type is not TEXT/HTML"
            return False
        # Client or server error on the resource.
        elif self.req.status_code in range(400, 520):
            self.error_type = "Connexion error"
            return False
        else:
            return True

    def extract(self):
        '''Extract content/metadata of the page; returns self.info or a falsy value.'''
        try:
            # NOTE(review): Goose is never imported (the pygoose import is
            # commented out at module top), so this raises NameError and
            # extract() always returns False — restore the import to fix.
            g = Goose()
            article = g.extract(raw_html=self.src)
            # Keep only pages whose content matches the query.
            if self.filter() is True:
                self.outlinks = set([self.clean_url(url=e.attrs['href']) for e in bs(self.src).find_all('a', {'href': True})])
                self.info = {
                    "url": self.url,
                    "domain": get_tld(self.url),
                    "outlinks": list(self.outlinks),
                    # Links pointing back at the page itself.
                    "backlinks": [n for n in self.outlinks if n == self.url],
                    "texte": article.cleaned_text,
                    "title": article.title,
                    "meta_description": bs(article.meta_description).text
                }
                return self.info
            # Falls through (None) when the page does not match the query.
        except Exception as e:
            self.error_type = str(e)
            return False

    def filter(self):
        '''Bool: does the page (body or url) match self.query?

        The query supports OR, AND and plain spaces between terms.
        '''
        if 'OR' in self.query:
            for each in self.query.split('OR'):
                query4re = each.lower().replace(' ', '.*')
                if re.search(query4re, self.src, re.IGNORECASE) or re.search(query4re, self.url, re.IGNORECASE):
                    return True
            # BUG FIX: the original fell off the loop returning None; make the
            # no-match result an explicit False (same truthiness for callers).
            return False
        elif 'AND' in self.query:
            # BUG FIX: the original lowered the query first, so the
            # replace(' AND ', ...) never matched and the literal word "and"
            # leaked into the pattern; strip the operator before lowering.
            query4re = self.query.replace(' AND ', ' ').lower().replace(' ', '.*')
            return bool(re.search(query4re, self.src, re.IGNORECASE) or re.search(query4re, self.url, re.IGNORECASE))
        else:
            query4re = self.query.lower().replace(' ', '.*')
            return bool(re.search(query4re, self.src, re.IGNORECASE) or re.search(query4re, self.url, re.IGNORECASE))

    def bad_status(self):
        '''Build the log document describing why this page failed.'''
        try:
            assert(self.req)
            if self.req.status_code is not None and self.error_type is not None:
                return {"url": self.url, "error_code": self.req.status_code, "type": self.error_type, "status": False}
            elif self.req is None and self.error_type is not None:
                return {"url": self.url, "error_code": None, "type": self.error_type, "status": False}
            elif self.req is not None and self.error_type is None:
                return {"url": self.url, "error_code": None, "type": self.req.status_code, "status": False}
            else:
                return {"url": self.url, "status": False}
        except Exception:
            # No request object at all (check() failed before any download).
            return {"url": self.url, "error_code": "Request Error", "type": self.error_type, "status": False}

    def clean_url(self, url):
        '''Normalize an outgoing link; returns an absolute url or None.'''
        # BUG FIX: the original condition `url not in [...] or url not in
        # 'javascript'` was almost always True and crashed on url=None
        # (`None not in str` raises TypeError); reject junk values and
        # javascript: pseudo-links explicitly instead.
        if url in ["#", "/", None, "\n", ""] or url.startswith('javascript'):
            return None
        self.netloc = urlparse(self.url).netloc
        uid = urlparse(url)
        # A relative link inherits the netloc of the current page.
        if uid.netloc == "":
            if len(uid.path) <= 1:
                return None
            elif uid.path[0] != "/" and self.netloc[-1] != "/":
                return "http://" + self.netloc + "/" + uid.path
            else:
                return "http://" + self.netloc + uid.path
        return url
class Discovery():
    '''Seed producer: collect seed urls (local file and/or Bing search) and
    push them into the project's sources collection, then into the queue.'''

    def __init__(self, db_name, query, path=None, api_key=None):
        """db_name: Mongo project database; query: search expression;
        path: optional url-list file; api_key: optional Bing API key."""
        db = Database(db_name)
        db.create_tables()
        self.seeds = []
        self.path = path
        self.key = api_key
        # BUG FIX: get_bing() reads self.query, which was never stored before.
        self.query = query
        if query is not None:
            if self.path is not None:
                self.get_local()
            # The duplicated inner `if query is not None` check was redundant.
            self.get_bing()
            self.send_to_sources(db, query)
            self.send_to_queue(db)

    def send_to_sources(self, db, query):
        """Insert every harvested seed url into db.sources; returns db."""
        # BUG FIX: the original re-inserted the full seed generator once per
        # seed, duplicating every url N times.
        docs = [{"url": n, "crawl_date": datetime.datetime.today(), "mode": "discovery"}
                for n in self.seeds if n is not None]
        if docs:
            db.sources.insert(docs)
        # TODO: integrate the collection counts into the mail report.
        return db

    def send_to_queue(self, db):
        """Queue every source url not already queued; returns db."""
        already_queued = db.queue.distinct("url")
        pending = [{"url": url, "date": datetime.datetime.today()}
                   for url in db.sources.distinct("url") if url not in already_queued]
        if len(pending) != 0:
            db.queue.insert(pending)
        return db

    def get_bing(self):
        """Fetch up to 50 results for self.query from the Bing Search API.

        Appends urls to self.seeds (deduplicated). Returns True on success,
        False on any error. NOTE: limited to 5000 requests/month.
        """
        print("Searching on Bing")
        try:
            r = requests.get(
                'https://api.datamarket.azure.com/Bing/Search/v1/Web',
                params={
                    '$format': 'json',
                    '$top': 50,
                    'Query': '\'%s\'' % self.query,
                },
                # The Azure datamarket API uses the key as user and password.
                auth=(self.key, self.key)
            )
            for e in r.json()['d']['results']:
                self.seeds.append(e['Url'])
            self.seeds = list(set(self.seeds))
            # BUG FIX: the original printed the undefined name `results`,
            # raising a NameError mis-reported as an API failure.
            print(len(self.seeds))
            return True
        except Exception:
            self.error_type = "Error fetching results from BING API, check your credentials. May exceed the 5000req/month limit "
            print(self.error_type)
            return False

    def get_local(self):
        """Load seed urls from the text file at self.path (one per line).

        Returns True on success, False if the file cannot be read.
        """
        print("Harvesting the sources you gave him")
        try:
            # BUG FIX: close the file deterministically and strip trailing
            # newlines so seeds do not carry '\n' downstream.
            with open(self.path) as fh:
                for line in fh:
                    url = line.strip()
                    if url:
                        self.seeds.append(url)
            self.seeds = list(set(self.seeds))
            return True
        except Exception:
            self.error_type = "Error fetching results from file %s. Check if file exists" % self.path
            return False
class Sourcing():
    '''Producer step: push every known source url into the processing queue.'''
    def __init__(self, db_name):
        '''Insert urls from the sources collection into the queue, deduplicated.'''
        db = Database(db_name)
        db.create_tables()
        already_queued = db.queue.distinct("url")
        pending = [{"url": u, "date": datetime.datetime.today()}
                   for u in db.sources.distinct("url") if u not in already_queued]
        if len(pending) != 0:
            db.queue.insert(pending)
def crawler(docopt_args):
    """Main consumer: pop urls from the queue, store pages in results or log.

    docopt_args: parsed command-line dict providing '<project>' and '<query>'.
    """
    start = datetime.datetime.now()
    db_name = docopt_args['<project>']
    query = docopt_args['<query>']
    db = Database(db_name)
    db.create_tables()
    print(db.queue.count())
    # BUG FIX: the original tested `db.queue.count > 0`, comparing the bound
    # method object itself (always truthy in Python 2) instead of calling it.
    while db.queue.count() > 0:
        print("beginning crawl")
        print(db.sources.count())
        print(db.queue.count())
        for url in db.queue.distinct("url"):
            # Skip urls already processed (stored in results or logged).
            if url not in db.results.find({"url": url}) or url not in db.log.find({"url": url}):
                p = Page(url, query)
                if p.check() and p.request() and p.control() and p.extract():
                    db.results.insert(p.info)
                    if p.outlinks is not None:
                        try:
                            for n_url in p.outlinks:
                                if n_url not in db.queue.find({"url": n_url}):
                                    if n_url not in db.results.find({"url": n_url}) or n_url not in db.log.find({"url": n_url}):
                                        db.queue.insert({"url": n_url})
                        # BUG FIX: `except pymongo.errors` named the module,
                        # which raises TypeError once an exception occurs;
                        # catch the package's base exception class instead.
                        except pymongo.errors.PyMongoError:
                            db.log.insert({"url": url, "error_type": "pymongo error inserting outlinks", "status": False})
                else:
                    # Problematic pages are sent to the log with their status.
                    db.log.insert(p.bad_status())
            db.queue.remove({"url": url})
            if db.queue.count() == 0:
                break
        if db.queue.count() == 0:
            break
    end = datetime.datetime.now()
    elapsed = end - start
    print("crawl finished, %i results and %i sources are stored in Mongo Database: %s in %s" % (db.results.count(), db.sources.count(), db_name, elapsed))
def crawtext(docopt_args):
    '''Dispatch the parsed command line (discover / crawl / stop / start).'''
    if docopt_args['discover'] is True:
        print("discovery")
        Discovery(db_name=docopt_args['<project>'],
                  query=docopt_args['<query>'],
                  path=docopt_args['--file'],
                  api_key=docopt_args['--key'])
        Sourcing(db_name=docopt_args['<project>'])
        crawler(docopt_args)
    elif docopt_args['crawl'] is True:
        Sourcing(db_name=docopt_args['<project>'])
        crawler(docopt_args)
    elif docopt_args['stop']:
        print("Process is stopped")
        return
    elif docopt_args['start']:
        # 'start' currently just bootstraps the sources db with a default
        # Bing search (built-in api key) based on the project query.
        Discovery(db_name=docopt_args['<project>'],
                  query=docopt_args['<query>'],
                  api_key="J8zQNrEwAJ2u3VcMykpouyPf4nvA6Wre1019v/dIT0o")
        Sourcing(db_name=docopt_args['<project>'])
        print("DB Sources is created with a first search on BING based on your project name")
        return
    else:
        print("No command supplied, please check command line usage and options.")
        return sys.exit()
# Script entry point: parse the command line with docopt and dispatch.
if __name__ == "__main__":
    args = docopt(__doc__)
    crawtext(args)
    sys.exit()
|
""" Build swig, f2py, weave, sources.
"""
import os
import re
from distutils.cmd import Command
from distutils.command import build_ext, build_py
from distutils.util import convert_path
from distutils.dep_util import newer_group, newer
from scipy_distutils import log
from scipy_distutils.misc_util import fortran_ext_match, all_strings
from scipy_distutils.from_template import process_str
class build_src(build_ext.build_ext):
    # Distutils command that generates extension sources before compilation:
    # expands .src templates, runs SWIG on .i files, runs f2py on .pyf /
    # Fortran files, and calls any callable listed among an extension's
    # sources.  Behavior is order-sensitive: templates -> swig -> f2py.

    description = "build sources from SWIG, F2PY files or a function"

    user_options = [
        ('build-src=', 'd', "directory to \"build\" sources to"),
        ('f2pyflags=', None, "additonal flags to f2py"),
        ('swigflags=', None, "additional flags to swig"),
        ('force', 'f', "forcibly build everything (ignore file timestamps)"),
        ('inplace', 'i',
         "ignore build-lib and put compiled extensions into the source " +
         "directory alongside your pure Python modules"),
        ]
    boolean_options = ['force','inplace']
    help_options = []

    def initialize_options(self):
        # All options start unset; finalize_options fills them in from the
        # distribution and the parent 'build' command.
        self.extensions = None
        self.package = None
        self.py_modules = None
        self.build_src = None
        self.build_lib = None
        self.build_base = None
        self.force = None
        self.inplace = None
        self.package_dir = None
        self.f2pyflags = None
        self.swigflags = None
        return

    def finalize_options(self):
        # Inherit build_base/build_lib/force from the top-level 'build'.
        self.set_undefined_options('build',
                                   ('build_base', 'build_base'),
                                   ('build_lib', 'build_lib'),
                                   ('force', 'force'))
        if self.package is None:
            self.package = self.distribution.ext_package
        self.extensions = self.distribution.ext_modules
        self.libraries = self.distribution.libraries or []
        self.py_modules = self.distribution.py_modules
        if self.build_src is None:
            self.build_src = os.path.join(self.build_base, 'src')
        if self.inplace is None:
            # Mirror build_ext's --inplace setting when not given explicitly.
            build_ext = self.get_finalized_command('build_ext')
            self.inplace = build_ext.inplace
        # py_modules is used in build_py.find_package_modules
        # NOTE(review): this overwrites the distribution py_modules read just
        # above — the dict collects swig-generated .py files per package.
        self.py_modules = {}
        if self.f2pyflags is None:
            self.f2pyflags = []
        else:
            self.f2pyflags = self.f2pyflags.split() # XXX spaces??
        if self.swigflags is None:
            self.swigflags = []
        else:
            self.swigflags = self.swigflags.split() # XXX spaces??
        return

    def run(self):
        # Nothing to do when there are neither extensions nor libraries.
        if not (self.extensions or self.libraries):
            return
        self.build_sources()
        return

    def build_sources(self):
        # Generate sources for every extension and every C library.
        self.check_extensions_list(self.extensions)
        for ext in self.extensions:
            self.build_extension_sources(ext)
        for libname_info in self.libraries:
            self.build_library_sources(*libname_info)
        return

    def build_library_sources(self, lib_name, build_info):
        # Run only the callable source generators for a C library entry.
        sources = list(build_info.get('sources',[]))
        if not sources:
            return
        log.info('building library "%s" sources' % (lib_name))
        sources = self.generate_sources(sources, (lib_name, build_info))
        build_info['sources'] = sources
        return

    def build_extension_sources(self, ext):
        # Full pipeline for one extension: generators, templates, swig, f2py,
        # then split off swig-generated .py modules for build_py to install.
        sources = list(ext.sources)
        log.info('building extension "%s" sources' % (ext.name))
        fullname = self.get_ext_fullname(ext.name)
        modpath = fullname.split('.')
        package = '.'.join(modpath[0:-1])
        if self.inplace:
            build_py = self.get_finalized_command('build_py')
            self.ext_target_dir = build_py.get_package_dir(package)
        sources = self.generate_sources(sources, ext)
        sources = self.template_sources(sources, ext)
        sources = self.swig_sources(sources, ext)
        sources = self.f2py_sources(sources, ext)
        sources, py_files = self.filter_py_files(sources)
        if not self.py_modules.has_key(package):
            self.py_modules[package] = []
        modules = []
        for f in py_files:
            module = os.path.splitext(os.path.basename(f))[0]
            modules.append((package, module, f))
        self.py_modules[package] += modules
        ext.sources = sources
        return

    def generate_sources(self, sources, extension):
        # Replace callable entries in `sources` by the file name(s) they
        # return; string entries pass through unchanged.
        new_sources = []
        func_sources = []
        for source in sources:
            if type(source) is type(''):
                new_sources.append(source)
            else:
                func_sources.append(source)
        if not func_sources:
            return new_sources
        if self.inplace:
            build_dir = self.ext_target_dir
        else:
            # `extension` may also be a (lib_name, build_info) tuple.
            if type(extension) is type(()):
                name = extension[0]
            else:
                name = extension.name
            build_dir = os.path.join(*([self.build_src]\
                                       +name.split('.')[:-1]))
        self.mkpath(build_dir)
        for func in func_sources:
            source = func(extension, build_dir)
            if type(source) is type([]):
                [log.info(" adding '%s' to sources." % (s)) for s in source]
                new_sources.extend(source)
            else:
                log.info(" adding '%s' to sources." % (source))
                new_sources.append(source)
        return new_sources

    def filter_py_files(self, sources):
        # Split sources into (non-.py sources, .py files), preserving order.
        new_sources = []
        py_files = []
        for source in sources:
            (base, ext) = os.path.splitext(source)
            if ext=='.py':
                py_files.append(source)
            else:
                new_sources.append(source)
        return new_sources, py_files

    def template_sources(self, sources, extension):
        # Expand .src template files (process_str) into the build tree,
        # regenerating only when the template is newer than the target.
        new_sources = []
        for source in sources:
            (base, ext) = os.path.splitext(source)
            if ext == '.src': # Template file
                if self.inplace:
                    target_dir = os.path.dirname(base)
                else:
                    target_dir = appendpath(self.build_src, os.path.dirname(base))
                self.mkpath(target_dir)
                target_file = os.path.join(target_dir,os.path.basename(base))
                if (self.force or newer(source, target_file)):
                    fid = open(source)
                    outstr = process_str(fid.read())
                    fid.close()
                    fid = open(target_file,'w')
                    fid.write(outstr)
                    fid.close()
                new_sources.append(target_file)
            else:
                new_sources.append(source)
        return new_sources

    def f2py_sources(self, sources, extension):
        # Replace a .pyf interface (or plain Fortran files) with the
        # f2py-generated C wrapper module, plus f2py's fortranobject.{c,h}.
        new_sources = []
        f2py_sources = []
        f_sources = []
        f2py_targets = {}
        target_dirs = []
        ext_name = extension.name.split('.')[-1]
        skip_f2py = 0
        for source in sources:
            (base, ext) = os.path.splitext(source)
            if ext == '.pyf': # F2PY interface file
                if self.inplace:
                    target_dir = os.path.dirname(base)
                else:
                    target_dir = appendpath(self.build_src, os.path.dirname(base))
                if os.path.isfile(source):
                    # The declared module name must match the extension name.
                    name = get_f2py_modulename(source)
                    assert name==ext_name,'mismatch of extension names: '\
                           +source+' provides'\
                           ' '+`name`+' but expected '+`ext_name`
                    target_file = os.path.join(target_dir,name+'module.c')
                else:
                    # Missing interface: assume a pre-generated wrapper exists.
                    log.debug(' source %s does not exist: skipping f2py\'ing.' \
                              % (source))
                    name = ext_name
                    skip_f2py = 1
                    target_file = os.path.join(target_dir,name+'module.c')
                    if not os.path.isfile(target_file):
                        log.debug(' target %s does not exist:\n '\
                                  'Assuming %smodule.c was generated with '\
                                  '"build_src --inplace" command.' \
                                  % (target_file, name))
                        target_dir = os.path.dirname(base)
                        target_file = os.path.join(target_dir,name+'module.c')
                        assert os.path.isfile(target_file),`target_file`+' missing'
                        log.debug(' Yes! Using %s as up-to-date target.' \
                                  % (target_file))
                target_dirs.append(target_dir)
                f2py_sources.append(source)
                f2py_targets[source] = target_file
                new_sources.append(target_file)
            elif fortran_ext_match(ext):
                f_sources.append(source)
            else:
                new_sources.append(source)
        if not (f2py_sources or f_sources):
            return new_sources
        map(self.mkpath, target_dirs)
        f2py_options = extension.f2py_options + self.f2pyflags
        if f2py_sources:
            # Interface-file mode: exactly one .pyf drives the wrapper.
            assert len(f2py_sources)==1,\
                   'only one .pyf file is allowed per extension module but got'\
                   ' more:'+`f2py_sources`
            source = f2py_sources[0]
            target_file = f2py_targets[source]
            target_dir = os.path.dirname(target_file)
            depends = [source] + extension.depends
            if (self.force or newer_group(depends, target_file,'newer')) \
                   and not skip_f2py:
                log.info("f2py: %s" % (source))
                import f2py2e
                f2py2e.run_main(f2py_options + ['--build-dir',target_dir,source])
            else:
                log.debug("  skipping '%s' f2py interface (up-to-date)" % (source))
        else:
            # Scan mode: build the wrapper directly from the Fortran sources.
            #XXX TODO: --inplace support for sdist command
            if type(extension) is type(()): name = extension[0]
            else: name = extension.name
            target_dir = os.path.join(*([self.build_src]\
                                        +name.split('.')[:-1]))
            target_file = os.path.join(target_dir,ext_name + 'module.c')
            new_sources.append(target_file)
            depends = f_sources + extension.depends
            if (self.force or newer_group(depends, target_file, 'newer')) \
                   and not skip_f2py:
                import f2py2e
                log.info("f2py:> %s" % (target_file))
                self.mkpath(target_dir)
                f2py2e.run_main(f2py_options + ['--lower',
                                                '--build-dir',target_dir]+\
                                ['-m',ext_name]+f_sources)
            else:
                log.debug("  skipping f2py fortran files for '%s' (up-to-date)"\
                          % (target_file))
        assert os.path.isfile(target_file),`target_file`+' missing'
        # Every f2py extension also needs f2py's fortranobject.{c,h} support
        # files, copied from the installed f2py2e package when regenerating.
        target_c = os.path.join(self.build_src,'fortranobject.c')
        target_h = os.path.join(self.build_src,'fortranobject.h')
        log.info("  adding '%s' to sources." % (target_c))
        new_sources.append(target_c)
        if self.build_src not in extension.include_dirs:
            log.info("  adding '%s' to include_dirs." \
                     % (self.build_src))
            extension.include_dirs.append(self.build_src)
        if not skip_f2py:
            import f2py2e
            d = os.path.dirname(f2py2e.__file__)
            source_c = os.path.join(d,'src','fortranobject.c')
            source_h = os.path.join(d,'src','fortranobject.h')
            if newer(source_c,target_c) or newer(source_h,target_h):
                self.mkpath(os.path.dirname(target_c))
                self.copy_file(source_c,target_c)
                self.copy_file(source_h,target_h)
        else:
            assert os.path.isfile(target_c),`target_c` + ' missing'
            assert os.path.isfile(target_h),`target_h` + ' missing'
        # Pick up any generated Fortran wrapper files as extra sources.
        for name_ext in ['-f2pywrappers.f','-f2pywrappers2.f90']:
            filename = os.path.join(target_dir,ext_name + name_ext)
            if os.path.isfile(filename):
                log.info("  adding '%s' to sources." % (filename))
                f_sources.append(filename)
        return new_sources + f_sources

    def swig_sources(self, sources, extension):
        # Replace each SWIG .i interface with the generated _wrap.{c,cpp}
        # and register the generated .py proxy module.
        # Assuming SWIG 1.3.14 or later. See compatibility note in
        # http://www.swig.org/Doc1.3/Python.html#Python_nn6
        new_sources = []
        swig_sources = []
        swig_targets = {}
        target_dirs = []
        py_files = [] # swig generated .py files
        target_ext = '.c'
        typ = None
        is_cpp = 0
        skip_swig = 0
        ext_name = extension.name.split('.')[-1]
        for source in sources:
            (base, ext) = os.path.splitext(source)
            if ext == '.i': # SWIG interface file
                if self.inplace:
                    target_dir = os.path.dirname(base)
                    py_target_dir = self.ext_target_dir
                else:
                    target_dir = appendpath(self.build_src, os.path.dirname(base))
                    py_target_dir = target_dir
                if os.path.isfile(source):
                    name = get_swig_modulename(source)
                    # SWIG extensions are named '_<module>', hence [1:].
                    assert name==ext_name[1:],'mismatch of extension names: '\
                           +source+' provides'\
                           ' '+`name`+' but expected '+`ext_name[1:]`
                    if typ is None:
                        # Detect C vs C++ from the first interface file; all
                        # interfaces of one extension must agree.
                        typ = get_swig_target(source)
                        is_cpp = typ=='c++'
                        if is_cpp:
                            target_ext = '.cpp'
                    else:
                        assert typ == get_swig_target(source),`typ`
                    target_file = os.path.join(target_dir,'%s_wrap%s' \
                                               % (name, target_ext))
                else:
                    # Missing interface: assume a pre-generated wrapper exists.
                    log.debug(' source %s does not exist: skipping swig\'ing.' \
                              % (source))
                    name = ext_name[1:]
                    skip_swig = 1
                    target_file = _find_swig_target(target_dir, name)
                    if not os.path.isfile(target_file):
                        log.debug(' target %s does not exist:\n '\
                                  'Assuming %s_wrap.{c,cpp} was generated with '\
                                  '"build_src --inplace" command.' \
                                  % (target_file, name))
                        target_dir = os.path.dirname(base)
                        target_file = _find_swig_target(target_dir, name)
                        assert os.path.isfile(target_file),`target_file`+' missing'
                        log.debug(' Yes! Using %s as up-to-date target.' \
                                  % (target_file))
                target_dirs.append(target_dir)
                new_sources.append(target_file)
                py_files.append(os.path.join(py_target_dir, name+'.py'))
                swig_sources.append(source)
                swig_targets[source] = new_sources[-1]
            else:
                new_sources.append(source)
        if not swig_sources:
            return new_sources
        if skip_swig:
            return new_sources + py_files
        map(self.mkpath, target_dirs)
        swig = self.find_swig()
        swig_cmd = [swig, "-python"]
        if is_cpp:
            swig_cmd.append('-c++')
        for d in extension.include_dirs:
            swig_cmd.append('-I'+d)
        for source in swig_sources:
            target = swig_targets[source]
            depends = [source] + extension.depends
            if self.force or newer_group(depends, target, 'newer'):
                log.info("%s: %s" % (os.path.basename(swig) \
                                     + (is_cpp and '++' or ''), source))
                self.spawn(swig_cmd + self.swigflags \
                           + ["-o", target, '-outdir', py_target_dir, source])
            else:
                log.debug(" skipping '%s' swig interface (up-to-date)" \
                          % (source))
        return new_sources + py_files
def appendpath(prefix, path):
    """Join `path` onto `prefix`; an absolute path is re-rooted under prefix.

    For an absolute path, the part shared with os.path.abspath(prefix) is
    stripped before joining, so generated files land inside the build tree.
    """
    if os.path.isabs(path):
        absprefix = os.path.abspath(prefix)
        d = os.path.commonprefix([absprefix, path])
        subpath = path[len(d):]
        # BUG FIX: when the common prefix stops at a directory boundary
        # (e.g. '/a/b' vs '/a/b/c'), the remainder keeps a leading separator
        # and the original assertion fired; strip it before joining.
        subpath = subpath.lstrip(os.sep)
        # (repr() replaces the Python-2-only backtick syntax.)
        assert not os.path.isabs(subpath), repr(subpath)
        return os.path.join(prefix, subpath)
    return os.path.join(prefix, path)
#### SWIG related auxiliary functions ####
# Matches the `%module <name>` directive in a SWIG interface file.
_swig_module_name_match = re.compile(r'\s*%module\s*(?P<name>[\w_]+)',
                                     re.I).match
# Emacs-style mode headers used to detect the target language of a .i file.
_has_c_header = re.compile(r'-[*]-\s*c\s*-[*]-',re.I).search
_has_cpp_header = re.compile(r'-[*]-\s*c[+][+]\s*-[*]-',re.I).search
def get_swig_target(source):
    """Return 'c++' or 'c' based on the mode header on the file's first line."""
    with open(source, 'r') as fh:
        first_line = fh.readline()
    language = 'c'
    if _has_cpp_header(first_line):
        language = 'c++'
    if _has_c_header(first_line):
        language = 'c'
    return language
def get_swig_modulename(source):
    """Return the module name declared by `%module <name>` in a SWIG file.

    Returns None when no %module directive is found.  (The original left
    `name` unbound in that case and raised NameError; returning None makes
    it consistent with get_f2py_modulename.)
    """
    # Inlined matcher makes the function self-contained.
    module_match = re.compile(r'\s*%module\s*(?P<name>[\w_]+)', re.I).match
    name = None
    with open(source, 'r') as f:
        for line in f:
            m = module_match(line)
            if m:
                name = m.group('name')
                break
    return name
def _find_swig_target(target_dir,name):
for ext in ['.cpp','.c']:
target = os.path.join(target_dir,'%s_wrap%s' % (name, ext))
if os.path.isfile(target):
break
return target
#### F2PY related auxiliary functions ####
# Matches `python module <name>` declarations in a .pyf interface file.
_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]+)',
                                     re.I).match
# Matches f2py *__user__* callback module names, which must be skipped.
_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]*?'\
                                          '__user__[\w_]*)',re.I).match
def get_f2py_modulename(source):
    """Return the first non-*__user__* module name declared in a .pyf file,
    or None when no `python module <name>` line is present."""
    name = None
    f = open(source)
    reader = getattr(f, 'xreadlines', f.readlines)
    for line in reader():
        m = _f2py_module_name_match(line)
        if not m:
            continue
        # *__user__* callback modules are not the extension module itself.
        if _f2py_user_module_name_match(line):
            continue
        name = m.group('name')
        break
    f.close()
    return name
##########################################
Minor fix.
git-svn-id: 77a43f9646713b91fea7788fad5dfbf67e151ece@885 94b884b6-d6fd-0310-90d3-974f1d3f35e1
""" Build swig, f2py, weave, sources.
"""
import os
import re
from distutils.cmd import Command
from distutils.command import build_ext, build_py
from distutils.util import convert_path
from distutils.dep_util import newer_group, newer
from scipy_distutils import log
from scipy_distutils.misc_util import fortran_ext_match, all_strings
from scipy_distutils.from_template import process_str
class build_src(build_ext.build_ext):
description = "build sources from SWIG, F2PY files or a function"
user_options = [
('build-src=', 'd', "directory to \"build\" sources to"),
('f2pyflags=', None, "additonal flags to f2py"),
('swigflags=', None, "additional flags to swig"),
('force', 'f', "forcibly build everything (ignore file timestamps)"),
('inplace', 'i',
"ignore build-lib and put compiled extensions into the source " +
"directory alongside your pure Python modules"),
]
boolean_options = ['force','inplace']
help_options = []
def initialize_options(self):
    """Reset every command option to its pre-finalize default (None)."""
    for attr in ('extensions', 'package', 'py_modules', 'build_src',
                 'build_lib', 'build_base', 'force', 'inplace',
                 'package_dir', 'f2pyflags', 'swigflags'):
        setattr(self, attr, None)
    return
def finalize_options(self):
    # Inherit build_base/build_lib/force from the top-level 'build' command.
    self.set_undefined_options('build',
                               ('build_base', 'build_base'),
                               ('build_lib', 'build_lib'),
                               ('force', 'force'))
    if self.package is None:
        self.package = self.distribution.ext_package
    self.extensions = self.distribution.ext_modules
    self.libraries = self.distribution.libraries or []
    self.py_modules = self.distribution.py_modules
    if self.build_src is None:
        self.build_src = os.path.join(self.build_base, 'src')
    if self.inplace is None:
        # Mirror build_ext's --inplace setting when not given explicitly.
        build_ext = self.get_finalized_command('build_ext')
        self.inplace = build_ext.inplace
    # py_modules is used in build_py.find_package_modules
    # NOTE(review): this overwrites the distribution py_modules read just
    # above — the dict collects swig-generated .py files per package.
    self.py_modules = {}
    if self.f2pyflags is None:
        self.f2pyflags = []
    else:
        self.f2pyflags = self.f2pyflags.split() # XXX spaces??
    if self.swigflags is None:
        self.swigflags = []
    else:
        self.swigflags = self.swigflags.split() # XXX spaces??
    return
def run(self):
    """Generate sources only when there is something to build."""
    if self.extensions or self.libraries:
        self.build_sources()
def build_sources(self):
    """Run source generation for every extension and every C library."""
    self.check_extensions_list(self.extensions)
    for extension in self.extensions:
        self.build_extension_sources(extension)
    for library_info in self.libraries:
        self.build_library_sources(*library_info)
def build_library_sources(self, lib_name, build_info):
    """Run callable source generators for one C library and store the
    resulting file list back into build_info['sources']."""
    sources = list(build_info.get('sources', []))
    if not sources:
        return
    log.info('building library "%s" sources' % (lib_name))
    build_info['sources'] = self.generate_sources(sources, (lib_name, build_info))
def build_extension_sources(self, ext):
    # Full pipeline for one extension: callable generators, .src templates,
    # SWIG, f2py; then split off swig-generated .py modules so build_py can
    # install them with the package.
    sources = list(ext.sources)
    log.info('building extension "%s" sources' % (ext.name))
    fullname = self.get_ext_fullname(ext.name)
    modpath = fullname.split('.')
    package = '.'.join(modpath[0:-1])
    if self.inplace:
        build_py = self.get_finalized_command('build_py')
        self.ext_target_dir = build_py.get_package_dir(package)
    sources = self.generate_sources(sources, ext)
    sources = self.template_sources(sources, ext)
    sources = self.swig_sources(sources, ext)
    sources = self.f2py_sources(sources, ext)
    sources, py_files = self.filter_py_files(sources)
    if not self.py_modules.has_key(package):
        self.py_modules[package] = []
    modules = []
    for f in py_files:
        module = os.path.splitext(os.path.basename(f))[0]
        modules.append((package, module, f))
    self.py_modules[package] += modules
    ext.sources = sources
    return
def generate_sources(self, sources, extension):
    """Resolve callable entries in *sources*.

    String entries pass through unchanged; callable entries are invoked as
    func(extension, build_dir) and their result (a path or list of paths)
    is appended. Returns the fully resolved source list.
    """
    plain = []
    generators = []
    for item in sources:
        if type(item) is str:
            plain.append(item)
        else:
            generators.append(item)
    if not generators:
        # Fast path: nothing to generate.
        return plain
    if self.inplace:
        build_dir = self.ext_target_dir
    else:
        # extension may be an Extension instance or a (name, build_info)
        # tuple for a library.
        name = extension[0] if type(extension) is tuple else extension.name
        build_dir = os.path.join(*([self.build_src] + name.split('.')[:-1]))
    self.mkpath(build_dir)
    for generate in generators:
        produced = generate(extension, build_dir)
        if type(produced) is list:
            for item in produced:
                log.info(" adding '%s' to sources." % (item))
            plain.extend(produced)
        else:
            log.info(" adding '%s' to sources." % (produced))
            plain.append(produced)
    return plain
def filter_py_files(self, sources):
    """Split *sources* into (non-Python, Python) file lists, preserving
    the original relative ordering within each list."""
    others = []
    py_files = []
    for src in sources:
        bucket = py_files if os.path.splitext(src)[1] == '.py' else others
        bucket.append(src)
    return others, py_files
def template_sources(self, sources, extension):
    """Expand '.src' template files.

    Returns the source list with every template replaced by the path of
    its generated counterpart; other entries are passed through unchanged.
    """
    processed = []
    for src in sources:
        root, suffix = os.path.splitext(src)
        if suffix != '.src':
            processed.append(src)
            continue
        # Template file: output goes next to the source (inplace builds)
        # or under the build_src tree.
        if self.inplace:
            out_dir = os.path.dirname(root)
        else:
            out_dir = appendpath(self.build_src, os.path.dirname(root))
        self.mkpath(out_dir)
        out_file = os.path.join(out_dir, os.path.basename(root))
        if self.force or newer(src, out_file):
            fid = open(src)
            outstr = process_str(fid.read())
            fid.close()
            fid = open(out_file, 'w')
            fid.write(outstr)
            fid.close()
        processed.append(out_file)
    return processed
def f2py_sources(self, sources, extension):
    """Replace Fortran/.pyf entries in *sources* with the C wrapper
    sources that f2py generates (or has already generated) for them.

    Exactly one '.pyf' interface file is allowed per extension; when no
    '.pyf' is present, f2py is run directly over the Fortran sources.
    Returns the new source list (wrapper C files plus remaining Fortran
    files). NOTE(review): Python-2-only syntax (backtick repr) is used
    throughout; eager map() is also assumed.
    """
    new_sources = []
    f2py_sources = []     # the .pyf interface files found (at most one)
    f_sources = []        # plain Fortran sources
    f2py_targets = {}     # .pyf source -> generated <name>module.c path
    target_dirs = []
    ext_name = extension.name.split('.')[-1]
    skip_f2py = 0         # set when sources are missing (e.g. sdist build)
    for source in sources:
        (base, ext) = os.path.splitext(source)
        if ext == '.pyf': # F2PY interface file
            if self.inplace:
                target_dir = os.path.dirname(base)
            else:
                target_dir = appendpath(self.build_src, os.path.dirname(base))
            if os.path.isfile(source):
                # The module declared in the .pyf must match the extension
                # name, since f2py names the generated file after it.
                name = get_f2py_modulename(source)
                assert name==ext_name,'mismatch of extension names: '\
                       +source+' provides'\
                       ' '+`name`+' but expected '+`ext_name`
                target_file = os.path.join(target_dir,name+'module.c')
            else:
                # .pyf listed but absent: fall back to a previously
                # generated module.c (inplace or in build_src).
                log.debug(' source %s does not exist: skipping f2py\'ing.' \
                          % (source))
                name = ext_name
                skip_f2py = 1
                target_file = os.path.join(target_dir,name+'module.c')
                if not os.path.isfile(target_file):
                    log.debug(' target %s does not exist:\n '\
                              'Assuming %smodule.c was generated with '\
                              '"build_src --inplace" command.' \
                              % (target_file, name))
                    target_dir = os.path.dirname(base)
                    target_file = os.path.join(target_dir,name+'module.c')
                    assert os.path.isfile(target_file),`target_file`+' missing'
                    log.debug(' Yes! Using %s as up-to-date target.' \
                              % (target_file))
            target_dirs.append(target_dir)
            f2py_sources.append(source)
            f2py_targets[source] = target_file
            new_sources.append(target_file)
        elif fortran_ext_match(ext):
            f_sources.append(source)
        else:
            new_sources.append(source)
    if not (f2py_sources or f_sources):
        # Nothing f2py-related in this extension.
        return new_sources
    # NOTE(review): relies on Python 2's eager map(); on Python 3 this
    # would not create the directories.
    map(self.mkpath, target_dirs)
    f2py_options = extension.f2py_options + self.f2pyflags
    if f2py_sources:
        # Interface-file mode: run f2py over the single .pyf file.
        assert len(f2py_sources)==1,\
               'only one .pyf file is allowed per extension module but got'\
               ' more:'+`f2py_sources`
        source = f2py_sources[0]
        target_file = f2py_targets[source]
        target_dir = os.path.dirname(target_file) or '.'
        depends = [source] + extension.depends
        if (self.force or newer_group(depends, target_file,'newer')) \
               and not skip_f2py:
            log.info("f2py: %s" % (source))
            import f2py2e
            f2py2e.run_main(f2py_options + ['--build-dir',target_dir,source])
        else:
            log.debug(" skipping '%s' f2py interface (up-to-date)" % (source))
    else:
        # No .pyf: run f2py directly over the Fortran sources, generating
        # <ext_name>module.c under build_src.
        #XXX TODO: --inplace support for sdist command
        if type(extension) is type(()): name = extension[0]
        else: name = extension.name
        target_dir = os.path.join(*([self.build_src]\
                                    +name.split('.')[:-1]))
        target_file = os.path.join(target_dir,ext_name + 'module.c')
        new_sources.append(target_file)
        depends = f_sources + extension.depends
        if (self.force or newer_group(depends, target_file, 'newer')) \
               and not skip_f2py:
            import f2py2e
            log.info("f2py:> %s" % (target_file))
            self.mkpath(target_dir)
            f2py2e.run_main(f2py_options + ['--lower',
                                            '--build-dir',target_dir]+\
                            ['-m',ext_name]+f_sources)
        else:
            log.debug(" skipping f2py fortran files for '%s' (up-to-date)"\
                      % (target_file))
    assert os.path.isfile(target_file),`target_file`+' missing'
    # Every f2py extension also needs the shared fortranobject.c/.h
    # support files, copied from the f2py2e installation into build_src.
    target_c = os.path.join(self.build_src,'fortranobject.c')
    target_h = os.path.join(self.build_src,'fortranobject.h')
    log.info(" adding '%s' to sources." % (target_c))
    new_sources.append(target_c)
    if self.build_src not in extension.include_dirs:
        log.info(" adding '%s' to include_dirs." \
                 % (self.build_src))
        extension.include_dirs.append(self.build_src)
    if not skip_f2py:
        import f2py2e
        d = os.path.dirname(f2py2e.__file__)
        source_c = os.path.join(d,'src','fortranobject.c')
        source_h = os.path.join(d,'src','fortranobject.h')
        if newer(source_c,target_c) or newer(source_h,target_h):
            self.mkpath(os.path.dirname(target_c))
            self.copy_file(source_c,target_c)
            self.copy_file(source_h,target_h)
    else:
        # f2py was skipped: the support files must already exist.
        assert os.path.isfile(target_c),`target_c` + ' missing'
        assert os.path.isfile(target_h),`target_h` + ' missing'
    # Pick up the extra wrapper files f2py may have emitted.
    for name_ext in ['-f2pywrappers.f','-f2pywrappers2.f90']:
        filename = os.path.join(target_dir,ext_name + name_ext)
        if os.path.isfile(filename):
            log.info(" adding '%s' to sources." % (filename))
            f_sources.append(filename)
    return new_sources + f_sources
def swig_sources(self, sources, extension):
    """Replace '.i' SWIG interface files in *sources* with the generated
    '<name>_wrap.c/.cpp' wrappers (running SWIG when needed) and collect
    the SWIG-generated '.py' proxy modules.

    Returns the new source list plus the generated .py files. All '.i'
    files of one extension must target the same language (C or C++).
    NOTE(review): Python-2-only syntax (backtick repr) is used below.
    """
    # Assuming SWIG 1.3.14 or later. See compatibility note in
    # http://www.swig.org/Doc1.3/Python.html#Python_nn6
    new_sources = []
    swig_sources = []
    swig_targets = {}    # .i source -> generated wrapper path
    target_dirs = []
    py_files = [] # swig generated .py files
    target_ext = '.c'
    typ = None           # 'c' or 'c++', fixed by the first .i file seen
    is_cpp = 0
    skip_swig = 0        # set when a .i file is missing (sdist build)
    ext_name = extension.name.split('.')[-1]
    for source in sources:
        (base, ext) = os.path.splitext(source)
        if ext == '.i': # SWIG interface file
            if self.inplace:
                target_dir = os.path.dirname(base)
                py_target_dir = self.ext_target_dir
            else:
                target_dir = appendpath(self.build_src, os.path.dirname(base))
                py_target_dir = target_dir
            if os.path.isfile(source):
                # SWIG extensions are conventionally named '_<module>';
                # the %module name must match ext_name minus that prefix.
                name = get_swig_modulename(source)
                assert name==ext_name[1:],'mismatch of extension names: '\
                       +source+' provides'\
                       ' '+`name`+' but expected '+`ext_name[1:]`
                if typ is None:
                    typ = get_swig_target(source)
                    is_cpp = typ=='c++'
                    if is_cpp:
                        target_ext = '.cpp'
                else:
                    # All .i files of one extension must agree on C vs C++.
                    assert typ == get_swig_target(source),`typ`
                target_file = os.path.join(target_dir,'%s_wrap%s' \
                                           % (name, target_ext))
            else:
                # .i listed but absent: reuse a previously generated
                # wrapper (inplace or in build_src).
                log.debug(' source %s does not exist: skipping swig\'ing.' \
                          % (source))
                name = ext_name[1:]
                skip_swig = 1
                target_file = _find_swig_target(target_dir, name)
                if not os.path.isfile(target_file):
                    log.debug(' target %s does not exist:\n '\
                              'Assuming %s_wrap.{c,cpp} was generated with '\
                              '"build_src --inplace" command.' \
                              % (target_file, name))
                    target_dir = os.path.dirname(base)
                    target_file = _find_swig_target(target_dir, name)
                    assert os.path.isfile(target_file),`target_file`+' missing'
                    log.debug(' Yes! Using %s as up-to-date target.' \
                              % (target_file))
            target_dirs.append(target_dir)
            new_sources.append(target_file)
            py_files.append(os.path.join(py_target_dir, name+'.py'))
            swig_sources.append(source)
            swig_targets[source] = new_sources[-1]
        else:
            new_sources.append(source)
    if not swig_sources:
        return new_sources
    if skip_swig:
        # Wrappers were located but SWIG itself is not run.
        return new_sources + py_files
    # NOTE(review): relies on Python 2's eager map(); on Python 3 this
    # would not create the directories.
    map(self.mkpath, target_dirs)
    swig = self.find_swig()
    swig_cmd = [swig, "-python"]
    if is_cpp:
        swig_cmd.append('-c++')
    for d in extension.include_dirs:
        swig_cmd.append('-I'+d)
    for source in swig_sources:
        target = swig_targets[source]
        depends = [source] + extension.depends
        if self.force or newer_group(depends, target, 'newer'):
            log.info("%s: %s" % (os.path.basename(swig) \
                                 + (is_cpp and '++' or ''), source))
            self.spawn(swig_cmd + self.swigflags \
                       + ["-o", target, '-outdir', py_target_dir, source])
        else:
            log.debug(" skipping '%s' swig interface (up-to-date)" \
                      % (source))
    return new_sources + py_files
def appendpath(prefix, path):
    """Join *path* onto *prefix*.

    A relative *path* is joined directly. An absolute *path* is re-rooted
    under *prefix*: the part it shares with the absolute form of *prefix*
    is stripped first, and the remainder must come out relative.
    """
    if os.path.isabs(path):
        absprefix = os.path.abspath(prefix)
        # NOTE(review): commonprefix() is character-based, not
        # component-based, so partially shared path components can split
        # mid-name — confirm callers only pass well-aligned paths.
        d = os.path.commonprefix([absprefix, path])
        subpath = path[len(d):]
        # repr() replaces the Python-2-only backtick syntax.
        assert not os.path.isabs(subpath), repr(subpath)
        return os.path.join(prefix, subpath)
    return os.path.join(prefix, path)
#### SWIG related auxiliary functions ####

# Matches the '%module <name>' directive of a SWIG interface file.
_swig_module_name_match = re.compile(r'\s*%module\s*(?P<name>[\w_]+)',
                                     re.I).match
# Emacs-style mode markers, e.g. '-*- c -*-' / '-*- c++ -*-'.
_has_c_header = re.compile(r'-[*]-\s*c\s*-[*]-',re.I).search
_has_cpp_header = re.compile(r'-[*]-\s*c[+][+]\s*-[*]-',re.I).search

def get_swig_target(source):
    """Return 'c++' if the first line of *source* carries a '-*- c++ -*-'
    marker, else 'c'. A '-*- c -*-' marker takes precedence over C++."""
    # 'with' guarantees the descriptor is closed even if readline()
    # raises; the original leaked the file object on error.
    with open(source, 'r') as f:
        line = f.readline()
    result = 'c'
    if _has_cpp_header(line):
        result = 'c++'
    if _has_c_header(line):
        result = 'c'
    return result
def get_swig_modulename(source):
    """Return the module name declared by the first '%module <name>'
    directive in the SWIG interface file *source*, or None if no such
    directive exists."""
    # Fixes: 'name' was left unbound (UnboundLocalError) when no %module
    # line existed — default to None like get_f2py_modulename does; use
    # 'with' so the file is closed even when reading raises. Iterating
    # the file object replaces the Python-2 xreadlines() fallback.
    name = None
    with open(source, 'r') as f:
        for line in f:
            m = _swig_module_name_match(line)
            if m:
                name = m.group('name')
                break
    return name
def _find_swig_target(target_dir,name):
for ext in ['.cpp','.c']:
target = os.path.join(target_dir,'%s_wrap%s' % (name, ext))
if os.path.isfile(target):
break
return target
#### F2PY related auxiliary functions ####

# Matches 'python module <name>' lines in a .pyf interface file.
_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]+)',
                                     re.I).match
# Matches f2py-internal '...__user__...' module names, which must be
# skipped when looking for the real module name.
_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]*?'\
                                          '__user__[\w_]*)',re.I).match

def get_f2py_modulename(source):
    """Return the module name from the first non-__user__ 'python module'
    line of the .pyf file *source*, or None if none is found."""
    name = None
    # 'with' guarantees the file is closed even when reading raises;
    # iterating the file object replaces the Python-2 xreadlines()
    # fallback of the original.
    with open(source) as f:
        for line in f:
            m = _f2py_module_name_match(line)
            if m:
                if _f2py_user_module_name_match(line):  # skip *__user__* names
                    continue
                name = m.group('name')
                break
    return name
##########################################
|
d2ddd3f3-2ead-11e5-89ac-7831c1d44c14
d2e7c29c-2ead-11e5-bbee-7831c1d44c14
d2e7c29c-2ead-11e5-bbee-7831c1d44c14 |
"""
The class in this file defines a common base for the pytests executed
through pywbemcli execution
"""
from __future__ import absolute_import, print_function
import re
import os
import pytest
import six
# import pkgs to determine pywbem version
import packaging.version
import pywbem
from .utils import execute_pywbemcli, assert_rc, assert_patterns, assert_lines
# Directory containing this test module; mock-file paths are resolved
# relative to it.
TEST_DIR = os.path.dirname(__file__)
# Boolean flags that individual tests can use to decide whether they
# should run. PYWBEM_1 is True when the installed pywbem is >= 1.0.0;
# PYWBEM_0 is the inverse. Apply these to tests that are specific to one
# pywbem major version or the other.
PYWBEM_VERSION = packaging.version.parse(pywbem.__version__)
PYWBEM_1 = PYWBEM_VERSION.release >= (1, 0, 0)
PYWBEM_0 = not PYWBEM_1
# URL string returned by some of the commands. pywbem 1.0.0 changed it to
# include the port number, so it is built dynamically per pywbem version.
FAKEURL_STR = '//FakedUrl:5988' if PYWBEM_1 else '//FakedUrl'
class CLITestsBase(object):
    # pylint: disable=too-few-public-methods, useless-object-inheritance
    """
    Defines methods to execute tests on pywbemcli.
    """
    def command_test(self, desc, command_grp, inputs, exp_response, mock_files,
                     condition, verbose=False):
        # pylint: disable=line-too-long, no-self-use
        """
        Test method to execute test on pywbemcli by calling the executable
        pywbemcli for the command defined by command with arguments defined
        by args. This can execute pywbemcli either with a mock environment
        by using the mock_files variable or without mock if the mock_files
        parameter is None. The method tests the results of the execution
        of pywbemcli using the exp_response parameter.

        Parameters:
          desc (:term:`string`):
            Description of the test

          command_grp (:term:`string`):
            Pywbemcli command group for this test. This is the first level of
            the command, e.g. 'class' or 'instance'.

          inputs (:term:`string` or tuple/list of :term:`string` or :class:`py:dict`):

            * If inputs is a string or a tuple/list of strings, it contains the
              command line arguments, without the command name or command group.

              Each single argument must be its own item in the iterable;
              combining the arguments into a string does not work, and the
              string is not split into words (anymore).

              The arguments may be binary strings encoded in UTF-8, or unicode
              strings.

            * If inputs is a dict it can contain the following optional items:

              - 'args': String or tuple/list of strings with local
                (command-level) options that will be added to the command
                line after the command name.
                Each single argument (e.g. option name and option arguments)
                must be its own item in the iterable.

              - 'general': String or tuple/list of strings with general
                options that will be added to the command line before the
                command name.
                Each single argument (e.g. option name and option arguments)
                must be its own item in the iterable.

              - 'env': Dictionary of environment variables where key is the
                variable name; dict value is the variable value (without any
                shell escaping needed).
                If omitted, None, or empty, no environment variables will be
                set for the test.

              - 'stdin': A string or a tuple/list of strings, that contains the
                standard input for the command.
                This can be used for commands in interactive mode, and for
                responding to prompts.
                If present, the command group specified in the `command_grp`
                parameter is not added to the command line, but the local and
                global options are added.
                If specified as a tuple/list of strings, these strings are
                joined into a single string separated by an EOL char. Each line
                is processed as a separate repl line or prompt input by
                pywbemcli.
                If omitted or None, no standard input will be provided to the
                pywbemcli command.

          exp_response (:class:`py:dict`)

            Keyword arguments for expected response.

            Includes the following possible keys:

               'stdout' or 'stderr' - Defines which return is expected (the
               expected response). The value is a string or iterable defining
               the data expected in the response. The data definition in
               this dictionary entry must be compatible with the definition
               of expected data for the test selected.
               Only one of these keys may exist.

               'test' - If it exists defines the test used to compare the
               returned data with the expected returned data defined as the
               value of 'stdout'/'stderr'.

               The tests defined are:

                 'startswith' - Expected Response must be a single string.
                 The returned text defined starts with the defined string.

                 'lines' - Expected response may be either a list of strings or
                 single string.

                 Compares for exact match between the expected response and the
                 returned data line by line. The number of lines and the data
                 in each line must match. If the expected response is a single
                 string it is split into lines separated at each new line
                 before the match.

                 'linesnows' - Expected response may be either list of strings
                 or single string. Compares as with lines except that all
                 whitespace is removed from the strings before the compare.

                 'patterns' - Expected response must be same as lines test
                 except that each line in the expected response is treated as
                 a regex expression and a regex match is executed for each line.

                 'regex' - Expected response is a single string or list of
                 strings. Each string is considered a regex expression. The
                 regex expression is tested against each line in the output
                 using regex search. All strings in the expected response must
                 be found in actual response to pass the test.

                 'in' - Expected response is string or list of strings.
                 Tests the complete response line by line to determine if each
                 entry in expected response is in the response data as a single
                 test.

                 'innows' - Like 'in', except that differences in whitespace are
                 ignored.

               'rc' expected exit_code from pywbemcli. If None, code 0
               is expected.

          mock_files (:term:`string` or list of string or None):
            If this is a string, this test will be executed using the
            --mock-server pywbemcli option with this file as the name of the
            objects to be compiled or executed. This should be just a file name
            and this method assumes the file is in the tests/unit directory.

            If it is a list, the same rules apply to each entry in the list.

            If None, test is executed without the --mock-server input parameter
            and defines an artificial server name. Used to test commands
            and options that do not communicate with a server. It is faster
            than installing the mock repository.

          condition (True, False, 'pdb', or 'verbose'):
            If True, the test is executed.
            If False, the test is skipped.
            If 'pdb', the test breaks in the debugger.
            If 'verbose', verbose mode is enabled for the test.

          verbose (:class:`py:bool`):
            If True, verbose mode is enabled for the test.
            In verbose mode, the assembled command line, the environment
            variables set by 'env', and other details will be displayed.
        """  # noqa: E501
        # pylint: enable=line-too-long
        if not condition:
            pytest.skip('Condition for test case {} not met'.format(desc))

        env = None
        stdin = None
        general_args = None
        local_args = None
        if isinstance(inputs, dict):
            general_args = inputs.get("general", None)
            local_args = inputs.get("args", None)
            env = inputs.get("env", None)
            stdin = inputs.get('stdin', None)
            if stdin and isinstance(stdin, (tuple, list)):
                # Multiple stdin lines are fed to pywbemcli as one
                # newline-separated string.
                stdin = '\n'.join(stdin)
        elif isinstance(inputs, (six.string_types, list, tuple)):
            local_args = inputs
        else:
            assert False, 'Invalid inputs param to test {!r}. Allowed types ' \
                          'are dict, string, list, tuple.'.format(inputs)

        if isinstance(local_args, six.string_types):
            # Is not split into words anymore
            local_args = [local_args]
        if isinstance(general_args, six.string_types):
            # Is not split into words anymore
            general_args = [general_args]

        cmd_line = []

        # '--pdb' is a general option and must precede the command group;
        # previously it was appended after the command and its arguments,
        # where pywbemcli does not accept it.
        if condition == 'pdb':
            cmd_line.append('--pdb')

        if general_args:
            cmd_line.extend(general_args)

        if mock_files:
            if isinstance(mock_files, (list, tuple)):
                for item in mock_files:
                    cmd_line.extend(['--mock-server',
                                     os.path.join(TEST_DIR, item)])
            elif isinstance(mock_files, six.string_types):
                cmd_line.extend(['--mock-server',
                                 os.path.join(TEST_DIR, mock_files)])
            else:
                assert False, \
                    'CLI_TEST_EXTENSIONS mock_file {} invalid' \
                    .format(mock_files)

        # When stdin is used, the command group comes from stdin lines,
        # not from the command line.
        if not stdin:
            cmd_line.append(command_grp)

        if local_args:
            cmd_line.extend(local_args)

        if not verbose:
            verbose = condition == 'verbose'

        if verbose:
            print('\nCMDLINE: {}'.format(cmd_line))

        if verbose and env:
            print('ENV: {}'.format(env))

        rc, stdout, stderr = execute_pywbemcli(cmd_line, env=env, stdin=stdin,
                                               verbose=verbose)

        exp_rc = exp_response['rc'] if 'rc' in exp_response else 0
        assert_rc(exp_rc, rc, stdout, stderr, desc)

        if verbose:
            print('RC={}\nSTDOUT={}\nSTDERR={}'.format(rc, stdout, stderr))

        # 'test' is optional in exp_response; .get() avoids the KeyError
        # that direct subscription raised when the key was omitted.
        test_definition = exp_response.get('test', None)

        if 'stdout' in exp_response:
            test_value = exp_response['stdout']
            rtn_value = stdout
            rtn_type = 'stdout'
        elif 'stderr' in exp_response:
            test_value = exp_response['stderr']
            rtn_value = stderr
            rtn_type = 'stderr'
        else:
            assert False, 'Expected "stdout" or "stderr" key. One of these ' \
                          'keys required in exp_response.'

        if test_definition:
            # test that rtn_value starts with test_value
            if test_definition == 'startswith':
                assert isinstance(test_value, six.string_types)
                assert rtn_value.startswith(test_value), \
                    "Unexpected start of line on {} in test:\n" \
                    "{}\n" \
                    "Expected start of line:\n" \
                    "------------\n" \
                    "{}\n" \
                    "------------\n" \
                    "Actual output line(s):\n" \
                    "------------\n" \
                    "{}\n" \
                    "------------\n". \
                    format(rtn_type, desc, test_value, rtn_value)

            # test that lines match between test_value and rtn_value
            # base on regex match
            elif test_definition == 'patterns':
                if isinstance(test_value, six.string_types):
                    test_value = test_value.splitlines()
                assert isinstance(test_value, (list, tuple))
                assert_patterns(test_value, rtn_value.splitlines(),
                                rtn_type, desc)

            # test that each line in the test value matches the
            # corresponding line in the rtn_value exactly
            elif test_definition == 'lines':
                if isinstance(test_value, six.string_types):
                    test_value = test_value.splitlines()
                if isinstance(test_value, (list, tuple)):
                    assert_lines(test_value, rtn_value.splitlines(),
                                 rtn_type, desc)
                else:
                    assert isinstance(test_value, six.string_types)
                    assert_lines(test_value.splitlines(),
                                 rtn_value.splitlines(),
                                 rtn_type, desc)

            # compress test_value and rtn_value into whitespace single
            # strings and assert_lines.
            elif test_definition == 'linesnows':
                assert_lines(remove_ws(test_value),
                             remove_ws(rtn_value),
                             rtn_type, desc)

            # test with a regex search that all values in list exist in
            # the return. Build rtn_value into single string and do
            # re.search against it for each test_value
            elif test_definition == 'regex':
                assert isinstance(rtn_value, six.string_types)
                if isinstance(test_value, six.string_types):
                    test_value = [test_value]
                for regex in test_value:
                    assert isinstance(regex, six.string_types)
                    match_result = re.search(regex, rtn_value, re.MULTILINE)
                    assert match_result, \
                        "Missing pattern on {} in test:\n" \
                        "{}\n" \
                        "Expected pattern in any line:\n" \
                        "------------\n" \
                        "{}\n" \
                        "------------\n" \
                        "Actual output line(s):\n" \
                        "------------\n" \
                        "{}\n" \
                        "------------\n". \
                        format(rtn_type, desc, regex, rtn_value)

            elif test_definition == 'in':
                if isinstance(test_value, six.string_types):
                    test_value = [test_value]
                for test_str in test_value:
                    assert test_str in rtn_value, \
                        "Missing in-string on {} in test:\n" \
                        "{}\n" \
                        "Expected in-string in any line:\n" \
                        "------------\n" \
                        "{}\n" \
                        "------------\n" \
                        "Actual output line(s):\n" \
                        "------------\n" \
                        "{}\n" \
                        "------------\n". \
                        format(rtn_type, desc, test_str, rtn_value)

            elif test_definition == 'innows':
                if isinstance(test_value, six.string_types):
                    test_value = [test_value]
                for test_str in test_value:
                    assert remove_ws(test_str, join=True) in \
                        remove_ws(rtn_value, join=True), \
                        "Missing ws-agnostic in-string on {} in test:\n" \
                        "{}\n" \
                        "Expected ws-agnostic in-string in any line:\n" \
                        "------------\n" \
                        "{}\n" \
                        "------------\n" \
                        "Actual output line(s):\n" \
                        "------------\n" \
                        "{}\n" \
                        "------------\n". \
                        format(rtn_type, desc, test_str, rtn_value)

            elif test_definition == 'not-innows':
                if isinstance(test_value, six.string_types):
                    test_value = [test_value]
                for test_str in test_value:
                    assert remove_ws(test_str, join=True) not in \
                        remove_ws(rtn_value, join=True), \
                        "Unexpected ws-agnostic in-string on {} in test:\n"\
                        "{}\n" \
                        "Unexpected ws-agnostic in-string in any line:\n" \
                        "------------\n" \
                        "{}\n" \
                        "------------\n" \
                        "Actual output line(s):\n" \
                        "------------\n" \
                        "{}\n" \
                        "------------\n". \
                        format(rtn_type, desc, test_str, rtn_value)

            else:
                raise AssertionError(
                    "Test validation {!r} is invalid in test:\n"
                    "{}\n".format(test_definition, desc))
def remove_ws(inputs, join=False):
    """
    Return the input with all whitespace stripped and empty lines dropped.

      Parameters:

        inputs: Either a single string (possibly multi-line) or an iterable
          of such strings.

        join (bool): If True, concatenate the resulting lines into a single
          string.

      Returns:
        List of non-empty, whitespace-free lines, or one joined string when
        join is True.
    """
    if isinstance(inputs, six.string_types):
        inputs = [inputs]

    # Flatten embedded newlines so each element is exactly one line.
    flat_lines = []
    for chunk in inputs:
        flat_lines.extend(chunk.splitlines())

    # Strip whitespace and discard lines that end up empty.
    stripped = [re.sub(r"\s", "", ln) for ln in flat_lines]
    result = [ln for ln in stripped if ln]

    return ''.join(result) if join else result
Fixed condition 'pdb' in testcases
Details:
* Some of the test cases allow specifying a condition that, if set
to 'pdb', causes pywbemcli to be invoked with the '--pdb' general
option. However, the issue was that this option was appended after
the command group and command. Fixed that by putting it first on
the command line.
Signed-off-by: Andreas Maier <0903150dae73adcf31eccfd2633bf70bbc40bd52@gmx.de>
"""
The class in this file defines a common base for the pytests executed
through pywbemcli execution
"""
from __future__ import absolute_import, print_function
import re
import os
import pytest
import six
# import pkgs to determine pywbem version
import packaging.version
import pywbem
from .utils import execute_pywbemcli, assert_rc, assert_patterns, assert_lines
# Directory containing this test module; mock-file paths are resolved
# relative to it.
TEST_DIR = os.path.dirname(__file__)
# Boolean flags that individual tests can use to decide whether they
# should run. PYWBEM_1 is True when the installed pywbem is >= 1.0.0;
# PYWBEM_0 is the inverse. Apply these to tests that are specific to one
# pywbem major version or the other.
PYWBEM_VERSION = packaging.version.parse(pywbem.__version__)
PYWBEM_1 = PYWBEM_VERSION.release >= (1, 0, 0)
PYWBEM_0 = not PYWBEM_1
# URL string returned by some of the commands. pywbem 1.0.0 changed it to
# include the port number, so it is built dynamically per pywbem version.
FAKEURL_STR = '//FakedUrl:5988' if PYWBEM_1 else '//FakedUrl'
class CLITestsBase(object):
# pylint: disable=too-few-public-methods, useless-object-inheritance
"""
Defines methods to execute tests on pywbemcli.
"""
def command_test(self, desc, command_grp, inputs, exp_response, mock_files,
condition, verbose=False):
# pylint: disable=line-too-long, no-self-use
"""
Test method to execute test on pywbemcli by calling the executable
pywbemcli for the command defined by command with arguments defined
by args. This can execute pywbemcli either with a mock environment
by using the mock_files variable or without mock if the mock_files
parameter is None. The method tests the results of the execution
of pywbemcli using the exp_response parameter.
Parameters:
desc (:term:`string`):
Description of the test
command_grp (:term:`string`):
Pywbemcli command group for this test. This is the first level of
the command, e.g. 'class' or 'instance'.
inputs (:term:`string` or tuple/list of :term:`string` or :class:`py:dict`):
* If inputs is a string or a tuple/list of strings, it contains the
command line arguments, without the command name or command group.
Each single argument must be its own item in the iterable;
combining the arguments into a string does not work, and the
string is not split into words (anymore).
The arguments may be binary strings encoded in UTF-8, or unicode
strings.
* If inputs is a dict it can contain the following optional items:
- 'args': String or tuple/list of strings with local
(command-level) options that will be added to the command
line after the command name.
Each single argument (e.g. option name and option arguments)
must be its own item in the iterable.
- 'general': String or tuple/list of strings with general
options that will be added to the command line before the
command name.
Each single argument (e.g. option name and option arguments)
must be its own item in the iterable.
- 'env': Dictionary of environment variables where key is the
variable name; dict value is the variable value (without any
shell escaping needed).
If omitted, None, or empty, no environment variables will be
set for the test.
- 'stdin': A string or a tuple/list of strings, that contains the
standard input for the command.
This can be used for commands in interactive mode, and for
responding to prompts.
If present, the command group specified in the `command_grp`
parameter is not added to the command line, but the local and
global options are added.
If specified as a tuple/list of strings, these strings are
joined into a single string separated by an EOL char. Each line
is processed as a separate repl line or prompt input by
pybemcli.
If omitted or None, no standard input will be provided to the
pywbemcli command.
exp_response (:class:`py:dict`)
Keyword arguments for expected response.
Includes the following possible keys:
'stdout' or 'stderr' - Defines which return is expected (the
expected response). The value is a string or iterable defining
the data expected in the response. The data definition in
this dictionary entry must be compatible with the definition
of expected data for the test selected.
Only one of these keys may exist.
'test' - If it exists defines the test used to compare the
returned data with the expected returned data defined as the
value of 'stdout'/'stderr'.
The tests define are:
'startswith' - Expected Response must be a single string
The returned text defined starts with the defined string
'lines' - Expected response may be either a list of strings or
single string
Compares for exact match between the expected response and the
returned data line by line. The number of lines and the data
in each line must match. If the expected response is a single
string it is split into lines separated at each new line
before the match
'linesnows' - Expected response may be either list of strings
or single string. Compares as with lines except that all
whitespace is removed from the strings before the compare.
'patterns' - Expected response must be same as lines test
except that each line in the expected response is treated as
a regex expression and a regex match is executed for each line.
'regex' - Expected response is a single string or list of
strings. Each string is considered a regex expression. The
regex expression is tested against each line in the output
using regex search. All strings in the expected response must
be found in actual response to pass the test
'in' - Expected response is string or list of strings.
Tests the complete response line by line to determine if each
entry in expected response is in the response data as a single
test.
'innows' - Like 'in, except that differences in whitespace are
ignored.
Executes a single regex search of the entire
response data to match with each entry in the expected
response
'rc' expected exit_code from pywbemcli. If None, code 0
is expected.
mock_files (:term:`string` or list of string or None):
If this is a string, this test will be executed using the
--mock-server pywbemcl option with this file as the name of the
objects to be compiled or executed. This should be just a file name
and this method assumes the file is in the tests/unit directory.
If it is a list, the same rules apply to each entry in the list.
If None, test is executed without the --mock-server input parameter
and defines an artificial server name Used to test commands
and options that do not communicate with a server. It is faster
than installing the mock repository
condition (True, False, 'pdb', or 'verbose'):
If True, the test is executed.
If False, the test is skipped.
If 'pdb', the test breaks in the debugger.
If 'verbose', verbose mode is enabled for the test.
verbose (:class:`py:bool`):
If True, verbose mode is enabled for the test.
In verbose mode, the assembled command line, the environment
variables set by 'env', and other details will be displayed.
""" # noqa: E501
# pylint: enable=line-too-long
if not condition:
pytest.skip('Condition for test case {} not met'.format(desc))
env = None
stdin = None
general_args = None
if isinstance(inputs, dict):
general_args = inputs.get("general", None)
local_args = inputs.get("args", None)
env = inputs.get("env", None)
stdin = inputs.get('stdin', None)
if stdin:
if isinstance(stdin, (tuple, list)):
stdin = '\n'.join(stdin)
elif isinstance(inputs, six.string_types):
local_args = inputs
elif isinstance(inputs, (list, tuple)):
local_args = inputs
else:
assert False, 'Invalid inputs param to test {!r}. Allowed types ' \
'are dict, string, list, tuple.'.format(inputs)
if isinstance(local_args, six.string_types):
# Is not split into words anymore
local_args = [local_args]
if isinstance(general_args, six.string_types):
# Is not split into words anymore
general_args = [general_args]
cmd_line = []
if condition == 'pdb':
cmd_line.append('--pdb')
if general_args:
cmd_line.extend(general_args)
if mock_files:
if isinstance(mock_files, (list, tuple)):
for item in mock_files:
cmd_line.extend(['--mock-server',
os.path.join(TEST_DIR, item)])
elif isinstance(mock_files, six.string_types):
cmd_line.extend(['--mock-server',
os.path.join(TEST_DIR, mock_files)])
else:
assert False, \
'CLI_TEST_EXTENSIONS mock_file {} invalid' \
.format(mock_files)
if not stdin:
cmd_line.append(command_grp)
if local_args:
cmd_line.extend(local_args)
if not verbose:
verbose = condition == 'verbose'
if verbose:
print('\nCMDLINE: {}'.format(cmd_line))
if verbose and env:
print('ENV: {}'.format(env))
rc, stdout, stderr = execute_pywbemcli(cmd_line, env=env, stdin=stdin,
verbose=verbose)
exp_rc = exp_response['rc'] if 'rc' in exp_response else 0
assert_rc(exp_rc, rc, stdout, stderr, desc)
if verbose:
print('RC={}\nSTDOUT={}\nSTDERR={}'.format(rc, stdout, stderr))
if exp_response['test']:
test_definition = exp_response['test']
else:
test_definition = None
if 'stdout' in exp_response:
test_value = exp_response['stdout']
rtn_value = stdout
rtn_type = 'stdout'
elif 'stderr' in exp_response:
test_value = exp_response['stderr']
rtn_value = stderr
rtn_type = 'stderr'
else:
assert False, 'Expected "stdout" or "stderr" key. One of these ' \
'keys required in exp_response.'
if test_definition:
if 'test' in exp_response:
test_definition = exp_response['test']
# test that rtn_value starts with test_value
if test_definition == 'startswith':
assert isinstance(test_value, six.string_types)
assert rtn_value.startswith(test_value), \
"Unexpected start of line on {} in test:\n" \
"{}\n" \
"Expected start of line:\n" \
"------------\n" \
"{}\n" \
"------------\n" \
"Actual output line(s):\n" \
"------------\n" \
"{}\n" \
"------------\n". \
format(rtn_type, desc, test_value, rtn_value)
# test that lines match between test_value and rtn_value
# base on regex match
elif test_definition == 'patterns':
if isinstance(test_value, six.string_types):
test_value = test_value.splitlines()
assert isinstance(test_value, (list, tuple))
assert_patterns(test_value, rtn_value.splitlines(),
rtn_type, desc)
# test that each line in the test value matches the
# corresponding line in the rtn_value exactly
elif test_definition == 'lines':
if isinstance(test_value, six.string_types):
test_value = test_value.splitlines()
if isinstance(test_value, (list, tuple)):
assert_lines(test_value, rtn_value.splitlines(),
rtn_type, desc)
else:
assert(isinstance(test_value, six.string_types))
assert_lines(test_value.splitlines(),
rtn_value.splitlines(),
rtn_type, desc)
# compress test_value and rtn_value into whitespace single
# strings and assert_lines.
elif test_definition == 'linesnows':
assert_lines(remove_ws(test_value),
remove_ws(rtn_value),
rtn_type, desc)
# test with a regex search that all values in list exist in
# the return. Build rtn_value into single string and do
# re.search against it for each test_value
elif test_definition == 'regex':
assert isinstance(rtn_value, six.string_types)
if isinstance(test_value, six.string_types):
test_value = [test_value]
for regex in test_value:
assert isinstance(regex, six.string_types)
match_result = re.search(regex, rtn_value, re.MULTILINE)
assert match_result, \
"Missing pattern on {} in test:\n" \
"{}\n" \
"Expected pattern in any line:\n" \
"------------\n" \
"{}\n" \
"------------\n" \
"Actual output line(s):\n" \
"------------\n" \
"{}\n" \
"------------\n". \
format(rtn_type, desc, regex, rtn_value)
elif test_definition == 'in':
if isinstance(test_value, six.string_types):
test_value = [test_value]
for test_str in test_value:
assert test_str in rtn_value, \
"Missing in-string on {} in test:\n" \
"{}\n" \
"Expected in-string in any line:\n" \
"------------\n" \
"{}\n" \
"------------\n" \
"Actual output line(s):\n" \
"------------\n" \
"{}\n" \
"------------\n". \
format(rtn_type, desc, test_str, rtn_value)
elif test_definition == 'innows':
if isinstance(test_value, six.string_types):
test_value = [test_value]
for test_str in test_value:
assert remove_ws(test_str, join=True) in \
remove_ws(rtn_value, join=True), \
"Missing ws-agnostic in-string on {} in test:\n" \
"{}\n" \
"Expected ws-agnostic in-string in any line:\n" \
"------------\n" \
"{}\n" \
"------------\n" \
"Actual output line(s):\n" \
"------------\n" \
"{}\n" \
"------------\n". \
format(rtn_type, desc, test_str, rtn_value)
elif test_definition == 'not-innows':
if isinstance(test_value, six.string_types):
test_value = [test_value]
for test_str in test_value:
assert remove_ws(test_str, join=True) not in \
remove_ws(rtn_value, join=True), \
"Unexpected ws-agnostic in-string on {} in test:\n"\
"{}\n" \
"Unexpected ws-agnostic in-string in any line:\n" \
"------------\n" \
"{}\n" \
"------------\n" \
"Actual output line(s):\n" \
"------------\n" \
"{}\n" \
"------------\n". \
format(rtn_type, desc, test_str, rtn_value)
else:
raise AssertionError(
"Test validation {!r} is invalid in test:\n"
"{}\n".format(test_definition, desc))
def remove_ws(inputs, join=False):
    """
    Strip all whitespace from the input and drop lines that become empty.

    Parameters:

      inputs: Either a single string (possibly multi-line) or an iterable
        of such strings.

      join (bool): If True, concatenate the surviving lines into a single
        string; if False, return them as a list.

    Returns:
      string or list of string: The non-empty, whitespace-free lines,
      joined into one string when ``join`` is True, else as a list.
    """
    # Normalize to an iterable of strings, then explode every multi-line
    # entry into individual lines.
    if isinstance(inputs, six.string_types):
        inputs = [inputs]
    all_lines = [ln for entry in inputs for ln in entry.splitlines()]

    # Remove every whitespace character; discard lines left empty.
    compacted = []
    for ln in all_lines:
        squeezed = re.sub(r"\s", "", ln)
        if squeezed:
            compacted.append(squeezed)

    if join:
        return ''.join(compacted)
    return compacted
|
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.core.exceptions import ValidationError as DjangoValidationError
from django.db.models import Q
from django.db.transaction import atomic
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _, activate, deactivate_all
import django_filters
from rest_framework import mixins, parsers, renderers, status, viewsets
from rest_framework.authtoken.models import Token
from rest_framework.decorators import list_route, detail_route
from rest_framework.exceptions import ValidationError as DRFValidationError, PermissionDenied
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from api.serializers import UserSerializer, GroupSerializer, ServiceSerializer, ProviderSerializer, \
ProviderTypeSerializer, ServiceAreaSerializer, APILoginSerializer, APIActivationSerializer, \
PasswordResetRequestSerializer, PasswordResetCheckSerializer, PasswordResetSerializer, \
ResendActivationLinkSerializer, CreateProviderSerializer, ServiceTypeSerializer, \
SelectionCriterionSerializer, LanguageSerializer, ServiceSearchSerializer
from email_user.models import EmailUser
from services.models import Service, Provider, ProviderType, ServiceArea, ServiceType, \
SelectionCriterion
class TranslatedViewMixin(object):
    """Mixin that activates the authenticated user's preferred translation
    language for the duration of the request."""

    def perform_authentication(self, request):
        super().perform_authentication(request)
        # Once authentication has run we may know the user's language
        # preference; apply it, or clear any previously active language.
        language = getattr(request.user, 'language', False)
        if language:
            activate(language)
        else:
            deactivate_all()
# Base classes combining DRF view machinery with user-language activation.
class ServiceInfoGenericViewSet(TranslatedViewMixin, viewsets.GenericViewSet):
    """A view set that allows for translated fields, but doesn't provide any
    specific view methods (like list or detail) by default."""
    pass
class ServiceInfoAPIView(TranslatedViewMixin, APIView):
    # Plain DRF APIView with the user-language activation behaviour mixed in.
    pass
class ServiceInfoModelViewSet(TranslatedViewMixin, viewsets.ModelViewSet):
    # Full CRUD ModelViewSet with the user-language activation behaviour mixed in.
    pass
class LanguageView(ServiceInfoAPIView):
    """
    Look up or set the authenticated user's preferred language.
    """
    permission_classes = [IsAuthenticated]

    def get(self, request, format=None):
        """Return the current user's preferred language code."""
        return Response({'language': request.user.language})

    def post(self, request):
        """Set the current user's preferred language.

        Expects a payload accepted by LanguageSerializer (a 'language' field).
        """
        serializer = LanguageSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        # Use validated_data, not serializer.data: validated_data is the
        # cleaned input, while .data re-serializes for output.
        request.user.language = serializer.validated_data['language']
        # Persist only the changed field (matches the update_fields usage in
        # the login/activation views) to avoid clobbering concurrent updates.
        request.user.save(update_fields=['language'])
        return Response()
class UserViewSet(ServiceInfoModelViewSet):
    """
    API endpoint that allows users to be viewed or edited.
    """
    # NOTE(review): no queryset filtering here — any caller allowed by the
    # project's default permission classes can see every user; confirm that
    # is intended.
    queryset = EmailUser.objects.all()
    serializer_class = UserSerializer
class GroupViewSet(ServiceInfoModelViewSet):
    """
    API endpoint that allows groups to be viewed or edited.
    """
    # Exposes all auth groups; access control relies on the project's
    # default permission classes.
    queryset = Group.objects.all()
    serializer_class = GroupSerializer
class ServiceAreaViewSet(mixins.RetrieveModelMixin, mixins.ListModelMixin,
                         ServiceInfoGenericViewSet):
    """
    Public, read-only lookup of service areas (list and retrieve only).
    """
    permission_classes = [AllowAny]
    queryset = ServiceArea.objects.all()
    serializer_class = ServiceAreaSerializer
class CharAnyLanguageFilter(django_filters.CharFilter):
    """
    Given the base name of a field that has multiple language versions,
    filter allowing for any of the language versions to contain the
    given value, case-insensitively.

    E.g. if field_name is 'name' and the value given in the query is 'foo',
    then any record where 'name_en', 'name_ar', or 'name_fr' contains 'foo'
    will match.
    """

    def __init__(self, field_name):
        self.field_name = field_name
        super().__init__()

    def filter(self, qset, value):
        # An empty search value means "no filtering".
        if not len(value):
            return qset
        # OR together one case-insensitive containment test per language.
        combined = Q()
        for language_code in ('en', 'ar', 'fr'):
            lookup = '%s_%s__icontains' % (self.field_name, language_code)
            combined = combined | Q(**{lookup: value})
        return qset.filter(combined)
class ServiceTypeNumbersFilter(django_filters.CharFilter):
    """
    Filter service records where their service type has any of the
    numbers given in a comma-separated string.
    """

    def filter(self, qset, value):
        # No input: leave the queryset untouched.
        if not len(value):
            return qset
        wanted_numbers = [int(piece) for piece in value.split(',')]
        return qset.filter(type__number__in=wanted_numbers)
class ServiceFilter(django_filters.FilterSet):
    """Filter set for Service queries; text filters match any language version."""
    additional_info = CharAnyLanguageFilter('additional_info')
    area_of_service_name = CharAnyLanguageFilter('area_of_service__name')
    description = CharAnyLanguageFilter('description')
    name = CharAnyLanguageFilter('name')
    type_name = CharAnyLanguageFilter('type__name')
    # Comma-separated service-type numbers, e.g. "1,2,5".
    type_numbers = ServiceTypeNumbersFilter()
    id = django_filters.NumberFilter()
    class Meta:
        model = Service
        fields = ['area_of_service_name', 'name', 'description', 'additional_info', 'type_name',
                  'type_numbers', 'id']
class ServiceViewSet(ServiceInfoModelViewSet):
    # This docstring shows up when browsing the API in a web browser:
    """
    Service view

    In addition to the usual URLs, you can append 'cancel/' to
    the service's URL and POST to cancel a service that's in
    draft or current state. (User must be the provider or superuser).
    """
    filter_class = ServiceFilter
    # Flipped to True per-request by search() so get_queryset() knows to
    # return public data instead of the requesting provider's own records.
    is_search = False
    # The queryset is only here so DRF knows the base model for this View.
    # We override it below in all cases.
    queryset = Service.objects.all()
    serializer_class = ServiceSerializer
    # All the text fields that are used for full-text searches (?search=XXXXX)
    search_fields = [
        'additional_info_en', 'additional_info_ar', 'additional_info_fr',
        'cost_of_service',
        'description_en', 'description_ar', 'description_fr',
        'name_en', 'name_ar', 'name_fr',
        'area_of_service__name_en', 'area_of_service__name_ar', 'area_of_service__name_fr',
        'type__comments_en', 'type__comments_ar', 'type__comments_fr',
        'type__name_en', 'type__name_ar', 'type__name_fr',
        'provider__description_en', 'provider__description_ar', 'provider__description_fr',
        'provider__name_en', 'provider__name_ar', 'provider__name_fr',
        'provider__type__name_en', 'provider__type__name_ar', 'provider__type__name_fr',
        'provider__phone_number',
        'provider__website',
        'provider__user__email',
        'selection_criteria__text_en', 'selection_criteria__text_ar', 'selection_criteria__text_fr',
    ]

    def get_queryset(self):
        """Public current services for searches; otherwise only the requesting
        provider's own non-archived, non-canceled services."""
        if self.is_search:
            return Service.objects.filter(status=Service.STATUS_CURRENT)
        else:
            return self.queryset.filter(provider__user=self.request.user)\
                .exclude(status=Service.STATUS_ARCHIVED)\
                .exclude(status=Service.STATUS_CANCELED)

    @detail_route(methods=['post'])
    def cancel(self, request, *args, **kwargs):
        """Cancel a service. Should be current or draft"""
        obj = self.get_object()
        if obj.status not in [Service.STATUS_DRAFT, Service.STATUS_CURRENT]:
            raise DRFValidationError(
                {'status': _('Service record must be current or pending changes to be canceled')})
        obj.cancel()
        return Response()

    @list_route(methods=['get'], permission_classes=[AllowAny])
    def search(self, request, *args, **kwargs):
        """
        Public API for searching public information about the current services
        """
        # Mutating instance attributes is safe here: DRF instantiates a new
        # view object per request.
        self.is_search = True
        self.serializer_class = ServiceSearchSerializer
        return super().list(request, *args, **kwargs)
class SelectionCriterionViewSet(ServiceInfoModelViewSet):
    """CRUD on the selection criteria belonging to the current provider."""
    queryset = SelectionCriterion.objects.all()
    serializer_class = SelectionCriterionSerializer
    def get_queryset(self):
        # Only make visible the SelectionCriteria owned by the current provider
        # (attached to services of the current provider)
        return self.queryset.filter(service__provider__user=self.request.user)
    def get_object(self):
        # Users can only access their own records
        # Overriding get_queryset() should be enough, but just in case...
        # NOTE(review): get_queryset() reaches the user via
        # service__provider__user, but this check uses obj.provider.user —
        # confirm SelectionCriterion really has a direct 'provider' relation,
        # otherwise this raises AttributeError instead of PermissionDenied.
        obj = super().get_object()
        if not obj.provider.user == self.request.user:
            raise PermissionDenied
        return obj
class ProviderTypeViewSet(mixins.RetrieveModelMixin, mixins.ListModelMixin,
                          ServiceInfoGenericViewSet):
    """
    Look up provider types.

    (Read-only - no create, update, or delete provided)
    """
    # Unauth'ed users need to be able to read the provider types so
    # they can register as providers.
    permission_classes = [AllowAny]
    queryset = ProviderType.objects.all()
    serializer_class = ProviderTypeSerializer
class ServiceTypeViewSet(mixins.RetrieveModelMixin, mixins.ListModelMixin,
                         ServiceInfoGenericViewSet):
    """
    Look up service types.

    (Read-only - no create, update, or delete provided)
    """
    # Public: service types are needed by unauthenticated search users.
    permission_classes = [AllowAny]
    queryset = ServiceType.objects.all()
    serializer_class = ServiceTypeSerializer
class ProviderViewSet(ServiceInfoModelViewSet):
    # This docstring shows up when browsing the API in a web browser:
    """
    Provider view

    For providers to create/update their own data.

    In addition to the usual URLs, you can append 'create_provider/' to
    the provider URL and POST to create a new user and provider.
    POST the fields of the provider, except instead of passing the
    user, pass an 'email' and 'password' field so we can create the user
    too.

    The user will be created inactive. An email message will be sent
    to them with a link they'll have to click in order to activate their
    account. After clicking the link, they'll be redirected to the front
    end, logged in and ready to go.
    """
    queryset = Provider.objects.all()
    serializer_class = ProviderSerializer
    def get_queryset(self):
        # If user is authenticated, it's not a create_provider call.
        # Limit visible providers to the user's own.
        # NOTE(review): is_authenticated is called here; in Django >= 1.10
        # it is a property — confirm the pinned Django version.
        if self.request.user.is_authenticated():
            return self.queryset.filter(user=self.request.user)
        return self.queryset.all()  # Add ".all()" to force re-evaluation each time
    def get_object(self):
        # Users can only access their own records
        # Overriding get_queryset() should be enough, but just in case...
        obj = super().get_object()
        if not obj.user == self.request.user:
            raise PermissionDenied
        return obj
    def update(self, request, *args, **kwargs):
        """On change to provider via the API, notify via JIRA"""
        response = super().update(request, *args, **kwargs)
        self.get_object().notify_jira_of_change()
        return response
    @list_route(methods=['post'], permission_classes=[AllowAny])
    def create_provider(self, request, *args, **kwargs):
        """
        Customized "create provider" API call.

        This is distinct from the built-in 'POST to the list URL'
        call because we need it to work for users who are not
        authenticated (otherwise, they can't register).

        Expected data is basically the same as for creating a provider,
        except that in place of the 'user' field, there should be an
        'email' and 'password' field. They'll be used to create a new user,
        send them an activation email, and create a provider using
        that user.
        """
        with atomic():  # If we throw an exception anywhere in here, rollback all changes
            # First pass: validate the combined user+provider payload
            # (email/password included) before touching the database.
            serializer = CreateProviderSerializer(data=request.data)
            serializer.is_valid(raise_exception=True)
            # Create User
            user = get_user_model().objects.create_user(
                email=request.data['email'],
                password=request.data['password'],
                is_active=False
            )
            user.groups.add(Group.objects.get(name='Providers'))
            # Create Provider
            # Second pass: re-validate as a plain provider, substituting the
            # freshly created user's API URL for the 'user' field.
            data = dict(request.data, user=user.get_api_url())
            serializer = ProviderSerializer(data=data, context={'request': request})
            serializer.is_valid(raise_exception=True)
            serializer.save()  # returns provider if we need it
            headers = self.get_success_headers(serializer.data)
            # If we got here without blowing up, send the user's activation email
            # NOTE(review): a failure while sending e-mail rolls back the whole
            # transaction, so no half-created accounts are left behind.
            user.send_activation_email(request.site, request, data['base_activation_link'])
        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
#
# UNAUTHENTICATED views
#
class APILogin(ServiceInfoAPIView):
    """
    Allow front-end to pass us an email and a password and get
    back an auth token for the user.

    (Adapted from the corresponding view in DRF for our email-based
    user model.)
    """
    throttle_classes = ()
    permission_classes = ()
    parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,)
    renderer_classes = (renderers.JSONRenderer,)

    def post(self, request):
        # The serializer authenticates the credentials and hands back the user.
        serializer = APILoginSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        user = serializer.validated_data['user']
        # Reuse the user's existing token if they already have one.
        token, _created = Token.objects.get_or_create(user=user)
        user.last_login = now()
        user.save(update_fields=['last_login'])
        return Response({
            'token': token.key,
            'language': user.language,
            'is_staff': user.is_staff,
        })
class APIActivationView(ServiceInfoAPIView):
    """
    Given a user activation key, activate the user and
    return an auth token.
    """
    throttle_classes = ()
    permission_classes = ()
    parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,)
    renderer_classes = (renderers.JSONRenderer,)
    def post(self, request):
        serializer = APIActivationSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        activation_key = serializer.validated_data['activation_key']
        try:
            user = get_user_model().objects.activate_user(activation_key=activation_key)
        except DjangoValidationError as e:  # pragma: no cover
            # The serializer already checked the key, so about the only way this could
            # have failed would be due to another request having activated the user
            # between our checking and our trying to activate them ourselves. Still,
            # it's theoretically possible, so handle it...
            raise DRFValidationError(e.messages)
        # Activation doubles as a login: hand back the user's auth token
        # and record the login time.
        token, unused = Token.objects.get_or_create(user=user)
        user.last_login = now()
        user.save(update_fields=['last_login'])
        return Response({'token': token.key, 'email': user.email})
class PasswordResetRequest(ServiceInfoAPIView):
    """
    View to tell the API that a user wants to reset their password.
    If the provided email is for a valid user, it sends them an
    email with a link they can use.
    """
    throttle_classes = ()
    permission_classes = ()
    parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,)
    renderer_classes = (renderers.JSONRenderer,)

    def post(self, request):
        serializer = PasswordResetRequestSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        # The serializer resolves the e-mail address to a user and carries
        # the front-end link the reset e-mail should point at.
        validated = serializer.validated_data
        validated['user'].send_password_reset_email(
            validated['base_reset_link'], request.site)
        return Response()
class PasswordResetCheck(ServiceInfoAPIView):
    """
    View to check if a password reset key appears to
    be valid (at the moment).
    """
    throttle_classes = ()
    permission_classes = ()
    parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,)
    renderer_classes = (renderers.JSONRenderer,)

    def post(self, request):
        # All of the key checking happens inside the serializer; an invalid
        # key raises a validation error before we get past this call.
        serializer = PasswordResetCheckSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        matched_user = serializer.validated_data['user']
        return Response({'email': matched_user.email})
class PasswordReset(ServiceInfoAPIView):
    """
    View to reset a user's password, given a reset key
    and a new password.
    """
    throttle_classes = ()
    permission_classes = ()
    parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,)
    renderer_classes = (renderers.JSONRenderer,)
    def post(self, request):
        serializer = PasswordResetSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        user = serializer.validated_data['user']
        password = serializer.validated_data['password']
        user.set_password(password)
        user.save()
        # A successful reset doubles as a login: return the user's auth
        # token and record the login time.
        token, unused = Token.objects.get_or_create(user=user)
        user.last_login = now()
        user.save(update_fields=['last_login'])
        return Response({'token': token.key, 'email': user.email})
class ResendActivationLinkView(ServiceInfoAPIView):
    """
    View to resend the activation link for the user
    """
    throttle_classes = ()
    permission_classes = ()
    parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,)
    renderer_classes = (renderers.JSONRenderer,)

    def post(self, request):
        serializer = ResendActivationLinkSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        # Re-send the activation e-mail using the front-end link supplied
        # in the request payload.
        target_user = serializer.validated_data['user']
        target_user.send_activation_email(
            request.site, request, request.data['base_activation_link'])
        return Response()
PEP-8
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.core.exceptions import ValidationError as DjangoValidationError
from django.db.models import Q
from django.db.transaction import atomic
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _, activate, deactivate_all
import django_filters
from rest_framework import mixins, parsers, renderers, status, viewsets
from rest_framework.authtoken.models import Token
from rest_framework.decorators import list_route, detail_route
from rest_framework.exceptions import ValidationError as DRFValidationError, PermissionDenied
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from api.serializers import UserSerializer, GroupSerializer, ServiceSerializer, ProviderSerializer, \
ProviderTypeSerializer, ServiceAreaSerializer, APILoginSerializer, APIActivationSerializer, \
PasswordResetRequestSerializer, PasswordResetCheckSerializer, PasswordResetSerializer, \
ResendActivationLinkSerializer, CreateProviderSerializer, ServiceTypeSerializer, \
SelectionCriterionSerializer, LanguageSerializer, ServiceSearchSerializer
from email_user.models import EmailUser
from services.models import Service, Provider, ProviderType, ServiceArea, ServiceType, \
SelectionCriterion
# NOTE(review): near-duplicate of a class defined earlier in this file; at
# import time this later definition rebinds the name. Consider removing one copy.
class TranslatedViewMixin(object):
    # Activates the authenticated user's preferred language for the request.
    def perform_authentication(self, request):
        super().perform_authentication(request)
        # Change current langugage if authentication is successful
        # and we know the user's preference
        if getattr(request.user, 'language', False):
            activate(request.user.language)
        else:
            deactivate_all()
# NOTE(review): near-duplicate of a class defined earlier in this file.
class ServiceInfoGenericViewSet(TranslatedViewMixin, viewsets.GenericViewSet):
    """A view set that allows for translated fields, but doesn't provide any
    specific view methods (like list or detail) by default."""
    pass
# NOTE(review): near-duplicate of a class defined earlier in this file.
class ServiceInfoAPIView(TranslatedViewMixin, APIView):
    pass
# NOTE(review): near-duplicate of a class defined earlier in this file.
class ServiceInfoModelViewSet(TranslatedViewMixin, viewsets.ModelViewSet):
    pass
# NOTE(review): near-duplicate of a class defined earlier in this file.
class LanguageView(ServiceInfoAPIView):
    """
    Lookup the authenticated user's preferred language.
    """
    permission_classes = [IsAuthenticated]
    def get(self, request, format=None):
        return Response({'language': request.user.language})
    def post(self, request):
        serializer = LanguageSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        # NOTE(review): validated_data would be the more conventional source
        # of the cleaned input than serializer.data.
        request.user.language = serializer.data['language']
        request.user.save()
        return Response()
# NOTE(review): near-duplicate of a class defined earlier in this file.
class UserViewSet(ServiceInfoModelViewSet):
    """
    API endpoint that allows users to be viewed or edited.
    """
    queryset = EmailUser.objects.all()
    serializer_class = UserSerializer
# NOTE(review): near-duplicate of a class defined earlier in this file.
class GroupViewSet(ServiceInfoModelViewSet):
    """
    API endpoint that allows groups to be viewed or edited.
    """
    queryset = Group.objects.all()
    serializer_class = GroupSerializer
# NOTE(review): near-duplicate of a class defined earlier in this file.
class ServiceAreaViewSet(mixins.RetrieveModelMixin, mixins.ListModelMixin,
                         ServiceInfoGenericViewSet):
    # Public, read-only lookup of service areas (list and retrieve only).
    permission_classes = [AllowAny]
    queryset = ServiceArea.objects.all()
    serializer_class = ServiceAreaSerializer
# NOTE(review): near-duplicate of a class defined earlier in this file.
class CharAnyLanguageFilter(django_filters.CharFilter):
    """
    Given the base name of a field that has multiple language versions,
    filter allowing for any of the language versions to contain the
    given value, case-insensitively.
    E.g. if field_name is 'name' and the value given in the query is 'foo',
    then any record where 'name_en', 'name_ar', or 'name_fr' contains 'foo'
    will match.
    """
    def __init__(self, field_name):
        self.field_name = field_name
        super().__init__()
    def filter(self, qset, value):
        if not len(value):
            return qset
        # OR together one icontains lookup per language code.
        query = Q()
        for lang in ['en', 'ar', 'fr']:
            query |= Q(**{'%s_%s__icontains' % (self.field_name, lang): value})
        return qset.filter(query)
# NOTE(review): near-duplicate of a class defined earlier in this file.
class ServiceTypeNumbersFilter(django_filters.CharFilter):
    """
    Filter service records where their service type has any of the
    numbers given in a comma-separated string.
    """
    def filter(self, qset, value):
        if not len(value):
            return qset
        return qset.filter(type__number__in=[int(s) for s in value.split(',')])
# NOTE(review): near-duplicate of a class defined earlier in this file.
class ServiceFilter(django_filters.FilterSet):
    # Filter set for Service queries; text filters match any language version.
    additional_info = CharAnyLanguageFilter('additional_info')
    area_of_service_name = CharAnyLanguageFilter('area_of_service__name')
    description = CharAnyLanguageFilter('description')
    name = CharAnyLanguageFilter('name')
    type_name = CharAnyLanguageFilter('type__name')
    type_numbers = ServiceTypeNumbersFilter()
    id = django_filters.NumberFilter()
    class Meta:
        model = Service
        fields = ['area_of_service_name', 'name', 'description', 'additional_info', 'type_name',
                  'type_numbers', 'id']
# NOTE(review): near-duplicate of a class defined earlier in this file; at
# import time this later definition rebinds the name. Consider removing one copy.
class ServiceViewSet(ServiceInfoModelViewSet):
    # This docstring shows up when browsing the API in a web browser:
    """
    Service view
    In addition to the usual URLs, you can append 'cancel/' to
    the service's URL and POST to cancel a service that's in
    draft or current state. (User must be the provider or superuser).
    """
    filter_class = ServiceFilter
    # Flipped to True per-request by search() so get_queryset() knows to
    # return public data instead of the provider's own records.
    is_search = False
    # The queryset is only here so DRF knows the base model for this View.
    # We override it below in all cases.
    queryset = Service.objects.all()
    serializer_class = ServiceSerializer
    # All the text fields that are used for full-text searches (?search=XXXXX)
    search_fields = [
        'additional_info_en', 'additional_info_ar', 'additional_info_fr',
        'cost_of_service',
        'description_en', 'description_ar', 'description_fr',
        'name_en', 'name_ar', 'name_fr',
        'area_of_service__name_en', 'area_of_service__name_ar', 'area_of_service__name_fr',
        'type__comments_en', 'type__comments_ar', 'type__comments_fr',
        'type__name_en', 'type__name_ar', 'type__name_fr',
        'provider__description_en', 'provider__description_ar', 'provider__description_fr',
        'provider__name_en', 'provider__name_ar', 'provider__name_fr',
        'provider__type__name_en', 'provider__type__name_ar', 'provider__type__name_fr',
        'provider__phone_number',
        'provider__website',
        'provider__user__email',
        'selection_criteria__text_en', 'selection_criteria__text_ar', 'selection_criteria__text_fr',
    ]
    def get_queryset(self):
        # Only make visible the Services owned by the current provider
        # and not archived
        if self.is_search:
            return Service.objects.filter(status=Service.STATUS_CURRENT)
        else:
            return self.queryset.filter(provider__user=self.request.user)\
                .exclude(status=Service.STATUS_ARCHIVED)\
                .exclude(status=Service.STATUS_CANCELED)
    @detail_route(methods=['post'])
    def cancel(self, request, *args, **kwargs):
        """Cancel a service. Should be current or draft"""
        obj = self.get_object()
        if obj.status not in [Service.STATUS_DRAFT, Service.STATUS_CURRENT]:
            raise DRFValidationError(
                {'status': _('Service record must be current or pending changes to be canceled')})
        obj.cancel()
        return Response()
    @list_route(methods=['get'], permission_classes=[AllowAny])
    def search(self, request, *args, **kwargs):
        """
        Public API for searching public information about the current services
        """
        # Per-request instance mutation; DRF builds a new view per request.
        self.is_search = True
        self.serializer_class = ServiceSearchSerializer
        return super().list(request, *args, **kwargs)
# NOTE(review): near-duplicate of a class defined earlier in this file.
class SelectionCriterionViewSet(ServiceInfoModelViewSet):
    queryset = SelectionCriterion.objects.all()
    serializer_class = SelectionCriterionSerializer
    def get_queryset(self):
        # Only make visible the SelectionCriteria owned by the current provider
        # (attached to services of the current provider)
        return self.queryset.filter(service__provider__user=self.request.user)
    def get_object(self):
        # Users can only access their own records
        # Overriding get_queryset() should be enough, but just in case...
        # NOTE(review): this uses obj.provider.user while get_queryset() goes
        # through service__provider__user — confirm the model has a direct
        # 'provider' relation.
        obj = super().get_object()
        if not obj.provider.user == self.request.user:
            raise PermissionDenied
        return obj
# NOTE(review): near-duplicate of a class defined earlier in this file.
class ProviderTypeViewSet(mixins.RetrieveModelMixin, mixins.ListModelMixin,
                          ServiceInfoGenericViewSet):
    """
    Look up provider types.
    (Read-only - no create, update, or delete provided)
    """
    # Unauth'ed users need to be able to read the provider types so
    # they can register as providers.
    permission_classes = [AllowAny]
    queryset = ProviderType.objects.all()
    serializer_class = ProviderTypeSerializer
class ServiceTypeViewSet(mixins.RetrieveModelMixin, mixins.ListModelMixin,
                         ServiceInfoGenericViewSet):
    """
    Look up service types.
    (Read-only - no create, update, or delete provided)
    """
    # Public reference data, readable without authentication.
    permission_classes = [AllowAny]
    queryset = ServiceType.objects.all()
    serializer_class = ServiceTypeSerializer
class ProviderViewSet(ServiceInfoModelViewSet):
    # This docstring shows up when browsing the API in a web browser:
    """
    Provider view

    For providers to create/update their own data.

    In addition to the usual URLs, you can append 'create_provider/' to
    the provider URL and POST to create a new user and provider.
    POST the fields of the provider, except instead of passing the
    user, pass an 'email' and 'password' field so we can create the user
    too.

    The user will be created inactive. An email message will be sent
    to them with a link they'll have to click in order to activate their
    account. After clicking the link, they'll be redirected to the front
    end, logged in and ready to go.
    """
    queryset = Provider.objects.all()
    serializer_class = ProviderSerializer

    def get_queryset(self):
        # If user is authenticated, it's not a create_provider call.
        # Limit visible providers to the user's own.
        if self.request.user.is_authenticated():
            return self.queryset.filter(user=self.request.user)
        return self.queryset.all()  # Add ".all()" to force re-evaluation each time

    def get_object(self):
        # Users can only access their own records
        # Overriding get_queryset() should be enough, but just in case...
        obj = super().get_object()
        if not obj.user == self.request.user:
            raise PermissionDenied
        return obj

    def update(self, request, *args, **kwargs):
        """On change to provider via the API, notify via JIRA"""
        response = super().update(request, *args, **kwargs)
        # Re-fetch the now-updated provider and push a change notification.
        self.get_object().notify_jira_of_change()
        return response

    @list_route(methods=['post'], permission_classes=[AllowAny])
    def create_provider(self, request, *args, **kwargs):
        """
        Customized "create provider" API call.

        This is distinct from the built-in 'POST to the list URL'
        call because we need it to work for users who are not
        authenticated (otherwise, they can't register).

        Expected data is basically the same as for creating a provider,
        except that in place of the 'user' field, there should be an
        'email' and 'password' field. They'll be used to create a new user,
        send them an activation email, and create a provider using
        that user.
        """
        with atomic():  # If we throw an exception anywhere in here, rollback all changes
            # Validate the combined user+provider payload up front.
            serializer = CreateProviderSerializer(data=request.data)
            serializer.is_valid(raise_exception=True)

            # Create User
            user = get_user_model().objects.create_user(
                email=request.data['email'],
                password=request.data['password'],
                is_active=False
            )
            user.groups.add(Group.objects.get(name='Providers'))

            # Create Provider, pointing its 'user' field at the user we
            # just created.
            data = dict(request.data, user=user.get_api_url())
            serializer = ProviderSerializer(data=data, context={'request': request})
            serializer.is_valid(raise_exception=True)
            serializer.save()  # returns provider if we need it
            headers = self.get_success_headers(serializer.data)
            # If we got here without blowing up, send the user's activation email
            # (still inside the transaction: a send failure rolls everything back).
            user.send_activation_email(request.site, request, data['base_activation_link'])
        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
#
# UNAUTHENTICATED views
#
class APILogin(ServiceInfoAPIView):
    """
    Allow front-end to pass us an email and a password and get
    back an auth token for the user.
    (Adapted from the corresponding view in DRF for our email-based
    user model.)
    """
    throttle_classes = ()
    permission_classes = ()
    parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,)
    renderer_classes = (renderers.JSONRenderer,)

    def post(self, request):
        login = APILoginSerializer(data=request.data)
        login.is_valid(raise_exception=True)
        account = login.validated_data['user']
        auth_token, _created = Token.objects.get_or_create(user=account)
        # Record the login time.
        account.last_login = now()
        account.save(update_fields=['last_login'])
        payload = {
            'token': auth_token.key,
            'language': account.language,
            'is_staff': account.is_staff,
        }
        return Response(payload)
class APIActivationView(ServiceInfoAPIView):
    """
    Given a user activation key, activate the user and
    return an auth token.
    """
    throttle_classes = ()
    permission_classes = ()
    parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,)
    renderer_classes = (renderers.JSONRenderer,)

    def post(self, request):
        serializer = APIActivationSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        activation_key = serializer.validated_data['activation_key']
        try:
            user = get_user_model().objects.activate_user(activation_key=activation_key)
        except DjangoValidationError as e:  # pragma: no cover
            # The serializer already checked the key, so about the only way this could
            # have failed would be due to another request having activated the user
            # between our checking and our trying to activate them ourselves. Still,
            # it's theoretically possible, so handle it...
            raise DRFValidationError(e.messages)
        # Activation doubles as a login: issue a token and stamp last_login.
        token, unused = Token.objects.get_or_create(user=user)
        user.last_login = now()
        user.save(update_fields=['last_login'])
        return Response({'token': token.key, 'email': user.email})
class PasswordResetRequest(ServiceInfoAPIView):
    """
    View to tell the API that a user wants to reset their password.
    If the provided email is for a valid user, it sends them an
    email with a link they can use.
    """
    throttle_classes = ()
    permission_classes = ()
    parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,)
    renderer_classes = (renderers.JSONRenderer,)

    def post(self, request):
        form = PasswordResetRequestSerializer(data=request.data)
        form.is_valid(raise_exception=True)
        account = form.validated_data['user']
        reset_base = form.validated_data['base_reset_link']
        # The email contains a link built from the front end's base URL.
        account.send_password_reset_email(reset_base, request.site)
        return Response()
class PasswordResetCheck(ServiceInfoAPIView):
    """
    View to check if a password reset key appears to
    be valid (at the moment).
    """
    throttle_classes = ()
    permission_classes = ()
    parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,)
    renderer_classes = (renderers.JSONRenderer,)

    def post(self, request):
        # All of the checking happens inside the serializer; if it
        # validates, the key identified a real user.
        checker = PasswordResetCheckSerializer(data=request.data)
        checker.is_valid(raise_exception=True)
        return Response({'email': checker.validated_data['user'].email})
class PasswordReset(ServiceInfoAPIView):
    """
    View to reset a user's password, given a reset key
    and a new password.
    """
    throttle_classes = ()
    permission_classes = ()
    parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,)
    renderer_classes = (renderers.JSONRenderer,)

    def post(self, request):
        form = PasswordResetSerializer(data=request.data)
        form.is_valid(raise_exception=True)
        account = form.validated_data['user']
        account.set_password(form.validated_data['password'])
        account.save()
        # A successful reset also logs the user in: hand back a token
        # and stamp last_login.
        auth_token, _unused = Token.objects.get_or_create(user=account)
        account.last_login = now()
        account.save(update_fields=['last_login'])
        return Response({'token': auth_token.key, 'email': account.email})
class ResendActivationLinkView(ServiceInfoAPIView):
    """
    View to resend the activation link for the user
    """
    throttle_classes = ()
    permission_classes = ()
    parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,)
    renderer_classes = (renderers.JSONRenderer,)

    def post(self, request):
        form = ResendActivationLinkSerializer(data=request.data)
        form.is_valid(raise_exception=True)
        account = form.validated_data['user']
        # Re-send the activation email, building the link from the base
        # URL supplied by the front end.
        account.send_activation_email(
            request.site, request, request.data['base_activation_link'])
        return Response()
|
# -*- test-case-name: vumi.tests.test_message -*-
import json
from uuid import uuid4
from datetime import datetime
from errors import MissingMessageField, InvalidMessageField
from vumi.utils import to_kwargs
# This is the date format we work with internally
VUMI_DATE_FORMAT = "%Y-%m-%d %H:%M:%S.%f"


def date_time_decoder(json_object):
    """JSON ``object_hook`` that revives timestamp strings.

    Any value that parses as a ``VUMI_DATE_FORMAT`` timestamp is
    replaced in place by the corresponding ``datetime``; every other
    value (non-strings included) is left untouched.
    """
    for key in json_object:
        try:
            parsed = datetime.strptime(json_object[key], VUMI_DATE_FORMAT)
        except (ValueError, TypeError):
            # Not a string, or not in our format -- leave it alone.
            continue
        json_object[key] = parsed
    return json_object
class JSONMessageEncoder(json.JSONEncoder):
    """A JSON encoder that additionally knows how to serialize datetimes."""

    def default(self, obj):
        # Datetimes are written out in the internal VUMI format; anything
        # else is deferred to the stock encoder (which raises TypeError).
        if not isinstance(obj, datetime):
            return super(JSONMessageEncoder, self).default(obj)
        return obj.strftime(VUMI_DATE_FORMAT)
def from_json(json_string):
    """Decode *json_string*, turning any values written in
    VUMI_DATE_FORMAT back into datetime objects."""
    return json.loads(json_string, object_hook=date_time_decoder)
def to_json(obj):
    """Encode *obj* as JSON, serializing datetime values in
    VUMI_DATE_FORMAT."""
    return json.dumps(obj, cls=JSONMessageEncoder)
class Message(object):
    """
    A unified message object used by Vumi when transmitting messages over AMQP
    and occassionally as a standardised JSON format for use in external APIs.

    The special ``.cache`` attribute stores a dictionary of data that is not
    stored by the ``vumi.fields.VumiMessage`` and hence not stored by the
    Vumi message store.
    """

    # name of the special attribute that isn't stored by the message store
    _CACHE_ATTRIBUTE = "__cache__"

    def __init__(self, _process_fields=True, **kwargs):
        if _process_fields:
            kwargs = self.process_fields(kwargs)
        self.payload = kwargs
        self.validate_fields()

    def process_fields(self, fields):
        # Subclass hook for filling in defaults; identity by default.
        return fields

    def validate_fields(self):
        # Subclass hook; the base message accepts any payload.
        pass

    def assert_field_present(self, *fields):
        """Raise MissingMessageField for the first field not in the payload."""
        for name in fields:
            if name not in self.payload:
                raise MissingMessageField(name)

    def assert_field_value(self, field, *values):
        """Raise unless *field* is present and holds one of *values*."""
        self.assert_field_present(field)
        if self.payload[field] not in values:
            raise InvalidMessageField(field)

    def to_json(self):
        return to_json(self.payload)

    @classmethod
    def from_json(cls, json_string):
        decoded = to_kwargs(from_json(json_string))
        return cls(_process_fields=False, **decoded)

    def __str__(self):
        return u"<Message payload=\"%s\">" % repr(self.payload)

    def __repr__(self):
        return str(self)

    def __eq__(self, other):
        return isinstance(other, Message) and self.payload == other.payload

    def __contains__(self, key):
        return key in self.payload

    def __getitem__(self, key):
        return self.payload[key]

    def __setitem__(self, key, value):
        self.payload[key] = value

    def get(self, key, default=None):
        return self.payload.get(key, default)

    def items(self):
        return self.payload.items()

    def copy(self):
        # Round-trip through JSON to obtain an independent deep copy.
        return self.from_json(self.to_json())

    @property
    def cache(self):
        """
        A special payload attribute that isn't stored by the message store.
        """
        return self.payload.setdefault(self._CACHE_ATTRIBUTE, {})
class TransportMessage(Message):
    """Common base class for messages sent to or from a transport."""

    # sub-classes should set the message type
    MESSAGE_TYPE = None
    MESSAGE_VERSION = '20110921'

    DEFAULT_ENDPOINT_NAME = 'default'

    @staticmethod
    def generate_id():
        """
        Generate a unique message id.

        There are places where we want a message id before we can
        build a complete message. This lets us do that in a consistent
        manner.
        """
        # `UUID.hex` returns the same value as the long-deprecated
        # `get_hex()` (which was removed in Python 3), so this works on
        # both Python 2 and Python 3.
        return uuid4().hex

    def process_fields(self, fields):
        """Fill in defaults for the fields common to all transport messages."""
        fields.setdefault('message_version', self.MESSAGE_VERSION)
        fields.setdefault('message_type', self.MESSAGE_TYPE)
        fields.setdefault('timestamp', datetime.utcnow())
        fields.setdefault('routing_metadata', {})
        fields.setdefault('helper_metadata', {})
        return fields

    def validate_fields(self):
        """Check the common fields; raises Missing/InvalidMessageField."""
        self.assert_field_value('message_version', self.MESSAGE_VERSION)
        # We might get older event messages without the `helper_metadata`
        # field.
        self.payload.setdefault('helper_metadata', {})
        self.assert_field_present(
            'message_type',
            'timestamp',
            'helper_metadata',
        )
        if self['message_type'] is None:
            raise InvalidMessageField('message_type')

    @property
    def routing_metadata(self):
        # Created lazily so older serialized messages acquire the field
        # on first access.
        return self.payload.setdefault('routing_metadata', {})

    @classmethod
    def check_routing_endpoint(cls, endpoint_name):
        """Map a `None` endpoint name to the default endpoint."""
        if endpoint_name is None:
            return cls.DEFAULT_ENDPOINT_NAME
        return endpoint_name

    def set_routing_endpoint(self, endpoint_name=None):
        endpoint_name = self.check_routing_endpoint(endpoint_name)
        self.routing_metadata['endpoint_name'] = endpoint_name

    def get_routing_endpoint(self):
        endpoint_name = self.routing_metadata.get('endpoint_name')
        return self.check_routing_endpoint(endpoint_name)
class TransportUserMessage(TransportMessage):
    """Message to or from a user.

    transport_type = sms, ussd, etc
    helper_metadata = for use by dispathers and off-to-the-side
                      components like failure workers (not for use
                      by transports or message workers).
    """
    MESSAGE_TYPE = 'user_message'

    # session event constants
    #
    # SESSION_NONE, SESSION_NEW, SESSION_RESUME, and SESSION_CLOSE
    # may be sent from the transport to a worker. SESSION_NONE indicates
    # there is no relevant session for this message.
    #
    # SESSION_NONE and SESSION_CLOSE may be sent from the worker to
    # the transport. SESSION_NONE indicates any existing session
    # should be continued. SESSION_CLOSE indicates that any existing
    # session should be terminated after sending the message.
    SESSION_NONE, SESSION_NEW, SESSION_RESUME, SESSION_CLOSE = (
        None, 'new', 'resume', 'close')

    # list of valid session events
    SESSION_EVENTS = frozenset([SESSION_NONE, SESSION_NEW, SESSION_RESUME,
                                SESSION_CLOSE])

    # canonical transport types
    TT_HTTP_API = 'http_api'
    TT_IRC = 'irc'
    TT_TELNET = 'telnet'
    TT_TWITTER = 'twitter'
    TT_SMS = 'sms'
    TT_USSD = 'ussd'
    TT_XMPP = 'xmpp'
    TT_MXIT = 'mxit'
    TT_WECHAT = 'wechat'

    TRANSPORT_TYPES = set([TT_HTTP_API, TT_IRC, TT_TELNET, TT_TWITTER, TT_SMS,
                           TT_USSD, TT_XMPP, TT_MXIT, TT_WECHAT])

    AT_IRC_NICKNAME = 'irc_nickname'
    AT_TWITTER_HANDLE = 'twitter_handle'
    AT_MSISDN = 'msisdn'
    AT_GTALK_ID = 'gtalk_id'
    AT_JABBER_ID = 'jabber_id'
    AT_MXIT_ID = 'mxit_id'
    AT_WECHAT_ID = 'wechat_id'

    ADDRESS_TYPES = set([
        AT_IRC_NICKNAME, AT_TWITTER_HANDLE, AT_MSISDN, AT_GTALK_ID,
        AT_JABBER_ID, AT_MXIT_ID, AT_WECHAT_ID])

    # Fields that must come from the message being replied to and may not
    # be overridden via keyword arguments to reply()/reply_group().
    # BUGFIX: this list previously read `'provider' 'transport_name'` (no
    # comma), so implicit string-literal concatenation produced the single
    # entry 'providertransport_name' and overrides of 'provider' and
    # 'transport_name' were silently accepted.
    _REPLY_PROTECTED_FIELDS = (
        # If we're not using this addressing, we shouldn't be replying.
        'to_addr', 'from_addr', 'group', 'in_reply_to', 'provider',
        # These three belong together and are supposed to be opaque.
        'transport_name', 'transport_type', 'transport_metadata')

    def process_fields(self, fields):
        """Fill in defaults for the user-message-specific fields."""
        fields = super(TransportUserMessage, self).process_fields(fields)
        fields.setdefault('message_id', self.generate_id())
        fields.setdefault('in_reply_to', None)
        fields.setdefault('provider', None)
        fields.setdefault('session_event', None)
        fields.setdefault('content', None)
        fields.setdefault('transport_metadata', {})
        fields.setdefault('group', None)
        fields.setdefault('to_addr_type', None)
        fields.setdefault('from_addr_type', None)
        return fields

    def validate_fields(self):
        super(TransportUserMessage, self).validate_fields()
        # We might get older message versions without the `group` or `provider`
        # fields.
        self.payload.setdefault('group', None)
        self.payload.setdefault('provider', None)
        self.assert_field_present(
            'message_id',
            'to_addr',
            'from_addr',
            'in_reply_to',
            'session_event',
            'content',
            'transport_name',
            'transport_type',
            'transport_metadata',
            'group',
            'provider',
        )
        if self['session_event'] not in self.SESSION_EVENTS:
            raise InvalidMessageField("Invalid session_event %r"
                                      % (self['session_event'],))

    def user(self):
        """Return the address of the user this message came from."""
        return self['from_addr']

    def reply(self, content, continue_session=True, **kw):
        """Construct a reply message.

        The reply message will have its `to_addr` field set to the original
        message's `from_addr`. This means that even if the original message is
        directed to the group only (i.e. it has `to_addr` set to `None`), the
        reply will be directed to the sender of the original message.

        :meth:`reply` is suitable for constructing both one-to-one messages
        (such as SMS) and directed messages within a group chat (such as
        name-prefixed content in an IRC channel message).

        If `session_event` is provided in the keyword args,
        `continue_session` will be ignored.

        NOTE: Certain fields are required to come from the message being
        replied to and may not be overridden by this method (see
        ``_REPLY_PROTECTED_FIELDS``):

            'to_addr', 'from_addr', 'group', 'in_reply_to', 'provider',
            'transport_name', 'transport_type', 'transport_metadata'

        FIXME: `helper_metadata` should *not* be copied to the reply message.
               We only do it here because a bunch of legacy code relies on it.
        """
        session_event = None if continue_session else self.SESSION_CLOSE
        for field in self._REPLY_PROTECTED_FIELDS:
            if field in kw:
                # Other "bad keyword argument" conditions cause TypeErrors.
                raise TypeError("'%s' may not be overridden." % (field,))
        fields = {
            'helper_metadata': self['helper_metadata'],  # XXX: See above.
            'session_event': session_event,
            'to_addr': self['from_addr'],
            'from_addr': self['to_addr'],
            'group': self['group'],
            'in_reply_to': self['message_id'],
            'provider': self['provider'],
            'transport_name': self['transport_name'],
            'transport_type': self['transport_type'],
            'transport_metadata': self['transport_metadata'],
        }
        fields.update(kw)
        out_msg = TransportUserMessage(content=content, **fields)
        # The reply should go out the same endpoint it came in.
        out_msg.set_routing_endpoint(self.get_routing_endpoint())
        return out_msg

    def reply_group(self, *args, **kw):
        """Construct a group reply message.

        If the `group` field is set to `None`, :meth:`reply_group` is identical
        to :meth:`reply`.

        If the `group` field is not set to `None`, the reply message will have
        its `to_addr` field set to `None`. This means that even if the original
        message is directed to an individual within the group (i.e. its
        `to_addr` is not set to `None`), the reply will be directed to the
        group as a whole.

        :meth:`reply_group` is suitable for both one-to-one messages (such as
        SMS) and undirected messages within a group chat (such as IRC channel
        messages).
        """
        out_msg = self.reply(*args, **kw)
        if self['group'] is not None:
            out_msg['to_addr'] = None
        return out_msg

    @classmethod
    def send(cls, to_addr, content, **kw):
        """Build an unsolicited outbound message (not a reply)."""
        kw.setdefault('from_addr', None)
        kw.setdefault('transport_name', None)
        kw.setdefault('transport_type', None)
        out_msg = cls(
            to_addr=to_addr,
            in_reply_to=None,
            content=content,
            session_event=cls.SESSION_NONE,
            **kw)
        return out_msg
class TransportEvent(TransportMessage):
    """Message about a TransportUserMessage."""
    MESSAGE_TYPE = 'event'

    # list of valid delivery statuses
    DELIVERY_STATUSES = frozenset(('pending', 'failed', 'delivered'))

    # map of event_types -> extra fields
    EVENT_TYPES = {
        'ack': {'sent_message_id': lambda v: v is not None},
        'nack': {'nack_reason': lambda v: v is not None},
        'delivery_report': {
            'delivery_status': lambda v: v in TransportEvent.DELIVERY_STATUSES,
        },
    }

    def process_fields(self, fields):
        fields = super(TransportEvent, self).process_fields(fields)
        fields.setdefault('event_id', self.generate_id())
        return fields

    def validate_fields(self):
        super(TransportEvent, self).validate_fields()
        self.assert_field_present('user_message_id', 'event_id', 'event_type')
        kind = self.payload['event_type']
        if kind not in self.EVENT_TYPES:
            raise InvalidMessageField("Unknown event_type %r" % (kind,))
        # Each event type demands its own extra fields, validated by the
        # predicates registered in EVENT_TYPES.
        for name, is_valid in self.EVENT_TYPES[kind].items():
            self.assert_field_present(name)
            if not is_valid(self[name]):
                raise InvalidMessageField(name)
Clarify cache docs.
# -*- test-case-name: vumi.tests.test_message -*-
import json
from uuid import uuid4
from datetime import datetime
from errors import MissingMessageField, InvalidMessageField
from vumi.utils import to_kwargs
# This is the date format we work with internally
VUMI_DATE_FORMAT = "%Y-%m-%d %H:%M:%S.%f"


def date_time_decoder(json_object):
    """JSON ``object_hook`` that revives timestamp strings.

    Any value that parses as a ``VUMI_DATE_FORMAT`` timestamp is
    replaced in place by the corresponding ``datetime``; every other
    value (non-strings included) is left untouched.
    """
    for key in json_object:
        try:
            parsed = datetime.strptime(json_object[key], VUMI_DATE_FORMAT)
        except (ValueError, TypeError):
            # Not a string, or not in our format -- leave it alone.
            continue
        json_object[key] = parsed
    return json_object
class JSONMessageEncoder(json.JSONEncoder):
    """A JSON encoder that additionally knows how to serialize datetimes."""

    def default(self, obj):
        # Datetimes are written out in the internal VUMI format; anything
        # else is deferred to the stock encoder (which raises TypeError).
        if not isinstance(obj, datetime):
            return super(JSONMessageEncoder, self).default(obj)
        return obj.strftime(VUMI_DATE_FORMAT)
def from_json(json_string):
    """Decode *json_string*, turning any values written in
    VUMI_DATE_FORMAT back into datetime objects."""
    return json.loads(json_string, object_hook=date_time_decoder)
def to_json(obj):
    """Encode *obj* as JSON, serializing datetime values in
    VUMI_DATE_FORMAT."""
    return json.dumps(obj, cls=JSONMessageEncoder)
class Message(object):
    """
    A unified message object used by Vumi when transmitting messages over AMQP
    and occassionally as a standardised JSON format for use in external APIs.

    The special ``.cache`` property stores a dictionary of data that is not
    stored by the :class:`vumi.fields.VumiMessage` field and hence not stored
    by Vumi's message store.
    """

    # name of the special attribute that isn't stored by the message store
    _CACHE_ATTRIBUTE = "__cache__"

    def __init__(self, _process_fields=True, **kwargs):
        if _process_fields:
            kwargs = self.process_fields(kwargs)
        self.payload = kwargs
        self.validate_fields()

    def process_fields(self, fields):
        # Subclass hook for filling in defaults; identity by default.
        return fields

    def validate_fields(self):
        # Subclass hook; the base message accepts any payload.
        pass

    def assert_field_present(self, *fields):
        """Raise MissingMessageField for the first field not in the payload."""
        for name in fields:
            if name not in self.payload:
                raise MissingMessageField(name)

    def assert_field_value(self, field, *values):
        """Raise unless *field* is present and holds one of *values*."""
        self.assert_field_present(field)
        if self.payload[field] not in values:
            raise InvalidMessageField(field)

    def to_json(self):
        return to_json(self.payload)

    @classmethod
    def from_json(cls, json_string):
        decoded = to_kwargs(from_json(json_string))
        return cls(_process_fields=False, **decoded)

    def __str__(self):
        return u"<Message payload=\"%s\">" % repr(self.payload)

    def __repr__(self):
        return str(self)

    def __eq__(self, other):
        return isinstance(other, Message) and self.payload == other.payload

    def __contains__(self, key):
        return key in self.payload

    def __getitem__(self, key):
        return self.payload[key]

    def __setitem__(self, key, value):
        self.payload[key] = value

    def get(self, key, default=None):
        return self.payload.get(key, default)

    def items(self):
        return self.payload.items()

    def copy(self):
        # Round-trip through JSON to obtain an independent deep copy.
        return self.from_json(self.to_json())

    @property
    def cache(self):
        """
        A special payload attribute that isn't stored by the message store.
        """
        return self.payload.setdefault(self._CACHE_ATTRIBUTE, {})
class TransportMessage(Message):
    """Common base class for messages sent to or from a transport."""

    # sub-classes should set the message type
    MESSAGE_TYPE = None
    MESSAGE_VERSION = '20110921'

    DEFAULT_ENDPOINT_NAME = 'default'

    @staticmethod
    def generate_id():
        """
        Generate a unique message id.

        There are places where we want a message id before we can
        build a complete message. This lets us do that in a consistent
        manner.
        """
        # `UUID.hex` returns the same value as the long-deprecated
        # `get_hex()` (which was removed in Python 3), so this works on
        # both Python 2 and Python 3.
        return uuid4().hex

    def process_fields(self, fields):
        """Fill in defaults for the fields common to all transport messages."""
        fields.setdefault('message_version', self.MESSAGE_VERSION)
        fields.setdefault('message_type', self.MESSAGE_TYPE)
        fields.setdefault('timestamp', datetime.utcnow())
        fields.setdefault('routing_metadata', {})
        fields.setdefault('helper_metadata', {})
        return fields

    def validate_fields(self):
        """Check the common fields; raises Missing/InvalidMessageField."""
        self.assert_field_value('message_version', self.MESSAGE_VERSION)
        # We might get older event messages without the `helper_metadata`
        # field.
        self.payload.setdefault('helper_metadata', {})
        self.assert_field_present(
            'message_type',
            'timestamp',
            'helper_metadata',
        )
        if self['message_type'] is None:
            raise InvalidMessageField('message_type')

    @property
    def routing_metadata(self):
        # Created lazily so older serialized messages acquire the field
        # on first access.
        return self.payload.setdefault('routing_metadata', {})

    @classmethod
    def check_routing_endpoint(cls, endpoint_name):
        """Map a `None` endpoint name to the default endpoint."""
        if endpoint_name is None:
            return cls.DEFAULT_ENDPOINT_NAME
        return endpoint_name

    def set_routing_endpoint(self, endpoint_name=None):
        endpoint_name = self.check_routing_endpoint(endpoint_name)
        self.routing_metadata['endpoint_name'] = endpoint_name

    def get_routing_endpoint(self):
        endpoint_name = self.routing_metadata.get('endpoint_name')
        return self.check_routing_endpoint(endpoint_name)
class TransportUserMessage(TransportMessage):
    """Message to or from a user.

    transport_type = sms, ussd, etc
    helper_metadata = for use by dispathers and off-to-the-side
                      components like failure workers (not for use
                      by transports or message workers).
    """
    MESSAGE_TYPE = 'user_message'

    # session event constants
    #
    # SESSION_NONE, SESSION_NEW, SESSION_RESUME, and SESSION_CLOSE
    # may be sent from the transport to a worker. SESSION_NONE indicates
    # there is no relevant session for this message.
    #
    # SESSION_NONE and SESSION_CLOSE may be sent from the worker to
    # the transport. SESSION_NONE indicates any existing session
    # should be continued. SESSION_CLOSE indicates that any existing
    # session should be terminated after sending the message.
    SESSION_NONE, SESSION_NEW, SESSION_RESUME, SESSION_CLOSE = (
        None, 'new', 'resume', 'close')

    # list of valid session events
    SESSION_EVENTS = frozenset([SESSION_NONE, SESSION_NEW, SESSION_RESUME,
                                SESSION_CLOSE])

    # canonical transport types
    TT_HTTP_API = 'http_api'
    TT_IRC = 'irc'
    TT_TELNET = 'telnet'
    TT_TWITTER = 'twitter'
    TT_SMS = 'sms'
    TT_USSD = 'ussd'
    TT_XMPP = 'xmpp'
    TT_MXIT = 'mxit'
    TT_WECHAT = 'wechat'

    TRANSPORT_TYPES = set([TT_HTTP_API, TT_IRC, TT_TELNET, TT_TWITTER, TT_SMS,
                           TT_USSD, TT_XMPP, TT_MXIT, TT_WECHAT])

    AT_IRC_NICKNAME = 'irc_nickname'
    AT_TWITTER_HANDLE = 'twitter_handle'
    AT_MSISDN = 'msisdn'
    AT_GTALK_ID = 'gtalk_id'
    AT_JABBER_ID = 'jabber_id'
    AT_MXIT_ID = 'mxit_id'
    AT_WECHAT_ID = 'wechat_id'

    ADDRESS_TYPES = set([
        AT_IRC_NICKNAME, AT_TWITTER_HANDLE, AT_MSISDN, AT_GTALK_ID,
        AT_JABBER_ID, AT_MXIT_ID, AT_WECHAT_ID])

    # Fields that must come from the message being replied to and may not
    # be overridden via keyword arguments to reply()/reply_group().
    # BUGFIX: this list previously read `'provider' 'transport_name'` (no
    # comma), so implicit string-literal concatenation produced the single
    # entry 'providertransport_name' and overrides of 'provider' and
    # 'transport_name' were silently accepted.
    _REPLY_PROTECTED_FIELDS = (
        # If we're not using this addressing, we shouldn't be replying.
        'to_addr', 'from_addr', 'group', 'in_reply_to', 'provider',
        # These three belong together and are supposed to be opaque.
        'transport_name', 'transport_type', 'transport_metadata')

    def process_fields(self, fields):
        """Fill in defaults for the user-message-specific fields."""
        fields = super(TransportUserMessage, self).process_fields(fields)
        fields.setdefault('message_id', self.generate_id())
        fields.setdefault('in_reply_to', None)
        fields.setdefault('provider', None)
        fields.setdefault('session_event', None)
        fields.setdefault('content', None)
        fields.setdefault('transport_metadata', {})
        fields.setdefault('group', None)
        fields.setdefault('to_addr_type', None)
        fields.setdefault('from_addr_type', None)
        return fields

    def validate_fields(self):
        super(TransportUserMessage, self).validate_fields()
        # We might get older message versions without the `group` or `provider`
        # fields.
        self.payload.setdefault('group', None)
        self.payload.setdefault('provider', None)
        self.assert_field_present(
            'message_id',
            'to_addr',
            'from_addr',
            'in_reply_to',
            'session_event',
            'content',
            'transport_name',
            'transport_type',
            'transport_metadata',
            'group',
            'provider',
        )
        if self['session_event'] not in self.SESSION_EVENTS:
            raise InvalidMessageField("Invalid session_event %r"
                                      % (self['session_event'],))

    def user(self):
        """Return the address of the user this message came from."""
        return self['from_addr']

    def reply(self, content, continue_session=True, **kw):
        """Construct a reply message.

        The reply message will have its `to_addr` field set to the original
        message's `from_addr`. This means that even if the original message is
        directed to the group only (i.e. it has `to_addr` set to `None`), the
        reply will be directed to the sender of the original message.

        :meth:`reply` is suitable for constructing both one-to-one messages
        (such as SMS) and directed messages within a group chat (such as
        name-prefixed content in an IRC channel message).

        If `session_event` is provided in the keyword args,
        `continue_session` will be ignored.

        NOTE: Certain fields are required to come from the message being
        replied to and may not be overridden by this method (see
        ``_REPLY_PROTECTED_FIELDS``):

            'to_addr', 'from_addr', 'group', 'in_reply_to', 'provider',
            'transport_name', 'transport_type', 'transport_metadata'

        FIXME: `helper_metadata` should *not* be copied to the reply message.
               We only do it here because a bunch of legacy code relies on it.
        """
        session_event = None if continue_session else self.SESSION_CLOSE
        for field in self._REPLY_PROTECTED_FIELDS:
            if field in kw:
                # Other "bad keyword argument" conditions cause TypeErrors.
                raise TypeError("'%s' may not be overridden." % (field,))
        fields = {
            'helper_metadata': self['helper_metadata'],  # XXX: See above.
            'session_event': session_event,
            'to_addr': self['from_addr'],
            'from_addr': self['to_addr'],
            'group': self['group'],
            'in_reply_to': self['message_id'],
            'provider': self['provider'],
            'transport_name': self['transport_name'],
            'transport_type': self['transport_type'],
            'transport_metadata': self['transport_metadata'],
        }
        fields.update(kw)
        out_msg = TransportUserMessage(content=content, **fields)
        # The reply should go out the same endpoint it came in.
        out_msg.set_routing_endpoint(self.get_routing_endpoint())
        return out_msg

    def reply_group(self, *args, **kw):
        """Construct a group reply message.

        If the `group` field is set to `None`, :meth:`reply_group` is identical
        to :meth:`reply`.

        If the `group` field is not set to `None`, the reply message will have
        its `to_addr` field set to `None`. This means that even if the original
        message is directed to an individual within the group (i.e. its
        `to_addr` is not set to `None`), the reply will be directed to the
        group as a whole.

        :meth:`reply_group` is suitable for both one-to-one messages (such as
        SMS) and undirected messages within a group chat (such as IRC channel
        messages).
        """
        out_msg = self.reply(*args, **kw)
        if self['group'] is not None:
            out_msg['to_addr'] = None
        return out_msg

    @classmethod
    def send(cls, to_addr, content, **kw):
        """Build an unsolicited outbound message (not a reply)."""
        kw.setdefault('from_addr', None)
        kw.setdefault('transport_name', None)
        kw.setdefault('transport_type', None)
        out_msg = cls(
            to_addr=to_addr,
            in_reply_to=None,
            content=content,
            session_event=cls.SESSION_NONE,
            **kw)
        return out_msg
class TransportEvent(TransportMessage):
    """Message about a TransportUserMessage."""
    MESSAGE_TYPE = 'event'

    # list of valid delivery statuses
    DELIVERY_STATUSES = frozenset(('pending', 'failed', 'delivered'))

    # map of event_types -> extra fields
    EVENT_TYPES = {
        'ack': {'sent_message_id': lambda v: v is not None},
        'nack': {'nack_reason': lambda v: v is not None},
        'delivery_report': {
            'delivery_status': lambda v: v in TransportEvent.DELIVERY_STATUSES,
        },
    }

    def process_fields(self, fields):
        fields = super(TransportEvent, self).process_fields(fields)
        fields.setdefault('event_id', self.generate_id())
        return fields

    def validate_fields(self):
        super(TransportEvent, self).validate_fields()
        self.assert_field_present('user_message_id', 'event_id', 'event_type')
        kind = self.payload['event_type']
        if kind not in self.EVENT_TYPES:
            raise InvalidMessageField("Unknown event_type %r" % (kind,))
        # Each event type demands its own extra fields, validated by the
        # predicates registered in EVENT_TYPES.
        for name, is_valid in self.EVENT_TYPES[kind].items():
            self.assert_field_present(name)
            if not is_valid(self[name]):
                raise InvalidMessageField(name)
|
#!/usr/bin/env python
import os,sys,math
def usage():
    """
    This program is implementing a pairwise comparison for a selected
    number of criteria for villages of Tonle Sap.
    """
    # Print the usage banner in colour. "tput setaf N" changes the
    # terminal foreground colour (assumes a POSIX terminal): 2 = green.
    os.system("tput setaf 2")
    print("\n----------------------------------------------------------")
    print("Usage:")
    print("----------------------------------------------------------")
    print("This program needs the tablefile.csv in the same directory\n")
    print("TL.exe crit1 crit2 crit3 crit4 crit5 weigt btter scren")
    print("\n\n")
    print("It will run with five criteria:")
    print("\tcrit1: MA_INDACT, MA_PROXROAD (1 choice)")
    print("\tcrit2: TAGAP_DRY, TAGAP_WET (1 choice)")
    print("\tcrit3: SW_PROXRIV, SW_PONDS (1 choice)")
    print("\tcrit4: GW_DWELL, GW_BOREW (1 choice)")
    print("\tcrit5: IRRI_SCH, IRRI_HEAD (1 choice)")
    print("\tweigt: 0.0[,0.0[,..]] (comma separated float vals)")
    print("\tbtter: m[,m[,..]] (comma separated [m;l], i.e. more or less)")
    print("\tscren: POP,>,200 [ SEXR,<,0.5 [ ..]] (comma separated info)")
    print("----------------------------------------------------------")
    print("\tscren is a set of screening columns thresholds")
    print("\tit tells the program to mask out using thresholds")
    print("\tavailable types are:")
    print("\tTOPOZONE,INUNDATION,ELEVATION,DRYRAIN,SOIL_LOWP,SOIL_MEDP,SOIL_HIGHP,TAGAP_DRY,TAGAP_WET,CONZ_PROX,CONZ_PEOP,POP,SEXR,KW_UPTOSEC,KW_ILLIT,LF_RICE,LF_VEGE,LF_LSC,LF_WAGED,INDLIVELI,MIGRANTS,PL_P1HH,PL_P2HH,PL_NONPHH,RYLD_WET,RYLD_DRY,RYLD_DANDW,RYLD_RANDI,LA_RICE1HA,LA_CULT1HA,INDAGRIM,A_IRRIC,A_IRRIR,A_IRRIP,A_IRRIW")
    print("\t")
    print("\ti.e. TOPOZONE,<=,3 (comma separators are compulsory")
    print("\ti.e. ELEVATION,>=,5")
    print("----------------------------------------------------------")
    # 3 = yellow, for the first worked example.
    os.system("tput setaf 3")
    print("Example 1:")
    print("TL.py 0 0 0 0 0 1.0,1.0,1.0,1.0,1.0 m,l,l,m,m")
    print("Means that:")
    print("TL.py MA_INDACT TAGAP_DRY SW_PROXRIV GW_DWELL IRRI_SCH 1.0,1.0,1.0 more,less,less")
    print("----------------------------------------------------------")
    # 4 = blue, for the second worked example.
    os.system("tput setaf 4")
    print("Example 2:")
    print("TL.py 0 0 1 0 0 1.0,1.0,1.0,1.0,1.0 m,l,l,l,l")
    print("Means that:")
    print("TL.py MA_INDACT TAGAP_DRY SW_PONDS GW_DWELL IRRI_SCH 1.0,1.0,1.0,1.0,1.0 more,less,more,less,less")
    print("----------------------------------------------------------")
    print("\n")
    # 9 = reset to the terminal's default foreground colour.
    os.system("tput setaf 9")
#import the csv file
import numpy as np

# Load the whole villages attribute table (header row skipped, comma separated).
data = np.genfromtxt("tablefile.csv", skip_header=1, delimiter=",")
#Clarify the table access order, TO BE CHANGED IF NEW tablefile.csv
#Access XCOORD Full Column with data[:,137]
#Access YCOORD Full Column with data[:,138]
# NOTE(review): the comments above mention columns 137/138 for X/Y, but the
# code reads columns 0/1 — confirm which layout tablefile.csv actually uses.
XL = np.asarray(data[:, 0])
YL = np.asarray(data[:, 1])
#Create Village output list (accumulated outranking score per village)
VL = np.zeros(data.shape[0])
#Create Village MASK list (1 = keep, 0 = screened out)
MK = np.ones(data.shape[0])
#Create outranking criteria column list
#Access MA_INDACT Full Column with data[:,39]
#Access MA_PROXROAD Full Column with data[:,40]
#Access TAGAP_DRY Full Column with data[:,41]
#Access TAGAP_WET Full Column with data[:,42]
#Access SW_PROXRIV Full Column with data[:,43]
#Access SW_PONDS Full Column with data[:,44]
#Access GW_DWELL Full Column with data[:,45]
#Access GW_BOREW Full Column with data[:,46]
#Access IRRI_SCH Full Column with data[:,47]
#Access IRRI_HEAD Full Column with data[:,48]
#set critcolno with any of the critno[index]
mastercritno = [39, 40, 41, 42, 43, 44, 45, 46, 47, 48]
#------------------------
#PARSING ARGUMENTS
#------------------------
# argv layout: TL.py crit1 crit2 crit3 crit4 crit5 weights better [screen ...]
# BUG FIX: sys.argv[6] (weights) and sys.argv[7] (better) are read
# unconditionally below, so fewer than 8 argv entries must be rejected here;
# the old check (< 6) let short command lines crash later with IndexError.
if (len(sys.argv) < 8):
    os.system("tput setaf 1")
    print("\ninsufficient amount of input variables")
    os.system("tput setaf 9")
    usage()
    exit(1)
#Collect the user's choices for the criteria (each "0" or anything else)
crit1 = sys.argv[1]
crit2 = sys.argv[2]
crit3 = sys.argv[3]
crit4 = sys.argv[4]
crit5 = sys.argv[5]
#Create column index of selected criteria: each slot offers two columns,
#"0" selects the first of the pair, any other integer selects the second.
critno = []
critno.append(39 if int(crit1) == 0 else 40)  # MA_INDACT / MA_PROXROAD
critno.append(41 if int(crit2) == 0 else 42)  # TAGAP_DRY / TAGAP_WET
critno.append(43 if int(crit3) == 0 else 44)  # SW_PROXRIV / SW_PONDS
critno.append(45 if int(crit4) == 0 else 46)  # GW_DWELL / GW_BOREW
critno.append(47 if int(crit5) == 0 else 48)  # IRRI_SCH / IRRI_HEAD
#Collect the weight list (one float per criterium, comma separated)
w = sys.argv[6].split(",")
if (len(w) < 5):
    os.system("tput setaf 1")
    print("\nWeights list has less than 5 criteria members")
    os.system("tput setaf 9")
    usage()
    exit(1)
#Collect the "more/less is better" list ('m' or 'l' per criterium)
lmib = sys.argv[7].split(',')
if (len(lmib) < 5):
    os.system("tput setaf 1")
    print("\nList of 'more/less' has less than 5 criteria members")
    os.system("tput setaf 9")
    usage()
    exit(1)
def mask(b, c, d):
    """Zero out MK[i] for every row whose column *b* value satisfies the
    comparison *c* against threshold *d* (masked villages get output 0).

    b -- column index into the global `data` table
    c -- one of "<=", ">=", "==", "!=", "<", ">"
    d -- threshold value (string from argv is accepted and coerced)
    """
    # BUG FIX: d arrives as a string straight from sys.argv; in Python 2 a
    # float-vs-str comparison is ordered by type, never by value, so the
    # screening thresholds silently misbehaved.  Coerce to float first.
    d = float(d)
    tests = {
        "<=": lambda x: x <= d,
        ">=": lambda x: x >= d,
        "==": lambda x: x == d,
        "!=": lambda x: x != d,
        "<": lambda x: x < d,
        ">": lambda x: x > d,
    }
    test = tests.get(c)
    if test is None:
        # BUG FIX: the original formatted the column number (b) into this
        # message; the unrecognised token is the comparison symbol (c).
        # Also report once instead of once per data row.
        print("Not understood %s command, skipping" % c)
        return
    #nullify the MK output multiplicator if applies
    for i in range(data.shape[0]):
        if test(data[i, b]):
            MK[i] = 0
# Optional screening stage: argv[8:] holds "NAME,op,threshold" triplets
# that mask villages out of the final output via mask().
if(len(sys.argv)>8):
    #Create the masking list
    screnlist=[]
    for i in range(8,len(sys.argv),1):
        screnlist.append(sys.argv[i])
    # Map each screening column name to its column index in tablefile.csv.
    scren={'TOPOZONE':4,'INUNDATION':5,'ELEVATION':6,'DRYRAIN':7,'SOIL_LOWP':8,'SOIL_MEDP':9,'SOIL_HIGHP':10,'TAGAP_DRY':11,'TAGAP_WET':12,'CONZ_PROX':13,'CONZ_PEOP':14,'POP':15,'SEXR':16,'KW_UPTOSEC':17,'KW_ILLIT':18,'LF_RICE':19,'LF_VEGE':20,'LF_LSC':21,'LF_WAGED':22,'INDLIVELI':23,'MIGRANTS':24,'PL_P1HH':25,'PL_P2HH':26,'PL_NONPHH':27,'RYLD_WET':28,'RYLD_DRY':29,'RYLD_DANDW':30,'RYLD_RANDI':31,'LA_RICE1HA':32,'LA_CULT1HA':33,'INDAGRIM':34,'A_IRRIC':35,'A_IRRIR':36,'A_IRRIP':37,'A_IRRIW':38}
    for i in range(len(screnlist)):
        a=screnlist[i].split(',')
        try:
            b=scren[a[0]]#extract col number from dict
            c=a[1]#Get comparison symbol
            d=a[2]#Get threshold value
            mask(b,c,d)
        except:
            # NOTE(review): the bare except also hides IndexError from a
            # malformed triplet (missing commas), not only an unknown name —
            # consider catching KeyError/IndexError explicitly.
            print("screening name typo %s, will be ignored" % a[0])
#------------------------
#END OF PARSING ARGUMENTS
#------------------------
def mkvalrange(datacol):
    """Observed spread (max - min) of one criterium column, used to
    normalise pairwise differences to the column's value range."""
    lo = np.min(datacol)
    hi = np.max(datacol)
    return hi - lo
def vpwc(val1, val2, valrange):
    """Village pairwise comparison: the signed difference of two criterium
    values, normalised by the column's value range."""
    diff = val1 - val2
    return diff / valrange
def assignvpwc(vil1rowno,vil2rowno,critcolno,counter):
    """Compare two villages on one criterium and split the weighted,
    range-normalised difference into per-village contributions.

    Returns (vil1out, vil2out); at most one of the pair is non-zero.
    `counter` indexes the global weight list `w` and more/less list `lmib`.
    Exits the program if lmib holds anything other than 'm' or 'l'.
    """
    #print(vil1rowno,vil2rowno,critcolno,VL.shape[0])
    #Compute value range for a given criterium
    #HINT: use stats range for column or row data table
    datacol=data[:,critcolno]
    valrange=mkvalrange(datacol)
    #get value from each village for a given criterium
    val1=data[vil1rowno,critcolno]
    val2=data[vil2rowno,critcolno]
    #compute pairwise comparison for a given criterion
    # NOTE(review): a constant column gives valrange 0 and a nan here;
    # the caller skips nan results via math.isnan.
    value=vpwc(val1,val2,valrange)
    #Assign given weight to value
    value*=float(w[counter])
    #Adjust the LessIsBetter parameter from User input
    if(lmib[counter]=="m"):
        LessIsBetter=False
    elif(lmib[counter]=="l"):
        LessIsBetter=True
    else:
        print("list of more/less has a typo, use 'm' or 'l'")
        usage()
        exit(1)
    # Route the signed value to whichever village the comparison favours;
    # the other village of the pair contributes nothing for this criterium.
    if(LessIsBetter==True):
        if(value > 0):
            vil1out=float(value)
            vil2out=0.0
        elif(value == 0):
            vil1out=0.0
            vil2out=0.0
        else:
            vil2out=float(value)
            vil1out=0.0
    else:#MoreIsBetter
        if(value < 0):
            vil1out=float(value)
            vil2out=0.0
        elif(value == 0):
            vil1out=0.0
            vil2out=0.0
        else:
            vil2out=float(value)
            vil1out=0.0
    ##return vil1out,vil2out
    return vil1out,vil2out
#set counter for weight and lmib user input forwarding
counter = 0
for critcolno in critno:
    print("critcolno=%d" % critcolno)
    # All-pairs outranking: every village is compared with every other
    # village on this criterium (O(n^2) calls per criterium).
    for vil1rowno in range(data.shape[0]):
        for vil2rowno in range(data.shape[0]):
            (a, b) = assignvpwc(vil1rowno, vil2rowno, critcolno, counter)
            # nan marks a zero-range criterium column: no information, skip.
            if(False == math.isnan(a)):
                VL[vil1rowno] += a
            if(False == math.isnan(b)):
                VL[vil2rowno] += b
    counter += 1
#Remove negative values
VL = VL.clip(0)
#Rescale 0 to 1
VLM = np.max(VL)
VLm = np.min(VL)
VLr = VLM - VLm
# NOTE(review): if every score is identical VLr is 0 and the division
# below produces nan/inf for all entries — confirm inputs rule this out.
VL = [(i - VLm) / VLr for i in VL]
#Temporary stage: OUTPUT an XYZ text file
f = open("villages.csv", "w")
# BUG FIX: was range(data.shape[0]-1), which silently dropped the last
# village from the csv; the shapefile stage below writes all rows.
for i in range(data.shape[0]):
    try:
        strng = (str(XL[i]) + "," + str(YL[i]) + "," + str(VL[i] * MK[i]) + "\n")
        f.write(strng)
    except Exception:
        # Best-effort output: report the failure and keep writing rows.
        # (Was a bare except, which also swallowed KeyboardInterrupt.)
        print("Error writing csv file, skipping row")
f.close()
#Final stage: Create a new shapefile directly
import osgeo.ogr, osgeo.osr

sr = osgeo.osr.SpatialReference()
# EPSG:3148 — projected CRS matching the input coordinates; TODO confirm.
sr.ImportFromEPSG(3148)
driver = osgeo.ogr.GetDriverByName('ESRI Shapefile')
# The shapefile driver will not overwrite; remove any previous output first.
if os.path.exists('points.shp'):
    driver.DeleteDataSource('points.shp')
shpData = driver.CreateDataSource('points.shp')
if shpData is None:
    # BUG FIX: was a Python-2-only `print` statement; the function form
    # matches the rest of the file and stays valid on Python 3.
    print(' Could not create file')
    sys.exit(1)
lyr = shpData.CreateLayer('layer1', sr, osgeo.ogr.wkbPoint)
lyrDef = lyr.GetLayerDefn()
idField = osgeo.ogr.FieldDefn("ID_0", osgeo.ogr.OFTReal)
lyr.CreateField(idField)
fidx = 0
for i in range(len(XL)):
    #Apply data MASK: screened-out villages are not written at all
    if(MK[i]):
        ftr = osgeo.ogr.Feature(lyrDef)
        pt = osgeo.ogr.Geometry(osgeo.ogr.wkbPoint)
        pt.SetPoint(0, XL[i], YL[i])
        ftr.SetGeometry(pt)
        ftr.SetFID(fidx)
        ftr.SetField(ftr.GetFieldIndex('ID_0'), VL[i] * MK[i])
        lyr.CreateFeature(ftr)
        fidx += 1
shpData.Destroy()
#create shapefile projection file (.prj) alongside the .shp
sr.MorphToESRI()
# BUG FIX: renamed `file`, which shadowed the builtin of the same name.
prj_file = open('points.prj', 'w')
prj_file.write(sr.ExportToWkt())
prj_file.close()
# First draft masking arguments  (stray commit-message residue; commented out so the file parses)
#!/usr/bin/env python
import os,sys,math
def usage():
    """
    This program is implementing a pairwise comparison for a selected
    number of criteria for villages of Tonle Sap.
    """
    # Help text in green (tput setaf 2); examples in yellow then blue;
    # colour is reset to the terminal default (setaf 9) before returning.
    os.system("tput setaf 2")
    print("\n----------------------------------------------------------")
    print("Usage:")
    print("----------------------------------------------------------")
    print("This program needs the tablefile.csv in the same directory\n")
    # NOTE(review): banner says TL.exe while the examples use TL.py — confirm.
    print("TL.exe crit1 crit2 crit3 crit4 crit5 weigt btter scren")
    print("\n\n")
    print("It will run with five criteria:")
    print("\tcrit1: MA_INDACT, MA_PROXROAD (1 choice)")
    print("\tcrit2: TAGAP_DRY, TAGAP_WET (1 choice)")
    print("\tcrit3: SW_PROXRIV, SW_PONDS (1 choice)")
    print("\tcrit4: GW_DWELL, GW_BOREW (1 choice)")
    print("\tcrit5: IRRI_SCH, IRRI_HEAD (1 choice)")
    print("\tweigt: 0.0[,0.0[,..]] (comma separated float vals)")
    print("\tbtter: m[,m[,..]] (comma separated [m;l], i.e. more or less)")
    print("\tscren: POP,>,200 [ SEXR,<,0.5 [ ..]] (comma separated info)")
    print("----------------------------------------------------------")
    print("\tscren is a set of screening columns thresholds")
    print("\tit tells the program to mask out using thresholds")
    print("\tavailable types are:")
    print("\tTOPOZONE,INUNDATION,ELEVATION,DRYRAIN,SOIL_LOWP,SOIL_MEDP,SOIL_HIGHP,TAGAP_DRY,TAGAP_WET,CONZ_PROX,CONZ_PEOP,POP,SEXR,KW_UPTOSEC,KW_ILLIT,LF_RICE,LF_VEGE,LF_LSC,LF_WAGED,INDLIVELI,MIGRANTS,PL_P1HH,PL_P2HH,PL_NONPHH,RYLD_WET,RYLD_DRY,RYLD_DANDW,RYLD_RANDI,LA_RICE1HA,LA_CULT1HA,INDAGRIM,A_IRRIC,A_IRRIR,A_IRRIP,A_IRRIW")
    print("\t")
    print("\ti.e. TOPOZONE,<=,3 (comma separators are compulsory)")
    print("\ti.e. ELEVATION,>=,5")
    print("----------------------------------------------------------")
    os.system("tput setaf 3")
    print("Example 1:")
    print("TL.py 0 0 0 0 0 1.0,1.0,1.0,1.0,1.0 m,l,l,m,m")
    print("Means that:")
    print("TL.py MA_INDACT TAGAP_DRY SW_PROXRIV GW_DWELL IRRI_SCH 1.0,1.0,1.0 more,less,less")
    print("----------------------------------------------------------")
    os.system("tput setaf 4")
    print("Example 2:")
    print("TL.py 0 0 1 0 0 1.0,1.0,1.0,1.0,1.0 m,l,l,l,l")
    print("Means that:")
    print("TL.py MA_INDACT TAGAP_DRY SW_PONDS GW_DWELL IRRI_SCH 1.0,1.0,1.0,1.0,1.0 more,less,more,less,less")
    print("----------------------------------------------------------")
    print("\n")
    os.system("tput setaf 9")
#import the csv file
import numpy as np

# Load the whole villages attribute table (header row skipped, comma separated).
data = np.genfromtxt("tablefile.csv", skip_header=1, delimiter=",")
#Clarify the table access order, TO BE CHANGED IF NEW tablefile.csv
#Access XCOORD Full Column with data[:,137]
#Access YCOORD Full Column with data[:,138]
# NOTE(review): the comments above mention columns 137/138 for X/Y, but the
# code reads columns 0/1 — confirm which layout tablefile.csv actually uses.
XL = np.asarray(data[:, 0])
YL = np.asarray(data[:, 1])
#Create Village output list (accumulated outranking score per village)
VL = np.zeros(data.shape[0])
#Create Village MASK list (1 = keep, 0 = screened out)
MK = np.ones(data.shape[0])
#Create outranking criteria column list
#Access MA_INDACT Full Column with data[:,39]
#Access MA_PROXROAD Full Column with data[:,40]
#Access TAGAP_DRY Full Column with data[:,41]
#Access TAGAP_WET Full Column with data[:,42]
#Access SW_PROXRIV Full Column with data[:,43]
#Access SW_PONDS Full Column with data[:,44]
#Access GW_DWELL Full Column with data[:,45]
#Access GW_BOREW Full Column with data[:,46]
#Access IRRI_SCH Full Column with data[:,47]
#Access IRRI_HEAD Full Column with data[:,48]
#set critcolno with any of the critno[index]
mastercritno = [39, 40, 41, 42, 43, 44, 45, 46, 47, 48]
#------------------------
#PARSING ARGUMENTS
#------------------------
# argv layout: TL.py crit1 crit2 crit3 crit4 crit5 weights better [screen ...]
# BUG FIX: sys.argv[6] (weights) and sys.argv[7] (better) are read
# unconditionally below, so fewer than 8 argv entries must be rejected here;
# the old check (< 6) let short command lines crash later with IndexError.
if (len(sys.argv) < 8):
    os.system("tput setaf 1")
    print("\ninsufficient amount of input variables")
    os.system("tput setaf 9")
    usage()
    exit(1)
#Collect the user's choices for the criteria (each "0" or anything else)
crit1 = sys.argv[1]
crit2 = sys.argv[2]
crit3 = sys.argv[3]
crit4 = sys.argv[4]
crit5 = sys.argv[5]
#Create column index of selected criteria: each slot offers two columns,
#"0" selects the first of the pair, any other integer selects the second.
critno = []
critno.append(39 if int(crit1) == 0 else 40)  # MA_INDACT / MA_PROXROAD
critno.append(41 if int(crit2) == 0 else 42)  # TAGAP_DRY / TAGAP_WET
critno.append(43 if int(crit3) == 0 else 44)  # SW_PROXRIV / SW_PONDS
critno.append(45 if int(crit4) == 0 else 46)  # GW_DWELL / GW_BOREW
critno.append(47 if int(crit5) == 0 else 48)  # IRRI_SCH / IRRI_HEAD
#Collect the weight list (one float per criterium, comma separated)
w = sys.argv[6].split(",")
if (len(w) < 5):
    os.system("tput setaf 1")
    print("\nWeights list has less than 5 criteria members")
    os.system("tput setaf 9")
    usage()
    exit(1)
#Collect the "more/less is better" list ('m' or 'l' per criterium)
lmib = sys.argv[7].split(',')
if (len(lmib) < 5):
    os.system("tput setaf 1")
    print("\nList of 'more/less' has less than 5 criteria members")
    os.system("tput setaf 9")
    usage()
    exit(1)
def mask(b, c, d):
    """Zero out MK[i] for every row whose column *b* value satisfies the
    comparison *c* against threshold *d* (masked villages get output 0).

    b -- column index into the global `data` table
    c -- one of "<=", ">=", "==", "!=", "<", ">"
    d -- threshold value (string from argv is accepted and coerced)
    """
    # BUG FIX: d arrives as a string straight from sys.argv; in Python 2 a
    # float-vs-str comparison is ordered by type, never by value, so the
    # screening thresholds silently misbehaved.  Coerce to float first.
    d = float(d)
    tests = {
        "<=": lambda x: x <= d,
        ">=": lambda x: x >= d,
        "==": lambda x: x == d,
        "!=": lambda x: x != d,
        "<": lambda x: x < d,
        ">": lambda x: x > d,
    }
    test = tests.get(c)
    if test is None:
        # BUG FIX: the original formatted the column number (b) into this
        # message; the unrecognised token is the comparison symbol (c).
        # Also report once instead of once per data row.
        print("Not understood %s command, skipping" % c)
        return
    #nullify the MK output multiplicator if applies
    for i in range(data.shape[0]):
        if test(data[i, b]):
            MK[i] = 0
# Optional screening stage: argv[8:] holds "NAME,op,threshold" triplets
# that mask villages out of the final output via mask().
if(len(sys.argv)>8):
    #Create the masking list
    screnlist=[]
    for i in range(8,len(sys.argv),1):
        screnlist.append(sys.argv[i])
    # Map each screening column name to its column index in tablefile.csv.
    scren={'TOPOZONE':4,'INUNDATION':5,'ELEVATION':6,'DRYRAIN':7,'SOIL_LOWP':8,'SOIL_MEDP':9,'SOIL_HIGHP':10,'TAGAP_DRY':11,'TAGAP_WET':12,'CONZ_PROX':13,'CONZ_PEOP':14,'POP':15,'SEXR':16,'KW_UPTOSEC':17,'KW_ILLIT':18,'LF_RICE':19,'LF_VEGE':20,'LF_LSC':21,'LF_WAGED':22,'INDLIVELI':23,'MIGRANTS':24,'PL_P1HH':25,'PL_P2HH':26,'PL_NONPHH':27,'RYLD_WET':28,'RYLD_DRY':29,'RYLD_DANDW':30,'RYLD_RANDI':31,'LA_RICE1HA':32,'LA_CULT1HA':33,'INDAGRIM':34,'A_IRRIC':35,'A_IRRIR':36,'A_IRRIP':37,'A_IRRIW':38}
    for i in range(len(screnlist)):
        a=screnlist[i].split(',')
        try:
            b=scren[a[0]]#extract col number from dict
            c=a[1]#Get comparison symbol
            d=a[2]#Get threshold value
            mask(b,c,d)
        except:
            # NOTE(review): the bare except also hides IndexError from a
            # malformed triplet (missing commas), not only an unknown name —
            # consider catching KeyError/IndexError explicitly.
            print("screening name typo %s, will be ignored" % a[0])
#------------------------
#END OF PARSING ARGUMENTS
#------------------------
def mkvalrange(datacol):
    """Observed spread (max - min) of one criterium column, used to
    normalise pairwise differences to the column's value range."""
    lo = np.min(datacol)
    hi = np.max(datacol)
    return hi - lo
def vpwc(val1, val2, valrange):
    """Village pairwise comparison: the signed difference of two criterium
    values, normalised by the column's value range."""
    diff = val1 - val2
    return diff / valrange
def assignvpwc(vil1rowno,vil2rowno,critcolno,counter):
    """Compare two villages on one criterium and split the weighted,
    range-normalised difference into per-village contributions.

    Returns (vil1out, vil2out); at most one of the pair is non-zero.
    `counter` indexes the global weight list `w` and more/less list `lmib`.
    Exits the program if lmib holds anything other than 'm' or 'l'.
    """
    #print(vil1rowno,vil2rowno,critcolno,VL.shape[0])
    #Compute value range for a given criterium
    #HINT: use stats range for column or row data table
    datacol=data[:,critcolno]
    valrange=mkvalrange(datacol)
    #get value from each village for a given criterium
    val1=data[vil1rowno,critcolno]
    val2=data[vil2rowno,critcolno]
    #compute pairwise comparison for a given criterion
    # NOTE(review): a constant column gives valrange 0 and a nan here;
    # the caller skips nan results via math.isnan.
    value=vpwc(val1,val2,valrange)
    #Assign given weight to value
    value*=float(w[counter])
    #Adjust the LessIsBetter parameter from User input
    if(lmib[counter]=="m"):
        LessIsBetter=False
    elif(lmib[counter]=="l"):
        LessIsBetter=True
    else:
        print("list of more/less has a typo, use 'm' or 'l'")
        usage()
        exit(1)
    # Route the signed value to whichever village the comparison favours;
    # the other village of the pair contributes nothing for this criterium.
    if(LessIsBetter==True):
        if(value > 0):
            vil1out=float(value)
            vil2out=0.0
        elif(value == 0):
            vil1out=0.0
            vil2out=0.0
        else:
            vil2out=float(value)
            vil1out=0.0
    else:#MoreIsBetter
        if(value < 0):
            vil1out=float(value)
            vil2out=0.0
        elif(value == 0):
            vil1out=0.0
            vil2out=0.0
        else:
            vil2out=float(value)
            vil1out=0.0
    ##return vil1out,vil2out
    return vil1out,vil2out
#set counter for weight and lmib user input forwarding
counter = 0
for critcolno in critno:
    print("critcolno=%d" % critcolno)
    # All-pairs outranking: every village is compared with every other
    # village on this criterium (O(n^2) calls per criterium).
    for vil1rowno in range(data.shape[0]):
        for vil2rowno in range(data.shape[0]):
            (a, b) = assignvpwc(vil1rowno, vil2rowno, critcolno, counter)
            # nan marks a zero-range criterium column: no information, skip.
            if(False == math.isnan(a)):
                VL[vil1rowno] += a
            if(False == math.isnan(b)):
                VL[vil2rowno] += b
    counter += 1
#Remove negative values
VL = VL.clip(0)
#Rescale 0 to 1
VLM = np.max(VL)
VLm = np.min(VL)
VLr = VLM - VLm
# NOTE(review): if every score is identical VLr is 0 and the division
# below produces nan/inf for all entries — confirm inputs rule this out.
VL = [(i - VLm) / VLr for i in VL]
#Temporary stage: OUTPUT an XYZ text file
f = open("villages.csv", "w")
# BUG FIX: was range(data.shape[0]-1), which silently dropped the last
# village from the csv; the shapefile stage below writes all rows.
for i in range(data.shape[0]):
    try:
        strng = (str(XL[i]) + "," + str(YL[i]) + "," + str(VL[i] * MK[i]) + "\n")
        f.write(strng)
    except Exception:
        # Best-effort output: report the failure and keep writing rows.
        # (Was a bare except, which also swallowed KeyboardInterrupt.)
        print("Error writing csv file, skipping row")
f.close()
#Final stage: Create a new shapefile directly
import osgeo.ogr, osgeo.osr

sr = osgeo.osr.SpatialReference()
# EPSG:3148 — projected CRS matching the input coordinates; TODO confirm.
sr.ImportFromEPSG(3148)
driver = osgeo.ogr.GetDriverByName('ESRI Shapefile')
# The shapefile driver will not overwrite; remove any previous output first.
if os.path.exists('points.shp'):
    driver.DeleteDataSource('points.shp')
shpData = driver.CreateDataSource('points.shp')
if shpData is None:
    # BUG FIX: was a Python-2-only `print` statement; the function form
    # matches the rest of the file and stays valid on Python 3.
    print(' Could not create file')
    sys.exit(1)
lyr = shpData.CreateLayer('layer1', sr, osgeo.ogr.wkbPoint)
lyrDef = lyr.GetLayerDefn()
idField = osgeo.ogr.FieldDefn("ID_0", osgeo.ogr.OFTReal)
lyr.CreateField(idField)
fidx = 0
for i in range(len(XL)):
    #Apply data MASK: screened-out villages are not written at all
    if(MK[i]):
        ftr = osgeo.ogr.Feature(lyrDef)
        pt = osgeo.ogr.Geometry(osgeo.ogr.wkbPoint)
        pt.SetPoint(0, XL[i], YL[i])
        ftr.SetGeometry(pt)
        ftr.SetFID(fidx)
        ftr.SetField(ftr.GetFieldIndex('ID_0'), VL[i] * MK[i])
        lyr.CreateFeature(ftr)
        fidx += 1
shpData.Destroy()
#create shapefile projection file (.prj) alongside the .shp
sr.MorphToESRI()
# BUG FIX: renamed `file`, which shadowed the builtin of the same name.
prj_file = open('points.prj', 'w')
prj_file.write(sr.ExportToWkt())
prj_file.close()
# stray file-separator residue ("|") commented out so the file parses
# Copyright 2012-2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from functools import lru_cache
from itertools import chain
from pathlib import Path
import enum
import json
import os
import pickle
import re
import typing as T
import hashlib
from .. import build
from .. import dependencies
from .. import programs
from .. import mesonlib
from .. import mlog
from ..compilers import LANGUAGES_USING_LDFLAGS, detect
from ..mesonlib import (
File, MachineChoice, MesonException, OptionType, OrderedSet, OptionOverrideProxy,
classify_unity_sources, OptionKey, join_args
)
if T.TYPE_CHECKING:
    # Names used only in annotations; importing them lazily here avoids
    # runtime import cycles between backend and interpreter modules.
    from .._typing import ImmutableListProtocol
    from ..arglist import CompilerArgs
    from ..compilers import Compiler
    from ..environment import Environment
    from ..interpreter import Interpreter, Test
    from ..linkers import StaticLinker
    from ..mesonlib import FileMode, FileOrString
    from ..wrap import WrapMode

    from typing_extensions import TypedDict

    class TargetIntrospectionData(TypedDict):
        # Shape of one entry emitted by target introspection.
        language: str
        compiler : T.List[str]
        parameters: T.List[str]
        sources: T.List[str]
        generated_sources: T.List[str]
# Languages that can mix with C or C++ but don't support unity builds yet
# because the syntax we use for unity builds is specific to C/++/ObjC/++.
# Assembly files cannot be unitified and neither can LLVM IR files
LANGS_CANT_UNITY = ('d', 'fortran', 'vala')
class RegenInfo:

    """Paths needed to decide whether the build must be regenerated."""

    def __init__(self, source_dir: str, build_dir: str, depfiles: T.List[str]):
        """Record the two tree roots and the dependency files to watch."""
        self.source_dir, self.build_dir = source_dir, build_dir
        self.depfiles = depfiles
class TestProtocol(enum.Enum):

    """Result-reporting protocol a test executable speaks."""

    EXITCODE = 0
    TAP = 1
    GTEST = 2
    RUST = 3

    @classmethod
    def from_str(cls, string: str) -> 'TestProtocol':
        """Parse the user-facing protocol name; MesonException on junk."""
        by_name = {
            'exitcode': cls.EXITCODE,
            'tap': cls.TAP,
            'gtest': cls.GTEST,
            'rust': cls.RUST,
        }
        try:
            return by_name[string]
        except KeyError:
            raise MesonException(f'unknown test format {string}') from None

    def __str__(self) -> str:
        cls = type(self)
        # TAP is the fallback spelling, matching the original elif chain.
        spellings = {cls.EXITCODE: 'exitcode', cls.GTEST: 'gtest', cls.RUST: 'rust'}
        return spellings.get(self, 'tap')
class CleanTrees:
    '''
    Directories outputted by custom targets that have to be manually cleaned
    because on Linux `ninja clean` only deletes empty directories.
    '''
    def __init__(self, build_dir: str, trees: T.List[str]):
        """Record the build root and the directory trees to delete."""
        self.build_dir, self.trees = build_dir, trees
class InstallData:
    """Aggregates everything scheduled for installation (targets, headers,
    man pages, data files, symlinks, scripts, subdirs)."""
    def __init__(self, source_dir: str, build_dir: str, prefix: str, libdir: str,
                 strip_bin: T.List[str], install_umask: T.Union[str, int],
                 mesonintrospect: T.List[str], version: str):
        # TODO: in python 3.8 or with typing_Extensions install_umask could be:
        # `T.Union[T.Literal['preserve'], int]`, which would be more accurate.
        self.source_dir = source_dir
        self.build_dir = build_dir
        self.prefix = prefix
        self.libdir = libdir
        # Command used to strip binaries at install time.
        self.strip_bin = strip_bin
        self.install_umask = install_umask
        # Per-kind install lists; populated by the backend after construction.
        self.targets: T.List[TargetInstallData] = []
        self.headers: T.List[InstallDataBase] = []
        self.man: T.List[InstallDataBase] = []
        self.emptydir: T.List[InstallEmptyDir] = []
        self.data: T.List[InstallDataBase] = []
        self.symlinks: T.List[InstallSymlinkData] = []
        self.install_scripts: T.List[ExecutableSerialisation] = []
        self.install_subdirs: T.List[SubdirInstallData] = []
        self.mesonintrospect = mesonintrospect
        self.version = version
class TargetInstallData:

    """Description of one built target scheduled for installation."""

    # TODO: install_mode should just always be a FileMode object
    def __init__(self, fname: str, outdir: str, outdir_name: str, aliases: T.Dict[str, str],
                 strip: bool, install_name_mappings: T.Mapping[str, str], rpath_dirs_to_remove: T.Set[bytes],
                 install_rpath: str, install_mode: T.Optional['FileMode'],
                 subproject: str, optional: bool = False, tag: T.Optional[str] = None):
        """Store the install request; the destination keeps only the
        basename of the built file, joined under *outdir_name*."""
        self.fname = fname
        self.outdir = outdir
        self.out_name = os.path.join(outdir_name, os.path.basename(fname))
        self.aliases = aliases
        self.strip = strip
        self.install_name_mappings = install_name_mappings
        self.rpath_dirs_to_remove = rpath_dirs_to_remove
        self.install_rpath = install_rpath
        self.install_mode = install_mode
        self.subproject = subproject
        self.optional, self.tag = optional, tag
class InstallEmptyDir:

    """One empty directory to create at install time."""

    def __init__(self, path: str, install_mode: 'FileMode', subproject: str, tag: T.Optional[str] = None):
        """Record the directory path, its mode, and the owning subproject."""
        self.path = path
        self.subproject = subproject
        self.install_mode = install_mode
        self.tag = tag
class InstallDataBase:

    """Common record for one installable file: source path, destination,
    mode, owning subproject, plus optional classification fields."""

    def __init__(self, path: str, install_path: str, install_path_name: str,
                 install_mode: 'FileMode', subproject: str, tag: T.Optional[str] = None,
                 data_type: T.Optional[str] = None):
        """Store all installation parameters verbatim."""
        self.path = path
        self.install_path, self.install_path_name = install_path, install_path_name
        self.install_mode = install_mode
        self.subproject = subproject
        self.tag, self.data_type = tag, data_type
class InstallSymlinkData:

    """One symlink to create at install time: link *name* placed in
    *install_path*, pointing at *target*."""

    def __init__(self, target: str, name: str, install_path: str,
                 subproject: str, tag: T.Optional[str] = None):
        """Store the link description verbatim."""
        self.target, self.name = target, name
        self.install_path = install_path
        self.subproject = subproject
        self.tag = tag
class SubdirInstallData(InstallDataBase):

    """InstallDataBase for a whole directory, plus the exclusion sets
    applied while copying it."""

    def __init__(self, path: str, install_path: str, install_path_name: str,
                 install_mode: 'FileMode', exclude: T.Tuple[T.Set[str], T.Set[str]],
                 subproject: str, tag: T.Optional[str] = None, data_type: T.Optional[str] = None):
        """Delegate the common fields, then record the exclusions."""
        super().__init__(path, install_path, install_path_name, install_mode,
                         subproject, tag, data_type)
        self.exclude = exclude
class ExecutableSerialisation:

    """Description of one wrapped command execution, later serialised for
    the script runner."""

    # XXX: should capture and feed default to False, instead of None?
    def __init__(self, cmd_args: T.List[str],
                 env: T.Optional[build.EnvironmentVariables] = None,
                 exe_wrapper: T.Optional['programs.ExternalProgram'] = None,
                 workdir: T.Optional[str] = None,
                 extra_paths: T.Optional[T.List] = None,
                 capture: T.Optional[bool] = None,
                 feed: T.Optional[bool] = None,
                 tag: T.Optional[str] = None,
                 verbose: bool = False,
                 ) -> None:
        # Caller-supplied execution parameters.
        self.cmd_args = cmd_args
        self.env = env
        self.workdir = workdir
        self.extra_paths = extra_paths
        self.capture, self.feed = capture, feed
        self.verbose = verbose
        self.tag = tag
        # A wrapper, when given, must be a real ExternalProgram instance.
        if exe_wrapper is not None:
            assert isinstance(exe_wrapper, programs.ExternalProgram)
        self.exe_wrapper = exe_wrapper
        # Fields mutated later by the backends rather than by callers.
        self.pickled = False
        self.skip_if_destdir = False
        self.subproject = ''
class TestSerialisation:
    """All information needed to run one test, serialised for mtest."""
    def __init__(self, name: str, project: str, suite: T.List[str], fname: T.List[str],
                 is_cross_built: bool, exe_wrapper: T.Optional[programs.ExternalProgram],
                 needs_exe_wrapper: bool, is_parallel: bool, cmd_args: T.List[str],
                 env: build.EnvironmentVariables, should_fail: bool,
                 timeout: T.Optional[int], workdir: T.Optional[str],
                 extra_paths: T.List[str], protocol: TestProtocol, priority: int,
                 cmd_is_built: bool, depends: T.List[str], version: str):
        self.name = name
        self.project_name = project
        self.suite = suite
        self.fname = fname
        self.is_cross_built = is_cross_built
        # A wrapper, when given, must be a real ExternalProgram instance.
        if exe_wrapper is not None:
            assert isinstance(exe_wrapper, programs.ExternalProgram)
        self.exe_wrapper = exe_wrapper
        self.is_parallel = is_parallel
        self.cmd_args = cmd_args
        self.env = env
        self.should_fail = should_fail
        self.timeout = timeout
        self.workdir = workdir
        self.extra_paths = extra_paths
        # How the test reports results (see TestProtocol above).
        self.protocol = protocol
        self.priority = priority
        self.needs_exe_wrapper = needs_exe_wrapper
        self.cmd_is_built = cmd_is_built
        self.depends = depends
        self.version = version
def get_backend_from_name(backend: str, build: T.Optional[build.Build] = None, interpreter: T.Optional['Interpreter'] = None) -> T.Optional['Backend']:
    """Instantiate the backend selected by name, or return None if unknown.

    Backend modules are imported lazily inside each branch so only the
    chosen backend's module is loaded.
    """
    if backend == 'ninja':
        from . import ninjabackend
        return ninjabackend.NinjaBackend(build, interpreter)
    elif backend == 'vs':
        # Bare 'vs' auto-detects the installed Visual Studio version.
        from . import vs2010backend
        return vs2010backend.autodetect_vs_version(build, interpreter)
    elif backend == 'vs2010':
        from . import vs2010backend
        return vs2010backend.Vs2010Backend(build, interpreter)
    elif backend == 'vs2012':
        from . import vs2012backend
        return vs2012backend.Vs2012Backend(build, interpreter)
    elif backend == 'vs2013':
        from . import vs2013backend
        return vs2013backend.Vs2013Backend(build, interpreter)
    elif backend == 'vs2015':
        from . import vs2015backend
        return vs2015backend.Vs2015Backend(build, interpreter)
    elif backend == 'vs2017':
        from . import vs2017backend
        return vs2017backend.Vs2017Backend(build, interpreter)
    elif backend == 'vs2019':
        from . import vs2019backend
        return vs2019backend.Vs2019Backend(build, interpreter)
    elif backend == 'vs2022':
        from . import vs2022backend
        return vs2022backend.Vs2022Backend(build, interpreter)
    elif backend == 'xcode':
        from . import xcodebackend
        return xcodebackend.XCodeBackend(build, interpreter)
    return None
# This class contains the basic functionality that is needed by all backends.
# Feel free to move stuff in and out of it as you see fit.
class Backend:
environment: T.Optional['Environment']
    def __init__(self, build: T.Optional[build.Build], interpreter: T.Optional['Interpreter']):
        # Make it possible to construct a dummy backend
        # This is used for introspection without a build directory
        if build is None:
            self.environment = None
            return
        self.build = build
        self.interpreter = interpreter
        self.environment = build.environment
        # Targets already generated; lets backends avoid emitting twice.
        self.processed_targets: T.Set[str] = set()
        self.name = '<UNKNOWN>'
        self.build_dir = self.environment.get_build_dir()
        self.source_dir = self.environment.get_source_dir()
        # Cached relative paths between the two roots, used throughout for
        # include arguments and target path construction.
        self.build_to_src = mesonlib.relpath(self.environment.get_source_dir(),
                                             self.environment.get_build_dir())
        self.src_to_build = mesonlib.relpath(self.environment.get_build_dir(),
                                             self.environment.get_source_dir())
def generate(self) -> None:
raise RuntimeError(f'generate is not implemented in {type(self).__name__}')
    def get_target_filename(self, t: T.Union[build.Target, build.CustomTargetIndex], *, warn_multi_output: bool = True) -> str:
        """Primary output path of a target, relative to the build root."""
        if isinstance(t, build.CustomTarget):
            # Custom targets may declare several outputs; only the first
            # is used here, with an optional warning.
            if warn_multi_output and len(t.get_outputs()) != 1:
                mlog.warning(f'custom_target {t.name!r} has more than one output! '
                             'Using the first one.')
            filename = t.get_outputs()[0]
        elif isinstance(t, build.CustomTargetIndex):
            # An index refers to exactly one output already.
            filename = t.get_outputs()[0]
        else:
            assert isinstance(t, build.BuildTarget)
            filename = t.get_filename()
        return os.path.join(self.get_target_dir(t), filename)
def get_target_filename_abs(self, target: T.Union[build.Target, build.CustomTargetIndex]) -> str:
return os.path.join(self.environment.get_build_dir(), self.get_target_filename(target))
def get_base_options_for_target(self, target: build.BuildTarget) -> OptionOverrideProxy:
return OptionOverrideProxy(target.option_overrides_base,
{k: v for k, v in self.environment.coredata.options.items()
if k.type in {OptionType.BASE, OptionType.BUILTIN}})
def get_compiler_options_for_target(self, target: build.BuildTarget) -> OptionOverrideProxy:
comp_reg = {k: v for k, v in self.environment.coredata.options.items() if k.is_compiler()}
comp_override = target.option_overrides_compiler
return OptionOverrideProxy(comp_override, comp_reg)
    def get_option_for_target(self, option_name: 'OptionKey', target: build.BuildTarget) -> T.Union[str, int, bool, 'WrapMode']:
        """Resolve an option for a target: a per-target override wins,
        otherwise the (per-subproject) coredata value is used."""
        if option_name in target.option_overrides_base:
            override = target.option_overrides_base[option_name]
            # Overrides originate from meson.build strings; validate/coerce.
            v = self.environment.coredata.validate_option_value(option_name, override)
        else:
            v = self.environment.coredata.get_option(option_name.evolve(subproject=target.subproject))
        # We don't actually have wrapmode here to do an assert, so just do a
        # cast, we know what's in coredata anyway.
        # TODO: if it's possible to annotate get_option or validate_option_value
        # in the future we might be able to remove the cast here
        return T.cast(T.Union[str, int, bool, 'WrapMode'], v)
def get_source_dir_include_args(self, target: build.BuildTarget, compiler: 'Compiler', *, absolute_path: bool = False) -> T.List[str]:
curdir = target.get_subdir()
if absolute_path:
lead = self.source_dir
else:
lead = self.build_to_src
tmppath = os.path.normpath(os.path.join(lead, curdir))
return compiler.get_include_args(tmppath, False)
def get_build_dir_include_args(self, target: build.BuildTarget, compiler: 'Compiler', *, absolute_path: bool = False) -> T.List[str]:
if absolute_path:
curdir = os.path.join(self.build_dir, target.get_subdir())
else:
curdir = target.get_subdir()
if curdir == '':
curdir = '.'
return compiler.get_include_args(curdir, False)
    def get_target_filename_for_linking(self, target: T.Union[build.Target, build.CustomTargetIndex]) -> T.Optional[str]:
        """Path of the file to pass to the linker for this target, or None
        when the target produces nothing linkable (plain executables)."""
        # On some platforms (msvc for instance), the file that is used for
        # dynamic linking is not the same as the dynamic library itself. This
        # file is called an import library, and we want to link against that.
        # On all other platforms, we link to the library directly.
        if isinstance(target, build.SharedLibrary):
            link_lib = target.get_import_filename() or target.get_filename()
            return os.path.join(self.get_target_dir(target), link_lib)
        elif isinstance(target, build.StaticLibrary):
            return os.path.join(self.get_target_dir(target), target.get_filename())
        elif isinstance(target, (build.CustomTarget, build.CustomTargetIndex)):
            if not target.is_linkable_target():
                raise MesonException(f'Tried to link against custom target "{target.name}", which is not linkable.')
            return os.path.join(self.get_target_dir(target), target.get_filename())
        elif isinstance(target, build.Executable):
            # Executables are linkable only when they expose an import
            # library (e.g. on Windows); otherwise there is nothing to link.
            if target.import_filename:
                return os.path.join(self.get_target_dir(target), target.get_import_filename())
            else:
                return None
        raise AssertionError(f'BUG: Tried to link to {target!r} which is not linkable')
    @lru_cache(maxsize=None)
    def get_target_dir(self, target: T.Union[build.Target, build.CustomTargetIndex]) -> str:
        """Directory (relative to the build root) holding the target's
        outputs; cached because it is queried constantly."""
        if isinstance(target, build.RunTarget):
            # this produces no output, only a dummy top-level name
            dirname = ''
        elif self.environment.coredata.get_option(OptionKey('layout')) == 'mirror':
            # 'mirror' layout reproduces the source tree inside the build dir.
            dirname = target.get_subdir()
        else:
            # Any other layout collects everything under one directory.
            dirname = 'meson-out'
        return dirname
def get_target_dir_relative_to(self, t: build.Target, o: build.Target) -> str:
'''Get a target dir relative to another target's directory'''
target_dir = os.path.join(self.environment.get_build_dir(), self.get_target_dir(t))
othert_dir = os.path.join(self.environment.get_build_dir(), self.get_target_dir(o))
return os.path.relpath(target_dir, othert_dir)
def get_target_source_dir(self, target: build.Target) -> str:
# if target dir is empty, avoid extraneous trailing / from os.path.join()
target_dir = self.get_target_dir(target)
if target_dir:
return os.path.join(self.build_to_src, target_dir)
return self.build_to_src
def get_target_private_dir(self, target: T.Union[build.BuildTarget, build.CustomTarget, build.CustomTargetIndex]) -> str:
return os.path.join(self.get_target_filename(target, warn_multi_output=False) + '.p')
def get_target_private_dir_abs(self, target: T.Union[build.BuildTarget, build.CustomTarget, build.CustomTargetIndex]) -> str:
return os.path.join(self.environment.get_build_dir(), self.get_target_private_dir(target))
    @lru_cache(maxsize=None)
    def get_target_generated_dir(
            self, target: T.Union[build.BuildTarget, build.CustomTarget, build.CustomTargetIndex],
            gensrc: T.Union[build.CustomTarget, build.CustomTargetIndex, build.GeneratedList],
            src: str) -> str:
        """
        Takes a BuildTarget, a generator source (CustomTarget or GeneratedList),
        and a generated source filename.
        Returns the full path of the generated source relative to the build root
        """
        # CustomTarget generators output to the build dir of the CustomTarget
        if isinstance(gensrc, (build.CustomTarget, build.CustomTargetIndex)):
            return os.path.join(self.get_target_dir(gensrc), src)
        # GeneratedList generators output to the private build directory of the
        # target that the GeneratedList is used in
        return os.path.join(self.get_target_private_dir(target), src)
def get_unity_source_file(self, target: T.Union[build.BuildTarget, build.CustomTarget, build.CustomTargetIndex],
                          suffix: str, number: int) -> mesonlib.File:
    """Return the built File for unity chunk *number* of *target*."""
    # A user source literally named <target>-unity<N>.<suffix> would clash,
    # but that is extremely unlikely in practice.
    unity_name = f'{target.name}-unity{number}.{suffix}'
    return mesonlib.File.from_built_file(self.get_target_private_dir(target), unity_name)
def generate_unity_files(self, target: build.BuildTarget, unity_src: str) -> T.List[mesonlib.File]:
    """Write per-language unity source files for *target*, each #including at
    most ``unity_size`` of the target's sources, and return the built Files.

    NOTE(review): *unity_src* is forwarded to classify_unity_sources() as a
    collection of sources despite the ``str`` annotation — confirm against
    callers.
    """
    abs_files: T.List[str] = []
    result: T.List[mesonlib.File] = []
    compsrcs = classify_unity_sources(target.compilers.values(), unity_src)
    unity_size = self.get_option_for_target(OptionKey('unity_size'), target)
    assert isinstance(unity_size, int), 'for mypy'

    def init_language_file(suffix: str, unity_file_number: int) -> T.TextIO:
        # Open the next unity file (as a .tmp sibling); appends to the
        # enclosing abs_files/result lists as a side effect.
        unity_src = self.get_unity_source_file(target, suffix, unity_file_number)
        outfileabs = unity_src.absolute_path(self.environment.get_source_dir(),
                                             self.environment.get_build_dir())
        outfileabs_tmp = outfileabs + '.tmp'
        abs_files.append(outfileabs)
        outfileabs_tmp_dir = os.path.dirname(outfileabs_tmp)
        if not os.path.exists(outfileabs_tmp_dir):
            os.makedirs(outfileabs_tmp_dir)
        result.append(unity_src)
        return open(outfileabs_tmp, 'w', encoding='utf-8')

    # For each language, generate unity source files and return the list
    for comp, srcs in compsrcs.items():
        # Start above the limit so the first source always opens a new file.
        files_in_current = unity_size + 1
        unity_file_number = 0
        # TODO: this could be simplified with an algorithm that pre-sorts
        # the sources into the size of chunks we want
        ofile = None
        for src in srcs:
            if files_in_current >= unity_size:
                if ofile:
                    ofile.close()
                ofile = init_language_file(comp.get_default_suffix(), unity_file_number)
                unity_file_number += 1
                files_in_current = 0
            ofile.write(f'#include<{src}>\n')
            files_in_current += 1
        if ofile:
            ofile.close()

    # Only replace the real files when contents changed, to avoid
    # needlessly dirtying timestamps.
    for x in abs_files:
        mesonlib.replace_if_different(x, x + '.tmp')
    return result
@staticmethod
def relpath(todir: str, fromdir: str) -> str:
return os.path.relpath(os.path.join('dummyprefixdir', todir),
os.path.join('dummyprefixdir', fromdir))
def flatten_object_list(self, target: build.BuildTarget, proj_dir_to_build_root: str = '') -> T.List[str]:
    """Return *target*'s object files, recursively flattened, with
    duplicates dropped while preserving first-seen order."""
    flat = self._flatten_object_list(target, target.get_objects(), proj_dir_to_build_root)
    # dict.fromkeys() de-duplicates and keeps insertion order.
    return list(dict.fromkeys(flat))
def _flatten_object_list(self, target: build.BuildTarget,
                         objects: T.Sequence[T.Union[str, 'File', build.ExtractedObjects]],
                         proj_dir_to_build_root: str) -> T.List[str]:
    """Recursively expand *objects* (strings, Files, ExtractedObjects) into a
    flat list of object-file paths prefixed with *proj_dir_to_build_root*."""
    obj_list: T.List[str] = []
    for obj in objects:
        if isinstance(obj, str):
            # Plain strings are relative to the target's source subdir.
            o = os.path.join(proj_dir_to_build_root,
                             self.build_to_src, target.get_subdir(), obj)
            obj_list.append(o)
        elif isinstance(obj, mesonlib.File):
            if obj.is_built:
                # Built files already live under the build root.
                o = os.path.join(proj_dir_to_build_root,
                                 obj.rel_to_builddir(self.build_to_src))
                obj_list.append(o)
            else:
                # Source-tree files: make them relative to the build dir.
                o = os.path.join(proj_dir_to_build_root,
                                 self.build_to_src)
                obj_list.append(obj.rel_to_builddir(o))
        elif isinstance(obj, build.ExtractedObjects):
            # Objects extracted from another target; recurse when requested.
            if obj.recursive:
                obj_list += self._flatten_object_list(obj.target, obj.objlist, proj_dir_to_build_root)
            obj_list += self.determine_ext_objs(obj, proj_dir_to_build_root)
        else:
            raise MesonException('Unknown data type in object list.')
    return obj_list
@staticmethod
def is_swift_target(target: build.BuildTarget) -> bool:
for s in target.sources:
if s.endswith('swift'):
return True
return False
def determine_swift_dep_dirs(self, target: build.BuildTarget) -> T.List[str]:
    """Return the absolute private dirs of every target *target* links with."""
    return [self.get_target_private_dir_abs(dep) for dep in target.link_targets]
def get_executable_serialisation(
        self, cmd: T.Sequence[T.Union[programs.ExternalProgram, build.BuildTarget, build.CustomTarget, File, str]],
        workdir: T.Optional[str] = None,
        extra_bdeps: T.Optional[T.List[build.BuildTarget]] = None,
        capture: T.Optional[bool] = None,
        feed: T.Optional[bool] = None,
        env: T.Optional[build.EnvironmentVariables] = None,
        tag: T.Optional[str] = None,
        verbose: bool = False) -> 'ExecutableSerialisation':
    """Lower *cmd* (a program plus arguments of mixed types) into an
    ExecutableSerialisation: resolve the program to a command list,
    stringify the arguments, pick the machine it runs for, add Windows
    PATH entries, and attach an exe wrapper when cross execution needs one.

    Raises MesonException when a required exe wrapper is missing.
    """
    # XXX: cmd_args either need to be lowered to strings, or need to be checked for non-string arguments, right?
    exe, *raw_cmd_args = cmd
    if isinstance(exe, programs.ExternalProgram):
        exe_cmd = exe.get_command()
        exe_for_machine = exe.for_machine
    elif isinstance(exe, build.BuildTarget):
        exe_cmd = [self.get_target_filename_abs(exe)]
        exe_for_machine = exe.for_machine
    elif isinstance(exe, build.CustomTarget):
        # The output of a custom target can either be directly runnable
        # or not, that is, a script, a native binary or a cross compiled
        # binary when exe wrapper is available and when it is not.
        # This implementation is not exhaustive but it works in the
        # common cases.
        exe_cmd = [self.get_target_filename_abs(exe)]
        exe_for_machine = MachineChoice.BUILD
    elif isinstance(exe, mesonlib.File):
        exe_cmd = [exe.rel_to_builddir(self.environment.source_dir)]
        exe_for_machine = MachineChoice.BUILD
    else:
        exe_cmd = [exe]
        exe_for_machine = MachineChoice.BUILD

    # Lower the remaining arguments to plain strings.
    cmd_args: T.List[str] = []
    for c in raw_cmd_args:
        if isinstance(c, programs.ExternalProgram):
            p = c.get_path()
            assert isinstance(p, str)
            cmd_args.append(p)
        elif isinstance(c, (build.BuildTarget, build.CustomTarget)):
            cmd_args.append(self.get_target_filename_abs(c))
        elif isinstance(c, mesonlib.File):
            cmd_args.append(c.rel_to_builddir(self.environment.source_dir))
        else:
            cmd_args.append(c)

    # Windows/Cygwin have no rpath; extra PATH entries emulate it.
    machine = self.environment.machines[exe_for_machine]
    if machine.is_windows() or machine.is_cygwin():
        extra_paths = self.determine_windows_extra_paths(exe, extra_bdeps or [])
    else:
        extra_paths = []

    is_cross_built = not self.environment.machines.matches_build_machine(exe_for_machine)
    if is_cross_built and self.environment.need_exe_wrapper():
        exe_wrapper = self.environment.get_exe_wrapper()
        if not exe_wrapper or not exe_wrapper.found():
            msg = 'An exe_wrapper is needed but was not found. Please define one ' \
                  'in cross file and check the command and/or add it to PATH.'
            raise MesonException(msg)
    else:
        # Natively runnable: JARs still need java, and .exe needs mono
        # when we are not on a Windows-like host.
        if exe_cmd[0].endswith('.jar'):
            exe_cmd = ['java', '-jar'] + exe_cmd
        elif exe_cmd[0].endswith('.exe') and not (mesonlib.is_windows() or mesonlib.is_cygwin() or mesonlib.is_wsl()):
            exe_cmd = ['mono'] + exe_cmd
        exe_wrapper = None

    workdir = workdir or self.environment.get_build_dir()
    return ExecutableSerialisation(exe_cmd + cmd_args, env,
                                   exe_wrapper, workdir,
                                   extra_paths, capture, feed, tag, verbose)
def as_meson_exe_cmdline(self, exe: T.Union[str, mesonlib.File, build.BuildTarget, build.CustomTarget, programs.ExternalProgram],
                         cmd_args: T.Sequence[T.Union[str, mesonlib.File, build.BuildTarget, build.CustomTarget, programs.ExternalProgram]],
                         workdir: T.Optional[str] = None,
                         extra_bdeps: T.Optional[T.List[build.BuildTarget]] = None,
                         capture: T.Optional[bool] = None,
                         feed: T.Optional[bool] = None,
                         force_serialize: bool = False,
                         env: T.Optional[build.EnvironmentVariables] = None,
                         verbose: bool = False) -> T.Tuple[T.Sequence[T.Union[str, File, build.Target, programs.ExternalProgram]], str]:
    '''
    Serialize an executable for running with a generator or a custom target

    Returns (command line, reason string). The reason is empty when the
    command can run directly; otherwise it explains why the command was
    wrapped in a 'meson --internal exe' invocation or pickled.
    '''
    cmd: T.List[T.Union[str, mesonlib.File, build.BuildTarget, build.CustomTarget, programs.ExternalProgram]] = []
    cmd.append(exe)
    cmd.extend(cmd_args)
    es = self.get_executable_serialisation(cmd, workdir, extra_bdeps, capture, feed, env, verbose=verbose)
    # Collect the conditions that make a plain command line insufficient.
    reasons: T.List[str] = []
    if es.extra_paths:
        reasons.append('to set PATH')
    if es.exe_wrapper:
        reasons.append('to use exe_wrapper')
    if workdir:
        reasons.append('to set workdir')
    if any('\n' in c for c in es.cmd_args):
        reasons.append('because command contains newlines')
    if es.env and es.env.varnames:
        reasons.append('to set env')
    # capture/feed alone do NOT force pickling — the direct
    # 'meson --internal exe' form below handles them via --capture/--feed —
    # so they are appended to *reasons* only after force_serialize is fixed.
    force_serialize = force_serialize or bool(reasons)

    if capture:
        reasons.append('to capture output')
    if feed:
        reasons.append('to feed input')

    if not force_serialize:
        if not capture and not feed:
            return es.cmd_args, ''
        args: T.List[str] = []
        if capture:
            args += ['--capture', str(capture)]
        if feed:
            args += ['--feed', str(feed)]
        return (
            self.environment.get_build_command() + ['--internal', 'exe'] + args + ['--'] + es.cmd_args,
            ', '.join(reasons)
        )

    if isinstance(exe, (programs.ExternalProgram,
                        build.BuildTarget, build.CustomTarget)):
        basename = exe.name
    elif isinstance(exe, mesonlib.File):
        basename = os.path.basename(exe.fname)
    else:
        basename = os.path.basename(exe)

    # Can't just use exe.name here; it will likely be run more than once
    # Take a digest of the cmd args, env, workdir, capture, and feed. This
    # avoids collisions and also makes the name deterministic over
    # regenerations which avoids a rebuild by Ninja because the cmdline
    # stays the same.
    hasher = hashlib.sha1()
    if es.env:
        es.env.hash(hasher)
    hasher.update(bytes(str(es.cmd_args), encoding='utf-8'))
    hasher.update(bytes(str(es.workdir), encoding='utf-8'))
    hasher.update(bytes(str(capture), encoding='utf-8'))
    hasher.update(bytes(str(feed), encoding='utf-8'))
    digest = hasher.hexdigest()
    scratch_file = f'meson_exe_{basename}_{digest}.dat'
    exe_data = os.path.join(self.environment.get_scratch_dir(), scratch_file)
    with open(exe_data, 'wb') as f:
        pickle.dump(es, f)
    return (self.environment.get_build_command() + ['--internal', 'exe', '--unpickle', exe_data],
            ', '.join(reasons))
def serialize_tests(self) -> T.Tuple[str, str]:
    """Pickle the test and benchmark setups into the scratch dir.

    Returns the two file paths (tests, benchmarks).
    """
    scratch = self.environment.get_scratch_dir()
    test_data = os.path.join(scratch, 'meson_test_setup.dat')
    with open(test_data, 'wb') as datafile:
        self.write_test_file(datafile)
    benchmark_data = os.path.join(scratch, 'meson_benchmark_setup.dat')
    with open(benchmark_data, 'wb') as datafile:
        self.write_benchmark_file(datafile)
    return test_data, benchmark_data
def determine_linker_and_stdlib_args(self, target: build.BuildTarget) -> T.Tuple[T.Union['Compiler', 'StaticLinker'], T.List[str]]:
    '''
    If we're building a static library, there is only one static linker.
    Otherwise, we query the target for the dynamic linker.
    '''
    if isinstance(target, build.StaticLibrary):
        return self.build.static_linker[target.for_machine], []
    return target.get_clink_dynamic_linker_and_stdlibs()
@staticmethod
def _libdir_is_system(libdir: str, compilers: T.Mapping[str, 'Compiler'], env: 'Environment') -> bool:
libdir = os.path.normpath(libdir)
for cc in compilers.values():
if libdir in cc.get_library_dirs(env):
return True
return False
def get_external_rpath_dirs(self, target: build.BuildTarget) -> T.Set[str]:
    """Parse the external (LDFLAGS-style) link arguments of every
    LDFLAGS-using language and return each directory requested via
    -Wl,-rpath / -Wl,-R.

    Raises MesonException if --just-symbols points at a directory.
    """
    dirs: T.Set[str] = set()
    args: T.List[str] = []
    for lang in LANGUAGES_USING_LDFLAGS:
        try:
            e = self.environment.coredata.get_external_link_args(target.for_machine, lang)
            if isinstance(e, str):
                args.append(e)
            else:
                args.extend(e)
        except Exception:
            # Best-effort: skip languages whose external link args are
            # unavailable. NOTE(review): broad except also hides real errors.
            pass
    # Match rpath formats:
    # -Wl,-rpath=
    # -Wl,-rpath,
    rpath_regex = re.compile(r'-Wl,-rpath[=,]([^,]+)')
    # Match solaris style compat runpath formats:
    # -Wl,-R
    # -Wl,-R,
    runpath_regex = re.compile(r'-Wl,-R[,]?([^,]+)')
    # Match symbols formats:
    # -Wl,--just-symbols=
    # -Wl,--just-symbols,
    symbols_regex = re.compile(r'-Wl,--just-symbols[=,]([^,]+)')
    for arg in args:
        rpath_match = rpath_regex.match(arg)
        if rpath_match:
            for dir in rpath_match.group(1).split(':'):
                dirs.add(dir)
        runpath_match = runpath_regex.match(arg)
        if runpath_match:
            for dir in runpath_match.group(1).split(':'):
                # The symbols arg is an rpath if the path is a directory
                if Path(dir).is_dir():
                    dirs.add(dir)
        symbols_match = symbols_regex.match(arg)
        if symbols_match:
            for dir in symbols_match.group(1).split(':'):
                # Prevent usage of --just-symbols to specify rpath
                if Path(dir).is_dir():
                    raise MesonException(f'Invalid arg for --just-symbols, {dir} is a directory.')
    return dirs
def rpaths_for_bundled_shared_libraries(self, target: build.BuildTarget, exclude_system: bool = True) -> T.List[str]:
    """Return rpath directories for external dependencies whose sole link
    argument is an absolute path to a library file, recursing through
    linked and whole-linked targets.

    System library dirs are skipped unless *exclude_system* is False.
    """
    paths: T.List[str] = []
    for dep in target.external_deps:
        if not isinstance(dep, (dependencies.ExternalLibrary, dependencies.PkgConfigDependency)):
            continue
        la = dep.link_args
        if len(la) != 1 or not os.path.isabs(la[0]):
            continue
        # The only link argument is an absolute path to a library file.
        libpath = la[0]
        libdir = os.path.dirname(libpath)
        if exclude_system and self._libdir_is_system(libdir, target.compilers, self.environment):
            # No point in adding system paths.
            continue
        # Don't remove rpaths specified in LDFLAGS.
        if libdir in self.get_external_rpath_dirs(target):
            continue
        # Windows doesn't support rpaths, but we use this function to
        # emulate rpaths by setting PATH, so also accept DLLs here
        if os.path.splitext(libpath)[1] not in ['.dll', '.lib', '.so', '.dylib']:
            continue
        if libdir.startswith(self.environment.get_source_dir()):
            # In-source library: store the path relative to the build dir.
            rel_to_src = libdir[len(self.environment.get_source_dir()) + 1:]
            assert not os.path.isabs(rel_to_src), f'rel_to_src: {rel_to_src} is absolute'
            paths.append(os.path.join(self.build_to_src, rel_to_src))
        else:
            paths.append(libdir)
    # Recurse into everything this target links against.
    for i in chain(target.link_targets, target.link_whole_targets):
        if isinstance(i, build.BuildTarget):
            paths.extend(self.rpaths_for_bundled_shared_libraries(i, exclude_system))
    return paths
# This may take other types
def determine_rpath_dirs(self, target: T.Union[build.BuildTarget, build.CustomTarget, build.CustomTargetIndex]
                         ) -> T.Tuple[str, ...]:
    """Return the ordered, de-duplicated rpath dirs for *target*.

    Side effect: records the chosen dirs (utf-8 encoded) in
    target.rpath_dirs_to_remove for BuildTargets.
    """
    result: OrderedSet[str]
    if self.environment.coredata.get_option(OptionKey('layout')) == 'mirror':
        # Need a copy here
        result = OrderedSet(target.get_link_dep_subdirs())
    else:
        result = OrderedSet()
        result.add('meson-out')
    if isinstance(target, build.BuildTarget):
        result.update(self.rpaths_for_bundled_shared_libraries(target))
        target.rpath_dirs_to_remove.update([d.encode('utf-8') for d in result])
    return tuple(result)
@staticmethod
def canonicalize_filename(fname: str) -> str:
for ch in ('/', '\\', ':'):
fname = fname.replace(ch, '_')
return fname
def object_filename_from_source(self, target: build.BuildTarget, source: 'FileOrString') -> str:
    """Map *source* to the object filename it will compile to for *target*.

    Returns a canonicalized name with the machine's object suffix appended.
    """
    assert isinstance(source, mesonlib.File)
    build_dir = self.environment.get_build_dir()
    rel_src = source.rel_to_builddir(self.build_to_src)

    # foo.vala files compile down to foo.c and then foo.c.o, not foo.vala.o
    if rel_src.endswith(('.vala', '.gs')):
        # See description in generate_vala_compile for this logic.
        if source.is_built:
            if os.path.isabs(rel_src):
                rel_src = rel_src[len(build_dir) + 1:]
            rel_src = os.path.relpath(rel_src, self.get_target_private_dir(target))
        else:
            rel_src = os.path.basename(rel_src)
        # A meson- prefixed directory is reserved; hopefully no-one creates a file name with such a weird prefix.
        # Bug fix: strip the actual extension rather than always chopping
        # five characters, which mangled Genie '.gs' names ('foo.gs' -> 'f').
        gen_source = 'meson-generated_' + os.path.splitext(rel_src)[0] + '.c'
    elif source.is_built:
        if os.path.isabs(rel_src):
            rel_src = rel_src[len(build_dir) + 1:]
        targetdir = self.get_target_private_dir(target)
        # A meson- prefixed directory is reserved; hopefully no-one creates a file name with such a weird prefix.
        gen_source = 'meson-generated_' + os.path.relpath(rel_src, targetdir)
    else:
        if os.path.isabs(rel_src):
            # Use the absolute path directly to avoid file name conflicts
            gen_source = rel_src
        else:
            gen_source = os.path.relpath(os.path.join(build_dir, rel_src),
                                         os.path.join(self.environment.get_source_dir(), target.get_subdir()))
    machine = self.environment.machines[target.for_machine]
    return self.canonicalize_filename(gen_source) + '.' + machine.get_object_suffix()
def determine_ext_objs(self, extobj: 'build.ExtractedObjects', proj_dir_to_build_root: str) -> T.List[str]:
    """Return the object-file paths that *extobj* extracts from its target,
    prefixed with *proj_dir_to_build_root*."""
    result: T.List[str] = []

    # Merge sources and generated sources
    raw_sources = list(extobj.srclist)
    for gensrc in extobj.genlist:
        for r in gensrc.get_outputs():
            path = self.get_target_generated_dir(extobj.target, gensrc, r)
            dirpart, fnamepart = os.path.split(path)
            raw_sources.append(File(True, dirpart, fnamepart))

    # Filter out headers and all non-source files
    sources: T.List['FileOrString'] = []
    for s in raw_sources:
        if self.environment.is_source(s) and not self.environment.is_header(s):
            sources.append(s)
        elif self.environment.is_object(s):
            # Pre-built objects pass straight through.
            result.append(s.relative_name())

    # extobj could contain only objects and no sources
    if not sources:
        return result

    targetdir = self.get_target_private_dir(extobj.target)

    # With unity builds, sources don't map directly to objects,
    # we only support extracting all the objects in this mode,
    # so just return all object files.
    if self.is_unity(extobj.target):
        compsrcs = classify_unity_sources(extobj.target.compilers.values(), sources)
        sources = []
        unity_size = self.get_option_for_target(OptionKey('unity_size'), extobj.target)
        assert isinstance(unity_size, int), 'for mypy'
        for comp, srcs in compsrcs.items():
            if comp.language in LANGS_CANT_UNITY:
                # Languages that can't be unified keep their real sources.
                sources += srcs
                continue
            # Substitute the synthetic unity chunk files for the real sources.
            for i in range(len(srcs) // unity_size + 1):
                _src = self.get_unity_source_file(extobj.target,
                                                  comp.get_default_suffix(), i)
                sources.append(_src)

    # Map each (possibly unity) source to its object path.
    for osrc in sources:
        objname = self.object_filename_from_source(extobj.target, osrc)
        objpath = os.path.join(proj_dir_to_build_root, targetdir, objname)
        result.append(objpath)

    return result
def get_pch_include_args(self, compiler: 'Compiler', target: build.BuildTarget) -> T.List[str]:
    """Return include args for the target's private dir plus, when the
    target defines a PCH for this compiler's language, the args to use it."""
    private_dir = self.get_target_private_dir(target)
    include_args = compiler.get_include_args(private_dir, False)
    pch = target.get_pch(compiler.get_language())
    use_args = compiler.get_pch_use_args(private_dir, pch[0]) if pch else []
    return include_args + use_args
def create_msvc_pch_implementation(self, target: build.BuildTarget, lang: str, pch_header: str) -> str:
    """Write the trivial implementation file a PCH needs and return its
    build-root-relative path."""
    # Include the language in the name, otherwise pch.c and pch.cpp would
    # both end up as pch.obj in the VS backends.
    impl_name = f'meson_pch-{lang}.{lang}'
    rel_path = os.path.join(self.get_target_private_dir(target), impl_name)
    # The working directory is undefined, so anchor at the build dir to
    # guarantee the file lands in the right place.
    abs_path = os.path.join(self.build_dir, rel_path)
    os.makedirs(os.path.dirname(abs_path), exist_ok=True)
    tmp_path = abs_path + '.tmp'
    with open(tmp_path, 'w', encoding='utf-8') as f:
        f.write(f'#include "{os.path.basename(pch_header)}"')
    # Only touch the real file when its content changed.
    mesonlib.replace_if_different(abs_path, tmp_path)
    return rel_path
@staticmethod
def escape_extra_args(args: T.List[str]) -> T.List[str]:
# all backslashes in defines are doubly-escaped
extra_args: T.List[str] = []
for arg in args:
if arg.startswith(('-D', '/D')):
arg = arg.replace('\\', '\\\\')
extra_args.append(arg)
return extra_args
def get_no_stdlib_args(self, target: 'build.BuildTarget', compiler: 'Compiler') -> T.List[str]:
    """Return -nostdinc-style args when this target's machine has an entry
    for the compiler's language in the stdlibs table."""
    needs_no_stdinc = compiler.language in self.build.stdlibs[target.for_machine]
    return compiler.get_no_stdinc_args() if needs_no_stdinc else []
def generate_basic_compiler_args(self, target: build.BuildTarget, compiler: 'Compiler', no_warn_args: bool = False) -> 'CompilerArgs':
    """Assemble the baseline compile arguments for *target* with *compiler*.

    The ORDER of additions below is the contract: later sources are meant
    to override earlier ones (hard-coded defaults, then build options,
    project/global args, environment args, PIC/PIE, then dependency args).
    """
    # Create an empty commands list, and start adding arguments from
    # various sources in the order in which they must override each other
    # starting from hard-coded defaults followed by build options and so on.
    commands = compiler.compiler_args()

    copt_proxy = self.get_compiler_options_for_target(target)
    # First, the trivial ones that are impossible to override.
    #
    # Add -nostdinc/-nostdinc++ if needed; can't be overridden
    commands += self.get_no_stdlib_args(target, compiler)
    # Add things like /NOLOGO or -pipe; usually can't be overridden
    commands += compiler.get_always_args()
    # Only add warning-flags by default if the buildtype enables it, and if
    # we weren't explicitly asked to not emit warnings (for Vala, f.ex)
    if no_warn_args:
        commands += compiler.get_no_warn_args()
    else:
        # warning_level is a string, but mypy can't determine that
        commands += compiler.get_warn_args(T.cast(str, self.get_option_for_target(OptionKey('warning_level'), target)))
    # Add -Werror if werror=true is set in the build options set on the
    # command-line or default_options inside project(). This only sets the
    # action to be done for warnings if/when they are emitted, so it's ok
    # to set it after get_no_warn_args() or get_warn_args().
    if self.get_option_for_target(OptionKey('werror'), target):
        commands += compiler.get_werror_args()
    # Add compile args for c_* or cpp_* build options set on the
    # command-line or default_options inside project().
    commands += compiler.get_option_compile_args(copt_proxy)

    # Add buildtype args: optimization level, debugging, etc.
    buildtype = self.get_option_for_target(OptionKey('buildtype'), target)
    assert isinstance(buildtype, str), 'for mypy'
    commands += compiler.get_buildtype_args(buildtype)

    optimization = self.get_option_for_target(OptionKey('optimization'), target)
    assert isinstance(optimization, str), 'for mypy'
    commands += compiler.get_optimization_args(optimization)

    debug = self.get_option_for_target(OptionKey('debug'), target)
    assert isinstance(debug, bool), 'for mypy'
    commands += compiler.get_debug_args(debug)

    # Add compile args added using add_project_arguments()
    commands += self.build.get_project_args(compiler, target.subproject, target.for_machine)
    # Add compile args added using add_global_arguments()
    # These override per-project arguments
    commands += self.build.get_global_args(compiler, target.for_machine)
    # Using both /ZI and /Zi at the same times produces a compiler warning.
    # We do not add /ZI by default. If it is being used it is because the user has explicitly enabled it.
    # /ZI needs to be removed in that case to avoid cl's warning to that effect (D9025 : overriding '/ZI' with '/Zi')
    if ('/ZI' in commands) and ('/Zi' in commands):
        commands.remove('/Zi')
    # Compile args added from the env: CFLAGS/CXXFLAGS, etc, or the cross
    # file. We want these to override all the defaults, but not the
    # per-target compile args.
    commands += self.environment.coredata.get_external_args(target.for_machine, compiler.get_language())
    # Always set -fPIC for shared libraries
    if isinstance(target, build.SharedLibrary):
        commands += compiler.get_pic_args()
    # Set -fPIC for static libraries by default unless explicitly disabled
    if isinstance(target, build.StaticLibrary) and target.pic:
        commands += compiler.get_pic_args()
    elif isinstance(target, (build.StaticLibrary, build.Executable)) and target.pie:
        commands += compiler.get_pie_args()
    # Add compile args needed to find external dependencies. Link args are
    # added while generating the link command.
    # NOTE: We must preserve the order in which external deps are
    # specified, so we reverse the list before iterating over it.
    for dep in reversed(target.get_external_deps()):
        if not dep.found():
            continue

        if compiler.language == 'vala':
            # Vala consumes pkg-config deps via --pkg rather than cflags.
            if isinstance(dep, dependencies.PkgConfigDependency):
                if dep.name == 'glib-2.0' and dep.version_reqs is not None:
                    for req in dep.version_reqs:
                        if req.startswith(('>=', '==')):
                            commands += ['--target-glib', req[2:]]
                            break
                commands += ['--pkg', dep.name]
            elif isinstance(dep, dependencies.ExternalLibrary):
                commands += dep.get_link_args('vala')
        else:
            commands += compiler.get_dependency_compile_args(dep)
        # Qt needs -fPIC for executables
        # XXX: We should move to -fPIC for all executables
        if isinstance(target, build.Executable):
            commands += dep.get_exe_args(compiler)
        # For 'automagic' deps: Boost and GTest. Also dependency('threads').
        # pkg-config puts the thread flags itself via `Cflags:`
    # Fortran requires extra include directives.
    if compiler.language == 'fortran':
        for lt in chain(target.link_targets, target.link_whole_targets):
            priv_dir = self.get_target_private_dir(lt)
            commands += compiler.get_include_args(priv_dir, False)
    return commands
def build_target_link_arguments(self, compiler: 'Compiler', deps: T.List[build.Target]) -> T.List[str]:
    """Turn link dependencies into linker arguments for *compiler*.

    Raises RuntimeError for a dependency that is not linkable.
    """
    args: T.List[str] = []
    for dep in deps:
        if not dep.is_linkable_target():
            raise RuntimeError(f'Tried to link with a non-library target "{dep.get_basename()}".')
        filename = self.get_target_filename_for_linking(dep)
        if not filename:
            continue
        # The D compiler forwards linker flags via -Wl.
        prefix = '-Wl,' if compiler.get_language() == 'd' else compiler.get_linker_lib_prefix()
        args.append(prefix + filename)
    return args
def get_mingw_extra_paths(self, target: build.BuildTarget) -> T.List[str]:
    """Collect extra search paths for a MinGW cross environment."""
    paths: OrderedSet[str] = OrderedSet()
    props = self.environment.properties[target.for_machine]
    # The cross bindir
    root = props.get_root()
    if root:
        paths.add(os.path.join(root, 'bin'))
    # The toolchain bindir
    sys_root = props.get_sys_root()
    if sys_root:
        paths.add(os.path.join(sys_root, 'bin'))
    # Get program and library dirs from all target compilers
    if isinstance(target, build.BuildTarget):
        for compiler in target.compilers.values():
            paths.update(compiler.get_program_dirs(self.environment))
            paths.update(compiler.get_library_dirs(self.environment))
    return list(paths)
def determine_windows_extra_paths(
        self, target: T.Union[build.BuildTarget, build.CustomTarget, programs.ExternalProgram, mesonlib.File, str],
        extra_bdeps: T.Sequence[T.Union[build.BuildTarget, build.CustomTarget]]) -> T.List[str]:
    """On Windows there is no such thing as an rpath.
    We must determine all locations of DLLs that this exe
    links to and return them so they can be used in unit
    tests.
    """
    result: T.Set[str] = set()
    prospectives: T.Set[build.Target] = set()
    if isinstance(target, build.BuildTarget):
        prospectives.update(target.get_transitive_link_deps())
        # External deps
        for deppath in self.rpaths_for_bundled_shared_libraries(target, exclude_system=False):
            result.add(os.path.normpath(os.path.join(self.environment.get_build_dir(), deppath)))
    # extra_bdeps contribute their own transitive link deps too.
    for bdep in extra_bdeps:
        prospectives.add(bdep)
        if isinstance(bdep, build.BuildTarget):
            prospectives.update(bdep.get_transitive_link_deps())
    # Internal deps
    for ld in prospectives:
        dirseg = os.path.join(self.environment.get_build_dir(), self.get_target_dir(ld))
        result.add(dirseg)
    # Cross targets additionally need the toolchain's own dirs.
    if (isinstance(target, build.BuildTarget) and
            not self.environment.machines.matches_build_machine(target.for_machine)):
        result.update(self.get_mingw_extra_paths(target))
    return list(result)
def write_benchmark_file(self, datafile: T.BinaryIO) -> None:
    """Pickle all benchmark definitions into *datafile*."""
    self.write_test_serialisation(self.build.get_benchmarks(), datafile)
def write_test_file(self, datafile: T.BinaryIO) -> None:
    """Pickle all test definitions into *datafile*."""
    self.write_test_serialisation(self.build.get_tests(), datafile)
def create_test_serialisation(self, tests: T.List['Test']) -> T.List[TestSerialisation]:
    """Convert *tests* into TestSerialisation objects, ordered by descending
    priority, resolving exe paths, machines, wrappers, Windows PATH
    additions, and dependencies."""
    arr: T.List[TestSerialisation] = []
    for t in sorted(tests, key=lambda tst: -1 * tst.priority):
        exe = t.get_exe()
        if isinstance(exe, programs.ExternalProgram):
            cmd = exe.get_command()
        else:
            cmd = [os.path.join(self.environment.get_build_dir(), self.get_target_filename(exe))]
        if isinstance(exe, (build.BuildTarget, programs.ExternalProgram)):
            test_for_machine = exe.for_machine
        else:
            # E.g. an external verifier or simulator program run on a generated executable.
            # Can always be run without a wrapper.
            test_for_machine = MachineChoice.BUILD

        # we allow passing compiled executables to tests, which may be cross built.
        # We need to consider these as well when considering whether the target is cross or not.
        for a in t.cmd_args:
            if isinstance(a, build.BuildTarget):
                if a.for_machine is MachineChoice.HOST:
                    test_for_machine = MachineChoice.HOST
                    break

        is_cross = self.environment.is_cross_build(test_for_machine)
        if is_cross and self.environment.need_exe_wrapper():
            exe_wrapper = self.environment.get_exe_wrapper()
        else:
            exe_wrapper = None
        machine = self.environment.machines[exe.for_machine]
        if machine.is_windows() or machine.is_cygwin():
            # No rpath on Windows: collect DLL dirs to put on PATH instead.
            extra_bdeps: T.List[T.Union[build.BuildTarget, build.CustomTarget]] = []
            if isinstance(exe, build.CustomTarget):
                extra_bdeps = list(exe.get_transitive_build_target_deps())
            extra_paths = self.determine_windows_extra_paths(exe, extra_bdeps)
        else:
            extra_paths = []

        # Lower the test arguments to strings, collecting every Target
        # they mention as a dependency of the test.
        cmd_args: T.List[str] = []
        depends: T.Set[build.Target] = set(t.depends)
        if isinstance(exe, build.Target):
            depends.add(exe)
        for a in t.cmd_args:
            if isinstance(a, build.Target):
                depends.add(a)
            if isinstance(a, build.BuildTarget):
                extra_paths += self.determine_windows_extra_paths(a, [])
            if isinstance(a, mesonlib.File):
                a = os.path.join(self.environment.get_build_dir(), a.rel_to_builddir(self.build_to_src))
                cmd_args.append(a)
            elif isinstance(a, str):
                cmd_args.append(a)
            elif isinstance(a, build.Executable):
                p = self.construct_target_rel_path(a, t.workdir)
                if p == a.get_filename():
                    # Ensure a bare filename still resolves from the workdir.
                    p = './' + p
                cmd_args.append(p)
            elif isinstance(a, build.Target):
                cmd_args.append(self.construct_target_rel_path(a, t.workdir))
            else:
                raise MesonException('Bad object in test command.')
        ts = TestSerialisation(t.get_name(), t.project_name, t.suite, cmd, is_cross,
                               exe_wrapper, self.environment.need_exe_wrapper(),
                               t.is_parallel, cmd_args, t.env,
                               t.should_fail, t.timeout, t.workdir,
                               extra_paths, t.protocol, t.priority,
                               isinstance(exe, build.Executable),
                               [x.get_id() for x in depends],
                               self.environment.coredata.version)
        arr.append(ts)
    return arr
def write_test_serialisation(self, tests: T.List['Test'], datafile: T.BinaryIO) -> None:
    """Pickle the serialised form of *tests* into *datafile*."""
    pickle.dump(self.create_test_serialisation(tests), datafile)
def construct_target_rel_path(self, a: build.Target, workdir: T.Optional[str]) -> str:
    """Path to *a*'s output, made relative to *workdir* when one is given."""
    if workdir is None:
        return self.get_target_filename(a)
    assert os.path.isabs(workdir)
    return os.path.relpath(self.get_target_filename_abs(a), workdir)
def generate_depmf_install(self, d: InstallData) -> None:
    """Write depmf.json into the build dir and register it for install,
    if a dependency manifest name was configured."""
    if self.build.dep_manifest_name is None:
        return
    ifilename = os.path.join(self.environment.get_build_dir(), 'depmf.json')
    ofilename = os.path.join(self.environment.get_prefix(), self.build.dep_manifest_name)
    out_name = os.path.join('{prefix}', self.build.dep_manifest_name)
    manifest = {
        'type': 'dependency manifest',
        'version': '1.0',
        'projects': {name: entry.to_json() for name, entry in self.build.dep_manifest.items()},
    }
    with open(ifilename, 'w', encoding='utf-8') as f:
        json.dump(manifest, f)
    # Copy file from, to, and with mode unchanged
    d.data.append(InstallDataBase(ifilename, ofilename, out_name, None, '',
                                  tag='devel', data_type='depmf'))
def get_regen_filelist(self) -> T.List[str]:
    '''List of all files whose alteration means that the build
    definition needs to be regenerated.'''
    src_prefix = Path(self.build_to_src)
    deps = [str(src_prefix / build_file)
            for build_file in self.interpreter.get_build_def_files()]
    if self.environment.is_cross_build():
        deps.extend(self.environment.coredata.cross_files)
    deps.extend(self.environment.coredata.config_files)
    deps.append('meson-private/coredata.dat')
    # Guard against future timestamps before handing the list out.
    self.check_clock_skew(deps)
    return deps
def generate_regen_info(self) -> None:
    """Pickle the regeneration info (dirs + dependency files) into the
    scratch dir as regeninfo.dump."""
    regeninfo = RegenInfo(self.environment.get_source_dir(),
                          self.environment.get_build_dir(),
                          self.get_regen_filelist())
    dump_path = os.path.join(self.environment.get_scratch_dir(),
                             'regeninfo.dump')
    with open(dump_path, 'wb') as f:
        pickle.dump(regeninfo, f)
def check_clock_skew(self, file_list: T.List[str]) -> None:
    """Raise MesonException if any listed file's mtime lies in the future.

    A future timestamp on a file that triggers reconfiguration would cause
    an eternal reconfigure loop.
    """
    import time
    now = time.time()
    for f in file_list:
        absf = os.path.join(self.environment.get_build_dir(), f)
        delta = os.path.getmtime(absf) - now
        # On Windows disk time stamps sometimes point to the future by a
        # minuscule amount, less than 0.001 seconds; tolerate that.
        if delta > 0.001:
            raise MesonException(f'Clock skew detected. File {absf} has a time stamp {delta:.4f}s in the future.')
def build_target_to_cmd_array(self, bt: T.Union[build.BuildTarget, programs.ExternalProgram]) -> T.List[str]:
    """Return an argv list that runs *bt*."""
    if isinstance(bt, build.BuildTarget):
        # Build targets run from their output file in the build dir.
        return [os.path.join(self.environment.get_build_dir(), self.get_target_filename(bt))]
    # External programs already know their own command.
    return bt.get_command()
def replace_extra_args(self, args: T.List[str], genlist: 'build.GeneratedList') -> T.List[str]:
final_args: T.List[str] = []
for a in args:
if a == '@EXTRA_ARGS@':
final_args += genlist.get_extra_args()
else:
final_args.append(a)
return final_args
def replace_outputs(self, args: T.List[str], private_dir: str, output_list: T.List[str]) -> T.List[str]:
newargs: T.List[str] = []
regex = re.compile(r'@OUTPUT(\d+)@')
for arg in args:
m = regex.search(arg)
while m is not None:
index = int(m.group(1))
src = f'@OUTPUT{index}@'
arg = arg.replace(src, os.path.join(private_dir, output_list[index]))
m = regex.search(arg)
newargs.append(arg)
return newargs
def get_build_by_default_targets(self) -> 'T.OrderedDict[str, T.Union[build.BuildTarget, build.CustomTarget]]':
result: 'T.OrderedDict[str, T.Union[build.BuildTarget, build.CustomTarget]]' = OrderedDict()
# Get all build and custom targets that must be built by default
for name, b in self.build.get_targets().items():
if b.build_by_default:
result[name] = b
# Get all targets used as test executables and arguments. These must
# also be built by default. XXX: Sometime in the future these should be
# built only before running tests.
for t in self.build.get_tests():
exe = t.exe
if isinstance(exe, (build.CustomTarget, build.BuildTarget)):
result[exe.get_id()] = exe
for arg in t.cmd_args:
if not isinstance(arg, (build.CustomTarget, build.BuildTarget)):
continue
result[arg.get_id()] = arg
for dep in t.depends:
assert isinstance(dep, (build.CustomTarget, build.BuildTarget))
result[dep.get_id()] = dep
return result
@lru_cache(maxsize=None)
def get_custom_target_provided_by_generated_source(self, generated_source: build.CustomTarget) -> 'ImmutableListProtocol[str]':
libs: T.List[str] = []
for f in generated_source.get_outputs():
if self.environment.is_library(f):
libs.append(os.path.join(self.get_target_dir(generated_source), f))
return libs
@lru_cache(maxsize=None)
def get_custom_target_provided_libraries(self, target: T.Union[build.BuildTarget, build.CustomTarget]) -> 'ImmutableListProtocol[str]':
libs: T.List[str] = []
for t in target.get_generated_sources():
if not isinstance(t, build.CustomTarget):
continue
libs.extend(self.get_custom_target_provided_by_generated_source(t))
return libs
def is_unity(self, target: build.BuildTarget) -> bool:
optval = self.get_option_for_target(OptionKey('unity'), target)
return optval == 'on' or (optval == 'subprojects' and target.subproject != '')
    def get_custom_target_sources(self, target: build.CustomTarget) -> T.List[str]:
        '''
        Custom target sources can be of various object types; strings, File,
        BuildTarget, even other CustomTargets.
        Returns the path to them relative to the build root directory.
        '''
        srcs: T.List[str] = []
        for i in target.get_sources():
            if isinstance(i, str):
                # Plain strings are taken relative to the target's source subdir.
                fname = [os.path.join(self.build_to_src, target.subdir, i)]
            elif isinstance(i, build.BuildTarget):
                fname = [self.get_target_filename(i)]
            elif isinstance(i, (build.CustomTarget, build.CustomTargetIndex)):
                # Use every declared output of the referenced custom target.
                fname = [os.path.join(self.get_custom_target_output_dir(i), p) for p in i.get_outputs()]
            elif isinstance(i, build.GeneratedList):
                # Generator outputs live in *this* target's private dir.
                fname = [os.path.join(self.get_target_private_dir(target), p) for p in i.get_outputs()]
            elif isinstance(i, build.ExtractedObjects):
                outputs = i.get_outputs(self)
                fname = self.get_extracted_obj_paths(i.target, outputs)
            else:
                # Fallback: assumed to be a File-like object — TODO confirm.
                fname = [i.rel_to_builddir(self.build_to_src)]
            if target.absolute_paths:
                fname = [os.path.join(self.environment.get_build_dir(), f) for f in fname]
            srcs += fname
        return srcs
def get_extracted_obj_paths(self, target: build.BuildTarget, outputs: T.List[str]) -> T.List[str]:
return [os.path.join(self.get_target_private_dir(target), p) for p in outputs]
def get_custom_target_depend_files(self, target: build.CustomTarget, absolute_paths: bool = False) -> T.List[str]:
deps: T.List[str] = []
for i in target.depend_files:
if isinstance(i, mesonlib.File):
if absolute_paths:
deps.append(i.absolute_path(self.environment.get_source_dir(),
self.environment.get_build_dir()))
else:
deps.append(i.rel_to_builddir(self.build_to_src))
else:
if absolute_paths:
deps.append(os.path.join(self.environment.get_source_dir(), target.subdir, i))
else:
deps.append(os.path.join(self.build_to_src, target.subdir, i))
return deps
def get_custom_target_output_dir(self, target: T.Union[build.Target, build.CustomTargetIndex]) -> str:
# The XCode backend is special. A target foo/bar does
# not go to ${BUILDDIR}/foo/bar but instead to
# ${BUILDDIR}/${BUILDTYPE}/foo/bar.
# Currently we set the include dir to be the former,
# and not the latter. Thus we need this extra customisation
# point. If in the future we make include dirs et al match
# ${BUILDDIR}/${BUILDTYPE} instead, this becomes unnecessary.
return self.get_target_dir(target)
    @lru_cache(maxsize=None)
    def get_normpath_target(self, source: str) -> str:
        # Cached wrapper around os.path.normpath; the same paths get
        # normalised repeatedly (see get_custom_target_dirs).
        return os.path.normpath(source)
def get_custom_target_dirs(self, target: build.CustomTarget, compiler: 'Compiler', *,
absolute_path: bool = False) -> T.List[str]:
custom_target_include_dirs: T.List[str] = []
for i in target.get_generated_sources():
# Generator output goes into the target private dir which is
# already in the include paths list. Only custom targets have their
# own target build dir.
if not isinstance(i, (build.CustomTarget, build.CustomTargetIndex)):
continue
idir = self.get_normpath_target(self.get_custom_target_output_dir(i))
if not idir:
idir = '.'
if absolute_path:
idir = os.path.join(self.environment.get_build_dir(), idir)
if idir not in custom_target_include_dirs:
custom_target_include_dirs.append(idir)
return custom_target_include_dirs
def get_custom_target_dir_include_args(
self, target: build.CustomTarget, compiler: 'Compiler', *,
absolute_path: bool = False) -> T.List[str]:
incs: T.List[str] = []
for i in self.get_custom_target_dirs(target, compiler, absolute_path=absolute_path):
incs += compiler.get_include_args(i, False)
return incs
    def eval_custom_target_command(
            self, target: build.CustomTarget, absolute_outputs: bool = False) -> \
            T.Tuple[T.List[str], T.List[str], T.List[str]]:
        '''Evaluate a custom target's command line.

        Returns (inputs, outputs, cmd) as lists of strings with all @...@
        template placeholders substituted. Paths are build-dir relative
        unless absolute_outputs is True or the target asked for absolute
        paths.
        '''
        # We want the outputs to be absolute only when using the VS backend
        # XXX: Maybe allow the vs backend to use relative paths too?
        source_root = self.build_to_src
        build_root = '.'
        outdir = self.get_custom_target_output_dir(target)
        if absolute_outputs:
            source_root = self.environment.get_source_dir()
            build_root = self.environment.get_build_dir()
            outdir = os.path.join(self.environment.get_build_dir(), outdir)
        outputs = [os.path.join(outdir, i) for i in target.get_outputs()]
        inputs = self.get_custom_target_sources(target)
        # Evaluate the command list
        cmd: T.List[str] = []
        for i in target.command:
            if isinstance(i, build.BuildTarget):
                cmd += self.build_target_to_cmd_array(i)
                continue
            elif isinstance(i, build.CustomTarget):
                # GIR scanner will attempt to execute this binary but
                # it assumes that it is in path, so always give it a full path.
                tmp = i.get_outputs()[0]
                i = os.path.join(self.get_custom_target_output_dir(i), tmp)
            elif isinstance(i, mesonlib.File):
                i = i.rel_to_builddir(self.build_to_src)
                if target.absolute_paths or absolute_outputs:
                    i = os.path.join(self.environment.get_build_dir(), i)
            # FIXME: str types are blindly added ignoring 'target.absolute_paths'
            # because we can't know if they refer to a file or just a string
            elif isinstance(i, str):
                # Directory placeholders are handled here; the generic
                # @INPUT@/@OUTPUT@ templates are substituted after the loop.
                if '@SOURCE_ROOT@' in i:
                    i = i.replace('@SOURCE_ROOT@', source_root)
                if '@BUILD_ROOT@' in i:
                    i = i.replace('@BUILD_ROOT@', build_root)
                if '@CURRENT_SOURCE_DIR@' in i:
                    i = i.replace('@CURRENT_SOURCE_DIR@', os.path.join(source_root, target.subdir))
                if '@DEPFILE@' in i:
                    if target.depfile is None:
                        msg = f'Custom target {target.name!r} has @DEPFILE@ but no depfile ' \
                              'keyword argument.'
                        raise MesonException(msg)
                    dfilename = os.path.join(outdir, target.depfile)
                    i = i.replace('@DEPFILE@', dfilename)
                if '@PRIVATE_DIR@' in i:
                    if target.absolute_paths:
                        pdir = self.get_target_private_dir_abs(target)
                    else:
                        pdir = self.get_target_private_dir(target)
                    i = i.replace('@PRIVATE_DIR@', pdir)
            else:
                raise RuntimeError(f'Argument {i} is of unknown type {type(i)}')
            cmd.append(i)
        # Substitute the rest of the template strings
        values = mesonlib.get_filenames_templates_dict(inputs, outputs)
        cmd = mesonlib.substitute_values(cmd, values)
        # This should not be necessary but removing it breaks
        # building GStreamer on Windows. The underlying issue
        # is problems with quoting backslashes on Windows
        # which is the seventh circle of hell. The downside is
        # that this breaks custom targets whose command lines
        # have backslashes. If you try to fix this be sure to
        # check that it does not break GST.
        #
        # The bug causes file paths such as c:\foo to get escaped
        # into c:\\foo.
        #
        # Unfortunately we have not been able to come up with an
        # isolated test case for this so unless you manage to come up
        # with one, the only way is to test the building with Gst's
        # setup. Note this in your MR or ping us and we will get it
        # fixed.
        #
        # https://github.com/mesonbuild/meson/pull/737
        cmd = [i.replace('\\', '/') for i in cmd]
        return inputs, outputs, cmd
def get_run_target_env(self, target: build.RunTarget) -> build.EnvironmentVariables:
env = target.env if target.env else build.EnvironmentVariables()
introspect_cmd = join_args(self.environment.get_build_command() + ['introspect'])
env.set('MESON_SOURCE_ROOT', [self.environment.get_source_dir()])
env.set('MESON_BUILD_ROOT', [self.environment.get_build_dir()])
env.set('MESON_SUBDIR', [target.subdir])
env.set('MESONINTROSPECT', [introspect_cmd])
return env
def run_postconf_scripts(self) -> None:
from ..scripts.meson_exe import run_exe
introspect_cmd = join_args(self.environment.get_build_command() + ['introspect'])
env = {'MESON_SOURCE_ROOT': self.environment.get_source_dir(),
'MESON_BUILD_ROOT': self.environment.get_build_dir(),
'MESONINTROSPECT': introspect_cmd,
}
for s in self.build.postconf_scripts:
name = ' '.join(s.cmd_args)
mlog.log(f'Running postconf script {name!r}')
run_exe(s, env)
    def create_install_data(self) -> InstallData:
        '''Assemble the complete InstallData object describing everything
        that `meson install` must do: targets, headers, man pages, empty
        dirs, data files, symlinks, install scripts and subdirs.'''
        strip_bin = self.environment.lookup_binary_entry(MachineChoice.HOST, 'strip')
        if strip_bin is None:
            if self.environment.is_cross_build():
                # Stripping with the build machine's strip would be wrong.
                mlog.warning('Cross file does not specify strip binary, result will not be stripped.')
            else:
                # TODO go through all candidates, like others
                strip_bin = [detect.defaults['strip'][0]]
        umask = self.environment.coredata.get_option(OptionKey('install_umask'))
        assert isinstance(umask, (str, int)), 'for mypy'
        d = InstallData(self.environment.get_source_dir(),
                        self.environment.get_build_dir(),
                        self.environment.get_prefix(),
                        self.environment.get_libdir(),
                        strip_bin,
                        umask,
                        self.environment.get_build_command() + ['introspect'],
                        self.environment.coredata.version)
        self.generate_depmf_install(d)
        self.generate_target_install(d)
        self.generate_header_install(d)
        self.generate_man_install(d)
        self.generate_emptydir_install(d)
        self.generate_data_install(d)
        self.generate_symlink_install(d)
        self.generate_custom_install_script(d)
        self.generate_subdir_install(d)
        return d
def create_install_data_files(self) -> None:
install_data_file = os.path.join(self.environment.get_scratch_dir(), 'install.dat')
with open(install_data_file, 'wb') as ofile:
pickle.dump(self.create_install_data(), ofile)
    def guess_install_tag(self, fname: str, outdir: T.Optional[str] = None) -> T.Optional[str]:
        '''Classify an installed file into an install tag based on its
        destination directory: 'runtime', 'devel', 'i18n', or None when no
        rule matches.'''
        prefix = self.environment.get_prefix()
        bindir = Path(prefix, self.environment.get_bindir())
        libdir = Path(prefix, self.environment.get_libdir())
        incdir = Path(prefix, self.environment.get_includedir())
        _ldir = self.environment.coredata.get_option(mesonlib.OptionKey('localedir'))
        assert isinstance(_ldir, str), 'for mypy'
        localedir = Path(prefix, _ldir)
        dest_path = Path(prefix, outdir, Path(fname).name) if outdir else Path(prefix, fname)
        if bindir in dest_path.parents:
            return 'runtime'
        elif libdir in dest_path.parents:
            if dest_path.suffix in {'.a', '.pc'}:
                # Static libraries and pkg-config files are for development.
                return 'devel'
            elif dest_path.suffix in {'.so', '.dll'}:
                return 'runtime'
        elif incdir in dest_path.parents:
            return 'devel'
        elif localedir in dest_path.parents:
            return 'i18n'
        mlog.debug('Failed to guess install tag for', dest_path)
        return None
    def generate_target_install(self, d: InstallData) -> None:
        '''Append install entries for every installable target to *d*.

        Handles the primary output (with strip/aliases/rpath data), import
        libraries and debug files for shared libraries/executables, Vala
        secondary outputs, and all custom-target outputs.
        '''
        for t in self.build.get_targets().values():
            if not t.should_install():
                continue
            outdirs, install_dir_name, custom_install_dir = t.get_install_dir(self.environment)
            # Sanity-check the outputs and install_dirs
            num_outdirs, num_out = len(outdirs), len(t.get_outputs())
            if num_outdirs != 1 and num_outdirs != num_out:
                m = 'Target {!r} has {} outputs: {!r}, but only {} "install_dir"s were found.\n' \
                    "Pass 'false' for outputs that should not be installed and 'true' for\n" \
                    'using the default installation directory for an output.'
                raise MesonException(m.format(t.name, num_out, t.get_outputs(), num_outdirs))
            assert len(t.install_tag) == num_out
            install_mode = t.get_custom_install_mode()
            # Install the target output(s)
            if isinstance(t, build.BuildTarget):
                # In general, stripping static archives is tricky and full of pitfalls.
                # Wholesale stripping of static archives with a command such as
                #
                #   strip libfoo.a
                #
                # is broken, as GNU's strip will remove *every* symbol in a static
                # archive. One solution to this nonintuitive behaviour would be
                # to only strip local/debug symbols. Unfortunately, strip arguments
                # are not specified by POSIX and therefore not portable. GNU's `-g`
                # option (i.e. remove debug symbols) is equivalent to Apple's `-S`.
                #
                # TODO: Create GNUStrip/AppleStrip/etc. hierarchy for more
                #       fine-grained stripping of static archives.
                should_strip = not isinstance(t, build.StaticLibrary) and self.get_option_for_target(OptionKey('strip'), t)
                assert isinstance(should_strip, bool), 'for mypy'
                # Install primary build output (library/executable/jar, etc)
                # Done separately because of strip/aliases/rpath
                if outdirs[0] is not False:
                    tag = t.install_tag[0] or ('devel' if isinstance(t, build.StaticLibrary) else 'runtime')
                    mappings = t.get_link_deps_mapping(d.prefix, self.environment)
                    i = TargetInstallData(self.get_target_filename(t), outdirs[0],
                                          install_dir_name, t.get_aliases(),
                                          should_strip, mappings, t.rpath_dirs_to_remove,
                                          t.install_rpath, install_mode, t.subproject,
                                          tag=tag)
                    d.targets.append(i)
                    if isinstance(t, (build.SharedLibrary, build.SharedModule, build.Executable)):
                        # On toolchains/platforms that use an import library for
                        # linking (separate from the shared library with all the
                        # code), we need to install that too (dll.a/.lib).
                        if t.get_import_filename():
                            if custom_install_dir:
                                # If the DLL is installed into a custom directory,
                                # install the import library into the same place so
                                # it doesn't go into a surprising place
                                implib_install_dir = outdirs[0]
                            else:
                                implib_install_dir = self.environment.get_import_lib_dir()
                            # Install the import library; may not exist for shared modules
                            i = TargetInstallData(self.get_target_filename_for_linking(t),
                                                  implib_install_dir, install_dir_name,
                                                  {}, False, {}, set(), '', install_mode,
                                                  t.subproject, optional=isinstance(t, build.SharedModule),
                                                  tag='devel')
                            d.targets.append(i)
                        if not should_strip and t.get_debug_filename():
                            debug_file = os.path.join(self.get_target_dir(t), t.get_debug_filename())
                            i = TargetInstallData(debug_file, outdirs[0],
                                                  install_dir_name,
                                                  {}, False, {}, set(), '',
                                                  install_mode, t.subproject,
                                                  optional=True, tag='devel')
                            d.targets.append(i)
                # Install secondary outputs. Only used for Vala right now.
                if num_outdirs > 1:
                    for output, outdir, tag in zip(t.get_outputs()[1:], outdirs[1:], t.install_tag[1:]):
                        # User requested that we not install this output
                        if outdir is False:
                            continue
                        f = os.path.join(self.get_target_dir(t), output)
                        i = TargetInstallData(f, outdir, install_dir_name, {}, False, {}, set(), None,
                                              install_mode, t.subproject,
                                              tag=tag)
                        d.targets.append(i)
            elif isinstance(t, build.CustomTarget):
                # If only one install_dir is specified, assume that all
                # outputs will be installed into it. This is for
                # backwards-compatibility and because it makes sense to
                # avoid repetition since this is a common use-case.
                #
                # To selectively install only some outputs, pass `false` as
                # the install_dir for the corresponding output by index
                #
                # NOTE(review): in both loops below, when install_dir_name is
                # truthy, dir_name is never assigned before being used in the
                # TargetInstallData call — looks like a latent NameError;
                # verify against upstream/callers.
                if num_outdirs == 1 and num_out > 1:
                    for output, tag in zip(t.get_outputs(), t.install_tag):
                        f = os.path.join(self.get_target_dir(t), output)
                        if not install_dir_name:
                            dir_name = os.path.join('{prefix}', outdirs[0])
                        i = TargetInstallData(f, outdirs[0], dir_name, {},
                                              False, {}, set(), None, install_mode,
                                              t.subproject, optional=not t.build_by_default,
                                              tag=tag)
                        d.targets.append(i)
                else:
                    for output, outdir, tag in zip(t.get_outputs(), outdirs, t.install_tag):
                        # User requested that we not install this output
                        if outdir is False:
                            continue
                        f = os.path.join(self.get_target_dir(t), output)
                        if not install_dir_name:
                            dir_name = os.path.join('{prefix}', outdir)
                        i = TargetInstallData(f, outdir, dir_name,
                                              {}, False, {}, set(), None, install_mode,
                                              t.subproject, optional=not t.build_by_default,
                                              tag=tag)
                        d.targets.append(i)
    def generate_custom_install_script(self, d: InstallData) -> None:
        # Pass install scripts through as-is; InstallData.install_scripts is
        # typed as a list of ExecutableSerialisation objects.
        d.install_scripts = self.build.install_scripts
def generate_header_install(self, d: InstallData) -> None:
incroot = self.environment.get_includedir()
headers = self.build.get_headers()
srcdir = self.environment.get_source_dir()
builddir = self.environment.get_build_dir()
for h in headers:
outdir = outdir_name = h.get_custom_install_dir()
if outdir is None:
subdir = h.get_install_subdir()
if subdir is None:
outdir = incroot
outdir_name = '{includedir}'
else:
outdir = os.path.join(incroot, subdir)
outdir_name = os.path.join('{includedir}', subdir)
for f in h.get_sources():
if not isinstance(f, File):
raise MesonException(f'Invalid header type {f!r} can\'t be installed')
abspath = f.absolute_path(srcdir, builddir)
i = InstallDataBase(abspath, outdir, outdir_name, h.get_custom_install_mode(), h.subproject, tag='devel')
d.headers.append(i)
    def generate_man_install(self, d: InstallData) -> None:
        '''Append man-page install entries to *d*.

        Pages go to {mandir}/man<N> (or {mandir}/<locale>/man<N>), where N
        is taken from the source file's extension.
        '''
        manroot = self.environment.get_mandir()
        man = self.build.get_man()
        for m in man:
            for f in m.get_sources():
                # Section number from the extension; f appears to be a
                # mesonlib.File (see absolute_path below), so split()
                # presumably operates on the file name — TODO confirm.
                num = f.split('.')[-1]
                subdir = m.get_custom_install_dir()
                if subdir is None:
                    if m.locale:
                        subdir = os.path.join('{mandir}', m.locale, 'man' + num)
                    else:
                        subdir = os.path.join('{mandir}', 'man' + num)
                fname = f.fname
                if m.locale: # strip locale from file name
                    # NOTE(review): this removes '.<locale>' anywhere in the
                    # name, not only directly before the section suffix — verify.
                    fname = fname.replace(f'.{m.locale}', '')
                srcabs = f.absolute_path(self.environment.get_source_dir(), self.environment.get_build_dir())
                dstname = os.path.join(subdir, os.path.basename(fname))
                dstabs = dstname.replace('{mandir}', manroot)
                i = InstallDataBase(srcabs, dstabs, dstname, m.get_custom_install_mode(), m.subproject, tag='man')
                d.man.append(i)
def generate_emptydir_install(self, d: InstallData) -> None:
emptydir: T.List[build.EmptyDir] = self.build.get_emptydir()
for e in emptydir:
i = InstallEmptyDir(e.path, e.install_mode, e.subproject, e.install_tag)
d.emptydir.append(i)
    def generate_data_install(self, d: InstallData) -> None:
        '''Append data-file install entries to *d*.

        Files with no explicit install_dir default to
        {datadir}/<project_name>. Each source is paired with its rename
        entry for the destination file name.
        '''
        data = self.build.get_data()
        srcdir = self.environment.get_source_dir()
        builddir = self.environment.get_build_dir()
        for de in data:
            assert isinstance(de, build.Data)
            subdir = de.install_dir
            subdir_name = de.install_dir_name
            if not subdir:
                subdir = os.path.join(self.environment.get_datadir(), self.interpreter.build.project_name)
                subdir_name = os.path.join('{datadir}', self.interpreter.build.project_name)
            for src_file, dst_name in zip(de.sources, de.rename):
                assert isinstance(src_file, mesonlib.File)
                dst_abs = os.path.join(subdir, dst_name)
                dstdir_name = os.path.join(subdir_name, dst_name)
                # Fall back to guessing the tag from the destination path.
                tag = de.install_tag or self.guess_install_tag(dst_abs)
                i = InstallDataBase(src_file.absolute_path(srcdir, builddir), dst_abs, dstdir_name,
                                    de.install_mode, de.subproject, tag=tag, data_type=de.data_type)
                d.data.append(i)
def generate_symlink_install(self, d: InstallData) -> None:
links: T.List[build.SymlinkData] = self.build.get_symlinks()
for l in links:
assert isinstance(l, build.SymlinkData)
install_dir = l.install_dir
name_abs = os.path.join(install_dir, l.name)
s = InstallSymlinkData(l.target, name_abs, install_dir, l.subproject, l.install_tag)
d.symlinks.append(s)
    def generate_subdir_install(self, d: InstallData) -> None:
        '''Append install_subdir() entries to *d*.

        With strip_directory the directory's contents are copied directly
        into install_dir; otherwise the subdir itself is recreated below it.
        '''
        for sd in self.build.get_install_subdirs():
            if sd.from_source_dir:
                from_dir = self.environment.get_source_dir()
            else:
                from_dir = self.environment.get_build_dir()
            src_dir = os.path.join(from_dir,
                                   sd.source_subdir,
                                   sd.installable_subdir).rstrip('/')
            dst_dir = os.path.join(self.environment.get_prefix(),
                                   sd.install_dir)
            dst_name = os.path.join('{prefix}', sd.install_dir)
            if not sd.strip_directory:
                dst_dir = os.path.join(dst_dir, os.path.basename(src_dir))
                # NOTE(review): dst_name is joined onto the absolute dst_dir
                # here rather than onto dst_name — looks unintended; verify.
                dst_name = os.path.join(dst_dir, os.path.basename(src_dir))
            i = SubdirInstallData(src_dir, dst_dir, dst_name, sd.install_mode, sd.exclude, sd.subproject, sd.install_tag)
            d.install_subdirs.append(i)
    def get_introspection_data(self, target_id: str, target: build.Target) -> T.List['TargetIntrospectionData']:
        '''
        Returns a list of source dicts with the following format for a given target:
        [
            {
                "language": "<LANG>",
                "compiler": ["result", "of", "comp.get_exelist()"],
                "parameters": ["list", "of", "compiler", "parameters],
                "sources": ["list", "of", "all", "<LANG>", "source", "files"],
                "generated_sources": ["list", "of", "generated", "source", "files"]
            }
        ]
        This is a limited fallback / reference implementation. The backend should override this method.

        Note: target_id is unused here; it is kept for backend subclasses
        that override this method.
        '''
        if isinstance(target, (build.CustomTarget, build.BuildTarget)):
            source_list_raw = target.sources
            source_list = []
            for j in source_list_raw:
                if isinstance(j, mesonlib.File):
                    source_list += [j.absolute_path(self.source_dir, self.build_dir)]
                elif isinstance(j, str):
                    source_list += [os.path.join(self.source_dir, j)]
                elif isinstance(j, (build.CustomTarget, build.BuildTarget)):
                    # Use the outputs of the referenced target as sources.
                    source_list += [os.path.join(self.build_dir, j.get_subdir(), o) for o in j.get_outputs()]
            source_list = list(map(lambda x: os.path.normpath(x), source_list))
            compiler: T.List[str] = []
            if isinstance(target, build.CustomTarget):
                # For custom targets report the command line as "compiler".
                tmp_compiler = target.command
                for j in tmp_compiler:
                    if isinstance(j, mesonlib.File):
                        compiler += [j.absolute_path(self.source_dir, self.build_dir)]
                    elif isinstance(j, str):
                        compiler += [j]
                    elif isinstance(j, (build.BuildTarget, build.CustomTarget)):
                        compiler += j.get_outputs()
                    else:
                        raise RuntimeError(f'Type "{type(j).__name__}" is not supported in get_introspection_data. This is a bug')
            return [{
                'language': 'unknown',
                'compiler': compiler,
                'parameters': [],
                'sources': source_list,
                'generated_sources': []
            }]
        return []
    def get_devenv(self) -> build.EnvironmentVariables:
        '''Build the developer environment: prepend build-dir locations of
        runnable executables to PATH and of shared libraries to the
        platform's library search path.'''
        env = build.EnvironmentVariables()
        extra_paths = set()
        library_paths = set()
        for t in self.build.get_targets().values():
            cross_built = not self.environment.machines.matches_build_machine(t.for_machine)
            can_run = not cross_built or not self.environment.need_exe_wrapper()
            # Index 2 of get_install_dir() is the custom-install-dir flag;
            # only targets headed for default dirs participate.
            in_default_dir = t.should_install() and not t.get_install_dir(self.environment)[2]
            if not can_run or not in_default_dir:
                continue
            tdir = os.path.join(self.environment.get_build_dir(), self.get_target_dir(t))
            if isinstance(t, build.Executable):
                # Add binaries that are going to be installed in bindir into PATH
                # so they get used by default instead of searching on system when
                # in developer environment.
                extra_paths.add(tdir)
                if mesonlib.is_windows() or mesonlib.is_cygwin():
                    # On windows we cannot rely on rpath to run executables from build
                    # directory. We have to add in PATH the location of every DLL needed.
                    extra_paths.update(self.determine_windows_extra_paths(t, []))
            elif isinstance(t, build.SharedLibrary):
                # Add libraries that are going to be installed in libdir into
                # LD_LIBRARY_PATH. This allows running system applications using
                # that library.
                library_paths.add(tdir)
        if mesonlib.is_windows() or mesonlib.is_cygwin():
            # Windows resolves DLLs via PATH.
            extra_paths.update(library_paths)
        elif mesonlib.is_osx():
            env.prepend('DYLD_LIBRARY_PATH', list(library_paths))
        else:
            env.prepend('LD_LIBRARY_PATH', list(library_paths))
        env.prepend('PATH', list(extra_paths))
        return env
Fix performance regression in build file generation:
re-use any already-determined rpaths for a target.
Fixes #9695
# Copyright 2012-2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from functools import lru_cache
from itertools import chain
from pathlib import Path
import enum
import json
import os
import pickle
import re
import typing as T
import hashlib
from .. import build
from .. import dependencies
from .. import programs
from .. import mesonlib
from .. import mlog
from ..compilers import LANGUAGES_USING_LDFLAGS, detect
from ..mesonlib import (
File, MachineChoice, MesonException, OptionType, OrderedSet, OptionOverrideProxy,
classify_unity_sources, OptionKey, join_args
)
if T.TYPE_CHECKING:
    # These imports exist only for type annotations; they are absent at runtime.
    from .._typing import ImmutableListProtocol
    from ..arglist import CompilerArgs
    from ..compilers import Compiler
    from ..environment import Environment
    from ..interpreter import Interpreter, Test
    from ..linkers import StaticLinker
    from ..mesonlib import FileMode, FileOrString
    from ..wrap import WrapMode

    from typing_extensions import TypedDict

    class TargetIntrospectionData(TypedDict):
        # Shape of one entry returned by Backend.get_introspection_data().
        language: str
        compiler: T.List[str]
        parameters: T.List[str]
        sources: T.List[str]
        generated_sources: T.List[str]
# Languages that can mix with C or C++ but don't support unity builds yet
# because the syntax we use for unity builds is specific to C/++/ObjC/++.
# Assembly files cannot be unitified and neither can LLVM IR files
LANGS_CANT_UNITY: T.Tuple[str, ...] = ('d', 'fortran', 'vala')
class RegenInfo:
    '''Information pickled to disk so the build definition can be
    regenerated (see Backend.generate_regen_info()).'''

    def __init__(self, source_dir: str, build_dir: str, depfiles: T.List[str]):
        self.source_dir = source_dir
        self.build_dir = build_dir
        # Files whose change triggers regeneration (Backend.get_regen_filelist()).
        self.depfiles = depfiles
class TestProtocol(enum.Enum):
    '''Result-reporting protocol used by a test, convertible to and from
    its string spelling.'''

    EXITCODE = 0
    TAP = 1
    GTEST = 2
    RUST = 3

    @classmethod
    def from_str(cls, string: str) -> 'TestProtocol':
        '''Parse a protocol name; raises MesonException for unknown names.'''
        mapping = {'exitcode': cls.EXITCODE,
                   'tap': cls.TAP,
                   'gtest': cls.GTEST,
                   'rust': cls.RUST}
        try:
            return mapping[string]
        except KeyError:
            raise MesonException(f'unknown test format {string}') from None

    def __str__(self) -> str:
        '''Inverse of from_str(); TAP is the fallback spelling.'''
        names = {TestProtocol.EXITCODE: 'exitcode',
                 TestProtocol.GTEST: 'gtest',
                 TestProtocol.RUST: 'rust'}
        return names.get(self, 'tap')
class CleanTrees:
    '''
    Directories outputted by custom targets that have to be manually cleaned
    because on Linux `ninja clean` only deletes empty directories.
    '''
    def __init__(self, build_dir: str, trees: T.List[str]):
        self.build_dir = build_dir
        # Directory paths (relative to build_dir) to remove on clean.
        self.trees = trees
class InstallData:
    '''Everything `meson install` needs, pickled to install.dat.

    The per-kind lists (targets, headers, man, ...) are filled in by the
    Backend.generate_*_install() methods after construction.
    '''
    def __init__(self, source_dir: str, build_dir: str, prefix: str, libdir: str,
                 strip_bin: T.List[str], install_umask: T.Union[str, int],
                 mesonintrospect: T.List[str], version: str):
        # TODO: in python 3.8 or with typing_Extensions install_umask could be:
        # `T.Union[T.Literal['preserve'], int]`, which would be more accurate.
        self.source_dir = source_dir
        self.build_dir = build_dir
        self.prefix = prefix
        self.libdir = libdir
        # Command used to strip installed binaries (argv list).
        self.strip_bin = strip_bin
        self.install_umask = install_umask
        self.targets: T.List[TargetInstallData] = []
        self.headers: T.List[InstallDataBase] = []
        self.man: T.List[InstallDataBase] = []
        self.emptydir: T.List[InstallEmptyDir] = []
        self.data: T.List[InstallDataBase] = []
        self.symlinks: T.List[InstallSymlinkData] = []
        self.install_scripts: T.List[ExecutableSerialisation] = []
        self.install_subdirs: T.List[SubdirInstallData] = []
        # Command line used to invoke `meson introspect`.
        self.mesonintrospect = mesonintrospect
        # Meson version that produced this data.
        self.version = version
class TargetInstallData:
    '''Install instructions for one output file of a build/custom target.'''
    # TODO: install_mode should just always be a FileMode object

    def __init__(self, fname: str, outdir: str, outdir_name: str, aliases: T.Dict[str, str],
                 strip: bool, install_name_mappings: T.Mapping[str, str], rpath_dirs_to_remove: T.Set[bytes],
                 install_rpath: str, install_mode: T.Optional['FileMode'],
                 subproject: str, optional: bool = False, tag: T.Optional[str] = None):
        # Path of the built file, relative to the build dir.
        self.fname = fname
        # Destination directory (may contain {prefix}-style placeholders in outdir_name).
        self.outdir = outdir
        # Display/introspection name: destination dir template + basename.
        self.out_name = os.path.join(outdir_name, os.path.basename(fname))
        self.aliases = aliases
        self.strip = strip
        self.install_name_mappings = install_name_mappings
        self.rpath_dirs_to_remove = rpath_dirs_to_remove
        self.install_rpath = install_rpath
        self.install_mode = install_mode
        self.subproject = subproject
        # Optional entries are skipped silently if the file does not exist.
        self.optional = optional
        self.tag = tag
class InstallEmptyDir:
    '''Install instruction for an empty directory to create at install time.'''
    def __init__(self, path: str, install_mode: 'FileMode', subproject: str, tag: T.Optional[str] = None):
        self.path = path
        self.install_mode = install_mode
        self.subproject = subproject
        self.tag = tag
class InstallDataBase:
    '''Generic install instruction for a single file (headers, man pages,
    data files; subclassed for subdirs).'''
    def __init__(self, path: str, install_path: str, install_path_name: str,
                 install_mode: 'FileMode', subproject: str, tag: T.Optional[str] = None,
                 data_type: T.Optional[str] = None):
        # Absolute source path of the file to install.
        self.path = path
        # Concrete destination path.
        self.install_path = install_path
        # Destination with {prefix}/{datadir}-style placeholders, for display.
        self.install_path_name = install_path_name
        self.install_mode = install_mode
        self.subproject = subproject
        self.tag = tag
        self.data_type = data_type
class InstallSymlinkData:
    '''Install instruction for a symlink: create *name* pointing at *target*.'''
    def __init__(self, target: str, name: str, install_path: str,
                 subproject: str, tag: T.Optional[str] = None):
        # Link target (what the symlink points to).
        self.target = target
        # Full path of the symlink itself.
        self.name = name
        self.install_path = install_path
        self.subproject = subproject
        self.tag = tag
class SubdirInstallData(InstallDataBase):
    '''Install instruction for a whole directory tree, with exclusions.'''
    def __init__(self, path: str, install_path: str, install_path_name: str,
                 install_mode: 'FileMode', exclude: T.Tuple[T.Set[str], T.Set[str]],
                 subproject: str, tag: T.Optional[str] = None, data_type: T.Optional[str] = None):
        super().__init__(path, install_path, install_path_name, install_mode, subproject, tag, data_type)
        # (excluded files, excluded directories) relative to the source dir.
        self.exclude = exclude
class ExecutableSerialisation:
    '''Picklable description of a program invocation (wrapped commands,
    install scripts, ...), executed by scripts/meson_exe.'''
    # XXX: should capture and feed default to False, instead of None?
    def __init__(self, cmd_args: T.List[str],
                 env: T.Optional[build.EnvironmentVariables] = None,
                 exe_wrapper: T.Optional['programs.ExternalProgram'] = None,
                 workdir: T.Optional[str] = None,
                 extra_paths: T.Optional[T.List] = None,
                 capture: T.Optional[bool] = None,
                 feed: T.Optional[bool] = None,
                 tag: T.Optional[str] = None,
                 verbose: bool = False,
                 ) -> None:
        self.cmd_args = cmd_args
        self.env = env
        if exe_wrapper is not None:
            assert isinstance(exe_wrapper, programs.ExternalProgram)
        self.exe_wrapper = exe_wrapper
        self.workdir = workdir
        self.extra_paths = extra_paths
        # Whether to capture stdout / feed stdin (None = unspecified, see XXX above).
        self.capture = capture
        self.feed = feed
        # Set by callers after construction where needed.
        self.pickled = False
        self.skip_if_destdir = False
        self.verbose = verbose
        self.subproject = ''
        self.tag = tag
class TestSerialisation:
    '''Picklable description of a single test, consumed by mtest.'''
    def __init__(self, name: str, project: str, suite: T.List[str], fname: T.List[str],
                 is_cross_built: bool, exe_wrapper: T.Optional[programs.ExternalProgram],
                 needs_exe_wrapper: bool, is_parallel: bool, cmd_args: T.List[str],
                 env: build.EnvironmentVariables, should_fail: bool,
                 timeout: T.Optional[int], workdir: T.Optional[str],
                 extra_paths: T.List[str], protocol: TestProtocol, priority: int,
                 cmd_is_built: bool, depends: T.List[str], version: str):
        self.name = name
        self.project_name = project
        self.suite = suite
        # Command used to run the test (argv list).
        self.fname = fname
        self.is_cross_built = is_cross_built
        if exe_wrapper is not None:
            assert isinstance(exe_wrapper, programs.ExternalProgram)
        self.exe_wrapper = exe_wrapper
        self.is_parallel = is_parallel
        self.cmd_args = cmd_args
        self.env = env
        self.should_fail = should_fail
        self.timeout = timeout
        self.workdir = workdir
        self.extra_paths = extra_paths
        # Result-reporting protocol (see TestProtocol).
        self.protocol = protocol
        self.priority = priority
        self.needs_exe_wrapper = needs_exe_wrapper
        self.cmd_is_built = cmd_is_built
        self.depends = depends
        self.version = version
def get_backend_from_name(backend: str, build: T.Optional[build.Build] = None, interpreter: T.Optional['Interpreter'] = None) -> T.Optional['Backend']:
    '''Instantiate the backend named *backend*, or return None for an
    unknown name. 'vs' autodetects the installed Visual Studio version.

    Backend modules are imported lazily, per branch, so that only the
    selected backend's dependencies are loaded.
    '''
    if backend == 'ninja':
        from . import ninjabackend
        return ninjabackend.NinjaBackend(build, interpreter)
    elif backend == 'vs':
        from . import vs2010backend
        return vs2010backend.autodetect_vs_version(build, interpreter)
    elif backend == 'vs2010':
        from . import vs2010backend
        return vs2010backend.Vs2010Backend(build, interpreter)
    elif backend == 'vs2012':
        from . import vs2012backend
        return vs2012backend.Vs2012Backend(build, interpreter)
    elif backend == 'vs2013':
        from . import vs2013backend
        return vs2013backend.Vs2013Backend(build, interpreter)
    elif backend == 'vs2015':
        from . import vs2015backend
        return vs2015backend.Vs2015Backend(build, interpreter)
    elif backend == 'vs2017':
        from . import vs2017backend
        return vs2017backend.Vs2017Backend(build, interpreter)
    elif backend == 'vs2019':
        from . import vs2019backend
        return vs2019backend.Vs2019Backend(build, interpreter)
    elif backend == 'vs2022':
        from . import vs2022backend
        return vs2022backend.Vs2022Backend(build, interpreter)
    elif backend == 'xcode':
        from . import xcodebackend
        return xcodebackend.XCodeBackend(build, interpreter)
    return None
# This class contains the basic functionality that is needed by all backends.
# Feel free to move stuff in and out of it as you see fit.
class Backend:

    # None when the backend was constructed without a build object
    # (dummy-backend case, see __init__).
    environment: T.Optional['Environment']

    def __init__(self, build: T.Optional[build.Build], interpreter: T.Optional['Interpreter']):
        """Bind the backend to a build definition.

        When ``build`` is None a dummy backend is created; only
        ``environment`` (set to None) is available on such an instance.
        """
        # Make it possible to construct a dummy backend
        # This is used for introspection without a build directory
        if build is None:
            self.environment = None
            return
        self.build = build
        self.interpreter = interpreter
        self.environment = build.environment
        # Names of targets whose rules have already been emitted.
        self.processed_targets: T.Set[str] = set()
        self.name = '<UNKNOWN>'
        self.build_dir = self.environment.get_build_dir()
        self.source_dir = self.environment.get_source_dir()
        # Relative paths between the two tree roots, precomputed because
        # they are needed constantly when emitting rules.
        self.build_to_src = mesonlib.relpath(self.environment.get_source_dir(),
                                             self.environment.get_build_dir())
        self.src_to_build = mesonlib.relpath(self.environment.get_build_dir(),
                                             self.environment.get_source_dir())
def generate(self) -> None:
raise RuntimeError(f'generate is not implemented in {type(self).__name__}')
def get_target_filename(self, t: T.Union[build.Target, build.CustomTargetIndex], *, warn_multi_output: bool = True) -> str:
    """Return the target's primary output path, relative to the build root."""
    if isinstance(t, build.CustomTarget):
        outputs = t.get_outputs()
        if warn_multi_output and len(outputs) != 1:
            mlog.warning(f'custom_target {t.name!r} has more than one output! '
                         'Using the first one.')
        fname = outputs[0]
    elif isinstance(t, build.CustomTargetIndex):
        fname = t.get_outputs()[0]
    else:
        assert isinstance(t, build.BuildTarget)
        fname = t.get_filename()
    return os.path.join(self.get_target_dir(t), fname)
def get_target_filename_abs(self, target: T.Union[build.Target, build.CustomTargetIndex]) -> str:
    """Return the target's primary output as an absolute path."""
    rel = self.get_target_filename(target)
    return os.path.join(self.environment.get_build_dir(), rel)
def get_base_options_for_target(self, target: build.BuildTarget) -> OptionOverrideProxy:
    """Return base/builtin options with this target's overrides applied."""
    relevant = {key: val for key, val in self.environment.coredata.options.items()
                if key.type in {OptionType.BASE, OptionType.BUILTIN}}
    return OptionOverrideProxy(target.option_overrides_base, relevant)
def get_compiler_options_for_target(self, target: build.BuildTarget) -> OptionOverrideProxy:
    """Return per-compiler options with this target's overrides applied."""
    registry = {key: val for key, val in self.environment.coredata.options.items()
                if key.is_compiler()}
    return OptionOverrideProxy(target.option_overrides_compiler, registry)
def get_option_for_target(self, option_name: 'OptionKey', target: build.BuildTarget) -> T.Union[str, int, bool, 'WrapMode']:
    """Look up an option value, preferring the target's own override."""
    if option_name in target.option_overrides_base:
        raw = target.option_overrides_base[option_name]
        value = self.environment.coredata.validate_option_value(option_name, raw)
    else:
        value = self.environment.coredata.get_option(option_name.evolve(subproject=target.subproject))
    # We don't actually have WrapMode here to assert against, so just
    # cast; coredata is known to hold only these value types.
    # TODO: if get_option/validate_option_value ever become annotated,
    # this cast can go away.
    return T.cast(T.Union[str, int, bool, 'WrapMode'], value)
def get_source_dir_include_args(self, target: build.BuildTarget, compiler: 'Compiler', *, absolute_path: bool = False) -> T.List[str]:
curdir = target.get_subdir()
if absolute_path:
lead = self.source_dir
else:
lead = self.build_to_src
tmppath = os.path.normpath(os.path.join(lead, curdir))
return compiler.get_include_args(tmppath, False)
def get_build_dir_include_args(self, target: build.BuildTarget, compiler: 'Compiler', *, absolute_path: bool = False) -> T.List[str]:
if absolute_path:
curdir = os.path.join(self.build_dir, target.get_subdir())
else:
curdir = target.get_subdir()
if curdir == '':
curdir = '.'
return compiler.get_include_args(curdir, False)
def get_target_filename_for_linking(self, target: T.Union[build.Target, build.CustomTargetIndex]) -> T.Optional[str]:
    """Return the path to pass to the linker for `target`.

    Returns None only for executables that have no import library;
    raises for custom targets that are not linkable.
    """
    # On some platforms (msvc for instance), the file that is used for
    # dynamic linking is not the same as the dynamic library itself. This
    # file is called an import library, and we want to link against that.
    # On all other platforms, we link to the library directly.
    if isinstance(target, build.SharedLibrary):
        link_lib = target.get_import_filename() or target.get_filename()
        return os.path.join(self.get_target_dir(target), link_lib)
    elif isinstance(target, build.StaticLibrary):
        return os.path.join(self.get_target_dir(target), target.get_filename())
    elif isinstance(target, (build.CustomTarget, build.CustomTargetIndex)):
        if not target.is_linkable_target():
            raise MesonException(f'Tried to link against custom target "{target.name}", which is not linkable.')
        return os.path.join(self.get_target_dir(target), target.get_filename())
    elif isinstance(target, build.Executable):
        if target.import_filename:
            return os.path.join(self.get_target_dir(target), target.get_import_filename())
        else:
            # Executables without an import library cannot be linked against.
            return None
    raise AssertionError(f'BUG: Tried to link to {target!r} which is not linkable')
@lru_cache(maxsize=None)
def get_target_dir(self, target: T.Union[build.Target, build.CustomTargetIndex]) -> str:
    """Return the output directory of `target`, relative to the build root."""
    if isinstance(target, build.RunTarget):
        # Run targets produce no output, only a dummy top-level name.
        return ''
    if self.environment.coredata.get_option(OptionKey('layout')) == 'mirror':
        # Mirror layout: outputs go next to their sources.
        return target.get_subdir()
    return 'meson-out'
def get_target_dir_relative_to(self, t: build.Target, o: build.Target) -> str:
    '''Get a target dir relative to another target's directory'''
    build_root = self.environment.get_build_dir()
    t_dir = os.path.join(build_root, self.get_target_dir(t))
    o_dir = os.path.join(build_root, self.get_target_dir(o))
    return os.path.relpath(t_dir, o_dir)
def get_target_source_dir(self, target: build.Target) -> str:
    """Return the target's source directory, relative to the build root."""
    # An empty target dir would add a stray trailing separator via join.
    subdir = self.get_target_dir(target)
    if not subdir:
        return self.build_to_src
    return os.path.join(self.build_to_src, subdir)
def get_target_private_dir(self, target: T.Union[build.BuildTarget, build.CustomTarget, build.CustomTargetIndex]) -> str:
    """Return the target's private scratch dir ('<output>.p'), build-root relative."""
    # The original wrapped this in a single-argument os.path.join(),
    # which is a no-op; build the path directly.
    return self.get_target_filename(target, warn_multi_output=False) + '.p'
def get_target_private_dir_abs(self, target: T.Union[build.BuildTarget, build.CustomTarget, build.CustomTargetIndex]) -> str:
    """Absolute-path variant of get_target_private_dir."""
    priv = self.get_target_private_dir(target)
    return os.path.join(self.environment.get_build_dir(), priv)
@lru_cache(maxsize=None)
def get_target_generated_dir(
        self, target: T.Union[build.BuildTarget, build.CustomTarget, build.CustomTargetIndex],
        gensrc: T.Union[build.CustomTarget, build.CustomTargetIndex, build.GeneratedList],
        src: str) -> str:
    """Return the build-root-relative path of one generated source file.

    `target` consumes the source, `gensrc` is the generator that
    produced it, and `src` is the generated file name.
    """
    if isinstance(gensrc, (build.CustomTarget, build.CustomTargetIndex)):
        # CustomTarget generators emit into the CustomTarget's own dir.
        return os.path.join(self.get_target_dir(gensrc), src)
    # GeneratedList generators emit into the private build directory of
    # the target that uses the GeneratedList.
    return os.path.join(self.get_target_private_dir(target), src)
def get_unity_source_file(self, target: T.Union[build.BuildTarget, build.CustomTarget, build.CustomTargetIndex],
                          suffix: str, number: int) -> mesonlib.File:
    """Return the File for unity chunk `number` of `target` (built file)."""
    # A clash with a real 'foo-unity.cpp' is possible but very unlikely.
    unity_name = f'{target.name}-unity{number}.{suffix}'
    return mesonlib.File.from_built_file(self.get_target_private_dir(target), unity_name)
def generate_unity_files(self, target: build.BuildTarget, unity_src: str) -> T.List[mesonlib.File]:
    """Write per-language unity source files for `target`.

    Sources are concatenated (as #include lines) into chunks of at most
    ``unity_size`` files per unity file; returns the generated Files.
    """
    abs_files: T.List[str] = []
    result: T.List[mesonlib.File] = []
    compsrcs = classify_unity_sources(target.compilers.values(), unity_src)
    unity_size = self.get_option_for_target(OptionKey('unity_size'), target)
    assert isinstance(unity_size, int), 'for mypy'

    def init_language_file(suffix: str, unity_file_number: int) -> T.TextIO:
        # Open (as a .tmp file) the next unity chunk for one language.
        unity_src = self.get_unity_source_file(target, suffix, unity_file_number)
        outfileabs = unity_src.absolute_path(self.environment.get_source_dir(),
                                             self.environment.get_build_dir())
        outfileabs_tmp = outfileabs + '.tmp'
        abs_files.append(outfileabs)
        outfileabs_tmp_dir = os.path.dirname(outfileabs_tmp)
        if not os.path.exists(outfileabs_tmp_dir):
            os.makedirs(outfileabs_tmp_dir)
        result.append(unity_src)
        return open(outfileabs_tmp, 'w', encoding='utf-8')

    # For each language, generate unity source files and return the list
    for comp, srcs in compsrcs.items():
        # Start past the limit so the first iteration opens a new file.
        files_in_current = unity_size + 1
        unity_file_number = 0
        # TODO: this could be simplified with an algorithm that pre-sorts
        # the sources into the size of chunks we want
        ofile = None
        for src in srcs:
            if files_in_current >= unity_size:
                if ofile:
                    ofile.close()
                ofile = init_language_file(comp.get_default_suffix(), unity_file_number)
                unity_file_number += 1
                files_in_current = 0
            ofile.write(f'#include<{src}>\n')
            files_in_current += 1
        if ofile:
            ofile.close()

    # Only replace files whose content actually changed, so timestamps
    # (and therefore rebuilds) are not perturbed needlessly.
    for x in abs_files:
        mesonlib.replace_if_different(x, x + '.tmp')
    return result
@staticmethod
def relpath(todir: str, fromdir: str) -> str:
return os.path.relpath(os.path.join('dummyprefixdir', todir),
os.path.join('dummyprefixdir', fromdir))
def flatten_object_list(self, target: build.BuildTarget, proj_dir_to_build_root: str = '') -> T.List[str]:
    """Return the target's object files, de-duplicated, order preserved."""
    objs = self._flatten_object_list(target, target.get_objects(), proj_dir_to_build_root)
    # dict.fromkeys keeps first-seen order while removing duplicates.
    return list(dict.fromkeys(objs))
def _flatten_object_list(self, target: build.BuildTarget,
                         objects: T.Sequence[T.Union[str, 'File', build.ExtractedObjects]],
                         proj_dir_to_build_root: str) -> T.List[str]:
    """Recursively resolve `objects` into build-root-relative path strings."""
    obj_list: T.List[str] = []
    for obj in objects:
        if isinstance(obj, str):
            # Plain string: interpreted relative to the target's source dir.
            o = os.path.join(proj_dir_to_build_root,
                             self.build_to_src, target.get_subdir(), obj)
            obj_list.append(o)
        elif isinstance(obj, mesonlib.File):
            if obj.is_built:
                o = os.path.join(proj_dir_to_build_root,
                                 obj.rel_to_builddir(self.build_to_src))
                obj_list.append(o)
            else:
                o = os.path.join(proj_dir_to_build_root,
                                 self.build_to_src)
                obj_list.append(obj.rel_to_builddir(o))
        elif isinstance(obj, build.ExtractedObjects):
            # Objects extracted from another target; recurse if requested.
            if obj.recursive:
                obj_list += self._flatten_object_list(obj.target, obj.objlist, proj_dir_to_build_root)
            obj_list += self.determine_ext_objs(obj, proj_dir_to_build_root)
        else:
            raise MesonException('Unknown data type in object list.')
    return obj_list
@staticmethod
def is_swift_target(target: build.BuildTarget) -> bool:
for s in target.sources:
if s.endswith('swift'):
return True
return False
def determine_swift_dep_dirs(self, target: build.BuildTarget) -> T.List[str]:
result: T.List[str] = []
for l in target.link_targets:
result.append(self.get_target_private_dir_abs(l))
return result
def get_executable_serialisation(
        self, cmd: T.Sequence[T.Union[programs.ExternalProgram, build.BuildTarget, build.CustomTarget, File, str]],
        workdir: T.Optional[str] = None,
        extra_bdeps: T.Optional[T.List[build.BuildTarget]] = None,
        capture: T.Optional[bool] = None,
        feed: T.Optional[bool] = None,
        env: T.Optional[build.EnvironmentVariables] = None,
        tag: T.Optional[str] = None,
        verbose: bool = False) -> 'ExecutableSerialisation':
    """Lower a mixed command (programs/targets/files/strings) into an
    ExecutableSerialisation with plain string arguments.

    Also computes extra PATH entries on Windows and decides whether an
    exe wrapper is required for cross-built executables.
    """
    # XXX: cmd_args either need to be lowered to strings, or need to be checked for non-string arguments, right?
    exe, *raw_cmd_args = cmd
    if isinstance(exe, programs.ExternalProgram):
        exe_cmd = exe.get_command()
        exe_for_machine = exe.for_machine
    elif isinstance(exe, build.BuildTarget):
        exe_cmd = [self.get_target_filename_abs(exe)]
        exe_for_machine = exe.for_machine
    elif isinstance(exe, build.CustomTarget):
        # The output of a custom target can either be directly runnable
        # or not, that is, a script, a native binary or a cross compiled
        # binary when exe wrapper is available and when it is not.
        # This implementation is not exhaustive but it works in the
        # common cases.
        exe_cmd = [self.get_target_filename_abs(exe)]
        exe_for_machine = MachineChoice.BUILD
    elif isinstance(exe, mesonlib.File):
        exe_cmd = [exe.rel_to_builddir(self.environment.source_dir)]
        exe_for_machine = MachineChoice.BUILD
    else:
        exe_cmd = [exe]
        exe_for_machine = MachineChoice.BUILD

    # Lower each remaining argument to a plain string.
    cmd_args: T.List[str] = []
    for c in raw_cmd_args:
        if isinstance(c, programs.ExternalProgram):
            p = c.get_path()
            assert isinstance(p, str)
            cmd_args.append(p)
        elif isinstance(c, (build.BuildTarget, build.CustomTarget)):
            cmd_args.append(self.get_target_filename_abs(c))
        elif isinstance(c, mesonlib.File):
            cmd_args.append(c.rel_to_builddir(self.environment.source_dir))
        else:
            cmd_args.append(c)

    machine = self.environment.machines[exe_for_machine]
    if machine.is_windows() or machine.is_cygwin():
        # Windows has no rpath; DLL lookup is emulated via PATH.
        extra_paths = self.determine_windows_extra_paths(exe, extra_bdeps or [])
    else:
        extra_paths = []

    is_cross_built = not self.environment.machines.matches_build_machine(exe_for_machine)
    if is_cross_built and self.environment.need_exe_wrapper():
        exe_wrapper = self.environment.get_exe_wrapper()
        if not exe_wrapper or not exe_wrapper.found():
            msg = 'An exe_wrapper is needed but was not found. Please define one ' \
                  'in cross file and check the command and/or add it to PATH.'
            raise MesonException(msg)
    else:
        # Natively runnable: still wrap .jar files with java, and .exe
        # files with mono on non-Windows hosts.
        if exe_cmd[0].endswith('.jar'):
            exe_cmd = ['java', '-jar'] + exe_cmd
        elif exe_cmd[0].endswith('.exe') and not (mesonlib.is_windows() or mesonlib.is_cygwin() or mesonlib.is_wsl()):
            exe_cmd = ['mono'] + exe_cmd
        exe_wrapper = None

    workdir = workdir or self.environment.get_build_dir()
    return ExecutableSerialisation(exe_cmd + cmd_args, env,
                                   exe_wrapper, workdir,
                                   extra_paths, capture, feed, tag, verbose)
def as_meson_exe_cmdline(self, exe: T.Union[str, mesonlib.File, build.BuildTarget, build.CustomTarget, programs.ExternalProgram],
                         cmd_args: T.Sequence[T.Union[str, mesonlib.File, build.BuildTarget, build.CustomTarget, programs.ExternalProgram]],
                         workdir: T.Optional[str] = None,
                         extra_bdeps: T.Optional[T.List[build.BuildTarget]] = None,
                         capture: T.Optional[bool] = None,
                         feed: T.Optional[bool] = None,
                         force_serialize: bool = False,
                         env: T.Optional[build.EnvironmentVariables] = None,
                         verbose: bool = False) -> T.Tuple[T.Sequence[T.Union[str, File, build.Target, programs.ExternalProgram]], str]:
    '''
    Serialize an executable for running with a generator or a custom target

    Returns the command line to run plus a comma-joined, human-readable
    list of the reasons serialization was needed (empty string when the
    command can run directly).
    '''
    cmd: T.List[T.Union[str, mesonlib.File, build.BuildTarget, build.CustomTarget, programs.ExternalProgram]] = []
    cmd.append(exe)
    cmd.extend(cmd_args)
    es = self.get_executable_serialisation(cmd, workdir, extra_bdeps, capture, feed, env, verbose=verbose)
    reasons: T.List[str] = []
    if es.extra_paths:
        reasons.append('to set PATH')

    if es.exe_wrapper:
        reasons.append('to use exe_wrapper')

    if workdir:
        reasons.append('to set workdir')

    if any('\n' in c for c in es.cmd_args):
        reasons.append('because command contains newlines')

    if es.env and es.env.varnames:
        reasons.append('to set env')

    # capture/feed are recorded below only for the message; on their own
    # they are handled by the '--capture/--feed' branch and do not force
    # full pickling.
    force_serialize = force_serialize or bool(reasons)

    if capture:
        reasons.append('to capture output')
    if feed:
        reasons.append('to feed input')

    if not force_serialize:
        if not capture and not feed:
            return es.cmd_args, ''
        args: T.List[str] = []
        if capture:
            args += ['--capture', str(capture)]
        if feed:
            args += ['--feed', str(feed)]
        return (
            self.environment.get_build_command() + ['--internal', 'exe'] + args + ['--'] + es.cmd_args,
            ', '.join(reasons)
        )

    if isinstance(exe, (programs.ExternalProgram,
                        build.BuildTarget, build.CustomTarget)):
        basename = exe.name
    elif isinstance(exe, mesonlib.File):
        basename = os.path.basename(exe.fname)
    else:
        basename = os.path.basename(exe)

    # Can't just use exe.name here; it will likely be run more than once
    # Take a digest of the cmd args, env, workdir, capture, and feed. This
    # avoids collisions and also makes the name deterministic over
    # regenerations which avoids a rebuild by Ninja because the cmdline
    # stays the same.
    hasher = hashlib.sha1()
    if es.env:
        es.env.hash(hasher)
    hasher.update(bytes(str(es.cmd_args), encoding='utf-8'))
    hasher.update(bytes(str(es.workdir), encoding='utf-8'))
    hasher.update(bytes(str(capture), encoding='utf-8'))
    hasher.update(bytes(str(feed), encoding='utf-8'))
    digest = hasher.hexdigest()
    scratch_file = f'meson_exe_{basename}_{digest}.dat'
    exe_data = os.path.join(self.environment.get_scratch_dir(), scratch_file)
    with open(exe_data, 'wb') as f:
        pickle.dump(es, f)
    return (self.environment.get_build_command() + ['--internal', 'exe', '--unpickle', exe_data],
            ', '.join(reasons))
def serialize_tests(self) -> T.Tuple[str, str]:
    """Pickle test and benchmark definitions into the scratch dir.

    Returns the two data file paths (tests, benchmarks).
    """
    scratch = self.environment.get_scratch_dir()
    test_data = os.path.join(scratch, 'meson_test_setup.dat')
    with open(test_data, 'wb') as datafile:
        self.write_test_file(datafile)
    benchmark_data = os.path.join(scratch, 'meson_benchmark_setup.dat')
    with open(benchmark_data, 'wb') as datafile:
        self.write_benchmark_file(datafile)
    return test_data, benchmark_data
def determine_linker_and_stdlib_args(self, target: build.BuildTarget) -> T.Tuple[T.Union['Compiler', 'StaticLinker'], T.List[str]]:
    '''
    If we're building a static library, there is only one static linker.
    Otherwise, we query the target for the dynamic linker.
    '''
    if isinstance(target, build.StaticLibrary):
        return self.build.static_linker[target.for_machine], []
    return target.get_clink_dynamic_linker_and_stdlibs()
@staticmethod
def _libdir_is_system(libdir: str, compilers: T.Mapping[str, 'Compiler'], env: 'Environment') -> bool:
libdir = os.path.normpath(libdir)
for cc in compilers.values():
if libdir in cc.get_library_dirs(env):
return True
return False
def get_external_rpath_dirs(self, target: build.BuildTarget) -> T.Set[str]:
    """Collect rpath directories already requested via external link args."""
    dirs: T.Set[str] = set()
    args: T.List[str] = []
    for lang in LANGUAGES_USING_LDFLAGS:
        try:
            e = self.environment.coredata.get_external_link_args(target.for_machine, lang)
            if isinstance(e, str):
                args.append(e)
            else:
                args.extend(e)
        except Exception:
            # Language not configured for this machine; nothing to add.
            pass
    # Match rpath formats:
    # -Wl,-rpath=
    # -Wl,-rpath,
    rpath_regex = re.compile(r'-Wl,-rpath[=,]([^,]+)')
    # Match solaris style compat runpath formats:
    # -Wl,-R
    # -Wl,-R,
    runpath_regex = re.compile(r'-Wl,-R[,]?([^,]+)')
    # Match symbols formats:
    # -Wl,--just-symbols=
    # -Wl,--just-symbols,
    symbols_regex = re.compile(r'-Wl,--just-symbols[=,]([^,]+)')
    for arg in args:
        rpath_match = rpath_regex.match(arg)
        if rpath_match:
            for dir in rpath_match.group(1).split(':'):
                dirs.add(dir)
        runpath_match = runpath_regex.match(arg)
        if runpath_match:
            for dir in runpath_match.group(1).split(':'):
                # A -R argument counts as an rpath only when the path
                # is an existing directory.
                if Path(dir).is_dir():
                    dirs.add(dir)
        symbols_match = symbols_regex.match(arg)
        if symbols_match:
            for dir in symbols_match.group(1).split(':'):
                # Prevent usage of --just-symbols to specify rpath
                if Path(dir).is_dir():
                    raise MesonException(f'Invalid arg for --just-symbols, {dir} is a directory.')
    return dirs
@lru_cache(maxsize=None)
def rpaths_for_bundled_shared_libraries(self, target: build.BuildTarget, exclude_system: bool = True) -> 'ImmutableListProtocol[str]':
    """Return rpath dirs needed to locate external deps' library files."""
    paths: T.List[str] = []
    for dep in target.external_deps:
        if not isinstance(dep, (dependencies.ExternalLibrary, dependencies.PkgConfigDependency)):
            continue
        la = dep.link_args
        if len(la) != 1 or not os.path.isabs(la[0]):
            continue
        # The only link argument is an absolute path to a library file.
        libpath = la[0]
        libdir = os.path.dirname(libpath)
        if exclude_system and self._libdir_is_system(libdir, target.compilers, self.environment):
            # No point in adding system paths.
            continue
        # Don't remove rpaths specified in LDFLAGS.
        if libdir in self.get_external_rpath_dirs(target):
            continue
        # Windows doesn't support rpaths, but we use this function to
        # emulate rpaths by setting PATH, so also accept DLLs here
        if os.path.splitext(libpath)[1] not in ['.dll', '.lib', '.so', '.dylib']:
            continue
        if libdir.startswith(self.environment.get_source_dir()):
            # Library inside the source tree: make the rpath relative.
            rel_to_src = libdir[len(self.environment.get_source_dir()) + 1:]
            assert not os.path.isabs(rel_to_src), f'rel_to_src: {rel_to_src} is absolute'
            paths.append(os.path.join(self.build_to_src, rel_to_src))
        else:
            paths.append(libdir)
    # Recurse into every internal library this target links with.
    for i in chain(target.link_targets, target.link_whole_targets):
        if isinstance(i, build.BuildTarget):
            paths.extend(self.rpaths_for_bundled_shared_libraries(i, exclude_system))
    return paths
# This may take other types
def determine_rpath_dirs(self, target: T.Union[build.BuildTarget, build.CustomTarget, build.CustomTargetIndex]
                         ) -> T.Tuple[str, ...]:
    """Return the rpath directories for `target`, order preserved."""
    result: OrderedSet[str]
    if self.environment.coredata.get_option(OptionKey('layout')) == 'mirror':
        # Need a copy here
        result = OrderedSet(target.get_link_dep_subdirs())
    else:
        result = OrderedSet()
        result.add('meson-out')
    if isinstance(target, build.BuildTarget):
        result.update(self.rpaths_for_bundled_shared_libraries(target))
        # Remember these so the install step can strip them again later.
        target.rpath_dirs_to_remove.update([d.encode('utf-8') for d in result])
    return tuple(result)
@staticmethod
def canonicalize_filename(fname: str) -> str:
for ch in ('/', '\\', ':'):
fname = fname.replace(ch, '_')
return fname
def object_filename_from_source(self, target: build.BuildTarget, source: 'FileOrString') -> str:
    """Map a source file to the object file name the backend will emit."""
    assert isinstance(source, mesonlib.File)
    build_dir = self.environment.get_build_dir()
    rel_src = source.rel_to_builddir(self.build_to_src)

    # foo.vala files compile down to foo.c and then foo.c.o, not foo.vala.o
    if rel_src.endswith(('.vala', '.gs')):
        # See description in generate_vala_compile for this logic.
        if source.is_built:
            if os.path.isabs(rel_src):
                rel_src = rel_src[len(build_dir) + 1:]
            rel_src = os.path.relpath(rel_src, self.get_target_private_dir(target))
        else:
            rel_src = os.path.basename(rel_src)
        # A meson- prefixed directory is reserved; hopefully no-one creates a file name with such a weird prefix.
        # NOTE(review): [:-5] assumes a 5-char '.vala' suffix; a '.gs'
        # file loses two extra characters here — confirm intended.
        gen_source = 'meson-generated_' + rel_src[:-5] + '.c'
    elif source.is_built:
        if os.path.isabs(rel_src):
            rel_src = rel_src[len(build_dir) + 1:]
        targetdir = self.get_target_private_dir(target)
        # A meson- prefixed directory is reserved; hopefully no-one creates a file name with such a weird prefix.
        gen_source = 'meson-generated_' + os.path.relpath(rel_src, targetdir)
    else:
        if os.path.isabs(rel_src):
            # Use the absolute path directly to avoid file name conflicts
            gen_source = rel_src
        else:
            gen_source = os.path.relpath(os.path.join(build_dir, rel_src),
                                         os.path.join(self.environment.get_source_dir(), target.get_subdir()))
    machine = self.environment.machines[target.for_machine]
    return self.canonicalize_filename(gen_source) + '.' + machine.get_object_suffix()
def determine_ext_objs(self, extobj: 'build.ExtractedObjects', proj_dir_to_build_root: str) -> T.List[str]:
    """Resolve an ExtractedObjects into concrete object-file paths."""
    result: T.List[str] = []

    # Merge sources and generated sources
    raw_sources = list(extobj.srclist)
    for gensrc in extobj.genlist:
        for r in gensrc.get_outputs():
            path = self.get_target_generated_dir(extobj.target, gensrc, r)
            dirpart, fnamepart = os.path.split(path)
            raw_sources.append(File(True, dirpart, fnamepart))

    # Filter out headers and all non-source files
    sources: T.List['FileOrString'] = []
    for s in raw_sources:
        if self.environment.is_source(s) and not self.environment.is_header(s):
            sources.append(s)
        elif self.environment.is_object(s):
            # Pre-built objects pass straight through.
            result.append(s.relative_name())

    # extobj could contain only objects and no sources
    if not sources:
        return result

    targetdir = self.get_target_private_dir(extobj.target)

    # With unity builds, sources don't map directly to objects,
    # we only support extracting all the objects in this mode,
    # so just return all object files.
    if self.is_unity(extobj.target):
        compsrcs = classify_unity_sources(extobj.target.compilers.values(), sources)
        sources = []
        unity_size = self.get_option_for_target(OptionKey('unity_size'), extobj.target)
        assert isinstance(unity_size, int), 'for mypy'

        for comp, srcs in compsrcs.items():
            if comp.language in LANGS_CANT_UNITY:
                sources += srcs
                continue
            for i in range(len(srcs) // unity_size + 1):
                _src = self.get_unity_source_file(extobj.target,
                                                  comp.get_default_suffix(), i)
                sources.append(_src)

    for osrc in sources:
        objname = self.object_filename_from_source(extobj.target, osrc)
        objpath = os.path.join(proj_dir_to_build_root, targetdir, objname)
        result.append(objpath)

    return result
def get_pch_include_args(self, compiler: 'Compiler', target: build.BuildTarget) -> T.List[str]:
    """Return include + use-PCH args for the target's precompiled header."""
    pch_dir = self.get_target_private_dir(target)
    include_args = compiler.get_include_args(pch_dir, False)
    pch = target.get_pch(compiler.get_language())
    use_args = compiler.get_pch_use_args(pch_dir, pch[0]) if pch else []
    return include_args + use_args
def create_msvc_pch_implementation(self, target: build.BuildTarget, lang: str, pch_header: str) -> str:
    """Write the tiny source file that #includes the PCH header (MSVC)."""
    # Include the language in the name: pch.c and pch.cpp would
    # otherwise both become pch.obj in the VS backends.
    impl_name = f'meson_pch-{lang}.{lang}'
    rel_path = os.path.join(self.get_target_private_dir(target), impl_name)
    # The working directory is not defined, so anchor at the build dir
    # to avoid creating the file in the wrong place.
    abs_path = os.path.join(self.build_dir, rel_path)
    os.makedirs(os.path.dirname(abs_path), exist_ok=True)
    tmp_path = abs_path + '.tmp'
    with open(tmp_path, 'w', encoding='utf-8') as f:
        f.write(f'#include "{os.path.basename(pch_header)}"')
    # Only touch the real file if the content changed.
    mesonlib.replace_if_different(abs_path, tmp_path)
    return rel_path
@staticmethod
def escape_extra_args(args: T.List[str]) -> T.List[str]:
# all backslashes in defines are doubly-escaped
extra_args: T.List[str] = []
for arg in args:
if arg.startswith(('-D', '/D')):
arg = arg.replace('\\', '\\\\')
extra_args.append(arg)
return extra_args
def get_no_stdlib_args(self, target: 'build.BuildTarget', compiler: 'Compiler') -> T.List[str]:
if compiler.language in self.build.stdlibs[target.for_machine]:
return compiler.get_no_stdinc_args()
return []
def generate_basic_compiler_args(self, target: build.BuildTarget, compiler: 'Compiler', no_warn_args: bool = False) -> 'CompilerArgs':
    """Assemble the baseline compile arguments for `target` with `compiler`.

    Argument groups are appended in override order: hard-coded defaults
    first, then build options, project/global args, environment args,
    and finally per-dependency args.
    """
    # Create an empty commands list, and start adding arguments from
    # various sources in the order in which they must override each other
    # starting from hard-coded defaults followed by build options and so on.
    commands = compiler.compiler_args()

    copt_proxy = self.get_compiler_options_for_target(target)
    # First, the trivial ones that are impossible to override.
    #
    # Add -nostdinc/-nostdinc++ if needed; can't be overridden
    commands += self.get_no_stdlib_args(target, compiler)
    # Add things like /NOLOGO or -pipe; usually can't be overridden
    commands += compiler.get_always_args()
    # Only add warning-flags by default if the buildtype enables it, and if
    # we weren't explicitly asked to not emit warnings (for Vala, f.ex)
    if no_warn_args:
        commands += compiler.get_no_warn_args()
    else:
        # warning_level is a string, but mypy can't determine that
        commands += compiler.get_warn_args(T.cast(str, self.get_option_for_target(OptionKey('warning_level'), target)))
    # Add -Werror if werror=true is set in the build options set on the
    # command-line or default_options inside project(). This only sets the
    # action to be done for warnings if/when they are emitted, so it's ok
    # to set it after get_no_warn_args() or get_warn_args().
    if self.get_option_for_target(OptionKey('werror'), target):
        commands += compiler.get_werror_args()
    # Add compile args for c_* or cpp_* build options set on the
    # command-line or default_options inside project().
    commands += compiler.get_option_compile_args(copt_proxy)

    # Add buildtype args: optimization level, debugging, etc.
    buildtype = self.get_option_for_target(OptionKey('buildtype'), target)
    assert isinstance(buildtype, str), 'for mypy'
    commands += compiler.get_buildtype_args(buildtype)

    optimization = self.get_option_for_target(OptionKey('optimization'), target)
    assert isinstance(optimization, str), 'for mypy'
    commands += compiler.get_optimization_args(optimization)

    debug = self.get_option_for_target(OptionKey('debug'), target)
    assert isinstance(debug, bool), 'for mypy'
    commands += compiler.get_debug_args(debug)

    # Add compile args added using add_project_arguments()
    commands += self.build.get_project_args(compiler, target.subproject, target.for_machine)
    # Add compile args added using add_global_arguments()
    # These override per-project arguments
    commands += self.build.get_global_args(compiler, target.for_machine)
    # Using both /ZI and /Zi at the same times produces a compiler warning.
    # We do not add /ZI by default. If it is being used it is because the user has explicitly enabled it.
    # /ZI needs to be removed in that case to avoid cl's warning to that effect (D9025 : overriding '/ZI' with '/Zi')
    if ('/ZI' in commands) and ('/Zi' in commands):
        commands.remove('/Zi')
    # Compile args added from the env: CFLAGS/CXXFLAGS, etc, or the cross
    # file. We want these to override all the defaults, but not the
    # per-target compile args.
    commands += self.environment.coredata.get_external_args(target.for_machine, compiler.get_language())
    # Always set -fPIC for shared libraries
    if isinstance(target, build.SharedLibrary):
        commands += compiler.get_pic_args()
    # Set -fPIC for static libraries by default unless explicitly disabled
    if isinstance(target, build.StaticLibrary) and target.pic:
        commands += compiler.get_pic_args()
    elif isinstance(target, (build.StaticLibrary, build.Executable)) and target.pie:
        commands += compiler.get_pie_args()
    # Add compile args needed to find external dependencies. Link args are
    # added while generating the link command.
    # NOTE: We must preserve the order in which external deps are
    # specified, so we reverse the list before iterating over it.
    for dep in reversed(target.get_external_deps()):
        if not dep.found():
            continue

        if compiler.language == 'vala':
            if isinstance(dep, dependencies.PkgConfigDependency):
                if dep.name == 'glib-2.0' and dep.version_reqs is not None:
                    for req in dep.version_reqs:
                        if req.startswith(('>=', '==')):
                            commands += ['--target-glib', req[2:]]
                            break
                commands += ['--pkg', dep.name]
            elif isinstance(dep, dependencies.ExternalLibrary):
                commands += dep.get_link_args('vala')
        else:
            commands += compiler.get_dependency_compile_args(dep)
        # Qt needs -fPIC for executables
        # XXX: We should move to -fPIC for all executables
        if isinstance(target, build.Executable):
            commands += dep.get_exe_args(compiler)
        # For 'automagic' deps: Boost and GTest. Also dependency('threads').
        # pkg-config puts the thread flags itself via `Cflags:`
    # Fortran requires extra include directives.
    if compiler.language == 'fortran':
        for lt in chain(target.link_targets, target.link_whole_targets):
            priv_dir = self.get_target_private_dir(lt)
            commands += compiler.get_include_args(priv_dir, False)
    return commands
def build_target_link_arguments(self, compiler: 'Compiler', deps: T.List[build.Target]) -> T.List[str]:
args: T.List[str] = []
for d in deps:
if not d.is_linkable_target():
raise RuntimeError(f'Tried to link with a non-library target "{d.get_basename()}".')
arg = self.get_target_filename_for_linking(d)
if not arg:
continue
if compiler.get_language() == 'd':
arg = '-Wl,' + arg
else:
arg = compiler.get_linker_lib_prefix() + arg
args.append(arg)
return args
def get_mingw_extra_paths(self, target: build.BuildTarget) -> T.List[str]:
    '''Collect candidate binary/library search paths for MinGW cross builds.'''
    found: OrderedSet[str] = OrderedSet()
    props = self.environment.properties[target.for_machine]
    # The cross root's bindir, if a root was configured.
    root = props.get_root()
    if root:
        found.add(os.path.join(root, 'bin'))
    # The toolchain sysroot's bindir.
    sys_root = props.get_sys_root()
    if sys_root:
        found.add(os.path.join(sys_root, 'bin'))
    # Program and library dirs reported by each compiler the target uses.
    if isinstance(target, build.BuildTarget):
        for comp in target.compilers.values():
            found.update(comp.get_program_dirs(self.environment))
            found.update(comp.get_library_dirs(self.environment))
    return list(found)
def determine_windows_extra_paths(
        self, target: T.Union[build.BuildTarget, build.CustomTarget, programs.ExternalProgram, mesonlib.File, str],
        extra_bdeps: T.Sequence[T.Union[build.BuildTarget, build.CustomTarget]]) -> T.List[str]:
    """On Windows there is no such thing as an rpath.
    We must determine all locations of DLLs that this exe
    links to and return them so they can be used in unit
    tests.

    Returns build-tree directories (as absolute paths) of every target
    that might provide a DLL at run time, plus bundled external library
    directories; order is unspecified (built from a set).
    """
    result: T.Set[str] = set()
    prospectives: T.Set[build.Target] = set()
    if isinstance(target, build.BuildTarget):
        prospectives.update(target.get_transitive_link_deps())
        # External deps
        for deppath in self.rpaths_for_bundled_shared_libraries(target, exclude_system=False):
            result.add(os.path.normpath(os.path.join(self.environment.get_build_dir(), deppath)))
    # Extra build deps (e.g. from custom targets) are searched the same
    # way, including their own transitive link dependencies.
    for bdep in extra_bdeps:
        prospectives.add(bdep)
        if isinstance(bdep, build.BuildTarget):
            prospectives.update(bdep.get_transitive_link_deps())
    # Internal deps
    for ld in prospectives:
        dirseg = os.path.join(self.environment.get_build_dir(), self.get_target_dir(ld))
        result.add(dirseg)
    # Cross-compiled Windows binaries may also need the MinGW toolchain dirs.
    if (isinstance(target, build.BuildTarget) and
            not self.environment.machines.matches_build_machine(target.for_machine)):
        result.update(self.get_mingw_extra_paths(target))
    return list(result)
def write_benchmark_file(self, datafile: T.BinaryIO) -> None:
    # Benchmarks share the test serialisation format; only the source
    # list differs.
    self.write_test_serialisation(self.build.get_benchmarks(), datafile)
def write_test_file(self, datafile: T.BinaryIO) -> None:
    # Pickle all registered tests into the given binary stream.
    self.write_test_serialisation(self.build.get_tests(), datafile)
def create_test_serialisation(self, tests: T.List['Test']) -> T.List[TestSerialisation]:
    '''Convert Test objects into picklable TestSerialisation records.

    Tests are emitted in descending priority order. For each test this
    resolves the command line, whether an exe wrapper is needed for
    cross execution, Windows DLL search paths, and the set of targets
    the test depends on.
    '''
    arr: T.List[TestSerialisation] = []
    # Higher priority first (negate so sorted() ascends to descending).
    for t in sorted(tests, key=lambda tst: -1 * tst.priority):
        exe = t.get_exe()
        if isinstance(exe, programs.ExternalProgram):
            cmd = exe.get_command()
        else:
            cmd = [os.path.join(self.environment.get_build_dir(), self.get_target_filename(exe))]
        if isinstance(exe, (build.BuildTarget, programs.ExternalProgram)):
            test_for_machine = exe.for_machine
        else:
            # E.g. an external verifier or simulator program run on a generated executable.
            # Can always be run without a wrapper.
            test_for_machine = MachineChoice.BUILD
        # we allow passing compiled executables to tests, which may be cross built.
        # We need to consider these as well when considering whether the target is cross or not.
        for a in t.cmd_args:
            if isinstance(a, build.BuildTarget):
                if a.for_machine is MachineChoice.HOST:
                    test_for_machine = MachineChoice.HOST
                    break
        is_cross = self.environment.is_cross_build(test_for_machine)
        if is_cross and self.environment.need_exe_wrapper():
            exe_wrapper = self.environment.get_exe_wrapper()
        else:
            exe_wrapper = None
        machine = self.environment.machines[exe.for_machine]
        if machine.is_windows() or machine.is_cygwin():
            # Windows has no rpath: compute the DLL search paths explicitly.
            extra_bdeps: T.List[T.Union[build.BuildTarget, build.CustomTarget]] = []
            if isinstance(exe, build.CustomTarget):
                extra_bdeps = list(exe.get_transitive_build_target_deps())
            extra_paths = self.determine_windows_extra_paths(exe, extra_bdeps)
        else:
            extra_paths = []
        cmd_args: T.List[str] = []
        depends: T.Set[build.Target] = set(t.depends)
        if isinstance(exe, build.Target):
            depends.add(exe)
        # Serialise each command argument to a string, tracking target deps.
        for a in t.cmd_args:
            if isinstance(a, build.Target):
                depends.add(a)
            if isinstance(a, build.BuildTarget):
                extra_paths += self.determine_windows_extra_paths(a, [])
            if isinstance(a, mesonlib.File):
                a = os.path.join(self.environment.get_build_dir(), a.rel_to_builddir(self.build_to_src))
                cmd_args.append(a)
            elif isinstance(a, str):
                cmd_args.append(a)
            elif isinstance(a, build.Executable):
                p = self.construct_target_rel_path(a, t.workdir)
                if p == a.get_filename():
                    # Bare filename: force a ./ prefix so the shell does
                    # not search PATH instead.
                    p = './' + p
                cmd_args.append(p)
            elif isinstance(a, build.Target):
                cmd_args.append(self.construct_target_rel_path(a, t.workdir))
            else:
                raise MesonException('Bad object in test command.')
        ts = TestSerialisation(t.get_name(), t.project_name, t.suite, cmd, is_cross,
                               exe_wrapper, self.environment.need_exe_wrapper(),
                               t.is_parallel, cmd_args, t.env,
                               t.should_fail, t.timeout, t.workdir,
                               extra_paths, t.protocol, t.priority,
                               isinstance(exe, build.Executable),
                               [x.get_id() for x in depends],
                               self.environment.coredata.version)
        arr.append(ts)
    return arr
def write_test_serialisation(self, tests: T.List['Test'], datafile: T.BinaryIO) -> None:
    # Pickle the serialised test descriptions for later consumption by mtest.
    pickle.dump(self.create_test_serialisation(tests), datafile)
def construct_target_rel_path(self, a: build.Target, workdir: T.Optional[str]) -> str:
    '''Return target *a*'s path for use from a test's working directory.

    With no workdir the build-dir-relative filename is returned;
    otherwise the path is made relative to the (absolute) workdir.
    '''
    if workdir is None:
        return self.get_target_filename(a)
    assert os.path.isabs(workdir)
    return os.path.relpath(self.get_target_filename_abs(a), workdir)
def generate_depmf_install(self, d: InstallData) -> None:
    '''Write depmf.json into the build dir and register it for install.'''
    manifest_name = self.build.dep_manifest_name
    if manifest_name is None:
        # No dependency manifest requested for this project.
        return
    src = os.path.join(self.environment.get_build_dir(), 'depmf.json')
    dst = os.path.join(self.environment.get_prefix(), manifest_name)
    dst_name = os.path.join('{prefix}', manifest_name)
    payload = {
        'type': 'dependency manifest',
        'version': '1.0',
        'projects': {k: v.to_json() for k, v in self.build.dep_manifest.items()},
    }
    with open(src, 'w', encoding='utf-8') as f:
        f.write(json.dumps(payload))
    # Copy file from, to, and with mode unchanged.
    d.data.append(InstallDataBase(src, dst, dst_name, None, '',
                                  tag='devel', data_type='depmf'))
def get_regen_filelist(self) -> T.List[str]:
    '''List of all files whose alteration means that the build
    definition needs to be regenerated.'''
    watched = [str(Path(self.build_to_src) / df)
               for df in self.interpreter.get_build_def_files()]
    if self.environment.is_cross_build():
        watched.extend(self.environment.coredata.cross_files)
    watched.extend(self.environment.coredata.config_files)
    watched.append('meson-private/coredata.dat')
    # Future-dated inputs would cause an endless reconfigure loop.
    self.check_clock_skew(watched)
    return watched
def generate_regen_info(self) -> None:
    '''Pickle the regeneration inputs into meson's scratch directory.'''
    regeninfo = RegenInfo(self.environment.get_source_dir(),
                          self.environment.get_build_dir(),
                          self.get_regen_filelist())
    dump_path = os.path.join(self.environment.get_scratch_dir(),
                             'regeninfo.dump')
    with open(dump_path, 'wb') as f:
        pickle.dump(regeninfo, f)
def check_clock_skew(self, file_list: T.List[str]) -> None:
    '''Raise MesonException if any listed file's mtime is in the future.

    A regeneration input with a future time stamp would trigger an
    eternal reconfigure loop, so fail loudly instead.
    '''
    import time
    now = time.time()
    for f in file_list:
        absf = os.path.join(self.environment.get_build_dir(), f)
        delta = os.path.getmtime(absf) - now
        # Windows disk time stamps occasionally sit a fraction of a
        # millisecond ahead of the clock; tolerate up to 1ms of skew.
        if delta > 0.001:
            raise MesonException(f'Clock skew detected. File {absf} has a time stamp {delta:.4f}s in the future.')
def build_target_to_cmd_array(self, bt: T.Union[build.BuildTarget, programs.ExternalProgram]) -> T.List[str]:
    '''Return the argv prefix used to execute *bt*.'''
    if not isinstance(bt, build.BuildTarget):
        # External programs already know their own command line.
        return bt.get_command()
    # Build targets run via their absolute path inside the build tree.
    return [os.path.join(self.environment.get_build_dir(), self.get_target_filename(bt))]
def replace_extra_args(self, args: T.List[str], genlist: 'build.GeneratedList') -> T.List[str]:
    '''Expand the @EXTRA_ARGS@ placeholder with the generator's extra args.'''
    expanded: T.List[str] = []
    for token in args:
        if token == '@EXTRA_ARGS@':
            expanded.extend(genlist.get_extra_args())
        else:
            expanded.append(token)
    return expanded
def replace_outputs(self, args: T.List[str], private_dir: str, output_list: T.List[str]) -> T.List[str]:
    '''Expand every @OUTPUTn@ placeholder to the path of output n
    inside private_dir.'''
    pattern = re.compile(r'@OUTPUT(\d+)@')
    replaced: T.List[str] = []
    for arg in args:
        # An argument may embed several placeholders; keep substituting
        # until none remain.
        match = pattern.search(arg)
        while match is not None:
            idx = int(match.group(1))
            arg = arg.replace(f'@OUTPUT{idx}@', os.path.join(private_dir, output_list[idx]))
            match = pattern.search(arg)
        replaced.append(arg)
    return replaced
def get_build_by_default_targets(self) -> 'T.OrderedDict[str, T.Union[build.BuildTarget, build.CustomTarget]]':
    '''Collect every target that must be built by default.

    Besides targets flagged build_by_default this includes everything
    the test suite needs: test executables, target-typed arguments and
    declared test dependencies.
    '''
    defaults: 'T.OrderedDict[str, T.Union[build.BuildTarget, build.CustomTarget]]' = OrderedDict()
    for name, target in self.build.get_targets().items():
        if target.build_by_default:
            defaults[name] = target
    # Targets exercised by tests must also be buildable by default.
    # XXX: Sometime in the future these should be built only before
    # running tests.
    for test in self.build.get_tests():
        exe = test.exe
        if isinstance(exe, (build.CustomTarget, build.BuildTarget)):
            defaults[exe.get_id()] = exe
        for arg in test.cmd_args:
            if isinstance(arg, (build.CustomTarget, build.BuildTarget)):
                defaults[arg.get_id()] = arg
        for dep in test.depends:
            assert isinstance(dep, (build.CustomTarget, build.BuildTarget))
            defaults[dep.get_id()] = dep
    return defaults
@lru_cache(maxsize=None)
def get_custom_target_provided_by_generated_source(self, generated_source: build.CustomTarget) -> 'ImmutableListProtocol[str]':
    '''Return the library files among a custom target's outputs,
    as paths relative to the build root (cached).'''
    return [os.path.join(self.get_target_dir(generated_source), f)
            for f in generated_source.get_outputs()
            if self.environment.is_library(f)]
@lru_cache(maxsize=None)
def get_custom_target_provided_libraries(self, target: T.Union[build.BuildTarget, build.CustomTarget]) -> 'ImmutableListProtocol[str]':
    '''Gather libraries produced by the target's custom-target
    generated sources (cached).'''
    provided: T.List[str] = []
    for gen in target.get_generated_sources():
        if isinstance(gen, build.CustomTarget):
            provided.extend(self.get_custom_target_provided_by_generated_source(gen))
    return provided
def is_unity(self, target: build.BuildTarget) -> bool:
    '''Whether *target* should be compiled as a unity build.'''
    optval = self.get_option_for_target(OptionKey('unity'), target)
    if optval == 'on':
        return True
    # 'subprojects' enables unity builds only for subproject targets.
    return optval == 'subprojects' and target.subproject != ''
def get_custom_target_sources(self, target: build.CustomTarget) -> T.List[str]:
    '''
    Custom target sources can be of various object types; strings, File,
    BuildTarget, even other CustomTargets.
    Returns the path to them relative to the build root directory.
    (Absolute paths are returned instead when the target requested
    absolute_paths.)
    '''
    srcs: T.List[str] = []
    for i in target.get_sources():
        if isinstance(i, str):
            # Plain string: relative to the target's source subdir.
            fname = [os.path.join(self.build_to_src, target.subdir, i)]
        elif isinstance(i, build.BuildTarget):
            fname = [self.get_target_filename(i)]
        elif isinstance(i, (build.CustomTarget, build.CustomTargetIndex)):
            # Use every output of the referenced custom target.
            fname = [os.path.join(self.get_custom_target_output_dir(i), p) for p in i.get_outputs()]
        elif isinstance(i, build.GeneratedList):
            fname = [os.path.join(self.get_target_private_dir(target), p) for p in i.get_outputs()]
        elif isinstance(i, build.ExtractedObjects):
            outputs = i.get_outputs(self)
            fname = self.get_extracted_obj_paths(i.target, outputs)
        else:
            # Assumed to be a File at this point.
            fname = [i.rel_to_builddir(self.build_to_src)]
        if target.absolute_paths:
            fname = [os.path.join(self.environment.get_build_dir(), f) for f in fname]
        srcs += fname
    return srcs
def get_extracted_obj_paths(self, target: build.BuildTarget, outputs: T.List[str]) -> T.List[str]:
    '''Map extracted object file names into the target's private dir.'''
    private_dir = self.get_target_private_dir(target)
    return [os.path.join(private_dir, o) for o in outputs]
def get_custom_target_depend_files(self, target: build.CustomTarget, absolute_paths: bool = False) -> T.List[str]:
    '''Resolve a custom target's depend_files to paths.

    Entries may be File objects or plain strings relative to the
    target's subdir; results are absolute or build-dir-relative
    depending on absolute_paths.
    '''
    resolved: T.List[str] = []
    for entry in target.depend_files:
        if isinstance(entry, mesonlib.File):
            if absolute_paths:
                resolved.append(entry.absolute_path(self.environment.get_source_dir(),
                                                    self.environment.get_build_dir()))
            else:
                resolved.append(entry.rel_to_builddir(self.build_to_src))
        elif absolute_paths:
            resolved.append(os.path.join(self.environment.get_source_dir(), target.subdir, entry))
        else:
            resolved.append(os.path.join(self.build_to_src, target.subdir, entry))
    return resolved
def get_custom_target_output_dir(self, target: T.Union[build.Target, build.CustomTargetIndex]) -> str:
    '''Return the directory a custom target's outputs land in.'''
    # The XCode backend is special. A target foo/bar does
    # not go to ${BUILDDIR}/foo/bar but instead to
    # ${BUILDDIR}/${BUILDTYPE}/foo/bar.
    # Currently we set the include dir to be the former,
    # and not the latter. Thus we need this extra customisation
    # point. If in the future we make include dirs et al match
    # ${BUILDDIR}/${BUILDTYPE} instead, this becomes unnecessary.
    return self.get_target_dir(target)
@lru_cache(maxsize=None)
def get_normpath_target(self, source: str) -> str:
    # Cached os.path.normpath: the same paths get normalised repeatedly
    # while computing custom-target include dirs.
    return os.path.normpath(source)
def get_custom_target_dirs(self, target: build.CustomTarget, compiler: 'Compiler', *,
                           absolute_path: bool = False) -> T.List[str]:
    '''Collect output dirs of custom-target generated sources.

    Generator output goes into the target private dir which is already
    in the include paths list; only custom targets have their own
    target build dir.
    '''
    seen: T.List[str] = []
    for gen in target.get_generated_sources():
        if not isinstance(gen, (build.CustomTarget, build.CustomTargetIndex)):
            continue
        # An empty normalised dir means the build root.
        incdir = self.get_normpath_target(self.get_custom_target_output_dir(gen)) or '.'
        if absolute_path:
            incdir = os.path.join(self.environment.get_build_dir(), incdir)
        # Preserve first-seen order without duplicates.
        if incdir not in seen:
            seen.append(incdir)
    return seen
def get_custom_target_dir_include_args(
        self, target: build.CustomTarget, compiler: 'Compiler', *,
        absolute_path: bool = False) -> T.List[str]:
    '''Turn custom-target output dirs into compiler include arguments.'''
    args: T.List[str] = []
    for d in self.get_custom_target_dirs(target, compiler, absolute_path=absolute_path):
        args += compiler.get_include_args(d, False)
    return args
def eval_custom_target_command(
        self, target: build.CustomTarget, absolute_outputs: bool = False) -> \
        T.Tuple[T.List[str], T.List[str], T.List[str]]:
    '''Resolve a custom target's inputs, outputs, and command line.

    Returns (inputs, outputs, cmd) with all @...@ template strings
    substituted. Paths are build-dir-relative unless absolute_outputs
    is set (or the target requested absolute_paths).
    '''
    # We want the outputs to be absolute only when using the VS backend
    # XXX: Maybe allow the vs backend to use relative paths too?
    source_root = self.build_to_src
    build_root = '.'
    outdir = self.get_custom_target_output_dir(target)
    if absolute_outputs:
        source_root = self.environment.get_source_dir()
        build_root = self.environment.get_build_dir()
        outdir = os.path.join(self.environment.get_build_dir(), outdir)
    outputs = [os.path.join(outdir, i) for i in target.get_outputs()]
    inputs = self.get_custom_target_sources(target)
    # Evaluate the command list
    cmd: T.List[str] = []
    for i in target.command:
        if isinstance(i, build.BuildTarget):
            cmd += self.build_target_to_cmd_array(i)
            continue
        elif isinstance(i, build.CustomTarget):
            # GIR scanner will attempt to execute this binary but
            # it assumes that it is in path, so always give it a full path.
            tmp = i.get_outputs()[0]
            i = os.path.join(self.get_custom_target_output_dir(i), tmp)
        elif isinstance(i, mesonlib.File):
            i = i.rel_to_builddir(self.build_to_src)
            if target.absolute_paths or absolute_outputs:
                i = os.path.join(self.environment.get_build_dir(), i)
        # FIXME: str types are blindly added ignoring 'target.absolute_paths'
        # because we can't know if they refer to a file or just a string
        elif isinstance(i, str):
            # Expand the directory/depfile placeholders inside string args.
            if '@SOURCE_ROOT@' in i:
                i = i.replace('@SOURCE_ROOT@', source_root)
            if '@BUILD_ROOT@' in i:
                i = i.replace('@BUILD_ROOT@', build_root)
            if '@CURRENT_SOURCE_DIR@' in i:
                i = i.replace('@CURRENT_SOURCE_DIR@', os.path.join(source_root, target.subdir))
            if '@DEPFILE@' in i:
                if target.depfile is None:
                    msg = f'Custom target {target.name!r} has @DEPFILE@ but no depfile ' \
                          'keyword argument.'
                    raise MesonException(msg)
                dfilename = os.path.join(outdir, target.depfile)
                i = i.replace('@DEPFILE@', dfilename)
            if '@PRIVATE_DIR@' in i:
                if target.absolute_paths:
                    pdir = self.get_target_private_dir_abs(target)
                else:
                    pdir = self.get_target_private_dir(target)
                i = i.replace('@PRIVATE_DIR@', pdir)
        else:
            raise RuntimeError(f'Argument {i} is of unknown type {type(i)}')
        cmd.append(i)
    # Substitute the rest of the template strings
    values = mesonlib.get_filenames_templates_dict(inputs, outputs)
    cmd = mesonlib.substitute_values(cmd, values)
    # This should not be necessary but removing it breaks
    # building GStreamer on Windows. The underlying issue
    # is problems with quoting backslashes on Windows
    # which is the seventh circle of hell. The downside is
    # that this breaks custom targets whose command lines
    # have backslashes. If you try to fix this be sure to
    # check that it does not break GST.
    #
    # The bug causes file paths such as c:\foo to get escaped
    # into c:\\foo.
    #
    # Unfortunately we have not been able to come up with an
    # isolated test case for this so unless you manage to come up
    # with one, the only way is to test the building with Gst's
    # setup. Note this in your MR or ping us and we will get it
    # fixed.
    #
    # https://github.com/mesonbuild/meson/pull/737
    cmd = [i.replace('\\', '/') for i in cmd]
    return inputs, outputs, cmd
def get_run_target_env(self, target: build.RunTarget) -> build.EnvironmentVariables:
    '''Build the environment a run_target executes in.

    Starts from the target's own env (if any) and layers in the
    standard MESON_* variables plus a ready-made introspection command.
    '''
    env = target.env or build.EnvironmentVariables()
    introspect_cmd = join_args(self.environment.get_build_command() + ['introspect'])
    for key, value in (('MESON_SOURCE_ROOT', self.environment.get_source_dir()),
                       ('MESON_BUILD_ROOT', self.environment.get_build_dir()),
                       ('MESON_SUBDIR', target.subdir),
                       ('MESONINTROSPECT', introspect_cmd)):
        env.set(key, [value])
    return env
def run_postconf_scripts(self) -> None:
    '''Execute every registered postconf script with the MESON_* env.'''
    from ..scripts.meson_exe import run_exe
    introspect_cmd = join_args(self.environment.get_build_command() + ['introspect'])
    script_env = {
        'MESON_SOURCE_ROOT': self.environment.get_source_dir(),
        'MESON_BUILD_ROOT': self.environment.get_build_dir(),
        'MESONINTROSPECT': introspect_cmd,
    }
    for script in self.build.postconf_scripts:
        name = ' '.join(script.cmd_args)
        mlog.log(f'Running postconf script {name!r}')
        run_exe(script, script_env)
def create_install_data(self) -> InstallData:
    '''Assemble the full InstallData object describing everything
    that `meson install` must do for this build.'''
    strip_bin = self.environment.lookup_binary_entry(MachineChoice.HOST, 'strip')
    if strip_bin is None:
        if self.environment.is_cross_build():
            # Stripping with the build machine's strip would corrupt
            # foreign binaries, so warn and skip instead.
            mlog.warning('Cross file does not specify strip binary, result will not be stripped.')
        else:
            # TODO go through all candidates, like others
            strip_bin = [detect.defaults['strip'][0]]
    umask = self.environment.coredata.get_option(OptionKey('install_umask'))
    assert isinstance(umask, (str, int)), 'for mypy'
    d = InstallData(self.environment.get_source_dir(),
                    self.environment.get_build_dir(),
                    self.environment.get_prefix(),
                    self.environment.get_libdir(),
                    strip_bin,
                    umask,
                    self.environment.get_build_command() + ['introspect'],
                    self.environment.coredata.version)
    # Each generator appends its category of install entries to d.
    self.generate_depmf_install(d)
    self.generate_target_install(d)
    self.generate_header_install(d)
    self.generate_man_install(d)
    self.generate_emptydir_install(d)
    self.generate_data_install(d)
    self.generate_symlink_install(d)
    self.generate_custom_install_script(d)
    self.generate_subdir_install(d)
    return d
def create_install_data_files(self) -> None:
    '''Serialise InstallData to <scratch>/install.dat for meson install.'''
    out_path = os.path.join(self.environment.get_scratch_dir(), 'install.dat')
    with open(out_path, 'wb') as datafile:
        pickle.dump(self.create_install_data(), datafile)
def guess_install_tag(self, fname: str, outdir: T.Optional[str] = None) -> T.Optional[str]:
    '''Heuristically classify an installed file as runtime/devel/i18n.

    Returns None (after a debug log) when the destination matches none
    of the well-known prefix subdirectories.
    '''
    prefix = self.environment.get_prefix()
    bin_dir = Path(prefix, self.environment.get_bindir())
    lib_dir = Path(prefix, self.environment.get_libdir())
    inc_dir = Path(prefix, self.environment.get_includedir())
    locale_opt = self.environment.coredata.get_option(mesonlib.OptionKey('localedir'))
    assert isinstance(locale_opt, str), 'for mypy'
    locale_dir = Path(prefix, locale_opt)
    if outdir:
        dest = Path(prefix, outdir, Path(fname).name)
    else:
        dest = Path(prefix, fname)
    if bin_dir in dest.parents:
        return 'runtime'
    elif lib_dir in dest.parents:
        # Static libs and pkg-config files are development artefacts;
        # shared objects are needed at run time.
        if dest.suffix in {'.a', '.pc'}:
            return 'devel'
        elif dest.suffix in {'.so', '.dll'}:
            return 'runtime'
    elif inc_dir in dest.parents:
        return 'devel'
    elif locale_dir in dest.parents:
        return 'i18n'
    mlog.debug('Failed to guess install tag for', dest)
    return None
def generate_target_install(self, d: InstallData) -> None:
    '''Register every installable build/custom target output in d.targets.

    Raises MesonException when the number of install_dirs does not match
    the number of outputs.
    '''
    for t in self.build.get_targets().values():
        if not t.should_install():
            continue
        outdirs, install_dir_name, custom_install_dir = t.get_install_dir(self.environment)
        # Sanity-check the outputs and install_dirs
        num_outdirs, num_out = len(outdirs), len(t.get_outputs())
        if num_outdirs != 1 and num_outdirs != num_out:
            m = 'Target {!r} has {} outputs: {!r}, but only {} "install_dir"s were found.\n' \
                "Pass 'false' for outputs that should not be installed and 'true' for\n" \
                'using the default installation directory for an output.'
            raise MesonException(m.format(t.name, num_out, t.get_outputs(), num_outdirs))
        assert len(t.install_tag) == num_out
        install_mode = t.get_custom_install_mode()
        # Install the target output(s)
        if isinstance(t, build.BuildTarget):
            # In general, stripping static archives is tricky and full of pitfalls.
            # Wholesale stripping of static archives with a command such as
            #
            #   strip libfoo.a
            #
            # is broken, as GNU's strip will remove *every* symbol in a static
            # archive. One solution to this nonintuitive behaviour would be
            # to only strip local/debug symbols. Unfortunately, strip arguments
            # are not specified by POSIX and therefore not portable. GNU's `-g`
            # option (i.e. remove debug symbols) is equivalent to Apple's `-S`.
            #
            # TODO: Create GNUStrip/AppleStrip/etc. hierarchy for more
            # fine-grained stripping of static archives.
            should_strip = not isinstance(t, build.StaticLibrary) and self.get_option_for_target(OptionKey('strip'), t)
            assert isinstance(should_strip, bool), 'for mypy'
            # Install primary build output (library/executable/jar, etc)
            # Done separately because of strip/aliases/rpath
            if outdirs[0] is not False:
                tag = t.install_tag[0] or ('devel' if isinstance(t, build.StaticLibrary) else 'runtime')
                mappings = t.get_link_deps_mapping(d.prefix, self.environment)
                i = TargetInstallData(self.get_target_filename(t), outdirs[0],
                                      install_dir_name, t.get_aliases(),
                                      should_strip, mappings, t.rpath_dirs_to_remove,
                                      t.install_rpath, install_mode, t.subproject,
                                      tag=tag)
                d.targets.append(i)
                if isinstance(t, (build.SharedLibrary, build.SharedModule, build.Executable)):
                    # On toolchains/platforms that use an import library for
                    # linking (separate from the shared library with all the
                    # code), we need to install that too (dll.a/.lib).
                    if t.get_import_filename():
                        if custom_install_dir:
                            # If the DLL is installed into a custom directory,
                            # install the import library into the same place so
                            # it doesn't go into a surprising place
                            implib_install_dir = outdirs[0]
                        else:
                            implib_install_dir = self.environment.get_import_lib_dir()
                        # Install the import library; may not exist for shared modules
                        i = TargetInstallData(self.get_target_filename_for_linking(t),
                                              implib_install_dir, install_dir_name,
                                              {}, False, {}, set(), '', install_mode,
                                              t.subproject, optional=isinstance(t, build.SharedModule),
                                              tag='devel')
                        d.targets.append(i)
                    # Unstripped binaries may carry a separate debug info file.
                    if not should_strip and t.get_debug_filename():
                        debug_file = os.path.join(self.get_target_dir(t), t.get_debug_filename())
                        i = TargetInstallData(debug_file, outdirs[0],
                                              install_dir_name,
                                              {}, False, {}, set(), '',
                                              install_mode, t.subproject,
                                              optional=True, tag='devel')
                        d.targets.append(i)
            # Install secondary outputs. Only used for Vala right now.
            if num_outdirs > 1:
                for output, outdir, tag in zip(t.get_outputs()[1:], outdirs[1:], t.install_tag[1:]):
                    # User requested that we not install this output
                    if outdir is False:
                        continue
                    f = os.path.join(self.get_target_dir(t), output)
                    i = TargetInstallData(f, outdir, install_dir_name, {}, False, {}, set(), None,
                                          install_mode, t.subproject,
                                          tag=tag)
                    d.targets.append(i)
        elif isinstance(t, build.CustomTarget):
            # If only one install_dir is specified, assume that all
            # outputs will be installed into it. This is for
            # backwards-compatibility and because it makes sense to
            # avoid repetition since this is a common use-case.
            #
            # To selectively install only some outputs, pass `false` as
            # the install_dir for the corresponding output by index
            if num_outdirs == 1 and num_out > 1:
                for output, tag in zip(t.get_outputs(), t.install_tag):
                    f = os.path.join(self.get_target_dir(t), output)
                    # FIX: dir_name was previously only assigned when
                    # install_dir_name was falsy and then used
                    # unconditionally, raising NameError whenever a named
                    # install dir was present. Use the name when given.
                    dir_name = install_dir_name or os.path.join('{prefix}', outdirs[0])
                    i = TargetInstallData(f, outdirs[0], dir_name, {},
                                          False, {}, set(), None, install_mode,
                                          t.subproject, optional=not t.build_by_default,
                                          tag=tag)
                    d.targets.append(i)
            else:
                for output, outdir, tag in zip(t.get_outputs(), outdirs, t.install_tag):
                    # User requested that we not install this output
                    if outdir is False:
                        continue
                    f = os.path.join(self.get_target_dir(t), output)
                    # Same possibly-unbound fix as above.
                    dir_name = install_dir_name or os.path.join('{prefix}', outdir)
                    i = TargetInstallData(f, outdir, dir_name,
                                          {}, False, {}, set(), None, install_mode,
                                          t.subproject, optional=not t.build_by_default,
                                          tag=tag)
                    d.targets.append(i)
def generate_custom_install_script(self, d: InstallData) -> None:
    # Install scripts are executed verbatim by `meson install`; simply
    # pass the list through to the serialised install data.
    d.install_scripts = self.build.install_scripts
def generate_header_install(self, d: InstallData) -> None:
    '''Register every declared header for installation under includedir
    (or its custom install dir).'''
    incroot = self.environment.get_includedir()
    srcdir = self.environment.get_source_dir()
    builddir = self.environment.get_build_dir()
    for hdr in self.build.get_headers():
        outdir = outdir_name = hdr.get_custom_install_dir()
        if outdir is None:
            subdir = hdr.get_install_subdir()
            if subdir is None:
                outdir, outdir_name = incroot, '{includedir}'
            else:
                outdir = os.path.join(incroot, subdir)
                outdir_name = os.path.join('{includedir}', subdir)
        for src in hdr.get_sources():
            if not isinstance(src, File):
                raise MesonException(f'Invalid header type {src!r} can\'t be installed')
            abspath = src.absolute_path(srcdir, builddir)
            d.headers.append(InstallDataBase(
                abspath, outdir, outdir_name,
                hdr.get_custom_install_mode(), hdr.subproject, tag='devel'))
def generate_man_install(self, d: InstallData) -> None:
    '''Register man pages, deriving the section dir from the suffix.'''
    manroot = self.environment.get_mandir()
    for m in self.build.get_man():
        for f in m.get_sources():
            # Man section is the trailing numeric suffix (foo.1 -> man1).
            num = f.split('.')[-1]
            subdir = m.get_custom_install_dir()
            if subdir is None:
                if m.locale:
                    subdir = os.path.join('{mandir}', m.locale, 'man' + num)
                else:
                    subdir = os.path.join('{mandir}', 'man' + num)
            fname = f.fname
            if m.locale:
                # strip locale from file name
                fname = fname.replace(f'.{m.locale}', '')
            srcabs = f.absolute_path(self.environment.get_source_dir(),
                                     self.environment.get_build_dir())
            dstname = os.path.join(subdir, os.path.basename(fname))
            dstabs = dstname.replace('{mandir}', manroot)
            d.man.append(InstallDataBase(
                srcabs, dstabs, dstname,
                m.get_custom_install_mode(), m.subproject, tag='man'))
def generate_emptydir_install(self, d: InstallData) -> None:
    '''Register empty directories to be created at install time.'''
    for entry in self.build.get_emptydir():
        d.emptydir.append(InstallEmptyDir(
            entry.path, entry.install_mode, entry.subproject, entry.install_tag))
def generate_data_install(self, d: InstallData) -> None:
    '''Register plain data files, defaulting to <datadir>/<project>.'''
    srcdir = self.environment.get_source_dir()
    builddir = self.environment.get_build_dir()
    for de in self.build.get_data():
        assert isinstance(de, build.Data)
        subdir, subdir_name = de.install_dir, de.install_dir_name
        if not subdir:
            # No explicit dir: install under the project's datadir subdir.
            project = self.interpreter.build.project_name
            subdir = os.path.join(self.environment.get_datadir(), project)
            subdir_name = os.path.join('{datadir}', project)
        for src_file, dst_name in zip(de.sources, de.rename):
            assert isinstance(src_file, mesonlib.File)
            dst_abs = os.path.join(subdir, dst_name)
            dstdir_name = os.path.join(subdir_name, dst_name)
            # Fall back to a path-based guess when no explicit tag given.
            tag = de.install_tag or self.guess_install_tag(dst_abs)
            d.data.append(InstallDataBase(
                src_file.absolute_path(srcdir, builddir), dst_abs, dstdir_name,
                de.install_mode, de.subproject, tag=tag, data_type=de.data_type))
def generate_symlink_install(self, d: InstallData) -> None:
    '''Register symlinks to be created at install time.'''
    for link in self.build.get_symlinks():
        assert isinstance(link, build.SymlinkData)
        name_abs = os.path.join(link.install_dir, link.name)
        d.symlinks.append(InstallSymlinkData(
            link.target, name_abs, link.install_dir, link.subproject, link.install_tag))
def generate_subdir_install(self, d: InstallData) -> None:
    '''Register whole-directory installs (install_subdir()).'''
    for sd in self.build.get_install_subdirs():
        # The installable dir may live in the source tree or build tree.
        if sd.from_source_dir:
            from_dir = self.environment.get_source_dir()
        else:
            from_dir = self.environment.get_build_dir()
        src_dir = os.path.join(from_dir,
                               sd.source_subdir,
                               sd.installable_subdir).rstrip('/')
        dst_dir = os.path.join(self.environment.get_prefix(),
                               sd.install_dir)
        dst_name = os.path.join('{prefix}', sd.install_dir)
        if not sd.strip_directory:
            # Keep the directory itself, not just its contents.
            # FIX: dst_name previously joined onto the already-updated
            # dst_dir, duplicating the basename and dropping the
            # '{prefix}' placeholder; join each path with the basename
            # independently.
            basename = os.path.basename(src_dir)
            dst_dir = os.path.join(dst_dir, basename)
            dst_name = os.path.join(dst_name, basename)
        i = SubdirInstallData(src_dir, dst_dir, dst_name, sd.install_mode,
                              sd.exclude, sd.subproject, sd.install_tag)
        d.install_subdirs.append(i)
def get_introspection_data(self, target_id: str, target: build.Target) -> T.List['TargetIntrospectionData']:
    '''
    Returns a list of source dicts with the following format for a given target:
    [
        {
            "language": "<LANG>",
            "compiler": ["result", "of", "comp.get_exelist()"],
            "parameters": ["list", "of", "compiler", "parameters],
            "sources": ["list", "of", "all", "<LANG>", "source", "files"],
            "generated_sources": ["list", "of", "generated", "source", "files"]
        }
    ]
    This is a limited fallback / reference implementation. The backend should override this method.
    '''
    if isinstance(target, (build.CustomTarget, build.BuildTarget)):
        source_list_raw = target.sources
        source_list = []
        # Normalise every source to an absolute path; other source kinds
        # (e.g. GeneratedList) are silently skipped by this fallback.
        for j in source_list_raw:
            if isinstance(j, mesonlib.File):
                source_list += [j.absolute_path(self.source_dir, self.build_dir)]
            elif isinstance(j, str):
                source_list += [os.path.join(self.source_dir, j)]
            elif isinstance(j, (build.CustomTarget, build.BuildTarget)):
                source_list += [os.path.join(self.build_dir, j.get_subdir(), o) for o in j.get_outputs()]
        source_list = list(map(lambda x: os.path.normpath(x), source_list))

        compiler: T.List[str] = []
        if isinstance(target, build.CustomTarget):
            # For custom targets, report the command as the "compiler".
            tmp_compiler = target.command
            for j in tmp_compiler:
                if isinstance(j, mesonlib.File):
                    compiler += [j.absolute_path(self.source_dir, self.build_dir)]
                elif isinstance(j, str):
                    compiler += [j]
                elif isinstance(j, (build.BuildTarget, build.CustomTarget)):
                    compiler += j.get_outputs()
                else:
                    raise RuntimeError(f'Type "{type(j).__name__}" is not supported in get_introspection_data. This is a bug')

        return [{
            'language': 'unknown',
            'compiler': compiler,
            'parameters': [],
            'sources': source_list,
            'generated_sources': []
        }]

    return []
def get_devenv(self) -> build.EnvironmentVariables:
    '''Build PATH / library-path environment entries so freshly built,
    runnable, default-destination targets are preferred over installed
    ones inside `meson devenv`.'''
    env = build.EnvironmentVariables()
    extra_paths = set()
    library_paths = set()
    for t in self.build.get_targets().values():
        # Skip targets that cannot run here or that install to a custom dir.
        cross_built = not self.environment.machines.matches_build_machine(t.for_machine)
        can_run = not cross_built or not self.environment.need_exe_wrapper()
        in_default_dir = t.should_install() and not t.get_install_dir(self.environment)[2]
        if not can_run or not in_default_dir:
            continue
        tdir = os.path.join(self.environment.get_build_dir(), self.get_target_dir(t))
        if isinstance(t, build.Executable):
            # Add binaries that are going to be installed in bindir into PATH
            # so they get used by default instead of searching on system when
            # in developer environment.
            extra_paths.add(tdir)
            if mesonlib.is_windows() or mesonlib.is_cygwin():
                # On windows we cannot rely on rpath to run executables from build
                # directory. We have to add in PATH the location of every DLL needed.
                extra_paths.update(self.determine_windows_extra_paths(t, []))
        elif isinstance(t, build.SharedLibrary):
            # Add libraries that are going to be installed in libdir into
            # LD_LIBRARY_PATH. This allows running system applications using
            # that library.
            library_paths.add(tdir)
    # Pick the platform's dynamic-loader search variable.
    if mesonlib.is_windows() or mesonlib.is_cygwin():
        extra_paths.update(library_paths)
    elif mesonlib.is_osx():
        env.prepend('DYLD_LIBRARY_PATH', list(library_paths))
    else:
        env.prepend('LD_LIBRARY_PATH', list(library_paths))
    env.prepend('PATH', list(extra_paths))
    return env
|
import csv
import json
from importlib.metadata import entry_points
from invisibleroads_macros_log import format_path
from logging import getLogger
from os.path import basename, getmtime, join, splitext
from ..constants import (
FUNCTION_BY_NAME,
VARIABLE_CACHE,
VARIABLE_ID_PATTERN)
from ..exceptions import (
CrossComputeConfigurationError,
CrossComputeDataError)
from ..macros.package import import_attribute
from ..macros.web import get_html_from_markdown
class VariableView():
    """Base class for rendering one variable definition as HTML.

    Subclasses are looked up by ``view_name`` through VIEW_BY_NAME (the
    'crosscompute.views' entry point registry) and override the render
    methods.
    """

    # Registered view name; each subclass overrides this.
    view_name = 'variable'
    # When True, _get_data does not read the variable file from disk.
    is_asynchronous = False

    def __init__(self, variable_definition):
        self.variable_definition = variable_definition
        self.variable_id = variable_definition['id']
        self.variable_path = variable_definition['path']
        self.variable_mode = variable_definition['mode']

    @classmethod
    def get_from(Class, variable_definition):
        """Return an instance of the view class named by the definition.

        Falls back to this base class when the view is not installed.
        """
        view_name = variable_definition['view']
        try:
            View = VIEW_BY_NAME[view_name]
        except KeyError:
            L.error('%s view not installed', view_name)
            View = Class
        return View(variable_definition)

    def load(self, absolute_batch_folder):
        """Load data and configuration from the batch folder; return self."""
        self.data = self._get_data(absolute_batch_folder)
        self.configuration = self._get_configuration(absolute_batch_folder)
        return self

    def parse(self, data):
        # Identity by default; subclasses coerce/validate (e.g. NumberView).
        return data

    def render(
            self, mode_name, element_id, function_names, request_path,
            for_print):
        """Dispatch to render_input or render_output based on mode_name."""
        if mode_name == 'input':
            render = self.render_input
        else:
            render = self.render_output
        return render(element_id, function_names, request_path, for_print)

    def render_input(
            self, element_id, function_names, request_path, for_print):
        """Return empty render parts; subclasses supply real markup."""
        return {
            'css_uris': [],
            'js_uris': [],
            'body_text': '',
            'js_texts': [],
        }

    def render_output(
            self, element_id, function_names, request_path, for_print):
        """Return empty render parts; subclasses supply real markup."""
        return {
            'css_uris': [],
            'js_uris': [],
            'body_text': '',
            'js_texts': [],
        }

    def _get_data(self, absolute_batch_folder):
        """Return the variable's value loaded from the batch folder.

        Asynchronous views and the special 'ENVIRONMENT' path yield ''.
        """
        variable_path = self.variable_path
        if self.is_asynchronous or variable_path == 'ENVIRONMENT':
            variable_data = ''
        else:
            absolute_variable_path = join(
                absolute_batch_folder, self.variable_mode, variable_path)
            variable_data = load_variable_data(
                absolute_variable_path, self.variable_id)
        return variable_data

    def _get_configuration(self, absolute_batch_folder):
        """Return the definition's configuration merged with its JSON file.

        Errors while reading the optional configuration file are logged,
        not raised.
        """
        variable_configuration = self.variable_definition.get(
            'configuration', {})
        configuration_path = variable_configuration.get('path')
        if configuration_path:
            path = join(
                absolute_batch_folder, self.variable_mode, configuration_path)
            try:
                variable_configuration.update(json.load(open(path, 'rt')))
            except OSError:
                L.error('path not found %s', format_path(path))
            except json.JSONDecodeError:
                L.error('must be json %s', format_path(path))
            except TypeError:
                L.error('must contain a dictionary %s', format_path(path))
        return variable_configuration
class LinkView(VariableView):
    """Render a variable as a download link."""

    view_name = 'link'
    is_asynchronous = True  # file is served separately; nothing loaded here

    def render_output(
            self, element_id, function_names, request_path, for_print):
        """Return render parts whose body_text is an <a> download element.

        The download filename defaults to the basename of the variable
        path, and the visible link text defaults to that filename.
        """
        variable_id = self.variable_id
        c = self.configuration
        # Compute the filename first so the link text can default to it
        # (previously the text defaulted to the raw variable id).
        name = c.get('download', basename(self.variable_path))
        text = c.get('text', name)
        body_text = (
            f'<a id="{element_id}" href="{request_path}/{variable_id}" '
            f'class="{self.view_name} {variable_id}" download="{name}">'
            f'{text}</a>')
        return {
            'css_uris': [],
            'js_uris': [],
            'body_text': body_text,
            'js_texts': [],
        }
class StringView(VariableView):
    """Render a variable as a single-line string value."""

    view_name = 'string'
    input_type = 'text'
    function_by_name = FUNCTION_BY_NAME

    def render_input(
            self, element_id, function_names, request_path, for_print):
        """Return render parts whose body_text is an <input> element."""
        vid = self.variable_id
        markup = (
            f'<input id="{element_id}" name="{vid}" '
            f'class="{self.view_name} {vid}" '
            f'value="{self.data}" type="{self.input_type}">')
        return {
            'css_uris': [],
            'js_uris': [],
            'body_text': markup,
            'js_texts': [],
        }

    def render_output(
            self, element_id, function_names, request_path, for_print):
        """Return render parts whose body_text is a <span> element.

        Unknown filter functions are logged and the raw data is shown.
        """
        try:
            value = apply_functions(
                self.data, function_names, self.function_by_name)
        except KeyError as e:
            L.error('%s function not supported for string', e)
            value = self.data
        markup = (
            f'<span id="{element_id}" '
            f'class="{self.view_name} {self.variable_id}">'
            f'{value}</span>')
        return {
            'css_uris': [],
            'js_uris': [],
            'body_text': markup,
            'js_texts': [],
        }
class NumberView(StringView):
    """Render a variable as a numeric value."""

    view_name = 'number'
    input_type = 'number'

    def parse(self, data):
        """Coerce data to float, collapsing whole numbers to int.

        Raises CrossComputeDataError when data is not numeric.
        """
        try:
            data = float(data)
        except (TypeError, ValueError):
            # TypeError covers non-string/non-number inputs such as None
            # or a list, which float() rejects with TypeError, not ValueError.
            raise CrossComputeDataError(f'{data} is not a number')
        if data.is_integer():
            data = int(data)
        return data
class PasswordView(StringView):
    """Render a variable as a masked password input."""

    view_name = 'password'
    input_type = 'password'
class EmailView(StringView):
    """Render a variable as an email input."""

    view_name = 'email'
    input_type = 'email'
class TextView(StringView):
    """Render a variable as multi-line text."""

    view_name = 'text'

    def render_input(
            self, element_id, function_names, request_path, for_print):
        """Return render parts whose body_text is a <textarea> element."""
        variable_id = self.variable_id
        body_text = (
            f'<textarea id="{element_id}" name="{variable_id}" '
            f'class="{self.view_name} {variable_id}">'
            f'{self.data}</textarea>')
        return {
            'css_uris': [],
            'js_uris': [],
            'body_text': body_text,
            'js_texts': [],
        }
class MarkdownView(TextView):
    """Render a variable whose text is markdown, converted to HTML."""

    view_name = 'markdown'

    def render_output(
            self, element_id, function_names, request_path, for_print):
        """Return render parts whose body_text is the converted HTML."""
        data = get_html_from_markdown(self.data)
        body_text = (
            f'<span id="{element_id}" '
            f'class="{self.view_name} {self.variable_id}">'
            f'{data}</span>')
        return {
            'css_uris': [],
            'js_uris': [],
            'body_text': body_text,
            'js_texts': [],
        }
class ImageView(VariableView):
    """Render a variable as an image served from the request path."""

    view_name = 'image'
    is_asynchronous = True

    def render_output(
            self, element_id, function_names, request_path, for_print):
        """Return render parts whose body_text is an <img> element."""
        vid = self.variable_id
        tag = (
            f'<img id="{element_id}" '
            f'class="{self.view_name} {vid}" '
            f'src="{request_path}/{vid}">')
        return {
            'css_uris': [],
            'js_uris': [],
            'body_text': tag,
            'js_texts': [],
        }
def save_variable_data(target_path, variable_definitions, data_by_id):
    """Write variable values to target_path.

    A .dictionary target stores all values as JSON; any other extension
    stores exactly one value as plain text.

    Raises CrossComputeConfigurationError when a non-dictionary target
    receives more than one variable.
    """
    file_extension = splitext(target_path)[1]
    variable_data_by_id = get_variable_data_by_id(
        variable_definitions, data_by_id)
    if file_extension == '.dictionary':
        with open(target_path, 'wt') as target_file:
            json.dump(variable_data_by_id, target_file)
    elif len(variable_data_by_id) > 1:
        raise CrossComputeConfigurationError(
            f'{file_extension} does not support multiple variables')
    else:
        variable_data = list(variable_data_by_id.values())[0]
        # Use a context manager so the handle is closed deterministically
        # (the original leaked an open file object here).
        with open(target_path, 'wt') as target_file:
            target_file.write(variable_data)
def load_variable_data(path, variable_id):
    """Load a variable's value from path, using an mtime-based cache.

    A .dictionary file is parsed as JSON and every entry is cached; any
    other file is read as stripped text. Load failures are logged and
    yield ''.
    """
    try:
        new_time = getmtime(path)
    except OSError:
        new_time = None  # missing file; the open() below will also fail
    key = path, variable_id
    if key in VARIABLE_CACHE:
        old_time, variable_value = VARIABLE_CACHE[key]
        if old_time == new_time:
            # File unchanged since the cached read; reuse the value.
            return variable_value
    file_extension = splitext(path)[1]
    try:
        with open(path, 'rt') as file:
            if file_extension == '.dictionary':
                value_by_id = json.load(file)
                # Cache sibling entries so later lookups skip the file.
                for i, v in value_by_id.items():
                    VARIABLE_CACHE[(path, i)] = new_time, v
                value = value_by_id[variable_id]
            else:
                value = file.read().rstrip()
    except Exception:
        L.warning(f'could not load {variable_id} from {path}')
        value = ''
    VARIABLE_CACHE[(path, variable_id)] = new_time, value
    return value
def get_variable_data_by_id(variable_definitions, data_by_id):
    """Map each defined variable id to its value from data_by_id.

    A None key in data_by_id acts as a wildcard value for every variable.

    Raises CrossComputeConfigurationError when a variable has no value.
    """
    if None in data_by_id:
        # Wildcard entry: every variable gets the same value.
        wildcard_value = data_by_id[None]
        return {d['id']: wildcard_value for d in variable_definitions}
    variable_data_by_id = {}
    for variable_definition in variable_definitions:
        variable_id = variable_definition['id']
        if variable_id not in data_by_id:
            raise CrossComputeConfigurationError(
                f'{variable_id} not defined in batch configuration')
        variable_data_by_id[variable_id] = data_by_id[variable_id]
    return variable_data_by_id
def yield_data_by_id_from_csv(path, variable_definitions):
    """Yield one parsed data_by_id dict per row of a CSV batch file.

    The header row supplies variable ids; a row whose '#' column equals
    '#' is treated as commented out and skipped.

    Raises CrossComputeConfigurationError when the path is missing.
    """
    try:
        with open(path, 'rt') as csv_file:
            rows = csv.reader(csv_file)
            keys = [column.strip() for column in next(rows)]
            for values in rows:
                data_by_id = parse_data_by_id(
                    dict(zip(keys, values)), variable_definitions)
                if data_by_id.get('#') == '#':
                    continue
                yield data_by_id
    except OSError:
        raise CrossComputeConfigurationError(f'{path} path not found')
def yield_data_by_id_from_txt(path, variable_definitions):
    """Yield one parsed data_by_id dict per non-comment line of a text file.

    Only one variable is supported; blank lines and lines starting with
    '#' are skipped.

    Raises CrossComputeConfigurationError when multiple variables are
    configured or the path is missing.
    """
    if len(variable_definitions) > 1:
        raise CrossComputeConfigurationError(
            'use .csv to configure multiple variables')
    # An empty definition list yields a None id (same as the original
    # IndexError fallback).
    variable_id = variable_definitions[0]['id'] if variable_definitions else None
    try:
        with open(path, 'rt') as batch_configuration_file:
            for raw_line in batch_configuration_file:
                stripped_line = raw_line.strip()
                if not stripped_line or stripped_line.startswith('#'):
                    continue
                yield parse_data_by_id(
                    {variable_id: stripped_line}, variable_definitions)
    except OSError:
        raise CrossComputeConfigurationError(f'{path} path not found')
def parse_data_by_id(data_by_id, variable_definitions):
    """Parse each defined variable's raw value in place; return the dict.

    Raises CrossComputeDataError when a value is missing or invalid.
    """
    for variable_definition in variable_definitions:
        variable_id = variable_definition['id']
        if variable_id not in data_by_id:
            raise CrossComputeDataError(f'{variable_id} required')
        variable_view = VariableView.get_from(variable_definition)
        try:
            parsed_data = variable_view.parse(data_by_id[variable_id])
        except CrossComputeDataError as e:
            # Re-raise with the offending variable id for context.
            raise CrossComputeDataError(f'{e} for variable {variable_id}')
        data_by_id[variable_id] = parsed_data
    return data_by_id
def format_text(text, data_by_id):
    """Substitute variable expressions in text using data_by_id.

    A None key in data_by_id supplies the substitution callable (or
    value) used for every match; otherwise each match is looked up by
    variable id and piped through the '|'-separated function names.
    Unknown ids keep the original text; unknown functions are logged.
    """
    if not data_by_id:
        return text
    if None in data_by_id:
        # Wildcard entry: used directly as the substitution for every match.
        f = data_by_id[None]
    else:
        def f(match):
            matching_text = match.group(0)
            expression_text = match.group(1)
            expression_terms = expression_text.split('|')
            variable_id = expression_terms[0].strip()
            try:
                text = data_by_id[variable_id]
            except KeyError:
                L.warning('%s missing in batch configuration', variable_id)
                return matching_text
            try:
                text = apply_functions(
                    text, expression_terms[1:], FUNCTION_BY_NAME)
            except KeyError as e:
                L.error('%s function not supported for string', e)
            return str(text)
    return VARIABLE_ID_PATTERN.sub(f, text)
def apply_functions(value, function_names, function_by_name):
    """Apply the named functions to value in order and return the result.

    Blank names are ignored. Raises KeyError when a name is not present
    in function_by_name.
    """
    for function_name in function_names:
        function_name = function_name.strip()
        if not function_name:
            continue
        # A missing name raises KeyError for the caller to report; the
        # original wrapped this in a no-op `except KeyError: raise`.
        value = function_by_name[function_name](value)
    return value
# Registry of view classes discovered from installed packages via the
# 'crosscompute.views' entry point group.
# NOTE(review): entry_points()[group] indexing is deprecated in newer
# importlib.metadata releases; entry_points(group=...) is the successor.
VIEW_BY_NAME = {_.name: import_attribute(_.value) for _ in entry_points()[
    'crosscompute.views']}
L = getLogger(__name__)
Use the download filename as the default link text
import csv
import json
from importlib.metadata import entry_points
from invisibleroads_macros_log import format_path
from logging import getLogger
from os.path import basename, getmtime, join, splitext
from ..constants import (
FUNCTION_BY_NAME,
VARIABLE_CACHE,
VARIABLE_ID_PATTERN)
from ..exceptions import (
CrossComputeConfigurationError,
CrossComputeDataError)
from ..macros.package import import_attribute
from ..macros.web import get_html_from_markdown
class VariableView():
    """Base view for one variable definition.

    Concrete views register under ``view_name`` through the
    'crosscompute.views' entry points (see VIEW_BY_NAME) and override
    the rendering methods below.
    """

    # Name used to look the view up; subclasses override.
    view_name = 'variable'
    # Asynchronous views skip loading the variable file in _get_data.
    is_asynchronous = False

    def __init__(self, variable_definition):
        self.variable_definition = variable_definition
        self.variable_id = variable_definition['id']
        self.variable_path = variable_definition['path']
        self.variable_mode = variable_definition['mode']

    @classmethod
    def get_from(Class, variable_definition):
        """Instantiate the view class registered for this definition.

        Logs and falls back to this base class for unknown views.
        """
        view_name = variable_definition['view']
        try:
            View = VIEW_BY_NAME[view_name]
        except KeyError:
            L.error('%s view not installed', view_name)
            View = Class
        return View(variable_definition)

    def load(self, absolute_batch_folder):
        """Populate self.data and self.configuration; return self."""
        self.data = self._get_data(absolute_batch_folder)
        self.configuration = self._get_configuration(absolute_batch_folder)
        return self

    def parse(self, data):
        # No-op by default; subclasses may coerce (e.g. NumberView).
        return data

    def render(
            self, mode_name, element_id, function_names, request_path,
            for_print):
        """Render in input or output mode depending on mode_name."""
        if mode_name == 'input':
            render = self.render_input
        else:
            render = self.render_output
        return render(element_id, function_names, request_path, for_print)

    def render_input(
            self, element_id, function_names, request_path, for_print):
        """Default: empty render parts; overridden by subclasses."""
        return {
            'css_uris': [],
            'js_uris': [],
            'body_text': '',
            'js_texts': [],
        }

    def render_output(
            self, element_id, function_names, request_path, for_print):
        """Default: empty render parts; overridden by subclasses."""
        return {
            'css_uris': [],
            'js_uris': [],
            'body_text': '',
            'js_texts': [],
        }

    def _get_data(self, absolute_batch_folder):
        """Read the variable's value from the batch folder.

        Returns '' for asynchronous views and the 'ENVIRONMENT' path.
        """
        variable_path = self.variable_path
        if self.is_asynchronous or variable_path == 'ENVIRONMENT':
            variable_data = ''
        else:
            absolute_variable_path = join(
                absolute_batch_folder, self.variable_mode, variable_path)
            variable_data = load_variable_data(
                absolute_variable_path, self.variable_id)
        return variable_data

    def _get_configuration(self, absolute_batch_folder):
        """Merge the definition's configuration with its optional JSON file.

        File problems are logged rather than raised.
        """
        variable_configuration = self.variable_definition.get(
            'configuration', {})
        configuration_path = variable_configuration.get('path')
        if configuration_path:
            path = join(
                absolute_batch_folder, self.variable_mode, configuration_path)
            try:
                variable_configuration.update(json.load(open(path, 'rt')))
            except OSError:
                L.error('path not found %s', format_path(path))
            except json.JSONDecodeError:
                L.error('must be json %s', format_path(path))
            except TypeError:
                L.error('must contain a dictionary %s', format_path(path))
        return variable_configuration
class LinkView(VariableView):
    """Render a variable as a downloadable file link."""

    view_name = 'link'
    is_asynchronous = True

    def render_output(
            self, element_id, function_names, request_path, for_print):
        """Return render parts whose body_text is an <a> download element."""
        vid = self.variable_id
        configuration = self.configuration
        # The link text defaults to the download filename.
        file_name = configuration.get('download', basename(self.variable_path))
        link_text = configuration.get('text', file_name)
        anchor = (
            f'<a id="{element_id}" href="{request_path}/{vid}" '
            f'class="{self.view_name} {vid}" download="{file_name}">'
            f'{link_text}</a>')
        return {
            'css_uris': [],
            'js_uris': [],
            'body_text': anchor,
            'js_texts': [],
        }
class StringView(VariableView):
    """Render a variable as a single-line string."""

    view_name = 'string'
    # HTML input type; subclasses override (number, password, email).
    input_type = 'text'
    # Filter functions available to render_output.
    function_by_name = FUNCTION_BY_NAME

    def render_input(
            self, element_id, function_names, request_path, for_print):
        """Return render parts whose body_text is an <input> element."""
        variable_id = self.variable_id
        body_text = (
            f'<input id="{element_id}" name="{variable_id}" '
            f'class="{self.view_name} {variable_id}" '
            f'value="{self.data}" type="{self.input_type}">')
        return {
            'css_uris': [],
            'js_uris': [],
            'body_text': body_text,
            'js_texts': [],
        }

    def render_output(
            self, element_id, function_names, request_path, for_print):
        """Return render parts whose body_text is a <span> element.

        Unsupported filter functions are logged and raw data is shown.
        """
        try:
            data = apply_functions(
                self.data, function_names, self.function_by_name)
        except KeyError as e:
            L.error('%s function not supported for string', e)
            data = self.data
        body_text = (
            f'<span id="{element_id}" '
            f'class="{self.view_name} {self.variable_id}">'
            f'{data}</span>')
        return {
            'css_uris': [],
            'js_uris': [],
            'body_text': body_text,
            'js_texts': [],
        }
class NumberView(StringView):
    """Render a variable as a number."""

    view_name = 'number'
    input_type = 'number'

    def parse(self, data):
        """Return data as float (or int when whole).

        Raises CrossComputeDataError for non-numeric input.
        """
        try:
            data = float(data)
        except (TypeError, ValueError):
            # float() raises TypeError (not ValueError) for inputs such
            # as None or a list; report both as a data error.
            raise CrossComputeDataError(f'{data} is not a number')
        if data.is_integer():
            data = int(data)
        return data
class PasswordView(StringView):
    """String view with a masked password input."""

    view_name = 'password'
    input_type = 'password'
class EmailView(StringView):
    """String view with an email input."""

    view_name = 'email'
    input_type = 'email'
class TextView(StringView):
    """Render a variable as multi-line text via a textarea."""

    view_name = 'text'

    def render_input(
            self, element_id, function_names, request_path, for_print):
        """Return render parts whose body_text is a <textarea> element."""
        variable_id = self.variable_id
        body_text = (
            f'<textarea id="{element_id}" name="{variable_id}" '
            f'class="{self.view_name} {variable_id}">'
            f'{self.data}</textarea>')
        return {
            'css_uris': [],
            'js_uris': [],
            'body_text': body_text,
            'js_texts': [],
        }
class MarkdownView(TextView):
    """Text view whose output is markdown rendered to HTML."""

    view_name = 'markdown'

    def render_output(
            self, element_id, function_names, request_path, for_print):
        """Return render parts wrapping the converted HTML in a <span>."""
        data = get_html_from_markdown(self.data)
        body_text = (
            f'<span id="{element_id}" '
            f'class="{self.view_name} {self.variable_id}">'
            f'{data}</span>')
        return {
            'css_uris': [],
            'js_uris': [],
            'body_text': body_text,
            'js_texts': [],
        }
class ImageView(VariableView):
    """Render a variable as an <img> served from the request path."""

    view_name = 'image'
    # Image bytes are served separately; nothing is loaded here.
    is_asynchronous = True

    def render_output(
            self, element_id, function_names, request_path, for_print):
        """Return render parts whose body_text is an <img> element."""
        variable_id = self.variable_id
        body_text = (
            f'<img id="{element_id}" '
            f'class="{self.view_name} {variable_id}" '
            f'src="{request_path}/{variable_id}">')
        return {
            'css_uris': [],
            'js_uris': [],
            'body_text': body_text,
            'js_texts': [],
        }
def save_variable_data(target_path, variable_definitions, data_by_id):
    """Persist variable values to target_path.

    A .dictionary extension stores all values as JSON; other extensions
    store exactly one value as plain text.

    Raises CrossComputeConfigurationError when multiple variables target
    a non-dictionary file.
    """
    file_extension = splitext(target_path)[1]
    variable_data_by_id = get_variable_data_by_id(
        variable_definitions, data_by_id)
    if file_extension == '.dictionary':
        with open(target_path, 'wt') as target_file:
            json.dump(variable_data_by_id, target_file)
    elif len(variable_data_by_id) > 1:
        raise CrossComputeConfigurationError(
            f'{file_extension} does not support multiple variables')
    else:
        variable_data = list(variable_data_by_id.values())[0]
        # Context manager closes the handle; the original left it open.
        with open(target_path, 'wt') as target_file:
            target_file.write(variable_data)
def load_variable_data(path, variable_id):
    """Return a variable's value from path, with mtime-based caching.

    .dictionary files are parsed as JSON and all entries cached; other
    files are read as stripped text. Failures are logged and yield ''.
    """
    try:
        new_time = getmtime(path)
    except OSError:
        new_time = None  # file missing; the open() below will fail too
    key = path, variable_id
    if key in VARIABLE_CACHE:
        old_time, variable_value = VARIABLE_CACHE[key]
        if old_time == new_time:
            # Unchanged on disk; serve the cached value.
            return variable_value
    file_extension = splitext(path)[1]
    try:
        with open(path, 'rt') as file:
            if file_extension == '.dictionary':
                value_by_id = json.load(file)
                # Prime the cache for sibling ids in the same file.
                for i, v in value_by_id.items():
                    VARIABLE_CACHE[(path, i)] = new_time, v
                value = value_by_id[variable_id]
            else:
                value = file.read().rstrip()
    except Exception:
        L.warning(f'could not load {variable_id} from {path}')
        value = ''
    VARIABLE_CACHE[(path, variable_id)] = new_time, value
    return value
def get_variable_data_by_id(variable_definitions, data_by_id):
    """Return {variable_id: value} for every defined variable.

    A None key in data_by_id supplies one value for all variables.

    Raises CrossComputeConfigurationError for a variable with no value.
    """
    has_wildcard = None in data_by_id
    variable_data_by_id = {}
    for variable_definition in variable_definitions:
        variable_id = variable_definition['id']
        if has_wildcard:
            variable_data_by_id[variable_id] = data_by_id[None]
            continue
        if variable_id not in data_by_id:
            raise CrossComputeConfigurationError(
                f'{variable_id} not defined in batch configuration')
        variable_data_by_id[variable_id] = data_by_id[variable_id]
    return variable_data_by_id
def yield_data_by_id_from_csv(path, variable_definitions):
    """Yield a parsed data_by_id dict for each row of a CSV batch file.

    The header row provides variable ids; rows whose '#' column is '#'
    are treated as commented out. Raises
    CrossComputeConfigurationError when the path is missing.
    """
    try:
        with open(path, 'rt') as file:
            csv_reader = csv.reader(file)
            keys = [_.strip() for _ in next(csv_reader)]
            for values in csv_reader:
                data_by_id = parse_data_by_id(dict(zip(
                    keys, values)), variable_definitions)
                if data_by_id.get('#') == '#':
                    continue
                yield data_by_id
    except OSError:
        raise CrossComputeConfigurationError(f'{path} path not found')
def yield_data_by_id_from_txt(path, variable_definitions):
    """Yield a parsed data_by_id dict per non-comment line of a text file.

    Supports at most one variable; blank and '#'-prefixed lines are
    skipped. Raises CrossComputeConfigurationError when multiple
    variables are configured or the path is missing.
    """
    if len(variable_definitions) > 1:
        raise CrossComputeConfigurationError(
            'use .csv to configure multiple variables')
    try:
        variable_id = variable_definitions[0]['id']
    except IndexError:
        # No variables defined; parse_data_by_id then gets a None key.
        variable_id = None
    try:
        with open(path, 'rt') as batch_configuration_file:
            for line in batch_configuration_file:
                line = line.strip()
                if not line or line.startswith('#'):
                    continue
                yield parse_data_by_id({
                    variable_id: line}, variable_definitions)
    except OSError:
        raise CrossComputeConfigurationError(f'{path} path not found')
def parse_data_by_id(data_by_id, variable_definitions):
    """Parse each defined variable's value in place; return data_by_id.

    Raises CrossComputeDataError when a value is missing or invalid.
    """
    for variable_definition in variable_definitions:
        variable_id = variable_definition['id']
        try:
            variable_data = data_by_id[variable_id]
        except KeyError:
            raise CrossComputeDataError(f'{variable_id} required')
        variable_view = VariableView.get_from(variable_definition)
        try:
            variable_data = variable_view.parse(variable_data)
        except CrossComputeDataError as e:
            # Re-raise with the offending variable id attached.
            raise CrossComputeDataError(f'{e} for variable {variable_id}')
        data_by_id[variable_id] = variable_data
    return data_by_id
def format_text(text, data_by_id):
    """Substitute variable expressions found in text.

    When data_by_id has a None key, that entry is used as the
    substitution for every match; otherwise each match is resolved by
    variable id and piped through its '|'-separated function names.
    Missing ids leave the match untouched; bad functions are logged.
    """
    if not data_by_id:
        return text
    if None in data_by_id:
        # Wildcard substitution applied to every match.
        f = data_by_id[None]
    else:
        def f(match):
            matching_text = match.group(0)
            expression_text = match.group(1)
            expression_terms = expression_text.split('|')
            variable_id = expression_terms[0].strip()
            try:
                text = data_by_id[variable_id]
            except KeyError:
                L.warning('%s missing in batch configuration', variable_id)
                return matching_text
            try:
                text = apply_functions(
                    text, expression_terms[1:], FUNCTION_BY_NAME)
            except KeyError as e:
                L.error('%s function not supported for string', e)
            return str(text)
    return VARIABLE_ID_PATTERN.sub(f, text)
def apply_functions(value, function_names, function_by_name):
    """Pipe value through the named functions in order.

    Blank names are skipped. A name absent from function_by_name raises
    KeyError for the caller to handle.
    """
    for function_name in function_names:
        function_name = function_name.strip()
        if not function_name:
            continue
        # Removed the original's no-op `except KeyError: raise` wrapper;
        # the KeyError propagates either way.
        value = function_by_name[function_name](value)
    return value
# View classes contributed by installed packages through the
# 'crosscompute.views' entry point group.
# NOTE(review): subscripting entry_points() is deprecated in newer
# importlib.metadata versions; entry_points(group=...) replaces it.
VIEW_BY_NAME = {_.name: import_attribute(_.value) for _ in entry_points()[
    'crosscompute.views']}
L = getLogger(__name__)
|
cc2d6b75-2ead-11e5-af75-7831c1d44c14
cc36eb45-2ead-11e5-b3ea-7831c1d44c14
cc36eb45-2ead-11e5-b3ea-7831c1d44c14 |
from __future__ import print_function
from permuta import *
import permstruct
import permstruct.dag
import sys
# Avoidance of classical patterns of length 3
#---------------------------------------------------------------------------#
# 1 pattern of length 3 1 pattern of length 4 #
#---------------------------------------------------------------------------#
# -- Wilf-class 1 -- #
# # The permutations ================================================== > ?!
# # Finite and not very interesting
# # perhaps we must use inp_dag = permstruct.dag.N_P(perm_bound)
# patts = [Permutation([3,2,1]), Permutation([1,2,3,4])]
# perm_prop = lambda p: all( p.avoids(q) for q in patts )
#
# perm_bound = 7
# ignored = 0
#
# # The dag
# max_len_patt = 2
# upper_bound = 1
#
# # Grids
# max_rule_size = (6, 6)
# max_non_empty = 6
# max_rules = 30
# -- Wilf-class 2 -- #
# The permutations ================================================== > SUCCESS!
# patts = [Permutation([3,2,1]), Permutation([2,1,3,4])]
# perm_prop = lambda p: all( p.avoids(q) for q in patts )
#
# perm_bound = 7
# ignored = 0
#
# # The dag
# max_len_patt = 2
# upper_bound = 1
#
# # Grids
# max_rule_size = (3, 3)
# max_non_empty = 4
# max_rules = 30
# -- Wilf-class 3 -- #
# The permutations ================================================== > SUCCESS!
# patts = [Permutation([1,3,2]), Permutation([4,3,2,1])]
# perm_prop = lambda p: all( p.avoids(q) for q in patts )
#
# perm_bound = 7
# ignored = 0
#
# # The dag
# max_len_patt = 3
# upper_bound = 2
# remove = True
#
# # Grids
# max_rule_size = (3, 3)
# max_non_empty = 4
# max_rules = 30
# -- Wilf-class 4 -- #
# The permutations ================================================== > FAILURE!
# patts = [Permutation([3,2,1]), Permutation([1,3,2,4])]
# perm_prop = lambda p: all( p.avoids(q) for q in patts )
#
# perm_bound = 7
# ignored = 0
#
# # The dag
# max_len_patt = 3
# upper_bound = 2
# remove = True
#
# # Grids
# max_rule_size = (4, 4)
# max_non_empty = 4
# max_rules = None
# -- Wilf-class 9 -- #
# The permutations ================================================== > FAILURE!
# Patterns whose joint avoidance class we try to cover (Wilf-class 9).
patts = [Permutation([1, 3, 2]), Permutation([4, 2, 1, 3])]


def perm_prop(p):
    """Return True when p avoids every pattern in patts (PEP 8: def
    instead of a lambda bound to a name)."""
    return all(p.avoids(q) for q in patts)


# Enumerate permutations up to this length; ignore none of them.
perm_bound = 7
ignored = 0

# The dag
max_len_patt = 3
upper_bound = 2
remove = False

# Grids
max_rule_size = (3, 3)
max_non_empty = 3
max_rules = None
#------------------------------------------------#
# 2 patterns #
#------------------------------------------------#
# -- Wilf-class 1 -- #
# # The permutations ================================================== > SUCCESS!
# # This class is a bit weird since it is finite. But we can still do it!
# patts = [Permutation([1,2,3]), Permutation([3,2,1])]
# perm_prop = lambda p: all( p.avoids(q) for q in patts )
#
# perm_bound = 7
# ignored = 0
#
# # The dag
# max_len_patt = None
# upper_bound = 3
#
# # Grids
# max_rule_size = (3, 3)
# max_non_empty = 4 # <--------------------------------- Note!
# max_rules = 100
# -- Wilf-class 2 -- #
# # The permutations ================================================== > SUCCESS!
# patts = [Permutation([1,2,3]), Permutation([2,3,1])]
# perm_prop = lambda p: all( p.avoids(q) for q in patts )
#
# perm_bound = 7
# ignored = 0
#
# # The dag
# max_len_patt = None
# upper_bound = 3
#
# # Grids
# max_rule_size = (3, 3)
# max_non_empty = 3
# max_rules = 100
# -- Wilf-class 3 -- #
# # The permutations ================================================== > SUCCESS!
# patts = [Permutation([1,2,3]), Permutation([1,3,2])]
# perm_prop = lambda p: all( p.avoids(q) for q in patts )
#
# perm_bound = 7
# ignored = 0
#
# # The dag
# max_len_patt = 3
# upper_bound = 3
#
# # Grids
# max_rule_size = (3, 3)
# max_non_empty = 3
# max_rules = 100
# # The permutations ================================================== > SUCCESS!
# patts = [Permutation([1,3,2]), Permutation([3,1,2])]
# perm_prop = lambda p: all( p.avoids(q) for q in patts )
#
# perm_bound = 7
# ignored = 0
#
# # The dag
# max_len_patt = 3
# upper_bound = 3
#
# # Grids
# max_rule_size = (3, 3)
# max_non_empty = 3
# max_rules = 100
# The permutations ================================================== > SUCCESS!
# patts = [Permutation([2,3,1]), Permutation([3,1,2])]
# perm_prop = lambda p: all( p.avoids(q) for q in patts )
#
# perm_bound = 7
# ignored = 0
#
# # The dag
# max_len_patt = 3
# upper_bound = 3
#
# # Grids
# max_rule_size = (3, 3)
# max_non_empty = 3
# max_rules = 100
#------------------------------------------------#
# 3 patterns #
#------------------------------------------------#
# We use Simion and Schmidt to eliminate symmetric cases
# # The permutations ================================================== > SUCCESS!
# patts = [Permutation([1,2,3]), Permutation([1,3,2]), Permutation([2,1,3])]
# # patts = [Permutation([1,2,3]), Permutation([1,3,2]), Permutation([2,3,1])]
# # patts = [Permutation([1,3,2]), Permutation([2,1,3]), Permutation([2,3,1])]
# # patts = [Permutation([1,2,3]), Permutation([1,3,2]), Permutation([3,1,2])]
# # patts = [Permutation([1,2,3]), Permutation([2,3,1]), Permutation([3,1,2])]
# perm_prop = lambda p: all( p.avoids(q) for q in patts )
#
# perm_bound = 7
# ignored = 0
#
# # The dag
# max_len_patt = 3
# upper_bound = 3
#
# # Grids
# max_rule_size = (3, 3)
# max_non_empty = 3
# max_rules = 100
#------------------------------------------------#
# 4 patterns #
#------------------------------------------------#
# We use Simion and Schmidt to eliminate symmetric cases
# # The permutations ================================================== > SUCCESS!
# patts = [Permutation([2,3,1]), Permutation([1,3,2]), Permutation([3,1,2]), Permutation([2,1,3])]
# # patts = [Permutation([2,3,1]), Permutation([3,2,1]), Permutation([1,3,2]), Permutation([3,1,2])]
# # patts = [Permutation([3,1,2]), Permutation([3,2,1]), Permutation([1,3,2]), Permutation([2,1,3])]
# perm_prop = lambda p: all( p.avoids(q) for q in patts )
#
# perm_bound = 7
# ignored = 0
#
# # The dag
# max_len_patt = 3
# upper_bound = 3
#
# # Grids
# max_rule_size = (3, 3)
# max_non_empty = 3
# max_rules = 100
# Creating the dag
inp_dag = permstruct.dag.taylor_dag(patts, max_len_patt=max_len_patt, perm_bound=perm_bound, remove=remove, upper_bound=upper_bound)
# Show which elements made it into the dag before the search starts.
for el in inp_dag.elements:
    print(el.description if el is not None else 'None')

# Finding the rules and running exact cover
sol_iter = permstruct.exhaustive(perm_prop, perm_bound, inp_dag, max_rule_size, max_non_empty, max_rules, ignore_first=ignored)
# Print each exact-cover solution as a banner followed by its rules.
for sol in sol_iter:
    print('====================================')
    print("")
    for rule in sol:
        print(rule)
    print("")
Added cases for pairs of one length-3 and one length-4 pattern
from __future__ import print_function
from permuta import *
import permstruct
import permstruct.dag
import sys
# Avoidance of classical patterns of length 3
#---------------------------------------------------------------------------#
# 1 pattern of length 3 1 pattern of length 4 #
#---------------------------------------------------------------------------#
# We are able to do every Av(132,q) where q has length 4 EXECpT ONE!!!!!. Out of the remaining
# 8 classes of the form Av(321,q) we can only do two, q = 1234 (trivial, but
# takes a very long time to find the set cover) and q = 2134. Hopefully we can
# do more with mutation rules
# -- Wilf-class 1 -- #
# # The permutations ================================================== > ?!
# # Finite and not very interesting
# # perhaps we must use inp_dag = permstruct.dag.N_P(perm_bound)
# patts = [Permutation([3,2,1]), Permutation([1,2,3,4])]
# perm_prop = lambda p: all( p.avoids(q) for q in patts )
#
# perm_bound = 7
# ignored = 0
#
# # The dag
# max_len_patt = 2
# upper_bound = 1
#
# # Grids
# max_rule_size = (6, 6)
# max_non_empty = 6
# max_rules = 30
# -- Wilf-class 2 -- #
# The permutations ================================================== > SUCCESS!
# patts = [Permutation([3,2,1]), Permutation([2,1,3,4])]
# perm_prop = lambda p: all( p.avoids(q) for q in patts )
#
# perm_bound = 7
# ignored = 0
#
# # The dag
# max_len_patt = 2
# upper_bound = 1
#
# # Grids
# max_rule_size = (3, 3)
# max_non_empty = 4
# max_rules = 30
# -- Wilf-class 3 -- #
# The permutations ================================================== > SUCCESS!
# patts = [Permutation([1,3,2]), Permutation([4,3,2,1])]
# perm_prop = lambda p: all( p.avoids(q) for q in patts )
#
# perm_bound = 7
# ignored = 0
#
# # The dag
# max_len_patt = 3
# upper_bound = 2
# remove = True
#
# # Grids
# max_rule_size = (3, 3)
# max_non_empty = 4
# max_rules = 30
# -- Wilf-class 4 -- #
# The permutations ================================================== > FAILURE!
# patts = [Permutation([3,2,1]), Permutation([1,3,2,4])]
# perm_prop = lambda p: all( p.avoids(q) for q in patts )
#
# perm_bound = 7
# ignored = 0
#
# # The dag
# max_len_patt = 3
# upper_bound = 2
# remove = True
#
# # Grids
# max_rule_size = (4, 4)
# max_non_empty = 4
# max_rules = None
# -- Wilf-class 5 -- #
# The permutations ================================================== > FAILURE!
# patts = [Permutation([3,2,1]), Permutation([1,3,4,2])]
# perm_prop = lambda p: all( p.avoids(q) for q in patts )
#
# perm_bound = 7
# ignored = 0
#
# # The dag
# max_len_patt = 3
# upper_bound = 2
# remove = True
#
# # Grids
# max_rule_size = (3, 3)
# max_non_empty = 4
# max_rules = None
# -- Wilf-class 6 -- #
# The permutations ================================================== > FAILURE!
# patts = [Permutation([3,2,1]), Permutation([2,1,4,3])]
# perm_prop = lambda p: all( p.avoids(q) for q in patts )
#
# perm_bound = 7
# ignored = 0
#
# # The dag
# max_len_patt = 3
# upper_bound = 2
# remove = True
#
# # Grids
# max_rule_size = (3, 3)
# max_non_empty = 4
# max_rules = None
# -- Wilf-class 7 -- #
# The permutations ================================================== > SUCCESS!
# patts = [Permutation([1,3,2]), Permutation([4,3,1,2])]
# perm_prop = lambda p: all( p.avoids(q) for q in patts )
#
# perm_bound = 7
# ignored = 0
#
# # The dag
# max_len_patt = 3
# upper_bound = 2
# remove = True
#
# # Grids
# max_rule_size = (3, 3)
# max_non_empty = 3
# max_rules = None
# The permutations ================================================== > SUCCESS!
# patts = [Permutation([1,3,2]), Permutation([4,2,3,1])]
# perm_prop = lambda p: all( p.avoids(q) for q in patts )
#
# perm_bound = 7
# ignored = 0
#
# # The dag
# max_len_patt = 3
# upper_bound = 2
# remove = True
#
# # Grids
# max_rule_size = (3, 3)
# max_non_empty = 3
# max_rules = None
# -- Wilf-class 8 -- #
# The permutations ================================================== > SUCCESS!
# patts = [Permutation([1,3,2]), Permutation([3,2,1,4])]
# perm_prop = lambda p: all( p.avoids(q) for q in patts )
#
# perm_bound = 7
# ignored = 0
#
# # The dag
# max_len_patt = 3
# upper_bound = 2
# remove = False
#
# # Grids
# max_rule_size = (3, 3)
# max_non_empty = 3
# max_rules = None
# -- Wilf-class 9 -- #
# The permutations ================================================== > FAILURE!
# patts = [Permutation([3,2,1]), Permutation([2,3,4,1])]
# perm_prop = lambda p: all( p.avoids(q) for q in patts )
#
# perm_bound = 7
# ignored = 0
#
# # The dag
# max_len_patt = 3
# upper_bound = 2
# remove = True
#
# # Grids
# max_rule_size = (3, 3)
# max_non_empty = 4
# max_rules = None
# The permutations ================================================== > FAILURE!
# patts = [Permutation([3,2,1]), Permutation([3,4,1,2])]
# perm_prop = lambda p: all( p.avoids(q) for q in patts )
#
# perm_bound = 7
# ignored = 0
#
# # The dag
# max_len_patt = 3
# upper_bound = 2
# remove = True
#
# # Grids
# max_rule_size = (3, 3)
# max_non_empty = 4
# max_rules = None
# The permutations ================================================== > FAILURE!
# patts = [Permutation([3,2,1]), Permutation([3,1,4,2])]
# perm_prop = lambda p: all( p.avoids(q) for q in patts )
#
# perm_bound = 7
# ignored = 0
#
# # The dag
# max_len_patt = 3
# upper_bound = 2
# remove = True
#
# # Grids
# max_rule_size = (3, 3)
# max_non_empty = 4
# max_rules = None
# The permutations ================================================== > SUCCESS!
# patts = [Permutation([1,3,2]), Permutation([1,2,3,4])]
# perm_prop = lambda p: all( p.avoids(q) for q in patts )
#
# perm_bound = 7
# ignored = 0
#
# # The dag
# max_len_patt = 3
# upper_bound = 2
# remove = False
#
# # Grids
# max_rule_size = (3, 3)
# max_non_empty = 3
# max_rules = None
# The permutations ================================================== > SUCCESS!
# patts = [Permutation([1,3,2]), Permutation([4,2,1,3])]
# perm_prop = lambda p: all( p.avoids(q) for q in patts )
#
# perm_bound = 7
# ignored = 0
#
# # The dag
# max_len_patt = 3
# upper_bound = 2
# remove = False
#
# # Grids
# max_rule_size = (3, 3)
# max_non_empty = 3
# max_rules = None
# The permutations ================================================== > SUCCESS!
# patts = [Permutation([1,3,2]), Permutation([4,1,2,3])]
# perm_prop = lambda p: all( p.avoids(q) for q in patts )
#
# perm_bound = 7
# ignored = 0
#
# # The dag
# max_len_patt = 3
# upper_bound = 2
# remove = False
#
# # Grids
# max_rule_size = (3, 3)
# max_non_empty = 3
# max_rules = None
# The permutations ================================================== > SUCCESS!
# patts = [Permutation([1,3,2]), Permutation([3,1,2,4])]
# perm_prop = lambda p: all( p.avoids(q) for q in patts )
#
# perm_bound = 7
# ignored = 0
#
# # The dag
# max_len_patt = 3
# upper_bound = 2
# remove = False
#
# # Grids
# max_rule_size = (3, 3)
# max_non_empty = 3
# max_rules = None
# The permutations ================================================== > SUCCESS!
# patts = [Permutation([1,3,2]), Permutation([2,1,3,4])]
# perm_prop = lambda p: all( p.avoids(q) for q in patts )
#
# perm_bound = 7
# ignored = 0
#
# # The dag
# max_len_patt = 3
# upper_bound = 2
# remove = False
#
# # Grids
# max_rule_size = (3, 3)
# max_non_empty = 3
# max_rules = None
# The permutations ================================================== > FAILURE!
# The permutations ================================================== > FAILURE!
patts = [Permutation([1, 3, 2]), Permutation([3, 4, 1, 2])]


def perm_prop(p):
    """Return True when p avoids every pattern in patts (PEP 8: use def
    rather than assigning a lambda to a name)."""
    return all(p.avoids(q) for q in patts)


# Enumerate permutations up to this length; ignore none of them.
perm_bound = 7
ignored = 0

# The dag
max_len_patt = 2
upper_bound = 2
remove = True

# Grids
max_rule_size = (3, 3)
max_non_empty = 3
max_rules = None
# Creating the dag
# inp_dag = permstruct.dag.N_P_X1_mon(perm_prop, perm_bound)
inp_dag = permstruct.dag.taylor_dag(patts, max_len_patt=max_len_patt, perm_bound=perm_bound, remove=remove, upper_bound=upper_bound)
# List the dag elements before searching.
for el in inp_dag.elements:
    print(el.description if el is not None else 'None')

# Finding the rules and running exact cover
sol_iter = permstruct.exhaustive(perm_prop, perm_bound, inp_dag, max_rule_size, max_non_empty, max_rules, ignore_first=ignored)
# Each solution prints as a banner followed by its rules.
for sol in sol_iter:
    print('====================================')
    print("")
    for rule in sol:
        print(rule)
    print("")
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from salttesting import skipIf, TestCase
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
ensure_in_syspath('../../')
# Import Salt libs
from salt.utils.odict import OrderedDict
from salt.modules import pillar as pillarmod
# Fixture returned by the mocked salt.modules.pillar.items():
# one int-valued key and one str-valued key.
pillar_value_1 = dict(a=1, b='very secret')


class PillarModuleTestCase(TestCase):
    """Unit tests for the obfuscation helpers in salt.modules.pillar."""

    def test_obfuscate_inner_recursion(self):
        # _obfuscate_inner must recurse through dicts, lists and tuples,
        # replacing every leaf value by its type name.
        self.assertEqual(
            pillarmod._obfuscate_inner(dict(a=[1, 2],
                                            b=dict(pwd='secret', deeper=('a', 1)))),
            dict(a=['<int>', '<int>'],
                 b=dict(pwd='<str>', deeper=('<str>', '<int>')))
        )

    def test_obfuscate_inner_more_types(self):
        # OrderedDict, set and tuple containers must be preserved as-is,
        # only the leaves being replaced.
        self.assertEqual(pillarmod._obfuscate_inner(OrderedDict([('key', 'value')])),
                         OrderedDict([('key', '<str>')]))
        self.assertEqual(pillarmod._obfuscate_inner(set((1, 2))),
                         set(['<int>']))
        self.assertEqual(pillarmod._obfuscate_inner((1, 2)),
                         ('<int>', '<int>'))

    @skipIf(NO_MOCK, NO_MOCK_REASON)
    @patch('salt.modules.pillar.items', MagicMock(return_value=pillar_value_1))
    def test_obfuscate(self):
        # obfuscate() is items() passed through _obfuscate_inner.
        self.assertEqual(pillarmod.obfuscate(),
                         dict(a='<int>', b='<str>'))

    @skipIf(NO_MOCK, NO_MOCK_REASON)
    @patch('salt.modules.pillar.items', MagicMock(return_value=pillar_value_1))
    def test_ls(self):
        # NOTE(review): this assumes pillarmod.ls() yields the keys in
        # ['a', 'b'] order; fragile where dict ordering is not guaranteed
        # (Py3) -- see the assertCountEqual variant of this test.
        self.assertEqual(pillarmod.ls(), ['a', 'b'])


# gracinet: not sure this is really useful, but other test modules have this as well
if __name__ == '__main__':
    from integration import run_tests
    run_tests(PillarModuleTestCase, needs_daemon=False)
Use assertCountEqual instead of assertEqual for lists in Py3, where the order of the keys returned by pillar.ls() is not guaranteed.
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from salttesting import skipIf, TestCase
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
ensure_in_syspath('../../')
# Import Salt libs
import salt.ext.six as six
from salt.utils.odict import OrderedDict
from salt.modules import pillar as pillarmod
# Fixture returned by the mocked salt.modules.pillar.items():
# one int-valued key and one str-valued key.
pillar_value_1 = dict(a=1, b='very secret')


class PillarModuleTestCase(TestCase):
    """Unit tests for the obfuscation helpers in salt.modules.pillar."""

    def test_obfuscate_inner_recursion(self):
        # _obfuscate_inner must recurse through dicts, lists and tuples,
        # replacing every leaf value by its type name.
        self.assertEqual(
            pillarmod._obfuscate_inner(dict(a=[1, 2],
                                            b=dict(pwd='secret', deeper=('a', 1)))),
            dict(a=['<int>', '<int>'],
                 b=dict(pwd='<str>', deeper=('<str>', '<int>')))
        )

    def test_obfuscate_inner_more_types(self):
        # OrderedDict, set and tuple containers must be preserved as-is,
        # only the leaves being replaced.
        self.assertEqual(pillarmod._obfuscate_inner(OrderedDict([('key', 'value')])),
                         OrderedDict([('key', '<str>')]))
        self.assertEqual(pillarmod._obfuscate_inner(set((1, 2))),
                         set(['<int>']))
        self.assertEqual(pillarmod._obfuscate_inner((1, 2)),
                         ('<int>', '<int>'))

    @skipIf(NO_MOCK, NO_MOCK_REASON)
    @patch('salt.modules.pillar.items', MagicMock(return_value=pillar_value_1))
    def test_obfuscate(self):
        # obfuscate() is items() passed through _obfuscate_inner.
        self.assertEqual(pillarmod.obfuscate(),
                         dict(a='<int>', b='<str>'))

    @skipIf(NO_MOCK, NO_MOCK_REASON)
    @patch('salt.modules.pillar.items', MagicMock(return_value=pillar_value_1))
    def test_ls(self):
        # On Py3 dict key order is not guaranteed, so compare as an
        # unordered collection there; Py2 keeps the exact-list assertion.
        if six.PY3:
            self.assertCountEqual(pillarmod.ls(), ['a', 'b'])
        else:
            self.assertEqual(pillarmod.ls(), ['a', 'b'])


# gracinet: not sure this is really useful, but other test modules have this as well
if __name__ == '__main__':
    from integration import run_tests
    run_tests(PillarModuleTestCase, needs_daemon=False)
|
"""
"""
from Tkinter import *
from re import escape
import mimetypes
class AreaVi(Text):
    # Class-level reference to the AreaVi instance that plugin commands
    # should act on; set in __init__ and refreshed through active().
    ACTIVE = None

    def __init__(self, default_filename, *args, **kwargs):
        """
        This class receives all Text widget arguments
        and one named default_filename which means
        the filename that is saved when no filename
        is specified.

        default_filename:
            The default path file where contents are saved.

        It implements a system of modes to handle
        tkinter keypresses events.
        The method hook can be used to bind events
        to callbacks mapped to specific modes.
        """
        Text.__init__(self, *args, **kwargs)
        # Maps mode id -> propagate flag (see add_mode/chmode).
        self.setup = dict()
        # Maybe it should be?
        # abspath(default_filename)
        self.default_filename = default_filename
        # The file's path and name.
        self.filename = default_filename
        # Shouldn't it be LAST_COL and MSEL?
        # Mark remembering the column the cursor last occupied; used by
        # up()/down() so vertical movement keeps the column.
        self.last_col = '_last_col_'
        self.mark_set(self.last_col, '1.0')
        # This mark is used in AreaVi.replace_all.
        self.STOP_REPLACE_INDEX = '_stop_replace_index_'
        # Tags have name starting and ending with __
        # def cave(event):
        #     AreaVi.ACTIVE = event.widget
        # self.hook(-1, '<FocusIn>', cave)
        AreaVi.ACTIVE = self
        # Charset used by load_data/save_data to decode/encode file data.
        self.charset = 'utf-8'

    def active(self):
        """
        It is used to create a model of target for plugins
        defining python functions to run on fly.

        With such an abstraction it is possible to define an AreaVi
        instance target that python code will act on.
        """
        AreaVi.ACTIVE = self

    def chmode(self, id):
        """
        This function is used to change the AreaVi instance's mode.
        It receives one parameter named id which means the
        mode number.
        """
        opt = self.setup[id]
        self.id = id
        # Bindtag names embed str(self) (the Tk widget path) so they are
        # unique per AreaVi instance.
        MODE_X = 'mode%s-1' % self
        MODE_Y = 'mode%s%s' % (self, id)
        # When opt is true the standard 'Text' class bindings stay in the
        # bindtag chain, so ordinary keypresses still insert characters.
        if opt: self.bindtags((MODE_X, MODE_Y, self, 'Text', '.'))
        else: self.bindtags((MODE_X, MODE_Y, self, '.'))

    def add_mode(self, id, opt=False):
        """
        It adds a new mode. The opt argument means whether
        it should propagate the event to the internal text widget callbacks.
        """
        self.setup[id] = opt

    def del_mode(self, id):
        """
        It performs the opposite of add_mode.
        """
        del self.setup[id]

    def hook(self, id, seq, callback):
        """
        This method is used to hook a callback to a sequence
        specified with its mode. The standard modes are insert and selection.
        The insert mode prints the key character on the text area.
        """
        MODE_Y = 'mode%s%s' % (self, id)
        self.bind_class(MODE_Y, seq, callback, add=True)

    def unhook(self, id, seq, callback=None):
        """
        It performs the opposite of hook: it unbinds seq from the given
        mode. (callback is accepted for symmetry but unused.)
        """
        MODE_Y = 'mode%s%s' % (self, id)
        self.unbind_class(MODE_Y, seq)

    def install(self, *args):
        """
        It is like self.hook but accepts
        a sequence of (id, seq, callback).
        """
        for id, seq, callback in args:
            self.hook(id, seq, callback)

    def uninstall(self, *args):
        """
        Like self.unhook but accepts a sequence of
        (id, seq, callback).
        """
        for id, seq, callback in args:
            self.unhook(id, seq, callback)
    def append(self, data):
        """
        Insert data at the end of the text, move the insertion cursor
        there and scroll it into view.
        """
        self.insert('end', data)
        self.mark_set('insert', 'end')
        self.see('end')

    def curline(self):
        """
        A short hand for.
        area.get('insert linestart', 'insert +1l linestart')
        """
        return self.get('insert linestart', 'insert +1l linestart')

    def tag_update(self, name, index0, index1, *args):
        """
        It removes a given tag from index0 to index1 and re adds
        the tag to the ranges of text delimited in args.

        Example:
        DATA_X = 'It is black.\n'
        DATA_Y = 'It is blue.\n'
        text = Text()
        text.pack()
        text.insert('1.0', DATA_X)
        text.insert('2.0', DATA_Y)
        text.tag_add('X', '1.0', '1.0 lineend')
        text.tag_add('Y', '2.0', '2.0 lineend')
        text.tag_config('X', background='black')
        text.tag_config('Y', foreground='blue')
        text.tag_update(text, 'X', '1.0', 'end', ('2.0', '2.0 lineend'))

        It removes the X tag from '1.0' to 'end' then adds
        the X tag to the range '2.0' '2.0 lineend'.
        """
        self.tag_remove(name, index0, index1)
        for indi, indj in args:
            self.tag_add(name, indi, indj)

    def insee(self, index, data):
        """
        Insert data at index and make the insertion cursor visible.
        """
        self.insert(index, data)
        self.see('insert')

    def cmd_like(self):
        """
        Return the contents of the cursor line and delete that text
        (the trailing newline is left in place).
        """
        data = self.get('insert linestart', 'insert lineend')
        self.delete('insert linestart', 'insert lineend')
        return data

    def indref(self, index):
        """
        This is a short hand function.
        It is used to convert a Text index
        into two integers.

        Ex:
        a, b = area.indref('insert')

        Now, a and b can be manipulated
        as numbers.
        """
        a, b = self.index(index).split('.')
        return int(a), int(b)

    def setcur(self, line, col):
        """
        It is used to set the cursor position at
        a given index using line and col.

        line is a number which represents
        a given line index in the AreaVi instance.
        col is a column.
        """
        self.mark_set('insert', '%s.%s' % (line, col))
        self.see('insert')

    def setcurl(self, line):
        """
        set cursor line.

        It is used to set the cursor position at a given
        line. It sets the cursor at line.0 position.
        """
        self.mark_set('insert', '%s.%s' % (line, '0'))
        self.see('insert')

    def indint(self, index):
        """
        This method is used
        when i can't use self.indref.
        It seems self.indref returns
        2.10 when the input is 2.34
        it happens when the index col
        is longer than the actual line col.
        """
        # Splits the raw 'line.col' string without normalizing it first.
        a, b = index.split('.')
        return int(a), int(b)

    def indcol(self):
        """
        This is a short hand method for getting
        the last col in which the cursor was in.
        It is useful when implementing functions to
        select pieces of text.
        """
        a, b = self.indref(self.last_col)
        return int(a), int(b)

    def setcol(self, line, col):
        """
        It sets the mark used by the arrows
        keys and selection state.
        """
        self.mark_set(self.last_col, '%s.%s' % (line, col))

    def indcur(self):
        """
        It returns the actual line, col for the
        cursor position. So, the values can be
        manipulated with integers.
        """
        a, b = self.indref('insert')
        return int(a), int(b)
    def seecur(self):
        """
        Just a shorthand for area.see('insert')
        which makes the cursor visible wherever it is in.
        """
        self.see('insert')

    def inset(self, index):
        """
        Just a shorthand for area.mark_set('insert', index)
        so we spare some typing.
        """
        self.mark_set('insert', index)

    def is_end(self):
        """
        Despite the name, this returns True while the cursor is NOT yet
        on the last visible line (its linestart differs from
        'end -1l linestart'), i.e. while it is still safe to move down.

        This is useful when implementing other methods,
        like those from visual block selection, to avoid
        the cursor jumping to odd places when it achieves
        the end of the text region.
        """
        # I have to use 'end -1l linestart' since it seems the 'end' tag
        # corresponds to a one line after the last visible line.
        # So last line lineend != 'end'.
        return self.compare('insert linestart', '!=', 'end -1l linestart')

    def is_start(self):
        """
        Despite the name, this returns True while the cursor line is NOT
        the first line ('1.0'), i.e. while it is still safe to move up.
        """
        return self.compare('insert linestart', '!=', '1.0')

    def down(self):
        """
        It sets the cursor position one line down.
        """
        if self.is_end():
            # We first check if it is at the end
            # so we avoid the cursor jumping at odd positions.
            a, b = self.indcol()
            c, d = self.indcur()
            # Keep the remembered column (b) while moving down a line.
            self.setcur(c + 1, b)

    def up(self):
        """
        It sets the cursor one line up.
        """
        if self.is_start():
            a, b = self.indcol()
            c, d = self.indcur()
            # Keep the remembered column (b) while moving up a line.
            self.setcur(c - 1, b)

    def left(self):
        """
        It moves the cursor one character left.
        """
        self.mark_set('insert', 'insert -1c')
        # The mark used by self.down, self.up.
        self.mark_set(self.last_col, 'insert')

    def right(self):
        """
        It moves the cursor one character right.
        """
        self.mark_set('insert', 'insert +1c')
        # The mark used by self.down, self.up.
        self.mark_set(self.last_col, 'insert')

    def start_selection(self):
        """
        It sets the mark sel_start to the insert position.
        So, when sel_up, sel_down, sel_right, sel_left are
        called then they will select a region from this mark.
        """
        self.mark_set('_sel_start_', 'insert')

    def start_block_selection(self):
        # Anchor mark for rectangular (block) selection; see block_up/down.
        self.mark_set('_block_sel_start_', 'insert')

    def is_add_up(self, index):
        """
        It checks whether the selection must be
        removed or added.

        If it returns True then the selection must be
        removed. True means that the '_sel_start_'
        mark is positioned above the cursor position.
        So, it must remove the selection instead of
        adding it.
        """
        return self.compare('%s linestart' % index, '<=', 'insert linestart')

    def rmsel(self, index0, index1):
        """
        This method is a short hand for area.tag_remove('sel', index0, index1)
        """
        # Normalize the pair so index2 <= index3 regardless of call order.
        index2 = self.min(index0, index1)
        index3 = self.max(index0, index1)
        self.tag_remove('sel', index2, index3)

    def addsel(self, index0, index1):
        """
        It adds 'sel' to the range (AreaVi.min(index0, index1),
        AreaVi.max(index0, index1))
        """
        index2 = self.min(index0, index1)
        index3 = self.max(index0, index1)
        self.tag_add('sel', index2, index3)

    def min(self, index0, index1):
        """
        It returns the min between index0 and index1.
        """
        if self.compare(index0, '<=', index1):
            return index0
        else:
            return index1

    def max(self, index0, index1):
        """
        It returns the max between index0 and index1.
        """
        if self.compare(index0, '<=', index1):
            return index1
        else:
            return index0
    def sel_up(self):
        """
        It adds 'sel' one line up the 'insert' position
        and sets the cursor one line up.
        """
        # Clear the current selection span, move, then re-tag the new span.
        index0 = self.min('_sel_start_', 'insert')
        index1 = self.max('_sel_start_', 'insert')
        self.rmsel(index0, index1)
        self.up()
        index0 = self.min('_sel_start_', 'insert')
        index1 = self.max('_sel_start_', 'insert')
        self.addsel(index0, index1)

    def is_add_down(self, index):
        """
        It returns True if the cursor is positioned below
        the initial mark for selection.
        It determins if the selection must be removed or added when
        sel_down is called.
        """
        return self.compare('%s linestart' % index, '>=', 'insert linestart')

    def sel_down(self):
        """
        It adds or removes selection one line down.
        """
        index0 = self.min('_sel_start_', 'insert')
        index1 = self.max('_sel_start_', 'insert')
        self.rmsel(index0, index1)
        self.down()
        index0 = self.min('_sel_start_', 'insert')
        index1 = self.max('_sel_start_', 'insert')
        self.addsel(index0, index1)

    def is_add_right(self, index):
        """
        It returns True if the cursor is positioned at the left
        of the initial selection mark. It is useful for sel_right method.
        """
        return self.compare(index, '>=', 'insert')

    def sel_right(self):
        """
        It adds or removes selection one character right.
        """
        index0 = self.min('_sel_start_', 'insert')
        index1 = self.max('_sel_start_', 'insert')
        self.rmsel(index0, index1)
        self.right()
        index0 = self.min('_sel_start_', 'insert')
        index1 = self.max('_sel_start_', 'insert')
        self.addsel(index0, index1)

    def is_add_left(self, index):
        """
        It returns True if the cursor is positioned at the right of
        the initial mark selection.
        """
        return self.compare(index, '<=', 'insert')

    def sel_left(self):
        """
        It adds or removes selection one character left.
        """
        index0 = self.min('_sel_start_', 'insert')
        index1 = self.max('_sel_start_', 'insert')
        self.rmsel(index0, index1)
        self.left()
        index0 = self.min('_sel_start_', 'insert')
        index1 = self.max('_sel_start_', 'insert')
        self.addsel(index0, index1)

    def indmsel(self):
        """
        It is just a shorthand for getting the last selection mark.
        """
        a, b = self.indref('_sel_start_')
        return int(a), int(b)
def addblock(self, index0, index1):
"""
It adds block selection from index0 to index1.
"""
index2 = self.min(index0, index1)
index3 = self.max(index0, index1)
a, b = self.indint(index2)
c, d = self.indint(index3)
for ind in xrange(a, c + 1):
e = min(b, d)
f = max(b, d)
self.addsel('%s.%s' % (ind, e), '%s.%s' % (ind, f))
def rmblock(self, index0, index1):
"""
It removes block selection from index0 to index1.
"""
index2 = self.min(index0, index1)
index3 = self.max(index0, index1)
a, b = self.indint(index2)
c, d = self.indint(index3)
for ind in xrange(a, c + 1):
e = min(b, d)
f = max(b, d)
self.rmsel('%s.%s' % (ind, e), '%s.%s' % (ind, f))
    def block_down(self):
        """
        It adds or removes block selection one line down.
        """
        a, b = self.indcol()
        c, d = self.indcur()
        index = self.index('_block_sel_start_')
        # Clear the old rectangle, move the cursor, re-tag the new one.
        self.rmblock(index, '%s.%s' % (c, b))
        self.down()
        a, b = self.indcol()
        c, d = self.indcur()
        self.addblock(index, '%s.%s' % (c, b))

    def block_up(self):
        """
        It adds or removes block selection one line up.
        """
        a, b = self.indcol()
        c, d = self.indcur()
        index = self.index('_block_sel_start_')
        self.rmblock(index, '%s.%s' % (c, b))
        self.up()
        a, b = self.indcol()
        c, d = self.indcur()
        self.addblock(index, '%s.%s' % (c, b))

    def is_line_start(self):
        """
        Despite the name, this returns True while the cursor is NOT at the
        start of its line ('insert' differs from 'insert linestart').
        """
        return self.compare('insert', '!=', 'insert linestart')

    def block_left(self):
        """
        It adds block selection to the left.
        """
        a, b = self.indcol()
        c, d = self.indcur()
        index = self.index('_block_sel_start_')
        self.rmblock(index, '%s.%s' % (c, b))
        self.left()
        a, b = self.indcol()
        c, d = self.indcur()
        self.addblock(index, '%s.%s' % (c, b))

    def is_line_end(self):
        """
        Despite the name, this returns True while the cursor is NOT at the
        end of its line ('insert' differs from 'insert lineend').
        """
        return self.compare('insert', '!=', 'insert lineend')

    def block_right(self):
        """
        It adds a block selection to the right.
        """
        a, b = self.indcol()
        c, d = self.indcur()
        index = self.index('_block_sel_start_')
        self.rmblock(index, '%s.%s' % (c, b))
        self.right()
        a, b = self.indcol()
        c, d = self.indcur()
        self.addblock(index, '%s.%s' % (c, b))

    def clear_selection(self):
        """
        It removes 'sel' tag from all ranges.
        """
        try:
            self.tag_remove('sel', 'sel.first', 'sel.last')
        except Exception:
            # 'sel.first'/'sel.last' raise when nothing is selected;
            # best-effort clearing, so that case is ignored.
            pass

    def select_char(self):
        """
        it adds 'sel' a char ahead the cursor position.
        """
        self.addsel('insert', 'insert +1c')

    def unselect_char(self):
        """
        It removes 'sel' a char from the cursor position.
        """
        self.rmsel('insert', 'insert +1c')

    def clchar(self):
        """
        It deletes a char from the cursor position.
        """
        # edit_separator makes the deletion its own undo step.
        self.edit_separator()
        self.delete('insert', 'insert +1c')

    def do_undo(self):
        """
        It does undo.
        """
        try:
            self.edit_undo()
        except TclError:
            # Tk raises TclError when the undo stack is empty.
            pass

    def do_redo(self):
        """
        It redoes.
        """
        try:
            self.edit_redo()
        except TclError:
            # Tk raises TclError when the redo stack is empty.
            pass
    def sel_text_start(self):
        """
        It selects all text from insert position to the start position
        of the text.
        """
        index = self.index('insert')
        self.go_text_start()
        self.addsel(index, 'insert')

    def sel_text_end(self):
        """
        It selects all text from the insert position to the end of the text.
        """
        index = self.index('insert')
        self.go_text_end()
        self.addsel(index, 'insert')

    def go_text_start(self):
        """
        It goes to the first position in the text.
        """
        self.mark_set('insert', '1.0')
        self.see('insert')

    def go_text_end(self):
        """
        It goes to the end of the text.
        """
        self.mark_set('insert', 'end linestart')
        self.see('insert')

    def sel_line_start(self):
        """
        It adds selection from the insert position to the
        start of the line.
        """
        index = self.index('insert')
        self.go_line_start()
        self.addsel(index, 'insert')

    def sel_line_end(self):
        """
        It selects all text from insert position to the end of the line.
        """
        index = self.index('insert')
        self.go_line_end()
        self.addsel(index, 'insert')

    def go_line_start(self):
        """
        It goes to the beginning of the cursor position line.
        """
        self.mark_set('insert', 'insert linestart')

    def go_line_end(self):
        """
        It goes to the end of the cursor position line.
        """
        self.mark_set('insert', 'insert lineend')

    def go_next_word(self):
        """
        It puts the cursor on the beginning of the next word.
        """
        # '\M' is a Tcl regex word-boundary escape (end of word);
        # NOTE(review): for "beginning of word" Tcl uses '\m' -- confirm
        # the intended boundary.
        self.seek_next_down('\M')

    def go_prev_word(self):
        """
        It puts the cursor in the beginning of the previous word.
        """
        self.seek_next_up('\M')

    def go_next_sym(self, chars):
        """
        It puts the cursor on the next occurrence of the symbols in chars.
        """
        # Escape each symbol so it is matched literally in the Tcl regex.
        chars = map(lambda ind: escape(ind), chars)
        REG = '|'.join(chars)
        self.seek_next_down(REG)

    def go_prev_sym(self, chars):
        """
        It puts the cursor on the previous occurrence of the symbols
        in chars.
        """
        chars = map(lambda ind: escape(ind), chars)
        REG = '|'.join(chars)
        self.seek_next_up(REG)

    def cllin(self):
        """
        It deletes the cursor position line, makes the cursor visible
        and adds a separator to the undo stack.
        """
        self.edit_separator()
        self.delete('insert linestart', 'insert +1l linestart')
        self.see('insert')

    def cpsel(self):
        """
        It copies to the clip board ranges of text
        that are selected and removes the selection.
        """
        data = self.tag_get_ranges('sel')
        self.clipboard_clear()
        self.clipboard_append(data)
        self.tag_remove('sel', 'sel.first', 'sel.last')

    def cpblock(self):
        """
        It copies blocks of text that are selected
        with a separator '\n'.
        """
        data = self.tag_get_ranges('sel', '\n')
        self.clipboard_clear()
        self.clipboard_append(data)
        self.tag_remove('sel', 'sel.first', 'sel.last')

    def ctblock(self):
        """
        It cuts blocks of text with a separator '\n'.
        """
        data = self.tag_get_ranges('sel', '\n')
        self.clipboard_clear()
        self.clipboard_append(data)
        self.edit_separator()
        self.tag_delete_ranges('sel')

    def ctsel(self):
        """
        It cuts the selected text.
        """
        data = self.tag_get_ranges('sel')
        self.clipboard_clear()
        self.clipboard_append(data)
        self.edit_separator()
        self.tag_delete_ranges('sel')

    def clsel(self):
        """
        It deletes all selected text.
        """
        self.edit_separator()
        self.tag_delete_ranges('sel')
    def ptsel(self):
        """
        It pastes over the cursor position data from the clipboard
        and adds a separator.
        """
        data = self.clipboard_get()
        self.edit_separator()
        self.insert('insert', data)

    def ptsel_after(self):
        """
        It pastes one line after the cursor position data from clipboard
        and adds a separator.
        """
        data = self.clipboard_get()
        self.edit_separator()
        self.insert('insert +1l linestart', data)

    def ptsel_before(self):
        """
        It pastes data from the cursor position one line before the cursor
        position and adds a separator.
        """
        data = self.clipboard_get()
        self.edit_separator()
        self.insert('insert linestart', data)

    def select_line(self):
        """
        It adds selection to the cursor position line.
        """
        self.tag_add('sel', 'insert linestart', 'insert +1l linestart')

    def unselect_line(self):
        """
        It removes selection from the cursor position line.
        """
        self.tag_remove('sel', 'insert linestart', 'insert +1l linestart')

    def toggle_line_selection(self):
        """
        Select the cursor line if it is not fully selected, otherwise
        unselect it.
        """
        map = self.tag_contains('sel', 'insert linestart', 'insert +1l linestart')
        if map:
            self.unselect_line()
        else:
            self.select_line()

    def select_word(self):
        """
        It selects a word from the cursor position.
        """
        # Word boundaries are approximated by the nearest spaces on the
        # cursor line; falls back to line start/end when none is found.
        index1 = self.search(' ', 'insert', stopindex='insert linestart', backwards=True)
        index2 = self.search(' ', 'insert', stopindex='insert lineend')
        self.tag_add('sel', 'insert linestart' if not index1 else '%s +1c' % index1,
                     'insert lineend' if not index2 else index2)

    def scroll_line_up(self):
        """
        It scrolls one line up
        """
        self.yview(SCROLL, -1, 'units')
        # dlineinfo returns None when 'insert' scrolled out of view;
        # drag the cursor along so it stays visible.
        is_visible = self.dlineinfo('insert')
        if not is_visible:
            self.mark_set('insert', 'insert -1l')

    def scroll_line_down(self):
        """
        It scrolls one line down.
        """
        self.yview(SCROLL, 1, 'units')
        is_visible = self.dlineinfo('insert')
        if not is_visible:
            self.mark_set('insert', 'insert +1l')

    def scroll_page_down(self):
        """
        It goes one page down.
        """
        self.yview(SCROLL, 1, 'page')
        # '@0,0' is the index of the top-left visible character.
        self.mark_set('insert', '@0,0')

    def scroll_page_up(self):
        """
        It goes one page up.
        """
        self.yview(SCROLL, -1, 'page')
        self.mark_set('insert', '@0,0')

    def insert_line_down(self):
        """
        It inserts one line down from the cursor position.
        """
        self.edit_separator()
        self.insert('insert +1l linestart', '\n')
        self.mark_set('insert', 'insert +1l linestart')
        self.see('insert')

    def select_all(self):
        """
        It selects all text.
        """
        self.tag_add('sel', '1.0', 'end')

    def insert_line_up(self):
        """
        It inserts one line up.
        """
        self.edit_separator()
        self.insert('insert linestart', '\n')
        self.mark_set('insert', 'insert -1l linestart')
        self.see('insert')

    def shift_sel_right(self, width, char):
        """
        Shift the lines covered by the selection width chars to the
        right, padding with char (see shift_right).
        """
        srow, scol = self.indref('sel.first')
        erow, ecol = self.indref('sel.last')
        self.shift_right(srow, erow, width, char)

    def shift_sel_left(self, width):
        """
        Shift the lines covered by the selection width chars to the
        left (see shift_left).
        """
        srow, scol = self.indref('sel.first')
        erow, ecol = self.indref('sel.last')
        self.shift_left(srow, erow, width)

    def shift_right(self, srow, erow, width, char):
        """
        Given a start row and a end row it shifts
        a block of text to the right.

        This is specially useful when working with
        source code files.
        """
        self.edit_separator()
        for ind in xrange(srow, erow + 1):
            self.insert('%s.0' % ind, width * char)

    def shift_left(self, srow, erow, width):
        """
        Given a start row and a end row it shifts
        a block of text to the left.

        This is specially useful when working with
        source code files.
        """
        self.edit_separator()
        for ind in xrange(srow, erow + 1):
            self.delete('%s.0' % ind, '%s.%s' % (ind, width))
    def tag_find_ranges(self, name, regex, *args, **kwargs):
        """
        It returns an interator corresponding to calling AreaVi.find
        between the ranges of the tag specified by name.

        You shouldn't delete or insert data while performing this operation.
        """
        # It should be built on top of nextrange.
        map = self.tag_ranges(name)
        # tag_ranges yields flat (start0, end0, start1, end1, ...) pairs.
        for indi in range(0, len(map) - 1, 2):
            seq = self.find(regex, map[indi], map[indi + 1], *args, **kwargs)
            for indj in seq:
                yield indj

    def tag_replace_ranges(self, name, regex, data, index='1.0', stopindex='end',
                           *args, **kwargs):
        """
        It replaces all occurrences of regex inside a tag ranges
        for data.

        name - Name of the tag.
        regex - The pattern.
        data - The data to replace.
        args - Arguments given to AreaVi.find.
        **kwargs - A dictionary of arguments given to AreaVi.find.
        """
        while True:
            map = self.tag_nextrange(name, index, stopindex)
            if not map: break
            index3, index4 = map
            # Continue scanning after the range just processed.
            index = index4
            self.replace_all(regex, data, index3, index4, *args, **kwargs)

    def tag_setup(self, theme):
        """
        Just a short hand for

        theme = {'tag_name': {'background': 'blue'}}
        for name, kwargs in theme.iteritems():
            self.tag_config(name, **kwargs)
            self.tag_lower(name)
        """
        for name, kwargs in theme.iteritems():
            self.tag_config(name, **kwargs)
            # Lower the tag so it does not hide the 'sel' tag highlight.
            self.tag_lower(name)

    def tag_add_found(self, name, map):
        """
        It adds a tag to the match ranges from either AreaVi.find or
        AreaVi.tag_find_ranges.

        name - The tag to be added.
        map - An iterator from AreaVi.find or AreaVi.tag_find_ranges.
        """
        for _, index0, index1 in map:
            self.tag_add(name, index0, index1)

    def split_with_cond(self, regex, cond, *args, **kwargs):
        """
        It determines which chunks should be yielded based on cond.
        """
        for chk, index0, index1 in self.split(regex, *args, **kwargs):
            data = cond(chk, index0, index1)
            if data: yield data

    def split(self, *args, **kwargs):
        """
        It splits the contents of the text widget into chunks based on a regex.
        """
        index0 = '1.0'
        for chk, index1, index2 in self.find(*args, **kwargs):
            # Yield the unmatched text between the previous match and this
            # one, then the match itself.
            if self.compare(index1, '>', index0):
                yield(self.get(index0, index1), index0, index1)
            yield(chk, index1, index2)
            index0 = index2

    def find_with_cond(self, regex, cond, *args, **kwargs):
        """
        It determines which matches should be yielded.
        """
        for chk, index0, index1 in self.find(regex, *args, **kwargs):
            data = cond(chk, index0, index1)
            if not data: continue
            yield(data)

    def find_one_by_line(self, regex, index, stopindex='end', exact=None, regexp=True, nocase=None,
                         elide=None, nolinestop=None):
        """
        Like AreaVi.find but yields at most one (chunk, start, end) match
        per line: after each match it resumes on the next line.
        """
        count = IntVar()
        while True:
            index = self.search(regex, index, stopindex, exact=exact, nocase=nocase,
                                nolinestop=nolinestop, regexp=regexp, elide=elide, count=count)
            if not index:
                break
            # NOTE(review): 'len' shadows the builtin here; kept as-is.
            len = count.get()
            tmp = '%s +%sc' % (index, len)
            chunk = self.get(index, tmp)
            pos0 = self.index(index)
            pos1 = self.index('%s +%sc' % (index, len))
            # Jump to the next line so only one match per line is yielded.
            index = '%s +1l' % pos0
            yield(chunk, pos0, pos1)
def find(self, regex, index='1.0', stopindex='end', exact=None, regexp=True, nocase=None,
elide=None, nolinestop=None):
"""
It returns an iterator of matches. It is based on the Text.search method
"""
count = IntVar()
while True:
index = self.search(regex, index, stopindex, exact=exact, nocase=nocase,
nolinestop=nolinestop, regexp=regexp, elide=elide, count=count)
if not index:
break
len = count.get()
tmp = '%s +%sc' % (index, len)
chunk = self.get(index, tmp)
pos0 = self.index(index)
pos1 = self.index('%s +%sc' % (index, len))
index = '%s +1c' % tmp
yield(chunk, pos0, pos1)
    def search(self, pattern, index, stopindex=None, forwards=None,
               backwards=None, exact=None, regexp=None, nocase=None,
               count=None, elide=None, nolinestop=None):
        '''Standard search method, but with support for the nolinestop
        option which is new in tk 8.5 but not supported by tkinter out
        of the box.
        '''
        # Build the raw Tcl 'text search' command argument list by hand so
        # the -nolinestop switch can be passed through.
        args = [self._w, 'search']
        if forwards: args.append('-forwards')
        if backwards: args.append('-backwards')
        if exact: args.append('-exact')
        if regexp: args.append('-regexp')
        if nocase: args.append('-nocase')
        if elide: args.append('-elide')
        if nolinestop: args.append("-nolinestop")
        if count: args.append('-count'); args.append(count)
        # '--' stops option parsing so a pattern beginning with '-' is not
        # mistaken for a switch.
        if pattern and pattern[0] == '-': args.append('--')
        args.append(pattern)
        args.append(index)
        if stopindex: args.append(stopindex)
        return str(self.tk.call(tuple(args)))

    def seek_next_up(self, regex, index0='insert', stopindex='1.0', exact=None, regexp=True,
                     nocase=None, elide=None, nolinestop=None):
        """
        Find the next match with regex up the cursor.
        It sets the cursor at the index of the occurrence.
        """
        count = IntVar()
        index = self.search(regex, index0, stopindex=stopindex, regexp=regexp, exact=exact,
                            nocase=nocase, elide=elide, nolinestop=nolinestop, backwards=True, count=count)
        if not index: return
        index1 = self.index('%s +%sc' % (index, count.get()))
        # Backwards: place the cursor at the match start.
        self.mark_set('insert', index)
        self.see('insert')
        return index, index1

    def seek_next_down(self, regex, index0='insert', stopindex='end', exact=None, regexp=True,
                       nocase=None, elide=None, nolinestop=None):
        """
        Find the next match with regex down.
        It sets the cursor at the index of the occurrence.
        """
        count = IntVar()
        index = self.search(regex, index0, stopindex=stopindex, regexp=regexp, exact=exact, nocase=nocase,
                            elide=elide, nolinestop=nolinestop, count=count)
        if not index: return
        index1 = self.index('%s +%sc' % (index, count.get()))
        # Forwards: place the cursor just after the match end.
        self.mark_set('insert', index1)
        self.see('insert')
        return index, index1

    def pick_next_up(self, name, *args, **kwargs):
        """
        Like seek_next_up, but also tags the match range with name.
        Returns the (start, end) pair or None when there is no match.
        """
        index = self.seek_next_up(*args, **kwargs)
        if not index:
            return
        self.tag_add(name, *index)
        return index

    def pick_next_down(self, name, *args, **kwargs):
        """
        Like seek_next_down, but also tags the match range with name.
        Returns the (start, end) pair or None when there is no match.
        """
        index = self.seek_next_down(*args, **kwargs)
        if not index:
            return
        self.tag_add(name, *index)
        return index

    def replace(self, regex, data, index=None, stopindex=None, forwards=None,
                backwards=None, exact=None, regexp=True, nocase=None, elide=None, nolinestop=None):
        """
        It is used to replace occurrences of a given match.

        It is possible to use a callback function to return what is replaced
        as well.
        """
        count = IntVar()
        index = self.search(regex, index, stopindex, forwards=forwards, backwards=backwards, exact=exact, nocase=nocase,
                            nolinestop=nolinestop, regexp=regexp, elide=elide, count=count)
        if not index: return
        # A callable `data` receives the match range and returns the
        # replacement text.
        if callable(data): data = data(index, self.index('%s +%sc' % (index, count.get())))
        index0 = self.index('%s +%sc' % (index, count.get()))
        self.delete(index, index0)
        self.insert(index, data)
        return index, len(data)

    def replace_all(self, regex, data, index='1.0', stopindex='end', exact=None, regexp=True, nocase=None,
                    elide=None, nolinestop=None):
        """
        It is used to replace all occurrences of a given match in a range.
        It accepts a callback function that determines what is replaced.
        """
        # It is needed because the range will grow
        # when data is inserted, the intent is searching
        # over a pre defined range.
        self.mark_set(self.STOP_REPLACE_INDEX, stopindex)
        while True:
            map = self.replace(regex, data, index, self.STOP_REPLACE_INDEX, exact=exact, nocase=nocase,
                               nolinestop=nolinestop, regexp=regexp, elide=elide)
            if not map: return
            index, size = map
            # Resume right after the inserted replacement text.
            index = self.index('%s +%sc' % (index, size))

    def get_paren_search_dir(self, start, end):
        """
        Return the search direction for the delimiter under the cursor:
        False (forwards) when it is `start`, True (backwards) when it is
        `end`, None when it is neither.
        """
        char = self.get('insert', 'insert +1c')
        if char == start:
            return False
        elif char == end:
            return True
        else:
            return None

    def get_paren_search_sign(self, start, end):
        """
        Return the direction sign for the search stopindex offset:
        '+' when the char under the cursor is `start`, '-' when it is
        `end`, None otherwise.
        """
        char = self.get('insert', 'insert +1c')
        if char == start:
            return '+'
        elif char == end:
            return '-'
        else:
            return None
def select_case_pair(self, pair, MAX=1500):
"""
"""
index = self.case_pair(MAX, *pair)
if not index: return
min = self.min(index, 'insert')
if self.compare(min, '==', 'insert'): min = '%s +1c' % min
max = self.max(index, 'insert')
if self.compare(max, '==', 'insert'): min = '%s +1c' % min
self.tag_add('sel', min, max)
def case_pair(self, max, start='(', end=')'):
"""
Once this method is called, it returns an index for the next
matching parenthesis or None if the char over the cursor
isn't either '(' or ')'.
"""
dir = self.get_paren_search_dir(start, end)
# If dir is None then there is no match.
if dir == None: return ''
REG = '\%s|\%s' % (start, end)
sign = self.get_paren_search_sign(start, end)
count = 0
# If we are searching fowards we don't need
# to add 1c.
index = 'insert %s' % ('+1c' if dir else '')
size = IntVar(0)
while True:
index = self.search(REG, index = index,
stopindex = 'insert %s%sc' % (sign, max),
count = size,
backwards = dir,
regexp = True)
if not index: return ''
char = self.get(index, '%s +1c' % index)
count = count + (1 if char == start else -1)
if not count:
return index
# When we are searching backwards we don't need
# to set a character back because index will point
# to the start of the match.
index = '%s %s' % (index, '+1c' if not dir else '')
def clear_data(self):
    """
    It clears all text inside an AreaVi instance.

    Also resets self.filename back to the default filename and fires
    the <<ClearData>> virtual event.
    """
    import os
    self.delete('1.0', 'end')
    self.filename = os.path.abspath(self.default_filename)
    self.event_generate('<<ClearData>>')
def load_data(self, filename):
    """
    It dumps all text from a file into an AreaVi instance.

    filename - Name of the file.

    The data is decoded with self.charset; on UnicodeDecodeError the
    raw data is inserted and self.charset is cleared. Fires
    <<LoadData>> and a mimetype-specific <<Load-...>> event.
    """
    import os
    filename = os.path.abspath(filename)
    self.filename = filename
    # Fix: close the file handle even if read() raises.
    fd = open(filename, 'r')
    try:
        data = fd.read()
    finally:
        fd.close()
    try:
        data = data.decode(self.charset)
    except UnicodeDecodeError:
        self.charset = ''
    self.delete('1.0', 'end')
    self.insert('1.0', data)
    self.event_generate('<<LoadData>>')
    # Renamed local from `type` to avoid shadowing the builtin.
    mime, _ = mimetypes.guess_type(self.filename)
    self.event_generate('<<Load-%s>>' % mime)
def decode(self, name):
    """
    Set the charset used for decoding and reload the current file
    with it.
    """
    self.charset = name
    self.load_data(self.filename)
def save_data(self):
    """
    It saves the actual text content in the current file.

    The data is encoded with self.charset before writing. Fires
    <<SaveData>> and a mimetype-specific <<Save-...>> event.
    """
    data = self.get('1.0', 'end')
    data = data.encode(self.charset)
    # Fix: close the file handle even if write() raises.
    fd = open(self.filename, 'w')
    try:
        fd.write(data)
    finally:
        fd.close()
    self.event_generate('<<SaveData>>')
    # Renamed local from `type` to avoid shadowing the builtin.
    mime, _ = mimetypes.guess_type(self.filename)
    self.event_generate('<<Save-%s>>' % mime)
def save_data_as(self, filename):
    """
    It saves the content of the given AreaVi instance into
    a file whose name is specified in filename.

    filename - Name of the file to save the data.

    Note: this also rebinds self.filename, so later saves go to the
    new file.
    """
    self.filename = filename
    self.save_data()
def tag_contains(self, name, index0, index1):
    """
    Return the (start, end) pair of the first range of tag `name` that
    fully contains [index0, index1], or an empty tuple if none does.

    tag_ranges(name) yields a flat list of alternating start/end
    indexes; pairs are walked two at a time.
    """
    ls = self.tag_ranges(name)
    for ind in xrange(0, len(ls) - 1, 2):
        index2 = ls[ind].string
        index3 = ls[ind + 1].string
        r1 = self.compare(index2, '<=', index0)
        r2 = self.compare(index3, '>=', index1)
        if r1 and r2: return index2, index3
    return ()
def tag_sub_ranges(self, name, data, index0='1.0', index1='end'):
    """
    It replaces ranges of text delimited by tag between index0
    and index1 for data.

    Each tagged range is deleted and `data` is inserted at its start;
    the loop re-queries tag_nextrange so ranges are consumed in order.
    """
    while 1:
        map = self.tag_nextrange(name, index0, index1)
        if not map: break
        index3, index4 = map
        self.delete(index3, index4)
        self.insert(index3, data)
def tag_delete_ranges(self, name, *args):
    """
    It deletes ranges of text that are mapped to tag name.

    Thin wrapper: substitutes the empty string over every range of the
    tag. *args are forwarded as (index0, index1) to tag_sub_ranges.
    """
    self.tag_sub_ranges(name, '', *args)
def tag_get_ranges(self, name, sep=''):
"""
It should be built from get_slices_data
"""
data = ''
for ind in self.tag_get_data(name):
data = data + ind + sep
return data
def tag_get_data(self, name):
    """
    It returns an iterator with the text inside tag name.

    NOTE(review): the bare except silently yields nothing when
    tag_ranges fails — confirm this best-effort behavior is intended.
    """
    try:
        map = self.tag_ranges(name)
    except Exception:
        pass
    else:
        # Ranges come as a flat start/end list; step through in pairs.
        for ind in xrange(0, len(map) - 1, 2):
            data = self.get(map[ind], map[ind + 1])
            yield(data)
def mark_set_next(self, tag, mark):
    """
    Move `mark` to the start of the next range of `tag` after the
    mark's current position; no-op when there is no next range.
    """
    next_tag = self.tag_nextrange(tag, '%s +1c' % mark)
    if next_tag:
        self.mark_set(mark, next_tag[0])
def mark_set_prev(self, tag, mark):
    """
    Move `mark` to the start of the previous range of `tag` before the
    mark's current position; no-op when there is no previous range.
    """
    prev_tag = self.tag_prevrange(tag, mark)
    if prev_tag:
        self.mark_set(mark, prev_tag[0])
def tag_prev_occur(self, tag_names, index0, index1, default):
    """
    Return the END index of the closest previous range of any tag in
    tag_names between index0 and index1, or `default` if none exists.
    Tags are tried in the given order; the first hit wins.
    """
    for ind in tag_names:
        pos = self.tag_prevrange(ind, index0, index1)
        if pos: return pos[1]
    return default
def tag_next_occur(self, tag_names, index0, index1, default):
    """
    Return the START index of the closest next range of any tag in
    tag_names between index0 and index1, or `default` if none exists.
    Tags are tried in the given order; the first hit wins.
    """
    for ind in tag_names:
        pos = self.tag_nextrange(ind, index0, index1)
        if pos: return pos[0]
    return default
@staticmethod
def get_all_areavi_instances(wid):
    """
    Recursively yield every AreaVi widget in the tree rooted at wid.
    Non-AreaVi children are descended into, not yielded.
    """
    for ind in wid.winfo_children():
        if isinstance(ind, AreaVi):
            yield ind
        else:
            for ind in AreaVi.get_all_areavi_instances(ind):
                yield ind
@staticmethod
def get_opened_files(wid):
map = dict()
for ind in AreaVi.get_all_areavi_instances(wid):
map[ind.filename] = ind
return map
@staticmethod
def find_on_all(wid, chunk):
    """
    Search `chunk` in every AreaVi instance under wid; yield
    (areavi_instance, match) pairs where match is a (text, start, end)
    triple from AreaVi.find.
    """
    for indi in AreaVi.get_all_areavi_instances(wid):
        it = indi.find(chunk, '1.0', 'end')
        for indj in it:
            yield indi, indj
def get_cursor_word(self):
    """
    Return (word, index) for the chunk between the last space (or the
    line start) and the cursor.

    NOTE(review): two branches return '' (a plain string) while the
    success path returns a tuple; callers that unpack the result, e.g.
    match_word, would fail on the '' paths — confirm intended.
    """
    if self.compare('insert', '==', 'insert linestart'):
        return ''
    index = self.search(' ', 'insert',
                        stopindex='insert linestart',regexp=True,
                        backwards=True)
    if not index: index = 'insert linestart'
    else: index = '%s +1c' % index
    if self.compare(index, '==', 'insert'): return ''
    data = self.get(index, 'insert')
    return data, index
def match_word(self, wid, delim=' '):
    """
    Yield (chunk, index) pairs for words in all AreaVi instances under
    wid that start with the word behind the cursor.

    NOTE(review): assumes get_cursor_word returned a tuple; it may
    return '' and this unpack would raise — confirm callers guard.
    delim is currently unused.
    """
    data, index = self.get_cursor_word()
    for area, (chk, pos0, pos1) in self.find_on_all(wid, '[^ ]*%s[^ ]+' % data):
        yield chk, index
def complete_word(self, wid):
    """
    Generator-driven word completion: each next() replaces the word
    behind the cursor with the next distinct match found across all
    AreaVi instances under wid. `table` suppresses duplicates.
    """
    seq = self.match_word(wid)
    table = []
    for data, index in seq:
        if not data in table:
            table.append(data)
        else:
            continue
        self.delete(index, 'insert')
        self.insert(index, data)
        yield
Changelog: improved the AreaVi.select_word method.
"""
"""
from Tkinter import *
from re import escape
import mimetypes
class AreaVi(Text):
    # The most recently created/activated AreaVi instance; plugins use
    # this as the target widget for commands (see AreaVi.active).
    ACTIVE = None
    def __init__(self, default_filename, *args, **kwargs):
        """
        This class receives all Text widget arguments
        and one named default_filename which means
        the filename that is saved when no filename
        is specified.

        default_filename:
            The default path file where contents are saved.

        It implements a system of modes to handle
        tkinter keypresses events.
        The method hook can be used to bind events
        to callbacks mapped to specific modes.
        """
        Text.__init__(self, *args, **kwargs)
        # Maps mode id -> whether default Text bindings propagate.
        self.setup = dict()
        # Maybe it should be?
        # abspath(default_filename)
        self.default_filename = default_filename
        # The file's path and name.
        self.filename = default_filename
        # Shouldn't it be LAST_COL and MSEL?
        # Mark remembering the column used by up/down cursor movement.
        self.last_col = '_last_col_'
        self.mark_set(self.last_col, '1.0')
        # This mark is used in AreaVi.replace_all.
        self.STOP_REPLACE_INDEX = '_stop_replace_index_'
        # Tags have name starting and ending with __
        # def cave(event):
        #     AreaVi.ACTIVE = event.widget
        # self.hook(-1, '<FocusIn>', cave)
        AreaVi.ACTIVE = self
        # Charset used by load_data/save_data to decode/encode files.
        self.charset = 'utf-8'
def active(self):
    """
    It is used to create a model of target for plugins
    defining python functions to run on fly.

    With such an abstraction it is possible to define a AreaVi instance target
    that python code will act on.

    Sets the class-level AreaVi.ACTIVE to this instance.
    """
    AreaVi.ACTIVE = self
def chmode(self, id):
    """
    This function is used to change the AreaVi instance's mode.
    It receives one parameter named id which means the
    mode number.

    Rewrites the widget's bindtags so only the chosen mode's bind
    class receives events; when the mode was registered with opt=True
    the default 'Text' class bindings also stay active.
    """
    opt = self.setup[id]
    self.id = id
    # Per-widget bind-class names derived from the widget path.
    MODE_X = 'mode%s-1' % self
    MODE_Y = 'mode%s%s' % (self, id)
    if opt: self.bindtags((MODE_X, MODE_Y, self, 'Text', '.'))
    else: self.bindtags((MODE_X, MODE_Y, self, '.'))
def add_mode(self, id, opt=False):
    """
    It adds a new mode. The opt argument means whether
    it should propagate the event to the internal text widget callbacks.

    id  - The mode identifier used later by chmode/hook.
    opt - True keeps default 'Text' bindings active in this mode.
    """
    self.setup[id] = opt
def del_mode(self, id):
    """
    It performs the opposite of add_mode.

    Raises KeyError when the mode id was never added.
    """
    del self.setup[id]
def hook(self, id, seq, callback):
    """
    This method is used to hook a callback to a sequence
    specified with its mode. The standard modes are insert and selection.
    The insert mode prints the key character on the text area.

    id       - Mode identifier the binding belongs to.
    seq      - Tk event sequence, e.g. '<Key-a>'.
    callback - Called with the event object.
    """
    MODE_Y = 'mode%s%s' % (self, id)
    self.bind_class(MODE_Y, seq, callback, add=True)
def unhook(self, id, seq, callback=None):
    """
    It performs the opposite of hook: removes the binding for seq in
    the given mode.

    Note: all callbacks bound to seq in that mode are removed; the
    callback argument is accepted for symmetry but unused.
    """
    MODE_Y = 'mode%s%s' % (self, id)
    self.unbind_class(MODE_Y, seq)
def install(self, *args):
    """
    It is like self.hook but accepts
    a sequence of (id, seq, callback).
    """
    for id, seq, callback in args:
        self.hook(id, seq, callback)
def uninstall(self, *args):
    """
    It is like self.unhook but accepts a sequence of
    (id, seq, callback) tuples.
    """
    for id, seq, callback in args:
        self.unhook(id, seq, callback)
def append(self, data):
    """
    Insert data at the end of the text, move the cursor there and
    scroll it into view.
    """
    self.insert('end', data)
    self.mark_set('insert', 'end')
    self.see('end')
def curline(self):
    """
    A short hand for.
    area.get('insert linestart', 'insert +1l linestart')

    Returns the cursor's whole line including the trailing newline.
    """
    return self.get('insert linestart', 'insert +1l linestart')
def tag_update(self, name, index0, index1, *args):
    """
    It removes a given tag from index0 to index1 and re adds
    the tag to the ranges of text delimited in args.

    Example:

    DATA_X = 'It is black.\n'
    DATA_Y = 'It is blue.\n'
    text = Text()
    text.pack()
    text.insert('1.0', DATA_X)
    text.insert('2.0', DATA_Y)
    text.tag_add('X', '1.0', '1.0 lineend')
    text.tag_add('Y', '2.0', '2.0 lineend')
    text.tag_config('X', background='black')
    text.tag_config('Y', foreground='blue')
    text.tag_update('X', '1.0', 'end', ('2.0', '2.0 lineend'))

    It removes the X tag from '1.0' to 'end' then adds
    the X tag to the range '2.0' '2.0 lineend'.
    """
    self.tag_remove(name, index0, index1)
    for indi, indj in args:
        self.tag_add(name, indi, indj)
def insee(self, index, data):
    """
    Insert data at index, then scroll the cursor position into view.
    """
    self.insert(index, data)
    self.see('insert')
def cmd_like(self):
    """
    Cut the cursor's line content (without newline) out of the widget
    and return it — used to read a typed command off the line.
    """
    data = self.get('insert linestart', 'insert lineend')
    self.delete('insert linestart', 'insert lineend')
    return data
def indref(self, index):
    """
    This is a short hand function.
    It is used to convert a Text index
    into two integers.

    Ex:

    a, b = area.indref('insert')

    Now, a and b can be manipulated
    as numbers.

    The index is first normalized through self.index, so symbolic
    indexes like 'insert' are accepted.
    """
    a, b = self.index(index).split('.')
    return int(a), int(b)
def setcur(self, line, col):
    """
    It is used to set the cursor position at
    a given index using line and col.

    line is a number which represents
    a given line index in the AreaVi instance.
    col is a column.

    Also scrolls the cursor into view.
    """
    self.mark_set('insert', '%s.%s' % (line, col))
    self.see('insert')
def setcurl(self, line):
    """
    set cursor line.

    It is used to set the cursor position at a given
    line. It sets the cursor at line.0 position.
    """
    self.mark_set('insert', '%s.%s' % (line, '0'))
    self.see('insert')
def indint(self, index):
"""
This method is used
when i can't use self.indref.
It seems self.indref returns
2.10 when the input is 2.34
it happens when the index col
is longer than the actual line col.
"""
a, b = index.split('.')
return int(a), int(b)
def indcol(self):
    """
    This is a short hand method for getting
    the last col in which the cursor was in.

    It is useful when implementing functions to
    select pieces of text.

    Reads the self.last_col mark maintained by left/right/setcol.
    """
    a, b = self.indref(self.last_col)
    return int(a), int(b)
def setcol(self, line, col):
    """
    It sets the mark used by the arrows
    keys and selection state.
    """
    self.mark_set(self.last_col, '%s.%s' % (line, col))
def indcur(self):
    """
    It returns the actual line, col for the
    cursor position. So, the values can be
    manipulated with integers.
    """
    a, b = self.indref('insert')
    return int(a), int(b)
def seecur(self):
    """
    Just a shorthand for area.see('insert')
    which makes the cursor visible wherever it is in.
    """
    self.see('insert')
def inset(self, index):
    """
    Just a shorthand for area.mark_set('insert', index)
    so we spare some typing.
    """
    self.mark_set('insert', index)
def is_end(self):
    """
    Return True while the cursor is NOT yet on the last line, i.e.
    there is still room to move down.

    (Corrected docstring: the code compares with '!=', so despite the
    name this is "not at end". Callers such as down() rely on that.)

    This is useful when implementing other methods.
    Like those from visual block selection to avoid
    the cursor jumping to odd places when it achieves
    the end of the text region.
    """
    # I have to use 'end -1l linestart' since it seems the 'end' tag
    # corresponds to a one line after the last visible line.
    # So last line lineend != 'end'.
    return self.compare('insert linestart', '!=', 'end -1l linestart')
def is_start(self):
    """
    Return True while the cursor is NOT on the first line, i.e. there
    is still room to move up.

    (Corrected docstring: the code compares with '!=' against '1.0',
    so despite the name this is "not at start". up() relies on that.)
    """
    return self.compare('insert linestart', '!=', '1.0')
def down(self):
    """
    It sets the cursor position one line down.

    The target column is the remembered last_col, so repeated up/down
    movement keeps the original column.
    """
    if self.is_end():
        # We first check if it is at the end
        # so we avoid the cursor jumping at odd positions.
        a, b = self.indcol()
        c, d = self.indcur()
        self.setcur(c + 1, b)
def up(self):
    """
    It sets the cursor one line up.

    The target column is the remembered last_col, so repeated up/down
    movement keeps the original column.
    """
    if self.is_start():
        a, b = self.indcol()
        c, d = self.indcur()
        self.setcur(c - 1, b)
def left(self):
    """
    It moves the cursor one character left.
    """
    self.mark_set('insert', 'insert -1c')
    # The mark used by self.down, self.up.
    self.mark_set(self.last_col, 'insert')
def right(self):
    """
    It moves the cursor one character right.
    """
    self.mark_set('insert', 'insert +1c')
    # The mark used by self.down, self.up.
    self.mark_set(self.last_col, 'insert')
def start_selection(self):
    """
    It sets the mark sel_start to the insert position.
    So, when sel_up, sel_down, sel_right, sel_left are
    called then they will select a region from this mark.
    """
    self.mark_set('_sel_start_', 'insert')
def start_block_selection(self):
    """
    Anchor mark for block (rectangular) selection; block_up/down/
    left/right extend the block from this mark.
    """
    self.mark_set('_block_sel_start_', 'insert')
def is_add_up(self, index):
    """
    It checks whether the selection must be
    removed or added.

    If it returns True then the selection must be
    removed. True means that the '_sel_start_'
    mark is positioned above the cursor position.
    So, it must remove the selection instead of
    adding it.
    """
    return self.compare('%s linestart' % index, '<=', 'insert linestart')
def rmsel(self, index0, index1):
    """
    This method is a short hand for area.tag_remove('sel', index0, index1)

    The indexes are normalized so index0/index1 can be in any order.
    """
    index2 = self.min(index0, index1)
    index3 = self.max(index0, index1)
    self.tag_remove('sel', index2, index3)
def addsel(self, index0, index1):
    """
    It adds 'sel' to the range (AreaVi.min(index0, index1),
    AreaVi.max(index0, index1))

    The indexes are normalized so index0/index1 can be in any order.
    """
    index2 = self.min(index0, index1)
    index3 = self.max(index0, index1)
    self.tag_add('sel', index2, index3)
def min(self, index0, index1):
"""
It returns the min between index0 and index1.
"""
if self.compare(index0, '<=', index1):
return index0
else:
return index1
def max(self, index0, index1):
"""
It returns the max between index0 and index1.
"""
if self.compare(index0, '<=', index1):
return index1
else:
return index0
def sel_up(self):
    """
    It adds 'sel' one line up the 'insert' position
    and sets the cursor one line up.

    Implemented as: drop the current anchored range, move, re-add —
    which both grows and shrinks the selection correctly.
    """
    index0 = self.min('_sel_start_', 'insert')
    index1 = self.max('_sel_start_', 'insert')
    self.rmsel(index0, index1)
    self.up()
    index0 = self.min('_sel_start_', 'insert')
    index1 = self.max('_sel_start_', 'insert')
    self.addsel(index0, index1)
def is_add_down(self, index):
    """
    It returns True if the cursor is positioned below
    the initial mark for selection.

    It determins if the selection must be removed or added when
    sel_down is called.
    """
    return self.compare('%s linestart' % index, '>=', 'insert linestart')
def sel_down(self):
    """
    It adds or removes selection one line down.

    Implemented as: drop the current anchored range, move, re-add.
    """
    index0 = self.min('_sel_start_', 'insert')
    index1 = self.max('_sel_start_', 'insert')
    self.rmsel(index0, index1)
    self.down()
    index0 = self.min('_sel_start_', 'insert')
    index1 = self.max('_sel_start_', 'insert')
    self.addsel(index0, index1)
def is_add_right(self, index):
    """
    It returns True if the cursor is positioned at the left
    of the initial selection mark. It is useful for sel_right method.
    """
    return self.compare(index, '>=', 'insert')
def sel_right(self):
    """
    It adds or removes selection one character right.

    Implemented as: drop the current anchored range, move, re-add.
    """
    index0 = self.min('_sel_start_', 'insert')
    index1 = self.max('_sel_start_', 'insert')
    self.rmsel(index0, index1)
    self.right()
    index0 = self.min('_sel_start_', 'insert')
    index1 = self.max('_sel_start_', 'insert')
    self.addsel(index0, index1)
def is_add_left(self, index):
    """
    It returns True if the cursor is positioned at the right of
    the initial mark selection.
    """
    return self.compare(index, '<=', 'insert')
def sel_left(self):
    """
    It adds or removes selection one character left.

    Implemented as: drop the current anchored range, move, re-add.
    """
    index0 = self.min('_sel_start_', 'insert')
    index1 = self.max('_sel_start_', 'insert')
    self.rmsel(index0, index1)
    self.left()
    index0 = self.min('_sel_start_', 'insert')
    index1 = self.max('_sel_start_', 'insert')
    self.addsel(index0, index1)
def indmsel(self):
    """
    It is just a shorthand for getting the last selection mark
    ('_sel_start_') as a (line, col) integer pair.
    """
    a, b = self.indref('_sel_start_')
    return int(a), int(b)
def addblock(self, index0, index1):
"""
It adds block selection from index0 to index1.
"""
index2 = self.min(index0, index1)
index3 = self.max(index0, index1)
a, b = self.indint(index2)
c, d = self.indint(index3)
for ind in xrange(a, c + 1):
e = min(b, d)
f = max(b, d)
self.addsel('%s.%s' % (ind, e), '%s.%s' % (ind, f))
def rmblock(self, index0, index1):
"""
It removes block selection from index0 to index1.
"""
index2 = self.min(index0, index1)
index3 = self.max(index0, index1)
a, b = self.indint(index2)
c, d = self.indint(index3)
for ind in xrange(a, c + 1):
e = min(b, d)
f = max(b, d)
self.rmsel('%s.%s' % (ind, e), '%s.%s' % (ind, f))
def block_down(self):
    """
    It adds or removes block selection one line down.

    The block is re-anchored at '_block_sel_start_': drop the old
    rectangle, move the cursor, add the new one.
    """
    a, b = self.indcol()
    c, d = self.indcur()
    index = self.index('_block_sel_start_')
    self.rmblock(index, '%s.%s' % (c, b))
    self.down()
    a, b = self.indcol()
    c, d = self.indcur()
    self.addblock(index, '%s.%s' % (c, b))
def block_up(self):
    """
    It adds or removes block selection one line up.

    The block is re-anchored at '_block_sel_start_': drop the old
    rectangle, move the cursor, add the new one.
    """
    a, b = self.indcol()
    c, d = self.indcur()
    index = self.index('_block_sel_start_')
    self.rmblock(index, '%s.%s' % (c, b))
    self.up()
    a, b = self.indcol()
    c, d = self.indcur()
    self.addblock(index, '%s.%s' % (c, b))
def is_line_start(self):
    """
    Return True while the cursor is NOT at the start of its line,
    i.e. there is still room to move left within the line.

    (Corrected docstring: the code compares with '!='.)
    """
    return self.compare('insert', '!=', 'insert linestart')
def block_left(self):
    """
    It adds block selection to the left.

    The block is re-anchored at '_block_sel_start_': drop the old
    rectangle, move the cursor, add the new one.
    """
    a, b = self.indcol()
    c, d = self.indcur()
    index = self.index('_block_sel_start_')
    self.rmblock(index, '%s.%s' % (c, b))
    self.left()
    a, b = self.indcol()
    c, d = self.indcur()
    self.addblock(index, '%s.%s' % (c, b))
def is_line_end(self):
    """
    Return True while the cursor is NOT at the end of its line,
    i.e. there is still room to move right within the line.

    (Corrected docstring: the code compares with '!='.)
    """
    return self.compare('insert', '!=', 'insert lineend')
def block_right(self):
    """
    It adds a block selection to the right.

    The block is re-anchored at '_block_sel_start_': drop the old
    rectangle, move the cursor, add the new one.
    """
    a, b = self.indcol()
    c, d = self.indcur()
    index = self.index('_block_sel_start_')
    self.rmblock(index, '%s.%s' % (c, b))
    self.right()
    a, b = self.indcol()
    c, d = self.indcur()
    self.addblock(index, '%s.%s' % (c, b))
def clear_selection(self):
    """
    It removes 'sel' tag from all ranges.

    The except guards the case where nothing is selected, in which
    'sel.first'/'sel.last' are invalid indexes and Tk raises.
    """
    try:
        self.tag_remove('sel', 'sel.first', 'sel.last')
    except Exception:
        pass
def select_char(self):
    """
    It adds 'sel' to the single char ahead of the cursor position.
    """
    self.addsel('insert', 'insert +1c')
def unselect_char(self):
    """
    It removes 'sel' from the single char ahead of the cursor position.
    """
    self.rmsel('insert', 'insert +1c')
def clchar(self):
    """
    It deletes a char from the cursor position.

    An undo separator is added first so the deletion is its own
    undo step.
    """
    self.edit_separator()
    self.delete('insert', 'insert +1c')
def do_undo(self):
    """
    It does undo.

    TclError is raised by Tk when the undo stack is empty; it is
    deliberately ignored.
    """
    try:
        self.edit_undo()
    except TclError:
        pass
def do_redo(self):
    """
    It redoes.

    TclError is raised by Tk when the redo stack is empty; it is
    deliberately ignored.
    """
    try:
        self.edit_redo()
    except TclError:
        pass
def sel_text_start(self):
    """
    It selects all text from insert position to the start position
    of the text.

    The cursor is moved to the start as a side effect.
    """
    index = self.index('insert')
    self.go_text_start()
    self.addsel(index, 'insert')
def sel_text_end(self):
    """
    It selects all text from the insert position to the end of the text.

    The cursor is moved to the end as a side effect.
    """
    index = self.index('insert')
    self.go_text_end()
    self.addsel(index, 'insert')
def go_text_start(self):
    """
    It goes to the first position in the text.
    """
    self.mark_set('insert', '1.0')
    self.see('insert')
def go_text_end(self):
    """
    It goes to the end of the text.

    'end linestart' is used rather than 'end' because Tk's 'end' sits
    one line past the last visible line.
    """
    self.mark_set('insert', 'end linestart')
    self.see('insert')
def sel_line_start(self):
    """
    It adds selection from the insert position to the
    start of the line.

    The cursor is moved to the line start as a side effect.
    """
    index = self.index('insert')
    self.go_line_start()
    self.addsel(index, 'insert')
def sel_line_end(self):
    """
    It selects all text from insert position to the end of the line.

    The cursor is moved to the line end as a side effect.
    """
    index = self.index('insert')
    self.go_line_end()
    self.addsel(index, 'insert')
def go_line_start(self):
    """
    It goes to the beginning of the cursor position line.
    """
    self.mark_set('insert', 'insert linestart')
def go_line_end(self):
    """
    It goes to the end of the cursor position line.
    """
    self.mark_set('insert', 'insert lineend')
def go_next_word(self):
    """
    It puts the cursor on the beginning of the next word.

    '\M' is Tcl regex syntax matching the end-of-word boundary.
    """
    self.seek_next_down('\M')
def go_prev_word(self):
    """
    It puts the cursor in the beginning of the previous word.

    '\M' is Tcl regex syntax matching the end-of-word boundary.
    """
    self.seek_next_up('\M')
def go_next_sym(self, chars):
"""
It puts the cursor on the next occurency of the symbols in chars.
"""
chars = map(lambda ind: escape(ind), chars)
REG = '|'.join(chars)
self.seek_next_down(REG)
def go_prev_sym(self, chars):
"""
It puts the cursor on the previous occurency of:
"""
chars = map(lambda ind: escape(ind), chars)
REG = '|'.join(chars)
self.seek_next_up(REG)
def cllin(self):
    """
    It deletes the cursor position line, makes the cursor visible
    and adds a separator to the undo stack.
    """
    self.edit_separator()
    self.delete('insert linestart', 'insert +1l linestart')
    self.see('insert')
def cpsel(self):
    """
    It copies to the clip board ranges of text
    that are selected and removes the selection.
    """
    data = self.tag_get_ranges('sel')
    self.clipboard_clear()
    self.clipboard_append(data)
    self.tag_remove('sel', 'sel.first', 'sel.last')
def cpblock(self):
    """
    It copies blocks of text that are selected
    with a separator '\n'.
    """
    data = self.tag_get_ranges('sel', '\n')
    self.clipboard_clear()
    self.clipboard_append(data)
    self.tag_remove('sel', 'sel.first', 'sel.last')
def ctblock(self):
    """
    It cuts blocks of text with a separator '\n'.

    The selected chunks go to the clipboard and the tagged ranges
    are deleted as a single undo step.
    """
    data = self.tag_get_ranges('sel', '\n')
    self.clipboard_clear()
    self.clipboard_append(data)
    self.edit_separator()
    self.tag_delete_ranges('sel')
def ctsel(self):
    """
    It cuts the selected text.

    The selected chunks go to the clipboard and the tagged ranges
    are deleted as a single undo step.
    """
    data = self.tag_get_ranges('sel')
    self.clipboard_clear()
    self.clipboard_append(data)
    self.edit_separator()
    self.tag_delete_ranges('sel')
def clsel(self):
    """
    It deletes all selected text (without touching the clipboard),
    as a single undo step.
    """
    self.edit_separator()
    self.tag_delete_ranges('sel')
def ptsel(self):
    """
    It pastes over the cursor position data from the clipboard
    and adds a separator.
    """
    data = self.clipboard_get()
    self.edit_separator()
    self.insert('insert', data)
def ptsel_after(self):
    """
    It pastes one line after the cursor position data from clipboard
    and adds a separator.
    """
    data = self.clipboard_get()
    self.edit_separator()
    self.insert('insert +1l linestart', data)
def ptsel_before(self):
    """
    It pastes data from the cursor position one line before the cursor
    position and adds a separator.
    """
    data = self.clipboard_get()
    self.edit_separator()
    self.insert('insert linestart', data)
def select_line(self):
    """
    It adds selection to the cursor position line, including the
    trailing newline.
    """
    self.tag_add('sel', 'insert linestart', 'insert +1l linestart')
def unselect_line(self):
    """
    It removes selection from the cursor position line, including the
    trailing newline.
    """
    self.tag_remove('sel', 'insert linestart', 'insert +1l linestart')
def toggle_line_selection(self):
    """
    Toggle the selection state of the cursor's line: unselect it when
    it is already fully inside a 'sel' range, select it otherwise.
    """
    map = self.tag_contains('sel', 'insert linestart', 'insert +1l linestart')
    if map:
        self.unselect_line()
    else:
        self.select_line()
def select_word(self):
    """
    It selects a word from the cursor position.

    The word bounds are found by searching for a non-word char ('\W')
    backward and forward within the line; missing bounds fall back to
    line start/end.
    """
    index1 = self.search('\W', 'insert', regexp=True, stopindex='insert linestart', backwards=True)
    index2 = self.search('\W', 'insert', regexp=True, stopindex='insert lineend')
    self.tag_add('sel', 'insert linestart' if not index1 else '%s +1c' % index1,
                 'insert lineend' if not index2 else index2)
def scroll_line_up(self):
    """
    It scrolls one line up

    If scrolling pushed the cursor out of view, the cursor is dragged
    one line up to keep it visible.
    """
    self.yview(SCROLL, -1, 'units')
    # dlineinfo returns None for lines not currently displayed.
    is_visible = self.dlineinfo('insert')
    if not is_visible:
        self.mark_set('insert', 'insert -1l')
def scroll_line_down(self):
    """
    It scrolls one line down.

    If scrolling pushed the cursor out of view, the cursor is dragged
    one line down to keep it visible.
    """
    self.yview(SCROLL, 1, 'units')
    # dlineinfo returns None for lines not currently displayed.
    is_visible = self.dlineinfo('insert')
    if not is_visible:
        self.mark_set('insert', 'insert +1l')
def scroll_page_down(self):
    """
    It goes one page down.

    The cursor is then placed on the top-left visible character
    ('@0,0').
    """
    self.yview(SCROLL, 1, 'page')
    self.mark_set('insert', '@0,0')
def scroll_page_up(self):
    """
    It goes one page up.

    The cursor is then placed on the top-left visible character
    ('@0,0').
    """
    self.yview(SCROLL, -1, 'page')
    self.mark_set('insert', '@0,0')
def insert_line_down(self):
    """
    It inserts one line down from the cursor position.

    The cursor is moved onto the new empty line, as a single undo step.
    """
    self.edit_separator()
    self.insert('insert +1l linestart', '\n')
    self.mark_set('insert', 'insert +1l linestart')
    self.see('insert')
def select_all(self):
    """
    It selects all text.
    """
    self.tag_add('sel', '1.0', 'end')
def insert_line_up(self):
    """
    It inserts one line up.

    The cursor is moved onto the new empty line, as a single undo step.
    """
    self.edit_separator()
    self.insert('insert linestart', '\n')
    self.mark_set('insert', 'insert -1l linestart')
    self.see('insert')
def shift_sel_right(self, width, char):
    """
    Indent every line touched by the selection: prepend width * char
    to each line from sel.first's line to sel.last's line.
    """
    srow, scol = self.indref('sel.first')
    erow, ecol = self.indref('sel.last')
    self.shift_right(srow, erow, width, char)
def shift_sel_left(self, width):
    """
    Unindent every line touched by the selection: remove the first
    `width` chars of each line from sel.first's line to sel.last's
    line.
    """
    srow, scol = self.indref('sel.first')
    erow, ecol = self.indref('sel.last')
    self.shift_left(srow, erow, width)
def shift_right(self, srow, erow, width, char):
    """
    Given a start row and a end row it shifts
    a block of text to the right.

    This is specially useful when working with
    source code files. The whole shift is one undo step.
    """
    self.edit_separator()
    for ind in xrange(srow, erow + 1):
        self.insert('%s.0' % ind, width * char)
def shift_left(self, srow, erow, width):
    """
    Given a start row and a end row it shifts
    a block of text to the left.

    This is specially useful when working with
    source code files. The whole shift is one undo step.
    Note: removes the first `width` chars unconditionally, whatever
    they are.
    """
    self.edit_separator()
    for ind in xrange(srow, erow + 1):
        self.delete('%s.0' % ind, '%s.%s' % (ind, width))
def tag_find_ranges(self, name, regex, *args, **kwargs):
    """
    It returns an interator corresponding to calling AreaVi.find
    between the ranges of the tag specified by name.

    You shouldn't delete or insert data while performing this operation.
    """
    # It should be built on top of nextrange.
    map = self.tag_ranges(name)
    # Ranges come as a flat start/end list; step through in pairs.
    for indi in range(0, len(map) - 1, 2):
        seq = self.find(regex, map[indi], map[indi + 1], *args, **kwargs)
        for indj in seq:
            yield indj
def tag_replace_ranges(self, name, regex, data, index='1.0', stopindex='end',
                       *args, **kwargs):
    """
    It replaces all occurrences of regex inside a tag ranges
    for data.

    name     - Name of the tag.
    regex    - The pattern.
    data     - The data to replace (may be a callable, see replace_all).
    args     - Arguments given to AreaVi.find.
    **kwargs - A dictionary of arguments given to AreaVi.find.
    """
    while True:
        map = self.tag_nextrange(name, index, stopindex)
        if not map: break
        index3, index4 = map
        # Resume the scan after this range before mutating the text.
        index = index4
        self.replace_all(regex, data, index3, index4, *args, **kwargs)
def tag_setup(self, theme):
    """
    Configure many tags at once from a theme mapping of
    tag name -> tag_config kwargs, e.g.:

        theme = {'tag_name': {'background': 'blue'}}

    Each tag is also lowered so 'sel' stays on top.
    """
    for name, kwargs in theme.iteritems():
        self.tag_config(name, **kwargs)
        self.tag_lower(name)
def tag_add_found(self, name, map):
    """
    It adds a tag to the match ranges from either AreaVi.find or
    AreaVi.tag_find_ranges.

    name - The tag to be added.
    map  - An iterator from AreaVi.find or AreaVi.tag_find_ranges.
    """
    for _, index0, index1 in map:
        self.tag_add(name, index0, index1)
def split_with_cond(self, regex, cond, *args, **kwargs):
    """
    It determines which chunks should be yielded based on cond.

    cond is called with (chunk, index0, index1); whatever truthy value
    it returns is yielded, falsy results are dropped.
    """
    for chk, index0, index1 in self.split(regex, *args, **kwargs):
        data = cond(chk, index0, index1)
        if data: yield data
def split(self, *args, **kwargs):
    """
    It splits the contents of the text widget into chunks based on a regex.

    Both the text between matches and the matches themselves are
    yielded, each as a (chunk, start, end) triple, in document order.
    """
    index0 = '1.0'
    for chk, index1, index2 in self.find(*args, **kwargs):
        if self.compare(index1, '>', index0):
            yield(self.get(index0, index1), index0, index1)
        yield(chk, index1, index2)
        index0 = index2
def find_with_cond(self, regex, cond, *args, **kwargs):
    """
    It determines which matches should be yielded.

    cond is called with (chunk, index0, index1); whatever truthy value
    it returns is yielded, falsy results are dropped.
    """
    for chk, index0, index1 in self.find(regex, *args, **kwargs):
        data = cond(chk, index0, index1)
        if not data: continue
        yield(data)
def find_one_by_line(self, regex, index, stopindex='end', exact=None, regexp=True, nocase=None,
                     elide=None, nolinestop=None):
    """
    Like AreaVi.find, but after each match the search resumes on the
    NEXT line, so at most one (chunk, start, end) match is yielded
    per line.
    """
    count = IntVar()
    while True:
        index = self.search(regex, index, stopindex, exact=exact, nocase=nocase,
                            nolinestop=nolinestop, regexp=regexp, elide=elide, count=count)
        if not index:
            break
        # Renamed local from `len` to avoid shadowing the builtin.
        size = count.get()
        tmp = '%s +%sc' % (index, size)
        chunk = self.get(index, tmp)
        pos0 = self.index(index)
        pos1 = self.index(tmp)
        # Skip the rest of this line.
        index = '%s +1l' % pos0
        yield(chunk, pos0, pos1)
def find(self, regex, index='1.0', stopindex='end', exact=None, regexp=True, nocase=None,
         elide=None, nolinestop=None):
    """
    It returns an iterator of matches. It is based on the Text.search method

    Each match is yielded as a (chunk, start, end) triple; the scan
    resumes one char past each match's end.
    """
    count = IntVar()
    while True:
        index = self.search(regex, index, stopindex, exact=exact, nocase=nocase,
                            nolinestop=nolinestop, regexp=regexp, elide=elide, count=count)
        if not index:
            break
        # Renamed local from `len` to avoid shadowing the builtin.
        size = count.get()
        tmp = '%s +%sc' % (index, size)
        chunk = self.get(index, tmp)
        pos0 = self.index(index)
        pos1 = self.index(tmp)
        index = '%s +1c' % tmp
        yield(chunk, pos0, pos1)
def search(self, pattern, index, stopindex=None, forwards=None,
           backwards=None, exact=None, regexp=None, nocase=None,
           count=None, elide=None, nolinestop=None):
    '''Standard search method, but with support for the nolinestop
    option which is new in tk 8.5 but not supported by tkinter out
    of the box.

    Returns the index of the match as a string, or '' when there is
    no match. When `count` (an IntVar) is given, the match length in
    chars is stored into it.
    '''
    # Build the raw Tcl 'search' command argument list by hand so the
    # -nolinestop switch can be passed through.
    args = [self._w, 'search']
    if forwards: args.append('-forwards')
    if backwards: args.append('-backwards')
    if exact: args.append('-exact')
    if regexp: args.append('-regexp')
    if nocase: args.append('-nocase')
    if elide: args.append('-elide')
    if nolinestop: args.append("-nolinestop")
    if count: args.append('-count'); args.append(count)
    # '--' keeps a pattern starting with '-' from being read as a switch.
    if pattern and pattern[0] == '-': args.append('--')
    args.append(pattern)
    args.append(index)
    if stopindex: args.append(stopindex)
    return str(self.tk.call(tuple(args)))
def seek_next_up(self, regex, index0='insert', stopindex='1.0', exact=None, regexp=True,
                 nocase=None, elide=None, nolinestop=None):
    """
    Find the next match with regex up the cursor.
    It sets the cursor at the index of the occurrence.

    Returns the (start, end) pair of the match, or None when there is
    no match.
    """
    count = IntVar()
    index = self.search(regex, index0, stopindex=stopindex, regexp=regexp, exact=exact,
                        nocase=nocase, elide=elide, nolinestop=nolinestop, backwards=True, count=count)
    if not index: return
    index1 = self.index('%s +%sc' % (index, count.get()))
    # Cursor lands on the match START when searching upwards.
    self.mark_set('insert', index)
    self.see('insert')
    return index, index1
def seek_next_down(self, regex, index0='insert', stopindex='end', exact=None, regexp=True,
                   nocase=None, elide=None, nolinestop=None):
    """
    Find the next match with regex down.
    It sets the cursor at the index of the occurrence.

    Returns the (start, end) pair of the match, or None when there is
    no match.
    """
    count = IntVar()
    index = self.search(regex, index0, stopindex=stopindex, regexp=regexp, exact=exact, nocase=nocase,
                        elide=elide, nolinestop=nolinestop, count=count)
    if not index: return
    index1 = self.index('%s +%sc' % (index, count.get()))
    # Cursor lands on the match END when searching downwards.
    self.mark_set('insert', index1)
    self.see('insert')
    return index, index1
def pick_next_up(self, name, *args, **kwargs):
    """
    Like seek_next_up, but also tags the matched range with `name`.
    Returns the (start, end) pair, or None when there is no match.
    """
    index = self.seek_next_up(*args, **kwargs)
    if not index:
        return
    self.tag_add(name, *index)
    return index
def pick_next_down(self, name, *args, **kwargs):
    """
    Like seek_next_down, but also tags the matched range with `name`.
    Returns the (start, end) pair, or None when there is no match.
    """
    index = self.seek_next_down(*args, **kwargs)
    if not index:
        return
    self.tag_add(name, *index)
    return index
def replace(self, regex, data, index=None, stopindex=None, forwards=None,
            backwards=None, exact=None, regexp=True, nocase=None, elide=None, nolinestop=None):
    """
    It is used to replace occurrences of a given match.

    It is possible to use a callback function to return what is replaced
    as well: when `data` is callable it is invoked with the match's
    (start, end) indexes and must return the replacement string.

    Returns (match_start_index, len(replacement)) or None when there
    is no match.
    """
    count = IntVar()
    index = self.search(regex, index, stopindex, forwards=forwards, backwards=backwards, exact=exact, nocase=nocase,
                        nolinestop=nolinestop, regexp=regexp, elide=elide, count=count)
    if not index: return
    if callable(data): data = data(index, self.index('%s +%sc' % (index, count.get())))
    index0 = self.index('%s +%sc' % (index, count.get()))
    self.delete(index, index0)
    self.insert(index, data)
    return index, len(data)
def replace_all(self, regex, data, index='1.0', stopindex='end', exact=None, regexp=True, nocase=None,
                elide=None, nolinestop=None):
    """
    It is used to replace all occurrences of a given match in a range.
    It accepts a callback function that determines what is replaced
    (see AreaVi.replace).
    """
    # It is needed because the range will grow
    # when data is inserted, the intent is searching
    # over a pre defined range. A mark (unlike a literal index)
    # keeps tracking the original stop position across edits.
    self.mark_set(self.STOP_REPLACE_INDEX, stopindex)
    while True:
        map = self.replace(regex, data, index, self.STOP_REPLACE_INDEX, exact=exact, nocase=nocase,
                           nolinestop=nolinestop, regexp=regexp, elide=elide)
        if not map: return
        index, size = map
        # Resume just past the inserted replacement.
        index = self.index('%s +%sc' % (index, size))
def get_paren_search_dir(self, start, end):
    """
    Return the search direction for paren matching based on the char
    under the cursor: False (search forward) when it is `start`,
    True (search backward) when it is `end`, None when it is neither.
    """
    char = self.get('insert', 'insert +1c')
    if char == start:
        return False
    elif char == end:
        return True
    else:
        return None
def get_paren_search_sign(self, start, end):
    """
    Return the offset sign used to build the search stopindex:
    '+' (scan forward) when the char under the cursor is `start`,
    '-' (scan backward) when it is `end`, None when it is neither.
    """
    char = self.get('insert', 'insert +1c')
    if char == start:
        return '+'
    elif char == end:
        return '-'
    else:
        return None
def select_case_pair(self, pair, MAX=1500):
    """
    Select ('sel' tag) the region between the cursor and its matching
    pair character (e.g. parentheses).

    pair - A (start_char, end_char) sequence.
    MAX  - Maximum search distance in characters.
    """
    index = self.case_pair(MAX, *pair)
    if not index:
        return
    lo = self.min(index, 'insert')
    if self.compare(lo, '==', 'insert'):
        lo = '%s +1c' % lo
    hi = self.max(index, 'insert')
    # BUG FIX: the original re-adjusted `min` on this branch (copy-paste
    # error), leaving the selection end unadjusted when the cursor is at
    # the upper bound.
    if self.compare(hi, '==', 'insert'):
        hi = '%s +1c' % hi
    self.tag_add('sel', lo, hi)
def case_pair(self, max, start='(', end=')'):
    """
    Once this method is called, it returns an index for the next
    matching parenthesis or None if the char over the cursor
    isn't either '(' or ')'.

    max   - Maximum search distance in characters.
    start - The opening character (default '(').
    end   - The closing character (default ')').

    Returns '' (falsy) when the cursor is not on a pair character or
    no balanced match is found within `max` characters.
    """
    dir = self.get_paren_search_dir(start, end)
    # If dir is None then there is no match.
    if dir == None: return ''
    # Both chars are regex-escaped so '(' / ')' match literally.
    REG = '\%s|\%s' % (start, end)
    sign = self.get_paren_search_sign(start, end)
    # Running nesting balance: +1 for `start`, -1 for `end`.
    count = 0
    # If we are searching fowards we don't need
    # to add 1c.
    index = 'insert %s' % ('+1c' if dir else '')
    size = IntVar(0)
    while True:
        index = self.search(REG, index = index,
                            stopindex = 'insert %s%sc' % (sign, max),
                            count = size,
                            backwards = dir,
                            regexp = True)
        if not index: return ''
        char = self.get(index, '%s +1c' % index)
        count = count + (1 if char == start else -1)
        # Balance reached zero: this is the matching character.
        if not count:
            return index
        # When we are searching backwards we don't need
        # to set a character back because index will point
        # to the start of the match.
        index = '%s %s' % (index, '+1c' if not dir else '')
def clear_data(self):
    """
    Wipe the whole widget content and reset the filename back to the
    default one, then fire the <<ClearData>> virtual event.
    """
    import os
    self.delete('1.0', 'end')
    self.filename = os.path.abspath(self.default_filename)
    self.event_generate('<<ClearData>>')
def load_data(self, filename):
    """
    It dumps all text from a file into an AreaVi instance.

    filename - Name of the file.

    Fires <<LoadData>> and a mimetype-specific <<Load-...>> event.
    """
    import os
    filename = os.path.abspath(filename)
    self.filename = filename
    # `with` guarantees the descriptor is closed even when read() raises
    # (the original leaked the handle on error).
    with open(filename, 'r') as fd:
        data = fd.read()
    try:
        data = data.decode(self.charset)
    except UnicodeDecodeError:
        # Charset mismatch: keep the raw data and mark charset unknown.
        self.charset = ''
    self.delete('1.0', 'end')
    self.insert('1.0', data)
    self.event_generate('<<LoadData>>')
    mimetype, _ = mimetypes.guess_type(self.filename)
    self.event_generate('<<Load-%s>>' % mimetype)
def decode(self, name):
    """
    Switch the widget charset to `name` and reload the current file so
    its contents are re-decoded with the new charset.
    """
    self.charset = name
    self.load_data(self.filename)
def save_data(self):
    """
    It saves the actual text content in the current file.

    Fires <<SaveData>> and a mimetype-specific <<Save-...>> event.
    """
    data = self.get('1.0', 'end')
    data = data.encode(self.charset)
    # `with` guarantees the descriptor is closed even when write()
    # raises (the original leaked the handle on error).
    with open(self.filename, 'w') as fd:
        fd.write(data)
    self.event_generate('<<SaveData>>')
    mimetype, _ = mimetypes.guess_type(self.filename)
    self.event_generate('<<Save-%s>>' % mimetype)
def save_data_as(self, filename):
    """
    Save the widget content under a new name.

    filename - Name of the file to save the data; it also becomes the
    widget's current filename.
    """
    self.filename = filename
    self.save_data()
def tag_contains(self, name, index0, index1):
    """
    Return the (start, end) of the first range of tag `name` that fully
    encloses index0..index1, or an empty tuple when none does.

    tag_ranges() yields start/end indexes pairwise; each consecutive
    pair is tested for containment.
    """
    bounds = self.tag_ranges(name)
    for pos in xrange(0, len(bounds) - 1, 2):
        lo = bounds[pos].string
        hi = bounds[pos + 1].string
        if self.compare(lo, '<=', index0) and self.compare(hi, '>=', index1):
            return lo, hi
    return ()
def tag_sub_ranges(self, name, data, index0='1.0', index1='end'):
    """
    Substitute every range of text carrying tag `name` between index0
    and index1 with `data`.
    """
    while True:
        found = self.tag_nextrange(name, index0, index1)
        if not found:
            break
        lo, hi = found
        self.delete(lo, hi)
        self.insert(lo, data)
def tag_delete_ranges(self, name, *args):
    """
    Delete every range of text carrying tag `name` (substitute each
    with the empty string).
    """
    self.tag_sub_ranges(name, '', *args)
def tag_get_ranges(self, name, sep=''):
    """
    Return the text of every range tagged `name`, concatenated with
    each chunk followed by `sep`.
    """
    # str.join is linear; the original built the string with repeated
    # `+=` which is quadratic in the number of chunks.
    return ''.join(chunk + sep for chunk in self.tag_get_data(name))
def tag_get_data(self, name):
    """
    It returns an iterator with the text inside tag name.

    Yields one string per tagged range; yields nothing when
    tag_ranges() fails (best-effort behavior preserved).
    """
    try:
        map = self.tag_ranges(name)
    except Exception:
        # Deliberate best-effort: a missing/invalid tag yields nothing.
        pass
    else:
        # Ranges come pairwise (start0, end0, start1, end1, ...).
        for ind in xrange(0, len(map) - 1, 2):
            data = self.get(map[ind], map[ind + 1])
            yield(data)
def mark_set_next(self, tag, mark):
    """
    Move `mark` to the start of the next range of `tag` after it,
    if such a range exists.
    """
    found = self.tag_nextrange(tag, '%s +1c' % mark)
    if found:
        self.mark_set(mark, found[0])
def mark_set_prev(self, tag, mark):
    """
    Move `mark` to the start of the previous range of `tag` before it,
    if such a range exists.
    """
    found = self.tag_prevrange(tag, mark)
    if found:
        self.mark_set(mark, found[0])
def tag_prev_occur(self, tag_names, index0, index1, default):
    """
    Return the end index of the first previous range found among
    `tag_names`, or `default` when no tag has a previous range.
    """
    for tag in tag_names:
        found = self.tag_prevrange(tag, index0, index1)
        if found:
            return found[1]
    return default
def tag_next_occur(self, tag_names, index0, index1, default):
    """
    Return the start index of the first next range found among
    `tag_names`, or `default` when no tag has a next range.
    """
    for tag in tag_names:
        found = self.tag_nextrange(tag, index0, index1)
        if found:
            return found[0]
    return default
@staticmethod
def get_all_areavi_instances(wid):
    """
    Recursively walk the Tk widget tree rooted at `wid` and yield every
    AreaVi instance found in it.
    """
    for ind in wid.winfo_children():
        if isinstance(ind, AreaVi):
            yield ind
        else:
            # Non-AreaVi containers are descended into recursively.
            for ind in AreaVi.get_all_areavi_instances(ind):
                yield ind
@staticmethod
def get_opened_files(wid):
    """
    Return a dict mapping filename -> AreaVi instance for every AreaVi
    widget found under `wid`.
    """
    return dict((area.filename, area)
                for area in AreaVi.get_all_areavi_instances(wid))
@staticmethod
def find_on_all(wid, chunk):
    """
    Run AreaVi.find for `chunk` over every AreaVi instance under `wid`,
    yielding (areavi, match) pairs.
    """
    for area in AreaVi.get_all_areavi_instances(wid):
        for match in area.find(chunk, '1.0', 'end'):
            yield area, match
def get_cursor_word(self):
    """
    Return the word fragment between the last space (or line start) and
    the cursor, together with its start index, as (data, index).

    NOTE(review): the early-out branches return the bare string ''
    while the success path returns a 2-tuple — callers that unpack the
    result (e.g. match_word) will fail on the '' branches; confirm
    whether that can happen in practice before relying on this.
    """
    if self.compare('insert', '==', 'insert linestart'):
        return ''
    index = self.search(' ', 'insert',
                        stopindex='insert linestart',regexp=True,
                        backwards=True)
    # No space before the cursor on this line: start at line start,
    # otherwise skip past the space that was found.
    if not index: index = 'insert linestart'
    else: index = '%s +1c' % index
    if self.compare(index, '==', 'insert'): return ''
    data = self.get(index, 'insert')
    return data, index
def match_word(self, wid, delim=' '):
    """
    Yield (chunk, index) pairs for every space-delimited token, in any
    AreaVi under `wid`, that contains the word fragment at the cursor.
    `index` is the fragment's start index in this widget.

    NOTE(review): `delim` is accepted but never used; the regex below
    hardcodes the space delimiter.
    """
    data, index = self.get_cursor_word()
    for area, (chk, pos0, pos1) in self.find_on_all(wid, '[^ ]*%s[^ ]+' % data):
        yield chk, index
def complete_word(self, wid):
    """
    Generator-driven word completion: each next() replaces the word
    fragment before the cursor with the next distinct candidate found
    across all AreaVi widgets under `wid`.
    """
    seq = self.match_word(wid)
    # Candidates already applied; duplicates are skipped.
    table = []
    for data, index in seq:
        if not data in table:
            table.append(data)
        else:
            continue
        # Swap the current fragment for the candidate, then pause until
        # the caller asks for the next completion.
        self.delete(index, 'insert')
        self.insert(index, data)
        yield
|
import logging
import logging.config
import logging.handlers
import six
import sys
from ..utils import from_human_readable
def configure(config):
    """
    Configure root logging from a config dict.

    Installs a stdout StreamHandler (level from config['console']) and
    one RotatingFileHandler per entry of config['files'] (each with its
    own 'level', optional 'maxsize' and 'count'). The root logger level
    is forced to DEBUG; per-handler levels do the filtering.
    """
    fmt = config.get('format', None)          # renamed: `format` shadows the builtin
    datefmt = config.get('datefmt', None)
    fmtstyle = config.get('fmtstyle', '%')
    # logging.Formatter only accepts a `style` argument on Python 3.
    if six.PY2:
        formatter = logging.Formatter(fmt, datefmt)
    else:
        formatter = logging.Formatter(fmt, datefmt, fmtstyle)
    handlers = []
    # Console handler
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(config['console']['level'])
    handler.setFormatter(formatter)
    handlers.append(handler)
    # File handlers
    for filename in config['files']:
        file_config = config['files'][filename]
        maxsize = from_human_readable(str(file_config.get('maxsize', '1M')))
        count = file_config.get('count', 1)
        handler = logging.handlers.RotatingFileHandler(
            filename, maxBytes=maxsize, backupCount=count)
        handler.setLevel(file_config['level'])
        handler.setFormatter(formatter)
        handlers.append(handler)
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    for handler in handlers:
        # BUG FIX: dropped the stray debug `print(h)` the original
        # emitted for every handler on configuration.
        root.addHandler(handler)
Remove support for logging format style — it does not work in Python 2 at all
import logging
import logging.config
import logging.handlers
import six
import sys
from ..utils import from_human_readable
def configure(config):
    """
    Configure root logging from a config dict.

    Installs a stdout StreamHandler (level from config['console']) and
    one RotatingFileHandler per entry of config['files'] (each with its
    own 'level', optional 'maxsize' and 'count'). The root logger level
    is forced to DEBUG; per-handler levels do the filtering.
    """
    fmt = config.get('format', None)          # renamed: `format` shadows the builtin
    datefmt = config.get('datefmt', None)
    formatter = logging.Formatter(fmt, datefmt)
    handlers = []
    # Console handler
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(config['console']['level'])
    handler.setFormatter(formatter)
    handlers.append(handler)
    # File handlers
    for filename in config['files']:
        file_config = config['files'][filename]
        maxsize = from_human_readable(str(file_config.get('maxsize', '1M')))
        count = file_config.get('count', 1)
        handler = logging.handlers.RotatingFileHandler(
            filename, maxBytes=maxsize, backupCount=count)
        handler.setLevel(file_config['level'])
        handler.setFormatter(formatter)
        handlers.append(handler)
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    for handler in handlers:
        # BUG FIX: dropped the stray debug `print(h)` the original
        # emitted for every handler on configuration.
        root.addHandler(handler)
|
e17b57ba-2ead-11e5-8cdd-7831c1d44c14
e1810ade-2ead-11e5-8cd6-7831c1d44c14
e1810ade-2ead-11e5-8cd6-7831c1d44c14 |
PL datamodule (#574)
Co-authored-by: Dristi <adffd619eaf1b3df14b444e15e3a3422a41d26ed@Dristis-MacBook-Air.local>
|
"""
This will parse the large ucd xml into a simple list that is workable and
is fair enough for download and embeding.
starts with the # unicode description/version
format
<unicode> \t <unicode name>
download from
http://www.unicode.org/Public/8.0.0/ucdxml/ucd.all.flat.zip
"""
import os
from xml.etree import cElementTree as ET
# after download it will be in the download folder :)
path = os.path.expanduser("~/Downloads/ucd.all.flat.xml")
tree = ET.parse(path)
flat = []
for i in tree.iter():
if i.tag.endswith("description"):
flat.insert(0, "# %s" % i.text)
if i.tag.endswith("char"):
n = i.attrib.get("na")
if n:
flat.append("%s\t%s" % (i.attrib.get("cp"), n))
f = open("flatUnicode.txt", "w")
f.write("\n".join(flat))
f.close()
buildFlatUnicodeList: whitespace
"""
This will parse the large ucd xml into a simple list that is workable and
is fair enough for download and embeding.
starts with the # unicode description/version
format
<unicode> \t <unicode name>
download from
http://www.unicode.org/Public/8.0.0/ucdxml/ucd.all.flat.zip
"""
import os
from xml.etree import cElementTree as ET
# after download it will be in the download folder :)
path = os.path.expanduser("~/Downloads/ucd.all.flat.xml")
tree = ET.parse(path)
flat = []
for i in tree.iter():
if i.tag.endswith("description"):
flat.insert(0, "# %s" % i.text)
if i.tag.endswith("char"):
n = i.attrib.get("na")
if n:
flat.append("%s\t%s" % (i.attrib.get("cp"), n))
f = open("flatUnicode.txt", "w")
f.write("\n".join(flat))
f.close()
|
"""
"""
from Tkinter import *
from re import escape
import mimetypes
class AreaVi(Text):
ACTIVE = None
def __init__(self, default_filename, *args, **kwargs):
    """
    This class receives all Text widget arguments
    and one named default_filename which means
    the filename that is saved when no filename
    is specified.

    default_filename:
    The default path file where contents are saved.

    It implements a system of modes to handle
    tkinter keypresses events.
    The method hook can be used to bind events
    to callbacks mapped to specific modes.
    """
    Text.__init__(self, *args, **kwargs)
    # Maps mode id -> bool (whether Text's own bindings stay active).
    self.setup = dict()
    # Maybe it should be?
    # abspath(default_filename)
    self.default_filename = default_filename
    # The file's path and name.
    self.filename = default_filename
    # Shouldn't it be LAST_COL and MSEL?
    # Mark remembering the column used by up/down cursor movement.
    self.last_col = '_last_col_'
    self.mark_set(self.last_col, '1.0')
    # This mark is used in AreaVi.replace_all.
    self.STOP_REPLACE_INDEX = '_stop_replace_index_'
    # Tags have name starting and ending with __
    # def cave(event):
    # AreaVi.ACTIVE = event.widget
    # self.hook(-1, '<FocusIn>', cave)
    # The most recently created widget becomes the active one.
    AreaVi.ACTIVE = self
    # Charset used to decode/encode file contents on load/save.
    self.charset = 'utf-8'
def active(self):
    """
    Mark this instance as the target AreaVi.

    Plugins exposing python functions to vy should read AreaVi.ACTIVE
    whenever they need to manipulate the content of the AreaVi instance
    that was set as the target.
    """
    AreaVi.ACTIVE = self
def chmode(self, id):
    """
    Switch the AreaVi instance to mode `id`.

    area = AreaVi('None')
    area.chmode('INSERT')

    would put `area` in INSERT mode. When the mode was registered with
    opt=True, the stock 'Text' bindtag stays in the chain so regular
    editing keystrokes still reach the widget.
    """
    propagate = self.setup[id]
    self.id = id
    base_tag = 'mode%s-1' % self
    mode_tag = 'mode%s%s' % (self, id)
    if propagate:
        self.bindtags((base_tag, mode_tag, self, 'Text', '.'))
    else:
        self.bindtags((base_tag, mode_tag, self, '.'))
def add_mode(self, id, opt=False):
    """
    Register a new mode named `id`.

    `opt` says whether events should also propagate to the internal
    Text widget callbacks:

    def install(area):
        area.add_mode('MODE')            # non-editing mode

    def install(area):
        area.add_mode('TYPING', opt=True)  # printable keys are inserted
    """
    self.setup[id] = opt
def del_mode(self, id):
    """
    Placeholder for removing a mode; not implemented yet.
    """
    pass
def hook(self, id, seq, callback):
    """
    Bind `callback` to event sequence `seq` within mode `id`:

    def callback(event):
        event.widget.insert('An event happened!')

    def install(area):
        area.hook('INSERT', '<Key-i>', callback)

    Whenever <Key-i> fires while in mode INSERT, `callback` is invoked
    with the event object.
    """
    mode_tag = 'mode%s%s' % (self, id)
    self.bind_class(mode_tag, seq, callback, add=True)
def unhook(self, id, seq, callback=None):
    """
    Remove the bindings for event sequence `seq` in mode `id`.

    NOTE(review): `callback` is accepted but unused — unbind_class
    drops every callback bound to the sequence, not just one.
    """
    MODE_Y = 'mode%s%s' % (self, id)
    self.unbind_class(MODE_Y, seq)
def install(self, *args):
    """
    Shorthand for several AreaVi.hook calls:

    def install(area):
        area.install(('MODE1', '<Event1>', callback1),
                     ('MODE2', '<Event2>', callback2))
    """
    for mode, seq, handler in args:
        self.hook(mode, seq, handler)
def uninstall(self, *args):
    """
    Shorthand for several AreaVi.unhook calls; the inverse of install.
    """
    for mode, seq, handler in args:
        self.unhook(mode, seq, handler)
def append(self, data):
    """
    Insert `data` at the end of the widget, move the cursor after it
    and scroll so the cursor is visible.
    """
    self.insert('end', data)
    self.mark_set('insert', 'end')
    self.see('end')
def curline(self):
    """
    Return the full text of the cursor's line (including the newline).
    """
    return self.get('insert linestart', 'insert +1l linestart')
def tag_update(self, name, index0, index1, *args):
    """
    Clear tag `name` over index0..index1, then re-apply it to each
    (start, end) range given in `args`.

    Example:

    text.tag_update('X', '1.0', 'end', ('2.0', '2.0 lineend'))

    removes X from the whole buffer and re-adds it only over the
    range '2.0'..'2.0 lineend'.
    """
    self.tag_remove(name, index0, index1)
    for start, end in args:
        self.tag_add(name, start, end)
def insee(self, index, data):
"""
This method inserts data at index position then makes the cursor visible.
"""
self.insert(index, data)
self.see('insert')
def cmd_like(self):
"""
This method retrieves the cursor line then deletes it afterwards.
"""
data = self.get('insert linestart', 'insert lineend')
self.delete('insert linestart', 'insert lineend')
return data
def indref(self, index):
    """
    Resolve a Text index into a (line, column) pair of ints:

    a, b = area.indref('insert')
    """
    line, col = self.index(index).split('.')
    return int(line), int(col)
def setcur(self, line, col):
    """
    Place the cursor at (line, col) and scroll it into view.
    """
    self.mark_set('insert', '%s.%s' % (line, col))
    self.see('insert')
def setcurl(self, line):
"""
It is used to set the cursor position at a given
line. It sets the cursor at line.0 position.
"""
self.mark_set('insert', '%s.%s' % (line, '0'))
self.see('insert')
def indint(self, index):
    """
    Split a 'line.col' index string into a pair of ints:

    a, b = area.indint('2.3')   # -> (2, 3)
    """
    line, col = index.split('.')
    return int(line), int(col)
def indcol(self):
"""
This is a short hand method for getting
the last col in which the cursor was in.
It is useful when implementing functions to
select pieces of text.
"""
a, b = self.indref(self.last_col)
return int(a), int(b)
def setcol(self, line, col):
"""
It sets the mark used by the arrows
keys and selection state.
"""
self.mark_set(self.last_col, '%s.%s' % (line, col))
def indcur(self):
    """
    Return the cursor position as a (line, col) pair of ints.
    """
    line, col = self.indref('insert')
    return int(line), int(col)
def seecur(self):
"""
Just a shorthand for area.see('insert')
which makes the cursor visible wherever it is in.
"""
self.see('insert')
def inset(self, index):
"""
Just a shorthand for area.mark_set('insert', index)
so we spare some typing.
"""
self.mark_set('insert', index)
def is_end(self):
    """
    This function returns True if the cursor is positioned
    at the end of the AreaVi instance.

    This is useful when implementing other methods.
    Like those from visual block selection to avoid
    the cursor jumping to odd places when it achieves
    the end of the text region.

    NOTE(review): the comparison uses '!=', so this actually returns
    True while the cursor is NOT on the last line — callers (down())
    rely on that inverted sense; confirm before renaming or fixing.
    """
    # I have to use 'end -1l linestart' since it seems the 'end' tag
    # corresponds to a one line after the last visible line.
    # So last line lineend != 'end'.
    return self.compare('insert linestart', '!=', 'end -1l linestart')
def is_start(self):
    """
    This function returns True if the cursor is
    at the start of the text region. It is on index '1.0'

    NOTE(review): as with is_end, the '!=' comparison means this
    returns True while the cursor is NOT on the first line; up()
    depends on that inverted sense.
    """
    return self.compare('insert linestart', '!=', '1.0')
def down(self):
    """
    Move the cursor one line down, preserving the remembered column.
    """
    # Guard against moving past the last line so the cursor does not
    # jump to odd positions.
    if not self.is_end():
        return
    _, col = self.indcol()
    row, _ = self.indcur()
    self.setcur(row + 1, col)
def up(self):
    """
    Move the cursor one line up, preserving the remembered column.
    """
    if not self.is_start():
        return
    _, col = self.indcol()
    row, _ = self.indcur()
    self.setcur(row - 1, col)
def left(self):
"""
It moves the cursor one character left.
"""
self.mark_set('insert', 'insert -1c')
# The mark used by self.down, self.up.
self.mark_set(self.last_col, 'insert')
def right(self):
"""
It moves the cursor one character right.
"""
self.mark_set('insert', 'insert +1c')
# The mark used by self.down, self.up.
self.mark_set(self.last_col, 'insert')
def start_selection(self):
"""
It sets the mark sel_start to the insert position.
So, when sel_up, sel_down, sel_right, sel_left are
called then they will select a region from this mark.
"""
self.mark_set('_sel_start_', 'insert')
def start_block_selection(self):
self.mark_set('_block_sel_start_', 'insert')
def is_add_up(self, index):
"""
It checks whether the selection must be
removed or added.
If it returns True then the selection must be
removed. True means that the '_sel_start_'
mark is positioned above the cursor position.
So, it must remove the selection instead of
adding it.
"""
return self.compare('%s linestart' % index, '<=', 'insert linestart')
def rmsel(self, index0, index1):
"""
It removes the tag sel from the range that is delimited by index0 and index1
regardless whether index0 <= index1.
"""
index2 = self.min(index0, index1)
index3 = self.max(index0, index1)
self.tag_remove('sel', index2, index3)
def addsel(self, index0, index1):
"""
It adds the tag sel to the range delimited by index0 and index1 regardless
whether index0 <= index1.
"""
index2 = self.min(index0, index1)
index3 = self.max(index0, index1)
self.tag_add('sel', index2, index3)
def min(self, index0, index1):
    """
    Return whichever of the two Text indexes comes first.
    """
    return index0 if self.compare(index0, '<=', index1) else index1
def max(self, index0, index1):
    """
    Return whichever of the two Text indexes comes last.
    """
    return index1 if self.compare(index0, '<=', index1) else index0
def sel_up(self):
"""
It adds 'sel' one line up the 'insert' position
and sets the cursor one line up.
"""
index0 = self.min('_sel_start_', 'insert')
index1 = self.max('_sel_start_', 'insert')
self.rmsel(index0, index1)
self.up()
index0 = self.min('_sel_start_', 'insert')
index1 = self.max('_sel_start_', 'insert')
self.addsel(index0, index1)
def is_add_down(self, index):
"""
It returns True if the cursor is positioned below
the initial mark for selection.
It determins if the selection must be removed or added when
sel_down is called.
"""
return self.compare('%s linestart' % index, '>=', 'insert linestart')
def sel_down(self):
"""
It adds or removes selection one line down.
"""
index0 = self.min('_sel_start_', 'insert')
index1 = self.max('_sel_start_', 'insert')
self.rmsel(index0, index1)
self.down()
index0 = self.min('_sel_start_', 'insert')
index1 = self.max('_sel_start_', 'insert')
self.addsel(index0, index1)
def is_add_right(self, index):
"""
It returns True if the cursor is positioned at the left
of the initial selection mark. It is useful for sel_right method.
"""
return self.compare(index, '>=', 'insert')
def sel_right(self):
"""
It adds or removes selection one character right.
"""
index0 = self.min('_sel_start_', 'insert')
index1 = self.max('_sel_start_', 'insert')
self.rmsel(index0, index1)
self.right()
index0 = self.min('_sel_start_', 'insert')
index1 = self.max('_sel_start_', 'insert')
self.addsel(index0, index1)
def is_add_left(self, index):
"""
It returns True if the cursor is positioned at the right of
the initial mark selection.
"""
return self.compare(index, '<=', 'insert')
def sel_left(self):
"""
It adds or removes selection one character left.
"""
index0 = self.min('_sel_start_', 'insert')
index1 = self.max('_sel_start_', 'insert')
self.rmsel(index0, index1)
self.left()
index0 = self.min('_sel_start_', 'insert')
index1 = self.max('_sel_start_', 'insert')
self.addsel(index0, index1)
def indmsel(self):
"""
It is just a shorthand for getting the last selection mark.
"""
a, b = self.indref('_sel_start_')
return int(a), int(b)
def addblock(self, index0, index1):
    """
    Add rectangular (block) selection between index0 and index1: the
    'sel' tag is applied to the same column span on every row in the
    rectangle.
    """
    lo = self.min(index0, index1)
    hi = self.max(index0, index1)
    a, b = self.indint(lo)
    c, d = self.indint(hi)
    # The column bounds are loop-invariant; the original recomputed
    # them on every row.
    left = min(b, d)
    right = max(b, d)
    for row in xrange(a, c + 1):
        self.addsel('%s.%s' % (row, left), '%s.%s' % (row, right))
def rmblock(self, index0, index1):
    """
    Remove rectangular (block) selection between index0 and index1:
    the 'sel' tag is cleared from the same column span on every row.
    """
    lo = self.min(index0, index1)
    hi = self.max(index0, index1)
    a, b = self.indint(lo)
    c, d = self.indint(hi)
    # The column bounds are loop-invariant; the original recomputed
    # them on every row.
    left = min(b, d)
    right = max(b, d)
    for row in xrange(a, c + 1):
        self.rmsel('%s.%s' % (row, left), '%s.%s' % (row, right))
def block_down(self):
"""
It adds or removes block selection one line down.
"""
a, b = self.indcol()
c, d = self.indcur()
index = self.index('_block_sel_start_')
self.rmblock(index, '%s.%s' % (c, b))
self.down()
a, b = self.indcol()
c, d = self.indcur()
self.addblock(index, '%s.%s' % (c, b))
def block_up(self):
"""
It adds or removes block selection one line up.
"""
a, b = self.indcol()
c, d = self.indcur()
index = self.index('_block_sel_start_')
self.rmblock(index, '%s.%s' % (c, b))
self.up()
a, b = self.indcol()
c, d = self.indcur()
self.addblock(index, '%s.%s' % (c, b))
def is_line_start(self):
"""
It returns True if the cursor is at the start of the cursor line.
"""
return self.compare('insert', '!=', 'insert linestart')
def block_left(self):
"""
It adds block selection to the left.
"""
a, b = self.indcol()
c, d = self.indcur()
index = self.index('_block_sel_start_')
self.rmblock(index, '%s.%s' % (c, b))
self.left()
a, b = self.indcol()
c, d = self.indcur()
self.addblock(index, '%s.%s' % (c, b))
def is_line_end(self):
"""
It returns True if the cursor is at the end of the cursor line.
"""
return self.compare('insert', '!=', 'insert lineend')
def block_right(self):
"""
It adds a block selection to the right.
"""
a, b = self.indcol()
c, d = self.indcur()
index = self.index('_block_sel_start_')
self.rmblock(index, '%s.%s' % (c, b))
self.right()
a, b = self.indcol()
c, d = self.indcur()
self.addblock(index, '%s.%s' % (c, b))
def clear_selection(self):
    """
    Drop the 'sel' tag from its whole range, if any.
    """
    try:
        self.tag_remove('sel', 'sel.first', 'sel.last')
    except Exception:
        # Best-effort: 'sel.first' raises when nothing is selected.
        pass
def select_char(self):
"""
it adds 'sel' a char ahead the cursor position.
"""
self.addsel('insert', 'insert +1c')
def unselect_char(self):
"""
It removes 'sel' a char from the cursor position.
"""
self.rmsel('insert', 'insert +1c')
def clchar(self):
"""
It deletes a char from the cursor position.
"""
self.edit_separator()
self.delete('insert', 'insert +1c')
def do_undo(self):
    """
    Undo the last edit; an empty undo stack is silently ignored.
    """
    try:
        self.edit_undo()
    except TclError:
        pass
def do_redo(self):
    """
    Redo the last undone edit; an empty redo stack is silently ignored.
    """
    try:
        self.edit_redo()
    except TclError:
        pass
def sel_text_start(self):
"""
It selects all text from insert position to the start position
of the text.
"""
index = self.index('insert')
self.go_text_start()
self.addsel(index, 'insert')
def sel_text_end(self):
"""
It selects all text from the insert position to the end of the text.
"""
index = self.index('insert')
self.go_text_end()
self.addsel(index, 'insert')
def go_text_start(self):
"""
It goes to the first position in the text.
"""
self.mark_set('insert', '1.0')
self.see('insert')
def go_text_end(self):
"""
It goes to the end of the text.
"""
self.mark_set('insert', 'end linestart')
self.see('insert')
def sel_line_start(self):
"""
It adds selection from the insert position to the
start of the line.
"""
index = self.index('insert')
self.go_line_start()
self.addsel(index, 'insert')
def sel_line_end(self):
"""
It selects all text from insert position to the end of the line.
"""
index = self.index('insert')
self.go_line_end()
self.addsel(index, 'insert')
def go_line_start(self):
"""
It goes to the beginning of the cursor position line.
"""
self.mark_set('insert', 'insert linestart')
def go_line_end(self):
"""
It goes to the end of the cursor position line.
"""
self.mark_set('insert', 'insert lineend')
def go_next_word(self):
"""
It puts the cursor on the beginning of the next word.
"""
self.seek_next_down('\M')
def go_prev_word(self):
"""
It puts the cursor in the beginning of the previous word.
"""
self.seek_next_up('\M')
def go_next_sym(self, chars):
    """
    Move the cursor to the next occurrence of any symbol in `chars`.
    Each symbol is regex-escaped and the alternatives are joined into
    one pattern.
    """
    # Generator + join instead of map(lambda ...): same result, the
    # idiomatic form, and no intermediate list.
    pattern = '|'.join(escape(sym) for sym in chars)
    self.seek_next_down(pattern)
def go_prev_sym(self, chars):
    """
    Move the cursor to the previous occurrence of any symbol in
    `chars`. Each symbol is regex-escaped and the alternatives are
    joined into one pattern.
    """
    # Generator + join instead of map(lambda ...): same result, the
    # idiomatic form, and no intermediate list.
    pattern = '|'.join(escape(sym) for sym in chars)
    self.seek_next_up(pattern)
def cllin(self):
"""
It deletes the cursor position line, makes the cursor visible
and adds a separator to the undo stack.
"""
self.edit_separator()
self.delete('insert linestart', 'insert +1l linestart')
self.see('insert')
def cpsel(self):
"""
It copies to the clip board ranges of text
that are selected and removes the selection.
"""
data = self.join_ranges('sel')
self.clipboard_clear()
self.clipboard_append(data)
self.tag_remove('sel', 'sel.first', 'sel.last')
def cpblock(self):
"""
It copies blocks of text that are selected
with a separator '\n'.
"""
data = self.join_ranges('sel', '\n')
self.clipboard_clear()
self.clipboard_append(data)
self.tag_remove('sel', 'sel.first', 'sel.last')
def ctblock(self):
"""
It cuts blocks of text with a separator '\n'.
"""
data = self.join_ranges('sel', '\n')
self.clipboard_clear()
self.clipboard_append(data)
self.edit_separator()
self.delete_ranges('sel')
def ctsel(self):
"""
It cuts the selected text.
"""
data = self.join_ranges('sel')
self.clipboard_clear()
self.clipboard_append(data)
self.edit_separator()
self.delete_ranges('sel')
def clsel(self):
"""
It deletes all selected text.
"""
self.edit_separator()
self.delete_ranges('sel')
def ptsel(self):
"""
It pastes over the cursor position data from the clipboard
and adds a separator.
"""
data = self.clipboard_get()
self.edit_separator()
self.insert('insert', data)
def ptsel_after(self):
"""
It pastes one line after the cursor position data from clipboard
and adds a separator.
"""
data = self.clipboard_get()
self.edit_separator()
self.insert('insert +1l linestart', data)
def ptsel_before(self):
"""
It pastes data from the cursor position one line before the cursor
position and adds a separator.
"""
data = self.clipboard_get()
self.edit_separator()
self.insert('insert linestart', data)
def select_line(self):
"""
It adds selection to the cursor position line.
"""
self.tag_add('sel', 'insert linestart', 'insert +1l linestart')
def unselect_line(self):
"""
It removes selection from the cursor position line.
"""
self.tag_remove('sel', 'insert linestart', 'insert +1l linestart')
def toggle_line_selection(self):
"""
"""
self.toggle_sel('insert linestart', 'insert +1l linestart')
def toggle_sel(self, index0, index1):
"""
"""
self.toggle_range('sel', index0, index1)
def toggle_range(self, name, index0, index1):
    """
    Toggle tag `name` over index0..index1 (in either order): remove it
    when the whole range already carries the tag, add it otherwise.
    """
    lo = self.min(index0, index1)
    hi = self.max(index0, index1)
    # BUG FIX: the original hardcoded 'sel' in both branches even
    # though it checked `name` — toggling any other tag silently
    # operated on the selection instead.
    if self.is_tag_range(name, lo, hi):
        self.tag_remove(name, lo, hi)
    else:
        self.tag_add(name, lo, hi)
def select_word(self):
    """
    It selects a word from the cursor position.

    The word boundaries are the nearest non-word chars (\W) before and
    after the cursor, clamped to the current line.
    """
    index1 = self.search('\W', 'insert', regexp=True, stopindex='insert linestart', backwards=True)
    index2 = self.search('\W', 'insert', regexp=True, stopindex='insert lineend')
    # No boundary found means the word extends to the line edge.
    self.tag_add('sel', 'insert linestart' if not index1 else '%s +1c' % index1,
                 'insert lineend' if not index2 else index2)
def select_seq(self):
    """
    Select the space-delimited sequence under the cursor: like
    select_word, but only spaces (not all \W chars) bound the
    selection, clamped to the current line.
    """
    index1 = self.search(' ', 'insert', regexp=True, stopindex='insert linestart', backwards=True)
    index2 = self.search(' ', 'insert', regexp=True, stopindex='insert lineend')
    self.tag_add('sel', 'insert linestart' if not index1 else '%s +1c' % index1,
                 'insert lineend' if not index2 else index2)
def scroll_line_up(self):
"""
It scrolls one line up
"""
self.yview(SCROLL, -1, 'units')
is_visible = self.dlineinfo('insert')
if not is_visible:
self.mark_set('insert', 'insert -1l')
def scroll_line_down(self):
"""
It scrolls one line down.
"""
self.yview(SCROLL, 1, 'units')
is_visible = self.dlineinfo('insert')
if not is_visible:
self.mark_set('insert', 'insert +1l')
def scroll_page_down(self):
"""
It goes one page down.
"""
self.yview(SCROLL, 1, 'page')
self.mark_set('insert', '@0,0')
def scroll_page_up(self):
"""
It goes one page up.
"""
self.yview(SCROLL, -1, 'page')
self.mark_set('insert', '@0,0')
def insert_line_down(self):
"""
It inserts one line down from the cursor position.
"""
self.edit_separator()
self.insert('insert +1l linestart', '\n')
self.mark_set('insert', 'insert +1l linestart')
self.see('insert')
def select_all(self):
"""
It selects all text.
"""
self.tag_add('sel', '1.0', 'end')
def insert_line_up(self):
"""
It inserts one line up.
"""
self.edit_separator()
self.insert('insert linestart', '\n')
self.mark_set('insert', 'insert -1l linestart')
self.see('insert')
def shift_sel_right(self, width, char):
"""
"""
srow, scol = self.indref('sel.first')
erow, ecol = self.indref('sel.last')
self.shift_right(srow, erow, width, char)
def shift_sel_left(self, width):
"""
"""
srow, scol = self.indref('sel.first')
erow, ecol = self.indref('sel.last')
self.shift_left(srow, erow, width)
def shift_right(self, srow, erow, width, char):
"""
Given a start row and a end row it shifts
a block of text to the right.
This is specially useful when working with
source code files.
"""
self.edit_separator()
for ind in xrange(srow, erow + 1):
self.insert('%s.0' % ind, width * char)
def shift_left(self, srow, erow, width):
"""
Given a start row and a end row it shifts
a block of text to the left.
This is specially useful when working with
source code files.
"""
self.edit_separator()
for ind in xrange(srow, erow + 1):
self.delete('%s.0' % ind, '%s.%s' % (ind, width))
def collect(self, name, regex, *args, **kwargs):
    """
    It returns an interator corresponding to calling AreaVi.find
    between the ranges of the tag specified by name.

    You shouldn't delete or insert data while performing this operation.
    """
    # It should be built on top of nextrange.
    # tag_ranges() yields start/end indexes pairwise.
    map = self.tag_ranges(name)
    for indi in range(0, len(map) - 1, 2):
        seq = self.find(regex, map[indi], map[indi + 1], *args, **kwargs)
        for indj in seq:
            yield indj
def rep_match_ranges(self, name, regex, data, index='1.0', stopindex='end',
                     *args, **kwargs):
    """
    It replaces all occurrences of regex inside a tag ranges
    for data.

    name - Name of the tag.
    regex - The pattern.
    data - The data to replace.
    args - Arguments given to AreaVi.find.
    **kwargs - A dictionary of arguments given to AreaVi.find.

    NOTE(review): `index` is reassigned from replace_all's return
    value, but replace_all has no explicit return statement — verify
    the loop still terminates as intended via tag_nextrange.
    """
    while True:
        map = self.tag_nextrange(name, index, stopindex)
        if not map: break
        index3, index4 = map
        index = self.replace_all(regex, data, index3, index4, *args, **kwargs)
def setup_tags_conf(self, kwargs):
    """
    Apply visual configuration to a set of tags.

    kwargs is a dictionary like:

    kwargs = {'tag_name': {'background': 'blue'}}

    For the kwargs above, this method sets the background value to
    'blue' for the tag named 'tag_name'.
    """
    # The original loop variable shadowed the dict being iterated
    # (``for name, kwargs in kwargs.iteritems()``), which is fragile
    # and confusing; use a distinct name for the per-tag option dict.
    for name, conf in kwargs.iteritems():
        self.tag_config(name, **conf)
        # Lower each configured tag so pre-existing tags keep visual
        # priority (same call the original made).
        self.tag_lower(name)
def map_matches(self, name, matches):
    """
    It adds a tag to the match ranges from either AreaVi.find or
    AreaVi.collect.

    name    - The tag to be added.
    matches - An iterator from AreaVi.find or AreaVi.collect.
    """
    # Each match is a (chunk, start, end) triple; only the indexes matter.
    for _, index0, index1 in matches:
        self.tag_add(name, index0, index1)

def tokenize(self, *args, **kwargs):
    """
    It tokenizes the contents of an AreaVi widget based on a regex.
    """
    # Yields the unmatched gap before each match (when non-empty)
    # followed by the match itself, so the scanned text is covered
    # in document order.
    index0 = '1.0'
    for chk, index1, index2 in self.find(*args, **kwargs):
        if self.compare(index1, '>', index0):
            yield(self.get(index0, index1), index0, index1)
        yield(chk, index1, index2)
        index0 = index2
def find(self, regex, index='1.0', stopindex='end', exact=None, regexp=True, nocase=None,
         elide=None, nolinestop=None, step=''):
    """
    Return an iterator of (chunk, start, end) matches of regex between
    index and stopindex. It is based on the Text.search method.

    step - Extra index modifier appended when resuming the scan
           (e.g. ' +1c').
    """
    count = IntVar()
    while True:
        index = self.search(regex, index, stopindex, exact=exact, nocase=nocase,
                            nolinestop=nolinestop, regexp=regexp, elide=elide, count=count)
        if not index:
            break
        # Renamed from `len`, which shadowed the builtin of the same name.
        size = count.get()
        tmp = '%s +%sc' % (index, size)
        chunk = self.get(index, tmp)
        pos0 = self.index(index)
        pos1 = self.index('%s +%sc' % (index, size))
        # Resume right after the match (plus the optional step modifier).
        index = '%s%s' % (tmp, step)
        yield(chunk, pos0, pos1)
def search(self, pattern, index, stopindex=None, forwards=None,
           backwards=None, exact=None, regexp=None, nocase=None,
           count=None, elide=None, nolinestop=None):
    '''Standard search method, but with support for the nolinestop
    option which is new in tk 8.5 but not supported by tkinter out
    of the box.
    '''
    # Build the raw Tcl command by hand so -nolinestop can be passed.
    args = [self._w, 'search']
    if forwards: args.append('-forwards')
    if backwards: args.append('-backwards')
    if exact: args.append('-exact')
    if regexp: args.append('-regexp')
    if nocase: args.append('-nocase')
    if elide: args.append('-elide')
    if nolinestop: args.append("-nolinestop")
    if count: args.append('-count'); args.append(count)
    # '--' stops option parsing when the pattern itself starts with '-'.
    if pattern and pattern[0] == '-': args.append('--')
    args.append(pattern)
    args.append(index)
    if stopindex: args.append(stopindex)
    # Tk returns '' when there is no match.
    return str(self.tk.call(tuple(args)))
def seek_next_up(self, regex, index0='insert', stopindex='1.0', exact=None, regexp=True,
                 nocase=None, elide=None, nolinestop=None):
    """
    Find the next match with regex up the cursor.
    It sets the cursor at the index of the occurrence.
    Returns (start, end) of the match, or None when there is none.
    """
    count = IntVar()
    index = self.search(regex, index0, stopindex=stopindex, regexp=regexp, exact=exact,
                        nocase=nocase, elide=elide, nolinestop=nolinestop, backwards=True, count=count)
    if not index: return
    index1 = self.index('%s +%sc' % (index, count.get()))
    # When searching up, the cursor lands on the START of the match.
    self.mark_set('insert', index)
    self.see('insert')
    return index, index1

def seek_next_down(self, regex, index0='insert', stopindex='end', exact=None, regexp=True,
                   nocase=None, elide=None, nolinestop=None):
    """
    Find the next match with regex down.
    It sets the cursor at the index of the occurrence.
    Returns (start, end) of the match, or None when there is none.
    """
    count = IntVar()
    index = self.search(regex, index0, stopindex=stopindex, regexp=regexp, exact=exact, nocase=nocase,
                        elide=elide, nolinestop=nolinestop, count=count)
    if not index: return
    index1 = self.index('%s +%sc' % (index, count.get()))
    # When searching down, the cursor lands on the END of the match.
    self.mark_set('insert', index1)
    self.see('insert')
    return index, index1

def pick_next_up(self, name, *args, **kwargs):
    """
    Like seek_next_up, but also tags the matched range with `name`.
    """
    index = self.seek_next_up(*args, **kwargs)
    if not index:
        return
    self.tag_add(name, *index)
    return index

def pick_next_down(self, name, *args, **kwargs):
    """
    Like seek_next_down, but also tags the matched range with `name`.
    """
    index = self.seek_next_down(*args, **kwargs)
    if not index:
        return
    self.tag_add(name, *index)
    return index
def replace(self, regex, data, index=None, stopindex=None, forwards=None,
            backwards=None, exact=None, regexp=True, nocase=None, elide=None, nolinestop=None):
    """
    It is used to replace occurrences of a given match.
    It is possible to use a callback function to return what is replaced
    as well.
    Returns (match_start, len(replacement)) or None when no match.
    """
    count = IntVar()
    index = self.search(regex, index, stopindex, forwards=forwards, backwards=backwards, exact=exact, nocase=nocase,
                        nolinestop=nolinestop, regexp=regexp, elide=elide, count=count)
    if not index: return
    # A callable `data` receives the (start, end) of the match and
    # must return the replacement string.
    if callable(data): data = data(index, self.index('%s +%sc' % (index, count.get())))
    index0 = self.index('%s +%sc' % (index, count.get()))
    self.delete(index, index0)
    self.insert(index, data)
    return index, len(data)

def replace_all(self, regex, data, index='1.0', stopindex='end', exact=None, regexp=True, nocase=None,
                elide=None, nolinestop=None):
    """
    It is used to replace all occurrences of a given match in a range.
    It accepts a callback function that determines what is replaced.
    Returns the index where the scan stopped.
    """
    # A mark is needed because the range will grow
    # when data is inserted; the intent is searching
    # over a pre defined range.
    self.mark_set(self.STOP_REPLACE_INDEX, stopindex)
    while True:
        map = self.replace(regex, data, index, self.STOP_REPLACE_INDEX, exact=exact, nocase=nocase,
                           nolinestop=nolinestop, regexp=regexp, elide=elide)
        if not map:
            return index
        index, size = map
        # Resume after the inserted replacement to avoid rescanning it.
        index = self.index('%s +%sc' % (index, size))
def get_paren_search_dir(self, start, end):
    """
    Decide the search direction for pair matching from the char under
    the cursor: False (forwards) on `start`, True (backwards) on `end`,
    None when the cursor is over neither delimiter.
    """
    char = self.get('insert', 'insert +1c')
    if char == start:
        return False
    elif char == end:
        return True
    else:
        return None

def get_paren_search_sign(self, start, end):
    """
    Sign used to build the stopindex offset for pair matching: '+'
    when searching forwards from `start`, '-' when searching backwards
    from `end`, None otherwise.
    """
    char = self.get('insert', 'insert +1c')
    if char == start:
        return '+'
    elif char == end:
        return '-'
    else:
        return None
def select_case_pair(self, pair, MAX=1500):
    """
    Select ('sel') the region between the cursor and the matching
    delimiter of `pair` (e.g. ('(', ')')), searching at most MAX
    chars away via case_pair.
    """
    index = self.case_pair(MAX, *pair)
    if not index: return
    min = self.min(index, 'insert')
    if self.compare(min, '==', 'insert'): min = '%s +1c' % min
    max = self.max(index, 'insert')
    # BUGFIX: the original adjusted `min` again on this branch instead
    # of `max`, leaving the selection's upper bound one char short.
    if self.compare(max, '==', 'insert'): max = '%s +1c' % max
    self.tag_add('sel', min, max)
def case_pair(self, max, start='(', end=')'):
    """
    Once this method is called, it returns an index for the next
    matching parenthesis or '' if the char over the cursor
    isn't either '(' or ')'.
    """
    dir = self.get_paren_search_dir(start, end)
    # If dir is None then there is no match.
    if dir == None: return ''
    REG = '\%s|\%s' % (start, end)
    sign = self.get_paren_search_sign(start, end)
    count = 0
    # If we are searching forwards we don't need
    # to add 1c.
    index = 'insert %s' % ('+1c' if dir else '')
    # NOTE(review): IntVar's first positional arg is `master`; 0 is
    # falsy so Tkinter falls back to the default root — confirm intent.
    size = IntVar(0)
    while True:
        index = self.search(REG, index = index,
                            stopindex = 'insert %s%sc' % (sign, max),
                            count = size,
                            backwards = dir,
                            regexp = True)
        if not index: return ''
        char = self.get(index, '%s +1c' % index)
        # Nesting depth: +1 on an opening delimiter, -1 on a closing one;
        # depth zero means the matching delimiter was reached.
        count = count + (1 if char == start else -1)
        if not count:
            return index
        # When we are searching backwards we don't need
        # to set a character back because index will point
        # to the start of the match.
        index = '%s %s' % (index, '+1c' if not dir else '')
def clear_data(self):
    """
    It clears all text inside an AreaVi instance and resets the
    filename back to the default one.
    """
    import os
    self.delete('1.0', 'end')
    self.filename = os.path.abspath(self.default_filename)
    # Notify plugins that the widget content was dropped.
    self.event_generate('<<ClearData>>')
def load_data(self, filename):
    """
    It dumps all text from a file into an AreaVi instance.

    filename - Name of the file.
    """
    import os
    filename = os.path.abspath(filename)
    self.filename = filename
    # try/finally guarantees the handle is closed even when read()
    # raises; the original leaked the open file on error.
    fd = open(filename, 'r')
    try:
        data = fd.read()
    finally:
        fd.close()
    # i could generate a tk event here.
    try:
        data = data.decode(self.charset)
    except UnicodeDecodeError:
        # Decoding failed: keep the raw bytes and mark charset unknown
        # (same fallback behavior as before).
        self.charset = ''
    self.delete('1.0', 'end')
    self.insert('1.0', data)
    self.event_generate('<<LoadData>>')
    type, _ = mimetypes.guess_type(self.filename)
    self.event_generate('<<Load-%s>>' % type)
def decode(self, name):
    """
    Switch to charset `name` and reload the current file with it.
    """
    self.charset = name
    self.load_data(self.filename)
def save_data(self):
    """
    It saves the actual text content in the current file.
    """
    data = self.get('1.0', 'end')
    data = data.encode(self.charset)
    # try/finally guarantees the handle is closed even when write()
    # raises; the original leaked the open file on error.
    fd = open(self.filename, 'w')
    try:
        fd.write(data)
    finally:
        fd.close()
    self.event_generate('<<SaveData>>')
    type, _ = mimetypes.guess_type(self.filename)
    self.event_generate('<<Save-%s>>' % type)
def save_data_as(self, filename):
    """
    It saves the content of the given AreaVi instance into
    a file whose name is specified in filename.

    filename - Name of the file to save the data.
    """
    # From this point on the widget is bound to the new filename.
    self.filename = filename
    self.save_data()
def is_tag_range(self, name, index0, index1):
    """
    If (index0, index1) lies fully inside one of the ranges tagged
    with `name`, return that range as a (start, end) pair of index
    strings; otherwise fall through and return None.
    """
    ranges = self.tag_ranges(name)
    for ind in xrange(0, len(ranges) - 1, 2):
        if self.is_subrange(index0, index1, ranges[ind].string,
                            ranges[ind + 1].string):
            return ranges[ind].string, ranges[ind + 1].string

def is_in_range(self, index, index0, index1):
    # True when `index` falls inside [index0, index1], whichever order
    # the bounds were given in.
    index2 = self.min(index0, index1)
    index3 = self.max(index0, index1)
    r1 = self.compare(index2, '<=', index)
    r2 = self.compare(index3, '>=', index)
    if r1 and r2: return True
    else: return False

def is_subrange(self, index0, index1, index2, index3):
    # True when [index0, index1] is fully contained in [index2, index3].
    r1 = self.is_in_range(index0, index2, index3)
    r2 = self.is_in_range(index1, index2, index3)
    return r1 and r2

def replace_range(self, data, index0, index1):
    # Swap the text between index0 and index1 for `data`.
    self.delete(index0, index1)
    self.insert(index0, data)

def replace_ranges(self, name, data, index0='1.0', index1='end'):
    """
    It replaces ranges of text that are mapped to a tag name for data between index0
    and index1.
    """
    while True:
        range = self.tag_nextrange(name, index0, index1)
        if not range: break
        self.replace_range(data, *range)

def delete_ranges(self, name, index0='1.0', index1='end'):
    """
    It deletes ranges of text that are mapped to tag name between index0 and index1.
    """
    # Deleting is replacing with the empty string.
    self.replace_ranges(name, '', index0, index1)
def join_ranges(self, name, sep=''):
    """
    Concatenate the text of every range tagged with `name`, appending
    `sep` after each chunk (including the last, as before).
    """
    # str.join is linear; the original `data = data + ind + sep` loop
    # rebuilt the accumulator string on every iteration (quadratic).
    return ''.join(ind + sep for ind in self.get_ranges(name))
def get_ranges(self, name):
    """
    Yield the text of every range tagged with `name`, in order.
    """
    ranges = self.tag_ranges(name)
    for ind in xrange(0, len(ranges) - 1, 2):
        data = self.get(ranges[ind], ranges[ind + 1])
        yield(data)

def tag_prev_occur(self, tag_names, index0, index1, default):
    # End index of the previous range for the FIRST tag in tag_names
    # that has one; `default` when none does.
    for ind in tag_names:
        pos = self.tag_prevrange(ind, index0, index1)
        if pos: return pos[1]
    return default

def tag_next_occur(self, tag_names, index0, index1, default):
    # Start index of the next range for the FIRST tag in tag_names
    # that has one; `default` when none does.
    for ind in tag_names:
        pos = self.tag_nextrange(ind, index0, index1)
        if pos: return pos[0]
    return default
@staticmethod
def areavi_widgets(wid):
    """
    Yield every AreaVi instance living anywhere below `wid` in the
    widget tree. Usage:

    from vyapp.app import root
    for ind in AreaVi.areavi_widgets(root):
        ind.insert('end', 'FOO')

    The snippet above appends 'FOO' to every AreaVi widget that has
    root among its masters.
    """
    # Depth-first walk: AreaVi children are yielded directly, any other
    # widget is recursed into.
    for child in wid.winfo_children():
        if isinstance(child, AreaVi):
            yield child
        else:
            for area in AreaVi.areavi_widgets(child):
                yield area
@staticmethod
def get_opened_files(wid):
    """
    This method returns a dictionary that maps all AreaVi instances
    that have widget as master like:

    from vyapp.app import root
    map = area.get_opened_files(root)

    Where map is a dictionary like:

    map = { '/home/tau/file.c': AreaVi_Instance,
            '/home/tau/file.b': AreaVi_Instance}
    """
    # The original accumulated into a local named `map`, shadowing the
    # builtin; a generator fed to dict() builds the same mapping directly.
    return dict((ind.filename, ind)
                for ind in AreaVi.areavi_widgets(wid))
@staticmethod
def find_all(wid, regex, index='1.0', stopindex='end', *args, **kwargs):
    """
    This method is used to perform pattern searches over all AreaVi instances that have
    wid as master. It basically returns an iterator that corresponds to:

    from vyapp.app import root
    for ind, (match, index0, index1) in area.find_all(root, 'pattern'):
        pass

    Where ind is the AreaVi widget that the pattern matched and match is the match,
    index0 and index1 are the positions in the text.
    """
    for indi in AreaVi.areavi_widgets(wid):
        it = indi.find(regex, index, stopindex, *args, **kwargs)
        for indj in it:
            yield indi, indj

def get_cursor_word(self):
    """
    Return (word, start_index) for the space-delimited chunk that ends
    at the cursor.

    NOTE(review): the early-exit paths return '' (a bare string) while
    the normal path returns a tuple; callers that unpack the result
    will raise on the '' paths — confirm intended.
    """
    if self.compare('insert', '==', 'insert linestart'):
        return ''
    index = self.search(' ', 'insert',
                        stopindex='insert linestart',regexp=True,
                        backwards=True)
    if not index: index = 'insert linestart'
    else: index = '%s +1c' % index
    if self.compare(index, '==', 'insert'): return ''
    data = self.get(index, 'insert')
    return data, index

def match_word(self, wid, delim=' '):
    # Yield (candidate, start_index) for every token in any AreaVi
    # under `wid` whose text contains the word under the cursor.
    # NOTE(review): unpacking here relies on get_cursor_word returning
    # a tuple; its '' early returns would raise — confirm. `delim` is
    # accepted but unused.
    data, index = self.get_cursor_word()
    for area, (chk, pos0, pos1) in self.find_all(wid, '[^ ]*%s[^ ]+' % data):
        yield chk, index

def complete_word(self, wid):
    # Generator-driven completion: each next() replaces the text
    # between `index` and the cursor with the next not-yet-seen
    # candidate from match_word.
    seq = self.match_word(wid)
    table = []
    for data, index in seq:
        if not data in table:
            table.append(data)
        else:
            continue
        self.delete(index, 'insert')
        self.insert(index, data)
        yield
Fix the toggle_range method.
"""
"""
from Tkinter import *
from re import escape
import mimetypes
class AreaVi(Text):
ACTIVE = None
def __init__(self, default_filename, *args, **kwargs):
    """
    This class receives all Text widget arguments
    and one named default_filename which means
    the filename that is saved when no filename
    is specified.

    default_filename:
        The default path file where contents are saved.

    It implements a system of modes to handle
    tkinter keypresses events.
    The method hook can be used to bind events
    to callbacks mapped to specific modes.
    """
    Text.__init__(self, *args, **kwargs)
    # Maps mode id -> whether stock Text bindings stay active (chmode).
    self.setup = dict()
    # Maybe it should be?
    # abspath(default_filename)
    self.default_filename = default_filename
    # The file's path and name.
    self.filename = default_filename
    # Shouldn't it be LAST_COL and MSEL?
    # Mark remembering the column for vertical cursor movement.
    self.last_col = '_last_col_'
    self.mark_set(self.last_col, '1.0')
    # This mark is used in AreaVi.replace_all.
    self.STOP_REPLACE_INDEX = '_stop_replace_index_'
    # Tags have name starting and ending with __
    # def cave(event):
    # AreaVi.ACTIVE = event.widget
    # self.hook(-1, '<FocusIn>', cave)
    # The most recently created widget becomes the active one.
    AreaVi.ACTIVE = self
    self.charset = 'utf-8'
def active(self):
    """
    It is used to create a model of target for plugins
    defining python functions to access the AreaVi instance that was
    set as target.

    Plugins that expose python functions to be executed from vy
    should access AreaVi.ACTIVE when having to manipulate some AreaVi
    instance content.
    """
    AreaVi.ACTIVE = self

def chmode(self, id):
    """
    This function is used to change the AreaVi instance's mode.
    It receives one parameter named id which means the
    mode name.

    area = AreaVi('None')
    area.chmode('INSERT')

    It would make area be in INSERT mode.
    """
    opt = self.setup[id]
    self.id = id
    # Per-widget bindtag names; str(self) makes them unique per widget.
    MODE_X = 'mode%s-1' % self
    MODE_Y = 'mode%s%s' % (self, id)
    # With opt set, the stock 'Text' class bindings stay in the chain,
    # so ordinary keystrokes still edit the widget.
    if opt: self.bindtags((MODE_X, MODE_Y, self, 'Text', '.'))
    else: self.bindtags((MODE_X, MODE_Y, self, '.'))

def add_mode(self, id, opt=False):
    """
    It adds a new mode. The opt argument means whether
    it should propagate the event to the internal text widget callbacks.

    def install(area):
        area.add_mode('MODE')

    The code above would add a mode named MODE to the AreaVi instance.

    def install(area):
        area.add_mode('TYPING', opt=True)

    The code above would add a mode named 'TYPING' in which it is possible
    to edit the content of the AreaVi instance: keystrokes that map to
    printable characters are dropped onto the AreaVi instance that has focus.
    """
    self.setup[id] = opt

def del_mode(self, id):
    """
    Remove a mode. NOTE(review): not implemented yet.
    """
    pass

def hook(self, id, seq, callback):
    """
    This method is used to hook a callback to a sequence
    specified with its mode:

    def callback(event):
        event.widget.insert('An event happened!')

    def install(area):
        area.hook('INSERT', '<Key-i>', callback)

    In the example above, whenever the event <Key-i> happens in mode
    'INSERT' then the function named callback will be called with the
    event object.
    """
    MODE_Y = 'mode%s%s' % (self, id)
    self.bind_class(MODE_Y, seq, callback, add=True)

def unhook(self, id, seq, callback=None):
    """
    Undo a hook. NOTE(review): `callback` is accepted but ignored;
    unbind_class drops every callback bound to (id, seq).
    """
    MODE_Y = 'mode%s%s' % (self, id)
    self.unbind_class(MODE_Y, seq)

def install(self, *args):
    """
    It is a shorthand for AreaVi.hook. It is used as follows:

    def install(area):
        area.install(('MODE1', '<Event1>', callback1),
                     ('MODE2', '<Event2>', callback2),
                     ('MODE3', '<Event3>', callback3), ...)
    """
    for id, seq, callback in args:
        self.hook(id, seq, callback)

def uninstall(self, *args):
    """
    The inverse of install: unhook every (mode, sequence, callback).
    """
    for id, seq, callback in args:
        self.unhook(id, seq, callback)
def append(self, data):
    """
    This method is used to insert data to the end of the AreaVi instance widget
    and place the cursor at the end of the data that was appended. It makes the cursor
    visible.
    """
    self.insert('end', data)
    self.mark_set('insert', 'end')
    self.see('end')

def curline(self):
    """
    This method returns the string that corresponds to the cursor line.
    """
    return self.get('insert linestart', 'insert +1l linestart')

def tag_update(self, name, index0, index1, *args):
    """
    It removes a given tag from index0 to index1 and re adds
    the tag to the ranges of text delimited in args.

    Example:

    DATA_X = 'It is black.\n'
    DATA_Y = 'It is blue.\n'
    text = Text()
    text.pack()
    text.insert('1.0', DATA_X)
    text.insert('2.0', DATA_Y)
    text.tag_add('X', '1.0', '1.0 lineend')
    text.tag_add('Y', '2.0', '2.0 lineend')
    text.tag_config('X', background='black')
    text.tag_config('Y', foreground='blue')
    text.tag_update('X', '1.0', 'end', ('2.0', '2.0 lineend'))

    It removes the X tag from '1.0' to 'end' then adds
    the X tag to the range '2.0' '2.0 lineend'.
    """
    self.tag_remove(name, index0, index1)
    for indi, indj in args:
        self.tag_add(name, indi, indj)

def insee(self, index, data):
    """
    This method inserts data at index position then makes the cursor visible.
    """
    self.insert(index, data)
    self.see('insert')

def cmd_like(self):
    """
    This method retrieves the cursor line then deletes it afterwards.
    """
    data = self.get('insert linestart', 'insert lineend')
    self.delete('insert linestart', 'insert lineend')
    return data

def indref(self, index):
    """
    This is a short hand function. It is used to convert a Text index
    into two integers like:

    a, b = area.indref('insert')

    Now, a and b can be manipulated
    as numbers.
    """
    a, b = self.index(index).split('.')
    return int(a), int(b)
def setcur(self, line, col):
    """
    It is used to set the cursor position at a given index using line
    and col.
    """
    self.mark_set('insert', '%s.%s' % (line, col))
    self.see('insert')

def setcurl(self, line):
    """
    It is used to set the cursor position at a given
    line. It sets the cursor at line.0 position.
    """
    self.mark_set('insert', '%s.%s' % (line, '0'))
    self.see('insert')

def indint(self, index):
    """
    Just a shorthand for:

    a, b = '2.3'.split('.')
    a, b = int(a), int(b)
    """
    a, b = index.split('.')
    return int(a), int(b)

def indcol(self):
    """
    This is a short hand method for getting
    the last col in which the cursor was in.
    It is useful when implementing functions to
    select pieces of text.
    """
    a, b = self.indref(self.last_col)
    return int(a), int(b)

def setcol(self, line, col):
    """
    It sets the mark used by the arrows
    keys and selection state.
    """
    self.mark_set(self.last_col, '%s.%s' % (line, col))
def indcur(self):
    """
    It returns the actual line, col for the
    cursor position. So, the values can be
    manipulated with integers.
    """
    a, b = self.indref('insert')
    return int(a), int(b)

def seecur(self):
    """
    Just a shorthand for area.see('insert')
    which makes the cursor visible wherever it is in.
    """
    self.see('insert')

def inset(self, index):
    """
    Just a shorthand for area.mark_set('insert', index)
    so we spare some typing.
    """
    self.mark_set('insert', index)

def is_end(self):
    """
    NOTE(review): despite the name, this returns True while the cursor
    is NOT yet on the last line (see its use in down()), i.e. whether
    moving down is still possible — confirm before relying on the name.
    """
    # I have to use 'end -1l linestart' since it seems the 'end' tag
    # corresponds to a one line after the last visible line.
    # So last line lineend != 'end'.
    return self.compare('insert linestart', '!=', 'end -1l linestart')

def is_start(self):
    """
    NOTE(review): like is_end, the result is inverted relative to the
    name — True while the cursor line is NOT '1.0', i.e. moving up is
    still possible (see its use in up()).
    """
    return self.compare('insert linestart', '!=', '1.0')
def down(self):
    """
    It sets the cursor position one line down.
    """
    if self.is_end():
        # We first check if it is at the end
        # so we avoid the cursor jumping at odd positions.
        a, b = self.indcol()
        c, d = self.indcur()
        # Keep the remembered column (last_col) when moving vertically.
        self.setcur(c + 1, b)

def up(self):
    """
    It sets the cursor one line up.
    """
    if self.is_start():
        a, b = self.indcol()
        c, d = self.indcur()
        self.setcur(c - 1, b)

def left(self):
    """
    It moves the cursor one character left.
    """
    self.mark_set('insert', 'insert -1c')
    # The mark used by self.down, self.up.
    self.mark_set(self.last_col, 'insert')

def right(self):
    """
    It moves the cursor one character right.
    """
    self.mark_set('insert', 'insert +1c')
    # The mark used by self.down, self.up.
    self.mark_set(self.last_col, 'insert')

def start_selection(self):
    """
    It sets the mark sel_start to the insert position.
    So, when sel_up, sel_down, sel_right, sel_left are
    called then they will select a region from this mark.
    """
    self.mark_set('_sel_start_', 'insert')

def start_block_selection(self):
    # Anchor mark for block (rectangular) selection.
    self.mark_set('_block_sel_start_', 'insert')

def is_add_up(self, index):
    """
    It checks whether the selection must be
    removed or added.

    If it returns True then the selection must be
    removed. True means that the '_sel_start_'
    mark is positioned above the cursor position.
    So, it must remove the selection instead of
    adding it.
    """
    return self.compare('%s linestart' % index, '<=', 'insert linestart')
def rmsel(self, index0, index1):
    """
    It removes the tag sel from the range that is delimited by index0 and index1
    regardless whether index0 <= index1.
    """
    index2 = self.min(index0, index1)
    index3 = self.max(index0, index1)
    self.tag_remove('sel', index2, index3)

def addsel(self, index0, index1):
    """
    It adds the tag sel to the range delimited by index0 and index1 regardless
    whether index0 <= index1.
    """
    index2 = self.min(index0, index1)
    index3 = self.max(index0, index1)
    self.tag_add('sel', index2, index3)

def min(self, index0, index1):
    """
    It returns the min between index0 and index1.
    """
    # NOTE(review): shadows the builtin name `min` as a method;
    # comparison is by text position, not by string value.
    if self.compare(index0, '<=', index1):
        return index0
    else:
        return index1

def max(self, index0, index1):
    """
    It returns the max between index0 and index1.
    """
    if self.compare(index0, '<=', index1):
        return index1
    else:
        return index0
def sel_up(self):
    """
    It adds 'sel' one line up the 'insert' position
    and sets the cursor one line up.
    """
    # Drop the old selection span, move the cursor, then re-add the
    # span between the anchor mark and the new cursor position.
    index0 = self.min('_sel_start_', 'insert')
    index1 = self.max('_sel_start_', 'insert')
    self.rmsel(index0, index1)
    self.up()
    index0 = self.min('_sel_start_', 'insert')
    index1 = self.max('_sel_start_', 'insert')
    self.addsel(index0, index1)

def is_add_down(self, index):
    """
    It returns True if the cursor is positioned below
    the initial mark for selection.
    It determines if the selection must be removed or added when
    sel_down is called.
    """
    return self.compare('%s linestart' % index, '>=', 'insert linestart')

def sel_down(self):
    """
    It adds or removes selection one line down.
    """
    index0 = self.min('_sel_start_', 'insert')
    index1 = self.max('_sel_start_', 'insert')
    self.rmsel(index0, index1)
    self.down()
    index0 = self.min('_sel_start_', 'insert')
    index1 = self.max('_sel_start_', 'insert')
    self.addsel(index0, index1)

def is_add_right(self, index):
    """
    It returns True if the cursor is positioned at the left
    of the initial selection mark. It is useful for sel_right method.
    """
    return self.compare(index, '>=', 'insert')

def sel_right(self):
    """
    It adds or removes selection one character right.
    """
    index0 = self.min('_sel_start_', 'insert')
    index1 = self.max('_sel_start_', 'insert')
    self.rmsel(index0, index1)
    self.right()
    index0 = self.min('_sel_start_', 'insert')
    index1 = self.max('_sel_start_', 'insert')
    self.addsel(index0, index1)

def is_add_left(self, index):
    """
    It returns True if the cursor is positioned at the right of
    the initial mark selection.
    """
    return self.compare(index, '<=', 'insert')

def sel_left(self):
    """
    It adds or removes selection one character left.
    """
    index0 = self.min('_sel_start_', 'insert')
    index1 = self.max('_sel_start_', 'insert')
    self.rmsel(index0, index1)
    self.left()
    index0 = self.min('_sel_start_', 'insert')
    index1 = self.max('_sel_start_', 'insert')
    self.addsel(index0, index1)

def indmsel(self):
    """
    It is just a shorthand for getting the last selection mark.
    """
    a, b = self.indref('_sel_start_')
    return int(a), int(b)
def addblock(self, index0, index1):
    """
    It adds block (rectangular) selection from index0 to index1.
    """
    index2 = self.min(index0, index1)
    index3 = self.max(index0, index1)
    a, b = self.indint(index2)
    c, d = self.indint(index3)
    # The column bounds do not change per row; hoisted out of the loop
    # (the original recomputed them on every iteration).
    e = min(b, d)
    f = max(b, d)
    for ind in xrange(a, c + 1):
        self.addsel('%s.%s' % (ind, e), '%s.%s' % (ind, f))
def rmblock(self, index0, index1):
    """
    It removes block (rectangular) selection from index0 to index1.
    """
    index2 = self.min(index0, index1)
    index3 = self.max(index0, index1)
    a, b = self.indint(index2)
    c, d = self.indint(index3)
    # Column bounds hoisted out of the loop (loop-invariant).
    e = min(b, d)
    f = max(b, d)
    for ind in xrange(a, c + 1):
        self.rmsel('%s.%s' % (ind, e), '%s.%s' % (ind, f))
def block_down(self):
    """
    It adds or removes block selection one line down.
    """
    # Remove the rectangle between the anchor and the current position,
    # move down, then re-add it for the new position.
    a, b = self.indcol()
    c, d = self.indcur()
    index = self.index('_block_sel_start_')
    self.rmblock(index, '%s.%s' % (c, b))
    self.down()
    a, b = self.indcol()
    c, d = self.indcur()
    self.addblock(index, '%s.%s' % (c, b))

def block_up(self):
    """
    It adds or removes block selection one line up.
    """
    a, b = self.indcol()
    c, d = self.indcur()
    index = self.index('_block_sel_start_')
    self.rmblock(index, '%s.%s' % (c, b))
    self.up()
    a, b = self.indcol()
    c, d = self.indcur()
    self.addblock(index, '%s.%s' % (c, b))

def is_line_start(self):
    """
    NOTE(review): despite the name, this returns True while the cursor
    is NOT at the start of its line (mirrors is_end/is_start above).
    """
    return self.compare('insert', '!=', 'insert linestart')

def block_left(self):
    """
    It adds block selection to the left.
    """
    a, b = self.indcol()
    c, d = self.indcur()
    index = self.index('_block_sel_start_')
    self.rmblock(index, '%s.%s' % (c, b))
    self.left()
    a, b = self.indcol()
    c, d = self.indcur()
    self.addblock(index, '%s.%s' % (c, b))

def is_line_end(self):
    """
    NOTE(review): despite the name, this returns True while the cursor
    is NOT at the end of its line.
    """
    return self.compare('insert', '!=', 'insert lineend')

def block_right(self):
    """
    It adds a block selection to the right.
    """
    a, b = self.indcol()
    c, d = self.indcur()
    index = self.index('_block_sel_start_')
    self.rmblock(index, '%s.%s' % (c, b))
    self.right()
    a, b = self.indcol()
    c, d = self.indcur()
    self.addblock(index, '%s.%s' % (c, b))
def clear_selection(self):
    """
    It removes the 'sel' tag from all ranges.
    """
    try:
        self.tag_remove('sel', 'sel.first', 'sel.last')
    except TclError:
        # 'sel.first' does not exist when nothing is selected; catching
        # only TclError keeps the best-effort behavior without hiding
        # unrelated bugs the way the broad `except Exception` did.
        pass
def select_char(self):
    """
    It adds 'sel' a char ahead the cursor position.
    """
    self.addsel('insert', 'insert +1c')

def unselect_char(self):
    """
    It removes 'sel' a char from the cursor position.
    """
    self.rmsel('insert', 'insert +1c')

def clchar(self):
    """
    It deletes a char from the cursor position.
    """
    self.edit_separator()
    self.delete('insert', 'insert +1c')

def do_undo(self):
    """
    It does undo.
    """
    # edit_undo raises TclError when the undo stack is empty.
    try:
        self.edit_undo()
    except TclError:
        pass

def do_redo(self):
    """
    It redoes.
    """
    try:
        self.edit_redo()
    except TclError:
        pass

def sel_text_start(self):
    """
    It selects all text from insert position to the start position
    of the text.
    """
    index = self.index('insert')
    self.go_text_start()
    self.addsel(index, 'insert')

def sel_text_end(self):
    """
    It selects all text from the insert position to the end of the text.
    """
    index = self.index('insert')
    self.go_text_end()
    self.addsel(index, 'insert')

def go_text_start(self):
    """
    It goes to the first position in the text.
    """
    self.mark_set('insert', '1.0')
    self.see('insert')

def go_text_end(self):
    """
    It goes to the end of the text.
    """
    self.mark_set('insert', 'end linestart')
    self.see('insert')
def sel_line_start(self):
    """
    It adds selection from the insert position to the
    start of the line.
    """
    index = self.index('insert')
    self.go_line_start()
    self.addsel(index, 'insert')

def sel_line_end(self):
    """
    It selects all text from insert position to the end of the line.
    """
    index = self.index('insert')
    self.go_line_end()
    self.addsel(index, 'insert')

def go_line_start(self):
    """
    It goes to the beginning of the cursor position line.
    """
    self.mark_set('insert', 'insert linestart')

def go_line_end(self):
    """
    It goes to the end of the cursor position line.
    """
    self.mark_set('insert', 'insert lineend')

def go_next_word(self):
    """
    It puts the cursor on the beginning of the next word.
    NOTE(review): Tcl's regex \M matches the END of a word — verify
    'beginning' in this docstring against actual behavior.
    """
    self.seek_next_down('\M')

def go_prev_word(self):
    """
    It puts the cursor in the beginning of the previous word.
    """
    self.seek_next_up('\M')

def go_next_sym(self, chars):
    """
    It puts the cursor on the next occurrence of the symbols in chars.
    """
    # Escape each symbol and turn the set into an alternation regex.
    chars = map(lambda ind: escape(ind), chars)
    REG = '|'.join(chars)
    self.seek_next_down(REG)

def go_prev_sym(self, chars):
    """
    It puts the cursor on the previous occurrence of the symbols in chars.
    """
    chars = map(lambda ind: escape(ind), chars)
    REG = '|'.join(chars)
    self.seek_next_up(REG)

def cllin(self):
    """
    It deletes the cursor position line, makes the cursor visible
    and adds a separator to the undo stack.
    """
    self.edit_separator()
    self.delete('insert linestart', 'insert +1l linestart')
    self.see('insert')
def cpsel(self):
    """
    It copies to the clip board ranges of text
    that are selected and removes the selection.
    """
    # NOTE(review): 'sel.first' raises TclError when nothing is
    # selected — presumably callers only invoke this with a selection.
    data = self.join_ranges('sel')
    self.clipboard_clear()
    self.clipboard_append(data)
    self.tag_remove('sel', 'sel.first', 'sel.last')

def cpblock(self):
    """
    It copies blocks of text that are selected
    with a separator '\n'.
    """
    data = self.join_ranges('sel', '\n')
    self.clipboard_clear()
    self.clipboard_append(data)
    self.tag_remove('sel', 'sel.first', 'sel.last')

def ctblock(self):
    """
    It cuts blocks of text with a separator '\n'.
    """
    data = self.join_ranges('sel', '\n')
    self.clipboard_clear()
    self.clipboard_append(data)
    self.edit_separator()
    self.delete_ranges('sel')

def ctsel(self):
    """
    It cuts the selected text.
    """
    data = self.join_ranges('sel')
    self.clipboard_clear()
    self.clipboard_append(data)
    self.edit_separator()
    self.delete_ranges('sel')

def clsel(self):
    """
    It deletes all selected text.
    """
    self.edit_separator()
    self.delete_ranges('sel')

def ptsel(self):
    """
    It pastes over the cursor position data from the clipboard
    and adds a separator.
    """
    data = self.clipboard_get()
    self.edit_separator()
    self.insert('insert', data)

def ptsel_after(self):
    """
    It pastes one line after the cursor position data from clipboard
    and adds a separator.
    """
    data = self.clipboard_get()
    self.edit_separator()
    self.insert('insert +1l linestart', data)

def ptsel_before(self):
    """
    It pastes data from the clipboard one line before the cursor
    position and adds a separator.
    """
    data = self.clipboard_get()
    self.edit_separator()
    self.insert('insert linestart', data)
def select_line(self):
    """
    It adds selection to the cursor position line.
    """
    self.tag_add('sel', 'insert linestart', 'insert +1l linestart')

def unselect_line(self):
    """
    It removes selection from the cursor position line.
    """
    self.tag_remove('sel', 'insert linestart', 'insert +1l linestart')

def toggle_line_selection(self):
    """
    Toggle the 'sel' tag on the cursor line.
    """
    self.toggle_sel('insert linestart', 'insert +1l linestart')

def toggle_sel(self, index0, index1):
    """
    Toggle the 'sel' tag on the given range.
    """
    self.toggle_range('sel', index0, index1)

def toggle_range(self, name, index0, index1):
    # Normalize the range order first; then remove the tag when the
    # range is already inside a tagged range, add it otherwise.
    index2 = index0
    index0 = self.min(index0, index1)
    index1 = self.max(index2, index1)
    map = self.is_tag_range(name, index0, index1)
    if map:
        self.tag_remove(name, index0, index1)
    else:
        self.tag_add(name, index0, index1)

def select_word(self):
    """
    It selects a word from the cursor position.
    """
    # Word boundaries: nearest non-word char on each side of the
    # cursor, limited to the current line.
    index1 = self.search('\W', 'insert', regexp=True, stopindex='insert linestart', backwards=True)
    index2 = self.search('\W', 'insert', regexp=True, stopindex='insert lineend')
    self.tag_add('sel', 'insert linestart' if not index1 else '%s +1c' % index1,
                 'insert lineend' if not index2 else index2)

def select_seq(self):
    # Like select_word, but delimited by spaces instead of \W.
    index1 = self.search(' ', 'insert', regexp=True, stopindex='insert linestart', backwards=True)
    index2 = self.search(' ', 'insert', regexp=True, stopindex='insert lineend')
    self.tag_add('sel', 'insert linestart' if not index1 else '%s +1c' % index1,
                 'insert lineend' if not index2 else index2)
def scroll_line_up(self):
"""
It scrolls one line up
"""
self.yview(SCROLL, -1, 'units')
is_visible = self.dlineinfo('insert')
if not is_visible:
self.mark_set('insert', 'insert -1l')
def scroll_line_down(self):
"""
It scrolls one line down.
"""
self.yview(SCROLL, 1, 'units')
is_visible = self.dlineinfo('insert')
if not is_visible:
self.mark_set('insert', 'insert +1l')
def scroll_page_down(self):
    """
    Scroll one page down and move the cursor to the top-left visible
    character.
    """
    self.yview(SCROLL, 1, 'page')
    self.mark_set('insert', '@0,0')
def scroll_page_up(self):
    """
    Scroll one page up and move the cursor to the top-left visible
    character.
    """
    self.yview(SCROLL, -1, 'page')
    self.mark_set('insert', '@0,0')
def insert_line_down(self):
    """
    Open a new empty line below the cursor line and move onto it.
    """
    self.edit_separator()
    self.insert('insert +1l linestart', '\n')
    self.mark_set('insert', 'insert +1l linestart')
    self.see('insert')
def select_all(self):
    """
    Add the 'sel' tag over the entire buffer.
    """
    self.tag_add('sel', '1.0', 'end')
def insert_line_up(self):
    """
    Open a new empty line above the cursor line and move onto it.
    """
    self.edit_separator()
    self.insert('insert linestart', '\n')
    self.mark_set('insert', 'insert -1l linestart')
    self.see('insert')
def shift_sel_right(self, width, char):
    """
    Shift the selected lines `width` copies of `char` to the right.
    """
    # Only the row numbers are needed; the columns were unused locals.
    srow, _ = self.indref('sel.first')
    erow, _ = self.indref('sel.last')
    self.shift_right(srow, erow, width, char)
def shift_sel_left(self, width):
    """
    Shift the selected lines `width` characters to the left.
    """
    # Only the row numbers are needed; the columns were unused locals.
    srow, _ = self.indref('sel.first')
    erow, _ = self.indref('sel.last')
    self.shift_left(srow, erow, width)
def shift_right(self, srow, erow, width, char):
    """
    Indent the lines srow..erow by inserting `width` copies of `char`
    at each line start. Useful for shifting blocks of source code.
    """
    self.edit_separator()
    padding = width * char
    for row in xrange(srow, erow + 1):
        self.insert('%s.0' % row, padding)
def shift_left(self, srow, erow, width):
    """
    Dedent the lines srow..erow by deleting the first `width`
    characters of each line. Useful for shifting blocks of source
    code.
    """
    self.edit_separator()
    for row in xrange(srow, erow + 1):
        self.delete('%s.0' % row, '%s.%s' % (row, width))
def collect(self, name, regex, *args, **kwargs):
    """
    Iterate over AreaVi.find matches restricted to the ranges of the
    tag `name`. Don't insert or delete data while consuming the
    iterator.
    """
    # It should be built on top of nextrange.
    # Renamed from `map`, which shadowed the builtin.
    ranges = self.tag_ranges(name)
    for indi in range(0, len(ranges) - 1, 2):
        seq = self.find(regex, ranges[indi], ranges[indi + 1], *args, **kwargs)
        for indj in seq:
            yield indj
def rep_match_ranges(self, name, regex, data, index='1.0', stopindex='end',
                     *args, **kwargs):
    """
    Replace all occurrences of `regex` with `data`, restricted to the
    ranges of the tag `name`.

    name   - Name of the tag.
    regex  - The pattern.
    data   - The replacement (may be a callable, see replace_all).
    args   - Arguments forwarded to AreaVi.find.
    kwargs - Keyword arguments forwarded to AreaVi.find.
    """
    while True:
        # Renamed from `map`, which shadowed the builtin.
        span = self.tag_nextrange(name, index, stopindex)
        if not span: break
        index3, index4 = span
        index = self.replace_all(regex, data, index3, index4, *args, **kwargs)
def setup_tags_conf(self, kwargs):
    """
    Configure several tags at once from a mapping like:

        kwargs = {'tag_name': {'background': 'blue'}}

    which sets background='blue' on the tag 'tag_name' and lowers it.
    """
    # Use a distinct loop variable: the original reused `kwargs` as
    # the loop target, shadowing the dict being iterated.
    for name, conf in kwargs.iteritems():
        self.tag_config(name, **conf)
        self.tag_lower(name)
def map_matches(self, name, matches):
    """
    Add the tag `name` to every (chunk, start, end) range produced by
    AreaVi.find or AreaVi.collect.

    name    - The tag to be added.
    matches - An iterator from AreaVi.find or AreaVi.collect.
    """
    for _, index0, index1 in matches:
        self.tag_add(name, index0, index1)
def tokenize(self, *args, **kwargs):
    """
    Split the widget contents on a regex: yields every match and,
    between matches, the unmatched gaps, each as (data, start, end).
    Arguments are forwarded to AreaVi.find.
    """
    prev = '1.0'
    for data, start, end in self.find(*args, **kwargs):
        # Emit the gap before this match, if any.
        if self.compare(start, '>', prev):
            yield(self.get(prev, start), prev, start)
        yield(data, start, end)
        prev = end
def find(self, regex, index='1.0', stopindex='end', exact=None, regexp=True, nocase=None,
         elide=None, nolinestop=None, step=''):
    """
    Yield (chunk, start, end) for every match of `regex` between
    `index` and `stopindex`. Built on top of Text.search.
    """
    count = IntVar()
    while True:
        index = self.search(regex, index, stopindex, exact=exact, nocase=nocase,
                            nolinestop=nolinestop, regexp=regexp, elide=elide, count=count)
        if not index:
            break
        # Renamed from `len`, which shadowed the builtin.
        size = count.get()
        tmp = '%s +%sc' % (index, size)
        chunk = self.get(index, tmp)
        pos0 = self.index(index)
        pos1 = self.index(tmp)
        # `step` lets callers skip extra chars between matches.
        index = '%s%s' % (tmp, step)
        yield(chunk, pos0, pos1)
def search(self, pattern, index, stopindex=None, forwards=None,
           backwards=None, exact=None, regexp=None, nocase=None,
           count=None, elide=None, nolinestop=None):
    '''Standard search method, but with support for the nolinestop
    option which is new in tk 8.5 but not supported by tkinter out
    of the box.
    '''
    # Build the raw Tcl `search` command by hand so -nolinestop can
    # be passed through.
    args = [self._w, 'search']
    if forwards: args.append('-forwards')
    if backwards: args.append('-backwards')
    if exact: args.append('-exact')
    if regexp: args.append('-regexp')
    if nocase: args.append('-nocase')
    if elide: args.append('-elide')
    if nolinestop: args.append("-nolinestop")
    if count: args.append('-count'); args.append(count)
    # '--' ends option parsing when the pattern itself starts with '-'.
    if pattern and pattern[0] == '-': args.append('--')
    args.append(pattern)
    args.append(index)
    if stopindex: args.append(stopindex)
    return str(self.tk.call(tuple(args)))
def seek_next_up(self, regex, index0='insert', stopindex='1.0', exact=None, regexp=True,
                 nocase=None, elide=None, nolinestop=None):
    """
    Search backwards from the cursor for `regex`. On success move the
    cursor to the match start, scroll it into view and return the
    (start, end) index pair; return None when there is no match.
    """
    size = IntVar()
    start = self.search(regex, index0, stopindex=stopindex, regexp=regexp,
                        exact=exact, nocase=nocase, elide=elide,
                        nolinestop=nolinestop, backwards=True, count=size)
    if not start: return
    end = self.index('%s +%sc' % (start, size.get()))
    self.mark_set('insert', start)
    self.see('insert')
    return start, end
def seek_next_down(self, regex, index0='insert', stopindex='end', exact=None, regexp=True,
                   nocase=None, elide=None, nolinestop=None):
    """
    Search forwards from the cursor for `regex`. On success move the
    cursor to the match end, scroll it into view and return the
    (start, end) index pair; return None when there is no match.
    """
    size = IntVar()
    start = self.search(regex, index0, stopindex=stopindex, regexp=regexp,
                        exact=exact, nocase=nocase, elide=elide,
                        nolinestop=nolinestop, count=size)
    if not start: return
    end = self.index('%s +%sc' % (start, size.get()))
    self.mark_set('insert', end)
    self.see('insert')
    return start, end
def pick_next_up(self, name, *args, **kwargs):
    """
    Like seek_next_up, but additionally tags the match with `name`.
    Returns the (start, end) pair, or None when nothing matched.
    """
    index = self.seek_next_up(*args, **kwargs)
    if not index:
        return
    self.tag_add(name, *index)
    return index
def pick_next_down(self, name, *args, **kwargs):
    """
    Like seek_next_down, but additionally tags the match with `name`.
    Returns the (start, end) pair, or None when nothing matched.
    """
    index = self.seek_next_down(*args, **kwargs)
    if not index:
        return
    self.tag_add(name, *index)
    return index
def replace(self, regex, data, index=None, stopindex=None, forwards=None,
            backwards=None, exact=None, regexp=True, nocase=None, elide=None, nolinestop=None):
    """
    Replace the next occurrence of `regex` with `data` and return
    (match_start, len(data)); return None when there is no match.

    `data` may be a callable receiving the match's (start, end)
    indexes; its return value is used as the replacement text.
    """
    count = IntVar()
    index = self.search(regex, index, stopindex, forwards=forwards, backwards=backwards, exact=exact, nocase=nocase,
                        nolinestop=nolinestop, regexp=regexp, elide=elide, count=count)
    if not index: return
    if callable(data): data = data(index, self.index('%s +%sc' % (index, count.get())))
    # Resolve the match end before mutating the buffer.
    index0 = self.index('%s +%sc' % (index, count.get()))
    self.delete(index, index0)
    self.insert(index, data)
    return index, len(data)
def replace_all(self, regex, data, index='1.0', stopindex='end', exact=None, regexp=True, nocase=None,
                elide=None, nolinestop=None):
    """
    Replace all occurrences of `regex` in the range with `data`
    (which may be a callable, see AreaVi.replace). Returns the index
    just past the last replacement.
    """
    # It is needed because the range will grow
    # when data is inserted, the intent is searching
    # over a pre defined range: a mark tracks the moving stop index.
    self.mark_set(self.STOP_REPLACE_INDEX, stopindex)
    while True:
        map = self.replace(regex, data, index, self.STOP_REPLACE_INDEX, exact=exact, nocase=nocase,
                           nolinestop=nolinestop, regexp=regexp, elide=elide)
        if not map:
            return index
        index, size = map
        # Resume searching just past the inserted replacement.
        index = self.index('%s +%sc' % (index, size))
def get_paren_search_dir(self, start, end):
    """
    Decide the bracket-matching search direction for the char under
    the cursor: False (forwards) for the opening char, True
    (backwards) for the closing one, None for anything else.
    """
    char = self.get('insert', 'insert +1c')
    if char == start:
        return False
    if char == end:
        return True
    return None
def get_paren_search_sign(self, start, end):
    """
    Return the stop-index offset sign for bracket matching: '+' when
    the cursor sits on the opening char, '-' on the closing one,
    None otherwise.
    """
    char = self.get('insert', 'insert +1c')
    if char == start:
        return '+'
    if char == end:
        return '-'
    return None
def select_case_pair(self, pair, MAX=1500):
    """
    Select the region between the cursor and its matching bracket
    (as located by case_pair), searching at most MAX characters.
    """
    index = self.case_pair(MAX, *pair)
    if not index: return
    start = self.min(index, 'insert')
    if self.compare(start, '==', 'insert'): start = '%s +1c' % start
    end = self.max(index, 'insert')
    # BUG FIX: the original adjusted `min` a second time here
    # (copy-paste error) instead of extending `max`.
    if self.compare(end, '==', 'insert'): end = '%s +1c' % end
    self.tag_add('sel', start, end)
def case_pair(self, max, start='(', end=')'):
    """
    Once this method is called, it returns an index for the next
    matching parenthesis or '' if the char over the cursor
    isn't either '(' or ')' (or no match is found within `max`
    characters).
    """
    dir = self.get_paren_search_dir(start, end)
    # If dir is None then there is no match.
    if dir == None: return ''
    # Escape both delimiters so e.g. '(' is matched literally.
    REG = '\%s|\%s' % (start, end)
    sign = self.get_paren_search_sign(start, end)
    count = 0
    # If we are searching fowards we don't need
    # to add 1c.
    index = 'insert %s' % ('+1c' if dir else '')
    size = IntVar(0)
    while True:
        index = self.search(REG, index = index,
                            stopindex = 'insert %s%sc' % (sign, max),
                            count = size,
                            backwards = dir,
                            regexp = True)
        if not index: return ''
        char = self.get(index, '%s +1c' % index)
        # Depth counter: +1 on an opener, -1 on a closer.
        count = count + (1 if char == start else -1)
        if not count:
            return index
        # When we are searching backwards we don't need
        # to set a character back because index will point
        # to the start of the match.
        index = '%s %s' % (index, '+1c' if not dir else '')
def clear_data(self):
    """
    Wipe the widget contents, reset the filename to the default and
    broadcast the <<ClearData>> event.
    """
    import os
    self.filename = os.path.abspath(self.default_filename)
    self.delete('1.0', 'end')
    self.event_generate('<<ClearData>>')
def load_data(self, filename):
    """
    Load the contents of `filename` into this AreaVi instance and
    broadcast <<LoadData>> plus a mimetype-specific <<Load-...>>
    event.

    filename - Name of the file.
    """
    import os
    filename = os.path.abspath(filename)
    self.filename = filename
    # `with` guarantees the descriptor is closed even when read()
    # raises (the original leaked it on exceptions).
    with open(filename, 'r') as fd:
        data = fd.read()
    # i could generate a tk event here.
    try:
        data = data.decode(self.charset)
    except UnicodeDecodeError:
        self.charset = ''
    self.delete('1.0', 'end')
    self.insert('1.0', data)
    self.event_generate('<<LoadData>>')
    # Renamed from `type`, which shadowed the builtin.
    mimetype, _ = mimetypes.guess_type(self.filename)
    self.event_generate('<<Load-%s>>' % mimetype)
def decode(self, name):
    """
    Switch the charset used to decode the current file and reload it.
    """
    self.charset = name
    self.load_data(self.filename)
def save_data(self):
    """
    Save the current buffer contents to self.filename, encoded with
    self.charset, then broadcast <<SaveData>> and a mimetype-specific
    <<Save-...>> event.
    """
    data = self.get('1.0', 'end')
    data = data.encode(self.charset)
    # `with` guarantees the descriptor is closed/flushed even when
    # write() raises (the original leaked it on exceptions).
    with open(self.filename, 'w') as fd:
        fd.write(data)
    self.event_generate('<<SaveData>>')
    # Renamed from `type`, which shadowed the builtin.
    mimetype, _ = mimetypes.guess_type(self.filename)
    self.event_generate('<<Save-%s>>' % mimetype)
def save_data_as(self, filename):
    """
    Save the buffer contents to `filename`, which also becomes the
    widget's current file.

    filename - Name of the file to save the data.
    """
    self.filename = filename
    self.save_data()
def is_tag_range(self, name, index0, index1):
    """
    Return the (start, end) index strings of the first `name` range
    that fully contains (index0, index1); return None (implicitly)
    when no range does.
    """
    ranges = self.tag_ranges(name)
    for ind in xrange(0, len(ranges) - 1, 2):
        if self.is_subrange(index0, index1, ranges[ind].string,
                            ranges[ind + 1].string):
            return ranges[ind].string, ranges[ind + 1].string
def is_in_range(self, index, index0, index1):
    """
    Return True when `index` lies inside the range delimited by
    index0 and index1 (given in either order), inclusive.
    """
    lower = self.min(index0, index1)
    upper = self.max(index0, index1)
    return self.compare(lower, '<=', index) and self.compare(upper, '>=', index)
def is_subrange(self, index0, index1, index2, index3):
    """
    Return True when the range (index0, index1) lies entirely inside
    the range (index2, index3).
    """
    r1 = self.is_in_range(index0, index2, index3)
    r2 = self.is_in_range(index1, index2, index3)
    return r1 and r2
def replace_range(self, data, index0, index1):
    """
    Replace the text between index0 and index1 with `data`.
    """
    self.delete(index0, index1)
    self.insert(index0, data)
def replace_ranges(self, name, data, index0='1.0', index1='end'):
    """
    Replace every range of text tagged `name` between index0 and
    index1 with `data`.
    """
    while True:
        # Renamed from `range`, which shadowed the builtin.
        span = self.tag_nextrange(name, index0, index1)
        if not span: break
        self.replace_range(data, *span)
def delete_ranges(self, name, index0='1.0', index1='end'):
    """
    Delete every range of text tagged `name` between index0 and
    index1 (replacement with the empty string).
    """
    self.replace_ranges(name, '', index0, index1)
def join_ranges(self, name, sep=''):
    """
    Concatenate the text of every range tagged `name`, appending
    `sep` after each chunk, and return the result.
    """
    # str.join avoids the quadratic `+=` concatenation the original
    # performed in a loop.
    return ''.join(chunk + sep for chunk in self.get_ranges(name))
def get_ranges(self, name):
    """
    Yield the text chunk covered by each range of the tag `name`.
    """
    limits = self.tag_ranges(name)
    for pos in xrange(0, len(limits) - 1, 2):
        yield(self.get(limits[pos], limits[pos + 1]))
def tag_prev_occur(self, tag_names, index0, index1, default):
    """
    Return the end index of the closest previous range of any tag in
    `tag_names` between index0 and index1, or `default` when none of
    the tags has a range there.
    """
    for ind in tag_names:
        pos = self.tag_prevrange(ind, index0, index1)
        if pos: return pos[1]
    return default
def tag_next_occur(self, tag_names, index0, index1, default):
    """
    Return the start index of the closest next range of any tag in
    `tag_names` between index0 and index1, or `default` when none of
    the tags has a range there.
    """
    for ind in tag_names:
        pos = self.tag_nextrange(ind, index0, index1)
        if pos: return pos[0]
    return default
@staticmethod
def areavi_widgets(wid):
    """
    Recursively yield every AreaVi instance found under the master
    widget `wid`. It is used like:

        from vyapp.app import root
        for ind in AreaVi.areavi_widgets(root):
            ind.insert('end', 'FOO')

    The code above would insert 'FOO' at the end of all AreaVi
    widgets that have root as one of their master widgets.
    """
    for ind in wid.winfo_children():
        if isinstance(ind, AreaVi):
            yield ind
        else:
            # Not an AreaVi: descend into its children.
            for ind in AreaVi.areavi_widgets(ind):
                yield ind
@staticmethod
def get_opened_files(wid):
    """
    Return a dictionary mapping filename -> AreaVi instance for every
    AreaVi widget found under `wid`:

        from vyapp.app import root
        map = AreaVi.get_opened_files(root)
        # {'/home/tau/file.c': areavi_instance, ...}
    """
    return dict((ind.filename, ind)
                for ind in AreaVi.areavi_widgets(wid))
@staticmethod
def find_all(wid, regex, index='1.0', stopindex='end', *args, **kwargs):
    """
    Perform pattern searches over all AreaVi instances that have
    `wid` as master. It returns an iterator used like:

        from vyapp.app import root
        for ind, (match, index0, index1) in AreaVi.find_all(root, 'pattern'):
            pass

    where ind is the AreaVi widget the pattern matched in, match is
    the matched chunk and index0/index1 its positions in the text.
    """
    for indi in AreaVi.areavi_widgets(wid):
        it = indi.find(regex, index, stopindex, *args, **kwargs)
        for indj in it:
            yield indi, indj
def get_cursor_word(self):
    """
    Return (word, start_index) for the space-delimited word ending at
    the cursor.

    NOTE(review): on the early-exit paths this returns a bare ''
    instead of a 2-tuple, so callers that unpack the result (e.g.
    match_word) raise ValueError at a line start — confirm whether
    that is intended.
    """
    if self.compare('insert', '==', 'insert linestart'):
        return ''
    index = self.search(' ', 'insert',
                        stopindex='insert linestart',regexp=True,
                        backwards=True)
    # Fall back to the line start when no space precedes the cursor.
    if not index: index = 'insert linestart'
    else: index = '%s +1c' % index
    if self.compare(index, '==', 'insert'): return ''
    data = self.get(index, 'insert')
    return data, index
def match_word(self, wid, delim=' '):
    """
    Yield (match, start_index) completion candidates for the word
    ending at the cursor, searching every AreaVi widget under `wid`.

    NOTE(review): `delim` is accepted but never used, and
    get_cursor_word can return a bare '' which breaks the tuple
    unpacking below — confirm both.
    """
    data, index = self.get_cursor_word()
    for area, (chk, pos0, pos1) in self.find_all(wid, '[^ ]*%s[^ ]+' % data):
        yield chk, index
def complete_word(self, wid):
    """
    Generator-driven word completion: each iteration replaces the
    word under the cursor with the next distinct completion found
    among all AreaVi widgets under `wid`, then yields control back.
    """
    seq = self.match_word(wid)
    # Completions already offered in this cycle.
    table = []
    for data, index in seq:
        if not data in table:
            table.append(data)
        else:
            continue
        self.delete(index, 'insert')
        self.insert(index, data)
        yield
|
#!/usr/bin/env python
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from tests.unit import AWSMockServiceTestCase, MockServiceWithConfigTestCase
from tests.compat import mock
from boto.sqs.connection import SQSConnection
from boto.sqs.regioninfo import SQSRegionInfo
from boto.sqs.message import RawMessage
from boto.sqs.queue import Queue
from boto.connection import AWSQueryConnection
from nose.plugins.attrib import attr
class SQSAuthParams(AWSMockServiceTestCase):
    """Tests for sigv4 credential-scope handling in SQSConnection."""
    connection_class = SQSConnection

    def setUp(self):
        super(SQSAuthParams, self).setUp()

    def default_body(self):
        """Canned CreateQueue response used by the mock service."""
        return """<?xml version="1.0"?>
            <CreateQueueResponse>
              <CreateQueueResult>
                <QueueUrl>
                  https://queue.amazonaws.com/599169622985/myqueue1
                </QueueUrl>
              </CreateQueueResult>
              <ResponseMetadata>
                <RequestId>54d4c94d-2307-54a8-bb27-806a682a5abd</RequestId>
              </ResponseMetadata>
            </CreateQueueResponse>"""

    @attr(sqs=True)
    def test_auth_service_name_override(self):
        self.set_http_response(status_code=200)
        # We can use the auth_service_name to change what service
        # name to use for the credential scope for sigv4.
        self.service_connection.auth_service_name = 'service_override'
        self.service_connection.create_queue('my_queue')
        # Note the service_override value instead.
        self.assertIn('us-east-1/service_override/aws4_request',
                      self.actual_request.headers['Authorization'])

    @attr(sqs=True)
    def test_class_attribute_can_set_service_name(self):
        self.set_http_response(status_code=200)
        # The SQS class has an 'AuthServiceName' param of 'sqs':
        self.assertEqual(self.service_connection.AuthServiceName, 'sqs')
        self.service_connection.create_queue('my_queue')
        # And because of this, the value of 'sqs' will be used instead of
        # 'queue' for the credential scope:
        self.assertIn('us-east-1/sqs/aws4_request',
                      self.actual_request.headers['Authorization'])

    @attr(sqs=True)
    def test_auth_region_name_is_automatically_updated(self):
        region = SQSRegionInfo(name='us-west-2',
                               endpoint='us-west-2.queue.amazonaws.com')
        self.service_connection = SQSConnection(
            https_connection_factory=self.https_connection_factory,
            aws_access_key_id='aws_access_key_id',
            aws_secret_access_key='aws_secret_access_key',
            region=region)
        self.initialize_service_connection()
        self.set_http_response(status_code=200)
        self.service_connection.create_queue('my_queue')
        # Note the region name below is 'us-west-2'.
        self.assertIn('us-west-2/sqs/aws4_request',
                      self.actual_request.headers['Authorization'])

    @attr(sqs=True)
    def test_set_get_auth_service_and_region_names(self):
        self.service_connection.auth_service_name = 'service_name'
        self.service_connection.auth_region_name = 'region_name'
        self.assertEqual(self.service_connection.auth_service_name,
                         'service_name')
        self.assertEqual(self.service_connection.auth_region_name, 'region_name')

    @attr(sqs=True)
    def test_get_queue_with_owner_account_id_returns_queue(self):
        self.set_http_response(status_code=200)
        self.service_connection.create_queue('my_queue')
        self.service_connection.get_queue('my_queue', '599169622985')
        # Use unittest assertions instead of a bare `assert` (survives
        # python -O) and the deprecated assertEquals alias.
        self.assertIn('QueueOwnerAWSAccountId', self.actual_request.params)
        self.assertEqual(self.actual_request.params['QueueOwnerAWSAccountId'],
                         '599169622985')
class SQSProfileName(MockServiceWithConfigTestCase):
    """Tests that SQSConnection honours a boto config profile."""
    connection_class = SQSConnection
    profile_name = 'prod'

    def setUp(self):
        super(SQSProfileName, self).setUp()
        # Credentials live under a config profile section; the
        # connection must pick them up via profile_name.
        self.config = {
            "profile prod": {
                'aws_access_key_id': 'access_key',
                'aws_secret_access_key': 'secret_access',
            }
        }

    @attr(sqs=True)
    def test_profile_name_gets_passed(self):
        region = SQSRegionInfo(name='us-west-2',
                               endpoint='us-west-2.queue.amazonaws.com')
        self.service_connection = SQSConnection(
            https_connection_factory=self.https_connection_factory,
            region=region,
            profile_name=self.profile_name)
        self.initialize_service_connection()
        self.set_http_response(status_code=200)
        # BUG FIX: the original test made no assertion at all; confirm
        # the profile name is actually propagated to the connection.
        self.assertEqual(self.service_connection.profile_name,
                         self.profile_name)
class SQSMessageAttributesParsing(AWSMockServiceTestCase):
connection_class = SQSConnection
def default_body(self):
return """<?xml version="1.0"?>
<ReceiveMessageResponse xmlns="http://queue.amazonaws.com/doc/2012-11-05/">
<ReceiveMessageResult>
<Message>
<Body>This is a test</Body>
<ReceiptHandle>+eXJYhj5rDql5hp2VwGkXvQVsefdjAlsQe5EGS57gyORPB48KwP1d/3Rfy4DrQXt+MgfRPHUCUH36xL9+Ol/UWD/ylKrrWhiXSY0Ip4EsI8jJNTo/aneEjKE/iZnz/nL8MFP5FmMj8PbDAy5dgvAqsdvX1rm8Ynn0bGnQLJGfH93cLXT65p6Z/FDyjeBN0M+9SWtTcuxOIcMdU8NsoFIwm/6mLWgWAV46OhlYujzvyopCvVwsj+Y8jLEpdSSvTQHNlQEaaY/V511DqAvUwru2p0ZbW7ZzcbhUTn6hHkUROo=</ReceiptHandle>
<MD5OfBody>ce114e4501d2f4e2dcea3e17b546f339</MD5OfBody>
<MessageAttribute>
<Name>Count</Name>
<Value>
<DataType>Number</DataType>
<StringValue>1</StringValue>
</Value>
</MessageAttribute>
<MessageAttribute>
<Name>Foo</Name>
<Value>
<DataType>String</DataType>
<StringValue>Bar</StringValue>
</Value>
</MessageAttribute>
<MessageId>7049431b-e5f6-430b-93c4-ded53864d02b</MessageId>
<MD5OfMessageAttributes>324758f82d026ac6ec5b31a3b192d1e3</MD5OfMessageAttributes>
</Message>
</ReceiveMessageResult>
<ResponseMetadata>
<RequestId>73f978f2-400b-5460-8d38-3316e39e79c6</RequestId>
</ResponseMetadata>
</ReceiveMessageResponse>"""
@attr(sqs=True)
def test_message_attribute_response(self):
self.set_http_response(status_code=200)
queue = Queue(
url='http://sqs.us-east-1.amazonaws.com/123456789012/testQueue/',
message_class=RawMessage)
message = self.service_connection.receive_message(queue)[0]
self.assertEqual(message.get_body(), 'This is a test')
self.assertEqual(message.id, '7049431b-e5f6-430b-93c4-ded53864d02b')
self.assertEqual(message.md5, 'ce114e4501d2f4e2dcea3e17b546f339')
self.assertEqual(message.md5_message_attributes,
'324758f82d026ac6ec5b31a3b192d1e3')
mattributes = message.message_attributes
self.assertEqual(len(mattributes.keys()), 2)
self.assertEqual(mattributes['Count']['data_type'], 'Number')
self.assertEqual(mattributes['Foo']['string_value'], 'Bar')
class SQSSendMessageAttributes(AWSMockServiceTestCase):
connection_class = SQSConnection
def default_body(self):
return """<SendMessageResponse>
<SendMessageResult>
<MD5OfMessageBody>
fafb00f5732ab283681e124bf8747ed1
</MD5OfMessageBody>
<MD5OfMessageAttributes>
3ae8f24a165a8cedc005670c81a27295
</MD5OfMessageAttributes>
<MessageId>
5fea7756-0ea4-451a-a703-a558b933e274
</MessageId>
</SendMessageResult>
<ResponseMetadata>
<RequestId>
27daac76-34dd-47df-bd01-1f6e873584a0
</RequestId>
</ResponseMetadata>
</SendMessageResponse>
"""
@attr(sqs=True)
def test_send_message_attributes(self):
self.set_http_response(status_code=200)
queue = Queue(
url='http://sqs.us-east-1.amazonaws.com/123456789012/testQueue/',
message_class=RawMessage)
self.service_connection.send_message(queue, 'Test message',
message_attributes={
'name1': {
'data_type': 'String',
'string_value': 'Bob'
},
'name2': {
'data_type': 'Number',
'string_value': '1'
}
})
self.assert_request_parameters({
'Action': 'SendMessage',
'MessageAttribute.1.Name': 'name1',
'MessageAttribute.1.Value.DataType': 'String',
'MessageAttribute.1.Value.StringValue': 'Bob',
'MessageAttribute.2.Name': 'name2',
'MessageAttribute.2.Value.DataType': 'Number',
'MessageAttribute.2.Value.StringValue': '1',
'MessageBody': 'Test message',
'Version': '2012-11-05'
})
class SQSSendBatchMessageAttributes(AWSMockServiceTestCase):
connection_class = SQSConnection
def default_body(self):
return """<SendMessageBatchResponse>
<SendMessageBatchResult>
<SendMessageBatchResultEntry>
<Id>test_msg_001</Id>
<MessageId>0a5231c7-8bff-4955-be2e-8dc7c50a25fa</MessageId>
<MD5OfMessageBody>0e024d309850c78cba5eabbeff7cae71</MD5OfMessageBody>
</SendMessageBatchResultEntry>
<SendMessageBatchResultEntry>
<Id>test_msg_002</Id>
<MessageId>15ee1ed3-87e7-40c1-bdaa-2e49968ea7e9</MessageId>
<MD5OfMessageBody>7fb8146a82f95e0af155278f406862c2</MD5OfMessageBody>
<MD5OfMessageAttributes>295c5fa15a51aae6884d1d7c1d99ca50</MD5OfMessageAttributes>
</SendMessageBatchResultEntry>
</SendMessageBatchResult>
<ResponseMetadata>
<RequestId>ca1ad5d0-8271-408b-8d0f-1351bf547e74</RequestId>
</ResponseMetadata>
</SendMessageBatchResponse>
"""
@attr(sqs=True)
def test_send_message_attributes(self):
self.set_http_response(status_code=200)
queue = Queue(
url='http://sqs.us-east-1.amazonaws.com/123456789012/testQueue/',
message_class=RawMessage)
message1 = (1, 'Message 1', 0, {'name1': {'data_type': 'String',
'string_value': 'foo'}})
message2 = (2, 'Message 2', 0, {'name2': {'data_type': 'Number',
'string_value': '1'}})
self.service_connection.send_message_batch(queue, (message1, message2))
self.assert_request_parameters({
'Action': 'SendMessageBatch',
'SendMessageBatchRequestEntry.1.DelaySeconds': 0,
'SendMessageBatchRequestEntry.1.Id': 1,
'SendMessageBatchRequestEntry.1.MessageAttribute.1.DataType': 'String',
'SendMessageBatchRequestEntry.1.MessageAttribute.1.Name': 'name1',
'SendMessageBatchRequestEntry.1.MessageAttribute.1.StringValue': 'foo',
'SendMessageBatchRequestEntry.1.MessageBody': 'Message 1',
'SendMessageBatchRequestEntry.2.DelaySeconds': 0,
'SendMessageBatchRequestEntry.2.Id': 2,
'SendMessageBatchRequestEntry.2.MessageAttribute.1.DataType': 'Number',
'SendMessageBatchRequestEntry.2.MessageAttribute.1.Name': 'name2',
'SendMessageBatchRequestEntry.2.MessageAttribute.1.StringValue': '1',
'SendMessageBatchRequestEntry.2.MessageBody': 'Message 2',
'Version': '2012-11-05'
})
if __name__ == '__main__':
    # `unittest` was referenced without being imported anywhere in
    # this module; import it locally so direct execution works.
    import unittest
    unittest.main()
Add an assertion to SQSProfileName.test_profile_name_gets_passed confirming that the profile_name argument is properly passed through to the SQS connection.
#!/usr/bin/env python
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from tests.unit import AWSMockServiceTestCase, MockServiceWithConfigTestCase
from tests.compat import mock
from boto.sqs.connection import SQSConnection
from boto.sqs.regioninfo import SQSRegionInfo
from boto.sqs.message import RawMessage
from boto.sqs.queue import Queue
from boto.connection import AWSQueryConnection
from nose.plugins.attrib import attr
class SQSAuthParams(AWSMockServiceTestCase):
connection_class = SQSConnection
def setUp(self):
super(SQSAuthParams, self).setUp()
def default_body(self):
return """<?xml version="1.0"?>
<CreateQueueResponse>
<CreateQueueResult>
<QueueUrl>
https://queue.amazonaws.com/599169622985/myqueue1
</QueueUrl>
</CreateQueueResult>
<ResponseMetadata>
<RequestId>54d4c94d-2307-54a8-bb27-806a682a5abd</RequestId>
</ResponseMetadata>
</CreateQueueResponse>"""
@attr(sqs=True)
def test_auth_service_name_override(self):
self.set_http_response(status_code=200)
# We can use the auth_service_name to change what service
# name to use for the credential scope for sigv4.
self.service_connection.auth_service_name = 'service_override'
self.service_connection.create_queue('my_queue')
# Note the service_override value instead.
self.assertIn('us-east-1/service_override/aws4_request',
self.actual_request.headers['Authorization'])
@attr(sqs=True)
def test_class_attribute_can_set_service_name(self):
self.set_http_response(status_code=200)
# The SQS class has an 'AuthServiceName' param of 'sqs':
self.assertEqual(self.service_connection.AuthServiceName, 'sqs')
self.service_connection.create_queue('my_queue')
# And because of this, the value of 'sqs' will be used instead of
# 'queue' for the credential scope:
self.assertIn('us-east-1/sqs/aws4_request',
self.actual_request.headers['Authorization'])
@attr(sqs=True)
def test_auth_region_name_is_automatically_updated(self):
region = SQSRegionInfo(name='us-west-2',
endpoint='us-west-2.queue.amazonaws.com')
self.service_connection = SQSConnection(
https_connection_factory=self.https_connection_factory,
aws_access_key_id='aws_access_key_id',
aws_secret_access_key='aws_secret_access_key',
region=region)
self.initialize_service_connection()
self.set_http_response(status_code=200)
self.service_connection.create_queue('my_queue')
# Note the region name below is 'us-west-2'.
self.assertIn('us-west-2/sqs/aws4_request',
self.actual_request.headers['Authorization'])
@attr(sqs=True)
def test_set_get_auth_service_and_region_names(self):
self.service_connection.auth_service_name = 'service_name'
self.service_connection.auth_region_name = 'region_name'
self.assertEqual(self.service_connection.auth_service_name,
'service_name')
self.assertEqual(self.service_connection.auth_region_name, 'region_name')
@attr(sqs=True)
def test_get_queue_with_owner_account_id_returns_queue(self):
self.set_http_response(status_code=200)
self.service_connection.create_queue('my_queue')
self.service_connection.get_queue('my_queue', '599169622985')
assert 'QueueOwnerAWSAccountId' in self.actual_request.params.keys()
self.assertEquals(self.actual_request.params['QueueOwnerAWSAccountId'], '599169622985')
class SQSProfileName(MockServiceWithConfigTestCase):
    """Tests that SQSConnection honours a boto config profile."""
    connection_class = SQSConnection
    profile_name = 'prod'

    def setUp(self):
        super(SQSProfileName, self).setUp()
        # Credentials live under a config profile section; the
        # connection must pick them up via profile_name.
        self.config = {
            "profile prod": {
                'aws_access_key_id': 'access_key',
                'aws_secret_access_key': 'secret_access',
            }
        }

    @attr(sqs=True)
    def test_profile_name_gets_passed(self):
        region = SQSRegionInfo(name='us-west-2',
                               endpoint='us-west-2.queue.amazonaws.com')
        self.service_connection = SQSConnection(
            https_connection_factory=self.https_connection_factory,
            region=region,
            profile_name=self.profile_name)
        self.initialize_service_connection()
        self.set_http_response(status_code=200)
        # Verify the profile name is propagated to the connection.
        # (assertEquals is a deprecated alias of assertEqual.)
        self.assertEqual(self.service_connection.profile_name,
                         self.profile_name)
class SQSMessageAttributesParsing(AWSMockServiceTestCase):
connection_class = SQSConnection
def default_body(self):
return """<?xml version="1.0"?>
<ReceiveMessageResponse xmlns="http://queue.amazonaws.com/doc/2012-11-05/">
<ReceiveMessageResult>
<Message>
<Body>This is a test</Body>
<ReceiptHandle>+eXJYhj5rDql5hp2VwGkXvQVsefdjAlsQe5EGS57gyORPB48KwP1d/3Rfy4DrQXt+MgfRPHUCUH36xL9+Ol/UWD/ylKrrWhiXSY0Ip4EsI8jJNTo/aneEjKE/iZnz/nL8MFP5FmMj8PbDAy5dgvAqsdvX1rm8Ynn0bGnQLJGfH93cLXT65p6Z/FDyjeBN0M+9SWtTcuxOIcMdU8NsoFIwm/6mLWgWAV46OhlYujzvyopCvVwsj+Y8jLEpdSSvTQHNlQEaaY/V511DqAvUwru2p0ZbW7ZzcbhUTn6hHkUROo=</ReceiptHandle>
<MD5OfBody>ce114e4501d2f4e2dcea3e17b546f339</MD5OfBody>
<MessageAttribute>
<Name>Count</Name>
<Value>
<DataType>Number</DataType>
<StringValue>1</StringValue>
</Value>
</MessageAttribute>
<MessageAttribute>
<Name>Foo</Name>
<Value>
<DataType>String</DataType>
<StringValue>Bar</StringValue>
</Value>
</MessageAttribute>
<MessageId>7049431b-e5f6-430b-93c4-ded53864d02b</MessageId>
<MD5OfMessageAttributes>324758f82d026ac6ec5b31a3b192d1e3</MD5OfMessageAttributes>
</Message>
</ReceiveMessageResult>
<ResponseMetadata>
<RequestId>73f978f2-400b-5460-8d38-3316e39e79c6</RequestId>
</ResponseMetadata>
</ReceiveMessageResponse>"""
@attr(sqs=True)
def test_message_attribute_response(self):
self.set_http_response(status_code=200)
queue = Queue(
url='http://sqs.us-east-1.amazonaws.com/123456789012/testQueue/',
message_class=RawMessage)
message = self.service_connection.receive_message(queue)[0]
self.assertEqual(message.get_body(), 'This is a test')
self.assertEqual(message.id, '7049431b-e5f6-430b-93c4-ded53864d02b')
self.assertEqual(message.md5, 'ce114e4501d2f4e2dcea3e17b546f339')
self.assertEqual(message.md5_message_attributes,
'324758f82d026ac6ec5b31a3b192d1e3')
mattributes = message.message_attributes
self.assertEqual(len(mattributes.keys()), 2)
self.assertEqual(mattributes['Count']['data_type'], 'Number')
self.assertEqual(mattributes['Foo']['string_value'], 'Bar')
class SQSSendMessageAttributes(AWSMockServiceTestCase):
    """Verify that message_attributes passed to send_message are encoded
    as MessageAttribute.N.* request parameters."""

    connection_class = SQSConnection

    def default_body(self):
        # Minimal successful SendMessage response.
        return """<SendMessageResponse>
<SendMessageResult>
<MD5OfMessageBody>
fafb00f5732ab283681e124bf8747ed1
</MD5OfMessageBody>
<MD5OfMessageAttributes>
3ae8f24a165a8cedc005670c81a27295
</MD5OfMessageAttributes>
<MessageId>
5fea7756-0ea4-451a-a703-a558b933e274
</MessageId>
</SendMessageResult>
<ResponseMetadata>
<RequestId>
27daac76-34dd-47df-bd01-1f6e873584a0
</RequestId>
</ResponseMetadata>
</SendMessageResponse>
"""

    @attr(sqs=True)
    def test_send_message_attributes(self):
        self.set_http_response(status_code=200)

        queue = Queue(
            url='http://sqs.us-east-1.amazonaws.com/123456789012/testQueue/',
            message_class=RawMessage)
        self.service_connection.send_message(queue, 'Test message',
                                             message_attributes={
                                                 'name1': {
                                                     'data_type': 'String',
                                                     'string_value': 'Bob'
                                                 },
                                                 'name2': {
                                                     'data_type': 'Number',
                                                     'string_value': '1'
                                                 }
                                             })

        # Attributes are flattened into indexed request parameters.
        self.assert_request_parameters({
            'Action': 'SendMessage',
            'MessageAttribute.1.Name': 'name1',
            'MessageAttribute.1.Value.DataType': 'String',
            'MessageAttribute.1.Value.StringValue': 'Bob',
            'MessageAttribute.2.Name': 'name2',
            'MessageAttribute.2.Value.DataType': 'Number',
            'MessageAttribute.2.Value.StringValue': '1',
            'MessageBody': 'Test message',
            'Version': '2012-11-05'
        })
class SQSSendBatchMessageAttributes(AWSMockServiceTestCase):
    """Verify that per-message attributes passed to send_message_batch are
    encoded as SendMessageBatchRequestEntry.N.MessageAttribute.M.* params."""

    connection_class = SQSConnection

    def default_body(self):
        # Successful SendMessageBatch response for two entries.
        return """<SendMessageBatchResponse>
<SendMessageBatchResult>
<SendMessageBatchResultEntry>
<Id>test_msg_001</Id>
<MessageId>0a5231c7-8bff-4955-be2e-8dc7c50a25fa</MessageId>
<MD5OfMessageBody>0e024d309850c78cba5eabbeff7cae71</MD5OfMessageBody>
</SendMessageBatchResultEntry>
<SendMessageBatchResultEntry>
<Id>test_msg_002</Id>
<MessageId>15ee1ed3-87e7-40c1-bdaa-2e49968ea7e9</MessageId>
<MD5OfMessageBody>7fb8146a82f95e0af155278f406862c2</MD5OfMessageBody>
<MD5OfMessageAttributes>295c5fa15a51aae6884d1d7c1d99ca50</MD5OfMessageAttributes>
</SendMessageBatchResultEntry>
</SendMessageBatchResult>
<ResponseMetadata>
<RequestId>ca1ad5d0-8271-408b-8d0f-1351bf547e74</RequestId>
</ResponseMetadata>
</SendMessageBatchResponse>
"""

    @attr(sqs=True)
    def test_send_message_attributes(self):
        self.set_http_response(status_code=200)

        queue = Queue(
            url='http://sqs.us-east-1.amazonaws.com/123456789012/testQueue/',
            message_class=RawMessage)
        # Each batch entry is (id, body, delay_seconds, message_attributes).
        message1 = (1, 'Message 1', 0, {'name1': {'data_type': 'String',
                                                  'string_value': 'foo'}})
        message2 = (2, 'Message 2', 0, {'name2': {'data_type': 'Number',
                                                  'string_value': '1'}})

        self.service_connection.send_message_batch(queue, (message1, message2))

        self.assert_request_parameters({
            'Action': 'SendMessageBatch',
            'SendMessageBatchRequestEntry.1.DelaySeconds': 0,
            'SendMessageBatchRequestEntry.1.Id': 1,
            'SendMessageBatchRequestEntry.1.MessageAttribute.1.DataType': 'String',
            'SendMessageBatchRequestEntry.1.MessageAttribute.1.Name': 'name1',
            'SendMessageBatchRequestEntry.1.MessageAttribute.1.StringValue': 'foo',
            'SendMessageBatchRequestEntry.1.MessageBody': 'Message 1',
            'SendMessageBatchRequestEntry.2.DelaySeconds': 0,
            'SendMessageBatchRequestEntry.2.Id': 2,
            'SendMessageBatchRequestEntry.2.MessageAttribute.1.DataType': 'Number',
            'SendMessageBatchRequestEntry.2.MessageAttribute.1.Name': 'name2',
            'SendMessageBatchRequestEntry.2.MessageAttribute.1.StringValue': '1',
            'SendMessageBatchRequestEntry.2.MessageBody': 'Message 2',
            'Version': '2012-11-05'
        })
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
import csv
import os
import six
import random
import sys
from collections import defaultdict
this_dir = os.path.realpath(os.path.dirname(__file__))
sys.path.append(os.path.realpath(os.path.join(this_dir, os.pardir, os.pardir)))
from geodata.encoding import safe_decode
CATEGORIES_DIR = os.path.join(this_dir, os.pardir, os.pardir, os.pardir,
'resources', 'categories')
class CategoryConfig(object):
    """Per-language category phrase rules loaded from .tsv files.

    Each <lang>.tsv file under base_dir holds rows of
    (key, value, is_plural, phrase). Phrases are grouped into singular and
    plural rule tables per base language (e.g. "en" for "en_GB").
    """

    def __init__(self, base_dir=None):
        """Load every *.tsv rule file found in base_dir.

        :param base_dir: directory of rule files; defaults to
            CATEGORIES_DIR (resolved lazily so the class can be defined
            and unit-tested without that directory existing).
        :raises RuntimeError: if base_dir does not exist.
        """
        if base_dir is None:
            base_dir = CATEGORIES_DIR

        self.language_categories_singular = {}
        self.language_categories_plural = {}
        # Every OSM property key (e.g. "amenity") seen in any rule file.
        self.property_names = set()

        if not os.path.exists(base_dir):
            raise RuntimeError('{} does not exist'.format(base_dir))

        for filename in os.listdir(base_dir):
            if not filename.endswith('.tsv'):
                continue

            lang = filename.rsplit('.tsv')[0]
            # Regional variants ("en_GB") share the base language's tables.
            base_lang = lang.split('_')[0]

            singular_rules = self.language_categories_singular.get(base_lang, defaultdict(list))
            plural_rules = self.language_categories_plural.get(base_lang, defaultdict(list))

            # Fix: open the file relative to base_dir (the directory that
            # was actually listed), not the module-level default, and use a
            # context manager so the handle is not leaked.
            with open(os.path.join(base_dir, filename)) as f:
                reader = csv.reader(f, delimiter='\t')
                next(reader)  # skip header row (works on Python 2 and 3)

                for key, value, is_plural, phrase in reader:
                    self.property_names.add(key)
                    is_plural = bool(int(is_plural))

                    if is_plural:
                        plural_rules[(key, value)].append(phrase)
                    else:
                        singular_rules[(key, value)].append(phrase)

            self.language_categories_singular[base_lang] = singular_rules
            self.language_categories_plural[base_lang] = plural_rules

        # Freeze the rule tables into plain dicts so lookups cannot
        # silently create empty entries.
        self.language_categories_singular = {key: dict(value) for key, value
                                             in six.iteritems(self.language_categories_singular)}
        self.language_categories_plural = {key: dict(value) for key, value
                                           in six.iteritems(self.language_categories_plural)}

    def get_phrase(self, language, key, value, is_plural=False):
        """Return a random phrase for (key, value) in language, or None
        when the language or the pair has no rules."""
        config = self.language_categories_singular if not is_plural else self.language_categories_plural

        if language not in config:
            return None

        language_config = config[language]
        choices = language_config.get((key, value))

        if not choices:
            return None

        return random.choice(choices)
category_config = CategoryConfig()
[categories] Add a method to determine which keys to check for category matches, given a dict of OSM tags
import csv
import os
import six
import random
import sys
from collections import defaultdict
this_dir = os.path.realpath(os.path.dirname(__file__))
sys.path.append(os.path.realpath(os.path.join(this_dir, os.pardir, os.pardir)))
from geodata.encoding import safe_decode
CATEGORIES_DIR = os.path.join(this_dir, os.pardir, os.pardir, os.pardir,
'resources', 'categories')
class CategoryConfig(object):
    """Per-language category phrase rules loaded from .tsv files.

    Each <lang>.tsv file under base_dir holds rows of
    (key, value, is_plural, phrase). Phrases are grouped into singular and
    plural rule tables per base language (e.g. "en" for "en_GB"), and the
    set of known property keys is tracked per file language.
    """

    def __init__(self, base_dir=None):
        """Load every *.tsv rule file found in base_dir.

        :param base_dir: directory of rule files; defaults to
            CATEGORIES_DIR (resolved lazily so the class can be defined
            and unit-tested without that directory existing).
        :raises RuntimeError: if base_dir does not exist.
        """
        if base_dir is None:
            base_dir = CATEGORIES_DIR

        self.language_categories_singular = {}
        self.language_categories_plural = {}
        self.language_property_names = defaultdict(set)

        if not os.path.exists(base_dir):
            raise RuntimeError('{} does not exist'.format(base_dir))

        for filename in os.listdir(base_dir):
            if not filename.endswith('.tsv'):
                continue

            lang = filename.rsplit('.tsv')[0]
            # Regional variants ("en_GB") share the base language's tables.
            base_lang = lang.split('_')[0]

            singular_rules = self.language_categories_singular.get(base_lang, defaultdict(list))
            plural_rules = self.language_categories_plural.get(base_lang, defaultdict(list))

            # Fix: open the file relative to base_dir (the directory that
            # was actually listed), not the module-level default, and use a
            # context manager so the handle is not leaked.
            with open(os.path.join(base_dir, filename)) as f:
                reader = csv.reader(f, delimiter='\t')
                next(reader)  # skip header row (works on Python 2 and 3)

                for key, value, is_plural, phrase in reader:
                    # NOTE(review): property names are keyed by the full
                    # file language (e.g. "en_GB") while the phrase tables
                    # use base_lang -- confirm has_keys callers pass the
                    # same granularity.
                    self.language_property_names[lang].add(key)
                    is_plural = bool(int(is_plural))

                    if is_plural:
                        plural_rules[(key, value)].append(phrase)
                    else:
                        singular_rules[(key, value)].append(phrase)

            self.language_categories_singular[base_lang] = singular_rules
            self.language_categories_plural[base_lang] = plural_rules

        # Freeze the rule tables into plain dicts so lookups cannot
        # silently create empty entries.
        self.language_categories_singular = {key: dict(value) for key, value
                                             in six.iteritems(self.language_categories_singular)}
        self.language_categories_plural = {key: dict(value) for key, value
                                           in six.iteritems(self.language_categories_plural)}

    def has_keys(self, language, keys):
        """Return the subset of keys that have any phrase rules for
        language (preserving the order of keys)."""
        prop_names = self.language_property_names.get(language, set())
        return [k for k in keys if k in prop_names]

    def get_phrase(self, language, key, value, is_plural=False):
        """Return a random phrase for (key, value) in language, or None
        when the language or the pair has no rules."""
        config = self.language_categories_singular if not is_plural else self.language_categories_plural

        if language not in config:
            return None

        language_config = config[language]
        choices = language_config.get((key, value))

        if not choices:
            return None

        return random.choice(choices)
category_config = CategoryConfig()
|
#!/usr/bin/env python
"""A tool for installing Lua and LuaRocks locally."""
from __future__ import print_function
import argparse
import os
import re
import shutil
import string
import subprocess
import sys
import tarfile
import tempfile
import zipfile
try:
from urllib import urlretrieve
except ImportError:
from urllib.request import urlretrieve
hererocks_version = "Hererocks 0.3.1"
__all__ = ["main"]

# Parsed command-line options and the scratch build directory; both are
# assigned in main() and read by the helpers and Program classes below.
opts = None
temp_dir = None

# Maps sys.platform prefixes to Lua Makefile build targets.
platform_to_lua_target = {
    "linux": "linux",
    "win": "mingw",
    "darwin": "macosx",
    "freebsd": "freebsd"
}
def get_default_lua_target():
    # Pick the Lua Makefile target matching the current platform; fall
    # back to "posix"/"generic" for unknown systems.
    for platform, lua_target in platform_to_lua_target.items():
        if sys.platform.startswith(platform):
            return lua_target

    return "posix" if os.name == "posix" else "generic"


def get_default_cache():
    # Per-user cache directory for downloads and cached git repos.
    if os.name == "nt":
        cache_root = os.getenv("LOCALAPPDATA") or os.path.join(
            os.getenv("USERPROFILE"), "Local Settings", "Application Data")
        return os.path.join(cache_root, "HereRocks", "Cache")
    else:
        return os.path.join(os.getenv("HOME"), ".cache", "hererocks")
def quote(command_arg):
    """Single-quote command_arg for the shell, escaping embedded quotes."""
    escaped = command_arg.replace("'", "'\"'\"'")
    return "'{}'".format(escaped)
def exec_command(capture, *args):
    """Run a shell command assembled by joining args with spaces.

    When capture is true, return the command's decoded output; otherwise
    return False. On failure the process exits, except that in capture
    mode a failing command with empty output returns "" (used to probe
    git state).
    """
    command = " ".join(args)

    if opts.verbose:
        print("Running " + command)

    live_output = opts.verbose and not capture
    runner = subprocess.check_call if live_output else subprocess.check_output

    try:
        output = runner(command, stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError as exception:
        if capture and not exception.output.strip():
            # Ignore errors if output is empty.
            return ""

        if not live_output:
            # check_output returns bytes on Python 3; decode before writing
            # to text-mode stdout (writing bytes there raises TypeError).
            sys.stdout.write(exception.output.decode("UTF-8"))

        sys.exit("Error: got exitcode {} from command {}".format(
            exception.returncode, command))

    if opts.verbose and capture:
        sys.stdout.write(output.decode("UTF-8"))

    return capture and output.decode("UTF-8")
def run_command(*args):
    # Convenience wrapper: run a command without capturing its output.
    exec_command(False, *args)
def copy_dir(src, dst):
    """Recursively copy src to dst, leaving out any ".git" directory."""
    def _skip_git(_dir, _names):
        return {".git"}

    shutil.copytree(src, dst, ignore=_skip_git)
# Hosts known to serve smart HTTP git transport, so shallow clones
# (--depth) are safe there; see git_clone_command().
clever_http_git_whitelist = [
    "http://github.com/", "https://github.com/",
    "http://bitbucket.com/", "https://bitbucket.com/"
]

# Lazily-detected capability flag; see set_git_branch_accepts_tags().
git_branch_accepts_tags = None
def set_git_branch_accepts_tags():
    # git >= 1.7.10 accepts a tag name for "git clone --branch"; detect
    # the installed version once and cache the answer in the global.
    global git_branch_accepts_tags

    if git_branch_accepts_tags is None:
        version_output = exec_command(True, "git --version")
        match = re.search("(\d+)\.(\d+)\.?(\d*)", version_output)

        if match:
            major = int(match.group(1))
            minor = int(match.group(2))
            tiny = int(match.group(3) or "0")
            git_branch_accepts_tags = major > 1 or (
                major == 1 and (minor > 7 or (minor == 7 and tiny >= 10)))
def git_clone_command(repo, ref, is_cache):
    """Pick a clone command for fetching ref from repo.

    Returns a (clone_command, need_checkout) pair: need_checkout tells the
    caller whether an explicit "git checkout" is required afterwards.
    """
    if is_cache:
        # Cache full repos.
        return "git clone", True

    # Http(s) transport may be dumb and not understand --depth.
    uses_http = repo.startswith(("http://", "https://"))

    if uses_http and not repo.startswith(tuple(clever_http_git_whitelist)):
        return "git clone", True

    # Have to clone whole repo to get a specific commit.
    ref_is_hex = all(c in string.hexdigits for c in ref)

    if ref_is_hex:
        return "git clone", True

    set_git_branch_accepts_tags()

    if git_branch_accepts_tags:
        return "git clone --depth=1 --branch=" + quote(ref), False

    return "git clone --depth=1", True
def url_to_name(s):
    """Turn a URL or git ref into a filesystem/manifest-safe token by
    replacing every non-word character with an underscore."""
    return re.sub(r"\W", "_", s)
def identifiers_to_string(identifiers):
    """Join identifiers with dashes into one manifest line."""
    parts = list(identifiers)
    return "-".join(parts)
def copy_files(path, *files):
    """Copy every non-None entry of files into path, creating path first
    when it does not exist."""
    if not os.path.exists(path):
        os.makedirs(path)

    for candidate in files:
        if candidate is None:
            continue

        shutil.copy(candidate, path)
def exe(name):
    """Append ".exe" to name on Windows; return it unchanged elsewhere."""
    return name + ".exe" if os.name == "nt" else name
class Program(object):
    """Base class for an installable program (Lua, LuaJIT or LuaRocks).

    Subclasses provide class attributes: name, title, downloads (base URL),
    win32_zip, default_repo, versions (supported releases) and translations
    (version aliases). Methods here rely on the module globals opts and
    temp_dir and frequently change the process working directory.
    """

    def __init__(self, version):
        # Resolve aliases like "^" or "5.2" before classifying the source.
        version = self.translations.get(version, version)

        if version in self.versions:
            # Simple version.
            self.source_kind = "fixed"
            self.fetched = False
            self.version = version
            self.version_suffix = " " + version
        elif "@" in version:
            # Version from a git repo.
            self.source_kind = "git"

            if version.startswith("@"):
                # Use the default git repo for this program.
                self.repo = self.default_repo
                ref = version[1:] or "master"
            else:
                self.repo, _, ref = version.partition("@")

            # Have to clone the repo to get the commit ref points to.
            self.fetch_repo(ref)
            self.commit = exec_command(True, "git rev-parse HEAD").strip()
            self.version_suffix = " @" + self.commit[:7]
        else:
            # Local directory.
            self.source_kind = "local"

            if not os.path.exists(version):
                sys.exit("Error: bad {} version {}".format(self.title, version))

            print("Using {} from {}".format(self.title, version))
            result_dir = os.path.join(temp_dir, self.name)
            copy_dir(version, result_dir)
            os.chdir(result_dir)
            self.fetched = True
            self.version_suffix = ""

    def fetch_repo(self, ref):
        """Clone the program's git repo (reusing the download cache for
        default repos) and chdir into it, checked out at ref."""
        message = "Cloning {} from {} @{}".format(self.title, self.repo, ref)

        if self.repo == self.default_repo and not opts.no_git_cache:
            # Default repos are cached.
            if not os.path.exists(opts.downloads):
                os.makedirs(opts.downloads)

            repo_path = os.path.join(opts.downloads, self.name)
            self.fetched = False

            if os.path.exists(repo_path):
                print(message + " (cached)")
                # Sync with origin first.
                os.chdir(repo_path)

                if not exec_command(True, "git rev-parse --quiet --verify", quote(ref)):
                    run_command("git fetch")

                run_command("git checkout", quote(ref))

                # If HEAD is not detached, we are on a branch that must be synced.
                if exec_command(True, "git symbolic-ref -q HEAD"):
                    run_command("git pull --rebase")

                return
        else:
            self.fetched = True
            repo_path = os.path.join(temp_dir, self.name)

        print(message)
        clone_command, need_checkout = git_clone_command(self.repo, ref, not self.fetched)
        run_command(clone_command, quote(self.repo), quote(repo_path))
        os.chdir(repo_path)

        if need_checkout and ref != "master":
            run_command("git checkout", quote(ref))

    def get_download_name(self):
        # Archive base name, e.g. "lua-5.3.2" or "luarocks-2.2.2-win32".
        return self.name + "-" + self.version + ("-win32" if self.win32_zip else "")

    def get_download_url(self):
        return self.downloads + "/" + self.get_download_name() + (
            ".zip" if self.win32_zip else ".tar.gz")

    def fetch(self):
        """Make sure sources are unpacked under temp_dir and chdir there."""
        if self.fetched:
            return

        if self.source_kind == "git":
            # Currently inside the cached git repo, just copy it somewhere.
            result_dir = os.path.join(temp_dir, self.name)
            copy_dir(".", result_dir)
            os.chdir(result_dir)
            return

        if not os.path.exists(opts.downloads):
            os.makedirs(opts.downloads)

        archive_name = os.path.join(opts.downloads, self.name + self.version)
        url = self.get_download_url()
        message = "Fetching {} from {}".format(self.title, url)

        if not os.path.exists(archive_name):
            print(message)
            urlretrieve(url, archive_name)
        else:
            print(message + " (cached)")

        if self.win32_zip:
            archive = zipfile.ZipFile(archive_name)
        else:
            archive = tarfile.open(archive_name, "r:gz")

        archive.extractall(temp_dir)
        archive.close()
        os.chdir(os.path.join(temp_dir, self.get_download_name()))
        self.fetched = True

    def set_identifiers(self):
        # Identifiers describe an installation source; they are compared
        # with the manifest to detect already-installed versions. Local
        # directories get None so they are always rebuilt.
        if self.source_kind == "fixed":
            self.identifiers = [self.name, self.version]
        elif self.source_kind == "git":
            self.identifiers = [self.name, "git", url_to_name(self.repo), url_to_name(self.commit)]
        else:
            self.identifiers = None

    def update_identifiers(self, all_identifiers):
        """Build and install unless this exact source is already recorded
        in all_identifiers; return True when anything changed."""
        installed_identifiers = all_identifiers.get(self.name)
        self.set_identifiers()

        if not opts.ignore_installed:
            if self.identifiers is not None and self.identifiers == installed_identifiers:
                print(self.title + self.version_suffix + " already installed")
                return False

        self.build()
        self.install()
        all_identifiers[self.name] = self.identifiers
        return True
class Lua(Program):
    """Shared logic for PUC-Rio Lua and LuaJIT: version detection,
    compatibility flags, luaconf.h patching and cached builds."""

    def __init__(self, version):
        super(Lua, self).__init__(version)

        if self.source_kind == "fixed":
            self.major_version = self.major_version_from_version()
        else:
            self.major_version = self.major_version_from_source()

        if not self.version_suffix:
            self.version_suffix = " " + self.major_version

        self.set_compat()
        self.add_options_to_version_suffix()

        # defines go right after the first #define in luaconf.h;
        # redefines go just before its closing #endif (see patch_defines).
        self.defines = []
        self.redefines = []
        self.add_compat_to_defines()
        self.set_package_paths()
        self.add_package_paths_to_defines()

    @staticmethod
    def major_version_from_source():
        # Parse LUA_VERSION_NUM (501/502/503) out of src/lua.h in cwd.
        lua_h = open(os.path.join("src", "lua.h"))

        for line in lua_h:
            match = re.match("^\\s*#define\\s+LUA_VERSION_NUM\\s+50(\d)\\s*$", line)

            if match:
                return "5." + match.group(1)

    def set_identifiers(self):
        super(Lua, self).set_identifiers()

        if self.identifiers is not None:
            # Build flags affect the produced binaries, so they are part
            # of the installation identity.
            self.identifiers.extend(map(url_to_name, [
                opts.target, self.compat, opts.cflags or "", opts.location
            ]))

    def add_options_to_version_suffix(self):
        # Mention non-default build options in progress messages.
        options = []

        if opts.target != get_default_lua_target():
            options.append(("target", opts.target))

        if self.compat != "default":
            options.append(("compat", self.compat))

        if opts.cflags is not None:
            options.append(("cflags", opts.cflags))

        if options:
            self.version_suffix += " (" + (", ".join(
                opt + ": " + value for opt, value in options)) + ")"

    def set_package_paths(self):
        # Lua 5.1 searches the current directory before the install tree.
        local_paths_first = self.major_version == "5.1"

        module_path = os.path.join(opts.location, "share", "lua", self.major_version)
        module_path_parts = [
            os.path.join(module_path, "?.lua"),
            os.path.join(module_path, "?", "init.lua")
        ]
        module_path_parts.insert(0 if local_paths_first else 2, os.path.join(".", "?.lua"))
        self.package_path = ";".join(module_path_parts)

        cmodule_path = os.path.join(opts.location, "lib", "lua", self.major_version)
        so_extension = ".dll" if os.name == "nt" else ".so"
        cmodule_path_parts = [
            os.path.join(cmodule_path, "?" + so_extension),
            os.path.join(cmodule_path, "loadall" + so_extension)
        ]
        cmodule_path_parts.insert(0 if local_paths_first else 2,
                                  os.path.join(".", "?" + so_extension))
        self.package_cpath = ";".join(cmodule_path_parts)

    def add_package_paths_to_defines(self):
        # Escape backslashes so Windows paths survive the C string literal.
        package_path = self.package_path.replace("\\", "\\\\")
        package_cpath = self.package_cpath.replace("\\", "\\\\")
        self.redefines.extend([
            "#undef LUA_PATH_DEFAULT",
            "#undef LUA_CPATH_DEFAULT",
            "#define LUA_PATH_DEFAULT \"{}\"".format(package_path),
            "#define LUA_CPATH_DEFAULT \"{}\"".format(package_cpath)
        ])

    def patch_defines(self):
        # Splice self.defines right after the first #define in
        # src/luaconf.h and self.redefines just before the final #endif.
        # The file is handled as bytes so its encoding is preserved.
        defines = "\n".join(self.defines)
        redefines = "\n".join(self.redefines)

        luaconf_h = open(os.path.join("src", "luaconf.h"), "rb")
        luaconf_src = luaconf_h.read()
        luaconf_h.close()

        body, _, tail = luaconf_src.rpartition(b"#endif")
        header, _, main = body.partition(b"#define")
        first_define, main = main.split(b"\n", 1)

        luaconf_h = open(os.path.join("src", "luaconf.h"), "wb")
        luaconf_h.write(header + b"#define" + first_define + b"\n")
        luaconf_h.write(defines.encode("UTF-8") + b"\n")
        luaconf_h.write(main)
        luaconf_h.write(redefines.encode("UTF-8") + b"\n")
        luaconf_h.write(b"#endif")
        luaconf_h.write(tail)
        luaconf_h.close()

    def build(self):
        # Reuse a cached build directory when --builds is enabled and this
        # exact source/flags combination was built before.
        if opts.builds and self.identifiers is not None:
            self.cached_build_path = os.path.join(opts.builds,
                                                  identifiers_to_string(self.identifiers))

            if os.path.exists(self.cached_build_path):
                print("Building " + self.title + self.version_suffix + " (cached)")
                os.chdir(self.cached_build_path)
                return
        else:
            self.cached_build_path = None

        self.fetch()
        print("Building " + self.title + self.version_suffix)
        self.patch_defines()
        self.make()

        if self.cached_build_path is not None:
            copy_dir(".", self.cached_build_path)

    def install(self):
        print("Installing " + self.title + self.version_suffix)
        self.make_install()
class RioLua(Lua):
    """Standard PUC-Rio Lua, fetched from www.lua.org release tarballs or
    the lua/lua GitHub mirror."""

    name = "lua"
    title = "Lua"
    downloads = "http://www.lua.org/ftp"
    win32_zip = False
    default_repo = "https://github.com/lua/lua"
    versions = [
        "5.1", "5.1.1", "5.1.2", "5.1.3", "5.1.4", "5.1.5",
        "5.2.0", "5.2.1", "5.2.2", "5.2.3", "5.2.4",
        "5.3.0", "5.3.1", "5.3.2"
    ]
    translations = {
        "5": "5.3.2",
        "5.1": "5.1.5",
        "5.1.0": "5.1",
        "5.2": "5.2.4",
        "5.3": "5.3.2",
        "^": "5.3.2"
    }

    def major_version_from_version(self):
        # "5.3.2" -> "5.3".
        return self.version[:3]

    def set_compat(self):
        # Normalize the requested --compat value to what this major
        # version actually supports.
        if self.major_version == "5.1":
            self.compat = "none" if opts.compat == "none" else "default"
        elif self.major_version == "5.2":
            self.compat = "none" if opts.compat in ["none", "5.2"] else "default"
        else:
            self.compat = "default" if opts.compat in ["default", "5.2"] else opts.compat

    def add_compat_to_defines(self):
        if self.compat != "default":
            if self.major_version == "5.1":
                if self.compat == "none":
                    self.redefines.extend([
                        "#undef LUA_COMPAT_VARARG", "#undef LUA_COMPAT_MOD",
                        "#undef LUA_COMPAT_LSTR", "#undef LUA_COMPAT_GFIND",
                        "#undef LUA_COMPAT_OPENLIB"
                    ])
            elif self.major_version == "5.2":
                self.defines.append("#undef LUA_COMPAT_ALL")
            elif self.compat == "none":
                self.defines.append("#undef LUA_COMPAT_5_2")
            elif self.compat == "5.1":
                self.defines.append("#undef LUA_COMPAT_5_2")
                self.defines.append("#define LUA_COMPAT_5_1")
            else:
                self.defines.append("#define LUA_COMPAT_5_1")

    def set_files(self):
        # Names of the artifacts produced by the build, per platform.
        self.lua_file = exe("lua")
        self.luac_file = exe("luac")
        self.arch_file = "liblua.a"
        self.dll_file = None

        if os.name == "nt":
            self.dll_file = "lua5" + self.major_version[2] + ".dll"

            if opts.target == "cl":
                self.arch_file = None

    def make(self):
        cmd = "make"

        if opts.cflags is not None:
            if self.major_version == "5.1":
                # Lua 5.1 doesn't support passing MYCFLAGS to Makefile.
                makefile_h = open(os.path.join("src", "Makefile"), "rb")
                makefile_src = makefile_h.read()
                makefile_h.close()

                before, it, after = makefile_src.partition(b"CFLAGS= -O2 -Wall $(MYCFLAGS)")
                # Fix: the Makefile is read as bytes, so the injected cflags
                # must be bytes too; concatenating a str here raised
                # TypeError on Python 3.
                makefile_src = before + it + b" " + opts.cflags.encode("UTF-8") + after

                makefile_h = open(os.path.join("src", "Makefile"), "wb")
                makefile_h.write(makefile_src)
                makefile_h.close()
            else:
                cmd = "make MYCFLAGS=" + quote(opts.cflags)

        run_command(cmd, opts.target)

    def make_install(self):
        """Copy binaries, headers and the static library into opts.location."""
        self.set_files()
        os.chdir("src")

        copy_files(os.path.join(opts.location, "bin"),
                   self.lua_file, self.luac_file, self.dll_file)

        lua_hpp = "lua.hpp"

        # Lua 5.1 keeps lua.hpp in etc/ rather than src/.
        if not os.path.exists(lua_hpp):
            lua_hpp = "../etc/lua.hpp"

        copy_files(os.path.join(opts.location, "include"),
                   "lua.h", "luaconf.h", "lualib.h", "lauxlib.h", lua_hpp)

        copy_files(os.path.join(opts.location, "lib"), self.arch_file)
class LuaJIT(Lua):
    """LuaJIT, fetched from GitHub release archives or the LuaJIT repo."""

    name = "LuaJIT"
    title = "LuaJIT"
    downloads = "https://github.com/LuaJIT/LuaJIT/archive"
    win32_zip = False
    default_repo = "https://github.com/LuaJIT/LuaJIT"
    versions = [
        "2.0.0", "2.0.1", "2.0.2", "2.0.3", "2.0.4"
    ]
    translations = {
        "2": "2.0.4",
        "2.0": "2.0.4",
        "2.1": "@v2.1",
        "^": "2.0.4"
    }

    def get_download_url(self):
        # GitHub archive URLs tag releases as "v<version>".
        return self.downloads + "/v" + self.version + ".tar.gz"

    @staticmethod
    def major_version_from_version():
        # LuaJIT always implements the Lua 5.1 API.
        return "5.1"

    def set_compat(self):
        self.compat = "5.2" if opts.compat in ["all", "5.2"] else "default"

    def add_compat_to_defines(self):
        if self.compat != "default":
            self.defines.append("#define LUAJIT_ENABLE_LUA52COMPAT")

    @staticmethod
    def make():
        if os.name == "nt" and opts.target == "cl":
            os.chdir("src")
            run_command("msvcbuild.bat")
            os.chdir("..")
        else:
            run_command("make" if opts.cflags is None else "make XCFLAGS=" + quote(opts.cflags))

    def make_install(self):
        """Copy LuaJIT binaries, headers and libraries into opts.location."""
        luajit_file = exe("luajit")
        lua_file = exe("lua")
        arch_file = "libluajit.a"
        target_arch_file = "libluajit-5.1.a"
        so_file = "libluajit.so"
        target_so_file = "libluajit-5.1.so.2"
        dll_file = None

        if os.name == "nt":
            # Fix: rebind the local used by shutil.copy below. The original
            # assigned self.arch_file instead, so arch_file stayed
            # "libluajit.a" and the copy failed on Windows.
            arch_file = "lua51.lib"
            target_arch_file = "lua51.lib"
            dll_file = "lua51.dll"

        os.chdir("src")
        copy_files(os.path.join(opts.location, "bin"), dll_file)
        # Install the luajit binary under the name "lua" so callers and
        # LuaRocks find it at the standard location.
        shutil.copy(luajit_file, os.path.join(opts.location, "bin", lua_file))
        copy_files(os.path.join(opts.location, "include"),
                   "lua.h", "luaconf.h", "lualib.h", "lauxlib.h", "lua.hpp")
        copy_files(os.path.join(opts.location, "lib"))
        shutil.copy(arch_file, os.path.join(opts.location, "lib", target_arch_file))
        # NOTE(review): so_file is copied unconditionally, but no
        # libluajit.so is produced on Windows -- confirm this path.
        shutil.copy(so_file, os.path.join(opts.location, "lib", target_so_file))

        jitlib_path = os.path.join(
            opts.location, "share", "lua", self.major_version, "jit")

        if os.path.exists(jitlib_path):
            shutil.rmtree(jitlib_path)

        copy_dir("jit", jitlib_path)
class LuaRocks(Program):
    """LuaRocks package manager, configured against the Lua installed in
    opts.location."""

    name = "luarocks"
    title = "LuaRocks"
    downloads = "http://keplerproject.github.io/luarocks/releases"
    win32_zip = os.name == "nt"
    default_repo = "https://github.com/keplerproject/luarocks"
    versions = [
        "2.0.8", "2.0.9", "2.0.10", "2.0.11", "2.0.12",
        "2.1.0", "2.1.1", "2.1.2",
        "2.2.0", "2.2.1", "2.2.2"
    ]
    translations = {
        "2": "2.2.2",
        "2.0": "2.0.12",
        "2.1": "2.1.2",
        "2.2": "2.2.2",
        "3": "@luarocks-3",
        "^": "2.2.2"
    }

    def is_luarocks_2_0(self):
        # LuaRocks 2.0.x builds with plain "make"; later versions need
        # "make build". For git/local sources, sniff the Makefile's
        # default target instead of trusting a version number.
        if self.source_kind == "fixed":
            return self.versions.index(self.version) < self.versions.index("2.1.0")

        makefile = open("Makefile")

        for line in makefile:
            if re.match("^\\s*all:\\s+built\\s*$", line):
                return True

        return False

    def build(self):
        self.fetch()
        print("Building LuaRocks" + self.version_suffix)
        run_command("./configure", "--prefix=" + quote(opts.location),
                    "--with-lua=" + quote(opts.location), "--force-config")
        run_command("make" if self.is_luarocks_2_0() else "make build")

    def install(self):
        print("Installing LuaRocks" + self.version_suffix)
        run_command("make install")
def get_manifest_name():
    # Manifest recording what is installed in the target location.
    return os.path.join(opts.location, "hererocks.manifest")
def get_installed_identifiers():
    """Read the manifest written by previous runs.

    Returns a dict mapping program name to its identifier list; empty
    when no manifest exists yet.
    """
    if not os.path.exists(get_manifest_name()):
        return {}

    identifiers = {}

    # Use a context manager so the manifest handle is always closed
    # (the original leaked it).
    with open(get_manifest_name()) as manifest_h:
        for line in manifest_h:
            cur_identifiers = line.strip().split("-")

            if cur_identifiers:
                identifiers[cur_identifiers[0]] = cur_identifiers

    return identifiers
def save_installed_identifiers(identifiers):
    """Write one identifier line per installed program to the manifest."""
    with open(get_manifest_name(), "w") as manifest_h:
        for program in [RioLua, LuaJIT, LuaRocks]:
            program_identifiers = identifiers.get(program.name)

            if program_identifiers is not None:
                manifest_h.write(identifiers_to_string(program_identifiers))
                manifest_h.write("\n")
def main():
    """Parse command-line arguments, then install the requested Lua or
    LuaJIT and/or LuaRocks into the given location."""
    parser = argparse.ArgumentParser(
        description=hererocks_version + " a tool for installing Lua and/or LuaRocks locally.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter, add_help=False)
    parser.add_argument(
        "location", help="Path to directory in which Lua and/or LuaRocks will be installed. "
        "Their binaries will be found in its 'bin' subdirectory. "
        "Scripts from modules installed using LuaRocks will also turn up there. "
        "If an incompatible version of Lua is already installed there it should be "
        "removed before installing the new one. ")
    parser.add_argument(
        "-l", "--lua", help="Version of standard PUC-Rio Lua to install. "
        "Version can be specified as a version number, e.g. 5.2 or 5.3.1. "
        "Versions 5.1.0 - 5.3.2 are supported, "
        "'^' can be used to install the latest stable version. "
        "If the argument contains '@', sources will be downloaded "
        "from a git repo using URI before '@' and using part after '@' as git reference "
        "to checkout, 'master' by default. "
        "Default git repo is https://github.com/lua/lua which contains tags for most "
        "unstable versions, i.e. Lua 5.3.2-rc1 can be installed using '@5.3.2-rc1' as version. "
        "The argument can also be a path to local directory.")
    parser.add_argument(
        "-j", "--luajit", help="Version of LuaJIT to install. "
        "Version can be specified in the same way as for standard Lua. "
        "Versions 2.0.0 - 2.1 are supported. "
        "When installing from the LuaJIT main git repo its URI can be left out, "
        "so that '@458a40b' installs from a commit and '@' installs from the master branch.")
    parser.add_argument(
        "-r", "--luarocks", help="Version of LuaRocks to install. "
        "As with Lua, a version number (in range 2.0.8 - 2.2.2), '^', git URI with reference or "
        "a local path can be used. '3' can be used as a version number and installs from "
        "the 'luarocks-3' branch of the standard LuaRocks git repo. "
        "Note that Lua 5.2 is not supported in LuaRocks 2.0.8 "
        "and Lua 5.3 is supported only since LuaRocks 2.2.0.")
    parser.add_argument("-i", "--ignore-installed", default=False, action="store_true",
                        help="Install even if requested version is already present.")
    parser.add_argument(
        "--compat", default="default", choices=["default", "none", "all", "5.1", "5.2"],
        help="Select compatibility flags for Lua.")
    parser.add_argument(
        "--cflags", default=None,
        help="Pass additional options to C compiler when building Lua or LuaJIT.")
    parser.add_argument("--target", help="Use 'make TARGET' when building standard Lua.",
                        default=get_default_lua_target())
    # The next three options are hidden from --help output.
    parser.add_argument("--downloads",
                        # help="Cache downloads in 'DOWNLOADS' directory.",
                        help=argparse.SUPPRESS, default=get_default_cache())
    parser.add_argument("--no-git-cache",
                        # help="Do not cache default git repos.",
                        help=argparse.SUPPRESS, action="store_true", default=False)
    parser.add_argument("--builds",
                        # help="Cache Lua and LuaJIT builds in 'BUILDS' directory.",
                        help=argparse.SUPPRESS, default=None)
    parser.add_argument("--verbose", default=False, action="store_true",
                        help="Show executed commands and their output.")
    parser.add_argument("-v", "--version", help="Show program's version number and exit.",
                        action="version", version=hererocks_version)
    parser.add_argument("-h", "--help", help="Show this help message and exit.", action="help")

    global opts, temp_dir
    opts = parser.parse_args()

    if not opts.lua and not opts.luajit and not opts.luarocks:
        parser.error("nothing to install")

    if opts.lua and opts.luajit:
        parser.error("can't install both PUC-Rio Lua and LuaJIT")

    opts.location = os.path.abspath(opts.location)
    opts.downloads = os.path.abspath(opts.downloads)

    if opts.builds is not None:
        opts.builds = os.path.abspath(opts.builds)

    start_dir = os.getcwd()
    temp_dir = tempfile.mkdtemp()

    identifiers = get_installed_identifiers()
    identifiers_changed = False

    if not os.path.exists(opts.location):
        os.makedirs(opts.location)

    if opts.lua:
        # Installing PUC-Rio Lua invalidates any recorded LuaJIT install.
        identifiers["LuaJIT"] = None
        identifiers_changed = RioLua(opts.lua).update_identifiers(identifiers)
        os.chdir(start_dir)

    if opts.luajit:
        # Installing LuaJIT invalidates any recorded PUC-Rio Lua install.
        identifiers["lua"] = None
        identifiers_changed = LuaJIT(opts.luajit).update_identifiers(identifiers)
        os.chdir(start_dir)

    if opts.luarocks:
        if LuaRocks(opts.luarocks).update_identifiers(identifiers):
            identifiers_changed = True

        os.chdir(start_dir)

    if identifiers_changed:
        save_installed_identifiers(identifiers)

    shutil.rmtree(temp_dir)
    print("Done.")


if __name__ == "__main__":
    main()
Mention --no-git-cache in hererocks help
#!/usr/bin/env python
"""A tool for installing Lua and LuaRocks locally."""
from __future__ import print_function
import argparse
import os
import re
import shutil
import string
import subprocess
import sys
import tarfile
import tempfile
import zipfile
try:
from urllib import urlretrieve
except ImportError:
from urllib.request import urlretrieve
hererocks_version = "Hererocks 0.3.1"
__all__ = ["main"]

# Parsed command-line options and the scratch build directory; both are
# assigned in main() and read by the helpers and Program classes below.
opts = None
temp_dir = None

# Maps sys.platform prefixes to Lua Makefile build targets.
platform_to_lua_target = {
    "linux": "linux",
    "win": "mingw",
    "darwin": "macosx",
    "freebsd": "freebsd"
}
def get_default_lua_target():
    # Pick the Lua Makefile target matching the current platform; fall
    # back to "posix"/"generic" for unknown systems.
    for platform, lua_target in platform_to_lua_target.items():
        if sys.platform.startswith(platform):
            return lua_target

    return "posix" if os.name == "posix" else "generic"


def get_default_cache():
    # Per-user cache directory for downloads and cached git repos.
    if os.name == "nt":
        cache_root = os.getenv("LOCALAPPDATA") or os.path.join(
            os.getenv("USERPROFILE"), "Local Settings", "Application Data")
        return os.path.join(cache_root, "HereRocks", "Cache")
    else:
        return os.path.join(os.getenv("HOME"), ".cache", "hererocks")


def quote(command_arg):
    # Single-quote an argument for the shell, escaping embedded quotes.
    return "'" + command_arg.replace("'", "'\"'\"'") + "'"
def exec_command(capture, *args):
    # Run a shell command built by joining args. When capture is true,
    # return its decoded output ("" for a failing command with empty
    # output); otherwise return False. Any other failure exits the process.
    command = " ".join(args)

    if opts.verbose:
        print("Running " + command)

    live_output = opts.verbose and not capture
    runner = subprocess.check_call if live_output else subprocess.check_output

    try:
        output = runner(command, stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError as exception:
        if capture and not exception.output.strip():
            # Ignore errors if output is empty.
            return ""

        if not live_output:
            # NOTE(review): exception.output is bytes on Python 3, so this
            # write raises TypeError there -- confirm and decode if needed.
            sys.stdout.write(exception.output)

        sys.exit("Error: got exitcode {} from command {}".format(
            exception.returncode, command))

    if opts.verbose and capture:
        sys.stdout.write(output.decode("UTF-8"))

    return capture and output.decode("UTF-8")


def run_command(*args):
    # Convenience wrapper: run a command without capturing its output.
    exec_command(False, *args)
def copy_dir(src, dst):
    # Recursive copy that skips any ".git" directory.
    shutil.copytree(src, dst, ignore=lambda _, __: {".git"})


# Hosts known to serve smart HTTP git transport, so shallow clones
# (--depth) are safe there; see git_clone_command().
clever_http_git_whitelist = [
    "http://github.com/", "https://github.com/",
    "http://bitbucket.com/", "https://bitbucket.com/"
]
# Lazily-detected capability flag; see set_git_branch_accepts_tags().
git_branch_accepts_tags = None


def set_git_branch_accepts_tags():
    # git >= 1.7.10 accepts a tag name for "git clone --branch"; detect
    # the installed version once and cache the answer in the global.
    global git_branch_accepts_tags

    if git_branch_accepts_tags is None:
        version_output = exec_command(True, "git --version")
        match = re.search("(\d+)\.(\d+)\.?(\d*)", version_output)

        if match:
            major = int(match.group(1))
            minor = int(match.group(2))
            tiny = int(match.group(3) or "0")
            git_branch_accepts_tags = major > 1 or (
                major == 1 and (minor > 7 or (minor == 7 and tiny >= 10)))
def git_clone_command(repo, ref, is_cache):
    """Return (clone command, needs separate checkout) for fetching *repo*.

    Shallow clones are preferred; cached repos, possibly-dumb http(s)
    transports and raw commit hashes all force a full clone.
    """
    if is_cache:
        # Cache full repos.
        return "git clone", True

    # Http(s) transport may be dumb and not understand --depth.
    is_http = repo.startswith(("http://", "https://"))
    if is_http and not any(repo.startswith(host) for host in clever_http_git_whitelist):
        return "git clone", True

    # Have to clone whole repo to get a specific commit.
    if all(c in string.hexdigits for c in ref):
        return "git clone", True

    set_git_branch_accepts_tags()

    if git_branch_accepts_tags:
        return "git clone --depth=1 --branch=" + quote(ref), False
    else:
        return "git clone --depth=1", True
def url_to_name(s):
    """Replace every non-word character in *s* with an underscore.

    Used to turn URLs and paths into safe identifier components.
    """
    # r"\W" is the complement of [a-zA-Z0-9_], i.e. the same set as the
    # previous non-raw "[^\w]" but without relying on unknown-escape
    # pass-through.
    return re.sub(r"\W", "_", s)
def identifiers_to_string(identifiers):
    """Return the identifier strings joined into one dash-separated string."""
    separator = "-"
    return separator.join(identifiers)
def copy_files(path, *files):
    """Copy each non-None entry of *files* into *path*, creating it if needed."""
    if not os.path.exists(path):
        os.makedirs(path)

    for src in (name for name in files if name is not None):
        shutil.copy(src, path)
def exe(name):
    """Append ".exe" to *name* on Windows; return it unchanged elsewhere."""
    suffix = ".exe" if os.name == "nt" else ""
    return name + suffix
class Program(object):
    """Base class for an installable program (Lua, LuaJIT or LuaRocks).

    Subclasses provide class attributes: ``name``, ``title``,
    ``downloads`` (base download URL), ``win32_zip``, ``default_repo``,
    ``versions`` (known releases) and ``translations`` (version aliases).
    Relies on the module globals ``opts`` (parsed CLI options) and
    ``temp_dir`` (scratch directory); many methods change the current
    working directory as a side effect.
    """

    def __init__(self, version):
        """Resolve *version* into one of three source kinds.

        *version* may be a known release number ("fixed"), a git spec of
        the form ``[repo]@[ref]`` ("git"), or a path to a local source
        tree ("local").  Git sources are fetched immediately so the
        exact commit is known; local sources are copied into the temp
        dir, which becomes the current directory.
        """
        version = self.translations.get(version, version)
        if version in self.versions:
            # Simple version.
            self.source_kind = "fixed"
            self.fetched = False
            self.version = version
            self.version_suffix = " " + version
        elif "@" in version:
            # Version from a git repo.
            self.source_kind = "git"
            if version.startswith("@"):
                # Use the default git repo for this program.
                self.repo = self.default_repo
                ref = version[1:] or "master"
            else:
                self.repo, _, ref = version.partition("@")
            # Have to clone the repo to get the commit ref points to.
            self.fetch_repo(ref)
            self.commit = exec_command(True, "git rev-parse HEAD").strip()
            self.version_suffix = " @" + self.commit[:7]
        else:
            # Local directory.
            self.source_kind = "local"
            if not os.path.exists(version):
                sys.exit("Error: bad {} version {}".format(self.title, version))
            print("Using {} from {}".format(self.title, version))
            result_dir = os.path.join(temp_dir, self.name)
            copy_dir(version, result_dir)
            os.chdir(result_dir)
            self.fetched = True
            self.version_suffix = ""

    def fetch_repo(self, ref):
        """Clone (or update a cached clone of) the git repo, at *ref*.

        Default repos are kept under ``opts.downloads`` unless
        --no-git-cache was given; other repos are cloned straight into
        the temp dir.  Leaves the repo as the current directory.
        """
        message = "Cloning {} from {} @{}".format(self.title, self.repo, ref)
        if self.repo == self.default_repo and not opts.no_git_cache:
            # Default repos are cached.
            if not os.path.exists(opts.downloads):
                os.makedirs(opts.downloads)
            repo_path = os.path.join(opts.downloads, self.name)
            self.fetched = False
            if os.path.exists(repo_path):
                print(message + " (cached)")
                # Sync with origin first.
                os.chdir(repo_path)
                if not exec_command(True, "git rev-parse --quiet --verify", quote(ref)):
                    run_command("git fetch")
                run_command("git checkout", quote(ref))
                # If HEAD is not detached, we are on a branch that must be synced.
                if exec_command(True, "git symbolic-ref -q HEAD"):
                    run_command("git pull --rebase")
                return
        else:
            self.fetched = True
            repo_path = os.path.join(temp_dir, self.name)
        print(message)
        clone_command, need_checkout = git_clone_command(self.repo, ref, not self.fetched)
        run_command(clone_command, quote(self.repo), quote(repo_path))
        os.chdir(repo_path)
        if need_checkout and ref != "master":
            run_command("git checkout", quote(ref))

    def get_download_name(self):
        # E.g. "lua-5.3.2" or "luarocks-2.2.2-win32".
        return self.name + "-" + self.version + ("-win32" if self.win32_zip else "")

    def get_download_url(self):
        # Windows LuaRocks releases ship as .zip, everything else as .tar.gz.
        return self.downloads + "/" + self.get_download_name() + (
            ".zip" if self.win32_zip else ".tar.gz")

    def fetch(self):
        """Obtain the sources and chdir into them; no-op if already fetched."""
        if self.fetched:
            return
        if self.source_kind == "git":
            # Currently inside the cached git repo, just copy it somewhere.
            result_dir = os.path.join(temp_dir, self.name)
            copy_dir(".", result_dir)
            os.chdir(result_dir)
            return
        if not os.path.exists(opts.downloads):
            os.makedirs(opts.downloads)
        archive_name = os.path.join(opts.downloads, self.name + self.version)
        url = self.get_download_url()
        message = "Fetching {} from {}".format(self.title, url)
        if not os.path.exists(archive_name):
            print(message)
            urlretrieve(url, archive_name)
        else:
            print(message + " (cached)")
        if self.win32_zip:
            archive = zipfile.ZipFile(archive_name)
        else:
            archive = tarfile.open(archive_name, "r:gz")
        archive.extractall(temp_dir)
        archive.close()
        os.chdir(os.path.join(temp_dir, self.get_download_name()))
        self.fetched = True

    def set_identifiers(self):
        """Compute the identifier list recorded in the install manifest."""
        if self.source_kind == "fixed":
            self.identifiers = [self.name, self.version]
        elif self.source_kind == "git":
            self.identifiers = [self.name, "git", url_to_name(self.repo), url_to_name(self.commit)]
        else:
            # Local source trees cannot be identified reliably.
            self.identifiers = None

    def update_identifiers(self, all_identifiers):
        """Build and install unless this exact version is already installed.

        Returns True when an install actually happened (the caller must
        then persist *all_identifiers* to the manifest).
        """
        installed_identifiers = all_identifiers.get(self.name)
        self.set_identifiers()
        if not opts.ignore_installed:
            if self.identifiers is not None and self.identifiers == installed_identifiers:
                print(self.title + self.version_suffix + " already installed")
                return False
        self.build()
        self.install()
        all_identifiers[self.name] = self.identifiers
        return True
class Lua(Program):
    """Base class shared by the PUC-Rio Lua and LuaJIT installers.

    On top of Program it tracks the interpreter's major version, maps
    the --compat option onto C defines, and patches src/luaconf.h so
    that the default package.path/package.cpath point into the install
    location.
    """

    def __init__(self, version):
        super(Lua, self).__init__(version)
        if self.source_kind == "fixed":
            self.major_version = self.major_version_from_version()
        else:
            # Git/local sources: read the version out of src/lua.h.
            self.major_version = self.major_version_from_source()
        if not self.version_suffix:
            self.version_suffix = " " + self.major_version
        self.set_compat()
        self.add_options_to_version_suffix()
        # Lines inserted near the top of luaconf.h / before its #endif.
        self.defines = []
        self.redefines = []
        self.add_compat_to_defines()
        self.set_package_paths()
        self.add_package_paths_to_defines()

    @staticmethod
    def major_version_from_source():
        """Extract "5.x" from the LUA_VERSION_NUM define in src/lua.h.

        Returns None implicitly when the define is not found.
        """
        lua_h = open(os.path.join("src", "lua.h"))
        for line in lua_h:
            match = re.match("^\\s*#define\\s+LUA_VERSION_NUM\\s+50(\d)\\s*$", line)
            if match:
                return "5." + match.group(1)

    def set_identifiers(self):
        super(Lua, self).set_identifiers()
        if self.identifiers is not None:
            # Build options change the result, so they are part of identity.
            self.identifiers.extend(map(url_to_name, [
                opts.target, self.compat, opts.cflags or "", opts.location
            ]))

    def add_options_to_version_suffix(self):
        """Append non-default build options to the printed version string."""
        options = []
        if opts.target != get_default_lua_target():
            options.append(("target", opts.target))
        if self.compat != "default":
            options.append(("compat", self.compat))
        if opts.cflags is not None:
            options.append(("cflags", opts.cflags))
        if options:
            self.version_suffix += " (" + (", ".join(
                opt + ": " + value for opt, value in options)) + ")"

    def set_package_paths(self):
        """Compute package.path and package.cpath for the install tree."""
        # Lua 5.1 lists "./?.lua" first; later versions list it last.
        local_paths_first = self.major_version == "5.1"
        module_path = os.path.join(opts.location, "share", "lua", self.major_version)
        module_path_parts = [
            os.path.join(module_path, "?.lua"),
            os.path.join(module_path, "?", "init.lua")
        ]
        module_path_parts.insert(0 if local_paths_first else 2, os.path.join(".", "?.lua"))
        self.package_path = ";".join(module_path_parts)
        cmodule_path = os.path.join(opts.location, "lib", "lua", self.major_version)
        so_extension = ".dll" if os.name == "nt" else ".so"
        cmodule_path_parts = [
            os.path.join(cmodule_path, "?" + so_extension),
            os.path.join(cmodule_path, "loadall" + so_extension)
        ]
        cmodule_path_parts.insert(0 if local_paths_first else 2,
                                  os.path.join(".", "?" + so_extension))
        self.package_cpath = ";".join(cmodule_path_parts)

    def add_package_paths_to_defines(self):
        """Redefine LUA_PATH_DEFAULT/LUA_CPATH_DEFAULT in luaconf.h."""
        # Escape backslashes (Windows paths) for use inside a C string literal.
        package_path = self.package_path.replace("\\", "\\\\")
        package_cpath = self.package_cpath.replace("\\", "\\\\")
        self.redefines.extend([
            "#undef LUA_PATH_DEFAULT",
            "#undef LUA_CPATH_DEFAULT",
            "#define LUA_PATH_DEFAULT \"{}\"".format(package_path),
            "#define LUA_CPATH_DEFAULT \"{}\"".format(package_cpath)
        ])

    def patch_defines(self):
        """Splice self.defines and self.redefines into src/luaconf.h.

        The file is handled as bytes.  Defines go right after the first
        #define (the include guard), so later conditionals see them;
        redefines go just before the final #endif, so they override
        anything defined above.
        """
        defines = "\n".join(self.defines)
        redefines = "\n".join(self.redefines)
        luaconf_h = open(os.path.join("src", "luaconf.h"), "rb")
        luaconf_src = luaconf_h.read()
        luaconf_h.close()
        body, _, tail = luaconf_src.rpartition(b"#endif")
        header, _, main = body.partition(b"#define")
        first_define, main = main.split(b"\n", 1)
        luaconf_h = open(os.path.join("src", "luaconf.h"), "wb")
        luaconf_h.write(header + b"#define" + first_define + b"\n")
        luaconf_h.write(defines.encode("UTF-8") + b"\n")
        luaconf_h.write(main)
        luaconf_h.write(redefines.encode("UTF-8") + b"\n")
        luaconf_h.write(b"#endif")
        luaconf_h.write(tail)
        luaconf_h.close()

    def build(self):
        """Build Lua, reusing a cached build directory when one exists."""
        if opts.builds and self.identifiers is not None:
            self.cached_build_path = os.path.join(opts.builds,
                identifiers_to_string(self.identifiers))
            if os.path.exists(self.cached_build_path):
                print("Building " + self.title + self.version_suffix + " (cached)")
                os.chdir(self.cached_build_path)
                return
        else:
            self.cached_build_path = None
        self.fetch()
        print("Building " + self.title + self.version_suffix)
        self.patch_defines()
        self.make()
        if self.cached_build_path is not None:
            # Populate the build cache for next time.
            copy_dir(".", self.cached_build_path)

    def install(self):
        print("Installing " + self.title + self.version_suffix)
        self.make_install()
class RioLua(Lua):
    """Installer for the standard PUC-Rio Lua interpreter."""
    name = "lua"
    title = "Lua"
    downloads = "http://www.lua.org/ftp"
    win32_zip = False
    default_repo = "https://github.com/lua/lua"
    versions = [
        "5.1", "5.1.1", "5.1.2", "5.1.3", "5.1.4", "5.1.5",
        "5.2.0", "5.2.1", "5.2.2", "5.2.3", "5.2.4",
        "5.3.0", "5.3.1", "5.3.2"
    ]
    translations = {
        "5": "5.3.2",
        "5.1": "5.1.5",
        "5.1.0": "5.1",
        "5.2": "5.2.4",
        "5.3": "5.3.2",
        "^": "5.3.2"
    }

    def major_version_from_version(self):
        """Derive "5.x" from a full version number such as "5.3.2"."""
        return self.version[:3]

    def set_compat(self):
        """Map the --compat option onto this Lua version's compat levels."""
        if self.major_version == "5.1":
            self.compat = "none" if opts.compat == "none" else "default"
        elif self.major_version == "5.2":
            self.compat = "none" if opts.compat in ["none", "5.2"] else "default"
        else:
            self.compat = "default" if opts.compat in ["default", "5.2"] else opts.compat

    def add_compat_to_defines(self):
        """Translate the compat level into LUA_COMPAT_* (re)defines."""
        if self.compat != "default":
            if self.major_version == "5.1":
                if self.compat == "none":
                    # 5.1 defines these unconditionally in luaconf.h, so
                    # they must be undefined after the fact (redefines).
                    self.redefines.extend([
                        "#undef LUA_COMPAT_VARARG", "#undef LUA_COMPAT_MOD",
                        "#undef LUA_COMPAT_LSTR", "#undef LUA_COMPAT_GFIND",
                        "#undef LUA_COMPAT_OPENLIB"
                    ])
            elif self.major_version == "5.2":
                self.defines.append("#undef LUA_COMPAT_ALL")
            elif self.compat == "none":
                self.defines.append("#undef LUA_COMPAT_5_2")
            elif self.compat == "5.1":
                self.defines.append("#undef LUA_COMPAT_5_2")
                self.defines.append("#define LUA_COMPAT_5_1")
            else:
                self.defines.append("#define LUA_COMPAT_5_1")

    def set_files(self):
        """Decide which binaries and libraries the build produces."""
        self.lua_file = exe("lua")
        self.luac_file = exe("luac")
        self.arch_file = "liblua.a"
        self.dll_file = None

        if os.name == "nt":
            self.dll_file = "lua5" + self.major_version[2] + ".dll"

            if opts.target == "cl":
                # MSVC builds produce no static archive to install.
                self.arch_file = None

    def make(self):
        """Build Lua with make, honouring --cflags."""
        cmd = "make"

        if opts.cflags is not None:
            if self.major_version == "5.1":
                # Lua 5.1 doesn't support passing MYCFLAGS to Makefile.
                # Patch the CFLAGS line in src/Makefile instead.  The
                # file is processed as bytes, so the extra flags must be
                # encoded before concatenation: the previous
                # `bytes + " " + opts.cflags` raised TypeError on Python 3.
                makefile_path = os.path.join("src", "Makefile")

                with open(makefile_path, "rb") as makefile_h:
                    makefile_src = makefile_h.read()

                before, it, after = makefile_src.partition(b"CFLAGS= -O2 -Wall $(MYCFLAGS)")
                makefile_src = before + it + b" " + opts.cflags.encode("UTF-8") + after

                with open(makefile_path, "wb") as makefile_h:
                    makefile_h.write(makefile_src)
            else:
                cmd = "make MYCFLAGS=" + quote(opts.cflags)

        run_command(cmd, opts.target)

    def make_install(self):
        """Copy built binaries, headers and the static library into place."""
        self.set_files()
        os.chdir("src")
        copy_files(os.path.join(opts.location, "bin"),
                   self.lua_file, self.luac_file, self.dll_file)

        lua_hpp = "lua.hpp"

        if not os.path.exists(lua_hpp):
            # Lua 5.1 keeps lua.hpp in etc/ instead of src/.
            lua_hpp = "../etc/lua.hpp"

        copy_files(os.path.join(opts.location, "include"),
                   "lua.h", "luaconf.h", "lualib.h", "lauxlib.h", lua_hpp)
        copy_files(os.path.join(opts.location, "lib"), self.arch_file)
class LuaJIT(Lua):
    """Installer for LuaJIT, a JIT-compiling Lua 5.1 implementation."""
    name = "LuaJIT"
    title = "LuaJIT"
    downloads = "https://github.com/LuaJIT/LuaJIT/archive"
    win32_zip = False
    default_repo = "https://github.com/LuaJIT/LuaJIT"
    versions = [
        "2.0.0", "2.0.1", "2.0.2", "2.0.3", "2.0.4"
    ]
    translations = {
        "2": "2.0.4",
        "2.0": "2.0.4",
        "2.1": "@v2.1",
        "^": "2.0.4"
    }

    def get_download_url(self):
        # Release tarballs live under GitHub's archive URL scheme.
        return self.downloads + "/v" + self.version + ".tar.gz"

    @staticmethod
    def major_version_from_version():
        """LuaJIT always implements Lua 5.1."""
        return "5.1"

    def set_compat(self):
        """LuaJIT's only extra compat level is the Lua 5.2 extensions."""
        self.compat = "5.2" if opts.compat in ["all", "5.2"] else "default"

    def add_compat_to_defines(self):
        if self.compat != "default":
            self.defines.append("#define LUAJIT_ENABLE_LUA52COMPAT")

    @staticmethod
    def make():
        """Build LuaJIT with make, or msvcbuild.bat for MSVC on Windows."""
        if os.name == "nt" and opts.target == "cl":
            os.chdir("src")
            run_command("msvcbuild.bat")
            os.chdir("..")
        else:
            run_command("make" if opts.cflags is None else "make XCFLAGS=" + quote(opts.cflags))

    def make_install(self):
        """Install the luajit binary (as both luajit and lua) plus libraries."""
        luajit_file = exe("luajit")
        lua_file = exe("lua")
        arch_file = "libluajit.a"
        target_arch_file = "libluajit-5.1.a"
        so_file = "libluajit.so"
        target_so_file = "libluajit-5.1.so.2"
        dll_file = None

        if os.name == "nt":
            # Fix: assign the LOCAL arch_file used by the copy below.
            # The old code set self.arch_file, leaving the local at
            # "libluajit.a" which does not exist in an MSVC build.
            arch_file = "lua51.lib"
            target_arch_file = "lua51.lib"
            dll_file = "lua51.dll"

        os.chdir("src")
        copy_files(os.path.join(opts.location, "bin"), dll_file)
        shutil.copy(luajit_file, os.path.join(opts.location, "bin", lua_file))

        copy_files(os.path.join(opts.location, "include"),
                   "lua.h", "luaconf.h", "lualib.h", "lauxlib.h", "lua.hpp")

        copy_files(os.path.join(opts.location, "lib"))
        shutil.copy(arch_file, os.path.join(opts.location, "lib", target_arch_file))

        if os.name != "nt":
            # No .so is produced on Windows, only the .lib/.dll pair.
            shutil.copy(so_file, os.path.join(opts.location, "lib", target_so_file))

        jitlib_path = os.path.join(
            opts.location, "share", "lua", self.major_version, "jit")

        if os.path.exists(jitlib_path):
            shutil.rmtree(jitlib_path)

        copy_dir("jit", jitlib_path)
class LuaRocks(Program):
    """Installer for the LuaRocks package manager."""
    name = "luarocks"
    title = "LuaRocks"
    downloads = "http://keplerproject.github.io/luarocks/releases"
    win32_zip = os.name == "nt"
    default_repo = "https://github.com/keplerproject/luarocks"
    versions = [
        "2.0.8", "2.0.9", "2.0.10", "2.0.11", "2.0.12",
        "2.1.0", "2.1.1", "2.1.2",
        "2.2.0", "2.2.1", "2.2.2"
    ]
    translations = {
        "2": "2.2.2",
        "2.0": "2.0.12",
        "2.1": "2.1.2",
        "2.2": "2.2.2",
        "3": "@luarocks-3",
        "^": "2.2.2"
    }

    def is_luarocks_2_0(self):
        """Return True for 2.0.x, which is built with plain `make`.

        For releases this is a version comparison; for git/local sources
        the Makefile itself is inspected for the 2.0-style "all: built"
        rule.
        """
        if self.source_kind == "fixed":
            return self.versions.index(self.version) < self.versions.index("2.1.0")

        # `with` closes the handle; the previous open() leaked it.
        with open("Makefile") as makefile:
            for line in makefile:
                if re.match("^\\s*all:\\s+built\\s*$", line):
                    return True

        return False

    def build(self):
        """Configure LuaRocks against the Lua installed in opts.location."""
        self.fetch()
        print("Building LuaRocks" + self.version_suffix)
        run_command("./configure", "--prefix=" + quote(opts.location),
                    "--with-lua=" + quote(opts.location), "--force-config")
        run_command("make" if self.is_luarocks_2_0() else "make build")

    def install(self):
        print("Installing LuaRocks" + self.version_suffix)
        run_command("make install")
def get_manifest_name():
    """Return the path of the install manifest inside opts.location."""
    return os.path.join(opts.location, "hererocks.manifest")
def get_installed_identifiers():
    """Parse the manifest into {program name: identifier list}.

    Returns an empty dict when no manifest exists.  Each manifest line
    is a dash-separated identifier list whose first element is the
    program name.
    """
    if not os.path.exists(get_manifest_name()):
        return {}

    identifiers = {}

    # `with` guarantees the handle is closed; the previous open() leaked it.
    with open(get_manifest_name()) as manifest_h:
        for line in manifest_h:
            cur_identifiers = line.strip().split("-")

            if cur_identifiers:
                identifiers[cur_identifiers[0]] = cur_identifiers

    return identifiers
def save_installed_identifiers(identifiers):
    """Write the identifier lists of installed programs to the manifest.

    Programs whose entry is None (uninstalled or unidentifiable) are
    skipped; each remaining program gets one dash-joined line.
    """
    # `with` flushes and closes the manifest even if a write fails.
    with open(get_manifest_name(), "w") as manifest_h:
        for program in [RioLua, LuaJIT, LuaRocks]:
            if identifiers.get(program.name) is not None:
                manifest_h.write(identifiers_to_string(identifiers[program.name]))
                manifest_h.write("\n")
def main():
    """Parse command-line options, then install the requested programs.

    Sets the module globals `opts` and `temp_dir`, installs Lua or
    LuaJIT (mutually exclusive) and/or LuaRocks into opts.location, and
    records what was installed in the manifest.
    """
    parser = argparse.ArgumentParser(
        description=hererocks_version + " a tool for installing Lua and/or LuaRocks locally.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter, add_help=False)
    parser.add_argument(
        "location", help="Path to directory in which Lua and/or LuaRocks will be installed. "
        "Their binaries will be found in its 'bin' subdirectory. "
        "Scripts from modules installed using LuaRocks will also turn up there. "
        "If an incompatible version of Lua is already installed there it should be "
        "removed before installing the new one. ")
    parser.add_argument(
        "-l", "--lua", help="Version of standard PUC-Rio Lua to install. "
        "Version can be specified as a version number, e.g. 5.2 or 5.3.1. "
        "Versions 5.1.0 - 5.3.2 are supported, "
        "'^' can be used to install the latest stable version. "
        "If the argument contains '@', sources will be downloaded "
        "from a git repo using URI before '@' and using part after '@' as git reference "
        "to checkout, 'master' by default. "
        "Default git repo is https://github.com/lua/lua which contains tags for most "
        "unstable versions, i.e. Lua 5.3.2-rc1 can be installed using '@5.3.2-rc1' as version. "
        "The argument can also be a path to local directory.")
    parser.add_argument(
        "-j", "--luajit", help="Version of LuaJIT to install. "
        "Version can be specified in the same way as for standard Lua. "
        "Versions 2.0.0 - 2.1 are supported. "
        "When installing from the LuaJIT main git repo its URI can be left out, "
        "so that '@458a40b' installs from a commit and '@' installs from the master branch.")
    parser.add_argument(
        "-r", "--luarocks", help="Version of LuaRocks to install. "
        "As with Lua, a version number (in range 2.0.8 - 2.2.2), '^', git URI with reference or "
        "a local path can be used. '3' can be used as a version number and installs from "
        "the 'luarocks-3' branch of the standard LuaRocks git repo. "
        "Note that Lua 5.2 is not supported in LuaRocks 2.0.8 "
        "and Lua 5.3 is supported only since LuaRocks 2.2.0.")
    parser.add_argument("-i", "--ignore-installed", default=False, action="store_true",
                        help="Install even if requested version is already present.")
    parser.add_argument(
        "--compat", default="default", choices=["default", "none", "all", "5.1", "5.2"],
        help="Select compatibility flags for Lua.")
    parser.add_argument(
        "--cflags", default=None,
        help="Pass additional options to C compiler when building Lua or LuaJIT.")
    parser.add_argument("--target", help="Use 'make TARGET' when building standard Lua.",
                        default=get_default_lua_target())
    parser.add_argument("--downloads",
                        # help="Cache downloads in 'DOWNLOADS' directory.",
                        help=argparse.SUPPRESS, default=get_default_cache())
    parser.add_argument("--no-git-cache",
                        help="Do not cache default git repos.",
                        action="store_true", default=False)
    parser.add_argument("--builds",
                        # help="Cache Lua and LuaJIT builds in 'BUILDS' directory.",
                        help=argparse.SUPPRESS, default=None)
    parser.add_argument("--verbose", default=False, action="store_true",
                        help="Show executed commands and their output.")
    parser.add_argument("-v", "--version", help="Show program's version number and exit.",
                        action="version", version=hererocks_version)
    parser.add_argument("-h", "--help", help="Show this help message and exit.", action="help")

    global opts, temp_dir
    opts = parser.parse_args()
    if not opts.lua and not opts.luajit and not opts.luarocks:
        parser.error("nothing to install")
    if opts.lua and opts.luajit:
        parser.error("can't install both PUC-Rio Lua and LuaJIT")
    opts.location = os.path.abspath(opts.location)
    opts.downloads = os.path.abspath(opts.downloads)
    if opts.builds is not None:
        opts.builds = os.path.abspath(opts.builds)
    start_dir = os.getcwd()
    temp_dir = tempfile.mkdtemp()
    identifiers = get_installed_identifiers()
    identifiers_changed = False
    if not os.path.exists(opts.location):
        os.makedirs(opts.location)
    if opts.lua:
        # Installing PUC-Rio Lua invalidates any recorded LuaJIT install.
        identifiers["LuaJIT"] = None
        identifiers_changed = RioLua(opts.lua).update_identifiers(identifiers)
        os.chdir(start_dir)
    if opts.luajit:
        # Installing LuaJIT invalidates any recorded PUC-Rio Lua install.
        identifiers["lua"] = None
        identifiers_changed = LuaJIT(opts.luajit).update_identifiers(identifiers)
        os.chdir(start_dir)
    if opts.luarocks:
        if LuaRocks(opts.luarocks).update_identifiers(identifiers):
            identifiers_changed = True
        os.chdir(start_dir)
    if identifiers_changed:
        save_installed_identifiers(identifiers)
    shutil.rmtree(temp_dir)
    print("Done.")
# Script entry point; allows importing this file as a module without side effects.
if __name__ == "__main__":
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.