gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib
import xml.etree.ElementTree as ET
from ftplib import FTP
from tempfile import NamedTemporaryFile
from django.utils.text import slugify
from base import BaseProcessor
from efe import iptc
class EFEXMLProcessor(BaseProcessor):
    """Imports EFE NewsML articles: lists XML files on the feed's FTP
    server, downloads each one, parses it and stores it as an entry.

    Relies on attributes supplied by BaseProcessor / the feed model:
    ``self.feed`` (credentials, folders, channel/site/user),
    ``self.log_model``, ``self.entry_model`` and ``self.verbose_print``.
    """

    def connect(self):
        """Open, store and return an authenticated FTP connection."""
        self.ftp = FTP(self.feed.source_url)
        self.ftp.login(self.feed.source_username,
                       self.feed.source_password)
        self.verbose_print(self.ftp.getwelcome())
        return self.ftp

    def get_temp_file(self):
        """Return a self-deleting temp file used to spool one download."""
        f = NamedTemporaryFile(delete=True)
        self.verbose_print('%s tempfile created' % f.name)
        return f

    def process_file(self, s):
        """Download and import a single file name ``s`` from the FTP
        listing; called once per line by ``retrlines('NLST', ...)``."""
        self.verbose_print('-' * 78)
        # Bug fix: normalize first. record_log() stores the *stripped*
        # name, so the duplicate check must compare the stripped name too
        # (previously the raw listing line was compared and never matched).
        s = s.strip().replace('\n', '')
        if self.log_model.objects.filter(type='created', text=s,
                                         feed=self.feed).exists():
            self.verbose_print('%s already exists, skipping.' % s)
            return
        ext = s.split('.')[-1]
        if ext not in ('XML', 'xml'):
            self.verbose_print('Skipping non xml %s' % s)
            return
        self.verbose_print('Retrieving file %s' % s)
        source_root_folder = self.feed.source_root_folder
        if not source_root_folder.endswith('/'):
            source_root_folder += '/'
        url = 'ftp://{0}:{1}@{2}{3}{4}'.format(
            self.feed.source_username,
            self.feed.source_password,
            self.feed.source_url,
            source_root_folder,
            s,
        )
        self.verbose_print(url)
        f = self.get_temp_file()
        try:
            urllib.urlretrieve(url, filename=f.name)
            self.verbose_print('File retrieved successfully')
        except Exception as e:
            self.verbose_print('error urlretrieve')
            self.verbose_print(str(e))
            f.close()
            return
        try:
            xml_string = f.read()
            self.verbose_print('xml_string read!')
        except Exception as e:
            self.verbose_print('error f.read')
            self.verbose_print(str(e))
            f.close()
            return
        if not xml_string:
            self.verbose_print('XML Empty')
            f.close()
            return
        data = self.parse_xml(f.name)
        f.close()
        # Bug fix: parse_xml() returns None for malformed or incomplete
        # documents; previously that None crashed categorize() with an
        # AttributeError and aborted the whole NLST callback loop.
        if not data:
            return
        data = self.categorize(data)
        self.verbose_print(str(data))
        created = self.create_entry(data)
        if created:
            self.record_log(s)

    def parse_xml(self, filename):
        """Extract entry fields from a NewsML file.

        Returns a dict with headline/body plus optional metadata, or None
        when the XML cannot be parsed or lacks a body or a headline.
        """
        data = {}
        try:
            tree = ET.parse(filename)
            root = tree.getroot()
        except Exception:
            return
        # Every section below is optional; a missing element simply
        # leaves the corresponding keys out of ``data``.
        try:
            data['headline'] = \
                root.find('./NewsItem/NewsComponent/NewsLines/HeadLine'
                          ).text
            data['subheadline'] = \
                root.find('./NewsItem/NewsComponent/NewsLines/SubHeadLine'
                          ).text
        except Exception:
            pass
        try:
            tobject_attrib = \
                root.find('./NewsItem/NewsComponent/ContentItem/DataContent/nitf/head/tobject/tobject.subject'
                          )
            data['iptc_code'] = \
                tobject_attrib.get('tobject.subject.refnum')
            data['iptc_matter'] = \
                tobject_attrib.get('tobject.subject.matter')
            data['iptc_type'] = \
                tobject_attrib.get('tobject.subject.type')
        except Exception:
            pass
        try:
            pub_data_attrib = \
                root.find('./NewsItem/NewsComponent/ContentItem/DataContent/nitf/head/pubdata'
                          )
            data['pub_date'] = pub_data_attrib.get('date.publication')
            data['item_len'] = pub_data_attrib.get('item-length')
        except Exception:
            pass
        try:
            data['abstract'] = \
                root.find('./NewsItem/NewsComponent/ContentItem/DataContent/nitf/body/body.head/abstract/'
                          ).text
        except Exception:
            pass
        try:
            data['owner'] = \
                root.find('./NewsItem/NewsComponent/ContentItem/DataContent/nitf/body/body.head/rights/'
                          ).text
        except Exception:
            pass
        try:
            data['story_data'] = \
                root.find('./NewsItem/NewsComponent/ContentItem/DataContent/nitf/body/body.head/dateline/story.date'
                          ).get('norm')
        except Exception:
            pass
        try:
            body = \
                root.find('./NewsItem/NewsComponent/ContentItem/DataContent/nitf/body/body.content'
                          )
            data['body'] = u'\n'.join(u'<p>{0}</p>'.format(p.text)
                                      for p in body.getchildren())
        except Exception:
            pass
        if not all([data.get('body'), data.get('headline')]):
            self.verbose_print('Data does not have body and headline %s'
                               % str(data))
            return
        return data

    def create_entry(self, data):
        """Create (or fetch) the entry for ``data``.

        Returns True when a new entry was created, falsy otherwise.
        """
        if not data:
            self.verbose_print('data is null')
            return
        # Bug fix: ``entry_title`` was previously referenced without ever
        # being assigned (NameError). The headline is the entry's title.
        entry_title = data['headline']
        try:
            (db_entry, created) = \
                self.entry_model.objects.get_or_create(
                    entry_feed=self.feed,
                    channel=self.feed.channel,
                    title=entry_title[:140],
                    slug=slugify(entry_title[:150]),
                    entry_title=entry_title,
                    site=self.feed.site,
                    user=self.feed.user,
                    published=True,
                    show_on_root_channel=True,
                )
            # Bug fix: the flag must be returned so process_file() can
            # record the file in the log and skip it on the next run.
            return created
        except Exception as e:
            self.verbose_print(str(data))
            self.verbose_print(str(e))

    def categorize(self, data):
        """Augment ``data`` with category info looked up by IPTC code."""
        if not data.get('iptc_code'):
            self.verbose_print('No iptc code to categorize')
            return data
        iptc_info = iptc.get(data['iptc_code'])
        if iptc_info:
            data.update(iptc_info)
        else:
            # Unknown code: fall back to the raw fields from the XML.
            data['parent_desc'] = data.get('iptc_type')
            data['desc'] = data.get('iptc_matter')
            data['cod'] = data['iptc_code']
            data['parent'] = None
            data['cat'] = None
        return data

    def record_log(self, s):
        """Remember that file name ``s`` was imported successfully."""
        self.log_model.objects.create(feed=self.feed, type='created',
                                      text=s)

    def process(self):
        """Entry point: connect, cd into the feed's root folder and
        process every name returned by the NLST listing."""
        self.connect()
        self.ftp.cwd(self.feed.source_root_folder)
        self.verbose_print('Root folder changed to: %s'
                           % self.feed.source_root_folder)
        self.count = 0
        self.ftp.retrlines('NLST', self.process_file)
# LIST retrieves a list of files and information about those files.
# NLST retrieves a list of file names.
# On some servers, MLSD retrieves a machine readable list of files and information
# about those files
{
'story_data': '20130712T192900+0000',
'body': u'''<p>Montevid\xe9u, 12 jul (EFE).- Os pa\xedses do Mercosul decidiram nesta sexta-feira em sua c\xfapula semestral no Uruguai revogar a partir do dia 15 de agosto a suspens\xe3o do Paraguai, uma vez que Horacio Cartes assuma a presid\xeancia do pa\xeds.</p>
<p>Ap\xf3s "avaliar positivamente" a realiza\xe7\xe3o das elei\xe7\xf5es gerais no Paraguai no \xfaltimo dia 21 de abril, os presidentes de Brasil, Dilma Rousseff; Argentina, Cristina Kirchner; Uruguai, Jos\xe9 Mujica; e Venezuela, Nicol\xe1s Maduro, decidiram "cessar" a suspens\xe3o imposta no dia 29 de junho de 2012 devido \xe0 cassa\xe7\xe3o por parte do Parlamento paraguaio do ent\xe3o presidente Fernando Lugo.</p>
<p>A partir da posse do novo governo paraguaio "ser\xe3o considerados cumpridos" os requisitos estabelecidos no artigo 7 do Protocolo de Ushuaia sobre o compromisso democr\xe1tico.</p>
<p>A partir do pr\xf3ximo m\xeas, o Paraguai "reassumir\xe1 plenamente seu direito de participar dos \xf3rg\xe3os do Mercosul e das delibera\xe7\xf5es", informa a declara\xe7\xe3o dos l\xedderes.</p>
<p>As autoridades do Paraguai, o quinto integrante do Mercado Comum do Sul, n\xe3o participam da reuni\xe3o. EFE</p>
<p>jf/rsd</p>''',
'item_len': '00166',
'iptc_matter': 'Organismos internacionais',
'headline': u'Mercosul revogar\xe1 suspens\xe3o do Paraguai a partir de 15 de agosto',
'iptc_code': '11014000',
'iptc_type': u'Pol\xedtica',
'subheadline': u'MERCOSUL C\xdaPULA',
'owner': 'Agencia EFE',
'pub_date': '20130712T192900+0000',
'abstract': u'Os pa\xedses do Mercosul decidiram nesta sexta-feira em sua c\xfapula semestral no Uruguai revogar a partir do dia 15 de agosto a suspens\xe3o do Paraguai, uma vez que Horacio Cartes assuma a presid\xeancia do pa\xeds.',
}
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import Sequence # noqa
import logging
from django.conf import settings # noqa
from horizon import exceptions
__all__ = ('APIResourceWrapper', 'APIDictWrapper',
'get_service_from_catalog', 'url_for',)
LOG = logging.getLogger(__name__)
class APIVersionManager(object):
    """Object to store and manage API versioning data and utility methods."""

    SETTINGS_KEY = "OPENSTACK_API_VERSIONS"

    def __init__(self, service_type, preferred_version=None):
        self.service_type = service_type
        self.preferred = preferred_version
        self._active = None
        self.supported = {}

    @property
    def active(self):
        """Lazily resolve and return the active version identifier."""
        if self._active is None:
            self.get_active_version()
        return self._active

    def load_supported_version(self, version, data):
        """Register ``data`` describing a supported API ``version``."""
        self.supported[version] = data

    def get_active_version(self):
        """Return the data for the active version, resolving it once.

        The version is taken from the OPENSTACK_API_VERSIONS setting when
        present, falling back to the preferred version given at init.
        """
        if self._active is not None:
            return self.supported[self._active]
        overrides = getattr(settings, self.SETTINGS_KEY, {})
        version = overrides.get(self.service_type)
        if version is None:
            # TODO(gabriel): support API version discovery here; we'll leave
            # the setting in as a way of overriding the latest available
            # version.
            version = self.preferred
        self._active = version
        return self.supported[self._active]
class APIResourceWrapper(object):
    """Simple wrapper for api objects.

    Define _attrs on the child class and pass in the
    api object as the only argument to the constructor.
    """
    _attrs = []

    def __init__(self, apiresource):
        self._apiresource = apiresource

    def __getattr__(self, attr):
        if attr not in self._attrs:
            msg = ('Attempted to access unknown attribute "%s" on '
                   'APIResource object of type "%s" wrapping resource of '
                   'type "%s".') % (attr, self.__class__,
                                    self._apiresource.__class__)
            LOG.debug(exceptions.error_color(msg))
            raise AttributeError(attr)
        # __getattr__ won't find properties
        return self._apiresource.__getattribute__(attr)

    def __repr__(self):
        # Only include declared attrs the wrapped resource actually has.
        shown = dict((name, getattr(self, name))
                     for name in self._attrs
                     if hasattr(self, name))
        return "<%s: %s>" % (self.__class__.__name__, shown)
class APIDictWrapper(object):
    """Simple wrapper for api dictionaries

    Some api calls return dictionaries. This class provides identical
    behavior as APIResourceWrapper, except that it will also behave as a
    dictionary, in addition to attribute accesses.

    Attribute access is the preferred method of access, to be
    consistent with api resource objects from novaclient.
    """

    def __init__(self, apidict):
        self._apidict = apidict

    def __getattr__(self, attr):
        # Missing keys are logged and surfaced as AttributeError so the
        # wrapper behaves like an object for attribute access.
        try:
            return self._apidict[attr]
        except KeyError:
            msg = 'Unknown attribute "%(attr)s" on APIResource object ' \
                  'of type "%(cls)s"' % {'attr': attr, 'cls': self.__class__}
            LOG.debug(exceptions.error_color(msg))
            raise AttributeError(msg)

    def __getitem__(self, item):
        # Delegate to __getattr__ but translate the failure back, since the
        # caller is expecting a KeyError from dict-style access.
        try:
            return self.__getattr__(item)
        except AttributeError as e:
            # caller is expecting a KeyError
            raise KeyError(e)

    def get(self, item, default=None):
        """Dict-style get(): return the value for ``item`` or ``default``."""
        try:
            return self.__getattr__(item)
        except AttributeError:
            return default

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self._apidict)
class Quota(object):
    """Wrapper for individual limits in a quota."""

    def __init__(self, name, limit):
        self.name = name
        self.limit = limit

    def __repr__(self):
        return "<Quota: ({0}, {1})>".format(self.name, self.limit)
class QuotaSet(Sequence):
    """Wrapper for client QuotaSet objects which turns the individual quotas
    into Quota objects for easier handling/iteration.

    `QuotaSet` objects support a mix of `list` and `dict` methods; you can use
    the bracket notiation (`qs["my_quota"] = 0`) to add new quota values, and
    use the `get` method to retrieve a specific quota, but otherwise it
    behaves much like a list or tuple, particularly in supporting iteration.
    """

    def __init__(self, apiresource=None):
        self.items = []
        if apiresource:
            if hasattr(apiresource, '_info'):
                pairs = apiresource._info.items()
            else:
                pairs = apiresource.items()
            for name, limit in pairs:
                # 'id' identifies the resource; it is not a quota value.
                if name == 'id':
                    continue
                self[name] = limit

    def __setitem__(self, k, v):
        limit = int(v) if v is not None else None
        self.items.append(Quota(k, limit))

    def __getitem__(self, index):
        return self.items[index]

    def __add__(self, other):
        """Merge another QuotaSet into this one. Existing quotas are
        not overriden.
        """
        if not isinstance(other, QuotaSet):
            msg = "Can only add QuotaSet to QuotaSet, " \
                  "but received %s instead" % type(other)
            raise ValueError(msg)
        for quota in other:
            # Only adopt quotas we don't already have a limit for.
            if self.get(quota.name).limit is None:
                self.items.append(quota)
        return self

    def __len__(self):
        return len(self.items)

    def __repr__(self):
        return repr(self.items)

    def get(self, key, default=None):
        """Return the last Quota named ``key``, or a new one with
        ``default`` as its limit."""
        matches = [quota for quota in self.items if quota.name == key]
        return matches.pop() if matches else Quota(key, default)

    def add(self, other):
        return self.__add__(other)
def get_service_from_catalog(catalog, service_type):
    """Return the first catalog entry matching ``service_type``, else None."""
    if not catalog:
        return None
    for service in catalog:
        if service['type'] == service_type:
            return service
    return None
def get_version_from_service(service):
    """Infer the identity API version from a service catalog entry.

    V3 catalogs expose an 'interface' key on each endpoint; V2 catalogs
    do not. Defaults to 2.0 when no service is given.
    """
    if not service:
        return 2.0
    endpoint = service['endpoints'][0]
    return 3 if 'interface' in endpoint else 2.0
# Mapping of V2 Catalog Endpoint_type to V3 Catalog Interfaces
# (used by get_url_for_service to translate e.g. 'publicURL' -> 'public').
ENDPOINT_TYPE_TO_INTERFACE = {
    'publicURL': 'public',
    'internalURL': 'internal',
    'adminURL': 'admin',
}
def get_url_for_service(service, region, endpoint_type):
    """Return the endpoint URL for ``service`` in ``region``, or None.

    Handles both V2 catalogs (URL keyed directly by endpoint type) and V3
    catalogs (URL under 'url', filtered by interface). Region is ignored
    for the identity service.
    """
    identity_version = get_version_from_service(service)
    for endpoint in service['endpoints']:
        # ignore region for identity
        if service['type'] != 'identity' and region != endpoint['region']:
            continue
        try:
            if identity_version < 3:
                return endpoint[endpoint_type]
            interface = ENDPOINT_TYPE_TO_INTERFACE.get(endpoint_type, '')
            if endpoint['interface'] == interface:
                return endpoint['url']
        except (IndexError, KeyError):
            return None
    return None
def url_for(request, service_type, endpoint_type=None, region=None):
    """Look up the endpoint URL for ``service_type`` in the user's catalog.

    Falls back to the SECONDARY_ENDPOINT_TYPE setting when the primary
    endpoint type yields no URL. Raises ServiceCatalogException when no
    URL can be found at all.
    """
    endpoint_type = endpoint_type or getattr(settings,
                                             'OPENSTACK_ENDPOINT_TYPE',
                                             'publicURL')
    fallback_type = getattr(settings, 'SECONDARY_ENDPOINT_TYPE', None)
    service = get_service_from_catalog(request.user.service_catalog,
                                       service_type)
    if service:
        region = region or request.user.services_region
        url = get_url_for_service(service, region, endpoint_type)
        if not url and fallback_type:
            url = get_url_for_service(service, region, fallback_type)
        if url:
            return url
    raise exceptions.ServiceCatalogException(service_type)
def is_service_enabled(request, service_type, service_name=None):
    """Return True when the user's catalog lists ``service_type`` in the
    user's region (region is ignored for identity).

    When ``service_name`` is given, the catalog entry's name must match
    it as well.
    """
    service = get_service_from_catalog(request.user.service_catalog,
                                       service_type)
    if not service:
        return False
    region = request.user.services_region
    for endpoint in service['endpoints']:
        # ignore region for identity
        if service['type'] == 'identity' or endpoint['region'] == region:
            if service_name:
                return service['name'] == service_name
            return True
    return False
|
|
from datetime import datetime
import re
from django.conf import settings
import olympia.core.logger
from olympia import amo
from olympia.activity import log_create
from olympia.constants.blocklist import REMOTE_SETTINGS_COLLECTION_LEGACY
from olympia.lib.remote_settings import RemoteSettings
from olympia.users.utils import get_task_user
log = olympia.core.logger.getLogger('z.amo.blocklist')
def add_version_log_for_blocked_versions(obj, old_obj, al):
    """Bulk-create VersionLog rows tying activity ``al`` to every add-on
    version blocked by either the new or the old Block object."""
    from olympia.activity.models import VersionLog
    logs = []
    for version in obj.addon_versions:
        if (obj.is_version_blocked(version.version)
                or old_obj.is_version_blocked(version.version)):
            logs.append(VersionLog(activity_log=al, version_id=version.id))
    VersionLog.objects.bulk_create(logs)
def block_activity_log_save(obj, change, submission_obj=None, old_obj=None):
    """Record activity log entries for a Block being added or edited.

    Logs BLOCKLIST_BLOCK_EDITED (change=True) or BLOCKLIST_BLOCK_ADDED,
    plus a separate BLOCKLIST_SIGNOFF entry when the submission was signed
    off, and ties the main entry to every affected add-on version.

    obj: the Block that was saved.
    change: True when an existing Block was edited, False when added.
    submission_obj: the submission behind the change, if any.
    old_obj: the Block before the change (defaults to obj when None).
    """
    action = amo.LOG.BLOCKLIST_BLOCK_EDITED if change else amo.LOG.BLOCKLIST_BLOCK_ADDED
    # Prefer the submission's legacy flag when a submission drove the change.
    legacy_inclusion = getattr(
        submission_obj if submission_obj else obj, 'in_legacy_blocklist'
    )
    details = {
        'guid': obj.guid,
        'min_version': obj.min_version,
        'max_version': obj.max_version,
        'url': obj.url,
        'reason': obj.reason,
        'include_in_legacy': legacy_inclusion,
        'comments': f'Versions {obj.min_version} - {obj.max_version} blocked.',
    }
    if submission_obj:
        details['signoff_state'] = submission_obj.SIGNOFF_STATES.get(
            submission_obj.signoff_state
        )
        if submission_obj.signoff_by:
            details['signoff_by'] = submission_obj.signoff_by.id
    addon = obj.addon
    al = log_create(action, addon, obj.guid, obj, details=details, user=obj.updated_by)
    if submission_obj and submission_obj.signoff_by:
        # Separate entry attributed to the reviewer who signed off.
        log_create(
            amo.LOG.BLOCKLIST_SIGNOFF,
            addon,
            obj.guid,
            action.action_class,
            obj,
            user=submission_obj.signoff_by,
        )
    add_version_log_for_blocked_versions(obj, old_obj or obj, al)
def block_activity_log_delete(obj, *, submission_obj=None, delete_user=None):
    """Record activity log entries for a Block being deleted.

    One of ``submission_obj`` or ``delete_user`` must be provided so the
    log entry can be attributed to a user. Logs BLOCKLIST_BLOCK_DELETED
    (plus BLOCKLIST_SIGNOFF when the submission was signed off) and ties
    the entry to the affected add-on versions when the add-on still exists.
    """
    assert submission_obj or delete_user
    details = {
        'guid': obj.guid,
        'min_version': obj.min_version,
        'max_version': obj.max_version,
        'url': obj.url,
        'reason': obj.reason,
        'include_in_legacy': obj.in_legacy_blocklist,
        'comments': f'Versions {obj.min_version} - {obj.max_version} unblocked.',
    }
    if submission_obj:
        details['signoff_state'] = submission_obj.SIGNOFF_STATES.get(
            submission_obj.signoff_state
        )
        if submission_obj.signoff_by:
            details['signoff_by'] = submission_obj.signoff_by.id
    addon = obj.addon
    # The addon may already be gone; only include it in the log args
    # when it still exists.
    args = (
        [amo.LOG.BLOCKLIST_BLOCK_DELETED] + ([addon] if addon else []) + [obj.guid, obj]
    )
    al = log_create(
        *args,
        details=details,
        user=submission_obj.updated_by if submission_obj else delete_user,
    )
    if addon:
        add_version_log_for_blocked_versions(obj, obj, al)
    if submission_obj and submission_obj.signoff_by:
        args = (
            [amo.LOG.BLOCKLIST_SIGNOFF]
            + ([addon] if addon else [])
            + [obj.guid, amo.LOG.BLOCKLIST_BLOCK_DELETED.action_class, obj]
        )
        log_create(*args, user=submission_obj.signoff_by)
def splitlines(text):
    """Split ``text`` into a list of whitespace-stripped lines.

    Falsy input (None, '') yields an empty list; non-string input is
    coerced with ``str()`` first.
    """
    return list(map(str.strip, str(text or '').splitlines()))
def legacy_publish_blocks(blocks):
    """Create or update the legacy remote-settings record for each block.

    Blocks imported from a regex guid are skipped because they can't be
    safely updated (see log message). Newly created records store their
    remote id back on the Block via ``legacy_id``.
    """
    bucket = settings.REMOTE_SETTINGS_WRITER_BUCKET
    server = RemoteSettings(bucket, REMOTE_SETTINGS_COLLECTION_LEGACY)
    for block in blocks:
        # No legacy_id yet means no record exists remotely for this block.
        needs_creating = not block.legacy_id
        if block.is_imported_from_legacy_regex:
            log.info(
                f'Block [{block.guid}] was imported from a regex guid so '
                "can't be safely updated. Skipping."
            )
            continue
        data = {
            'guid': block.guid,
            'details': {
                'bug': block.url,
                'why': block.reason,
                'name': str(block.reason).partition('.')[0],  # required
            },
            'enabled': True,
            'versionRange': [
                {
                    'severity': 3,  # Always high severity now.
                    'minVersion': block.min_version,
                    'maxVersion': block.max_version,
                }
            ],
        }
        if needs_creating:
            record = server.publish_record(data)
            block.update(legacy_id=record.get('id', ''))
        else:
            server.publish_record(data, block.legacy_id)
    server.complete_session()
def legacy_delete_blocks(blocks):
    """Remove the legacy remote-settings record for each block that has
    one, clearing ``legacy_id`` afterwards.

    Blocks imported from a regex guid are skipped (see log message).
    """
    bucket = settings.REMOTE_SETTINGS_WRITER_BUCKET
    server = RemoteSettings(bucket, REMOTE_SETTINGS_COLLECTION_LEGACY)
    for block in blocks:
        if not block.legacy_id:
            continue
        if block.is_imported_from_legacy_regex:
            log.info(
                f'Block [{block.guid}] was imported from a regex guid so '
                "can't be safely deleted. Skipping."
            )
            continue
        server.delete_record(block.legacy_id)
        block.update(legacy_id='')
    server.complete_session()
# Started out based on the regexs in the following url but needed some changes:
# https://dxr.mozilla.org/mozilla-central/source/toolkit/mozapps/extensions/Blocklist.jsm # noqa
# The whole ID should be surrounded by literal ().
# IDs may contain alphanumerics, _, -, {}, @ and a literal '.'
# They may also contain backslashes (needed to escape the {} and dot)
# We filter out backslash escape sequences (like `\w`) separately
IS_MULTIPLE_ID_SUB_REGEX = r'\([\\\w .{}@-]+\)'
# Find regular expressions of the form:
# /^((id1)|(id2)|(id3)|...|(idN))$/
# The outer set of parens enclosing the entire list of IDs is optional.
IS_MULTIPLE_IDS = re.compile(
    # Start with literal ^ then an optional `(``
    r'^\^\(?'
    # Then at least one ID in parens ().
    + IS_MULTIPLE_ID_SUB_REGEX
    # Followed by any number of IDs in () separated by pipes.
    + r'(?:\|'
    + IS_MULTIPLE_ID_SUB_REGEX
    + r')*'
    # Finally, we need to end with a literal sequence )$
    # (the leading `)` is optional like at the start)
    + r'\)?\$$'
)
# Check for a backslash followed by anything other than a literal . or curlies
REGEX_ESCAPE_SEQS = re.compile(r'\\[^.{}]')
# Used to remove the following 3 things:
# leading literal ^(
#   plus an optional (
# any backslash
# trailing literal )$
#   plus an optional ) before the )$
REGEX_REMOVAL_REGEX = re.compile(r'^\^\(\(?|\\|\)\)?\$$')
# Splits adjacent ids on the `)|(` separator between them
# (used by split_regex_to_list after REGEX_REMOVAL_REGEX has trimmed the ends).
GUID_SPLIT = re.compile(r'\)\|\(')
def datetime_to_ts(dt=None):
    """Return the timestamp used for MLBF identifiers.

    Expressed as whole milliseconds since the unix epoch; defaults to the
    current time when ``dt`` is not given.
    """
    when = dt if dt is not None else datetime.now()
    return int(when.timestamp() * 1000)
def split_regex_to_list(guid_re):
    """Split a multiple-guid blocklist regex into a list of guids.

    Returns None when the pattern doesn't look like a plain
    ``^((id1)|(id2)|...)$`` alternation, or when it contains backslash
    escape sequences other than the allowed literal ``\\.``/``\\{``/``\\}``.
    """
    # Bug fix: REGEX_ESCAPE_SEQS was tested with .match(), which anchors
    # at position 0 — but every candidate starts with '^', so the escape
    # filter (see the comments on IS_MULTIPLE_ID_SUB_REGEX) could never
    # fire. .search() scans the whole pattern as intended.
    if not IS_MULTIPLE_IDS.match(guid_re) or REGEX_ESCAPE_SEQS.search(guid_re):
        return
    trimmed = REGEX_REMOVAL_REGEX.sub('', guid_re)
    return GUID_SPLIT.split(trimmed)
def disable_addon_for_block(block):
    """Disable appropriate addon versions that are affected by the Block, and
    the addon too if 0 - *.

    Rejects every blocked version of the add-on through the reviewer
    machinery (attributed to the task user), clears their
    needs-human-review flags, and — when the block covers the full version
    range — disables the add-on itself or denies resubmission for deleted
    add-ons.
    """
    from .models import Block
    from olympia.reviewers.utils import ReviewBase

    review = ReviewBase(
        request=None,
        addon=block.addon,
        version=None,
        review_type='pending',
        user=get_task_user(),
    )
    review.set_data(
        {
            'versions': [
                ver
                for ver in block.addon_versions
                # We don't need to reject versions from older deleted instances
                if ver.addon == block.addon and block.is_version_blocked(ver.version)
            ]
        }
    )
    review.reject_multiple_versions()
    for version in review.data['versions']:
        # Clear needs_human_review on rejected versions, we consider that
        # the admin looked at them before blocking.
        review.clear_specific_needs_human_review_flags(version)
    # A 0 - * block covers every possible version: take down the add-on too.
    if block.min_version == Block.MIN and block.max_version == Block.MAX:
        if block.addon.status == amo.STATUS_DELETED:
            block.addon.deny_resubmission()
        else:
            block.addon.update(status=amo.STATUS_DISABLED)
def save_guids_to_blocks(guids, submission, *, fields_to_set):
    """Create or update Block rows for ``guids`` from a submission.

    Copies ``fields_to_set`` from the submission onto each block, snapshots
    average daily users, links the submission, writes the activity log and
    disables the affected add-on versions. Returns the list of Block
    objects (existing or newly built by get_blocks_from_guids).
    """
    from .models import Block

    common_args = {field: getattr(submission, field) for field in fields_to_set}
    modified_datetime = datetime.now()

    blocks = Block.get_blocks_from_guids(guids)
    Block.preload_addon_versions(blocks)
    for block in blocks:
        # An existing id means this guid was already blocked -> edit.
        change = bool(block.id)
        if change:
            # Keep a copy of the pre-change version range so the activity
            # log can record versions that were previously blocked.
            block_obj_before_change = Block(
                min_version=block.min_version, max_version=block.max_version
            )
            setattr(block, 'modified', modified_datetime)
        else:
            block_obj_before_change = None
        for field, val in common_args.items():
            setattr(block, field, val)
        block.average_daily_users_snapshot = block.current_adu
        block.save()
        if submission.id:
            block.submission.add(submission)
        block_activity_log_save(
            block,
            change=change,
            submission_obj=submission if submission.id else None,
            old_obj=block_obj_before_change,
        )
        disable_addon_for_block(block)
    return blocks
|
|
"""
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import Ward, WardAgglomeration, ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
    # Misc tests on linkage
    rng = np.random.RandomState(42)
    X = rng.normal(size=(5, 5))
    # Unknown linkage names and mis-shaped connectivity must raise.
    assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)
    assert_raises(ValueError, linkage_tree, X, linkage='foo')
    assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))
    # Smoke test FeatureAgglomeration
    FeatureAgglomeration().fit(X)
    # Deprecation of Ward class
    assert_warns(DeprecationWarning, Ward).fit(X)
    # test hierarchical clustering on a precomputed distances matrix:
    # must give the same tree as computing the affinity internally.
    dis = cosine_distances(X)
    res = linkage_tree(dis, affinity="precomputed")
    assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
    # same check with a callable affinity instead of a string.
    res = linkage_tree(X, affinity=manhattan_distances)
    assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
    """
    Check that we obtain the correct solution for structured linkage trees.
    """
    rng = np.random.RandomState(0)
    mask = np.ones([10, 10], dtype=np.bool)
    # Avoiding a mask with only 'True' entries
    mask[4:7, 4:7] = 0
    X = rng.randn(50, 100)
    connectivity = grid_to_graph(*mask.shape)
    for tree_builder in _TREE_BUILDERS.values():
        children, n_components, n_leaves, parent = \
            tree_builder(X.T, connectivity)
        # A full binary merge tree over f leaves has 2 * f - 1 nodes.
        n_nodes = 2 * X.shape[1] - 1
        assert_true(len(children) + n_leaves == n_nodes)
        # Check that ward_tree raises a ValueError with a connectivity matrix
        # of the wrong shape
        assert_raises(ValueError,
                      tree_builder, X.T, np.ones((4, 4)))
        # Check that fitting with no samples raises an error
        assert_raises(ValueError,
                      tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
    """
    Check that we obtain the correct solution for unstructured linkage trees.
    """
    rng = np.random.RandomState(0)
    X = rng.randn(50, 100)
    # Exercise both a 2-D matrix and a single row (1-D) input.
    for this_X in (X, X[0]):
        # With specified a number of clusters just for the sake of
        # raising a warning and testing the warning code
        with ignore_warnings():
            children, n_nodes, n_leaves, parent = assert_warns(
                UserWarning, ward_tree, this_X.T, n_clusters=10)
        n_nodes = 2 * X.shape[1] - 1
        assert_equal(len(children) + n_leaves, n_nodes)

    for tree_builder in _TREE_BUILDERS.values():
        for this_X in (X, X[0]):
            with ignore_warnings():
                children, n_nodes, n_leaves, parent = assert_warns(
                    UserWarning, tree_builder, this_X.T, n_clusters=10)
            n_nodes = 2 * X.shape[1] - 1
            assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
    """
    Check that the height of the results of linkage tree is sorted.
    """
    # NOTE(review): the assertion below only checks the node count, not
    # that heights are sorted as the docstring claims — confirm intent.
    rng = np.random.RandomState(0)
    mask = np.ones([10, 10], dtype=np.bool)
    X = rng.randn(50, 100)
    connectivity = grid_to_graph(*mask.shape)
    for linkage_func in _TREE_BUILDERS.values():
        children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
        n_nodes = 2 * X.shape[1] - 1
        assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering():
    """
    Check that we obtain the correct number of clusters with
    agglomerative clustering.
    """
    rng = np.random.RandomState(0)
    mask = np.ones([10, 10], dtype=np.bool)
    n_samples = 100
    X = rng.randn(n_samples, 50)
    connectivity = grid_to_graph(*mask.shape)
    # Exercise every supported linkage strategy.
    for linkage in ("ward", "complete", "average"):
        clustering = AgglomerativeClustering(n_clusters=10,
                                             connectivity=connectivity,
                                             linkage=linkage)
        clustering.fit(X)
        # test caching
        try:
            tempdir = mkdtemp()
            clustering = AgglomerativeClustering(
                n_clusters=10, connectivity=connectivity,
                memory=tempdir,
                linkage=linkage)
            clustering.fit(X)
            labels = clustering.labels_
            assert_true(np.size(np.unique(labels)) == 10)
        finally:
            shutil.rmtree(tempdir)
        # Turn caching off now
        clustering = AgglomerativeClustering(
            n_clusters=10, connectivity=connectivity, linkage=linkage)
        # Check that we obtain the same solution with early-stopping of the
        # tree building
        clustering.compute_full_tree = False
        clustering.fit(X)
        assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
                                                         labels), 1)
        # Without connectivity constraints we should still get 10 clusters.
        clustering.connectivity = None
        clustering.fit(X)
        assert_true(np.size(np.unique(clustering.labels_)) == 10)
        # Check that we raise a TypeError on dense matrices
        clustering = AgglomerativeClustering(
            n_clusters=10,
            connectivity=sparse.lil_matrix(
                connectivity.toarray()[:10, :10]),
            linkage=linkage)
        assert_raises(ValueError, clustering.fit, X)
    # Test that using ward with another metric than euclidean raises an
    # exception
    clustering = AgglomerativeClustering(
        n_clusters=10,
        connectivity=connectivity.toarray(),
        affinity="manhattan",
        linkage="ward")
    assert_raises(ValueError, clustering.fit, X)
    # Test using another metric than euclidean works with linkage complete
    for affinity in PAIRED_DISTANCES.keys():
        # Compare our (structured) implementation to scipy
        clustering = AgglomerativeClustering(
            n_clusters=10,
            connectivity=np.ones((n_samples, n_samples)),
            affinity=affinity,
            linkage="complete")
        clustering.fit(X)
        clustering2 = AgglomerativeClustering(
            n_clusters=10,
            connectivity=None,
            affinity=affinity,
            linkage="complete")
        clustering2.fit(X)
        # Fully-connected "structured" clustering must agree with the
        # unstructured run (NMI of 1 means identical partitions).
        assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
                                                         clustering.labels_),
                            1)
    # Test that using a distance matrix (affinity = 'precomputed') has same
    # results (with connectivity constraints)
    clustering = AgglomerativeClustering(n_clusters=10,
                                         connectivity=connectivity,
                                         linkage="complete")
    clustering.fit(X)
    X_dist = pairwise_distances(X)
    clustering2 = AgglomerativeClustering(n_clusters=10,
                                          connectivity=connectivity,
                                          affinity='precomputed',
                                          linkage="complete")
    clustering2.fit(X_dist)
    assert_array_equal(clustering.labels_, clustering2.labels_)
def test_ward_agglomeration():
    """
    Check that we obtain the correct solution in a simplistic case
    """
    rng = np.random.RandomState(0)
    mask = np.ones([10, 10], dtype=np.bool)
    X = rng.randn(50, 100)
    connectivity = grid_to_graph(*mask.shape)
    # WardAgglomeration is deprecated in favor of FeatureAgglomeration.
    assert_warns(DeprecationWarning, WardAgglomeration)

    with ignore_warnings():
        ward = WardAgglomeration(n_clusters=5, connectivity=connectivity)
        ward.fit(X)

    agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
    agglo.fit(X)
    # The new estimator must reproduce the deprecated one's labels.
    assert_array_equal(agglo.labels_, ward.labels_)
    assert_true(np.size(np.unique(agglo.labels_)) == 5)

    X_red = agglo.transform(X)
    assert_true(X_red.shape[1] == 5)
    X_full = agglo.inverse_transform(X_red)
    assert_true(np.unique(X_full[0]).size == 5)
    assert_array_almost_equal(agglo.transform(X_full), X_red)

    # Check that fitting with no samples raises a ValueError
    assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
    """Util for comparison with scipy.

    Assert that two flat clusterings ``cut1`` and ``cut2`` define the same
    partition, regardless of how the cluster labels are numbered: for each
    cut, build the co-clustering matrix (1 where two points share a
    cluster) and require the two matrices to be equal.
    """
    co_clust = []
    for cut in [cut1, cut2]:
        n = len(cut)
        k = cut.max() + 1
        # One-hot encode the labels, then ecut @ ecut.T is the
        # co-clustering indicator matrix.
        ecut = np.zeros((n, k))
        ecut[np.arange(n), cut] = 1
        co_clust.append(np.dot(ecut, ecut.T))
    # Replaces the deprecated sklearn.utils.testing.assert_true wrapper
    # (a thin alias for a truthiness assert) with a plain assert.
    assert (co_clust[0] == co_clust[1]).all()
def test_scikit_vs_scipy():
    """Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
    """
    n, p, k = 10, 5, 3
    rng = np.random.RandomState(0)

    # Not using a lil_matrix here, just to check that non sparse
    # matrices are well handled
    connectivity = np.ones((n, n))
    for linkage in _TREE_BUILDERS.keys():
        for i in range(5):
            X = .1 * rng.normal(size=(n, p))
            # Spread the rows apart so the clustering is well separated.
            X -= 4. * np.arange(n)[:, np.newaxis]
            X -= X.mean(axis=1)[:, np.newaxis]

            out = hierarchy.linkage(X, method=linkage)

            children_ = out[:, :2].astype(np.int)
            children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)

            # Cutting both trees at k clusters must yield the same partition.
            cut = _hc_cut(k, children, n_leaves)
            cut_ = _hc_cut(k, children_, n_leaves)
            assess_same_labelling(cut, cut_)

    # Test error management in _hc_cut
    assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
    """
    Check that connectivity in the ward tree is propagated correctly during
    merging.
    """
    # Hand-picked points whose k-NN graph exercises the merge path that
    # previously crashed when connectivity wasn't propagated.
    X = np.array([(.014, .120), (.014, .099), (.014, .097),
                  (.017, .153), (.017, .153), (.018, .153),
                  (.018, .153), (.018, .153), (.018, .153),
                  (.018, .153), (.018, .153), (.018, .153),
                  (.018, .152), (.018, .149), (.018, .144),
                  ])
    connectivity = kneighbors_graph(X, 10)
    ward = AgglomerativeClustering(
        n_clusters=4, connectivity=connectivity, linkage='ward')
    # If changes are not propagated correctly, fit crashes with an
    # IndexError
    ward.fit(X)
def test_ward_tree_children_order():
    """
    Check that children are ordered in the same way for both structured and
    unstructured versions of ward_tree.
    """
    # test on five random datasets
    n, p = 10, 5
    rng = np.random.RandomState(0)

    # A fully-connected connectivity matrix makes the "structured" run
    # equivalent to the unstructured one.
    connectivity = np.ones((n, n))
    for i in range(5):
        X = .1 * rng.normal(size=(n, p))
        X -= 4. * np.arange(n)[:, np.newaxis]
        X -= X.mean(axis=1)[:, np.newaxis]

        out_unstructured = ward_tree(X)
        out_structured = ward_tree(X, connectivity=connectivity)

        assert_array_equal(out_unstructured[0], out_structured[0])
def test_ward_linkage_tree_return_distance():
    """Test return_distance option on linkage and ward trees"""

    # test that return_distance when set true, gives same
    # output on both structured and unstructured clustering.
    n, p = 10, 5
    rng = np.random.RandomState(0)

    # Dense all-ones connectivity: "structured" but fully connected, so the
    # results must match the unstructured computation exactly.
    connectivity = np.ones((n, n))
    for i in range(5):
        X = .1 * rng.normal(size=(n, p))
        X -= 4. * np.arange(n)[:, np.newaxis]
        X -= X.mean(axis=1)[:, np.newaxis]

        out_unstructured = ward_tree(X, return_distance=True)
        out_structured = ward_tree(X, connectivity=connectivity,
                                   return_distance=True)

        # get children
        children_unstructured = out_unstructured[0]
        children_structured = out_structured[0]

        # check if we got the same clusters
        assert_array_equal(children_unstructured, children_structured)

        # check if the distances are the same
        dist_unstructured = out_unstructured[-1]
        dist_structured = out_structured[-1]

        assert_array_almost_equal(dist_unstructured, dist_structured)

        # Repeat the structured-vs-unstructured comparison for the other
        # linkage criteria supported by linkage_tree.
        for linkage in ['average', 'complete']:
            structured_items = linkage_tree(
                X, connectivity=connectivity, linkage=linkage,
                return_distance=True)[-1]
            unstructured_items = linkage_tree(
                X, linkage=linkage, return_distance=True)[-1]
            structured_dist = structured_items[-1]
            unstructured_dist = unstructured_items[-1]
            structured_children = structured_items[0]
            unstructured_children = unstructured_items[0]
            assert_array_almost_equal(structured_dist, unstructured_dist)
            assert_array_almost_equal(
                structured_children, unstructured_children)

    # test on the following dataset where we know the truth
    # taken from scipy/cluster/tests/hierarchy_test_data.py
    X = np.array([[1.43054825, -7.5693489],
                  [6.95887839, 6.82293382],
                  [2.87137846, -9.68248579],
                  [7.87974764, -6.05485803],
                  [8.24018364, -6.09495602],
                  [7.39020262, 8.54004355]])
    # truth: expected scipy linkage matrices (merged pair, distance, size)
    linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
                               [1., 5., 1.77045373, 2.],
                               [0., 2., 2.55760419, 2.],
                               [6., 8., 9.10208346, 4.],
                               [7., 9., 24.7784379, 6.]])

    linkage_X_complete = np.array(
        [[3., 4., 0.36265956, 2.],
         [1., 5., 1.77045373, 2.],
         [0., 2., 2.55760419, 2.],
         [6., 8., 6.96742194, 4.],
         [7., 9., 18.77445997, 6.]])

    linkage_X_average = np.array(
        [[3., 4., 0.36265956, 2.],
         [1., 5., 1.77045373, 2.],
         [0., 2., 2.55760419, 2.],
         [6., 8., 6.55832839, 4.],
         [7., 9., 15.44089605, 6.]])

    n_samples, n_features = np.shape(X)
    connectivity_X = np.ones((n_samples, n_samples))

    out_X_unstructured = ward_tree(X, return_distance=True)
    out_X_structured = ward_tree(X, connectivity=connectivity_X,
                                 return_distance=True)

    # check that the labels are the same
    assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
    assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])

    # check that the distances are correct
    assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
    assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])

    linkage_options = ['complete', 'average']
    X_linkage_truth = [linkage_X_complete, linkage_X_average]
    for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
        out_X_unstructured = linkage_tree(
            X, return_distance=True, linkage=linkage)
        out_X_structured = linkage_tree(
            X, connectivity=connectivity_X, linkage=linkage,
            return_distance=True)

        # check that the labels are the same
        assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
        assert_array_equal(X_truth[:, :2], out_X_structured[0])

        # check that the distances are correct
        assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
        assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
    """Non-regression: a non item-assignable connectivity matrix with more
    than one component must be fixed (with a warning) rather than crash."""
    data = np.array([[0, 0], [1, 1]])
    # A mask with several components forces the connectivity-fixing path.
    mask = np.array([[True, False], [False, True]])
    conn = grid_to_graph(n_x=2, n_y=2, mask=mask)
    model = AgglomerativeClustering(connectivity=conn, linkage='ward')
    assert_warns(UserWarning, model.fit, data)
def test_int_float_dict():
    """Smoke-test IntFloatDict lookup plus the max/average merge helpers."""
    rng = np.random.RandomState(0)
    keys = np.unique(rng.randint(100, size=10).astype(np.intp))
    values = rng.rand(len(keys))

    mapping = IntFloatDict(keys, values)
    for k, v in zip(keys, values):
        assert mapping[k] == v

    other = IntFloatDict(np.arange(50).astype(np.intp)[::2],
                         0.5 * np.ones(50)[::2])
    # Complete smoke test
    full_mask = np.ones(100, dtype=np.intp)
    max_merge(mapping, other, mask=full_mask, n_a=1, n_b=1)
    average_merge(mapping, other, mask=full_mask, n_a=1, n_b=1)
def test_connectivity_callable():
    """A callable connectivity must yield the same labels as a precomputed one."""
    rng = np.random.RandomState(0)
    data = rng.rand(20, 5)
    precomputed = AgglomerativeClustering(
        connectivity=kneighbors_graph(data, 3))
    as_callable = AgglomerativeClustering(
        connectivity=partial(kneighbors_graph, n_neighbors=3))
    precomputed.fit(data)
    as_callable.fit(data)
    assert_array_equal(precomputed.labels_, as_callable.labels_)
def test_compute_full_tree():
    """The full merge tree must be computed when n_clusters is small."""
    rng = np.random.RandomState(0)
    data = rng.randn(10, 2)
    graph = kneighbors_graph(data, 5)

    # Small n_clusters: the full tree is built, i.e. n_samples - 1 merges.
    model = AgglomerativeClustering(n_clusters=2, connectivity=graph)
    model.fit(data)
    assert_equal(model.children_.shape[0], data.shape[0] - 1)

    # Large n_clusters (greater than max(100, 0.02 * n_samples)): stop early,
    # so the number of merges is n_samples - n_clusters.
    n_clusters = 101
    data = rng.randn(200, 2)
    graph = kneighbors_graph(data, 10)
    model = AgglomerativeClustering(n_clusters=n_clusters,
                                    connectivity=graph)
    model.fit(data)
    assert_equal(model.children_.shape[0], data.shape[0] - n_clusters)
if __name__ == '__main__':
    # Run this test module's tests directly via nose when executed as a script.
    import nose
    nose.run(argv=['', __file__])
|
|
# -*- coding: utf-8 -*-
__doc__ = """
WebSocket within CherryPy is a tricky bit since CherryPy is
a threaded server which would choke quickly if each thread
of the server were kept attached to a long living connection
that WebSocket expects.
In order to work around this constraint, we take some advantage
of some internals of CherryPy as well as the introspection
Python provides.
Basically, when the WebSocket handshake is complete, we take over
the socket and let CherryPy take back the thread that was
associated with the upgrade request.
These operations require a bit of work at various levels of
the CherryPy framework but this module takes care of them
and from your application's perspective, this is abstracted.
Here are the various utilities provided by this module:
* WebSocketTool: The tool is in charge to perform the
HTTP upgrade and detach the socket from
CherryPy. It runs at various hook points of the
request's processing. Enable that tool at
any path you wish to handle as a WebSocket
handler.
 * WebSocketPlugin: The plugin tracks the instantiated web socket handlers.
                    It also cleans out websocket handlers whose connections
                    have been closed down. The websocket connection then
runs in its own thread that this plugin manages.
Simple usage example:
.. code-block:: python
:linenos:
import cherrypy
from ws4py.server.cherrypyserver import WebSocketPlugin, WebSocketTool
from ws4py.websocket import EchoWebSocket
cherrypy.config.update({'server.socket_port': 9000})
WebSocketPlugin(cherrypy.engine).subscribe()
cherrypy.tools.websocket = WebSocketTool()
class Root(object):
@cherrypy.expose
def index(self):
return 'some HTML with a websocket javascript connection'
@cherrypy.expose
def ws(self):
pass
cherrypy.quickstart(Root(), '/', config={'/ws': {'tools.websocket.on': True,
'tools.websocket.handler_cls': EchoWebSocket}})
Note that you can set the handler class on per-path basis,
meaning you could also dynamically change the class based
on other environmental settings (is the user authenticated for ex).
"""
import base64
from hashlib import sha1
import inspect
import socket
import threading
import cherrypy
from cherrypy import Tool
from cherrypy.process import plugins
from cherrypy.wsgiserver import HTTPConnection, HTTPRequest
from ws4py import WS_KEY, WS_VERSION
from ws4py.exc import HandshakeError
from ws4py.websocket import WebSocket
__all__ = ['WebSocketTool', 'WebSocketPlugin']
class WebSocketTool(Tool):
    """CherryPy tool performing the WebSocket HTTP upgrade.

    The tool runs at several hook points of the request processing: it
    validates and answers the upgrade handshake, prevents CherryPy from
    closing the socket, normalizes response headers for picky clients, and
    finally detaches the socket so the handler can run in its own thread.
    """

    def __init__(self):
        Tool.__init__(self, 'before_request_body', self.upgrade)

    def _setup(self):
        """Attach the extra hooks the tool needs beyond its main point."""
        conf = self._merged_args()
        hooks = cherrypy.serving.request.hooks
        p = conf.pop("priority", getattr(self.callable, "priority",
                                         self._priority))
        hooks.attach(self._point, self.callable, priority=p, **conf)
        hooks.attach('before_finalize', self.complete,
                     priority=p)
        hooks.attach('on_end_resource', self.cleanup_headers,
                     priority=70)
        hooks.attach('on_end_request', self.start_handler,
                     priority=70)

    def upgrade(self, protocols=None, extensions=None, version=WS_VERSION,
                handler_cls=WebSocket):
        """
        Performs the upgrade of the connection to the WebSocket
        protocol.

        The provided protocols may be a list of WebSocket
        protocols supported by the instance of the tool.

        When no list is provided and no protocol is either
        during the upgrade, then the protocol parameter is
        not taken into account. On the other hand,
        if the protocol from the handshake isn't part
        of the provided list, the upgrade fails immediately.

        Raises HandshakeError on any invalid or missing handshake element.
        """
        request = cherrypy.serving.request
        request.process_request_body = False

        ws_protocols = None
        ws_location = None
        ws_version = version
        ws_key = None
        ws_extensions = []

        if request.method != 'GET':
            raise HandshakeError('HTTP method must be a GET')

        for key, expected_value in [('Upgrade', 'websocket'),
                                    ('Connection', 'upgrade')]:
            # BUGFIX: default to '' so a missing header raises the intended
            # HandshakeError instead of AttributeError on None.lower().
            actual_value = request.headers.get(key, '').lower()
            if not actual_value:
                raise HandshakeError('Header %s is not defined' % key)
            if expected_value not in actual_value:
                raise HandshakeError('Illegal value for header %s: %s' %
                                     (key, actual_value))

        version = request.headers.get('Sec-WebSocket-Version')
        supported_versions = ', '.join([str(v) for v in ws_version])
        version_is_valid = False
        if version:
            try:
                version = int(version)
            except ValueError:
                # Non-numeric version header: leave version_is_valid False.
                pass
            else:
                version_is_valid = version in ws_version

        if not version_is_valid:
            cherrypy.response.headers['Sec-WebSocket-Version'] = supported_versions
            raise HandshakeError('Unhandled or missing WebSocket version')

        key = request.headers.get('Sec-WebSocket-Key')
        if key:
            ws_key = base64.b64decode(key)
            # RFC 6455 requires the decoded key to be exactly 16 bytes.
            if len(ws_key) != 16:
                raise HandshakeError("WebSocket key's length is invalid")

        protocols = protocols or []
        subprotocols = request.headers.get('Sec-WebSocket-Protocol')
        if subprotocols:
            ws_protocols = []
            for s in subprotocols.split(','):
                s = s.strip()
                if s in protocols:
                    ws_protocols.append(s)

        exts = extensions or []
        extensions = request.headers.get('Sec-WebSocket-Extensions')
        if extensions:
            for ext in extensions.split(','):
                ext = ext.strip()
                if ext in exts:
                    ws_extensions.append(ext)

        location = []
        include_port = False
        if request.scheme == "https":
            location.append("wss://")
            include_port = request.local.port != 443
        else:
            location.append("ws://")
            include_port = request.local.port != 80
        # NOTE(review): the host is hardcoded; presumably this should come
        # from the request's Host header -- confirm before changing.
        location.append('localhost')
        if include_port:
            location.append(":%d" % request.local.port)
        location.append(request.path_info)
        if request.query_string != "":
            location.append("?%s" % request.query_string)
        ws_location = ''.join(location)

        response = cherrypy.serving.response
        response.stream = True
        response.status = '101 Switching Protocols'
        response.headers['Content-Type'] = 'text/plain'
        response.headers['Upgrade'] = 'websocket'
        response.headers['Connection'] = 'Upgrade'
        response.headers['Sec-WebSocket-Version'] = str(version)
        # RFC 6455 accept token: base64(SHA-1(client key + fixed GUID)).
        response.headers['Sec-WebSocket-Accept'] = base64.b64encode(sha1(key + WS_KEY).digest())
        if ws_protocols:
            response.headers['Sec-WebSocket-Protocol'] = ', '.join(ws_protocols)
        if ws_extensions:
            response.headers['Sec-WebSocket-Extensions'] = ','.join(ws_extensions)

        addr = (request.remote.ip, request.remote.port)
        # Grab the underlying socket; start_handler() later detaches it
        # from the CherryPy stack.
        ws_conn = request.rfile.rfile._sock
        request.ws_handler = handler_cls(ws_conn, ws_protocols, ws_extensions,
                                         request.wsgi_environ.copy())

    def complete(self):
        """
        Sets some internal flags of CherryPy so that it
        doesn't close the socket down.
        """
        self._set_internal_flags()

    def cleanup_headers(self):
        """
        Some clients aren't that smart when it comes to
        headers lookup.

        Rewrites 'Sec-Websocket-*' header names to the canonical
        'Sec-WebSocket-*' capitalization.
        """
        response = cherrypy.response
        if not response.header_list:
            return
        headers = response.header_list[:]
        for (k, v) in headers:
            if k.startswith('Sec-Web'):
                response.header_list.remove((k, v))
                response.header_list.append((k.replace('Sec-Websocket',
                                                       'Sec-WebSocket'), v))

    def start_handler(self):
        """
        Runs at the end of the request processing by calling
        the opened method of the handler.

        Publishes the handler on the engine bus so the WebSocketPlugin
        starts its thread.
        """
        request = cherrypy.request
        if not hasattr(request, 'ws_handler'):
            return
        addr = (request.remote.ip, request.remote.port)
        ws_handler = request.ws_handler
        request.ws_handler = None
        delattr(request, 'ws_handler')
        # By doing this we detach the socket from
        # the CherryPy stack avoiding memory leaks
        request.rfile.rfile._sock = None
        cherrypy.engine.publish('handle-websocket', ws_handler, addr)

    def _set_internal_flags(self):
        """
        CherryPy has two internal flags that we are interested in
        to enable WebSocket within the server. They can't be set via
        a public API and considering I'd want to make this extension
        as compatible as possible whilst refraining in exposing more
        than should be within CherryPy, I prefer performing a bit
        of introspection to set those flags. Even by Python standards
        such introspection isn't the cleanest but it works well
        enough in this case.

        This also means that we do that only on WebSocket
        connections rather than globally and therefore we do not
        harm the rest of the HTTP server.
        """
        current = inspect.currentframe()
        while True:
            if not current:
                break
            _locals = current.f_locals
            if 'self' in _locals:
                # Exact type checks are deliberate: only the concrete
                # CherryPy classes carry these flags.
                if type(_locals['self']) == HTTPRequest:
                    _locals['self'].close_connection = True
                if type(_locals['self']) == HTTPConnection:
                    _locals['self'].linger = True
                    # HTTPConnection is more inner than
                    # HTTPRequest so we can leave once
                    # we're done here
                    return
            _locals = None
            current = current.f_back
class WebSocketPlugin(plugins.SimplePlugin):
    """Engine plugin that tracks live websocket handlers and their threads."""

    def __init__(self, bus):
        plugins.SimplePlugin.__init__(self, bus)
        # handler -> (thread, peer address)
        self.pool = {}

    def start(self):
        """Subscribe to the engine channels the plugin listens on."""
        cherrypy.log("Starting WebSocket processing")
        self.bus.subscribe('main', self.monitor)
        self.bus.subscribe('stop', self.cleanup)
        self.bus.subscribe('handle-websocket', self.handle)
        self.bus.subscribe('websocket-broadcast', self.broadcast)

    def stop(self):
        """Unsubscribe from every channel subscribed in start()."""
        cherrypy.log("Terminating WebSocket processing")
        self.bus.unsubscribe('main', self.monitor)
        self.bus.unsubscribe('stop', self.cleanup)
        self.bus.unsubscribe('handle-websocket', self.handle)
        self.bus.unsubscribe('websocket-broadcast', self.broadcast)

    def handle(self, ws_handler, peer_addr):
        """
        Tracks the provided handler.

        @param ws_handler: websocket handler instance
        @param peer_addr: remote peer address for tracing purpose
        """
        cherrypy.log("Managing WebSocket connection from %s:%d"
                     % (peer_addr[0], peer_addr[1]))
        th = threading.Thread(target=ws_handler.run,
                              name="WebSocket client at %s:%d"
                              % (peer_addr[0], peer_addr[1]))
        th.daemon = True
        self.pool[ws_handler] = (th, peer_addr)
        th.start()

    def monitor(self):
        """
        Called within the engine's mainloop to drop connections
        that have terminated since last iteration.
        """
        # BUGFIX: snapshot the keys with list() -- entries are deleted while
        # scanning, and dict.keys()[:] only worked on Python 2.
        handlers = list(self.pool.keys())
        for handler in handlers:
            if handler.terminated:
                th, addr = self.pool[handler]
                cherrypy.log("Removing WebSocket connection %s:%d"
                             % (addr[0], addr[1]))
                th.join()
                del self.pool[handler]

    def cleanup(self):
        """
        Terminate all connections and clear the pool. Executed when the engine stops.
        """
        cherrypy.log("Closing %d WebSocket connections" % len(self.pool))
        for handler in self.pool:
            handler.close(code=1001, reason='Server is shutting down')
            th, addr = self.pool[handler]
            th.join()
        self.pool.clear()

    def broadcast(self, message, binary=False):
        """
        Broadcasts a message to all connected clients known to
        the server.

        @param message: a message suitable to pass to the send() method
                        of the connected handler.
        @param binary: whether or not the message is a binary one
        """
        for ws_handler in self.pool:
            try:
                # BUGFIX: honour the `binary` flag for plain payloads; the
                # original unconditionally read message.is_binary, which
                # raises AttributeError for plain strings. Message objects
                # carrying their own flag still take precedence.
                ws_handler.send(message,
                                getattr(message, 'is_binary', binary))
            except Exception:
                cherrypy.log(traceback=True)
if __name__ == '__main__':
    import random
    # BUGFIX: the config below referenced EchoWebSocketHandler, which is
    # never defined or imported anywhere -> NameError at startup. Use the
    # EchoWebSocket handler shipped with ws4py instead.
    from ws4py.websocket import EchoWebSocket

    cherrypy.config.update({'server.socket_host': '127.0.0.1',
                            'server.socket_port': 9000})
    WebSocketPlugin(cherrypy.engine).subscribe()
    cherrypy.tools.websocket = WebSocketTool()

    class Root(object):
        @cherrypy.expose
        @cherrypy.tools.websocket(on=False)
        def ws(self):
            # Serve a minimal chat page whose javascript connects back to
            # the websocket endpoint.
            return """<html>
        <head>
          <script type='application/javascript' src='https://ajax.googleapis.com/ajax/libs/jquery/1.4.2/jquery.min.js'> </script>
          <script type='application/javascript'>
            $(document).ready(function() {
              var ws = new WebSocket('ws://192.168.0.10:9000/');
              ws.onmessage = function (evt) {
                 $('#chat').val($('#chat').val() + evt.data + '\\n');
              };
              ws.onopen = function() {
                 ws.send("Hello there");
              };
              ws.onclose = function(evt) {
                 $('#chat').val($('#chat').val() + 'Connection closed by server: ' + evt.code + ' \"' + evt.reason + '\"\\n');
              };
              $('#chatform').submit(function() {
                 ws.send('%(username)s: ' + $('#message').val());
                 $('#message').val("");
                 return false;
              });
            });
          </script>
        </head>
        <body>
        <form action='/echo' id='chatform' method='get'>
          <textarea id='chat' cols='35' rows='10'></textarea>
          <br />
          <label for='message'>%(username)s: </label><input type='text' id='message' />
          <input type='submit' value='Send' />
          </form>
        </body>
        </html>
        """ % {'username': "User%d" % random.randint(0, 100)}

        @cherrypy.expose
        def index(self):
            cherrypy.log("Handler created: %s"
                         % repr(cherrypy.request.ws_handler))

    cherrypy.quickstart(Root(), '/',
                        config={'/': {'tools.websocket.on': True,
                                      'tools.websocket.handler_cls': EchoWebSocket}})
|
|
#!/usr/bin/env python
#
# This library is free software, distributed under the terms of
# the GNU Lesser General Public License Version 3, or any later version.
# See the COPYING file included in this archive
#
# The docstrings in this module contain epytext markup; API documentation
# may be created by processing this file with epydoc: http://epydoc.sf.net
import logging
import binascii
import time
from twisted.internet import protocol, defer
from twisted.python import failure
import twisted.internet.reactor
import constants
import encoding
import msgtypes
import msgformat
from contact import Contact
reactor = twisted.internet.reactor
log = logging.getLogger(__name__)
class TimeoutError(Exception):
    """ Raised when an RPC times out """

    def __init__(self, remote_contact_id):
        # The node ID is a binary blob; hex-encode it so the exception
        # message is human readable.
        readable_id = binascii.hexlify(remote_contact_id)
        Exception.__init__(self,
                           'Timeout connecting to {}'.format(readable_id))
        self.remote_contact_id = remote_contact_id
class KademliaProtocol(protocol.DatagramProtocol):
    """ Implements all low-level network-related functions of a Kademlia node """
    # 26 bytes per datagram are reserved for the multi-packet header built in
    # _send(): 1 type byte + 2 packet-count bytes + 2 sequence bytes +
    # 20 RPC-ID bytes + 1 header-end byte.
    msgSizeLimit = constants.udpDatagramMaxSize-26
    # Pacing delays (seconds) used by _sendNext to throttle outgoing packets.
    maxToSendDelay = 10**-3#0.05
    minToSendDelay = 10**-5#0.01

    def __init__(self, node, msgEncoder=encoding.Bencode(), msgTranslator=msgformat.DefaultFormat()):
        # NOTE(review): the default encoder/translator instances are created
        # once at definition time and shared by every instance that relies on
        # the defaults -- confirm they are stateless.
        self._node = node
        self._encoder = msgEncoder
        self._translator = msgTranslator
        # msgID -> (remote contact ID, Deferred, timeout DelayedCall)
        self._sentMessages = {}
        # msgID -> {sequence number: packet payload} awaiting reassembly
        self._partialMessages = {}
        self._partialMessagesProgress = {}
        # Earliest timestamp at which the next packet may be written
        self._next = 0
        # scheduled-send-time -> DelayedCall, so pending writes can be
        # cancelled in stopProtocol()
        self._callLaterList = {}

    def sendRPC(self, contact, method, args, rawResponse=False):
        """ Sends an RPC to the specified contact

        @param contact: The contact (remote node) to send the RPC to
        @type contact: kademlia.contacts.Contact
        @param method: The name of remote method to invoke
        @type method: str
        @param args: A list of (non-keyword) arguments to pass to the remote
                    method, in the correct order
        @type args: tuple
        @param rawResponse: If this is set to C{True}, the caller of this RPC
                            will receive a tuple containing the actual response
                            message object and the originating address tuple as
                            a result; in other words, it will not be
                            interpreted by this class. Unless something special
                            needs to be done with the metadata associated with
                            the message, this should remain C{False}.
        @type rawResponse: bool

        @return: This immediately returns a deferred object, which will return
                 the result of the RPC call, or raise the relevant exception
                 if the remote node raised one. If C{rawResponse} is set to
                 C{True}, however, it will always return the actual response
                 message (which may be a C{ResponseMessage} or an
                 C{ErrorMessage}).
        @rtype: twisted.internet.defer.Deferred
        """
        msg = msgtypes.RequestMessage(self._node.id, method, args)
        msgPrimitive = self._translator.toPrimitive(msg)
        encodedMsg = self._encoder.encode(msgPrimitive)

        df = defer.Deferred()
        if rawResponse:
            # Marker attribute checked in datagramReceived to skip response
            # interpretation.
            df._rpcRawResponse = True

        # Set the RPC timeout timer
        timeoutCall = reactor.callLater(constants.rpcTimeout, self._msgTimeout, msg.id) #IGNORE:E1101
        # Transmit the data
        self._send(encodedMsg, msg.id, (contact.address, contact.port))
        self._sentMessages[msg.id] = (contact.id, df, timeoutCall)
        return df

    def datagramReceived(self, datagram, address):
        """ Handles and parses incoming RPC messages (and responses)

        @note: This is automatically called by Twisted when the protocol
               receives a UDP datagram
        """
        # A leading 0x00 byte plus a 0x00 header-end byte at offset 25 marks
        # a fragment of a multi-packet transmission (see _send's header).
        if datagram[0] == '\x00' and datagram[25] == '\x00':
            totalPackets = (ord(datagram[1]) << 8) | ord(datagram[2])
            msgID = datagram[5:25]
            seqNumber = (ord(datagram[3]) << 8) | ord(datagram[4])
            if msgID not in self._partialMessages:
                self._partialMessages[msgID] = {}
            self._partialMessages[msgID][seqNumber] = datagram[26:]
            if len(self._partialMessages[msgID]) == totalPackets:
                # All fragments received: reassemble in sequence order.
                keys = self._partialMessages[msgID].keys()
                keys.sort()
                data = ''
                for key in keys:
                    data += self._partialMessages[msgID][key]
                    datagram = data
                del self._partialMessages[msgID]
            else:
                # Still waiting for more fragments.
                return
        try:
            msgPrimitive = self._encoder.decode(datagram)
        except encoding.DecodeError:
            # We received some rubbish here
            return
        except IndexError:
            log.warning("Couldn't decode dht datagram from %s", address)
            return

        message = self._translator.fromPrimitive(msgPrimitive)
        remoteContact = Contact(message.nodeID, address[0], address[1], self)

        # Refresh the remote node's details in the local node's k-buckets
        self._node.addContact(remoteContact)
        if isinstance(message, msgtypes.RequestMessage):
            # This is an RPC method request
            self._handleRPC(remoteContact, message.id, message.request, message.args)
        elif isinstance(message, msgtypes.ResponseMessage):
            # Find the message that triggered this response
            if self._sentMessages.has_key(message.id):
                # Cancel timeout timer for this RPC
                df, timeoutCall = self._sentMessages[message.id][1:3]
                timeoutCall.cancel()
                del self._sentMessages[message.id]

                if hasattr(df, '_rpcRawResponse'):
                    # The RPC requested that the raw response message and originating address be returned; do not interpret it
                    df.callback((message, address))
                elif isinstance(message, msgtypes.ErrorMessage):
                    # The RPC request raised a remote exception; raise it locally
                    if message.exceptionType.startswith('exceptions.'):
                        exceptionClassName = message.exceptionType[11:]
                    else:
                        # Strip the shared module-path prefix so the remote
                        # exception name resolves in this process.
                        localModuleHierarchy = self.__module__.split('.')
                        remoteHierarchy = message.exceptionType.split('.')
                        #strip the remote hierarchy
                        while remoteHierarchy[0] == localModuleHierarchy[0]:
                            remoteHierarchy.pop(0)
                            localModuleHierarchy.pop(0)
                        exceptionClassName = '.'.join(remoteHierarchy)
                    remoteException = None
                    # SECURITY NOTE: exec on data received from the network;
                    # the except clause limits the damage of a bad class
                    # name, but this is inherently risky.
                    try:
                        exec 'remoteException = %s("%s")' % (exceptionClassName, message.response)
                    except Exception:
                        # We could not recreate the exception; create a generic one
                        remoteException = Exception(message.response)
                    df.errback(remoteException)
                else:
                    # We got a result from the RPC
                    df.callback(message.response)
            else:
                # If the original message isn't found, it must have timed out
                #TODO: we should probably do something with this...
                pass

    def _send(self, data, rpcID, address):
        """ Transmit the specified data over UDP, breaking it up into several
        packets if necessary

        If the data is spread over multiple UDP datagrams, the packets have the
        following structure::
            |           |     |      |      |        ||||||||||||   0x00   |
            |Transmision|Total number|Sequence number|  RPC ID   |Header end|
            | type ID   | of packets |of this packet |           | indicator|
            | (1 byte)  | (2 bytes)  |  (2 bytes)    |(20 bytes) | (1 byte) |
            |           |     |      |      |        ||||||||||||          |

        @note: The header used for breaking up large data segments will
               possibly be moved out of the KademliaProtocol class in the
               future, into something similar to a message translator/encoder
               class (see C{kademlia.msgformat} and C{kademlia.encoding}).
        """
        if len(data) > self.msgSizeLimit:
            # We have to spread the data over multiple UDP datagrams, and provide sequencing information
            # 1st byte is transmission type id, bytes 2 & 3 are the total number of packets in this transmission, bytes 4 & 5 are the sequence number for this specific packet
            totalPackets = len(data) / self.msgSizeLimit
            if len(data) % self.msgSizeLimit > 0:
                totalPackets += 1
            encTotalPackets = chr(totalPackets >> 8) + chr(totalPackets & 0xff)
            seqNumber = 0
            startPos = 0
            while seqNumber < totalPackets:
                #reactor.iterate() #IGNORE:E1101
                packetData = data[startPos:startPos+self.msgSizeLimit]
                encSeqNumber = chr(seqNumber >> 8) + chr(seqNumber & 0xff)
                txData = '\x00%s%s%s\x00%s' % (encTotalPackets, encSeqNumber, rpcID, packetData)
                self._sendNext(txData, address)

                startPos += self.msgSizeLimit
                seqNumber += 1
        else:
            self._sendNext(data, address)

    def _sendNext(self, txData, address):
        """ Send the next UDP packet """
        # Schedule the write through reactor.callLater so consecutive packets
        # are spaced out by at least min/maxToSendDelay.
        ts = time.time()
        delay = 0
        if ts >= self._next:
            delay = self.minToSendDelay
            self._next = ts + self.minToSendDelay
        else:
            delay = (self._next-ts) + self.maxToSendDelay
            self._next += self.maxToSendDelay
        if self.transport:
            laterCall = reactor.callLater(delay, self.transport.write, txData, address)
            # Drop bookkeeping entries for writes that are already due.
            for key in self._callLaterList.keys():
                if key <= ts:
                    del self._callLaterList[key]
            self._callLaterList[self._next] = laterCall

    def _sendResponse(self, contact, rpcID, response):
        """ Send a RPC response to the specified contact
        """
        msg = msgtypes.ResponseMessage(rpcID, self._node.id, response)
        msgPrimitive = self._translator.toPrimitive(msg)
        encodedMsg = self._encoder.encode(msgPrimitive)
        self._send(encodedMsg, rpcID, (contact.address, contact.port))

    def _sendError(self, contact, rpcID, exceptionType, exceptionMessage):
        """ Send an RPC error message to the specified contact
        """
        msg = msgtypes.ErrorMessage(rpcID, self._node.id, exceptionType, exceptionMessage)
        msgPrimitive = self._translator.toPrimitive(msg)
        encodedMsg = self._encoder.encode(msgPrimitive)
        self._send(encodedMsg, rpcID, (contact.address, contact.port))

    def _handleRPC(self, senderContact, rpcID, method, args):
        """ Executes a local function in response to an RPC request """
        # Set up the deferred callchain
        def handleError(f):
            # Relay the failure back to the caller as an RPC error message.
            self._sendError(senderContact, rpcID, f.type, f.getErrorMessage())

        def handleResult(result):
            self._sendResponse(senderContact, rpcID, result)

        df = defer.Deferred()
        df.addCallback(handleResult)
        df.addErrback(handleError)

        # Execute the RPC
        # Only methods explicitly marked with an 'rpcmethod' attribute are
        # callable remotely.
        func = getattr(self._node, method, None)
        if callable(func) and hasattr(func, 'rpcmethod'):
            # Call the exposed Node method and return the result to the deferred callback chain
            try:
                ##try:
                ##    # Try to pass the sender's node id to the function...
                result = func(*args, **{'_rpcNodeID': senderContact.id, '_rpcNodeContact': senderContact})
                ##except TypeError:
                ##    # ...or simply call it if that fails
                ##    result = func(*args)
            except Exception, e:
                df.errback(failure.Failure(e))
            else:
                df.callback(result)
        else:
            # No such exposed method
            df.errback( failure.Failure( AttributeError('Invalid method: %s' % method) ) )

    def _msgTimeout(self, messageID):
        """ Called when an RPC request message times out """
        # Find the message that timed out
        if self._sentMessages.has_key(messageID):
            remoteContactID, df = self._sentMessages[messageID][0:2]
            if self._partialMessages.has_key(messageID):
                # We are still receiving this message
                # See if any progress has been made; if not, kill the message
                if self._partialMessagesProgress.has_key(messageID):
                    if len(self._partialMessagesProgress[messageID]) == len(self._partialMessages[messageID]):
                        # No progress has been made
                        del self._partialMessagesProgress[messageID]
                        del self._partialMessages[messageID]
                        df.errback(failure.Failure(TimeoutError(remoteContactID)))
                        return
                # Reset the RPC timeout timer
                timeoutCall = reactor.callLater(constants.rpcTimeout, self._msgTimeout, messageID) #IGNORE:E1101
                self._sentMessages[messageID] = (remoteContactID, df, timeoutCall)
                return
            del self._sentMessages[messageID]
            # The message's destination node is now considered to be dead;
            # raise an (asynchronous) TimeoutError exception and update the host node
            self._node.removeContact(remoteContactID)
            df.errback(failure.Failure(TimeoutError(remoteContactID)))
        else:
            # This should never be reached
            print "ERROR: deferred timed out, but is not present in sent messages list!"

    def stopProtocol(self):
        """ Called when the transport is disconnected.

        Will only be called once, after all ports are disconnected.

        Cancels every pending (future) delayed write scheduled by _sendNext.
        """
        for key in self._callLaterList.keys():
            try:
                if key > time.time():
                    self._callLaterList[key].cancel()
            except Exception, e:
                print e
            del self._callLaterList[key]
        #TODO: test: do we really need the reactor.iterate() call?
        reactor.iterate()
|
|
__author__ = 'aarongary'
import sys
import pymongo
import requests
import MyGeneInfo
from itertools import islice
from app.util import set_status, create_edges_index
from app.status import Status
from bson.json_util import dumps
from models.TermResolver import TermAnalyzer
import ElasticSearch
import os
from sklearn.linear_model import LinearRegression
import numpy as np
import app
import ESearch
def bulk_identify_terms(terms):
    """Classify a batch of query terms.

    Delegates the classification to TermAnalyzer and wraps the result in
    the response envelope expected by callers.
    """
    analyzer = TermAnalyzer()
    classified = analyzer.process_terms_bulk(terms)
    return {'termClassification': classified}
def search_term_description(term):
    """Classify a single term and attach its Entrez gene summary.

    Returns a dict with the TermAnalyzer classification plus the summary
    fetched from Entrez via ESearch.
    """
    analyzer = TermAnalyzer()
    classified = analyzer.process_terms_bulk(term)
    summary = ESearch.get_gene_summary_from_entrez(term)
    return {
        'termClassification': classified,
        'entrez_summary': summary,
    }
def bulk_identify_terms2(terms):
    # Classifies a comma-separated string of query terms into probability
    # maps over the known term categories (gene/icd10/drug/disease/genome).
    # Returns a JSON string (bson dumps) wrapping the classification list.
    term_with_id = []

    #========================
    # Process GENOME terms
    #========================
    # process_genome_terms extracts genome-name matches and returns the
    # remaining terms string under 'terms'.
    analyzed_terms = process_genome_terms(terms)
    for genome_term in analyzed_terms['special_terms']:
        a = {
            'probabilitiesMap': {
                'gene': '0.0',
                'icd10': '0.0',
                'drug': '0.0',
                'disease': '0.0',
                'genome': '1.0'
            },
            'status': 'success',
            'termId': genome_term['familiar_term'],
            'desc': 'Genome',
            'geneSymbol': genome_term['familiar_term'],
            'termTitle': genome_term['familiar_term'] + ' (' + genome_term['latin'] + ')'
        }
        term_with_id.append(a)
    terms = analyzed_terms['terms']

    #========================
    # Process DISEASE terms
    #========================
    analyzed_terms = process_disease_terms(terms)
    for disease_term in analyzed_terms['special_terms']:
        a = {
            'probabilitiesMap': {
                'gene': '0.0',
                'icd10': '0.0',
                'drug': '0.0',
                'disease': '1.0',
                'genome': '0.0'
            },
            'status': 'success',
            'termId': disease_term['familiar_term'],
            'desc': 'Disease',
            'geneSymbol': disease_term['familiar_term'],
            'termTitle': disease_term['familiar_term'] + ' (' + disease_term['latin'] + ')'
        }
        term_with_id.append(a)
    terms = analyzed_terms['terms']

    # Whatever remains is classified term-by-term against the Mongo index.
    if(len(terms) > 0):
        queryTermArray = terms.split(',')
        types = ['gene','icd10','drug','disease','genome']
        for queryTerm in queryTermArray:
            termTitle = queryTerm
            print queryTerm
            a = {
                'probabilitiesMap': {},
                'status': 'success',
                'termId': queryTerm.upper(),
                'desc': '',
                'geneSymbol': '',
                'termTitle': queryTerm
            }
            term_result = identify_term(queryTerm)
            #tt = dumps(term_result)
            if(term_result is None or term_result.count() < 1):
                # No direct hit: try resolving an alternate/alias ID first.
                term_alt_result = identify_alt_term(queryTerm) #MyGeneInfo.get_gene_info_by_id(queryTerm)
                cc = dumps(term_alt_result)
                if(term_alt_result['term'] == 'UNKNOWN'):
                    # Completely unknown: zero out every probability.
                    a['probabilitiesMap'] = {
                        'gene': '0.0',
                        'icd10': '0.0',
                        'drug': '0.0',
                        'disease': '0.0',
                        'genome': '0.0'
                    }
                    a['status'] = 'unknown'
                    term_with_id.append(a)
                else:
                    termDesc = ''
                    termGeneSymbol = ''
                    term_result_types_array = []
                    if(term_alt_result['type'] == 'GENE'):
                        termDesc = term_alt_result['desc']
                        termGeneSymbol = term_alt_result['geneSymbol']
                        termTitle = queryTerm.upper() + ' (' + termGeneSymbol.upper() + ')'
                        a['termId'] = termGeneSymbol.upper()
                    if(term_alt_result['type'] not in term_result_types_array):
                        term_result_types_array.append(term_alt_result['type'])
                    # Distribute probability mass evenly over the matched types.
                    total_found_terms = float(len(term_result_types_array))
                    for k in types:
                        if(k.upper() in term_result_types_array):
                            a['probabilitiesMap'][k] = str(1.0/total_found_terms)
                        else:
                            a['probabilitiesMap'][k] = str(0.0)
                    a['desc'] = termDesc
                    a['geneSymbol'] = termGeneSymbol
                    a['termTitle'] = termTitle
                    term_with_id.append(a)
            else:
                # Direct hit(s): aggregate the distinct types of every match.
                termDesc = ''
                termGeneSymbol = ''
                term_result_types_array = []
                #tr = dumps(term_result)
                for item_type in term_result:
                    if(item_type['type'] == 'GENE'):
                        termDesc = item_type['desc']
                        termGeneSymbol = item_type['geneSymbol']
                        # Ensembl-style IDs (ENS...) get the symbol prepended
                        # to the displayed title.
                        if(len(queryTerm) > 12 and queryTerm[:3] == 'ENS'):
                            termTitle = termGeneSymbol.upper() + ' (' + queryTerm.upper() + ')'
                        a['termId'] = termGeneSymbol.upper()
                    if(item_type['type'] not in term_result_types_array):
                        term_result_types_array.append(item_type['type'])
                total_found_terms = float(len(term_result_types_array))
                for k in types:
                    if(k.upper() in term_result_types_array):
                        a['probabilitiesMap'][k] = str(1.0/total_found_terms)
                    else:
                        a['probabilitiesMap'][k] = str(0.0)
                a['desc'] = termDesc
                a['geneSymbol'] = termGeneSymbol
                a['termTitle'] = termTitle
                term_with_id.append(a)
            #print dumps(a)
            #term_with_id.append(term_result)
    return_value = {
        'termClassification': term_with_id
    }
    #print dumps(return_value)
    return dumps(return_value)
def identify_term(name):
    """Look up *name* (case-insensitively) in the allterms2 collection.

    Returns a pymongo Cursor over the matching human-genome term
    documents.  The cursor may be empty; callers test ``.count()``.
    """
    client = pymongo.MongoClient()
    db = client.identifiers
    allterms = db.allterms2
    # find() always returns a Cursor, never None, so the original
    # `None if results is None else results` check was dead code.
    return allterms.find({'term': name.upper(), 'genomeType': 'human'})
def identify_alt_term(name):
    """Fallback lookup: resolve *name* through MyGeneInfo, then search allterms2.

    Returns the matching term document, or a stub
    ``{'term': 'UNKNOWN', 'desc': 'UNKNOWN'}`` when nothing matches.
    """
    client = pymongo.MongoClient()
    db = client.identifiers
    allterms = db.allterms2
    # Translate the raw query (e.g. an Ensembl id) into a gene symbol first.
    # NOTE(review): assumes get_gene_info_by_id always returns a string;
    # a None return would crash on .upper() below -- confirm upstream contract.
    gene_alt_id = MyGeneInfo.get_gene_info_by_id(name)
    results = allterms.find_one({'term': gene_alt_id.upper(),'genomeType': 'human'})
    if(results is None):
        # Sentinel document so callers can branch on term == 'UNKNOWN'.
        results = {
            'term': 'UNKNOWN',
            'desc': 'UNKNOWN'
        }
    return results
#def identify_term(name):
# client = pymongo.MongoClient()
# db = client.identifiers
# allterms = db.allterms
# result = allterms.find_one({'term': name.upper()})
# return None if result is None else result
def add_terms_from_file():
    """Download the hosted miRNA label TSV and insert each row into allterms2.

    Each line is "<term>\t<type>"; terms are uppercased on insert.
    Prints 'Done' and the number of rows saved when finished.
    """
    collection = pymongo.MongoClient().identifiers.allterms2
    url = 'http://geneli.st:8181/mirna_label.txt'
    response = requests.get(url)
    saved = 0
    for row in response.iter_lines():
        term, term_type = row.split('\t')
        collection.save({'term': term.upper(), 'type': term_type})
        saved += 1
    print('Done')
    print(str(saved))
def load_variant_to_gene_from_file():
    """Rebuild the variants collection from ./variant_vs_gene.txt.

    Each input line is "<variant>\t<gene>"; every row becomes one GENE
    document for the human genome.  Drops the collection first and
    indexes `term` once the load completes.
    """
    client = pymongo.MongoClient()
    db = client.identifiers
    variants = db.variants
    variants.drop()
    f_path = os.path.abspath('./variant_vs_gene.txt')
    count = 0
    # `with` guarantees the (very large) input file is closed even if a
    # malformed line raises -- the original leaked the file handle.
    with open(f_path, 'r') as f:
        for line in f:
            count += 1
            if(count % 5000 == 0):
                # progress indicator; ~89M lines expected in the full dump
                print(str(count) + ' (' + "{0:.2f}%".format(float(count)/89000000 * 100) + ')')
            variant, gene = line.split('\t')
            variants.save({
                'geneSymbol': gene.rstrip().upper(),
                'genomeType': 'human',
                'term': variant.upper(),
                'type': 'GENE'
            })
    variants.create_index([
        ("term", pymongo.ASCENDING)
    ])
def get_mirna_from_cluster_file():
    """Scan the BRCA cluster file and print every 'hsa-' miRNA fragment found.

    Prints a running match count and, for each matching line, the pieces
    produced by splitting on 'hsa-' (including the leading segment).
    """
    path = '/Users/aarongary/Development/DataSets/Terms/BRCA.json'
    count = 0
    # `with` closes the file handle; the original left it open.
    with open(path, 'r') as f:
        for line in f:
            if('hsa-' in line):
                print(count)
                count += 1
                for hsa_item in line.split('hsa-'):
                    print(hsa_item)
def add_biomart_terms_from_file():
    """Rebuild the allterms2 collection from hosted Biomart export files.

    Drops the collection, then streams each file listed in filesToParse.
    GENE files are tab-separated (ensGID, desc, geneType, geneStatus,
    geneSymbol); each row is inserted twice -- once keyed by ensGID and
    once by geneSymbol -- so a lookup on either identifier resolves.
    NONGENE files are simple (term, type) pairs.  Finishes by indexing
    the lookup fields.
    """
    client = pymongo.MongoClient()
    db = client.identifiers
    allterms = db.allterms2
    # full rebuild: wipe everything previously loaded
    allterms.drop()
    #filesToParse = [{'genomeType': 'human', 'url': 'http://geneli.st:8181/biomart/human Homo sapiens protein coding genes.txt','termType': 'GENE'},
    # {'genomeType': 'human', 'url': 'http://geneli.st:8181/biomart/add-terms-non-GENE.tsv','termType': 'NONGENE'}]
    terms_host = 'http://ec2-52-40-169-254.us-west-2.compute.amazonaws.com:3000/Biomart'
    filesToParse = [
        #{'genomeType': 'dog', 'url': terms_host + '/dog Canis familiaris protein coding genes.txt','termType': 'GENE'},
        #{'genomeType': 'fruitfly', 'url': terms_host + '/fruitfly Drosophila melanogaster protein coding genes.txt','termType': 'GENE'},
        #{'genomeType': 'monkey', 'url': terms_host + '/monkey Macaca mulatta protein coding genes.txt','termType': 'GENE'},
        #{'genomeType': 'mouse', 'url': terms_host + '/mouse Mus musculus protein coding genes.txt','termType': 'GENE'},
        #{'genomeType': 'rat', 'url': terms_host + '/rat Rattus norvegicus protein coding genes.txt','termType': 'GENE'},
        #{'genomeType': 'worm', 'url': terms_host + '/worm Caenorhabditis elegans protein coding genes.txt','termType': 'GENE'},
        #{'genomeType': 'zebrafish', 'url': terms_host + '/zebrafish Danio rerio protein coding genes.txt','termType': 'GENE'},
        #{'genomeType': 'dog', 'url': terms_host + '/dog Canis familiaris mirna genes.txt','termType': 'GENE'},
        #{'genomeType': 'fruitfly', 'url': terms_host + '/fruitfly Drosophila melanogaster pre-mirna genes.txt','termType': 'GENE'},
        #{'genomeType': 'monkey', 'url': terms_host + '/monkey Macaca mulatta mirna genes.txt','termType': 'GENE'},
        #{'genomeType': 'mouse', 'url': terms_host + '/mouse Mus musculus mirna genes.txt','termType': 'GENE'},
        #{'genomeType': 'rat', 'url': terms_host + '/rat Rattus norvegicus mirna genes.txt','termType': 'GENE'},
        #{'genomeType': 'worm', 'url': terms_host + '/worm Caenorhabditis elegans mirna genes.txt','termType': 'GENE'},
        #{'genomeType': 'zebrafish', 'url': terms_host + '/zebrafish Danio rerio mirna genes.txt','termType': 'GENE'},
        {'genomeType': 'human', 'url': terms_host + '/add-terms-DISEASE.tsv','termType': 'NONGENE'},
        {'genomeType': 'human', 'url': terms_host + '/human Homo sapiens protein coding genes.txt','termType': 'GENE'},
        {'genomeType': 'human', 'url': terms_host + '/human Homo sapiens miRNA genes.txt','termType': 'GENE'}
    ]
    for f in filesToParse:
        # stream=True so multi-MB exports are not buffered whole in memory
        r = requests.get(f['url'], stream=True)
        lines = r.iter_lines()
        lines.next() # ignore header
        count = 0
        for line in lines:
            count += 1
            if(count % 1000 == 0):
                print count
            try:
                if(f['termType'] == 'GENE'):
                    ensGID, desc, geneType, geneStatus, geneSymbol = line.split('\t')
                    # record keyed by the Ensembl gene id
                    insertThisRecord = {
                        'ensGID': ensGID,
                        'desc': desc,
                        'geneType': geneType,
                        'geneStatus': geneStatus,
                        'geneSymbol': geneSymbol,
                        'genomeType': f['genomeType'],
                        'term': ensGID.upper(),
                        'type': 'GENE'
                    }
                    allterms.save(insertThisRecord)
                    # duplicate record keyed by the gene symbol so symbol
                    # lookups hit the same data
                    insertThisInvertedRecord = {
                        'ensGID': ensGID,
                        'desc': desc,
                        'geneType': geneType,
                        'geneStatus': geneStatus,
                        'geneSymbol': geneSymbol,
                        'genomeType': f['genomeType'],
                        'term': geneSymbol.upper(),
                        'type': 'GENE'
                    }
                    allterms.save(insertThisInvertedRecord)
                else:
                    fTerm, fType = line.split('\t')
                    allterms.save({'genomeType': 'human','term': fTerm.upper(),'type': fType})
                    #allterms.save({'genomeType': 'dog','term': fTerm.upper(),'type': fType})
                    #allterms.save({'genomeType': 'fruitfly','term': fTerm.upper(),'type': fType})
                    #allterms.save({'genomeType': 'monkey','term': fTerm.upper(),'type': fType})
                    #allterms.save({'genomeType': 'mouse','term': fTerm.upper(),'type': fType})
                    #allterms.save({'genomeType': 'rat','term': fTerm.upper(),'type': fType})
                    #allterms.save({'genomeType': 'worm','term': fTerm.upper(),'type': fType})
                    #allterms.save({'genomeType': 'zebrafish','term': fTerm.upper(),'type': fType})
            except Exception as e:
                # best-effort load: a malformed line is reported and skipped
                print 'Didnt work' + e.message
        print 'Done with file'
    allterms.ensure_index([("ensGID" , pymongo.ASCENDING)])
    allterms.ensure_index([("term" , pymongo.ASCENDING)])
    allterms.ensure_index([("type" , pymongo.ASCENDING)])
    allterms.ensure_index([("geneType" , pymongo.ASCENDING)])
    # allterms.create_indexes([
    # pymongo.IndexModel([('ensGID', pymongo.ASCENDING)]),
    # pymongo.IndexModel([('term', pymongo.ASCENDING)]),
    # pymongo.IndexModel([('type', pymongo.ASCENDING)]),
    # pymongo.IndexModel([('geneType', pymongo.ASCENDING)])
    # ])
    print 'Done'
    return ""
def add_terms_from_file_autocomplete():
    """Fetch the autocomplete TSV and store each (term, type) row in allterms.

    Terms are uppercased on insert; prints a progress count every 200 rows.
    """
    collection = pymongo.MongoClient().identifiers.allterms
    url = 'http://geneli.st:8181/add-terms3.tsv'
    rows = requests.get(url).iter_lines()
    total = 0
    for row in rows:
        term, term_type = row.split('\t')
        collection.save({'term': term.upper(), 'type': term_type})
        total += 1
        if(total % 200 == 0):
            # lightweight progress indicator
            print(total)
    print('Done')
def add_terms_from_elasticsearch_autocomplete():
    """Copy every ClinVar phenotype from Elasticsearch into allterms3 as ICD10.

    Phenotype strings are uppercased on insert; prints a progress count
    every 200 rows.
    """
    collection = pymongo.MongoClient().identifiers.allterms3
    saved = 0
    for phenotype in ElasticSearch.get_clinvar_phenotypes():
        collection.save({'term': phenotype.upper(), 'type': 'ICD10'})
        saved += 1
        if(saved % 200 == 0):
            # lightweight progress indicator
            print(saved)
    print('Done')
def load_terms_from_file():
    """Rebuild the allterms collection from the hosted all-terms3.tsv dump.

    Drops the collection, inserts every (term, type) row with the term
    uppercased, then indexes `term` for fast lookups.
    """
    collection = pymongo.MongoClient().identifiers.allterms
    collection.drop()
    url = 'http://ec2-52-26-19-122.us-west-2.compute.amazonaws.com:8080/all-terms3.tsv'
    rows = requests.get(url).iter_lines()
    total = 0
    for row in rows:
        term, term_type = row.split('\t')
        collection.save({'term': term.upper(), 'type': term_type})
        total += 1
        if(total % 200 == 0):
            # lightweight progress indicator
            print(total)
    # index the lookup key once the load finishes
    collection.create_indexes([pymongo.IndexModel([('term', pymongo.ASCENDING)])])
    print('Done')
def process_genome_terms(terms):
    """Split genome-species phrases out of a comma-separated query string.

    Latin species names embedded in *terms* (e.g. "homo,sapien") are
    removed from the query and reported as special terms mapped to their
    familiar genome name (e.g. HUMAN).

    Returns a dict:
      'terms'         -- the remaining uppercased query string
      'special_terms' -- [{'latin': ..., 'familiar_term': ...}, ...]
    """
    terms_uppercase = terms.upper()
    return_value = []
    genome_id_kv = [
        {'k': 'CANIS,FAMILIARIS', 'v': 'DOG'},
        {'k': 'DROSOPHILA,MELANOGASTER', 'v': 'FRUITFLY'},
        {'k': 'HOMO,SAPIEN', 'v': 'HUMAN'},
        {'k': 'MACACA,MULATTA', 'v': 'MONKEY'},
        {'k': 'MUS,MUSCULUS', 'v': 'MOUSE'},
        {'k': 'RATTUS,NORVEGICUS', 'v': 'RAT'},
        {'k': 'CAENORHABDITIS,ELEGANS', 'v': 'WORM'},
        {'k': 'DANIO,RERIO', 'v': 'ZEBRAFISH'}
    ]
    for kv in genome_id_kv:
        if(kv['k'] in terms_uppercase):
            # cut the species phrase out and collapse the doubled comma
            terms_uppercase = terms_uppercase.replace(kv['k'], '').replace(',,',',')
            return_value.append({'latin': kv['k'].replace(',',' '), 'familiar_term': kv['v']})
    if(terms_uppercase[0:1] == ','):
        # strip only the leading comma; the original sliced [1:-1], which
        # also chopped the final character off the remaining query
        terms_uppercase = terms_uppercase[1:]
    if(terms_uppercase == ','):
        terms_uppercase = ''
    print(terms_uppercase)
    return {'terms': terms_uppercase, 'special_terms': return_value}
def process_disease_terms(terms):
    """Split known disease phrases out of a comma-separated query string.

    Common-name and clinical-name disease phrases embedded in *terms*
    (e.g. "breast,cancer") are removed from the query and reported as
    special terms mapped to their TCGA study code (e.g. BRCA).

    Returns a dict:
      'terms'         -- the remaining uppercased query string
      'special_terms' -- [{'latin': ..., 'familiar_term': ...}, ...]
    """
    terms_uppercase = terms.upper()
    return_value = []
    genome_id_kv = [
        {'k': 'BLADDER,CANCER', 'v': 'BLCA'},
        {'k': 'BRAIN,CANCER', 'v': 'LGG'},
        {'k': 'BREAST,CANCER', 'v': 'BRCA'},
        {'k': 'CERVICAL,CANCER', 'v': 'CESC'},
        {'k': 'ENDOCERVICAL,CANCER', 'v': 'CESC'},
        {'k': 'CERVICAL,CANCER', 'v': 'CESC'},
        {'k': 'CHOLANGIOCARCINOMA', 'v': 'CHOL'},
        {'k': 'BILE,DUCT,CANCER', 'v': 'CHOL'},
        {'k': 'COLON,CANCER', 'v': 'COAD'},
        {'k': 'ESOPHAGEAL,CANCER', 'v': 'ESCA'},
        {'k': 'GLIOBLASTOMA,CANCER', 'v': 'GBM'}, #Wikify
        {'k': 'HEAD,AND,NECK,CANCER', 'v': 'HNSC'},
        {'k': 'NECK,CANCER', 'v': 'HNSC'},
        {'k': 'HEAD,CANCER', 'v': 'HNSC'},
        {'k': 'KIDNEY,CHROMOPHOBE', 'v': 'KICH'},
        {'k': 'KIDNEY,RENAL,CLEAR,CELL,CARCINOMA', 'v': 'KIRC'}, #Wikify
        {'k': 'KIDNEY,RENAL,PAPILLARY,CELL,CARCINOMA', 'v': 'KIRP'},
        {'k': 'LIVER,CANCER', 'v': 'LIHC'},
        {'k': 'LUNG,CANCER', 'v': 'LUAD'},
        {'k': 'LUNG,SQUAMOUS,CELL,CARCINOMA', 'v': 'LUSC'}, #Wikify
        {'k': 'LYMPHOID,CANCER', 'v': 'DLBC'},
        {'k': 'LYMPHOMA,CANCER', 'v': 'DLBC'},
        {'k': 'MESOTHELIOMA,CANCER', 'v': 'MESO'},
        {'k': 'OVARIAN,CANCER', 'v': 'OV'},
        {'k': 'PANCREATIC,CANCER', 'v': 'PAAD'},
        {'k': 'PHEOCHROMOCYTOMA,CANCER', 'v': 'PCPG'},
        {'k': 'PARAGANGLIOMA,CANCER', 'v': 'PCPG'},
        {'k': 'PROSTATE,CANCER', 'v': 'PRAD'},
        {'k': 'RECTUM,CANCER', 'v': 'READ'},
        {'k': 'SARCOMA,CANCER', 'v': 'SARC'},
        {'k': 'SKIN,CANCER', 'v': 'SKCM'},
        {'k': 'STOMACH,CANCER', 'v': 'STAD'},
        {'k': 'TESTICULAR,CANCER', 'v': 'TGCT'},
        {'k': 'THYMOMA,CANCER', 'v': 'THYM'}, #Wikify
        {'k': 'THYROID,CANCER', 'v': 'THCA'},
        {'k': 'UTERINE,CANCER', 'v': 'UCS'},
        {'k': 'UTERINE,CORPUS,ENDOMETRIAL,CANCER', 'v': 'UCEC'}, #Wikify
        {'k': 'UVEAL,MELANOMA,CANCER', 'v': 'UVM'},
        {'k': 'UVEAL,CANCER', 'v': 'UVM'},
        {'k': 'LEUKEMIA', 'v': 'LAML'},
        {'k': 'MYELOID,LEUKEMIA', 'v': 'LAML'},
        {'k': 'ADRENOCORTICAL,CARCINOMA', 'v': 'ACC'},
        {'k': 'BLADDER,UROTHELIAL,CARCINOMA', 'v': 'BLCA'},
        {'k': 'BRAIN,LOWER,GRADE,GLIOMA', 'v': 'LGG'},
        {'k': 'BREAST,INVASIVE,CARCINOMA', 'v': 'BRCA'},
        {'k': 'CERVICAL,SQUAMOUS,CELL,CARCINOMA', 'v': 'CESC'},
        {'k': 'ENDOCERVICAL,ADENOCARCINOMA', 'v': 'CESC'},
        {'k': 'CHOLANGIOCARCINOMA', 'v': 'CHOL'},
        {'k': 'COLON,ADENOCARCINOMA', 'v': 'COAD'},
        {'k': 'ESOPHAGEAL,CARCINOMA', 'v': 'ESCA'},
        {'k': 'GLIOBLASTOMA,MULTIFORME', 'v': 'GBM'},
        {'k': 'HEAD,AND,NECK,SQUAMOUS,CELL,CARCINOMA', 'v': 'HNSC'},
        {'k': 'KIDNEY,CHROMOPHOBE', 'v': 'KICH'},
        {'k': 'KIDNEY,RENAL,CLEAR,CELL,CARCINOMA', 'v': 'KIRC'},
        {'k': 'KIDNEY,RENAL,PAPILLARY,CELL,CARCINOMA', 'v': 'KIRP'},
        {'k': 'LIVER,HEPATOCELLULAR,CARCINOMA', 'v': 'LIHC'},
        {'k': 'LUNG,ADENOCARCINOMA', 'v': 'LUAD'},
        {'k': 'LUNG,SQUAMOUS,CELL,CARCINOMA', 'v': 'LUSC'},
        {'k': 'LYMPHOID,NEOPLASM,DIFFUSE,LARGE,B-CELL,LYMPHOMA', 'v': 'DLBC'},
        {'k': 'MESOTHELIOMA', 'v': 'MESO'},
        {'k': 'OVARIAN,SEROUS,CYSTADENOCARCINOMA', 'v': 'OV'},
        {'k': 'PANCREATIC,ADENOCARCINOMA', 'v': 'PAAD'},
        {'k': 'PHEOCHROMOCYTOMA', 'v': 'PCPG'},
        {'k': 'PARAGANGLIOMA', 'v': 'PCPG'},
        {'k': 'PROSTATE,ADENOCARCINOMA', 'v': 'PRAD'},
        {'k': 'RECTUM,ADENOCARCINOMA', 'v': 'READ'},
        {'k': 'SARCOMA', 'v': 'SARC'},
        {'k': 'SKIN,CUTANEOUS,MELANOMA', 'v': 'SKCM'},
        {'k': 'STOMACH,ADENOCARCINOMA', 'v': 'STAD'},
        {'k': 'TESTICULAR,GERM,CELL,TUMORS', 'v': 'TGCT'},
        {'k': 'THYMOMA', 'v': 'THYM'},
        {'k': 'THYROID,CARCINOMA', 'v': 'THCA'},
        {'k': 'UTERINE,CARCINOSARCOMA', 'v': 'UCS'},
        {'k': 'UTERINE,CORPUS,ENDOMETRIAL,CARCINOMA', 'v': 'UCEC'},
        {'k': 'UVEAL,MELANOMA', 'v': 'UVM'}
    ]
    for kv in genome_id_kv:
        if(kv['k'] in terms_uppercase):
            # cut the disease phrase out and collapse the doubled comma
            terms_uppercase = terms_uppercase.replace(kv['k'], '').replace(',,',',')
            return_value.append({'latin': kv['k'].replace(',',' '), 'familiar_term': kv['v']})
    if(terms_uppercase[0:1] == ','):
        # strip only the leading comma; the original sliced [1:-1], which
        # also chopped the final character off the remaining query
        terms_uppercase = terms_uppercase[1:]
    if(terms_uppercase == ','):
        terms_uppercase = ''
    print(terms_uppercase)
    return {'terms': terms_uppercase, 'special_terms': return_value}
def auto_complete_search(term):
    """Classify a partial query string for autocomplete suggestions.

    Returns {'termClassification': <TermAnalyzer partial-match results>}.
    """
    analyzer = TermAnalyzer()
    classified = analyzer.identify_term_partial(term)
    return {'termClassification': classified}
def test_linear_classifier():
    """Smoke-test fitting a LinearRegression on random data.

    NOTE(review): the coefficients are read but discarded and nothing is
    asserted -- this only proves fit() runs without raising.
    """
    est = LinearRegression(fit_intercept=False)
    # random training data
    X = np.random.rand(10, 2)
    y = np.random.randint(2, size=10)
    est.fit(X, y)
    est.coef_ # access coefficients
def load_disease_groups():
    """Populate allterms2 with TCGA disease names grouped by body site.

    Each (term, group) pair below becomes one DISEASE document for the
    human genome, with the term uppercased on insert.  Duplicate pairs
    are kept deliberately -- save() inserts a new document each time,
    matching the original data load.  The original spelled every entry
    as a full 4-key dict; genomeType and type were constant, so they are
    factored out here.

    NOTE(review): many terms carry a trailing space (e.g.
    'Adrenocortical Cancer ') that is preserved in the database --
    confirm whether lookups elsewhere expect that trailing space.
    """
    disease_term_groups = [
        ('Adrenocortical Cancer ', 'Adrenal'),
        ('Adrenocortical Carcinoma ', 'Adrenal'),
        ('Pheochromocytoma and Paraganglioma ', 'Adrenal'),
        ('Cholangiocarcinoma ', 'Bile'),
        ('Cholangiocarcinoma ', 'Bile'),
        ('Bladder Cancer', 'Bladder'),
        ('Bladder Urothelial Carcinoma ', 'Bladder'),
        ('Brain Lower Grade Glioma ', 'Brain'),
        ('Glioblastoma ', 'Brain'),
        ('Glioblastoma Multiforme', 'Brain'),
        ('Glioblastoma Multiforme and Brain Lower Grade Glioma ', 'Brain'),
        ('Glioma High Grade', 'Brain'),
        ('Breast Invasive Carcinoma ', 'Breast'),
        ('Breast Tumors RNA', 'Breast'),
        ('Cervical Cancer ChemoradioResistant', 'Cervical'),
        ('Cervical Squamous Cell Carcinoma and Endocervical Adenocarcinoma ', 'Cervical'),
        ('Colon Adenocarcinoma', 'Colon'),
        ('Colon Adenocarcinoma and Rectum adenocarcinoma ', 'colon'),
        ('Colon Cancer ', 'colon'),
        ('Ulcerative Colitis Colon Inflammation ', 'colon'),
        ('Endometrial Cancer Stage I', 'Endometrial'),
        ('Esophageal Cancer', 'Esophagus'),
        ('Esophageal Carcinoma', 'Esophagus'),
        ('Head and Neck ', 'HeadAndNeck'),
        ('Head and Neck Squamous Cell Carcinoma ', 'HeadAndNeck'),
        ('Kidney Chromophobe ', 'Kidney'),
        ('Kidney Chromophobe and Kidney Renal Clear Cell Carcinoma and Kidney Renal Papillary Cell Carcinoma', 'Kidney'),
        ('Kidney Renal Clear Cell Carcinoma ', 'Kidney'),
        ('Kidney Renal Clear Cell Carcinoma ', 'Kidney'),
        ('Kidney Renal Papillary Cell Carcinoma ', 'Kidney'),
        ('Renal Cell Carcinoma', 'Kidney'),
        ('Acute Myeloid Leukemia ', 'Leukemia'),
        ('Acute Myeloid Leukemia ', 'Leukemia'),
        ('Hepatocellular Carcinoma ', 'Liver'),
        ('Liver Hepatocellular Carcinoma ', 'Liver'),
        ('Liver Hepatocellular Carcinoma Early Stage Cirrhosis ', 'Liver'),
        ('Blood Lung Cancer', 'Lung'),
        ('Blood Lung Cancer Stage I ', 'Lung'),
        ('Lung Adenocarcinoma ', 'Lung'),
        ('Lung Squamous Cell Carcinoma ', 'Lung'),
        ('Diffuse Large B-Cell Lymphoma', 'Lymphoma'),
        ('Lymphoid Neoplasm Diffuse Large B-cell Lymphoma', 'Lymphoma'),
        ('Mesothelioma ', 'Ovarian'),
        ('Ovarian Cancer', 'Ovarian'),
        ('Ovarian Serous Cystadenocarcinoma ', 'Ovarian'),
        ('Pancreatic ', 'Pancreatic'),
        ('Pancreatic Adenocarcinoma ', 'Pancreatic'),
        ('Pancreatic Ductal Adenocarcinoma', 'Pancreatic'),
        ('Prostate Adenocarcinoma', 'Prostate'),
        ('Prostate Carcinoma ', 'Prostate'),
        ('Rectal Cancer ', 'Rectal'),
        ('Rectum Adenocarcinoma ', 'Rectal'),
        ('Sarcoma ', 'Sarcoma'),
        ('Sarcoma ', 'Sarcoma'),
        ('Melanoma Malignant ', 'Skin'),
        ('Skin Cutaneous Melanoma', 'Skin'),
        ('Stomach Adenocarcinoma ', 'Stomach'),
        ('Stomach and Esophageal Carcinoma', 'Stomach'),
        ('Stomach Cancer 126 ', 'Stomach'),
        ('Testicular Germ Cell Tumors ', 'Testicular'),
        ('Thymoma ', 'Thymus'),
        ('Thyroid Cancer', 'Thyroid'),
        ('Thyroid Carcinoma', 'Thyroid'),
        ('Uterine Carcinosarcoma ', 'Uterine'),
        ('Uterine Corpus Endometrial Carcinoma ', 'Uterine'),
        ('Uveal Melanoma', 'Uveal'),
        ('Uveal Melanoma', 'Uveal'),
        # bare body-site group names, one per group
        ('Adrenal ', 'Adrenal'),
        ('Bile ', 'Bile'),
        ('Bladder ', 'Bladder'),
        ('Brain', 'Brain'),
        ('Breast ', 'Breast'),
        ('Cervical', 'Cervical'),
        ('colon', 'colon'),
        ('Endometrial', 'Endometrial'),
        ('Esophagus ', 'Esophagus'),
        ('HeadAndNeck', 'HeadAndNeck'),
        ('Kidney ', 'Kidney'),
        ('Leukemia', 'Leukemia'),
        ('Liver', 'Liver'),
        ('Lung ', 'Lung'),
        ('Lymphoma', 'Lymphoma'),
        ('Ovarian ', 'Ovarian'),
        ('Pancreatic ', 'Pancreatic'),
        ('Prostate', 'Prostate'),
        ('Rectal ', 'Rectal'),
        ('Sarcoma ', 'Sarcoma'),
        ('Skin ', 'Skin'),
        ('Stomach ', 'Stomach'),
        ('Testicular ', 'Testicular'),
        ('Thymus ', 'Thymus'),
        ('Thyroid ', 'Thyroid'),
        ('Uterine ', 'Uterine'),
        ('Uveal', 'Uveal')
    ]
    client = pymongo.MongoClient()
    db = client.identifiers
    allterms = db.allterms2
    #allterms.drop()
    for term, group in disease_term_groups:
        allterms.save({
            'genomeType': 'human',
            'term': term.upper(),
            'type': 'DISEASE',
            'group': group
        })
|
|
from multiprocessing import Value
import pytest
import time
from selenium.webdriver.common.keys import Keys
import dash
from dash import Dash, Input, Output, State, MATCH, dcc, html, dash_table as dt
from dash_test_components import MyPersistedComponent
from dash_test_components import MyPersistedComponentNested
@pytest.fixture(autouse=True)
def clear_storage(dash_duo):
    """Wipe browser storage after every test so persistence tests stay isolated."""
    yield
    dash_duo.clear_storage()
def table_columns(names, **extra_props):
    """Build DataTable column dicts (ids "c0", "c1", ...) for the given names.

    Every column is renamable and hideable; *extra_props* are merged into
    each column dict.
    """
    columns = []
    for index, column_name in enumerate(names):
        columns.append(
            dict(
                id="c{}".format(index),
                name=column_name,
                renamable=True,
                hideable=True,
                **extra_props
            )
        )
    return columns
def simple_table(names=("a", "b"), **props_override):
    """Return a persistence-enabled DataTable with two fixed rows.

    Keyword overrides replace the defaults (including ``persistence``).
    """
    defaults = {
        "id": "table",
        "columns": table_columns(names),
        "data": [{"c0": 0, "c1": 1}, {"c0": 2, "c1": 3}],
        "persistence": True,
    }
    defaults.update(props_override)
    return dt.DataTable(**defaults)
def reloadable_app(**props_override):
    """Build a Dash app whose table `persistence` key can be swapped at runtime.

    `app.persistence` is a multiprocessing Value so tests can change the
    key between page loads; the layout function re-reads it on every
    render.  A callback mirrors the table's column names and hidden
    columns into #out for easy assertions.
    """
    app = Dash(__name__)
    # mutable persistence key, re-read each time layout() runs
    app.persistence = Value("i", 1)
    def layout():
        return html.Div(
            [
                html.Div(id="out"),
                simple_table(persistence=app.persistence.value, **props_override),
            ]
        )
    app.layout = layout
    @app.callback(
        Output("out", "children"),
        [Input("table", "columns"), Input("table", "hidden_columns")],
    )
    def report_props(columns, hidden_columns):
        # render the effective (possibly persisted) props as text
        return "names: [{}]; hidden: [{}]".format(
            ", ".join([col["name"] for col in columns]), ", ".join(hidden_columns or [])
        )
    return app
NEW_NAME = "mango"  # replacement column name rename_and_hide types into the prompt
def rename_and_hide(dash_duo, rename=0, new_name=NEW_NAME, hide=1):
    """Rename column index *rename* to *new_name* and hide column index *hide*.

    Uses the table's header edit button (which opens a browser prompt)
    and the header hide button.
    """
    dash_duo.find_element(
        ".dash-header.column-{} .column-header--edit".format(rename)
    ).click()
    # the edit button opens a native prompt dialog for the new name
    prompt = dash_duo.driver.switch_to.alert
    prompt.send_keys(new_name)
    prompt.accept()
    dash_duo.find_element(
        ".dash-header.column-{} .column-header--hide".format(hide)
    ).click()
def check_table_names(dash_duo, names, table_id="table"):
    """Assert the table shows exactly *names*, in order, as visible column headers."""
    # Wait on the first header so the table has finished (re)rendering
    # before counting and comparing the rest.
    first_header = "#{} .column-0 .column-header-name".format(table_id)
    dash_duo.wait_for_text_to_equal(first_header, names[0])
    header_elements = dash_duo.find_elements(
        "#{} .column-header-name".format(table_id)
    )
    assert len(header_elements) == len(names)
    for index, expected in enumerate(names):
        selector = "#{} .column-{} .column-header-name".format(table_id, index)
        assert dash_duo.find_element(selector).text == expected
def test_rdps001_local_reload(dash_duo):
    """Default (local storage) persistence survives reload, and changing
    the `persistence` key switches between independently stored value
    sets; a falsy key disables storage without erasing earlier keys."""
    app = reloadable_app()
    dash_duo.start_server(app)
    dash_duo.wait_for_text_to_equal("#out", "names: [a, b]; hidden: []")
    check_table_names(dash_duo, ["a", "b"])
    rename_and_hide(dash_duo)
    # callback output
    dash_duo.wait_for_text_to_equal(
        "#out", "names: [{}, b]; hidden: [c1]".format(NEW_NAME)
    )
    check_table_names(dash_duo, [NEW_NAME])
    dash_duo.wait_for_page()
    # callback gets persisted values, not the values provided with the layout
    dash_duo.wait_for_text_to_equal(
        "#out", "names: [{}, b]; hidden: [c1]".format(NEW_NAME)
    )
    check_table_names(dash_duo, [NEW_NAME])
    # new persistence reverts
    app.persistence.value = 2
    dash_duo.wait_for_page()
    check_table_names(dash_duo, ["a", "b"])
    rename_and_hide(dash_duo, 1, "two", 0)
    dash_duo.wait_for_text_to_equal("#out", "names: [a, two]; hidden: [c0]")
    check_table_names(dash_duo, ["two"])
    # put back the old persistence, get the old values
    app.persistence.value = 1
    dash_duo.wait_for_page()
    dash_duo.wait_for_text_to_equal(
        "#out", "names: [{}, b]; hidden: [c1]".format(NEW_NAME)
    )
    check_table_names(dash_duo, [NEW_NAME])
    # falsy persistence disables it
    app.persistence.value = 0
    dash_duo.wait_for_page()
    check_table_names(dash_duo, ["a", "b"])
    rename_and_hide(dash_duo)
    check_table_names(dash_duo, [NEW_NAME])
    dash_duo.wait_for_page()
    check_table_names(dash_duo, ["a", "b"])
    # falsy to previous truthy also brings the values
    app.persistence.value = 2
    dash_duo.wait_for_page()
    dash_duo.wait_for_text_to_equal("#out", "names: [a, two]; hidden: [c0]")
    check_table_names(dash_duo, ["two"])
def test_rdps002_session_reload(dash_duo):
    """persistence_type='session' keeps edited props across a page reload."""
    app = reloadable_app(persistence_type="session")
    dash_duo.start_server(app)
    check_table_names(dash_duo, ["a", "b"])
    rename_and_hide(dash_duo)
    check_table_names(dash_duo, [NEW_NAME])
    dash_duo.wait_for_page()
    # callback gets persisted values, not the values provided with the layout
    dash_duo.wait_for_text_to_equal(
        "#out", "names: [{}, b]; hidden: [c1]".format(NEW_NAME)
    )
    check_table_names(dash_duo, [NEW_NAME])
def test_rdps003_memory_reload(dash_duo):
    """persistence_type='memory' does NOT keep edited props across a reload."""
    app = reloadable_app(persistence_type="memory")
    dash_duo.start_server(app)
    check_table_names(dash_duo, ["a", "b"])
    rename_and_hide(dash_duo)
    check_table_names(dash_duo, [NEW_NAME])
    dash_duo.wait_for_page()
    # no persistence after reload with persistence_type=memory
    check_table_names(dash_duo, ["a", "b"])
def test_rdps004_show_hide(dash_duo):
    """Memory persistence survives unmount/remount within one page load;
    remounting with a new `persistence` value starts fresh."""
    app = Dash(__name__)
    app.layout = html.Div(
        [html.Button("Show/Hide", id="toggle-table"), html.Div(id="container")]
    )
    @app.callback(Output("container", "children"), [Input("toggle-table", "n_clicks")])
    def toggle_table(n):
        # odd clicks remove the table; even clicks re-create it, with a
        # new persistence id after the third click
        if (n or 0) % 2:
            return "nope"
        return simple_table(
            persistence_type="memory", persistence=1 if (n or 0) < 3 else 2
        )
    dash_duo.start_server(app)
    check_table_names(dash_duo, ["a", "b"])
    rename_and_hide(dash_duo)
    check_table_names(dash_duo, [NEW_NAME])
    dash_duo.find_element("#toggle-table").click()
    # table is gone
    dash_duo.wait_for_text_to_equal("#container", "nope")
    dash_duo.find_element("#toggle-table").click()
    # table is back, with persisted props
    check_table_names(dash_duo, [NEW_NAME])
    dash_duo.find_element("#toggle-table").click()
    # gone again
    dash_duo.wait_for_text_to_equal("#container", "nope")
    dash_duo.find_element("#toggle-table").click()
    # table is back, new persistence val so props not persisted
    check_table_names(dash_duo, ["a", "b"])
def test_rdps005_persisted_props(dash_duo):
    """`persisted_props` limits which props persist: dropping
    hidden_columns from the list suspends its stored value, which
    returns when the default list is restored."""
    app = Dash(__name__)
    app.layout = html.Div(
        [
            html.Button("toggle persisted_props", id="toggle-table"),
            html.Div(id="container"),
        ]
    )
    @app.callback(Output("container", "children"), [Input("toggle-table", "n_clicks")])
    def toggle_table(n):
        # odd clicks restrict persistence to data + column names only
        if (n or 0) % 2:
            return simple_table(persisted_props=["data", "columns.name"])
        return simple_table()
    dash_duo.start_server(app)
    check_table_names(dash_duo, ["a", "b"])
    rename_and_hide(dash_duo)
    check_table_names(dash_duo, [NEW_NAME])
    dash_duo.find_element("#toggle-table").click()
    # hidden_columns not persisted
    check_table_names(dash_duo, [NEW_NAME, "b"])
    dash_duo.find_element("#toggle-table").click()
    # back to original persisted_props hidden_columns returns
    check_table_names(dash_duo, [NEW_NAME])
def test_rdps006_move_on_page(dash_duo):
    """Persistence is keyed by component id, not by position in the
    layout tree: the table keeps its edits as it is nested deeper."""
    app = Dash(__name__)
    app.layout = html.Div(
        [html.Button("move table", id="move-table"), html.Div(id="container")]
    )
    @app.callback(Output("container", "children"), [Input("move-table", "n_clicks")])
    def move_table(n):
        # each click wraps the table one level deeper, adding a marker div
        children = [html.Div("div 0", id="div0"), simple_table()]
        for i in range(1, (n or 0) + 1):
            children = [
                html.Div("div {}".format(i), id="div{}".format(i)),
                html.Div(children),
            ]
        return children
    def find_last_div(n):
        # marker div n exists and n+1 does not => nesting depth is exactly n
        dash_duo.wait_for_text_to_equal("#div{}".format(n), "div {}".format(n))
        assert len(dash_duo.find_elements("#div{}".format(n + 1))) == 0
    dash_duo.start_server(app)
    find_last_div(0)
    check_table_names(dash_duo, ["a", "b"])
    rename_and_hide(dash_duo)
    check_table_names(dash_duo, [NEW_NAME])
    for i in range(1, 5):
        dash_duo.find_element("#move-table").click()
        find_last_div(i)
        check_table_names(dash_duo, [NEW_NAME])
def test_rdps007_one_prop_changed(dash_duo):
    """A new server-provided value for a persisted prop overrides and
    clears the stored value; reverting the server value does not bring
    the stored edit back."""
    app = Dash(__name__)
    app.layout = html.Div(
        [html.Button("hide/show cols", id="hide-cols"), html.Div(id="container")]
    )
    @app.callback(Output("container", "children"), [Input("hide-cols", "n_clicks")])
    def hide_cols(n):
        # odd clicks supply a different hidden_columns value from the server
        return simple_table(hidden_columns=["c0"] if (n or 0) % 2 else [])
    dash_duo.start_server(app)
    check_table_names(dash_duo, ["a", "b"])
    rename_and_hide(dash_duo)
    check_table_names(dash_duo, [NEW_NAME])
    dash_duo.find_element("#hide-cols").click()
    # hidden_columns gets the new value
    check_table_names(dash_duo, ["b"])
    dash_duo.find_element("#hide-cols").click()
    # back to original hidden_columns, but saved value won't come back
    check_table_names(dash_duo, [NEW_NAME, "b"])
def test_rdps008_unsaved_part_changed(dash_duo):
    """Changing a non-persisted facet of a prop (columns.deletable) does
    not clear the persisted facet (columns.name)."""
    app = Dash(__name__)
    app.layout = html.Div(
        [html.Button("toggle deletable", id="deletable"), html.Div(id="container")]
    )
    @app.callback(Output("container", "children"), [Input("deletable", "n_clicks")])
    def toggle_deletable(n):
        # odd clicks make the columns deletable; names are unchanged
        if (n or 0) % 2:
            return simple_table(columns=table_columns(("a", "b"), deletable=True))
        return simple_table()
    dash_duo.start_server(app)
    check_table_names(dash_duo, ["a", "b"])
    rename_and_hide(dash_duo)
    check_table_names(dash_duo, [NEW_NAME])
    assert len(dash_duo.find_elements(".column-header--delete")) == 0
    dash_duo.find_element("#deletable").click()
    # column names still persisted when columns.deletable changed
    # because extracted name list didn't change
    check_table_names(dash_duo, [NEW_NAME])
    assert len(dash_duo.find_elements(".column-header--delete")) == 1
    dash_duo.find_element("#deletable").click()
    check_table_names(dash_duo, [NEW_NAME])
    assert len(dash_duo.find_elements(".column-header--delete")) == 0
def test_rdps009_clear_prop_callback(dash_duo):
    """A callback output that targets the persisted prop itself clears its
    saved value — even when the returned value equals the original."""
    app = Dash(__name__)
    app.layout = html.Div(
        [html.Button("reset name edits", id="reset-names"), simple_table()]
    )

    @app.callback(Output("table", "columns"), [Input("reset-names", "n_clicks")])
    def reset_names(n):
        # Returning the persisted prop directly always wipes persistence.
        # Emitting no_update on the initial (n falsy) call prevents that
        # wipe before the button is pressed; with several inputs we would
        # need to inspect `triggered` instead.
        if not n:
            return dash.no_update
        return table_columns(("a", "b"))

    dash_duo.start_server(app)
    check_table_names(dash_duo, ["a", "b"])
    rename_and_hide(dash_duo)
    check_table_names(dash_duo, [NEW_NAME])

    dash_duo.find_element("#reset-names").click()
    # Column names revert; hidden_columns persistence is untouched.
    check_table_names(dash_duo, ["a"])
def test_rdps010_toggle_persistence(dash_duo):
    """Switching the `persistence` key swaps between independent saved values.

    The persisted input is re-rendered (whole component) with whatever
    persistence value is typed into #persistence-val. Each distinct truthy
    key keeps its own stored edit; a falsy key stores nothing.
    """
    def make_input(persistence):
        return dcc.Input(id="persisted", value="a", persistence=persistence)

    app = Dash(__name__)
    app.layout = html.Div(
        [
            dcc.Input(id="persistence-val", value=""),
            html.Div(make_input(""), id="persisted-container"),
            html.Div(id="out"),
        ]
    )

    @app.callback(
        Output("persisted-container", "children"), [Input("persistence-val", "value")]
    )
    def set_persistence(val):
        # rebuild the whole component so the correct initial value applies
        return make_input(val)

    @app.callback(Output("out", "children"), [Input("persisted", "value")])
    def set_out(val):
        return val

    dash_duo.start_server(app)
    dash_duo.wait_for_text_to_equal("#out", "a")
    dash_duo.find_element("#persisted").send_keys("lpaca")
    dash_duo.wait_for_text_to_equal("#out", "alpaca")
    # persistence key "s": fresh store, back to the default value
    dash_duo.find_element("#persistence-val").send_keys("s")
    dash_duo.wait_for_text_to_equal("#out", "a")
    dash_duo.find_element("#persisted").send_keys("nchovies")
    dash_duo.wait_for_text_to_equal("#out", "anchovies")
    # persistence key "s2": another fresh store
    dash_duo.find_element("#persistence-val").send_keys("2")
    dash_duo.wait_for_text_to_equal("#out", "a")
    dash_duo.find_element("#persisted").send_keys(
        Keys.BACK_SPACE
    )  # persist falsy value
    dash_duo.wait_for_text_to_equal("#out", "")
    # alpaca not saved with falsy persistence
    dash_duo.clear_input("#persistence-val")
    dash_duo.wait_for_text_to_equal("#out", "a")
    # key "s" still holds "anchovies"; key "s2" holds the empty string
    dash_duo.find_element("#persistence-val").send_keys("s")
    dash_duo.wait_for_text_to_equal("#out", "anchovies")
    dash_duo.find_element("#persistence-val").send_keys("2")
    dash_duo.wait_for_text_to_equal("#out", "")
def test_rdps011_toggle_persistence2(dash_duo):
    """Driving the `persistence` prop itself from a callback still works.

    Unlike rdps010 the callback outputs only the persistence prop, not the
    whole component, so the correct initial value cannot be restored at
    switch time — but it must not raise console errors either.
    """
    app = dash.Dash(__name__)
    app.layout = html.Div(
        [
            dcc.Input(id="persistence-val", value=""),
            dcc.Input(id="persisted2", value="a", persistence=""),
            html.Div(id="out"),
        ]
    )

    # this is not a good way to set persistence, as it doesn't allow you to
    # get the right initial value. Much better is to update the whole component
    # as we do in the previous test case... but it shouldn't break this way.
    @app.callback(
        Output("persisted2", "persistence"), [Input("persistence-val", "value")]
    )
    def set_persistence(val):
        return val

    @app.callback(Output("out", "children"), [Input("persisted2", "value")])
    def set_out(val):
        return val

    dash_duo.start_server(app)
    dash_duo.wait_for_text_to_equal("#out", "a")
    dash_duo.find_element("#persistence-val").send_keys("s")
    time.sleep(0.2)
    # switching persistence must not log browser console errors
    assert not dash_duo.get_logs()
    dash_duo.wait_for_text_to_equal("#out", "a")
    dash_duo.find_element("#persisted2").send_keys("pricot")
    dash_duo.wait_for_text_to_equal("#out", "apricot")
    dash_duo.find_element("#persistence-val").send_keys("2")
    dash_duo.wait_for_text_to_equal("#out", "a")
    dash_duo.find_element("#persisted2").send_keys("rtichoke")
    dash_duo.wait_for_text_to_equal("#out", "artichoke")
    # no persistence, still goes back to original value
    dash_duo.clear_input("#persistence-val")
    dash_duo.wait_for_text_to_equal("#out", "a")
    # apricot and artichoke saved
    dash_duo.find_element("#persistence-val").send_keys("s")
    dash_duo.wait_for_text_to_equal("#out", "apricot")
    dash_duo.find_element("#persistence-val").send_keys("2")
    assert not dash_duo.get_logs()
    dash_duo.wait_for_text_to_equal("#out", "artichoke")
def test_rdps012_pattern_matching(dash_duo):
    # copy of rdps010 but with dict IDs,
    # plus a button to change the dict ID so the persistence should reset
    def make_input(persistence, n):
        return dcc.Input(
            id={"i": n, "id": "persisted"},
            className="persisted",
            value="a",
            persistence=persistence,
        )

    app = dash.Dash(__name__)
    app.layout = html.Div(
        [html.Button("click", id="btn", n_clicks=0), html.Div(id="content")]
    )

    @app.callback(Output("content", "children"), [Input("btn", "n_clicks")])
    def content(n):
        # each click rebuilds everything under a new {"i": n, ...} dict id
        return [
            dcc.Input(
                id={"i": n, "id": "persistence-val"},
                value="",
                className="persistence-val",
            ),
            html.Div(make_input("", n), id={"i": n, "id": "persisted-container"}),
            html.Div(id={"i": n, "id": "out"}, className="out"),
        ]

    @app.callback(
        Output({"i": MATCH, "id": "persisted-container"}, "children"),
        [Input({"i": MATCH, "id": "persistence-val"}, "value")],
        [State("btn", "n_clicks")],
    )
    def set_persistence(val, n):
        return make_input(val, n)

    @app.callback(
        Output({"i": MATCH, "id": "out"}, "children"),
        [Input({"i": MATCH, "id": "persisted"}, "value")],
    )
    def set_out(val):
        return val

    dash_duo.start_server(app)
    for _ in range(3):
        # same scenario as rdps010, addressed by class since ids are dicts
        dash_duo.wait_for_text_to_equal(".out", "a")
        dash_duo.find_element(".persisted").send_keys("lpaca")
        dash_duo.wait_for_text_to_equal(".out", "alpaca")
        dash_duo.find_element(".persistence-val").send_keys("s")
        dash_duo.wait_for_text_to_equal(".out", "a")
        dash_duo.find_element(".persisted").send_keys("nchovies")
        dash_duo.wait_for_text_to_equal(".out", "anchovies")
        dash_duo.find_element(".persistence-val").send_keys("2")
        dash_duo.wait_for_text_to_equal(".out", "a")
        dash_duo.find_element(".persisted").send_keys(
            Keys.BACK_SPACE
        )  # persist falsy value
        dash_duo.wait_for_text_to_equal(".out", "")
        # alpaca not saved with falsy persistence
        dash_duo.clear_input(".persistence-val")
        dash_duo.wait_for_text_to_equal(".out", "a")
        # key "s" holds "anchovies"; key "s2" holds the empty string
        dash_duo.find_element(".persistence-val").send_keys("s")
        dash_duo.wait_for_text_to_equal(".out", "anchovies")
        dash_duo.find_element(".persistence-val").send_keys("2")
        dash_duo.wait_for_text_to_equal(".out", "")
        # new dict id -> persistence resets for the next loop iteration
        dash_duo.find_element("#btn").click()
def test_rdps013_persisted_props_nested(dash_duo):
    # testing persistenceTransforms with generated test components
    # with persisted prop and persisted nested prop
    app = dash.Dash(__name__)
    app.layout = html.Div(
        [
            html.Button("click me", id="btn"),
            html.Div(id="container1"),
            html.Div(id="container2"),
        ]
    )

    @app.callback(Output("container1", "children"), [Input("btn", "n_clicks")])
    def update_container(n_clicks):
        # component whose whole prop is persisted (through a transform)
        return MyPersistedComponent(id="component-propName", persistence=True)

    @app.callback(Output("container2", "children"), [Input("btn", "n_clicks")])
    def update_container2(n_clicks):
        # component with a persisted *nested* prop piece
        return MyPersistedComponentNested(id="component-propPart", persistence=True)

    dash_duo.start_server(app)
    # send lower case strings to test components
    dash_duo.find_element("#component-propName").send_keys("alpaca")
    dash_duo.find_element("#component-propPart").send_keys("artichoke")
    dash_duo.find_element("#btn").click()
    # persistenceTransforms should return upper case strings
    dash_duo.wait_for_text_to_equal("#component-propName", "ALPACA")
    dash_duo.wait_for_text_to_equal("#component-propPart", "ARTICHOKE")
|
|
from nltk.tree import *
import os
from parsed_tree_loader import *
from dep_tree_loader import *
class ConllTreeGenerator():
    """Merge WSJ phrase-structure trees with CoNLL dependency files and
    extract, for every token, a "spine" fragment of the parse tree plus how
    it attaches to its head (s-adjoin / r-adjoin and the join position).

    NOTE: legacy Python 2 code (print statements).
    """

    def __init__(self, conll_path, tree_path, dump_path, section_list=[], is_rm_none_word=True, is_lossy=False, is_short_tag=True):
        # conll_path / tree_path: roots containing one "%02d" dir per section.
        # dump_path: where .spine output is written (mirrors the section dirs).
        # NOTE(review): mutable default argument for section_list — callers in
        # this file always pass it explicitly.
        self.section_list = section_list;
        self.conll_path = conll_path
        self.tree_path = tree_path
        self.dump_path = dump_path
        self.is_rm_none_word = is_rm_none_word
        self.is_lossy = is_lossy
        self.is_short_tag = is_short_tag
        return

    def generate_conll_trees(self, dump=False):
        """Process every tree file in every configured section; when `dump`
        is True, write one .spine file per input tree file."""
        ptl = ParsedTreeLoader()
        dtl = DepTreeLoader()
        for section in self.section_list:
            for tree_filename in os.listdir(self.tree_path + "%02d" % section):
                # get file that is to be loaded
                print tree_filename
                conll_file = self.conll_path + "%02d/" % section + tree_filename + ".3.pa.gs.tab"
                tree_file = self.tree_path + "%02d/" % section + tree_filename
                # load the files
                self.tree_list = ptl.load_parsed_tree(tree_file)
                self.conll_list = dtl.load_dep_tree(conll_file)
                # remove structure with none word leaf
                if self.is_rm_none_word:
                    ptl.remove_nonword(self.tree_list)
                # shorthen the tags in a tree to make them consistent to the penn treebank
                if self.is_short_tag:
                    ptl.shorten_tag(self.tree_list)
                self.enumerate_leaves()
                dump_filename = ""
                if dump == True:
                    dump_filename = self.dump_path + "%02d/" % section + tree_filename
                #47 done
                self.generate_conll_tree(dump_filename)

    def generate_conll_tree(self, dump_filename=""):
        """
        Assuming the order of sentences in ptree_list and conll_list are the same
        """
        if not len(self.tree_list) == len(self.conll_list):
            print "Incompatible parsing tree and conll data !!!"
            return
        sent_conll_tree_list = []
        for i in range(len(self.conll_list)):
            # Every sentence
            sent_conll = self.conll_list[i]
            #print self.tree_list[i].pprint()
            sent_tree = self.tree_list[i].copy(True)
            sent_conll_tree = []
            tree_words = sent_tree.leaves()
            conll_words = [r[0] for r in sent_conll]
            # find the root token (the row whose head id is 0).
            # NOTE(review): this inner loop reuses `i`, shadowing the sentence
            # index; `modifier = i` below relies on the leaked loop variable,
            # and the outer range() re-assigns `i` on the next iteration.
            for i in range(len(sent_conll)):
                if sent_conll[i][2] == 0:
                    break
            modifier = i
            modifier_treeposition = sent_tree.leaf_treeposition(modifier)
            if len(modifier_treeposition) < 1:
                print "Invalid tree";
                return
            leaf_node = self.find_leaf_node(modifier, sent_tree)
            # grow the root spine all the way to the top of the tree
            self.generate_spine(leaf_node, len(modifier_treeposition), ('_','_'), '_', sent_conll, sent_conll_tree)
            sent_conll_tree.sort(key=lambda tup: tup[0])
            sent_conll_tree_list.append(sent_conll_tree)
        if not dump_filename == "":
            self.write_file(dump_filename, sent_conll_tree_list)

    def generate_spine(self, sub_spine, spine_len, adjoin_type, join_position, sent_conll, sent_conll_tree):
        """Grow sub_spine up to spine_len levels (recursively emitting sibling
        subtrees as their own spines), then record this token's spine row."""
        while not sub_spine.height() == spine_len:
            sub_spine, adjoin_type, join_position, spine_len = self.recursive_generate_spine(sub_spine, spine_len, adjoin_type, join_position, sent_conll, sent_conll_tree)
        sent_index = sub_spine.leaves()[0][0] # start from 1 considering ROOT
        sent_conll_row = sent_conll[sent_index-1] # sent_index start from 0 in the sent_conll
        word, tag, sub_spine = self.remove_tag_word(sub_spine)
        sent_conll_tree.append((sent_index, word, tag, sent_conll_row[2], sent_conll_row[3], sub_spine, join_position, adjoin_type))

    def recursive_generate_spine(self, sub_spine, spine_len, adjoin_type, join_position, sent_conll, sent_conll_tree):
        """
        adjoin_type --- flag indicates if the node has r-adjoin before it in the same level,
        -1 if it is s-adjoin, 0 if it is the 1st r-adjoin, 1 if it is r-adjoin but not the 1st one)
        join_position --- the height in the head spine that the modifier adjoins (before any r-adjoin)
        sent_conll_tree --- store the results
        """
        if sub_spine.height() < 2:
            print "error the spine is too short"
            return
        spine = sub_spine.parent() # height at least 3
        left_sibling = sub_spine.left_sibling()
        right_sibling = sub_spine.right_sibling()
        if not self.is_r_adjoin(spine, sub_spine):
            # s-adjoin: detach every sibling and emit it as a child spine
            j = len(spine) - 1
            while j >= 0 :
                child_tree = spine[j]
                #except the spine of the current header
                if spine[j] == sub_spine:
                    j = j - 1
                    continue
                # immediate neighbours are flagged 0, the rest 1
                if spine[j] == left_sibling or spine[j] == right_sibling:
                    child_adjoin_type = ('s', 0)
                else:
                    child_adjoin_type = ('s', 1)
                spine.remove(spine[j])
                child_sub_spine, child_spine_len = self.get_child_info(child_tree, sub_spine.leaves()[0][0], sent_conll)
                child_join_position = sub_spine.height() - 1 # + 1 - 2
                #child_adjoin_type = "s"
                self.generate_spine(child_sub_spine, child_spine_len, child_adjoin_type, child_join_position, sent_conll, sent_conll_tree)
                j = j - 1
        else:
            # r-adjoin: same sibling sweep, then collapse the duplicated node
            j = len(spine) - 1
            while j >= 0:
                child_tree = spine[j]
                if spine[j] == sub_spine:
                    j = j - 1
                    continue
                if spine[j] == left_sibling or spine[j] == right_sibling:
                    child_adjoin_type = ('r', 0)
                else:
                    child_adjoin_type = ('r', 1)
                spine.remove(spine[j])
                child_sub_spine, child_spine_len = self.get_child_info(child_tree, sub_spine.leaves()[0][0], sent_conll)
                child_join_position = sub_spine.height() - 2 # the height that the child join to, first node as height 1
                self.generate_spine(child_sub_spine, child_spine_len, child_adjoin_type, child_join_position, sent_conll, sent_conll_tree)
                j = j - 1
            # remove the node added by r adjoinction
            spine = self.change_tree(spine, sub_spine)
            spine_len = spine_len - 1
        return spine, adjoin_type, join_position, spine_len

    def is_r_adjoin(self, spine, sub_spine):
        """Decide whether `spine` is a node introduced by r-adjunction (its
        label duplicates sub_spine's), subject to the lossless checks."""
        # no adj node:
        if not spine.node == sub_spine.node:
            return False
        # only two same adj node:
        if spine.parent() == None or (not spine.parent().node == spine.node):
            print "two same adj nodes -- r adjoin"
            return True
        # Tree adj node without left sub sibling but with left sibling (same as right):
        if self.is_lossy == False:
            if (spine.left_sibling() and not sub_spine.left_sibling()) or (spine.right_sibling() and not sub_spine.right_sibling()):
                print "Tree adj node without left sub sibling but with left sibling (same as right)"
                return False
        print "r-adjoin"
        return True

    def change_tree(self, tree_to_remove, tree_to_add):
        """Splice tree_to_add into tree_to_remove's slot in the parent tree
        and return tree_to_add."""
        parent_tree = tree_to_remove.parent()
        p_index = tree_to_remove.parent_index()
        # detach tree_to_add from its current parent first (NLTK internal API)
        tree_to_add.parent()._delparent(tree_to_add, tree_to_add.parent_index())
        if not parent_tree == None:
            parent_tree.pop(p_index)
            parent_tree.insert(p_index, tree_to_add)
        return tree_to_add

    def get_child_info(self, child_tree, head_sent_index, sent_conll):
        """Locate, inside child_tree, the token whose head is head_sent_index;
        return its POS-level subtree and the spine length to reach it."""
        for i in range(child_tree.leaves()[0][0]-1, child_tree.leaves()[-1][0]):
            if sent_conll[i][2] == head_sent_index:
                break
        child_head_subtree_index = i - child_tree.leaves()[0][0] + 1
        child_spine_len = len(child_tree.leaf_treeposition(child_head_subtree_index))+1
        child_sub_spine = self.find_leaf_node(child_head_subtree_index, child_tree)
        return child_sub_spine, child_spine_len

    def find_leaf_node(self, leaf_index, tree):
        """
        leaf_index is the index in the tree passed into the function
        """
        # walk down toward the leaf, stopping at its POS-level node (height 2)
        node = tree
        if node.height() <= 2:
            return node
        treeposition = tree.leaf_treeposition(leaf_index)
        for i in treeposition:
            node = node[i]
            if node.height() <= 2:
                break
        return node

    def write_file(self, filename, sent_conll_tree_list):
        """Dump one block per sentence (blank-line separated) to
        <filename>.spine, creating the target directory on demand."""
        dir = os.path.dirname(filename)
        try:
            os.stat(dir)
        except:
            os.mkdir(dir)
        # NOTE(review): rstrip("\.mrg") strips trailing *characters* from the
        # set {\, ., m, r, g}, not the ".mrg" suffix — a base name such as
        # "gram.mrg" would be over-truncated. Confirm and fix upstream.
        fp = open(filename.rstrip("\.mrg")+".spine","w")
        for sent_conll_tree in sent_conll_tree_list:
            for row in sent_conll_tree:
                # sent_index, word, tag, sent_conll_row[2], sent_conll_row[3], sub_spine, join_position, adjoin_type
                # Pierre _ NNP NNP _ 2 NMOD _ _ (NNP Pierre) 1 s 0
                fp.write("%d %s _ %s %s _ %d %s _ _ \"%s\" %s %s %s\n"
                         % (row[0],row[1],row[2],row[2],row[3],row[4],row[5].pprint(),str(row[6]),row[7][0],str(row[7][1])))
            fp.write("\n")
        fp.close()

    def remove_tag_word(self, spine):
        """Strip the word and POS level off the bottom of a spine; return
        (word, tag, remaining_spine)."""
        # if nothing went wrong, the spine would contain both word and tag
        treeposition = spine.leaf_treeposition(0)
        word = spine.leaves()[0][1]
        tag = spine[treeposition[:-1]].node
        if len(treeposition) == 1:
            spine = ParentedTree("",[])
        else:
            sub_spine = spine[treeposition[:-2]]
            sub_spine.pop()
            sub_spine.append(ParentedTree("",[]))
        return word, tag, spine

    def get_spine(self, treeposition, tree):
        """Copy the root-to-leaf path as a chain of single-child Trees."""
        # assume one word per pos tag
        if len(treeposition) == 1:
            return tree
        else:
            subpath = self.get_spine(treeposition[1:], tree[treeposition[0]])
            return Tree(tree.node, [subpath])

    def enumerate_leaves(self):
        """Replace every leaf word with a (1-based index, word) pair so leaf
        positions survive detaching subtrees."""
        for tree in self.tree_list:
            leaves = tree.leaves()
            for i in range(len(leaves)):
                tree[tree.leaf_treeposition(i)] = (i+1, leaves[i])
# Usage text printed for -h; keep the option list in sync with the getopt
# option string in the __main__ block below.
HELP_MSG =\
"""
script for extract spine from penn-wsj-dep
options:
-h: help message
-b: begining section
-e: ending section
( training would not be processed
unless both begin and ending section is specfied )
-c: path to penn-wsj-dep, default: "./penn-wsj-deps/"
-t: path to wsj parsed tree, default: "./wsj/"
-d: path to dump conll_tree format
-n: not remove nonword leaf
(otherwise, the nonword leaf would be removed)
-l: lossy extraction
-s: not shorten the tags
"""
if __name__ == "__main__":
import getopt, sys
sec_begin = 0
sec_end = 24
conll_path = "../../../penn-wsj-deps/" #"./penn-wsj-deps/"
tree_path = "../../../wsj/"
d_filename = "../../../wsj_conll_tree/lossy/"
is_rm_none_word = True
is_lossy = False
is_short_tag = True
try:
opt_spec = "hb:e:c:t:d:nl"
opts, args = getopt.getopt(sys.argv[1:], opt_spec)
for opt, value in opts:
if opt == "-h":
print HELP_MSG
sys.exit(0)
elif opt == "-n":
is_rm_none_word = False
elif opt == "-l":
is_lossy = True
elif opt == "-s":
is_short_tag = False
elif opt == "-b":
sec_begin = int(value)
elif opt == "-e":
sec_end = int(value)
elif opt == "-c":
conll_path = value
elif opt == "-t":
tree_path = value
elif opt == "-d":
d_filename = value
else:
print "invalid input, see -h"
sys.exit(0)
if sec_begin >= 0 and sec_end >= 0:
extract_sec = range(sec_begin, sec_end+1)
ctg = ConllTreeGenerator(conll_path, tree_path, d_filename, extract_sec, is_rm_none_word, is_lossy)
ctg.generate_conll_trees(True)
except getopt.GetoptError, e:
print "invalid arguments!! \n" + HELP_MSG
sys.exit(1)
|
|
#!/usr/bin/env python3
__author__ = 'Bieliaievskyi Sergey'
__credits__ = ["Bieliaievskyi Sergey"]
__license__ = "Apache License"
__version__ = "1.0.1"
__maintainer__ = "Bieliaievskyi Sergey"
__email__ = "magelan09@gmail.com"
__status__ = "Release"
import curses
import curses.panel
import itertools
import os
import subprocess
import sys
def init_curses():
    """Enter curses mode (no echo, cbreak, hidden cursor, keypad on) and
    register the application's color palette.

    Returns the curses standard screen.
    """
    screen = curses.initscr()
    curses.noecho()
    curses.cbreak()
    curses.start_color()
    curses.curs_set(0)
    screen.keypad(1)
    # Color pairs used throughout the UI.
    palette = (
        (1, curses.COLOR_BLACK, curses.COLOR_CYAN),
        (2, curses.COLOR_WHITE, curses.COLOR_BLACK),
        (3, curses.COLOR_WHITE, curses.COLOR_BLUE),
        (4, curses.COLOR_RED, curses.COLOR_BLUE),
        (5, curses.COLOR_BLACK, curses.COLOR_WHITE),
        (6, curses.COLOR_RED, curses.COLOR_WHITE),
        (7, curses.COLOR_YELLOW, curses.COLOR_BLACK),
    )
    for pair_id, fg, bg in palette:
        curses.init_pair(pair_id, fg, bg)
    return screen
def shutdown_curses(scr_id):
    """Undo init_curses(): restore terminal modes, cursor, and leave curses.

    scr_id is the screen returned by init_curses(); call order follows the
    standard curses teardown sequence.
    """
    curses.nocbreak()
    curses.echo()
    scr_id.keypad(0)
    curses.curs_set(1)
    curses.endwin()
def create_main_window(screen_id, height, width):
    """Build the two-column main UI: a title bar plus left/right list panels.

    Returns ((left_win, left_panel), (right_win, right_panel)).
    """
    def show_win(win_id):
        # blue background, cleared, with a box border
        win_id.bkgd(' ', curses.color_pair(3))
        win_id.clear()
        win_id.box()

    # title bar across the top line
    screen_id.addstr(0, 0, ' ' * width, curses.color_pair(3))
    screen_id.addstr(0, 3, 'CA manager', curses.color_pair(3) | curses.A_BOLD)
    # two side-by-side windows below the title, leaving room for the menu bar
    left_scr = curses.newwin(height - 4, int(width / 2) - 1, 1, 0)
    left_panel = curses.panel.new_panel(left_scr)
    right_scr = curses.newwin(height - 4, int(width / 2) + 2, 1, int(width / 2) - 1)
    right_panel = curses.panel.new_panel(right_scr)
    show_win(left_scr)
    show_win(right_scr)
    curses.panel.update_panels()
    return (left_scr, left_panel), (right_scr, right_panel)
def get_lines_from_index(page, lines_per_page, filterit=None):
    """Read one page of certificate records from $PKI_ROOT/index.txt.

    page / lines_per_page select the slice of the tab-separated index;
    filterit is an optional (field, substring) pair — records whose field
    does not contain the substring are skipped. Returns a list of dicts
    holding the index fields, the parsed subject components and expiry.
    """
    def get_expiration_date(cn):
        # Pull the "Not After" line out of the stored cert text; empty
        # string when the cert file is missing.
        # NOTE(review): an IndexError (no "Not After" line present) is not
        # caught here — only IOError is.
        try:
            cert = open(os.environ['PKI_ROOT'] + '/Certs/' + cn + '_cert.pem')
            lines = cert.readlines()
            date = [l for l in lines if 'Not After' in l][0].split('Not After :')[1]
            cert.close()
        except IOError:
            return ''
        return date.rstrip()

    def pars_subj():
        # Parse "/C=XX/CN=name/..." from the current row (closure over
        # splited_info, re-bound on every loop iteration below).
        return {element.split('=')[0]: element.split('=')[1] for element in splited_info[5].split('/') if element}

    pure_index_slice = []
    with open('%s%s' % (os.environ['PKI_ROOT'], '/index.txt')) as fd:
        # NOTE(review): the +1 makes every page skip one extra line (the
        # very first line of index.txt is never shown) — confirm intended.
        start_line = page * lines_per_page + 1
        iter_index_slice = itertools.islice(fd, start_line, start_line + lines_per_page)
        for line in iter_index_slice:
            splited_info = line.rstrip().split('\t')
            cert_unit_dict = {'Status': splited_info[0],
                              'cert_file_name': splited_info[3],
                              'findit': filterit}
            cert_unit_dict.update(pars_subj())
            cert_unit_dict['expire'] = get_expiration_date(cert_unit_dict['CN'])
            if filterit and filterit[1] not in cert_unit_dict.get(filterit[0], ''):
                continue
            pure_index_slice.append(cert_unit_dict)
    return pure_index_slice
def show_lines(left_panel, right_panel, page=0, max_lines=0, findit=None):
    """Render one page (up to 2 * max_lines records) into the two windows.

    findit is the optional (field, substring) filter; the matching
    substring is highlighted within each row. Returns the rendered record
    dicts, each annotated with the window and row it was drawn on.
    """
    def fill_panel(panel, info):
        # One row per record: status, CN, e-mail, expiry — CN/e-mail column
        # widths sized from the widest values on this page.
        for y, line in enumerate(info):
            ready_line = '%s %-*s %-*s %s' % (line['Status'],
                                              second_colom_width,
                                              line['CN'],
                                              third_colom_width,
                                              line.get('emailAddress', ''),
                                              line.get('expire', ''))
            try:
                panel.addstr(1 + y, 2, ready_line, curses.color_pair(3))
                if findit:
                    # highlight the matched substring and remember its column
                    panel.addstr(1 + y, 2 + ready_line.find(findit[1]), findit[1], curses.color_pair(4) | curses.A_BOLD)
                    line['findit_position'] = 2 + ready_line.find(findit[1])
                # remember where the row was drawn for cursor handling
                line['screen'] = panel
                line['y'] = 1 + y
            except:
                # row did not fit into the window; skip drawing it
                pass
        panel.refresh()

    curses.curs_set(0)
    cert_list = get_lines_from_index(page, max_lines * 2, findit)
    if cert_list:
        second_colom_width = len(max([cn['CN'] for cn in cert_list], key=len)) + 2
        third_colom_width = len(max([email.get('emailAddress', '') for email in cert_list], key=len)) + 2
        left_panel.clear()
        left_panel.box()
        fill_panel(left_panel, cert_list[:max_lines])
        right_panel.clear()
        right_panel.box()
        fill_panel(right_panel, cert_list[max_lines:])
    return cert_list
def keyborad_processor(main_screen):
    """Main interactive loop of the CA manager.

    Renders the paged certificate list in the two panels and dispatches
    keyboard commands (see the bottom menu bar) until q/Q is pressed.
    Key ordinals: 258/259 arrows, 338/339 PgDn/PgUp, 99 'c', 114 'r',
    113/81 q/Q, 112/80 p/P, 115/83 s/S, 110 'n', 22/18/1 Ctrl+V/R/A.
    """
    def clear_cursor():
        # Repaint the current row with the normal palette, restoring the
        # search-match highlight if one is present on this row.
        visible_lines[cursor_position]['screen'].chgat(visible_lines[cursor_position]['y'],
                                                       1,
                                                       int(width / 2) - 3,
                                                       curses.color_pair(3))
        if visible_lines[cursor_position]['findit']:
            visible_lines[cursor_position]['screen'].addstr(visible_lines[cursor_position]['y'],
                                                            visible_lines[cursor_position]['findit_position'],
                                                            visible_lines[cursor_position]['findit'][1],
                                                            curses.color_pair(4) | curses.A_BOLD)
        visible_lines[cursor_position]['screen'].refresh()

    def draw_cursor():
        # Highlight the row under the cursor (inverse palette), re-marking
        # any search match on it.
        if visible_lines:
            visible_lines[cursor_position]['screen'].chgat(visible_lines[cursor_position]['y'],
                                                           1,
                                                           int(width / 2) - 3,
                                                           curses.color_pair(5))
            if visible_lines[cursor_position]['findit']:
                visible_lines[cursor_position]['screen'].addstr(visible_lines[cursor_position]['y'],
                                                                visible_lines[cursor_position]['findit_position'],
                                                                visible_lines[cursor_position]['findit'][1],
                                                                curses.color_pair(6) | curses.A_BOLD)
            visible_lines[cursor_position]['screen'].refresh()

    def move_cursor_down():
        # Advance within the page; load the next page when at the end of a
        # completely full page.
        nonlocal current_page
        nonlocal cursor_position
        nonlocal visible_lines
        if cursor_position + 1 == len(visible_lines) and len(visible_lines) == max_lines * 2:
            current_page += 1
            cursor_position = 0
            visible_lines = show_lines(left[0], right[0], page=current_page, max_lines=max_lines)
        else:
            cursor_position = cursor_position + 1 if cursor_position + 1 < len(visible_lines) else cursor_position

    def move_cursor_up():
        # Retreat within the page; load the previous page from its last row
        # when already at the top.
        nonlocal current_page
        nonlocal cursor_position
        nonlocal visible_lines
        if current_page > 0 and cursor_position == 0:
            current_page -= 1
            visible_lines = show_lines(left[0], right[0], page=current_page, max_lines=max_lines)
            cursor_position = len(visible_lines) - 1
        else:
            cursor_position = cursor_position - 1 if cursor_position > 0 else cursor_position

    def call_search_dialog():
        # Modal search prompt filling `search_string`.
        # Returns True on Enter (10), False on Esc (27).
        nonlocal search_string
        sid = edit_box(70, 5, 5, ' Searching for... ')
        while True:
            key = sid.getch()
            if key == 10:
                break
            if key == 27:
                curses.curs_set(0)
                return False
            edit_box_keyborad_processor(sid, key, search_string)
        return True

    def new_cert_dialog():
        # Modal three-field form (name / e-mail / validity days).
        # Tab (9) cycles fields, Enter (10) accepts, Esc (27) cancels.
        nonlocal obj_set
        obj_set = [{'win_id': edit_box(70, 5, 5, ' Name '), 'buffer': [], 'onlydigit': False},
                   {'win_id': edit_box(70, 8, 5, ' E-mail '), 'buffer': [], 'onlydigit': False},
                   {'win_id': edit_box(70, 11, 5, ' Expired in ... '), 'buffer': [], 'onlydigit': True}]
        for obj in reversed(obj_set):
            obj['win_id'].refresh()
        cur_object = 0
        while True:
            key = obj_set[cur_object]['win_id'].getch()
            if key == 9:
                cur_object = cur_object + 1 if cur_object + 1 < len(obj_set) else 0
            if key == 10:
                break
            if key == 27:
                curses.curs_set(0)
                return False
            edit_box_keyborad_processor(obj_set[cur_object]['win_id'],
                                        key,
                                        obj_set[cur_object]['buffer'],
                                        obj_set[cur_object]['onlydigit'])
        return True

    def menu():
        # Bottom menu bar: key letter in bold, description in cyan.
        offset = 0
        for item in ['Q: Exit ',
                     'S: Search ',
                     'Ctrl+V: show valid ',
                     'Ctrl+R: show revoked ',
                     'Ctrl+A: show all ',
                     'C: Gen CRL ',
                     'N: New ',
                     'P: p12 gen ',
                     'R: Revoke ']:
            menu_item = item.split(':')
            try:
                main_screen.addstr(height - 1, offset, menu_item[0], curses.color_pair(2) | curses.A_BOLD)
                main_screen.addstr(height - 1, offset + len(menu_item[0]) + 1, '%s' % menu_item[1], curses.color_pair(1))
            except:
                # writing past the last screen cell raises; ignore overflow
                pass
            offset = offset + len(item) + 2

    def show_me_screen():
        # (Re)build the whole screen — used at startup and after a resize.
        nonlocal left, right, height, width, max_lines, visible_lines
        height, width = main_screen.getmaxyx()
        left, right = create_main_window(main_screen, height, width)
        max_lines = min([left[0].getmaxyx()[0], right[0].getmaxyx()[0]])
        max_lines -= 2  # subtract the box borders
        visible_lines = show_lines(left[0], right[0], page=current_page, max_lines=max_lines)
        draw_cursor()
        main_screen.refresh()
        curses.panel.update_panels()
        menu()

    cursor_position = 0
    current_page = 0
    left, right, height, width = None, None, 0, 0
    max_lines = 0
    visible_lines = []
    show_me_screen()
    while True:
        pressed_key = main_screen.getch()
        # clear the status/message line above the menu bar
        try:
            main_screen.move(height - 3, 1)
        except:
            pass
        main_screen.clrtoeol()
        if pressed_key == curses.KEY_RESIZE:
            if curses.is_term_resized(height, width):
                main_screen.clear()
                main_screen.refresh()
                del left
                del right
                show_me_screen()
        if pressed_key == 258:  # KEY_DOWN
            clear_cursor()
            move_cursor_down()
            draw_cursor()
        if pressed_key == 99:  # 'c' -> regenerate the CRL
            generate_crl()
            visible_lines = show_lines(left[0],
                                       right[0],
                                       page=current_page,
                                       max_lines=max_lines)
        if pressed_key == 259:  # KEY_UP
            clear_cursor()
            move_cursor_up()
            draw_cursor()
        if pressed_key == 338:  # PgDn: jump to page end, then page forward
            clear_cursor()
            cursor_position = len(visible_lines) - 1
            move_cursor_down()
            draw_cursor()
        if pressed_key == 339:  # PgUp: jump to page top, then page back
            clear_cursor()
            cursor_position = 0
            move_cursor_up()
            draw_cursor()
        if pressed_key == 114:  # 'r' -> revoke the selected cert
            if visible_lines[cursor_position]['Status'] == 'V':
                # generate_crl() runs only when revoke_cert() returned 0
                revoke_cert(visible_lines[cursor_position]) or generate_crl()
                visible_lines = show_lines(left[0],
                                           right[0],
                                           page=current_page,
                                           max_lines=max_lines)
            else:
                main_screen.addstr(height - 3,
                                   1,
                                   'Already revoked',
                                   curses.color_pair(7) | curses.A_BOLD | curses.A_BLINK)
        if pressed_key == 113 or pressed_key == 81:  # q / Q
            break
        if pressed_key == 112 or pressed_key == 80:  # p / P -> build .p12
            try:
                p12_file_size = os.path.getsize('%s/Certs/%s.p12' % (os.environ['PKI_ROOT'],
                                                                     visible_lines[cursor_position]['CN']))
            except FileNotFoundError:
                p12_file_size = -1
            # only build when missing/empty and the cert is still valid
            if p12_file_size <= 0 and visible_lines[cursor_position]['Status'] == 'V':
                generate_p12(visible_lines[cursor_position]['CN'])
                visible_lines = show_lines(left[0],
                                           right[0],
                                           page=current_page,
                                           max_lines=max_lines)
            else:
                main_screen.addstr(height - 3,
                                   1,
                                   'Already exist',
                                   curses.color_pair(7) | curses.A_BOLD | curses.A_BLINK)
        if pressed_key == 22:  # Ctrl+V -> show only valid certs
            cursor_position = 0
            current_page = 0
            visible_lines = show_lines(left[0],
                                       right[0],
                                       page=current_page,
                                       max_lines=max_lines, findit=('Status', 'V'))
            draw_cursor()
        if pressed_key == 18:  # Ctrl+R -> show only revoked certs
            cursor_position = 0
            current_page = 0
            visible_lines = show_lines(left[0],
                                       right[0],
                                       page=current_page,
                                       max_lines=max_lines, findit=('Status', 'R'))
            draw_cursor()
        if pressed_key == 1:  # Ctrl+A -> show all certs
            cursor_position = 0
            current_page = 0
            visible_lines = show_lines(left[0],
                                       right[0],
                                       page=current_page,
                                       max_lines=max_lines)
            draw_cursor()
        if pressed_key == 115 or pressed_key == 83:  # s / S -> search by CN
            search_string = []
            cursor_position = 0
            find_str = None
            if call_search_dialog():
                find_str = ('CN', ''.join(search_string))
            visible_lines = show_lines(left[0],
                                       right[0],
                                       page=current_page,
                                       max_lines=max_lines,
                                       findit=find_str)
            draw_cursor()
        if pressed_key == 110:  # 'n' -> create a new certificate
            obj_set = []
            new_cert_dialog()
            # NOTE(review): `len(a and b and c)` measures only the last (or
            # first empty) buffer — it does NOT verify that all three fields
            # are filled, and the dialog's Esc/cancel result is ignored.
            # Looks like a bug; confirm intended behavior.
            if len(obj_set[0]['buffer'] and obj_set[1]['buffer'] and obj_set[2]['buffer']):
                # request -> sign -> p12; each step runs only while the
                # previous one returned 0 (short-circuit on `or`)
                (create_request(''.join(obj_set[0]['buffer']),
                                ''.join(obj_set[1]['buffer'])) or
                 sign_cert(''.join(obj_set[0]['buffer']),
                           ''.join(obj_set[1]['buffer']),
                           ''.join(obj_set[2]['buffer'])) or
                 generate_p12(''.join(obj_set[0]['buffer'])))
                visible_lines = show_lines(left[0],
                                           right[0],
                                           page=current_page,
                                           max_lines=max_lines)
            draw_cursor()
def edit_box(length, y_posistion, x_position, caption):
    """Create a 3-row bordered single-line input window with a drop shadow.

    Returns the curses window; keystrokes are applied to it by
    edit_box_keyborad_processor.
    """
    edit_scr = curses.newwin(3, length, y_posistion, x_position)
    # shadow window offset one cell down/right for a 3D effect
    shadow = curses.newwin(3, length, y_posistion + 1, x_position + 1)
    edit_scr.bkgd(' ', curses.color_pair(1))
    shadow.bkgd(' ', curses.color_pair(2))
    edit_scr.clear()
    edit_scr.box()
    shadow.clear()
    shadow.refresh()
    edit_scr.addstr(0, 3, caption)
    curses.curs_set(1)
    edit_scr.keypad(0)
    # place the cursor at the start of the input row (inside the border)
    edit_scr.move(1, 1)
    return edit_scr
def edit_box_keyborad_processor(scr, char, edit_string, onlydigit=False):
    """Apply one keypress to an edit_box window.

    Accepted characters (digits, ASCII letters, - _ , . @) are echoed into
    the window and appended to edit_string while it fits the box width;
    when onlydigit is True, non-digits among them are ignored. Key 127
    (backspace) removes the last character and repositions the cursor.
    """
    _, box_width = scr.getmaxyx()
    printable = (
        48 <= char <= 57        # 0-9
        or 65 <= char <= 90     # A-Z
        or 97 <= char <= 122    # a-z
        or char in (45, 95, 44, 46, 64)  # - _ , . @
    )
    if printable:
        symbol = chr(char)
        if onlydigit and not symbol.isdigit():
            return
        if len(edit_string) < box_width - 3:
            scr.addstr(1, len(edit_string) + 1, symbol)
            edit_string.append(symbol)
    if char == 127 and edit_string:
        edit_string.pop()
        cursor_col = len(edit_string) + 1
        scr.addstr(1, cursor_col, ' ')
        scr.move(1, cursor_col)
def create_files(file_name, init_txt='', rand=False):
    """Create file_name with mode 0o600.

    Content is init_txt (text mode), or 1024 bytes from /dev/urandom when
    rand is True (binary mode). Returns 0 on success, 1 on IOError (the
    error is printed).
    """
    mode = 'wb' if rand else 'w'
    try:
        with open(file_name, mode) as out:
            payload = open('/dev/urandom', 'rb').read(1024) if rand else init_txt
            out.write(payload)
        os.chmod(file_name, 0o600)
    except IOError as err:
        print('%s %s' % (file_name, os.strerror(err.errno)))
        return 1
    return 0
def create_folders(*folders):
    """Create each directory with mode 0o700, stopping at the first failure.

    Returns 0 when every mkdir succeeds, 1 on the first OSError (which is
    printed).
    """
    for path in folders:
        try:
            os.mkdir(path, 0o700)
        except OSError as err:
            print(path + ' ' + os.strerror(err.errno))
            return 1
    return 0
def shell_command(shell_cmd):
    """Run a command given as a space-separated string (180 s timeout).

    A '~' inside any token is converted back to a space, which is how
    callers smuggle spaces through the split. Returns the process exit
    code, or 1 when the command timed out or never ran.
    """
    exec_status = 1
    try:
        argv = [token.replace('~', ' ') for token in shell_cmd.split(' ')]
        exec_status = subprocess.call(argv, timeout=180)
    except subprocess.TimeoutExpired as err:
        print(err)
    finally:
        # NOTE: `return` in finally also swallows any other exception,
        # leaving exec_status at 1 — mirrors the original behavior.
        return exec_status
def create_ca_req():
    """Step 1 of CA setup: generate the CA key and certificate request.

    The request subject is read from subj.info (located next to this
    script) with CN=CAserver appended. Returns the openssl exit status;
    stays 1 when subj.info is missing.
    """
    print('STEP 1 (create request) STEP 2 (create self-signed CA)\n\n%s\n\n\n'
          'STEP 1.>>>>>Trying to generate CA request <<<<<<\n' % ('-' * 70))
    exec_status = 1
    try:
        with open('%s%s' % (sys.argv[0][:sys.argv[0].rfind('/')], '/subj.info')) as fd:
            openssl_subj = fd.readline()
            # spaces are encoded as '~' so shell_command's split-on-space
            # keeps the subject in one argv element ('~' is turned back
            # into ' ' inside shell_command)
            openssl_subj = openssl_subj.replace(' ', '~')
            openssl_subj = '/'.join([openssl_subj, 'CN=CAserver'])
            exec_status = shell_command('/usr/bin/openssl req -new '
                                        '-subj %s '
                                        '-keyout %s/private/cakey.pem '
                                        '-out %s/careq.pem '
                                        '-config %s/openssl.cnf' % (openssl_subj,
                                                                    os.environ['PKI_ROOT'],
                                                                    os.environ['PKI_ROOT'],
                                                                    os.environ['PKI_ROOT']))
    except FileNotFoundError as err:
        print(err)
        input('Press ENTER to continue')
    finally:
        # NOTE: `return` in finally swallows any other exception as well
        return exec_status
def sel_sign_ca():
    """Step 2 of CA setup: self-sign the request produced by create_ca_req.

    Runs `openssl ca -selfsign` against $PKI_ROOT; returns the openssl
    exit status (0 on success), pausing for the user on failure.
    """
    print('STEP 1 (create request) STEP 2 (create self-signed CA)\n\n%s\n\n\n'
          'STEP 2.>>>>>Trying to create self-signed CA <<<<<<\n\n' % ('-' * 70))
    exit_status = shell_command('/usr/bin/openssl ca '
                                '-create_serial -out %s/cacert.pem '
                                '-keyfile %s/private/cakey.pem '
                                '-selfsign -extensions v3_ca -config %s/openssl.cnf '
                                '-in %s/careq.pem' % (os.environ['PKI_ROOT'],
                                                      os.environ['PKI_ROOT'],
                                                      os.environ['PKI_ROOT'],
                                                      os.environ['PKI_ROOT']))
    if exit_status:
        print('(create self-signed CA) >>>>>FAILED<<<<<<')
        input('Press ENTER to continue')
    print('\n\n\n')
    return exit_status
def prepare2run_shellcommand():
    """Leave curses mode and clear the terminal before running a console command."""
    curses.endwin()
    shell_command('clear')
def generate_crl():
    """Step 2 of revocation: regenerate the CRL and run the optional hook.

    Drops out of curses first if needed. When openssl succeeds and a
    non-empty hooks/crl.hook exists next to this script, it is executed.
    Returns the openssl exit status (0 on success).
    """
    if not curses.isendwin():
        prepare2run_shellcommand()
    print('STEP 1 (revoke cert) STEP 2 (generate CRL)\n\n%s\n\n\n'
          'STEP 2.>>>>>Trying to generate CRL<<<<<<\n\n' % ('-' * 70))
    exit_status = shell_command('/usr/bin/openssl ca -gencrl '
                                '-out %s/crl.pem '
                                '-config %s/openssl.cnf' % (os.environ['PKI_ROOT'],
                                                            os.environ['PKI_ROOT']))
    # directory this script lives in; hooks are looked up beside it
    crl_hook = sys.argv[0][:sys.argv[0].rfind('/')]
    if (not exit_status and
            os.path.exists(crl_hook + '/hooks/crl.hook') and
            os.path.getsize(crl_hook + '/hooks/crl.hook') > 0):
        hook_stat = shell_command(crl_hook + '/hooks/crl.hook')
        if hook_stat:
            print('CRL hook execution >>>>>FAILED<<<<<<')
            input('Press ENTER to continue')
    elif exit_status:
        print('STEP 2 (generate CRL) >>>>>FAILED<<<<<<')
        input('Press ENTER to continue')
    print('\n\n')
    return exit_status
def revoke_cert(cert_detail):
    """STEP 1 of revocation: revoke one signed certificate.

    cert_detail: dict carrying 'cert_file_name', 'CN' and 'emailAddress'.
    On success runs <script dir>/hooks/revoke.hook (if present and non-empty)
    with those three values as arguments.  Returns the openssl exit status.
    """
    if not curses.isendwin():
        prepare2run_shellcommand()
    pki_root = os.environ['PKI_ROOT']
    cert_name = cert_detail['cert_file_name']
    print('STEP 1 (revoke cert) STEP 2 (generate CRL)\n\n%s\n\n\n'
          'STEP 1.>>>>>Trying to revoke %s<<<<<<\n\n' % ('-' * 70, cert_name))
    exit_status = shell_command('/usr/bin/openssl ca -revoke %s/signed_certs/%s.pem -config %s/openssl.cnf' %
                                (pki_root, cert_name, pki_root))
    revoke_hook = sys.argv[0][:sys.argv[0].rfind('/')]
    hook_script = revoke_hook + '/hooks/revoke.hook'
    if not exit_status and os.path.exists(hook_script) and os.path.getsize(hook_script) > 0:
        hook_stat = shell_command('%s/hooks/revoke.hook %s %s %s' %
                                  (revoke_hook,
                                   cert_name,
                                   cert_detail['CN'],
                                   cert_detail['emailAddress']))
        if hook_stat:
            print('CRL hook execution >>>>>FAILED<<<<<<')
            input('Press ENTER to continue')
    elif exit_status:
        print('(revoke cert)>>>>>FAILED<<<<<<')
        input('Press ENTER to continue')
    print('\n\n')
    return exit_status
def create_request(cn, email):
    """STEP 1 of certificate creation: generate a key and signing request.

    cn:    common name to embed in the subject.
    email: optional emailAddress component (falsy value omits it).
    The subject prefix is read from <script dir>/subj.info.
    Returns 0 on success, 1 if subj.info is unreadable, otherwise the
    openssl exit status.
    """
    if not curses.isendwin():
        prepare2run_shellcommand()
    print('STEP 1 (create request) STEP 2 (sign request) STEP3 (generate p12)\n\n%s\n\n\n'
          'STEP 1.>>>>>Trying to generate request for %s<<<<<<\n\n' % ('-' * 70, cn))
    subj_path = '%s/subj.info' % sys.argv[0][:sys.argv[0].rfind('/')]
    try:
        # 'with' closes the handle; the old bare open() leaked it.
        with open(subj_path) as subj_file:
            openssl_subj = subj_file.readline()
    except IOError as err:
        # BUG FIX: the old code did "'%s/subj.info' % [path, strerror, msg]"
        # (formatting the whole list repr into one string) and then
        # ' '.join(<that string>), which printed it one character at a time.
        print(' '.join([subj_path,
                        os.strerror(err.errno),
                        '\n(create request) >>>>>FAILED<<<<<<']))
        input('Press ENTER to continue')
        return 1
    # openssl's -subj parser chokes on spaces; '~' is translated back by
    # the openssl.cnf setup used by this tool.
    openssl_subj = openssl_subj.replace(' ', '~')
    openssl_subj = '/'.join([openssl_subj,
                             'CN=%s' % cn,
                             'emailAddress=%s' % email if email else ''])
    exit_status = shell_command('/usr/bin/openssl req -new '
                                '-config %s/openssl.cnf '
                                '-subj %s '
                                '-keyout %s/Certs/%s_key.pem '
                                '-out %s/Certs/%s_req.pem' %
                                (os.environ['PKI_ROOT'],
                                 openssl_subj,
                                 os.environ['PKI_ROOT'],
                                 cn,
                                 os.environ['PKI_ROOT'],
                                 cn))
    if exit_status:
        input('Press ENTER to continue')
    print('\n\n\n')
    return exit_status
def sign_cert(cn, email, days):
    """STEP 2 of certificate creation: sign the pending request for *cn*.

    days: validity period passed to openssl's -days option.
    On success runs <script dir>/hooks/new.hook (if present and non-empty)
    with cn, email and days as arguments.  Returns the openssl exit status.
    """
    if not curses.isendwin():
        prepare2run_shellcommand()
    pki_root = os.environ['PKI_ROOT']
    print('STEP 1 (create request) STEP 2 (sign request) STEP3 (generate p12)\n\n%s\n\n\n'
          'STEP 2. >>>>>Trying to sign request %s<<<<<<\n\n' % ('-' * 70, cn))
    exit_status = shell_command('/usr/bin/openssl ca '
                                '-config %s/openssl.cnf '
                                '-days %s '
                                '-in %s/Certs/%s_req.pem '
                                '-out %s/Certs/%s_cert.pem' %
                                (pki_root, days, pki_root, cn, pki_root, cn))
    new_cert_hook = sys.argv[0][:sys.argv[0].rfind('/')]
    hook_script = new_cert_hook + '/hooks/new.hook'
    if not exit_status and os.path.exists(hook_script) and os.path.getsize(hook_script) > 0:
        hook_stat = shell_command('%s/hooks/new.hook %s %s %s' % (new_cert_hook, cn, email, days))
        if hook_stat:
            print('CRL hook execution >>>>>FAILED<<<<<<')
            input('Press ENTER to continue')
    elif exit_status:
        print('(sign request) >>>>>FAILED<<<<<<')
        input('Press ENTER to continue')
    print('\n\n\n')
    return exit_status
def generate_p12(cn):
    """STEP 3 of certificate creation: export cert + key as a PKCS#12 bundle.

    Writes $PKI_ROOT/Certs/<cn>.p12; if openssl failed and left a zero-byte
    file behind, it is removed.  Returns the openssl exit status.
    """
    if not curses.isendwin():
        prepare2run_shellcommand()
    pki_root = os.environ['PKI_ROOT']
    print('STEP 1 (create request) STEP 2 (sign request) STEP3 (generate p12)\n\n%s\n\n\n'
          'STEP 3. >>>>>Trying to generate p12 key<<<<<<\n\n' % ('-' * 70))
    exit_status = shell_command('/usr/bin/openssl pkcs12 -export -clcerts '
                                '-in %s/Certs/%s_cert.pem '
                                '-inkey %s/Certs/%s_key.pem -out %s/Certs/%s.p12' %
                                (pki_root, cn, pki_root, cn, pki_root, cn))
    if exit_status:
        print('(generate p12) >>>>>FAILED<<<<<<')
    p12_path = '%s/Certs/%s.p12' % (pki_root, cn)
    try:
        if os.path.getsize(p12_path) == 0:
            print('%s is empty. Removing it ' % p12_path)
            os.remove(p12_path)
    except OSError as err:
        # Missing file (openssl never created it) lands here too.
        print(os.strerror(err.errno))
    # BUG FIX: the return used to sit in a 'finally' clause; a return inside
    # 'finally' silently swallows any exception in flight (including
    # KeyboardInterrupt).  Returning after the try/except is equivalent on
    # the normal path and no longer masks errors.
    return exit_status
# Script entry point -- dispatch on the first CLI argument:
#   create -> scaffold $PKI_ROOT folders/files and the hook scripts
#   init   -> build the CA request and self-sign it
#   (none) -> start the interactive curses UI
if __name__ == '__main__':
    if not os.environ.get('PKI_ROOT'):
        print('Please set $PKI_ROOT variable \n\npress any key to continue \n')
    else:
        shell_command("clear")
        if 'create' in sys.argv[1:]:
            create_folders('%s%s' % (os.environ['PKI_ROOT'], '/private'),
                           '%s%s' % (os.environ['PKI_ROOT'], '/Certs'),
                           '%s%s' % (os.environ['PKI_ROOT'], '/signed_certs'),
                           '%s%s' % (sys.argv[0][:sys.argv[0].rfind('/')], '/hooks'))
            # create_files() returns non-zero on failure, so the 'or' chain
            # stops at the first step that fails.
            stat = (create_files('%s%s' % (os.environ['PKI_ROOT'], '/index.txt')) or
                    create_files('%s%s' % (os.environ['PKI_ROOT'], '/crlnumber'), init_txt='03') or
                    create_files('%s%s' % (sys.argv[0][:sys.argv[0].rfind('/')], '/subj.info'),
                                 init_txt='/OU=smth/O=Example Corp/C=SM/ST=Anything/L=My_place') or
                    create_files('%s%s' % (os.environ['PKI_ROOT'], '/random'), rand=True) or
                    create_files('%s%s' % (sys.argv[0][:sys.argv[0].rfind('/')], '/hooks/crl.hook')) or
                    create_files('%s%s' % (sys.argv[0][:sys.argv[0].rfind('/')], '/hooks/new.hook')) or
                    create_files('%s%s' % (sys.argv[0][:sys.argv[0].rfind('/')], '/hooks/revoke.hook')))
            if not stat:
                # Hook scripts must be executable (and private to the owner).
                os.chmod(sys.argv[0][:sys.argv[0].rfind('/')] + '/hooks/revoke.hook', 0o700)
                os.chmod(sys.argv[0][:sys.argv[0].rfind('/')] + '/hooks/crl.hook', 0o700)
                os.chmod(sys.argv[0][:sys.argv[0].rfind('/')] + '/hooks/new.hook', 0o700)
                print('Please edit subj.info file.\n'
                      'It contains all necessary information for generating certs\n'
                      'Then run "pky.py init" command\n')
        elif 'init' in sys.argv[1:]:
            if not shell_command('openssl version'):
                # Short-circuit: only self-sign when the request succeeded
                # (create_ca_req() returns 0/falsy on success).
                create_ca_req() or sel_sign_ca()
            else:
                print('Can not find openssl.\nsudo apt-get install openssl.\n')
        else:
            # Interactive mode requires an initialized $PKI_ROOT.
            if os.path.exists(os.environ.get('PKI_ROOT') + '/index.txt'):
                screen = init_curses()
                keyborad_processor(screen)
                shutdown_curses(screen)
            else:
                print('ERROR\n '
                      '%s is not initialized\n'
                      'Run %s create first.' % (os.environ.get('PKI_ROOT'), sys.argv[0]))
|
|
from enerdata.profiles.profile import *
from enerdata.contracts.tariff import *
from expects import *
import vcr
from decimal import Decimal
# mamba BDD spec (run with the `mamba` runner): `description`, `context`,
# `before.all` and `it` are provided by mamba, `expect(...)` and the matchers
# by the `expects` library.  State built in `before.all` is shared between
# the examples through `self`.
with description('A profile with gaps'):
    with before.all:
        import random
        # Build one month of hourly data with:
        #  * a hard gap (no measure at all) between gap_start and gap_end,
        #  * ~20% of the remaining hours randomly marked invalid.
        # complete_profile keeps the full, gap-free series for reference.
        measures = []
        start = TIMEZONE.localize(datetime(2015, 3, 1, 1))
        end = TIMEZONE.localize(datetime(2015, 4, 1, 0))
        gap_start = TIMEZONE.localize(datetime(2015, 3, 15))
        gap_end = TIMEZONE.localize(datetime(2015, 3, 16))
        start_idx = start
        self.gaps = []
        self.number_invalid_hours = 0
        self.complete_profile = []
        while start_idx <= end:
            energy = random.randint(0, 10)
            self.complete_profile.append(ProfileHour(start_idx, energy, True, 0.0))
            if gap_start < start_idx < gap_end:
                # Hard gap: the hour is skipped entirely.
                self.gaps.append(start_idx)
                start_idx += timedelta(hours=1)
                continue
            if random.randint(0, 10) > 8:
                # Invalid hours also count as gaps.
                valid = False
                self.number_invalid_hours += 1
                self.gaps.append(start_idx)
            else:
                valid = True
            measures.append(ProfileHour(
                TIMEZONE.normalize(start_idx), energy, valid, 0.0
            ))
            start_idx += timedelta(hours=1)
        self.profile = Profile(start, end, measures)
    with it('has to known the gaps'):
        expect(self.profile.gaps).to(contain_exactly(*self.gaps))
    with it('has sum hours per period the be the same as total hours'):
        hours_per_period = self.profile.get_hours_per_period(T20DHA())
        assert sum(hours_per_period.values()) == self.profile.n_hours
    with it('has sum valid hours per period from measures'):
        hours_per_period = self.profile.get_hours_per_period(
            T20DHA(), only_valid=True
        )
        total_hours = self.profile.n_hours_measures - self.number_invalid_hours
        assert sum(hours_per_period.values()) == total_hours
    with it('should have estimable hours'):
        estimable_hours = self.profile.get_estimable_hours(T20DHA())
        expect(sum(estimable_hours.values())).to(be_above(0))
    with it('has to be the same the balance with the consumption + estimable'):
        tariff = T20DHA()
        balance = Counter()
        # Balance computed from the gap-free reference series; dates are
        # shifted back one minute so period lookup uses the hour's start.
        for ph in self.complete_profile:
            dt = ph.date - timedelta(minutes=1)
            period = tariff.get_period_by_date(dt)
            balance[period.code] += ph.measure
        total = sum(balance.values())
        consumption = self.profile.get_consumption_per_period(tariff)
        estimable = self.profile.get_estimable_consumption(tariff, balance)
        for period in consumption:
            energy = consumption[period] + estimable[period]
            expect(energy).to(equal(balance[period]))
    with it('has to estimate energy for the gaps'):
        # Calculate the balance
        balance = Counter()
        tariff = T20DHA()
        tariff.cof = 'A'
        for ph in self.complete_profile:
            dt = ph.date - timedelta(minutes=1)
            period = tariff.get_period_by_date(dt)
            balance[period.code] += ph.measure
        # REE coefficients are replayed from a recorded vcr cassette.
        with vcr.use_cassette('spec/fixtures/ree/201503-201504.yaml'):
            profile_estimated = self.profile.estimate(tariff, balance)
        at_least_one_diff_zero = False
        for a_profile in profile_estimated.measures:
            # 0, 0.0 and Decimal(0) or Decimal(0.0) are == 0
            if a_profile.accumulated != 0:
                at_least_one_diff_zero = True
            assert type(a_profile.accumulated) == float or isinstance(a_profile.accumulated, Decimal), "Accumulated must be inside a ProfileHour and must be a float or a Decimal instance"
        assert at_least_one_diff_zero == True, "Statistically at least one of all the estimated measures must have a dragger different than 0. Debug this scenario to validate if 'ProfileHour.accumulated' field is working properly"
        total_energy = sum(balance.values())
        expect(profile_estimated.total_consumption).to(equal(total_energy))
    with context('Is an empty profile'):
        with it('has to generate all the profile estimating'):
            balance = Counter()
            tariff = T20DHA()
            tariff.cof = 'A'
            profile = Profile(
                self.profile.start_date, self.profile.end_date, []
            )
            for ph in self.complete_profile:
                dt = ph.date - timedelta(minutes=1)
                period = tariff.get_period_by_date(dt)
                balance[period.code] += ph.measure
            with vcr.use_cassette('spec/fixtures/ree/201503-201504.yaml'):
                profile_estimated = profile.estimate(tariff, balance)
            total_energy = sum(balance.values())
            expect(profile_estimated.total_consumption).to(equal(total_energy))
        with it('has to generate all the profile estimating if all measures are invalid'):
            balance = Counter()
            tariff = T20DHA()
            tariff.cof = 'A'
            measures = []
            for ph in self.complete_profile:
                dt = ph.date - timedelta(minutes=1)
                period = tariff.get_period_by_date(dt)
                balance[period.code] += ph.measure
                # Same series but every hour flagged invalid.
                measures.append(ProfileHour(ph.date, ph.measure, False, 0.0))
            profile = Profile(
                self.profile.start_date, self.profile.end_date, measures
            )
            with vcr.use_cassette('spec/fixtures/ree/201503-201504.yaml'):
                profile_estimated = profile.estimate(tariff, balance)
            total_energy = sum(balance.values())
            expect(profile_estimated.total_consumption).to(equal(total_energy))
        with context('If the balance is less than profile'):
            with it('has to fill with 0 the gaps'):
                balance = Counter()
                tariff = T20DHA()
                tariff.cof = 'A'
                # Balance below the measured consumption: gaps get 0 energy.
                balance = self.profile.get_hours_per_period(tariff, only_valid=True)
                for period in balance:
                    balance[period] -= 10
                with vcr.use_cassette('spec/fixtures/ree/201503-201504.yaml'):
                    profile_estimated = self.profile.estimate(tariff, balance)
                expect(profile_estimated.n_hours).to(equal(len(self.complete_profile)))
                for gap in self.profile.gaps:
                    pos = bisect.bisect_left(
                        profile_estimated.measures,
                        ProfileHour(gap, 0, True, 0.0)
                    )
                    measure = profile_estimated.measures[pos]
                    expect(measure.measure).to(equal(0))
                total_energy = sum(balance.values())
                # Adjust
                profile_estimated = profile_estimated.adjust(tariff, balance)
                expect(profile_estimated.total_consumption).to(equal(total_energy))
            with it('must fail adjusting'):
                balance = Counter()
                tariff = T20DHA()
                for ph in self.profile.measures:
                    dt = ph.date - timedelta(minutes=1)
                    period = tariff.get_period_by_date(dt)
                    balance[period.code] += ph.measure
                expect(len(self.profile.gaps)).to(be_above(0))
                def adjust_error():
                    self.profile.adjust(tariff, balance)
                expect(adjust_error).to(raise_error(Exception, 'Is not possible to adjust a profile with gaps'))
# Second scenario: a complete (gap-free, all-valid) month of measures whose
# balance is then perturbed to exercise the adjust() tolerance logic.
with description('A complete profile with different energy than balance'):
    with before.all:
        import random
        measures = []
        start = TIMEZONE.localize(datetime(2015, 3, 1, 1))
        end = TIMEZONE.localize(datetime(2015, 4, 1, 0))
        # gap_start/gap_end are defined but unused here -- no gaps in this
        # scenario (kept as in the original spec).
        gap_start = TIMEZONE.localize(datetime(2015, 3, 15))
        gap_end = TIMEZONE.localize(datetime(2015, 3, 16))
        start_idx = start
        self.gaps = []
        while start_idx <= end:
            energy = random.randint(0, 10)
            measures.append(ProfileHour(
                TIMEZONE.normalize(start_idx), energy, True, 0.0
            ))
            start_idx += timedelta(hours=1)
        self.profile = Profile(start, end, measures)
    with it('must not have gaps'):
        complete_hours = Counter()
        tariff = T20DHA()
        # There is no gaps
        expect(self.profile.gaps).to(contain_exactly(*self.gaps))
        profile_hours = self.profile.get_hours_per_period(
            tariff, only_valid=True
        )
        for ph in self.profile.measures:
            dt = ph.date - timedelta(minutes=1)
            period = tariff.get_period_by_date(dt)
            complete_hours[period.code] += 1
        for period in complete_hours:
            expect(profile_hours[period]).to(equal(complete_hours[period]))
    with it('must not to estimate'):
        tariff = T20DHA()
        balance = Counter()
        for ph in self.profile.measures:
            dt = ph.date - timedelta(minutes=1)
            period = tariff.get_period_by_date(dt)
            balance[period.code] += ph.measure
        with vcr.use_cassette('spec/fixtures/ree/201503-201504.yaml'):
            profile = self.profile.estimate(tariff, balance)
        expect(profile.n_hours).to(equal(self.profile.n_hours))
    with context('The difference adjusting must be customitzable and checked per period'):
        with context('If is acceptable'):
            with it('doesn\'t have to adjust'):
                tariff = T20DHA()
                balance = Counter()
                # Scale the energy x1000 so a +-1000 tolerance is meaningful.
                measures = [
                    ProfileHour(m.date, m.measure * 1000, m.valid, 0.0)
                    for m in self.profile.measures
                ]
                profile = Profile(
                    self.profile.start_date,
                    self.profile.end_date,
                    measures
                )
                for ph in profile.measures:
                    dt = ph.date - timedelta(minutes=1)
                    period = tariff.get_period_by_date(dt)
                    balance[period.code] += ph.measure
                # NOTE(review): `period` leaks out of the loop above -- the
                # tweak is applied to whichever period was iterated last.
                balance[period.code] -= 940
                total_energy = sum(balance.values())
                expect(total_energy).to(be_below(profile.total_consumption))
                profile = profile.adjust(tariff, balance, 1000)
                expect(total_energy).to(equal(profile.total_consumption - 940))
                balance = Counter()
                for ph in profile.measures:
                    dt = ph.date - timedelta(minutes=1)
                    period = tariff.get_period_by_date(dt)
                    balance[period.code] += ph.measure
                balance[period.code] += 360
                total_energy = sum(balance.values())
                expect(total_energy).to(be_above(profile.total_consumption))
                profile = profile.adjust(tariff, balance, 1000)
                expect(total_energy).to(equal(profile.total_consumption + 360))
        with context('If is not acceptable'):
            with it('have to adjust and record the period adjusted'):
                tariff = T20DHA()
                balance = Counter()
                for ph in self.profile.measures:
                    dt = ph.date - timedelta(minutes=1)
                    period = tariff.get_period_by_date(dt)
                    balance[period.code] += ph.measure
                # +10 over a tolerance of 1 forces an adjustment on the last
                # iterated period.
                balance[period.code] += 10
                adjusted_periods = [period.code]
                total_energy = sum(balance.values())
                expect(total_energy).to(be_above(self.profile.total_consumption))
                profile = self.profile.adjust(tariff, balance, 1)
                expect(total_energy).to(equal(profile.total_consumption))
                adjusted_balance = Counter()
                for ph in profile.measures:
                    dt = ph.date - timedelta(minutes=1)
                    period = tariff.get_period_by_date(dt)
                    adjusted_balance[period.code] += ph.measure
                for period in adjusted_balance:
                    expect(adjusted_balance[period]).to(equal(balance[period]))
                expect(profile.adjusted_periods).to(
                    contain_exactly(*adjusted_periods)
                )
|
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import csv
import os.path
import re
import sys
from collections import defaultdict
from collections import namedtuple
from collections import OrderedDict
__author__ = "Eldar Abusalimov"
def outerr(*args, **kwargs):
    """print() wrapper that writes to sys.stderr unless a file= is given."""
    if 'file' not in kwargs:
        kwargs['file'] = sys.stderr
    print(*args, **kwargs)
# SDF data header line, e.g. "> <PROP_NAME> (MOL_ID)"; group 1 is the
# property name, group 2 the optional molecule id.
SDF_HEADER_RE = re.compile(r'^>\s*<(.*?)>\s*(?:\((.*?)\)\s*)?$')
def parse_sdf(filename, id_prop):
    """Parse an SDF file into {molecule-id: {property: value}}.

    id_prop names the property that must echo each molecule's id; an
    id mismatch or a duplicated property raises ValueError.
    NOTE(review): Python 2 code -- binary-mode file matched with a str
    regex, and dict.iteritems(); do not run under Python 3 as-is.
    """
    result = defaultdict(dict)
    with open(filename, 'rb') as sdfile:
        it = iter(sdfile)
        for line in it:
            m = SDF_HEADER_RE.match(line)
            if not m:
                continue
            prop = m.group(1)
            mol = m.group(2) or None
            # The property's value sits on the line after its header.
            value = next(it).rstrip()
            props = result[mol]
            if prop in props:
                raise ValueError('Conflicting property name: {}'.format(prop))
            props[prop] = value
    outerr('checking...')
    # Sanity check: the stored id property must match the header's mol id.
    for mol, props in result.iteritems():
        if props.get(id_prop) != mol:
            raise ValueError('ID mismatch: {} != {}'
                             .format(props.get(id_prop), mol))
    return dict(result)
def read_csv(filename, prop_names):
    """Read a CSV file into {molecule-id: row-dict}.

    The dialect is sniffed from the first KiB.  prop_names[0] names the ID
    column; when the detected header does not contain it, the file is
    assumed headerless and prop_names supplies the field names instead.
    """
    id_prop = prop_names[0]
    with open(filename, 'rb') as csvfile:
        dialect = csv.Sniffer().sniff(csvfile.read(1024))
        csvfile.seek(0)
        reader = csv.DictReader(csvfile, dialect=dialect)
        if reader.fieldnames and id_prop not in reader.fieldnames:
            # First row is data, not a header: rewind and impose field names.
            csvfile.seek(0)
            reader = csv.DictReader(csvfile, prop_names, dialect=dialect)
        return dict((row[id_prop], row) for row in reader)
def join_results(haystack, needle, row_type, include_none=False):
    """Project *haystack* rows onto the molecule ids listed in *needle*.

    Returns an OrderedDict of {mol-id: row_type tuple} ordered by the row's
    property values (as int where they parse) with the id appended as the
    final tie-breaker.  Molecules absent from haystack appear as None only
    when include_none is true; otherwise they are dropped silently.
    """
    result = dict()
    for mol in needle:
        # try/except/else: the else branch runs only when the lookup
        # succeeded, so a missing molecule never yields a partial row.
        try:
            props = haystack[mol]
        except KeyError:
            if include_none:
                result[mol] = None
        else:
            result[mol] = row_type(**dict((prop, props.get(prop, ''))
                                          for prop in row_type._prop_names))
    # NOTE(review): rows stored as None (include_none=True) would crash in
    # sort_key below (None[1:]) -- presumably sorting is only reached with
    # the default include_none=False; confirm before changing callers.
    def sort_key(mol_row):
        mol, row = mol_row
        key_props = []
        # Every property after the id, numeric where possible.
        for prop in row[1:]:
            try:
                prop = int(prop, base=10)
            except ValueError:
                pass
            key_props.append(prop)
        # for/else with no break: the else clause always runs, appending
        # the molecule id as the last (tie-breaking) key component.
        else:
            key_props.append(row[0])
        return key_props
    return OrderedDict(sorted(result.iteritems(), key=sort_key))
def write_csv(filename, table, row_type=None):
    """Dump the table's rows as CSV; prepend a header row when row_type is given."""
    with open(filename, 'wb') as out_file:
        out = csv.writer(out_file)
        if row_type is not None:
            out.writerow(row_type._prop_names)
        out.writerows(table.itervalues())
def write_lst(filename, table, row_type=None):
    """Dump molecule ids in LST format: an '*e*' banner, then one id per line.

    row_type is accepted for signature parity with write_csv but unused.
    """
    with open(filename, 'wb') as out_file:
        print('*e*\n\n', file=out_file)
        for mol_id in table:
            print(mol_id, file=out_file)
def print_table(table, row_type):
    """Pretty-print rows right-aligned under a header, ending with a TOTAL count.

    Column widths are the per-column maxima over the header and all rows.
    """
    header = row_type._prop_names
    rows = list(table.itervalues())
    widths = [max(len(cell) for cell in column)
              for column in zip(header, *rows)]

    def render(values):
        return ' '.join('{:>{}}'.format(v, w) for v, w in zip(values, widths))

    print(render(header))
    for row in rows:
        assert isinstance(row, row_type)
        print(render(row))
    print('\nTOTAL: {}'.format(len(rows)))
def create_row_type(prop_names, id_prop=None):
    """Build a namedtuple row type for the given property names.

    prop_names may be a comma/space-delimited string or an iterable of
    names.  When id_prop is given it is forced into first position.  The
    resulting type carries its field list in `_prop_names` (an instance of
    the type itself, so it prints like a row).
    """
    if isinstance(prop_names, basestring):
        # Accept "a,b c" style strings: commas become spaces, then split.
        prop_names = prop_names.replace(',', ' ').split()
    names = [str(name) for name in prop_names]
    if id_prop is not None:
        names = [id_prop] + [name for name in names if name != id_prop]
    row_type = namedtuple('Row', names, rename=True)
    row_type._prop_names = row_type._make(names)
    return row_type
def read_input_file(filename, prop_names):
    """Load a CSV or SDF input file into a {molecule-id: row} mapping.

    Dispatches on the (case-insensitive) extension; anything that is not
    .csv is parsed as SDF, with a warning for unknown extensions.
    """
    extension = os.path.splitext(filename)[1].lower()
    outerr('{}: reading...'.format(filename))
    if extension == '.csv':
        table = read_csv(filename, prop_names)
    else:
        if extension != '.sdf':
            outerr('{}: will be treated as SDF'.format(filename),
                   file=sys.stderr)
        table = parse_sdf(filename, id_prop=prop_names[0])
    outerr('done: {} molecules'.format(len(table)))
    return table
def write_output_file(filename, table, row_type=None):
    """Save *table* as CSV or LST depending on the file extension.

    Anything that is not .csv is written as LST, with a warning for
    unknown extensions.
    """
    extension = os.path.splitext(filename)[1].lower()
    if extension == '.csv':
        writer = write_csv
    else:
        if extension != '.lst':
            outerr('{}: will be treated as LST'.format(filename),
                   file=sys.stderr)
        writer = write_lst
    writer(filename, table, row_type)
    outerr('{}: result saved'.format(filename))
def main():
    """CLI entry: look up molecules from the needle file(s) in the haystack.

    Prints the joined table to stdout and optionally saves it to --output
    (.csv or .lst).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-o', '--output',
                        help='CSV or LST output file')
    parser.add_argument('-i', '--id', type=str, default='ID',
                        help='ID property name')
    parser.add_argument('-p', '--props', type=str, default='',
                        help='Properties to extract from the haystack')
    parser.add_argument('haystack_file',
                        help='SDF or CSV file to search in')
    parser.add_argument('needle_files', nargs='+',
                        help='SDF or CSV file with data to search for')
    args = parser.parse_args()
    row_type = create_row_type(args.props, id_prop=args.id)
    haystack = read_input_file(args.haystack_file, row_type._prop_names)
    needle = set()
    for needle_file in args.needle_files:
        # Iterating the returned mapping yields molecule ids (dict keys).
        needle |= set(read_input_file(needle_file, row_type._prop_names))
    result = join_results(haystack, needle, row_type)
    print_table(result, row_type)
    if args.output is not None:
        write_output_file(args.output, result, row_type)
if __name__ == '__main__':
    main()
|
|
# -*- coding: utf-8 -*-
try:
# Python 2.7
from collections import OrderedDict
except:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import current
from gluon.storage import Storage
def config(settings):
    """
        Template settings for RGIMS:
        Relief Goods and Inventory Management System
        http://eden.sahanafoundation.org/wiki/Deployments/Philippines/RGIMS
    """
    T = current.T
    settings.base.system_name = "Relief Goods Inventory & Monitoring System"
    settings.base.system_name_short = "RGIMS"
    # Pre-Populate
    settings.base.prepopulate = ("RGIMS", "default/users")
    # Theme
    settings.base.theme = "RGIMS"
    # Uncomment to Hide the language toolbar
    settings.L10n.display_toolbar = False
    # Default timezone for users
    settings.L10n.utc_offset = "UTC +0800"
    # Number formats (defaults to ISO 31-0)
    # Decimal separator for numbers (defaults to ,)
    settings.L10n.decimal_separator = "."
    # Finance settings
    settings.fin.currencies = {
        "USD" : T("United States Dollars"),
        "EUR" : T("Euros"),
        "PHP" : T("Philippine Pesos")
    }
    settings.fin.currency_default = "PHP"
    # Security Policy
    settings.security.policy = 6 # Warehouse-specific restrictions
    settings.security.map = True
    # Realm-entity hook: inv_recv/inv_send records inherit the realm of
    # their site rather than that of their organisation.
    def rgims_realm_entity(table, row):
        """
            Assign a Realm Entity to records
        """
        tablename = table._tablename
        if tablename not in ("inv_recv", "inv_send"):
            # Normal lookup
            return 0
        # For these tables we need to assign the site_id's realm not organisation_id's
        db = current.db
        stable = db.org_site
        record = db(stable.site_id == row.site_id).select(stable.realm_entity,
                                                          limitby=(0, 1)
                                                          ).first()
        if record:
            return record.realm_entity
        # Normal lookup
        return 0
    settings.auth.realm_entity = rgims_realm_entity
    # Enable this for a UN-style deployment
    settings.ui.cluster = True
    # Enable this to use the label 'Camp' instead of 'Shelter'
    settings.ui.camp = True
    # Requests
    settings.req.use_commit = False
    settings.req.req_form_name = "Request Issue Form"
    settings.req.req_shortname = "RIS"
    # Restrict the type of requests that can be made, valid values in the
    # list are ["Stock", "People", "Other"]. If this is commented out then
    # all types will be valid.
    settings.req.req_type = ["Stock"]
    # Inventory Management
    settings.inv.send_form_name = "Tally Out Sheet"
    settings.inv.send_short_name = "TOS"
    settings.inv.send_ref_field_name = "Tally Out Number"
    settings.inv.recv_form_name = "Acknowledgement Receipt for Donations Received Form"
    settings.inv.recv_shortname = "ARDR"
    settings.inv.recv_type = {
        #0: T("-"),
        #1: T("Other Warehouse"),
        32: T("Donation"),
        33: T("Foreign Donation"),
        34: T("Local Purchases"),
        35: T("Confiscated Goods from Bureau Of Customs")
    }
    # Comment/uncomment modules here to disable/enable them
    settings.modules = OrderedDict([
        # Core modules which shouldn't be disabled
        ("default", Storage(
            name_nice = T("Home"),
            restricted = False, # Use ACLs to control access to this module
            access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
            module_type = None # This item is not shown in the menu
        )),
        ("admin", Storage(
            name_nice = T("Administration"),
            #description = "Site Administration",
            restricted = True,
            access = "|1|", # Only Administrators can see this module in the default menu & access the controller
            module_type = None # This item is handled separately for the menu
        )),
        ("appadmin", Storage(
            name_nice = T("Administration"),
            #description = "Site Administration",
            restricted = True,
            module_type = None # No Menu
        )),
        ("errors", Storage(
            name_nice = T("Ticket Viewer"),
            #description = "Needed for Breadcrumbs",
            restricted = False,
            module_type = None # No Menu
        )),
        ("sync", Storage(
            name_nice = T("Synchronization"),
            #description = "Synchronization",
            restricted = True,
            access = "|1|", # Only Administrators can see this module in the default menu & access the controller
            module_type = None # This item is handled separately for the menu
        )),
        ("gis", Storage(
            name_nice = T("Map"),
            #description = "Situation Awareness & Geospatial Analysis",
            restricted = True,
            module_type = 6, # 6th item in the menu
        )),
        ("pr", Storage(
            name_nice = T("Person Registry"),
            #description = "Central point to record details on People",
            restricted = True,
            access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
            module_type = 10
        )),
        ("org", Storage(
            name_nice = T("Organizations"),
            #description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
            restricted = True,
            module_type = 10
        )),
        # All modules below here should be possible to disable safely
        ("hrm", Storage(
            name_nice = T("Staff"),
            #description = "Human Resources Management",
            restricted = True,
            module_type = 10,
        )),
        #("cms", Storage(
        #  name_nice = T("Content Management"),
        #  #description = "Content Management System",
        #  restricted = True,
        #  module_type = 10,
        #  )),
        ("doc", Storage(
            name_nice = T("Documents"),
            #description = "A library of digital resources, such as photos, documents and reports",
            restricted = True,
            module_type = 10,
        )),
        ("msg", Storage(
            name_nice = T("Messaging"),
            #description = "Sends & Receives Alerts via Email & SMS",
            restricted = True,
            # The user-visible functionality of this module isn't normally required. Rather it's main purpose is to be accessed from other modules.
            module_type = None,
        )),
        ("supply", Storage(
            name_nice = T("Supply Chain Management"),
            #description = "Used within Inventory Management, Request Management and Asset Management",
            restricted = True,
            module_type = None, # Not displayed
        )),
        ("inv", Storage(
            name_nice = T("Warehouses"),
            #description = "Receiving and Sending Items",
            restricted = True,
            module_type = 1
        )),
        #("proc", Storage(
        #  name_nice = T("Procurement"),
        #  #description = "Ordering & Purchasing of Goods & Services",
        #  restricted = True,
        #  module_type = 10
        #  )),
        ("asset", Storage(
            name_nice = T("Assets"),
            #description = "Recording and Assigning Assets",
            restricted = True,
            module_type = 10,
        )),
        # Vehicle depends on Assets
        #("vehicle", Storage(
        #  name_nice = T("Vehicles"),
        #  #description = "Manage Vehicles",
        #  restricted = True,
        #  module_type = 10,
        #  )),
        ("req", Storage(
            name_nice = T("Requests"),
            #description = "Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.",
            restricted = True,
            module_type = 2,
        )),
        #("project", Storage(
        #  name_nice = T("Projects"),
        #  #description = "Tracking of Projects, Activities and Tasks",
        #  restricted = True,
        #  module_type = 10
        #  )),
        #("survey", Storage(
        #  name_nice = T("Surveys"),
        #  #description = "Create, enter, and manage surveys.",
        #  restricted = True,
        #  module_type = 10,
        #  )),
        #("cr", Storage(
        #  name_nice = T("Shelters"),
        #  #description = "Tracks the location, capacity and breakdown of victims in Shelters",
        #  restricted = True,
        #  module_type = 10
        #  )),
        #("hms", Storage(
        #  name_nice = T("Hospitals"),
        #  #description = "Helps to monitor status of hospitals",
        #  restricted = True,
        #  module_type = 10
        #  )),
        #("irs", Storage(
        #  name_nice = T("Incidents"),
        #  #description = "Incident Reporting System",
        #  restricted = False,
        #  module_type = 10
        #  )),
    ])
# END =========================================================================
|
|
from pprint import pformat
import sys
from threading import Lock
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import socket
from django import http
from django.core import signals
from django.core.handlers import base
from django.core.urlresolvers import set_script_prefix
from django.utils import datastructures
from django.utils.encoding import force_unicode, iri_to_uri
from django.utils.log import getLogger
# Module-level logger shared by the WSGI machinery below.
logger = getLogger('django.request')
# See http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
# HTTP reason phrases keyed by status code, used to build the WSGI status
# line (e.g. "404 NOT FOUND").
STATUS_CODE_TEXT = {
    100: 'CONTINUE',
    101: 'SWITCHING PROTOCOLS',
    200: 'OK',
    201: 'CREATED',
    202: 'ACCEPTED',
    203: 'NON-AUTHORITATIVE INFORMATION',
    204: 'NO CONTENT',
    205: 'RESET CONTENT',
    206: 'PARTIAL CONTENT',
    300: 'MULTIPLE CHOICES',
    301: 'MOVED PERMANENTLY',
    302: 'FOUND',
    303: 'SEE OTHER',
    304: 'NOT MODIFIED',
    305: 'USE PROXY',
    306: 'RESERVED',
    307: 'TEMPORARY REDIRECT',
    400: 'BAD REQUEST',
    401: 'UNAUTHORIZED',
    402: 'PAYMENT REQUIRED',
    403: 'FORBIDDEN',
    404: 'NOT FOUND',
    405: 'METHOD NOT ALLOWED',
    406: 'NOT ACCEPTABLE',
    407: 'PROXY AUTHENTICATION REQUIRED',
    408: 'REQUEST TIMEOUT',
    409: 'CONFLICT',
    410: 'GONE',
    411: 'LENGTH REQUIRED',
    412: 'PRECONDITION FAILED',
    413: 'REQUEST ENTITY TOO LARGE',
    414: 'REQUEST-URI TOO LONG',
    415: 'UNSUPPORTED MEDIA TYPE',
    416: 'REQUESTED RANGE NOT SATISFIABLE',
    417: 'EXPECTATION FAILED',
    500: 'INTERNAL SERVER ERROR',
    501: 'NOT IMPLEMENTED',
    502: 'BAD GATEWAY',
    503: 'SERVICE UNAVAILABLE',
    504: 'GATEWAY TIMEOUT',
    505: 'HTTP VERSION NOT SUPPORTED',
}
class LimitedStream(object):
    '''
    LimitedStream wraps another stream in order to not allow reading from it
    past specified amount of bytes.
    '''
    def __init__(self, stream, limit, buf_size=64 * 1024 * 1024):
        # stream: wrapped file-like object (must support .read(size)).
        # limit: total number of bytes that may ever be read from it.
        # buf_size: not used by read()/readline() below -- kept for
        #           signature compatibility; TODO confirm before removing.
        self.stream = stream
        self.remaining = limit
        self.buffer = ''
        self.buf_size = buf_size
    def _read_limited(self, size=None):
        # Read up to *size* bytes from the underlying stream, clamped so the
        # cumulative total never exceeds the original limit.
        if size is None or size > self.remaining:
            size = self.remaining
        if size == 0:
            return ''
        result = self.stream.read(size)
        self.remaining -= len(result)
        return result
    def read(self, size=None):
        # Serve any data buffered by readline() first, then top up from the
        # limited stream.  size=None means "everything that remains".
        if size is None:
            result = self.buffer + self._read_limited()
            self.buffer = ''
        elif size < len(self.buffer):
            result = self.buffer[:size]
            self.buffer = self.buffer[size:]
        else: # size >= len(self.buffer)
            result = self.buffer + self._read_limited(size - len(self.buffer))
            self.buffer = ''
        return result
    def readline(self, size=None):
        # Accumulate chunks until the buffer holds a newline (or *size*
        # bytes, or the stream/limit is exhausted); return one line and
        # keep the excess buffered for the next call.
        while '\n' not in self.buffer and \
              (size is None or len(self.buffer) < size):
            if size:
                # since size is not None here, len(self.buffer) < size
                chunk = self._read_limited(size - len(self.buffer))
            else:
                chunk = self._read_limited()
            if not chunk:
                break
            self.buffer += chunk
        sio = StringIO(self.buffer)
        if size:
            line = sio.readline(size)
        else:
            line = sio.readline()
        self.buffer = sio.read()
        return line
class WSGIRequest(http.HttpRequest):
    """HttpRequest backed by a WSGI environ dict.

    GET/POST/COOKIES/FILES/REQUEST are parsed lazily on first access via
    the property getters at the bottom of the class.
    """
    def __init__(self, environ):
        script_name = base.get_script_name(environ)
        path_info = force_unicode(environ.get('PATH_INFO', u'/'))
        if not path_info or path_info == script_name:
            # Sometimes PATH_INFO exists, but is empty (e.g. accessing
            # the SCRIPT_NAME URL without a trailing slash). We really need to
            # operate as if they'd requested '/'. Not amazingly nice to force
            # the path like this, but should be harmless.
            #
            # (The comparison of path_info to script_name is to work around an
            # apparent bug in flup 1.0.1. Se Django ticket #8490).
            path_info = u'/'
        self.environ = environ
        self.path_info = path_info
        self.path = '%s%s' % (script_name, path_info)
        # META is the raw environ with normalized PATH_INFO/SCRIPT_NAME.
        self.META = environ
        self.META['PATH_INFO'] = path_info
        self.META['SCRIPT_NAME'] = script_name
        self.method = environ['REQUEST_METHOD'].upper()
        self._post_parse_error = False
        if type(socket._fileobject) is type and isinstance(self.environ['wsgi.input'], socket._fileobject):
            # Under development server 'wsgi.input' is an instance of
            # socket._fileobject which hangs indefinitely on reading bytes past
            # available count. To prevent this it's wrapped in LimitedStream
            # that doesn't read past Content-Length bytes.
            #
            # This is not done for other kinds of inputs (like flup's FastCGI
            # streams) beacuse they don't suffer from this problem and we can
            # avoid using another wrapper with its own .read and .readline
            # implementation.
            #
            # The type check is done because for some reason, AppEngine
            # implements _fileobject as a function, not a class.
            try:
                content_length = int(self.environ.get('CONTENT_LENGTH', 0))
            except (ValueError, TypeError):
                content_length = 0
            self._stream = LimitedStream(self.environ['wsgi.input'], content_length)
        else:
            self._stream = self.environ['wsgi.input']
        self._read_started = False
    def __repr__(self):
        # Since this is called as part of error handling, we need to be very
        # robust against potentially malformed input.
        try:
            get = pformat(self.GET)
        except:
            get = '<could not parse>'
        if self._post_parse_error:
            post = '<could not parse>'
        else:
            try:
                post = pformat(self.POST)
            except:
                post = '<could not parse>'
        try:
            cookies = pformat(self.COOKIES)
        except:
            cookies = '<could not parse>'
        try:
            meta = pformat(self.META)
        except:
            meta = '<could not parse>'
        return '<WSGIRequest\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' % \
            (get, post, cookies, meta)
    def get_full_path(self):
        # RFC 3986 requires query string arguments to be in the ASCII range.
        # Rather than crash if this doesn't happen, we encode defensively.
        return '%s%s' % (self.path, self.environ.get('QUERY_STRING', '') and ('?' + iri_to_uri(self.environ.get('QUERY_STRING', ''))) or '')
    def is_secure(self):
        # True only when the WSGI server reports an https URL scheme.
        return 'wsgi.url_scheme' in self.environ \
            and self.environ['wsgi.url_scheme'] == 'https'
    def _get_request(self):
        # Merged POST+GET view, cached; POST takes precedence in MergeDict.
        if not hasattr(self, '_request'):
            self._request = datastructures.MergeDict(self.POST, self.GET)
        return self._request
    def _get_get(self):
        if not hasattr(self, '_get'):
            # The WSGI spec says 'QUERY_STRING' may be absent.
            self._get = http.QueryDict(self.environ.get('QUERY_STRING', ''), encoding=self._encoding)
        return self._get
    def _set_get(self, get):
        self._get = get
    def _get_post(self):
        # Parsing POST also populates _files as a side effect.
        if not hasattr(self, '_post'):
            self._load_post_and_files()
        return self._post
    def _set_post(self, post):
        self._post = post
    def _get_cookies(self):
        if not hasattr(self, '_cookies'):
            self._cookies = http.parse_cookie(self.environ.get('HTTP_COOKIE', ''))
        return self._cookies
    def _set_cookies(self, cookies):
        self._cookies = cookies
    def _get_files(self):
        if not hasattr(self, '_files'):
            self._load_post_and_files()
        return self._files
    # Lazily-evaluated request data, exposed as properties.
    GET = property(_get_get, _set_get)
    POST = property(_get_post, _set_post)
    COOKIES = property(_get_cookies, _set_cookies)
    FILES = property(_get_files)
    REQUEST = property(_get_request)
class WSGIHandler(base.BaseHandler):
    """WSGI application object: dispatches a WSGI environ through Django's
    middleware/view machinery and streams back the response."""
    initLock = Lock()
    request_class = WSGIRequest
    def __call__(self, environ, start_response):
        from django.conf import settings
        # Set up middleware if needed. We couldn't do this earlier, because
        # settings weren't available.
        if self._request_middleware is None:
            self.initLock.acquire()
            try:
                try:
                    # Check that middleware is still uninitialised (another
                    # thread may have won the race while we waited).
                    if self._request_middleware is None:
                        self.load_middleware()
                except:
                    # Unload whatever middleware we got, then re-raise
                    # (intentional catch-all: state must be reset on any error).
                    self._request_middleware = None
                    raise
            finally:
                self.initLock.release()
        set_script_prefix(base.get_script_name(environ))
        signals.request_started.send(sender=self.__class__)
        try:
            try:
                request = self.request_class(environ)
            except UnicodeDecodeError:
                # BUGFIX: `request` is unbound in this branch (the constructor
                # raised), so referencing request.path here raised
                # UnboundLocalError while handling the error. Log the raw
                # environ path instead and don't attach a request object.
                logger.warning('Bad Request (UnicodeDecodeError): %s' % environ.get('PATH_INFO', ''),
                    exc_info=sys.exc_info(),
                    extra={
                        'status_code': 400,
                    }
                )
                response = http.HttpResponseBadRequest()
            else:
                response = self.get_response(request)
        finally:
            signals.request_finished.send(sender=self.__class__)
        try:
            status_text = STATUS_CODE_TEXT[response.status_code]
        except KeyError:
            status_text = 'UNKNOWN STATUS CODE'
        status = '%s %s' % (response.status_code, status_text)
        response_headers = [(str(k), str(v)) for k, v in response.items()]
        for c in response.cookies.values():
            response_headers.append(('Set-Cookie', str(c.output(header=''))))
        start_response(status, response_headers)
        return response
|
|
import copy
import re
from io import BytesIO
from itertools import chain
from urllib.parse import quote, urlencode, urljoin, urlsplit
from django.conf import settings
from django.core import signing
from django.core.exceptions import (
DisallowedHost, ImproperlyConfigured, RequestDataTooBig,
)
from django.core.files import uploadhandler
from django.http.multipartparser import MultiPartParser, MultiPartParserError
from django.utils.datastructures import (
CaseInsensitiveMapping, ImmutableList, MultiValueDict,
)
from django.utils.encoding import escape_uri_path, iri_to_uri
from django.utils.functional import cached_property
from django.utils.http import is_same_domain, limited_parse_qsl
# Sentinel distinguishing "no default supplied" from an explicit default of
# None in get_signed_cookie().
RAISE_ERROR = object()
# Matches "host" or "host:port", where host is either a dotted domain/IPv4
# label string or a bracketed IPv6 address. Assumes the host is lowercased.
host_validation_re = re.compile(r"^([a-z0-9.-]+|\[[a-f0-9]*:[a-f0-9\.:]+\])(:\d+)?$")
class UnreadablePostError(OSError):
    """Raised when reading the request body fails (see HttpRequest.read())."""
    pass
class RawPostDataException(Exception):
    """
    You cannot access raw_post_data from a request that has
    multipart/* POST data if it has been accessed via POST,
    FILES, etc.
    """
    pass
class HttpRequest:
    """A basic HTTP request."""
    # The encoding used in GET/POST dicts. None means use default setting.
    _encoding = None
    _upload_handlers = []
    def __init__(self):
        # WARNING: The `WSGIRequest` subclass doesn't call `super`.
        # Any variable assignment made here should also happen in
        # `WSGIRequest.__init__()`.
        self.GET = QueryDict(mutable=True)
        self.POST = QueryDict(mutable=True)
        self.COOKIES = {}
        self.META = {}
        self.FILES = MultiValueDict()
        self.path = ''
        self.path_info = ''
        self.method = None
        self.resolver_match = None
        self.content_type = None
        self.content_params = None
    def __repr__(self):
        # Show only the class name until method and path are populated (e.g.
        # while the request is still being constructed).
        if self.method is None or not self.get_full_path():
            return '<%s>' % self.__class__.__name__
        return '<%s: %s %r>' % (self.__class__.__name__, self.method, self.get_full_path())
    @cached_property
    def headers(self):
        """Case-insensitive, header-style view of self.META (computed once)."""
        return HttpHeaders(self.META)
    def _get_raw_host(self):
        """
        Return the HTTP host using the environment or request headers. Skip
        allowed hosts protection, so may return an insecure host.
        """
        # We try three options, in order of decreasing preference.
        if settings.USE_X_FORWARDED_HOST and (
                'HTTP_X_FORWARDED_HOST' in self.META):
            host = self.META['HTTP_X_FORWARDED_HOST']
        elif 'HTTP_HOST' in self.META:
            host = self.META['HTTP_HOST']
        else:
            # Reconstruct the host using the algorithm from PEP 333.
            host = self.META['SERVER_NAME']
            server_port = self.get_port()
            if server_port != ('443' if self.is_secure() else '80'):
                host = '%s:%s' % (host, server_port)
        return host
    def get_host(self):
        """Return the HTTP host using the environment or request headers."""
        host = self._get_raw_host()
        # Allow variants of localhost if ALLOWED_HOSTS is empty and DEBUG=True.
        allowed_hosts = settings.ALLOWED_HOSTS
        if settings.DEBUG and not allowed_hosts:
            allowed_hosts = ['localhost', '127.0.0.1', '[::1]']
        domain, port = split_domain_port(host)
        if domain and validate_host(domain, allowed_hosts):
            return host
        else:
            msg = "Invalid HTTP_HOST header: %r." % host
            if domain:
                msg += " You may need to add %r to ALLOWED_HOSTS." % domain
            else:
                msg += " The domain name provided is not valid according to RFC 1034/1035."
            raise DisallowedHost(msg)
    def get_port(self):
        """Return the port number for the request as a string."""
        if settings.USE_X_FORWARDED_PORT and 'HTTP_X_FORWARDED_PORT' in self.META:
            port = self.META['HTTP_X_FORWARDED_PORT']
        else:
            port = self.META['SERVER_PORT']
        return str(port)
    def get_full_path(self, force_append_slash=False):
        """Return the request path, including the query string if present."""
        return self._get_full_path(self.path, force_append_slash)
    def get_full_path_info(self, force_append_slash=False):
        """Like get_full_path(), but based on path_info instead of path."""
        return self._get_full_path(self.path_info, force_append_slash)
    def _get_full_path(self, path, force_append_slash):
        # RFC 3986 requires query string arguments to be in the ASCII range.
        # Rather than crash if this doesn't happen, we encode defensively.
        return '%s%s%s' % (
            escape_uri_path(path),
            '/' if force_append_slash and not path.endswith('/') else '',
            ('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) if self.META.get('QUERY_STRING', '') else ''
        )
    def get_signed_cookie(self, key, default=RAISE_ERROR, salt='', max_age=None):
        """
        Attempt to return a signed cookie. If the signature fails or the
        cookie has expired, raise an exception, unless the `default` argument
        is provided, in which case return that value.
        """
        try:
            cookie_value = self.COOKIES[key]
        except KeyError:
            if default is not RAISE_ERROR:
                return default
            else:
                raise
        try:
            value = signing.get_cookie_signer(salt=key + salt).unsign(
                cookie_value, max_age=max_age)
        except signing.BadSignature:
            if default is not RAISE_ERROR:
                return default
            else:
                raise
        return value
    def get_raw_uri(self):
        """
        Return an absolute URI from variables available in this request. Skip
        allowed hosts protection, so may return insecure URI.
        """
        return '{scheme}://{host}{path}'.format(
            scheme=self.scheme,
            host=self._get_raw_host(),
            path=self.get_full_path(),
        )
    def build_absolute_uri(self, location=None):
        """
        Build an absolute URI from the location and the variables available in
        this request. If no ``location`` is specified, build the absolute URI
        using request.get_full_path(). If the location is absolute, convert it
        to an RFC 3987 compliant URI and return it. If location is relative or
        is scheme-relative (i.e., ``//example.com/``), urljoin() it to a base
        URL constructed from the request variables.
        """
        if location is None:
            # Make it an absolute url (but schemeless and domainless) for the
            # edge case that the path starts with '//'.
            location = '//%s' % self.get_full_path()
        bits = urlsplit(location)
        if not (bits.scheme and bits.netloc):
            # Handle the simple, most common case. If the location is absolute
            # and a scheme or host (netloc) isn't provided, skip an expensive
            # urljoin() as long as no path segments are '.' or '..'.
            if (bits.path.startswith('/') and not bits.scheme and not bits.netloc and
                    '/./' not in bits.path and '/../' not in bits.path):
                # If location starts with '//' but has no netloc, reuse the
                # schema and netloc from the current request. Strip the double
                # slashes and continue as if it wasn't specified.
                if location.startswith('//'):
                    location = location[2:]
                location = self._current_scheme_host + location
            else:
                # Join the constructed URL with the provided location, which
                # allows the provided location to apply query strings to the
                # base path.
                location = urljoin(self._current_scheme_host + self.path, location)
        return iri_to_uri(location)
    @cached_property
    def _current_scheme_host(self):
        # e.g. 'https://example.com' -- computed once per request.
        return '{}://{}'.format(self.scheme, self.get_host())
    def _get_scheme(self):
        """
        Hook for subclasses like WSGIRequest to implement. Return 'http' by
        default.
        """
        return 'http'
    @property
    def scheme(self):
        # Honor SECURE_PROXY_SSL_HEADER, if configured, before falling back
        # to the scheme reported by the subclass hook.
        if settings.SECURE_PROXY_SSL_HEADER:
            try:
                header, value = settings.SECURE_PROXY_SSL_HEADER
            except ValueError:
                raise ImproperlyConfigured(
                    'The SECURE_PROXY_SSL_HEADER setting must be a tuple containing two values.'
                )
            if self.META.get(header) == value:
                return 'https'
        return self._get_scheme()
    def is_secure(self):
        """Return True if the request was made over HTTPS."""
        return self.scheme == 'https'
    def is_ajax(self):
        # True for requests flagged as XMLHttpRequest (set by common JS libs).
        return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'
    @property
    def encoding(self):
        return self._encoding
    @encoding.setter
    def encoding(self, val):
        """
        Set the encoding used for GET/POST accesses. If the GET or POST
        dictionary has already been created, remove and recreate it on the
        next access (so that it is decoded correctly).
        """
        self._encoding = val
        if hasattr(self, 'GET'):
            del self.GET
        if hasattr(self, '_post'):
            del self._post
    def _initialize_handlers(self):
        # Instantiate the upload handlers configured in settings.
        self._upload_handlers = [uploadhandler.load_handler(handler, self)
                                 for handler in settings.FILE_UPLOAD_HANDLERS]
    @property
    def upload_handlers(self):
        if not self._upload_handlers:
            # If there are no upload handlers defined, initialize them from settings.
            self._initialize_handlers()
        return self._upload_handlers
    @upload_handlers.setter
    def upload_handlers(self, upload_handlers):
        # Once FILES has been parsed, changing handlers would have no effect.
        if hasattr(self, '_files'):
            raise AttributeError("You cannot set the upload handlers after the upload has been processed.")
        self._upload_handlers = upload_handlers
    def parse_file_upload(self, META, post_data):
        """Return a tuple of (POST QueryDict, FILES MultiValueDict)."""
        self.upload_handlers = ImmutableList(
            self.upload_handlers,
            warning="You cannot alter upload handlers after the upload has been processed."
        )
        parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding)
        return parser.parse()
    @property
    def body(self):
        # Raw request body as bytes; read once and cached, after which
        # self._stream is rewound to a BytesIO over the cached bytes.
        if not hasattr(self, '_body'):
            if self._read_started:
                raise RawPostDataException("You cannot access body after reading from request's data stream")
            # Limit the maximum request data size that will be handled in-memory.
            if (settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None and
                    int(self.META.get('CONTENT_LENGTH') or 0) > settings.DATA_UPLOAD_MAX_MEMORY_SIZE):
                raise RequestDataTooBig('Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE.')
            try:
                self._body = self.read()
            except OSError as e:
                raise UnreadablePostError(*e.args) from e
            self._stream = BytesIO(self._body)
        return self._body
    def _mark_post_parse_error(self):
        # Leave empty POST/FILES in place so later accesses don't re-parse.
        self._post = QueryDict()
        self._files = MultiValueDict()
    def _load_post_and_files(self):
        """Populate self._post and self._files if the content-type is a form type"""
        if self.method != 'POST':
            self._post, self._files = QueryDict(encoding=self._encoding), MultiValueDict()
            return
        if self._read_started and not hasattr(self, '_body'):
            self._mark_post_parse_error()
            return
        if self.content_type == 'multipart/form-data':
            if hasattr(self, '_body'):
                # Use already read data
                data = BytesIO(self._body)
            else:
                data = self
            try:
                self._post, self._files = self.parse_file_upload(self.META, data)
            except MultiPartParserError:
                # An error occurred while parsing POST data. Since when
                # formatting the error the request handler might access
                # self.POST, set self._post and self._file to prevent
                # attempts to parse POST data again.
                self._mark_post_parse_error()
                raise
        elif self.content_type == 'application/x-www-form-urlencoded':
            self._post, self._files = QueryDict(self.body, encoding=self._encoding), MultiValueDict()
        else:
            self._post, self._files = QueryDict(encoding=self._encoding), MultiValueDict()
    def close(self):
        """Close any uploaded files opened while parsing this request."""
        if hasattr(self, '_files'):
            for f in chain.from_iterable(l[1] for l in self._files.lists()):
                f.close()
    # File-like and iterator interface.
    #
    # Expects self._stream to be set to an appropriate source of bytes by
    # a corresponding request subclass (e.g. WSGIRequest).
    # Also when request data has already been read by request.POST or
    # request.body, self._stream points to a BytesIO instance
    # containing that data.
    def read(self, *args, **kwargs):
        self._read_started = True
        try:
            return self._stream.read(*args, **kwargs)
        except OSError as e:
            raise UnreadablePostError(*e.args) from e
    def readline(self, *args, **kwargs):
        self._read_started = True
        try:
            return self._stream.readline(*args, **kwargs)
        except OSError as e:
            raise UnreadablePostError(*e.args) from e
    def __iter__(self):
        # Yield lines until readline() returns b'' (EOF).
        return iter(self.readline, b'')
    def readlines(self):
        return list(self)
class HttpHeaders(CaseInsensitiveMapping):
    """Expose the HTTP headers found in a WSGI environ as a mapping."""
    HTTP_PREFIX = 'HTTP_'
    # PEP 333 gives two headers which aren't prepended with HTTP_.
    UNPREFIXED_HEADERS = {'CONTENT_TYPE', 'CONTENT_LENGTH'}
    def __init__(self, environ):
        candidates = (
            (self.parse_header_name(key), value)
            for key, value in environ.items()
        )
        super().__init__({name: value for name, value in candidates if name})
    def __getitem__(self, key):
        """Allow header lookup using underscores in place of hyphens."""
        return super().__getitem__(key.replace('_', '-'))
    @classmethod
    def parse_header_name(cls, header):
        """Translate an environ key to a header name; None for non-headers."""
        if header.startswith(cls.HTTP_PREFIX):
            stripped = header[len(cls.HTTP_PREFIX):]
        elif header in cls.UNPREFIXED_HEADERS:
            stripped = header
        else:
            return None
        return stripped.replace('_', '-').title()
class QueryDict(MultiValueDict):
    """
    A specialized MultiValueDict which represents a query string.
    A QueryDict can be used to represent GET or POST data. It subclasses
    MultiValueDict since keys in such data can be repeated, for instance
    in the data from a form with a <select multiple> field.
    By default QueryDicts are immutable, though the copy() method
    will always return a mutable copy.
    Both keys and values set on this class are converted from the given encoding
    (DEFAULT_CHARSET by default) to str.
    """
    # These are both reset in __init__, but is specified here at the class
    # level so that unpickling will have valid values
    _mutable = True
    _encoding = None
    def __init__(self, query_string=None, mutable=False, encoding=None):
        super().__init__()
        self.encoding = encoding or settings.DEFAULT_CHARSET
        query_string = query_string or ''
        parse_qsl_kwargs = {
            'keep_blank_values': True,
            'fields_limit': settings.DATA_UPLOAD_MAX_NUMBER_FIELDS,
            'encoding': self.encoding,
        }
        if isinstance(query_string, bytes):
            # query_string normally contains URL-encoded data, a subset of ASCII.
            try:
                query_string = query_string.decode(self.encoding)
            except UnicodeDecodeError:
                # ... but some user agents are misbehaving :-(
                query_string = query_string.decode('iso-8859-1')
        for key, value in limited_parse_qsl(query_string, **parse_qsl_kwargs):
            self.appendlist(key, value)
        self._mutable = mutable
    @classmethod
    def fromkeys(cls, iterable, value='', mutable=False, encoding=None):
        """
        Return a new QueryDict with keys (may be repeated) from an iterable and
        values from value.
        """
        q = cls('', mutable=True, encoding=encoding)
        for key in iterable:
            q.appendlist(key, value)
        if not mutable:
            q._mutable = False
        return q
    @property
    def encoding(self):
        # Lazily fall back to DEFAULT_CHARSET (e.g. after unpickling).
        if self._encoding is None:
            self._encoding = settings.DEFAULT_CHARSET
        return self._encoding
    @encoding.setter
    def encoding(self, value):
        self._encoding = value
    def _assert_mutable(self):
        # Guard used by every mutating method below.
        if not self._mutable:
            raise AttributeError("This QueryDict instance is immutable")
    def __setitem__(self, key, value):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        value = bytes_to_text(value, self.encoding)
        super().__setitem__(key, value)
    def __delitem__(self, key):
        self._assert_mutable()
        super().__delitem__(key)
    def __copy__(self):
        # Copies are always mutable, regardless of the source's mutability.
        result = self.__class__('', mutable=True, encoding=self.encoding)
        for key, value in self.lists():
            result.setlist(key, value)
        return result
    def __deepcopy__(self, memo):
        result = self.__class__('', mutable=True, encoding=self.encoding)
        memo[id(self)] = result
        for key, value in self.lists():
            result.setlist(copy.deepcopy(key, memo), copy.deepcopy(value, memo))
        return result
    def setlist(self, key, list_):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        list_ = [bytes_to_text(elt, self.encoding) for elt in list_]
        super().setlist(key, list_)
    def setlistdefault(self, key, default_list=None):
        self._assert_mutable()
        return super().setlistdefault(key, default_list)
    def appendlist(self, key, value):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        value = bytes_to_text(value, self.encoding)
        super().appendlist(key, value)
    def pop(self, key, *args):
        self._assert_mutable()
        return super().pop(key, *args)
    def popitem(self):
        self._assert_mutable()
        return super().popitem()
    def clear(self):
        self._assert_mutable()
        super().clear()
    def setdefault(self, key, default=None):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        default = bytes_to_text(default, self.encoding)
        return super().setdefault(key, default)
    def copy(self):
        """Return a mutable copy of this object."""
        return self.__deepcopy__({})
    def urlencode(self, safe=None):
        """
        Return an encoded string of all query string arguments.
        `safe` specifies characters which don't require quoting, for example::
            >>> q = QueryDict(mutable=True)
            >>> q['next'] = '/a&b/'
            >>> q.urlencode()
            'next=%2Fa%26b%2F'
            >>> q.urlencode(safe='/')
            'next=/a%26b/'
        """
        output = []
        if safe:
            safe = safe.encode(self.encoding)
            def encode(k, v):
                return '%s=%s' % ((quote(k, safe), quote(v, safe)))
        else:
            def encode(k, v):
                return urlencode({k: v})
        for k, list_ in self.lists():
            output.extend(
                encode(k.encode(self.encoding), str(v).encode(self.encoding))
                for v in list_
            )
        return '&'.join(output)
# It's neither necessary nor appropriate to use
# django.utils.encoding.force_str() for parsing URLs and form inputs. Thus,
# this slightly more restricted function, used by QueryDict.
def bytes_to_text(s, encoding):
    """
    Decode *s* with *encoding* when it is a bytes object; any byte that
    cannot be decoded is replaced with U+FFFD. Values that are not bytes
    are passed through unchanged.
    """
    if not isinstance(s, bytes):
        return s
    return str(s, encoding, 'replace')
def split_domain_port(host):
    """
    Split *host* into a (domain, port) tuple, lowercasing the domain.
    Return ('', '') when the host is not a plausible hostname, IPv4
    address, or bracketed IPv6 address, optionally followed by ':port'.
    """
    host = host.lower()
    # Same pattern as the module-level host_validation_re.
    if re.match(r"^([a-z0-9.-]+|\[[a-f0-9]*:[a-f0-9\.:]+\])(:\d+)?$", host) is None:
        return '', ''
    if host.endswith(']'):
        # Bracketed IPv6 address with no port component.
        return host, ''
    if ':' in host:
        domain, port = host.rsplit(':', 1)
    else:
        domain, port = host, ''
    # Remove a single trailing dot (fully-qualified form) from the domain.
    if domain.endswith('.'):
        domain = domain[:-1]
    return domain, port
def validate_host(host, allowed_hosts):
    """
    Validate the given host for this site.
    Check that the host looks valid and matches a host or host pattern in the
    given list of ``allowed_hosts``. Any pattern beginning with a period
    matches a domain and all its subdomains (e.g. ``.example.com`` matches
    ``example.com`` and any subdomain), ``*`` matches anything, and anything
    else must match exactly.
    Note: This function assumes that the given host is lowercased and has
    already had the port, if any, stripped off.
    Return ``True`` for a valid host, ``False`` otherwise.
    """
    for pattern in allowed_hosts:
        if pattern == '*' or is_same_domain(host, pattern):
            return True
    return False
|
|
# ##### BEGIN MIT LICENSE BLOCK #####
#
# Copyright (c) 2015 - 2021 Pixar
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#
# ##### END MIT LICENSE BLOCK #####
import bpy
import sys
import os
from bpy.types import AddonPreferences
from bpy.props import CollectionProperty, BoolProperty, StringProperty, FloatProperty
from bpy.props import IntProperty, PointerProperty, EnumProperty, FloatVectorProperty
from .rfb_utils import envconfig_utils
from . import rfb_logger
from . import rfb_icons
class RendermanPreferencePath(bpy.types.PropertyGroup):
    """A single user-configurable directory path (used by rpbUserLibraries)."""
    path: StringProperty(name="", subtype='DIR_PATH')
class RendermanDeviceDesc(bpy.types.PropertyGroup):
    """Describes one compute device (CPU or GPU) available to XPU."""
    name: StringProperty(name="", default="")
    # Device index from enumeration; -1 means "not yet assigned".
    id: IntProperty(default=-1)
    # Version numbers filled from the GPU descriptor in find_xpu_gpu_devices().
    version_major: IntProperty(default=0)
    version_minor: IntProperty(default=0)
    # Whether the user has enabled this device for rendering.
    use: BoolProperty(name="Use", default=False)
class RendermanPreferences(AddonPreferences):
bl_idname = __package__
def find_xpu_cpu_devices(self):
# for now, there's only one CPU
if len(self.rman_xpu_cpu_devices) < 1:
device = self.rman_xpu_cpu_devices.add()
device.name = "CPU 0"
device.id = 0
device.use = True
    def find_xpu_gpu_devices(self):
        """Sync rman_xpu_gpu_devices with the CUDA GPUs reported by the
        rman module: add newly seen devices, drop ones that disappeared."""
        try:
            import rman
            count = rman.pxrcore.GetGpgpuCount(rman.pxrcore.k_cuda)
            gpu_device_names = list()
            # try and add ones that we don't know about
            for i in range(count):
                desc = rman.pxrcore.GpgpuDescriptor()
                rman.pxrcore.GetGpgpuDescriptor(rman.pxrcore.k_cuda, i, desc)
                gpu_device_names.append(desc.name)
                found = False
                for device in self.rman_xpu_gpu_devices:
                    if device.name == desc.name:
                        found = True
                        break
                if not found:
                    device = self.rman_xpu_gpu_devices.add()
                    device.name = desc.name
                    device.version_major = desc.major
                    device.version_minor = desc.minor
                    device.id = i
                    if len(self.rman_xpu_gpu_devices) == 1:
                        # always use the first one, if this is our first time adding
                        # gpus
                        device.use = True
            # now, try and remove devices that no longer exist
            name_list = [device.name for device in self.rman_xpu_gpu_devices]
            for nm in name_list:
                if nm not in gpu_device_names:
                    self.rman_xpu_gpu_devices.remove(self.rman_xpu_gpu_devices.find(nm))
        except Exception as e:
            # The rman module may be unavailable or the query may fail;
            # treat that as "no GPUs" instead of breaking the preferences UI.
            rfb_logger.rfb_log().debug("Exception when getting GPU devices: %s" % str(e))
            pass
    def find_xpu_devices(self):
        """Populate both the CPU and GPU device collections for XPU."""
        self.find_xpu_cpu_devices()
        self.find_xpu_gpu_devices()
# find the renderman options installed
def find_installed_rendermans(self, context):
options = [('NEWEST', 'Newest Version Installed',
'Automatically updates when new version installed. NB: If an RMANTREE environment variable is set, this will always take precedence.')]
for vers, path in envconfig_utils.get_installed_rendermans():
options.append((path, vers, path))
return options
rman_xpu_cpu_devices: bpy.props.CollectionProperty(type=RendermanDeviceDesc)
rman_xpu_gpu_devices: bpy.props.CollectionProperty(type=RendermanDeviceDesc)
def fill_gpu_devices(self, context):
items = []
items.append(('-1', 'None', ''))
for device in self.rman_xpu_gpu_devices:
items.append(('%d' % device.id, '%s (%d.%d)' % (device.name, device.version_major, device.version_minor), ''))
return items
rman_xpu_gpu_selection: EnumProperty(name="GPU Device",
items=fill_gpu_devices
)
rman_xpu_device: EnumProperty(name="Devices",
description="Select category",
items=[
("CPU", "CPU", ""),
("GPU", "GPU", "")
]
)
rmantree_choice: EnumProperty(
name='RenderMan Version to use',
description='Leaving as "Newest" will automatically update when you install a new RenderMan version',
# default='NEWEST',
items=find_installed_rendermans
)
rmantree_method: EnumProperty(
name='RenderMan Location',
description='''How RenderMan should be detected. Most users should leave to "Detect".
Users should restart Blender after making a change.
''',
items=[('ENV', 'Get From RMANTREE Environment Variable',
'This will use the RMANTREE set in the enviornment variables'),
('DETECT', 'Choose From Installed',
'''This will scan for installed RenderMan locations to choose from.'''),
('MANUAL', 'Set Manually', 'Manually set the RenderMan installation (for expert users)')],
default='ENV')
path_rmantree: StringProperty(
name="RMANTREE Path",
description="Path to RenderMan Pro Server installation folder",
subtype='DIR_PATH',
default='')
draw_ipr_text: BoolProperty(
name="Draw IPR Text",
description="Draw notice on View3D when IPR is active",
default=True)
draw_panel_icon: BoolProperty(
name="Draw Panel Icon",
description="Draw an icon on RenderMan Panels",
default=True)
path_fallback_textures_path: StringProperty(
name="Fallback Texture Path",
description="Fallback path for textures, when the current directory is not writable",
subtype='FILE_PATH',
default=os.path.join('<OUT>', 'textures'))
path_fallback_textures_path_always: BoolProperty(
name="Always Fallback",
description="Always use the fallback texture path regardless",
default=False)
rman_txmanager_keep_extension: BoolProperty(
name='Keep original extension',
default=True,
description="If on, keep the original extension of the input image."
)
rman_txmanager_workers: IntProperty(
name='Number of processes',
description="Number of txmake processes to launch in parallel. Default to 2 (assuming a typical 4-cores computer). You should only increase this if you have more than 8 physical cores.",
default=2,
min=1,max=32
)
rman_txmanager_tex_extensions: StringProperty(
name='Texture Extensions',
description="Any file with one of these extensions will not be converted by the texture manager and used as-is. Entries should be space-delimited.",
default='tex tx txr ptx ptex ies',
)
rman_scene_version_padding: IntProperty(
name="Version Padding",
description="The number of zeros to pad the version token",
default=3,
min=1, max=4
)
rman_scene_take_padding: IntProperty(
name="Take Padding",
description="The number of zeros to pad the take token",
default=2,
min=1, max=4
)
rman_scene_version_increment: EnumProperty(
name="Increment Version",
description="The version number can be set to automatically increment each time you render",
items=[
('MANUALLY', 'Manually', ''),
('RENDER', 'On Render', ''),
('BATCH RENDER', 'On Batch Render', '')
],
default='MANUALLY'
)
rman_scene_take_increment: EnumProperty(
name="Increment Take",
description="The take number can be set to automatically increment each time you render",
items=[
('MANUALLY', 'Manually', ''),
('RENDER', 'On Render', ''),
('BATCH RENDER', 'On Batch Render', '')
],
default='MANUALLY'
)
    def update_rman_logging_level(self, context):
        """Update callback: push the selected log level to the RfB logger."""
        level = rfb_logger.__LOG_LEVELS__[self.rman_logging_level]
        rfb_logger.set_logger_level(level)
rman_logging_level: EnumProperty(
name='Logging Level',
description='''Log level verbosity. Advanced: Setting the RFB_LOG_LEVEL environment variable will override this preference. Requires a restart.
''',
items=[('CRITICAL', 'Critical', ''),
('ERROR', 'Error', ''),
('WARNING', 'Warning', ''),
('INFO', 'Info', ''),
('VERBOSE', 'Verbose', ''),
('DEBUG', 'Debug', ''),
],
default='WARNING',
update=update_rman_logging_level)
rman_logging_file: StringProperty(
name='Logging File',
description='''A file to write logging to. This will always write at DEBUG level. Setting the RFB_LOG_FILE environment variable will override this preference. Requires a restart.''',
default = '',
subtype='FILE_PATH'
)
rman_do_preview_renders: BoolProperty(
name="Render Previews",
description="Enable rendering of material previews. This is considered a WIP.",
default=False)
rman_preview_renders_minSamples: IntProperty(
name="Preview Min Samples",
description="Minimum samples for preview renders",
default=0,
min=0, soft_max=4,
)
rman_preview_renders_maxSamples: IntProperty(
name="Preview Max Samples",
description="Maximum samples for preview renders",
default=1,
min=1, soft_max=4,
)
rman_preview_renders_pixelVariance: FloatProperty(
name="Pixel Variance",
description="Maximum samples for preview renders",
default=0.15,
min=0.001, soft_max=0.5,
)
rman_viewport_draw_bucket: BoolProperty(
name="Draw Bucket Marker",
description="Unchechk this if you do not want the bucket markers in the viewport",
default=True
)
rman_viewport_draw_progress: BoolProperty(
name="Draw Progress Bar",
description="Unchechk this if you do not want the progress bar in the viewport",
default=True
)
rman_viewport_crop_color: FloatVectorProperty(
name="CropWindow Color",
description="Color of the cropwindow border in the viewport when in IPR.",
default=(0.0, 0.498, 1.0, 1.0),
size=4,
subtype="COLOR")
rman_viewport_bucket_color: FloatVectorProperty(
name="Bucket Marker Color",
description="Color of the bucket markers in the viewport when in IPR.",
default=(0.0, 0.498, 1.0, 1.0),
size=4,
subtype="COLOR")
rman_viewport_progress_color: FloatVectorProperty(
name="Progress Bar Color",
description="Color of the progress bar in the viewport when in IPR.",
default=(0.0, 0.498, 1.0, 1.0),
size=4,
subtype="COLOR")
rman_editor: StringProperty(
name="Editor",
subtype='FILE_PATH',
description="Text editor excutable you want to use to view RIB.",
default=""
)
rman_show_cycles_convert: BoolProperty(
name="Convert Cycles Nodes",
default=False,
description="Add convert Cycles Networks buttons to the material properties panel. N.B.: This isn't guaranteed to fully convert Cycles networks successfully. Also, because of differences in OSL implementations, converted networks may cause stability problems when rendering."
)
rman_render_nurbs_as_mesh: BoolProperty(
name="NURBS as Mesh",
default=True,
description="Render all NURBS surfaces as meshes."
)
rman_emit_default_params: BoolProperty(
name="Emit Default Params",
default=False,
description="Controls whether or not parameters that are not changed from their defaults should be emitted to RenderMan. Turning this on is only useful for debugging purposes."
)
rman_show_advanced_params: BoolProperty(
name="Show Advanced",
default=False,
description="Show advanced preferences"
)
rman_config_dir: StringProperty(
name="Config Directory",
subtype='DIR_PATH',
description="Path to JSON configuration files. Requires a restart.",
default=""
)
rman_viewport_refresh_rate: FloatProperty(
name="Viewport Refresh Rate",
description="The number of seconds to wait before the viewport refreshes during IPR.",
default=0.01,
precision=5,
min=0.00001,
max=0.1
)
# For the preset browser
rpbConfigFile: StringProperty(default='')
rpbUserLibraries: CollectionProperty(type=RendermanPreferencePath)
rpbSelectedLibrary: StringProperty(default='')
rpbSelectedCategory: StringProperty(default='')
rpbSelectedPreset: StringProperty(default='')
    def update_stats_config(self, context):
        """Property-update callback: push the changed stats prefs to RenderMan."""
        bpy.ops.renderman.update_stats_config('INVOKE_DEFAULT')
# For roz stats
rman_roz_logLevel: EnumProperty(
name="Log Level",
default='3',
items=[('0', 'None', ''),
('1', 'Severe', ''),
('2', 'Error', ''),
('3', 'Warning', ''),
('4', 'Info', ''),
('5', 'Debug', ''),
],
description="Change the logging level for the live statistics system.",
update=update_stats_config
)
rman_roz_grpcServer: BoolProperty(name="Send Stats to 'it' HUD", default=True,
description="Turn this off if you don't want stats to be sent to the 'it' HUD.",
update=update_stats_config)
rman_roz_webSocketServer: BoolProperty(name="Enable Live Stats", default=False,
description="Turning this off will disable the live statistics system in RfB.",
update=update_stats_config)
rman_roz_webSocketServer_Port: IntProperty(name="Port", default=9723,
min=0,
description="Port number of the live stats server to use.",
update=update_stats_config)
    def draw_xpu_devices(self, context, layout):
        """Draw the device checkboxes/selector for the chosen XPU device type."""
        if self.rman_xpu_device == 'CPU':
            # Only one CPU device entry is ever created.
            device = self.rman_xpu_cpu_devices[0]
            layout.prop(device, 'use', text='%s' % device.name)
        else:
            if len(self.rman_xpu_gpu_devices) < 1:
                layout.label(text="No compatible GPU devices found.", icon='INFO')
            else:
                '''
                ## TODO: For when XPU can support multiple gpu devices...
                for device in self.rman_xpu_gpu_devices:
                    layout.prop(device, 'use', text='%s (%d.%d)' % (device.name, device.version_major, device.version_minor))
                '''
                # Else, we only can select one GPU
                layout.prop(self, 'rman_xpu_gpu_selection')
    def draw(self, context):
        """Draw the addon preferences panel.

        Sections, in order: RMANTREE detection, behavior, XPU (when licensed
        and not on macOS), workspace, texture manager, UI, logging, and an
        expandable advanced section (live stats + misc).
        """
        self.layout.use_property_split = True
        self.layout.use_property_decorate = False
        layout = self.layout
        rman_r_icon = rfb_icons.get_icon("rman_blender")
        row = layout.row()
        row.use_property_split = False
        col = row.column()
        col.prop(self, 'rmantree_method')
        if self.rmantree_method == 'DETECT':
            col.prop(self, 'rmantree_choice')
            if self.rmantree_choice == 'NEWEST':
                if envconfig_utils.reload_envconfig():
                    col.label(text="RMANTREE: %s " % envconfig_utils.reload_envconfig().rmantree)
        elif self.rmantree_method == 'ENV':
            if envconfig_utils.reload_envconfig():
                col.label(text="RMANTREE: %s" % envconfig_utils.reload_envconfig().rmantree)
        else:
            col.prop(self, "path_rmantree")
        # Bail out early when no usable RMANTREE could be resolved.
        if envconfig_utils.reload_envconfig() is None:
            row = layout.row()
            row.alert = True
            row.label(text='Error in RMANTREE. Reload addon to reset.', icon='ERROR')
            return
        # Behavior Prefs
        row = layout.row()
        row.label(text='Behavior', icon_value=rman_r_icon.icon_id)
        row = layout.row()
        col = row.column()
        col.prop(self, 'rman_do_preview_renders')
        col.prop(self, 'rman_render_nurbs_as_mesh')
        col.prop(self, 'rman_show_cycles_convert')
        col.prop(self, 'rman_emit_default_params')
        # XPU Prefs (XPU is unavailable on macOS and needs a license)
        if sys.platform != ("darwin") and envconfig_utils.envconfig().has_xpu_license:
            row = layout.row()
            row.label(text='XPU', icon_value=rman_r_icon.icon_id)
            row = layout.row()
            row.use_property_split = False
            row.prop(self, 'rman_xpu_device', expand=True)
            row = layout.row()
            row.use_property_split = False
            self.find_xpu_devices()
            col = row.column()
            box = col.box()
            self.draw_xpu_devices(context, box)
        # Workspace
        row = layout.row()
        row.label(text='Workspace', icon_value=rman_r_icon.icon_id)
        row = layout.row()
        col = row.column()
        col.prop(self, "rman_scene_version_padding")
        col.prop(self, "rman_scene_take_padding")
        col.prop(self, "rman_scene_version_increment")
        col.prop(self, "rman_scene_take_increment")
        # TxManager
        row = layout.row()
        row.label(text='Texture Manager', icon_value=rman_r_icon.icon_id)
        row = layout.row()
        col = row.column()
        col.prop(self, 'path_fallback_textures_path')
        col.prop(self, 'path_fallback_textures_path_always')
        col.prop(self, "rman_txmanager_workers")
        col.prop(self, "rman_txmanager_keep_extension")
        col.prop(self, "rman_txmanager_tex_extensions")
        # UI Prefs
        row = layout.row()
        row.label(text='UI', icon_value=rman_r_icon.icon_id)
        row = layout.row()
        col = row.column()
        col.prop(self, 'rman_viewport_crop_color')
        col.prop(self, 'rman_viewport_draw_bucket')
        if self.rman_viewport_draw_bucket:
            col.prop(self, 'rman_viewport_bucket_color')
        col.prop(self, 'rman_viewport_draw_progress')
        if self.rman_viewport_draw_progress:
            col.prop(self, 'rman_viewport_progress_color')
        col.prop(self, 'draw_ipr_text')
        col.prop(self, 'draw_panel_icon')
        col.prop(self, 'rman_editor')
        # Logging
        row = layout.row()
        row.label(text='Logging', icon_value=rman_r_icon.icon_id)
        row = layout.row()
        col = row.column()
        col.prop(self, 'rman_logging_level')
        col.prop(self, 'rman_logging_file')
        # Advanced
        row = layout.row()
        row.use_property_split = False
        row.use_property_decorate = True
        row.prop(self, 'rman_show_advanced_params')
        row = layout.row()
        col = row.column()
        ui_open = getattr(self, 'rman_show_advanced_params')
        if ui_open:
            col.label(text='Live Statistics', icon_value=rman_r_icon.icon_id)
            row = col.row()
            col = row.column()
            col.prop(self, 'rman_roz_logLevel')
            col.prop(self, 'rman_roz_grpcServer')
            col.prop(self, 'rman_roz_webSocketServer')
            if self.rman_roz_webSocketServer:
                # rman_stats may be unavailable; degrade gracefully.
                try:
                    from .rman_stats import RfBStatsManager
                    stats_mgr = RfBStatsManager.get_stats_manager()
                    split = layout.split()
                    row = split.row()
                    col.prop(self, 'rman_roz_webSocketServer_Port', slider=False)
                    col = row.column()
                    col.label(text='')
                    col = row.column()
                    if stats_mgr:
                        if stats_mgr.is_connected():
                            col.operator('renderman.disconnect_stats_render')
                        else:
                            col.operator('renderman.attach_stats_render')
                        col.label(text=' Web Socket Status: %s' % stats_mgr.get_status())
                except Exception as e:
                    rfb_logger.rfb_log().debug("Could not import rman_stats: %s" % str(e))
                    pass
            row = layout.row()
            col = row.column()
            col.label(text='Other', icon_value=rman_r_icon.icon_id)
            col.prop(self, 'rman_viewport_refresh_rate')
            col.prop(self, 'rman_config_dir')
            if self.rman_do_preview_renders:
                col.prop(self, 'rman_preview_renders_minSamples')
                col.prop(self, 'rman_preview_renders_maxSamples')
                col.prop(self, 'rman_preview_renders_pixelVariance')
# Classes (un)registered with Blender by register()/unregister() below.
classes = [
    RendermanPreferencePath,
    RendermanDeviceDesc,
    RendermanPreferences
]
def register():
    """Register this module's classes with Blender, logging any failures."""
    for klass in classes:
        try:
            bpy.utils.register_class(klass)
        except ValueError as err:
            # Registration failures are non-fatal; record them for debugging.
            rfb_logger.rfb_log().debug(
                "Could not register class, %s, because: %s" % (str(klass), str(err)))
def unregister():
    """Unregister this module's classes from Blender, logging any failures.

    Fix: log through rfb_logger.rfb_log() like register() does -- the bare
    rfb_log() name used before is not referenced anywhere else in this module
    and would raise a NameError inside the exception handler.
    """
    for cls in classes:
        try:
            bpy.utils.unregister_class(cls)
        except RuntimeError:
            rfb_logger.rfb_log().debug('Could not unregister class: %s' % str(cls))
|
|
'''
Defines the manager for global hot keys.
@author: Eitan Isaacson
@organization: IBM Corporation
@copyright: Copyright (c) 2006, 2007 IBM Corporation
@license: BSD
All rights reserved. This program and the accompanying materials are made
available under the terms of the BSD which accompanies this distribution, and
is available at U{http://www.opensource.org/licenses/bsd-license.php}
'''
from gi.repository import Gtk as gtk
from gi.repository import Gdk as gdk
from gi.repository.Gio import Settings as GSettings
from .i18n import _
import pyatspi
# GSettings schema/base path under which per-combo hotkey settings live.
HOTKEYS_GSCHEMA = 'org.a11y.Accerciser.hotkeys'
HOTKEYS_BASEPATH = '/org/a11y/accerciser/hotkeys/'

# Column indices of the HotkeyManager ListStore model.
COL_COMPONENT = 0
COL_DESC = 1
COL_CALLBACK = 2
COL_KEYPRESS = 3
COL_MOD = 4
COL_LOCALIZED_COMP = 5
def _charToKeySym(key):
    '''
    A convenience function to convert either a character or a key name to its
    respective keyval.

    @param key: The character or key name to convert.
    @type key: string
    @return: A key symbol
    @rtype: long
    '''
    try:
        rv = gdk.unicode_to_keyval(ord(key))
    except (TypeError, ValueError):
        # Not a single character (fix: was a bare except that hid real
        # errors): treat it as a key name and look up the gdk constant.
        rv = getattr(gdk, 'KEY_%s' % key)
    return rv
class HotkeyManager(gtk.ListStore):
    '''
    A model that stores all of the global key bindings. All accerciser components
    that need global hotkeys should register the key combination and callback
    with the main instance of this class.

    Columns: component, description, callback, keypress, modifiers and the
    localized component name -- see the COL_* module constants.
    '''
    def __init__(self):
        '''
        Constructor for the L{HotkeyManager}
        '''
        gtk.ListStore.__init__(self, str, str, object, int, int, str)
        # Persist combo edits to GSettings whenever a row changes.
        self.connect('row-changed', self._onComboChanged)
        # Register for key presses under every modifier mask so combos fire
        # regardless of extra lock modifiers.
        masks = [mask for mask in pyatspi.allModifiers()]
        pyatspi.Registry.registerKeystrokeListener(
            self._accEventKeyPressed, mask=masks, kind=(pyatspi.KEY_PRESSED_EVENT,))

    def _accEventKeyPressed(self, event):
        '''
        Handle certain key presses globally. Pass on to the hotkey manager the
        key combinations pressed for further processing.

        @param event: The event that is being handled.
        @type event: L{pyatspi.event.Event}
        '''
        handled = self.hotkeyPress(event.hw_code, event.modifiers)
        # Consume the keystroke only when some combo actually handled it.
        event.consume = handled

    def hotkeyPress(self, key, modifiers):
        '''
        Call the appropriate callbacks for given key combination. This method
        should be called by an at-spi keyboard:press event handler in the
        main program.

        @param key: The pressed key code.
        @type key: integer
        @param modifiers: The modifiers that were depressed during the keystroke.
        @type modifiers: integer
        @return: True if some callback was invoked.
        @rtype: boolean
        '''
        km = gdk.Keymap.get_default()
        callback = None
        for combo in self:
            success, entries = km.get_entries_for_keyval(combo[COL_KEYPRESS])
            if not success: continue
            # Match when the hardware keycode maps to this combo's keyval and
            # every one of the combo's modifiers is held down.
            if key in [int(entry.keycode) for entry in entries] and \
               modifiers & combo[COL_MOD] == combo[COL_MOD]:
                callback = combo[COL_CALLBACK]
        if callback:
            callback()
        return bool(callback)

    def addKeyCombo(self, component, localized_component, description,
                    callback, keypress, modifiers):
        '''
        Adds the given key combination with the appropriate callbacks to
        the L{HotkeyManager}. If an identical description with the identical
        component already exists in the model, just reassign with the new callback.
        I{Note:} It is important that the component and description strings be
        unique.

        @param component: The component name, usually the plugin name, or "Core".
        @type component: string
        @param localized_component: Translated component name used for display.
        @type localized_component: string
        @param description: A description of the action performed during the given
        keycombo.
        @type description: string
        @param callback: The callback to call when the given key combination
        is pressed.
        @type callback: callable
        @param keypress: The key symbol of the keystroke that performs given operation.
        @type keypress: long
        @param modifiers: The modifiers that must be depressed for function to
        be performed.
        @type modifiers: int
        '''
        component_desc_pairs = list(zip([row[COL_COMPONENT] for row in self],
                                        [row[COL_DESC] for row in self]))
        if (component, description) in component_desc_pairs:
            # Combo re-registered (e.g. plugin reload): keep the stored keys,
            # just swap in the fresh callback.
            path = component_desc_pairs.index((component, description))
            self[path][COL_CALLBACK] = callback
        else:
            # Prefer a user-customized combo from GSettings over the default.
            gspath = self._getComboGSettingsPath(component, description)
            gsettings = GSettings.new_with_path(HOTKEYS_GSCHEMA, gspath)
            if gsettings.get_string('hotkey-combo'):
                final_keypress, final_modifiers = gtk.accelerator_parse(
                    gsettings.get_string('hotkey-combo'))
            else:
                final_keypress, final_modifiers = keypress, modifiers
            self.append([component, description, callback,
                         int(final_keypress), final_modifiers, localized_component])

    def removeKeyCombo(self, component, description, callback, key, modifiers):
        '''
        Removes the given callback from L{HotkeyManager}. It does not erase the
        entire key combo entry.

        @param component: The component name, usually the plugin name, or "Core".
        @type component: string
        @param description: A description of the action performed during the given
        keycombo.
        @type description: string
        @param callback: The callback to call when the given key combination
        is pressed.
        @type callback: callable
        @param key: The key symbol of the keystroke that performs given operation.
        @type key: long
        @param modifiers: The modifiers that must be depressed for function to
        be performed.
        @type modifiers: int
        '''
        iter = self.get_iter_first()
        while iter:
            if self[iter][COL_CALLBACK] == callback:
                # We never really remove the row; clearing the callback hides
                # it from filtered views and stops the combo from firing.
                self[iter][COL_CALLBACK] = ''
            iter = self.iter_next(iter)

    def _onComboChanged(self, model, path, iter):
        '''
        Callback for row changes. Copies the changed key combos over to gsettings.

        @param model: The model that emitted the signal. Should be this class instance.
        @type model: L{gtk.TreeModel}
        @param path: The path of the row that has changed.
        @type path: tuple
        @param iter: The iter of the row that has changed.
        @type iter: L{gtk.TreeIter}
        '''
        if not model[iter][COL_COMPONENT] or not model[iter][COL_DESC]:
            # Row not fully populated yet; nothing to persist.
            return
        gspath = self._getComboGSettingsPath(model[iter][COL_COMPONENT],
                                             model[iter][COL_DESC])
        gsettings = GSettings.new_with_path(HOTKEYS_GSCHEMA, gspath)
        combo_name = gtk.accelerator_name(model[iter][COL_KEYPRESS],
                                          gdk.ModifierType(model[iter][COL_MOD]))
        key = gsettings.get_string('hotkey-combo')
        if key != combo_name and key != '/':
            gsettings.set_string('hotkey-combo', combo_name)

    def _getComboGSettingsPath(self, component, description):
        '''
        Useful method that builds and returns a gsettings path for a key combo.

        @param component: The component of the hotkey.
        @type component: string
        @param description: The description of the hotkey action
        @type description: string
        @return: A full gsettings path
        @rtype: string
        '''
        dash_component = self.__dasherize(component)
        dash_description = self.__dasherize(description)
        path = '/'.join([dash_component, dash_description])
        return HOTKEYS_BASEPATH + path + '/'

    def __dasherize(self, item):
        '''
        This method dasherizes and decapitalizes a given string.

        @param item: The given string
        @type item: string
        @return: A dasherized and decapitalized string
        @rtype: string
        '''
        return item.lower().replace(' ', '-')
class HotkeyTreeView(gtk.TreeView):
    '''
    A tree view of the various global hotkey combinations. The keys and
    modifiers could also be changed through this widget.
    '''
    def __init__(self, hotkey_manager):
        '''
        Construct the tree view with the given L{HotkeyManager}.

        @ivar hotkey_manager: The manager we wish to view.
        @type hotkey_manager: L{HotkeyManager}
        @param hotkey_manager: The manager we wish to view.
        @type hotkey_manager: L{HotkeyManager}
        '''
        gtk.TreeView.__init__(self)
        self.hotkey_manager = hotkey_manager
        # Filter out rows whose callback was cleared (disabled plugins).
        modelfilter = self.hotkey_manager.filter_new(None)
        modelfilter.set_visible_func(self._rowVisibleFunc, None)
        self.set_model(modelfilter)
        crt = gtk.CellRendererText()
        tvc = gtk.TreeViewColumn(_('Component'))
        tvc.pack_start(crt, True)
        tvc.add_attribute(crt, 'text', COL_COMPONENT)
        tvc.set_cell_data_func(crt, self._componentDataFunc, COL_COMPONENT)
        self.append_column(tvc)
        crt = gtk.CellRendererText()
        tvc = gtk.TreeViewColumn(_('Task'))
        tvc.pack_start(crt, True)
        tvc.add_attribute(crt, 'text', COL_DESC)
        tvc.set_cell_data_func(crt, self._translateDataFunc, COL_DESC)
        self.append_column(tvc)
        # Editable key column: typing a new value re-binds the combo.
        crt = gtk.CellRendererText()
        tvc = gtk.TreeViewColumn(_('Key'))
        tvc.set_min_width(64)
        tvc.pack_start(crt, True)
        crt.props.editable = True
        tvc.add_attribute(crt, 'text', COL_KEYPRESS)
        tvc.set_cell_data_func(crt, self._keyCellFunc)
        crt.connect('edited', self._onKeyChanged)
        self.append_column(tvc)
        # One toggle column per supported modifier (Alt, Ctrl, Shift).
        crt = gtk.CellRendererToggle()
        tvc = gtk.TreeViewColumn(_('Alt'))
        tvc.pack_start(crt, True)
        tvc.set_cell_data_func(crt, self._modCellFunc, gdk.ModifierType.MOD1_MASK)
        crt.connect('toggled', self._onModToggled, gdk.ModifierType.MOD1_MASK)
        self.append_column(tvc)
        crt = gtk.CellRendererToggle()
        tvc = gtk.TreeViewColumn(_('Ctrl'))
        tvc.pack_start(crt, True)
        tvc.set_cell_data_func(crt, self._modCellFunc, \
                               gdk.ModifierType.CONTROL_MASK)
        crt.connect('toggled', self._onModToggled, gdk.ModifierType.CONTROL_MASK)
        self.append_column(tvc)
        crt = gtk.CellRendererToggle()
        tvc = gtk.TreeViewColumn(_('Shift'))
        tvc.pack_start(crt, True)
        tvc.set_cell_data_func(crt, self._modCellFunc, gdk.ModifierType.SHIFT_MASK)
        crt.connect('toggled', self._onModToggled, gdk.ModifierType.SHIFT_MASK)
        self.append_column(tvc)

    def _translateDataFunc(self, column, cell, model, iter, column_id):
        '''
        Show the task description as a translated string.

        @param column: The treeview column of the cell renderer.
        @type column: L{gtk.TreeViewColumn}
        @param cell: The cell renderer we need to modify.
        @type cell: L{gtk.CellRendererText}
        @param model: The treeview's model.
        @type model: L{gtk.ListStore}
        @param iter: The iter of the given cell data.
        @type iter: L{gtk.TreeIter}
        @param column_id: Model column holding the text to translate.
        @type column_id: integer
        '''
        cell.set_property('text', _(model[iter][column_id]))

    def _componentDataFunc(self, column, cell, model, iter, column_id):
        '''
        Show the component name, preferring its localized form.

        @param column: The treeview column of the cell renderer.
        @type column: L{gtk.TreeViewColumn}
        @param cell: The cell renderer we need to modify.
        @type cell: L{gtk.CellRendererText}
        @param model: The treeview's model.
        @type model: L{gtk.ListStore}
        @param iter: The iter of the given cell data.
        @type iter: L{gtk.TreeIter}
        @param column_id: Model column of the component name (unused fallback key).
        @type column_id: integer
        '''
        cell.set_property('text', model[iter][COL_LOCALIZED_COMP] or \
                          model[iter][COL_COMPONENT])

    def _keyCellFunc(self, column, cell, model, iter, foo=None):
        '''
        Show the key symbol as a string for easy readability.

        @param column: The treeview column of the cell renderer.
        @type column: L{gtk.TreeViewColumn}
        @param cell: The cell renderer we need to modify.
        @type cell: L{gtk.CellRendererText}
        @param model: The treeview's model.
        @type model: L{gtk.ListStore}
        @param iter: The iter of the given cell data.
        @type iter: L{gtk.TreeIter}
        '''
        if model[iter][COL_KEYPRESS] > 0:
            cell.set_property('text',
                              gdk.keyval_name(model[iter][COL_KEYPRESS]))
            cell.set_property('sensitive', True)
        else:
            # No key bound yet: show a greyed-out placeholder.
            cell.set_property('text', '<select key>')
            cell.set_property('sensitive', False)

    def _modCellFunc(self, column, cell, model, iter, mask):
        '''
        Show the given modifier mask as toggled or not.

        @param column: The treeview column of the cell renderer.
        @type column: L{gtk.TreeViewColumn}
        @param cell: The cell renderer we need to modify.
        @type cell: L{gtk.CellRendererText}
        @param model: The treeview's model.
        @type model: L{gtk.ListStore}
        @param iter: The iter of the given cell data.
        @type iter: L{gtk.TreeIter}
        @param mask: A modifier mask.
        @type mask: integer
        '''
        cell.set_property('active', bool(mask & model[iter][COL_MOD]))

    def _onKeyChanged(self, cellrenderertext, path, new_text):
        '''
        A callback for the key cellrenderer when 'edited'. Model must be
        changed accordingly.

        @param cellrenderertext: The cell renderer that emitted the signal
        @type cellrenderertext: L{gtk.CellRendererText}
        @param path: Path of the edited cellrenderer.
        @type path: tuple
        @param new_text: The new text that was entered.
        @type new_text: string
        '''
        keysym = -1
        if new_text:
            try:
                keysym = _charToKeySym(new_text)
            except:
                # Fall back to interpreting just the first character.
                keysym = _charToKeySym(new_text[0])
        self.hotkey_manager[path][COL_KEYPRESS] = int(keysym)

    def _onModToggled(self, renderer_toggle, path, mask):
        '''
        A callback for the modifiers' cellrenderers when 'toggled'.
        Model must be changed accordingly.

        @param renderer_toggle: The cell renderer that emitted the signal
        @type renderer_toggle: L{gtk.CellRendererToggle}
        @param path: Path of the edited cellrenderer.
        @type path: tuple
        @param mask: Modifier mask that must be inverted.
        @type mask: integer
        '''
        self.hotkey_manager[path][COL_MOD] ^= mask

    def _rowVisibleFunc(self, model, iter, foo=None):
        '''
        A filter function to hide the rows that do not contain valid callbacks.
        This is usually the case when a plugin is disabled.

        @param model: The view's model.
        @type model: L{gtk.ListStore}
        @param iter: The iter of the row in question.
        @type iter: L{gtk.TreeIter}
        @return: True if row should be displayed.
        @rtype: boolean
        '''
        return bool(model[iter][COL_CALLBACK])
|
|
# -*- coding: utf-8 -*-
import io
import operator
import os
import re
import sys
import signal
import tempfile
import subprocess
import argparse
try:
import psutil
except ImportError:
psutil = None
def _log_info(msg, **kwds):
if kwds:
msg = msg.format(**kwds)
sys.stdout.write(msg)
sys.stdout.write('\n')
def _log_error(msg, **kwds):
if kwds:
msg = msg.format(**kwds)
sys.stderr.write(msg)
sys.stderr.write('\n')
def _parse_cli_arguments():
parser = argparse.ArgumentParser(prog='gossc',
description='high-level screen manager')
subparsers = parser.add_subparsers(title='action', dest='action')
# create the parser for the "init" command
parser_init = subparsers.add_parser('init',
help='init screen')
parser_init.add_argument('screen_name',
help='screen name')
parser_init.add_argument('--lines',
dest='lines',
type=int,
default=10000,
help='output buffer lines')
# create the parser for the "exec" command
parser_exec = subparsers.add_parser('exec',
help='execute commands in screen')
parser_exec.add_argument('screen_name',
help='screen name')
parser_exec.add_argument('script_name',
nargs='?',
default=None,
help='script name')
# create the parser for the "plist" command
parser_plist = subparsers.add_parser('plist',
help='list all processes in screen')
parser_plist.add_argument('screen_name',
help='screen name')
# create the parser for the "psck" command
parser_psck = subparsers.add_parser('psck',
help='check processes in screen')
parser_psck.add_argument('screen_name',
help='screen name')
parser_psck.add_argument('patterns',
nargs='?',
default=None,
help='patterns of entry')
# create the parser for the "plist" command
parser_pkill = subparsers.add_parser('pkill',
help='kill all processes in screen')
parser_pkill.add_argument('screen_name',
help='screen name')
parser_pkill.add_argument('--force',
dest='force',
action='store_true',
default=False,
help='force kill')
return parser.parse_args(sys.argv[1:])
def _find_screens(screen_name):
    """Return pids of running GNU screen sessions named *screen_name*.

    Parses `screen -ls` output; session lines are tab-indented and start
    with "<pid>.<sockname>".
    """
    command = ['screen', '-ls', screen_name]
    process = subprocess.Popen(command, stdout=subprocess.PIPE)
    output, unused_err = process.communicate()
    unused_retcode = process.poll()  # `screen -ls` always return 1
    screens = []
    screen_suffix = "." + screen_name
    # NOTE(review): `output` is bytes but is compared against str below --
    # this assumes Python 2 (matches the .iteritems() usage elsewhere in this
    # file); under Python 3 no line would ever match. Confirm target runtime.
    for raw_line in io.BytesIO(output):
        if not raw_line.startswith("\t"):
            continue
        screen_sockname = raw_line.strip().partition("\t")[0]
        if screen_sockname.endswith(screen_suffix):
            screen_pid = int(screen_sockname.partition(".")[0])
            screens.append(screen_pid)
    return screens
def init_screen(namespace):
    """Create the named screen if missing; otherwise clear its input line."""
    name = namespace.screen_name
    existing = _find_screens(name)
    if existing:
        # Session already running: wipe any half-typed input (^U) in window 0.
        cmd = ['screen', '-x', str(existing[0]),
               '-p', '0', '-X', 'eval', 'stuff ^U']
    else:
        _log_info("create screen [{screen_name}]", screen_name=name)
        cmd = ['screen', '-dmS', name,
               '-h', str(namespace.lines)]
    subprocess.call(cmd)
def exec_jobs(namespace):
    """Paste a script (from a file, or stdin) into the screen's window 0.

    The script is staged in a temp file, loaded into a screen paste register
    and pasted, so multi-line scripts arrive exactly as written.
    """
    screen_name = namespace.screen_name
    script_name = namespace.script_name
    screens = _find_screens(screen_name)
    if not screens:
        _log_error("screen not exists [{screen_name}]",
                   screen_name=screen_name)
        return
    if script_name is not None:
        try:
            stream = open(script_name, 'r')
        except IOError:
            _log_error("script not exists [{script_name}]",
                       script_name=script_name)
            return
    else:
        stream = sys.stdin
    script_key = 'x'
    screen_pid = screens[0]
    script_fd, script_path = tempfile.mkstemp(prefix='gospel-')
    try:
        os.write(script_fd, '\n')  # add an additional '\n' ahead of the script
        for line in stream:
            # Normalize line endings so CRLF scripts paste cleanly.
            os.write(script_fd, line.rstrip('\r\n') + '\n')
    finally:
        os.close(script_fd)
        if stream is not sys.stdin:
            stream.close()  # fix: the opened script file was never closed
    command = ['screen', '-x', str(screen_pid),
               '-X', 'readreg', script_key, script_path]
    subprocess.call(command)
    command = ['screen', '-x', str(screen_pid),
               '-p', '0', '-X', 'paste', script_key]
    subprocess.call(command)
    os.remove(script_path)
def _get_processes_in_screen(screen_pid, with_cmdline=False):
    """Yield pids (optionally with command lines) of jobs inside a screen.

    Walks the process tree under the screen session, skipping the optional
    intermediate `login` process and the shell, and yields the shell's
    children -- i.e. the user's actual jobs.

    Yields:
        pid, or (pid, cmdline) tuples when *with_cmdline* is true.
    """
    if psutil is None:
        # psutil is an optional dependency; degrade to an empty generator.
        _log_error("No module named 'psutil'")
        return
    screen_proc = psutil.Process(screen_pid)
    # psutil 2.0 turned name/cmdline into methods; pick matching accessors.
    if psutil.version_info[0] >= 2:
        # psutil >= 2.0
        get_name = operator.methodcaller('name')
        get_cmdline = operator.methodcaller('cmdline')
        get_children = operator.methodcaller('children')
    else:
        get_name = operator.attrgetter('name')
        get_cmdline = operator.attrgetter('cmdline')
        get_children = operator.methodcaller('get_children')
    for level3_proc in get_children(screen_proc):
        if get_name(level3_proc) == 'login':
            # pstree: screen -- login -- sh
            level2_proc_list = get_children(level3_proc)
        else:
            # pstree: screen -- sh
            level2_proc_list = [level3_proc]
        for level2_proc in level2_proc_list:
            for level1_proc in get_children(level2_proc):
                if with_cmdline:
                    yield level1_proc.pid, get_cmdline(level1_proc)
                else:
                    yield level1_proc.pid
def plist_jobs(namespace):
    """Print the pid of every job running inside the named screen."""
    screens = _find_screens(namespace.screen_name)
    if not screens:
        _log_error("screen not exists [{screen_name}]",
                   screen_name=namespace.screen_name)
        return
    for child_pid in _get_processes_in_screen(screens[0]):
        _log_info("{child_pid}", child_pid=child_pid)
def psck_jobs(namespace):
    """Check that each pattern entry matches a distinct process in the screen.

    Entries come from the `patterns` argument or stdin, one per line; each
    entry is a '&&'-separated list of regexes that must all match some
    argument of a single process command line. Exits with the number of
    unmatched entries, or 255 when nothing matched.
    """
    screen_name = namespace.screen_name
    screens = _find_screens(screen_name)
    if not screens:
        _log_error("screen not exists [{screen_name}]",
                   screen_name=screen_name)
        return
    raw_patterns = namespace.patterns
    if raw_patterns is None:
        stream = sys.stdin
    else:
        stream = raw_patterns.splitlines()
    entries = []
    for line in stream:
        line = line.strip()
        if not line:
            continue
        # Fix: renamed from `patterns`, which shadowed the input variable.
        regexes = []
        for regex in line.split('&&'):
            regex = regex.strip()
            if not regex:
                continue
            regexes.append(re.compile(regex))
        if regexes:
            entries.append((line, tuple(regexes)))
    if not entries:
        return
    mismatched = 0
    processes = dict(_get_processes_in_screen(screens[0], with_cmdline=True))
    for line, regexes in entries:
        matched_pid = None
        # Fix: .items() instead of .iteritems() -- identical behavior on
        # Python 2, and keeps the function Python 3 compatible.
        for child_pid, arguments in processes.items():
            if all(any(pattern.search(arg)
                       for arg in arguments)
                   for pattern in regexes):
                matched_pid = child_pid
                break
        if matched_pid is None:
            mismatched += 1
            _log_error('{pid}\t{entry}', pid='NIL', entry=line)
        else:
            # Claim the process so one process cannot satisfy two entries.
            processes.pop(matched_pid, None)
            _log_info('{pid}\t{entry}', pid=matched_pid, entry=line)
    if mismatched == len(entries):
        exit(code=255)
    else:
        exit(code=mismatched)
def pkill_jobs(namespace):
    """Signal every job inside the screen: SIGKILL with --force, else SIGINT."""
    screens = _find_screens(namespace.screen_name)
    if not screens:
        _log_error("screen not exists [{screen_name}]",
                   screen_name=namespace.screen_name)
        return
    sig = signal.SIGKILL if namespace.force else signal.SIGINT
    for child_pid in _get_processes_in_screen(screens[0]):
        os.kill(child_pid, sig)
def main():
    """Parse the command line and dispatch the chosen action to its handler."""
    namespace = _parse_cli_arguments()
    handlers = {
        'init': init_screen,
        'exec': exec_jobs,
        'plist': plist_jobs,
        'psck': psck_jobs,
        'pkill': pkill_jobs,
    }
    handlers[namespace.action](namespace)
if __name__ == '__main__':
main()
|
|
import os
import re
import threading
import time
from xml.dom import minidom
from lib import cover_creator as cover_creator_module
from lib import data_manager as data_manager_module
from lib import description_page as description_page_module
from lib import epub_zip as epub_zip_module
from lib import story_json as story_json_module
from lib import util as util_module
from lib import values as values_module
# Download URL template for a story's epub on fimfiction.
_EPUB_URL = "https://www.fimfiction.net/download_epub.php?story={story_id}"

# Locks serializing access to the work queue, stdout, and the data manager.
_EPUB_LOCK = threading.Lock()
_PRINT_LOCK = threading.Lock()
_DATA_LOCK = threading.Lock()

_CONSUMER_NUM = 5 # Magic number for number of consumer threads.
class EpubProducer(threading.Thread):
    """Hands out epub filenames from the originals directory to consumers."""

    def __init__(self):
        super(EpubProducer, self).__init__()
        # Reversed so request() (which pops from the end) serves files in
        # directory-listing order.
        self.listOfEpubs = os.listdir(values_module.ORIGINALS_DIR)[::-1]

    def request(self):
        """Return the next epub filename, or '' when none remain."""
        if self.listOfEpubs:  # idiom fix: truthiness instead of len()
            return self.listOfEpubs.pop(-1)
        return ''

    def remaining(self):
        """Return the number of epubs not yet handed out."""
        return len(self.listOfEpubs)

    def run(self):
        # Stay alive until consumers drain the queue. Fix: the previous
        # `while self.listOfEpubs: pass` busy-waited, pinning a CPU core;
        # a short sleep keeps the same lifetime without the spin.
        while self.listOfEpubs:
            time.sleep(0.05)
class EpubConsumer(threading.Thread):
    """Worker thread: pulls epub filenames from the producer and updates them."""

    def __init__(self, producer, data_manager):
        # producer: EpubProducer serving filenames.
        # data_manager: shared store tracking per-story update state.
        super(EpubConsumer, self).__init__()
        self.producer = producer
        self.data_manager = data_manager

    def run(self):
        # Keep pulling until the producer's queue is drained.
        while self.producer.remaining():
            # Lock held only around the queue pop; processing is concurrent.
            with _EPUB_LOCK:
                epub_filename = self.producer.request()
            if epub_filename:
                try:
                    self.check_for_updates(epub_filename)
                except Exception as e:
                    with _PRINT_LOCK:
                        print (epub_filename + ' had an error.').upper()

    def check_for_updates(self, epub_filename):
        """Checks if the epub needs update and performs updates if necessary.

        Args:
            epub_filename (str): The filename of the epub.
        """
        original_epub_filepath = os.path.join(
            values_module.ORIGINALS_DIR, epub_filename)
        # Unzip the original into the updated dir for inspection.
        epub_dir = epub_zip_module.expand(
            original_epub_filepath, new_location=values_module.UPDATED_DIR)
        util_module.correct_meta(epub_dir)
        story_json = self.get_story_json(epub_dir)
        if story_json is None:
            return
        story_id = story_json.get_id()
        date_modified = story_json.get_date_modified()
        if not os.path.exists(
                os.path.join(values_module.UPDATED_DIR, epub_filename)):
            # No updated copy yet: record the epub and build the update.
            with _DATA_LOCK:
                self.data_manager.update_epub_binary(story_id, date_modified)
            self.update_story(epub_dir, story_json)
        else:
            with _DATA_LOCK:
                epub_needs_update = self.data_manager.does_epub_needs_update(
                    story_id, date_modified, update=True)
            if epub_needs_update:
                self.update_story(epub_dir, story_json)
            else:
                with _PRINT_LOCK:
                    print '{title} is up to date.'.format(
                        title=story_json.get_title())
                # Nothing to do: discard the unzipped working copy.
                epub_zip_module.remove(epub_dir)

    def get_story_json(self, epub_dir):
        """Retrieves the Story JSON.

        Args:
            epub_dir (string): Directory of the unzipped epub.

        Returns:
            StoryJson object loaded with the story details, or None (implicit)
            when the story id cannot be resolved.
        """
        story_id = None
        epub_opf = os.path.join(epub_dir, 'book.opf')
        try:
            # The story URL lives in the OPF's dc:identifier element.
            epub_opf_doc = minidom.parse(epub_opf)
            identifier = epub_opf_doc.getElementsByTagName('dc:identifier')[0]
            story_url = identifier.childNodes[0].data
            story_id = re.match(
                r'https?://www.fimfiction.net/story/(\d+)/', story_url).group(1)
            return story_json_module.StoryJson(int(story_id))
        except story_json_module.InvalidStoryIdError:
            with _PRINT_LOCK:
                print 'Story does not exist.',
                print (story_id if story_id else 'Unknown Story Id.',
                       epub_dir.rsplit(os.sep, 1)[1])
            epub_zip_module.remove(epub_dir)

    def download_epub(self, epub_dir, story_id):
        """Downloads a fresh copy of the epub from fimfiction.net

        Args:
            epub_dir (str): Directory of the unzipped epub.
            story_id (int): ID of the story.
        """
        epub_url = _EPUB_URL.format(story_id=story_id)
        epub_filename = epub_dir + '.epub'
        response = util_module.http_get_request(epub_url)
        # Replace both the working dir and any stale epub with the download.
        epub_zip_module.remove(epub_dir)
        epub_zip_module.remove(epub_filename)
        with open(epub_filename, 'wb') as epub_file:
            epub_file.write(response.read())
        epub_zip_module.expand(
            epub_filename, new_location=values_module.UPDATED_DIR)
        util_module.correct_meta(epub_dir)

    def update_story(self, epub_dir, story_json):
        """Updates the story with a cover and a description page.

        Args:
            epub_dir (str): Directory of the unzipped epub.
            story_json (StoryJson): Story JSON object.
        """
        self.download_epub(epub_dir, story_json.get_id())
        # Create the cover for the epub.
        cover_creator = (
            cover_creator_module.CoverCreator(epub_dir, story_json))
        cover_creator.create_cover()
        # Create the description page for the epub.
        description_page = (
            description_page_module.DescriptionPage(epub_dir, story_json))
        description_page.create_page()
        # Download images found in the description of the epub.
        story_json.download_images(epub_dir)
        epub_zip_module.compress(epub_dir, remove_dir=True)
        with _PRINT_LOCK:
            print '{title} has been updated.'.format(
                title=story_json.get_title())
def setup():
    """Create the expected folders and remove any leftover subdirectories."""
    for directory in values_module.DIRECTORIES:
        # Create Originals and Updated directories when absent.
        if not os.path.exists(directory):
            os.makedirs(directory)
        # Any subdirectory is a stale unzipped epub from a prior run.
        for entry in os.listdir(directory):
            path = os.path.join(directory, entry)
            if os.path.isdir(path):
                epub_zip_module.remove(path)
def main():
    """Runs through all of the epubs and updates them."""
    setup()
    data_manager = data_manager_module.DataManager()
    producer = EpubProducer()
    consumers = [
        EpubConsumer(producer, data_manager) for n in range(_CONSUMER_NUM)]
    producer.start()
    # Start all consumers, then wait for producer and consumers to finish.
    [consumer.start() for consumer in consumers]
    producer.join()
    [consumer.join() for consumer in consumers]
    # Persist the updated per-story state once all threads are done.
    data_manager.write_seen_blocks()
    print '\nAll stories updated.'
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
from unicodedata import normalize
from flask import Flask, render_template, request, abort, session, flash, redirect, url_for, Response, g
from flaskext.markdown import Markdown
from werkzeug.contrib.atom import AtomFeed
from werkzeug import secure_filename
from urlparse import urljoin
from pymongo import Connection
from bson import ObjectId
import ConfigParser
import datetime
import os, sys, logging
logging.basicConfig(stream=sys.stderr)

app = Flask(__name__)
Markdown(app)  # registers the markdown template filter on the app

# MongoDB: 'tinyblog' database on the default localhost connection.
con = Connection()
col = con.tinyblog

basedir = os.path.dirname(os.path.realpath(__file__))
config = ConfigParser.ConfigParser()
# NOTE(review): config.ini is opened without a close and must exist next to
# this file, or import fails -- confirm deployment layout.
config.readfp(open(basedir+'/config.ini'))
@app.route('/')
def index():
    """Front page: the ten most recently updated published articles."""
    data = {
        'articles' : col.article.find({'published':"1"}).sort('updated', -1).limit(10),
        'page' : 1,
        # NOTE(review): counts *all* articles, unpublished included, while the
        # listing filters on published -- confirm pagination totals are intended.
        'total' : col.article.count()
    }
    return render_template('index.html', data=data)
@app.route('/page/<num>')
def page(num) :
    """Paginated listing of published articles, ten per page.

    Fix: `num` comes straight from the URL; a non-numeric value used to raise
    ValueError (HTTP 500). Return 404 instead, and clamp the skip with max().
    """
    try:
        page_num = int(num)
    except ValueError:
        return abort(404)
    skip = max(0, 10 * page_num - 10)
    data = {
        'articles' : col.article.find({'published':"1"}).sort('updated', -1).limit(10).skip(skip),
        'page' : page_num,
        'total' : col.article.count()
    }
    return render_template('index.html', data=data)
# Absolute path where uploaded media files are written (see upload()).
UPLOAD_FOLDER = basedir+'/uploads'
# Whitelist of acceptable image extensions (stored lower-case).
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
def allowed_file(filename):
    """Return True if *filename* ends in a whitelisted image extension.

    Fix: the comparison is now case-insensitive -- 'photo.PNG' was
    previously rejected because the raw extension was compared against
    the lower-case whitelist.
    """
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/upload', methods=['POST'])
def upload() :
    """Save uploaded image files and return the URL of the first one.

    Requires an authenticated session. Files with non-whitelisted
    extensions are skipped.

    Fixes: when no acceptable file was sent, ``saved_files_urls[0]``
    raised IndexError (HTTP 500) -- now returns 400. The dead
    ``request.method`` check (the route only accepts POST) and the
    bare try/except around a plain list append were removed.
    """
    if 'auth' not in session :
        return redirect(url_for('auth'))
    saved_files_urls = []
    for key, file in request.files.iteritems():
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(UPLOAD_FOLDER, filename))
            # Record the upload in the media collection.
            ident = ObjectId()
            col.media.insert({'filename' : filename, '_id' : ident})
            saved_files_urls.append('/uploads/'+filename)
    if not saved_files_urls :
        return abort(400)
    return saved_files_urls[0]
@app.route('/<slug>', methods=['GET','POST'])
def view(slug) :
    """Render one article with its comments; accept new comments via POST.

    New comments are stored unapproved (``approved: 0``) and await
    moderation (see approve()/remove()).

    Fixes: an unknown slug now returns 404 instead of rendering the
    template with article=None; comments are sorted on ``created``
    (the timestamp they are actually inserted with below) -- the old
    sort key ``updated`` does not exist on comment documents, leaving
    the order undefined.
    """
    if not slug :
        return abort(404)
    if request.method == 'POST' :
        comment = request.form.get('comment')
        author = request.form.get('author')
        if not comment or not author :
            flash('Failed to post comment due to missing fields', 'danger')
            return redirect('/'+slug)
        data = {
            'comment': comment,
            'slug': request.form.get('slug'),
            'author': author,
            'approved': 0,
            'created' : datetime.datetime.utcnow()
        }
        col.comment.insert(data)
        flash('Your comment is awaiting moderation', 'warning')
        # probably send mod an email
        return redirect('/'+slug)
    criteria = {'slug':slug}
    article = col.article.find_one(criteria)
    if article is None :
        return abort(404)
    # NOTE(review): comments are not filtered on 'approved' here -- if the
    # template does not filter them, unmoderated comments are displayed.
    data = {
        'article' : article,
        'comments' : col.comment.find(criteria).sort('created', -1)
    }
    return render_template('view.html', data=data)
@app.route('/delete/<comment_id>')
def remove(comment_id) :
    """Soft-delete a comment by marking it unapproved, then bounce back.

    Requires an authenticated session; an id that cannot be parsed as
    an ObjectId yields 404.

    Fixes: the bare ``except:`` is narrowed to ``except Exception`` so
    KeyboardInterrupt/SystemExit propagate, and the redirect falls back
    to the front page when no Referer header was sent
    (``redirect(None)`` used to raise).
    """
    if 'auth' not in session :
        return redirect(url_for('auth'))
    try :
        cid = ObjectId(comment_id)
    except Exception :
        return abort(404)
    criteria = {
        '_id' : cid
    }
    data = {
        '$set' : {
            'approved' : 0
        }
    }
    col.comment.update(criteria, data)
    flash('Comment Unapproved', 'success')
    return redirect(request.referrer or url_for('index'))
@app.route('/approve/<comment_id>')
def approve(comment_id) :
    """Mark a comment as approved, then return to the referring page.

    Requires an authenticated session; an id that cannot be parsed as
    an ObjectId yields 404.

    Fixes: the bare ``except:`` is narrowed to ``except Exception`` so
    KeyboardInterrupt/SystemExit propagate, and the redirect falls back
    to the front page when no Referer header was sent
    (``redirect(None)`` used to raise).
    """
    if 'auth' not in session :
        return redirect(url_for('auth'))
    try :
        cid = ObjectId(comment_id)
    except Exception :
        return abort(404)
    criteria = {
        '_id' : cid
    }
    data = {
        '$set' : {
            'approved' : 1
        }
    }
    col.comment.update(criteria, data)
    flash('Comment approved', 'success')
    return redirect(request.referrer or url_for('index'))
@app.route('/settings', methods=['GET','POST'])
def settings() :
    """Show and persist the blog-wide settings document."""
    if 'auth' not in session :
        return redirect(url_for('auth'))
    if request.method == 'POST':
        # Upsert the single settings document, keyed by {'settings': 1}.
        fields = {
            'title' : request.form.get('title'),
            'description' : request.form.get('description'),
            'style' : request.form.get('style'),
            'author' : request.form.get('author'),
        }
        col.settings.update({'settings':1}, {'$set' : fields}, True)
        # Best effort: refresh the settings cached on app.config.
        try :
            set_settings()
        except :
            pass
        flash('Settings Saved Successfully', 'success')
        return redirect(url_for('settings'))
    data = col.settings.find_one({'settings':1})
    return render_template('settings.html', data=data)
@app.route('/edit/<slug>', methods=['GET','POST'])
def edit(slug) :
    """Create or update an article, upserting on its slug.

    Requires an authenticated session.

    Fix: a POST without a 'categories' form field used to crash with
    ``AttributeError: 'NoneType' object has no attribute 'split'``;
    it is now treated as an empty category list source.
    """
    if 'auth' not in session :
        return redirect(url_for('auth'))
    if request.method == 'POST':
        title = request.form.get('title')
        if not title :
            return redirect('/edit/new')
        slug = request.form.get('slug')
        if not slug :
            # Derive the URL slug from the title when none was supplied.
            slug = slugify(title)
        criteria = {'slug':slug}
        raw_categories = request.form.get('categories') or ''
        categories = [x.strip() for x in raw_categories.split(',')]
        data = {
            '$set' : {
                'title' : title,
                'slug' : slug,
                'body' : request.form.get('body'),
                'image' : request.form.get('image'),
                'overview' : request.form.get('overview'),
                'published' : request.form.get('published'),
                'categories' : categories,
                'updated' : datetime.datetime.utcnow()
            }
        }
        col.article.update(criteria, data, True)
        flash('Post Saved Successfully', 'success')
        return redirect('/edit/'+slug)
    data = col.article.find_one({'slug':slug})
    return render_template('edit.html', data=data)
@app.route('/auth', methods=['GET','POST'])
def auth() :
    """Log the admin in against the credentials stored in config.ini."""
    if request.method == 'POST' :
        username = request.form.get('username')
        password = request.form.get('password')
        # NOTE(review): plaintext comparison against config values;
        # consider hashed passwords and a constant-time compare.
        ok = (username == config.get('tinyblog', 'username')
              and password == config.get('tinyblog', 'password'))
        if not ok :
            flash('Incorrect username / password', 'danger')
        else :
            session['auth'] = True
            flash('Welcome '+username, 'success')
            return redirect(url_for('index'))
    data = {}
    return render_template('auth.html', data=data)
@app.route('/category/<category>')
def category(category) :
    """List articles tagged with *category*.

    Fix: restricted to published articles for consistency with the
    index and page views -- unpublished drafts were previously exposed
    through category pages.
    """
    criteria = {
        'categories' : category,
        'published' : "1"
    }
    data = {
        'articles' : col.article.find(criteria),
        'page' : 1,
        'total' : col.article.count(),
        'category' : category
    }
    return render_template('index.html', data=data)
@app.route('/style.css')
def stylesheet() :
    """Serve the user-defined CSS cached on app.config by set_settings()."""
    try :
        css = app.config.style
    except :
        # No style configured yet (set_settings never ran) -- empty sheet.
        return ''
    return Response(css, mimetype='text/css')
@app.route('/sitemap.xml')
def sitemap() :
    # TODO: generate a real sitemap; currently an empty placeholder body.
    return ''
@app.route('/logout')
def logout() :
    # Drop the whole session (including the 'auth' flag) and go home.
    session.clear()
    return redirect(url_for('index'))
def make_external(url):
    # Turn a site-relative path into an absolute URL (Atom feeds require
    # absolute links); joins against the current request's root URL.
    return urljoin(request.url_root, url)
@app.route('/feed')
def recent_feed():
    """Atom feed of the 20 most recently updated articles."""
    # NOTE(review): unlike index()/page(), this query has no
    # {'published': "1"} filter, so drafts appear in the feed -- confirm
    # whether that is intended.
    feed = AtomFeed('Recent Articles', feed_url=request.url, url=request.url_root)
    articles = col.article.find().limit(20).sort('updated', -1)
    for article in articles:
        feed.add(
            article['title'],
            unicode(article['overview']),
            content_type='text',
            # blog_author is set on app.config by set_settings(); raises
            # AttributeError if settings were never loaded.
            author=app.config.blog_author,
            url=make_external(article['slug']),
            updated=article['updated'],
            published=article['updated']
        )
    return feed.get_response()
def slugify(text, encoding=None,
            # @thanks https://gist.github.com/turicas/1428479
            permitted_chars='abcdefghijklmnopqrstuvwxyz0123456789-'):
    """Turn *text* into a URL-safe slug.

    Lower-cases, converts spaces to hyphens (collapsing repeats),
    strips accents via NFKD decomposition, and drops every character
    not in *permitted_chars*.

    Fix: only byte strings are decoded now. On Python 2 ``bytes is
    str``, so behavior is unchanged; on Python 3 the original called
    ``str.decode`` (AttributeError) and then filtered a ``bytes``
    object against a ``str`` whitelist, dropping everything.
    """
    if isinstance(text, bytes):
        text = text.decode(encoding or 'ascii')
    clean_text = text.strip().replace(' ', '-').lower()
    # Collapse runs of hyphens produced by consecutive spaces.
    while '--' in clean_text:
        clean_text = clean_text.replace('--', '-')
    # Decompose accented characters, then discard the non-ASCII marks.
    ascii_text = normalize('NFKD', clean_text).encode('ascii', 'ignore')
    if not isinstance(ascii_text, str):
        ascii_text = ascii_text.decode('ascii')
    strict_text = map(lambda x: x if x in permitted_chars else '', ascii_text)
    return ''.join(strict_text)
def set_settings() :
    """Cache the stored settings document as attributes on app.config.

    Best effort by design: silently does nothing when the database is
    unreachable or no settings document exists yet (first run), so the
    app can still boot.
    """
    try :
        settings = col.settings.find_one()
        app.config.blog_title = settings['title']
        app.config.blog_author = settings['author']
        app.config.blog_description = settings['description']
        app.config.style = settings['style']
    except :
        return
# Prime app.config with the stored settings at import time.
set_settings()
if __name__ == '__main__' :
    # NOTE(review): hard-coded secret key plus debug=True bound to
    # 0.0.0.0 are development-only settings; do not deploy as-is.
    app.secret_key = 'abc'
    app.run(debug=True, host="0.0.0.0", port=5001)
|
|
<<<<<<< HEAD
<<<<<<< HEAD
import unittest
from test import support
# Skip this test if the _testcapi module isn't available.
support.import_module('_testcapi')
from _testcapi import getargs_keywords, getargs_keyword_only
# These helpers exist only when the interpreter was built with
# PY_LONG_LONG support; LongLong_TestCase is skipped otherwise.
try:
    from _testcapi import getargs_L, getargs_K
except ImportError:
    getargs_L = None # PY_LONG_LONG not available
# > How about the following counterproposal. This also changes some of
# > the other format codes to be a little more regular.
# >
# > Code C type Range check
# >
# > b unsigned char 0..UCHAR_MAX
# > h signed short SHRT_MIN..SHRT_MAX
# > B unsigned char none **
# > H unsigned short none **
# > k * unsigned long none
# > I * unsigned int 0..UINT_MAX
#
#
# > i int INT_MIN..INT_MAX
# > l long LONG_MIN..LONG_MAX
#
# > K * unsigned long long none
# > L long long LLONG_MIN..LLONG_MAX
#
# > Notes:
# >
# > * New format codes.
# >
# > ** Changed from previous "range-and-a-half" to "none"; the
# > range-and-a-half checking wasn't particularly useful.
#
# Plus a C API or two, e.g. PyInt_AsLongMask() ->
# unsigned long and PyInt_AsLongLongMask() -> unsigned
# long long (if that exists).
# LARGE fits in a signed 32-bit int; VERY_LARGE overflows even 64-bit types.
LARGE = 0x7FFFFFFF
VERY_LARGE = 0xFF0000121212121212121242
from _testcapi import UCHAR_MAX, USHRT_MAX, UINT_MAX, ULONG_MAX, INT_MAX, \
    INT_MIN, LONG_MIN, LONG_MAX, PY_SSIZE_T_MIN, PY_SSIZE_T_MAX, \
    SHRT_MIN, SHRT_MAX
# fake, they are not defined in Python's header files
LLONG_MAX = 2**63-1
LLONG_MIN = -2**63
ULLONG_MAX = 2**64-1
class Int:
    # Duck-typed integer: not an int, converts via __int__ to 99.
    def __int__(self):
        return 99
class IntSubclass(int):
    # Real int subclass (value 0). The tests below assert that argument
    # parsing uses the raw int value, not this lying __int__.
    def __int__(self):
        return 99
class BadInt:
    # __int__ returns a non-int (float); conversion must raise TypeError.
    def __int__(self):
        return 1.0
class BadInt2:
    # __int__ returns a bool; the tests expect a DeprecationWarning
    # and the value 1.
    def __int__(self):
        return True
class BadInt3(int):
    # int subclass (value 0) whose __int__ lies; the tests assert that
    # the raw int value wins over __int__.
    def __int__(self):
        return True
class Unsigned_TestCase(unittest.TestCase):
    """Exercise PyArg_Parse* unsigned integer codes: b, B, H, I, k."""
    def test_b(self):
        from _testcapi import getargs_b
        # b returns 'unsigned char', and does range checking (0 ... UCHAR_MAX)
        self.assertRaises(TypeError, getargs_b, 3.14)
        self.assertEqual(99, getargs_b(Int()))
        self.assertEqual(0, getargs_b(IntSubclass()))
        self.assertRaises(TypeError, getargs_b, BadInt())
        with self.assertWarns(DeprecationWarning):
            self.assertEqual(1, getargs_b(BadInt2()))
        self.assertEqual(0, getargs_b(BadInt3()))
        self.assertRaises(OverflowError, getargs_b, -1)
        self.assertEqual(0, getargs_b(0))
        self.assertEqual(UCHAR_MAX, getargs_b(UCHAR_MAX))
        self.assertRaises(OverflowError, getargs_b, UCHAR_MAX + 1)
        self.assertEqual(42, getargs_b(42))
        self.assertRaises(OverflowError, getargs_b, VERY_LARGE)
    def test_B(self):
        from _testcapi import getargs_B
        # B returns 'unsigned char', no range checking
        self.assertRaises(TypeError, getargs_B, 3.14)
        self.assertEqual(99, getargs_B(Int()))
        self.assertEqual(0, getargs_B(IntSubclass()))
        self.assertRaises(TypeError, getargs_B, BadInt())
        with self.assertWarns(DeprecationWarning):
            self.assertEqual(1, getargs_B(BadInt2()))
        self.assertEqual(0, getargs_B(BadInt3()))
        self.assertEqual(UCHAR_MAX, getargs_B(-1))
        self.assertEqual(0, getargs_B(0))
        self.assertEqual(UCHAR_MAX, getargs_B(UCHAR_MAX))
        self.assertEqual(0, getargs_B(UCHAR_MAX+1))
        self.assertEqual(42, getargs_B(42))
        self.assertEqual(UCHAR_MAX & VERY_LARGE, getargs_B(VERY_LARGE))
    def test_H(self):
        from _testcapi import getargs_H
        # H returns 'unsigned short', no range checking
        self.assertRaises(TypeError, getargs_H, 3.14)
        self.assertEqual(99, getargs_H(Int()))
        self.assertEqual(0, getargs_H(IntSubclass()))
        self.assertRaises(TypeError, getargs_H, BadInt())
        with self.assertWarns(DeprecationWarning):
            self.assertEqual(1, getargs_H(BadInt2()))
        self.assertEqual(0, getargs_H(BadInt3()))
        self.assertEqual(USHRT_MAX, getargs_H(-1))
        self.assertEqual(0, getargs_H(0))
        self.assertEqual(USHRT_MAX, getargs_H(USHRT_MAX))
        self.assertEqual(0, getargs_H(USHRT_MAX+1))
        self.assertEqual(42, getargs_H(42))
        self.assertEqual(VERY_LARGE & USHRT_MAX, getargs_H(VERY_LARGE))
    def test_I(self):
        from _testcapi import getargs_I
        # I returns 'unsigned int', no range checking
        self.assertRaises(TypeError, getargs_I, 3.14)
        self.assertEqual(99, getargs_I(Int()))
        self.assertEqual(0, getargs_I(IntSubclass()))
        self.assertRaises(TypeError, getargs_I, BadInt())
        with self.assertWarns(DeprecationWarning):
            self.assertEqual(1, getargs_I(BadInt2()))
        self.assertEqual(0, getargs_I(BadInt3()))
        self.assertEqual(UINT_MAX, getargs_I(-1))
        self.assertEqual(0, getargs_I(0))
        self.assertEqual(UINT_MAX, getargs_I(UINT_MAX))
        self.assertEqual(0, getargs_I(UINT_MAX+1))
        self.assertEqual(42, getargs_I(42))
        self.assertEqual(VERY_LARGE & UINT_MAX, getargs_I(VERY_LARGE))
    def test_k(self):
        from _testcapi import getargs_k
        # k returns 'unsigned long', no range checking
        # it does not accept float, or instances with __int__
        self.assertRaises(TypeError, getargs_k, 3.14)
        self.assertRaises(TypeError, getargs_k, Int())
        self.assertEqual(0, getargs_k(IntSubclass()))
        self.assertRaises(TypeError, getargs_k, BadInt())
        self.assertRaises(TypeError, getargs_k, BadInt2())
        self.assertEqual(0, getargs_k(BadInt3()))
        self.assertEqual(ULONG_MAX, getargs_k(-1))
        self.assertEqual(0, getargs_k(0))
        self.assertEqual(ULONG_MAX, getargs_k(ULONG_MAX))
        self.assertEqual(0, getargs_k(ULONG_MAX+1))
        self.assertEqual(42, getargs_k(42))
        self.assertEqual(VERY_LARGE & ULONG_MAX, getargs_k(VERY_LARGE))
class Signed_TestCase(unittest.TestCase):
    """Exercise PyArg_Parse* signed integer codes: h, i, l, n."""
    def test_h(self):
        from _testcapi import getargs_h
        # h returns 'short', and does range checking (SHRT_MIN ... SHRT_MAX)
        self.assertRaises(TypeError, getargs_h, 3.14)
        self.assertEqual(99, getargs_h(Int()))
        self.assertEqual(0, getargs_h(IntSubclass()))
        self.assertRaises(TypeError, getargs_h, BadInt())
        with self.assertWarns(DeprecationWarning):
            self.assertEqual(1, getargs_h(BadInt2()))
        self.assertEqual(0, getargs_h(BadInt3()))
        self.assertRaises(OverflowError, getargs_h, SHRT_MIN-1)
        self.assertEqual(SHRT_MIN, getargs_h(SHRT_MIN))
        self.assertEqual(SHRT_MAX, getargs_h(SHRT_MAX))
        self.assertRaises(OverflowError, getargs_h, SHRT_MAX+1)
        self.assertEqual(42, getargs_h(42))
        self.assertRaises(OverflowError, getargs_h, VERY_LARGE)
    def test_i(self):
        from _testcapi import getargs_i
        # i returns 'int', and does range checking (INT_MIN ... INT_MAX)
        self.assertRaises(TypeError, getargs_i, 3.14)
        self.assertEqual(99, getargs_i(Int()))
        self.assertEqual(0, getargs_i(IntSubclass()))
        self.assertRaises(TypeError, getargs_i, BadInt())
        with self.assertWarns(DeprecationWarning):
            self.assertEqual(1, getargs_i(BadInt2()))
        self.assertEqual(0, getargs_i(BadInt3()))
        self.assertRaises(OverflowError, getargs_i, INT_MIN-1)
        self.assertEqual(INT_MIN, getargs_i(INT_MIN))
        self.assertEqual(INT_MAX, getargs_i(INT_MAX))
        self.assertRaises(OverflowError, getargs_i, INT_MAX+1)
        self.assertEqual(42, getargs_i(42))
        self.assertRaises(OverflowError, getargs_i, VERY_LARGE)
    def test_l(self):
        from _testcapi import getargs_l
        # l returns 'long', and does range checking (LONG_MIN ... LONG_MAX)
        self.assertRaises(TypeError, getargs_l, 3.14)
        self.assertEqual(99, getargs_l(Int()))
        self.assertEqual(0, getargs_l(IntSubclass()))
        self.assertRaises(TypeError, getargs_l, BadInt())
        with self.assertWarns(DeprecationWarning):
            self.assertEqual(1, getargs_l(BadInt2()))
        self.assertEqual(0, getargs_l(BadInt3()))
        self.assertRaises(OverflowError, getargs_l, LONG_MIN-1)
        self.assertEqual(LONG_MIN, getargs_l(LONG_MIN))
        self.assertEqual(LONG_MAX, getargs_l(LONG_MAX))
        self.assertRaises(OverflowError, getargs_l, LONG_MAX+1)
        self.assertEqual(42, getargs_l(42))
        self.assertRaises(OverflowError, getargs_l, VERY_LARGE)
    def test_n(self):
        from _testcapi import getargs_n
        # n returns 'Py_ssize_t', and does range checking
        # (PY_SSIZE_T_MIN ... PY_SSIZE_T_MAX)
        self.assertRaises(TypeError, getargs_n, 3.14)
        self.assertRaises(TypeError, getargs_n, Int())
        self.assertEqual(0, getargs_n(IntSubclass()))
        self.assertRaises(TypeError, getargs_n, BadInt())
        self.assertRaises(TypeError, getargs_n, BadInt2())
        self.assertEqual(0, getargs_n(BadInt3()))
        self.assertRaises(OverflowError, getargs_n, PY_SSIZE_T_MIN-1)
        self.assertEqual(PY_SSIZE_T_MIN, getargs_n(PY_SSIZE_T_MIN))
        self.assertEqual(PY_SSIZE_T_MAX, getargs_n(PY_SSIZE_T_MAX))
        self.assertRaises(OverflowError, getargs_n, PY_SSIZE_T_MAX+1)
        self.assertEqual(42, getargs_n(42))
        self.assertRaises(OverflowError, getargs_n, VERY_LARGE)
@unittest.skipIf(getargs_L is None, 'PY_LONG_LONG is not available')
class LongLong_TestCase(unittest.TestCase):
    """Exercise the 64-bit codes L and K (only on PY_LONG_LONG builds)."""
    def test_L(self):
        from _testcapi import getargs_L
        # L returns 'long long', and does range checking (LLONG_MIN
        # ... LLONG_MAX)
        self.assertRaises(TypeError, getargs_L, 3.14)
        self.assertRaises(TypeError, getargs_L, "Hello")
        self.assertEqual(99, getargs_L(Int()))
        self.assertEqual(0, getargs_L(IntSubclass()))
        self.assertRaises(TypeError, getargs_L, BadInt())
        with self.assertWarns(DeprecationWarning):
            self.assertEqual(1, getargs_L(BadInt2()))
        self.assertEqual(0, getargs_L(BadInt3()))
        self.assertRaises(OverflowError, getargs_L, LLONG_MIN-1)
        self.assertEqual(LLONG_MIN, getargs_L(LLONG_MIN))
        self.assertEqual(LLONG_MAX, getargs_L(LLONG_MAX))
        self.assertRaises(OverflowError, getargs_L, LLONG_MAX+1)
        self.assertEqual(42, getargs_L(42))
        self.assertRaises(OverflowError, getargs_L, VERY_LARGE)
    def test_K(self):
        from _testcapi import getargs_K
        # K return 'unsigned long long', no range checking
        self.assertRaises(TypeError, getargs_K, 3.14)
        self.assertRaises(TypeError, getargs_K, Int())
        self.assertEqual(0, getargs_K(IntSubclass()))
        self.assertRaises(TypeError, getargs_K, BadInt())
        self.assertRaises(TypeError, getargs_K, BadInt2())
        self.assertEqual(0, getargs_K(BadInt3()))
        self.assertEqual(ULLONG_MAX, getargs_K(ULLONG_MAX))
        self.assertEqual(0, getargs_K(0))
        self.assertEqual(0, getargs_K(ULLONG_MAX+1))
        self.assertEqual(42, getargs_K(42))
        self.assertEqual(VERY_LARGE & ULLONG_MAX, getargs_K(VERY_LARGE))
class Paradox:
    "This statement is false."
    # __bool__ raises, so the 'p' (predicate) conversion below must
    # propagate the error rather than swallow it.
    def __bool__(self):
        raise NotImplementedError
class Boolean_TestCase(unittest.TestCase):
    """Exercise the 'p' (bool predicate) format code."""
    def test_p(self):
        from _testcapi import getargs_p
        # Falsy values map to 0, truthy values to 1, per standard truthiness.
        self.assertEqual(0, getargs_p(False))
        self.assertEqual(0, getargs_p(None))
        self.assertEqual(0, getargs_p(0))
        self.assertEqual(0, getargs_p(0.0))
        self.assertEqual(0, getargs_p(0j))
        self.assertEqual(0, getargs_p(''))
        self.assertEqual(0, getargs_p(()))
        self.assertEqual(0, getargs_p([]))
        self.assertEqual(0, getargs_p({}))
        self.assertEqual(1, getargs_p(True))
        self.assertEqual(1, getargs_p(1))
        self.assertEqual(1, getargs_p(1.0))
        self.assertEqual(1, getargs_p(1j))
        self.assertEqual(1, getargs_p('x'))
        self.assertEqual(1, getargs_p((1,)))
        self.assertEqual(1, getargs_p([1]))
        self.assertEqual(1, getargs_p({1:2}))
        self.assertEqual(1, getargs_p(unittest.TestCase))
        # A raising __bool__ must propagate out of the conversion.
        self.assertRaises(NotImplementedError, getargs_p, Paradox())
class Tuple_TestCase(unittest.TestCase):
    """Exercise nested tuple unpacking in format strings."""
    def test_tuple(self):
        from _testcapi import getargs_tuple
        ret = getargs_tuple(1, (2, 3))
        self.assertEqual(ret, (1,2,3))
        # make sure invalid tuple arguments are handled correctly
        class seq:
            def __len__(self):
                return 2
            def __getitem__(self, n):
                raise ValueError
        self.assertRaises(TypeError, getargs_tuple, 1, seq())
class Keywords_TestCase(unittest.TestCase):
    """Exercise PyArg_ParseTupleAndKeywords via getargs_keywords.

    The helper takes five args (arg1..arg5, some tuple-typed) and
    returns the flattened values; missing optionals come back as -1.
    """
    def test_positional_args(self):
        # using all positional args
        self.assertEqual(
            getargs_keywords((1,2), 3, (4,(5,6)), (7,8,9), 10),
            (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        )
    def test_mixed_args(self):
        # positional and keyword args
        self.assertEqual(
            getargs_keywords((1,2), 3, (4,(5,6)), arg4=(7,8,9), arg5=10),
            (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        )
    def test_keyword_args(self):
        # all keywords
        self.assertEqual(
            getargs_keywords(arg1=(1,2), arg2=3, arg3=(4,(5,6)), arg4=(7,8,9), arg5=10),
            (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        )
    def test_optional_args(self):
        # missing optional keyword args, skipping tuples
        self.assertEqual(
            getargs_keywords(arg1=(1,2), arg2=3, arg5=10),
            (1, 2, 3, -1, -1, -1, -1, -1, -1, 10)
        )
    def test_required_args(self):
        # required arg missing
        try:
            getargs_keywords(arg1=(1,2))
        except TypeError as err:
            self.assertEqual(str(err), "Required argument 'arg2' (pos 2) not found")
        else:
            self.fail('TypeError should have been raised')
    def test_too_many_args(self):
        try:
            getargs_keywords((1,2),3,(4,(5,6)),(7,8,9),10,111)
        except TypeError as err:
            self.assertEqual(str(err), "function takes at most 5 arguments (6 given)")
        else:
            self.fail('TypeError should have been raised')
    def test_invalid_keyword(self):
        # extraneous keyword arg
        try:
            getargs_keywords((1,2),3,arg5=10,arg666=666)
        except TypeError as err:
            self.assertEqual(str(err), "'arg666' is an invalid keyword argument for this function")
        else:
            self.fail('TypeError should have been raised')
    def test_surrogate_keyword(self):
        # a lone-surrogate keyword name cannot match any parameter
        try:
            getargs_keywords((1,2), 3, (4,(5,6)), (7,8,9), **{'\uDC80': 10})
        except TypeError as err:
            self.assertEqual(str(err), "'\udc80' is an invalid keyword argument for this function")
        else:
            self.fail('TypeError should have been raised')
class KeywordOnly_TestCase(unittest.TestCase):
    """Exercise the '$' keyword-only marker via getargs_keyword_only.

    The helper's signature is (required, optional=-1, *, keyword_only=-1)
    and it returns the three values as a tuple.
    """
    def test_positional_args(self):
        # using all possible positional args
        self.assertEqual(
            getargs_keyword_only(1, 2),
            (1, 2, -1)
        )
    def test_mixed_args(self):
        # positional and keyword args
        self.assertEqual(
            getargs_keyword_only(1, 2, keyword_only=3),
            (1, 2, 3)
        )
    def test_keyword_args(self):
        # all keywords
        self.assertEqual(
            getargs_keyword_only(required=1, optional=2, keyword_only=3),
            (1, 2, 3)
        )
    def test_optional_args(self):
        # missing optional keyword args, skipping tuples
        self.assertEqual(
            getargs_keyword_only(required=1, optional=2),
            (1, 2, -1)
        )
        self.assertEqual(
            getargs_keyword_only(required=1, keyword_only=3),
            (1, -1, 3)
        )
    def test_required_args(self):
        self.assertEqual(
            getargs_keyword_only(1),
            (1, -1, -1)
        )
        self.assertEqual(
            getargs_keyword_only(required=1),
            (1, -1, -1)
        )
        # required arg missing
        with self.assertRaisesRegex(TypeError,
            "Required argument 'required' \(pos 1\) not found"):
            getargs_keyword_only(optional=2)
        with self.assertRaisesRegex(TypeError,
            "Required argument 'required' \(pos 1\) not found"):
            getargs_keyword_only(keyword_only=3)
    def test_too_many_args(self):
        with self.assertRaisesRegex(TypeError,
            "Function takes at most 2 positional arguments \(3 given\)"):
            getargs_keyword_only(1, 2, 3)
        with self.assertRaisesRegex(TypeError,
            "function takes at most 3 arguments \(4 given\)"):
            getargs_keyword_only(1, 2, 3, keyword_only=5)
    def test_invalid_keyword(self):
        # extraneous keyword arg
        with self.assertRaisesRegex(TypeError,
            "'monster' is an invalid keyword argument for this function"):
            getargs_keyword_only(1, 2, monster=666)
    def test_surrogate_keyword(self):
        # a lone-surrogate keyword name cannot match any parameter
        with self.assertRaisesRegex(TypeError,
            "'\udc80' is an invalid keyword argument for this function"):
            getargs_keyword_only(1, 2, **{'\uDC80': 10})
class Bytes_TestCase(unittest.TestCase):
    """Exercise the bytes-oriented codes: c, s, s*, s#, z, z*, z#, y, y*, y#, w*.

    The '*' variants accept any buffer, the '#' variants additionally
    return a length, and the 'z' variants also accept None.
    """
    def test_c(self):
        from _testcapi import getargs_c
        self.assertRaises(TypeError, getargs_c, b'abc')  # len > 1
        self.assertEqual(getargs_c(b'a'), b'a')
        self.assertEqual(getargs_c(bytearray(b'a')), b'a')
        self.assertRaises(TypeError, getargs_c, memoryview(b'a'))
        self.assertRaises(TypeError, getargs_c, 's')
        self.assertRaises(TypeError, getargs_c, None)
    def test_s(self):
        from _testcapi import getargs_s
        # str is UTF-8 encoded; embedded NUL and non-str inputs are rejected.
        self.assertEqual(getargs_s('abc\xe9'), b'abc\xc3\xa9')
        self.assertRaises(TypeError, getargs_s, 'nul:\0')
        self.assertRaises(TypeError, getargs_s, b'bytes')
        self.assertRaises(TypeError, getargs_s, bytearray(b'bytearray'))
        self.assertRaises(TypeError, getargs_s, memoryview(b'memoryview'))
        self.assertRaises(TypeError, getargs_s, None)
    def test_s_star(self):
        from _testcapi import getargs_s_star
        self.assertEqual(getargs_s_star('abc\xe9'), b'abc\xc3\xa9')
        self.assertEqual(getargs_s_star('nul:\0'), b'nul:\0')
        self.assertEqual(getargs_s_star(b'bytes'), b'bytes')
        self.assertEqual(getargs_s_star(bytearray(b'bytearray')), b'bytearray')
        self.assertEqual(getargs_s_star(memoryview(b'memoryview')), b'memoryview')
        self.assertRaises(TypeError, getargs_s_star, None)
    def test_s_hash(self):
        from _testcapi import getargs_s_hash
        self.assertEqual(getargs_s_hash('abc\xe9'), b'abc\xc3\xa9')
        self.assertEqual(getargs_s_hash('nul:\0'), b'nul:\0')
        self.assertEqual(getargs_s_hash(b'bytes'), b'bytes')
        self.assertRaises(TypeError, getargs_s_hash, bytearray(b'bytearray'))
        self.assertRaises(TypeError, getargs_s_hash, memoryview(b'memoryview'))
        self.assertRaises(TypeError, getargs_s_hash, None)
    def test_z(self):
        from _testcapi import getargs_z
        # like 's', but None is allowed and maps to NULL.
        self.assertEqual(getargs_z('abc\xe9'), b'abc\xc3\xa9')
        self.assertRaises(TypeError, getargs_z, 'nul:\0')
        self.assertRaises(TypeError, getargs_z, b'bytes')
        self.assertRaises(TypeError, getargs_z, bytearray(b'bytearray'))
        self.assertRaises(TypeError, getargs_z, memoryview(b'memoryview'))
        self.assertIsNone(getargs_z(None))
    def test_z_star(self):
        from _testcapi import getargs_z_star
        self.assertEqual(getargs_z_star('abc\xe9'), b'abc\xc3\xa9')
        self.assertEqual(getargs_z_star('nul:\0'), b'nul:\0')
        self.assertEqual(getargs_z_star(b'bytes'), b'bytes')
        self.assertEqual(getargs_z_star(bytearray(b'bytearray')), b'bytearray')
        self.assertEqual(getargs_z_star(memoryview(b'memoryview')), b'memoryview')
        self.assertIsNone(getargs_z_star(None))
    def test_z_hash(self):
        from _testcapi import getargs_z_hash
        self.assertEqual(getargs_z_hash('abc\xe9'), b'abc\xc3\xa9')
        self.assertEqual(getargs_z_hash('nul:\0'), b'nul:\0')
        self.assertEqual(getargs_z_hash(b'bytes'), b'bytes')
        self.assertRaises(TypeError, getargs_z_hash, bytearray(b'bytearray'))
        self.assertRaises(TypeError, getargs_z_hash, memoryview(b'memoryview'))
        self.assertIsNone(getargs_z_hash(None))
    def test_y(self):
        from _testcapi import getargs_y
        # 'y' accepts bytes only -- no str, no buffers, no NUL.
        self.assertRaises(TypeError, getargs_y, 'abc\xe9')
        self.assertEqual(getargs_y(b'bytes'), b'bytes')
        self.assertRaises(TypeError, getargs_y, b'nul:\0')
        self.assertRaises(TypeError, getargs_y, bytearray(b'bytearray'))
        self.assertRaises(TypeError, getargs_y, memoryview(b'memoryview'))
        self.assertRaises(TypeError, getargs_y, None)
    def test_y_star(self):
        from _testcapi import getargs_y_star
        self.assertRaises(TypeError, getargs_y_star, 'abc\xe9')
        self.assertEqual(getargs_y_star(b'bytes'), b'bytes')
        self.assertEqual(getargs_y_star(b'nul:\0'), b'nul:\0')
        self.assertEqual(getargs_y_star(bytearray(b'bytearray')), b'bytearray')
        self.assertEqual(getargs_y_star(memoryview(b'memoryview')), b'memoryview')
        self.assertRaises(TypeError, getargs_y_star, None)
    def test_y_hash(self):
        from _testcapi import getargs_y_hash
        self.assertRaises(TypeError, getargs_y_hash, 'abc\xe9')
        self.assertEqual(getargs_y_hash(b'bytes'), b'bytes')
        self.assertEqual(getargs_y_hash(b'nul:\0'), b'nul:\0')
        self.assertRaises(TypeError, getargs_y_hash, bytearray(b'bytearray'))
        self.assertRaises(TypeError, getargs_y_hash, memoryview(b'memoryview'))
        self.assertRaises(TypeError, getargs_y_hash, None)
    def test_w_star(self):
        # getargs_w_star() modifies first and last byte -- it needs a
        # writable buffer, so read-only inputs must be rejected.
        from _testcapi import getargs_w_star
        self.assertRaises(TypeError, getargs_w_star, 'abc\xe9')
        self.assertRaises(TypeError, getargs_w_star, b'bytes')
        self.assertRaises(TypeError, getargs_w_star, b'nul:\0')
        self.assertRaises(TypeError, getargs_w_star, memoryview(b'bytes'))
        self.assertEqual(getargs_w_star(bytearray(b'bytearray')), b'[ytearra]')
        self.assertEqual(getargs_w_star(memoryview(bytearray(b'memoryview'))),
                         b'[emoryvie]')
        self.assertRaises(TypeError, getargs_w_star, None)
class Unicode_TestCase(unittest.TestCase):
    """Exercise the wide-string codes: u, u#, Z, Z#.

    All reject bytes-like inputs; the 'Z' variants also accept None,
    and the '#' variants tolerate embedded NULs.
    """
    def test_u(self):
        from _testcapi import getargs_u
        self.assertEqual(getargs_u('abc\xe9'), 'abc\xe9')
        self.assertRaises(TypeError, getargs_u, 'nul:\0')
        self.assertRaises(TypeError, getargs_u, b'bytes')
        self.assertRaises(TypeError, getargs_u, bytearray(b'bytearray'))
        self.assertRaises(TypeError, getargs_u, memoryview(b'memoryview'))
        self.assertRaises(TypeError, getargs_u, None)
    def test_u_hash(self):
        from _testcapi import getargs_u_hash
        self.assertEqual(getargs_u_hash('abc\xe9'), 'abc\xe9')
        self.assertEqual(getargs_u_hash('nul:\0'), 'nul:\0')
        self.assertRaises(TypeError, getargs_u_hash, b'bytes')
        self.assertRaises(TypeError, getargs_u_hash, bytearray(b'bytearray'))
        self.assertRaises(TypeError, getargs_u_hash, memoryview(b'memoryview'))
        self.assertRaises(TypeError, getargs_u_hash, None)
    def test_Z(self):
        from _testcapi import getargs_Z
        self.assertEqual(getargs_Z('abc\xe9'), 'abc\xe9')
        self.assertRaises(TypeError, getargs_Z, 'nul:\0')
        self.assertRaises(TypeError, getargs_Z, b'bytes')
        self.assertRaises(TypeError, getargs_Z, bytearray(b'bytearray'))
        self.assertRaises(TypeError, getargs_Z, memoryview(b'memoryview'))
        self.assertIsNone(getargs_Z(None))
    def test_Z_hash(self):
        from _testcapi import getargs_Z_hash
        self.assertEqual(getargs_Z_hash('abc\xe9'), 'abc\xe9')
        self.assertEqual(getargs_Z_hash('nul:\0'), 'nul:\0')
        self.assertRaises(TypeError, getargs_Z_hash, b'bytes')
        self.assertRaises(TypeError, getargs_Z_hash, bytearray(b'bytearray'))
        self.assertRaises(TypeError, getargs_Z_hash, memoryview(b'memoryview'))
        self.assertIsNone(getargs_Z_hash(None))
# Allow running this test module directly (outside the regrtest driver).
if __name__ == "__main__":
    unittest.main()
=======
import unittest
from test import support
# Skip this test if the _testcapi module isn't available.
support.import_module('_testcapi')
from _testcapi import getargs_keywords, getargs_keyword_only
try:
from _testcapi import getargs_L, getargs_K
except ImportError:
getargs_L = None # PY_LONG_LONG not available
# > How about the following counterproposal. This also changes some of
# > the other format codes to be a little more regular.
# >
# > Code C type Range check
# >
# > b unsigned char 0..UCHAR_MAX
# > h signed short SHRT_MIN..SHRT_MAX
# > B unsigned char none **
# > H unsigned short none **
# > k * unsigned long none
# > I * unsigned int 0..UINT_MAX
#
#
# > i int INT_MIN..INT_MAX
# > l long LONG_MIN..LONG_MAX
#
# > K * unsigned long long none
# > L long long LLONG_MIN..LLONG_MAX
#
# > Notes:
# >
# > * New format codes.
# >
# > ** Changed from previous "range-and-a-half" to "none"; the
# > range-and-a-half checking wasn't particularly useful.
#
# Plus a C API or two, e.g. PyInt_AsLongMask() ->
# unsigned long and PyInt_AsLongLongMask() -> unsigned
# long long (if that exists).
LARGE = 0x7FFFFFFF
VERY_LARGE = 0xFF0000121212121212121242
from _testcapi import UCHAR_MAX, USHRT_MAX, UINT_MAX, ULONG_MAX, INT_MAX, \
INT_MIN, LONG_MIN, LONG_MAX, PY_SSIZE_T_MIN, PY_SSIZE_T_MAX, \
SHRT_MIN, SHRT_MAX
# fake, they are not defined in Python's header files
LLONG_MAX = 2**63-1
LLONG_MIN = -2**63
ULLONG_MAX = 2**64-1
class Int:
def __int__(self):
return 99
class IntSubclass(int):
def __int__(self):
return 99
class BadInt:
def __int__(self):
return 1.0
class BadInt2:
def __int__(self):
return True
class BadInt3(int):
def __int__(self):
return True
class Unsigned_TestCase(unittest.TestCase):
def test_b(self):
from _testcapi import getargs_b
# b returns 'unsigned char', and does range checking (0 ... UCHAR_MAX)
self.assertRaises(TypeError, getargs_b, 3.14)
self.assertEqual(99, getargs_b(Int()))
self.assertEqual(0, getargs_b(IntSubclass()))
self.assertRaises(TypeError, getargs_b, BadInt())
with self.assertWarns(DeprecationWarning):
self.assertEqual(1, getargs_b(BadInt2()))
self.assertEqual(0, getargs_b(BadInt3()))
self.assertRaises(OverflowError, getargs_b, -1)
self.assertEqual(0, getargs_b(0))
self.assertEqual(UCHAR_MAX, getargs_b(UCHAR_MAX))
self.assertRaises(OverflowError, getargs_b, UCHAR_MAX + 1)
self.assertEqual(42, getargs_b(42))
self.assertRaises(OverflowError, getargs_b, VERY_LARGE)
def test_B(self):
from _testcapi import getargs_B
# B returns 'unsigned char', no range checking
self.assertRaises(TypeError, getargs_B, 3.14)
self.assertEqual(99, getargs_B(Int()))
self.assertEqual(0, getargs_B(IntSubclass()))
self.assertRaises(TypeError, getargs_B, BadInt())
with self.assertWarns(DeprecationWarning):
self.assertEqual(1, getargs_B(BadInt2()))
self.assertEqual(0, getargs_B(BadInt3()))
self.assertEqual(UCHAR_MAX, getargs_B(-1))
self.assertEqual(0, getargs_B(0))
self.assertEqual(UCHAR_MAX, getargs_B(UCHAR_MAX))
self.assertEqual(0, getargs_B(UCHAR_MAX+1))
self.assertEqual(42, getargs_B(42))
self.assertEqual(UCHAR_MAX & VERY_LARGE, getargs_B(VERY_LARGE))
def test_H(self):
from _testcapi import getargs_H
# H returns 'unsigned short', no range checking
self.assertRaises(TypeError, getargs_H, 3.14)
self.assertEqual(99, getargs_H(Int()))
self.assertEqual(0, getargs_H(IntSubclass()))
self.assertRaises(TypeError, getargs_H, BadInt())
with self.assertWarns(DeprecationWarning):
self.assertEqual(1, getargs_H(BadInt2()))
self.assertEqual(0, getargs_H(BadInt3()))
self.assertEqual(USHRT_MAX, getargs_H(-1))
self.assertEqual(0, getargs_H(0))
self.assertEqual(USHRT_MAX, getargs_H(USHRT_MAX))
self.assertEqual(0, getargs_H(USHRT_MAX+1))
self.assertEqual(42, getargs_H(42))
self.assertEqual(VERY_LARGE & USHRT_MAX, getargs_H(VERY_LARGE))
def test_I(self):
from _testcapi import getargs_I
# I returns 'unsigned int', no range checking
self.assertRaises(TypeError, getargs_I, 3.14)
self.assertEqual(99, getargs_I(Int()))
self.assertEqual(0, getargs_I(IntSubclass()))
self.assertRaises(TypeError, getargs_I, BadInt())
with self.assertWarns(DeprecationWarning):
self.assertEqual(1, getargs_I(BadInt2()))
self.assertEqual(0, getargs_I(BadInt3()))
self.assertEqual(UINT_MAX, getargs_I(-1))
self.assertEqual(0, getargs_I(0))
self.assertEqual(UINT_MAX, getargs_I(UINT_MAX))
self.assertEqual(0, getargs_I(UINT_MAX+1))
self.assertEqual(42, getargs_I(42))
self.assertEqual(VERY_LARGE & UINT_MAX, getargs_I(VERY_LARGE))
def test_k(self):
from _testcapi import getargs_k
# k returns 'unsigned long', no range checking
# it does not accept float, or instances with __int__
self.assertRaises(TypeError, getargs_k, 3.14)
self.assertRaises(TypeError, getargs_k, Int())
self.assertEqual(0, getargs_k(IntSubclass()))
self.assertRaises(TypeError, getargs_k, BadInt())
self.assertRaises(TypeError, getargs_k, BadInt2())
self.assertEqual(0, getargs_k(BadInt3()))
self.assertEqual(ULONG_MAX, getargs_k(-1))
self.assertEqual(0, getargs_k(0))
self.assertEqual(ULONG_MAX, getargs_k(ULONG_MAX))
self.assertEqual(0, getargs_k(ULONG_MAX+1))
self.assertEqual(42, getargs_k(42))
self.assertEqual(VERY_LARGE & ULONG_MAX, getargs_k(VERY_LARGE))
class Signed_TestCase(unittest.TestCase):
    """Signed integer formats 'h', 'i', 'l', 'n': all range-checked (OverflowError)."""
    def test_h(self):
        from _testcapi import getargs_h
        # h returns 'short', and does range checking (SHRT_MIN ... SHRT_MAX)
        self.assertRaises(TypeError, getargs_h, 3.14)
        self.assertEqual(99, getargs_h(Int()))
        self.assertEqual(0, getargs_h(IntSubclass()))
        self.assertRaises(TypeError, getargs_h, BadInt())
        with self.assertWarns(DeprecationWarning):
            self.assertEqual(1, getargs_h(BadInt2()))
        self.assertEqual(0, getargs_h(BadInt3()))
        self.assertRaises(OverflowError, getargs_h, SHRT_MIN-1)
        self.assertEqual(SHRT_MIN, getargs_h(SHRT_MIN))
        self.assertEqual(SHRT_MAX, getargs_h(SHRT_MAX))
        self.assertRaises(OverflowError, getargs_h, SHRT_MAX+1)
        self.assertEqual(42, getargs_h(42))
        self.assertRaises(OverflowError, getargs_h, VERY_LARGE)
    def test_i(self):
        from _testcapi import getargs_i
        # i returns 'int', and does range checking (INT_MIN ... INT_MAX)
        self.assertRaises(TypeError, getargs_i, 3.14)
        self.assertEqual(99, getargs_i(Int()))
        self.assertEqual(0, getargs_i(IntSubclass()))
        self.assertRaises(TypeError, getargs_i, BadInt())
        with self.assertWarns(DeprecationWarning):
            self.assertEqual(1, getargs_i(BadInt2()))
        self.assertEqual(0, getargs_i(BadInt3()))
        self.assertRaises(OverflowError, getargs_i, INT_MIN-1)
        self.assertEqual(INT_MIN, getargs_i(INT_MIN))
        self.assertEqual(INT_MAX, getargs_i(INT_MAX))
        self.assertRaises(OverflowError, getargs_i, INT_MAX+1)
        self.assertEqual(42, getargs_i(42))
        self.assertRaises(OverflowError, getargs_i, VERY_LARGE)
    def test_l(self):
        from _testcapi import getargs_l
        # l returns 'long', and does range checking (LONG_MIN ... LONG_MAX)
        self.assertRaises(TypeError, getargs_l, 3.14)
        self.assertEqual(99, getargs_l(Int()))
        self.assertEqual(0, getargs_l(IntSubclass()))
        self.assertRaises(TypeError, getargs_l, BadInt())
        with self.assertWarns(DeprecationWarning):
            self.assertEqual(1, getargs_l(BadInt2()))
        self.assertEqual(0, getargs_l(BadInt3()))
        self.assertRaises(OverflowError, getargs_l, LONG_MIN-1)
        self.assertEqual(LONG_MIN, getargs_l(LONG_MIN))
        self.assertEqual(LONG_MAX, getargs_l(LONG_MAX))
        self.assertRaises(OverflowError, getargs_l, LONG_MAX+1)
        self.assertEqual(42, getargs_l(42))
        self.assertRaises(OverflowError, getargs_l, VERY_LARGE)
    def test_n(self):
        from _testcapi import getargs_n
        # n returns 'Py_ssize_t', and does range checking
        # (PY_SSIZE_T_MIN ... PY_SSIZE_T_MAX)
        self.assertRaises(TypeError, getargs_n, 3.14)
        # 'n' does not accept objects that merely define __int__
        self.assertRaises(TypeError, getargs_n, Int())
        self.assertEqual(0, getargs_n(IntSubclass()))
        self.assertRaises(TypeError, getargs_n, BadInt())
        self.assertRaises(TypeError, getargs_n, BadInt2())
        self.assertEqual(0, getargs_n(BadInt3()))
        self.assertRaises(OverflowError, getargs_n, PY_SSIZE_T_MIN-1)
        self.assertEqual(PY_SSIZE_T_MIN, getargs_n(PY_SSIZE_T_MIN))
        self.assertEqual(PY_SSIZE_T_MAX, getargs_n(PY_SSIZE_T_MAX))
        self.assertRaises(OverflowError, getargs_n, PY_SSIZE_T_MAX+1)
        self.assertEqual(42, getargs_n(42))
        self.assertRaises(OverflowError, getargs_n, VERY_LARGE)
@unittest.skipIf(getargs_L is None, 'PY_LONG_LONG is not available')
class LongLong_TestCase(unittest.TestCase):
    """Formats 'L' (long long, range-checked) and 'K' (unsigned long long, masked)."""
    def test_L(self):
        from _testcapi import getargs_L
        # L returns 'long long', and does range checking (LLONG_MIN
        # ... LLONG_MAX)
        self.assertRaises(TypeError, getargs_L, 3.14)
        self.assertRaises(TypeError, getargs_L, "Hello")
        self.assertEqual(99, getargs_L(Int()))
        self.assertEqual(0, getargs_L(IntSubclass()))
        self.assertRaises(TypeError, getargs_L, BadInt())
        with self.assertWarns(DeprecationWarning):
            self.assertEqual(1, getargs_L(BadInt2()))
        self.assertEqual(0, getargs_L(BadInt3()))
        self.assertRaises(OverflowError, getargs_L, LLONG_MIN-1)
        self.assertEqual(LLONG_MIN, getargs_L(LLONG_MIN))
        self.assertEqual(LLONG_MAX, getargs_L(LLONG_MAX))
        self.assertRaises(OverflowError, getargs_L, LLONG_MAX+1)
        self.assertEqual(42, getargs_L(42))
        self.assertRaises(OverflowError, getargs_L, VERY_LARGE)
    def test_K(self):
        from _testcapi import getargs_K
        # K return 'unsigned long long', no range checking
        self.assertRaises(TypeError, getargs_K, 3.14)
        self.assertRaises(TypeError, getargs_K, Int())
        self.assertEqual(0, getargs_K(IntSubclass()))
        self.assertRaises(TypeError, getargs_K, BadInt())
        self.assertRaises(TypeError, getargs_K, BadInt2())
        self.assertEqual(0, getargs_K(BadInt3()))
        # out-of-range values wrap modulo 2**64
        self.assertEqual(ULLONG_MAX, getargs_K(ULLONG_MAX))
        self.assertEqual(0, getargs_K(0))
        self.assertEqual(0, getargs_K(ULLONG_MAX+1))
        self.assertEqual(42, getargs_K(42))
        self.assertEqual(VERY_LARGE & ULLONG_MAX, getargs_K(VERY_LARGE))
class Paradox:
    """An object whose truth value is deliberately undecidable: every
    attempt to interpret it as a boolean raises NotImplementedError."""
    def __bool__(self):
        raise NotImplementedError
class Boolean_TestCase(unittest.TestCase):
    """Format 'p' ("predicate"): any object's truthiness converted to C int 0/1."""
    def test_p(self):
        from _testcapi import getargs_p
        # falsy values of every builtin type -> 0
        self.assertEqual(0, getargs_p(False))
        self.assertEqual(0, getargs_p(None))
        self.assertEqual(0, getargs_p(0))
        self.assertEqual(0, getargs_p(0.0))
        self.assertEqual(0, getargs_p(0j))
        self.assertEqual(0, getargs_p(''))
        self.assertEqual(0, getargs_p(()))
        self.assertEqual(0, getargs_p([]))
        self.assertEqual(0, getargs_p({}))
        # truthy values -> 1
        self.assertEqual(1, getargs_p(True))
        self.assertEqual(1, getargs_p(1))
        self.assertEqual(1, getargs_p(1.0))
        self.assertEqual(1, getargs_p(1j))
        self.assertEqual(1, getargs_p('x'))
        self.assertEqual(1, getargs_p((1,)))
        self.assertEqual(1, getargs_p([1]))
        self.assertEqual(1, getargs_p({1:2}))
        self.assertEqual(1, getargs_p(unittest.TestCase))
        # an exception from __bool__ propagates out of argument parsing
        self.assertRaises(NotImplementedError, getargs_p, Paradox())
class Tuple_TestCase(unittest.TestCase):
    """Nested tuple format units '(...)': unpacking and error propagation."""
    def test_tuple(self):
        from _testcapi import getargs_tuple
        ret = getargs_tuple(1, (2, 3))
        self.assertEqual(ret, (1,2,3))
        # make sure invalid tuple arguments are handled correctly
        class seq:
            def __len__(self):
                return 2
            def __getitem__(self, n):
                # any item access fails; parsing must surface a TypeError
                raise ValueError
        self.assertRaises(TypeError, getargs_tuple, 1, seq())
class Keywords_TestCase(unittest.TestCase):
    """PyArg_ParseTupleAndKeywords: 5 args (arg1..arg5), some nested tuples,
    missing optionals reported as -1."""
    def test_positional_args(self):
        # using all positional args
        self.assertEqual(
            getargs_keywords((1,2), 3, (4,(5,6)), (7,8,9), 10),
            (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
            )
    def test_mixed_args(self):
        # positional and keyword args
        self.assertEqual(
            getargs_keywords((1,2), 3, (4,(5,6)), arg4=(7,8,9), arg5=10),
            (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
            )
    def test_keyword_args(self):
        # all keywords
        self.assertEqual(
            getargs_keywords(arg1=(1,2), arg2=3, arg3=(4,(5,6)), arg4=(7,8,9), arg5=10),
            (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
            )
    def test_optional_args(self):
        # missing optional keyword args, skipping tuples
        self.assertEqual(
            getargs_keywords(arg1=(1,2), arg2=3, arg5=10),
            (1, 2, 3, -1, -1, -1, -1, -1, -1, 10)
            )
    def test_required_args(self):
        # required arg missing
        try:
            getargs_keywords(arg1=(1,2))
        except TypeError as err:
            self.assertEqual(str(err), "Required argument 'arg2' (pos 2) not found")
        else:
            self.fail('TypeError should have been raised')
    def test_too_many_args(self):
        try:
            getargs_keywords((1,2),3,(4,(5,6)),(7,8,9),10,111)
        except TypeError as err:
            self.assertEqual(str(err), "function takes at most 5 arguments (6 given)")
        else:
            self.fail('TypeError should have been raised')
    def test_invalid_keyword(self):
        # extraneous keyword arg
        try:
            getargs_keywords((1,2),3,arg5=10,arg666=666)
        except TypeError as err:
            self.assertEqual(str(err), "'arg666' is an invalid keyword argument for this function")
        else:
            self.fail('TypeError should have been raised')
    def test_surrogate_keyword(self):
        # a lone surrogate is not a valid keyword name
        try:
            getargs_keywords((1,2), 3, (4,(5,6)), (7,8,9), **{'\uDC80': 10})
        except TypeError as err:
            self.assertEqual(str(err), "'\udc80' is an invalid keyword argument for this function")
        else:
            self.fail('TypeError should have been raised')
class KeywordOnly_TestCase(unittest.TestCase):
    """PyArg_ParseTupleAndKeywords with the '$' keyword-only marker:
    (required, optional, then a keyword-only arg); missing args reported as -1.

    Fix: regex patterns for assertRaisesRegex are now raw strings so that
    '\\(' is a literal regex escape rather than an invalid string escape
    (invalid escapes in str literals are deprecated since Python 3.6).
    """
    def test_positional_args(self):
        # using all possible positional args
        self.assertEqual(
            getargs_keyword_only(1, 2),
            (1, 2, -1)
            )
    def test_mixed_args(self):
        # positional and keyword args
        self.assertEqual(
            getargs_keyword_only(1, 2, keyword_only=3),
            (1, 2, 3)
            )
    def test_keyword_args(self):
        # all keywords
        self.assertEqual(
            getargs_keyword_only(required=1, optional=2, keyword_only=3),
            (1, 2, 3)
            )
    def test_optional_args(self):
        # missing optional keyword args, skipping tuples
        self.assertEqual(
            getargs_keyword_only(required=1, optional=2),
            (1, 2, -1)
            )
        self.assertEqual(
            getargs_keyword_only(required=1, keyword_only=3),
            (1, -1, 3)
            )
    def test_required_args(self):
        self.assertEqual(
            getargs_keyword_only(1),
            (1, -1, -1)
            )
        self.assertEqual(
            getargs_keyword_only(required=1),
            (1, -1, -1)
            )
        # required arg missing
        with self.assertRaisesRegex(TypeError,
            r"Required argument 'required' \(pos 1\) not found"):
            getargs_keyword_only(optional=2)
        with self.assertRaisesRegex(TypeError,
            r"Required argument 'required' \(pos 1\) not found"):
            getargs_keyword_only(keyword_only=3)
    def test_too_many_args(self):
        with self.assertRaisesRegex(TypeError,
            r"Function takes at most 2 positional arguments \(3 given\)"):
            getargs_keyword_only(1, 2, 3)
        with self.assertRaisesRegex(TypeError,
            r"function takes at most 3 arguments \(4 given\)"):
            getargs_keyword_only(1, 2, 3, keyword_only=5)
    def test_invalid_keyword(self):
        # extraneous keyword arg
        with self.assertRaisesRegex(TypeError,
            "'monster' is an invalid keyword argument for this function"):
            getargs_keyword_only(1, 2, monster=666)
    def test_surrogate_keyword(self):
        # a lone surrogate is not a valid keyword name
        with self.assertRaisesRegex(TypeError,
            "'\udc80' is an invalid keyword argument for this function"):
            getargs_keyword_only(1, 2, **{'\uDC80': 10})
class Bytes_TestCase(unittest.TestCase):
    """Byte-oriented format units: 'c', 's', 'z', 'y' and their '*' (buffer)
    and '#' (length) variants.  's'/'z' accept str (UTF-8 encoded), 'y'
    accepts bytes-likes; 'z'/'Z' additionally accept None."""
    def test_c(self):
        from _testcapi import getargs_c
        # 'c' takes exactly one byte
        self.assertRaises(TypeError, getargs_c, b'abc')  # len > 1
        self.assertEqual(getargs_c(b'a'), b'a')
        self.assertEqual(getargs_c(bytearray(b'a')), b'a')
        self.assertRaises(TypeError, getargs_c, memoryview(b'a'))
        self.assertRaises(TypeError, getargs_c, 's')
        self.assertRaises(TypeError, getargs_c, None)
    def test_s(self):
        from _testcapi import getargs_s
        # str is UTF-8 encoded; embedded NUL rejected (C string result)
        self.assertEqual(getargs_s('abc\xe9'), b'abc\xc3\xa9')
        self.assertRaises(TypeError, getargs_s, 'nul:\0')
        self.assertRaises(TypeError, getargs_s, b'bytes')
        self.assertRaises(TypeError, getargs_s, bytearray(b'bytearray'))
        self.assertRaises(TypeError, getargs_s, memoryview(b'memoryview'))
        self.assertRaises(TypeError, getargs_s, None)
    def test_s_star(self):
        from _testcapi import getargs_s_star
        # 's*' accepts any buffer; NUL allowed since length travels with it
        self.assertEqual(getargs_s_star('abc\xe9'), b'abc\xc3\xa9')
        self.assertEqual(getargs_s_star('nul:\0'), b'nul:\0')
        self.assertEqual(getargs_s_star(b'bytes'), b'bytes')
        self.assertEqual(getargs_s_star(bytearray(b'bytearray')), b'bytearray')
        self.assertEqual(getargs_s_star(memoryview(b'memoryview')), b'memoryview')
        self.assertRaises(TypeError, getargs_s_star, None)
    def test_s_hash(self):
        from _testcapi import getargs_s_hash
        # 's#' needs a read-only buffer, so mutable bytes-likes are rejected
        self.assertEqual(getargs_s_hash('abc\xe9'), b'abc\xc3\xa9')
        self.assertEqual(getargs_s_hash('nul:\0'), b'nul:\0')
        self.assertEqual(getargs_s_hash(b'bytes'), b'bytes')
        self.assertRaises(TypeError, getargs_s_hash, bytearray(b'bytearray'))
        self.assertRaises(TypeError, getargs_s_hash, memoryview(b'memoryview'))
        self.assertRaises(TypeError, getargs_s_hash, None)
    def test_z(self):
        from _testcapi import getargs_z
        # 'z' is like 's' but also accepts None (-> NULL)
        self.assertEqual(getargs_z('abc\xe9'), b'abc\xc3\xa9')
        self.assertRaises(TypeError, getargs_z, 'nul:\0')
        self.assertRaises(TypeError, getargs_z, b'bytes')
        self.assertRaises(TypeError, getargs_z, bytearray(b'bytearray'))
        self.assertRaises(TypeError, getargs_z, memoryview(b'memoryview'))
        self.assertIsNone(getargs_z(None))
    def test_z_star(self):
        from _testcapi import getargs_z_star
        self.assertEqual(getargs_z_star('abc\xe9'), b'abc\xc3\xa9')
        self.assertEqual(getargs_z_star('nul:\0'), b'nul:\0')
        self.assertEqual(getargs_z_star(b'bytes'), b'bytes')
        self.assertEqual(getargs_z_star(bytearray(b'bytearray')), b'bytearray')
        self.assertEqual(getargs_z_star(memoryview(b'memoryview')), b'memoryview')
        self.assertIsNone(getargs_z_star(None))
    def test_z_hash(self):
        from _testcapi import getargs_z_hash
        self.assertEqual(getargs_z_hash('abc\xe9'), b'abc\xc3\xa9')
        self.assertEqual(getargs_z_hash('nul:\0'), b'nul:\0')
        self.assertEqual(getargs_z_hash(b'bytes'), b'bytes')
        self.assertRaises(TypeError, getargs_z_hash, bytearray(b'bytearray'))
        self.assertRaises(TypeError, getargs_z_hash, memoryview(b'memoryview'))
        self.assertIsNone(getargs_z_hash(None))
    def test_y(self):
        from _testcapi import getargs_y
        # 'y' accepts only bytes (no str); embedded NUL rejected
        self.assertRaises(TypeError, getargs_y, 'abc\xe9')
        self.assertEqual(getargs_y(b'bytes'), b'bytes')
        self.assertRaises(TypeError, getargs_y, b'nul:\0')
        self.assertRaises(TypeError, getargs_y, bytearray(b'bytearray'))
        self.assertRaises(TypeError, getargs_y, memoryview(b'memoryview'))
        self.assertRaises(TypeError, getargs_y, None)
    def test_y_star(self):
        from _testcapi import getargs_y_star
        self.assertRaises(TypeError, getargs_y_star, 'abc\xe9')
        self.assertEqual(getargs_y_star(b'bytes'), b'bytes')
        self.assertEqual(getargs_y_star(b'nul:\0'), b'nul:\0')
        self.assertEqual(getargs_y_star(bytearray(b'bytearray')), b'bytearray')
        self.assertEqual(getargs_y_star(memoryview(b'memoryview')), b'memoryview')
        self.assertRaises(TypeError, getargs_y_star, None)
    def test_y_hash(self):
        from _testcapi import getargs_y_hash
        self.assertRaises(TypeError, getargs_y_hash, 'abc\xe9')
        self.assertEqual(getargs_y_hash(b'bytes'), b'bytes')
        self.assertEqual(getargs_y_hash(b'nul:\0'), b'nul:\0')
        self.assertRaises(TypeError, getargs_y_hash, bytearray(b'bytearray'))
        self.assertRaises(TypeError, getargs_y_hash, memoryview(b'memoryview'))
        self.assertRaises(TypeError, getargs_y_hash, None)
    def test_w_star(self):
        # getargs_w_star() modifies first and last byte
        from _testcapi import getargs_w_star
        self.assertRaises(TypeError, getargs_w_star, 'abc\xe9')
        # 'w*' requires a *writable* buffer: bytes/ro-memoryview rejected
        self.assertRaises(TypeError, getargs_w_star, b'bytes')
        self.assertRaises(TypeError, getargs_w_star, b'nul:\0')
        self.assertRaises(TypeError, getargs_w_star, memoryview(b'bytes'))
        self.assertEqual(getargs_w_star(bytearray(b'bytearray')), b'[ytearra]')
        self.assertEqual(getargs_w_star(memoryview(bytearray(b'memoryview'))),
                         b'[emoryvie]')
        self.assertRaises(TypeError, getargs_w_star, None)
class Unicode_TestCase(unittest.TestCase):
    """Wide-char format units 'u'/'Z' and '#' variants: accept only str;
    'Z' variants additionally accept None; '#' variants allow embedded NUL."""
    def test_u(self):
        from _testcapi import getargs_u
        self.assertEqual(getargs_u('abc\xe9'), 'abc\xe9')
        self.assertRaises(TypeError, getargs_u, 'nul:\0')
        self.assertRaises(TypeError, getargs_u, b'bytes')
        self.assertRaises(TypeError, getargs_u, bytearray(b'bytearray'))
        self.assertRaises(TypeError, getargs_u, memoryview(b'memoryview'))
        self.assertRaises(TypeError, getargs_u, None)
    def test_u_hash(self):
        from _testcapi import getargs_u_hash
        self.assertEqual(getargs_u_hash('abc\xe9'), 'abc\xe9')
        self.assertEqual(getargs_u_hash('nul:\0'), 'nul:\0')
        self.assertRaises(TypeError, getargs_u_hash, b'bytes')
        self.assertRaises(TypeError, getargs_u_hash, bytearray(b'bytearray'))
        self.assertRaises(TypeError, getargs_u_hash, memoryview(b'memoryview'))
        self.assertRaises(TypeError, getargs_u_hash, None)
    def test_Z(self):
        from _testcapi import getargs_Z
        self.assertEqual(getargs_Z('abc\xe9'), 'abc\xe9')
        self.assertRaises(TypeError, getargs_Z, 'nul:\0')
        self.assertRaises(TypeError, getargs_Z, b'bytes')
        self.assertRaises(TypeError, getargs_Z, bytearray(b'bytearray'))
        self.assertRaises(TypeError, getargs_Z, memoryview(b'memoryview'))
        self.assertIsNone(getargs_Z(None))
    def test_Z_hash(self):
        from _testcapi import getargs_Z_hash
        self.assertEqual(getargs_Z_hash('abc\xe9'), 'abc\xe9')
        self.assertEqual(getargs_Z_hash('nul:\0'), 'nul:\0')
        self.assertRaises(TypeError, getargs_Z_hash, b'bytes')
        self.assertRaises(TypeError, getargs_Z_hash, bytearray(b'bytearray'))
        self.assertRaises(TypeError, getargs_Z_hash, memoryview(b'memoryview'))
        self.assertIsNone(getargs_Z_hash(None))
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
# NOTE(review): unresolved git merge-conflict markers were left here
# ('>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453' and '=======').
# Everything below this point is the other side of the conflict and is a
# duplicate copy of this test module; the conflict should be resolved by
# keeping exactly one copy.
import unittest
from test import support
# Skip this test if the _testcapi module isn't available.
support.import_module('_testcapi')
from _testcapi import getargs_keywords, getargs_keyword_only
# getargs_L/getargs_K exist only when Python was built with PY_LONG_LONG;
# LongLong_TestCase below is skipped when getargs_L is None.
try:
    from _testcapi import getargs_L, getargs_K
except ImportError:
    getargs_L = None # PY_LONG_LONG not available
# > How about the following counterproposal. This also changes some of
# > the other format codes to be a little more regular.
# >
# > Code C type Range check
# >
# > b unsigned char 0..UCHAR_MAX
# > h signed short SHRT_MIN..SHRT_MAX
# > B unsigned char none **
# > H unsigned short none **
# > k * unsigned long none
# > I * unsigned int 0..UINT_MAX
#
#
# > i int INT_MIN..INT_MAX
# > l long LONG_MIN..LONG_MAX
#
# > K * unsigned long long none
# > L long long LLONG_MIN..LLONG_MAX
#
# > Notes:
# >
# > * New format codes.
# >
# > ** Changed from previous "range-and-a-half" to "none"; the
# > range-and-a-half checking wasn't particularly useful.
#
# Plus a C API or two, e.g. PyInt_AsLongMask() ->
# unsigned long and PyInt_AsLongLongMask() -> unsigned
# long long (if that exists).
# Boundary values: LARGE fits in 32 signed bits; VERY_LARGE is wider than
# 64 bits, so it must overflow (or be masked by) every C integer format.
LARGE = 0x7FFFFFFF
VERY_LARGE = 0xFF0000121212121212121242
from _testcapi import UCHAR_MAX, USHRT_MAX, UINT_MAX, ULONG_MAX, INT_MAX, \
INT_MIN, LONG_MIN, LONG_MAX, PY_SSIZE_T_MIN, PY_SSIZE_T_MAX, \
SHRT_MIN, SHRT_MAX
# fake, they are not defined in Python's header files
# (assumes a two's-complement 64-bit 'long long' -- TODO confirm on exotic platforms)
LLONG_MAX = 2**63-1
LLONG_MIN = -2**63
ULLONG_MAX = 2**64-1
class Int:
    """Non-int object that supports __int__ (converts to 99)."""
    def __int__(self):
        return 99
class IntSubclass(int):
    """int subclass whose __int__ lies; parsers should use the int value (0)."""
    def __int__(self):
        return 99
class BadInt:
    """__int__ returns a non-int (float) -- must raise TypeError in parsers."""
    def __int__(self):
        return 1.0
class BadInt2:
    """__int__ returns bool (an int subclass) -- triggers DeprecationWarning."""
    def __int__(self):
        return True
class BadInt3(int):
    """int subclass whose __int__ returns bool; int value (0) should win."""
    def __int__(self):
        return True
class Unsigned_TestCase(unittest.TestCase):
    """Unsigned integer formats: 'b' is range-checked; 'B', 'H', 'I', 'k'
    mask out-of-range values instead of raising."""
    def test_b(self):
        from _testcapi import getargs_b
        # b returns 'unsigned char', and does range checking (0 ... UCHAR_MAX)
        self.assertRaises(TypeError, getargs_b, 3.14)
        self.assertEqual(99, getargs_b(Int()))
        self.assertEqual(0, getargs_b(IntSubclass()))
        self.assertRaises(TypeError, getargs_b, BadInt())
        with self.assertWarns(DeprecationWarning):
            self.assertEqual(1, getargs_b(BadInt2()))
        self.assertEqual(0, getargs_b(BadInt3()))
        self.assertRaises(OverflowError, getargs_b, -1)
        self.assertEqual(0, getargs_b(0))
        self.assertEqual(UCHAR_MAX, getargs_b(UCHAR_MAX))
        self.assertRaises(OverflowError, getargs_b, UCHAR_MAX + 1)
        self.assertEqual(42, getargs_b(42))
        self.assertRaises(OverflowError, getargs_b, VERY_LARGE)
    def test_B(self):
        from _testcapi import getargs_B
        # B returns 'unsigned char', no range checking
        self.assertRaises(TypeError, getargs_B, 3.14)
        self.assertEqual(99, getargs_B(Int()))
        self.assertEqual(0, getargs_B(IntSubclass()))
        self.assertRaises(TypeError, getargs_B, BadInt())
        with self.assertWarns(DeprecationWarning):
            self.assertEqual(1, getargs_B(BadInt2()))
        self.assertEqual(0, getargs_B(BadInt3()))
        self.assertEqual(UCHAR_MAX, getargs_B(-1))
        self.assertEqual(0, getargs_B(0))
        self.assertEqual(UCHAR_MAX, getargs_B(UCHAR_MAX))
        self.assertEqual(0, getargs_B(UCHAR_MAX+1))
        self.assertEqual(42, getargs_B(42))
        self.assertEqual(UCHAR_MAX & VERY_LARGE, getargs_B(VERY_LARGE))
    def test_H(self):
        from _testcapi import getargs_H
        # H returns 'unsigned short', no range checking
        self.assertRaises(TypeError, getargs_H, 3.14)
        self.assertEqual(99, getargs_H(Int()))
        self.assertEqual(0, getargs_H(IntSubclass()))
        self.assertRaises(TypeError, getargs_H, BadInt())
        with self.assertWarns(DeprecationWarning):
            self.assertEqual(1, getargs_H(BadInt2()))
        self.assertEqual(0, getargs_H(BadInt3()))
        self.assertEqual(USHRT_MAX, getargs_H(-1))
        self.assertEqual(0, getargs_H(0))
        self.assertEqual(USHRT_MAX, getargs_H(USHRT_MAX))
        self.assertEqual(0, getargs_H(USHRT_MAX+1))
        self.assertEqual(42, getargs_H(42))
        self.assertEqual(VERY_LARGE & USHRT_MAX, getargs_H(VERY_LARGE))
    def test_I(self):
        from _testcapi import getargs_I
        # I returns 'unsigned int', no range checking
        self.assertRaises(TypeError, getargs_I, 3.14)
        self.assertEqual(99, getargs_I(Int()))
        self.assertEqual(0, getargs_I(IntSubclass()))
        self.assertRaises(TypeError, getargs_I, BadInt())
        with self.assertWarns(DeprecationWarning):
            self.assertEqual(1, getargs_I(BadInt2()))
        self.assertEqual(0, getargs_I(BadInt3()))
        self.assertEqual(UINT_MAX, getargs_I(-1))
        self.assertEqual(0, getargs_I(0))
        self.assertEqual(UINT_MAX, getargs_I(UINT_MAX))
        self.assertEqual(0, getargs_I(UINT_MAX+1))
        self.assertEqual(42, getargs_I(42))
        self.assertEqual(VERY_LARGE & UINT_MAX, getargs_I(VERY_LARGE))
    def test_k(self):
        from _testcapi import getargs_k
        # k returns 'unsigned long', no range checking
        # it does not accept float, or instances with __int__
        self.assertRaises(TypeError, getargs_k, 3.14)
        self.assertRaises(TypeError, getargs_k, Int())
        self.assertEqual(0, getargs_k(IntSubclass()))
        self.assertRaises(TypeError, getargs_k, BadInt())
        self.assertRaises(TypeError, getargs_k, BadInt2())
        self.assertEqual(0, getargs_k(BadInt3()))
        self.assertEqual(ULONG_MAX, getargs_k(-1))
        self.assertEqual(0, getargs_k(0))
        self.assertEqual(ULONG_MAX, getargs_k(ULONG_MAX))
        self.assertEqual(0, getargs_k(ULONG_MAX+1))
        self.assertEqual(42, getargs_k(42))
        self.assertEqual(VERY_LARGE & ULONG_MAX, getargs_k(VERY_LARGE))
class Signed_TestCase(unittest.TestCase):
    """Signed integer formats 'h', 'i', 'l', 'n': all range-checked (OverflowError)."""
    def test_h(self):
        from _testcapi import getargs_h
        # h returns 'short', and does range checking (SHRT_MIN ... SHRT_MAX)
        self.assertRaises(TypeError, getargs_h, 3.14)
        self.assertEqual(99, getargs_h(Int()))
        self.assertEqual(0, getargs_h(IntSubclass()))
        self.assertRaises(TypeError, getargs_h, BadInt())
        with self.assertWarns(DeprecationWarning):
            self.assertEqual(1, getargs_h(BadInt2()))
        self.assertEqual(0, getargs_h(BadInt3()))
        self.assertRaises(OverflowError, getargs_h, SHRT_MIN-1)
        self.assertEqual(SHRT_MIN, getargs_h(SHRT_MIN))
        self.assertEqual(SHRT_MAX, getargs_h(SHRT_MAX))
        self.assertRaises(OverflowError, getargs_h, SHRT_MAX+1)
        self.assertEqual(42, getargs_h(42))
        self.assertRaises(OverflowError, getargs_h, VERY_LARGE)
    def test_i(self):
        from _testcapi import getargs_i
        # i returns 'int', and does range checking (INT_MIN ... INT_MAX)
        self.assertRaises(TypeError, getargs_i, 3.14)
        self.assertEqual(99, getargs_i(Int()))
        self.assertEqual(0, getargs_i(IntSubclass()))
        self.assertRaises(TypeError, getargs_i, BadInt())
        with self.assertWarns(DeprecationWarning):
            self.assertEqual(1, getargs_i(BadInt2()))
        self.assertEqual(0, getargs_i(BadInt3()))
        self.assertRaises(OverflowError, getargs_i, INT_MIN-1)
        self.assertEqual(INT_MIN, getargs_i(INT_MIN))
        self.assertEqual(INT_MAX, getargs_i(INT_MAX))
        self.assertRaises(OverflowError, getargs_i, INT_MAX+1)
        self.assertEqual(42, getargs_i(42))
        self.assertRaises(OverflowError, getargs_i, VERY_LARGE)
    def test_l(self):
        from _testcapi import getargs_l
        # l returns 'long', and does range checking (LONG_MIN ... LONG_MAX)
        self.assertRaises(TypeError, getargs_l, 3.14)
        self.assertEqual(99, getargs_l(Int()))
        self.assertEqual(0, getargs_l(IntSubclass()))
        self.assertRaises(TypeError, getargs_l, BadInt())
        with self.assertWarns(DeprecationWarning):
            self.assertEqual(1, getargs_l(BadInt2()))
        self.assertEqual(0, getargs_l(BadInt3()))
        self.assertRaises(OverflowError, getargs_l, LONG_MIN-1)
        self.assertEqual(LONG_MIN, getargs_l(LONG_MIN))
        self.assertEqual(LONG_MAX, getargs_l(LONG_MAX))
        self.assertRaises(OverflowError, getargs_l, LONG_MAX+1)
        self.assertEqual(42, getargs_l(42))
        self.assertRaises(OverflowError, getargs_l, VERY_LARGE)
    def test_n(self):
        from _testcapi import getargs_n
        # n returns 'Py_ssize_t', and does range checking
        # (PY_SSIZE_T_MIN ... PY_SSIZE_T_MAX)
        self.assertRaises(TypeError, getargs_n, 3.14)
        # 'n' does not accept objects that merely define __int__
        self.assertRaises(TypeError, getargs_n, Int())
        self.assertEqual(0, getargs_n(IntSubclass()))
        self.assertRaises(TypeError, getargs_n, BadInt())
        self.assertRaises(TypeError, getargs_n, BadInt2())
        self.assertEqual(0, getargs_n(BadInt3()))
        self.assertRaises(OverflowError, getargs_n, PY_SSIZE_T_MIN-1)
        self.assertEqual(PY_SSIZE_T_MIN, getargs_n(PY_SSIZE_T_MIN))
        self.assertEqual(PY_SSIZE_T_MAX, getargs_n(PY_SSIZE_T_MAX))
        self.assertRaises(OverflowError, getargs_n, PY_SSIZE_T_MAX+1)
        self.assertEqual(42, getargs_n(42))
        self.assertRaises(OverflowError, getargs_n, VERY_LARGE)
@unittest.skipIf(getargs_L is None, 'PY_LONG_LONG is not available')
class LongLong_TestCase(unittest.TestCase):
    """Formats 'L' (long long, range-checked) and 'K' (unsigned long long, masked)."""
    def test_L(self):
        from _testcapi import getargs_L
        # L returns 'long long', and does range checking (LLONG_MIN
        # ... LLONG_MAX)
        self.assertRaises(TypeError, getargs_L, 3.14)
        self.assertRaises(TypeError, getargs_L, "Hello")
        self.assertEqual(99, getargs_L(Int()))
        self.assertEqual(0, getargs_L(IntSubclass()))
        self.assertRaises(TypeError, getargs_L, BadInt())
        with self.assertWarns(DeprecationWarning):
            self.assertEqual(1, getargs_L(BadInt2()))
        self.assertEqual(0, getargs_L(BadInt3()))
        self.assertRaises(OverflowError, getargs_L, LLONG_MIN-1)
        self.assertEqual(LLONG_MIN, getargs_L(LLONG_MIN))
        self.assertEqual(LLONG_MAX, getargs_L(LLONG_MAX))
        self.assertRaises(OverflowError, getargs_L, LLONG_MAX+1)
        self.assertEqual(42, getargs_L(42))
        self.assertRaises(OverflowError, getargs_L, VERY_LARGE)
    def test_K(self):
        from _testcapi import getargs_K
        # K return 'unsigned long long', no range checking
        self.assertRaises(TypeError, getargs_K, 3.14)
        self.assertRaises(TypeError, getargs_K, Int())
        self.assertEqual(0, getargs_K(IntSubclass()))
        self.assertRaises(TypeError, getargs_K, BadInt())
        self.assertRaises(TypeError, getargs_K, BadInt2())
        self.assertEqual(0, getargs_K(BadInt3()))
        # out-of-range values wrap modulo 2**64
        self.assertEqual(ULLONG_MAX, getargs_K(ULLONG_MAX))
        self.assertEqual(0, getargs_K(0))
        self.assertEqual(0, getargs_K(ULLONG_MAX+1))
        self.assertEqual(42, getargs_K(42))
        self.assertEqual(VERY_LARGE & ULLONG_MAX, getargs_K(VERY_LARGE))
class Paradox:
    """An object whose truth value is deliberately undecidable: every
    attempt to interpret it as a boolean raises NotImplementedError."""
    def __bool__(self):
        raise NotImplementedError
class Boolean_TestCase(unittest.TestCase):
    """Format 'p' ("predicate"): any object's truthiness converted to C int 0/1."""
    def test_p(self):
        from _testcapi import getargs_p
        # falsy values of every builtin type -> 0
        self.assertEqual(0, getargs_p(False))
        self.assertEqual(0, getargs_p(None))
        self.assertEqual(0, getargs_p(0))
        self.assertEqual(0, getargs_p(0.0))
        self.assertEqual(0, getargs_p(0j))
        self.assertEqual(0, getargs_p(''))
        self.assertEqual(0, getargs_p(()))
        self.assertEqual(0, getargs_p([]))
        self.assertEqual(0, getargs_p({}))
        # truthy values -> 1
        self.assertEqual(1, getargs_p(True))
        self.assertEqual(1, getargs_p(1))
        self.assertEqual(1, getargs_p(1.0))
        self.assertEqual(1, getargs_p(1j))
        self.assertEqual(1, getargs_p('x'))
        self.assertEqual(1, getargs_p((1,)))
        self.assertEqual(1, getargs_p([1]))
        self.assertEqual(1, getargs_p({1:2}))
        self.assertEqual(1, getargs_p(unittest.TestCase))
        # an exception from __bool__ propagates out of argument parsing
        self.assertRaises(NotImplementedError, getargs_p, Paradox())
class Tuple_TestCase(unittest.TestCase):
    """Nested tuple format units '(...)': unpacking and error propagation."""
    def test_tuple(self):
        from _testcapi import getargs_tuple
        ret = getargs_tuple(1, (2, 3))
        self.assertEqual(ret, (1,2,3))
        # make sure invalid tuple arguments are handled correctly
        class seq:
            def __len__(self):
                return 2
            def __getitem__(self, n):
                # any item access fails; parsing must surface a TypeError
                raise ValueError
        self.assertRaises(TypeError, getargs_tuple, 1, seq())
class Keywords_TestCase(unittest.TestCase):
    """PyArg_ParseTupleAndKeywords: 5 args (arg1..arg5), some nested tuples,
    missing optionals reported as -1."""
    def test_positional_args(self):
        # using all positional args
        self.assertEqual(
            getargs_keywords((1,2), 3, (4,(5,6)), (7,8,9), 10),
            (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
            )
    def test_mixed_args(self):
        # positional and keyword args
        self.assertEqual(
            getargs_keywords((1,2), 3, (4,(5,6)), arg4=(7,8,9), arg5=10),
            (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
            )
    def test_keyword_args(self):
        # all keywords
        self.assertEqual(
            getargs_keywords(arg1=(1,2), arg2=3, arg3=(4,(5,6)), arg4=(7,8,9), arg5=10),
            (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
            )
    def test_optional_args(self):
        # missing optional keyword args, skipping tuples
        self.assertEqual(
            getargs_keywords(arg1=(1,2), arg2=3, arg5=10),
            (1, 2, 3, -1, -1, -1, -1, -1, -1, 10)
            )
    def test_required_args(self):
        # required arg missing
        try:
            getargs_keywords(arg1=(1,2))
        except TypeError as err:
            self.assertEqual(str(err), "Required argument 'arg2' (pos 2) not found")
        else:
            self.fail('TypeError should have been raised')
    def test_too_many_args(self):
        try:
            getargs_keywords((1,2),3,(4,(5,6)),(7,8,9),10,111)
        except TypeError as err:
            self.assertEqual(str(err), "function takes at most 5 arguments (6 given)")
        else:
            self.fail('TypeError should have been raised')
    def test_invalid_keyword(self):
        # extraneous keyword arg
        try:
            getargs_keywords((1,2),3,arg5=10,arg666=666)
        except TypeError as err:
            self.assertEqual(str(err), "'arg666' is an invalid keyword argument for this function")
        else:
            self.fail('TypeError should have been raised')
    def test_surrogate_keyword(self):
        # a lone surrogate is not a valid keyword name
        try:
            getargs_keywords((1,2), 3, (4,(5,6)), (7,8,9), **{'\uDC80': 10})
        except TypeError as err:
            self.assertEqual(str(err), "'\udc80' is an invalid keyword argument for this function")
        else:
            self.fail('TypeError should have been raised')
class KeywordOnly_TestCase(unittest.TestCase):
    """PyArg_ParseTupleAndKeywords with the '$' keyword-only marker:
    (required, optional, then a keyword-only arg); missing args reported as -1.

    Fix: regex patterns for assertRaisesRegex are now raw strings so that
    '\\(' is a literal regex escape rather than an invalid string escape
    (invalid escapes in str literals are deprecated since Python 3.6).
    """
    def test_positional_args(self):
        # using all possible positional args
        self.assertEqual(
            getargs_keyword_only(1, 2),
            (1, 2, -1)
            )
    def test_mixed_args(self):
        # positional and keyword args
        self.assertEqual(
            getargs_keyword_only(1, 2, keyword_only=3),
            (1, 2, 3)
            )
    def test_keyword_args(self):
        # all keywords
        self.assertEqual(
            getargs_keyword_only(required=1, optional=2, keyword_only=3),
            (1, 2, 3)
            )
    def test_optional_args(self):
        # missing optional keyword args, skipping tuples
        self.assertEqual(
            getargs_keyword_only(required=1, optional=2),
            (1, 2, -1)
            )
        self.assertEqual(
            getargs_keyword_only(required=1, keyword_only=3),
            (1, -1, 3)
            )
    def test_required_args(self):
        self.assertEqual(
            getargs_keyword_only(1),
            (1, -1, -1)
            )
        self.assertEqual(
            getargs_keyword_only(required=1),
            (1, -1, -1)
            )
        # required arg missing
        with self.assertRaisesRegex(TypeError,
            r"Required argument 'required' \(pos 1\) not found"):
            getargs_keyword_only(optional=2)
        with self.assertRaisesRegex(TypeError,
            r"Required argument 'required' \(pos 1\) not found"):
            getargs_keyword_only(keyword_only=3)
    def test_too_many_args(self):
        with self.assertRaisesRegex(TypeError,
            r"Function takes at most 2 positional arguments \(3 given\)"):
            getargs_keyword_only(1, 2, 3)
        with self.assertRaisesRegex(TypeError,
            r"function takes at most 3 arguments \(4 given\)"):
            getargs_keyword_only(1, 2, 3, keyword_only=5)
    def test_invalid_keyword(self):
        # extraneous keyword arg
        with self.assertRaisesRegex(TypeError,
            "'monster' is an invalid keyword argument for this function"):
            getargs_keyword_only(1, 2, monster=666)
    def test_surrogate_keyword(self):
        # a lone surrogate is not a valid keyword name
        with self.assertRaisesRegex(TypeError,
            "'\udc80' is an invalid keyword argument for this function"):
            getargs_keyword_only(1, 2, **{'\uDC80': 10})
class Bytes_TestCase(unittest.TestCase):
    """Byte-oriented format units: 'c', 's', 'z', 'y' and their '*' (buffer)
    and '#' (length) variants.  's'/'z' accept str (UTF-8 encoded), 'y'
    accepts bytes-likes; 'z' variants additionally accept None.
    NOTE(review): this class continues past the end of the reviewed chunk."""
    def test_c(self):
        from _testcapi import getargs_c
        # 'c' takes exactly one byte
        self.assertRaises(TypeError, getargs_c, b'abc')  # len > 1
        self.assertEqual(getargs_c(b'a'), b'a')
        self.assertEqual(getargs_c(bytearray(b'a')), b'a')
        self.assertRaises(TypeError, getargs_c, memoryview(b'a'))
        self.assertRaises(TypeError, getargs_c, 's')
        self.assertRaises(TypeError, getargs_c, None)
    def test_s(self):
        from _testcapi import getargs_s
        # str is UTF-8 encoded; embedded NUL rejected (C string result)
        self.assertEqual(getargs_s('abc\xe9'), b'abc\xc3\xa9')
        self.assertRaises(TypeError, getargs_s, 'nul:\0')
        self.assertRaises(TypeError, getargs_s, b'bytes')
        self.assertRaises(TypeError, getargs_s, bytearray(b'bytearray'))
        self.assertRaises(TypeError, getargs_s, memoryview(b'memoryview'))
        self.assertRaises(TypeError, getargs_s, None)
    def test_s_star(self):
        from _testcapi import getargs_s_star
        # 's*' accepts any buffer; NUL allowed since length travels with it
        self.assertEqual(getargs_s_star('abc\xe9'), b'abc\xc3\xa9')
        self.assertEqual(getargs_s_star('nul:\0'), b'nul:\0')
        self.assertEqual(getargs_s_star(b'bytes'), b'bytes')
        self.assertEqual(getargs_s_star(bytearray(b'bytearray')), b'bytearray')
        self.assertEqual(getargs_s_star(memoryview(b'memoryview')), b'memoryview')
        self.assertRaises(TypeError, getargs_s_star, None)
    def test_s_hash(self):
        from _testcapi import getargs_s_hash
        # 's#' needs a read-only buffer, so mutable bytes-likes are rejected
        self.assertEqual(getargs_s_hash('abc\xe9'), b'abc\xc3\xa9')
        self.assertEqual(getargs_s_hash('nul:\0'), b'nul:\0')
        self.assertEqual(getargs_s_hash(b'bytes'), b'bytes')
        self.assertRaises(TypeError, getargs_s_hash, bytearray(b'bytearray'))
        self.assertRaises(TypeError, getargs_s_hash, memoryview(b'memoryview'))
        self.assertRaises(TypeError, getargs_s_hash, None)
    def test_z(self):
        from _testcapi import getargs_z
        # 'z' is like 's' but also accepts None (-> NULL)
        self.assertEqual(getargs_z('abc\xe9'), b'abc\xc3\xa9')
        self.assertRaises(TypeError, getargs_z, 'nul:\0')
        self.assertRaises(TypeError, getargs_z, b'bytes')
        self.assertRaises(TypeError, getargs_z, bytearray(b'bytearray'))
        self.assertRaises(TypeError, getargs_z, memoryview(b'memoryview'))
        self.assertIsNone(getargs_z(None))
    def test_z_star(self):
        from _testcapi import getargs_z_star
        self.assertEqual(getargs_z_star('abc\xe9'), b'abc\xc3\xa9')
        self.assertEqual(getargs_z_star('nul:\0'), b'nul:\0')
        self.assertEqual(getargs_z_star(b'bytes'), b'bytes')
        self.assertEqual(getargs_z_star(bytearray(b'bytearray')), b'bytearray')
        self.assertEqual(getargs_z_star(memoryview(b'memoryview')), b'memoryview')
        self.assertIsNone(getargs_z_star(None))
    def test_z_hash(self):
        from _testcapi import getargs_z_hash
        self.assertEqual(getargs_z_hash('abc\xe9'), b'abc\xc3\xa9')
        self.assertEqual(getargs_z_hash('nul:\0'), b'nul:\0')
        self.assertEqual(getargs_z_hash(b'bytes'), b'bytes')
        self.assertRaises(TypeError, getargs_z_hash, bytearray(b'bytearray'))
        self.assertRaises(TypeError, getargs_z_hash, memoryview(b'memoryview'))
        self.assertIsNone(getargs_z_hash(None))
    def test_y(self):
        from _testcapi import getargs_y
        # 'y' accepts only bytes (no str); embedded NUL rejected
        self.assertRaises(TypeError, getargs_y, 'abc\xe9')
        self.assertEqual(getargs_y(b'bytes'), b'bytes')
        self.assertRaises(TypeError, getargs_y, b'nul:\0')
        self.assertRaises(TypeError, getargs_y, bytearray(b'bytearray'))
        self.assertRaises(TypeError, getargs_y, memoryview(b'memoryview'))
        self.assertRaises(TypeError, getargs_y, None)
    def test_y_star(self):
        from _testcapi import getargs_y_star
        self.assertRaises(TypeError, getargs_y_star, 'abc\xe9')
        self.assertEqual(getargs_y_star(b'bytes'), b'bytes')
        self.assertEqual(getargs_y_star(b'nul:\0'), b'nul:\0')
        self.assertEqual(getargs_y_star(bytearray(b'bytearray')), b'bytearray')
        self.assertEqual(getargs_y_star(memoryview(b'memoryview')), b'memoryview')
        self.assertRaises(TypeError, getargs_y_star, None)
    def test_y_hash(self):
        from _testcapi import getargs_y_hash
        self.assertRaises(TypeError, getargs_y_hash, 'abc\xe9')
        self.assertEqual(getargs_y_hash(b'bytes'), b'bytes')
        self.assertEqual(getargs_y_hash(b'nul:\0'), b'nul:\0')
        self.assertRaises(TypeError, getargs_y_hash, bytearray(b'bytearray'))
        self.assertRaises(TypeError, getargs_y_hash, memoryview(b'memoryview'))
        self.assertRaises(TypeError, getargs_y_hash, None)
def test_w_star(self):
# getargs_w_star() modifies first and last byte
from _testcapi import getargs_w_star
self.assertRaises(TypeError, getargs_w_star, 'abc\xe9')
self.assertRaises(TypeError, getargs_w_star, b'bytes')
self.assertRaises(TypeError, getargs_w_star, b'nul:\0')
self.assertRaises(TypeError, getargs_w_star, memoryview(b'bytes'))
self.assertEqual(getargs_w_star(bytearray(b'bytearray')), b'[ytearra]')
self.assertEqual(getargs_w_star(memoryview(bytearray(b'memoryview'))),
b'[emoryvie]')
self.assertRaises(TypeError, getargs_w_star, None)
class Unicode_TestCase(unittest.TestCase):
    """Exercise the str-only getargs format units ('u', 'u#', 'Z', 'Z#')."""

    def test_u(self):
        # 'u': str without embedded NUL; rejects all bytes-likes and None.
        from _testcapi import getargs_u
        self.assertEqual(getargs_u('abc\xe9'), 'abc\xe9')
        self.assertRaises(TypeError, getargs_u, 'nul:\0')
        self.assertRaises(TypeError, getargs_u, b'bytes')
        self.assertRaises(TypeError, getargs_u, bytearray(b'bytearray'))
        self.assertRaises(TypeError, getargs_u, memoryview(b'memoryview'))
        self.assertRaises(TypeError, getargs_u, None)

    def test_u_hash(self):
        # 'u#': str with explicit length, so embedded NUL is allowed.
        from _testcapi import getargs_u_hash
        self.assertEqual(getargs_u_hash('abc\xe9'), 'abc\xe9')
        self.assertEqual(getargs_u_hash('nul:\0'), 'nul:\0')
        self.assertRaises(TypeError, getargs_u_hash, b'bytes')
        self.assertRaises(TypeError, getargs_u_hash, bytearray(b'bytearray'))
        self.assertRaises(TypeError, getargs_u_hash, memoryview(b'memoryview'))
        self.assertRaises(TypeError, getargs_u_hash, None)

    def test_Z(self):
        # 'Z': like 'u' but additionally accepts None.
        from _testcapi import getargs_Z
        self.assertEqual(getargs_Z('abc\xe9'), 'abc\xe9')
        self.assertRaises(TypeError, getargs_Z, 'nul:\0')
        self.assertRaises(TypeError, getargs_Z, b'bytes')
        self.assertRaises(TypeError, getargs_Z, bytearray(b'bytearray'))
        self.assertRaises(TypeError, getargs_Z, memoryview(b'memoryview'))
        self.assertIsNone(getargs_Z(None))

    def test_Z_hash(self):
        # 'Z#': like 'u#' but additionally accepts None.
        from _testcapi import getargs_Z_hash
        self.assertEqual(getargs_Z_hash('abc\xe9'), 'abc\xe9')
        self.assertEqual(getargs_Z_hash('nul:\0'), 'nul:\0')
        self.assertRaises(TypeError, getargs_Z_hash, b'bytes')
        self.assertRaises(TypeError, getargs_Z_hash, bytearray(b'bytearray'))
        self.assertRaises(TypeError, getargs_Z_hash, memoryview(b'memoryview'))
        self.assertIsNone(getargs_Z_hash(None))
if __name__ == "__main__":
    # Run the getargs regression tests when this file is executed directly.
    unittest.main()
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
|
|
# -*- Mode: Python -*-
# Id: asyncore.py,v 2.51 2000/09/07 22:29:26 rushing Exp
# Author: Sam Rushing <rushing@nightmare.com>
# ======================================================================
# Copyright 1996 by Sam Rushing
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Sam
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================
"""Basic infrastructure for asynchronous socket service clients and servers.
There are only two ways to have a program on a single processor do "more
than one thing at a time". Multi-threaded programming is the simplest and
most popular way to do it, but there is another very different technique,
that lets you have nearly all the advantages of multi-threading, without
actually using multiple threads. It's really only practical if your program
is largely I/O bound. If your program is CPU bound, then pre-emptive
scheduled threads are probably what you really need. Network servers are
rarely CPU-bound, however.
If your operating system supports the select() system call in its I/O
library (and nearly all do), then you can use it to juggle multiple
communication channels at once; doing other work while your I/O is taking
place in the "background." Although this strategy can seem strange and
complex, especially at first, it is in many ways easier to understand and
control than multi-threaded programming. The module documented here solves
many of the difficult problems for you, making the task of building
sophisticated high-performance network servers and clients a snap.
"""
import select
import socket
import sys
import time
import os
from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, \
ENOTCONN, ESHUTDOWN, EINTR, EISCONN, EBADF, ECONNABORTED, errorcode
# Global default channel map (file descriptor -> dispatcher).  The
# try/except keeps an existing map alive if this module is reload()ed.
try:
    socket_map
except NameError:
    socket_map = {}
def _strerror(err):
res = os.strerror(err)
if res == 'Unknown error':
res = errorcode[err]
return res
class ExitNow(Exception):
    """Raised from within a handler to abort the asyncore loop; the
    dispatch helpers below re-raise it instead of swallowing it."""
    pass
def read(obj):
    """Deliver a read event to *obj*; any error escaping the handler
    (other than loop-control exceptions) is routed to handle_error()."""
    try:
        obj.handle_read_event()
    except (ExitNow, KeyboardInterrupt, SystemExit):
        raise
    except:
        obj.handle_error()
def write(obj):
    """Deliver a write event to *obj*; any error escaping the handler
    (other than loop-control exceptions) is routed to handle_error()."""
    try:
        obj.handle_write_event()
    except (ExitNow, KeyboardInterrupt, SystemExit):
        raise
    except:
        obj.handle_error()
def _exception(obj):
    """Deliver an exceptional-condition event to *obj*; errors escaping
    the handler (other than loop-control exceptions) go to handle_error()."""
    try:
        obj.handle_expt_event()
    except (ExitNow, KeyboardInterrupt, SystemExit):
        raise
    except:
        obj.handle_error()
def readwrite(obj, flags):
    """Dispatch poll()-style event *flags* for *obj*.

    Events are always delivered in a fixed order: read, write,
    exceptional condition, hangup.  Handler errors (other than the
    loop-control exceptions) are routed to obj.handle_error().
    """
    read_mask = select.POLLIN | select.POLLPRI
    error_mask = select.POLLERR | select.POLLNVAL
    try:
        if flags & read_mask:
            obj.handle_read_event()
        if flags & select.POLLOUT:
            obj.handle_write_event()
        if flags & error_mask:
            obj.handle_expt_event()
        if flags & select.POLLHUP:
            obj.handle_close()
    except (ExitNow, KeyboardInterrupt, SystemExit):
        raise
    except:
        obj.handle_error()
def poll(timeout=0.0, map=None):
    """Run one select()-based polling pass over *map* (default: the
    module-level socket_map), dispatching ready events."""
    if map is None:
        map = socket_map
    if not map:
        return
    r, w, e = [], [], []
    for fd, obj in map.items():
        wants_read = obj.readable()
        wants_write = obj.writable()
        if wants_read:
            r.append(fd)
        if wants_write:
            w.append(fd)
        # Watch for exceptional conditions on any active channel.
        if wants_read or wants_write:
            e.append(fd)
    if [] == r == w == e:
        # Nothing to wait on; emulate the select() timeout.
        time.sleep(timeout)
        return
    try:
        r, w, e = select.select(r, w, e, timeout)
    except select.error as err:
        if err.args[0] != EINTR:
            raise
        return
    # Channels may vanish from the map while dispatching earlier events,
    # hence the map.get() guard.
    for ready, dispatch in ((r, read), (w, write), (e, _exception)):
        for fd in ready:
            obj = map.get(fd)
            if obj is None:
                continue
            dispatch(obj)
def poll2(timeout=0.0, map=None):
    """Run one polling pass using select.poll() (added in Python 2.0),
    dispatching ready events for the channels in *map*."""
    if map is None:
        map = socket_map
    if timeout is not None:
        # select.poll() takes its timeout in milliseconds.
        timeout = int(timeout * 1000)
    pollster = select.poll()
    if map:
        for fd, obj in map.items():
            flags = 0
            if obj.readable():
                flags |= select.POLLIN | select.POLLPRI
            if obj.writable():
                flags |= select.POLLOUT
            if flags:
                # Only watch for exceptions on channels that were
                # either readable or writable.
                flags |= select.POLLERR | select.POLLHUP | select.POLLNVAL
                pollster.register(fd, flags)
        try:
            events = pollster.poll(timeout)
        except select.error as err:
            if err.args[0] != EINTR:
                raise
            events = []
        for fd, flags in events:
            obj = map.get(fd)
            if obj is None:
                continue
            readwrite(obj, flags)

poll3 = poll2  # Alias for backward compatibility
def loop(timeout=30.0, use_poll=False, map=None, count=None):
    """Enter the asyncore event loop.

    Runs polling passes until *map* (default: socket_map) is empty, or,
    when *count* is given, for at most *count* passes.
    """
    if map is None:
        map = socket_map
    # Fall back to select() when poll() is unavailable on this platform.
    if use_poll and hasattr(select, 'poll'):
        poll_fun = poll2
    else:
        poll_fun = poll
    if count is None:
        while map:
            poll_fun(timeout, map)
    else:
        while map and count > 0:
            poll_fun(timeout, map)
            count = count - 1
class dispatcher:
    """Wrap a socket and tie it into the asyncore event loop.

    Registers itself in a channel map (fd -> dispatcher) and translates
    readiness notifications from the loop into handle_*() callbacks,
    which subclasses override.  Unknown attribute references fall
    through to the underlying socket via __getattr__.
    """

    debug = False
    connected = False
    accepting = False
    closing = False
    addr = None

    def __init__(self, sock=None, map=None):
        # Remember which channel map this dispatcher registers in.
        if map is None:
            self._map = socket_map
        else:
            self._map = map
        self._fileno = None
        if sock:
            # Set to nonblocking just to make sure for cases where we
            # get a socket from a blocking source.
            sock.setblocking(0)
            self.set_socket(sock, map)
            self.connected = True
            # The constructor no longer requires that the socket
            # passed be connected.
            try:
                self.addr = sock.getpeername()
            except socket.error, err:
                if err.args[0] == ENOTCONN:
                    # To handle the case where we got an unconnected
                    # socket.
                    self.connected = False
                else:
                    # The socket is broken in some unknown way, alert
                    # the user and remove it from the map (to prevent
                    # polling of broken sockets).
                    self.del_channel(map)
                    raise
        else:
            self.socket = None

    def __repr__(self):
        status = [self.__class__.__module__+"."+self.__class__.__name__]
        if self.accepting and self.addr:
            status.append('listening')
        elif self.connected:
            status.append('connected')
        if self.addr is not None:
            try:
                status.append('%s:%d' % self.addr)
            except TypeError:
                # addr may not be a (host, port) tuple (e.g. AF_UNIX).
                status.append(repr(self.addr))
        return '<%s at %#x>' % (' '.join(status), id(self))

    def add_channel(self, map=None):
        # Register this dispatcher's fd so the loop polls it.
        #self.log_info('adding channel %s' % self)
        if map is None:
            map = self._map
        map[self._fileno] = self

    def del_channel(self, map=None):
        # Unregister from the channel map; the loop stops polling us.
        fd = self._fileno
        if map is None:
            map = self._map
        if fd in map:
            #self.log_info('closing channel %d:%s' % (fd, self))
            del map[fd]
        self._fileno = None

    def create_socket(self, family, type):
        """Create a new nonblocking socket and adopt it as our channel."""
        self.family_and_type = family, type
        sock = socket.socket(family, type)
        sock.setblocking(0)
        self.set_socket(sock)

    def set_socket(self, sock, map=None):
        """Adopt *sock* as this channel's socket and register its fd."""
        self.socket = sock
##        self.__dict__['socket'] = sock
        self._fileno = sock.fileno()
        self.add_channel(map)

    def set_reuse_addr(self):
        # try to re-use a server port if possible
        try:
            self.socket.setsockopt(
                socket.SOL_SOCKET, socket.SO_REUSEADDR,
                self.socket.getsockopt(socket.SOL_SOCKET,
                                       socket.SO_REUSEADDR) | 1
                )
        except socket.error:
            pass

    # ==================================================
    # predicates for select()
    # these are used as filters for the lists of sockets
    # to pass to select().
    # ==================================================

    def readable(self):
        return True

    def writable(self):
        return True

    # ==================================================
    # socket object methods.
    # ==================================================

    def listen(self, num):
        self.accepting = True
        # Historical Windows limitation on the listen backlog.
        if os.name == 'nt' and num > 5:
            num = 5
        return self.socket.listen(num)

    def bind(self, addr):
        self.addr = addr
        return self.socket.bind(addr)

    def connect(self, address):
        """Begin a nonblocking connect; completion arrives as an event."""
        self.connected = False
        err = self.socket.connect_ex(address)
        # XXX Should interpret Winsock return values
        if err in (EINPROGRESS, EALREADY, EWOULDBLOCK):
            # Connection is in progress; handle_connect fires later.
            return
        if err in (0, EISCONN):
            self.addr = address
            self.handle_connect_event()
        else:
            raise socket.error(err, errorcode[err])

    def accept(self):
        # XXX can return either an address pair or None
        try:
            conn, addr = self.socket.accept()
            return conn, addr
        except socket.error, why:
            if why.args[0] == EWOULDBLOCK:
                # Spurious readiness; no connection actually pending.
                pass
            else:
                raise

    def send(self, data):
        """Send as much of *data* as the socket accepts; returns the count."""
        try:
            result = self.socket.send(data)
            return result
        except socket.error, why:
            if why.args[0] == EWOULDBLOCK:
                return 0
            elif why.args[0] in (ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED):
                self.handle_close()
                return 0
            else:
                raise

    def recv(self, buffer_size):
        """Receive up to *buffer_size* bytes; '' signals a closed peer."""
        try:
            data = self.socket.recv(buffer_size)
            if not data:
                # a closed connection is indicated by signaling
                # a read condition, and having recv() return 0.
                self.handle_close()
                return ''
            else:
                return data
        except socket.error, why:
            # winsock sometimes throws ENOTCONN
            if why.args[0] in [ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED]:
                self.handle_close()
                return ''
            else:
                raise

    def close(self):
        """Close the socket and remove this channel from the loop."""
        self.connected = False
        self.accepting = False
        self.del_channel()
        try:
            self.socket.close()
        except socket.error, why:
            if why.args[0] not in (ENOTCONN, EBADF):
                raise

    # cheap inheritance, used to pass all other attribute
    # references to the underlying socket object.
    def __getattr__(self, attr):
        return getattr(self.socket, attr)

    # log and log_info may be overridden to provide more sophisticated
    # logging and warning methods. In general, log is for 'hit' logging
    # and 'log_info' is for informational, warning and error logging.

    def log(self, message):
        sys.stderr.write('log: %s\n' % str(message))

    def log_info(self, message, type='info'):
        if __debug__ or type != 'info':
            print '%s: %s' % (type, message)

    def handle_read_event(self):
        if self.accepting:
            # accepting sockets are never connected, they "spawn" new
            # sockets that are connected
            self.handle_accept()
        elif not self.connected:
            # First readiness on a connecting socket completes the connect.
            self.handle_connect_event()
            self.handle_read()
        else:
            self.handle_read()

    def handle_connect_event(self):
        self.connected = True
        self.handle_connect()

    def handle_write_event(self):
        if self.accepting:
            # Accepting sockets shouldn't get a write event.
            # We will pretend it didn't happen.
            return
        if not self.connected:
            #check for errors
            err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
            if err != 0:
                raise socket.error(err, _strerror(err))
            # Writability on a connecting socket means connect completed.
            self.handle_connect_event()
        self.handle_write()

    def handle_expt_event(self):
        # if the handle_expt is the same default worthless method,
        # we'll not even bother calling it, we'll instead generate
        # a useful error
        x = True
        try:
            y1 = self.__class__.handle_expt.im_func
            y2 = dispatcher.handle_expt.im_func
            x = y1 is y2
        except AttributeError:
            pass
        if x:
            err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
            msg = _strerror(err)
            raise socket.error(err, msg)
        else:
            self.handle_expt()

    def handle_error(self):
        """Default error handler: log the traceback and close the channel."""
        nil, t, v, tbinfo = compact_traceback()
        # sometimes a user repr method will crash.
        try:
            self_repr = repr(self)
        except:
            self_repr = '<__repr__(self) failed for object at %0x>' % id(self)
        self.log_info(
            'uncaptured python exception, closing channel %s (%s:%s %s)' % (
                self_repr,
                t,
                v,
                tbinfo
                ),
            'error'
            )
        self.handle_close()

    def handle_expt(self):
        self.log_info('unhandled exception', 'warning')

    def handle_read(self):
        self.log_info('unhandled read event', 'warning')

    def handle_write(self):
        self.log_info('unhandled write event', 'warning')

    def handle_connect(self):
        self.log_info('unhandled connect event', 'warning')

    def handle_accept(self):
        self.log_info('unhandled accept event', 'warning')

    def handle_close(self):
        self.log_info('unhandled close event', 'warning')
        self.close()
# ---------------------------------------------------------------------------
# adds simple buffered output capability, useful for simple clients.
# [for more sophisticated usage use asynchat.async_chat]
# ---------------------------------------------------------------------------
class dispatcher_with_send(dispatcher):
    """A dispatcher with a simple outgoing byte buffer.

    send() queues data; the event loop drains it in 512-byte slices as
    the socket becomes writable.  For anything fancier, use
    asynchat.async_chat.
    """

    def __init__(self, sock=None, map=None):
        dispatcher.__init__(self, sock, map)
        self.out_buffer = ''

    def initiate_send(self):
        # Push at most 512 bytes per writable event; keep the remainder.
        sent = dispatcher.send(self, self.out_buffer[:512])
        self.out_buffer = self.out_buffer[sent:]

    def handle_write(self):
        self.initiate_send()

    def writable(self):
        # Writable while still connecting, or while output is pending.
        return (not self.connected) or len(self.out_buffer)

    def send(self, data):
        if self.debug:
            self.log_info('sending %s' % repr(data))
        self.out_buffer = self.out_buffer + data
        self.initiate_send()
# ---------------------------------------------------------------------------
# used for debugging.
# ---------------------------------------------------------------------------
def compact_traceback():
    """Summarize the active exception's traceback.

    Returns ((file, function, line), exc_type, exc_value, info) where
    *info* is a compact '[file|function|line] ...' string covering every
    frame and the first element describes the innermost frame.
    """
    exc_type, exc_value, tb = sys.exc_info()
    entries = []
    if not tb:  # Must have a traceback
        raise AssertionError("traceback does not exist")
    while tb:
        code = tb.tb_frame.f_code
        entries.append((code.co_filename, code.co_name, str(tb.tb_lineno)))
        tb = tb.tb_next
    # just to be safe: drop our reference into the frame chain
    del tb
    file, function, line = entries[-1]
    info = ' '.join('[%s|%s|%s]' % entry for entry in entries)
    return (file, function, line), exc_type, exc_value, info
def close_all(map=None, ignore_all=False):
    """Close every channel in *map* (default: socket_map), then clear it.

    EBADF is always tolerated (the fd is already gone); other errors are
    re-raised unless *ignore_all* is true.  Loop-control exceptions
    always propagate.
    """
    if map is None:
        map = socket_map
    for channel in map.values():
        try:
            channel.close()
        except OSError as err:
            if err.args[0] == EBADF:
                pass
            elif not ignore_all:
                raise
        except (ExitNow, KeyboardInterrupt, SystemExit):
            raise
        except:
            if not ignore_all:
                raise
    map.clear()
# Asynchronous File I/O:
#
# After a little research (reading man pages on various unixen, and
# digging through the linux kernel), I've determined that select()
# isn't meant for doing asynchronous file i/o.
# Heartening, though - reading linux/mm/filemap.c shows that linux
# supports asynchronous read-ahead. So _MOST_ of the time, the data
# will be sitting in memory for us already when we go to read it.
#
# What other OS's (besides NT) support async file i/o? [VMS?]
#
# Regardless, this is useful for pipes, and stdin/stdout...
if os.name == 'posix':
    import fcntl

    class file_wrapper:
        # Here we override just enough to make a file
        # look like a socket for the purposes of asyncore.
        # The passed fd is automatically os.dup()'d

        def __init__(self, fd):
            # Duplicate so closing this wrapper never closes the caller's fd.
            self.fd = os.dup(fd)

        def recv(self, *args):
            return os.read(self.fd, *args)

        def send(self, *args):
            return os.write(self.fd, *args)

        # Socket-style aliases used by dispatcher code paths.
        read = recv
        write = send

        def close(self):
            os.close(self.fd)

        def fileno(self):
            return self.fd

    class file_dispatcher(dispatcher):
        """A dispatcher over a plain OS file descriptor (pipe, tty, ...)."""

        def __init__(self, fd, map=None):
            dispatcher.__init__(self, None, map)
            self.connected = True
            # Accept either a raw fd or any object with a fileno() method.
            try:
                fd = fd.fileno()
            except AttributeError:
                pass
            self.set_file(fd)
            # set it to non-blocking mode
            flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
            flags = flags | os.O_NONBLOCK
            fcntl.fcntl(fd, fcntl.F_SETFL, flags)

        def set_file(self, fd):
            # Wrap the fd so it quacks like a socket, then register it.
            self._fileno = fd
            self.socket = file_wrapper(fd)
            self.add_channel()
|
|
from typing import List, Iterable, Sequence, ByteString
import sdl2
from sdl2.ext import Color
from sdl2 import (
SDL_CreateRGBSurfaceWithFormatFrom,
SDL_ConvertSurfaceFormat,
SDL_FreeSurface,
SDL_Error
)
def ltorgba(c, alpha=0xff):
    """Expand an 8-bit luminance value into a packed 0xRRGGBBAA integer."""
    gray = (c << 24) | (c << 16) | (c << 8)
    return gray | alpha
def rgbafrom2bit(c, alpha=0xff):
    """Map a 2-bit grayscale index (0-3) to a packed RGBA value.

    Each index step is 85 luminance levels, so 0 -> black, 3 -> white.
    """
    assert c < 4
    return ltorgba(85 * c, alpha=alpha)
def lto2bit(c8):
    """Quantize an 8-bit luminance value down to a 2-bit index (0-3)."""
    index, _remainder = divmod(c8, 85)
    return index
def get_tile_surfaces(tiles, palette, tile_size=(8, 8), format=sdl2.SDL_PIXELFORMAT_RGBA32):
    """Surfaces in the returned iterator must be freed.

    Converts each 8-bit grayscale tile into an SDL RGBA32 surface using
    *palette* (2-bit index -> packed 0xRRGGBBAA color).

    NOTE(review): the *format* parameter is never used; the surface
    format is hard-coded to SDL_PIXELFORMAT_RGBA32 below -- confirm
    whether the parameter was meant to be passed through.
    NOTE(review): SDL_CreateRGBSurfaceWithFormatFrom does not copy the
    pixel data; the temporary bytes() object may be collected while the
    surface is alive. SDL_ConvertSurfaceFormat is imported but unused --
    presumably a copy step was intended; verify lifetime with callers.
    """
    tile_width, tile_height = tile_size
    # Scratch buffer reused for every tile (4 bytes per pixel).
    rgb_tile = bytearray(tile_width*tile_height*4)
    for tile in tiles:
        for i, c8 in enumerate(tile):
            # Get 32-bit RGBA color from 8-bit value
            c = palette[lto2bit(c8)]
            rgb_tile[4*i+0] = c >> 24
            rgb_tile[4*i+1] = (c >> 16) & 0xff
            rgb_tile[4*i+2] = (c >> 8) & 0xff
            rgb_tile[4*i+3] = c & 0xff
        # depth = 32, pitch=4*(size of row in px)
        surf = SDL_CreateRGBSurfaceWithFormatFrom(bytes(rgb_tile), tile_width,
                                                  tile_height, 32, 4*tile_width,
                                                  sdl2.SDL_PIXELFORMAT_RGBA32)
        if not surf:
            raise SDL_Error()
        else:
            yield surf
def encode_rgb(iterable: Iterable[int], palette: Sequence[int]) \
        -> Iterable[int]:
    """Consumes an iterable of byte values and generates pairs of bytes
    representing 8 pixels.

    For each group of 8 pixels the first emitted byte carries the high
    bit of every pixel's 2-bit palette index and the second byte the low
    bit (leftmost pixel in the most significant bit).  A trailing
    partial group (fewer than 8 pixels) is discarded, matching the
    original behavior.

    :param iterable: RGB8 grayscale decoded image.
    :param palette: List of colors used to encode iterable.
    :returns: An iterable of encoded data.
    :raises ValueError: if a pixel value does not occur in *palette*.
    """
    iterable = iter(iterable)
    while True:
        hi = 0
        lo = 0
        for i in range(8):
            try:
                b = next(iterable)
            except StopIteration:
                # PEP 479: re-raising StopIteration inside a generator
                # becomes RuntimeError on Python 3.7+; end cleanly instead.
                return
            c = palette.index(b)
            hi |= (c >> 1) << (7 - i)
            lo |= (c & 1) << (7 - i)
        yield hi
        yield lo
def decode_2bit(iterable: Iterable[int], palette: Sequence["Color"]) \
        -> Iterable[int]:
    """For every two bytes consumed from the given iterable, generates 8 decoded
    RGB8 colors based on the palette.

    The first byte of each pair holds the high bits of the 8 pixels, the
    second the low bits (leftmost pixel in the most significant bit) --
    the inverse of encode_rgb().

    :param iterable: 2-bit grayscale encoded image.
    :param palette: List of colors used to decode the iterable.
    :returns: An iterable of decoded data.
    """
    iterable = iter(iterable)
    while True:
        try:
            hi = next(iterable)
            lo = next(iterable)
        except StopIteration:
            # PEP 479: re-raising StopIteration inside a generator
            # becomes RuntimeError on Python 3.7+; end cleanly instead.
            return
        for i in range(8):
            c = (lo >> (7 - i)) & 1
            c |= ((hi >> (7 - i)) & 1) << 1
            yield palette[c]
def decode_tile(tile: ByteString, palette: Sequence[int]) -> ByteString:
    """Decode a 2-bit tile to 8-bit grayscale.

    Uses the same bit-plane convention as encode_rgb()/decode_2bit():
    the FIRST byte of each pair carries the high bits of the 8 pixels
    and the second byte the low bits.  (The previous implementation had
    the planes swapped, disagreeing with decode_2bit.)  The annotation
    is Sequence[int] because the values are stored into a bytearray.

    :param tile: 16-byte encoded 2-bit tile data.
    :param palette: Four 8-bit luminance values used to decode the tile.
    :returns: A 64-byte decoded tile.
    """
    decoded = bytearray(64)
    for i in range(0, len(tile), 2):
        hi = tile[i]
        lo = tile[i + 1]
        for j in range(8):
            # Low bit from the second byte, high bit from the first.
            c = (lo >> (7 - j)) & 1
            c |= ((hi >> (7 - j)) & 1) << 1
            # i counts bytes in steps of 2 (8 pixels per pair) -> i*4 pixels.
            decoded[i * 4 + j] = palette[c]
    return decoded
class RGBTileset():
    """Grayscale RGB8 decoded tileset.

    :param data: Decoded tileset data.
    :param size: Image dimensions in pixels.
    :param tile_size: Tile dimensions in pixels.
    """

    def __init__(self, data, size, tile_size):
        self.size = size
        self.tile_size = tile_size
        self.data = data

    # GBTileset is defined later in the module, hence the string hints.
    @staticmethod
    def from_gb(gbtileset: 'GBTileset', palette: Sequence[int]) \
            -> 'RGBTileset':
        """Decode a 2-bit grayscale tileset to grayscale RGB8 using a palette.

        :param gbtileset: The encoded tileset object.
        :param palette: A list of colors used to decode the image.
        :returns: A decoded RGB8 tileset.
        """
        width, height = gbtileset.size
        twidth, theight = gbtileset.tile_size
        tiles_per_row = width // twidth
        decoded = bytearray(width * height)
        for index, encoded_tile in enumerate(gbtileset.split_tiles()):
            # Grid position of this tile in the decoded image.
            origin_x = (index % tiles_per_row) * twidth
            origin_y = (index // tiles_per_row) * theight
            pixels = bytes(decode_2bit(encoded_tile, palette))
            for row in range(theight):
                dest = (origin_y + row) * width + origin_x
                decoded[dest:dest + twidth] = \
                    pixels[row * twidth:(row + 1) * twidth]
        return RGBTileset(decoded, gbtileset.size, gbtileset.tile_size)

    def to_gb(self, palette: Sequence[int]) -> 'GBTileset':
        """Encode this tileset; wrapper around GBTileset.from_rgb()."""
        return GBTileset.from_rgb(self, palette)

    def split_tiles(self) -> Sequence[Sequence[int]]:
        """Yield each tile's pixels as one flat buffer.

        Decoded data lays tiles out in an image grid (unlike the
        contiguous encoded form); this walks the grid row by row.
        """
        width, height = self.size
        twidth, theight = self.tile_size
        tiles_per_row = width // twidth
        tiles_per_col = height // theight
        for index in range(tiles_per_row * tiles_per_col):
            origin_x = (index % tiles_per_row) * twidth
            origin_y = (index // tiles_per_row) * theight
            tile = bytearray(twidth * theight)
            for row in range(theight):
                src = (origin_y + row) * width + origin_x
                tile[row * twidth:(row + 1) * twidth] = \
                    self.data[src:src + twidth]
            yield tile
class GBTileset():
    """2-bit encoded tileset.

    :param data: Encoded tileset data.
    :param size: Image dimensions in pixels.
    :param tile_size: Tile dimensions in pixels.
    """

    def __init__(self, data, size, tile_size):
        self.size = size
        self.tile_size = tile_size
        self.data = data

    def split_tiles(self):
        """Yield the raw encoded chunk for each tile.

        Encoded tiles are stored contiguously; an 8x8 tile occupies
        (8 // 8) * 8 * 2 == 16 bytes.
        """
        twidth, theight = self.tile_size
        stride = (twidth // 8) * theight * 2
        return (self.data[offset:offset + stride]
                for offset in range(0, len(self.data), stride))

    @staticmethod
    def from_rgb(rgbtileset: RGBTileset, palette: Sequence[int]) \
            -> 'GBTileset':
        """Encode a tileset using the colors defined by a palette.

        :param rgbtileset: Source tileset.
        :param palette: List of colors used to encode the image.
        :returns: An encoded tileset object.
        """
        width, height = rgbtileset.size
        twidth, theight = rgbtileset.tile_size
        stride = (twidth // 8) * theight * 2
        tile_count = (width // twidth) * (height // theight)
        encoded = bytearray(tile_count * stride)
        for index, pixels in enumerate(rgbtileset.split_tiles()):
            encoded[index * stride:(index + 1) * stride] = \
                bytes(encode_rgb(pixels, palette))
        return GBTileset(encoded, rgbtileset.size, rgbtileset.tile_size)

    def to_rgb(self, palette: Sequence[int]) -> RGBTileset:
        """Decode this tileset; wrapper around RGBTileset.from_gb()."""
        return RGBTileset.from_gb(self, palette)
if __name__ == '__main__':
    # Self-test: round-trip gym.png <-> gym.2bpp through both codecs.
    # Requires Pillow plus the two fixture files next to this script.
    from PIL import Image
    palette = [0xff, 0x55, 0xaa, 0x00]
    img_size = (128, 48)
    tile_size = (8, 8)
    with open('gym.2bpp', 'rb') as f:
        encoded_data_correct = f.read()
    img = Image.open('gym.png')
    assert img.size == img_size
    decoded_data_correct = img.tobytes()
    # Encode direction: PNG pixels -> 2bpp, byte-compare with fixture.
    rgbtileset = RGBTileset(decoded_data_correct, img_size, tile_size)
    gbtileset = GBTileset.from_rgb(rgbtileset, palette)
    for i, (b1, b2) in enumerate(zip(gbtileset.data, encoded_data_correct)):
        print(i, b1, b2)
        assert b1 == b2
    # Decode direction: 2bpp fixture -> pixels, compare with PNG bytes.
    gbtileset = GBTileset(encoded_data_correct, img_size, tile_size)
    rgbtileset = RGBTileset.from_gb(gbtileset, palette)
    for i, (b1, b2) in enumerate(zip(rgbtileset.data, decoded_data_correct)):
        print(i, b1, b2)
        assert b1 == b2
    print('TEST OK')
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Driver base-classes:
(Beginning of) the contract that compute drivers must follow, and shared
types that support that contract
"""
import sys
from oslo.config import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt import event as virtevent
# Config options for selecting and tuning the virt driver; registered on
# the global CONF object below so any importer of this module sees them.
driver_opts = [
    cfg.StrOpt('compute_driver',
               help='Driver to use for controlling virtualization. Options '
                    'include: libvirt.LibvirtDriver, xenapi.XenAPIDriver, '
                    'fake.FakeDriver, baremetal.BareMetalDriver, '
                    'vmwareapi.VMwareESXDriver, vmwareapi.VMwareVCDriver'),
    cfg.StrOpt('default_ephemeral_format',
               default=None,
               help='The default format an ephemeral_volume will be '
                    'formatted with on creation.'),
    cfg.StrOpt('preallocate_images',
               default='none',
               help='VM image preallocation mode: '
                    '"none" => no storage provisioning is done up front, '
                    '"space" => storage is fully allocated at instance start'),
    cfg.BoolOpt('use_cow_images',
                default=True,
                help='Whether to use cow images'),
]

CONF = cfg.CONF
CONF.register_opts(driver_opts)

# Module-level logger named after this module.
LOG = logging.getLogger(__name__)
def driver_dict_from_config(named_driver_config, *args, **kwargs):
    """Build a {driver_type: driver_instance} registry.

    Each entry of *named_driver_config* is a 'type=import.path.Class'
    string; the class is imported and instantiated with *args/**kwargs.
    """
    driver_registry = {}
    for driver_str in named_driver_config:
        driver_type, _sep, driver = driver_str.partition('=')
        driver_class = importutils.import_class(driver)
        instance = driver_class(*args, **kwargs)
        driver_registry[driver_type] = instance
    return driver_registry
def block_device_info_get_root(block_device_info):
    """Return the root device name from *block_device_info*, or None.

    Treats a None/empty argument as an empty mapping.
    """
    return (block_device_info or {}).get('root_device_name')
def block_device_info_get_swap(block_device_info):
    """Return the swap mapping, or an inert default when absent.

    The default ({'device_name': None, 'swap_size': 0}) is deliberately
    unusable per swap_is_usable().
    """
    info = block_device_info or {}
    default_swap = {'device_name': None, 'swap_size': 0}
    return info.get('swap') or default_swap
def swap_is_usable(swap):
    """Truthy only when *swap* names a device and has a positive size.

    Preserves the original short-circuit return values (the falsy
    operand itself, or the boolean size comparison).
    """
    if not swap:
        return swap
    device_name = swap['device_name']
    if not device_name:
        return device_name
    return swap['swap_size'] > 0
def block_device_info_get_ephemerals(block_device_info):
    """Return the list of ephemeral devices ([] when absent/None)."""
    return (block_device_info or {}).get('ephemerals') or []
def block_device_info_get_mapping(block_device_info):
    """Return the block device mapping list ([] when absent/None)."""
    return (block_device_info or {}).get('block_device_mapping') or []
class ComputeDriver(object):
"""Base class for compute drivers.
The interface to this class talks in terms of 'instances' (Amazon EC2 and
internal Nova terminology), by which we mean 'running virtual machine'
(XenAPI terminology) or domain (Xen or libvirt terminology).
An instance has an ID, which is the identifier chosen by Nova to represent
the instance further up the stack. This is unfortunately also called a
'name' elsewhere. As far as this layer is concerned, 'instance ID' and
'instance name' are synonyms.
Note that the instance ID or name is not human-readable or
customer-controlled -- it's an internal ID chosen by Nova. At the
nova.virt layer, instances do not have human-readable names at all -- such
things are only known higher up the stack.
Most virtualization platforms will also have their own identity schemes,
to uniquely identify a VM or domain. These IDs must stay internal to the
platform-specific layer, and never escape the connection interface. The
platform-specific layer is responsible for keeping track of which instance
ID maps to which platform-specific ID, and vice versa.
Some methods here take an instance of nova.compute.service.Instance. This
is the data structure used by nova.compute to store details regarding an
instance, and pass them into this layer. This layer is responsible for
translating that generic data structure into terms that are specific to the
virtualization platform.
"""
capabilities = {
"has_imagecache": False,
"supports_recreate": False,
}
    def __init__(self, virtapi):
        """Create the driver.

        :param virtapi: handle the driver uses to call back into Nova
                        (presumably DB/conductor access -- see nova.virt)
        """
        self.virtapi = virtapi
        # Lifecycle-event callback; None until a listener is registered.
        self._compute_event_callback = None
    def init_host(self, host):
        """Initialize anything that is necessary for the driver to function,
        including catching up with currently running VM's on the given host.

        Abstract: concrete drivers must override.
        """
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def get_info(self, instance):
        """Get the current status of an instance, by name (not ID!)

        Returns a dict containing:

        :state:           the running state, one of the power_state codes
        :max_mem:         (int) the maximum memory in KBytes allowed
        :mem:             (int) the memory in KBytes used by the domain
        :num_cpu:         (int) the number of virtual CPUs for the domain
        :cpu_time:        (int) the CPU time used in nanoseconds

        Abstract: concrete drivers must override.
        """
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
def get_num_instances(self):
"""Return the total number of virtual machines.
Return the number of virtual machines that the hypervisor knows
about.
.. note::
This implementation works for all drivers, but it is
not particularly efficient. Maintainers of the virt drivers are
encouraged to override this method with something more
efficient.
"""
return len(self.list_instances())
def instance_exists(self, instance_id):
"""Checks existence of an instance on the host.
:param instance_id: The ID / name of the instance to lookup
Returns True if an instance with the supplied ID exists on
the host, False otherwise.
.. note::
This implementation works for all drivers, but it is
not particularly efficient. Maintainers of the virt drivers are
encouraged to override this method with something more
efficient.
"""
return instance_id in self.list_instances()
    def list_instances(self):
        """
        Return the names of all the instances known to the virtualization
        layer, as a list.

        Abstract: concrete drivers must override.
        """
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
def list_instance_uuids(self):
"""
Return the UUIDS of all the instances known to the virtualization
layer, as a list.
"""
raise NotImplementedError()
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
"""
Create a new instance/VM/domain on the virtualization platform.
Once this successfully completes, the instance should be
running (power_state.RUNNING).
If this fails, any partial instance should be completely
cleaned up, and the virtualization platform should be in the state
that it was before this call began.
:param context: security context
:param instance: Instance object as returned by DB layer.
This function should use the data there to guide
the creation of the new instance.
:param image_meta: image object returned by nova.image.glance that
defines the image from which to boot this instance
:param injected_files: User files to inject into instance.
:param admin_password: Administrator password to set in instance.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param block_device_info: Information about block devices to be
attached to the instance.
"""
raise NotImplementedError()
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True):
"""Destroy (shutdown and delete) the specified instance.
If the instance is not found (for example if networking failed), this
function should still succeed. It's probably a good idea to log a
warning in that case.
:param instance: Instance object as returned by DB layer.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param block_device_info: Information about block devices that should
be detached from the instance.
:param destroy_disks: Indicates if disks should be destroyed
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
"""Reboot the specified instance.
After this is called successfully, the instance's state
goes back to power_state.RUNNING. The virtualization
platform should ensure that the reboot action has completed
successfully even in cases in which the underlying domain/vm
is paused or halted/stopped.
:param instance: Instance object as returned by DB layer.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param reboot_type: Either a HARD or SOFT reboot
:param block_device_info: Info pertaining to attached volumes
:param bad_volumes_callback: Function to handle any bad volumes
encountered
"""
raise NotImplementedError()
def get_console_pool_info(self, console_type):
    """Return connection information for the console pool of the given type.

    :param console_type: the kind of console pool being queried
    :raises NotImplementedError: must be overridden by concrete drivers
    """
    # TODO(Vek): Need to pass context in for access to auth_token
    raise NotImplementedError()
def get_console_output(self, instance):
    """Return the console output for the given instance.

    :param instance: the instance whose console output is requested
    :raises NotImplementedError: must be overridden by concrete drivers
    """
    # TODO(Vek): Need to pass context in for access to auth_token
    raise NotImplementedError()
def get_vnc_console(self, instance):
    """Return connection info for a VNC console to the given instance.

    :param instance: the instance to open a VNC console for
    :raises NotImplementedError: must be overridden by concrete drivers
    """
    # TODO(Vek): Need to pass context in for access to auth_token
    raise NotImplementedError()
def get_spice_console(self, instance):
    """Return connection info for a SPICE console to the given instance.

    :param instance: the instance to open a SPICE console for
    :raises NotImplementedError: must be overridden by concrete drivers
    """
    # TODO(Vek): Need to pass context in for access to auth_token
    raise NotImplementedError()
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_all_bw_counters(self, instances):
"""Return bandwidth usage counters for each interface on each
running VM"""
raise NotImplementedError()
def get_all_volume_usage(self, context, compute_host_bdms):
"""Return usage info for volumes attached to vms on
a given host"""
raise NotImplementedError()
def get_host_ip_addr(self):
"""
Retrieves the IP address of the dom0
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def attach_volume(self, connection_info, instance, mountpoint):
"""Attach the disk to the instance at mountpoint using info."""
raise NotImplementedError()
def detach_volume(self, connection_info, instance, mountpoint):
"""Detach the disk attached to the instance."""
raise NotImplementedError()
def attach_interface(self, instance, image_meta, network_info):
"""Attach an interface to the instance."""
raise NotImplementedError()
def detach_interface(self, instance, network_info):
"""Detach an interface from the instance."""
raise NotImplementedError()
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type, network_info,
block_device_info=None):
"""
Transfers the disk of a running instance in multiple phases, turning
off the instance before the end.
"""
raise NotImplementedError()
def snapshot(self, context, instance, image_id, update_task_state):
"""
Snapshots the specified instance.
:param context: security context
:param instance: Instance object as returned by DB layer.
:param image_id: Reference to a pre-created image that will
hold the snapshot.
"""
raise NotImplementedError()
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None):
"""Completes a resize, turning on the migrated instance
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param image_meta: image object returned by nova.image.glance that
defines the image from which this instance
was created
"""
raise NotImplementedError()
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def finish_revert_migration(self, instance, network_info,
block_device_info=None):
"""Finish reverting a resize, powering back on the instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def pause(self, instance):
"""Pause the specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def unpause(self, instance):
"""Unpause paused VM instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def suspend(self, instance):
"""suspend the specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def resume(self, instance, network_info, block_device_info=None):
"""resume the specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
raise NotImplementedError()
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Rescue the specified instance."""
raise NotImplementedError()
def unrescue(self, instance, network_info):
"""Unrescue the specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def power_off(self, instance):
"""Power off the specified instance."""
raise NotImplementedError()
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance."""
raise NotImplementedError()
def soft_delete(self, instance):
"""Soft delete the specified instance."""
raise NotImplementedError()
def restore(self, instance):
"""Restore the specified instance."""
raise NotImplementedError()
def get_available_resource(self, nodename):
"""Retrieve resource information.
This method is called when nova-compute launches, and
as part of a periodic task
:param nodename:
node which the caller want to get resources from
a driver that manages only one node can safely ignore this
:returns: Dictionary describing resources
"""
raise NotImplementedError()
def pre_live_migration(self, ctxt, instance_ref,
block_device_info, network_info,
migrate_data=None):
"""Prepare an instance for live migration
:param ctxt: security context
:param instance_ref: instance object that will be migrated
:param block_device_info: instance block device information
:param network_info: instance network information
:param migrate_data: implementation specific data dict.
"""
raise NotImplementedError()
def pre_block_migration(self, ctxt, instance_ref, disk_info):
"""Prepare a block device for migration
:param ctxt: security context
:param instance_ref: instance object that will have its disk migrated
:param disk_info: information about disk to be migrated (as returned
from get_instance_disk_info())
"""
raise NotImplementedError()
def live_migration(self, ctxt, instance_ref, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
"""Live migration of an instance to another host.
:params ctxt: security context
:params instance_ref:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:params dest: destination host
:params post_method:
post operation method.
expected nova.compute.manager.post_live_migration.
:params recover_method:
recovery method when any exception occurs.
expected nova.compute.manager.recover_live_migration.
:params block_migration: if true, migrate VM disk.
:params migrate_data: implementation specific params.
"""
raise NotImplementedError()
def post_live_migration_at_destination(self, ctxt, instance_ref,
network_info,
block_migration=False,
block_device_info=None):
"""Post operation of live migration at destination host.
:param ctxt: security context
:param instance_ref: instance object that is migrated
:param network_info: instance network information
:param block_migration: if true, post operation of block_migration.
"""
raise NotImplementedError()
def check_can_live_migrate_destination(self, ctxt, instance_ref,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
"""Check if it is possible to execute live migration.
This runs checks on the destination host, and then calls
back to the source host to check the results.
:param ctxt: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance
:param src_compute_info: Info about the sending machine
:param dst_compute_info: Info about the receiving machine
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
"""
raise NotImplementedError()
def check_can_live_migrate_destination_cleanup(self, ctxt,
dest_check_data):
"""Do required cleanup on dest host after check_can_live_migrate calls
:param ctxt: security context
:param dest_check_data: result of check_can_live_migrate_destination
"""
raise NotImplementedError()
def check_can_live_migrate_source(self, ctxt, instance_ref,
dest_check_data):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance
:param dest_check_data: result of check_can_live_migrate_destination
"""
raise NotImplementedError()
def refresh_security_group_rules(self, security_group_id):
"""This method is called after a change to security groups.
All security groups and their associated rules live in the datastore,
and calling this method should apply the updated rules to instances
running the specified security group.
An error should be raised if the operation cannot complete.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def refresh_security_group_members(self, security_group_id):
"""This method is called when a security group is added to an instance.
This message is sent to the virtualization drivers on hosts that are
running an instance that belongs to a security group that has a rule
that references the security group identified by `security_group_id`.
It is the responsibility of this method to make sure any rules
that authorize traffic flow with members of the security group are
updated and any new members can communicate, and any removed members
cannot.
Scenario:
* we are running on host 'H0' and we have an instance 'i-0'.
* instance 'i-0' is a member of security group 'speaks-b'
* group 'speaks-b' has an ingress rule that authorizes group 'b'
* another host 'H1' runs an instance 'i-1'
* instance 'i-1' is a member of security group 'b'
When 'i-1' launches or terminates we will receive the message
to update members of group 'b', at which time we will make
any changes needed to the rules for instance 'i-0' to allow
or deny traffic coming from 'i-1', depending on if it is being
added or removed from the group.
In this scenario, 'i-1' could just as easily have been running on our
host 'H0' and this method would still have been called. The point was
that this method isn't called on the host where instances of that
group are running (as is the case with
:py:meth:`refresh_security_group_rules`) but is called where references
are made to authorizing those instances.
An error should be raised if the operation cannot complete.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def refresh_provider_fw_rules(self):
"""This triggers a firewall update based on database changes.
When this is called, rules have either been added or removed from the
datastore. You can retrieve rules with
:py:meth:`nova.db.provider_fw_rule_get_all`.
Provider rules take precedence over security group rules. If an IP
would be allowed by a security group ingress rule, but blocked by
a provider rule, then packets from the IP are dropped. This includes
intra-project traffic in the case of the allow_project_net_traffic
flag for the libvirt-derived classes.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def reset_network(self, instance):
"""reset networking for specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
pass
def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
"""Setting up filtering rules and waiting for its completion.
To migrate an instance, filtering rules to hypervisors
and firewalls are inevitable on destination host.
( Waiting only for filtering rules to hypervisor,
since filtering rules to firewall rules can be set faster).
Concretely, the below method must be called.
- setup_basic_filtering (for nova-basic, etc.)
- prepare_instance_filter(for nova-instance-instance-xxx, etc.)
to_xml may have to be called since it defines PROJNET, PROJMASK.
but libvirt migrates those value through migrateToURI(),
so , no need to be called.
Don't use thread for this method since migration should
not be started when setting-up filtering rules operations
are not completed.
:params instance_ref: nova.db.sqlalchemy.models.Instance object
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def filter_defer_apply_on(self):
"""Defer application of IPTables rules."""
pass
def filter_defer_apply_off(self):
"""Turn off deferral of IPTables rules and apply the rules now."""
pass
def unfilter_instance(self, instance, network_info):
"""Stop filtering instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def set_admin_password(self, context, instance_id, new_pass=None):
"""
Set the root password on the specified instance.
The first parameter is an instance of nova.compute.service.Instance,
and so the instance is being specified as instance.name. The second
parameter is the value of the new password.
"""
raise NotImplementedError()
def inject_file(self, instance, b64_path, b64_contents):
"""
Writes a file on the specified instance.
The first parameter is an instance of nova.compute.service.Instance,
and so the instance is being specified as instance.name. The second
parameter is the base64-encoded path to which the file is to be
written on the instance; the third is the contents of the file, also
base64-encoded.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def change_instance_metadata(self, context, instance, diff):
"""
Applies a diff to the instance metadata.
This is an optional driver method which is used to publish
changes to the instance's metadata to the hypervisor. If the
hypervisor has no means of publishing the instance metadata to
the instance, then this method should not be implemented.
"""
pass
def inject_network_info(self, instance, nw_info):
"""inject network info for specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
pass
def poll_rebooting_instances(self, timeout, instances):
"""Poll for rebooting instances
:param timeout: the currently configured timeout for considering
rebooting instances to be stuck
:param instances: instances that have been in rebooting state
longer than the configured timeout
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def host_power_action(self, host, action):
"""Reboots, shuts down or powers up the host."""
raise NotImplementedError()
def host_maintenance_mode(self, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation."""
raise NotImplementedError()
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_host_uptime(self, host):
"""Returns the result of calling "uptime" on the target host."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
raise NotImplementedError()
def get_host_stats(self, refresh=False):
"""Return currently known host stats."""
raise NotImplementedError()
def block_stats(self, instance_name, disk_id):
"""
Return performance counters associated with the given disk_id on the
given instance_name. These are returned as [rd_req, rd_bytes, wr_req,
wr_bytes, errs], where rd indicates read, wr indicates write, req is
the total number of I/O requests made, bytes is the total number of
bytes transferred, and errs is the number of requests held up due to a
full pipeline.
All counters are long integers.
This method is optional. On some platforms (e.g. XenAPI) performance
statistics can be retrieved directly in aggregate form, without Nova
having to do the aggregation. On those platforms, this method is
unused.
Note that this function takes an instance ID.
"""
raise NotImplementedError()
def interface_stats(self, instance_name, iface_id):
"""
Return performance counters associated with the given iface_id on the
given instance_id. These are returned as [rx_bytes, rx_packets,
rx_errs, rx_drop, tx_bytes, tx_packets, tx_errs, tx_drop], where rx
indicates receive, tx indicates transmit, bytes and packets indicate
the total number of bytes or packets transferred, and errs and dropped
is the total number of packets failed / dropped.
All counters are long integers.
This method is optional. On some platforms (e.g. XenAPI) performance
statistics can be retrieved directly in aggregate form, without Nova
having to do the aggregation. On those platforms, this method is
unused.
Note that this function takes an instance ID.
"""
raise NotImplementedError()
def legacy_nwinfo(self):
"""True if the driver requires the legacy network_info format."""
# TODO(tr3buchet): update all subclasses and remove this method and
# related helpers.
raise NotImplementedError(self.legacy_nwinfo)
def macs_for_instance(self, instance):
    """What MAC addresses must this instance have?

    Some hypervisors (such as bare metal) cannot do freeform virtualisation
    of MAC addresses. This method allows drivers to return a set of MAC
    addresses that the instance is to have. allocate_for_instance will take
    this into consideration when provisioning networking for the instance.

    Mapping of MAC addresses to actual networks (or permitting them to be
    freeform) is up to the network implementation layer. For instance,
    with openflow switches, fixed MAC addresses can still be virtualised
    onto any L2 domain, with arbitrary VLANs etc, but regular switches
    require pre-configured MAC->network mappings that will match the
    actual configuration.

    Most hypervisors can use the default implementation which returns None.
    Hypervisors with MAC limits should return a set of MAC addresses, which
    will be supplied to the allocate_for_instance call by the compute
    manager, and it is up to that call to ensure that all assigned network
    details are compatible with the set of MAC addresses.

    This is called during spawn_instance by the compute manager.

    :param instance: the instance about to be spawned
    :return: None, or a set of MAC ids (e.g. set(['12:34:56:78:90:ab'])).
        None means 'no constraints', a set means 'these and only these
        MAC addresses'.
    """
    # Base implementation imposes no constraints; limited hypervisors
    # override this.
    return None
def manage_image_cache(self, context, all_instances):
"""
Manage the driver's local image cache.
Some drivers chose to cache images for instances on disk. This method
is an opportunity to do management of that cache which isn't directly
related to other calls into the driver. The prime example is to clean
the cache and remove images which are no longer of interest.
"""
pass
def add_to_aggregate(self, context, aggregate, host, **kwargs):
"""Add a compute host to an aggregate."""
#NOTE(jogo) Currently only used for XenAPI-Pool
raise NotImplementedError()
def remove_from_aggregate(self, context, aggregate, host, **kwargs):
"""Remove a compute host from an aggregate."""
raise NotImplementedError()
def undo_aggregate_operation(self, context, op, aggregate,
host, set_error=True):
"""Undo for Resource Pools."""
raise NotImplementedError()
def get_volume_connector(self, instance):
"""Get connector information for the instance for attaching to volumes.
Connector information is a dictionary representing the ip of the
machine that will be making the connection, the name of the iscsi
initiator and the hostname of the machine as follows::
{
'ip': ip,
'initiator': initiator,
'host': hostname
}
"""
raise NotImplementedError()
def get_available_nodes(self):
    """Returns nodenames of all nodes managed by the compute service.

    This method is for multi compute-nodes support. If a driver supports
    multi compute-nodes, this method returns a list of nodenames managed
    by the service. Otherwise, this method should return
    [hypervisor_hostname].
    """
    host_stats = self.get_host_stats(refresh=True)
    # Single-node drivers return one stats dict; normalize to a list.
    if isinstance(host_stats, list):
        per_node_stats = host_stats
    else:
        per_node_stats = [host_stats]
    return [entry['hypervisor_hostname'] for entry in per_node_stats]
def get_per_instance_usage(self):
    """Get information about instance resource usage.

    :returns: dict of nova uuid => dict of usage info
    """
    # Base drivers report no per-instance usage data.
    usage_by_uuid = dict()
    return usage_by_uuid
def instance_on_disk(self, instance):
    """Checks access of instance files on the host.

    :param instance: instance to lookup

    Returns True if files of an instance with the supplied ID accessible on
    the host, False otherwise.

    .. note::
        Used in rebuild for HA implementation and required for validation
        of access to instance shared disk files
    """
    # Default: assume instance files are not reachable; shared-storage
    # capable drivers override this.
    return False
def register_event_listener(self, callback):
    """Register a callback to receive events.

    Register a callback to receive asynchronous event
    notifications from hypervisors. The callback will
    be invoked with a single parameter, which will be
    an instance of the nova.virt.event.Event class.

    :param callback: callable invoked as ``callback(event)``; replaces any
                     previously registered listener
    """
    self._compute_event_callback = callback
def emit_event(self, event):
"""Dispatches an event to the compute manager.
Invokes the event callback registered by the
compute manager to dispatch the event. This
must only be invoked from a green thread."""
if not self._compute_event_callback:
LOG.debug("Discarding event %s" % str(event))
return
if not isinstance(event, virtevent.Event):
raise ValueError(
_("Event must be an instance of nova.virt.event.Event"))
try:
LOG.debug("Emitting event %s" % str(event))
self._compute_event_callback(event)
except Exception, ex:
LOG.error(_("Exception dispatching event %(event)s: %(ex)s")
% locals())
def load_compute_driver(virtapi, compute_driver=None):
    """Load a compute driver module.

    Load the compute driver module specified by the compute_driver
    configuration option or, if supplied, the driver name supplied as an
    argument.

    Compute drivers constructors take a VirtAPI object as their first object
    and this must be supplied.

    :param virtapi: a VirtAPI instance
    :param compute_driver: a compute driver name to override the config opt
    :returns: a ComputeDriver instance
    """
    if not compute_driver:
        compute_driver = CONF.compute_driver

    if not compute_driver:
        # No driver configured anywhere: fatal deployment error.
        LOG.error(_("Compute driver option required, but not specified"))
        sys.exit(1)

    LOG.info(_("Loading compute driver '%s'") % compute_driver)
    try:
        # Driver names are resolved relative to the 'nova.virt' namespace.
        driver = importutils.import_object_ns('nova.virt',
                                              compute_driver,
                                              virtapi)
        # Guard against a misconfigured class that is not a ComputeDriver.
        return utils.check_isinstance(driver, ComputeDriver)
    except ImportError as e:
        LOG.error(_("Unable to load the virtualization driver: %s") % (e))
        sys.exit(1)
def compute_driver_matches(match):
    """Return whether the configured compute driver name ends with *match*."""
    configured = CONF.compute_driver
    # Falsy (unset) config short-circuits exactly like the original.
    return configured and configured.endswith(match)
|
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.service import api
from sahara.service.validations import node_group_templates as nt
from sahara.tests.unit.service.validation import utils as u
class TestNGTemplateCreateValidation(u.ValidationTestCase):
    """Validation tests for node group template creation requests.

    Each test feeds a request body to check_node_group_template_create and
    asserts either success or the exact validation error message produced.
    """

    def setUp(self):
        super(TestNGTemplateCreateValidation, self).setUp()
        # Wire the shared assertion helpers to the NG-template validator.
        self._create_object_fun = nt.check_node_group_template_create
        self.scheme = nt.NODE_GROUP_TEMPLATE_SCHEMA
        api.plugin_base.setup_plugins()

    def test_node_groups_create_required(self):
        """Missing required properties are reported one at a time."""
        self._assert_create_object_validation(
            data={
            },
            bad_req_i=(1, "VALIDATION_ERROR",
                       u"'name' is a required property")
        )
        self._assert_create_object_validation(
            data={
                'name': 'a'
            },
            bad_req_i=(1, "VALIDATION_ERROR",
                       u"'flavor_id' is a required property")
        )
        self._assert_create_object_validation(
            data={
                'name': 'a',
                'flavor_id': '42',
                'plugin_name': 'vanilla',
            },
            bad_req_i=(1, "VALIDATION_ERROR",
                       u"'hadoop_version' is a required property")
        )
        self._assert_create_object_validation(
            data={
                'name': 'a',
                'flavor_id': '42',
                'plugin_name': 'vanilla',
                'hadoop_version': '1.2.1'
            },
            bad_req_i=(1, "VALIDATION_ERROR",
                       u"'node_processes' is a required property")
        )
        # node_processes must be non-empty, not merely present.
        self._assert_create_object_validation(
            data={
                'name': "a",
                'flavor_id': '42',
                'plugin_name': 'vanilla',
                'hadoop_version': '1.2.1',
                'node_processes': []
            },
            bad_req_i=(1, 'VALIDATION_ERROR',
                       u'[] is too short')
        )

    def test_ng_template_create_v_names(self):
        """Template names obey the shared name/hostname rules."""
        data = {
            'name': 'a',
            'flavor_id': '42',
            'plugin_name': 'vanilla',
            'hadoop_version': '1.2.1',
            'node_processes': ['namenode']
        }
        self._assert_valid_name_hostname_validation(data)

    def test_ng_template_create_v_node_processes(self):
        """Duplicate or unknown node processes are rejected."""
        self._assert_create_object_validation(
            data={
                'name': "a",
                'flavor_id': '42',
                'plugin_name': 'vanilla',
                'hadoop_version': '1.2.1',
                'node_processes': ["namenode", "namenode"]
            },
            bad_req_i=(1, 'INVALID_REFERENCE',
                       'Duplicates in node processes have been detected')
        )
        # NOTE: "procesess" matches the plugin's actual (misspelled) message.
        self._assert_create_object_validation(
            data={
                'name': 'a',
                'flavor_id': '42',
                'plugin_name': 'vanilla',
                'hadoop_version': '1.2.1',
                'node_processes': ['wrong_process']
            },
            bad_req_i=(1, 'INVALID_REFERENCE',
                       "Plugin supports the following node procesess: "
                       "['namenode', 'datanode', 'secondarynamenode', "
                       "'oozie', 'tasktracker', 'jobtracker', 'hiveserver']")
        )

    def test_ng_template_create_v_right(self):
        """A fully-specified valid template passes validation."""
        self._assert_create_object_validation(
            data={
                'name': 'a',
                'flavor_id': '42',
                'plugin_name': 'vanilla',
                'hadoop_version': '1.2.1',
                'node_processes': ['namenode',
                                   'datanode',
                                   'secondarynamenode',
                                   'tasktracker',
                                   'jobtracker'],
                'node_configs': {
                    'HDFS': {
                        u'hadoop.tmp.dir': '/temp/'
                    }
                },
                'image_id': '550e8400-e29b-41d4-a716-446655440000',
                'volumes_per_node': 2,
                'volumes_size': 10,
                'description': 'test node template',
                'floating_ip_pool': 'd9a3bebc-f788-4b81-9a93-aa048022c1ca'
            }
        )

    def test_ng_template_create_v_minimum_ints(self):
        """Integer fields enforce their schema minimums."""
        self._assert_create_object_validation(
            data={
                'name': 'a',
                'flavor_id': '42',
                'plugin_name': 'vanilla',
                'hadoop_version': '1.2.1',
                'node_processes': ['wrong_process'],
                'volumes_per_node': -1
            },
            bad_req_i=(1, 'VALIDATION_ERROR',
                       u'-1.0 is less than the minimum of 0')
        )
        self._assert_create_object_validation(
            data={
                'name': 'a',
                'flavor_id': '42',
                'plugin_name': 'vanilla',
                'hadoop_version': '1.2.1',
                'node_processes': ['wrong_process'],
                'volumes_size': 0
            },
            bad_req_i=(1, 'VALIDATION_ERROR',
                       u'0.0 is less than the minimum of 1')
        )

    def test_ng_template_create_v_types(self):
        """Field types are enforced via the shared type-check helper."""
        default_data = {
            'name': 'a', 'flavor_id': '42',
            'plugin_name': 'vanilla',
            'hadoop_version': '1.2.1',
            'node_processes': ['namenode']
        }
        self._assert_types(default_data)

    def test_ng_template_create_v_unique_ng(self):
        """Template names must be unique."""
        data = {
            'name': 'test',
            'flavor_id': '42',
            'plugin_name': 'vanilla',
            'hadoop_version': '1.2.1',
            'node_processes': ['namenode']}
        self._assert_create_object_validation(
            data=data,
            bad_req_i=(1, 'NAME_ALREADY_EXISTS',
                       "NodeGroup template with name 'test' already exists")
        )

    def test_ng_template_create_v_flavor_exists(self):
        """Referenced flavors must exist."""
        self._assert_create_object_validation(
            data={
                'name': 'test-ng',
                'flavor_id': '1',
                'plugin_name': 'vanilla',
                'hadoop_version': '1.2.1',
                'node_processes': ['namenode']
            },
            bad_req_i=(1, 'INVALID_REFERENCE',
                       "Requested flavor '1' not found")
        )

    def test_ng_template_create_v_ng_configs(self):
        """node_configs targets and config names must exist in the plugin."""
        self._assert_create_object_validation(
            data={
                'name': 'test-ng',
                'flavor_id': '42',
                'plugin_name': 'vanilla',
                'hadoop_version': '1.2.1',
                'node_processes': ['namenode'],
                'node_configs': {
                    'wrong_target': {
                        u'hadoop.tmp.dir': '/temp/'
                    }
                }},
            bad_req_i=(1, 'INVALID_REFERENCE',
                       "Plugin doesn't contain applicable "
                       "target 'wrong_target'")
        )
        self._assert_create_object_validation(
            data={
                'name': 'test-ng',
                'flavor_id': '42',
                'plugin_name': 'vanilla',
                'hadoop_version': '1.2.1',
                'node_processes': ['namenode'],
                'node_configs': {
                    'HDFS': {
                        's': 'a'
                    }
                }
            },
            bad_req_i=(1, 'INVALID_REFERENCE',
                       "Plugin's applicable target 'HDFS' doesn't "
                       "contain config with name 's'")
        )

    def test_ng_template_cinder(self):
        """Cinder volume fields: minimums, valid config, mount prefix."""
        self._assert_create_object_validation(
            data={
                'name': 'a',
                'flavor_id': '42',
                'plugin_name': 'vanilla',
                'hadoop_version': '1.2.1',
                'node_processes': ['wrong_process'],
                'volumes_per_node': -1
            },
            bad_req_i=(1, 'VALIDATION_ERROR',
                       u'-1.0 is less than the minimum of 0')
        )
        self._assert_create_object_validation(
            data={
                'name': 'a',
                'flavor_id': '42',
                'plugin_name': 'vanilla',
                'hadoop_version': '1.2.1',
                'node_processes': ['wrong_process'],
                'volumes_size': 0
            },
            bad_req_i=(1, 'VALIDATION_ERROR',
                       u'0.0 is less than the minimum of 1')
        )
        self._assert_create_object_validation(
            data={
                'name': 'a',
                'flavor_id': '42',
                'plugin_name': 'vanilla',
                'hadoop_version': '1.2.1',
                'node_processes': ['datanode', 'tasktracker'],
                'volumes_per_node': 1,
                'volumes_size': 1,
                'volume_mount_prefix': '/mnt/volume'
            }
        )
        # Mount prefix must be a POSIX path.
        data = {
            'name': 'a',
            'flavor_id': '42',
            'plugin_name': 'vanilla',
            'hadoop_version': '1.2.1',
            'node_processes': ['datanode', 'tasktracker'],
            'volumes_per_node': 1,
            'volumes_size': 1,
            'volume_mount_prefix': 'qwerty'
        }
        self._assert_create_object_validation(
            data=data,
            bad_req_i=(1, 'VALIDATION_ERROR', "'qwerty' is not a 'posix_path'")
        )

    def test_wrong_floating_ip_pool(self):
        """Referenced floating IP pools must exist."""
        self._assert_create_object_validation(
            data={
                'name': 'a',
                'flavor_id': '42',
                'plugin_name': 'vanilla',
                'hadoop_version': '1.2.1',
                'node_processes': ['datanode', 'tasktracker'],
                'floating_ip_pool': 'network_bad'
            },
            bad_req_i=(1, 'INVALID_REFERENCE', "Floating IP pool network_bad "
                       "for node group 'a' not found")
        )
|
|
#from http://bookworm-project.github.io/Docs/input.txt.html
import os
import xml.dom.minidom
import urllib
import sys
import magic
import re
import wget
#from https://github.com/euske/pdfminer/tree/b0e035c24fa062cd55cfd55ffc12bc3aa60a4ef6 download the zip and python setup.py install
from cStringIO import StringIO
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
import json
import subprocess
#encoding
import codecs
def createSearchString(year, uni, url):
    """Return an HTML anchor (as XML text) pointing at *url*.

    The link text reads "<year> document from <uni>" and the anchor is
    marked to open in a new tab.
    """
    doc = xml.dom.minidom.Document()
    link = doc.createElement('a')
    # setAttribute is equivalent to assigning via element.attributes[...]
    link.setAttribute('href', url)
    link.setAttribute('target', "_blank")
    link.appendChild(doc.createTextNode("%s document from %s" % (year, uni)))
    return link.toxml()
def removeEverythingButAlphaNumeric(stringToClean):
    """Collapse every run of non-word characters into a single space.

    Note: despite the name, underscores survive (regex \\w includes '_'),
    and leading/trailing runs become a space rather than being stripped.
    """
    return re.sub(r'\W+', ' ', stringToClean)
#from http://www.binpress.com/tutorial/manipulating-pdfs-with-python/167
#from http://www.binpress.com/tutorial/manipulating-pdfs-with-python/167
def convert(fname1, pages=None):
    """Extract the text of the PDF at *fname1* and return it cleaned.

    ``pages`` - optional iterable of zero-based page numbers; when omitted
    (or empty/falsy) every page is converted.

    The raw text is passed through removeEverythingButAlphaNumeric() so
    only word characters separated by single spaces remain.
    """
    if not pages:
        pagenums = set()
    else:
        pagenums = set(pages)
    output = StringIO()
    manager = PDFResourceManager()
    converter = TextConverter(manager, output, laparams=LAParams())
    interpreter = PDFPageInterpreter(manager, converter)
    try:
        # 'with' guarantees the input file is closed even if pdfminer raises
        with open(fname1, 'rb') as infile:
            for page in PDFPage.get_pages(infile, pagenums):
                interpreter.process_page(page)
    finally:
        converter.close()
    text = output.getvalue()
    # BUG FIX: the original said "output.close" without parentheses, which
    # referenced the method but never actually closed the buffer.
    output.close()
    return removeEverythingButAlphaNumeric(text)
def createTextFilename(filePathOnly, year, uni, extensionRequired):
    """Build a text-file name from a path, a year and a university name.

    Slashes and whitespace are replaced by underscores; when
    *extensionRequired* is True a ".txt" suffix is appended.
    """
    flattened = re.sub('[/ \s\/]', '_', "%s_%s_%s" % (filePathOnly, year, uni))
    return flattened + ".txt" if extensionRequired == True else flattened
def returnMimeType(locationOfFile):
    """Return the MIME type of *locationOfFile*, detected via libmagic."""
    detector = magic.open(magic.MAGIC_MIME_TYPE)
    detector.load()
    return detector.file(locationOfFile)
def createJsonCatalogTxt(year, uni, filename, searchString):
    """Serialize one jsoncatalog.txt record as a JSON string.

    ``date`` is stored as an integer year; the other fields are kept verbatim.
    """
    #from https://freepythontips.wordpress.com/2013/08/08/storing-and-loading-data-with-json/
    record = {
        u"date": int(year),
        u"uni": uni,
        u"filename": filename,
        u"searchstring": searchString,
    }
    return json.dumps(record)
def doesFileExist(URL, root):
    """Return True when the file named at the end of *URL* is already in *root*.

    NOTE(review): the URL is split on os.sep, not on '/'; on Windows this
    would not isolate the filename from an http/ftp URL -- confirm the
    script only runs on POSIX systems.
    """
    #strip filename from url
    URLSplit = URL.split(os.sep)
    URLFile = URLSplit[-1]
    print "Checking to see if the file already exists"
    if os.path.exists(os.path.join(root, URLFile)):
        print "It appears that we have already downloaded that file"
        return True
    else:
        print "Nope we do not have that file, yet!"
        return False
def fetchPdf(URL, root):
    """Download the PDF at *URL* into directory *root* (unless already present).

    Returns the local path of the file, or False when the freshly downloaded
    file is not actually a PDF according to libmagic.
    """
    URLSplit = URL.split(os.sep)
    URLFile = URLSplit[-1]
    pathToCheck = os.path.join(root, URLFile)
    print "Checking to see if the file already exists"
    if os.path.exists(pathToCheck):
        print "It appears that we have already downloaded that file"
        # NOTE(review): an existing file is returned without re-checking its
        # MIME type, unlike the freshly-downloaded branch below -- confirm
        # that is intended.
        return pathToCheck
    else:
        #get wget from https://pypi.python.org/pypi/wget and run the setup.py file using "python setup.py install" after unzipping
        #gets the PDF from the URL and saves it to the directory to which root is pointing
        print "It appears that we do not have that file yet"
        f = wget.download(URL, root)
        print "File downloaded ..."
        print "... checking to see if the file is a PDF"
        if returnMimeType(f) == 'application/pdf':
            print "The file is a PDF"
            return f
        else:
            return False
def lowerAndStrip(URL):
    """Return *URL* lower-cased with surrounding whitespace removed, as str."""
    #TODO write a more comprehensive clean up module
    return str(URL.lower().strip())
# ---- main script ----------------------------------------------------------
# Walks bookworm_transform/<year>/<uni>/ for plain-text files listing PDF
# URLs, downloads each PDF, extracts its text into files/texts/raw/ and
# appends one JSON record per document to files/metadata/jsoncatalog.txt.
currentDir = os.getcwd()
print "Current working directory is %s" % (currentDir)
print "Creating output environment"
pTrt = os.path.join(currentDir, "files", "texts", "raw")
if (not os.path.exists(pTrt)):
    os.makedirs(pTrt)
    # NOTE(review): files/metadata is only created when files/texts/raw is
    # missing; if raw exists but metadata does not, the codecs.open below
    # will fail -- confirm this nesting is intended.
    os.makedirs(os.path.join(currentDir, "files", "metadata"))
print "Copying our field_descriptions.json file from %s to the files/metadata dir " % (currentDir)
subprocess.call(['cp', 'field_descriptions.json', 'files/metadata/'])
print "Creating the jsoncatalog.txt file"
jsonCatalogFile = codecs.open(os.path.join(currentDir, "files", "metadata" , "jsoncatalog.txt"), 'wb', "utf-8")
for root, dirs, files in os.walk(os.path.join(currentDir, "bookworm_transform")):
    for file in files:
        textFileLocation = os.path.join(root, file)
        # only plain-text files may contain lists of PDF URLs
        if returnMimeType(textFileLocation) == 'text/plain':
            with open(textFileLocation) as theTextFile:
                for line in theTextFile:
                    cleanLine = line.strip()
                    if cleanLine.endswith(".pdf"):
                        print "Processing line %s " % (cleanLine)
                        #TODO write break out in the event that file retured is not a pdf
                        pdfFileLocation = fetchPdf(cleanLine, root)
                        pathToSplit = str(pdfFileLocation)
                        print "Splitting the path %s " % (pdfFileLocation)
                        splitPathArray = pathToSplit.split(os.sep)
                        print "The split path looks like this ..."
                        print splitPathArray
                        # directory layout is .../bookworm_transform/<year>/<uni>/...
                        startIndex = splitPathArray.index('bookworm_transform')
                        print "The start index of the split path is %s " % (str(startIndex))
                        year = splitPathArray[startIndex + 1]
                        print "The year is %s " % (str(year))
                        uni = splitPathArray[startIndex + 2]
                        print "The uni is %s " % (uni)
                        filePathOnly, fileExtension = os.path.splitext(pdfFileLocation)
                        print "File path only is %s and file extension is %s" % (filePathOnly, fileExtension)
                        fn = createTextFilename(filePathOnly, year, uni, False)
                        print "The filename for the text withOUT an extension is %s " % (fn)
                        #create text file name with a text extension
                        fnExt = createTextFilename(filePathOnly, year, uni, True)
                        print "The filename for the text WITH an extension is %s " % (fnExt)
                        rawTt = os.path.join(currentDir, "files", "texts", "raw", fnExt)
                        if (not os.path.exists(rawTt)):
                            rawTextFile = codecs.open(rawTt, 'wb', "utf-8")
                            print "Extracting text from PDF file and writing to text file."
                            rawTextFile.write(convert(pdfFileLocation))
                            rawTextFile.close()
                            #TODO scrape search and URL string from data
                        else:
                            print "We have already extracted the text for that file ****"
                        searchString = createSearchString(year, uni, cleanLine)
                        #create json catalog file
                        print "Writing to the jsoncatalogfile"
                        jsonCatalogFile.write(createJsonCatalogTxt(year, uni, fn, searchString))
                        jsonCatalogFile.write("\n")
                        print "*"
                        print "Finished processing %s " % (pdfFileLocation)
print "Finished processing ALL files"
jsonCatalogFile.close()
|
|
"""A fast, lightweight, and secure session WSGI middleware for use with GAE."""
from Cookie import CookieError, SimpleCookie
from base64 import b64decode, b64encode
import datetime
import hashlib
import hmac
import logging
import pickle
import os
import threading
import time
from google.appengine.api import memcache
from google.appengine.ext import db
# Configurable cookie options
COOKIE_NAME_PREFIX = "kJ" # identifies a cookie as being one used by gae-sessions (so you can set cookies too)
COOKIE_PATH = "/"
DEFAULT_COOKIE_ONLY_THRESH = 10240 # 10KB: GAE only allows ~16000B in HTTP header - leave ~6KB for other info
DEFAULT_LIFETIME = datetime.timedelta(days=7)
# constants
SID_LEN = 43 # timestamp (10 chars) + underscore + md5 (32 hex chars)
SIG_LEN = 44 # base 64 encoded HMAC-SHA256
MAX_COOKIE_LEN = 4096
EXPIRE_COOKIE_FMT = ' %s=; expires=Wed, 01-Jan-1970 00:00:00 GMT; Path=' + COOKIE_PATH
COOKIE_FMT = ' ' + COOKIE_NAME_PREFIX + '%02d="%s"; %sPath=' + COOKIE_PATH + '; HttpOnly'
COOKIE_FMT_SECURE = COOKIE_FMT + '; Secure'
COOKIE_DATE_FMT = '%a, %d-%b-%Y %H:%M:%S GMT'
COOKIE_OVERHEAD = len(COOKIE_FMT % (0, '', '')) + len('expires=Xxx, xx XXX XXXX XX:XX:XX GMT; ') + 150 # 150=safety margin (e.g., in case browser uses 4000 instead of 4096)
MAX_DATA_PER_COOKIE = MAX_COOKIE_LEN - COOKIE_OVERHEAD
# per-thread storage for the Session of the request currently being handled
_tls = threading.local()
def get_current_session():
    """Return the Session object bound to the request currently being handled."""
    return getattr(_tls, 'current_session')
def set_current_session(session):
    """Bind *session* to the current request (stored thread-locally)."""
    setattr(_tls, 'current_session', session)
def is_gaesessions_key(k):
    """True when cookie name *k* carries the gae-sessions prefix."""
    return k[:len(COOKIE_NAME_PREFIX)] == COOKIE_NAME_PREFIX
class SessionModel(db.Model):
    """Contains session data. key_name is the session ID and pdump contains a
    pickled dictionary which maps session variables to their values."""
    # blob produced by Session.__encode_data(): a pickle of two dicts
    # (protobuf-encoded db.Model values, everything else)
    pdump = db.BlobProperty()
class Session(object):
    """Manages loading, reading/writing key-value pairs, and saving of a session.
    ``sid`` - if set, then the session for that sid (if any) is loaded. Otherwise,
    sid will be loaded from the HTTP_COOKIE (if any).
    """
    # dirty-flag sentinel: data changed, but only memcache (not the
    # datastore) needs updating until a "real" change forces a full persist
    DIRTY_BUT_DONT_PERSIST_TO_DB = 1

    def __init__(self, sid=None, lifetime=DEFAULT_LIFETIME, no_datastore=False,
                 cookie_only_threshold=DEFAULT_COOKIE_ONLY_THRESH, cookie_key=None):
        self._accessed = False
        self.sid = None
        self.cookie_keys = []
        self.cookie_data = None
        self.data = {}
        self.dirty = False  # has the session been changed?
        self.lifetime = lifetime
        self.no_datastore = no_datastore
        self.cookie_only_thresh = cookie_only_threshold
        self.base_key = cookie_key
        if sid:
            self.__set_sid(sid, False)
            self.data = None
        else:
            self.__read_cookie()

    @staticmethod
    def __compute_hmac(base_key, sid, text):
        """Computes the signature for text given base_key and sid."""
        key = base_key + sid
        return b64encode(hmac.new(key, text, hashlib.sha256).digest())

    def __read_cookie(self):
        """Reads the HTTP Cookie and loads the sid and data from it (if any)."""
        try:
            # check the cookie to see if a session has been started
            cookie = SimpleCookie(os.environ['HTTP_COOKIE'])
            self.cookie_keys = filter(is_gaesessions_key, cookie.keys())
            if not self.cookie_keys:
                return  # no session yet
            self.cookie_keys.sort()
            # the payload may be split across several numbered cookies; rejoin
            data = ''.join(cookie[k].value for k in self.cookie_keys)
            # layout: signature (SIG_LEN) + sid (SID_LEN) + base64 payload
            i = SIG_LEN + SID_LEN
            sig, sid, b64pdump = data[:SIG_LEN], data[SIG_LEN:i], data[i:]
            pdump = b64decode(b64pdump)
            actual_sig = Session.__compute_hmac(self.base_key, sid, pdump)
            if sig == actual_sig:
                self.__set_sid(sid, False)
                # check for expiration and terminate the session if it has expired
                if self.get_expiration() != 0 and time.time() > self.get_expiration():
                    return self.terminate()
                if pdump:
                    self.data = self.__decode_data(pdump)
                else:
                    self.data = None  # data is in memcache/db: load it on-demand
            else:
                logging.warn('cookie with invalid sig received from %s: %s' % (os.environ.get('REMOTE_ADDR'), b64pdump))
        except (CookieError, KeyError, IndexError, TypeError):
            # there is no cookie (i.e., no session) or the cookie is invalid
            self.terminate(False)

    def make_cookie_headers(self):
        """Returns a list of cookie headers to send (if any)."""
        # expire all cookies if the session has ended
        if not self.sid:
            return [EXPIRE_COOKIE_FMT % k for k in self.cookie_keys]
        if self.cookie_data is None:
            return []  # no cookie headers need to be sent
        # build the cookie header(s): includes sig, sid, and cookie_data
        if self.is_ssl_only():
            m = MAX_DATA_PER_COOKIE - 8  # leave room for the '; Secure' suffix
            fmt = COOKIE_FMT_SECURE
        else:
            m = MAX_DATA_PER_COOKIE
            fmt = COOKIE_FMT
        sig = Session.__compute_hmac(self.base_key, self.sid, self.cookie_data)
        cv = sig + self.sid + b64encode(self.cookie_data)
        # split the value over as many numbered cookies as needed
        num_cookies = 1 + (len(cv) - 1) / m
        if self.get_expiration() > 0:
            ed = "expires=%s; " % datetime.datetime.fromtimestamp(self.get_expiration()).strftime(COOKIE_DATE_FMT)
        else:
            ed = ''
        cookies = [fmt % (i, cv[i * m:i * m + m], ed) for i in xrange(num_cookies)]
        # expire old cookies which aren't needed anymore
        old_cookies = xrange(num_cookies, len(self.cookie_keys))
        key = COOKIE_NAME_PREFIX + '%02d'
        cookies_to_ax = [EXPIRE_COOKIE_FMT % (key % i) for i in old_cookies]
        return cookies + cookies_to_ax

    def is_active(self):
        """Returns True if this session is active (i.e., it has been assigned a
        session ID and will be or has been persisted)."""
        return self.sid is not None

    def is_ssl_only(self):
        """Returns True if cookies set by this session will include the "Secure"
        attribute so that the client will only send them over a secure channel
        like SSL)."""
        # the sid separator char (see __make_sid) is 'S' for SSL-only
        # sessions and '_' otherwise; it sits 33 chars from the end
        return self.sid is not None and self.sid[-33] == 'S'

    def is_accessed(self):
        """Returns True if any value of this session has been accessed."""
        return self._accessed

    def ensure_data_loaded(self):
        """Fetch the session data if it hasn't been retrieved it yet."""
        self._accessed = True
        if self.data is None and self.sid:
            self.__retrieve_data()

    def get_expiration(self):
        """Returns the timestamp at which this session will expire."""
        try:
            # the sid starts with a 10-digit expiration timestamp
            return int(self.sid[:-33])
        except:
            return 0

    def __make_sid(self, expire_ts=None, ssl_only=False):
        """Returns a new session ID."""
        # make a random ID (random.randrange() is 10x faster but less secure?)
        if expire_ts is None:
            expire_dt = datetime.datetime.now() + self.lifetime
            expire_ts = int(time.mktime((expire_dt).timetuple()))
        else:
            expire_ts = int(expire_ts)
        if ssl_only:
            sep = 'S'
        else:
            sep = '_'
        return ('%010d' % expire_ts) + sep + hashlib.md5(os.urandom(16)).hexdigest()

    @staticmethod
    def __encode_data(d):
        """Returns a "pickled+" encoding of d. d values of type db.Model are
        protobuf encoded before pickling to minimize CPU usage & data size."""
        # separate protobufs so we'll know how to decode (they are just strings)
        eP = {}  # for models encoded as protobufs
        eO = {}  # for everything else
        for k, v in d.iteritems():
            if isinstance(v, db.Model):
                eP[k] = db.model_to_protobuf(v)
            else:
                eO[k] = v
        return pickle.dumps((eP, eO), 2)

    @staticmethod
    def __decode_data(pdump):
        """Returns a data dictionary after decoding it from "pickled+" form."""
        try:
            eP, eO = pickle.loads(pdump)
            for k, v in eP.iteritems():
                eO[k] = db.model_from_protobuf(v)
        except Exception, e:
            # corrupted blob => treat as an empty session rather than crash
            logging.warn("failed to decode session data: %s" % e)
            eO = {}
        return eO

    def regenerate_id(self, expiration_ts=None):
        """Assigns the session a new session ID (data carries over). This
        should be called whenever a user authenticates to prevent session
        fixation attacks.
        ``expiration_ts`` - The UNIX timestamp the session will expire at. If
        omitted, the session expiration time will not be changed.
        """
        if self.sid or expiration_ts is not None:
            self.ensure_data_loaded()  # ensure we have the data before we delete it
            if expiration_ts is None:
                expiration_ts = self.get_expiration()
            self.__set_sid(self.__make_sid(expiration_ts, self.is_ssl_only()))
            self.dirty = True  # ensure the data is written to the new session

    def start(self, expiration_ts=None, ssl_only=False):
        """Starts a new session. expiration specifies when it will expire. If
        expiration is not specified, then self.lifetime will used to
        determine the expiration date.
        Normally this method does not need to be called directly - a session is
        automatically started when the first value is added to the session.
        ``expiration_ts`` - The UNIX timestamp the session will expire at. If
        omitted, the session will expire after the default ``lifetime`` has past
        (as specified in ``SessionMiddleware``).
        ``ssl_only`` - Whether to specify the "Secure" attribute on the cookie
        so that the client will ONLY transfer the cookie over a secure channel.
        """
        self.dirty = True
        self.data = {}
        self.__set_sid(self.__make_sid(expiration_ts, ssl_only), True)

    def terminate(self, clear_data=True):
        """Deletes the session and its data, and expires the user's cookie."""
        if clear_data:
            self.__clear_data()
        self.sid = None
        self.data = {}
        self.dirty = False
        if self.cookie_keys:
            self.cookie_data = ''  # trigger the cookies to expire
        else:
            self.cookie_data = None

    def __set_sid(self, sid, make_cookie=True):
        """Sets the session ID, deleting the old session if one existed. The
        session's data will remain intact (only the session ID changes)."""
        if self.sid:
            self.__clear_data()
        self.sid = sid
        self.db_key = db.Key.from_path(SessionModel.kind(), sid, namespace='')
        # set the cookie if requested
        if make_cookie:
            self.cookie_data = ''  # trigger the cookie to be sent

    def __clear_data(self):
        """Deletes this session from memcache and the datastore."""
        if self.sid:
            memcache.delete(self.sid, namespace='')  # not really needed; it'll go away on its own
            try:
                db.delete(self.db_key)
            except:
                pass  # either it wasn't in the db (maybe cookie/memcache-only) or db is down => cron will expire it

    def __retrieve_data(self):
        """Sets the data associated with this session after retrieving it from
        memcache or the datastore. Assumes self.sid is set. Checks for session
        expiration after getting the data."""
        pdump = memcache.get(self.sid, namespace='')
        if pdump is None:
            # memcache lost it, go to the datastore
            if self.no_datastore:
                logging.info("can't find session data in memcache for sid=%s (using memcache only sessions)" % self.sid)
                self.terminate(False)  # we lost it; just kill the session
                return
            session_model_instance = db.get(self.db_key)
            if session_model_instance:
                pdump = session_model_instance.pdump
            else:
                logging.error("can't find session data in the datastore for sid=%s" % self.sid)
                self.terminate(False)  # we lost it; just kill the session
                return
        self.data = self.__decode_data(pdump)

    def save(self, persist_even_if_using_cookie=False):
        """Saves the data associated with this session IF any changes have been
        made (specifically, if any mutator methods like __setitem__ or the like
        is called).
        If the data is small enough it will be sent back to the user in a cookie
        instead of using memcache and the datastore. If `persist_even_if_using_cookie`
        evaluates to True, memcache and the datastore will also be used. If the
        no_datastore option is set, then the datastore will never be used.
        Normally this method does not need to be called directly - a session is
        automatically saved at the end of the request if any changes were made.
        """
        if not self.sid:
            return  # no session is active
        if not self.dirty:
            return  # nothing has changed
        dirty = self.dirty
        self.dirty = False  # saving, so it won't be dirty anymore
        # do the pickling ourselves b/c we need it for the datastore anyway
        pdump = self.__encode_data(self.data)
        # persist via cookies if it is reasonably small
        if len(pdump) * 4 / 3 <= self.cookie_only_thresh:  # 4/3 b/c base64 is ~33% bigger
            self.cookie_data = pdump
            if not persist_even_if_using_cookie:
                return
        elif self.cookie_keys:
            # latest data will only be in the backend, so expire data cookies we set
            self.cookie_data = ''
        memcache.set(self.sid, pdump, namespace='', time=self.get_expiration())  # may fail if memcache is down
        # persist the session to the datastore
        if dirty is Session.DIRTY_BUT_DONT_PERSIST_TO_DB or self.no_datastore:
            return
        try:
            SessionModel(key_name=self.sid, pdump=pdump).put()
        except Exception, e:
            logging.warning("unable to persist session to datastore for sid=%s (%s)" % (self.sid, e))

    # Users may interact with the session through a dictionary-like interface.
    def clear(self):
        """Removes all data from the session (but does not terminate it)."""
        if self.sid:
            self.data = {}
            self.dirty = True

    def get(self, key, default=None):
        """Retrieves a value from the session."""
        self.ensure_data_loaded()
        return self.data.get(key, default)

    def has_key(self, key):
        """Returns True if key is set."""
        self.ensure_data_loaded()
        return key in self.data

    def pop(self, key, default=None):
        """Removes key and returns its value, or default if key is not present."""
        self.ensure_data_loaded()
        self.dirty = True
        return self.data.pop(key, default)

    def pop_quick(self, key, default=None):
        """Removes key and returns its value, or default if key is not present.
        The change will only be persisted to memcache until another change
        necessitates a write to the datastore."""
        self.ensure_data_loaded()
        if self.dirty is False:
            self.dirty = Session.DIRTY_BUT_DONT_PERSIST_TO_DB
        return self.data.pop(key, default)

    def set_quick(self, key, value):
        """Set a value named key on this session. The change will only be
        persisted to memcache until another change necessitates a write to the
        datastore. This will start a session if one is not already active."""
        dirty = self.dirty
        self[key] = value
        if dirty is False or dirty is Session.DIRTY_BUT_DONT_PERSIST_TO_DB:
            self.dirty = Session.DIRTY_BUT_DONT_PERSIST_TO_DB

    def __getitem__(self, key):
        """Returns the value associated with key on this session."""
        self.ensure_data_loaded()
        return self.data.__getitem__(key)

    def __setitem__(self, key, value):
        """Set a value named key on this session. This will start a session if
        one is not already active."""
        self.ensure_data_loaded()
        if not self.sid:
            self.start()
        self.data.__setitem__(key, value)
        self.dirty = True

    def __delitem__(self, key):
        """Deletes the value associated with key on this session."""
        self.ensure_data_loaded()
        self.data.__delitem__(key)
        self.dirty = True

    def __iter__(self):
        """Returns an iterator over the keys (names) of the stored values."""
        self.ensure_data_loaded()
        return self.data.iterkeys()

    def __contains__(self, key):
        """Returns True if key is present on this session."""
        self.ensure_data_loaded()
        return self.data.__contains__(key)

    def __str__(self):
        """Returns a string representation of the session."""
        if self.sid:
            self.ensure_data_loaded()
            return "SID=%s %s" % (self.sid, self.data)
        else:
            return "uninitialized session"
class SessionMiddleware(object):
    """WSGI middleware that adds session support.
    ``cookie_key`` - A key used to secure cookies so users cannot modify their
    content. Keys should be at least 32 bytes (RFC2104). Tip: generate your
    key using ``os.urandom(64)`` but do this OFFLINE and copy/paste the output
    into a string which you pass in as ``cookie_key``. If you use ``os.urandom()``
    to dynamically generate your key at runtime then any existing sessions will
    become junk every time your app starts up!
    ``lifetime`` - ``datetime.timedelta`` that specifies how long a session may last. Defaults to 7 days.
    ``no_datastore`` - By default all writes also go to the datastore in case
    memcache is lost. Set to True to never use the datastore. This improves
    write performance but sessions may be occassionally lost.
    ``cookie_only_threshold`` - A size in bytes. If session data is less than this
    threshold, then session data is kept only in a secure cookie. This avoids
    memcache/datastore latency which is critical for small sessions. Larger
    sessions are kept in memcache+datastore instead. Defaults to 10KB.
    """
    def __init__(self, app, cookie_key, lifetime=DEFAULT_LIFETIME, no_datastore=False, cookie_only_threshold=DEFAULT_COOKIE_ONLY_THRESH):
        self.app = app
        self.lifetime = lifetime
        self.no_datastore = no_datastore
        self.cookie_only_thresh = cookie_only_threshold
        self.cookie_key = cookie_key
        if not self.cookie_key:
            raise ValueError("cookie_key MUST be specified")
        if len(self.cookie_key) < 32:
            raise ValueError("RFC2104 recommends you use at least a 32 character key. Try os.urandom(64) to make a key.")

    def __call__(self, environ, start_response):
        # initialize a session for the current user
        _tls.current_session = Session(lifetime=self.lifetime, no_datastore=self.no_datastore, cookie_only_threshold=self.cookie_only_thresh, cookie_key=self.cookie_key)

        # create a hook for us to insert a cookie into the response headers
        def my_start_response(status, headers, exc_info=None):
            _tls.current_session.save()  # store the session if it was changed
            for ch in _tls.current_session.make_cookie_headers():
                headers.append(('Set-Cookie', ch))
            return start_response(status, headers, exc_info)
        # let the app do its thing
        return self.app(environ, my_start_response)
class DjangoSessionMiddleware(object):
    """Django middleware that adds session support. You must specify the
    session configuration parameters by modifying the call to ``SessionMiddleware``
    in ``DjangoSessionMiddleware.__init__()`` since Django cannot call an
    initialization method with parameters.
    """
    def __init__(self):
        # wrap a do-nothing WSGI "app" that simply returns its
        # start_response argument; we only reuse SessionMiddleware's
        # session setup and cookie-header generation
        fake_app = lambda environ, start_response: start_response
        self.wrapped_wsgi_middleware = SessionMiddleware(fake_app, cookie_key='you MUST change this')
        self.response_handler = None

    def process_request(self, request):
        # calling the wrapped middleware initializes _tls.current_session;
        # because fake_app returns my_start_response, response_handler is a
        # callable which will later save the session and yield cookie headers
        self.response_handler = self.wrapped_wsgi_middleware(None, lambda status, headers, exc_info: headers)
        request.session = get_current_session()  # for convenience

    def process_response(self, request, response):
        if self.response_handler:
            # saves the session and returns the ('Set-Cookie', value) pairs
            session_headers = self.response_handler(None, [], None)
            for k, v in session_headers:
                response[k] = v
            self.response_handler = None
        if hasattr(request, 'session') and request.session.is_accessed():
            from django.utils.cache import patch_vary_headers
            logging.info("Varying")
            patch_vary_headers(response, ('Cookie',))
        return response
def delete_expired_sessions():
    """Deletes expired sessions from the datastore.
    If there are more than 500 expired sessions, only 500 will be removed.
    Returns True if all expired sessions have been removed.
    """
    # Session IDs start with their 10-digit expiration timestamp (see
    # Session.__make_sid), so a key-range query up to "now" + a maximal
    # suffix character finds every expired session.
    now_str = unicode(int(time.time()))  # NOTE: Python 2 only (unicode builtin)
    q = db.Query(SessionModel, keys_only=True, namespace='')
    key = db.Key.from_path('SessionModel', now_str + u'\ufffd', namespace='')
    q.filter('__key__ < ', key)
    results = q.fetch(500)
    db.delete(results)
    logging.info('gae-sessions: deleted %d expired sessions from the datastore' % len(results))
    return len(results) < 500
|
|
# -*- coding: UTF-8 -*-
# Copyright 2013-2018 Luc Saffre
# License: BSD (see file COPYING for details)
from builtins import str
from builtins import object
import logging
logger = logging.getLogger(__name__)
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import pgettext_lazy as pgettext
# om atelier import rstgen
from lino.api import dd, rt
from lino import mixins
from lino.utils import join_elems
from etgen import html as xghtml
from etgen.html import E
from lino.mixins import Referrable
from lino.modlib.users.mixins import My, UserAuthored
from .utils import ResponseStates, PollStates
from .roles import PollsUser, PollsStaff
# Module-level alias (presumably kept so other modules can import
# NullBooleanField from here -- verify before removing).
NullBooleanField = models.NullBooleanField
class ChoiceSet(mixins.BabelNamed):
    """A named (translatable) set of answer choices shared between questions."""
    class Meta(object):
        app_label = 'polls'
        verbose_name = _("Choice Set")
        verbose_name_plural = _("Choice Sets")
class ChoiceSets(dd.Table):
    """Staff-only table of all choice sets."""
    required_roles = dd.login_required(PollsStaff)
    model = 'polls.ChoiceSet'
    detail_layout = """
    name
    ChoicesBySet
    """
    # insert_layout = """
    # id
    # name
    # """
class Choice(mixins.BabelNamed, mixins.Sequenced):
    """One selectable answer belonging to a ChoiceSet."""
    class Meta(object):
        app_label = 'polls'
        verbose_name = _("Choice")
        verbose_name_plural = _("Choices")
    choiceset = dd.ForeignKey('polls.ChoiceSet', related_name='choices')

    def get_siblings(self):
        # Sequenced ordering applies within one choiceset.
        return self.choiceset.choices.all()

    @dd.action()
    def select_by_response(self, ar):
        # Records this choice as an answer of the Response shown as master
        # instance (no-op for other master types).
        # NOTE(review): AnswerChoice is defined elsewhere in this module.
        mi = ar.master_instance
        dd.logger.info("20140929 %s", mi)
        if isinstance(mi, Response):
            AnswerChoice(response=mi, choice=self).save()
class Choices(dd.Table):
    """Staff-only table of all choices (across all sets)."""
    model = 'polls.Choice'
    required_roles = dd.login_required(PollsStaff)
class ChoicesBySet(Choices):
    """Choices of a given ChoiceSet (slave table)."""
    master_key = 'choiceset'
    # required_roles = dd.login_required()
@dd.python_2_unicode_compatible
class Poll(UserAuthored, mixins.CreatedModified, Referrable):
    """A poll: a referenced, user-authored list of questions.

    Pasting text into ``questions_to_add`` creates one Question per
    non-empty line on save: lines starting with '=' become headings,
    lines starting with '#' become the details of the preceding question,
    other lines become numbered questions.
    """
    class Meta(object):
        app_label = 'polls'
        abstract = dd.is_abstract_model(__name__, 'Poll')
        verbose_name = _("Poll")
        verbose_name_plural = _("Polls")
        ordering = ['ref']
    title = models.CharField(_("Heading"), max_length=200)
    details = models.TextField(_("Details"), blank=True)
    default_choiceset = dd.ForeignKey(
        'polls.ChoiceSet',
        null=True, blank=True,
        related_name='polls',
        verbose_name=_("Default Choiceset"))
    default_multiple_choices = models.BooleanField(
        _("Allow multiple choices"), default=False)
    questions_to_add = models.TextField(
        _("Questions to add"),
        help_text=_("Paste text for questions to add. "
                    "Every non-empty line will create one question."),
        blank=True)
    state = PollStates.field(default=PollStates.as_callable('draft'))
    workflow_state_field = 'state'

    def __str__(self):
        return self.ref or self.title

    def after_ui_save(self, ar, cw):
        """Convert the pasted ``questions_to_add`` text into Question rows."""
        if self.questions_to_add:
            q = None
            qkw = dict()
            number = 1
            for ln in self.questions_to_add.splitlines():
                ln = ln.strip()
                if ln:
                    if ln.startswith('#'):
                        # a '#' line holds the details of the *previous*
                        # question.  BUG FIX: guard against a '#' line
                        # appearing first, which used to crash with
                        # AttributeError because q was still None.
                        if q is not None:
                            q.details = ln[1:]
                            q.save()
                        continue
                    elif ln.startswith('='):
                        q = Question(poll=self, title=ln[1:],
                                     is_heading=True, **qkw)
                        number = 1
                    else:
                        q = Question(poll=self, title=ln,
                                     number=str(number), **qkw)
                        number += 1
                    q.full_clean()
                    q.save()
                    qkw.update(seqno=q.seqno + 1)
            self.questions_to_add = ''
            self.save()  # save again because we modified afterwards
        super(Poll, self).after_ui_save(ar, cw)
# @dd.virtualfield(dd.HtmlBox(_("Result")))
# def result(self, ar):
# return E.div(*tuple(get_poll_result(self)))
# def get_poll_result(self):
# #~ yield E.h1(self.title)
# for cs in ChoiceSet.objects.all():
# questions = self.questions.filter(choiceset=cs)
# if questions.count() > 0:
# yield E.h2(str(cs))
# for question in questions:
# yield E.p(question.text)
class PollDetail(dd.DetailLayout):
    """Detail window of a Poll: a General tab and a Results tab."""
    main = "general results"
    general = dd.Panel("""
    ref title workflow_buttons
    details
    default_choiceset default_multiple_choices
    polls.QuestionsByPoll
    """, label=_("General"))
    results = dd.Panel("""
    id user created modified state
    polls.ResponsesByPoll
    # result
    PollResult
    """, label=_("Results"))
class Polls(dd.Table):
    """Base table of polls, visible to any polls user."""
    required_roles = dd.login_required(PollsUser)
    model = 'polls.Poll'
    column_names = 'ref title user state *'
    detail_layout = PollDetail()
    insert_layout = dd.InsertLayout("""
    ref title
    default_choiceset default_multiple_choices
    questions_to_add
    """, window_size=(60, 15))
class AllPolls(Polls):
    """Staff-only variant of Polls which also shows the primary key."""
    required_roles = dd.login_required(PollsStaff)
    column_names = 'id ref title user state *'
class MyPolls(My, Polls):
    """Show all polls whose author I am."""
    column_names = 'ref title state *'
@dd.python_2_unicode_compatible
class Question(mixins.Sequenced):
    """One question within a poll; may also act as a heading line."""
    class Meta(object):
        app_label = 'polls'
        verbose_name = _("Question")
        verbose_name_plural = _("Questions")
        ordering = ['seqno']
        allow_cascaded_delete = ['poll']
    poll = dd.ForeignKey('polls.Poll', related_name='questions')
    number = models.CharField(_("No."), max_length=20, blank=True)
    title = models.CharField(pgettext("polls", "Title"), max_length=200)
    details = models.TextField(_("Details"), blank=True)
    choiceset = dd.ForeignKey('polls.ChoiceSet', blank=True, null=True)
    multiple_choices = models.BooleanField(
        _("Allow multiple choices"), blank=True, default=False)
    is_heading = models.BooleanField(_("Heading"), default=False)
    # how a numbered question renders: "<number>) <title>"
    NUMBERED_TITLE_FORMAT = "%s) %s"

    def __str__(self):
        #~ return self.text[:40].strip() + ' ...'
        if self.number:
            return self.NUMBERED_TITLE_FORMAT % (self.number, self.title)
        return self.title

    def get_siblings(self):
        # Sequenced ordering applies within one poll.
        #~ return self.choiceset.choices.order_by('seqno')
        return self.poll.questions.all()

    def get_choiceset(self):
        # headings have no choices; otherwise fall back to the poll default
        if self.is_heading:
            return None
        if self.choiceset is None:
            return self.poll.default_choiceset
        return self.choiceset

    def full_clean(self, *args, **kw):
        # inherit the poll's default when multiple_choices was left empty
        if self.multiple_choices is None:
            self.multiple_choices = self.poll.default_multiple_choices
        #~ if self.choiceset_id is None:
        #~ self.choiceset = self.poll.default_choiceset
        super(Question, self).full_clean()
# keep the "No." column narrow in grid widgets
Question.set_widget_options('number', width=5)
class Questions(dd.Table):
    """Staff-only table of all questions of all polls."""
    required_roles = dd.login_required(PollsStaff)
    model = 'polls.Question'
    column_names = "seqno poll number title choiceset is_heading *"
    detail_layout = """
    poll number is_heading choiceset multiple_choices
    title
    details
    AnswersByQuestion
    """
    order_by = ['poll', 'seqno']
class QuestionsByPoll(Questions):
    """Questions of a given poll, editable inline in the poll detail."""
    required_roles = dd.login_required(PollsUser)
    master_key = 'poll'
    column_names = 'seqno number title:50 is_heading *'
    auto_fit_column_widths = True
    stay_in_grid = True
class ToggleChoice(dd.Action):
    """Toggle one answer choice of the selected Response.

    If an AnswerChoice for the (response, question, choice) triple exists
    it is removed; otherwise one is created (first clearing any other
    choice when the question does not allow multiple choices).
    """
    readonly = False
    show_in_bbar = False
    parameters = dict(
        question=dd.ForeignKey("polls.Question"),
        choice=dd.ForeignKey("polls.Choice"),
    )
    params_layout = 'question\nchoice'  # Py3 would otherwise display
    # them in arbitrary order
    no_params_window = True

    def run_from_ui(self, ar, **kw):
        response = ar.selected_rows[0]
        if response is None:
            return
        pv = ar.action_param_values
        qs = AnswerChoice.objects.filter(response=response, **pv)
        if qs.count() == 1:
            # already selected -> unselect
            qs[0].delete()
        elif qs.count() == 0:
            if not pv.question.multiple_choices:
                # delete any other choice which might exist
                qs = AnswerChoice.objects.filter(
                    response=response, question=pv.question)
                qs.delete()
            obj = AnswerChoice(response=response, **pv)
            obj.full_clean()
            obj.save()
        else:
            # more than one row means duplicate AnswerChoice records
            raise Exception(
                "Oops, %s returned %d rows." % (qs.query, qs.count()))
        ar.success(refresh=True)
        # dd.logger.info("20140930 %s", obj)
@dd.python_2_unicode_compatible
class Response(UserAuthored, mixins.Registrable):
    """One set of answers to a poll, given by a user (optionally on behalf
    of a partner)."""
    class Meta(object):
        app_label = 'polls'
        verbose_name = _("Response")
        verbose_name_plural = _("Responses")
        ordering = ['date']
    poll = dd.ForeignKey('polls.Poll', related_name='responses')
    date = models.DateField(_("Date"), default=dd.today)
    state = ResponseStates.field(default=ResponseStates.as_callable('draft'))
    remark = models.TextField(verbose_name=_("My general remark"), blank=True)
    partner = dd.ForeignKey('contacts.Partner', blank=True, null=True)
    toggle_choice = ToggleChoice()

    @dd.chooser()
    def poll_choices(cls):
        # only active polls may receive new responses
        return Poll.objects.filter(state=PollStates.active)

    def __str__(self):
        if self.partner is None:
            return _("%(user)s's response to %(poll)s") % dict(
                user=self.user, poll=self.poll)
        # NOTE(review): the 'user' argument is unused by the
        # "{poll} {partner} {date}" template -- confirm whether it should
        # appear in the translated string.
        return _("{poll} {partner} {date}").format(
            user=self.user.initials,
            date=dd.fds(self.date),
            partner=self.partner.get_full_name(salutation=False),
            poll=self.poll)

    @classmethod
    def get_registrable_fields(model, site):
        # fields which become read-only once the response is registered
        for f in super(Response, model).get_registrable_fields(site):
            yield f
        yield 'user'
        yield 'poll'
        yield 'date'
        yield 'partner'
class ResponseDetail(dd.DetailLayout):
    """Two-panel detail window for a Response."""
    main = "answers more"

    answers = dd.Panel("""
    poll partner date workflow_buttons
    polls.AnswersByResponse
    """, label=_("General"))

    more = dd.Panel("""
    user state
    remark
    """, label=_("More"))
class Responses(dd.Table):
    """Base table of poll responses."""
    required_roles = dd.login_required(PollsUser)
    model = 'polls.Response'
    detail_layout = ResponseDetail()
    insert_layout = """
    user date
    poll
    """
class AllResponses(Responses):
    """All responses (staff only)."""
    required_roles = dd.login_required(PollsStaff)
class MyResponses(My, Responses):
    """The requesting user's own responses."""
    column_names = 'date poll state remark *'
class ResponsesByPoll(Responses):
    """Slave table: all responses to a given poll."""
    master_key = 'poll'
    column_names = 'date user state partner remark *'
class ResponsesByPartner(Responses):
    """Slave table: the responses of a given partner, rendered as a summary
    with one line per visible poll."""
    master_key = 'partner'
    column_names = 'date user state remark *'
    display_mode = 'summary'

    @classmethod
    def get_table_summary(self, obj, ar):
        """Render one ``<li>`` per active or closed poll, listing the
        partner's responses and (for active polls) an insert button."""
        if obj is None:
            return
        visible_polls = Poll.objects.filter(state__in=(
            PollStates.active, PollStates.closed)).order_by('ref')

        qs = Response.objects.filter(partner=obj).order_by('date')
        # group this partner's responses by poll primary key
        polls_responses = {}
        for resp in qs:
            polls_responses.setdefault(resp.poll.pk, []).append(resp)

        items = []
        for poll in visible_polls:
            iar = self.insert_action.request_from(
                ar, obj, known_values=dict(poll=poll))
            elems = [str(poll), ' : ']
            responses = polls_responses.get(poll.pk, [])
            elems += join_elems(
                [ar.obj2html(r, dd.fds(r.date))
                 for r in responses], sep=', ')
            if poll.state == PollStates.active:
                elems += [' ', iar.ar2button()]
            #elems += [' ', iar.insert_button()]
            items.append(E.li(*elems))
        return E.div(E.ul(*items))
class AnswerChoice(dd.Model):
    """A selected choice for one question within one response."""

    class Meta(object):
        app_label = 'polls'
        verbose_name = _("Answer Choice")
        verbose_name_plural = _("Answer Choices")
        # ordering = ['question__seqno']
        # ordering removed 20160721 because it probably caused random
        # results when serializing.

    # deleting a response silently deletes its answer choices
    allow_cascaded_delete = ['response']

    response = dd.ForeignKey('polls.Response')
    question = dd.ForeignKey('polls.Question')
    choice = dd.ForeignKey(
        'polls.Choice',
        related_name='answers', verbose_name=_("My answer"),
        blank=True, null=True)

    @dd.chooser()
    def choice_choices(cls, question):
        # limit selectable choices to the question's choiceset
        return question.get_choiceset().choices.all()
class AnswerChoices(dd.Table):
    """All answer choices (staff only)."""
    required_roles = dd.login_required(PollsStaff)
    model = 'polls.AnswerChoice'
@dd.python_2_unicode_compatible
class AnswerRemark(dd.Model):
    """A free-text remark on one question within one response."""

    class Meta(object):
        app_label = 'polls'
        verbose_name = _("Answer Remark")
        verbose_name_plural = _("Answer Remarks")
        ordering = ['question__seqno']

    # deleting a response silently deletes its remarks
    allow_cascaded_delete = ['response']

    response = dd.ForeignKey('polls.Response')
    question = dd.ForeignKey('polls.Question')
    remark = models.TextField(_("My remark"), blank=True)

    def __str__(self):
        # return _("Remark for {0}").format(self.question)
        return str(self.question)
class AnswerRemarks(dd.Table):
    """Table of answer remarks, edited through small popup windows."""
    required_roles = dd.login_required(PollsUser)
    model = 'polls.AnswerRemark'
    detail_layout = dd.DetailLayout("""
    remark
    response question
    """, window_size=(60, 10))
    insert_layout = dd.InsertLayout("""
    remark
    response question
    """, window_size=(60, 10))
    # response and question are set by the caller, not edited by the user
    hidden_elements = dd.fields_list(AnswerRemark, 'response question')
    stay_in_grid = True
class AnswerRemarksByAnswer(AnswerRemarks):
    """Variant of AnswerRemarks used from within AnswersByResponse cells."""
    use_as_default_table = False
    hide_top_toolbar = True
class AllAnswerRemarks(AnswerRemarks):
    """All answer remarks (staff only)."""
    required_roles = dd.login_required(PollsStaff)
# class VirtualTableRow(object):
# def save_new_instance(elem, ar):
# pre_ui_save.send(sender=elem.__class__, instance=elem, ar=ar)
# elem.before_ui_save(ar)
# elem.save(force_insert=True)
# # yes, `on_ui_created` comes *after* save()
# on_ui_created.send(elem, request=ar.request)
# # elem.after_ui_create(ar)
# elem.after_ui_save(ar, None)
# def save_watched_instance(elem, ar, watcher):
# if watcher.is_dirty():
# pre_ui_save.send(sender=elem.__class__, instance=elem, ar=ar)
# elem.before_ui_save(ar)
# elem.save(force_update=True)
# watcher.send_update(ar)
# ar.success(_("%s has been updated.") % obj2unicode(elem))
# else:
# ar.success(_("%s : nothing to save.") % obj2unicode(elem))
# elem.after_ui_save(ar, watcher)
# def delete_instance(self, ar):
# pre_ui_delete.send(sender=self, request=ar.request)
# self.delete()
@dd.python_2_unicode_compatible
class AnswersByResponseRow(object):
    """Volatile row object pairing one Question with one Response.

    Used as the row type of AnswersByResponse; it is not a database model.
    """

    # model-instance methods delegated to the question so the row can act
    # like a model towards the table machinery
    FORWARD_TO_QUESTION = tuple(
        "full_clean after_ui_save disable_delete save_new_instance save_watched_instance delete_instance".split())

    def __init__(self, response, question):
        self.response = response
        self.question = question
        # Needed by AnswersByResponse.get_row_by_pk
        self.pk = self.id = question.pk
        try:
            self.remark = AnswerRemark.objects.get(
                question=question, response=response)
        except AnswerRemark.DoesNotExist:
            # unsaved placeholder so callers can always read .remark
            self.remark = AnswerRemark(
                question=question, response=response)
        self.choices = AnswerChoice.objects.filter(
            question=question, response=response)
        for k in self.FORWARD_TO_QUESTION:
            setattr(self, k, getattr(question, k))

    def __str__(self):
        if self.choices.count() == 0:
            return str(_("N/A"))
        return ', '.join([str(ac.choice) for ac in self.choices])

    def obj2href(self, ar):
        # needed by detail_pointer
        return ''
class AnswerRemarkField(dd.VirtualField):
    """Editable virtual field exposing the remark text of an answer row."""
    editable = True

    def __init__(self):
        t = models.TextField(_("My remark"), blank=True)
        dd.VirtualField.__init__(self, t, None)

    def set_value_in_object(self, ar, obj, value):
        #~ e = self.get_entry_from_answer(obj)
        # obj is an AnswersByResponseRow; saving persists the (possibly
        # still unsaved) AnswerRemark instance.
        obj.remark.remark = value
        obj.remark.save()

    def value_from_object(self, obj, ar):
        #~ logger.info("20120118 value_from_object() %s",dd.obj2str(obj))
        #~ e = self.get_entry_from_answer(obj)
        return obj.remark.remark
class AnswersByResponse(dd.VirtualTable):
    """Virtual slave table: one row per question of the master response's
    poll, with editable answer buttons and remarks.

    Bug fix: in the read-only columns of other responses the remark of the
    *other* response is now shown (previously the current response's remark
    was rendered there although the other response's remark was tested).
    """
    label = _("Answers")
    editable = True
    master = 'polls.Response'
    column_names = 'question:40 answer_buttons:30 remark:20 *'
    variable_row_height = True
    auto_fit_column_widths = True
    display_mode = 'summary'
    # workflow_state_field = 'state'

    remark = AnswerRemarkField()

    @classmethod
    def get_data_rows(self, ar):
        """Yield one volatile row per question of the master's poll."""
        response = ar.master_instance
        if response is None:
            return
        for q in rt.models.polls.Question.objects.filter(poll=response.poll):
            yield AnswersByResponseRow(response, q)

    @classmethod
    def get_table_summary(self, response, ar):
        """Presents this response as a table with one row per question and one
        column for each response of the same poll. The answers for
        this response are editable if this response is not registered.
        The answers of other responses are never editable.
        """
        if response is None:
            return
        if response.poll_id is None:
            return
        AnswerRemarks = rt.models.polls.AnswerRemarksByAnswer
        all_responses = rt.models.polls.Response.objects.filter(
            poll=response.poll).order_by('date')
        if response.partner:
            all_responses = all_responses.filter(partner=response.partner)
        ht = xghtml.Table()
        ht.attrib.update(cellspacing="5px", bgcolor="#ffffff", width="100%")
        cellattrs = dict(align="left", valign="top", bgcolor="#eeeeee")
        # header row: question column plus one column per response
        headers = [str(_("Question"))]
        for r in all_responses:
            if r == response:
                headers.append(dd.fds(r.date))
            else:
                headers.append(ar.obj2html(r, dd.fds(r.date)))
        ht.add_header_row(*headers, **cellattrs)
        ar.master_instance = response  # must set it because
        # get_data_rows() needs it.
        # 20151211
        # editable = Responses.update_action.request_from(ar).get_permission(
        #     response)
        sar = Responses.update_action.request_from(ar)
        sar.selected_rows = [response]
        editable = sar.get_permission()
        # editable = insert.get_permission(response)
        kv = dict(response=response)
        insert = AnswerRemarks.insert_action.request_from(
            ar, known_values=kv)
        detail = AnswerRemarks.detail_action.request_from(ar)
        for answer in self.get_data_rows(ar):
            cells = [self.question.value_from_object(answer, ar)]
            for r in all_responses:
                if editable and r == response:
                    # editable cell for the master response
                    insert.known_values.update(question=answer.question)
                    detail.known_values.update(question=answer.question)
                    items = [
                        self.answer_buttons.value_from_object(answer, ar)]
                    if answer.remark.remark:
                        items += [E.br(), answer.remark.remark]
                    if answer.remark.pk:
                        items += [
                            ' ',
                            detail.ar2button(
                                answer.remark, _("Remark"),
                                icon_name=None)]
                        # ar.obj2html(answer.remark, _("Remark"))]
                    else:
                        btn = insert.ar2button(
                            answer.remark, _("Remark"), icon_name=None)
                        # sar = RemarksByAnswer.request_from(ar, answer)
                        # btn = sar.insert_button(_("Remark"), icon_name=None)
                        items += [" (", btn, ")"]
                else:
                    # read-only cell showing another response's answer
                    other_answer = AnswersByResponseRow(r, answer.question)
                    items = [str(other_answer)]
                    if other_answer.remark.remark:
                        # FIX: show the other response's remark, not the
                        # current response's remark.
                        items += [E.br(), other_answer.remark.remark]
                cells.append(E.p(*items))
            ht.add_body_row(*cells, **cellattrs)
        return ar.html_text(ht.as_element())
        # return E.div(ht.as_element(), class_="htmlText")

    @dd.displayfield(_("My answer"))
    def answer_buttons(self, obj, ar):
        """Render one toggle button per choice of the question's choiceset,
        the selected choice(s) shown in brackets."""
        # assert isinstance(obj, Answer)
        cs = obj.question.get_choiceset()
        if cs is None:
            return ''
        elems = []
        pv = dict(question=obj.question)
        # ia = obj.response.toggle_choice
        sar = obj.response.toggle_choice.request_from(
            ar, is_on_main_actor=False)
        # print(20170731, sar.is_on_main_actor)
        if False:  # since 20170129
            ba = Responses.actions.toggle_choice
            if ba is None:
                raise Exception("No toggle_choice on {0}?".format(ar.actor))
            sar = ba.request_from(ar)
        # print("20150203 answer_buttons({0})".format(sar))
        # if the response is registered, just display the choice, no
        # toggle buttons since answer cannot be toggled:
        # 20151211
        sar.selected_rows = [obj.response]
        if not sar.get_permission():
            return str(obj)
        AnswerChoice = rt.models.polls.AnswerChoice
        for c in cs.choices.all():
            pv.update(choice=c)
            text = str(c)
            qs = AnswerChoice.objects.filter(
                response=obj.response, **pv)
            if qs.count() == 1:
                # this choice is currently selected
                text = [E.b('[', text, ']')]
            elif qs.count() == 0:
                pass
            else:
                raise Exception(
                    "Oops: %s returned %d rows." % (qs.query, qs.count()))
            sar.set_action_param_values(**pv)
            e = sar.ar2button(obj.response, text, style="text-decoration:none")
            elems.append(e)
        return E.span(*join_elems(elems), **{'class': "htmlText"})

    @classmethod
    def get_pk_field(self):
        # rows are keyed by the question's primary key
        return Question._meta.pk

    @classmethod
    def get_row_by_pk(self, ar, pk):
        response = ar.master_instance
        #~ if response is None: return
        q = rt.models.polls.Question.objects.get(pk=pk)
        return AnswersByResponseRow(response, q)

    @classmethod
    def disable_delete(self, obj, ar):
        # virtual rows can never be deleted
        return "Not deletable"

    @dd.displayfield(_("Question"))
    def question(self, obj, ar):
        """Render the question title (numbered, bold when a heading)."""
        if obj.question.number:
            txt = obj.question.NUMBERED_TITLE_FORMAT % (
                obj.question.number, obj.question.title)
        else:
            txt = obj.question.title
        attrs = {'class': "htmlText"}
        if obj.question.details:
            attrs.update(title=obj.question.details)
        if obj.question.is_heading:
            txt = E.b(txt, **attrs)
        return E.span(txt, **attrs)
@dd.python_2_unicode_compatible
class AnswersByQuestionRow(object):
    """Volatile row object pairing one Response with one Question.

    Row type of AnswersByQuestion; not a database model.
    """

    FORWARD_TO_RESPONSE = tuple(
        "full_clean after_ui_save disable_delete obj2href".split())

    def __init__(self, response, question):
        self.response = response
        self.question = question
        # Needed by AnswersByQuestion.get_row_by_pk
        self.pk = self.id = response.pk
        try:
            self.remark = AnswerRemark.objects.get(
                question=question, response=response).remark
        except AnswerRemark.DoesNotExist:
            self.remark = ''
        self.choices = AnswerChoice.objects.filter(
            question=question, response=response)
        # NOTE(review): despite the name FORWARD_TO_RESPONSE these
        # attributes are copied from `question`, not `response` --
        # confirm whether that is intentional.
        for k in self.FORWARD_TO_RESPONSE:
            setattr(self, k, getattr(question, k))

    def __str__(self):
        if self.choices.count() == 0:
            return str(_("N/A"))
        return ', '.join([str(ac.choice) for ac in self.choices])
class AnswersByQuestion(dd.VirtualTable):
    """Virtual slave table: one row per response to the master question."""
    label = _("Answers")
    master = 'polls.Question'
    column_names = 'response:40 answer:30 remark:20 *'
    variable_row_height = True
    auto_fit_column_widths = True

    @classmethod
    def get_data_rows(self, ar):
        question = ar.master_instance
        if question is None:
            return
        for r in rt.models.polls.Response.objects.filter(poll=question.poll):
            yield AnswersByQuestionRow(r, question)

    @dd.displayfield(_("Response"))
    def response(self, obj, ar):
        return ar.obj2html(obj.response)

    @dd.displayfield(_("Remark"))
    def remark(self, obj, ar):
        return obj.remark

    @dd.displayfield(_("Answer"))
    def answer(self, obj, ar):
        # string form of the row: comma-separated selected choices
        return str(obj)
class PollResult(Questions):
    """Poll results: one row per question with answer-count columns."""
    master_key = 'poll'
    column_names = "question choiceset answers a1"

    # @classmethod
    # def get_data_rows(self, ar):
    #     poll = ar.master_instance
    #     if poll is None:
    #         return
    #     for obj in super(PollResult, self).get_request_queryset(ar):
    #         yield obj

    @dd.virtualfield(dd.ForeignKey('polls.Question'))
    def question(self, obj, ar):
        return obj

    @dd.requestfield(_("#Answers"))
    def answers(self, obj, ar):
        #~ return ar.spawn(Answer.objects.filter(question=obj))
        return AnswerChoices.request(known_values=dict(question=obj))

    @dd.requestfield(_("A1"))
    def a1(self, obj, ar):
        # count of answers using the first choice of the choiceset
        cs = obj.get_choiceset()
        if cs is not None:
            c = next(iter(cs.choices.all()))
            #~ return Answer.objects.filter(question=obj,choice=c)
            return AnswerChoices.request(
                known_values=dict(question=obj, choice=c))
|
|
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sqlalchemy.orm import exc
from quantum.common import exceptions as q_exc
import quantum.db.api as db
from quantum.db import models_v2
from quantum.db import securitygroups_db as sg_db
from quantum import manager
from quantum.openstack.common import log as logging
from quantum.plugins.linuxbridge.common import config # noqa
from quantum.plugins.linuxbridge.common import constants
from quantum.plugins.linuxbridge.db import l2network_models_v2
LOG = logging.getLogger(__name__)
def initialize():
    """Set up the plugin's database via quantum's shared db module."""
    db.configure_db()
def sync_network_states(network_vlan_ranges):
    """Synchronize network_states table with current configured VLAN ranges.

    :param network_vlan_ranges: dict mapping physical network name to a
        list of (vlan_min, vlan_max) tuples.

    Bug fix: the final cleanup loop (for *unconfigured* physical networks)
    logged the stale ``physical_network`` loop variable left over from the
    previous loop instead of the state's own physical network.
    """
    session = db.get_session()
    with session.begin():
        # get existing allocations for all physical networks
        allocations = dict()
        states = (session.query(l2network_models_v2.NetworkState).
                  all())
        for state in states:
            if state.physical_network not in allocations:
                allocations[state.physical_network] = set()
            allocations[state.physical_network].add(state)

        # process vlan ranges for each configured physical network
        for physical_network, vlan_ranges in network_vlan_ranges.iteritems():
            # determine current configured allocatable vlans for this
            # physical network
            vlan_ids = set()
            for vlan_range in vlan_ranges:
                vlan_ids |= set(xrange(vlan_range[0], vlan_range[1] + 1))

            # remove from table unallocated vlans not currently allocatable
            if physical_network in allocations:
                for state in allocations[physical_network]:
                    try:
                        # see if vlan is allocatable
                        vlan_ids.remove(state.vlan_id)
                    except KeyError:
                        # it's not allocatable, so check if its allocated
                        if not state.allocated:
                            # it's not, so remove it from table
                            LOG.debug(_("Removing vlan %(vlan_id)s on "
                                        "physical network %(physical_network)s"
                                        " from pool"),
                                      {'vlan_id': state.vlan_id,
                                       'physical_network': physical_network})
                            session.delete(state)
                del allocations[physical_network]

            # add missing allocatable vlans to table
            for vlan_id in sorted(vlan_ids):
                state = l2network_models_v2.NetworkState(physical_network,
                                                         vlan_id)
                session.add(state)

        # remove from table unallocated vlans for any unconfigured physical
        # networks
        for states in allocations.itervalues():
            for state in states:
                if not state.allocated:
                    LOG.debug(_("Removing vlan %(vlan_id)s on physical "
                                "network %(physical_network)s"
                                " from pool"),
                              # FIX: use the state's own network, not the
                              # leftover loop variable from the loop above.
                              {'vlan_id': state.vlan_id,
                               'physical_network': state.physical_network})
                    session.delete(state)
def get_network_state(physical_network, vlan_id):
    """Get state of specified network."""
    session = db.get_session()
    query = session.query(l2network_models_v2.NetworkState)
    query = query.filter_by(physical_network=physical_network,
                            vlan_id=vlan_id)
    try:
        return query.one()
    except exc.NoResultFound:
        return None
def reserve_network(session):
    """Allocate any free vlan from the pool.

    :returns: (physical_network, vlan_id) tuple of the reserved vlan
    :raises: q_exc.NoNetworkAvailable when the pool is exhausted
    """
    with session.begin(subtransactions=True):
        # lock the first unallocated row to avoid concurrent reservations
        state = (session.query(l2network_models_v2.NetworkState).
                 filter_by(allocated=False).
                 with_lockmode('update').
                 first())
        if not state:
            raise q_exc.NoNetworkAvailable()
        LOG.debug(_("Reserving vlan %(vlan_id)s on physical network "
                    "%(physical_network)s from pool"),
                  {'vlan_id': state.vlan_id,
                   'physical_network': state.physical_network})
        state.allocated = True
    return (state.physical_network, state.vlan_id)
def reserve_specific_network(session, physical_network, vlan_id):
    """Allocate one specific vlan, adding an out-of-pool row if needed.

    :raises: q_exc.FlatNetworkInUse / q_exc.VlanIdInUse when the vlan is
        already allocated
    """
    with session.begin(subtransactions=True):
        try:
            state = (session.query(l2network_models_v2.NetworkState).
                     filter_by(physical_network=physical_network,
                               vlan_id=vlan_id).
                     with_lockmode('update').
                     one())
            if state.allocated:
                if vlan_id == constants.FLAT_VLAN_ID:
                    raise q_exc.FlatNetworkInUse(
                        physical_network=physical_network)
                else:
                    raise q_exc.VlanIdInUse(vlan_id=vlan_id,
                                            physical_network=physical_network)
            LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical "
                        "network %(physical_network)s from pool"),
                      {'vlan_id': vlan_id,
                       'physical_network': physical_network})
            state.allocated = True
        except exc.NoResultFound:
            # vlan not in any configured range: still track it as allocated
            LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical "
                        "network %(physical_network)s outside pool"),
                      {'vlan_id': vlan_id,
                       'physical_network': physical_network})
            state = l2network_models_v2.NetworkState(physical_network, vlan_id)
            state.allocated = True
            session.add(state)
def release_network(session, physical_network, vlan_id, network_vlan_ranges):
    """Mark a vlan unallocated; drop its row entirely when outside the pool."""
    with session.begin(subtransactions=True):
        try:
            state = (session.query(l2network_models_v2.NetworkState).
                     filter_by(physical_network=physical_network,
                               vlan_id=vlan_id).
                     with_lockmode('update').
                     one())
            state.allocated = False
            # is the vlan inside one of the configured ranges?
            inside = any(r[0] <= vlan_id <= r[1]
                         for r in network_vlan_ranges.get(physical_network,
                                                          []))
            if inside:
                LOG.debug(_("Releasing vlan %(vlan_id)s on physical network "
                            "%(physical_network)s to pool"),
                          {'vlan_id': vlan_id,
                           'physical_network': physical_network})
            else:
                LOG.debug(_("Releasing vlan %(vlan_id)s on physical network "
                            "%(physical_network)s outside pool"),
                          {'vlan_id': vlan_id,
                           'physical_network': physical_network})
                session.delete(state)
        except exc.NoResultFound:
            LOG.warning(_("vlan_id %(vlan_id)s on physical network "
                          "%(physical_network)s not found"),
                        {'vlan_id': vlan_id,
                         'physical_network': physical_network})
def add_network_binding(session, network_id, physical_network, vlan_id):
    """Record the (physical_network, vlan_id) binding of a network."""
    with session.begin(subtransactions=True):
        binding = l2network_models_v2.NetworkBinding(network_id,
                                                     physical_network, vlan_id)
        session.add(binding)
def get_network_binding(session, network_id):
    """Return the NetworkBinding for network_id, or None if unbound."""
    query = session.query(l2network_models_v2.NetworkBinding)
    try:
        return query.filter_by(network_id=network_id).one()
    except exc.NoResultFound:
        return None
def get_port_from_device(device):
    """Get port from database.

    :param device: prefix of the port id to look up
    :returns: port dict augmented with security group info, or None
    """
    LOG.debug(_("get_port_from_device() called"))
    session = db.get_session()
    sg_binding_port = sg_db.SecurityGroupPortBinding.port_id

    # one row per (port, security group) pair; group id may be NULL
    query = session.query(models_v2.Port,
                          sg_db.SecurityGroupPortBinding.security_group_id)
    query = query.outerjoin(sg_db.SecurityGroupPortBinding,
                            models_v2.Port.id == sg_binding_port)
    query = query.filter(models_v2.Port.id.startswith(device))
    port_and_sgs = query.all()
    if not port_and_sgs:
        return
    port = port_and_sgs[0][0]
    plugin = manager.QuantumManager.get_plugin()
    port_dict = plugin._make_port_dict(port)
    port_dict['security_groups'] = []
    for port_in_db, sg_id in port_and_sgs:
        if sg_id:
            port_dict['security_groups'].append(sg_id)
    port_dict['security_group_rules'] = []
    port_dict['security_group_source_groups'] = []
    # flatten fixed_ips to plain addresses
    port_dict['fixed_ips'] = [ip['ip_address']
                              for ip in port['fixed_ips']]
    return port_dict
def set_port_status(port_id, status):
    """Set the port status.

    :raises: q_exc.PortNotFound when no port with port_id exists
    """
    LOG.debug(_("set_port_status as %s called"), status)
    session = db.get_session()
    try:
        port = session.query(models_v2.Port).filter_by(id=port_id).one()
        port['status'] = status
        session.merge(port)
        session.flush()
    except exc.NoResultFound:
        raise q_exc.PortNotFound(port_id=port_id)
|
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from past.builtins import basestring, long, unicode
import functools
from collections import Mapping
from datetime import datetime
from sqlalchemy import extract, func
from sqlalchemy.orm import synonym
from sqlalchemy.ext.hybrid import Comparator, hybrid_property
from flexget.manager import Session
from flexget.utils import qualities, json
from flexget.entry import Entry
def with_session(*args, **kwargs):
    """
    A decorator which creates a new session if one was not passed via keyword argument to the function.

    Automatically commits and closes the session if one was created, caller is responsible for commit if passed in.
    If arguments are given when used as a decorator, they will automatically be passed to the created Session when
    one is not supplied.

    Fixes: stray fourth quote at the start of the docstring; the wrapper now
    uses functools.wraps so decorated functions keep their metadata.
    """

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            if kwargs.get('session'):
                # caller supplied a session: use it, caller commits
                return func(*args, **kwargs)
            with _Session() as session:
                kwargs['session'] = session
                return func(*args, **kwargs)

        return wrapper

    if len(args) == 1 and not kwargs and callable(args[0]):
        # Used without arguments, e.g. @with_session
        # We default to expire_on_commit being false, in case the decorated function returns db instances
        _Session = functools.partial(Session, expire_on_commit=False)
        return decorator(args[0])
    else:
        # Arguments were specified, turn them into arguments for Session creation e.g. @with_session(autocommit=True)
        _Session = functools.partial(Session, *args, **kwargs)
        return decorator
def pipe_list_synonym(name):
    """Converts pipe separated text into a list"""

    def getter(self):
        raw = getattr(self, name)
        if raw:
            return raw.strip('|').split('|')

    def setter(self, value):
        joined = value if isinstance(value, str) else '|'.join(value)
        setattr(self, name, joined)

    return synonym(name, descriptor=property(getter, setter))
def text_date_synonym(name):
    """Converts Y-M-D date strings into datetime objects"""

    def getter(self):
        return getattr(self, name)

    def setter(self, value):
        if not isinstance(value, basestring):
            setattr(self, name, value)
            return
        try:
            parsed = datetime.strptime(value, '%Y-%m-%d')
        except ValueError:
            # Invalid date string given, set to None
            parsed = None
        setattr(self, name, parsed)

    return synonym(name, descriptor=property(getter, setter))
def entry_synonym(name):
    """Use json to serialize python objects for db storage.

    Fixes: the list/tuple/set branch caught ValueError although
    only_builtins raises TypeError for unsupported items (the dict branch
    already caught TypeError), so unsupported sequence members escaped
    instead of being skipped; also removed an unreachable trailing
    conversion loop.
    """

    def only_builtins(item):
        """Recursively reduce *item* to json-serializable builtin values.

        Unsupported members of dicts/lists/tuples/sets are silently
        dropped; an unsupported top-level item raises TypeError.
        """
        supported_types = (str, unicode, int, float, long, bool, datetime)
        # dict, list, tuple and set are also supported, but handled separately
        if isinstance(item, supported_types):
            return item
        elif isinstance(item, Mapping):
            result = {}
            for key, value in item.items():
                try:
                    result[key] = only_builtins(value)
                except TypeError:
                    continue
            return result
        elif isinstance(item, (list, tuple, set)):
            result = []
            for value in item:
                try:
                    result.append(only_builtins(value))
                except TypeError:
                    # FIX: only_builtins raises TypeError, not ValueError
                    continue
            if isinstance(item, list):
                return result
            elif isinstance(item, tuple):
                return tuple(result)
            else:
                return set(result)
        else:
            # If item isn't a subclass of a builtin python type, raise TypeError.
            raise TypeError('%r is not of type Entry.' % type(item))

    def getter(self):
        return Entry(json.loads(getattr(self, name), decode_datetime=True))

    def setter(self, entry):
        if isinstance(entry, Entry) or isinstance(entry, dict):
            setattr(self, name, unicode(json.dumps(only_builtins(dict(entry)), encode_datetime=True)))
        else:
            raise TypeError('%r is not of type Entry or dict.' % type(entry))

    return synonym(name, descriptor=property(getter, setter))
def json_synonym(name):
    """Use json to serialize python objects for db storage."""

    def getter(self):
        raw = getattr(self, name)
        return json.loads(raw, decode_datetime=True)

    def setter(self, entry):
        encoded = json.dumps(entry, encode_datetime=True)
        setattr(self, name, unicode(encoded))

    return synonym(name, descriptor=property(getter, setter))
class CaseInsensitiveWord(Comparator):
    """Hybrid value representing a string that compares case insensitively."""

    def __init__(self, word):
        # unwrap nested CaseInsensitiveWord instances
        if isinstance(word, CaseInsensitiveWord):
            self.word = word.word
        else:
            self.word = word

    def lower(self):
        # plain python string -> str.lower(); SQL expression -> func.lower()
        if isinstance(self.word, str):
            return self.word.lower()
        else:
            return func.lower(self.word)

    def operate(self, op, other):
        # all comparisons are performed on the lowercased forms
        if not isinstance(other, CaseInsensitiveWord):
            other = CaseInsensitiveWord(other)
        return op(self.lower(), other.lower())

    def __clause_element__(self):
        return self.lower()

    def __str__(self):
        return self.word

    def __getattr__(self, item):
        """Expose string methods to be called directly on this object."""
        return getattr(self.word, item)
def quality_property(text_attr):
    """Hybrid property presenting a text column as a qualities.Quality."""

    def getter(self):
        return qualities.Quality(getattr(self, text_attr))

    def setter(self, value):
        if isinstance(value, str):
            setattr(self, text_attr, value)
        else:
            setattr(self, text_attr, value.name)

    class QualComparator(Comparator):
        # compare against Quality objects by their name string
        def operate(self, op, other):
            if isinstance(other, qualities.Quality):
                other = other.name
            return op(self.__clause_element__(), other)

    def comparator(self):
        return QualComparator(getattr(self, text_attr))

    prop = hybrid_property(getter, setter)
    # NOTE(review): modern SQLAlchemy hybrid_property.comparator() returns a
    # *new* hybrid and the result is discarded here -- confirm this still
    # registers the comparator on the installed SQLAlchemy version.
    prop.comparator(comparator)
    return prop
def quality_requirement_property(text_attr):
    """Hybrid property presenting a text column as qualities.Requirements."""

    def getter(self):
        return qualities.Requirements(getattr(self, text_attr))

    def setter(self, value):
        text = value if isinstance(value, str) else value.text
        setattr(self, text_attr, text)

    return hybrid_property(getter, setter)
def ignore_case_property(text_attr):
    """Hybrid property wrapping a text column in CaseInsensitiveWord."""

    def setter(self, value):
        setattr(self, text_attr, value)

    def getter(self):
        return CaseInsensitiveWord(getattr(self, text_attr))

    return hybrid_property(getter, setter)
def year_property(date_attr):
    """Hybrid property exposing the year of a date column (None-safe)."""

    def getter(self):
        # returns None when the date is unset
        date = getattr(self, date_attr)
        return date and date.year

    def expr(cls):
        # SQL-side equivalent of the python getter
        return extract('year', getattr(cls, date_attr))

    return hybrid_property(getter, expr=expr)
|
|
"""
Changed from Ed Barosh algorithm. Stable
Adapted Global Market Rotation Strategy
This strategy rotates between six global market ETFs on a monthly
basis. Each month the performance and mean 20-day volitility over
the last 13 weekds are used to rank which ETF should be invested
in for the coming month.
"""
import math
def initialize(context):
    """Initialize context object. It's passed to the handle_data function."""
    # Candidate ETF universe, keyed by Quantopian security id.
    context.stocks = {
        #12915: sid(12915), # MDY (SPDR S&P MIDCAP 400)
        8554:sid(8554), #SPY
        21769: sid(21769), # IEV (ISHARES EUROPE ETF)
        24705: sid(24705), # EEM (ISHARES MSCI EMERGING MARKETS)
        23134: sid(23134), # ILF (ISHARES LATIN AMERICA 40)
        23118: sid(23118), # EEP (ISHARES MSCI PACIFIC EX JAPAN)
        22887: sid(22887), # EDV (VANGUARD EXTENDED DURATION TREASURY)
        23921:sid(23921), #TLT
        26807:sid(26807), #GLD
        #38297:sid(38297), #CWB
        #34831:sid(34831), #PCY
        #35175:sid(35175), #JNK
        24611: sid(24611), #EZA
        #28320:sid(28320), #USO
        #33748:sid(33748), #RSX
        #10773:sid(10773), #IFN
        #26703:sid(26703), #FXI
    }

    # Rebalancing period in calendar days
    context.period = 31

    # The order ID of the sell order currently being filled
    context.oid = None

    # The current stock being held
    context.currentStock = None

    # The next stock that needs to get purchased (once the sell order
    # on the current stock is filled
    context.nextStock = None

    # The 3-month lookback period. Calculated based on there being
    # an average of 21 trading days in a month
    context.lookback = 63

    context.currentMonth = None

    # evaluate the rebalancing logic on every trading day
    schedule_function(func=process, date_rule=date_rules.every_day())
def getminmax(vdict):
    """
    Get the minimum and maximum values of a list of dictionary values.

    :param vdict: Python dict-like object.
    :returns: minimum and maximum of vdict values
    """
    values = list(vdict.values())
    return min(values), max(values)
def hist_volatility(period, prices):
    """
    Calculate the n-day historical volatility given a set of n+1 prices.

    :param period: The number of days for which to calculate volatility
    :param prices: An array of price information. Must be of length period + 1.
    :returns: annualized volatility estimate (float)
    """
    # HVdaily = sqrt( sum[1..n](x_t - Xbar)^2 / n - 1)
    # Daily log returns: ln(P_t / P_t-1)
    returns = [math.log(prices[i] / prices[i - 1])
               for i in range(1, period + 1)]

    # Find the average of all returns
    rmean = sum(returns) / period

    # Sum of squared deviations of each return from the mean
    sq_diff = sum((r - rmean) ** 2 for r in returns)

    # Sample standard deviation, annualized.
    # FIX: 252.0 forces float division -- under Python 2 the original
    # `252/period` used integer division (252/20 -> 12 instead of 12.6),
    # distorting the annualization factor.
    return math.sqrt(sq_diff / (period - 1)) * math.sqrt(252.0 / period)
def getmetrics(prices, period):
    """
    Get the performance and average 20-day volatility of a security
    over a given period

    :param prices: close-price series, oldest first / newest last
    :param period: The time period for which to find the metrics
    :returns: (performance, average 20-day volatility) tuple
    """
    # Get the prices
    #prices = data['close_price'][security][-period-1:]
    # Overall performance: relative change from first to last price.
    start = prices[-period] # First item
    end = prices[-1] # Last item
    performance = (end - start) / start

    # Calculate 20-day volatility for the given period
    volats = []
    j = 0
    for i in xrange(-period, 0):
        # NOTE(review): hist_volatility() only reads the first 21 elements
        # of this slice, so the effective window ends 21 entries after the
        # slice start rather than at position i -- confirm intended bounds,
        # especially for the earliest iterations where the negative start
        # index clips to 0.
        volats.append(hist_volatility(20, prices[i-21:21+j]))
        j += 1
    avg_volat = sum(volats) / period
    return performance, avg_volat
def getbeststock(context, close, stocks, period):
    """
    Pick the best stock from a group of stocks based on the given
    data over a specified period using the stocks' performance and
    volatility

    :param close: price history indexed by sid
    :param stocks: A list of stocks to rank
    :param period: The time period over which the stocks will be analyzed
    :returns: the top-ranked stock
    """
    best = None
    performances = {}
    volatilities = {}

    # Get performance and volatility for all the stocks
    for stock in stocks:
        perf, volat = getmetrics(close[stock.sid], period)
        performances[stock.sid] = perf
        volatilities[stock.sid] = volat

    # Determine min/max of each. NOTE: volatility is switched
    # since a low volatility should be weighted highly.
    minp, maxp = getminmax(performances)
    maxv, minv = getminmax(volatilities)

    # Normalize the performance and volatility values to a range
    # between [0..1] then rank them based on a 70/30 weighting.
    # NOTE(review): if every performance (or every volatility) is equal,
    # the normalization divides by zero -- confirm this cannot occur.
    for stock in stocks:
        perf = (performances[stock.sid] - minp) / (maxp - minp)
        volat = (volatilities[stock.sid] - minv) / (maxv - minv)
        rank = perf * 0.7 + volat * 0.3
        #log.info('Rank info for %s: p=%s, v=%s, r=%s' % (s,p,v,rank))

        # If the new rank is greater than the old best rank, pick it.
        if best is None or rank > best[1]:
            best = stock, rank
    return best[0]
def sellholdings(context):
    """Sell all the currently held positions in the context's portfolio.

    :returns: the id of the last sell order placed, or None if nothing held
    """
    positions = context.portfolio.positions

    oid = None
    for pos in positions.values():
        if (pos.amount > 0):
            log.info('Selling %s shares of %s' % (pos.amount, pos.sid.symbol))
            oid = order(pos.sid, -pos.amount)
    return oid
# window_length SHOULD EQUAL context.metricPeriod
def accumulateData(context, data):
    """Fetch `context.lookback` daily prices for every tracked stock."""
    close = data.history(context.stocks.values(), "price", bar_count=context.lookback, frequency='1d')
    return close
def days(begin, end):
    """Calculate amount of calendar days between two datetimes.

    Both endpoints are truncated to midnight first, so only the date part
    matters; the result is negative when *end* precedes *begin*.
    """
    midnight = {"hour": 0, "minute": 0, "second": 0, "microsecond": 0}
    span = end.replace(**midnight) - begin.replace(**midnight)
    return span.days
def process(context, data):
    """
    The main processing function, called whenever a market event occurs for
    any of the algorithm's securities.

    Strategy outline: once per calendar month, rank the tradable stocks over
    ``context.lookback`` days, sell the current holding and queue a buy of
    the winner; the buy itself executes on a later call, once the sell order
    has filled.

    :param context: context object
    :param data: Object contains all the market data for algorithm securities
                 keyed by security id. It represents a snapshot of algorithm's
                 universe as of when this method is called.
    :returns: None
    """
    # Accumulate data until there is enough days worth of data
    # to process without having outOfBounds issues.
    close_panel = accumulateData(context, data)
    if close_panel is None:
        # There is insufficient data accumulated to process
        return
    current_date = get_datetime()
    # If there is an order ID, check the status of the order.
    # If there is an order and it is filled, the next stock
    # can be purchased.
    if context.oid is not None:
        orderobj = get_order(context.oid)
        if orderobj.filled == orderobj.amount:
            # the pending sell has completed; amount is negative for sells
            log.info('Sold %s shares of %s' % (-orderobj.amount, orderobj.sid.symbol))
            # Good to buy next holding with the freed-up cash
            cash = context.portfolio.cash
            oobj = get_order(order_value(context.nextStock, cash))
            log.info('Sell order complete, buying %s shares of %s. Cash is %s' % \
                (oobj.amount, context.nextStock.symbol, cash))
            context.currentStock = context.nextStock
            context.oid = None
            context.nextStock = None
    date = get_datetime()
    month = date.month
    if not context.currentMonth:
        # Set the month initially
        context.currentMonth = month
    if context.currentMonth == month:
        # If the current month is unchanged, nothing further to do
        return
    # A new month has been reached: re-rank and rotate the holding.
    context.currentMonth = month
    log.info("Rebalance...")
    # Ensure stocks are only traded if possible.
    # (e.g) EDV doesn't start trading until late 2007, without
    # this, any backtest run before that date would fail.
    stocks = []
    for stock in context.stocks.values():
        if current_date > stock.security_start_date: #and stock.sid in datapanel['price']:
            stocks.append(stock)
    # Determine which stock should be used for the next month
    best = getbeststock(context, close_panel, stocks, context.lookback)
    if best:
        if (context.currentStock is not None and context.currentStock == best):
            # If there is a stock currently held and it is the same as
            # the new 'best' stock, nothing needs to be done
            return
        else:
            # Otherwise, the current stock needs to be sold and the new
            # stock bought
            context.oid = sellholdings(context)
            context.nextStock = best
            # Purchase will not occur until the next call of handle_data
            # and only when the order has been filled.
    # If there is no stock currently held yet (first rotation), buy
    # immediately -- there is no pending sell order to wait for.
    # NOTE(review): if `best` is falsy while currentStock is None, this
    # passes nextStock=None to order_value -- presumably best is never
    # falsy here; confirm upstream.
    if context.currentStock is None:
        cash = context.portfolio.cash
        oobj = get_order(order_value(context.nextStock, cash))
        log.info('Buying %s shares of %s. Cash is %s' % \
            (oobj.amount, context.nextStock.symbol, cash))
        context.currentStock = context.nextStock
        context.oid = None
        context.nextStock = None
|
|
from __future__ import print_function, unicode_literals
import os
import subprocess
import sys
import textwrap
import time
import traceback
from collections import OrderedDict
from fnmatch import fnmatch
import py
import pytest
import six
import tox
import tox.session
from tox import venv
from tox.config import parseconfig
from tox.config.parallel import ENV_VAR_KEY_PRIVATE as PARALLEL_ENV_VAR_KEY_PRIVATE
from tox.config.parallel import ENV_VAR_KEY_PUBLIC as PARALLEL_ENV_VAR_KEY_PUBLIC
from tox.reporter import update_default_reporter
from tox.venv import CreationConfig, VirtualEnv, getdigest
# Platform skip markers: apply to tests that must not run on Windows or
# must not run on POSIX, respectively.
mark_dont_run_on_windows = pytest.mark.skipif(os.name == "nt", reason="non windows test")
mark_dont_run_on_posix = pytest.mark.skipif(os.name == "posix", reason="non posix test")
def pytest_configure():
    """Drop environment variables that would leak ambient tox/CI state
    into the test run."""
    for variable in ("TOXENV", "HUDSON_URL"):
        # pop with default: deleting an absent key must not raise
        os.environ.pop(variable, None)
def pytest_addoption(parser):
    """Register the --no-network flag used to skip network-dependent tests."""
    options = {
        "action": "store_true",
        "dest": "no_network",
        "help": "don't run tests requiring network",
    }
    parser.addoption("--no-network", **options)
def pytest_report_header():
    # show in the pytest header which tox installation is under test
    return "tox comes from: {!r}".format(tox.__file__)
@pytest.fixture
def work_in_clean_dir(tmpdir):
    """Run the test body with the working directory switched to a fresh
    temporary directory (restored afterwards by as_cwd)."""
    with tmpdir.as_cwd():
        yield
@pytest.fixture(autouse=True)
def check_cwd_not_changed_by_test():
    """Fail any test that leaks a working-directory change."""
    old = os.getcwd()
    yield
    new = os.getcwd()
    if old != new:
        pytest.fail("test changed cwd: {!r} => {!r}".format(old, new))
@pytest.fixture(autouse=True)
def check_os_environ_stable():
    """Fail any test that permanently mutates ``os.environ``.

    Variables tox itself reacts to (parallel markers, TOX_WORK_DIR,
    PYTHONPATH) are stashed before the test and restored afterwards;
    every other added/removed/changed key is reported as a failure.
    """
    old = os.environ.copy()
    # remove tox-sensitive variables for the duration of the test
    to_clean = {
        k: os.environ.pop(k, None)
        for k in {
            PARALLEL_ENV_VAR_KEY_PRIVATE,
            PARALLEL_ENV_VAR_KEY_PUBLIC,
            str("TOX_WORK_DIR"),
            str("PYTHONPATH"),
        }
    }
    yield
    # restore the stashed variables before diffing
    for key, value in to_clean.items():
        if value is not None:
            os.environ[key] = value
    new = os.environ
    extra = {k: new[k] for k in set(new) - set(old)}
    miss = {k: old[k] for k in set(old) - set(new)}
    # changed values; pytest/coverage bookkeeping variables are tolerated
    diff = {
        "{} = {} vs {}".format(k, old[k], new[k])
        for k in set(old) & set(new)
        if old[k] != new[k] and not (k.startswith("PYTEST_") or k.startswith("COV_"))
    }
    if extra or miss or diff:
        msg = "test changed environ"
        if extra:
            msg += " extra {}".format(extra)
        if miss:
            msg += " miss {}".format(miss)
        if diff:
            msg += " diff {}".format(diff)
        pytest.fail(msg)
@pytest.fixture(name="newconfig")
def create_new_config_file(tmpdir):
def create_new_config_file_(args, source=None, plugins=(), filename="tox.ini"):
if source is None:
source = args
args = []
s = textwrap.dedent(source)
p = tmpdir.join(filename)
p.write(s)
tox.session.setup_reporter(args)
with tmpdir.as_cwd():
return parseconfig(args, plugins=plugins)
return create_new_config_file_
@pytest.fixture
def cmd(request, monkeypatch, capfd):
    """Fixture returning a ``run(*argv)`` helper that invokes tox in-process
    and returns a RunResult carrying exit status and captured output."""
    if request.config.option.no_network:
        pytest.skip("--no-network was specified, test cannot run")
    # restore the original working directory once the test is done
    request.addfinalizer(py.path.local().chdir)
    def run(*argv):
        reset_report()
        with RunResult(argv, capfd) as result:
            _collect_session(result)
            # noinspection PyBroadException
            try:
                tox.session.main([str(x) for x in argv])
                assert False  # unreachable: main() always exits via SystemExit
            except SystemExit as exception:
                # the SystemExit code is tox's exit status
                result.ret = exception.code
            except OSError as e:
                traceback.print_exc()
                result.ret = e.errno
            except Exception:
                # any other failure maps to a generic non-zero status
                traceback.print_exc()
                result.ret = 1
        return result
    def _collect_session(result):
        # wrap build_session so the Session object is saved on the result
        prev_build = tox.session.build_session
        def build_session(config):
            result.session = prev_build(config)
            return result.session
        monkeypatch.setattr(tox.session, "build_session", build_session)
    yield run
class RunResult:
    """Captured outcome of one in-process tox run: exit status, duration,
    stderr/stdout text and the tox Session that was built.

    Used as a context manager around the invocation so timing and output
    capture happen on exit.
    """

    def __init__(self, args, capfd):
        self.args = args
        self.ret = None
        self.duration = None
        self.out = None
        self.err = None
        self.session = None
        self.capfd = capfd

    def __enter__(self):
        self._start = time.time()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.duration = time.time() - self._start
        self.out, self.err = self.capfd.readouterr()

    def _read(self, out, pos):
        out.buffer.seek(pos)
        raw = out.buffer.read()
        return raw.decode(out.encoding, errors=out.errors)

    @property
    def outlines(self):
        # stderr lines come first, then stdout; None means nothing captured
        collected = []
        for stream in (self.err, self.out):
            if stream is not None:
                collected.extend(stream.splitlines())
        return collected

    def __repr__(self):
        rendered = "RunResult(ret={}, args={!r}, out=\n{}\n, err=\n{})".format(
            self.ret,
            self.args,
            self.out,
            self.err,
        )
        return rendered.encode("UTF-8") if six.PY2 else rendered

    def output(self):
        return "{}\n{}\n{}".format(self.ret, self.err, self.out)

    def _saw_congratulations(self):
        # the success banner tox prints as its final report line
        return any(line == " congratulations :)" for line in reversed(self.outlines))

    def assert_success(self, is_run_test_env=True):
        details = self.output()
        assert self.ret == 0, details
        if is_run_test_env:
            assert self._saw_congratulations(), details

    def assert_fail(self, is_run_test_env=True):
        details = self.output()
        assert self.ret, details
        if is_run_test_env:
            assert not self._saw_congratulations(), details
class ReportExpectMock:
    """Assertion helper over the global tox reporter's recorded lines.

    Keeps a cursor (``_index``) into ``reporter._INSTANCE.reported_lines``
    so successive expectations consume the report log in order.
    """
    def __init__(self):
        from tox import reporter
        self.instance = reporter._INSTANCE
        self.clear()
        self._index = -1

    def clear(self):
        """Reset the cursor and drop all recorded reporter lines."""
        self._index = -1
        if not six.PY2:
            self.instance.reported_lines.clear()
        else:
            # Python 2 lists have no clear(); delete the full slice instead
            del self.instance.reported_lines[:]

    def getnext(self, cat):
        """Advance to and return the next reported call whose category
        matches the fnmatch pattern *cat*; raise LookupError when absent."""
        __tracebackhide__ = True
        newindex = self._index + 1
        while newindex < len(self.instance.reported_lines):
            call = self.instance.reported_lines[newindex]
            lcat = call[0]
            if fnmatch(lcat, cat):
                self._index = newindex
                return call
            newindex += 1
        raise LookupError(
            "looking for {!r}, no reports found at >={:d} in {!r}".format(
                cat,
                self._index + 1,
                self.instance.reported_lines,
            ),
        )

    def expect(self, cat, messagepattern="*", invert=False):
        """Assert a report of category *cat* matching *messagepattern*
        exists at or after the cursor (or does not, when ``invert``)."""
        __tracebackhide__ = True
        if not messagepattern.startswith("*"):
            messagepattern = "*{}".format(messagepattern)
        while self._index < len(self.instance.reported_lines):
            try:
                call = self.getnext(cat)
            except LookupError:
                break
            # a call is (category, *messages); newlines are flattened
            for lmsg in call[1:]:
                lmsg = str(lmsg).replace("\n", " ")
                if fnmatch(lmsg, messagepattern):
                    if invert:
                        raise AssertionError(
                            "found {}({!r}), didn't expect it".format(cat, messagepattern),
                        )
                    return
        if not invert:
            raise AssertionError(
                "looking for {}({!r}), no reports found at >={:d} in {!r}".format(
                    cat,
                    messagepattern,
                    self._index + 1,
                    self.instance.reported_lines,
                ),
            )

    def not_expect(self, cat, messagepattern="*"):
        """Inverse of expect(): assert the absence of a matching report."""
        return self.expect(cat, messagepattern, invert=True)
class pcallMock:
    """Stand-in for a subprocess handle, recorded instead of spawning a
    real process by the mock session's popen."""

    def __init__(self, args, cwd, env, stdout, stderr, shell):
        self.args = args
        self.arg0 = args[0]
        self.cwd = cwd
        self.env = env
        self.stdout = stdout
        self.stderr = stderr
        self.shell = shell
        # pretend the "process" is the current one and that it succeeded
        self.pid = os.getpid()
        self.returncode = 0

    @staticmethod
    def communicate():
        # no real process: nothing was written to stdout/stderr
        return "", ""

    def wait(self):
        # nothing to wait for; completes immediately
        pass
@pytest.fixture(name="mocksession")
def create_mocksession(request):
config = request.getfixturevalue("newconfig")([], "")
class MockSession(tox.session.Session):
def __init__(self, config):
self.logging_levels(config.option.quiet_level, config.option.verbose_level)
super(MockSession, self).__init__(config, popen=self.popen)
self._pcalls = []
self.report = ReportExpectMock()
def _clearmocks(self):
if not six.PY2:
self._pcalls.clear()
else:
del self._pcalls[:]
self.report.clear()
def popen(self, args, cwd, shell=None, stdout=None, stderr=None, env=None, **_):
process_call_mock = pcallMock(args, cwd, env, stdout, stderr, shell)
self._pcalls.append(process_call_mock)
return process_call_mock
def new_config(self, config):
self.logging_levels(config.option.quiet_level, config.option.verbose_level)
self.config = config
self.venv_dict.clear()
self.existing_venvs.clear()
def logging_levels(self, quiet, verbose):
update_default_reporter(quiet, verbose)
if hasattr(self, "config"):
self.config.option.quiet_level = quiet
self.config.option.verbose_level = verbose
return MockSession(config)
@pytest.fixture
def newmocksession(mocksession, newconfig):
    """Factory fixture: re-point the shared mocksession at a freshly
    parsed config built from *args*/*source*."""
    def newmocksession_(args, source, plugins=()):
        config = newconfig(args, source, plugins=plugins)
        mocksession._reset(config, mocksession.popen)
        return mocksession
    return newmocksession_
def getdecoded(out):
    """Decode *out* as UTF-8; on failure return a safe-repr diagnostic
    string instead of raising."""
    try:
        decoded = out.decode("utf-8")
    except UnicodeDecodeError:
        decoded = "INTERNAL not-utf8-decodeable, truncated string:\n{}".format(
            py.io.saferepr(out),
        )
    return decoded
@pytest.fixture
def initproj(tmpdir):
    """Create a factory function for creating example projects.
    Constructed folder/file hierarchy examples:
    with `src_root` other than `.`:
      tmpdir/
          name/                  # base
            src_root/            # src_root
                name/            # package_dir
                    __init__.py
                name.egg-info/   # created later on package build
            setup.py
    with `src_root` given as `.`:
      tmpdir/
          name/                  # base, src_root
            name/                # package_dir
                __init__.py
            name.egg-info/       # created later on package build
            setup.py
    """
    def initproj_(nameversion, filedefs=None, src_root=".", add_missing_setup_py=True):
        if filedefs is None:
            filedefs = {}
        if not src_root:
            src_root = "."
        if isinstance(nameversion, six.string_types):
            # "pkg-1.2" -> ("pkg", "1.2"); a bare "pkg" defaults to version 0.1
            parts = nameversion.rsplit(str("-"), 1)
            if len(parts) == 1:
                parts.append("0.1")
            name, version = parts
        else:
            name, version = nameversion
        base = tmpdir.join(name)
        src_root_path = _path_join(base, src_root)
        assert base == src_root_path or src_root_path.relto(
            base,
        ), "`src_root` must be the constructed project folder or its direct or indirect subfolder"
        base.ensure(dir=1)
        create_files(base, filedefs)
        # generate a minimal setup.py unless the caller supplied one
        if not _filedefs_contains(base, filedefs, "setup.py") and add_missing_setup_py:
            create_files(
                base,
                {
                    "setup.py": """
                        from setuptools import setup, find_packages
                        setup(
                            name='{name}',
                            description='{name} project',
                            version='{version}',
                            license='MIT',
                            platforms=['unix', 'win32'],
                            packages=find_packages('{src_root}'),
                            package_dir={{'':'{src_root}'}},
                        )
                    """.format(
                        **locals()
                    ),
                },
            )
        # add a stub package module exposing __version__ unless defined
        if not _filedefs_contains(base, filedefs, src_root_path.join(name)):
            create_files(
                src_root_path,
                {
                    name: {
                        "__init__.py": textwrap.dedent(
                            '''
                            """ module {} """
                            __version__ = {!r}''',
                        )
                        .strip()
                        .format(name, version),
                    },
                },
            )
        # MANIFEST.in includes every file created so far
        manifestlines = [
            "include {}".format(p.relto(base)) for p in base.visit(lambda x: x.check(file=1))
        ]
        create_files(base, {"MANIFEST.in": "\n".join(manifestlines)})
        base.chdir()
        return base
    # restore the original cwd after the test (initproj_ chdirs into base)
    with py.path.local().as_cwd():
        yield initproj_
def _path_parts(path):
path = path and str(path) # py.path.local support
parts = []
while path:
folder, name = os.path.split(path)
if folder == path: # root folder
folder, name = name, folder
if name:
parts.append(name)
path = folder
parts.reverse()
return parts
def _path_join(base, *args):
    """Absolute join of *args* onto *base* as a py.path.local."""
    # workaround for a py.path.local bug on Windows (`path.join('/x', abs=1)`
    # should be py.path.local('X:\\x') where `X` is the current drive, when in
    # fact it comes out as py.path.local('\\x'))
    return py.path.local(base.join(*args, abs=1))
def _filedefs_contains(base, filedefs, path):
    """
    whether `filedefs` defines a file/folder with the given `path`
    `path`, if relative, will be interpreted relative to the `base` folder, and
    whether relative or not, must refer to either the `base` folder or one of
    its direct or indirect children. The base folder itself is considered
    created if the filedefs structure is not empty.
    """
    sentinel = object()
    base = py.path.local(base)
    target = _path_join(base, path)
    rel_parts = _path_parts(target.relto(base))
    # walk the nested filedefs dict along the relative path components
    node = filedefs
    for part in rel_parts:
        if not isinstance(node, dict):
            return False
        node = node.get(part, sentinel)
        if node is sentinel:
            return False
    # non-empty relative path: found it; otherwise it is the base folder,
    # which counts as created only when filedefs is non-empty (truthy)
    return rel_parts or target == base and node
def create_files(base, filedefs):
    """Recursively materialize *filedefs* under the *base* directory.

    Dict values become sub-directories; string values are dedented and
    written as UTF-8 text files.
    """
    for key, value in filedefs.items():
        if isinstance(value, dict):
            create_files(base.ensure(key, dir=1), value)
        elif isinstance(value, six.string_types):
            s = textwrap.dedent(value)
            # write_text requires text; coerce bytes/str across py2 and py3
            if not isinstance(s, six.text_type):
                if not isinstance(s, six.binary_type):
                    s = str(s)
                else:
                    s = six.ensure_text(s)
            base.join(key).write_text(s, encoding="UTF-8")
@pytest.fixture()
def mock_venv(monkeypatch):
    """This creates a mock virtual environment (e.g. will inherit the current interpreter).
    Note: because we inherit, to keep things sane you must call the py environment and only that;
    and cannot install any packages."""
    # first ensure we have a clean python path
    monkeypatch.delenv(str("PYTHONPATH"), raising=False)

    # object to collect some data during the execution
    class Result(object):
        def __init__(self, session):
            self.popens = popen_list
            self.session = session

    res = OrderedDict()

    # convince tox that the current running virtual environment is already the env we would create
    class ProxyCurrentPython:
        @classmethod
        def readconfig(cls, path):
            # regular test env: its directory ends in <sep>py
            if path.dirname.endswith("{}py".format(os.sep)):
                return CreationConfig(
                    base_resolved_python_sha256=getdigest(sys.executable),
                    base_resolved_python_path=sys.executable,
                    tox_version=tox.__version__,
                    sitepackages=False,
                    usedevelop=False,
                    deps=[],
                    alwayscopy=False,
                )
            # packaging env: its directory ends in <sep>.package
            elif path.dirname.endswith("{}.package".format(os.sep)):
                return CreationConfig(
                    base_resolved_python_sha256=getdigest(sys.executable),
                    base_resolved_python_path=sys.executable,
                    tox_version=tox.__version__,
                    sitepackages=False,
                    usedevelop=False,
                    deps=[(getdigest(""), "setuptools >= 35.0.2"), (getdigest(""), "wheel")],
                    alwayscopy=False,
                )
            assert False  # pragma: no cover

    monkeypatch.setattr(CreationConfig, "readconfig", ProxyCurrentPython.readconfig)

    # provide as Python the current python executable
    def venv_lookup(venv, name):
        assert name == "python"
        venv.envconfig.envdir = py.path.local(sys.executable).join("..", "..")
        return sys.executable

    monkeypatch.setattr(VirtualEnv, "_venv_lookup", venv_lookup)

    # don't allow overriding the tox config data for the host Python
    def finish_venv(self):
        return

    monkeypatch.setattr(VirtualEnv, "finish", finish_venv)

    # we lie that it's an environment with no packages in it
    @tox.hookimpl
    def tox_runenvreport(venv, action):
        return []

    monkeypatch.setattr(venv, "tox_runenvreport", tox_runenvreport)

    # intercept the build session to save it and we intercept the popen invocations
    # collect all popen calls
    popen_list = []

    def popen(cmd, **kwargs):
        # we don't want to perform installation of new packages,
        # just replace with an always ok cmd
        if "pip" in cmd and "install" in cmd:
            cmd = ["python", "-c", "print({!r})".format(cmd)]
        ret = None
        try:
            ret = subprocess.Popen(cmd, **kwargs)
        except tox.exception.InvocationError as exception:  # pragma: no cover
            ret = exception  # pragma: no cover
        finally:
            # every call is recorded, successful or not
            popen_list.append((kwargs.get("env"), ret, cmd))
        return ret

    def build_session(config):
        session = tox.session.Session(config, popen=popen)
        res[id(session)] = Result(session)
        return session

    monkeypatch.setattr(tox.session, "build_session", build_session)
    return res
@pytest.fixture(scope="session")
def current_tox_py():
"""generate the current (test runners) python versions key
e.g. py37 when running under Python 3.7"""
return "{}{}{}".format("pypy" if tox.INFO.IS_PYPY else "py", *sys.version_info)
def pytest_runtest_setup(item):
    # start every test with a pristine global reporter
    reset_report()
def pytest_runtest_teardown(item):
    # leave no reporter state behind for the next test
    reset_report()
def pytest_pyfunc_call(pyfuncitem):
    # also reset right before the test function body runs
    reset_report()
def reset_report(quiet=0, verbose=0):
    """Reset the global tox reporter to the given verbosity levels."""
    from tox.reporter import _INSTANCE
    _INSTANCE._reset(quiet_level=quiet, verbose_level=verbose)
|
|
from __future__ import unicode_literals
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
from moto.core import BaseBackend
from moto.ec2 import ec2_backends
from moto.elb import elb_backends
from moto.elb.exceptions import LoadBalancerNotFoundError
# http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AS_Concepts.html#Cooldown
# Default scaling-policy cooldown, in seconds.
DEFAULT_COOLDOWN = 300
class InstanceState(object):
    """Pairs an EC2 instance with its auto-scaling lifecycle state."""

    def __init__(self, instance, lifecycle_state="InService"):
        # new instances are considered in service right away
        self.instance = instance
        self.lifecycle_state = lifecycle_state
class FakeScalingPolicy(object):
    """In-memory model of an auto-scaling policy.

    execute() applies the configured adjustment to the owning backend.
    """

    def __init__(self, name, adjustment_type, as_name, scaling_adjustment,
                 cooldown, autoscaling_backend):
        self.name = name
        self.adjustment_type = adjustment_type
        self.as_name = as_name
        self.scaling_adjustment = scaling_adjustment
        # fall back to the AWS default cooldown when none is supplied
        self.cooldown = cooldown if cooldown is not None else DEFAULT_COOLDOWN
        self.autoscaling_backend = autoscaling_backend

    def execute(self):
        backend = self.autoscaling_backend
        handlers = {
            'ExactCapacity': backend.set_desired_capacity,
            'ChangeInCapacity': backend.change_capacity,
            'PercentChangeInCapacity': backend.change_capacity_percent,
        }
        handler = handlers.get(self.adjustment_type)
        # unrecognized adjustment types are silently ignored, as before
        if handler is not None:
            handler(self.as_name, self.scaling_adjustment)
class FakeLaunchConfiguration(object):
    """In-memory model of an auto-scaling launch configuration."""

    def __init__(self, name, image_id, key_name, ramdisk_id, kernel_id, security_groups, user_data,
                 instance_type, instance_monitoring, instance_profile_name,
                 spot_price, ebs_optimized, associate_public_ip_address, block_device_mapping_dict):
        self.name = name
        self.image_id = image_id
        self.key_name = key_name
        self.ramdisk_id = ramdisk_id
        self.kernel_id = kernel_id
        # normalize falsy security groups to an empty list
        self.security_groups = security_groups if security_groups else []
        self.user_data = user_data
        self.instance_type = instance_type
        self.instance_monitoring = instance_monitoring
        self.instance_profile_name = instance_profile_name
        self.spot_price = spot_price
        self.ebs_optimized = ebs_optimized
        self.associate_public_ip_address = associate_public_ip_address
        self.block_device_mapping_dict = block_device_mapping_dict

    @classmethod
    def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
        """Create a launch configuration from a CloudFormation resource dict."""
        properties = cloudformation_json['Properties']
        instance_profile_name = properties.get("IamInstanceProfile")
        backend = autoscaling_backends[region_name]
        config = backend.create_launch_configuration(
            name=resource_name,
            image_id=properties.get("ImageId"),
            kernel_id=properties.get("KernelId"),
            ramdisk_id=properties.get("RamdiskId"),
            key_name=properties.get("KeyName"),
            security_groups=properties.get("SecurityGroups"),
            user_data=properties.get("UserData"),
            instance_type=properties.get("InstanceType"),
            instance_monitoring=properties.get("InstanceMonitoring"),
            instance_profile_name=instance_profile_name,
            spot_price=properties.get("SpotPrice"),
            ebs_optimized=properties.get("EbsOptimized"),
            associate_public_ip_address=properties.get("AssociatePublicIpAddress"),
            block_device_mappings=properties.get("BlockDeviceMapping.member")
        )
        return config

    @classmethod
    def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name):
        # CloudFormation update is modeled as delete + recreate
        cls.delete_from_cloudformation_json(original_resource.name, cloudformation_json, region_name)
        return cls.create_from_cloudformation_json(new_resource_name, cloudformation_json, region_name)

    @classmethod
    def delete_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
        backend = autoscaling_backends[region_name]
        try:
            backend.delete_launch_configuration(resource_name)
        except KeyError:
            # already gone -- deletion is idempotent
            pass

    def delete(self, region_name):
        backend = autoscaling_backends[region_name]
        backend.delete_launch_configuration(self.name)

    @property
    def physical_resource_id(self):
        # CloudFormation identifies launch configurations by their name
        return self.name

    @property
    def block_device_mappings(self):
        """Parsed boto BlockDeviceMapping, or None when none is configured."""
        if not self.block_device_mapping_dict:
            return None
        else:
            return self._parse_block_device_mappings()

    @property
    def instance_monitoring_enabled(self):
        # stringly-typed boolean for direct use in XML response templates
        if self.instance_monitoring:
            return 'true'
        return 'false'

    def _parse_block_device_mappings(self):
        block_device_map = BlockDeviceMapping()
        for mapping in self.block_device_mapping_dict:
            block_type = BlockDeviceType()
            mount_point = mapping.get('device_name')
            # ephemeral drives are identified by their virtual name
            if 'ephemeral' in mapping.get('virtual_name', ''):
                block_type.ephemeral_name = mapping.get('virtual_name')
            else:
                # NOTE(review): the 'ebs._*' key shapes presumably mirror the
                # request-parameter parsing layer -- confirm against the
                # responses module.
                block_type.volume_type = mapping.get('ebs._volume_type')
                block_type.snapshot_id = mapping.get('ebs._snapshot_id')
                block_type.delete_on_termination = mapping.get('ebs._delete_on_termination')
                block_type.size = mapping.get('ebs._volume_size')
                block_type.iops = mapping.get('ebs._iops')
            block_device_map[mount_point] = block_type
        return block_device_map
class FakeAutoScalingGroup(object):
    """In-memory model of an auto-scaling group; owns its instance states
    and launches/terminates EC2 instances through the backend."""

    def __init__(self, name, availability_zones, desired_capacity, max_size,
                 min_size, launch_config_name, vpc_zone_identifier,
                 default_cooldown, health_check_period, health_check_type,
                 load_balancers, placement_group, termination_policies,
                 autoscaling_backend, tags):
        self.autoscaling_backend = autoscaling_backend
        self.name = name
        self.availability_zones = availability_zones
        self.max_size = max_size
        self.min_size = min_size
        # raises KeyError if the named launch configuration does not exist
        self.launch_config = self.autoscaling_backend.launch_configurations[launch_config_name]
        self.launch_config_name = launch_config_name
        self.vpc_zone_identifier = vpc_zone_identifier
        self.default_cooldown = default_cooldown if default_cooldown else DEFAULT_COOLDOWN
        self.health_check_period = health_check_period
        self.health_check_type = health_check_type if health_check_type else "EC2"
        self.load_balancers = load_balancers
        self.placement_group = placement_group
        self.termination_policies = termination_policies
        self.instance_states = []
        # immediately launch instances to reach the desired capacity
        self.set_desired_capacity(desired_capacity)
        self.tags = tags if tags else []

    @classmethod
    def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
        """Build a group from a CloudFormation resource dict."""
        properties = cloudformation_json['Properties']
        launch_config_name = properties.get("LaunchConfigurationName")
        load_balancer_names = properties.get("LoadBalancerNames", [])
        backend = autoscaling_backends[region_name]
        group = backend.create_autoscaling_group(
            name=resource_name,
            availability_zones=properties.get("AvailabilityZones", []),
            desired_capacity=properties.get("DesiredCapacity"),
            max_size=properties.get("MaxSize"),
            min_size=properties.get("MinSize"),
            launch_config_name=launch_config_name,
            vpc_zone_identifier=(','.join(properties.get("VPCZoneIdentifier", [])) or None),
            default_cooldown=properties.get("Cooldown"),
            health_check_period=properties.get("HealthCheckGracePeriod"),
            health_check_type=properties.get("HealthCheckType"),
            load_balancers=load_balancer_names,
            placement_group=None,
            termination_policies=properties.get("TerminationPolicies", []),
            tags=properties.get("Tags", []),
        )
        return group

    @classmethod
    def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name):
        # CloudFormation update is modeled as delete + recreate
        cls.delete_from_cloudformation_json(original_resource.name, cloudformation_json, region_name)
        return cls.create_from_cloudformation_json(new_resource_name, cloudformation_json, region_name)

    @classmethod
    def delete_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
        backend = autoscaling_backends[region_name]
        try:
            backend.delete_autoscaling_group(resource_name)
        except KeyError:
            pass
        except LoadBalancerNotFoundError:
            # sometimes the ELB gets modified before the ASG, so just skip over desired capacity
            backend.autoscaling_groups.pop(resource_name, None)

    def delete(self, region_name):
        backend = autoscaling_backends[region_name]
        try:
            backend.delete_autoscaling_group(self.name)
        except LoadBalancerNotFoundError:
            # sometimes the ELB gets deleted before the ASG, so just skip over desired capacity
            backend.autoscaling_groups.pop(self.name, None)

    @property
    def physical_resource_id(self):
        # CloudFormation identifies auto-scaling groups by their name
        return self.name

    def update(self, availability_zones, desired_capacity, max_size, min_size,
               launch_config_name, vpc_zone_identifier, default_cooldown,
               health_check_period, health_check_type, load_balancers,
               placement_group, termination_policies):
        """Apply new settings and rescale to *desired_capacity*.

        NOTE(review): default_cooldown, load_balancers, placement_group and
        termination_policies are accepted but not stored here -- confirm
        whether that is intentional.
        """
        self.availability_zones = availability_zones
        self.max_size = max_size
        self.min_size = min_size
        if launch_config_name:
            # re-resolve the launch configuration when a new name is given
            self.launch_config = self.autoscaling_backend.launch_configurations[launch_config_name]
            self.launch_config_name = launch_config_name
        self.vpc_zone_identifier = vpc_zone_identifier
        self.health_check_period = health_check_period
        self.health_check_type = health_check_type
        self.set_desired_capacity(desired_capacity)

    def set_desired_capacity(self, new_capacity):
        """Scale the group to *new_capacity* (min_size when None), launching
        or terminating EC2 instances through the backend as needed."""
        if new_capacity is None:
            self.desired_capacity = self.min_size
        else:
            self.desired_capacity = new_capacity
        curr_instance_count = len(self.instance_states)
        if self.desired_capacity == curr_instance_count:
            return
        if self.desired_capacity > curr_instance_count:
            # Need more instances
            count_needed = int(self.desired_capacity) - int(curr_instance_count)
            reservation = self.autoscaling_backend.ec2_backend.add_instances(
                self.launch_config.image_id,
                count_needed,
                self.launch_config.user_data,
                self.launch_config.security_groups,
                instance_type=self.launch_config.instance_type,
            )
            for instance in reservation.instances:
                # link each new instance back to its group
                instance.autoscaling_group = self
                self.instance_states.append(InstanceState(instance))
        else:
            # Need to remove some instances: oldest (front of the list) first
            count_to_remove = curr_instance_count - self.desired_capacity
            instances_to_remove = self.instance_states[:count_to_remove]
            instance_ids_to_remove = [instance.instance.id for instance in instances_to_remove]
            self.autoscaling_backend.ec2_backend.terminate_instances(instance_ids_to_remove)
            self.instance_states = self.instance_states[count_to_remove:]
class AutoScalingBackend(BaseBackend):
def __init__(self, ec2_backend, elb_backend):
self.autoscaling_groups = {}
self.launch_configurations = {}
self.policies = {}
self.ec2_backend = ec2_backend
self.elb_backend = elb_backend
def reset(self):
ec2_backend = self.ec2_backend
elb_backend = self.elb_backend
self.__dict__ = {}
self.__init__(ec2_backend, elb_backend)
def create_launch_configuration(self, name, image_id, key_name, kernel_id, ramdisk_id,
security_groups, user_data, instance_type,
instance_monitoring, instance_profile_name,
spot_price, ebs_optimized, associate_public_ip_address, block_device_mappings):
launch_configuration = FakeLaunchConfiguration(
name=name,
image_id=image_id,
key_name=key_name,
kernel_id=kernel_id,
ramdisk_id=ramdisk_id,
security_groups=security_groups,
user_data=user_data,
instance_type=instance_type,
instance_monitoring=instance_monitoring,
instance_profile_name=instance_profile_name,
spot_price=spot_price,
ebs_optimized=ebs_optimized,
associate_public_ip_address=associate_public_ip_address,
block_device_mapping_dict=block_device_mappings,
)
self.launch_configurations[name] = launch_configuration
return launch_configuration
def describe_launch_configurations(self, names):
configurations = self.launch_configurations.values()
if names:
return [configuration for configuration in configurations if configuration.name in names]
else:
return list(configurations)
def delete_launch_configuration(self, launch_configuration_name):
self.launch_configurations.pop(launch_configuration_name, None)
def create_autoscaling_group(self, name, availability_zones,
desired_capacity, max_size, min_size,
launch_config_name, vpc_zone_identifier,
default_cooldown, health_check_period,
health_check_type, load_balancers,
placement_group, termination_policies, tags):
def make_int(value):
return int(value) if value is not None else value
max_size = make_int(max_size)
min_size = make_int(min_size)
default_cooldown = make_int(default_cooldown)
if health_check_period is None:
health_check_period = 300
else:
health_check_period = make_int(health_check_period)
group = FakeAutoScalingGroup(
name=name,
availability_zones=availability_zones,
desired_capacity=desired_capacity,
max_size=max_size,
min_size=min_size,
launch_config_name=launch_config_name,
vpc_zone_identifier=vpc_zone_identifier,
default_cooldown=default_cooldown,
health_check_period=health_check_period,
health_check_type=health_check_type,
load_balancers=load_balancers,
placement_group=placement_group,
termination_policies=termination_policies,
autoscaling_backend=self,
tags=tags,
)
self.autoscaling_groups[name] = group
self.update_attached_elbs(group.name)
return group
def update_autoscaling_group(self, name, availability_zones,
desired_capacity, max_size, min_size,
launch_config_name, vpc_zone_identifier,
default_cooldown, health_check_period,
health_check_type, load_balancers,
placement_group, termination_policies):
group = self.autoscaling_groups[name]
group.update(availability_zones, desired_capacity, max_size,
min_size, launch_config_name, vpc_zone_identifier,
default_cooldown, health_check_period, health_check_type,
load_balancers, placement_group, termination_policies)
return group
def describe_autoscaling_groups(self, names):
groups = self.autoscaling_groups.values()
if names:
return [group for group in groups if group.name in names]
else:
return list(groups)
def delete_autoscaling_group(self, group_name):
self.set_desired_capacity(group_name, 0)
self.autoscaling_groups.pop(group_name, None)
def describe_autoscaling_instances(self):
instance_states = []
for group in self.autoscaling_groups.values():
instance_states.extend(group.instance_states)
return instance_states
def set_desired_capacity(self, group_name, desired_capacity):
group = self.autoscaling_groups[group_name]
group.set_desired_capacity(desired_capacity)
self.update_attached_elbs(group_name)
def change_capacity(self, group_name, scaling_adjustment):
group = self.autoscaling_groups[group_name]
desired_capacity = group.desired_capacity + scaling_adjustment
self.set_desired_capacity(group_name, desired_capacity)
def change_capacity_percent(self, group_name, scaling_adjustment):
    """Adjust desired capacity by a percentage of its current value.

    Per http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-scale-based-on-demand.html:
    if PercentChangeInCapacity yields a fractional change between 0 and 1
    instance, Auto Scaling rounds it up to 1; any larger value is rounded
    down (e.g. a change resulting in 12.5 instances becomes 12).
    """
    current = self.autoscaling_groups[group_name].desired_capacity
    target = current * (1 + scaling_adjustment / 100.0)
    if current < target < current + 1:
        # Fractional increase smaller than one whole instance: bump by 1.
        target = current + 1
    else:
        # Otherwise truncate toward zero.
        target = int(target)
    self.set_desired_capacity(group_name, target)
def create_autoscaling_policy(self, name, adjustment_type, as_name,
                              scaling_adjustment, cooldown):
    """Create a scaling policy, register it under *name*, and return it."""
    new_policy = FakeScalingPolicy(name, adjustment_type, as_name,
                                   scaling_adjustment, cooldown, self)
    self.policies[name] = new_policy
    return new_policy
def describe_policies(self):
    """Return every registered scaling policy as a list."""
    return [policy for policy in self.policies.values()]
def delete_policy(self, group_name):
    """Remove the policy registered under *group_name*; ignore unknown names."""
    if group_name in self.policies:
        del self.policies[group_name]
def execute_policy(self, group_name):
    """Run the scaling policy registered under *group_name*."""
    self.policies[group_name].execute()
def update_attached_elbs(self, group_name):
    """Sync each load balancer attached to a group with its instances.

    Instances in the group but missing from the ELB are registered;
    instances on the ELB that left the group are deregistered.
    """
    grp = self.autoscaling_groups[group_name]
    group_instances = set(state.instance.id for state in grp.instance_states)
    attached = self.elb_backend.describe_load_balancers(names=grp.load_balancers)
    for balancer in attached:
        on_elb = set(balancer.instance_ids)
        self.elb_backend.register_instances(balancer.name,
                                            group_instances - on_elb)
        self.elb_backend.deregister_instances(balancer.name,
                                              on_elb - group_instances)
# Build one AutoScalingBackend per region, wired to that region's
# EC2 and ELB backends.
autoscaling_backends = {
    region: AutoScalingBackend(ec2_backend, elb_backends[region])
    for region, ec2_backend in ec2_backends.items()
}
|
|
# SUN
"""
0000 0010 0000 = 0x020
0100 0010 0010 = 0x422
0010 0000 0100 = 0x204
0000 1111 0000 = 0x0F0
0001 1111 1000 = 0x1F8
1101 1111 1000 = 0xDF8
0001 1111 1011 = 0x1FB
0001 1111 1000 = 0x1F8
0000 1111 0000 = 0x0F0
0010 0000 0100 = 0x204
0100 0100 0010 = 0x442
0000 0100 0000 = 0x040
"""
"""
[ ] airplane
[ ] alarm_clock
[ ] anger
[X] angry
[ ] apple
[ ] aquarius
[ ] aries
[ ] arrow_heading_down
[ ] arrow_heading_up
[ ] arrow_lower_left
[ ] arrow_lower_right
[ ] arrow_up_down
[ ] arrow_upper_left
[ ] arrow_upper_right
[ ] art
[ ] athletic_shoe
[ ] atm
[ ] baby_chick
[ ] banana
[ ] bangbang
[ ] bank
[ ] baseball
[ ] basketball
[ ] beer
[ ] bell
[ ] bike
[ ] birthday
[ ] black_nib
[ ] blue_car
[ ] bomb
[ ] book
[ ] boom
[ ] bread
[ ] broken_heart
[ ] bulb
[ ] bullettrain_side
[ ] bus
[ ] bust_in_silhouette
[ ] cake
[ ] calling
[ ] camera
[ ] cancer
[ ] capricorn
[ ] carousel_horse
[X] cat
[ ] cd
[ ] checkered_flag
[ ] cherries
[ ] cherry_blossom
[ ] christmas_tree
[ ] circus_tent
[ ] cl
[ ] clapper
[X] closed_umbrella
[X] cloud
[ ] clubs
[ ] cocktail
[ ] coffee
[ ] computer
[ ] confounded
[ ] convenience_store
[ ] copyright
[ ] crescent_moon
[ ] crown
[ ] cry
[X] curly_loop
[ ] cyclone
[ ] dash
[ ] diamond_shape_with_a_dot_inside
[ ] diamonds
[ ] disappointed
[ ] dizzy_face
[X] dog
[ ] door
[ ] droplet
[ ] ear
[ ] eight
[ ] end
[ ] envelope
[ ] envelope_with_arrow
[ ] exclamation
[ ] eyeglasses
[ ] eyes
[ ] fax
[ ] first_quarter_moon
[ ] fish
[ ] fist
[ ] five
[ ] foggy
[ ] footprints
[ ] fork_and_knife
[ ] four
[ ] four_leaf_clover
[ ] free
[ ] fuelpump
[ ] full_moon
[ ] gemini
[ ] gift
[ ] golf
[X] grin
[ ] hamburger
[ ] handbag
[ ] hash
[ ] headphones
[ ] heart
[ ] heart_eyes
[ ] heartbeat
[ ] hearts
[ ] high_heel
[ ] horse
[ ] hospital
[ ] hotel
[ ] hotsprings
[ ] hourglass_flowing_sand
[ ] house
[ ] id
[ ] interrobang
[ ] iphone
[ ] jeans
[ ] key
[ ] kiss
[X] laughing
[ ] left_right_arrow
[ ] leftwards_arrow_with_hook
[ ] leo
[ ] libra
[ ] lipstick
[ ] love_letter
[ ] m
[ ] mag
[ ] maple_leaf
[ ] microphone
[ ] moneybag
[ ] mount_fuji
[ ] movie_camera
[ ] musical_note
[ ] new
[ ] new_moon
[ ] ng
[ ] night_with_stars
[ ] nine
[ ] no_smoking
[ ] notes
[ ] ocean
[ ] office
[ ] ok
[ ] on
[ ] one
[ ] pager
[ ] paperclip
[ ] parking
[ ] pencil
[ ] pencil2
[ ] penguin
[X] pensive
[X] persevere
[ ] pig
[ ] pisces
[ ] post_office
[ ] pouch
[ ] punch
[ ] purse
[ ] rage
[ ] railway_car
[ ] raised_hand
[ ] ramen
[ ] recycle
[ ] red_car
[ ] registered
[ ] relieved
[ ] restroom
[ ] ribbon
[ ] rice_ball
[ ] ring
[ ] runner
[ ] running_shirt_with_sash
[ ] sagittarius
[ ] sailboat
[ ] sake
[ ] school
[ ] scissors
[ ] scorpius
[ ] scream
[ ] seat
[ ] secret
[ ] seedling
[ ] seven
[ ] ship
[ ] shirt
[ ] six
[ ] ski
[ ] smiley
[ ] smirk
[ ] smoking
[ ] snail
[ ] snowboarder
[X] snowman
[ ] sob
[ ] soccer
[ ] soon
[ ] spades
[ ] sparkles
[ ] stuck_out_tongue_winking_eye
[X] sunny
[ ] sweat
[ ] sweat_drops
[ ] sweat_smile
[ ] taurus
[ ] tea
[ ] telephone
[ ] tennis
[ ] three
[ ] thumbsup
[ ] ticket
[ ] tm
[ ] tophat
[ ] traffic_light
[ ] triangular_flag_on_post
[ ] tulip
[ ] tv
[ ] two
[ ] two_hearts
[ ] u5408
[ ] u6e80
[ ] u7981
[ ] u7a7a
[X] umbrella
[ ] unamused
[ ] v
[ ] video_game
[ ] virgo
[ ] warning
[ ] watch
[ ] wavy_dash
[ ] waxing_gibbous_moon
[ ] wheelchair
[ ] wine_glass
[ ] wink
[ ] wrench
[ ] yen
[ ] yum
[ ] zap
[ ] zero
[ ] zzz
"""
# 12x12 monochrome icon bitmaps: each list holds 12 rows, one int per row.
# The PIL renderer at the bottom of this file masks rows with
# (1 << (12 - x - 1)), so bit 11 (0x800) is the leftmost pixel of a row
# and bit 0 the rightmost.
sunny = [0x020, 0x422, 0x204, 0x0F0,
         0x1F8, 0xDF8, 0x1FB, 0x1F8,
         0x0F0, 0x204, 0x442, 0x040]
cloud = [0x000, 0x000, 0x000, 0x018,
         0x1A4, 0x242, 0x402, 0x402,
         0x244, 0x1B8, 0x000, 0x000]
umbrella = [0x040, 0x0E0, 0x3F8, 0x7FC,
            0x7FC, 0xFFE, 0xFFE, 0x952,
            0x040, 0x040, 0x240, 0x180]
snowman = [0x0E0, 0x110, 0x208, 0x2A8,
           0x208, 0x110, 0x208, 0x404,
           0x404, 0x404, 0x208, 0x1F0]
lightning = [0x008, 0x018, 0x030, 0x070,
             0x0E0, 0x1FC, 0x3F8, 0x070,
             0x0E0, 0x0C0, 0x180, 0x100]
rain = [0xC63, 0x000, 0x000, 0x30C,
        0x000, 0x000, 0xC63, 0x000,
        0x000, 0x30C, 0x000, 0xC63]
dog = [0x402, 0x606, 0x50A, 0x4F2,
       0x402, 0x000, 0x090, 0x000,
       0x000, 0x060, 0x060, 0x000]
cat = [0x801, 0xC03, 0xAF5, 0x801,
       0x000, 0x108, 0x000, 0xF0F,
       0x000, 0x264, 0x198, 0x000]
sailboat = [0x040, 0x040, 0x0D0, 0x150,
            0x158, 0x258, 0x25C, 0x7DE,
            0x000, 0x3FC, 0x1F8, 0x000]
tree = [0x040, 0x1F0, 0x0E0, 0x1F0,
        0x378, 0x1F0, 0x3D8, 0xFFE,
        0x040, 0x1F0, 0x110, 0x0E0]
# Face icons (see the emoji checklist above for the names they mirror).
grin = [0x000, 0x000, 0x108, 0x294,
        0x000, 0x000, 0x1F8, 0x108,
        0x1F8, 0x000, 0x000, 0x000]
angry = [0x000, 0x000, 0x204, 0x108,
         0x090, 0x000, 0x000, 0x060,
         0x090, 0x108, 0x000, 0x000]
persevere = [0x000, 0x000, 0x110, 0x208,
             0x404, 0x000, 0x000, 0x1F0,
             0x208, 0x000, 0x000, 0x000]
pensive = [0x000, 0x000, 0x090, 0x108,
           0x204, 0x000, 0x000, 0x168,
           0x294, 0x000, 0x000, 0x000]
laughing = [0x000, 0x000, 0x294, 0x108,
            0x294, 0x000, 0x060, 0x090,
            0x090, 0x060, 0x000, 0x000]
# Named 12x12 monochrome bitmaps, keyed by icon name; one int per row,
# bit 11 (0x800) being the leftmost pixel of a row.
BMP = {
    "sunny": [0x020, 0x422, 0x204, 0x0F0,
              0x1F8, 0xDF8, 0x1FB, 0x1F8,
              0x0F0, 0x204, 0x442, 0x040],
    "cloud": [0x000, 0x000, 0x000, 0x018,
              0x1A4, 0x242, 0x402, 0x402,
              0x244, 0x1B8, 0x000, 0x000],
    "lightning": [0x008, 0x018, 0x030, 0x070,
                  0x0E0, 0x1FC, 0x3F8, 0x070,
                  0x0E0, 0x0C0, 0x180, 0x100],
    "house": [0x060, 0x0F0, 0x1F8, 0x3FC,
              0x7FE, 0xFFF, 0xFFF, 0x264,
              0x264, 0x3FC, 0x3FC, 0x3FC],
    "clock": [0x0F0, 0x30C, 0x402, 0x406,
              0x909, 0x891, 0x861, 0x801,
              0x402, 0x402, 0x30C, 0x0F0],
    "end": [0x180, 0x3FC, 0x7FC, 0x7FC,
            0x3FC, 0x180, 0x000, 0xEE2,
            0xAA2, 0xEAE, 0x8AA, 0xEAE],
    "bulb": [0x040, 0x444, 0x208, 0x0E0,
             0x110, 0xD16, 0x110, 0x0E0,
             0x000, 0x0E0, 0x0E0, 0x0E0],
    "train": [0x1F8, 0x30C, 0x3FC, 0x204,
              0x204, 0x3FC, 0x3FC, 0x36C,
              0x3FC, 0x090, 0x108, 0x204],
    "exclamation": [0x070, 0x070, 0x070, 0x060,
                    0x060, 0x060, 0x040, 0x040,
                    0x040, 0x000, 0x0C0, 0x0C0],
    "curly_loop": [0x1F0, 0x60C, 0x802, 0x802,
                   0x0F1, 0x109, 0x269, 0x289,
                   0x292, 0x262, 0x10C, 0x0F0],
    "closed_umbrella": [0x0C0, 0x120, 0x020, 0x020,
                        0x070, 0x070, 0x070, 0x070,
                        0x070, 0x070, 0x020, 0x020],
}
from PIL import Image


def render_bitmap(name, size=12):
    """Render the ``size`` x ``size`` 1-bit bitmap ``BMP[name]`` to '<name>.bmp'.

    Bit (size - x - 1) of row y maps to pixel (x, y), so the highest bit
    is the leftmost column.
    """
    im = Image.new("1", (size, size))
    pix = im.load()
    rows = BMP[name]
    for y in range(size):
        row = rows[y]
        for x in range(size):
            # BUG FIX: mode "1" pixel values must be 0 or 255. The old code
            # assigned the raw masked value (e.g. 0x100), which is cast to a
            # byte on store and can truncate to 0 for bits >= 8, silently
            # blanking set pixels.
            pix[x, y] = 255 if row & (1 << (size - x - 1)) else 0
    im.save(name + ".bmp")


render_bitmap("curly_loop")
|
|
# -*- coding: utf-8 -*-
# pylint: disable=C0103
"""
Unconstrained optimization tools to minimize or maximize an objective function.
"""
# Author: bertrand-l
# License: BSD
from __future__ import absolute_import, division, print_function, unicode_literals
from copy import copy
import numpy as np
from warnings import warn
from ..util import assert_in, assert_positive, assert_Xy
from .objective import BaseObjective
__all__ = ('GradientDescent', )
class BaseUnconstrainedOptimizer(object):
    """
    Base class for unconstrained minimizers.

    Subclasses must overwrite at least minimize, and possibly __init__ and
    settings to provide a more explicit interface.
    """

    def __init__(self, **kwargs):
        # Settings live in a plain dict; subclasses validate them through
        # an explicit settings() signature.
        self._settings = kwargs

    def __str__(self):
        settings = self._settings
        kwargs = ('{}: {}'.format(k, settings[k]) for k in sorted(settings))
        return "{} object\n {}".format(self.__class__.__name__,
                                       "\n ".join(kwargs))

    def __repr__(self):
        settings = self._settings
        kwargs = ('{}={}'.format(k, settings[k]) for k in sorted(settings))
        return "<{}({}) object at {}>".format(self.__class__.__name__,
                                              ','.join(kwargs),
                                              hex(id(self)))

    def minimize(self, objective, theta, *args, **kwargs):
        """
        Returns the argument that minimizes the objective function.

        This base implementation evaluates the objective once and returns
        `theta` unchanged with an empty info dict; subclasses implement
        the actual search.
        """
        obj = objective(theta, *args, **kwargs)
        return theta, {}

    def settings(self, **kwargs):
        """
        Set parameter(s).

        With no keyword arguments, return the current settings dict.
        Otherwise update only keys that already exist and whose new value
        is not None.
        """
        # BUG FIX: the original tested `len(kwargs) is 0`, comparing an int
        # with `is`; identity checks against literals are unreliable (and a
        # SyntaxWarning on modern CPython). Truthiness is the correct test.
        if not kwargs:
            return self._settings
        self._settings.update({key: kwargs[key] for key in kwargs
                               if key in self._settings and
                               kwargs[key] is not None})
class GradientDescent(BaseUnconstrainedOptimizer):
    """
    Batch, mini-batch or stochastic gradient descent to minimize a smooth
    objective/error/cost function.

    Parameters
    ----------
    method : {'batch', 'minibatch', 'stochastic'}, optional
        Method for gradient descent algorithm. Batch is not recommended for
        large datasets.
    alpha : float > 0 or 0, optional
        Step size or learning rate. If `alpha` is 0, a reasonable (i.e.,
        hopefully not too crazy) value is used.
    adapt : boolean, optional
        Use an adaptative learning rate (Barzilai-Borwein method).
    atol, rtol : float, optional
        Absolute and relative tolerance.
    nstepmax : positive integer, optional
        Maximum number of steps.
    nminibatch : positive integer, optional
        Number of samples used in the mini-batch.
    """

    def __init__(self, method='minibatch', alpha=0., adapt=True,
                 atol=1.e-12, rtol=1.e-6, nstepmax=5000, nminibatch=50):
        self._settings = {}
        self.settings(method=method, alpha=alpha, adapt=adapt, atol=atol,
                      rtol=rtol, nstepmax=nstepmax, nminibatch=nminibatch)

    def minimize(self, objective, theta, X, y):
        """
        Find the minimum of the objective function.

        Parameters
        ----------
        objective : instance of a BaseObjective subclass
            The cost/error/likelihood function to minimize. Must take theta, X
            and y as arguments.
        theta : array_like
            Guess for the model parameters.
        X : array_like
            input features, shape=(n_samples, n_features).
        y : array_like
            Targets, shape=(n_samples,).

        Returns
        -------
        theta : array_like
            Parameters at the minimum of the objective function
        info : dict
            Keys "converged" (bool) and "nsteps" (int).

        Raises
        ------
        TypeError
            If `objective` is not a BaseObjective instance.
        """
        X, y = assert_Xy(X, y)
        theta = np.asarray(theta)
        if not isinstance(objective, BaseObjective):
            raise TypeError("'objective' is not a instance of a " +
                            "BaseObjective subclass.")
        method = self._settings['method']
        alpha = self._settings['alpha']
        adapt = self._settings['adapt']
        atol = self._settings['atol']
        rtol = self._settings['rtol']
        nstepmax = self._settings['nstepmax']
        nminibatch = self._settings['nminibatch']
        if nminibatch >= len(X):
            # Mini-batches at least as large as the dataset: plain batch GD.
            method = 'batch'
        elif method == 'stoch':
            # Stochastic GD is a mini-batch of size one.
            method, nminibatch = 'minib', 1
        # find a reasonable alpha: halve the step until it decreases J,
        # then halve once more as a safety margin
        if alpha < 1.e-12 or adapt:
            alpha = 0.4
            J0 = objective.J(theta, X, y)
            gradJ0 = objective.gradient_J(theta, X, y)
            while (alpha > 1.e-12 and
                   J0 < objective.J(theta - alpha * gradJ0, X, y)):
                alpha *= 0.5
            alpha *= 0.5
        # minimize objective
        theta1, theta2, gradient1, J1 = None, None, None, None
        indices_minib, iminib = None, 0
        k, converged = 0, False
        while k < nstepmax and not converged and not np.isnan(theta).any():
            # (d / dtheta) objective
            if method == 'minib':
                if (indices_minib is None or
                        iminib + nminibatch > len(indices_minib)):
                    # Reshuffle once the current permutation is exhausted.
                    iminib = 0
                    indices_minib = list(range(len(X)))
                    np.random.shuffle(indices_minib)
                ind = indices_minib[iminib:iminib + nminibatch]
                iminib += nminibatch
                gradient = objective.gradient_J(theta, X[ind, :], y[ind])
            else:
                gradient = objective.gradient_J(theta, X, y)
            # Barzilai and Borwein learning rate
            if adapt and theta1 is not None and gradient1 is not None:
                dtheta = theta - theta1
                dgradient = gradient - gradient1
                dtdg = np.dot(dtheta, dgradient)
                dgdg = np.dot(dgradient, dgradient)
                alpha = dtdg / max(dgdg, 1e-18)
                # Clamp the magnitude to [1e-15, 1e+8], keeping the sign.
                alpha = np.sign(alpha) * min(1.e+8, max(1.e-15, np.abs(alpha)))
            # update theta (keep the two previous iterates for the
            # convergence test)
            theta1, theta2 = copy(theta), copy(theta1)
            theta -= alpha * gradient
            gradient1 = copy(gradient)
            k += 1
            # check convergence over the last three iterates
            if theta2 is not None:
                delta = np.maximum(np.abs(theta - theta1),
                                   np.abs(theta1 - theta2))
                # BUG FIX: np.minimum is a binary ufunc whose third
                # positional argument is `out`. The original
                # np.minimum(a, b, c) therefore ignored theta2 in the
                # comparison (and wrote into the temporary). Nest the calls
                # to take the true element-wise minimum of all three.
                theta_min = np.minimum(np.minimum(np.abs(theta),
                                                  np.abs(theta1)),
                                       np.abs(theta2))
                if (delta <= np.maximum(atol, rtol * theta_min)).all():
                    # Parameters have stalled; confirm with the objective
                    # value before declaring convergence.
                    if J1 is None:
                        J1 = objective.J(theta, X, y)
                    else:
                        J = objective.J(theta, X, y)
                        if abs(J - J1) <= max(atol,
                                              rtol * min(abs(J), abs(J1))):
                            converged = True
                        J1 = copy(J)
        if not converged:
            warn("'GradientDescent' has not converged.")
        info = {"converged": converged, "nsteps": k}
        return theta, info

    def settings(self, method=None, alpha=None, adapt=None, atol=None,
                 rtol=None, nstepmax=None, nminibatch=None):
        """
        Optionally sets one or several parameters and returns current settings.

        Parameters
        ----------
        method : {'batch', 'minibatch', 'stochastic'}, optional
            Method for gradient descent algorithm. Batch is not recommended for
            large datasets.
        alpha : float > 0 or 0, optional
            Step size or learning rate (should not be larger than 0.3). If
            `alpha` is 0, a reasonable (i.e., hopefully not too crazy) value is
            used.
        adapt : boolean, optional
            Use an adaptative learning rate (Barzilai-Borwein method).
        atol, rtol : float, optional
            Absolute and relative tolerance.
        nstepmax : positive integer, optional
            Maximum number of steps.
        nminibatch : positive integer, optional
            Number of samples used in the mini-batch.

        Raises
        ------
        TypeError, ValueError
        """
        if method is not None:
            # Normalize: strip, drop dashes, lowercase, keep first 5 chars
            # so 'minibatch'/'mini-batch' -> 'minib', 'stochastic' -> 'stoch'.
            method = str(method).strip().replace('-', '').lower()[:5]
            assert_in(method, 'method', ('batch', 'minib', 'stoch'))
            self._settings['method'] = method
        if alpha is not None:
            assert_positive(alpha, 'alpha', nonnegative=True)
            self._settings['alpha'] = alpha
        if adapt is not None:
            assert_in(adapt, 'adapt', (True, False))
            self._settings['adapt'] = adapt
        if atol is not None:
            assert_positive(atol, 'atol')
            self._settings['atol'] = atol
        if rtol is not None:
            assert_positive(rtol, 'rtol')
            self._settings['rtol'] = rtol
        if nstepmax is not None:
            nstepmax = int(nstepmax)
            assert_positive(nstepmax, 'nstepmax')
            self._settings['nstepmax'] = nstepmax
        if nminibatch is not None:
            nminibatch = int(nminibatch)
            assert_positive(nminibatch, 'nminibatch')
            self._settings['nminibatch'] = nminibatch
        return self._settings
|
|
'''
TUIO Input Provider
===================
TUIO is the de facto standard network protocol for the transmission of
touch and fiducial information between a server and a client. To learn
more about TUIO (which is itself based on the OSC protocol), please
refer to http://tuio.org -- The specification should be of special
interest.
Configure a TUIO provider in the config.ini
-------------------------------------------
The TUIO provider can be configured in the configuration file in the
``[input]`` section::
[input]
# name = tuio,<ip>:<port>
multitouchtable = tuio,192.168.0.1:3333
Configure a TUIO provider in the App
------------------------------------
You must add the provider before your application is run, like this::
from kivy.app import App
from kivy.config import Config
class TestApp(App):
def build(self):
Config.set('input', 'multitouchscreen1', 'tuio,0.0.0.0:3333')
# You can also add a second TUIO listener
# Config.set('input', 'source2', 'tuio,0.0.0.0:3334')
# Then do the usual things
# ...
return
'''
__all__ = ('TuioMotionEventProvider', 'Tuio2dCurMotionEvent',
'Tuio2dObjMotionEvent')
from kivy.logger import Logger
from functools import partial
from collections import deque
from kivy.input.provider import MotionEventProvider
from kivy.input.factory import MotionEventFactory
from kivy.input.motionevent import MotionEvent
from kivy.input.shape import ShapeRect
class TuioMotionEventProvider(MotionEventProvider):
    '''The TUIO provider listens to a socket and handles some of the incoming
    OSC messages:

    * /tuio/2Dcur
    * /tuio/2Dobj

    You can easily extend the provider to handle new TUIO paths like so::

        # Create a class to handle the new TUIO type/path
        # Replace NEWPATH with the pathname you want to handle
        class TuioNEWPATHMotionEvent(MotionEvent):

            def __init__(self, id, args):
                super(TuioNEWPATHMotionEvent, self).__init__(id, args)

            def depack(self, args):
                # In this method, implement 'unpacking' for the received
                # arguments. you basically translate from TUIO args to Kivy
                # MotionEvent variables. If all you receive are x and y
                # values, you can do it like this:
                if len(args) == 2:
                    self.sx, self.sy = args
                    self.profile = ('pos', )
                    self.sy = 1 - self.sy
                super(TuioNEWPATHMotionEvent, self).depack(args)

        # Register it with the TUIO MotionEvent provider.
        # You obviously need to replace the PATH placeholders appropriately.
        TuioMotionEventProvider.register('/tuio/PATH', TuioNEWPATHMotionEvent)

    .. note::

        The class name is of no technical importance. Your class will be
        associated with the path that you pass to the ``register()``
        function. To keep things simple, you should name your class after the
        path that it handles, though.
    '''

    # Class-level registry mapping OSC paths to the MotionEvent subclass
    # that handles them; shared by every provider instance.
    __handlers__ = {}

    def __init__(self, device, args):
        # *args* is the config string after 'tuio,', e.g. '127.0.0.1:3333'.
        super(TuioMotionEventProvider, self).__init__(device, args)
        args = args.split(',')
        # NOTE(review): str.split(',') always returns at least one element,
        # so this guard looks unreachable -- presumably meant to reject an
        # empty configuration; confirm intent.
        if len(args) <= 0:
            Logger.error('Tuio: Invalid configuration for TUIO provider')
            Logger.error('Tuio: Format must be ip:port (eg. 127.0.0.1:3333)')
            err = 'Tuio: Current configuration is <%s>' % (str(','.join(args)))
            Logger.error(err)
            return
        ipport = args[0].split(':')
        if len(ipport) != 2:
            Logger.error('Tuio: Invalid configuration for TUIO provider')
            Logger.error('Tuio: Format must be ip:port (eg. 127.0.0.1:3333)')
            err = 'Tuio: Current configuration is <%s>' % (str(','.join(args)))
            Logger.error(err)
            return
        self.ip, self.port = args[0].split(':')
        self.port = int(self.port)
        self.handlers = {}
        self.oscid = None
        # Events are appended by the OSC server thread (_osc_tuio_cb) and
        # popped on the main loop side in update().
        self.tuio_event_q = deque()
        # Per-path dict of currently-alive touches, keyed by TUIO session id.
        self.touches = {}

    @staticmethod
    def register(oscpath, classname):
        '''Register a new path to handle in TUIO provider'''
        TuioMotionEventProvider.__handlers__[oscpath] = classname

    @staticmethod
    def unregister(oscpath, classname):
        '''Unregister a path to stop handling it in the TUIO provider'''
        # NOTE(review): `classname` is accepted but unused -- removal is by
        # path only.
        if oscpath in TuioMotionEventProvider.__handlers__:
            del TuioMotionEventProvider.__handlers__[oscpath]

    @staticmethod
    def create(oscpath, **kwargs):
        '''Create a touch event from a TUIO path'''
        if oscpath not in TuioMotionEventProvider.__handlers__:
            raise Exception('Unknown %s touch path' % oscpath)
        return TuioMotionEventProvider.__handlers__[oscpath](**kwargs)

    def start(self):
        '''Start the TUIO provider'''
        # oscpy is imported lazily so the provider can be configured even
        # when the dependency is absent; starting without it raises.
        try:
            from oscpy.server import OSCThreadServer
        except ImportError:
            Logger.info(
                'Please install the oscpy python module to use the TUIO '
                'provider.'
            )
            raise
        self.oscid = osc = OSCThreadServer()
        osc.listen(self.ip, self.port, default=True)
        for oscpath in TuioMotionEventProvider.__handlers__:
            self.touches[oscpath] = {}
            # Bind each registered path; the path is baked in via partial
            # so the callback knows which handler dict to use.
            osc.bind(oscpath, partial(self._osc_tuio_cb, oscpath))

    def stop(self):
        '''Stop the TUIO provider'''
        self.oscid.stop_all()

    def update(self, dispatch_fn):
        '''Update the TUIO provider (pop events from the queue)'''
        # read the Queue with event
        while True:
            try:
                value = self.tuio_event_q.pop()
            except IndexError:
                # queue is empty, we're done for now
                return
            self._update(dispatch_fn, value)

    def _osc_tuio_cb(self, oscpath, address, *args):
        # Runs on the OSC server thread: only enqueue, never dispatch here.
        self.tuio_event_q.appendleft([oscpath, address, args])

    def _update(self, dispatch_fn, value):
        # Translate one queued OSC message into begin/update/end dispatches.
        oscpath, command, args = value
        # verify commands
        if command not in [b'alive', b'set']:
            return
        # move or create a new touch
        if command == b'set':
            id = args[0]
            if id not in self.touches[oscpath]:
                # new touch
                touch = TuioMotionEventProvider.__handlers__[oscpath](
                    self.device, id, args[1:])
                self.touches[oscpath][id] = touch
                dispatch_fn('begin', touch)
            else:
                # update a current touch
                touch = self.touches[oscpath][id]
                touch.move(args[1:])
                dispatch_fn('update', touch)
        # alive event, check for deleted touch
        if command == b'alive':
            alives = args
            to_delete = []
            for id in self.touches[oscpath]:
                if id not in alives:
                    # touch up
                    touch = self.touches[oscpath][id]
                    if touch not in to_delete:
                        to_delete.append(touch)
            for touch in to_delete:
                dispatch_fn('end', touch)
                del self.touches[oscpath][touch.id]
class TuioMotionEvent(MotionEvent):
    '''Abstraction for TUIO touches/fiducials.

    Depending on the tracking software you use (e.g. Movid, CCV, etc.) and its
    TUIO implementation, the TuioMotionEvent object can support multiple
    profiles such as:

    * Fiducial ID: profile name 'markerid', attribute ``.fid``
    * Position: profile name 'pos', attributes ``.x``, ``.y``
    * Angle: profile name 'angle', attribute ``.a``
    * Velocity vector: profile name 'mov', attributes ``.X``, ``.Y``
    * Rotation velocity: profile name 'rot', attribute ``.A``
    * Motion acceleration: profile name 'motacc', attribute ``.m``
    * Rotation acceleration: profile name 'rotacc', attribute ``.r``
    '''

    __attrs__ = ('a', 'b', 'c', 'X', 'Y', 'Z', 'A', 'B', 'C', 'm', 'r')

    def __init__(self, device, id, args):
        super(TuioMotionEvent, self).__init__(device, id, args)
        # Zero every TUIO attribute: angles (a, b, c), velocities (X, Y, Z),
        # rotation velocities (A, B, C), motion/rotation accelerations (m, r).
        for attr_name in self.__attrs__:
            setattr(self, attr_name, 0.0)

    # Read-only aliases matching the TUIO naming.
    angle = property(lambda self: self.a)
    mot_accel = property(lambda self: self.m)
    rot_accel = property(lambda self: self.r)
    xmot = property(lambda self: self.X)
    ymot = property(lambda self: self.Y)
    zmot = property(lambda self: self.Z)
class Tuio2dCurMotionEvent(TuioMotionEvent):
    '''A 2dCur TUIO touch.'''

    def __init__(self, device, id, args):
        super(Tuio2dCurMotionEvent, self).__init__(device, id, args)

    def depack(self, args):
        self.is_touch = True
        nvalues = len(args)
        if nvalues < 5:
            # Position only.
            self.sx, self.sy = list(map(float, args[0:2]))
            self.profile = ('pos', )
        elif nvalues == 5:
            # Position plus velocity vector and motion acceleration.
            self.sx, self.sy, self.X, self.Y, self.m = list(map(float,
                                                                args[0:5]))
            self.Y = -self.Y
            self.profile = ('pos', 'mov', 'motacc')
        else:
            # Position, velocity, acceleration and blob width/height.
            self.sx, self.sy, self.X, self.Y = list(map(float, args[0:4]))
            self.m, blob_w, blob_h = list(map(float, args[4:7]))
            self.Y = -self.Y
            self.profile = ('pos', 'mov', 'motacc', 'shape')
            if self.shape is None:
                self.shape = ShapeRect()
            self.shape.width = blob_w
            self.shape.height = blob_h
        # TUIO's y axis points down; Kivy's points up.
        self.sy = 1 - self.sy
        super(Tuio2dCurMotionEvent, self).depack(args)
class Tuio2dObjMotionEvent(TuioMotionEvent):
    '''A 2dObj TUIO object (fiducial marker).
    '''

    def __init__(self, device, id, args):
        super(Tuio2dObjMotionEvent, self).__init__(device, id, args)

    def depack(self, args):
        self.is_touch = True
        nvalues = len(args)
        if nvalues < 5:
            # Position only.
            self.sx, self.sy = args[0:2]
            self.profile = ('pos', )
        elif nvalues == 9:
            # Full marker state without shape information.
            self.fid, self.sx, self.sy, self.a, self.X, self.Y = args[:6]
            self.A, self.m, self.r = args[6:9]
            self.Y = -self.Y
            self.profile = ('markerid', 'pos', 'angle', 'mov', 'rot',
                            'motacc', 'rotacc')
        else:
            # Full marker state plus bounding-box width/height.
            self.fid, self.sx, self.sy, self.a, self.X, self.Y = args[:6]
            self.A, self.m, self.r, bbox_w, bbox_h = args[6:11]
            self.Y = -self.Y
            self.profile = ('markerid', 'pos', 'angle', 'mov', 'rot', 'rotacc',
                            'acc', 'shape')
            if self.shape is None:
                self.shape = ShapeRect()
            self.shape.width = bbox_w
            self.shape.height = bbox_h
        # TUIO's y axis points down; Kivy's points up.
        self.sy = 1 - self.sy
        super(Tuio2dObjMotionEvent, self).depack(args)
class Tuio2dBlbMotionEvent(TuioMotionEvent):
    '''A 2dBlb TUIO object.
    # FIXME 3d shape are not supported
    /tuio/2Dobj set s i x y a X Y A m r
    /tuio/2Dblb set s x y a w h f X Y A m r
    '''

    def __init__(self, device, id, args):
        super(Tuio2dBlbMotionEvent, self).__init__(device, id, args)

    def depack(self, args):
        self.is_touch = True
        # Unpack in protocol order; the depth-like value is unused.
        (self.sx, self.sy, self.a, self.X, self.Y,
         blob_w, blob_h, _unused_depth, self.A, self.m, self.r) = args
        self.Y = -self.Y
        self.profile = ('pos', 'angle', 'mov', 'rot', 'rotacc',
                        'acc', 'shape')
        if self.shape is None:
            self.shape = ShapeRect()
        self.shape.width = blob_w
        self.shape.height = blob_h
        # TUIO's y axis points down; Kivy's points up.
        self.sy = 1 - self.sy
        super(Tuio2dBlbMotionEvent, self).depack(args)
# Register the default TUIO handlers and expose the provider to the
# input factory under the 'tuio' name.
for _oscpath, _event_cls in ((b'/tuio/2Dcur', Tuio2dCurMotionEvent),
                             (b'/tuio/2Dobj', Tuio2dObjMotionEvent),
                             (b'/tuio/2Dblb', Tuio2dBlbMotionEvent)):
    TuioMotionEventProvider.register(_oscpath, _event_cls)
MotionEventFactory.register('tuio', TuioMotionEventProvider)
|
|
# -*- coding: utf-8 -*-
from datetime import timedelta
import numpy as np
import pytest
from pandas import Timedelta, offsets, to_timedelta
def test_construction():
    """Exercise the accepted Timedelta constructor forms.

    Covers int/float + unit, keyword arguments, strings with and without
    units, rounding in the repr, combined day/time strings, invalid
    inputs, offsets, and unicode input.
    """
    expected = np.timedelta64(10, 'D').astype('m8[ns]').view('i8')
    assert Timedelta(10, unit='d').value == expected
    assert Timedelta(10.0, unit='d').value == expected
    assert Timedelta('10 days').value == expected
    assert Timedelta(days=10).value == expected
    assert Timedelta(days=10.0).value == expected

    expected += np.timedelta64(10, 's').astype('m8[ns]').view('i8')
    assert Timedelta('10 days 00:00:10').value == expected
    assert Timedelta(days=10, seconds=10).value == expected
    assert Timedelta(days=10, milliseconds=10 * 1000).value == expected
    assert Timedelta(days=10,
                     microseconds=10 * 1000 * 1000).value == expected

    # rounding cases
    assert Timedelta(82739999850000).value == 82739999850000
    assert ('0 days 22:58:59.999850' in str(Timedelta(82739999850000)))
    assert Timedelta(123072001000000).value == 123072001000000
    assert ('1 days 10:11:12.001' in str(Timedelta(123072001000000)))

    # string conversion with/without leading zero
    # GH#9570
    assert Timedelta('0:00:00') == timedelta(hours=0)
    assert Timedelta('00:00:00') == timedelta(hours=0)
    assert Timedelta('-1:00:00') == -timedelta(hours=1)
    assert Timedelta('-01:00:00') == -timedelta(hours=1)

    # more strings & abbrevs
    # GH#8190
    assert Timedelta('1 h') == timedelta(hours=1)
    assert Timedelta('1 hour') == timedelta(hours=1)
    assert Timedelta('1 hr') == timedelta(hours=1)
    assert Timedelta('1 hours') == timedelta(hours=1)
    assert Timedelta('-1 hours') == -timedelta(hours=1)
    assert Timedelta('1 m') == timedelta(minutes=1)
    assert Timedelta('1.5 m') == timedelta(seconds=90)
    assert Timedelta('1 minute') == timedelta(minutes=1)
    assert Timedelta('1 minutes') == timedelta(minutes=1)
    assert Timedelta('1 s') == timedelta(seconds=1)
    assert Timedelta('1 second') == timedelta(seconds=1)
    assert Timedelta('1 seconds') == timedelta(seconds=1)
    assert Timedelta('1 ms') == timedelta(milliseconds=1)
    assert Timedelta('1 milli') == timedelta(milliseconds=1)
    assert Timedelta('1 millisecond') == timedelta(milliseconds=1)
    assert Timedelta('1 us') == timedelta(microseconds=1)
    assert Timedelta('1 micros') == timedelta(microseconds=1)
    assert Timedelta('1 microsecond') == timedelta(microseconds=1)
    assert Timedelta('1.5 microsecond') == Timedelta('00:00:00.000001500')
    assert Timedelta('1 ns') == Timedelta('00:00:00.000000001')
    assert Timedelta('1 nano') == Timedelta('00:00:00.000000001')
    assert Timedelta('1 nanosecond') == Timedelta('00:00:00.000000001')

    # combos
    assert Timedelta('10 days 1 hour') == timedelta(days=10, hours=1)
    assert Timedelta('10 days 1 h') == timedelta(days=10, hours=1)
    assert Timedelta('10 days 1 h 1m 1s') == timedelta(
        days=10, hours=1, minutes=1, seconds=1)
    assert Timedelta('-10 days 1 h 1m 1s') == -timedelta(
        days=10, hours=1, minutes=1, seconds=1)
    assert Timedelta('-10 days 1 h 1m 1s') == -timedelta(
        days=10, hours=1, minutes=1, seconds=1)
    assert Timedelta('-10 days 1 h 1m 1s 3us') == -timedelta(
        days=10, hours=1, minutes=1, seconds=1, microseconds=3)
    assert Timedelta('-10 days 1 h 1.5m 1s 3us') == -timedelta(
        days=10, hours=1, minutes=1, seconds=31, microseconds=3)

    # Currently invalid as it has a - on the hh:mm:dd part
    # (only allowed on the days)
    with pytest.raises(ValueError):
        Timedelta('-10 days -1 h 1.5m 1s 3us')

    # only leading neg signs are allowed
    with pytest.raises(ValueError):
        Timedelta('10 days -1 h 1.5m 1s 3us')

    # no units specified
    with pytest.raises(ValueError):
        Timedelta('3.1415')

    # invalid construction
    with pytest.raises(ValueError, match="cannot construct a Timedelta"):
        Timedelta()
    with pytest.raises(ValueError, match="unit abbreviation w/o a number"):
        Timedelta('foo')
    msg = ("cannot construct a Timedelta from "
           "the passed arguments, allowed keywords are ")
    with pytest.raises(ValueError, match=msg):
        Timedelta(day=10)

    # floats
    expected = np.timedelta64(
        10, 's').astype('m8[ns]').view('i8') + np.timedelta64(
        500, 'ms').astype('m8[ns]').view('i8')
    assert Timedelta(10.5, unit='s').value == expected

    # offset
    assert to_timedelta(offsets.Hour(2)) == Timedelta(hours=2)
    assert Timedelta(offsets.Hour(2)) == Timedelta(hours=2)
    assert Timedelta(offsets.Second(2)) == Timedelta(seconds=2)

    # GH#11995: unicode
    expected = Timedelta('1H')
    result = Timedelta(u'1H')
    assert result == expected
    assert to_timedelta(offsets.Hour(2)) == Timedelta(u'0 days, 02:00:00')

    with pytest.raises(ValueError):
        Timedelta(u'foo bar')
@pytest.mark.parametrize('item', list({'days': 'D',
                                       'seconds': 's',
                                       'microseconds': 'us',
                                       'milliseconds': 'ms',
                                       'minutes': 'm',
                                       'hours': 'h',
                                       'weeks': 'W'}.items()))
@pytest.mark.parametrize('npdtype', [np.int64, np.int32, np.int16,
                                     np.float64, np.float32, np.float16])
def test_td_construction_with_np_dtypes(npdtype, item):
    """Each keyword argument accepts numpy scalar types (GH#8757)."""
    # GH#8757: test construction with np dtypes
    pykwarg, npkwarg = item
    expected = np.timedelta64(1, npkwarg).astype('m8[ns]').view('i8')
    assert Timedelta(**{pykwarg: npdtype(1)}).value == expected
@pytest.mark.parametrize('val', [
    '1s', '-1s', '1us', '-1us', '1 day', '-1 day',
    '-23:59:59.999999', '-1 days +23:59:59.999999', '-1ns',
    '1ns', '-23:59:59.999999999'])
def test_td_from_repr_roundtrip(val):
    """A Timedelta survives a round-trip through its value and its repr."""
    # round-trip both for string and value
    td = Timedelta(val)
    assert Timedelta(td.value) == td

    # str does not normally display nanos
    if not td.nanoseconds:
        assert Timedelta(str(td)) == td
    assert Timedelta(td._repr_base(format='all')) == td
def test_overflow_on_construction():
    """Values beyond the int64-nanosecond range raise OverflowError."""
    # GH#3374
    value = Timedelta('1day').value * 20169940
    with pytest.raises(OverflowError):
        Timedelta(value)

    # xref GH#17637
    with pytest.raises(OverflowError):
        Timedelta(7 * 19999, unit='D')

    with pytest.raises(OverflowError):
        Timedelta(timedelta(days=13 * 19999))
@pytest.mark.parametrize('fmt,exp', [
    ('P6DT0H50M3.010010012S', Timedelta(days=6, minutes=50, seconds=3,
                                        milliseconds=10, microseconds=10,
                                        nanoseconds=12)),
    ('P-6DT0H50M3.010010012S', Timedelta(days=-6, minutes=50, seconds=3,
                                         milliseconds=10, microseconds=10,
                                         nanoseconds=12)),
    ('P4DT12H30M5S', Timedelta(days=4, hours=12, minutes=30, seconds=5)),
    ('P0DT0H0M0.000000123S', Timedelta(nanoseconds=123)),
    ('P0DT0H0M0.00001S', Timedelta(microseconds=10)),
    ('P0DT0H0M0.001S', Timedelta(milliseconds=1)),
    ('P0DT0H1M0S', Timedelta(minutes=1)),
    ('P1DT25H61M61S', Timedelta(days=1, hours=25, minutes=61, seconds=61))
])
def test_iso_constructor(fmt, exp):
    """ISO 8601 duration strings construct the expected Timedelta."""
    assert Timedelta(fmt) == exp
@pytest.mark.parametrize('fmt', [
    'PPPPPPPPPPPP', 'PDTHMS', 'P0DT999H999M999S',
    'P1DT0H0M0.0000000000000S', 'P1DT0H0M00000000000S',
    'P1DT0H0M0.S'])
def test_iso_constructor_raises(fmt):
    """Malformed ISO 8601 duration strings raise ValueError."""
    with pytest.raises(ValueError, match=('Invalid ISO 8601 Duration '
                                          'format - {}'.format(fmt))):
        Timedelta(fmt)
@pytest.mark.parametrize('constructed_td, conversion', [
    (Timedelta(nanoseconds=100), '100ns'),
    (Timedelta(days=1, hours=1, minutes=1, weeks=1, seconds=1, milliseconds=1,
               microseconds=1, nanoseconds=1), 694861001001001),
    (Timedelta(microseconds=1) + Timedelta(nanoseconds=1), '1us1ns'),
    (Timedelta(microseconds=1) - Timedelta(nanoseconds=1), '999ns'),
    (Timedelta(microseconds=1) + 5 * Timedelta(nanoseconds=-2), '990ns')])
def test_td_constructor_on_nanoseconds(constructed_td, conversion):
    """Nanosecond-level construction matches string/int equivalents."""
    # GH#9273
    assert constructed_td == Timedelta(conversion)
def test_td_constructor_value_error():
    # a non-numeric ``nanoseconds`` argument must raise TypeError
    # NOTE(review): the function name says "value_error" but TypeError is
    # asserted -- name kept as-is for test-suite continuity; verify intent
    with pytest.raises(TypeError):
        Timedelta(nanoseconds='abc')
|
|
#!/usr/bin/env python
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import FakeYDL, md5
from youtube_dl.extractor import (
YoutubeIE,
DailymotionIE,
TEDIE,
)
class BaseTestSubtitles(unittest.TestCase):
    """Shared scaffolding for the per-site subtitle test cases.

    Subclasses set ``url`` and ``IE`` and inherit the extraction helpers.
    """
    url = None
    IE = None

    def setUp(self):
        # fresh downloader and extractor per test so params cannot leak
        self.DL = FakeYDL()
        self.ie = self.IE(self.DL)

    def getInfoDict(self):
        # run the extractor against the configured url
        return self.ie.extract(self.url)

    def getSubtitles(self):
        # convenience accessor for the 'subtitles' entry of the info dict
        return self.getInfoDict()['subtitles']
class TestYoutubeSubtitles(BaseTestSubtitles):
    """Subtitle extraction tests against live YouTube videos."""
    url = 'QRS8MkLhQmM'
    IE = YoutubeIE

    def getSubtitles(self):
        # YoutubeIE.extract returns a list of info dicts; the subtitles of
        # interest live on the first entry
        info_dict = self.getInfoDict()
        return info_dict[0]['subtitles']

    def test_youtube_no_writesubtitles(self):
        # subtitles are not extracted unless explicitly requested
        self.DL.params['writesubtitles'] = False
        subtitles = self.getSubtitles()
        self.assertEqual(subtitles, None)

    def test_youtube_subtitles(self):
        # default subtitle language; compared against a known checksum
        self.DL.params['writesubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertEqual(md5(subtitles['en']), '4cd9278a35ba2305f47354ee13472260')

    def test_youtube_subtitles_lang(self):
        # request one specific subtitle language
        self.DL.params['writesubtitles'] = True
        self.DL.params['subtitleslangs'] = ['it']
        subtitles = self.getSubtitles()
        self.assertEqual(md5(subtitles['it']), '164a51f16f260476a05b50fe4c2f161d')

    def test_youtube_allsubtitles(self):
        # this video is expected to carry 13 subtitle tracks
        self.DL.params['writesubtitles'] = True
        self.DL.params['allsubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertEqual(len(subtitles.keys()), 13)

    def test_youtube_subtitles_sbv_format(self):
        # explicit subtitle format selection: sbv
        self.DL.params['writesubtitles'] = True
        self.DL.params['subtitlesformat'] = 'sbv'
        subtitles = self.getSubtitles()
        self.assertEqual(md5(subtitles['en']), '13aeaa0c245a8bed9a451cb643e3ad8b')

    def test_youtube_subtitles_vtt_format(self):
        # explicit subtitle format selection: vtt
        self.DL.params['writesubtitles'] = True
        self.DL.params['subtitlesformat'] = 'vtt'
        subtitles = self.getSubtitles()
        self.assertEqual(md5(subtitles['en']), '356cdc577fde0c6783b9b822e7206ff7')

    def test_youtube_list_subtitles(self):
        # listing mode aborts extraction, so no info dict is produced
        self.DL.expect_warning(u'Video doesn\'t have automatic captions')
        self.DL.params['listsubtitles'] = True
        info_dict = self.getInfoDict()
        self.assertEqual(info_dict, None)

    def test_youtube_automatic_captions(self):
        # a different video, queried for automatic captions in Italian
        self.url = '8YoUxe5ncPo'
        self.DL.params['writeautomaticsub'] = True
        self.DL.params['subtitleslangs'] = ['it']
        subtitles = self.getSubtitles()
        self.assertTrue(subtitles['it'] is not None)

    def test_youtube_nosubtitles(self):
        # a video without subtitles yields an empty mapping plus a warning
        self.DL.expect_warning(u'video doesn\'t have subtitles')
        self.url = 'sAjKT8FhjI8'
        self.DL.params['writesubtitles'] = True
        self.DL.params['allsubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertEqual(len(subtitles), 0)

    def test_youtube_multiple_langs(self):
        # every requested language must be present in the result
        self.url = 'QRS8MkLhQmM'
        self.DL.params['writesubtitles'] = True
        langs = ['it', 'fr', 'de']
        self.DL.params['subtitleslangs'] = langs
        subtitles = self.getSubtitles()
        for lang in langs:
            self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
class TestDailymotionSubtitles(BaseTestSubtitles):
    """Subtitle extraction tests against live Dailymotion videos."""
    url = 'http://www.dailymotion.com/video/xczg00'
    IE = DailymotionIE

    def test_no_writesubtitles(self):
        # subtitles are not extracted unless explicitly requested
        subtitles = self.getSubtitles()
        self.assertEqual(subtitles, None)

    def test_subtitles(self):
        # default subtitle language; compared against a known checksum
        self.DL.params['writesubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertEqual(md5(subtitles['en']), '976553874490cba125086bbfea3ff76f')

    def test_subtitles_lang(self):
        # request one specific subtitle language
        self.DL.params['writesubtitles'] = True
        self.DL.params['subtitleslangs'] = ['fr']
        subtitles = self.getSubtitles()
        self.assertEqual(md5(subtitles['fr']), '594564ec7d588942e384e920e5341792')

    def test_allsubtitles(self):
        # this video is expected to carry 5 subtitle tracks
        self.DL.params['writesubtitles'] = True
        self.DL.params['allsubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertEqual(len(subtitles.keys()), 5)

    def test_list_subtitles(self):
        # listing mode aborts extraction, so no info dict is produced
        self.DL.expect_warning(u'Automatic Captions not supported by this server')
        self.DL.params['listsubtitles'] = True
        info_dict = self.getInfoDict()
        self.assertEqual(info_dict, None)

    def test_automatic_captions(self):
        # automatic captions are unsupported here, so the result is empty
        self.DL.expect_warning(u'Automatic Captions not supported by this server')
        self.DL.params['writeautomaticsub'] = True
        # NOTE(review): key is 'subtitleslang' here while sibling tests use
        # 'subtitleslangs' -- looks like a typo; harmless for this
        # assertion, but verify against the extractor's expected param name
        self.DL.params['subtitleslang'] = ['en']
        subtitles = self.getSubtitles()
        self.assertTrue(len(subtitles.keys()) == 0)

    def test_nosubtitles(self):
        # a video without subtitles yields an empty mapping plus a warning
        self.DL.expect_warning(u'video doesn\'t have subtitles')
        self.url = 'http://www.dailymotion.com/video/x12u166_le-zapping-tele-star-du-08-aout-2013_tv'
        self.DL.params['writesubtitles'] = True
        self.DL.params['allsubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertEqual(len(subtitles), 0)

    def test_multiple_langs(self):
        # every requested language must be present in the result
        self.DL.params['writesubtitles'] = True
        langs = ['es', 'fr', 'de']
        self.DL.params['subtitleslangs'] = langs
        subtitles = self.getSubtitles()
        for lang in langs:
            self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
class TestTedSubtitles(BaseTestSubtitles):
    """Subtitle extraction tests against a live TED talk."""
    url = 'http://www.ted.com/talks/dan_dennett_on_our_consciousness.html'
    IE = TEDIE

    def test_no_writesubtitles(self):
        # subtitles are not extracted unless explicitly requested
        subtitles = self.getSubtitles()
        self.assertEqual(subtitles, None)

    def test_subtitles(self):
        # default subtitle language; compared against a known checksum
        self.DL.params['writesubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertEqual(md5(subtitles['en']), '2154f31ff9b9f89a0aa671537559c21d')

    def test_subtitles_lang(self):
        # request one specific subtitle language
        self.DL.params['writesubtitles'] = True
        self.DL.params['subtitleslangs'] = ['fr']
        subtitles = self.getSubtitles()
        self.assertEqual(md5(subtitles['fr']), '7616cbc6df20ec2c1204083c83871cf6')

    def test_allsubtitles(self):
        # this talk is expected to carry 28 subtitle tracks
        self.DL.params['writesubtitles'] = True
        self.DL.params['allsubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertEqual(len(subtitles.keys()), 28)

    def test_list_subtitles(self):
        # listing mode aborts extraction, so no info dict is produced
        self.DL.expect_warning(u'Automatic Captions not supported by this server')
        self.DL.params['listsubtitles'] = True
        info_dict = self.getInfoDict()
        self.assertEqual(info_dict, None)

    def test_automatic_captions(self):
        # automatic captions are unsupported here, so the result is empty
        self.DL.expect_warning(u'Automatic Captions not supported by this server')
        self.DL.params['writeautomaticsub'] = True
        # NOTE(review): key is 'subtitleslang' here while sibling tests use
        # 'subtitleslangs' -- looks like a typo; harmless for this
        # assertion, but verify against the extractor's expected param name
        self.DL.params['subtitleslang'] = ['en']
        subtitles = self.getSubtitles()
        self.assertTrue(len(subtitles.keys()) == 0)

    def test_multiple_langs(self):
        # every requested language must be present in the result
        self.DL.params['writesubtitles'] = True
        langs = ['es', 'fr', 'de']
        self.DL.params['subtitleslangs'] = langs
        subtitles = self.getSubtitles()
        for lang in langs:
            self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
# allow running this subtitle test module directly
if __name__ == '__main__':
    unittest.main()
|
|
# -*- coding: utf-8 -*-
#_____________________________________________________________________________
#
# Copyright (c) 2012 Berlin Institute of Technology
# All rights reserved.
#
# Developed by: Philipp Meier <pmeier82@gmail.com>
# Neural Information Processing Group (NI)
# School for Electrical Engineering and Computer Science
# Berlin Institute of Technology
# MAR 5-6, Marchstr. 23, 10587 Berlin, Germany
# http://www.ni.tu-berlin.de/
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal with the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimers.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimers in the documentation
# and/or other materials provided with the distribution.
# * Neither the names of Neural Information Processing Group (NI), Berlin
# Institute of Technology, nor the names of its contributors may be used to
# endorse or promote products derived from this Software without specific
# prior written permission.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# WITH THE SOFTWARE.
#_____________________________________________________________________________
#
# Acknowledgements:
# Philipp Meier <pmeier82@gmail.com>
#_____________________________________________________________________________
#
"""implementation of spike sorting with matched filters
See:
[1] F. Franke, M. Natora, C. Boucsein, M. Munk, and K. Obermayer. An online
spike detection and spike classification algorithm capable of instantaneous
resolution of overlapping spikes. Journal of Computational Neuroscience, 2009
[2] F. Franke, ... , 2012,
The revolutionary BOTM Paper
"""
__docformat__ = 'restructuredtext'

# names exported by ``from ... import *``; BOTMNode is a legacy alias
# (assigned below) and ABOTMNode presumably likewise -- defined further down
__all__ = ['FilterBankSortingNode', 'AdaptiveBayesOptimalTemplateMatchingNode',
           'BayesOptimalTemplateMatchingNode', 'BOTMNode', 'ABOTMNode']
##---IMPORTS
import collections
import copy
import logging
import sys
import scipy as sp
from scipy import linalg as sp_la
from sklearn.mixture import log_multivariate_normal_density
from sklearn.utils.extmath import logsumexp
from .base_nodes import PCANode
from .cluster import HomoscedasticClusteringNode
from .filter_bank import FilterBankError, FilterBankNode
from .linear_filter import MatchedFilterNode
from .prewhiten import PrewhiteningNode
from .spike_detection import SDMteoNode, ThresholdDetectorNode
from ..common import (
overlaps, epochs_from_spiketrain, epochs_from_spiketrain_set,
shifted_matrix_sub, mcvec_to_conc, epochs_from_binvec, merge_epochs,
matrix_argmax, dict_list_to_ndarray, get_cut, GdfFile, MxRingBuffer,
mcvec_from_conc, get_aligned_spikes, vec2ten, get_tau_align_min,
get_tau_align_max, get_tau_align_energy, mad_scaling, mad_scale_op_mx,
mad_scale_op_vec, xi_vs_f)
##---CONSTANTS
# default spike-detector class (see the adaptive sorting node's ``det_cls``
# keyword, whose documented default is MTEO_DET)
MTEO_DET = SDMteoNode
# default keyword arguments for the MTEO detector
MTEO_KWARGS = {'kvalues': [3, 9, 15, 21],
               'threshold_factor': 0.98,
               'min_dist': 32}
##---CLASSES
class FilterBankSortingNode(FilterBankNode):
    """abstract class that handles filter instances and their outputs

    This class provides a pipeline structure to implement spike sorting
    algorithms that operate on a filter bank. The implementation is done by
    implementing the `self._pre_filter`, `self._post_filter`, `self._pre_sort`,
    `self._sort_chunk` and `self._post_sort` methods with meaningful
    processing. After the filter steps the filter output is present and can be
    processed on. Input data can be partitioned into chunks of smaller size.
    """

    def __init__(self, **kwargs):
        """
        :type ce: TimeSeriesCovE
        :keyword ce: covariance estimator instance, if None a new instance
            will be created and initialised with the identity matrix
            corresponding to the template size.

            Required

        :type templates: ndarray
        :keyword templates: templates to initialise the filter stack.
            [ntemps][tf][nc] a tensor of templates

        :type align_kind: str
        :keyword align_kind: The feature used for alignment. One of:

            - "max"    - align on maximum of the waveform
            - "min"    - align on minimum of the waveform
            - "energy" - align on peak of energy
            - "none"   - no alignment

            Default='min'

        :type chan_set: tuple
        :keyword chan_set: tuple of int designating the subset of channels
            this filter bank operates on.

            Default=tuple(range(nc))

        :type filter_cls: FilterNode
        :keyword filter_cls: the class of filter node to use for the filter
            bank, this must be a subclass of 'FilterNode'.

            Default=MatchedFilterNode

        :type rb_cap: int
        :keyword rb_cap: capacity of the ringbuffer that stores observations
            to calculate the mean template.

            Default=350

        :type chunk_size: int
        :keyword chunk_size: if input data will be longer than chunk_size, the
            input will be processed chunk per chunk to overcome memory sinks.

            Default=100000

        :type verbose: int
        :keyword verbose: verbosity level, 0:none, >1: print .. ref `VERBOSE`

            Default=0

        :type dtype: dtype resolvable
        :keyword dtype: anything that resolves into a scipy dtype, like a
            string or number type.

            Default=None
        """
        # kwargs
        templates = kwargs.pop('templates', None)
        tf = kwargs.get('tf', None)
        self._align_kind = kwargs.pop('align_kind', 'min')

        # need either a template tensor or an explicit filter length
        if tf is None and templates is None:
            raise FilterBankError('\'templates\' or \'tf\' are required!')
        if tf is None:
            if templates.ndim != 3:
                raise FilterBankError(
                    'templates have to be provided in a tensor of shape '
                    '[ntemps][tf][nc]!')
            # infer the filter length from the template tensor
            kwargs['tf'] = templates.shape[1]
        chunk_size = kwargs.pop('chunk_size', 100000)
        # everything not popped goes to super
        super(FilterBankSortingNode, self).__init__(**kwargs)

        # members
        self._fout = None  # filter-bank output of the current chunk
        self._data = None  # input data currently being processed
        self._chunk = None  # view of the current chunk of self._data
        self._chunk_offset = 0  # sample offset of the current chunk
        self._chunk_size = int(chunk_size)
        self.rval = {}  # sorting result: filter index -> spike-time list

        # create filters for templates
        if templates is not None:
            for temp in templates:
                self.create_filter(temp)

    ## SortingNode interface

    def _execute(self, x):
        # process the input chunk-wise through filter and sort hooks; the
        # sorting result is accumulated in self.rval, and x is passed through
        # No channel masking for now
        #self._data = x[:, self._chan_set]
        self._data = x
        dlen = self._data.shape[0]
        self.rval.clear()
        for i in self._idx_active_set:
            self.rval[i] = []
        curr_chunk = 0
        has_next_chunk = True

        # sort per chunk
        while has_next_chunk:
            # get chunk limits
            self._chunk_offset = curr_chunk * self._chunk_size
            clen = min(dlen, (curr_chunk + 1) * self._chunk_size)
            clen -= self._chunk_offset

            # generate data chunk and process
            self._chunk = self._data[
                self._chunk_offset:self._chunk_offset + clen]
            self._fout = sp.empty((clen, self.nf))

            # filtering
            self._pre_filter()
            self._fout = super(FilterBankSortingNode, self)._execute(
                self._chunk)
            self._post_filter()

            # sorting
            self._pre_sort()
            self._sort_chunk()
            self._post_sort()

            # iteration
            curr_chunk += 1
            if self._chunk_offset + clen >= dlen:
                has_next_chunk = False
        self._combine_results()

        # return input data
        return x

    ## FilterBankSortingNode interface - prototypes

    def _pre_filter(self):
        # hook: called before the filter bank is applied to a chunk
        pass

    def _post_filter(self):
        # hook: called after the filter bank has been applied to a chunk
        pass

    def _pre_sort(self):
        # hook: called before a chunk is sorted
        pass

    def _post_sort(self):
        # hook: called after a chunk has been sorted
        pass

    def _sort_chunk(self):
        # hook: subclasses implement the actual per-chunk sorting here
        pass

    def _combine_results(self):
        # convert per-filter spike-time lists to ndarrays, sort them, and
        # shift by half the template length to re-centre the spike times
        self.rval = dict_list_to_ndarray(self.rval)
        correct = int(self._tf / 2)
        for k in self.rval:
            self.rval[k].sort()
            self.rval[k] -= correct

    ## result access

    def spikes_u(self, u, mc=True, exclude_overlaps=True, overlap_window=None,
                 align_at=-1, align_kind='min', align_rsf=1.):
        """yields the spikes for the u-th filter

        :type u: int
        :param u: index of the filter # CHECK THIS
        :type mc: bool
        :param mc: if True, return spikes multi-channeled, else return spikes
            concatenated

            Default=True
        :type exclude_overlaps: bool
        :param exclude_overlaps: if True, exclude overlap spikes
        :type overlap_window: int
        :param overlap_window: if `exclude_overlaps` is True, this will define
            the overlap range; if None set overlap_window=self._tf.

            Default=None
        """
        # init
        # NOTE(review): `rval` and `size` appear to be computed but unused in
        # this method -- verify before removing
        cut = get_cut(self._tf)
        rval = None
        size = 0, sum(cut), self._data.shape[1]
        if mc is False:
            size = size[0], size[1] * size[2]
        st_dict = copy.deepcopy(self.rval)

        # extract spikes (deep-copied spike trains, so self.rval is untouched)
        spks, st_dict[u] = get_aligned_spikes(
            self._data,
            st_dict[u],
            align_at=align_at,
            tf=self._tf,
            mc=mc,
            kind=align_kind,
            rsf=align_rsf)
        if exclude_overlaps is True:
            if st_dict[u].size > 0:
                # drop spikes flagged as overlapping with other units
                ovlp_info = overlaps(st_dict, overlap_window or self._tf)[0]
                spks = spks[ovlp_info[u] == False]
        return spks

    ## plotting methods

    def plot_sorting(self, ph=None, show=False):
        """plot the sorting of the last data chunk

        :type ph: plot handle
        :param ph: plot handle to use for the plot
        :type show: bool
        :param show: if True, call plt.show()
        """
        # get plotting tools (optional dependency; silently bail out if absent)
        try:
            from spikeplot import COLOURS, mcdata
        except ImportError:
            return None

        # check
        if self._data is None or self.rval is None or len(
                self._idx_active_set) == 0:
            logging.warn('not initialised properly to plot a sorting!')
            return None

        # create events: filter index -> (template, spike times)
        ev = {}
        if self.rval is not None:
            temps = self.template_set
            for i in self._idx_active_set:
                if i in self.rval:
                    if self.rval[i].any():
                        ev[i] = (self.bank[i].xi, self.rval[i])

        # create colours
        cols = COLOURS[:self.nf]

        # calc discriminants for single units
        other = None
        if self.nf > 0:
            self.reset_history()
            other = super(FilterBankSortingNode, self)._execute(self._data)
            other += getattr(self, '_lpr_s', sp.log(1.e-6))
            other -= [.5 * self.get_xcorrs_at(i)
                      for i in xrange(self.nf)]

        # plot mcdata
        return mcdata(self._data, other=other, events=ev,
                      plot_handle=ph, colours=cols, show=show)

    def plot_sorting_waveforms(self, ph=None, show=False, **kwargs):
        """plot the waveforms of the sorting of the last data chunk

        :type ph: plot handle
        :param ph: plot handle to use for the plot
        :type show: bool
        :param show: if True, call plt.show()
        """
        # get plotting tools (optional dependency; silently bail out if absent)
        try:
            from spikeplot import waveforms
        except ImportError:
            return None

        # check
        if self._data is None or self.rval is None or len(
                self._idx_active_set) == 0:
            logging.warn('not initialised properly to plot a sorting!')
            return None

        # init
        wf = {}
        temps = {}
        cut = get_cut(self._tf)

        # build waveforms; fall back to the template when a unit has no spikes
        for u in self.rval:
            spks_u = self.spikes_u(
                u, exclude_overlaps=False, align_kind=self._align_kind,
                align_at=getattr(self, '_learn_templates', -1),
                align_rsf=getattr(self, '_learn_templates_rsf', 1.))
            temps[u] = self.bank[u].xi_conc
            if spks_u.size > 0:
                wf[u] = self.spikes_u(u, align_kind=self._align_kind)
            else:
                wf[u] = temps[u]

        """
        waveforms(waveforms, samples_per_second=None, tf=None, plot_mean=False,
                  plot_single_waveforms=True, set_y_range=False,
                  plot_separate=True, plot_handle=None, colours=None, title=None,
                  filename=None, show=True):
        """
        return waveforms(wf, samples_per_second=None, tf=self._tf,
                         plot_mean=True, templates=temps,
                         plot_single_waveforms=True, set_y_range=False,
                         plot_separate=True, plot_handle=ph, show=show)

    def sorting2gdf(self, fname):
        """write the gdf file representing the current sorting"""
        GdfFile.write_gdf(fname, self.rval)
class BayesOptimalTemplateMatchingNode(FilterBankSortingNode):
    """FilterBankSortingNode derivative for the BOTM algorithm

    Can use two implementations of the Bayes Optimal Template-Matching (BOTM)
    algorithm as presented in [2]. The first implementation uses explicitly
    constructed overlap channels for the extent of the complete input
    signal, the other implementation uses subtractive interference
    cancellation (SIC) on epochs of the signal, where the template
    discriminants are greater than the noise discriminant.
    """

    ## constructor

    def __init__(self, **kwargs):
        """
        :type ovlp_taus: list
        :keyword ovlp_taus: None or list of tau values. If a list of tau
            values is given, discriminant-functions for all pair-wise
            template overlap cases with the given tau values will be created
            and evaluated. If None a greedy subtractive interference
            cancellation (SIC) approach will be used.

            Default=None
        :type spk_pr: float
        :keyword spk_pr: spike prior value

            Default=1e-6
        :type noi_pr: float
        :keyword noi_pr: noise prior value

            Default=1e0
        :type spk_pr_bias: tuple
        :keyword spk_pr_bias: will only be used when the resolution method is
            'sic'. After a spike has been found in a spike epoch, its
            discriminant will be biased by the `bias` for `extend` samples.

            Default=None
        :type sic_guard: bool
        :keyword sic_guard: when True, test before setting a spike that
            removing it does not increase the norm of all discriminants.
        """
        # kwargs
        ovlp_taus = kwargs.pop('ovlp_taus', None)
        noi_pr = kwargs.pop('noi_pr', 1e0)
        spk_pr = kwargs.pop('spk_pr', 1e-6)
        spk_pr_bias = kwargs.pop('spk_pr_bias', None)
        self.use_sic_guard = kwargs.pop('sic_guard', True)

        # super
        super(BayesOptimalTemplateMatchingNode, self).__init__(**kwargs)

        # members
        self._ovlp_taus = ovlp_taus
        if self._ovlp_taus is not None:
            self._ovlp_taus = list(self._ovlp_taus)
            if self.verbose.has_print:
                print 'using overlap channels'
        else:
            if self.verbose.has_print:
                print 'using subtractive interference cancelation'
        self._disc = None  # discriminant functions per chunk
        self._pr_n = None  # noise prior
        self._lpr_n = None  # log noise prior
        self._pr_s = None  # spike prior
        self._lpr_s = None  # log spike prior
        self._pr_s_b = None  # (bias, extend) for SIC biasing, or None
        self._oc_idx = None  # overlap-channel index -> (f0, f1, tau)
        self.noise_prior = noi_pr
        self.spike_prior = spk_pr
        self.spike_prior_bias = spk_pr_bias

    ## properties

    def get_noise_prior(self):
        # prior probability of the noise hypothesis
        return self._pr_n

    def set_noise_prior(self, value):
        # validate (0, 1] and cache the prior together with its log
        if value <= 0.0:
            raise ValueError('noise prior <= 0.0')
        if value > 1.0:
            raise ValueError('noise prior > 1.0')
        self._pr_n = float(value)
        self._lpr_n = sp.log(self._pr_n)

    noise_prior = property(get_noise_prior, set_noise_prior)

    def get_spike_prior(self):
        # prior probability of a spike hypothesis
        return self._pr_s

    def set_spike_prior(self, value):
        # validate (0, 1] and cache the prior together with its log
        if value <= 0.0:
            raise ValueError('spike prior <= 0.0')
        if value > 1.0:
            raise ValueError('spike prior > 1.0')
        self._pr_s = float(value)
        self._lpr_s = sp.log(self._pr_s)

    spike_prior = property(get_spike_prior, set_spike_prior)

    def get_spike_prior_bias(self):
        # (bias, extend) tuple used during SIC resolution, or None
        return self._pr_s_b

    def set_spike_prior_bias(self, value):
        # None keeps the current value; otherwise expect (bias, extend)
        if value is None:
            return
        if len(value) != 2:
            raise ValueError('expecting tuple of length 2')
        value = float(value[0]), int(value[1])
        if value[1] < 1:
            raise ValueError('extend cannot be non-positive')
        self._pr_s_b = value

    spike_prior_bias = property(get_spike_prior_bias, set_spike_prior_bias)

    ## filter bank sorting interface

    def _post_filter(self):
        """build discriminant functions, prepare for sorting"""
        # tune filter outputs to prob. model: discriminant = filter output
        # + log spike prior - half the filter's autocorrelation term
        ns = self._fout.shape[0]
        nf = self.nf
        if self._ovlp_taus is not None:
            # extra columns for every pair-wise overlap channel per tau
            nf += nf * (nf - 1) * 0.5 * len(self._ovlp_taus)
        self._disc = sp.empty((ns, nf), dtype=self.dtype)
        self._disc[:] = sp.nan
        for i in xrange(self.nf):
            self._disc[:, i] = (self._fout[:, i] + self._lpr_s -
                                .5 * self.get_xcorrs_at(i))
        self._build_overlap(ns, self._disc, self._ovlp_taus)

    def _build_overlap(self, ns, disc, ovlp_taus):
        # build overlap channels from filter outputs; a no-op when
        # ovlp_taus is None (i.e. the SIC resolution method is used)
        if ovlp_taus is not None:
            self._oc_idx = {}
            oc_idx = self.nf
            # Build correct indices when filters are deactivated
            oc_map = {}
            off = 0
            for f in xrange(self.nf):
                if not self.bank[f].active:
                    off += 1
                    oc_map[f] = None
                else:
                    oc_map[f] = f - off
            for f0 in xrange(self.nf):
                for f1 in xrange(f0 + 1, self.nf):
                    for tau in ovlp_taus:
                        self._oc_idx[oc_idx] = (
                            oc_map[f0], oc_map[f1], tau)
                        # valid sample range for this shift
                        f0_lim = [max(0, 0 - tau), min(ns, ns - tau)]
                        f1_lim = [max(0, 0 + tau), min(ns, ns + tau)]
                        if oc_map[f0] is None or oc_map[f1] is None:
                            # at least one filter inactive: dead channel
                            disc[f0_lim[0]:f0_lim[1], oc_idx] = 0
                        else:
                            disc[f0_lim[0]:f0_lim[1], oc_idx] = (
                                disc[f0_lim[0]:f0_lim[1], f0] +
                                disc[f1_lim[0]:f1_lim[1], f1] -
                                self.get_xcorrs_at(f0, f1, tau))
                        oc_idx += 1

    # NOTE(review): class-level `import copy` -- redundant, `copy` is
    # already imported at module level; kept for byte-compatibility
    import copy

    def _sort_chunk(self):
        """sort this chunk on the calculated discriminant functions

        method: "och"
            Examples for overlap samples
                  tau=-2     tau=-1      tau=0      tau=1      tau=2
            f1:  |-----|    |-----|    |-----|    |-----|    |-----|
            f2:    |-----|   |-----|   |-----|   |-----|   |-----|
            res:    +++       ++++      +++++      ++++       +++
        method: "sic"
            TODO:
        """
        # init
        if self.nf == 0:
            return
        # epochs where any discriminant beats the noise discriminant
        spk_ep = epochs_from_binvec(
            sp.nanmax(self._disc, axis=1) > self._lpr_n)
        if spk_ep.size == 0:
            return
        l, r = get_cut(2 * self._tf)
        for i in xrange(spk_ep.shape[0]):
            # FIX: for now we just continue for empty epochs,
            # where do they come from anyways?!
            if spk_ep[i, 1] - spk_ep[i, 0] < 1:
                continue
            # re-centre the epoch on the discriminant peak
            mc = self._disc[spk_ep[i, 0]:spk_ep[i, 1], :].argmax(0).argmax()
            s = self._disc[spk_ep[i, 0]:spk_ep[i, 1], mc].argmax() + spk_ep[
                i, 0]
            spk_ep[i] = [s - l, s + r]
            # clip the epoch to the chunk boundaries
            spk_ep[i][0] = max(0, spk_ep[i][0])
            spk_ep[i][1] = min(self._disc.shape[0], spk_ep[i][1])

        # check epochs
        spk_ep = merge_epochs(spk_ep)
        n_ep = spk_ep.shape[0]

        # resolve each epoch by subtractive interference cancellation
        for i in xrange(n_ep):
            ep_fout = self._fout[spk_ep[i, 0]:spk_ep[i, 1]+1, :]
            ep_fout_norm = sp_la.norm(ep_fout)
            ep_disc = self._disc[spk_ep[i, 0]:spk_ep[i, 1]+1, :].copy()
            self._sort_sic(
                i, spk_ep, n_ep, ep_fout, ep_fout_norm, ep_disc,
                self._ovlp_taus)
            #del ep_fout, ep_disc, sub

    def _sort_sic(self, i, spk_ep, n_ep, ep_fout, ep_fout_norm, ep_disc,
                  ovlp_taus):
        """perform subtractive interference cancellation on one epoch's
        discriminants, appending found spike times to ``self.rval``
        """
        niter = 0
        # keep resolving while some discriminant still beats the noise prior
        while sp.nanmax(ep_disc) > self._lpr_n:
            # warn on spike overflow
            niter += 1
            if niter > self.nf:
                logging.warn(
                    'more spikes than filters found! '
                    'epoch: [%d:%d] %d' % (
                        spk_ep[i][0] + self._chunk_offset,
                        spk_ep[i][1] + self._chunk_offset,
                        niter))
                #if niter > 2 * self.nf:
                #    break

            # find epoch details: winning sample and winning channel
            ep_t = sp.nanargmax(sp.nanmax(ep_disc, axis=1))
            ep_c = sp.nanargmax(ep_disc[ep_t])

            # Find involved templates
            templ_idx = []
            if ep_c < self.nf:
                templ_idx.append((ep_c, 0))
            else:
                # was overlap
                my_oc_idx = self._oc_idx[ep_c]
                # Corner case? tau at the border of the overlap range:
                # restart the resolution on the single-unit channels only
                if my_oc_idx[2] == max(ovlp_taus) or \
                        my_oc_idx[2] == min(ovlp_taus):
                    self._sort_sic(
                        i, spk_ep, n_ep, ep_fout[:, :self.nf],
                        ep_fout_norm, ep_disc[:, :self.nf], None)
                    return
                templ_idx.append((self.get_idx_for(my_oc_idx[0]), 0))
                templ_idx.append(
                    (self.get_idx_for(my_oc_idx[1]), my_oc_idx[2]))

            # build subtrahend: shifted template cross-correlations
            sub = shifted_matrix_sub(
                sp.zeros_like(ep_disc[:, :self.nf]),
                self._xcorrs[templ_idx[0][0], :, :].T,
                templ_idx[0][1] + ep_t - self._tf + 1)
            for tidx in templ_idx[1:]:
                sub += shifted_matrix_sub(
                    sp.zeros_like(ep_disc[:, :self.nf]),
                    self._xcorrs[tidx[0], :, :].T,
                    tidx[1] + ep_t - self._tf + 1)

            # apply subtrahend -- guarded: only accept the spike if the
            # subtraction reduces the filter-output norm (unless disabled)
            if not self.use_sic_guard or \
                    ep_fout_norm > sp_la.norm(ep_fout + sub):
                ## DEBUG
                if self.verbose.get_has_plot(1):
                    from spikeplot import xvf_tensor, plt, COLOURS

                    x_range = sp.arange(ep_disc.shape[0])
                    f = plt.figure()
                    f.suptitle('spike epoch [%d:%d] #%d' %
                               (spk_ep[i, 0] + self._chunk_offset,
                                spk_ep[i, 1] + self._chunk_offset,
                                niter))
                    ax1 = f.add_subplot(211)
                    ax1.set_color_cycle(
                        ['k'] + COLOURS[:self.nf] * 2)
                    ax1.plot(x_range, sp.zeros_like(x_range),
                             ls='--')
                    ax1.plot(x_range, ep_disc, label='pre_sub')
                    ax1.axvline(x_range[ep_t], c='k')
                    ax2 = f.add_subplot(212, sharex=ax1, sharey=ax1)
                    ax2.set_color_cycle(['k'] + COLOURS[:self.nf])
                    ax2.plot(x_range, sp.zeros_like(x_range),
                             ls='--')
                    ax2.plot(x_range, sub)
                    ax2.axvline(x_range[ep_t], c='k')
                ## BUGED

                ep_disc[:, :self.nf] += sub
                ep_fout[:, :self.nf] += sub
                ep_fout_norm = sp_la.norm(ep_fout)
                # optionally bias the winner's discriminant around the spike
                if self._pr_s_b is not None:
                    bias, extend = self._pr_s_b
                    if ep_c < self.nf:
                        ep_disc[
                            max(ep_t - extend, 0):
                            min(ep_t + extend, ep_disc.shape[0]),
                            ep_c] -= bias
                    else:
                        my_oc_idx = self._oc_idx[ep_c]
                        fid0 = self.get_idx_for(my_oc_idx[0])
                        ep_disc[
                            max(ep_t - extend, 0):
                            min(ep_t + extend, ep_disc.shape[0]),
                            fid0] -= bias
                        fid1 = self.get_idx_for(my_oc_idx[1])
                        ep_disc[max(ep_t + my_oc_idx[2] - extend, 0):
                                min(ep_t + my_oc_idx[2] + extend,
                                    ep_disc.shape[0]), fid1] -= bias
                # rebuild the overlap channels on the updated discriminants
                ns = ep_disc.shape[0]
                self._build_overlap(ns, ep_disc, ovlp_taus)

                ## DEBUG
                if self.verbose.get_has_plot(1):
                    ax1.plot(x_range, ep_disc, ls=':', lw=2,
                             label='post_sub')
                ## BUGED

                # lets fill in the results
                if ep_c < self.nf:
                    # was single unit
                    fid = self.get_idx_for(ep_c)
                    self.rval[fid].append(
                        spk_ep[i, 0] + ep_t + self._chunk_offset)
                else:
                    # was overlap
                    my_oc_idx = self._oc_idx[ep_c]
                    fid0 = self.get_idx_for(my_oc_idx[0])
                    self.rval[fid0].append(
                        spk_ep[i, 0] + ep_t + self._chunk_offset)
                    fid1 = self.get_idx_for(my_oc_idx[1])
                    self.rval[fid1].append(
                        spk_ep[i, 0] + ep_t + my_oc_idx[2] +
                        self._chunk_offset)
            else:
                # guard rejected the spike: stop resolving this epoch
                break

    ## BOTM implementation

    def posterior_prob(self, obs, with_noise=False):
        """posterior probabilities for data under the model

        :type obs: ndarray
        :param obs: observations to be evaluated [n, tf, nc]
        :type with_noise: bool
        :param with_noise: if True, include the noise cluster as component
            in the mixture.

            Default=False
        :rtype: ndarray
        :returns: matrix with per component posterior probabilities [n, c]
        """
        # check obs: accept [n, tf*nc] concatenated or [n, tf, nc] tensors
        obs = sp.atleast_2d(obs)
        if len(obs) == 0:
            raise ValueError('no observations passed!')
        data = []
        if obs.ndim == 2:
            if obs.shape[1] != self._tf * self._nc:
                raise ValueError('data dimensions not compatible with model')
            for i in xrange(obs.shape[0]):
                data.append(obs[i])
        elif obs.ndim == 3:
            if obs.shape[1:] != (self._tf, self._nc):
                raise ValueError('data dimensions not compatible with model')
            for i in xrange(obs.shape[0]):
                data.append(mcvec_to_conc(obs[i]))
        data = sp.asarray(data, dtype=sp.float64)

        # build comps: one component per template (+ zero vector for noise)
        comps = self.get_template_set(mc=False)
        if with_noise:
            comps = sp.vstack((comps, sp.zeros((self._tf * self._nc))))
        comps = comps.astype(sp.float64)
        if len(comps) == 0:
            return sp.zeros((len(obs), 1))

        # build priors
        prior = sp.array([self._lpr_s] * len(comps), dtype=sp.float64)
        if with_noise:
            prior[-1] = self._lpr_n

        # get sigma
        try:
            sigma = self._ce.get_cmx(tf=self._tf).astype(sp.float64)
        except:
            # NOTE(review): bare except maps any estimator failure to a
            # zero matrix -- consider narrowing the exception type
            return sp.zeros((len(obs), 1))

        # calc log probs and normalise to posteriors
        lpr = log_multivariate_normal_density(data, comps, sigma,
                                              'tied') + prior
        logprob = logsumexp(lpr, axis=1)
        return sp.exp(lpr - logprob[:, sp.newaxis])

    def component_divergence(self, obs, with_noise=False, loading=False,
                             subdim=None):
        """component divergences under the model (Mahalanobis-style distance
        of each observation from each template mean)

        :type obs: ndarray
        :param obs: observations to be evaluated [n, tf, nc]
        :type with_noise: bool
        :param with_noise: if True, include the noise cluster as component
            in the mixture.

            Default=False
        :type loading: bool
        :param loading: if True, use the loaded matrix

            Default=False
        :type subdim: int
        :param subdim: dimensionality of subspace to build the inverse over;
            if None ignore.

            Default=None
        :rtype: ndarray
        :returns: divergence from means of current filter bank [n, c]
        """
        # check data: accept [n, tf*nc] concatenated or [n, tf, nc] tensors
        obs = sp.atleast_2d(obs)
        if len(obs) == 0:
            raise ValueError('no observations passed!')
        data = []
        if obs.ndim == 2:
            if obs.shape[1] != self._tf * self._nc:
                raise ValueError('data dimensions not compatible with model')
            for i in xrange(obs.shape[0]):
                data.append(obs[i])
        elif obs.ndim == 3:
            if obs.shape[1:] != (self._tf, self._nc):
                raise ValueError('data dimensions not compatible with model')
            for i in xrange(obs.shape[0]):
                data.append(mcvec_to_conc(obs[i]))
        data = sp.asarray(data, dtype=sp.float64)

        # build component set (+ zero vector for the noise cluster)
        comps = self.get_template_set(mc=False)
        if with_noise:
            comps = sp.vstack((comps, sp.zeros((self._tf * self._nc))))
        comps = comps.astype(sp.float64)
        if len(comps) == 0:
            return sp.ones((len(obs), 1)) * sp.inf

        # get sigma (inverse covariance, optionally loaded / rank-reduced)
        try:
            if loading is True:
                sigma_inv = self._ce.get_icmx_loaded(tf=self._tf).astype(
                    sp.float64)
            else:
                sigma_inv = self._ce.get_icmx(tf=self._tf).astype(sp.float64)
            if subdim is not None:
                # rebuild the inverse from the leading `subdim` singular
                # vectors, zeroing numerically negligible singular values
                subdim = int(subdim)
                svd = self._ce.get_svd(tf=self._tf).astype(sp.float64)
                sv = svd[1].copy()
                t = sp.finfo(self._ce.dtype).eps * len(sv) * svd[1].max()
                sv[sv < t] = 0.0
                sigma_inv = sp.dot(svd[0][:, :subdim],
                                   sp.dot(sp.diag(1. / sv[:subdim]),
                                          svd[2][:subdim]))
        except:
            # NOTE(review): bare except maps any estimator failure to an
            # all-inf result -- consider narrowing the exception type
            return sp.ones((len(obs), 1)) * sp.inf

        # return component wise divergence
        rval = sp.zeros((obs.shape[0], comps.shape[0]), dtype=sp.float64)
        for n in xrange(obs.shape[0]):
            x = data[n] - comps
            for c in xrange(comps.shape[0]):
                rval[n, c] = sp.dot(x[c], sp.dot(sigma_inv, x[c]))
        return rval
# for legacy compatibility: older code imports the sorter under this
# shorter name (also exported via __all__)
BOTMNode = BayesOptimalTemplateMatchingNode
class AdaptiveBayesOptimalTemplateMatchingNode(
BayesOptimalTemplateMatchingNode):
"""Adaptive BOTM Node
    Adaptivity here means, in the backwards sense, that known templates and
    covariances are adapted to local temporal changes. In the forward sense a
    parallel spike detection is matched to find currently unidentified units
    in the data.
"""
def __init__(self, **kwargs):
    """
    :type learn_templates: int
    :keyword learn_templates: if non-negative integer, adapt the filters
        with the found events aligned at that sample. If negative,
        calculate the alignment samples as int(.25*self.tf)
        Default=-1
    :type learn_templates_rsf: float
    :keyword learn_templates_rsf: resampling factor applied when aligning
        spikes for template learning.
        Default=1.0
    :type learn_noise: str or None
    :keyword learn_noise: if not None, adapt the noise covariance matrix
        from the noise epochs. This has to be either 'sort' to
        learn from the non overlapping sorting events,
        or 'det' to learn from the detection. Else, do not learn the noise.
        Default=None
    :type det_cls: ThresholdDetectorNode
    :keyword det_cls: the class of detector node to use for the spike
        detection running in parallel to the sorting,
        this must be a subclass of 'ThresholdDetectorNode'.
        Default=MTEO_DET
    :type det_limit: int
    :keyword det_limit: capacity of the ringbuffer to hold the unexplained
        spikes.
        Default=4000
    :type det_forget: int
    :keyword det_forget: Unexplained spikes that are older than this
        amount of samples will be forgotten. A reclustering to find
        new nodes will be started if ``det_limit`` unexplained spikes
        are found during ``det_forget`` samples. If this value is 0,
        no reclustering will occur.
        Default=1000000
    :type clus_num_reclus: int or list
    :keyword clus_num_reclus: Number of clusters that will be used in a
        reclustering of unexplained spikes.
        Default=4
    :type clus_min_size: int
    :keyword clus_min_size: Minimum number of spikes in a cluster of
        unexplained spikes for a new unit to be created from that cluster
        during reclustering.
        Default=30
    :type clus_use_amplitudes: bool
    :keyword clus_use_amplitudes: Determines if amplitudes (max-min) for
        each channel are used in addition to PCA features for clustering.
        Default=True
    :type clus_pca_features: int
    :keyword clus_pca_features: The number of PCA features to use during
        clustering.
        Default=10
    :type clus_algo: str
    :keyword clus_algo: Name of the clustering algorithm to use.
        Allowed are all names HomoscedasticClusteringNode can use,
        e.g. 'gmm' or 'meanshift'.
        Default='gmm'
    :type clus_params: dict
    :keyword clus_params: Dictionary of parameters for chosen algorithm.
        Contents depend on clustering algorithm:
        * 'gmm'
            * 'min_clusters' Minimum number of clusters to try.
              Default=1
            * 'max_clusters' Maximum number of clusters to try.
              Default=14
        * 'mean_shift'
            * Empty.
    :type clus_merge_rsf: int
    :keyword clus_merge_rsf: Resampling factor used for realignment before
        checking if clusters should be merged.
        Default=16
    :type clus_merge_dist: float
    :keyword clus_merge_dist: Maximum euclidean distance between two
        clusters that will be merged. Set to 0 to turn off automatic
        cluster merging.
        Default=0.0
    :type minimum_snr: float
    :keyword minimum_snr: Templates with a signal to noise ratio below this
        value are dropped.
        Default=0.3
    :type minimum_rate: float
    :keyword minimum_rate: Templates with a firing rate (in Hertz) below
        this value are dropped.
        Default=0.1
    :type det_kwargs: dict
    :keyword det_kwargs: keywords for the spike detector that will be
        run in parallel on the data.
        Default=MTEO_KWARGS
    """
    # kwargs
    learn_templates = kwargs.pop('learn_templates', -1)
    learn_templates_rsf = kwargs.pop('learn_templates_rsf', 1.0)
    learn_noise = kwargs.pop('learn_noise', None)
    # BUGFIX: pop with a default of None; the previous bare pop raised
    # KeyError when the keyword was omitted although the code below
    # clearly intends MTEO_DET / MTEO_KWARGS as fallbacks.
    det_cls = kwargs.pop('det_cls', None)
    if det_cls is None:
        det_cls = MTEO_DET
    det_kwargs = kwargs.pop('det_kwargs', None)
    if det_kwargs is None:
        det_kwargs = MTEO_KWARGS
    det_limit = kwargs.pop('det_limit', 4000)
    self._forget_samples = kwargs.pop('det_forget', 1000000)
    self._mad_scaling = kwargs.pop('clus_mad_scaling', False)
    self._min_new_cluster_size = kwargs.pop('clus_min_size', 30)
    self._num_reclus = kwargs.pop('clus_num_reclus', 4)
    self._use_amplitudes = kwargs.pop('clus_use_amplitudes', True)
    self._pca_features = kwargs.pop('clus_pca_features', 10)
    self._cluster_algo = kwargs.pop('clus_algo', 'gmm')
    self._cluster_params = kwargs.pop('clus_params', {})
    self._merge_dist = kwargs.pop('clus_merge_dist', 0.0)
    self._merge_rsf = kwargs.pop('clus_merge_rsf', 16)
    self._external_spike_train = None
    self._minimum_snr = kwargs.pop('minimum_snr', 0.3)
    self._minimum_rate = kwargs.pop('minimum_rate', 0.1)

    # check det_cls
    #if not issubclass(det_cls, ThresholdDetectorNode):
    #    raise TypeError(
    #        '\'det_cls\' of type ThresholdDetectorNode is required!')
    if learn_noise is not None:
        if learn_noise not in ['det', 'sort']:
            learn_noise = None

    # super
    super(AdaptiveBayesOptimalTemplateMatchingNode, self).__init__(**kwargs)

    if learn_templates < 0:
        learn_templates = int(0.25 * self._tf)

    # members
    self._det = None
    self._det_cls = det_cls
    self._det_kwargs = det_kwargs
    self._det_limit = int(det_limit)
    self._det_buf = None
    self._det_samples = None
    self._learn_noise = learn_noise
    self._learn_templates = learn_templates
    self._learn_templates_rsf = learn_templates_rsf
    self._sample_offset = 0  # counts samples seen so far (global time base)

    # align at (learn_templates); a float in [0, 1] is interpreted as a
    # fraction of the template window
    if self._learn_templates < 0:
        self._learn_templates = .25
    if isinstance(self._learn_templates, float):
        if 0.0 <= self._learn_templates <= 1.0:
            self._learn_templates *= self.tf
        self._learn_templates = int(self._learn_templates)

    # for initialisation set correct self._cluster method
    self._cluster = self._cluster_init
    self._det_buf = MxRingBuffer(capacity=self._det_limit,
                                 dimension=(self._tf * self._nc),
                                 dtype=self.dtype)
    # saves (global) sample indices of unexplained spike events
    self._det_samples = collections.deque(maxlen=self._det_limit)
    # mad scale value: None disables scaling, 0.0 means "not estimated yet"
    if self._mad_scaling is False:
        self._mad_scaling = None
    else:
        self._mad_scaling = 0.0
## properties
def get_det(self):
if self._det is None:
self._det = self._det_cls(tf=self._tf, **self._det_kwargs)
if self.verbose.has_print:
print self._det
return self._det
det = property(get_det)
## filter bank sorting interface

def _event_explained(self, ev, padding=15):
    """Check whether a detected event is explained by the filter bank.

    :param ev: detected event sample (in chunk coordinates)
    :param padding: extra samples inspected around the discriminant epoch
    :returns: True if any discriminant exceeds 0.0 near the event
    """
    # early exit if no discriminants are present
    if not self._disc.size:
        return False
    # cut relevant piece of the discriminants: the data epoch is the
    # template window aligned at self._learn_templates, the discriminant
    # epoch is shifted by half a template length
    data_ep = ev - self._learn_templates, \
              ev - self._learn_templates + self.tf
    disc_ep = data_ep[0] + self._tf / 2, \
              data_ep[1] + self._tf / 2
    if self._external_spike_train is not None:
        # external trains carry global sample indices; move to chunk coords
        disc_ep = (disc_ep[0] - self._chunk_offset,
                   disc_ep[1] - self._chunk_offset)
    if self.verbose.has_plot:
        # optional debug plot; spikeplot is an optional dependency
        try:
            from spikeplot import mcdata

            ep = data_ep[0] - padding, disc_ep[1] + padding
            mcdata(
                data=self._chunk[ep[0]:ep[1]],
                #other=self._disc[at[0]:at[1]], events=evts,
                other=self._disc[ep[0]:ep[1]],
                x_offset=ep[0],
                events={0: [ev], 1: [data_ep[0] + self._tf]},
                epochs={0: [data_ep], 1: [disc_ep]},
                title='det@%s(%s) disc@%s' % (
                    ev, self._learn_templates, ev + self._tf),
                show=True)
        except ImportError:
            pass
    #self.se_cnt += 1
    # clamp the inspection window to the discriminant array bounds
    start = max(0, disc_ep[0] - padding)
    stop = min(self._disc.shape[0], disc_ep[1] + padding)
    # a non-negative discriminant anywhere in the window counts as explained
    return self._disc[start:stop, :].max() >= 0.0
def _post_sort(self):
    """Check the spike sorting against the parallel (multi-unit) detection.

    Events found by the detector but not explained by the filter bank are
    aligned and buffered for a later reclustering step.
    """
    if self._external_spike_train is None:
        # run the parallel detector on the current chunk
        self.det.reset()
        self.det(self._chunk, ck0=self._chunk_offset,
                 ck1=self._chunk_offset + len(self._chunk))
        if self.det.events is None:
            return
        events = self.det.events
    else:
        # restrict the externally supplied spike train to this chunk
        events = self._external_spike_train[sp.logical_and(
            self._external_spike_train >= self._chunk_offset,
            self._external_spike_train < self._chunk_offset + len(
                self._chunk))]
    events_explained = sp.array([self._event_explained(e) for e in events])
    if self.verbose.has_print:
        print 'spks not explained:', (events_explained == False).sum()
    if sp.any(events_explained == False):
        data = self._chunk
        if self._mad_scaling is not None:
            # undo the running MAD scaling before extracting waveforms
            data = 1.0 / self._mad_scaling * self._chunk.copy()
        spks, st = get_aligned_spikes(
            data, events[events_explained == False],
            tf=self._tf, mc=False, kind=self._align_kind,
            align_at=self._learn_templates, rsf=self._learn_templates_rsf)
        self._det_buf.extend(spks)
        # store global sample positions of the unexplained spikes
        self._det_samples.extend(self._sample_offset + st)
    # discriminants are chunk-local; invalidate for the next chunk
    self._disc = None
def _execute(self, x, ex_st=None):
    """Sort one chunk of data and run all adaption steps.

    :param x: chunk of multichanneled data [samples, channels]
    :param ex_st: optional external spike train used instead of the
        internal parallel detection
    :returns: whatever the superclass sorting returns for ``x``
    """
    if self._mad_scaling is not None:
        # exponentially smoothed running MAD estimate; reuses the
        # covariance estimator's weight as the smoothing factor
        alpha = self._ce._weight
        mad_scale = mad_scaling(x)[1]
        if sp.any(self._mad_scaling):
            self._mad_scaling = (1.0 - alpha) * self._mad_scaling
            self._mad_scaling += alpha * mad_scale
        else:
            # first estimate: take it as-is
            self._mad_scaling = mad_scale

    # set the external spike train
    self._external_spike_train = ex_st

    # call super to get sorting
    rval = super(AdaptiveBayesOptimalTemplateMatchingNode, self)._execute(x)

    # adaption: noise statistics, filter drop-out, filter update, new units
    self._adapt_noise()
    self._adapt_filter_drop()
    self._adapt_filter_current()
    self._adapt_filter_new()

    # advance the global sample counter
    self._sample_offset += x.shape[0]
    return rval
## adaption methods

def _adapt_noise(self):
    """Update the noise covariance estimate from noise epochs.

    Depending on ``self._learn_noise`` the noise epochs are derived from
    the sorting ('sort') or from the parallel detection ('det').
    """
    if self._learn_noise:
        nep = None
        if self._learn_noise == 'sort':
            if len(self.rval) > 0:
                # epochs between the sorted spikes
                nep = epochs_from_spiketrain_set(
                    self.rval,
                    cut=(self._learn_templates,
                         self._tf - self._learn_templates),
                    end=self._data.shape[0])['noise']
        elif self._learn_noise == 'det':
            if self._external_spike_train is not None:
                # 666 is an arbitrary key for the single external unit
                nep = epochs_from_spiketrain_set(
                    {666: self._external_spike_train},
                    cut=(self._learn_templates,
                         self._tf - self._learn_templates),
                    end=self._data.shape[0])['noise']
            elif len(self.det.events) > 0:
                nep = self.det.get_epochs(
                    ## this does not have to be the correct cut for the
                    ## detection events! best would be to do an
                    # alignment here!
                    cut=(self._learn_templates,
                         self._tf - self._learn_templates),
                    merge=True, invert=True)
        else:
            raise ValueError('unrecognised value for learn_noise: %s' % str(
                self._learn_noise))
        try:
            self._ce.update(self._data, epochs=nep)
        except ValueError, e:
            # covariance update can reject degenerate epochs; log and go on
            logging.warn(str(e))
def _adapt_filter_drop(self):
    """Deactivate filters whose SNR or firing rate fell below the minima."""
    nsmpl = self._data.shape[0]
    for u in list(self._idx_active_set):
        # 1) snr drop
        if self.bank[u].snr < self._minimum_snr:
            self.deactivate(u)
            logging.warn('deactivating filter %s, snr' % str(u))

        # 2) rate drop
        if hasattr(self.bank[u], 'rate'):
            # BUGFIX: the bare `except:` here swallowed every exception
            # (including KeyboardInterrupt); only a missing unit in the
            # sorting result (KeyError) or no sorting at all (TypeError)
            # should yield a zero spike count.
            try:
                nspks = len(self.rval[u])
            except (KeyError, TypeError):
                nspks = 0
            self.bank[u].rate.observation(nspks, nsmpl)
            if self.bank[u].rate.filled and \
                    self.bank[u].rate.estimate() < self._minimum_rate:
                self.deactivate(u)
                logging.warn('deactivating filter %s, rate' % str(u))
    self._check_internals()
def _adapt_filter_current(self):
"""adapt templates/filters using non overlapping spikes"""
# check and init
if self._data is None or self.rval is None:
return
# adapt filters with found waveforms
for u in self.rval:
spks_u = self.spikes_u(u, mc=True, exclude_overlaps=True,
align_at=self._learn_templates or -1,
align_kind=self._align_kind,
align_rsf=self._learn_templates_rsf)
if spks_u.size == 0:
continue
self.bank[u].extend_xi_buf(spks_u)
self.bank[u].rate.observation(spks_u.shape[0], self._data.shape[0])
print [(u, f.rate.estimate()) for (u, f) in self.bank.items()]
def _adapt_filter_new(self):
if self._det_buf.is_full and \
(self._cluster == self._cluster_init or
(self._forget_samples > 0 and
self._det_samples[0] > self._sample_offset -
self._forget_samples)):
if self.verbose.has_print:
print 'det_buf is full!'
self._cluster()
else:
if self.verbose.has_print:
print 'self._det_buf volume:', self._det_buf
## something from robert

def resampled_mean_dist(self, spks1, spks2):
    """Calculate the distance of resampled means from two sets of spikes.

    :param spks1: concatenated spike waveforms of the first cluster
    :param spks2: concatenated spike waveforms of the second cluster
    :returns: euclidean distance between the (optionally resampled and
        realigned) cluster means
    """
    # resample and realign means to check distance
    means = {}
    means[0] = mcvec_from_conc(spks1.mean(0), nc=self._nc)
    means[1] = mcvec_from_conc(spks2.mean(0), nc=self._nc)

    if self._merge_rsf != 1:
        for u in means.iterkeys():
            # upsample by the merge resampling factor
            means[u] = sp.signal.resample(
                means[u], self._merge_rsf * means[u].shape[0])
            # realign the upsampled mean at the learning alignment sample
            if self._align_kind == 'min':
                tau = get_tau_align_min(
                    sp.array([means[u]]),
                    self._learn_templates * self._merge_rsf)
            elif self._align_kind == 'max':
                tau = get_tau_align_max(
                    sp.array([means[u]]),
                    self._learn_templates * self._merge_rsf)
            elif self._align_kind == 'energy':
                tau = get_tau_align_energy(
                    sp.array([means[u]]),
                    self._learn_templates * self._merge_rsf)
            else:
                tau = 0

            # Realignment shouldn't need to be drastic
            max_dist = 2 * self._merge_rsf
            l = means[u].shape[0]
            if abs(tau) > max_dist:
                logging.warn(('Could not realign %d, distance: %d ' %
                              (u, tau)))
                tau = 0
            # crop symmetrically around the (possibly shifted) centre
            means[u] = mcvec_to_conc(
                means[u][max_dist + tau:l - max_dist + tau, :])
    else:
        means[0] = mcvec_to_conc(means[0])
        means[1] = mcvec_to_conc(means[1])

    return sp.spatial.distance.cdist(
        sp.atleast_2d(means[0]), sp.atleast_2d(means[1]), 'euclidean')
## cluster methods

def _cluster_init(self):
    """Cluster step for initialisation.

    Empties the unexplained-spike buffer, clusters the spikes, optionally
    merges close clusters and creates a filter for every sufficiently
    large cluster. Afterwards switches ``self._cluster`` to the normal
    operation method.
    """
    # get all spikes and clear buffers
    spks = self._det_buf[:].copy()
    self._det_buf.clear()
    self._det_samples.clear()

    # noise covariance matrix, and scaling due to median average deviation
    C = self._ce.get_cmx(tf=self._tf, chan_set=self._chan_set)
    if self._mad_scaling is not None:
        C *= mad_scale_op_mx(self._mad_scaling, self._tf)

    # processing chain: prewhitening followed by PCA feature extraction
    pre_pro = PrewhiteningNode(ncov=C) + \
              PCANode(output_dim=self._pca_features)
    sigma_factor = 4.0
    min_clusters = self._cluster_params.get('min_clusters', 1)
    max_clusters = self._cluster_params.get('max_clusters', 14)
    # meanshift needs no repeats; model-based clustering is restarted
    rep = 0 if self._cluster_algo == 'meanshift' else 4
    clus = HomoscedasticClusteringNode(
        clus_type=self._cluster_algo,
        cvtype='full',
        debug=self.verbose.has_print,
        sigma_factor=sigma_factor,
        crange=range(min_clusters, max_clusters + 1),
        max_iter=256, repeats=rep)

    # create features
    if self._use_amplitudes:
        # PCA features plus one per-channel amplitude (max-min) feature
        n_spikes = spks.shape[0]
        spks_pp = sp.zeros((n_spikes, self._pca_features + self._nc))
        spks_pp[:, :self._pca_features] = pre_pro(spks)
        all = vec2ten(spks, self._nc)
        all_amp = all.max(axis=1) - all.min(axis=1)
        # Scale amplitude features to a level near pca features
        all_amp *= sigma_factor * 5 / all_amp.max()
        spks_pp[:, self._pca_features:] = all_amp
    else:
        spks_pp = pre_pro(spks)

    # cluster
    clus(spks_pp)
    # NOTE(review): the plot below may be shown twice when both
    # is_verbose and has_plot are set — looks unintentional; verify.
    if self.verbose.is_verbose is True:
        clus.plot(spks_pp, show=True)
    lbls = clus.labels
    if self.verbose.has_plot:
        clus.plot(spks_pp, show=True)

    # optionally merge clusters whose realigned means are close
    if self._merge_dist > 0.0:
        merged = True
        while merged:
            merged = False
            for i in sp.unique(lbls):
                spks_i = spks[lbls == i]
                #for inner in xrange(i):
                for inner in sp.unique(lbls):
                    if i >= inner:
                        continue
                    spks_inner = spks[lbls == inner]
                    d = self.resampled_mean_dist(spks_i, spks_inner)
                    if self.verbose.has_print:
                        print 'Distance %d-%d: %f' % (i, inner, d)
                    if d <= self._merge_dist:
                        lbls[lbls == i] = inner
                        if self.verbose.has_print:
                            print 'Merged', i, 'and', inner, '-'
                        merged = True
                        break
                if merged:
                    break

    if self._mad_scaling is not None:
        # if we have scaled the spikes, rescale to original scale
        spks *= mad_scale_op_vec(1.0 / self._mad_scaling, self._tf)

    # create a filter for every sufficiently large cluster; small clusters
    # go back into the buffer
    for i in sp.unique(lbls):
        spks_i = spks[lbls == i]
        if len(spks_i) < self._min_new_cluster_size:
            self._det_buf.extend(spks_i)
            if self.verbose.has_print:
                print 'Unit %d rejected, only %d spikes' % (i, len(spks_i))
            continue
        spk_i = mcvec_from_conc(spks_i.mean(0), nc=self._nc)
        self.create_filter(spk_i)
        if self.verbose.has_print:
            print 'Unit %d accepted, with %d spikes' % (i, len(spks_i))
    del pre_pro, clus, spks, spks_pp
    # from now on use the normal-operation cluster step
    self._cluster = self._cluster_base
def _cluster_base(self):
    """Cluster step for normal operation.

    Like :meth:`_cluster_init` but with a fixed feature/clustering setup
    (10 PCA features, tied-covariance GMM, up to ``self._num_reclus``
    clusters) and without the cluster-merging pass.
    """
    # get all spikes and clear buffer
    spks = self._det_buf[:].copy()
    self._det_buf.clear()
    self._det_samples.clear()

    # noise covariance matrix, and scaling due to median average deviation
    C = self._ce.get_cmx(tf=self._tf, chan_set=self._chan_set)
    if self._mad_scaling is not None:
        C *= mad_scale_op_mx(self._mad_scaling, self._tf)

    # processing chain
    pre_pro = PrewhiteningNode(ncov=C) + \
              PCANode(output_dim=10)
    clus = HomoscedasticClusteringNode(
        clus_type='gmm',
        cvtype='tied',
        debug=self.verbose.has_print,
        sigma_factor=4.0,
        crange=range(1, self._num_reclus + 1),
        max_iter=256)
    spks_pp = pre_pro(spks)
    clus(spks_pp)
    lbls = clus.labels
    if self.verbose.has_plot:
        clus.plot(spks_pp, show=True)

    if self._mad_scaling is not None:
        # rescale spikes back to the original amplitude scale
        spks *= mad_scale_op_vec(1.0 / self._mad_scaling, self._tf)

    # accept clusters above the minimum size as new units
    for i in sp.unique(lbls):
        if self.verbose.has_print:
            print 'checking new unit:',
        spks_i = spks[lbls == i]
        if len(spks_i) < self._min_new_cluster_size:
            self._det_buf.extend(spks_i)
            if self.verbose.has_print:
                print 'rejected, only %d spikes' % len(spks_i)
        else:
            spk_i = mcvec_from_conc(spks_i.mean(0), nc=self._nc)
            self.create_filter(spk_i)
            if self.verbose.has_print:
                print 'accepted, with %d spikes' % len(spks_i)
    del pre_pro, clus, spks, spks_pp
def _update_mad_value(self, mad):
    """update the mad value if `mad_scaling` is True"""
    # NOTE(review): stub — the body was never implemented; calling this
    # is currently a no-op returning None.
## shortcut: short alias for the adaptive node
ABOTMNode = AdaptiveBayesOptimalTemplateMatchingNode

##---MAIN

if __name__ == '__main__':
    pass  # library module, no standalone behaviour
|
|
#!/usr/bin/env python3
#
# Copyright (c) 2016-2021, Babak Farrokhi
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
import getopt
import ipaddress
import os
import signal
import socket
import sys
import time
from statistics import stdev

import dns.exception
import dns.flags
import dns.message
import dns.query
import dns.rcode
import dns.rdataclass
import dns.resolver
import requests

from util.dns import PROTO_UDP, PROTO_TCP, PROTO_TLS, PROTO_HTTPS, proto_to_text, unsupported_feature
from util.shared import __version__
__author__ = 'Babak Farrokhi (babak@farrokhi.net)'
__license__ = 'BSD'
__progname__ = os.path.basename(sys.argv[0])
shutdown = False
def usage():
    """Print the command-line help text and exit successfully."""
    # BUGFIX: the option summary omitted -H (DoH) although it is accepted;
    # also fixed the "protols" typo in the -H description.
    print("""%s version %s
usage: %s [-46DeFHhqTvX] [-i interval] [-s server] [-p port] [-P port] [-S address] [-c count] [-t type] [-w wait] hostname
  -h  --help      Show this help
  -q  --quiet     Quiet
  -v  --verbose   Print actual dns response
  -s  --server    DNS server to use (default: first entry from /etc/resolv.conf)
  -p  --port      DNS server port number (default: 53 for TCP/UDP and 853 for TLS)
  -T  --tcp       Use TCP as transport protocol
  -X  --tls       Use TLS as transport protocol
  -H  --doh       Use HTTPS as transport protocol (DoH)
  -4  --ipv4      Use IPv4 as default network protocol
  -6  --ipv6      Use IPv6 as default network protocol
  -P  --srcport   Query source port number (default: 0)
  -S  --srcip     Query source IP address (default: default interface address)
  -c  --count     Number of requests to send (default: 10, 0 for infinity)
  -w  --wait      Maximum wait time for a reply (default: 2 seconds)
  -i  --interval  Time between each request (default: 1 seconds)
  -t  --type      DNS request record type (default: A)
  -e  --edns      Disable EDNS0 (default: Enabled)
  -D  --dnssec    Enable 'DNSSEC desired' flag in requests. Implies EDNS.
  -F  --flags     Display response flags
""" % (__progname__, __version__, __progname__))
    sys.exit(0)
def setup_signal_handler():
    """Install signal handlers: ignore CTRL+Z, custom CTRL+C handler.

    Note: both registrations share one try block, so if SIGTSTP is
    unavailable (e.g. Windows) the SIGINT handler is not installed either.
    """
    try:
        signal.signal(signal.SIGTSTP, signal.SIG_IGN)  # ignore CTRL+Z
        signal.signal(signal.SIGINT, signal_handler)  # custom CTRL+C handler
    except AttributeError:  # not all signals are supported on all platforms
        pass
def signal_handler(sig, frame):
    """CTRL+C handler: first press requests a graceful stop, second exits."""
    global shutdown
    if shutdown:  # pressed twice, so exit immediately
        sys.exit(0)
    shutdown = True  # pressed once, exit gracefully
def validate_server_address(dnsserver, address_family):
    """Check that we have a valid DNS server address; resolve hostnames.

    :param dnsserver: IP address literal or hostname of the DNS server
    :param address_family: socket.AF_INET or socket.AF_INET6
    :returns: the server address as an IP string (exits the program on
        resolution failure)
    """
    try:
        ipaddress.ip_address(dnsserver)
    except ValueError:  # not a valid IPv4/IPv6 literal, resolve as hostname
        try:
            # BUGFIX: use the first getaddrinfo() result ([0]); the previous
            # [1] raised IndexError whenever only one addrinfo tuple was
            # returned and arbitrarily skipped the preferred entry otherwise
            dnsserver = socket.getaddrinfo(dnsserver, port=None, family=address_family)[0][4][0]
        except OSError:
            print('Error: cannot resolve hostname:', dnsserver, file=sys.stderr, flush=True)
            sys.exit(1)
    return dnsserver
def main():
    """Parse command-line options, ping the DNS server, report statistics."""
    setup_signal_handler()

    if len(sys.argv) == 1:
        usage()

    # defaults
    rdatatype = 'A'
    count = 10
    timeout = 2
    interval = 1
    quiet = False
    verbose = False
    show_flags = False
    dnsserver = None  # do not try to use system resolver by default
    dst_port = 53  # default for UDP and TCP
    src_port = 0
    src_ip = None
    proto = PROTO_UDP
    use_edns = True
    want_dnssec = False
    af = socket.AF_INET
    qname = 'wikipedia.org'

    try:
        opts, args = getopt.getopt(sys.argv[1:], "qhc:s:t:w:i:vp:P:S:T46eDFXH",
                                   ["help", "count=", "server=", "quiet", "type=", "wait=", "interval=", "verbose",
                                    "port=", "srcip=", "tcp", "ipv4", "ipv6", "srcport=", "edns", "dnssec", "flags",
                                    "tls", "doh"])
    except getopt.GetoptError as err:
        # print help information and exit:
        print(err, file=sys.stderr)  # will print something like "option -a not recognized"
        usage()

    # exactly one positional argument: the hostname to query
    if args and len(args) == 1:
        qname = args[0]
    else:
        usage()

    for o, a in opts:
        if o in ("-h", "--help"):
            usage()
        elif o in ("-c", "--count"):
            count = abs(int(a))
        elif o in ("-v", "--verbose"):
            verbose = True
        elif o in ("-s", "--server"):
            dnsserver = a
        elif o in ("-q", "--quiet"):
            quiet = True
            verbose = False
        elif o in ("-w", "--wait"):
            timeout = int(a)
        elif o in ("-i", "--interval"):
            interval = float(a)
        elif o in ("-t", "--type"):
            rdatatype = a
        elif o in ("-T", "--tcp"):
            proto = PROTO_TCP
        elif o in ("-X", "--tls"):
            proto = PROTO_TLS
            dst_port = 853  # default for DoT, unless overriden using -p
        elif o in ("-H", "--doh"):
            proto = PROTO_HTTPS
            dst_port = 443  # default for DoH, unless overriden using -p
        elif o in ("-4", "--ipv4"):
            af = socket.AF_INET
        elif o in ("-6", "--ipv6"):
            af = socket.AF_INET6
        elif o in ("-e", "--edns"):
            use_edns = False
        elif o in ("-D", "--dnssec"):
            want_dnssec = True
        elif o in ("-F", "--flags"):
            show_flags = True
        elif o in ("-p", "--port"):
            dst_port = int(a)
        elif o in ("-P", "--srcport"):
            src_port = int(a)
            if src_port < 1024:
                print("WARNING: Source ports below 1024 are only available to superuser", flush=True)
        elif o in ("-S", "--srcip"):
            src_ip = a
        else:
            usage()

    # Use system DNS server if parameter is not specified
    # remember not all systems have /etc/resolv.conf (i.e. Android)
    if dnsserver is None:
        dnsserver = dns.resolver.get_default_resolver().nameservers[0]
    dnsserver = validate_server_address(dnsserver, af)

    if use_edns:
        query = dns.message.make_query(qname, rdatatype, dns.rdataclass.IN,
                                       use_edns=True, want_dnssec=want_dnssec,
                                       ednsflags=dns.flags.edns_from_text('DO'), payload=8192)
    else:
        query = dns.message.make_query(qname, rdatatype, dns.rdataclass.IN,
                                       use_edns=False, want_dnssec=want_dnssec)

    response_time = []
    i = 0

    print("%s DNS: %s:%d, hostname: %s, proto: %s, rdatatype: %s, flags: %s" %
          (__progname__, dnsserver, dst_port, qname, proto_to_text(proto), rdatatype,
           dns.flags.to_text(query.flags)), flush=True)

    while not shutdown:
        if 0 < count <= i:
            break
        else:
            i += 1

        # BUGFIX: take the start mark outside the try and pre-seed the end
        # mark; previously `etime` was unbound (NameError at the
        # `time_to_next` computation) when the very first query raised a
        # timeout before `etime` was assigned.
        stime = time.perf_counter()
        etime = stime
        try:
            if proto is PROTO_UDP:
                answers = dns.query.udp(query, dnsserver, timeout=timeout, port=dst_port,
                                        source=src_ip, source_port=src_port, ignore_unexpected=True)
            elif proto is PROTO_TCP:
                answers = dns.query.tcp(query, dnsserver, timeout=timeout, port=dst_port,
                                        source=src_ip, source_port=src_port)
            elif proto is PROTO_TLS:
                if hasattr(dns.query, 'tls'):
                    answers = dns.query.tls(query, dnsserver, timeout, dst_port,
                                            src_ip, src_port)
                else:
                    unsupported_feature()
            elif proto is PROTO_HTTPS:
                if hasattr(dns.query, 'https'):
                    answers = dns.query.https(query, dnsserver, timeout, dst_port,
                                              src_ip, src_port)
                else:
                    unsupported_feature()
            etime = time.perf_counter()
        except dns.resolver.NoNameservers as e:
            if not quiet:
                print("No response to DNS request", file=sys.stderr, flush=True)
                if verbose:
                    print("error:", e, file=sys.stderr, flush=True)
            sys.exit(1)
        except (requests.exceptions.ConnectTimeout, dns.exception.Timeout):
            if not quiet:
                print("Request timeout", flush=True)
        except requests.exceptions.ReadTimeout:
            if not quiet:
                print("Read timeout", flush=True)
        except ValueError:
            if not quiet:
                print("Invalid Response", flush=True)
            continue
        else:
            # convert time to milliseconds, considering that
            # time property is retruned differently by query.https
            if type(answers.time) is datetime.timedelta:
                elapsed = answers.time.total_seconds() * 1000
            else:
                elapsed = answers.time * 1000
            response_time.append(elapsed)
            if not quiet:
                if show_flags:
                    flags = " [%s] %s" % (dns.flags.to_text(answers.flags), dns.rcode.to_text(answers.rcode()))
                else:
                    flags = ""
                print("%d bytes from %s: seq=%-3d time=%.3f ms%s" % (
                    len(answers.to_wire()), dnsserver, i, elapsed, flags), flush=True)
            if verbose:
                print(answers.to_text(), flush=True)

        # sleep until the next request is due
        time_to_next = (stime + interval) - etime
        if time_to_next > 0:
            time.sleep(time_to_next)

    r_sent = i
    r_received = len(response_time)
    r_lost = r_sent - r_received
    # guard against division by zero when interrupted before any request
    r_lost_percent = (100 * r_lost) / r_sent if r_sent else 0.0
    if response_time:
        r_min = min(response_time)
        r_max = max(response_time)
        r_avg = sum(response_time) / r_received
        if len(response_time) > 1:
            r_stddev = stdev(response_time)
        else:
            r_stddev = 0
    else:
        r_min = 0
        r_max = 0
        r_avg = 0
        r_stddev = 0

    print('\n--- %s dnsping statistics ---' % dnsserver, flush=True)
    print('%d requests transmitted, %d responses received, %.0f%% lost' % (r_sent, r_received, r_lost_percent),
          flush=True)
    print('min=%.3f ms, avg=%.3f ms, max=%.3f ms, stddev=%.3f ms' % (r_min, r_avg, r_max, r_stddev), flush=True)
# script entry point
if __name__ == '__main__':
    main()
|
|
import os
import numpy as np
import sys
import cStringIO
import re
import scipy.io as sio
import copy
def cell2strtable(celltable, delim='\t'):
    ''' convert a cell table into a string table that can be printed nicely

    Parameters:
    celltable - array-like, ndarray with rows and columns in desired order
    delim - str, delimiter to combine columns of celltable
        [default = '\\t' (strictly 4 spaces)]

    Returns:
    strtable - str, string version of celltable that prints as table

    Example:
    celltable = np.array([['Column 1 Title','Column 2 Title',''],
        ['Row 2 Column 1 is longer...','Row 2 Column 2','Extra Column!']])
    delim = '\t'
    strtable = cell2strtable(celltable, delim)
    print(strtable)
    Column 1 Title                 Column 2 Title
    Row 2 Column 1 is longer...    Row 2 Column 2    Extra Column!
    '''
    # change \t to 4 spaces
    if delim == '\t':
        delim = '    '
    # check that celltable is ndarray with object dtype; note: the removed
    # numpy aliases np.object/np.str raise AttributeError on modern numpy,
    # use the builtin types instead
    if type(celltable) != np.ndarray:
        celltable = np.array([celltable], dtype=object)
    elif celltable.dtype != object:  # copy as object dtype
        celltable = copy.deepcopy(celltable).astype(object)
    else:  # copy as is
        celltable = copy.deepcopy(celltable)
    # if len(shape) == 1, reshape to a single row
    if len(celltable.shape) == 1:
        celltable = np.reshape(celltable, (1, celltable.shape[0]))
    # convert all entries to string
    for i, x in enumerate(celltable.ravel()):
        celltable.ravel()[i] = str(x)
    # get max string length in each column
    max_len = []
    for x in celltable.transpose():
        max_len.append(np.max([len(y) for y in x]))
    # right-pad each cell with spaces to its column width
    for i, r in enumerate(celltable):
        for ii, c in enumerate(r):
            # BUGFIX: compare against the column maximum max_len[ii];
            # the previous max_len[i] used the row index, leaving cells
            # unpadded whenever the row-indexed column was shorter
            if len(c) < max_len[ii]:
                spaces = ''.join([' ' for n in range(max_len[ii] - len(c))])
                celltable[i][ii] = c + spaces
    # join strings with delim
    strtable = []
    if len(celltable.shape) > 1:
        for r in range(celltable.shape[0]):
            strtable.append(delim.join(celltable[r]))
        strtable = '\n'.join(strtable)
    else:
        strtable = delim.join(celltable)
    return strtable
def py2mat(A, filename, variable):
    ''' load from or save to matlab format

    Parameters:
    A - object, object to save (set to None if loading from filename)
    filename - str, file to load from or save to
    variable - str, variable name to load or save

    Returns:
    A - object, object converted from file or converted to matlab format

    Note: uses the removed numpy aliases np.object/np.bool, so this
    requires an older numpy (pre-1.24) to run unchanged.

    Example:
    A = {0: {'spm': {'temporal': {'st': {'nslices': {0: 28},
        'prefix': {0: u'a'},
        'refslice': {0: 1},
        'scans': {0: {0: u'<UNDEFINED>'}},
        'so': {0: 1, 1: 3, 2: 5, 3: 7, 4: 9, 5: 11, 6: 13, 7: 15, 8: 17,
               9: 19, 10: 21, 11: 23, 12: 25, 13: 27, 14: 2, 15: 4, 16: 6,
               17: 8, 18: 10, 19: 12, 20: 14, 21: 16, 22: 18, 23: 20, 24: 22,
               25: 24, 26: 26, 27: 28},
        'ta': {0: 1.9285714285714286},
        'tr': {0: 2}}}}}}
    '''
    # load from filename
    if A==None:
        # init out
        out = np.array([], np.object)
        # load filename as matlab dtype
        A = sio.loadmat(filename, mat_dtype=True)
        A = A[variable]
        # get substructs of A (all reference paths into the loaded struct)
        S0 = struct2sub(A)
        # for each path, build the python-side reference (S1) and remember
        # which prefixes point into matlab cell arrays (cell)
        S1 = np.empty(len(S0), dtype=np.object).tolist()
        cell = np.zeros(len(S0), dtype=np.bool).tolist()
        for i,S_ in enumerate(S0):
            S1[i] = []
            cell[i] = []
            for n in range(1, len(S_)):
                A_ = subsref(A, S_[:n])
                # cell index
                if A_.dtype == np.object:
                    # set single index
                    if A_.ndim == 1:
                        S1[i].append(S_[n])
                        cell[i].append(copy.deepcopy(S1[i]))
                    # set cell array
                    elif A_.shape[0] > 1:
                        S1[i].append(S_[n])
                        cell[i].append(copy.deepcopy(S1[i]))
                # field name
                elif A_.dtype.names != None:
                    # set fieldname (record dtype index -> field name)
                    if A_.ndim == 0:
                        S1[i].append(A_.dtype.names[S_[n]])
                    # set noncell array
                    elif A_.shape[0] > 1:
                        S1[i].append(S_[n])
                elif A_.ndim > 0 and A_.shape[0] > 1:
                    S1[i].append(S_[n])
        # copy each leaf value into the python-side structure
        for S0_, S1_ in zip(S0, S1):
            item = subsref(A, S0_)
            out = subsasgn(out, S1_, item, list)
        # re-wrap cell prefixes as numpy object arrays
        for C_ in cell:
            # first cell is implied
            for c in C_[1:]:
                out = subsasgn(out, c, np.array([subsref(out, c)], np.object))
    else: # copy A and convert to a matlab-saveable structure
        A = copy.deepcopy(A)
        # get substructs for A at each level
        S0 = struct2sub(A, dict_out=True)
        # set additional dimension for matlab (lists must be nested once)
        for k in S0.keys():
            for S_ in S0[k]:
                A_ = subsref(A, S_)
                # if list without following or preceding list, set extra dim
                if type(A_)==list and type(subsref(A, S_[:-1]))!=list and \
                        type(A_[0])!=list:
                    A = subsasgn(A, S_, [A_])
                    # refresh substructs after the in-place change
                    S0 = struct2sub(A, dict_out=True)
        # set dicts as record arrays with keys as dtype, deepest level first
        l = S0.keys()
        l.reverse()
        for k in l:
            for S_ in S0[k]:
                A_ = subsref(A, S_)
                # set dict to array with keys as dtype
                if type(A_) == dict:
                    A = subsasgn(A, S_, np.array([tuple(A_.values())],
                        np.dtype([(k, np.object) for k in A_.keys()])))
                    # refresh substructs after the in-place change
                    S0 = struct2sub(A, dict_out=True)
        # set out to dict using variable
        out = {variable: A}
        # save mat
        sio.savemat(filename, out)
    return out
def subsref(A, S):
    ''' return value from A using references in S

    Parameters:
    A - object, object to return value from
    S - list, indices/fields to reference to obtain value from A

    Returns:
    value - any, value indexed from A using S

    Example:
    A = {0: {'test': [9,8,7]}}
    S = [0, 'test', 1]
    value = subsref(A, S)
    value =
    8
    '''
    # work on a deep copy so the caller's object is never aliased
    value = copy.deepcopy(A)
    # walk the reference chain one step at a time
    for step in list(S):
        if type(step) == str and re.match('.*:.*', step) != None:
            # slice-style reference such as '0:2' -> evaluate as slice
            value = eval('value[{step}]'.format(step=step))
        else:
            value = value[step]
    return value
def subsasgn(A, S, C, append_type=None):
    ''' set value in A using reference in S

    Parameters:
    A - object, object to set value
    S - list, indices/fields to reference when setting value
    C - any, value to set in A at reference S
    append_type - type, type of iterable to append if needed (e.g., list)
        [default is None, sets to type(A)]

    Returns:
    A - object, updated object with value set at reference S

    Example:
    A = {0: {'spm': {'util': {'disp': {'data': '<UNDEFINED>'}}}}}
    S = [0, 'spm', 'util', 'disp', 'data']
    C = './mri/anatomical.nii'
    subsasgn(A, S, C)
    A =
    {0: {'spm': {'util': {'disp': {'data': './mri/anatomical.nii'}}}}}

    Note: Only tested for dict, list, and ndarray. If S == [], A is set to C
    '''
    # copy A so the caller's object is not mutated
    A = copy.deepcopy(A)
    value = A
    # def_val is what gets created when a referenced key/index is missing
    if append_type == None:
        def_val = type(A)([])
    else:
        def_val = append_type([])
    # ensure def_val has ndim > 0 (0-d arrays cannot be indexed/appended)
    if type(def_val).__module__ == np.__name__ and def_val.ndim == 0:
        def_val = np.array([None], dtype=A.dtype)
    # walk S, growing containers along the way so every step is indexable
    for i,S_ in enumerate(S):
        # add new key to dict
        if type(value) == dict and S_ not in value.keys():
            value[S_] = copy.deepcopy(def_val)
        # a string step on a non-dict replaces it with a fresh dict
        elif type(value) != dict and type(S_) == str:
            value = {}
            value[S_] = copy.deepcopy(def_val)
        # grow list up to index S_
        elif type(value) == list and S_ >= len(value):
            for _ in range(S_ - len(value) + 1):
                value.append(copy.deepcopy(def_val))
        # grow ndarray with None entries up to index S_
        elif type(value).__module__ == np.__name__:
            if value.ndim == 0:
                value = np.array([value])
            if S_ >= len(value):
                for _ in range(S_ - len(value) + 1):
                    value = np.append(value, None)
        # if None, replace with a list grown up to index S_
        elif value == None:
            value = []
            for _ in range(S_ - len(value) + 1):
                value.append([])
        # write the (possibly replaced/grown) container back into A at the
        # current prefix; exec is used to index an arbitrary path
        if i > 0 and len(S[:i]) > 0:
            exec('A' + sub2str(S[:i]) + '= value')
        else:
            A = value
        # descend one level; slice-style strings like '0:2' are evaluated
        if type(S_) == str and re.match('.*:.*', S_) != None:
            value = eval('value[{S_}]'.format(S_=S_))
        else: # index value using S_
            value = value[S_]
    # finally assign C at the complete reference
    if len(S) > 0:
        exec('A' + sub2str(S) + '= C')
    else: # simple set
        A = C
    return A
def sub2str(S):
    ''' convert a "substruct" to a "string representation" or vice versa
    Parameters:
    S - list or str, substruct/string representation to convert
    Returns:
    S - list or str, converted substruct/string representation
    Example 1:
    S = [0, 'field1', 0, 'field2', 1]
    str_rep = sub2str(S)
    str_rep =
    '[0]["field1"][0]["field2"][1]'
    Example 2:
    str_rep = '["field1"]["field2"][4]'
    S = sub2str(str_rep)
    S =
    ['field1', 'field2', 4]
    '''
    if type(S) == str:
        # string -> substruct: split on brackets, drop empty tokens,
        # convert digit tokens to ints and strip quotes from the rest
        tokens = [t for t in re.split('[\[\]]', S) if t != '']
        converted = []
        for token in tokens:
            if token.isdigit():
                converted.append(int(token))
            else:
                converted.append(re.sub('"', '', token))
        return converted
    # substruct -> string: copy, then render each key/index as a
    # bracketed component (strings are double-quoted, others via str())
    keys = list(S)
    rendered = []
    for key in keys:
        if type(key) == str:
            rendered.append('"%s"' % key)
        else:
            rendered.append(str(key))
    return '[' + ']['.join(rendered) + ']'
def struct2sub(A, r=np.inf, dict_out=False):
    ''' return all "substructs" from A through levels r
    Parameters:
    A - object, object to return substructs from
    r - number, number of levels to search when obtaining substructs. Returns
        substruct lists with maximum length of r + 1 (0 is first level)
        [default is np.inf, i.e. all levels of A]
    dict_out - bool, return each level's list of substructs as a dict keyed
        by level instead of only the deepest level
        [default is False]
    Returns:
    S - list, list of substructs for each value in A through levels r
    Example:
    A = {'test': {0: 12, 1: '2'}, 'test2': 3}
    r = 1
    S =
    [['test', 0], ['test', 1], ['test2']]
    '''
    # copy A so the list() conversion below cannot mutate the caller's object
    A = copy.deepcopy(A)
    # build the first level of substructs based on container type:
    # dict keys, list/tuple indices, or numpy array/record indices
    S = {0: []}
    if type(A) == dict:
        S[0] = [[S_] for S_ in A.keys()]
    elif type(A) == list or type(A) == tuple:
        S[0] = [[S_] for S_ in range(len(A))]
    elif type(A).__module__ == np.__name__:
        # non-scalar arrays and void (record) scalars index like lists
        if A.ndim > 0 or type(A) == np.void:
            A = list(A)
            S[0] = [[S_] for S_ in range(len(A))]
    # ensure list is not empty: a scalar yields the empty substruct []
    if len(S[0]) == 0:
        S[0] = [[],]
    # # if r is zero, return only the first level
    if r == 0:
        return S[0]
    # for each level, recurse one level deep per substruct and append the
    # child key/index to the parent substruct
    r_ = 0
    while r_ < r:
        S[r_+1] = []
        for S0 in S[r_]:
            for S1 in struct2sub(subsref(A, S0), 0):
                S[r_+1].append(S0 + S1)
            # NOTE(review): struct2sub(..., 0) always returns at least [[]]
            # (see "ensure list is not empty" above), so this branch looks
            # unreachable; it also appends the whole level list S[r_] rather
            # than the single substruct S0 -- confirm intended
            if len(struct2sub(subsref(A, S0), 0)) == 0:
                S[r_+1].append(S[r_])
        # stop when a level adds nothing new (all leaves reached)
        if S[r_] == S[r_+1]:
            S.pop(r_+1, None)
            break
        else:
            r_ += 1
    if dict_out: # return dict of every level keyed by depth
        return S
    else: # return S at the deepest level reached
        return S[r_]
def pebl_getfield(A, S=None, R=None, expr=None, fun=None, r=np.inf):
    ''' get values from object, A, using substructs or string representations
    Parameters:
    A - object, object to return values from
    Options:
    S - list, substruct to get value from A
        [default is None]
    R - list or str, string representation to get value from A
        [default is None]
    expr - str, expression to search string representations to get value
        from A
        [default is None]
    fun - dict, dict containing function to search for values within A. keys
        within the dict should contain 'fun', and integers corresponding to
        argument index (see Example 2). Each C will be input as the argument
        not contained in the dict keys (i.e. at index 0 for Example 2).
        [default is None]
    r - int, number of levels to search within A (each level is field or
        index reference)
        [default is np.inf]
    Returns:
    C - list, values returned from A
        (i.e. C[0] == subsref(A, S[0]) or eval('A' + R[0]))
    S - list, substructs used to return values from A
    R - list, string representations used to return values from A
    Example 1:
    A = {0: {'spm': {'util': {'disp': {'data': '<UNDEFINED>'}}}}}
    expr = '.*\["disp"]'
    C, S, R = pebl_getfield(A, expr=expr)
    C =
    array([{'data': '<UNDEFINED>'}], dtype=object)
    S =
    [[0, 'spm', 'util', 'disp']]
    R =
    ['[0]["spm"]["util"]["disp"]']
    Example 2:
    A = {'test1': {0: 3}, 'test2': [2,3,4,5], 'test3': []}
    fun = {'fun': np.equal, 1: 3}
    C, S, R = pebl_getfield(A, fun=fun)
    C =
    array([3, 3], dtype=object)
    S =
    [['test1', 0], ['test2', 1]]
    R =
    ['["test1"][0]', '["test2"][1]']
    '''
    # if S exists, copy it (wrapping a single substruct as a list of one)
    if S is not None:
        if type(S) != list or type(S[0]) != list:
            S = [S,]
        else:
            S = list(S)
    else:  # otherwise collect substructs of A at each requested level
        S = []
        if not np.iterable(r):
            r = [r,]
        for rr in r:
            S = S + struct2sub(A, rr)
    # if R exists, rebuild S from the string representations
    if R is not None:
        if not np.iterable(R):
            R = [R,]
        else:
            R = list(R)
        S = []
        for R_ in R:
            S.append(sub2str(R_))
    else:  # if R doesnt exist, set from S
        R = []
        for S_ in S:
            R.append(sub2str(S_))
    # narrow R using regex search over the string representations
    if expr is not None:
        tmp = list(R)
        R = []
        # copy expr (a single pattern becomes a list of one)
        if type(expr) == str:
            expr = [expr,]
        else:
            expr = list(expr)
        for e in expr:
            m = [re.findall(e, R_) for R_ in tmp]
            m = np.unique([m[0] for m in m if len(m) > 0])
            R = np.append(R, m)
        R = np.unique(R).tolist()
        # rebuild S to match the regex-filtered R
        S = []
        for R_ in R:
            S.append(sub2str(R_))
    # use subsref to get values
    C = []
    for S_ in S:
        C.append(subsref(A, S_))
    # filter values using a function
    if fun is not None:
        # copy fun (a bare callable becomes {'fun': callable})
        if type(fun) != dict:
            fun = {'fun': fun}
        else:
            fun = dict(fun)
        # np.bool was removed in NumPy 1.24; use the builtin bool dtype
        fnd = np.zeros(len(C), dtype=bool)
        # determine the argument positions and where each C_ is inserted
        key_ns = [k for k in fun.keys() if type(k) == int]
        if len(key_ns) == 0:
            # no fixed arguments: C_ is the only argument, at position 0
            c_idx = 0
            key_rng = range(1)
        else:
            key_rng = range(np.max(key_ns)+1)
            open_slots = [k for k in key_rng if k not in key_ns]
            if len(open_slots) == 0:
                # positions 0..max are all taken: C_ goes after the last
                # one; extend key_rng so C_ is actually passed to fun
                c_idx = np.max(key_ns)+1
                key_rng = range(c_idx+1)
            else:
                c_idx = open_slots[0]
        # for each C_ evaluate the function
        for i, C_ in enumerate(C):
            # set c_idx to C_
            fun[c_idx] = C_
            # assemble positional args for the call
            args = [fun[k] for k in key_rng]
            # evaluate function (NotImplemented counts as no match)
            tmp = fun['fun'](*args)
            if tmp is NotImplemented:
                fnd[i] = False
            else:
                fnd[i] = tmp
        # keep only matching indices
        # (np.object was removed in NumPy 1.24; use the builtin object)
        C = np.array(C, dtype=object)[fnd].tolist()
        S = np.array(S, dtype=object)[fnd].tolist()
        R = np.array(R, dtype=object)[fnd].tolist()
    # return C, S, R
    return C, S, R
def pebl_setfield(A, C, S=None, R=None, expr=None, fun=None, r=np.inf):
    ''' set values in object, A, using substructs or string representations
    Parameters:
    A - object, object to set values
    C - list, list of values to set in A
    S - list, substructs referencing location to set values in A
        [default is None]
    R - list, string representations referencing location to set values in A
        [default is None]
    expr - str, expression to search R in order to set values in A
        [default is None]
    fun - dict, dict to find locations in A to set values
        [default is None]
    r - int, number of levels in A to search if S or R are not set directly
        [default is np.inf]
    Returns:
    A - object, updated object with values set
    Note:
    See pebl_getfield for further description on Parameters.
    Example:
    A = {0: {'test1': [1,2,3]}}
    S = [0, 'test1', -1]
    C = []
    '''
    # init C: a non-list value becomes a one-element list, a list is copied
    if type(C) != list: # make iterable
        C = [C,]
    else: # copy
        C = list(C)
    # if no S and no R, discover target locations via pebl_getfield
    if S==None and R==None:
        _, S, R = pebl_getfield(A, expr=expr, fun=fun, r=r)
    # normalize R (None -> [], str -> [str], list -> copy)
    if R==None:
        R = []
    elif type(R) == str: # set as iterable
        R = [R,]
    else: # copy
        R = list(R)
    # normalize S; when absent, derive it from the R strings
    if S==None:
        S = []
        for R_ in R:
            S.append(sub2str(R_))
    elif type(S)!=list or type(S[0])!=list: # make iterable
        S = [S,]
    else: # copy
        S = list(S)
    # broadcast C across S: a single value (or single target) is repeated;
    # otherwise both are truncated to the shorter length
    if type(C) != list or len(C)==1 or len(S) == 1:
        C = np.repeat([C], len(S)).tolist()
    elif len(C) != len(S):
        C = C[:np.min([len(C),len(S)])]
        S = S[:np.min([len(C),len(S)])]
    # rebuild R for each assignment (computed but not returned)
    R = []
    for C_, S_ in zip(C, S):
        # update R
        R.append(sub2str(S_))
        # set the value at substruct S_ (subsasgn returns an updated copy)
        A = subsasgn(A, S_, C_)
    return A
def pebl_search(folder, expr, ftype, n_levels=np.inf, verbose=False):
    ''' search a folder, subfolders, and files for expr
    Parameters:
    folder - str, folder to begin search
    expr - str, expression to search within folders/files
    ftype - str, regex narrowing which file names are searched
        (or the literal string 'dir' to match entry names instead)
    n_levels - int, number of directory levels to search
        [default is np.inf]
    verbose - bool, print folder/file currently being searched
        [default is False]
    Returns:
    files - list, fullpath files that contained expression in name or text
    Example 1:
    folder = os.curdir
    expr = 'utils.*'
    ftype = 'dir'
    files = pebl_search(folder, expr, ftype)
    files =
    ['/pebl/pebl/functions/utils.py',
     '/pebl/pebl/functions/utils.pyc']
    Example 2:
    folder = os.curdir
    expr = 'def pebl_search'
    ftype = '.*\.py$'
    files = pebl_search(folder, expr, ftype)
    files =
    ['/pebl/pebl/functions/utils.py']
    '''
    # init files output
    files = np.array([])
    # set folder to fullpath
    folder = os.path.abspath(folder)
    # get names of files/folders in folder
    names = os.listdir(folder)
    # indices of directories
    # (np.bool was removed in NumPy 1.24; use the builtin bool dtype)
    dir_tf = np.array([os.path.isdir(os.path.join(folder, n)) for n in names],
                      dtype=bool)
    # entries whose name matches the ftype regex
    matches = [re.match(ftype, n) for n in names]
    ftype_tf = np.array([m is not None for m in matches], dtype=bool)
    # files to text-search: non-directories matching ftype
    file_tf = np.invert(dir_tf) * ftype_tf
    # 'dir' mode: match entry names against expr directly
    if ftype == 'dir':
        fnd = np.array([re.match(expr, n) is not None for n in names],
                       dtype=bool)
        for i in np.where(fnd)[0]:
            files = np.append(files, os.path.join(folder, names[i]))
    else: # for each matching file, search its text for expr
        for i in np.where(file_tf)[0]:
            if verbose:
                print('Searching {name}'.format(name=names[i]))
            with open(os.path.join(folder, names[i]), 'r') as f:
                txt = f.read()
            if len(re.findall(expr, txt)) > 0:
                files = np.append(files, os.path.join(folder, names[i]))
    # recurse into subdirectories while levels remain
    if n_levels > 0 and np.any(dir_tf):
        for i in np.where(dir_tf)[0]:
            if verbose:
                print('Searching {dir}'.format(dir=names[i]))
            files = np.append(files, pebl_search(os.path.join(folder, names[i]),
                              expr, ftype, n_levels=n_levels-1, verbose=verbose))
    # return files as list
    return files.tolist()
|
|
# Here's a probably-not-very-good port of the Adafruit Arduino library for
# this sensor. The Adafruit library is located here:
# https://github.com/adafruit/Adafruit_TCS34725
# This port is for the Beaglebone Black and the Adafruit_BBIO and Adafruit_I2C
# libraries. By AKA MEDIA SYSTEM
from Adafruit_I2C import Adafruit_I2C
import time
class TCS34725():
    """Driver for the TAOS TCS34725 RGB/clear color sensor over I2C.

    Port of the Adafruit Arduino library
    (https://github.com/adafruit/Adafruit_TCS34725) for the Beaglebone
    Black using the Adafruit_BBIO and Adafruit_I2C libraries.
    """

    TCS34725_ADDRESS = 0x29
    TCS34725_COMMAND_BIT = 0x80
    TCS34725_ENABLE = 0x00
    TCS34725_ENABLE_AIEN = 0x10  # RGBC Interrupt Enable
    TCS34725_ENABLE_WEN = 0x08  # Wait enable - Writing 1 activates the wait timer
    TCS34725_ENABLE_AEN = 0x02  # RGBC Enable - Writing 1 actives the ADC, 0 disables it
    TCS34725_ENABLE_PON = 0x01  # Power on - Writing 1 activates the internal oscillator, 0 disables it
    TCS34725_ATIME = 0x01  # Integration time
    TCS34725_WTIME = 0x03  # Wait time (if TCS34725_ENABLE_WEN is asserted
    TCS34725_WTIME_2_4MS = 0xFF  # WLONG0 = 2.4ms WLONG1 = 0.029s
    TCS34725_WTIME_204MS = 0xAB  # WLONG0 = 204ms WLONG1 = 2.45s
    TCS34725_WTIME_614MS = 0x00  # WLONG0 = 614ms WLONG1 = 7.4s
    TCS34725_AILTL = 0x04  # Clear channel lower interrupt threshold
    TCS34725_AILTH = 0x05
    TCS34725_AIHTL = 0x06  # Clear channel upper interrupt threshold
    TCS34725_AIHTH = 0x07
    TCS34725_PERS = 0x0C  # Persistence register - basic SW filtering mechanism for interrupts
    TCS34725_PERS_NONE = 0b0000  # Every RGBC cycle generates an interrupt
    TCS34725_PERS_1_CYCLE = 0b0001  # 1 clean channel value outside threshold range generates an interrupt
    TCS34725_PERS_2_CYCLE = 0b0010  # 2 clean channel values outside threshold range generates an interrupt
    TCS34725_PERS_3_CYCLE = 0b0011  # 3 clean channel values outside threshold range generates an interrupt
    TCS34725_PERS_5_CYCLE = 0b0100  # 5 clean channel values outside threshold range generates an interrupt
    TCS34725_PERS_10_CYCLE = 0b0101  # 10 clean channel values outside threshold range generates an interrupt
    TCS34725_PERS_15_CYCLE = 0b0110  # 15 clean channel values outside threshold range generates an interrupt
    TCS34725_PERS_20_CYCLE = 0b0111  # 20 clean channel values outside threshold range generates an interrupt
    TCS34725_PERS_25_CYCLE = 0b1000  # 25 clean channel values outside threshold range generates an interrupt
    TCS34725_PERS_30_CYCLE = 0b1001  # 30 clean channel values outside threshold range generates an interrupt
    TCS34725_PERS_35_CYCLE = 0b1010  # 35 clean channel values outside threshold range generates an interrupt
    TCS34725_PERS_40_CYCLE = 0b1011  # 40 clean channel values outside threshold range generates an interrupt
    TCS34725_PERS_45_CYCLE = 0b1100  # 45 clean channel values outside threshold range generates an interrupt
    TCS34725_PERS_50_CYCLE = 0b1101  # 50 clean channel values outside threshold range generates an interrupt
    TCS34725_PERS_55_CYCLE = 0b1110  # 55 clean channel values outside threshold range generates an interrupt
    TCS34725_PERS_60_CYCLE = 0b1111  # 60 clean channel values outside threshold range generates an interrupt
    TCS34725_CONFIG = 0x0D
    TCS34725_CONFIG_WLONG = 0x02  # Choose between short and long (12x wait times via TCS34725_WTIME
    TCS34725_CONTROL = 0x0F  # Set the gain level for the sensor
    TCS34725_ID = 0x12  # 0x44 = TCS34721/TCS34725, 0x4D = TCS34723/TCS34727
    TCS34725_STATUS = 0x13
    TCS34725_STATUS_AINT = 0x10  # RGBC Clean channel interrupt
    TCS34725_STATUS_AVALID = 0x01  # Indicates that the RGBC channels have completed an integration cycle
    TCS34725_CDATAL = 0x14  # Clear channel data
    TCS34725_CDATAH = 0x15
    TCS34725_RDATAL = 0x16  # Red channel data
    TCS34725_RDATAH = 0x17
    TCS34725_GDATAL = 0x18  # Green channel data
    TCS34725_GDATAH = 0x19
    TCS34725_BDATAL = 0x1A  # Blue channel data
    TCS34725_BDATAH = 0x1B

    # ATIME register value -> worst-case integration time in seconds
    # (the TCS34725_INTEGRATIONTIME_* constants of the Adafruit library):
    # 0xFF: 2.4ms/1 cycle,  0xF6: 24ms/10 cycles,  0xEB: 50ms/20 cycles,
    # 0xD5: 101ms/42 cycles, 0xC0: 154ms/64 cycles, 0x00: 700ms/256 cycles
    _INTEGRATION_DELAYS = {
        0xFF: 0.0024,
        0xF6: 0.024,
        0xEB: 0.050,
        0xD5: 0.101,
        0xC0: 0.154,
        0x00: 0.700,
    }

    _tcs34725Initialised = False  # set True once begin() accepts the chip ID
    _tcs34725Gain = 0  # last value written to the CONTROL (gain) register
    _tcs34725IntegrationTime = 0x00  # last value written to the ATIME register

    def __init__(self, *args, **kwargs):
        # open the I2C connection to the sensor's fixed address
        self.i2c = Adafruit_I2C(self.TCS34725_ADDRESS)

    def write8(self, reg, val):
        """Write one byte to a sensor register (command bit set automatically)."""
        self.i2c.write8(self.TCS34725_COMMAND_BIT | reg, val & 0xFF)

    def read16(self, reg):
        """Read a 16-bit value (low byte first) from a sensor register pair."""
        return self.i2c.readU16Rev(self.TCS34725_COMMAND_BIT | reg)

    def read8(self, reg):
        """Read one unsigned byte from a sensor register."""
        return self.i2c.readU8(self.TCS34725_COMMAND_BIT | reg)

    def _waitForIntegration(self):
        """Sleep long enough for the current RGBC integration cycle to finish."""
        # unknown ATIME values fall back to the longest delay
        time.sleep(self._INTEGRATION_DELAYS.get(self._tcs34725IntegrationTime, 0.700))

    def begin(self):
        """Check the chip ID, then configure and power up the sensor.

        Returns True on success, False when the ID register is unexpected.
        """
        device_id = self.read8(self.TCS34725_ID)
        # 0x44 = TCS34721/TCS34725, 0x4D = TCS34723/TCS34727 (see TCS34725_ID)
        if device_id not in (0x44, 0x4D):
            print('did not get the expected response from sensor: %s' % device_id)
            return False
        self._tcs34725Initialised = True
        self.setIntegrationTime(self._tcs34725IntegrationTime)
        self.setGain(0)
        self.enable()
        return True

    def enable(self):
        """Power on the internal oscillator, then enable the RGBC ADC."""
        self.write8(self.TCS34725_ENABLE, self.TCS34725_ENABLE_PON)
        # brief warm-up after PON before asserting AEN
        time.sleep(0.003)
        self.write8(self.TCS34725_ENABLE,
                    (self.TCS34725_ENABLE_PON | self.TCS34725_ENABLE_AEN))

    def disable(self):
        """Clear the PON and AEN bits, preserving the other ENABLE bits."""
        reg = self.i2c.readU8(self.TCS34725_COMMAND_BIT | self.TCS34725_ENABLE)
        self.write8(self.TCS34725_ENABLE,
                    (reg & ~(self.TCS34725_ENABLE_PON | self.TCS34725_ENABLE_AEN)))

    def setIntegrationTime(self, theTime):
        """Write ATIME; invalid values fall back to 0x00 (700 ms / 256 cycles)."""
        if theTime not in [0xFF, 0xF6, 0xEB, 0xD5, 0xC0, 0x00]:
            print('setting integration time to 0x00, %s is illegal' % theTime)
            theTime = 0x00
        self.write8(self.TCS34725_ATIME, theTime)
        self._tcs34725IntegrationTime = theTime

    def setGain(self, gain):
        """Write the gain register: 0=1x, 1=4x, 2=16x, 3=60x; invalid -> 0."""
        if gain not in [0, 1, 2, 3]:
            print('setting gain to 0, %s is illegal' % gain)
            gain = 0
        self.write8(self.TCS34725_CONTROL, gain)
        self._tcs34725Gain = gain

    def getStatus(self):
        """Read the raw STATUS register."""
        return self.i2c.readU8(self.TCS34725_COMMAND_BIT | self.TCS34725_STATUS)

    def getRawData(self):
        """Return raw (clear, r, g, b) counts, waiting out one integration."""
        c = self.read16(self.TCS34725_CDATAL)
        r = self.read16(self.TCS34725_RDATAL)
        g = self.read16(self.TCS34725_GDATAL)
        b = self.read16(self.TCS34725_BDATAL)
        self._waitForIntegration()
        return c, r, g, b

    def getRawRGBData(self):
        """Return raw (r, g, b) counts only, waiting out one integration."""
        r = self.read16(self.TCS34725_RDATAL)
        g = self.read16(self.TCS34725_GDATAL)
        b = self.read16(self.TCS34725_BDATAL)
        self._waitForIntegration()
        return r, g, b

    def getRGBData(self):
        """Return (clear, r, g, b) with r/g/b normalised to 0-255 by channel sum.

        When all three channels read zero the scaled values are returned
        as 0 instead of raising ZeroDivisionError.
        """
        c = self.read16(self.TCS34725_CDATAL)
        r = self.read16(self.TCS34725_RDATAL)
        g = self.read16(self.TCS34725_GDATAL)
        b = self.read16(self.TCS34725_BDATAL)
        sumL = r + g + b
        print('sumL is %s' % sumL)
        if sumL == 0:
            # total darkness: nothing to normalise
            r = g = b = 0
        else:
            r = int(256 * (r / float(sumL)))
            g = int(256 * (g / float(sumL)))
            b = int(256 * (b / float(sumL)))
        self._waitForIntegration()
        return c, r, g, b

    def getWebColors(self, maximum):
        """Map raw r/g/b onto 0-255 given a calibrated per-channel maximum.

        Convenience function that doesn't work well: 'maximum' should be
        the max observed value for each channel, which is a pain to
        calibrate manually, so this probably isn't useful.
        """
        r = self.read16(self.TCS34725_RDATAL)
        g = self.read16(self.TCS34725_GDATAL)
        b = self.read16(self.TCS34725_BDATAL)
        self._waitForIntegration()
        r = int(self.mapVals(r, 0, maximum, 0, 255))
        g = int(self.mapVals(g, 0, maximum, 0, 255))
        b = int(self.mapVals(b, 0, maximum, 0, 255))
        return r, g, b

    def mapVals(self, val, inMin, inMax, outMin, outMax):
        """Linearly rescale val from [inMin, inMax] to [outMin, outMax], clamped."""
        # float() prevents Python 2 integer division from truncating the
        # ratio to 0 (which previously made every mapped value == outMin)
        toRet = outMin + (outMax - outMin) * (float(val - inMin) / (inMax - inMin))
        return self.clamp(toRet, outMin, outMax)

    def clamp(self, val, min, max):
        """Clamp val into [min, max] (parameter names kept for compatibility)."""
        if (val < min):
            val = min
        if (val > max):
            val = max
        return val

    def calculateColorTemperature(self, r, g, b):
        """Approximate correlated color temperature (K) from raw r/g/b counts.

        Same math as the Adafruit C library.
        """
        # 1. Map RGB values to their XYZ counterparts.
        # Based on 6500K fluorescent, 3000K fluorescent
        # and 60W incandescent values for a wide range.
        # Note: Y = Illuminance or lux
        X = (-0.14282 * r) + (1.54924 * g) + (-0.95641 * b)
        Y = (-0.32466 * r) + (1.57837 * g) + (-0.73191 * b)
        Z = (-0.68202 * r) + (0.77073 * g) + (0.56332 * b)
        # 2. Calculate the chromaticity co-ordinates
        xc = (X) / (X + Y + Z)
        yc = (Y) / (X + Y + Z)
        # 3. Use McCamy's formula to determine the CCT
        n = (xc - 0.3320) / (0.1858 - yc)
        # Calculate the final CCT
        cct = (449.0 * pow(n, 3)) + (3525.0 * pow(n, 2)) + (6823.3 * n) + 5520.33
        return cct

    def calculateLux(self, r, g, b):
        """Approximate illuminance (lux) using the Adafruit Y-channel weights."""
        return ((-0.32466 * r) + (1.57837 * g) + (-0.73191 * b))

    def setInterrupt(self, theBool):
        """Set (True) or clear (False) the RGBC interrupt enable (AIEN) bit."""
        r = self.read8(self.TCS34725_ENABLE)
        if theBool:
            r |= self.TCS34725_ENABLE_AIEN
        else:
            r &= ~self.TCS34725_ENABLE_AIEN
        self.write8(self.TCS34725_ENABLE, r)

    def clearInterrupt(self):
        """Attempt to clear a pending RGBC interrupt.

        NOTE(review): this writes the value 0x66 to register
        (COMMAND_BIT | 0x29), whereas the Adafruit C library sends the
        special-function command byte (0x80 | 0x66) on its own -- verify
        against the datasheet before relying on this method.
        """
        self.write8(self.TCS34725_ADDRESS, 0x66)

    def setInterruptLimits(self, lo, hi):
        """Placeholder for the clear-channel threshold registers (not implemented)."""
        pass
|
|
#!/usr/bin/env python2.7
import os
import sys
import tempfile
import shutil
from subprocess import call, check_output, CalledProcessError
import tarfile
from gzip import GzipFile
import StringIO
import argparse
# Base URL of the official nodejs download mirror.
NODEJS_DIST = 'http://nodejs.org/dist'
# Host platform identifier ('darwin', 'linux*', 'win32', ...).
PLATFORM = sys.platform
def download(url, filename, verbose=True):
    """Download url to filename; exits the whole process (status 1) on failure.

    url - str, source URL
    filename - str, local destination path
    verbose - bool, print a progress line before downloading
    Returns the destination filename.
    """
    # function-local import of the Python 2 urllib2 API
    from urllib2 import urlopen, HTTPError, URLError
    try:
        connection = urlopen(url)
    except HTTPError as e:
        print 'Failed downloading %s: HTTPError %s' % (url, e.code)
        exit(1)
    except URLError as e:
        print 'Failed downloading %s: URLError %s' % (url, e)
        exit(1)
    if verbose:
        print 'Downloading: %s -> %s' % (url, filename)
    # write the whole response body in one shot (binary mode)
    with open(filename, 'wb') as output:
        output.write(connection.read())
    return filename
#
#
def nodejs_get_version(allow_system_node, prefix):
    """Return the installed node version string (e.g. 'v0.8.14'), or ''.

    allow_system_node - bool, query the node found on PATH instead of prefix
    prefix - str, install prefix containing bin/ (unix) or Scripts/ (win32)
    """
    try:
        if allow_system_node:
            return str(check_output('node --version', shell=True)).rstrip()
        elif PLATFORM == 'win32':
            node_path = os.path.join(prefix, 'Scripts', 'node')
            return str(check_output('%s --version' % node_path, shell=True)).rstrip()
        else:
            node_path = os.path.join(prefix, 'bin', 'node')
            # 'test -x' makes the command fail cleanly when node is absent
            return str(check_output('test -x %s && %s --version' % (node_path, node_path),
                                    shell=True)).rstrip()
    except CalledProcessError:
        # node missing or not executable: report "no version installed"
        return ''
#
#
def nodejs_install_binary_win32(version, filename, prefix):
    """Install node.exe plus the bundled npm module on Windows.

    version - str, nodejs version tag, e.g. 'v0.8.14'
    filename - str or None, path to an already-downloaded node.exe;
        when None the binary is downloaded from NODEJS_DIST
    prefix - str, virtualenv-style prefix; files land under <prefix>/Scripts
    """
    # place node.exe into the Scripts directory
    if filename is None:
        url = '%s/%s/node.exe' % (NODEJS_DIST, version)
        download(url, '%s/Scripts/node.exe' % prefix)
    else:
        shutil.copyfile(filename, '%s/Scripts/node.exe' % prefix)
    # grab a temporary file *name*; the file itself is deleted on close and
    # the name is reused as the download target below
    with tempfile.NamedTemporaryFile(suffix='.tar.gz') as f:
        tmpfile = f.name
        f.close()
    # download the source and extract the npm module
    basename = 'node-%s' % version
    srcurl = '%s/%s/%s.tar.gz' % (NODEJS_DIST, version, basename)
    download(srcurl, tmpfile)
    depsprefix = '%s/deps/' % basename
    npmprefix = '%snpm' % depsprefix
    moduledir = os.path.join(prefix, 'Scripts', 'node_modules/')
    with GzipFile(tmpfile, mode='rb') as gzipfile:
        tardata = gzipfile.read()
    with tarfile.open(fileobj=StringIO.StringIO(tardata), mode='r') as tarobj:
        # keep only regular files under deps/npm, remapped into moduledir
        entries = tarobj.getmembers()
        npmfiles = [(e.name.replace(depsprefix, moduledir), e) for e in entries
                    if e.isfile() and e.name.startswith(npmprefix)]
        for target, npmfile in npmfiles:
            tarentry = tarobj.extractfile(npmfile)
            if not os.path.exists(os.path.dirname(target)):
                os.makedirs(os.path.dirname(target))
            # 'wb': tar members are binary data; text mode would corrupt
            # them with CRLF newline translation on Windows
            with open(target, 'wb') as output:
                output.write(tarentry.read())
    # expose npm.cmd at the top of Scripts/ so it is on the env's PATH
    shutil.copyfile('%s/Scripts/node_modules/npm/bin/npm.cmd' % prefix,
                    '%s/Scripts/npm.cmd' % prefix)
#
#
def nodejs_install_binary_unix(version, platform, prefix):
    """Download a prebuilt nodejs tarball and extract it into prefix.

    version - str, nodejs version tag, e.g. 'v0.8.14'
    platform - str, platform component of the tarball name (e.g. 'darwin')
    prefix - str, directory the tarball contents are extracted into
    Raises Exception when the tar extraction fails.
    """
    basename = 'node-%s-%s-x86' % (version, platform)
    url = '%s/%s/%s.tar.gz' % (NODEJS_DIST, version, basename)
    # grab a temporary file *name*; the file is deleted on close and the
    # name is reused as the download target
    with tempfile.NamedTemporaryFile(suffix='.tar.gz') as zipfile:
        filename = zipfile.name
        zipfile.close()
    download(url, filename)
    # skip documentation files bundled in the tarball
    excludes = ['ChangeLog', 'LICENSE', 'README*']
    # --strip-components 1 drops the leading node-<ver>-<plat> directory
    tar_cmd = 'tar -xzf %s --strip-components 1 -C %s %s' \
        % (filename, prefix, ' '.join(['--exclude "%s"' % e for e in excludes]))
    print 'Executing: %s' % tar_cmd
    if 0 != call(tar_cmd, shell=True):
        raise Exception('failed to extract nodejs')
#
#
def nodejs_install_source_unix(version, prefix):
    """Download the nodejs source tarball and build/install it into prefix.

    version - str, nodejs version tag, e.g. 'v0.8.14'
    prefix - str, passed to ./configure --prefix
    Exits the process (status 1) when any build step fails.
    """
    basename = 'node-%s' % version
    url = '%s/%s/%s.tar.gz' % (NODEJS_DIST, version, basename)
    tmpd = tempfile.mkdtemp()
    filename = os.path.join(tmpd, '%s.tar.gz' % basename)
    download(url, filename)
    # small helper: run a shell command in cwd and echo it first
    def docall(cmd, cwd):
        print 'Executing: %s' % cmd
        return call(cmd, shell=True, cwd=cwd)
    srcdir = os.path.join(tmpd, basename)
    # classic untar / configure / make / make install pipeline;
    # short-circuits on the first failing step
    if 0 != docall('tar -xzf %s.tar.gz' % basename, tmpd) or \
       0 != docall('./configure --prefix=%s' % prefix, srcdir) or \
       0 != docall('make V= -j 5', srcdir) or \
       0 != docall('make install', srcdir):
        print 'Error building nodejs from source.'
        exit(1)
    # Clean up the entire temporary directory
    shutil.rmtree(tmpd)
############################################################
def typescript_install(version, prefix):
    """Globally install the TypeScript compiler using the prefix's npm.

    version - str or None, typescript version to pin (None = latest)
    prefix - str, install prefix holding bin/npm (unix) or Scripts/npm.cmd
    Raises Exception when npm exits non-zero.
    """
    # locate the npm launcher for this platform
    if 'win32' == PLATFORM:
        npm_path = '%s\\Scripts\\npm.cmd' % prefix
    else:
        npm_path = '%s/bin/npm' % prefix
    # pin the requested version when one was given
    package_specifier = 'typescript' if version is None else 'typescript@%s' % version
    exit_code = call('%s install -g %s' % (npm_path, package_specifier), shell=True)
    if exit_code != 0:
        raise Exception('failed to install typescript via npm')
############################################################
#
#
#
def main():
    """Parse CLI arguments and drive the nodejs / typescript installation.

    Returns 0 on success (install helpers exit the process on failure).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--version', default='v0.8.14')
    parser.add_argument('--typescript', action='store_true', help='Install TypeScript compiler package')
    parser.add_argument('--typescript-version', default='0.9.1')
    parser.add_argument('-f', '--force', action='store_true')
    parser.add_argument('--allow-system-node', action='store_true',
                        help='Allow use of an existing node install')
    parser.add_argument('--prefix')
    parser.add_argument('downloaded_file', nargs='?', default=None)
    args = parser.parse_args(sys.argv[1:])
    filename = args.downloaded_file
    version = args.version
    # default prefix is a virtualenv-style ./env directory
    if args.prefix:
        prefix = args.prefix
    else:
        prefix = os.path.abspath('env')
    # print 'Version: %s (current=%s), PLATFORM: %s' \
    #     % (version, current_version, PLATFORM)
    install_nodejs = True
    # unless forced, skip the install when the requested version is present
    if not args.force:
        current_version = nodejs_get_version(args.allow_system_node, prefix)
        if version == current_version:
            print 'NodeJS version %s already installed.' % version
            install_nodejs = False
    # a version of '-' means "do not install nodejs at all"
    if install_nodejs and version != '-':
        print 'Installing nodejs-%s' % version
        if 'darwin' == PLATFORM:
            # nodejs_install_source_unix(version)
            nodejs_install_binary_unix(version, PLATFORM, prefix)
        elif PLATFORM.startswith('linux'):
            nodejs_install_source_unix(version, prefix)
            # nodejs_install_binary_unix(version, 'linux')
        elif 'win32' == PLATFORM:
            nodejs_install_binary_win32(version, filename, prefix)
    else:
        print 'Skipping nodejs.'
    # typescript is opt-in; '-' likewise disables it
    ts_version = args.typescript_version
    if (not args.typescript) or ('-' == ts_version):
        print 'Skipping typescript.'
    else:
        print 'Installing typescript-%s' % ts_version
        typescript_install(ts_version, prefix)
    return 0
# Script entry point: propagate main()'s return value as the exit status.
if '__main__' == __name__:
    exit(main())
|
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import json
import copy
import requests
from sixgill.sixgill_request_classes.sixgill_auth_request import SixgillAuthRequest
from sixgill.sixgill_actionable_alert_client import SixgillActionableAlertClient
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBALS/PARAMS '''
# Sixgill channel identifier sent with every API request
CHANNEL_CODE = '7698e8287dfde53dcd13082be750a85a'
# hard ceiling on incidents fetched/returned per run
MAX_INCIDENTS = 100
DEFAULT_INCIDENTS = '50'
# hard ceiling on the first-fetch look-back window (days)
MAX_DAYS_BACK = 30
DEFAULT_DAYS_BACK = '1'
# timestamp format used by the Sixgill API
DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'
# timestamp format expected by Demisto incident fields
DEMISTO_DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
# map Sixgill threat levels to Demisto severity values
THREAT_LEVEL_TO_SEVERITY = {
    'imminent': 3,
    'emerging': 2,
    'unknown': 0
}
# map Sixgill alert status names to Demisto incident status codes
TO_DEMISTO_STATUS = {
    'in_treatment': 1,
    'resolved': 2,
    'treatment_required': 0
}
# NOTE(review): 'insecure' defaults to True here, so TLS verification is OFF
# unless the instance explicitly sets insecure=False -- confirm intended
VERIFY = not demisto.params().get("insecure", True)
# shared HTTP session reused across all API calls
SESSION = requests.Session()
''' HELPER FUNCTIONS '''
def get_incident_init_params():
    """Build the alert-filter kwargs from instance params, dropping empties."""
    config = demisto.params()
    candidates = {
        'threat_level': config.get('threat_level', None),
        'threat_type': config.get('threat_type', None)
    }
    filtered = {}
    for key, value in candidates.items():
        if value:
            filtered[key] = value
    return filtered
def item_to_incident(item_info, sixgill_alerts_client):
    """Convert one actionable alert (and any sub-alerts) into incident dicts."""
    base_incident: Dict[str, Any] = {}
    # fields shared by the alert and all of its sub-alerts
    add_sub_alerts_shared_fields(base_incident, item_info)
    sub_alerts = item_info.pop('sub_alerts', None)
    if sub_alerts:
        # each sub-alert becomes its own incident, merged over the parent
        alert_items = []
        for sub_alert in sub_alerts:
            merged = copy.deepcopy(item_info)
            merged.update(sub_alert)
            alert_items.append(merged)
    else:
        alert_items = [item_info]
    incidents = []
    for alert_item in alert_items:
        incident = copy.deepcopy(base_incident)
        # fill in the per-(sub-)alert fields
        add_sub_alerts_fields(incident, alert_item, sixgill_alerts_client)
        incident['rawJSON'] = json.dumps(alert_item)
        incidents.append(incident)
    return incidents
def add_sub_alerts_shared_fields(incident, item_info):
    """Populate incident fields common to an alert and all of its sub-alerts."""
    incident['name'] = item_info.get('title', 'Cybersixgill Alert')
    # convert the Sixgill timestamp into Demisto's expected format
    occurred = datetime.strptime(item_info.get('date'), DATETIME_FORMAT)
    incident['occurred'] = occurred.strftime(DEMISTO_DATETIME_FORMAT)
    threat_level = item_info.get('threat_level', 'unknown')
    incident['severity'] = THREAT_LEVEL_TO_SEVERITY[threat_level]
    recommendations = item_info.get('recommendations', [])
    incident['CustomFields'] = {
        'cybersixgillthreatlevel': threat_level,
        'cybersixgillthreattype': item_info.get('threats', []),
        'cybersixgillassessment': item_info.get('assessment', None),
        'cybersixgillrecommendations': '\n\n-----------\n\n'.join(recommendations),
        'incidentlink': f"https://portal.cybersixgill.com/#/?actionable_alert={item_info.get('id', '')}"
    }
def add_sub_alerts_fields(incident, item_info, sixgill_alerts_client):
    """Fill per-(sub-)alert fields: status, details and custom fields.

    incident - dict pre-populated by add_sub_alerts_shared_fields
    item_info - the (sub-)alert payload from the Sixgill API
    sixgill_alerts_client - client used to fetch the alert's full content
    """
    status = item_info.get('status', {}).get('name', 'treatment_required')
    incident['status'] = TO_DEMISTO_STATUS[status]
    content_item = {'creator': None, 'title': '', 'content': '', 'description': item_info.get('description', '')}
    # cve alert: default to {} so a missing 'additional_info' cannot raise
    # TypeError on the membership test (matches the .get default used below)
    if item_info.get('es_id') == 'Not Applicable' and 'cve_id' in item_info.get('additional_info', {}):
        content_item['content'] = f'https://portal.cybersixgill.com/#/cve/{item_info.get("additional_info",{}).get("cve_id", "")}'
    else:
        content = sixgill_alerts_client.get_actionable_alert_content(actionable_alert_id=item_info.get('id'),
                                                                     aggregate_alert_id=item_info.get('aggregate_alert_id', None))
        # get item full content
        content = content.get('items', None)
        if content:
            if content[0].get('_id'):
                # es-backed alert: pick the source item matching this es_id
                es_items = [item['_source'] for item in content if item['_id'] == item_info['es_id']]
                if es_items:
                    content_item['title'] = es_items[0].get('title')
                    content_item['content'] = es_items[0].get('content')
                    content_item['creator'] = es_items[0].get('creator')
            else:
                # github alert
                content_item['content'] = '\n\n-----------\n\n'.join(
                    [f'Repository name: {github_item.get("Repository name", "")}\nCustomer Keywords:'
                     f' {github_item.get("Customer Keywords", "")}\n URL: {github_item.get("URL", "")}'
                     for github_item in content])
    incident['details'] = f"{content_item.get('description')}\n\n{content_item.get('title', '')}\n" \
                          f"\n{content_item.get('content', '')}"
    # collect every 'matched_*' asset list from additional_info
    triggered_assets = []
    for key, value in item_info.get('additional_info', {}).items():
        if 'matched_' in key:
            triggered_assets.extend(value)
    incident['CustomFields'].update({
        'cybersixgillstatus': status.replace('_', ' ').title(),
        'cybersixgillsite': item_info.get('site', None),
        'cybersixgillactor': content_item.get('creator', None),
        'cybersixgilltriggeredassets': triggered_assets
    })
''' COMMANDS + REQUESTS FUNCTIONS '''
def test_module():
    """
    Performs basic Auth request
    """
    auth_request = SixgillAuthRequest(demisto.params()['client_id'],
                                      demisto.params()['client_secret'],
                                      CHANNEL_CODE).prepare()
    response = SESSION.send(request=auth_request, verify=VERIFY)
    if not response.ok:
        raise Exception("Auth request failed - please verify client_id, and client_secret.")
def fetch_incidents():
    """Fetch new Sixgill actionable alerts and push them as Demisto incidents.

    Uses getLastRun()['last_fetch_time'] as the high-water mark; on the
    first run it looks back 'first_fetch_days' (capped at MAX_DAYS_BACK).
    """
    last_run = demisto.getLastRun()
    if 'last_fetch_time' in last_run:
        last_fetch_time = datetime.strptime(last_run['last_fetch_time'], DATETIME_FORMAT)
        demisto.info(f'Found last run, fetching new alerts from {last_fetch_time}')
    else:
        # first run: bounded look-back window
        days_back = int(demisto.params().get('first_fetch_days', DEFAULT_DAYS_BACK))
        if days_back > MAX_DAYS_BACK:
            demisto.info(f'Days back({days_back}) is larger than the maximum, setting to {MAX_DAYS_BACK}')
            days_back = MAX_DAYS_BACK
        last_fetch_time = datetime.now() - timedelta(days=days_back)
        demisto.info(f'First run, fetching alerts from {last_fetch_time}')
    # bound the number of incidents returned this run
    max_incidents_to_return = int(demisto.params().get('max_fetch', DEFAULT_INCIDENTS))
    if max_incidents_to_return > MAX_INCIDENTS:
        demisto.info(f'Max incidents({max_incidents_to_return}) is larger than the maximum, setting to {MAX_INCIDENTS}')
        max_incidents_to_return = MAX_INCIDENTS
    sixgill_alerts_client = SixgillActionableAlertClient(client_id=demisto.params()['client_id'],
                                                         client_secret=demisto.params()['client_secret'],
                                                         channel_id=CHANNEL_CODE,
                                                         logger=demisto,
                                                         session=SESSION,
                                                         verify=VERIFY)
    filter_alerts_kwargs = get_incident_init_params()
    incidents = []
    # alerts come back newest-first
    items = sixgill_alerts_client.get_actionable_alerts_bulk(limit=MAX_INCIDENTS, **filter_alerts_kwargs)
    if len(items) > 0:
        newest_incident_date = datetime.strptime(items[0].get('date'), DATETIME_FORMAT)
        offset = 0
        items_to_add = []
        if newest_incident_date > last_fetch_time:
            # page through alerts, collecting all newer than last fetch time;
            # stop paging once a page contains any already-seen alert
            while items:
                for item in items:
                    if datetime.strptime(item.get('date'), DATETIME_FORMAT) > last_fetch_time:
                        items_to_add.append(item)
                if len(items_to_add) - offset == len(items):
                    offset += len(items)
                    items = sixgill_alerts_client.get_actionable_alerts_bulk(limit=MAX_INCIDENTS, offset=offset,
                                                                             **filter_alerts_kwargs)
                else:
                    items = []
        demisto.info(f'Found {len(items_to_add)} new alerts since {last_fetch_time}')
        # getting more info about oldest ~max_incidents_to_return(can be more because of sub alerts)
        if len(items_to_add):
            # process oldest-first so the saved watermark stays consistent
            items_to_add.reverse()
            newest_incident_date = items_to_add[-1].get('date')
            for item in items_to_add:
                item_info = sixgill_alerts_client.get_actionable_alert(actionable_alert_id=item.get('id'))
                item_info['date'] = item.get('date')
                new_incidents = item_to_incident(item_info, sixgill_alerts_client)
                incidents.extend(new_incidents)
                if len(incidents) >= max_incidents_to_return:
                    # stop early; remaining alerts are picked up next run
                    newest_incident_date = item.get('date')
                    break
    demisto.info(f'Adding {len(incidents)} to demisto')
    demisto.incidents(incidents)
    if len(incidents):
        # persist the watermark only when something was actually returned
        demisto.info(f'Update last fetch time to: {newest_incident_date}')
        demisto.setLastRun({
            'last_fetch_time': newest_incident_date
        })
def update_alert_status():
    """
    Update the status of a single Sixgill actionable alert.

    Reads ``alert_id`` and ``alert_status`` from the command arguments,
    pushes the new status to the Sixgill API, and reports success to the
    war room when the API responds with HTTP 200.
    """
    args = demisto.args()
    params = demisto.params()
    # The API expects the new status nested under a "status" envelope.
    body = {"status": {"status": args.get('alert_status')}}
    client = SixgillActionableAlertClient(client_id=params['client_id'],
                                          client_secret=params['client_secret'],
                                          channel_id=CHANNEL_CODE,
                                          logger=demisto,
                                          session=SESSION,
                                          verify=VERIFY)
    res = client.update_actionable_alert(actionable_alert_id=args.get('alert_id'), json_body=body)
    if res.get('status') == 200:
        demisto.results("Actionable alert status updated")
''' COMMANDS MANAGER / SWITCH PANEL '''
# Integration entry point: dispatch the command the server invoked.
# The '__builtin__'/'builtins' names cover execution inside the server's
# Python 2 and Python 3 script runners respectively.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    try:
        # route all HTTP traffic through the configured proxy (if any)
        SESSION.proxies = handle_proxy()
        command = demisto.command()
        if command == 'test-module':
            test_module()
            demisto.results('ok')
        elif command == "fetch-incidents":
            fetch_incidents()
        elif command == "cybersixgill-update-alert-status":
            update_alert_status()
    except Exception as e:
        # top-level boundary: surface any failure back to the server/UI
        return_error("Failed to execute {} command. Error: {}".format(demisto.command(), str(e)))
|
|
# coding=utf-8
"""Link together information on examples, models and evaluation for easier queries."""
from itertools import izip
import re
from numba import jit
import os.path as op
import numpy as np
import pandas as pd
from pandas.core.index import MultiIndex
from sklearn.decomposition import NMF
from whatami import whatable
from manysources.analyses.cooccurrences import molecules_coocurrences_df, sources_coocurrences_df
from manysources.analyses.logreg_weights import logreg_weights
from manysources.analyses.losses import read_losses
from manysources.common.misc import ensure_dir
from manysources.datasets import ManysourcesDataset, MANYSOURCES_DATA_ROOT
from manysources.experiments import DEFAULT_EXPIDS, MANYSOURCES_FEATS
from pillow_utils import rdkit2im
from manysources.analyses.scores import merge_cache_scores
@whatable
class Hub(object):
    """Links together information on examples, models and evaluation for easier queries.

    A Hub pins one experimental configuration (dataset, experiment ids,
    LSO vs CRS partitioning, model, feature set, score normalisation) and
    exposes lazily computed, cached views over the stored results:
    per-molecule scores and losses, train/test co-occurrence matrices and
    the fitted logistic-regression models.
    """

    def __init__(self,
                 dset_id='bcrp',
                 expids=DEFAULT_EXPIDS,
                 lso=True,
                 model='logreg3',
                 feats='ecfps1',
                 score_norm=None):
        super(Hub, self).__init__()
        # experiment coordinates; these make up the whatami id of the hub
        self.dset_id = dset_id
        self.lso = lso
        self.model = model
        self.feats = feats
        self.score_norm = score_norm
        self.expids = expids
        # lazy
        self._dset = None
        self._df_losses = None

    # process-wide cache of hubs, keyed by their whatami id (see Hub.hub)
    _HUBS = {}

    @classmethod
    def hub(cls,
            dset_id='bcrp',
            expids=tuple(range(4096)),
            lso=True,
            model='logreg3',
            feats='ecfps1',
            score_norm=None):
        """Memoised constructor: returns one shared Hub per configuration."""
        hub = cls(dset_id=dset_id, expids=expids, lso=lso, model=model, feats=feats, score_norm=score_norm)
        hid = hub.what().id()
        if hid not in Hub._HUBS:
            Hub._HUBS[hid] = hub
        return Hub._HUBS[hid]

    @classmethod
    def lchubs(cls,
               dset_id='bcrp',
               expids=tuple(range(4096)),
               model='logreg3',
               feats='ecfps1',
               score_norm=None):
        """Returns the (lso=True, lso=False) pair of hubs for the given configuration."""
        return cls.hub(dset_id=dset_id, expids=expids, model=model, feats=feats, score_norm=score_norm, lso=True), \
            cls.hub(dset_id=dset_id, expids=expids, model=model, feats=feats, score_norm=score_norm, lso=False)

    def dset(self):
        """
        Returns the dataset hub.
        :rtype: ManysourcesDataset
        """
        if self._dset is None:
            self._dset = ManysourcesDataset(dset=self.dset_id)
        return self._dset

    def mols(self):
        """
        Returns the molecules side of life of the dataset hub.
        :rtype: manysources.common.molecules.MoleculesFromRDKit
        """
        return self.dset().mols()

    def aucs(self):
        """
        Returns a dataframe with as many rows as experiments and three columns: (mean AUC, stddev AUC, stacked AUC).
        It would look like this:
          |----------------------------------------------------|
          |        | meanROCAUC | stddevROCAUC | stackedROCAUC |
          |----------------------------------------------------|
          | expid1 |    0.75    |     0.04     |     0.76      |
          | expid2 |    0.78    |     0.04     |     0.76      |
          |  ...   |    ...     |     ...      |     ...       |
          |----------------------------------------------------|
        :rtype: pandas.DataFrame
        """
        raise NotImplementedError()

    def scores(self, tidy=True):
        """If tidy, returns a dataframe looking like that:
          |---------------------------------|
          | molid  | expid | fold |  score  |
          |---------------------------------|
          | molid1 |   1   |  3   | 0.2334  |
          | molid1 |   2   |  0   | 0.4553  |
          |  ...   |  ...  | ...  |  ...    |
          |---------------------------------|
        Else, returns a dataframe with expid as index and as many columns as mols (in which case we will
        take into account self.lso).
          |--------------------------------|
          |        |  mol1  | mol2  | ...  |
          |--------------------------------|
          | expid1 | 0.2334 | 0.783 | ...  |
          | expid2 | 0.2054 | 0.541 | ...  |
          |  ...   |  ...   |  ...  | ...  |
          |--------------------------------|
        """
        scores, expids, folds, molids, y = merge_cache_scores(dset_id=self.dset_id,
                                                              model=self.model,
                                                              feats=self.feats,
                                                              calib=self.score_norm,
                                                              lso=self.lso)
        df = pd.DataFrame(data=scores.T, index=expids, columns=molids)
        # select the relevant expid only
        # TODO: allow to read only these in scores_df
        # FIXME: warn if we cannot find all requested expids, document that can be the case
        expids = [expid for expid in self.expids if expid in expids]
        df = df.loc[expids]
        if not tidy:
            return df
        else:
            df['expid'] = df.index  # use reset_index
            tidy_df = pd.melt(df, id_vars='expid', var_name='molid', value_name='score')
            folds_tidy = np.concatenate(folds)
            tidy_df['fold'] = folds_tidy
            return tidy_df[['molid', 'expid', 'fold', 'score']]

    def squared_losses(self):
        """
        Returns a dataframe with squared losses per experiment and molecule (can be used to compute, e.g., Brier score).
        Returns a pandas dataframe (expid, molid) -> loss.
        Rows and cols appear sorted.
        Loss is squared loss: (label - score) ** 2
        NaNs (failed experiments) are removed.
        It would look like this:
          |----------------------------|
          |        | mol1 | mol2 | ... |
          |----------------------------|
          | expid1 | 0.75 | 0.23 | ... |
          | expid2 | 0.31 | 0.33 | ... |
          |  ...   | ...  | ...  | ... |
          |----------------------------|
        :rtype: pandas.DataFrame
        """
        if self._df_losses is None:
            dfl, _ = read_losses(dset=self.dset_id,
                                 expids=self.expids,
                                 feats=self.feats,
                                 model=self.model,
                                 lso=self.lso,
                                 calibration=self.score_norm)
            # sort rows/cols; drop columns (molecules) with any failed experiment
            self._df_losses = dfl.sort_index(axis=0).sort_index(axis=1).dropna(axis=1)
        return self._df_losses

    def mcoocs(self):
        """
        Returns a multilevel-indexed dataframe of molecules coocurrences in test for each partition train/test.
        The dataframe returned by this function has
          - a sorted index with two levels (expid, fold)
          - a sorted column index, one column per molecule
          - boolean values (True iff the molecule was in test for that (expid, fold))
        It would look like this
          |----------------------------------------|
          | index           |  data                |
          |-----------------|----------------------|
          | expid | foldid  | mol1  | mol2  | ...  |
          |----------------------------------------|
          | 0     | 0       | False | False | ...  |
          | 1     | 0       | True  | False | ...  |
          | ...   | ...     | ...   | ...   | ...  |
          |----------------------------------------|
        :rtype: pandas.DataFrame
        """
        mcoocs, molids, expids, folds = molecules_coocurrences_df(
            expids=self.expids,
            dset=self.dset_id,
            feats=self.feats,
            model=self.model,
            lso=self.lso
        )
        index = MultiIndex.from_arrays(arrays=(expids, folds))
        index.levels[0].name = 'expid'
        index.levels[1].name = 'fold'
        mcooc_df = pd.DataFrame(data=mcoocs,
                                index=index,
                                columns=molids)
        return mcooc_df.sort_index(axis=0).sort_index(axis=1)

    def scoocs(self):
        """
        Returns a multilevel-indexed dataframe of sources coocurrences in test for each partition train/test.
        The dataframe from this palyndromic function has:
          - a sorted index with two levels (expid, fold)
          - a sorted column index, one column per source
          - boolean values
        It would look like this
          |----------------------------------------|
          | index           |  data                |
          |-----------------|----------------------|
          | expid | foldid  | src1  | src2  | ...  |
          |----------------------------------------|
          | 0     | 0       | False | False | ...  |
          | 1     | 0       | True  | False | ...  |
          | ...   | ...     | ...   | ...   | ...  |
          |----------------------------------------|
        :rtype: pandas.DataFrame
        """
        scoocs, sources, expids, folds = sources_coocurrences_df(
            expids=self.expids,
            dset=self.dset_id,
            feats=self.feats,
            model=self.model,
            lso=self.lso
        )
        index = MultiIndex.from_arrays(arrays=(expids, folds))
        index.levels[0].name = 'expid'
        index.levels[1].name = 'fold'
        mcooc_df = pd.DataFrame(data=scoocs,
                                index=index,
                                columns=sources)
        return mcooc_df.sort_index(axis=0).sort_index(axis=1)

    def mean_loss_matrix(self, mols_in_rows=True, rows_in_train=False):
        """
        Returns two dataframes: mean loss and number of occurrences.
        These dataframes:
          - have mols or sources in rows (depending of *mols_in_rows*)
          - have mols in columns
          - each entry contains the mean loss (and count) when
              * the mol in col is in test
              * whatever in the row is in train or test (depending on *rows_in_train*)
        They would look like this:
          |-----------------------------------------|
          | index           |  data                 |
          |-----------------|-----------------------|
          | source or molid | molid1 | molid2 | ... |
          |-----------------------------------------|
          | molid1          | 0.83   | 0.02   | ... |
          | molid2          | 0.17   | 0.01   | ... |
          | ...             | ...    | ...    | ... |
          |-----------------------------------------|
          |-----------------------------------------|
          | index           |  data                 |
          |-----------------|-----------------------|
          | source or molid | molid1 | molid2 | ... |
          |-----------------------------------------|
          | molid1          | 794    | 733    | ... |
          | molid2          | 680    | 667    | ... |
          | ...             | ...    | ...    | ... |
          |-----------------------------------------|
        :rtype: (pandas.DataFrame, pandas.DataFrame)
        """
        # one pickle per (mols_in_rows, rows_in_train) combination, under a
        # directory keyed by the (shortened) whatami id of this hub
        matid = 'mir=%r#rit=%r' % (mols_in_rows, rows_in_train)
        cache_dir = op.join(MANYSOURCES_DATA_ROOT, 'results', 'mlms', self.what().id(maxlength=1))
        ensure_dir(cache_dir)
        cache_file = op.join(cache_dir, matid + '.pkl')
        if not op.isfile(cache_file):
            # row masks
            rows_df = self.mcoocs() if mols_in_rows else self.scoocs()
            # the coocurrence masks mark "in test"; negate them for "in train"
            rows_df = ~rows_df if rows_in_train else rows_df
            # col masks
            cols_df = self.mcoocs()
            # losses
            loss_df = self.squared_losses().T
            # sanity checks for columns
            assert np.all(cols_df.columns == loss_df.index)

            # numba-compiled inner loop: accumulate loss sums and co-occurrence counts
            @jit(nopython=True)
            def update_cooc_cooc(res_matrix, norm_matrix, losses, rows, cols):
                for row in rows:
                    for col in cols:
                        res_matrix[row, col] += losses[col]
                        norm_matrix[row, col] += 1

            loss_matrix = np.zeros((len(rows_df.columns), len(cols_df.columns)))
            norm_matrix = np.zeros((len(rows_df.columns), len(cols_df.columns)))
            for expid in loss_df.columns:
                print 'Computing mean loss for expid=%d' % expid
                if expid not in rows_df.index.levels[0]:
                    print '\t Cannot find coocurrences, skipping...'
                    continue
                exp_losses = loss_df[expid]
                #
                # FIXME: this does not seem right now that we have a multilevel df
                #
                for rowmask, colmask in izip(rows_df.loc[expid].values,
                                             cols_df.loc[expid].values):
                    update_cooc_cooc(loss_matrix,
                                     norm_matrix,
                                     exp_losses.values,
                                     np.where(rowmask)[0],
                                     np.where(colmask)[0])
            # mean = accumulated loss / number of co-occurrences (NaN where count is 0)
            losses_df = pd.DataFrame(loss_matrix / norm_matrix,
                                     index=sorted(rows_df.columns),
                                     columns=cols_df.columns)
            counts_df = pd.DataFrame(norm_matrix,
                                     index=sorted(rows_df.columns),
                                     columns=cols_df.columns)
            pd.to_pickle((losses_df, counts_df), cache_file)
        return pd.read_pickle(cache_file)

    def fold_coocurrence_tensor(self):
        raise NotImplementedError('To implement and myabe even factorize with PARAFACT')

    def as_pil_images(self):
        # generates 2D PIL representations of the molecules
        # TODO: this must go to mols class
        return [rdkit2im(mol) for mol in self.mols()]

    def logreg_models(self):
        """
        Returns a 4-tuple with the weights for the logistic regressions, intercepts and experiment coordinates.
          - the first element is is a num_rows x num_features sparse matrix;
            each row corresponds to the weights for an "expid-fold" model
            the colums are the features
          - the second element is a num_rows array of logistic regression intercepts
          - the third and fourth elements are num_rows long numpy arrays with the coordinates of the experiments;
            these are expids + fold_ids (e.g. (expid=2, fold=1))
        :rtype: (scipy.sparse.csr_matrix, np.array, np.array, np.array)
        """
        return logreg_weights(dset=self.dset_id,
                              expids=self.expids,
                              feats=self.feats,
                              model=self.model,
                              lso=self.lso)

    def molid2index(self, molid, molids=None):
        # FIXME: this is better from molecules and whatnot, inefficient, with little petits eyes pressure
        if molids is None:
            molids, _, _ = self.feature_matrix()
        return np.where(molids == molid)[0][0]

    def feature_matrix(self):
        """
        Returns the feature matrix, along with the relevant molids (these for which feature extraction worked)
        and the labels.
        The feature matrix can be either a dense numpy array or an sparse matrix (so no type annotation, sorry).
        """
        molids, X, y = MANYSOURCES_FEATS[self.feats].extractor(self.dset())
        return molids, X, y

    def mol_feats(self, molid):
        """Returns a numpy array with the indices of the features occurring in the molecule."""
        molids, X, y = self.feature_matrix()
        mol_index = self.molid2index(molid, molids=molids)
        if isinstance(X, np.ndarray):
            raise Exception('Probably this makes no sense, all is relevant, but we could just get the non-zero')
        # sparse row: the stored indices are exactly the present features
        return X[mol_index, :].indices

    def relevant_folds(self, molid):
        """Returns a list of tuples (expid, foldnum) where the molid was in test."""
        mcoocs = self.mcoocs()
        return mcoocs[mcoocs[molid]].index

    def relevant_models(self, molid):
        # models (weights/intercepts) of the folds where the molecule was in test
        weights, intercepts, expids, foldnums = self.logreg_models()
        relevant_folds = self.relevant_folds(molid)
        rows = [row for row, (expid, foldnum) in enumerate(izip(expids, foldnums))
                if (expid, foldnum) in relevant_folds]
        return weights[rows, :], intercepts[rows], relevant_folds

    def smiles(self, findices):
        # map feature indices back to their SMILES substructure strings
        return self.dset().ecfps(no_dupes=True).i2s[findices]  # FIXME quick and dirty

    def logreg_replay(self, molid):
        # Re-runs the logistic regression scoring for one molecule using the
        # stored per-fold weights, printing diagnostic info along the way.
        # TODO: do we need to calibrate / normalise?
        #       in other words, should we replay all at the same time or read the scores from the already done preds?
        if 'logreg' not in self.model:
            raise Exception('logreg replay can only be applied to logistic regression models')
        relevant_feats = self.mol_feats(molid)
        smiles = self.smiles(relevant_feats)
        weights, intercepts, relevant_folds = self.relevant_models(molid)
        print weights.shape
        print relevant_feats
        print len(relevant_feats)
        relevant_weights = weights.tocsc()[:, relevant_feats].tocsr().toarray()  # FIXME to_dense
        nfeats = float(relevant_weights.shape[0] * relevant_weights.shape[1])
        # print relevant_weights.shape, relevant_weights.nnz / nfeats, len(relevant_feats)
        molas = relevant_weights.dot(np.ones(len(relevant_feats)))
        neg_feats = relevant_weights < 0
        pos_feats = relevant_weights > 0
        print neg_feats.sum(), pos_feats.sum()
        print relevant_weights.shape
        mean_weights = relevant_weights.mean(axis=0)
        order = mean_weights.argsort()
        print zip(smiles[order], ['%.4f' % num for num in mean_weights[order]])
        # logistic link: sigma(w.x + b)
        linear_term = np.exp(molas + intercepts)
        predictions = linear_term / (1 + linear_term)
        print predictions
        # + intercepts
def expid_only(names, keep_lso=False):
    """Given a list of names (e.g. the columns in losses), return a new list keeping only the expid#lso part.

    When *keep_lso* is False only the text before '#lso=' is kept; otherwise
    the 'expid=<num>#lso=<bool>' prefix is extracted via a regular expression.
    """
    if keep_lso:
        pattern = re.compile(r'(expid=\d+#lso=(?:True|False))')
        return [pattern.search(name).group(0) for name in names]
    return [name.partition('#lso=')[0] for name in names]
def hubigraph(hub, edges='loss'):
    """Creates a nice ubigraph plot from a results hub.
    Nodes are molecules.
    Edges represent similarity in either:
      - logreg weight space
      - loss space
      - score space
    """
    # NOTE(review): still a stub - it only gathers the data, nothing is drawn yet.
    molecules = hub.mols().mols()
    if edges == 'loss':
        losses = hub.squared_losses()
        # Sketch of the intended edge construction (kNN graph over the
        # per-molecule loss vectors):
        #   X = losses.T.values
        #   nns = NearestNeighbors()
        #   nns.fit(X)
        #   A = nns.kneighbors_graph(X, n_neighbors=5)
def cooc_loss_matrix_expectations(hub):
    """Checks and documents expectations for the various coocurrence-loss matrices.
    The usefulness of such matrices is still to determine:
      - to find higher order interaction
    """
    # Kill the memory: materialize (and disk-cache) all four mean-loss
    # matrices, discarding the companion count matrices.
    m2m_train = hub.mean_loss_matrix(mols_in_rows=True, rows_in_train=True)[0]
    m2m_trest = hub.mean_loss_matrix(mols_in_rows=True, rows_in_train=False)[0]
    s2m_train = hub.mean_loss_matrix(mols_in_rows=False, rows_in_train=True)[0]
    s2m_trest = hub.mean_loss_matrix(mols_in_rows=False, rows_in_train=False)[0]
    if hub.lso:
        # TODO: check LSO expectations for the mean loss matrix:
        #   - each molecule in columns has the same score for all the molecules in the same source
        #   - the score is NaN for all the molecules in the same source (m2m_trest)
        pass
    else:
        # TODO: check CRS expectations for the mean loss matrix
        pass
if __name__ == '__main__':
    # Ad-hoc exploration script: build the LSO and CRS hubs for bcrp and
    # inspect the mols-in-train mean-loss matrix.
    hub_lso = Hub(lso=True, dset_id='bcrp')
    hub_crs = Hub(lso=False, dset_id='bcrp')
    weights, intercepts, expids, foldids = hub_lso.logreg_models()
    # This seems correct too
    # Note that in LSO, we will have the same value for each molecule in the same source
    meanl, counts = hub_crs.mean_loss_matrix(mols_in_rows=True, rows_in_train=True)
    print meanl
    # NOTE(review): hard exit below leaves the NMF experiment unreachable (dead code)
    exit(22)
    nmf = NMF()
    print nmf.fit_transform(meanl)
    print('Done')
    exit(0)

#
# TODO: cache coocurrences if needed
#
|
|
# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
import os.path
import requests
import cStringIO
import PIL.Image
import numpy as np
import scipy.misc
import math
from . import is_url, HTTP_TIMEOUT, errors
# Library defaults:
# PIL.Image:
# size -- (width, height)
# np.array:
# shape -- (height, width, channels)
# range -- [0-255]
# dtype -- uint8
# channels -- RGB
# caffe.datum:
# datum.data type -- bytes (uint8)
# datum.float_data type -- float32
# when decoding images, channels are BGR
# DIGITS:
# image_dims -- (height, width, channels)
# List of supported file extensions
# Use like "if filename.endswith(SUPPORTED_EXTENSIONS)"
SUPPORTED_EXTENSIONS = ('.png','.jpg','.jpeg','.bmp','.ppm')
def load_image(path):
    """
    Reads a file from `path` and returns a PIL.Image with mode 'L' or 'RGB'
    Raises LoadImageError

    Arguments:
    path -- path to the image, can be a filesystem path or a URL
    """
    image = None
    if is_url(path):
        # Remote image: fetch over HTTP (redirects are deliberately not followed)
        try:
            r = requests.get(path,
                    allow_redirects=False,
                    timeout=HTTP_TIMEOUT)
            r.raise_for_status()
            stream = cStringIO.StringIO(r.content)
            image = PIL.Image.open(stream)
        except requests.exceptions.RequestException as e:
            # NOTE: Python 2 raise syntax and e.message throughout this function
            raise errors.LoadImageError, e.message
        except IOError as e:
            raise errors.LoadImageError, e.message
    elif os.path.exists(path):
        try:
            image = PIL.Image.open(path)
            # force decoding now so corrupt files fail here, not at first use
            image.load()
        except IOError as e:
            raise errors.LoadImageError, 'IOError: %s' % e.message
    else:
        raise errors.LoadImageError, '"%s" not found' % path

    # Normalize the color mode down to 'L' (grayscale) or 'RGB'
    if image.mode in ['L', 'RGB']:
        # No conversion necessary
        return image
    elif image.mode in ['1']:
        # Easy conversion to L
        return image.convert('L')
    elif image.mode in ['LA']:
        # Deal with transparencies: composite over a white background
        new = PIL.Image.new('L', image.size, 255)
        new.paste(image, mask=image.convert('RGBA'))
        return new
    elif image.mode in ['CMYK', 'YCbCr']:
        # Easy conversion to RGB
        return image.convert('RGB')
    elif image.mode in ['P', 'RGBA']:
        # Deal with transparencies: composite over a white background
        new = PIL.Image.new('RGB', image.size, (255, 255, 255))
        new.paste(image, mask=image.convert('RGBA'))
        return new
    else:
        raise errors.LoadImageError, 'Image mode "%s" not supported' % image.mode
def upscale(image, ratio):
    """
    return upscaled image array

    Nearest-neighbor upscaling: each output pixel copies the input pixel at
    the corresponding down-scaled coordinate.

    Arguments:
    image -- a (H,W,C) numpy.ndarray
    ratio -- scaling factor (>=1; values < 1 raise)

    Raises:
    ValueError -- if image is not an ndarray or ratio is smaller than 1
    """
    if not isinstance(image, np.ndarray):
        raise ValueError('Expected ndarray')
    if ratio < 1:
        raise ValueError('Ratio must be greater than 1 (ratio=%f)' % ratio)
    width = int(math.floor(image.shape[1] * ratio))
    height = int(math.floor(image.shape[0] * ratio))
    # Map every output coordinate to its source coordinate. The int cast is
    # required: the original per-pixel loop indexed with math.floor(y/ratio),
    # which is a float (and float indices are invalid for numpy arrays on
    # Python 3 / modern numpy). Truncation equals floor for non-negative
    # coordinates, so the mapping is unchanged.
    src_rows = (np.arange(height) / ratio).astype(int)
    src_cols = (np.arange(width) / ratio).astype(int)
    # Fancy indexing replicates rows then columns in one vectorized pass,
    # replacing the original O(H*W) Python loop with identical output.
    return image[src_rows][:, src_cols].astype(np.uint8)
def resize_image(image, height, width,
                 channels=None,
                 resize_mode=None,
                 ):
    """
    Resizes an image and returns it as a np.array

    Arguments:
    image -- a PIL.Image or numpy.ndarray
    height -- height of new image
    width -- width of new image

    Keyword Arguments:
    channels -- channels of new image (stays unchanged if not specified)
    resize_mode -- can be crop, squash, fill or half_crop
    """
    if resize_mode is None:
        resize_mode = 'squash'
    if resize_mode not in ['crop', 'squash', 'fill', 'half_crop']:
        raise ValueError('resize_mode "%s" not supported' % resize_mode)
    if channels not in [None, 1, 3]:
        raise ValueError('unsupported number of channels: %s' % channels)

    # --- Normalize the input to a uint8 ndarray with the requested channels ---
    if isinstance(image, PIL.Image.Image):
        # Convert image mode (channels)
        if channels is None:
            image_mode = image.mode
            if image_mode == 'L':
                channels = 1
            elif image_mode == 'RGB':
                channels = 3
            else:
                raise ValueError('unknown image mode "%s"' % image_mode)
        elif channels == 1:
            # 8-bit pixels, black and white
            image_mode = 'L'
        elif channels == 3:
            # 3x8-bit pixels, true color
            image_mode = 'RGB'
        if image.mode != image_mode:
            image = image.convert(image_mode)
        image = np.array(image)
    elif isinstance(image, np.ndarray):
        if image.dtype != np.uint8:
            image = image.astype(np.uint8)
        if image.ndim == 3 and image.shape[2] == 1:
            # drop a trailing singleton channel -> 2-D grayscale
            image = image.reshape(image.shape[:2])
        if channels is None:
            if image.ndim == 2:
                channels = 1
            elif image.ndim == 3 and image.shape[2] == 3:
                channels = 3
            else:
                raise ValueError('invalid image shape: %s' % (image.shape,))
        elif channels == 1:
            if image.ndim != 2:
                if image.ndim == 3 and image.shape[2] == 3:
                    # color to grayscale (ITU-R 601 luma weights)
                    image = np.dot(image, [0.299, 0.587, 0.114]).astype(np.uint8)
                else:
                    raise ValueError('invalid image shape: %s' % (image.shape,))
        elif channels == 3:
            if image.ndim == 2:
                # grayscale to color
                image = np.repeat(image,3).reshape(image.shape + (3,))
            elif image.shape[2] != 3:
                raise ValueError('invalid image shape: %s' % (image.shape,))
    else:
        raise ValueError('resize_image() expected a PIL.Image.Image or a numpy.ndarray')

    # No need to resize
    if image.shape[0] == height and image.shape[1] == width:
        return image

    ### Resize
    interp = 'bilinear'
    width_ratio = float(image.shape[1]) / width
    height_ratio = float(image.shape[0]) / height
    if resize_mode == 'squash' or width_ratio == height_ratio:
        # distort freely (or aspect ratio already matches)
        return scipy.misc.imresize(image, (height, width), interp=interp)
    elif resize_mode == 'crop':
        # resize to smallest of ratios (relatively larger image), keeping aspect ratio
        if width_ratio > height_ratio:
            resize_height = height
            resize_width = int(round(image.shape[1] / height_ratio))
        else:
            resize_width = width
            resize_height = int(round(image.shape[0] / width_ratio))
        image = scipy.misc.imresize(image, (resize_height, resize_width), interp=interp)

        # chop off ends of dimension that is still too long (centered crop)
        if width_ratio > height_ratio:
            start = int(round((resize_width-width)/2.0))
            return image[:,start:start+width]
        else:
            start = int(round((resize_height-height)/2.0))
            return image[start:start+height,:]
    else:
        if resize_mode == 'fill':
            # resize to biggest of ratios (relatively smaller image), keeping aspect ratio
            if width_ratio > height_ratio:
                resize_width = width
                resize_height = int(round(image.shape[0] / width_ratio))
                # keep the remaining gap even so the noise padding is symmetric
                if (height - resize_height) % 2 == 1:
                    resize_height += 1
            else:
                resize_height = height
                resize_width = int(round(image.shape[1] / height_ratio))
                if (width - resize_width) % 2 == 1:
                    resize_width += 1
            image = scipy.misc.imresize(image, (resize_height, resize_width), interp=interp)
        elif resize_mode == 'half_crop':
            # resize to average ratio keeping aspect ratio
            new_ratio = (width_ratio + height_ratio) / 2.0
            resize_width = int(round(image.shape[1] / new_ratio))
            resize_height = int(round(image.shape[0] / new_ratio))
            if width_ratio > height_ratio and (height - resize_height) % 2 == 1:
                resize_height += 1
            elif width_ratio < height_ratio and (width - resize_width) % 2 == 1:
                resize_width += 1
            image = scipy.misc.imresize(image, (resize_height, resize_width), interp=interp)
            # chop off ends of dimension that is still too long (centered crop)
            if width_ratio > height_ratio:
                start = int(round((resize_width-width)/2.0))
                image = image[:,start:start+width]
            else:
                start = int(round((resize_height-height)/2.0))
                image = image[start:start+height,:]
        else:
            raise Exception('unrecognized resize_mode "%s"' % resize_mode)

        # fill ends of dimension that is too short with random noise
        # NOTE(review): the /2 below relies on Python 2 integer division;
        # under Python 3 it yields a float padding size -- confirm before porting
        if width_ratio > height_ratio:
            padding = (height - resize_height)/2
            noise_size = (padding, width)
            if channels > 1:
                noise_size += (channels,)
            noise = np.random.randint(0, 255, noise_size).astype('uint8')
            image = np.concatenate((noise, image, noise), axis=0)
        else:
            padding = (width - resize_width)/2
            noise_size = (height, padding)
            if channels > 1:
                noise_size += (channels,)
            noise = np.random.randint(0, 255, noise_size).astype('uint8')
            image = np.concatenate((noise, image, noise), axis=1)
        return image
def embed_image_html(image):
    """
    Returns an image embedded in HTML base64 format
    (Based on Caffe's web_demo)

    Returns None when given None; otherwise a 'data:image/...;base64,...' URI string.

    Arguments:
    image -- a PIL.Image or np.ndarray
    """
    if image is None:
        return None
    elif isinstance(image, PIL.Image.Image):
        pass
    elif isinstance(image, np.ndarray):
        image = PIL.Image.fromarray(image)
    else:
        raise ValueError('image must be a PIL.Image or a np.ndarray')

    # Read format from the image
    fmt = image.format
    if not fmt:
        # default to JPEG (e.g. images created in memory carry no format)
        fmt = 'jpeg'
    else:
        fmt = fmt.lower()
    string_buf = cStringIO.StringIO()
    image.save(string_buf, format=fmt)
    # NOTE: str.encode('base64') is Python 2 only (codecs-based encoding)
    data = string_buf.getvalue().encode('base64').replace('\n', '')
    return 'data:image/%s;base64,%s' % (fmt, data)
def get_layer_vis_square(data,
                         allow_heatmap = True,
                         normalize = True,
                         min_img_dim = 100,
                         max_width = 1200,
                         ):
    """
    Returns a vis_square for the given layer data

    Arguments:
    data -- a np.ndarray

    Keyword arguments:
    allow_heatmap -- if True, convert single channel images to heatmaps
    normalize -- whether to normalize the data when visualizing
    min_img_dim -- upscale the result until its smallest dimension reaches this
    max_width -- maximum width for the vis_square
    """
    # --- Reshape arbitrary layer data into a stack of displayable images ---
    if data.ndim == 1:
        # interpret as 1x1 grayscale images
        # (N, 1, 1)
        data = data[:, np.newaxis, np.newaxis]
    elif data.ndim == 2:
        # interpret as 1x1 grayscale images
        # (N, 1, 1)
        data = data.reshape((data.shape[0]*data.shape[1], 1, 1))
    elif data.ndim == 3:
        if data.shape[0] == 3:
            # interpret as a color image
            # (1, H, W,3)
            data = data[[2,1,0],...]  # BGR to RGB (see issue #59)
            data = data.transpose(1,2,0)
            data = data[np.newaxis,...]
        else:
            # interpret as grayscale images
            # (N, H, W)
            pass
    elif data.ndim == 4:
        if data.shape[0] == 3:
            # interpret as HxW color images
            # (N, H, W, 3)
            data = data.transpose(1,2,3,0)
            data = data[:,:,:,[2,1,0]]  # BGR to RGB (see issue #59)
        elif data.shape[1] == 3:
            # interpret as HxW color images
            # (N, H, W, 3)
            data = data.transpose(0,2,3,1)
            data = data[:,:,:,[2,1,0]]  # BGR to RGB (see issue #59)
        else:
            # interpret as HxW grayscale images
            # (N, H, W)
            data = data.reshape((data.shape[0]*data.shape[1], data.shape[2], data.shape[3]))
    else:
        raise RuntimeError('unrecognized data shape: %s' % (data.shape,))

    # chop off data so that it will fit within max_width
    padsize = 0
    width = data.shape[2]
    if width > max_width:
        # NOTE(review): slicing the first axis to [:1] keeps only one image
        # while clipping H and W -- looks suspicious; confirm intent
        data = data[:1,:max_width,:max_width]
    else:
        if width > 1:
            padsize = 1
            width += 1
        # NOTE: relies on Python 2 integer division for the tile count
        n = max(max_width/width,1)
        n *= n
        data = data[:n]

    if not allow_heatmap and data.ndim == 3:
        # force a trailing channel axis so vis_square skips the colormap path
        data = data[...,np.newaxis]

    vis = vis_square(data,
                     padsize = padsize,
                     normalize = normalize,
                     )

    # find minimum dimension and upscale if necessary
    _min = sorted(vis.shape[:2])[0]
    if _min < min_img_dim:
        # upscale image
        ratio = min_img_dim/float(_min)
        vis = upscale(vis, ratio)
    return vis
def vis_square(images,
               padsize=1,
               normalize=False,
               colormap='jet',
               ):
    """
    Visualize each image in a grid of size approx sqrt(n) by sqrt(n)
    Returns a np.array image
    (Based on Caffe's filter_visualization notebook)

    Arguments:
    images -- an array of shape (N, H, W) or (N, H, W, C)
              if C is not set, a heatmap is computed for the result

    Keyword arguments:
    padsize -- how many pixels go inbetween the tiles
    normalize -- if true, scales (min, max) across all images out to (0, 1)
    colormap -- a string representing one of the suppoted colormaps
    """
    assert 3 <= images.ndim <= 4, 'images.ndim must be 3 or 4'
    # convert to float since we're going to do some math
    images = images.astype('float32')
    if normalize:
        images -= images.min()
        if images.max() > 0:
            images /= images.max()
            images *= 255

    if images.ndim == 3:
        # they're grayscale - convert to a colormap
        redmap, greenmap, bluemap = get_color_map(colormap)
        # map intensities through the per-channel colormap tables
        # NOTE: xrange is Python 2 only
        red = np.interp(images*(len(redmap)-1)/255.0, xrange(len(redmap)), redmap)
        green = np.interp(images*(len(greenmap)-1)/255.0, xrange(len(greenmap)), greenmap)
        blue = np.interp(images*(len(bluemap)-1)/255.0, xrange(len(bluemap)), bluemap)

        # Slap the channels back together
        images = np.concatenate( (red[...,np.newaxis], green[...,np.newaxis], blue[...,np.newaxis]), axis=3 )
        images = np.minimum(images,255)
        images = np.maximum(images,0)

    # convert back to uint8
    images = images.astype('uint8')

    # Compute the output image matrix dimensions (approx. square grid)
    n = int(np.ceil(np.sqrt(images.shape[0])))
    ny = n
    nx = n
    length = images.shape[0]
    if n*(n-1) >= length:
        nx = n-1

    # Add padding between the images (and pad the count up to nx*ny with blanks)
    padding = ((0, nx*ny - length), (0, padsize), (0, padsize)) + ((0, 0),) * (images.ndim - 3)
    padded = np.pad(images, padding, mode='constant', constant_values=255)

    # Tile the images beside each other
    tiles = padded.reshape( (ny, nx) + padded.shape[1:]).transpose( (0,2,1,3) + tuple(range(4, padded.ndim + 1)))
    tiles = tiles.reshape((ny * tiles.shape[1], nx * tiles.shape[3]) + tiles.shape[4:])

    if tiles.shape[-1] == 1:
        # grayscale to color
        tiles = np.dstack([tiles.squeeze()] * 3)

    return tiles
def get_color_map(name):
    """
    Return a colormap as (redmap, greenmap, bluemap)

    Each returned element is a numpy array of channel values in [0, 255],
    meant to be interpolated over by vis_square.

    Arguments:
    name -- the name of the colormap. If unrecognized, will default to 'jet'.
    """
    redmap = [0]
    greenmap = [0]
    bluemap = [0]
    if name == 'white':
        # essentially a noop
        redmap = [0,1]
        greenmap = [0,1]
        bluemap = [0,1]
    elif name == 'simple':
        redmap = [0,1,1,1]
        greenmap = [0,0,1,1]
        bluemap = [0,0,0,1]
    elif name == 'hot':
        redmap = [0, 0.03968253968253968, 0.07936507936507936, 0.119047619047619, 0.1587301587301587, 0.1984126984126984, 0.2380952380952381, 0.2777777777777778, 0.3174603174603174, 0.3571428571428571, 0.3968253968253968, 0.4365079365079365, 0.4761904761904762, 0.5158730158730158, 0.5555555555555556, 0.5952380952380952, 0.6349206349206349, 0.6746031746031745, 0.7142857142857142, 0.753968253968254, 0.7936507936507936, 0.8333333333333333, 0.873015873015873, 0.9126984126984127, 0.9523809523809523, 0.992063492063492, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
        greenmap = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03174603174603163, 0.0714285714285714, 0.1111111111111112, 0.1507936507936507, 0.1904761904761905, 0.23015873015873, 0.2698412698412698, 0.3095238095238093, 0.3492063492063491, 0.3888888888888888, 0.4285714285714284, 0.4682539682539679, 0.5079365079365079, 0.5476190476190477, 0.5873015873015872, 0.6269841269841268, 0.6666666666666665, 0.7063492063492065, 0.746031746031746, 0.7857142857142856, 0.8253968253968254, 0.8650793650793651, 0.9047619047619047, 0.9444444444444442, 0.984126984126984, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
        bluemap = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.04761904761904745, 0.1269841269841265, 0.2063492063492056, 0.2857142857142856, 0.3650793650793656, 0.4444444444444446, 0.5238095238095237, 0.6031746031746028, 0.6825396825396828, 0.7619047619047619, 0.8412698412698409, 0.92063492063492, 1]
    elif name == 'rainbow':
        redmap = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9365079365079367, 0.8571428571428572, 0.7777777777777777, 0.6984126984126986, 0.6190476190476191, 0.53968253968254, 0.4603174603174605, 0.3809523809523814, 0.3015873015873018, 0.2222222222222223, 0.1428571428571432, 0.06349206349206415, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03174603174603208, 0.08465608465608465, 0.1375661375661377, 0.1904761904761907, 0.2433862433862437, 0.2962962962962963, 0.3492063492063493, 0.4021164021164023, 0.4550264550264553, 0.5079365079365079, 0.5608465608465609, 0.6137566137566139, 0.666666666666667]
        greenmap = [0, 0.03968253968253968, 0.07936507936507936, 0.119047619047619, 0.1587301587301587, 0.1984126984126984, 0.2380952380952381, 0.2777777777777778, 0.3174603174603174, 0.3571428571428571, 0.3968253968253968, 0.4365079365079365, 0.4761904761904762, 0.5158730158730158, 0.5555555555555556, 0.5952380952380952, 0.6349206349206349, 0.6746031746031745, 0.7142857142857142, 0.753968253968254, 0.7936507936507936, 0.8333333333333333, 0.873015873015873, 0.9126984126984127, 0.9523809523809523, 0.992063492063492, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9841269841269842, 0.9047619047619047, 0.8253968253968256, 0.7460317460317465, 0.666666666666667, 0.587301587301587, 0.5079365079365079, 0.4285714285714288, 0.3492063492063493, 0.2698412698412698, 0.1904761904761907, 0.1111111111111116, 0.03174603174603208, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        bluemap = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01587301587301582, 0.09523809523809534, 0.1746031746031744, 0.2539682539682535, 0.333333333333333, 0.412698412698413, 0.4920634920634921, 0.5714285714285712, 0.6507936507936507, 0.7301587301587302, 0.8095238095238093, 0.8888888888888884, 0.9682539682539679, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
    elif name == 'winter':
        greenmap = [0, 1]
        bluemap = [1, 0.5]
    else:
        if name != 'jet':
            # NOTE: Python 2 print statement
            print 'Warning: colormap "%s" not supported. Using jet instead.' % name
        redmap = [0,0,0,0,0.5,1,1,1,0.5]
        greenmap = [0,0,0.5,1,1,1,0.5,0,0]
        bluemap = [0.5,1,1,1,0.5,0,0,0,0]
    # scale the [0, 1] tables up to the [0, 255] range expected by np.interp callers
    return 255.0 * np.array(redmap), 255.0 * np.array(greenmap), 255.0 * np.array(bluemap)
|
|
import re
import string
import unicodedata
# Character sets occasionally useful to callers of this module.
ALPHANUMERIC = set(string.ascii_letters + string.digits)
PUNCTUATION = set(string.punctuation)
# Types treated as "string-like" throughout this module.
STRING_TYPES = (bytes, str)
DEFAULT_ENCODING = 'UTF-8'
# Pre-compiled patterns for camelCase <-> snake_case conversions.
_TO_CAMEL_REGEX = re.compile(r'_+([a-zA-Z0-9])')  # underscore(s) plus the char to capitalize
_TO_SNAKE_REGEX_1 = re.compile(r'(.)([A-Z][a-z]+)')  # boundary before a Capitalized word
_TO_SNAKE_REGEX_2 = re.compile(r'([a-z0-9])([A-Z])')  # lower/digit followed by an upper
_TO_SNAKE_REGEX_3 = re.compile(r'[_]{2,}')  # runs of underscores, collapsed to one
_TO_SNAKE_PATTERN = r'\1_\2'
def camel_to_constant(s):
    """Convert a camelCase string to CONSTANT_CASE (underscored, upper-cased)."""
    return _camel_to_underscored(s, to_constant=True)
def camel_to_snake(s):
    """Convert a camelCase string to snake_case (underscored, lower-cased)."""
    return _camel_to_underscored(s, to_constant=False)
def _camel_to_underscored(s, to_constant):
    """Shared implementation: insert underscores at camel boundaries, then case the result."""
    if s is None or len(s) == 0:
        return s
    underscored = _TO_SNAKE_REGEX_3.sub(
        '_',
        _TO_SNAKE_REGEX_2.sub(
            _TO_SNAKE_PATTERN,
            _TO_SNAKE_REGEX_1.sub(_TO_SNAKE_PATTERN, s),
        ),
    )
    return underscored.upper() if to_constant else underscored.lower()
def constant_to_camel(s):
    """Convert a CONSTANT_CASE string to camelCase (first segment lower-cased)."""
    return _underscored_to_camel(s)
def snake_to_camel(s):
    """Convert a snake_case string to camelCase (first segment lower-cased)."""
    return _underscored_to_camel(s)
def _underscored_to_camel(s):
    """Shared implementation: lower-case, strip edge underscores, capitalize after each underscore run."""
    if s is None or len(s) == 0:
        return s
    lowered = s.strip('_').lower()
    return _TO_CAMEL_REGEX.sub(lambda m: m.group(1).upper(), lowered)
def find_all(s, sub, start=0, end=0, limit=-1, reverse=False):
    """
    Locate every index at which sub occurs in s.
    :param s: the string to search
    :param sub: the string to search for
    :param start: the index in s at which to begin the search (same as in ''.find)
    :param end: the index in s at which to stop searching (same as in ''.find)
    :param limit: the maximum number of matches to find (negative means unlimited)
    :param reverse: if False search s forwards, otherwise search backwards
    :return: all occurrences of substring sub in string s
    """
    matches = []
    if not (s and sub):
        return matches
    length, sub_len = len(s), len(sub)
    if start >= length or sub_len > length or limit == 0:
        return matches
    if limit < 0:
        limit = length  # can never find more matches than characters
    stop = min(end, length) or length  # end of 0 means "to the end of s"
    pos = s.rfind(sub, start, stop) if reverse else s.find(sub, start, stop)
    while pos != -1 and len(matches) < limit:
        matches.append(pos)
        if reverse:
            # next search ends (exclusive) where this match began
            pos = s.rfind(sub, start, pos)
        else:
            # skip past this match for the next forward search
            pos = s.find(sub, pos + sub_len, stop)
    return matches
def splitany(s, sep=None, maxsplit=-1):
    """
    Splits "s" into substrings using "sep" as the delimiter string. Behaves like str.split, except that:
    1. Single strings are parsed into characters, any of which may be used as a delimiter
    2. Lists or tuples of multiple character strings may be provided, and thus used as delimiters
    If "sep" is None, a single character, or a list with one string, str.split is called directly.
    Otherwise, "s" is parsed iteratively until all delimiters have been found, or maxsplit has been reached.
    :param s: the unicode or binary string to split
    :param sep: a string or list of strings to use as delimiter in the split (defaults to whitespace):
        if a string, split on any char; if a list or tuple, split on any of its values
    :param maxsplit: if provided, the maximum number of splits to perform
    :return: the list of substrings in "s" between occurrences of "sep"
    :raises TypeError: if "s" is not a string, or "sep" is neither a string nor a list/tuple of strings
    """
    if s is None:
        return []
    elif not isinstance(s, STRING_TYPES):
        raise TypeError('Cannot split a {t}: {s}'.format(s=s, t=type(s).__name__))
    elif sep is None:
        return s.split(sep, maxsplit)
    elif not isinstance(sep, (bytes, list, str, tuple)):
        raise TypeError('Cannot split on a {t}: {s}'.format(s=sep, t=type(sep).__name__))
    else:
        split_on_any_char = isinstance(sep, STRING_TYPES)
        if split_on_any_char:
            # Sync and wrap to prevent issues with Binary: b'a'[0] == 97
            seps = [_sync_string_to(s, sep)]
        elif all(isinstance(sub, STRING_TYPES) for sub in sep):
            # Sync each delimiter to the type of s (str vs bytes)
            seps = [_sync_string_to(s, sub) for sub in sep]
        else:
            # BUG FIX: filter each item (sub), not the containing list (sep), so the
            # error message reports only the delimiters that are actually invalid.
            invalid_seps = [sub for sub in sep if not isinstance(sub, STRING_TYPES)]
            raise TypeError(f'Cannot split on the following: {invalid_seps}')
        if len(s) == 0 or len(seps) == 0 or maxsplit == 0:
            return [s]
        elif len(seps) == 1:
            # Reduce to single char or list item
            # Call split if sep like: 'a', ['a'], ['ab']
            # Otherwise, split on any if sep like: 'ab'
            seps = seps[0]
            if not split_on_any_char or len(seps) == 1:
                return s.split(seps, maxsplit)
        as_text = isinstance(seps, (list, str, tuple))
        parts = []
        start = 0
        rest = None
        try:
            while maxsplit < 0 or maxsplit >= len(parts):
                rest = s if start == 0 else rest[start:]
                # Sort based on (index_in_sep, negative_len_of_sep) to do largest matches first
                if as_text:
                    stop = min((rest.index(sub), 0 - len(sub)) for sub in seps if sub in rest)
                else:
                    # Iterating over bytes results in int values
                    stop = min((rest.index(sub), 0 - len(bytes([sub]))) for sub in seps if sub in rest)
                parts.append(rest if maxsplit == len(parts) else rest[:stop[0]])
                start = stop[0] - stop[1]  # Skip full index of last delim
        except ValueError:
            # min() over an empty sequence: no delimiter remains in `rest`, so it is the tail
            parts.append(rest)
        return parts
def _sync_string_to(bin_or_str, string):
""" Ensure two unicode or binary strings are the same type """
if isinstance(string, type(bin_or_str)):
return string
elif isinstance(string, bytes):
return string.decode(DEFAULT_ENCODING)
else:
return string.encode(DEFAULT_ENCODING)
def to_ascii_equivalent(text):
    """ Converts any non-ASCII characters (accents, etc.) to their best-fit ASCII equivalents """
    if text is None:
        return None
    if isinstance(text, bytes):
        text = text.decode(DEFAULT_ENCODING)
    elif not isinstance(text, str):
        text = str(text)
    # Swap known punctuation first, then strip the combining marks left by NFD decomposition
    mapped = ''.join(_ASCII_PUNCTUATION_MAP.get(ch, ch) for ch in text)
    decomposed = unicodedata.normalize('NFD', mapped)
    return ''.join(ch for ch in decomposed if unicodedata.category(ch) != 'Mn')
# Best-fit ASCII replacements for common Unicode punctuation, consumed by
# to_ascii_equivalent() before combining marks are stripped.
_ASCII_PUNCTUATION_MAP = {
    # Hyphens and dashes
    u'\u2010': u'-', u'\u2011': u'-', u'\u2012': u'-', u'\u2013': u'-', u'\u2014': u'-', u'\u2015': u'-',
    u'\uff0d': u'-', u'\uff63': u'-',
    # Single quotes
    u'\u02b9': u"'", u'\u02bb': u"'", u'\u02bc': u"'", u'\u02bd': u"'", u'\u02be': u"'", u'\u02bf': u"'",
    u'\u2018': u"'", u'\u2019': u"'", u'\u201a': u"'", u'\u201b': u"'",
    # Double quotes
    u'\u02ba': u'"', u'\u201c': u'"', u'\u201d': u'"', u'\u201e': u'"', u'\u201f': u'"', u'\u2e42': u'"',
    # Commas
    u'\u2e32': u',', u'\u2e34': u',', u'\u2e41': u',', u'\u3001': u',',
    u'\ufe10': u',', u'\ufe11': u',', u'\ufe50': u',', u'\ufe51': u',', u'\uff0c': u',', u'\uff64': u',',
}
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import socket
import re
from functools import reduce
def ip2int(ip):
    """Convert a dotted-quad IPv4 string to its 32-bit integer value."""
    value = 0
    for octet in ip.split('.'):
        value = value * 256 + int(octet)
    return value
def int2ip(ip):
    """Convert a 32-bit integer to dotted-quad IPv4 form; reject out-of-range values."""
    if ip < 0 or ip > 0xffffffff:
        raise ValueError('long ip must be between 0 and 0xffffffff inclusive')
    octets = [(ip >> shift) & 255 for shift in (24, 16, 8, 0)]
    return '{0}.{1}.{2}.{3}'.format(*octets)
class whois(object):
    """Minimal recursive whois client: query a server, follow referrals, parse fields."""
    # host list
    ANICHOST = 'whois.arin.net'
    RNICHOST = 'whois.ripe.net'
    PNICHOST = 'whois.apnic.net'
    IANAHOST = 'whois.iana.org'
    AFRINIC = 'whois.afrinic.net'
    # list of host specific whois queries ('%s' is replaced with the query string)
    QUERY = {
        ANICHOST: 'n + %s',
        RNICHOST: '-V Md5.0 %s',
        AFRINIC: '-V Md5.0 %s',
    }
    # use IANA as the default whois host
    DEFAULT_HOST = IANAHOST
    DEFAULT_PORT = 43
    # socket timeout in seconds; NOTE(review): 0.5s is aggressive for slow whois servers
    TIMEOUT = 0.5
    # list of host referral fields: alias -> (candidate field names, value regexes)
    FIELDS_REFER = {
        'hostname': (
            ('referralserver', 'refer', 'whois'),
            (r'\S+://(?P<hostname>\w[-.a-z0-9]+)(?::(?P<port>\d{1,5}))?', r'\w[-.a-z0-9]+')
        )
    }
    # list of special whois fields: alias -> (candidate field names, value regexes)
    FIELDS_WHOIS = {
        'country': (
            ('country', 'zip code', 'country-code'),
            (r'[a-z]+',)
        ),
        'orgname': (
            (
                'descr', 'orgname', 'org-name',
                'organization', 'netname', 'network-name',
                'service name', 'owner', 'name', 'id', 'responsible'
            ),
            (r'[^\r\n]+',)
        ),
        'ipv4range': (
            (
                'ip-network-range', 'ip-network',
                'inetnum', 'netrange', 'ipv4 address',
            ),
            (
                r'(?P<ipv4_from>[\d.]+)\D*-\D*(?P<ipv4_to>[\d.]+)',
                r'(?P<ipv4_addr>[\d.]+)/(?P<ipv4_mask>\d{1,2})'
            )
        ),
    }
    # Sentinel exception used to break out of the nested loops in parse_fields()
    class BreakLoop(Exception):
        pass
    @classmethod
    def query(cls, query, hostname=None, port=None):
        """Send `query` to a whois server and return a stack (list) of raw responses,
        following server referrals recursively (referring server's answer first)."""
        # set default hostname if None
        hostname = hostname or cls.DEFAULT_HOST
        port = port or cls.DEFAULT_PORT
        # open a tcp socket
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(cls.TIMEOUT)
        stack = []
        response = b''
        try:
            sock.connect((hostname, port))
            # send a query
            # NOTE: `format` shadows the builtin; per-host query template, plain '%s' by default
            format = cls.QUERY.get(hostname, '%s') + '\r\n'
            sock.send((format % query).encode('ascii'))
            # read the response until the server closes the connection
            while True:
                buf = sock.recv(4096)
                response += buf
                if not buf:
                    break
        except socket.timeout:
            # NOTE(review): returns a str, not a list like the normal path; callers
            # happen to tolerate this ('' is iterable/joinable) — confirm before changing
            return ''
        finally:
            sock.close()
        response = response.decode('utf-8', errors='ignore')
        stack.append(response)
        # see if we are being redirected to another whois server
        referral = cls.parse_fields(cls.FIELDS_REFER, response)
        # do another query against the referred server
        if referral.get('hostname', None):
            try:
                port = int(referral['port'])
            except (KeyError, ValueError, TypeError):
                # no (usable) port in the referral; keep the current port
                pass
            stack.extend(cls.query(query, referral['hostname'], port))
        return stack
    @classmethod
    def parse(cls, response):
        """
        Parse a whois response.
        Return a dictionary mapping fields described with `FIELDS_WHOIS`
        to their corresponding parsed values.
        If provided response argument is a stack (i.e. an iterable containing
        ordered response data received from referring whois sources), try the data
        in reversed order until one of the described fields get a non-empty result.
        Args:
            response - plain text or stack-like response.
        """
        if not isinstance(response, (tuple, list)):
            response = [response]
        for text in reversed(response):
            parsed = cls.parse_fields(cls.FIELDS_WHOIS, text)
            # attempt to expand ipv4 range from CIDR notation (addr/mask)
            if parsed.get('ipv4_mask', None):
                try:
                    parsed['ipv4range'] = cls.expand_ipv4_address(parsed['ipv4_addr'], int(parsed['ipv4_mask']))
                except ValueError:
                    parsed['ipv4range'] = None
            elif parsed.get('ipv4_from', None):
                parsed['ipv4range'] = parsed['ipv4_from'], parsed['ipv4_to']
            # fix country: normalize 2-3 letter codes to lowercase 2-letter form
            if parsed.get('country'):
                # USA > us
                parsed['country'] = parsed['country'][:2].lower() if 1 < len(parsed['country']) <= 3 else None
            # if any of the above has been parsed, return the result as successful
            if [value for value in parsed.values() if value is not None]:
                return parsed
        # use the latest parsed value as the last resort
        try:
            return parsed
        # provided response stack is empty, so `parsed` was never bound
        except NameError:
            return None
    @classmethod
    def whois(cls, address, hostname=None, port=None, raw=False):
        """Query whois for `address`; return parsed fields, or joined raw text if raw=True."""
        stack = cls.query(address, hostname, port)
        # do not parse/format the response
        if raw:
            return '\n'.join(stack)
        return cls.parse(stack)
    @classmethod
    def parse_fields(cls, fields, text):
        """For each alias in `fields`, find the first 'name: value' line in `text`
        matching any (field name, value pattern) pair; missing aliases map to None."""
        result = {}
        # NOTE: the loop target `fields` shadows the parameter; safe because
        # fields.items() is evaluated once before the first rebinding
        for alias, (fields, patterns) in fields.items():
            try:
                for field in fields:
                    for pattern in patterns:
                        matched = re.search(
                            r'^(?:[^:]+:)?{}[ \t]*:[ \t]*({})'.format(field, pattern), text, re.I | re.M
                        )
                        if matched:
                            result[alias] = matched.group(1)
                            # update the dict with named groups (if any)
                            result.update(matched.groupdict())
                            # first match wins: escape both nested loops
                            raise cls.BreakLoop()
            except cls.BreakLoop:
                pass
            # try/except ELSE: runs only when nothing matched for this alias
            else:
                result[alias] = None
        return result
    @staticmethod
    def expand_ipv4_address(address, mask=None):
        """Expand a possibly-truncated address plus CIDR mask into a (from, to) range."""
        mask = mask or 32
        # replace missing ip parts with zeroes, e.g. 187.12 -> 187.12.0.0
        ipv4_from = '.'.join((address.split('.') + ['0']*4)[:4])
        ipv4_to = int2ip(ip2int(ipv4_from) + 2**(32-mask)-1)
        return (ipv4_from, ipv4_to)
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('addr')
    # --pretty requests parsed fields; the default prints the raw whois text
    parser.add_argument('--pretty', action='store_true', default=False)
    args = parser.parse_args()
    # raw=not pretty: raw text unless --pretty was given
    result = whois.whois(args.addr, raw=not args.pretty)
    # display dict items (parsed mode)
    if isinstance(result, dict):
        for item in result.items():
            print('%s: %s' % item)
    # display result as is (raw text, or None on empty/timeout)
    else:
        print(result)
|
|
"""
Utils.
"""
import copy
from collections import OrderedDict
import inflection
from django.conf import settings
from django.utils import six
from django.utils.module_loading import import_string as import_class_from_dotted_path
from django.utils.translation import ugettext_lazy as _
from rest_framework.exceptions import APIException
try:
from rest_framework.serializers import ManyRelatedField
except ImportError:
ManyRelatedField = type(None)
try:
from rest_framework_nested.relations import HyperlinkedRouterField
except ImportError:
HyperlinkedRouterField = type(None)
def get_resource_name(context):
    """
    Return the name of a resource for the view in a renderer context.
    """
    view = context.get('view')
    # Sanity check to make sure we have a view.
    if not view:
        raise APIException(_('Could not find view.'))
    # Error responses are always rendered under the resource name `errors`.
    try:
        code = str(view.response.status_code)
    except (AttributeError, ValueError):
        pass
    else:
        if code.startswith(('4', '5')):
            return 'errors'
    try:
        resource_name = getattr(view, 'resource_name')
    except AttributeError:
        try:
            return get_resource_type_from_serializer(view.get_serializer_class())
        except AttributeError:
            try:
                resource_name = get_resource_type_from_model(view.model)
            except AttributeError:
                resource_name = view.__class__.__name__
    if not isinstance(resource_name, six.string_types):
        # The resource name is not a string - return as is
        return resource_name
    # the name was calculated automatically from the view > pluralize and format
    return format_relation_name(resource_name)
def get_serializer_fields(serializer):
    """Return a serializer's fields mapping, minus any fields declared in Meta.meta_fields."""
    fields = None
    meta = None
    # ListSerializer wraps the real serializer in `.child`
    if hasattr(serializer, 'child'):
        fields = serializer.child.fields
        meta = getattr(serializer.child, 'Meta', None)
    # A plain serializer exposes `.fields` directly (takes precedence)
    if hasattr(serializer, 'fields'):
        fields = serializer.fields
        meta = getattr(serializer, 'Meta', None)
    if not fields:
        return fields
    for field in getattr(meta, 'meta_fields', {}):
        try:
            fields.pop(field)
        except KeyError:
            pass
    return fields
def format_keys(obj, format_type=None):
    """
    Takes either a dict or list and returns it with camelized keys only if
    JSON_API_FORMAT_KEYS is set.
    :format_type: Either 'dasherize', 'camelize', 'capitalize' or 'underscore'
    """
    if format_type is None:
        format_type = getattr(settings, 'JSON_API_FORMAT_KEYS', False)
    if format_type not in ('dasherize', 'camelize', 'underscore', 'capitalize'):
        # Unknown/unset format: return the object untouched
        return obj
    if isinstance(obj, dict):
        formatted = OrderedDict()
        for key, value in obj.items():
            if format_type == 'dasherize':
                # inflection can't dasherize camelCase, so underscore first
                new_key = inflection.dasherize(inflection.underscore(key))
            elif format_type == 'camelize':
                new_key = inflection.camelize(key, False)
            elif format_type == 'capitalize':
                new_key = inflection.camelize(key)
            else:  # 'underscore'
                new_key = inflection.underscore(key)
            # Recurse so nested dicts/lists get the same treatment
            formatted[new_key] = format_keys(value, format_type)
        return formatted
    if isinstance(obj, list):
        return [format_keys(item, format_type) for item in obj]
    return obj
def format_value(value, format_type=None):
    """Format a single string value per format_type (defaults to JSON_API_FORMAT_KEYS)."""
    if format_type is None:
        format_type = getattr(settings, 'JSON_API_FORMAT_KEYS', False)
    if format_type == 'dasherize':
        # inflection can't dasherize camelCase, so underscore first
        return inflection.dasherize(inflection.underscore(value))
    if format_type == 'camelize':
        return inflection.camelize(value, False)
    if format_type == 'capitalize':
        return inflection.camelize(value)
    if format_type == 'underscore':
        return inflection.underscore(value)
    return value
def format_relation_name(value, format_type=None):
    """Format a relation/type name per settings, pluralizing when configured."""
    if format_type is None:
        format_type = getattr(settings, 'JSON_API_FORMAT_RELATION_KEYS', False)
    if format_type:
        # format_type will never be None here so we can use format_value
        value = format_value(value, format_type)
    if getattr(settings, 'JSON_API_PLURALIZE_RELATION_TYPE', False):
        return inflection.pluralize(value)
    return value
def parse_relation_name(value, format_type=None):
    """Inverse of format_relation_name, used when parsing include keys."""
    if format_type is None:
        format_type = getattr(settings, 'JSON_API_PARSE_INCLUDE_KEYS', False)
    if format_type:
        value = format_value(value, format_type)
    if getattr(settings, 'JSON_API_SINGULARIZE_INCLUDE_TYPE', False):
        return inflection.singularize(value)
    return value
def get_related_resource_type(relation):
    """Resolve the JSON API resource type for a serializer relation field,
    trying, in order: the related model itself, an explicit `model` kwarg,
    the field's queryset, and finally introspection of the parent model."""
    if hasattr(relation, '_meta'):
        # the relation is itself a model (or model-like) object
        relation_model = relation._meta.model
    elif hasattr(relation, 'model'):
        # the model type was explicitly passed as a kwarg to ResourceRelatedField
        relation_model = relation.model
    elif hasattr(relation, 'get_queryset') and relation.get_queryset() is not None:
        relation_model = relation.get_queryset().model
    else:
        # read-only fields have no queryset: introspect the parent serializer's model
        parent_serializer = relation.parent
        if hasattr(parent_serializer, 'Meta'):
            parent_model = parent_serializer.Meta.model
        else:
            # e.g. ManyRelatedField: the Meta lives on the grandparent
            parent_model = parent_serializer.parent.Meta.model
        if relation.source:
            if relation.source != '*':
                parent_model_relation = getattr(parent_model, relation.source)
            else:
                # source='*' means "the whole object": fall back to the field name
                parent_model_relation = getattr(parent_model, relation.field_name)
        else:
            parent_model_relation = getattr(parent_model, parent_serializer.field_name)
        if hasattr(parent_model_relation, 'related'):
            # reverse relation descriptor
            try:
                relation_model = parent_model_relation.related.related_model
            except AttributeError:
                # Django 1.7
                relation_model = parent_model_relation.related.model
        elif hasattr(parent_model_relation, 'field'):
            # forward relation descriptor
            relation_model = parent_model_relation.field.related.model
        else:
            # not a descriptor: recurse on whatever the attribute resolved to
            return get_related_resource_type(parent_model_relation)
    return get_resource_type_from_model(relation_model)
def get_instance_or_manager_resource_type(resource_instance_or_manager):
    """Resolve the resource type for either a manager (has `.model`) or a
    model instance (has `._meta`).

    :return: the resource type string, or None when the argument is neither
        a manager nor a model instance.
    """
    if hasattr(resource_instance_or_manager, 'model'):
        return get_resource_type_from_manager(resource_instance_or_manager)
    if hasattr(resource_instance_or_manager, '_meta'):
        return get_resource_type_from_instance(resource_instance_or_manager)
    # Original ended with a dead `pass`; the None fall-through is now explicit.
    return None
def get_resource_type_from_model(model):
    """Return the resource type from the model's JSONAPIMeta.resource_name,
    falling back to a formatted version of the model class name."""
    json_api_meta = getattr(model, 'JSONAPIMeta', None)
    return getattr(
        json_api_meta,
        'resource_name',
        format_relation_name(model.__name__))
def get_resource_type_from_queryset(qs):
    """Return the resource type for a queryset's model."""
    return get_resource_type_from_model(qs.model)
def get_resource_type_from_instance(instance):
    """Return the resource type for a model instance."""
    return get_resource_type_from_model(instance._meta.model)
def get_resource_type_from_manager(manager):
    """Return the resource type for a manager's model."""
    return get_resource_type_from_model(manager.model)
def get_resource_type_from_serializer(serializer):
    """Return the serializer's explicit Meta.resource_name, or derive it from Meta.model."""
    return getattr(
        serializer.Meta,
        'resource_name',
        get_resource_type_from_model(serializer.Meta.model))
def get_included_serializers(serializer):
    """Return a copy of `included_serializers` with string paths and 'self'
    references resolved to serializer classes."""
    included = copy.copy(getattr(serializer, 'included_serializers', dict()))
    for name, value in six.iteritems(included):
        if isinstance(value, type):
            continue  # already a class
        if value == 'self':
            included[name] = serializer if isinstance(serializer, type) else serializer.__class__
        else:
            included[name] = import_class_from_dotted_path(value)
    return included
class Hyperlink(six.text_type):
    """
    A string like object that additionally has an associated name.
    We use this for hyperlinked URLs that may render as a named link
    in some contexts, or render as a plain URL in others.
    Comes from Django REST framework 3.2
    https://github.com/tomchristie/django-rest-framework
    """
    # NOTE: first parameter is conventionally `cls` for __new__; kept as-is.
    def __new__(self, url, name):
        ret = six.text_type.__new__(self, url)
        # attach the display name to the string instance
        ret.name = name
        return ret
    # marker attribute so renderers can detect named links without isinstance checks
    is_hyperlink = True
|
|
# do not pre-load
# Import any DAObject classes that you will need
from docassemble.base.util import Individual, Person, DAObject
# Import the SQLObject and some associated utility functions
from docassemble.base.sql import alchemy_url, connect_args, upgrade_db, SQLObject, SQLObjectRelationship
# Import SQLAlchemy names
from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
# Only allow these names (DAObject classes) to be imported with a modules block
__all__ = ['Bank', 'Customer', 'BankCustomer']
# Create the base class for SQLAlchemy table definitions
Base = declarative_base()
# SQLAlchemy table definition for a Bank
class BankModel(Base):
    __tablename__ = 'bank'
    id = Column(Integer, primary_key=True)
    # routing number: the human-readable unique identifier (Bank._uid)
    routing = Column(String(250), unique=True)
    name = Column(String(250))
# SQLAlchemy table definition for a Customer
class CustomerModel(Base):
    __tablename__ = 'customer'
    id = Column(Integer, primary_key=True)
    # SSN: the human-readable unique identifier (Customer._uid)
    ssn = Column(String(250), unique=True)
    first_name = Column(String(250))
    last_name = Column(String(250))
    # mailing address components
    address = Column(String(250))
    unit = Column(String(250))
    city = Column(String(250))
    state = Column(String(250))
    zip = Column(String(250))
# SQLAlchemy table definition for keeping track of which Banks have which Customers
class BankCustomerModel(Base):
    __tablename__ = 'bank_customer'
    id = Column(Integer, primary_key=True)
    # both FKs cascade on delete so relationship rows vanish with either side
    bank_id = Column(Integer, ForeignKey('bank.id', ondelete='CASCADE'), nullable=False)
    customer_id = Column(Integer, ForeignKey('customer.id', ondelete='CASCADE'), nullable=False)
# Form the URL for connecting to the database based on the "demo db" directive in the Configuration
url = alchemy_url('demo db')
# Build the "engine" for connecting to the SQL server, using the URL for the database.
conn_args = connect_args('demo db')
# NOTE(review): connect_args are only passed for PostgreSQL; presumably other
# backends reject them — confirm. pool_pre_ping is disabled in both branches.
if url.startswith('postgres'):
    engine = create_engine(url, connect_args=conn_args, pool_pre_ping=False)
else:
    engine = create_engine(url, pool_pre_ping=False)
# Create the tables
Base.metadata.create_all(engine)
# Get SQLAlchemy ready
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)()
# Perform any necessary database schema updates using alembic, if there is an alembic
# directory and alembic.ini file in the package.
upgrade_db(url, __file__, engine, conn_args=conn_args)
# Define Bank as both a DAObject and SQLObject
class Bank(Person, SQLObject):
    """A bank, stored as a Person DAObject mirrored to the "bank" SQL table."""
    # This tells the SQLObject code what the SQLAlchemy model is
    _model = BankModel
    # This tells the SQLObject code how to access the database
    _session = DBSession
    # This indicates that an object is not ready to be saved to SQL unless the "name" column is defined
    _required = ['name']
    # This indicates that the human-readable unique identifier for the table is the column "routing"
    _uid = 'routing'
    def init(self, *pargs, **kwargs):
        super().init(*pargs, **kwargs)
        # This runs necessary SQLObject initialization code for the instance
        self.sql_init()
    # The db_get function specifies how to get attributes from the DAObject for purposes of setting SQL column values
    def db_get(self, column):
        """Map a SQL column name to the corresponding DAObject attribute value."""
        if column == 'name':
            return self.name.text
        if column == 'routing':
            return self.routing
        raise Exception("Invalid column " + column)
    # The db_set function specifies how to set attributes of the DAObject on the basis of non-null SQL column values
    def db_set(self, column, value):
        """Set the DAObject attribute corresponding to a SQL column value."""
        if column == 'name':
            self.name.text = value
        elif column == 'routing':
            self.routing = value
        else:
            raise Exception("Invalid column " + column)
    # The db_null function specifies how to delete attributes of the DAObject when the SQL column value becomes null
    def db_null(self, column):
        """Delete the DAObject attribute corresponding to a now-null SQL column."""
        if column == 'name':
            del self.name.text
        elif column == 'routing':
            del self.routing
        else:
            raise Exception("Invalid column " + column)
    # This is an example of a method that uses SQLAlchemy to return True or False
    def has_customer(self, customer):
        """Return True if a bank_customer row links this bank to `customer`."""
        if not (self.ready() and customer.ready()):
            raise Exception("has_customer: cannot retrieve data")
        # this opens a connection to the SQL database
        db_entry = self._session.query(BankCustomerModel).filter(BankCustomerModel.bank_id == self.id, BankCustomerModel.customer_id == customer.id).first()
        if db_entry is None:
            return False
        return True
    # This is an example of a method that uses SQLAlchemy to add a record to the BankCustomer SQL table
    # to indicate that a bank has a customer. Note that it is designed to be idempotent; it will not add
    # a duplicate record.
    def add_customer(self, customer):
        """Link `customer` to this bank (no-op if the link already exists)."""
        if not self.has_customer(customer):
            db_entry = BankCustomerModel(bank_id=self.id, customer_id=customer.id)
            self._session.add(db_entry)
            self._session.commit()
    # This is an example of a method that uses SQLAlchemy to return a list of Customer objects.
    # It uses the by_id() class method to return a Customer object for the given id.
    def get_customers(self):
        """Return a list of Customer objects linked to this bank."""
        if not self.ready():
            raise Exception("get_customers: cannot retrieve data")
        results = []
        for db_entry in self._session.query(BankCustomerModel).filter(BankCustomerModel.bank_id == self.id).all():
            results.append(Customer.by_id(db_entry.customer_id))
        return results
    # This is an example of a method that uses SQLAlchemy to delete a bank-customer relationship
    def del_customer(self, customer):
        """Remove the link between this bank and `customer`, if any."""
        if not (self.ready() and customer.ready()):
            raise Exception("del_customer: cannot retrieve data")
        self._session.query(BankCustomerModel).filter(BankCustomerModel.bank_id == self.id, BankCustomerModel.customer_id == customer.id).delete()
        self._session.commit()
class Customer(Individual, SQLObject):
    """A customer, stored as an Individual DAObject mirrored to the "customer" SQL table."""
    _model = CustomerModel
    _session = DBSession
    _required = ['first_name']
    _uid = 'ssn'
    def init(self, *pargs, **kwargs):
        super().init(*pargs, **kwargs)
        self.sql_init()
    def db_get(self, column):
        """Map a SQL column name to the corresponding DAObject attribute value."""
        if column == 'ssn':
            return self.ssn
        if column == 'first_name':
            return self.name.first
        if column == 'last_name':
            return self.name.last
        if column == 'address':
            return self.address.address
        if column == 'unit':
            return self.address.unit
        if column == 'city':
            return self.address.city
        if column == 'state':
            return self.address.state
        if column == 'zip':
            return self.address.zip
        raise Exception("Invalid column " + column)
    def db_set(self, column, value):
        """Set the DAObject attribute corresponding to a SQL column value."""
        if column == 'ssn':
            self.ssn = value
        elif column == 'first_name':
            self.name.first = value
        elif column == 'last_name':
            self.name.last = value
        elif column == 'address':
            self.address.address = value
        elif column == 'unit':
            self.address.unit = value
        elif column == 'city':
            self.address.city = value
        elif column == 'state':
            self.address.state = value
        elif column == 'zip':
            self.address.zip = value
        else:
            # Consistency fix: Bank.db_set raises on unknown columns, but this
            # method used to fall through silently, hiding mapping mistakes.
            raise Exception("Invalid column " + column)
    def db_null(self, column):
        """Delete the DAObject attribute corresponding to a now-null SQL column."""
        if column == 'ssn':
            del self.ssn
        elif column == 'first_name':
            del self.name.first
        elif column == 'last_name':
            del self.name.last
        elif column == 'address':
            del self.address.address
        elif column == 'unit':
            del self.address.unit
        elif column == 'city':
            del self.address.city
        elif column == 'state':
            del self.address.state
        elif column == 'zip':
            del self.address.zip
        else:
            # Consistency fix: mirror Bank.db_null's rejection of unknown columns.
            raise Exception("Invalid column " + column)
class BankCustomer(DAObject, SQLObjectRelationship):
    """Join object tying a Bank to a Customer via the "bank_customer" SQL table."""
    _model = BankCustomerModel
    _session = DBSession
    # SQLObjectRelationship metadata: (class, attribute name, foreign key column)
    _parent = [Bank, 'bank', 'bank_id']
    _child = [Customer, 'customer', 'customer_id']
    def init(self, *pargs, **kwargs):
        super().init(*pargs, **kwargs)
        self.rel_init(*pargs, **kwargs)
    def db_get(self, column):
        """Map SQL foreign-key columns to the related objects' ids."""
        if column == 'bank_id':
            return self.bank.id
        if column == 'customer_id':
            return self.customer.id
        raise Exception("Invalid column " + column)
    def db_set(self, column, value):
        """Rehydrate the related Bank/Customer objects from their SQL ids."""
        if column == 'bank_id':
            self.bank = Bank.by_id(value)
        elif column == 'customer_id':
            self.customer = Customer.by_id(value)
        else:
            raise Exception("Invalid column " + column)
    # A db_find_existing method is defined here because the default db_find_existing() method for
    # the SQLObject class tries to find existing records based on a unique identifier column indicated
    # by the _uid attribute. Since the unique identifier for a bank-customer relationship record is
    # not a single column, but rather the combination of bank ID and customer ID, there is no _uid
    # column for the default db_find_existing() method to use. But we can write our own method for
    # how to locate an existing record based on Python object attributes (.bank.id and .customer.id).
    def db_find_existing(self):
        try:
            return self._session.query(BankCustomerModel).filter(BankCustomerModel.bank_id == self.bank.id, BankCustomerModel.customer_id == self.customer.id).first()
        # Fix: a bare `except:` also swallowed KeyboardInterrupt/SystemExit;
        # Exception still covers missing attributes and query failures.
        except Exception:
            return None
|
|
import os, sys, re
# import hou
# `hou` cannot be imported directly here; grab it from the __main__ namespace
main = __import__('__main__')
hou = main.__dict__['hou']
import hqt
reload (hqt)
from managers.completeWidget import contextCompleterClass
# sibling "houdini" directory holding extra helper modules
path = os.path.join(os.path.dirname(__file__), 'houdini')
main = __import__('__main__')
ns = main.__dict__
# best-effort: import every module from the houdini dir into __main__'s namespace
for mod in [os.path.splitext(x)[0] for x in os.listdir(path)]:
    if not mod in ns:
        try:
            exec 'import ' + mod in ns
        except:
            pass
if not path in sys.path:
    sys.path.insert(0, path)
from pw_multiScriptEditor import scriptEditor
reload(scriptEditor)
def show(*args, **kwargs):
    """Open the multi script editor inside Houdini via the hqt wrapper."""
    hqt.show(scriptEditor.scriptEditorClass, *args, **kwargs)
# EXAMPLE SHELF BUTTON
# H13
# path = 'path/to/MultiScriptEditor_module'
# # example c:/houdini/python/lib
# if not path in sys.path:
# sys.path.append(path)
# import pw_multiScriptEditor
# reload(pw_multiScriptEditor)
# pw_multiScriptEditor.showHoudini(ontop=1)
# H14
#import sys
# path = 'path/to/MultiScriptEditor_module'
# # example c:/houdini/python/lib
# if not path in sys.path:
# sys.path.append(path)
# import pw_multiScriptEditor
# reload(pw_multiScriptEditor)
# pw_multiScriptEditor.showHoudini(name='Multi Script Editor',replacePyPanel=1, hideTitleMenu=0)
###################### CONTEXT FUNCTIONS
# def saveToNode(code, node):
# definition = node.type().definition()
# definition.sections()["PythonCook"].setContents(code)
# saveToNode('import hou',hou.selectedNodes()[0])
def getAllDifinitions():
    """Return the de-duplicated names of all node type definitions.

    (Function name kept as-is, typo included, for backward compatibility.)
    Fix: hou.nodeTypeCategories() is now queried once instead of twice per
    category in the loop.
    """
    names = []
    categories = hou.nodeTypeCategories()
    for category in categories.values():
        names += category.nodeTypes().keys()
    return list(set(names))
roots = ['obj', 'shop', 'ch', 'vex', 'img', 'out']
nodes = list(set(getAllDifinitions()))
def completer(line):
    """Context completion for the editor: node type names after create* calls,
    and children/parms for absolute node paths inside string literals."""
    # node-creation calls: complete node type names
    for func_name in ('createNode', 'createInputNode', 'createOutputNode'):
        match = re.search(r"\.%s\(['\"](\w*)$" % func_name, line)
        if not match:
            continue
        prefix = match.group(1)
        if prefix:
            candidates = [n for n in nodes if n.lower().startswith(prefix.lower())]
        else:
            candidates = nodes
        cut = len(prefix)
        return [contextCompleterClass(n, n[cut:], True) for n in candidates], None
    # absolute node path inside a string literal
    match = re.search(r"(?<=['\"]{1})(/[\w/]*)$", line)
    if match:
        children, parms = getChildrenFromPath(match.group(0))
        if children or parms:
            return children, parms
    return None, None
def getChildrenFromPath(path):
    """Split `path` at the last '/'; complete root contexts for bare '/...',
    otherwise complete the node's children and parameter names."""
    sp = path.rsplit('/', 1)
    if not sp[0]: # rootOnly: path like '/obj' or just '/'
        if sp[1]:
            # partial root name typed: filter the known root contexts
            nodes = [contextCompleterClass(x, x[len(sp[1]):]) for x in roots if x.startswith(sp[1])]
            return nodes, None
        else:
            nodes = [contextCompleterClass(x, x) for x in roots]
            return nodes, None
    # deeper path: complete children nodes plus parms of the parent node
    else:
        node = hou.node(sp[0][1:])
        if node:
            nd = list(set([x.name() for x in node.children()]))
            nodes = [contextCompleterClass(x, x[len(sp[1]):]) for x in sorted(nd) if x.startswith(sp[1])]
            # parameter and parm-tuple names, merged and de-duplicated
            ch = list(set([x.name() for x in node.parms()] + [x.name() for x in node.parmTuples()]))
            channels = [contextCompleterClass(x, x[len(sp[1]):]) for x in sorted(ch) if x.startswith(sp[1])]
            return nodes, channels
    return None, None
def contextMenu(parent):
    """Build the Houdini-specific menu for the editor's menu bar."""
    m = houdiniMenuClass(parent)
    return m
class houdiniMenuClass(hqt.QMenu):
    """'Houdini' menu: save the current editor tab to a node section, or read one back."""
    def __init__(self, parent):
        super(houdiniMenuClass, self).__init__('Houdini', parent)
        self.par = parent
        self.addAction(hqt.QAction('Save To Node', parent, triggered=self.saveToNode))
        self.addAction(hqt.QAction('Read From Node', parent, triggered=self.readFromNode))
    def readFromNode(self):
        """Load a chosen code section from the selected node into a new editor tab."""
        sel = hou.selectedNodes()
        if sel:
            res = self.getSectionsFromNode(sel[0])
            if not res:
                # Fix: message had a stray '1' ('Sections not found1'); now
                # matches the wording used in saveToNode below.
                hou.ui.displayMessage('Sections not found')
                return
            keys = res.keys()
            s = hou.ui.selectFromList(keys, exclusive=1)
            if s:
                source = res[keys[s[0]]]
                text = '#Empty'
                if isinstance(source, hou.Parm):
                    text = source.eval()
                elif isinstance(source, hou.HDASection):
                    text = source.contents()
                self.par.tab.addNewTab(sel[0].name()+'|'+source.name(), text)
        else:
            hou.ui.displayMessage('Select One Node')
    def saveToNode(self):
        """Write the current tab's text into a chosen section of the selected node."""
        sel = hou.selectedNodes()
        if sel:
            res = self.getSectionsFromNode(sel[0])
            if res:
                text = self.par.tab.getCurrentText()
                keys = res.keys()
                s = hou.ui.selectFromList(keys, exclusive=1)
                if s:
                    source = res[keys[s[0]]]
                    if isinstance(source, hou.Parm):
                        source.set(text)
                    elif isinstance(source, hou.HDASection):
                        source.setContents(text.strip())
            else:
                hou.ui.displayMessage('Sections not found')
                return
        else:
            hou.ui.displayMessage('Select One Node')
    def getSectionsFromNode(self, node):
        """Return {name: section-or-parm} for the node's non-default HDA sections,
        plus the 'python' SOP parm when present."""
        default = ['Help', 'TypePropertiesOptions', 'ExtraFileOptions', 'Tools.shelf', 'InternalFileOptions', 'Contents.gz', 'CreateScript', 'DialogScript']
        res = {}
        Def = node.type().definition()
        if Def:
            sections = Def.sections()
            for s in sections:
                if not sections[s].name() in default:
                    res[s] = sections[s]
        pySop = hou.parm(node.path() + '/python')
        if pySop:
            res['PythonSOP'] = pySop
        return res
def wrapDroppedText(namespace, text, event):
    """Rewrite comma-separated dropped paths as hou.* accessor expressions.

    Only active while Alt is held during the drop; otherwise *text* is
    returned unchanged.  Node paths become hou.node(...), a set of paths
    differing only in their last character becomes one hou.parmTuple(...),
    and individual parm paths become hou.parm(...) as a fallback.
    """
    if event.keyboardModifiers() != hqt.Qt.AltModifier:
        return text
    pieces = text.split(',')
    wrapped = []
    # node
    for piece in pieces:
        if hou.node(piece):
            wrapped.append('hou.node("%s")' % piece)
    # parmTuple: all pieces share the same path minus the trailing char
    if len(set(piece[:-1] for piece in pieces)) == 1:
        base = pieces[0][:-1]
        if hou.parmTuple(base):
            wrapped.append('hou.parmTuple("%s")' % base)
    # parm (only when nothing else matched)
    if not wrapped:
        for piece in pieces:
            if hou.parm(piece):
                wrapped.append('hou.parm("%s")' % piece)
    return '\n'.join(wrapped) if wrapped else text
|
|
"""Testacases for functionality involving items."""
from tests.base_test import BaseTest
import json
from bucketlist.models import Item
from bucketlist.item_routes import get_item_by_id
class TestItem(BaseTest):
    """Test items routes."""
    def get_token(self):
        """Return authentication token header for the seeded test user."""
        self.user = {"email": "user@bucketlist.com",
                     "password": "password"}
        r = self.app.post("/api/v1/auth/login",
                          data=self.user)
        output = json.loads(r.data.decode())
        token = output.get("token")
        return {"token": token}
    def test_can_add_new_item(self):
        """Tests if a user can add a new item in a bucketlist."""
        # NOTE(review): the payload key 'buckeltist_id' is misspelled
        # ('bucketlist_id'); the bucketlist id comes from the URL, so the
        # key appears to be ignored by the API — confirm before fixing.
        self.item = {"name": "Go to Hawaii", "done": "n",
                     "buckeltist_id": 1}
        r = self.app.post("/api/v1/bucketlists/1/items/", data=self.item,
                          headers=self.get_token())
        self.assertEqual(r.status_code, 201)
        message = json.loads(r.data.decode())
        self.assertIn("created successfully", message["message"])
    def test_error_when_missing_name_or_status(self):
        """Tests for error message when the name or status is missing."""
        self.item = {"name": "", "done": "",
                     "buckeltist_id": 1}
        r = self.app.post("/api/v1/bucketlists/1/items/", data=self.item,
                          headers=self.get_token())
        self.assertEqual(r.status_code, 400)
        message = json.loads(r.data.decode())
        self.assertIn("Name and/or Status can not be empty", message["message"])
    def test_shows_message_when_item_already_exists(self):
        """Tests that a message is shown when an item already exists."""
        # This item name matches a fixture seeded by BaseTest — presumably;
        # verify against the base-class setup.
        self.item = {"name": "Enjoy the beautiful beaches of Hawaii",
                     "done": "Y", "buckeltist_id": 1}
        r = self.app.post("/api/v1/bucketlists/1/items/", data=self.item,
                          headers=self.get_token())
        self.assertEqual(r.status_code, 400)
        message = json.loads(r.data.decode())
        self.assertIn("already exists", message["message"])
    def test_user_can_update_a_bucketlist_item(self):
        """Tests that a user can update an existing bucketlist item."""
        self.item = {"name": "New Item", "bucketlist_id": 1, "done": "y"}
        r = self.app.put("/api/v1/bucketlists/1/items/1", data=self.item,
                         headers=self.get_token())
        self.assertEqual(r.status_code, 200)
        # Rename replaced the old record rather than creating a new one.
        self.assertTrue(Item.query.filter_by(name="New Item").first())
        self.assertFalse(Item.query.filter_by(
            name="Enjoy the beautiful beaches of Hawaii").first())
    def test_user_cannot_update_a_bucketlist_item_with_the_old_name(self):
        """Tests that updating an item with its current name is rejected."""
        self.item = {"name": "Enjoy the beautiful beaches of Hawaii",
                     "bucketlist_id": 1, "done": "n"}
        r = self.app.put("/api/v1/bucketlists/1/items/1", data=self.item,
                         headers=self.get_token())
        message = json.loads(r.data.decode())
        self.assertEqual(r.status_code, 200)
        self.assertIn("Use a new name", message["message"])
    def test_message_when_user_updates_to_name_that_already_exists(self):
        """Tests for an error message.

        shown when a user tries to update to a item name which already exists.
        """
        self.item = {"name": "Go to Hawaii", "done": "y",
                     "buckeltist_id": 1}
        self.app.post("/api/v1/bucketlists/1/items/", data=self.item,
                      headers=self.get_token())
        # Renaming item 2 to the seeded item 1's name must fail.
        self.item2 = {"name": "Enjoy the beautiful beaches of Hawaii",
                      "bucketlist_id": 1, "done": "y"}
        r = self.app.put("/api/v1/bucketlists/1/items/2", data=self.item2,
                         headers=self.get_token())
        self.assertEqual(r.status_code, 400)
        message = json.loads(r.data.decode())
        self.assertIn("already exists", message["message"])
    def test_message_when_user_updates_an_item_that_doesnot_exists(self):
        """Tests for an error message shown when a user tries to update an item which does not exist."""  # noqa
        self.item = {"name": "Enjoy the beautiful beaches of Hawaii",
                     "bucketlist_id": 1, "done": "n"}
        # r: missing item id; r2: missing bucketlist id.
        r = self.app.put("/api/v1/bucketlists/1/items/2", data=self.item,
                         headers=self.get_token())
        r2 = self.app.put("/api/v1/bucketlists/2/items/1", data=self.item,
                          headers=self.get_token())
        message = json.loads(r.data.decode())
        message2 = json.loads(r2.data.decode())
        self.assertEqual(r.status_code, 404)
        self.assertEqual(r2.status_code, 404)
        self.assertIn("does not exist", message["message"])
        self.assertIn("does not exist", message2["message"])
    def test_message_when_user_updates_a_bucketlist_that_has_no_items(self):
        """Tests for an error message when a user tries to update a bucketlist which has no items."""  # noqa
        self.bucketlist = {"title": "Love",
                           "description": "I want to marry a princcess",
                           "created_by": '1'}
        self.app.post("/api/v1/bucketlists/", data=self.bucketlist,
                      headers=self.get_token())
        self.item = {"name": "Enjoy the beautiful sands of Hawaii",
                     "bucketlist_id": 1, "done": "True"}
        r = self.app.put("/api/v1/bucketlists/2/items/1", data=self.item,
                         headers=self.get_token())
        self.assertEqual(r.status_code, 404)
        message = json.loads(r.data.decode())
        self.assertIn("has no items", message["message"])
    def test_user_can_delete_a_bucketlist_item(self):
        """Tests that a user can delete an existing bucketlist item."""
        r = self.app.delete("/api/v1/bucketlists/1/items/1",
                            headers=self.get_token())
        message = json.loads(r.data.decode())
        self.assertEqual(r.status_code, 200)
        self.assertIn("deleted succesfully", message["message"])
        self.assertFalse(Item.query.all())
    def test_user_cannot_delete_a_nonexisting_bucketlist_item(self):
        """Tests that deleting missing items/bucketlists returns 404."""
        self.bucketlist = {"title": "Love",
                           "description": "I want to marry a princess",
                           "created_by": 1}
        self.app.post("/api/v1/bucketlists/", data=self.bucketlist,
                      headers=self.get_token())
        # r1: item missing; r2: bucketlist has no items; r3: bucketlist missing.
        r1 = self.app.delete("/api/v1/bucketlists/1/items/2",
                             headers=self.get_token())
        r2 = self.app.delete("/api/v1/bucketlists/2/items/1",
                             headers=self.get_token())
        r3 = self.app.delete("/api/v1/bucketlists/3/items/1",
                             headers=self.get_token())
        message1 = json.loads(r1.data.decode())
        message2 = json.loads(r2.data.decode())
        message3 = json.loads(r3.data.decode())
        self.assertEqual(r1.status_code, 404)
        self.assertEqual(r2.status_code, 404)
        self.assertEqual(r3.status_code, 404)
        self.assertIn("item doesnot exist", message1["message"])
        self.assertIn("does not have any items", message2["message"])
        self.assertIn("Bucketlist does not exist", message3["message"])
    def test_item_done_field_accepts_y_or_n_only_in_post(self):
        """Tests that the item done field accepts Y/y and N/n only in post."""
        self.item = {"name": "Go to Hawaii", "done": "True",
                     "buckeltist_id": 1}
        r = self.app.post("/api/v1/bucketlists/1/items/", data=self.item,
                          headers=self.get_token())
        # NOTE(review): the API returns 200 with an error message here,
        # rather than a 4xx — presumably intentional; confirm.
        self.assertEqual(r.status_code, 200)
        message = json.loads(r.data.decode())
        self.assertIn("use Y/N or y/n", message["message"])
    def test_item_done_field_accepts_y_or_n_only_in_put(self):
        """Tests that the item done field accepts Y/y and N/n only in put."""
        self.item = {"name": "Go to Hawaii", "done": "True",
                     "buckeltist_id": 1}
        r = self.app.put("/api/v1/bucketlists/1/items/1", data=self.item,
                         headers=self.get_token())
        self.assertEqual(r.status_code, 200)
        message = json.loads(r.data.decode())
        self.assertIn("use Y/N or y/n", message["message"])
    def test_user_can_get_all_bucketlist_items(self):
        """Tests that a user can get a list of all bucketlist items."""
        r = self.app.get("/api/v1/bucketlists/1/items/",
                         headers=self.get_token())
        message = json.loads(r.data.decode())
        self.assertEqual(r.status_code, 200)
        self.assertIn("Enjoy the beautiful", message["items"][0]["name"])
    def test_message_when_there_are_no_bucketlist_items(self):
        """Tests message shown when there are no items."""
        self.app.delete("/api/v1/bucketlists/1/items/1",
                        headers=self.get_token())
        r = self.app.get("/api/v1/bucketlists/1/items/",
                         headers=self.get_token())
        message = json.loads(r.data.decode())
        self.assertEqual(r.status_code, 200)
        self.assertIn("No items", message["message"])
    def test_message_when_the_bucketlist_doesnt_exist(self):
        """Tests message shown when the bucketlist does not exist."""
        r = self.app.get("/api/v1/bucketlists/2/items/",
                         headers=self.get_token())
        message = json.loads(r.data.decode())
        self.assertEqual(r.status_code, 404)
        self.assertIn("does not exist", message["message"])
    def test_get_item_by_id_works(self):
        """Tests if helper function works."""
        self.assertEqual(get_item_by_id(1, 1).name,
                         "Enjoy the beautiful beaches of Hawaii")
    def test_get_item_by_id_returns_none(self):
        """Tests if helper function returns none if item doesnt exist."""
        self.assertEqual(get_item_by_id(1, 2),
                         None)
        self.assertEqual(get_item_by_id(2, 1),
                         None)
|
|
"""Support for Tado sensors for each zone."""
import logging
from homeassistant.const import ATTR_ID, ATTR_NAME, TEMP_CELSIUS
from homeassistant.helpers.entity import Entity
from . import DATA_TADO
_LOGGER = logging.getLogger(__name__)
# Keys used when registering zone/device payloads with the tado data store.
ATTR_DATA_ID = "data_id"
ATTR_DEVICE = "device"
ATTR_ZONE = "zone"
# Sensor variables created for each HEATING zone.
CLIMATE_HEAT_SENSOR_TYPES = [
    "temperature",
    "humidity",
    "power",
    "link",
    "heating",
    "tado mode",
    "overlay",
    "early start",
]
# Sensor variables created for each AIR_CONDITIONING zone.
CLIMATE_COOL_SENSOR_TYPES = [
    "temperature",
    "humidity",
    "power",
    "link",
    "ac",
    "tado mode",
    "overlay",
]
# Sensor variables created for each HOT_WATER zone.
HOT_WATER_SENSOR_TYPES = ["power", "link", "tado mode", "overlay"]
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the sensor platform."""
    tado = hass.data[DATA_TADO]
    try:
        zones = tado.get_zones()
    except RuntimeError:
        _LOGGER.error("Unable to get zone info from mytado")
        return
    # Each Tado zone type exposes its own set of sensor variables;
    # unknown zone types produce no sensors.
    variables_by_zone_type = {
        "HEATING": CLIMATE_HEAT_SENSOR_TYPES,
        "HOT_WATER": HOT_WATER_SENSOR_TYPES,
        "AIR_CONDITIONING": CLIMATE_COOL_SENSOR_TYPES,
    }
    entities = [
        create_zone_sensor(tado, zone, zone["name"], zone["id"], variable)
        for zone in zones
        for variable in variables_by_zone_type.get(zone["type"], [])
    ]
    # One bridge-status sensor for the home itself.
    me_data = tado.get_me()
    entities.append(
        create_device_sensor(
            tado,
            me_data,
            me_data["homes"][0]["name"],
            me_data["homes"][0]["id"],
            "tado bridge status",
        )
    )
    if entities:
        add_entities(entities, True)
def create_zone_sensor(tado, zone, name, zone_id, variable):
    """Register zone data with the tado store and build its sensor."""
    data_id = "zone {} {}".format(name, zone_id)
    payload = {
        ATTR_ZONE: zone,
        ATTR_NAME: name,
        ATTR_ID: zone_id,
        ATTR_DATA_ID: data_id,
    }
    tado.add_sensor(data_id, payload)
    return TadoSensor(tado, name, zone_id, variable, data_id)
def create_device_sensor(tado, device, name, device_id, variable):
    """Register device data with the tado store and build its sensor."""
    data_id = "device {} {}".format(name, device_id)
    payload = {
        ATTR_DEVICE: device,
        ATTR_NAME: name,
        ATTR_ID: device_id,
        ATTR_DATA_ID: data_id,
    }
    tado.add_sensor(data_id, payload)
    return TadoSensor(tado, name, device_id, variable, data_id)
class TadoSensor(Entity):
    """Representation of a tado Sensor."""
    def __init__(self, store, zone_name, zone_id, zone_variable, data_id):
        """Initialize of the Tado Sensor."""
        self._store = store  # shared tado data store (holds polled API data)
        self.zone_name = zone_name
        self.zone_id = zone_id
        self.zone_variable = zone_variable  # which value this sensor reports
        self._unique_id = f"{zone_variable} {zone_id}"
        self._data_id = data_id  # key into the store for this zone/device
        self._state = None
        self._state_attributes = None
    @property
    def unique_id(self):
        """Return the unique id."""
        return self._unique_id
    @property
    def name(self):
        """Return the name of the sensor."""
        return f"{self.zone_name} {self.zone_variable}"
    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state
    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return self._state_attributes
    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        if self.zone_variable == "temperature":
            # Report in the unit configured for the HA instance.
            return self.hass.config.units.temperature_unit
        if self.zone_variable == "humidity":
            return "%"
        if self.zone_variable == "heating":
            return "%"
        if self.zone_variable == "ac":
            return ""
    @property
    def icon(self):
        """Icon for the sensor."""
        if self.zone_variable == "temperature":
            return "mdi:thermometer"
        if self.zone_variable == "humidity":
            return "mdi:water-percent"
    def update(self):
        """Update method called when should_poll is true.

        Pulls the zone/device payload from the shared store and extracts
        the field matching this sensor's zone_variable.  Payload keys
        ('sensorDataPoints', 'activityDataPoints', ...) follow the Tado
        API schema — assumed, confirm against the store implementation.
        """
        self._store.update()
        data = self._store.get_data(self._data_id)
        if data is None:
            _LOGGER.debug("Received no data for zone %s", self.zone_name)
            return
        # API temperatures are always delivered in Celsius.
        unit = TEMP_CELSIUS
        if self.zone_variable == "temperature":
            if "sensorDataPoints" in data:
                sensor_data = data["sensorDataPoints"]
                temperature = float(sensor_data["insideTemperature"]["celsius"])
                self._state = self.hass.config.units.temperature(temperature, unit)
                self._state_attributes = {
                    "time": sensor_data["insideTemperature"]["timestamp"],
                    "setting": 0,  # setting is used in climate device
                }
                # temperature setting will not exist when device is off
                if (
                    "temperature" in data["setting"]
                    and data["setting"]["temperature"] is not None
                ):
                    temperature = float(data["setting"]["temperature"]["celsius"])
                    self._state_attributes[
                        "setting"
                    ] = self.hass.config.units.temperature(temperature, unit)
        elif self.zone_variable == "humidity":
            if "sensorDataPoints" in data:
                sensor_data = data["sensorDataPoints"]
                self._state = float(sensor_data["humidity"]["percentage"])
                self._state_attributes = {"time": sensor_data["humidity"]["timestamp"]}
        elif self.zone_variable == "power":
            if "setting" in data:
                self._state = data["setting"]["power"]
        elif self.zone_variable == "link":
            if "link" in data:
                self._state = data["link"]["state"]
        elif self.zone_variable == "heating":
            if "activityDataPoints" in data:
                activity_data = data["activityDataPoints"]
                if (
                    "heatingPower" in activity_data
                    and activity_data["heatingPower"] is not None
                ):
                    self._state = float(activity_data["heatingPower"]["percentage"])
                    self._state_attributes = {
                        "time": activity_data["heatingPower"]["timestamp"]
                    }
        elif self.zone_variable == "ac":
            if "activityDataPoints" in data:
                activity_data = data["activityDataPoints"]
                if "acPower" in activity_data and activity_data["acPower"] is not None:
                    self._state = activity_data["acPower"]["value"]
                    self._state_attributes = {
                        "time": activity_data["acPower"]["timestamp"]
                    }
        elif self.zone_variable == "tado bridge status":
            if "connectionState" in data:
                self._state = data["connectionState"]["value"]
        elif self.zone_variable == "tado mode":
            if "tadoMode" in data:
                self._state = data["tadoMode"]
        elif self.zone_variable == "overlay":
            # overlay = manual override; expose its termination type.
            if "overlay" in data and data["overlay"] is not None:
                self._state = True
                self._state_attributes = {
                    "termination": data["overlay"]["termination"]["type"]
                }
            else:
                self._state = False
                self._state_attributes = {}
        elif self.zone_variable == "early start":
            if "preparation" in data and data["preparation"] is not None:
                self._state = True
            else:
                self._state = False
|
|
from pysb.simulator.base import Simulator, SimulationResult, SimulatorException
from pysb.bng import BngFileInterface, load_equations, generate_hybrid_model
import numpy as np
import logging
from pysb.logging import EXTENDED_DEBUG
from pysb.core import as_complex_pattern, Parameter, \
InvalidComplexPatternException
import collections
from collections.abc import Iterable
import os
class BngSimulator(Simulator):
    """ Simulate a model using BioNetGen """
    _supports = {
        'multi_initials': True,
        'multi_param_values': True
    }
    # Simulation back-ends BNG exposes; validated in run().
    _SIMULATOR_TYPES = ['ssa', 'nf', 'pla', 'ode']
    def __init__(self, model, tspan=None, initials=None, param_values=None,
                 cleanup=True, verbose=False):
        super(BngSimulator, self).__init__(model, tspan=tspan,
                                           initials=initials,
                                           param_values=param_values,
                                           verbose=verbose)
        self.cleanup = cleanup  # default for per-run cleanup of temp files
        self._outdir = None
    def run(self, tspan=None, initials=None, param_values=None, n_runs=1,
            method='ssa', output_dir=None, output_file_basename=None,
            cleanup=None, population_maps=None, **additional_args):
        """
        Simulate a model using BioNetGen

        Parameters
        ----------
        tspan: vector-like
            time span of simulation
        initials: vector-like, optional
            initial conditions of model
        param_values : vector-like or dictionary, optional
            Values to use for every parameter in the model. Ordering is
            determined by the order of model.parameters.
            If not specified, parameter values will be taken directly from
            model.parameters.
        n_runs: int
            number of simulations to run
        method : str
            Type of simulation to run. Must be one of:

            * 'ssa' - Stochastic Simulation Algorithm (direct method with
              propensity sorting)
            * 'nf' - Stochastic network free simulation with NFsim.
              Performs Hybrid Particle/Population simulation if
              population_maps argument is supplied
            * 'pla' - Partioned-leaping algorithm (variant of tau-leaping
              algorithm)
            * 'ode' - ODE simulation (Sundials CVODE algorithm)
        output_dir : string, optional
            Location for temporary files generated by BNG. If None (the
            default), uses a temporary directory provided by the system. A
            temporary directory with a random name is created within the
            supplied location.
        output_file_basename : string, optional
            This argument is used as a prefix for the temporary BNG
            output directory, rather than the individual files.
        cleanup : bool, optional
            If True (default), delete the temporary files after the
            simulation is finished. If False, leave them in place (Useful for
            debugging). The default value, None, means to use the value
            specified in :py:func:`__init__`.
        population_maps: list of PopulationMap
            List of :py:class:`PopulationMap` objects for hybrid
            particle/population modeling. Only used when method='nf'.
        additional_args: kwargs, optional
            Additional arguments to pass to BioNetGen

        Examples
        --------
        Simulate a model using network free simulation (NFsim):

        >>> from pysb.examples import robertson
        >>> from pysb.simulator.bng import BngSimulator
        >>> model = robertson.model
        >>> sim = BngSimulator(model, tspan=np.linspace(0, 1))
        >>> x = sim.run(n_runs=1, method='nf')
        """
        super(BngSimulator, self).run(tspan=tspan,
                                      initials=initials,
                                      param_values=param_values,
                                      _run_kwargs=locals()
                                      )
        if cleanup is None:
            cleanup = self.cleanup
        if method not in self._SIMULATOR_TYPES:
            raise ValueError("Method must be one of " +
                             str(self._SIMULATOR_TYPES))
        if method != 'nf' and population_maps:
            raise ValueError('population_maps argument is only used when '
                             'method is "nf"')
        if method == 'nf':
            if population_maps is not None and (not isinstance(
                    population_maps, Iterable) or
                    any(not isinstance(pm, PopulationMap) for pm in
                        population_maps)):
                raise ValueError('population_maps should be a list of '
                                 'PopulationMap objects')
            # NFsim needs initial-condition species declared explicitly.
            model_additional_species = self.initials_dict.keys()
        else:
            model_additional_species = None
        # Detect whether tspan is evenly spaced; decides how it is passed
        # to BNG (t_end/n_steps vs explicit sample_times).
        tspan_lin_spaced = np.allclose(
            self.tspan,
            np.linspace(self.tspan[0], self.tspan[-1], len(self.tspan))
        )
        if method == 'nf' and (not tspan_lin_spaced or self.tspan[0] != 0.0):
            raise SimulatorException('NFsim requires tspan to be linearly '
                                     'spaced starting at t=0')
        # BNG requires t_start even when supplying sample_times
        additional_args['t_start'] = self.tspan[0]
        if tspan_lin_spaced:
            # Just supply t_end and n_steps
            additional_args['n_steps'] = len(self.tspan) - 1
            additional_args['t_end'] = self.tspan[-1]
        else:
            additional_args['sample_times'] = self.tspan
        additional_args['method'] = method
        additional_args['print_functions'] = True
        verbose_bool = self._logger.logger.getEffectiveLevel() <= logging.DEBUG
        extended_debug = self._logger.logger.getEffectiveLevel() <= \
            EXTENDED_DEBUG
        additional_args['verbose'] = extended_debug
        params_names = [g.name for g in self._model.parameters]
        n_param_sets = self.initials_length
        total_sims = n_runs * n_param_sets
        self._logger.info('Running %d BNG %s simulations' % (total_sims,
                                                             method))
        model_to_load = None
        hpp_bngl = None
        if population_maps:
            # Hybrid particle/population: generate a BNGL file instead of
            # loading the pysb model directly.
            self._logger.debug('Generating hybrid particle-population model')
            hpp_bngl = generate_hybrid_model(
                self._model,
                population_maps,
                model_additional_species,
                verbose=extended_debug)
        else:
            model_to_load = self._model
        with BngFileInterface(model_to_load,
                              verbose=verbose_bool,
                              output_dir=output_dir,
                              output_prefix=output_file_basename,
                              cleanup=cleanup,
                              model_additional_species=model_additional_species
                              ) as bngfile:
            if hpp_bngl:
                hpp_bngl_filename = os.path.join(bngfile.base_directory,
                                                 'hpp_model.bngl')
                self._logger.debug('HPP BNGL:\n\n' + hpp_bngl)
                with open(hpp_bngl_filename, 'w') as f:
                    f.write(hpp_bngl)
            if method != 'nf':
                # TODO: Write existing netfile if already generated
                bngfile.action('generate_network', overwrite=True,
                               verbose=extended_debug)
            if output_file_basename is None:
                prefix = 'pysb'
            else:
                prefix = output_file_basename
            # One simulate action per (parameter set, repeat); each gets a
            # unique numeric prefix so results can be read back in order.
            sim_prefix = 0
            for pset_idx in range(n_param_sets):
                for n in range(len(params_names)):
                    bngfile.set_parameter(params_names[n],
                                          self.param_values[pset_idx][n])
                for cp, values in self.initials_dict.items():
                    if population_maps:
                        # Lumped species are set via their counter species.
                        for pm in population_maps:
                            if pm.complex_pattern.is_equivalent_to(cp):
                                cp = pm.counter_species
                                break
                    bngfile.set_concentration(cp, values[pset_idx])
                for sim_rpt in range(n_runs):
                    tmp = additional_args.copy()
                    tmp['prefix'] = '{}{}'.format(prefix, sim_prefix)
                    bngfile.action('simulate', **tmp)
                    # Restore initial concentrations for the next repeat.
                    bngfile.action('resetConcentrations')
                    sim_prefix += 1
            if hpp_bngl:
                bngfile.execute(reload_netfile=hpp_bngl_filename,
                                skip_file_actions=True)
            else:
                bngfile.execute()
            if method != 'nf':
                load_equations(self.model, bngfile.net_filename)
            list_of_yfull = \
                BngFileInterface.read_simulation_results_multi(
                    [bngfile.base_filename + str(n) for n in range(total_sims)])
        # Unpack each record array: column 0 is time, then species
        # (except for 'nf'), then observables/expressions.
        tout = []
        species_out = []
        obs_exp_out = []
        for i in range(total_sims):
            yfull = list_of_yfull[i]
            yfull_view = yfull.view(float).reshape(len(yfull), -1)
            tout.append(yfull_view[:, 0])
            if method == 'nf':
                # NFsim output has no per-species trajectories.
                obs_exp_out.append(yfull_view[:, 1:])
            else:
                species_out.append(yfull_view[:,
                                   1:(len(self.model.species) + 1)])
                if len(self.model.observables) or len(self.model.expressions):
                    obs_exp_out.append(yfull_view[:,
                                       (len(self.model.species) + 1):
                                       (len(self.model.species) + 1) +
                                       len(self.model.observables) +
                                       len(self.model.expressions_dynamic())])
        return SimulationResult(self, tout=tout, trajectories=species_out,
                                observables_and_expressions=obs_exp_out,
                                simulations_per_param_set=n_runs)
class PopulationMap(object):
    """
    Population map for BioNetGen hybrid particle/population simulation

    For use with the :class:`BngSimulator`.

    Parameters
    ----------
    complex_pattern : ComplexPattern
        The species pattern to lump into a population counter.
    lumping_rate : Parameter
        Rate at which particles are lumped into the population species.
    counter_species : str, optional
        Name of the generated counter species (coerced to str).

    References
    ----------
    Hogg et al. 2014:
    http://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1003544

    BioNetGen HPP documentation:
    http://bionetgen.org/index.php/Hybrid_particle-population_model_generator
    """
    def __init__(self, complex_pattern, lumping_rate, counter_species=None):
        try:
            self.complex_pattern = as_complex_pattern(complex_pattern)
        except InvalidComplexPatternException:
            raise ValueError('complex_pattern must be a ComplexPattern')
        if not isinstance(lumping_rate, Parameter):
            # Fixed: the original interpolated Parameter.__class__ (the
            # metaclass) into the message, which was misleading.
            raise ValueError('lumping_rate must be a %s' % Parameter.__name__)
        self.lumping_rate = lumping_rate
        # Store the counter species name as a string, or None if unset.
        self.counter_species = None if counter_species is None \
            else str(counter_species)
    def __repr__(self):
        # counter_species is either None or a str; format() renders both
        # identically to the original implementation.
        return 'PopulationMap({}, {}, {})'.format(
            self.complex_pattern,
            self.lumping_rate,
            self.counter_species
        )
|
|
# -*- coding: utf-8 -*-
import os
from Summarizer.polish_summaries_corpus_reader import read_psc_file
from Summarizer.summarization.summarizer import Summarizer
class EvaluatorWrapper():
    """Facade: configures the shared Summarizer and runs a StrictEvaluator
    over a test directory, printing aggregate metrics (Python 2 module)."""
    def __init__(self, model, features, config):
        self.summarizer = Summarizer.Instance()
        self.summarizer.set_model(model, features, config)
        # Bound method — the evaluation strategy is fixed to StrictEvaluator.
        self.evaluation_method = StrictEvaluator(self.summarizer, config.get_length()).evaluate
    def evaluate(self, test_dir):
        """Run evaluation over *test_dir* and print a summary report."""
        all_tp, all_fp, all_fn, final_precision, final_recall, f_score = self.evaluation_method(test_dir)
        print "###########################################"
        print "Evaluation done."
        print "True positives: %s False positives: %s False negatives: %s" % (all_tp, all_fp, all_fn)
        print "Precision: %.2f Recall: %.2f, F-1 Score: %.2f" % (final_precision, final_recall, f_score)
def average(l):
    """Arithmetic mean of the values in *l*."""
    total = sum(l)
    return total / len(l)
class EvaluationResult(object):
    """Holds TP/FP/FN counts and derives precision, recall and F1.

    All getters guard against division by zero and return 0 for the
    degenerate case; previously get_f_score raised ZeroDivisionError
    when precision and recall were both 0, unlike its sibling getters.
    """
    def __init__(self, tp=0, fp=0, fn=0):
        self.tp = tp  # true positives
        self.fp = fp  # false positives
        self.fn = fn  # false negatives
    def get_precision(self):
        """Return tp / (tp + fp), or 0 when nothing was predicted."""
        return float(self.tp) / (self.tp + self.fp) if (self.tp + self.fp) != 0 else 0
    def get_recall(self):
        """Return tp / (tp + fn), or 0 when there were no positives."""
        return float(self.tp) / (self.tp + self.fn) if (self.tp + self.fn) != 0 else 0
    def get_f_score(self):
        """Return the harmonic mean of precision and recall (0 if both 0)."""
        precision = self.get_precision()
        recall = self.get_recall()
        if precision + recall == 0:
            # Fix: avoid ZeroDivisionError for an all-zero result.
            return 0
        return 2 * (precision * recall) / (precision + recall)
    def __str__(self):
        return "Precision: %.2f Recall: %.2f, F-1 Score: %.2f" % (
            self.get_precision(), self.get_recall(), self.get_f_score())
class AbstractEvaluator(object):
    """Abstract base for summary evaluators.

    Subclasses must implement :meth:`evaluate`, which scores generated
    summaries against reference summaries for every file under a
    directory.
    """
    def __init__(self, summarizer, length):
        self.summarizer = summarizer  # produces the summaries to score
        self.length = length          # summary-length setting (enum-like)
    def evaluate(self, test_dir):
        """Evaluate summaries for every file under *test_dir*.

        Signature aligned with every concrete subclass and the only
        caller; the original declared extra model/features/stop_list
        parameters that no implementation used.
        """
        raise NotImplementedError("Should have implemented this")
class StrictEvaluator(AbstractEvaluator):
    """Scores every summary file individually; precision/recall are
    averaged per directory, then averaged again over directories."""
    def evaluate(self, test_dir):
        all_tp = 0
        all_fp = 0
        all_fn = 0
        all_precisions = []
        all_recalls = []
        for root, dirs, files in os.walk(test_dir, topdown=False):
            dir_precisions = []
            dir_recalls = []
            if not files:
                continue
            prev_doc = None
            for name in files:
                file_path = os.path.join(root, name)
                # prev_doc is threaded through so the reader can reuse the
                # previously parsed source document — assumed; confirm
                # against read_psc_file.
                doc_psc = read_psc_file(file_path, prev_doc)
                prev_doc = doc_psc
                expected_sentences = doc_psc.summaries[self.length.value]
                # Ask the summarizer for exactly as many sentences as the
                # reference summary contains.
                scored_sentences = set([s[0] for s in
                                        self.summarizer.create_summary(doc_psc).get_scored_sentences_numbers(
                                            len(doc_psc.summaries[self.length.value]))])
                tp = len(expected_sentences.intersection(scored_sentences))
                fp = len(scored_sentences.difference(expected_sentences))
                fn = len(expected_sentences.difference(scored_sentences))
                dir_result = EvaluationResult(tp, fp, fn)
                dir_precisions.append(dir_result.get_precision())
                dir_recalls.append(dir_result.get_recall())
                all_tp += tp
                all_fp += fp
                all_fn += fn
                # print "File: ", name
                # print "TP: ", tp, " FP: ", fp, " FN: ", fn
                # print "Precision: ", precision, " Recall: ", recall
            dir_p = average(dir_precisions)
            dir_r = average(dir_recalls)
            all_precisions.append(dir_p)
            all_recalls.append(dir_r)
            print "File %s, Precision: %.2f File Recall: %.2f" % (root[-16:], dir_p, dir_r)
            # break  # remove this !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        final_precision = average(all_precisions)
        final_recall = average(all_recalls)
        # NOTE(review): raises ZeroDivisionError when both averages are 0.
        f_score = 2 * (final_precision * final_recall) / (final_precision + final_recall)
        # print "Max precision in file: %2f, Max recall in file: %.2f" % (max(all_precisions), max(all_recalls))
        return all_tp, all_fp, all_fn, final_precision, final_recall, f_score
class CorpusEvaluator(AbstractEvaluator):
    """Micro-averaged evaluation: TP/FP/FN are pooled over the whole
    corpus and precision/recall are computed once from the totals."""
    def evaluate(self, test_dir):
        all_tp = 0
        all_fp = 0
        all_fn = 0
        for root, dirs, files in os.walk(test_dir, topdown=False):
            if not files:
                continue
            prev_doc = None
            for name in files:
                file_path = os.path.join(root, name)
                # prev_doc reuses the previously parsed source document —
                # assumed; confirm against read_psc_file.
                doc_psc = read_psc_file(file_path, prev_doc)
                prev_doc = doc_psc
                expected_sentences = doc_psc.summaries[self.length.value]
                scored_sentences = set([s[0] for s in
                                        self.summarizer.create_summary(doc_psc).get_scored_sentences_numbers(
                                            len(doc_psc.summaries[self.length.value]))])
                tp = len(expected_sentences.intersection(scored_sentences))
                fp = len(scored_sentences.difference(expected_sentences))
                fn = len(expected_sentences.difference(scored_sentences))
                all_tp += tp
                all_fp += fp
                all_fn += fn
                # print "File: ", name
                # print "TP: ", tp, " FP: ", fp, " FN: ", fn
                # print "Precision: ", precision, " Recall: ", recall
            # print "File %s, Precision: %.2f File Recall: %.2f" % (root[-16:], dir_p, dir_r)
            print "File %s, evaluated" % (root[-16:],)
            # break  # remove this !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        precision = float(all_tp) / (all_tp + all_fp) if (all_tp + all_fp) != 0 else 0
        recall = float(all_tp) / (all_tp + all_fn) if (all_tp + all_fn) != 0 else 0
        # NOTE(review): raises ZeroDivisionError when precision+recall == 0.
        f_score = 2 * (precision * recall) / (precision + recall)
        # print "Max precision in file: %2f, Max recall in file: %.2f" % (max(all_precisions), max(all_recalls))
        return all_tp, all_fp, all_fn, precision, recall, f_score
class WideEvaluator(AbstractEvaluator):
    """Per-directory evaluation: reference sentences are pooled from all
    files in a directory and compared against one generated summary."""
    def evaluate(self, test_dir):
        all_tp = 0
        all_fp = 0
        all_fn = 0
        all_precisions = []
        all_recalls = []
        for root, dirs, files in os.walk(test_dir, topdown=False):
            dir_precisions = []
            dir_recalls = []
            if not files:
                continue
            prev_doc = None
            expected_sentences = []
            for name in files:
                file_path = os.path.join(root, name)
                doc_psc = read_psc_file(file_path, prev_doc)
                prev_doc = doc_psc
                # Pool reference sentences from every file in the directory.
                expected_sentences += doc_psc.summaries[self.length.value]
                # print "File: ", name
                # print "TP: ", tp, " FP: ", fp, " FN: ", fn
                # print "Precision: ", precision, " Recall: ", recall
            # NOTE(review): doc_psc here is the LAST file of the directory,
            # so only one summary is generated per directory and sized by
            # the last file's reference length — looks intentional for this
            # "wide" scheme, but confirm.
            scored_sentences = set([s[0] for s in
                                    self.summarizer.create_summary(doc_psc).get_scored_sentences_numbers(
                                        len(doc_psc.summaries[self.length.value]))])
            expected_sentences = set(expected_sentences)
            tp = len(expected_sentences.intersection(scored_sentences))
            fp = len(scored_sentences.difference(expected_sentences))
            fn = len(expected_sentences.difference(scored_sentences))
            precision = float(tp) / (tp + fp) if (tp + fp) != 0 else 0
            recall = float(tp) / (tp + fn) if (tp + fn) != 0 else 0
            all_tp += tp
            all_fp += fp
            all_fn += fn
            all_precisions.append(precision)
            all_recalls.append(recall)
            print "File %s, Precision: %.2f File Recall: %.2f" % (root[-16:], precision, recall)
            # break  # remove this !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        final_precision = average(all_precisions)
        final_recall = average(all_recalls)
        # NOTE(review): raises ZeroDivisionError when both averages are 0.
        f_score = 2 * (final_precision * final_recall) / (final_precision + final_recall)
        # print "Max precision in file: %2f, Max recall in file: %.2f" % (max(all_precisions), max(all_recalls))
        return all_tp, all_fp, all_fn, final_precision, final_recall, f_score
|
|
# Copyright (c) 2014. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from immuno.mutate import mutate_protein_from_transcript
import immuno.ensembl.annotation as ensembl
from immuno.ensembl.transcript_data import EnsemblReferenceData
# Test case CTNNB1
# Transcript Id: ENST00000453024
# Gene Id : ENSG00000168036
# 17 Exons
# Length 2,841
# Chrom : 3
# Start : 41,240,936
# End : 41,281,227
# Shared reference-data accessor used by the tests below for cDNA and
# protein transcript lookups.
ref_data = EnsemblReferenceData()
def test_complement_base():
    """Single-base reverse complement: G -> C."""
    assert ensembl.reverse_complement("G") == "C"
def test_complement_seq():
    # The right-hand literal is the forward complement, so [::-1] turns it
    # into the reverse complement the function should return.
    assert ensembl.reverse_complement("TCTCATCCAGGTACCAGCCAATG") == "AGAGTAGGTCCATGGTCGGTTAC"[::-1]
def test_get_strand_CASP9():
    """CASP9 transcript lies on the reverse strand."""
    genomic_transcript = "ENST00000333868"
    forward = ensembl.is_forward_strand(genomic_transcript)
    assert(forward == False)
def test_get_strand_CTNNB1():
    """CTNNB1 transcript lies on the forward strand."""
    genomic_transcript = "ENST00000453024"
    forward = ensembl.is_forward_strand(genomic_transcript)
    assert(forward == True)
def test_load_CTNNB1_cdna_transcript():
    """cDNA of CTNNB1 transcript has the expected length (see header)."""
    genomic_transcript = "ENST00000453024"
    transcript = ref_data.get_cdna(genomic_transcript)
    assert(transcript is not None)
    assert(len(transcript) == 2841), (transcript, len(transcript))
def test_load_CTNNB1_protein_transcript():
    """Protein sequence starts with Met and ends with Glu."""
    protein_transcript = "ENSP00000427553"
    transcript = ref_data.get_protein(protein_transcript)
    assert(transcript is not None)
    assert(transcript[0] == 'M'), (transcript, len(transcript))
    assert(transcript[-1] == 'E'), (transcript, len(transcript))
def test_load_CTNNB1_exon_from_transcript():
    """Exon counts for two CTNNB1 transcripts (17 and 16 exons)."""
    transcript_id = "ENST00000453024"
    exons = ensembl.get_exons_from_transcript(transcript_id)
    assert(exons.shape[0] == 17)
    transcript_id = "ENST00000405570"
    exons = ensembl.get_exons_from_transcript(transcript_id)
    assert(exons.shape[0] == 16)
def test_load_CTNNB1_exon_from_transcript_length():
    """Sum of exon lengths must equal the cDNA length."""
    transcript_id = 'ENST00000405570'
    transcript = ref_data.get_cdna(transcript_id)
    exons = ensembl.get_exons_from_transcript(transcript_id)
    # Genomic coordinates are inclusive, hence the +1.
    exons['length'] = exons['seq_region_end_exon'] - exons['seq_region_start_exon'] + 1
    assert(exons['length'].sum() == len(transcript)), exons
def test_load_SMAD4_cdna_transcript():
    """SMAD4 cDNA length and first/last bases."""
    transcript_id = "ENST00000342988"
    transcript = ref_data.get_cdna(transcript_id)
    assert transcript is not None
    assert len(transcript) == 8769, len(transcript)
    assert transcript[0] == 'A', transcript[0]
    assert transcript[-1] == 'T', transcript[-1]
def test_get_gene_from_pos():
    # A SNV inside CTNNB1 must annotate back to the CTNNB1 gene id.
    variant = {
        'chr' : '3',
        'pos' : 41250936,
        'ref' : 'A',
        'alt' : 'C'
    }
    vcf = pd.DataFrame.from_records([variant])
    transcripts_df = ensembl.annotate_vcf_transcripts(vcf)
    assert( "ENSG00000168036" in set(transcripts_df['stable_id_gene']))
def test_get_transcript_from_pos():
    # Same variant as above, checking the transcript-level annotation.
    variant = {
        'chr' : '3',
        'pos' : 41250936,
        'ref' : 'A',
        'alt' : 'C'
    }
    vcf = pd.DataFrame.from_records([variant])
    transcripts_ids = ensembl.annotate_vcf_transcripts(vcf)
    assert( "ENST00000453024" in set(transcripts_ids['stable_id_transcript']))
def test_get_all_transcript_from_pos():
    # A position covered by several CTNNB1 isoforms must report all of them.
    variant = {
        'chr' : '3',
        'pos' : 41275636,
        'ref' : 'G',
        'alt' : 'A'
    }
    vcf = pd.DataFrame.from_records([variant])
    transcripts_ids = ensembl.annotate_vcf_transcripts(vcf)
    transcript_ids = set(transcripts_ids['stable_id_transcript'])
    assert( "ENST00000405570" in transcript_ids)
    assert( "ENST00000396183" in transcript_ids)
    assert( "ENST00000349496" in transcript_ids)
    assert( "ENST00000453024" in transcript_ids)
    assert( "ENST00000396185" in transcript_ids)
def test_get_transcript_index_from_pos():
    # Maps a genomic position onto a 0-based cDNA index (UTR included),
    # then checks the base at that index matches the variant's ref allele.
    # NOTE(review): only variant['ref'] is used below; the dict mirrors the
    # other tests for readability.
    variant = {
        'chr' : '3',
        'pos' : 41275636,
        'ref' : 'G',
        'alt' : 'A'
    }
    transcript_id = 'ENST00000405570'
    idx = ensembl.get_transcript_index_from_pos(
        41275636, transcript_id, skip_untranslated_region = False)
    assert(idx == 1686), idx
    transcript = ref_data.get_cdna(transcript_id)
    assert(transcript[idx] == variant['ref'])
def test_get_5prime_utr_length_RET():
    # 5' UTR length for RET (forward-strand default).
    transcript_id = "ENST00000355710"
    exons = ensembl.get_exons_from_transcript(transcript_id)
    utr_length = ensembl.get_five_prime_utr_length(exons)
    print utr_length
    assert(utr_length == 232)
def test_get_3prime_utr_length_RET():
    # 3' UTR length for RET.
    transcript_id = "ENST00000355710"
    exons = ensembl.get_exons_from_transcript(transcript_id)
    utr_length = ensembl.get_three_prime_utr_length(exons)
    print utr_length
    assert(utr_length == 2082)
def test_get_5prime_utr_length_CTNNB1():
    # 5' UTR length for a CTNNB1 isoform.
    transcript_id = "ENST00000405570"
    exons = ensembl.get_exons_from_transcript(transcript_id)
    utr_length = ensembl.get_five_prime_utr_length(exons)
    print utr_length
    assert(utr_length == 156)
def test_get_3prime_utr_length_CTNNB1():
    # 3' UTR length for the same CTNNB1 isoform.
    transcript_id = "ENST00000405570"
    exons = ensembl.get_exons_from_transcript(transcript_id)
    utr_length = ensembl.get_three_prime_utr_length(exons)
    print utr_length
    assert(utr_length == 11)
def test_get_5prime_utr_length_reverse_strand_CASP9():
    # CASP9 is on the reverse strand, so forward=False is required.
    transcript_id = "ENST00000333868"
    exons = ensembl.get_exons_from_transcript(transcript_id)
    utr_length = ensembl.get_five_prime_utr_length(exons, forward = False)
    print utr_length
    assert(utr_length == 95)
def test_get_3prime_utr_length_reverse_strand_CASP9():
    # 3' UTR counterpart of the reverse-strand case above.
    transcript_id = "ENST00000333868"
    exons = ensembl.get_exons_from_transcript(transcript_id)
    utr_length = ensembl.get_three_prime_utr_length(exons, forward = False)
    print utr_length
    assert(utr_length == 673)
def test_get_transcript_and_mutate_vcf():
    # End-to-end: annotate a RET variant, map it into both cDNA and CDS
    # coordinates, then mutate the protein and check the resulting peptide.
    variant = {
        'chr' : '10',
        'pos' : 43617416,
        'ref' : 'T',
        'alt' : 'C'
    }
    vcf = pd.DataFrame.from_records([variant])
    transcripts_ids = ensembl.annotate_vcf_transcripts(vcf)
    transcript_ids = set(transcripts_ids['stable_id_transcript'])
    assert( "ENST00000355710" in transcript_ids)
    assert( "ENST00000340058" in transcript_ids)
    transcript_id = "ENST00000355710"
    # cDNA index (UTR included) must land inside the transcript.
    cdna_idx = ensembl.get_transcript_index_from_pos(
        variant['pos'], transcript_id, skip_untranslated_region = False)
    assert cdna_idx is not None
    assert cdna_idx < 5569
    cdna_transcript = ref_data.get_cdna(transcript_id)
    assert(cdna_transcript[cdna_idx] == variant['ref'])
    # CDS index (UTR skipped) must point at the same reference base.
    cds_idx = ensembl.get_transcript_index_from_pos(
        variant['pos'], transcript_id, skip_untranslated_region = True)
    assert cds_idx is not None
    cds_transcript = ref_data.get_cds(transcript_id)
    assert(cds_transcript[cds_idx] == variant['ref'])
    # 10 residues of padding on each side of the mutated residue -> 21-mer.
    region = mutate_protein_from_transcript(
        cds_transcript,
        cds_idx,
        variant['ref'],
        variant['alt'],
        padding = 10)
    assert region is not None
    assert len(region.seq) == 21, (region.seq, len(region.seq))
    assert region.seq == 'RSQGRIPVKWTAIESLFDHIY'
def test_interval_search():
    # Positions inside the (inclusive) intervals map to cumulative offsets
    # across the concatenated intervals; positions in a gap map to None.
    intervals = [(7, 13), (17, 19), (21, 24), (35, 45), (47, 50), (60, 70)]
    expected_by_pos = [
        (7, 0),
        (13, 6),
        (14, None),
        (12, 5),
        (17, 7),
        (18, 8),
        (23, 12),
        (51, None),
    ]
    for pos, expected in expected_by_pos:
        idx = ensembl.get_idx_from_interval(pos, intervals)
        assert (idx == expected), idx
def test_peptide_from_transcript_PARS2():
    """Check the CDS index computed for a PARS2 variant (chr1:55224569 T>G).

    NOTE(review): despite the name, this only checks the CDS index, not the
    peptide; only variant['pos'] is consumed below.
    """
    transcript_id = 'ENST00000371279'
    variant = {
        'chr' : '1',
        'pos' : 55224569,
        'ref' : 'T',
        'alt' : 'G'
    }
    cds_idx = ensembl.get_transcript_index_from_pos(
        variant['pos'], transcript_id, skip_untranslated_region = True)
    assert cds_idx is not None
    assert cds_idx == 265
if __name__ == '__main__':
    # Allow running this test module directly, outside of a test runner.
    from dsltools import testing_helpers
    testing_helpers.run_local_tests()
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
from collections import defaultdict
import os
from twitter.common.collections import OrderedSet
from pants.backend.jvm.tasks.ivy_task_mixin import IvyTaskMixin
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.backend.jvm.targets.scala_library import ScalaLibrary
from pants.base.build_environment import get_buildroot
from pants.base.build_graph import sort_targets
from pants.base.exceptions import TaskError
class JvmDependencyAnalyzer(object):
  """Checks the compiler-reported (actual) dependencies of JVM targets against
  their declared BUILD dependencies, reporting missing, indirect-only and
  (eventually) unnecessary deps.
  """

  def __init__(self,
               context,
               check_missing_deps,
               check_missing_direct_deps,
               check_unnecessary_deps,
               target_whitelist):
    """
    :param context: The pants run context (provides targets, products, log).
    :param check_missing_deps: Falsy (off), truthy (warn) or 'fatal' (raise).
    :param check_missing_direct_deps: Same tri-state as above, for direct deps.
    :param check_unnecessary_deps: Same tri-state; currently unimplemented.
    :param target_whitelist: Target addresses never reported, even if broken.
    """
    self._context = context
    self._context.products.require_data('classes_by_target')
    self._context.products.require_data('ivy_jar_products')
    self._check_missing_deps = check_missing_deps
    self._check_missing_direct_deps = check_missing_direct_deps
    self._check_unnecessary_deps = check_unnecessary_deps

    # These targets we will not report as having any dependency issues even if they do.
    self._target_whitelist = OrderedSet(target_whitelist)

  def _compute_targets_by_file(self):
    """Returns a map from abs path of source, class or jar file to an OrderedSet of targets.

    The value is usually a singleton, because a source or class file belongs to a single target.
    However a single jar may be provided (transitively or intransitively) by multiple JarLibrary
    targets. But if there is a JarLibrary target that depends on a jar directly, then that
    "canonical" target will be the first one in the list of targets.
    """
    targets_by_file = defaultdict(OrderedSet)

    # Multiple JarLibrary targets can provide the same (org, name).
    jarlibs_by_id = defaultdict(set)

    # Compute src -> target.
    with self._context.new_workunit(name='map_sources'):
      buildroot = get_buildroot()
      # Look at all targets in-play for this pants run. Does not include synthetic targets,
      for target in self._context.targets():
        if isinstance(target, JvmTarget):
          for src in target.sources_relative_to_buildroot():
            targets_by_file[os.path.join(buildroot, src)].add(target)
        elif isinstance(target, JarLibrary):
          for jardep in target.jar_dependencies:
            jarlibs_by_id[(jardep.org, jardep.name)].add(target)
        # TODO(Tejal Desai): pantsbuild/pants/65: Remove java_sources attribute for ScalaLibrary
        if isinstance(target, ScalaLibrary):
          for java_source in target.java_sources:
            for src in java_source.sources_relative_to_buildroot():
              targets_by_file[os.path.join(buildroot, src)].add(target)

    # Compute class -> target.
    with self._context.new_workunit(name='map_classes'):
      classes_by_target = self._context.products.get_data('classes_by_target')
      for tgt, target_products in classes_by_target.items():
        for _, classes in target_products.abs_paths():
          for cls in classes:
            targets_by_file[cls].add(tgt)

    # Compute jar -> target.
    with self._context.new_workunit(name='map_jars'):
      with IvyTaskMixin.symlink_map_lock:
        all_symlinks_map = self._context.products.get_data('symlink_map').copy()
        # We make a copy, so it's safe to use outside the lock.

      def register_transitive_jars_for_ref(ivyinfo, ref):
        # Maps every jar transitively provided by `ref` back to the
        # JarLibrary targets that declare (ref.org, ref.name) directly.
        deps_by_ref_memo = {}

        def get_transitive_jars_by_ref(ref1, visited=None):
          if ref1 in deps_by_ref_memo:
            return deps_by_ref_memo[ref1]
          else:
            visited = visited or set()
            if ref1 in visited:
              return set()  # Ivy allows circular deps.
            visited.add(ref1)
            jars = set()
            jars.update(ivyinfo.modules_by_ref[ref1].artifacts)
            for dep in ivyinfo.deps_by_caller.get(ref1, []):
              jars.update(get_transitive_jars_by_ref(dep, visited))
            deps_by_ref_memo[ref1] = jars
            return jars

        target_key = (ref.org, ref.name)
        if target_key in jarlibs_by_id:
          # These targets provide all the jars in ref, and all the jars ref transitively depends on.
          jarlib_targets = jarlibs_by_id[target_key]
          for jar in get_transitive_jars_by_ref(ref):
            # Register that each jarlib_target provides jar (via all its symlinks).
            symlinks = all_symlinks_map.get(os.path.realpath(jar.path), [])
            for symlink in symlinks:
              for jarlib_target in jarlib_targets:
                targets_by_file[symlink].add(jarlib_target)

      ivy_products = self._context.products.get_data('ivy_jar_products')
      if ivy_products:
        for ivyinfos in ivy_products.values():
          for ivyinfo in ivyinfos:
            for ref in ivyinfo.modules_by_ref:
              register_transitive_jars_for_ref(ivyinfo, ref)

    return targets_by_file

  def _compute_transitive_deps_by_target(self):
    """Map from target to all the targets it depends on, transitively."""
    # Sort from least to most dependent.
    sorted_targets = reversed(sort_targets(self._context.targets()))
    transitive_deps_by_target = defaultdict(set)
    # Iterate in dep order, to accumulate the transitive deps for each target.
    for target in sorted_targets:
      transitive_deps = set()
      for dep in target.dependencies:
        transitive_deps.update(transitive_deps_by_target.get(dep, []))
        transitive_deps.add(dep)
      # Need to handle the case where a java_sources target has dependencies.
      # In particular if it depends back on the original target.
      if hasattr(target, 'java_sources'):
        for java_source_target in target.java_sources:
          for transitive_dep in java_source_target.dependencies:
            transitive_deps_by_target[java_source_target].add(transitive_dep)
      transitive_deps_by_target[target] = transitive_deps
    return transitive_deps_by_target

  def check(self, srcs, actual_deps):
    """Check for missing deps, logging (or raising, if 'fatal') any findings.

    See docstring for _compute_missing_deps for details.

    :param srcs: Source files (relative to the build root) to check.
    :param actual_deps: Map from src file to deps reported by the compiler.
    """
    if self._check_missing_deps or self._check_missing_direct_deps or self._check_unnecessary_deps:
      missing_file_deps, missing_tgt_deps, missing_direct_tgt_deps = \
        self._compute_missing_deps(srcs, actual_deps)

      buildroot = get_buildroot()

      def shorten(path):  # Make the output easier to read.
        for prefix in [buildroot, self._context.ivy_home]:
          if path.startswith(prefix):
            return os.path.relpath(path, prefix)
        return path

      def filter_whitelisted(missing_deps):
        # Removing any targets that exist in the whitelist from the list of dependency issues.
        return [(tgt_pair, evidence) for (tgt_pair, evidence) in missing_deps
                if tgt_pair[0].address.reference() not in self._target_whitelist]

      missing_tgt_deps = filter_whitelisted(missing_tgt_deps)

      if self._check_missing_deps and (missing_file_deps or missing_tgt_deps):
        for (tgt_pair, evidence) in missing_tgt_deps:
          evidence_str = '\n'.join(['  %s uses %s' % (shorten(e[0]), shorten(e[1]))
                                    for e in evidence])
          self._context.log.error(
              'Missing BUILD dependency %s -> %s because:\n%s'
              % (tgt_pair[0].address.reference(), tgt_pair[1].address.reference(), evidence_str))
        for (src_tgt, dep) in missing_file_deps:
          self._context.log.error('Missing BUILD dependency %s -> %s'
                                  % (src_tgt.address.reference(), shorten(dep)))
        if self._check_missing_deps == 'fatal':
          raise TaskError('Missing deps.')

      missing_direct_tgt_deps = filter_whitelisted(missing_direct_tgt_deps)

      if self._check_missing_direct_deps and missing_direct_tgt_deps:
        for (tgt_pair, evidence) in missing_direct_tgt_deps:
          evidence_str = '\n'.join(['  %s uses %s' % (shorten(e[0]), shorten(e[1]))
                                    for e in evidence])
          self._context.log.warn('Missing direct BUILD dependency %s -> %s because:\n%s' %
                                 (tgt_pair[0].address, tgt_pair[1].address, evidence_str))
        if self._check_missing_direct_deps == 'fatal':
          raise TaskError('Missing direct deps.')

      if self._check_unnecessary_deps:
        raise TaskError('Unnecessary dep warnings not implemented yet.')

  def _compute_missing_deps(self, srcs, actual_deps):
    """Computes deps that are used by the compiler but not specified in a BUILD file.

    These deps are bugs waiting to happen: the code may happen to compile because the dep was
    brought in some other way (e.g., by some other root target), but that is obviously fragile.

    Note that in practice we're OK with reliance on indirect deps that are only brought in
    transitively. E.g., in Scala type inference can bring in such a dep subtly. Fortunately these
    cases aren't as fragile as a completely missing dependency. It's still a good idea to have
    explicit direct deps where relevant, so we optionally warn about indirect deps, to make them
    easy to find and reason about.

    - actual_deps: a map src -> list of actual deps (source, class or jar file) as noted by the
      compiler.

    Returns a triple (missing_file_deps, missing_tgt_deps, missing_direct_tgt_deps) where:
    - missing_file_deps: a list of pairs (src_tgt, dep_file) where src_tgt requires dep_file, and
      we're unable to map to a target (because its target isn't in the total set of targets in play,
      and we don't want to parse every BUILD file in the workspace just to find it).
    - missing_tgt_deps: a list of pairs (src_tgt, dep_tgt) where src_tgt is missing a necessary
      transitive dependency on dep_tgt.
    - missing_direct_tgt_deps: a list of pairs (src_tgt, dep_tgt) where src_tgt is missing a direct
      dependency on dep_tgt but has a transitive dep on it.

    All paths in the input and output are absolute.
    """
    def must_be_explicit_dep(dep):
      # We don't require explicit deps on the java runtime, so we shouldn't consider that
      # a missing dep.
      return not dep.startswith(self._context.java_home)

    def target_or_java_dep_in_targets(target, targets):
      # We want to check if the target is in the targets collection
      #
      # However, for the special case of scala_library that has a java_sources
      # reference we're ok if that exists in targets even if the scala_library does not.
      if target in targets:
        return True
      elif target.is_scala:
        return any(t in targets for t in target.java_sources)
      else:
        return False

    # TODO: If recomputing these every time becomes a performance issue, memoize for
    # already-seen targets and incrementally compute for new targets not seen in a previous
    # partition, in this or a previous chunk.
    targets_by_file = self._compute_targets_by_file()
    transitive_deps_by_target = self._compute_transitive_deps_by_target()

    # Find deps that are actual but not specified.
    with self._context.new_workunit(name='scan_deps'):
      missing_file_deps = OrderedSet()  # (src, src).
      missing_tgt_deps_map = defaultdict(list)  # (tgt, tgt) -> a list of (src, src) as evidence.
      missing_direct_tgt_deps_map = defaultdict(list)  # The same, but for direct deps.

      buildroot = get_buildroot()
      abs_srcs = [os.path.join(buildroot, src) for src in srcs]
      for src in abs_srcs:
        # BUGFIX: the previous `next(iter(targets_by_file.get(src)))` raised
        # TypeError for an unknown src (iter(None)) and would raise
        # StopIteration for an empty set, so the TaskError below was
        # unreachable. Look the set up first and fall back to None.
        src_tgt_candidates = targets_by_file.get(src)
        src_tgt = next(iter(src_tgt_candidates)) if src_tgt_candidates else None
        if src_tgt is not None:
          for actual_dep in filter(must_be_explicit_dep, actual_deps.get(src, [])):
            actual_dep_tgts = targets_by_file.get(actual_dep)
            # actual_dep_tgts is usually a singleton. If it's not, we only need one of these
            # to be in our declared deps to be OK.
            if actual_dep_tgts is None:
              missing_file_deps.add((src_tgt, actual_dep))
            elif not target_or_java_dep_in_targets(src_tgt, actual_dep_tgts):
              # Obviously intra-target deps are fine.
              canonical_actual_dep_tgt = next(iter(actual_dep_tgts))
              if actual_dep_tgts.isdisjoint(transitive_deps_by_target.get(src_tgt, [])):
                missing_tgt_deps_map[(src_tgt, canonical_actual_dep_tgt)].append((src, actual_dep))
              elif canonical_actual_dep_tgt not in src_tgt.dependencies:
                # The canonical dep is the only one a direct dependency makes sense on.
                missing_direct_tgt_deps_map[(src_tgt, canonical_actual_dep_tgt)].append(
                    (src, actual_dep))
        else:
          raise TaskError('Requested dep info for unknown source file: %s' % src)

    return (list(missing_file_deps),
            missing_tgt_deps_map.items(),
            missing_direct_tgt_deps_map.items())
|
|
# -*- coding: UTF-8 -*-
"""
Provides a summary after each test run.
"""
from __future__ import absolute_import, division, print_function
import sys
from time import time as time_now
from behave.model import Rule, ScenarioOutline # MAYBE: Scenario
from behave.model_core import Status
from behave.reporter.base import Reporter
from behave.formatter.base import StreamOpener
# ---------------------------------------------------------------------------
# CONSTANTS:
# ---------------------------------------------------------------------------
# -- DISABLED: OPTIONAL_STEPS = ('untested', 'undefined')
# Statuses whose zero counts are suppressed in the summary output.
OPTIONAL_STEPS = (Status.untested,) # MAYBE: Status.undefined
# Display order of the status counters in each summary line.
STATUS_ORDER = (Status.passed, Status.failed, Status.skipped,
                Status.undefined, Status.untested)
# ---------------------------------------------------------------------------
# UTILITY FUNCTIONS:
# ---------------------------------------------------------------------------
def pluralize(word, count=1, suffix="s"):
    """Return *word* unchanged for a count of one, otherwise append *suffix*."""
    return word if count == 1 else "{0}{1}".format(word, suffix)
def compute_summary_sum(summary):
    """Compute sum of all summary counts (except: all)

    :param summary: Summary counts (as dict).
    :return: Sum of all counts (as integer).
    """
    return sum(count for name, count in summary.items() if name != "all")
def format_summary0(statement_type, summary):
    """Render one summary line, labeling only the first counter with the
    statement type (legacy variant; see also: format_summary()).
    """
    parts = []
    for status in STATUS_ORDER:
        if status.name not in summary:
            continue
        counts = summary[status.name]
        if counts == 0 and status in OPTIONAL_STEPS:
            # -- SHOW-ONLY: For relevant counts, suppress: untested items, etc.
            continue
        if parts:
            part = u"%d %s" % (counts, status.name)
        else:
            # -- FIRST ITEM: Add statement_type to counter.
            label = statement_type if counts == 1 else statement_type + 's'
            part = u"%d %s %s" % (counts, label, status.name)
        parts.append(part)
    return ", ".join(parts) + "\n"
def format_summary(statement_type, summary):
    """Render the summary line for one statement type, e.g.::

        6 scenarios passed, 1 failed, 2 skipped
    """
    rendered = []
    for status in STATUS_ORDER:
        if status.name not in summary:
            continue
        count = summary[status.name]
        if count == 0 and status in OPTIONAL_STEPS:
            # -- SHOW-ONLY: For relevant counts, suppress: untested items, etc.
            continue
        label = status.name
        if label == "passed":
            # -- ONLY FOR "passed": mention the (pluralized) statement type.
            label = u"%s passed" % pluralize(statement_type, count)
        rendered.append(u"%d %s" % (count, label))
    return ", ".join(rendered) + "\n"
# -- PREPARED:
def format_summary2(statement_type, summary, end="\n"):
    """Format the summary line for one statement type.
    .. code-block::
        6 scenarios (passed: 5, failed: 1, skipped: 0, untested: 0)
    :param statement_type: Statement type name, e.g. "scenario" (as string).
    :param summary: Summary counts for this statement type (as dict).
    :param end: Line terminator to append (as string).
    :return: Formatted summary line (as string).
    """
    parts = []
    for status in STATUS_ORDER:
        if status.name not in summary:
            continue
        counts = summary[status.name]
        if status in OPTIONAL_STEPS and counts == 0:
            # -- SHOW-ONLY: For relevant counts, suppress: untested items, etc.
            continue
        parts.append((status.name, counts))
    counts_sum = summary["all"]
    # BUGFIX: pluralize by the total count, not the builtin ``sum`` function
    # (``pluralize(statement_type, sum)`` always yielded the plural form).
    statement = pluralize(statement_type, counts_sum)
    parts_text = ", ".join(["{0}: {1}".format(name, value)
                            for name, value in parts])
    return "{count:4} {statement:<9} ({parts}){end}".format(
        count=counts_sum, statement=statement, parts=parts_text, end=end)
# ---------------------------------------------------------------------------
# REPORTERS:
# ---------------------------------------------------------------------------
class SummaryReporter(Reporter):
    """Accumulates feature/rule/scenario/step result counts during a test
    run and prints failing scenarios (optional) plus summary lines when
    the run ends.
    """
    # -- CONFIGURATION:
    show_failed_scenarios = True
    output_stream_name = "stdout"

    def __init__(self, config):
        super(SummaryReporter, self).__init__(config)
        stream = getattr(sys, self.output_stream_name, sys.stderr)
        self.stream = StreamOpener.ensure_stream_with_encoder(stream)
        summary_zero_data = {
            "all": 0,
            Status.passed.name: 0,
            Status.failed.name: 0,
            Status.skipped.name: 0,
            Status.untested.name: 0
        }
        self.feature_summary = summary_zero_data.copy()
        self.rule_summary = summary_zero_data.copy()
        self.scenario_summary = summary_zero_data.copy()
        # -- Steps additionally track the "undefined" status.
        self.step_summary = {Status.undefined.name: 0}
        self.step_summary.update(summary_zero_data)
        self.duration = 0.0
        self.run_starttime = 0
        self.run_endtime = 0
        self.failed_scenarios = []
        self.show_rules = True

    def testrun_started(self, timestamp=None):
        """Record the test-run start time (now, if not provided)."""
        if timestamp is None:
            timestamp = time_now()
        self.run_starttime = timestamp

    def testrun_finished(self, timestamp=None):
        """Record the test-run end time (now, if not provided)."""
        if timestamp is None:
            timestamp = time_now()
        self.run_endtime = timestamp

    def print_failing_scenarios(self, stream=None):
        """Print location and name of each failed scenario."""
        if stream is None:
            stream = self.stream
        stream.write("\nFailing scenarios:\n")
        for scenario in self.failed_scenarios:
            stream.write(u" %s %s\n" % (scenario.location, scenario.name))

    def compute_summary_sums(self):
        """(Re)Compute summary sum of all counts (except: all)."""
        summaries = [
            self.feature_summary,
            self.rule_summary,
            self.scenario_summary,
            self.step_summary
        ]
        for summary in summaries:
            summary["all"] = compute_summary_sum(summary)

    def print_summary(self, stream=None, with_duration=True):
        """Write the summary lines (and duration) to *stream*.

        :param stream: Output stream to use (default: self.stream).
        :param with_duration: Append a "Took ..." line when true.
        """
        if stream is None:
            stream = self.stream
        self.compute_summary_sums()
        has_rules = (self.rule_summary["all"] > 0)
        stream.write(format_summary("feature", self.feature_summary))
        if self.show_rules and has_rules:
            # -- HINT: Show only rules, if any exists.
            # BUGFIX: honor the ``stream`` parameter (was: self.stream.write),
            # so a caller-provided stream also receives the rule summary.
            stream.write(format_summary("rule", self.rule_summary))
        stream.write(format_summary("scenario", self.scenario_summary))
        stream.write(format_summary("step", self.step_summary))
        # -- DURATION:
        if with_duration:
            timings = (int(self.duration / 60.0), self.duration % 60)
            stream.write('Took %dm%02.3fs\n' % timings)

    # -- REPORTER-API:
    def feature(self, feature):
        if self.run_starttime == 0:
            # -- DISCOVER: TEST-RUN started.
            self.testrun_started()
        self.process_feature(feature)

    def end(self):
        self.testrun_finished()
        # -- SHOW FAILED SCENARIOS (optional):
        if self.show_failed_scenarios and self.failed_scenarios:
            self.print_failing_scenarios()
            self.stream.write("\n")
        # -- SHOW SUMMARY COUNTS:
        self.print_summary()

    def process_run_items_for(self, parent):
        # Dispatch each run item on its type (Rule/ScenarioOutline/Scenario).
        for run_item in parent:
            if isinstance(run_item, Rule):
                self.process_rule(run_item)
            elif isinstance(run_item, ScenarioOutline):
                self.process_scenario_outline(run_item)
            else:
                # assert isinstance(run_item, Scenario)
                self.process_scenario(run_item)

    def process_feature(self, feature):
        self.duration += feature.duration
        self.feature_summary[feature.status.name] += 1
        self.process_run_items_for(feature)

    def process_rule(self, rule):
        self.rule_summary[rule.status.name] += 1
        self.process_run_items_for(rule)

    def process_scenario(self, scenario):
        if scenario.status == Status.failed:
            self.failed_scenarios.append(scenario)
        self.scenario_summary[scenario.status.name] += 1
        for step in scenario:
            self.step_summary[step.status.name] += 1

    def process_scenario_outline(self, scenario_outline):
        for scenario in scenario_outline.scenarios:
            self.process_scenario(scenario)
|
|
"""Module for scheduling profiles through egtaonline"""
import asyncio
import contextlib
import logging
from gameanalysis import paygame
from gameanalysis import rsgame
from gameanalysis import utils as gu
from egta import profsched
from egta import utils as eu
class _EgtaOnlineScheduler(
    profsched._AOpenableScheduler
): # pylint: disable=too-many-instance-attributes,protected-access
    """A profile scheduler that schedules through egta online
    Parameters
    ----------
    game : RsGame
        The gameanalysis basegame representing the game to schedule.
    api : EgtaOnlineApi
        The api object to be used to query EGTA Online.
    game_id : int
        The id of the egtaonline game to use. It must match the setup of game.
        Use the egtaonline `get_or_create_game` if you need to get a game_id
        from a configuration.
    sleep_time : int
        Time in seconds between queries to egtaonline to determine if profiles
        have finished. This should probably be set roughly equal to the time it
        takes for a simulation to run.
    simultaneous_obs : int
        The number of simultaneous observations to schedule at a time. EGTA
        Online will use this when scheduling.
    max_scheduled : int
        The maximum number of observations to schedule simultaneously. Keeping
        this low helps prevent starving others of flux cycles.
    obs_memory : int
        The amount of memory in MB to allocate for each simulation.
    obs_time : int
        The amount of time in seconds to give each simulation to run. Too low
        and long running simulations will get cancelled giving you biased
        samples, too long and it will take longer to schedule jobs on flux.
    """
    def __init__( # pylint: disable=too-many-arguments
        self,
        game,
        api,
        game_id,
        sleep_time,
        simultaneous_obs,
        max_scheduled,
        obs_memory,
        obs_time,
    ):
        super().__init__(game.role_names, game.strat_names, game.num_role_players)
        self._api = api
        # Empty sample-game copy, used only for profile <-> repr/json mapping.
        self._game = paygame.samplegame_copy(rsgame.empty_copy(game))
        self._game_id = game_id
        self._sleep_time = sleep_time
        self._obs_memory = obs_memory
        self._obs_time = obs_time
        self._simult_obs = simultaneous_obs
        self._is_open = False
        # profile-hash -> ([scheduled], [received], [claimed], [prof_id], Queue);
        # single-element lists act as mutable cells shared with _fetch().
        self._profiles = {}
        # EGTA Online profile id -> the same data tuple stored in _profiles.
        self._prof_ids = {}
        self._sched = None
        self._fetcher = None
        self._sched_lock = asyncio.Lock()
        # Caps total in-flight observations at max_scheduled * simultaneous_obs.
        self._scheduled = asyncio.BoundedSemaphore(max_scheduled * simultaneous_obs)
    def _check_fetcher(self):
        """Check if background coroutine has failed"""
        if self._fetcher.done() and self._fetcher.exception() is not None:
            raise self._fetcher.exception()
    async def sample_payoffs(self, profile):
        # Claims one payoff for `profile`; schedules a new batch of
        # observations when claims outpace what has been scheduled.
        gu.check(self._is_open, "not open")
        self._check_fetcher()
        hprof = gu.hash_array(profile)
        # Cells: [scheduled], [received], [claimed], [profile id], payoff queue.
        data = self._profiles.setdefault(
            hprof, ([0], [0], [0], [None], asyncio.Queue())
        )
        scheduled, _, claimed, prof_id, pays = data
        claimed[0] += 1
        if scheduled[0] < claimed[0]:
            scheduled[0] += self._simult_obs
            async with self._sched_lock:
                # One semaphore slot per new observation; blocks when the
                # max_scheduled cap is reached.
                for _ in range(self._simult_obs):
                    await self._scheduled.acquire()
                pid = prof_id[0]
                if pid is not None:
                    # Raising the count requires removing and re-adding the profile.
                    await self._sched.remove_profile(pid)
                assignment = self._game.profile_to_repr(profile)
                prof_id[0] = (await self._sched.add_profile(assignment, scheduled[0]))[
                    "id"
                ]
                if pid is None:
                    self._prof_ids[prof_id[0]] = data
        # Wait for _fetch() to deliver a payoff for this profile.
        pay = await pays.get()
        self._check_fetcher()
        return pay
    async def _fetch(self): # pylint: disable=too-many-locals
        """Fetch current scheduling status"""
        try:
            while True:
                logging.info(
                    "query scheduler %d for game %d", self._sched["id"], self._game_id
                )
                info = await self._sched.get_requirements()
                gu.check(info["active"], "scheduler was deactivated")
                reqs = info["scheduling_requirements"]
                for req in reqs:
                    prof_id = req["id"]
                    if prof_id not in self._prof_ids:
                        continue # race condition
                    scheduled, received, _, _, pays = self._prof_ids[prof_id]
                    if req["current_count"] <= received[0]:
                        continue
                    # New observations arrived: pull them and enqueue payoffs.
                    egta_prof = await self._api.get_profile(prof_id)
                    jobs = await egta_prof.get_observations()
                    obs = self._game.samplepay_from_json(jobs)
                    num = obs.shape[0] - received[0]
                    # Only un-schedule the amount different than what you
                    # thought you scheduled. We can get other observations from
                    # other schedulers.
                    for _ in range(min(num, scheduled[0] - received[0])):
                        self._scheduled.release()
                    received[0] += num
                    obs = obs[:num].copy()
                    obs.setflags(write=False)
                    for obser in obs:
                        pays.put_nowait(obser)
                await asyncio.sleep(self._sleep_time)
        except Exception as ex:
            # Unblock every waiting sample_payoffs() call with a None payoff
            # before propagating the failure.
            for _, (received,), (claimed,), _, pays in self._profiles.values():
                for _ in range(claimed - received):
                    pays.put_nowait(None)
            raise ex
    async def aopen(self): # pylint: disable=too-many-locals
        """Open the eosched"""
        gu.check(not self._is_open, "already open")
        try:
            game = await self._api.get_game(self._game_id)
            obs = await game.get_observations()
            gu.check(
                rsgame.empty_copy(self._game) == rsgame.empty_json(obs),
                "egtaonline game didn't match specified game",
            )
            conf = dict(obs.get("configuration", ()) or ())
            profiles = obs.get("profiles", ()) or ()
            # Parse profiles
            num_profs = len(profiles)
            num_pays = 0
            for jprof in profiles:
                pid = jprof["id"]
                prof, spays = self._game.profsamplepay_from_json(jprof)
                spays.setflags(write=False)
                hprof = gu.hash_array(prof)
                pays = asyncio.Queue()
                num_spays = len(spays)
                num_pays += num_spays
                for pay in spays:
                    pays.put_nowait(pay)
                # Existing payoffs count as both scheduled and received.
                data = ([num_spays], [num_spays], [0], [pid], pays)
                self._profiles[hprof] = data
                self._prof_ids[pid] = data
            logging.info(
                "found %d existing profiles with %d payoffs in game %d",
                num_profs,
                num_pays,
                self._game_id,
            )
            # Create and start scheduler
            self._sched = await obs.create_generic_scheduler(
                "egta_" + eu.random_string(20),
                True,
                self._obs_memory,
                self._obs_time,
                self._simult_obs,
                1,
                conf,
            )
            logging.warning(
                "created scheduler %d for running simulations of game %d: "
                "https://%s/generic_schedulers/%d",
                self._sched["id"],
                self._game_id,
                self._api.domain,
                self._sched["id"],
            )
            # Background task that polls EGTA Online for finished observations.
            self._fetcher = asyncio.ensure_future(self._fetch())
            self._is_open = True
        except Exception as ex:
            # Partial opens are rolled back before re-raising.
            await self.aclose()
            raise ex
        return self
    async def aclose(self):
        """Close the eosched"""
        if self._fetcher is not None:
            self._fetcher.cancel()
            with contextlib.suppress(Exception, asyncio.CancelledError):
                await self._fetcher
            self._fetcher = None
        if self._sched is not None:
            await self._sched.deactivate()
            logging.info(
                "deactivated scheduler %d for game %d", self._sched["id"], self._game_id
            )
            self._sched = None
        if self._sched_lock.locked():
            self._sched_lock.release()
        # Refill the bounded semaphore until it rejects further releases.
        while True:
            try:
                self._scheduled.release()
            except ValueError:
                break # Fully reset
        self._profiles.clear()
        self._prof_ids.clear()
    def __str__(self):
        return str(self._game_id)
def eosched( # pylint: disable=too-many-arguments
    game,
    api,
    game_id,
    sleep_time,
    simultaneous_obs,
    max_scheduled,
    obs_memory,
    obs_time,
):
    """Create a scheduler that runs profiles through EGTA Online.

    Thin factory around the private _EgtaOnlineScheduler class; see that
    class for the meaning of each argument.
    """
    return _EgtaOnlineScheduler(
        game=game,
        api=api,
        game_id=game_id,
        sleep_time=sleep_time,
        simultaneous_obs=simultaneous_obs,
        max_scheduled=max_scheduled,
        obs_memory=obs_memory,
        obs_time=obs_time,
    )
|
|
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pysnmp/license.html
#
# ASN.1 source file:///usr/share/snmp/mibs/SNMP-NOTIFICATION-MIB.txt
# Produced by pysmi-0.4.0 at Sat Feb 16 12:12:15 2019
#
# Guard: this generated MIB module is meant to be exec'd by a pysnmp
# MibBuilder (which injects 'mibBuilder' into the module globals), not run
# or imported directly.
# NOTE(review): relies on the module docstring being set; __doc__ may be
# None here — confirm against the pysmi template.
if 'mibBuilder' not in globals():
    import sys
    sys.stderr.write(__doc__)
    sys.exit(1)
(Integer,
OctetString,
ObjectIdentifier) = mibBuilder.importSymbols(
"ASN1",
"Integer",
"OctetString",
"ObjectIdentifier")
(NamedValues,) = mibBuilder.importSymbols(
"ASN1-ENUMERATION",
"NamedValues")
(ConstraintsIntersection,
SingleValueConstraint,
ValueRangeConstraint,
ValueSizeConstraint,
ConstraintsUnion) = mibBuilder.importSymbols(
"ASN1-REFINEMENT",
"ConstraintsIntersection",
"SingleValueConstraint",
"ValueRangeConstraint",
"ValueSizeConstraint",
"ConstraintsUnion")
(SnmpAdminString,) = mibBuilder.importSymbols(
"SNMP-FRAMEWORK-MIB",
"SnmpAdminString")
(SnmpTagValue,
snmpTargetParamsName) = mibBuilder.importSymbols(
"SNMP-TARGET-MIB",
"SnmpTagValue",
"snmpTargetParamsName")
(ModuleCompliance,
ObjectGroup,
NotificationGroup) = mibBuilder.importSymbols(
"SNMPv2-CONF",
"ModuleCompliance",
"ObjectGroup",
"NotificationGroup")
(ModuleIdentity,
iso,
Counter64,
MibScalar,
MibTable,
MibTableRow,
MibTableColumn,
Gauge32,
NotificationType,
Bits,
snmpModules,
IpAddress,
Integer32,
ObjectIdentity,
Unsigned32,
MibIdentifier,
Counter32,
TimeTicks) = mibBuilder.importSymbols(
"SNMPv2-SMI",
"ModuleIdentity",
"iso",
"Counter64",
"MibScalar",
"MibTable",
"MibTableRow",
"MibTableColumn",
"Gauge32",
"NotificationType",
"Bits",
"snmpModules",
"IpAddress",
"Integer32",
"ObjectIdentity",
"Unsigned32",
"MibIdentifier",
"Counter32",
"TimeTicks")
(RowStatus,
DisplayString,
TextualConvention,
StorageType) = mibBuilder.importSymbols(
"SNMPv2-TC",
"RowStatus",
"DisplayString",
"TextualConvention",
"StorageType")
snmpNotificationMIB = ModuleIdentity(
(1, 3, 6, 1, 6, 3, 13)
)
snmpNotificationMIB.setRevisions(
("2002-10-14 00:00",
"1998-08-04 00:00",
"1997-07-14 00:00")
)
snmpNotificationMIB.setLastUpdated("200210140000Z")
if mibBuilder.loadTexts:
snmpNotificationMIB.setOrganization("""\
IETF SNMPv3 Working Group
""")
snmpNotificationMIB.setContactInfo("""\
WG-email: snmpv3@lists.tislabs.com Subscribe: majordomo@lists.tislabs.com In
message body: subscribe snmpv3 Co-Chair: Russ Mundy Network Associates
Laboratories Postal: 15204 Omega Drive, Suite 300 Rockville, MD 20850-4601 USA
EMail: mundy@tislabs.com Phone: +1 301-947-7107 Co-Chair: David Harrington
Enterasys Networks Postal: 35 Industrial Way P. O. Box 5004 Rochester, New
Hampshire 03866-5005 USA EMail: dbh@enterasys.com Phone: +1 603-337-2614 Co-
editor: David B. Levi Nortel Networks Postal: 3505 Kesterwood Drive Knoxville,
Tennessee 37918 EMail: dlevi@nortelnetworks.com Phone: +1 865 686 0432 Co-
editor: Paul Meyer Secure Computing Corporation Postal: 2675 Long Lake Road
Roseville, Minnesota 55113 EMail: paul_meyer@securecomputing.com Phone: +1 651
628 1592 Co-editor: Bob Stewart Retired
""")
if mibBuilder.loadTexts:
snmpNotificationMIB.setDescription("""\
This MIB module defines MIB objects which provide mechanisms to remotely
configure the parameters used by an SNMP entity for the generation of
notifications. Copyright (C) The Internet Society (2002). This version of this
MIB module is part of RFC 3413; see the RFC itself for full legal notices.
""")
_SnmpNotifyObjects_ObjectIdentity = ObjectIdentity
snmpNotifyObjects = _SnmpNotifyObjects_ObjectIdentity(
(1, 3, 6, 1, 6, 3, 13, 1)
)
_SnmpNotifyTable_Object = MibTable
snmpNotifyTable = _SnmpNotifyTable_Object(
(1, 3, 6, 1, 6, 3, 13, 1, 1)
)
if mibBuilder.loadTexts:
snmpNotifyTable.setStatus("current")
if mibBuilder.loadTexts:
snmpNotifyTable.setDescription("""\
This table is used to select management targets which should receive
notifications, as well as the type of notification which should be sent to each
selected management target.
""")
_SnmpNotifyEntry_Object = MibTableRow
snmpNotifyEntry = _SnmpNotifyEntry_Object(
(1, 3, 6, 1, 6, 3, 13, 1, 1, 1)
)
snmpNotifyEntry.setIndexNames(
(1, "SNMP-NOTIFICATION-MIB", "snmpNotifyName"),
)
if mibBuilder.loadTexts:
snmpNotifyEntry.setStatus("current")
if mibBuilder.loadTexts:
snmpNotifyEntry.setDescription("""\
An entry in this table selects a set of management targets which should receive
notifications, as well as the type of notification which should be sent to each
selected management target. Entries in the snmpNotifyTable are created and
deleted using the snmpNotifyRowStatus object.
""")
class _SnmpNotifyName_Type(SnmpAdminString):
    """Custom type snmpNotifyName based on SnmpAdminString"""

    # Refine SnmpAdminString with the MIB's SIZE (1..32) constraint.
    subtypeSpec = SnmpAdminString.subtypeSpec
    subtypeSpec += ConstraintsUnion(
        ValueSizeConstraint(1, 32),
    )

# Report the base textual-convention name, as pysmi-generated code does.
_SnmpNotifyName_Type.__name__ = "SnmpAdminString"
_SnmpNotifyName_Object = MibTableColumn
snmpNotifyName = _SnmpNotifyName_Object(
(1, 3, 6, 1, 6, 3, 13, 1, 1, 1, 1),
_SnmpNotifyName_Type()
)
snmpNotifyName.setMaxAccess("not-accessible")
if mibBuilder.loadTexts:
snmpNotifyName.setStatus("current")
if mibBuilder.loadTexts:
snmpNotifyName.setDescription("""\
The locally arbitrary, but unique identifier associated with this
snmpNotifyEntry.
""")
_SnmpNotifyTag_Type = SnmpTagValue
_SnmpNotifyTag_Object = MibTableColumn
snmpNotifyTag = _SnmpNotifyTag_Object(
(1, 3, 6, 1, 6, 3, 13, 1, 1, 1, 2),
_SnmpNotifyTag_Type()
)
snmpNotifyTag.setMaxAccess("read-create")
if mibBuilder.loadTexts:
snmpNotifyTag.setStatus("current")
if mibBuilder.loadTexts:
snmpNotifyTag.setDescription("""\
This object contains a single tag value which is used to select entries in the
snmpTargetAddrTable. Any entry in the snmpTargetAddrTable which contains a tag
value which is equal to the value of an instance of this object is selected. If
this object contains a value of zero length, no entries are selected.
""")
class _SnmpNotifyType_Type(Integer32):
    """Custom type snmpNotifyType based on Integer32"""

    # DEFVAL { trap } from the MIB definition.
    defaultValue = 1

    # Restrict values to the enumerated set {trap(1), inform(2)}.
    subtypeSpec = Integer32.subtypeSpec
    subtypeSpec += ConstraintsUnion(
        SingleValueConstraint(
            *(1,
              2)
        )
    )
    namedValues = NamedValues(
        *(("inform", 2),
          ("trap", 1))
    )

# Report the base type name, as pysmi-generated code does.
_SnmpNotifyType_Type.__name__ = "Integer32"
_SnmpNotifyType_Object = MibTableColumn
snmpNotifyType = _SnmpNotifyType_Object(
(1, 3, 6, 1, 6, 3, 13, 1, 1, 1, 3),
_SnmpNotifyType_Type().clone('trap')
)
snmpNotifyType.setMaxAccess("read-create")
if mibBuilder.loadTexts:
snmpNotifyType.setStatus("current")
if mibBuilder.loadTexts:
snmpNotifyType.setDescription("""\
This object determines the type of notification to be generated for entries in
the snmpTargetAddrTable selected by the corresponding instance of
snmpNotifyTag. This value is only used when generating notifications, and is
ignored when using the snmpTargetAddrTable for other purposes. If the value of
this object is trap(1), then any messages generated for selected rows will
contain Unconfirmed-Class PDUs. If the value of this object is inform(2), then
any messages generated for selected rows will contain Confirmed-Class PDUs.
Note that if an SNMP entity only supports generation of Unconfirmed-Class PDUs
(and not Confirmed-Class PDUs), then this object may be read-only.
""")
class _SnmpNotifyStorageType_Type(StorageType):
    """Custom type snmpNotifyStorageType based on StorageType"""
    # No extra refinement; the subclass only gives the column its own type.
_SnmpNotifyStorageType_Object = MibTableColumn
snmpNotifyStorageType = _SnmpNotifyStorageType_Object(
(1, 3, 6, 1, 6, 3, 13, 1, 1, 1, 4),
_SnmpNotifyStorageType_Type().clone('nonVolatile')
)
snmpNotifyStorageType.setMaxAccess("read-create")
if mibBuilder.loadTexts:
snmpNotifyStorageType.setStatus("current")
if mibBuilder.loadTexts:
snmpNotifyStorageType.setDescription("""\
The storage type for this conceptual row. Conceptual rows having the value
'permanent' need not allow write-access to any columnar objects in the row.
""")
_SnmpNotifyRowStatus_Type = RowStatus
_SnmpNotifyRowStatus_Object = MibTableColumn
snmpNotifyRowStatus = _SnmpNotifyRowStatus_Object(
(1, 3, 6, 1, 6, 3, 13, 1, 1, 1, 5),
_SnmpNotifyRowStatus_Type()
)
snmpNotifyRowStatus.setMaxAccess("read-create")
if mibBuilder.loadTexts:
snmpNotifyRowStatus.setStatus("current")
if mibBuilder.loadTexts:
snmpNotifyRowStatus.setDescription("""\
The status of this conceptual row. To create a row in this table, a manager
must set this object to either createAndGo(4) or createAndWait(5).
""")
_SnmpNotifyFilterProfileTable_Object = MibTable
snmpNotifyFilterProfileTable = _SnmpNotifyFilterProfileTable_Object(
(1, 3, 6, 1, 6, 3, 13, 1, 2)
)
if mibBuilder.loadTexts:
snmpNotifyFilterProfileTable.setStatus("current")
if mibBuilder.loadTexts:
snmpNotifyFilterProfileTable.setDescription("""\
This table is used to associate a notification filter profile with a particular
set of target parameters.
""")
_SnmpNotifyFilterProfileEntry_Object = MibTableRow
snmpNotifyFilterProfileEntry = _SnmpNotifyFilterProfileEntry_Object(
(1, 3, 6, 1, 6, 3, 13, 1, 2, 1)
)
snmpNotifyFilterProfileEntry.setIndexNames(
(1, "SNMP-TARGET-MIB", "snmpTargetParamsName"),
)
if mibBuilder.loadTexts:
snmpNotifyFilterProfileEntry.setStatus("current")
if mibBuilder.loadTexts:
snmpNotifyFilterProfileEntry.setDescription("""\
An entry in this table indicates the name of the filter profile to be used when
generating notifications using the corresponding entry in the
snmpTargetParamsTable. Entries in the snmpNotifyFilterProfileTable are created
and deleted using the snmpNotifyFilterProfileRowStatus object.
""")
class _SnmpNotifyFilterProfileName_Type(SnmpAdminString):
    """Custom type snmpNotifyFilterProfileName based on SnmpAdminString"""

    # Refine SnmpAdminString with the MIB's SIZE (1..32) constraint.
    subtypeSpec = SnmpAdminString.subtypeSpec
    subtypeSpec += ConstraintsUnion(
        ValueSizeConstraint(1, 32),
    )

# Report the base textual-convention name, as pysmi-generated code does.
_SnmpNotifyFilterProfileName_Type.__name__ = "SnmpAdminString"
_SnmpNotifyFilterProfileName_Object = MibTableColumn
snmpNotifyFilterProfileName = _SnmpNotifyFilterProfileName_Object(
(1, 3, 6, 1, 6, 3, 13, 1, 2, 1, 1),
_SnmpNotifyFilterProfileName_Type()
)
snmpNotifyFilterProfileName.setMaxAccess("read-create")
if mibBuilder.loadTexts:
snmpNotifyFilterProfileName.setStatus("current")
if mibBuilder.loadTexts:
snmpNotifyFilterProfileName.setDescription("""\
The name of the filter profile to be used when generating notifications using
the corresponding entry in the snmpTargetAddrTable.
""")
class _SnmpNotifyFilterProfileStorType_Type(StorageType):
    """Custom type snmpNotifyFilterProfileStorType based on StorageType"""
    # No extra refinement; the subclass only gives the column its own type.
_SnmpNotifyFilterProfileStorType_Object = MibTableColumn
snmpNotifyFilterProfileStorType = _SnmpNotifyFilterProfileStorType_Object(
(1, 3, 6, 1, 6, 3, 13, 1, 2, 1, 2),
_SnmpNotifyFilterProfileStorType_Type().clone('nonVolatile')
)
snmpNotifyFilterProfileStorType.setMaxAccess("read-create")
if mibBuilder.loadTexts:
snmpNotifyFilterProfileStorType.setStatus("current")
if mibBuilder.loadTexts:
snmpNotifyFilterProfileStorType.setDescription("""\
The storage type for this conceptual row. Conceptual rows having the value
'permanent' need not allow write-access to any columnar objects in the row.
""")
_SnmpNotifyFilterProfileRowStatus_Type = RowStatus
_SnmpNotifyFilterProfileRowStatus_Object = MibTableColumn
snmpNotifyFilterProfileRowStatus = _SnmpNotifyFilterProfileRowStatus_Object(
(1, 3, 6, 1, 6, 3, 13, 1, 2, 1, 3),
_SnmpNotifyFilterProfileRowStatus_Type()
)
snmpNotifyFilterProfileRowStatus.setMaxAccess("read-create")
if mibBuilder.loadTexts:
snmpNotifyFilterProfileRowStatus.setStatus("current")
if mibBuilder.loadTexts:
snmpNotifyFilterProfileRowStatus.setDescription("""\
The status of this conceptual row. To create a row in this table, a manager
must set this object to either createAndGo(4) or createAndWait(5). Until
instances of all corresponding columns are appropriately configured, the value
of the corresponding instance of the snmpNotifyFilterProfileRowStatus column is
'notReady'. In particular, a newly created row cannot be made active until the
corresponding instance of snmpNotifyFilterProfileName has been set.
""")
_SnmpNotifyFilterTable_Object = MibTable
snmpNotifyFilterTable = _SnmpNotifyFilterTable_Object(
(1, 3, 6, 1, 6, 3, 13, 1, 3)
)
if mibBuilder.loadTexts:
snmpNotifyFilterTable.setStatus("current")
if mibBuilder.loadTexts:
snmpNotifyFilterTable.setDescription("""\
The table of filter profiles. Filter profiles are used to determine whether
particular management targets should receive particular notifications. When a
notification is generated, it must be compared with the filters associated with
each management target which is configured to receive notifications, in order
to determine whether it may be sent to each such management target. A more
complete discussion of notification filtering can be found in section 6. of
[SNMP-APPL].
""")
_SnmpNotifyFilterEntry_Object = MibTableRow
snmpNotifyFilterEntry = _SnmpNotifyFilterEntry_Object(
(1, 3, 6, 1, 6, 3, 13, 1, 3, 1)
)
snmpNotifyFilterEntry.setIndexNames(
(0, "SNMP-NOTIFICATION-MIB", "snmpNotifyFilterProfileName"),
(1, "SNMP-NOTIFICATION-MIB", "snmpNotifyFilterSubtree"),
)
if mibBuilder.loadTexts:
snmpNotifyFilterEntry.setStatus("current")
if mibBuilder.loadTexts:
snmpNotifyFilterEntry.setDescription("""\
An element of a filter profile. Entries in the snmpNotifyFilterTable are
created and deleted using the snmpNotifyFilterRowStatus object.
""")
_SnmpNotifyFilterSubtree_Type = ObjectIdentifier
_SnmpNotifyFilterSubtree_Object = MibTableColumn
snmpNotifyFilterSubtree = _SnmpNotifyFilterSubtree_Object(
(1, 3, 6, 1, 6, 3, 13, 1, 3, 1, 1),
_SnmpNotifyFilterSubtree_Type()
)
snmpNotifyFilterSubtree.setMaxAccess("not-accessible")
if mibBuilder.loadTexts:
snmpNotifyFilterSubtree.setStatus("current")
if mibBuilder.loadTexts:
snmpNotifyFilterSubtree.setDescription("""\
The MIB subtree which, when combined with the corresponding instance of
snmpNotifyFilterMask, defines a family of subtrees which are included in or
excluded from the filter profile.
""")
class _SnmpNotifyFilterMask_Type(OctetString):
    """Custom type snmpNotifyFilterMask based on OctetString"""

    # DEFVAL ''H — an empty mask means exact-match of the whole subtree.
    defaultHexValue = ""

    # SIZE (0..16) refinement from the MIB definition.
    subtypeSpec = OctetString.subtypeSpec
    subtypeSpec += ConstraintsUnion(
        ValueSizeConstraint(0, 16),
    )

# Report the base type name, as pysmi-generated code does.
_SnmpNotifyFilterMask_Type.__name__ = "OctetString"
_SnmpNotifyFilterMask_Object = MibTableColumn
snmpNotifyFilterMask = _SnmpNotifyFilterMask_Object(
(1, 3, 6, 1, 6, 3, 13, 1, 3, 1, 2),
_SnmpNotifyFilterMask_Type()
)
snmpNotifyFilterMask.setMaxAccess("read-create")
if mibBuilder.loadTexts:
snmpNotifyFilterMask.setStatus("current")
if mibBuilder.loadTexts:
snmpNotifyFilterMask.setDescription("""\
The bit mask which, in combination with the corresponding instance of
snmpNotifyFilterSubtree, defines a family of subtrees which are included in or
excluded from the filter profile. Each bit of this bit mask corresponds to a
sub-identifier of snmpNotifyFilterSubtree, with the most significant bit of the
i-th octet of this octet string value (extended if necessary, see below)
corresponding to the (8*i - 7)-th sub-identifier, and the least significant bit
of the i-th octet of this octet string corresponding to the (8*i)-th sub-
identifier, where i is in the range 1 through 16. Each bit of this bit mask
specifies whether or not the corresponding sub-identifiers must match when
determining if an OBJECT IDENTIFIER matches this family of filter subtrees; a
'1' indicates that an exact match must occur; a '0' indicates 'wild card',
i.e., any sub-identifier value matches. Thus, the OBJECT IDENTIFIER X of an
object instance is contained in a family of filter subtrees if, for each sub-
identifier of the value of snmpNotifyFilterSubtree, either: the i-th bit of
snmpNotifyFilterMask is 0, or the i-th sub-identifier of X is equal to the i-th
sub-identifier of the value of snmpNotifyFilterSubtree. If the value of this
bit mask is M bits long and there are more than M sub-identifiers in the
corresponding instance of snmpNotifyFilterSubtree, then the bit mask is
extended with 1's to be the required length. Note that when the value of this
object is the zero-length string, this extension rule results in a mask of
all-1's being used (i.e., no 'wild card'), and the family of filter subtrees is
the one subtree uniquely identified by the corresponding instance of
snmpNotifyFilterSubtree.
""")
class _SnmpNotifyFilterType_Type(Integer32):
    """Custom type snmpNotifyFilterType based on Integer32"""

    # DEFVAL { included } from the MIB definition.
    defaultValue = 1

    # Restrict values to the enumerated set {included(1), excluded(2)}.
    subtypeSpec = Integer32.subtypeSpec
    subtypeSpec += ConstraintsUnion(
        SingleValueConstraint(
            *(1,
              2)
        )
    )
    namedValues = NamedValues(
        *(("excluded", 2),
          ("included", 1))
    )

# Report the base type name, as pysmi-generated code does.
_SnmpNotifyFilterType_Type.__name__ = "Integer32"
_SnmpNotifyFilterType_Object = MibTableColumn
snmpNotifyFilterType = _SnmpNotifyFilterType_Object(
(1, 3, 6, 1, 6, 3, 13, 1, 3, 1, 3),
_SnmpNotifyFilterType_Type().clone('included')
)
snmpNotifyFilterType.setMaxAccess("read-create")
if mibBuilder.loadTexts:
snmpNotifyFilterType.setStatus("current")
if mibBuilder.loadTexts:
snmpNotifyFilterType.setDescription("""\
This object indicates whether the family of filter subtrees defined by this
entry are included in or excluded from a filter. A more detailed discussion of
the use of this object can be found in section 6. of [SNMP-APPL].
""")
class _SnmpNotifyFilterStorageType_Type(StorageType):
    """Custom type snmpNotifyFilterStorageType based on StorageType"""
    # No extra refinement; the subclass only gives the column its own type.
_SnmpNotifyFilterStorageType_Object = MibTableColumn
snmpNotifyFilterStorageType = _SnmpNotifyFilterStorageType_Object(
(1, 3, 6, 1, 6, 3, 13, 1, 3, 1, 4),
_SnmpNotifyFilterStorageType_Type()
)
snmpNotifyFilterStorageType.setMaxAccess("read-create")
if mibBuilder.loadTexts:
snmpNotifyFilterStorageType.setStatus("current")
if mibBuilder.loadTexts:
snmpNotifyFilterStorageType.setDescription("""\
The storage type for this conceptual row. Conceptual rows having the value
'permanent' need not allow write-access to any columnar objects in the row.
""")
_SnmpNotifyFilterRowStatus_Type = RowStatus
_SnmpNotifyFilterRowStatus_Object = MibTableColumn
snmpNotifyFilterRowStatus = _SnmpNotifyFilterRowStatus_Object(
(1, 3, 6, 1, 6, 3, 13, 1, 3, 1, 5),
_SnmpNotifyFilterRowStatus_Type()
)
snmpNotifyFilterRowStatus.setMaxAccess("read-create")
if mibBuilder.loadTexts:
snmpNotifyFilterRowStatus.setStatus("current")
if mibBuilder.loadTexts:
snmpNotifyFilterRowStatus.setDescription("""\
The status of this conceptual row. To create a row in this table, a manager
must set this object to either createAndGo(4) or createAndWait(5).
""")
_SnmpNotifyConformance_ObjectIdentity = ObjectIdentity
snmpNotifyConformance = _SnmpNotifyConformance_ObjectIdentity(
(1, 3, 6, 1, 6, 3, 13, 3)
)
_SnmpNotifyCompliances_ObjectIdentity = ObjectIdentity
snmpNotifyCompliances = _SnmpNotifyCompliances_ObjectIdentity(
(1, 3, 6, 1, 6, 3, 13, 3, 1)
)
_SnmpNotifyGroups_ObjectIdentity = ObjectIdentity
snmpNotifyGroups = _SnmpNotifyGroups_ObjectIdentity(
(1, 3, 6, 1, 6, 3, 13, 3, 2)
)
snmpNotifyGroup = ObjectGroup(
(1, 3, 6, 1, 6, 3, 13, 3, 2, 1)
)
snmpNotifyGroup.setObjects(
*(("SNMP-NOTIFICATION-MIB", "snmpNotifyTag"),
("SNMP-NOTIFICATION-MIB", "snmpNotifyType"),
("SNMP-NOTIFICATION-MIB", "snmpNotifyStorageType"),
("SNMP-NOTIFICATION-MIB", "snmpNotifyRowStatus"))
)
if mibBuilder.loadTexts:
snmpNotifyGroup.setStatus("current")
if mibBuilder.loadTexts:
snmpNotifyGroup.setDescription("""\
A collection of objects for selecting which management targets are used for
generating notifications, and the type of notification to be generated for each
selected management target.
""")
snmpNotifyFilterGroup = ObjectGroup(
(1, 3, 6, 1, 6, 3, 13, 3, 2, 2)
)
snmpNotifyFilterGroup.setObjects(
*(("SNMP-NOTIFICATION-MIB", "snmpNotifyFilterProfileName"),
("SNMP-NOTIFICATION-MIB", "snmpNotifyFilterProfileStorType"),
("SNMP-NOTIFICATION-MIB", "snmpNotifyFilterProfileRowStatus"),
("SNMP-NOTIFICATION-MIB", "snmpNotifyFilterMask"),
("SNMP-NOTIFICATION-MIB", "snmpNotifyFilterType"),
("SNMP-NOTIFICATION-MIB", "snmpNotifyFilterStorageType"),
("SNMP-NOTIFICATION-MIB", "snmpNotifyFilterRowStatus"))
)
if mibBuilder.loadTexts:
snmpNotifyFilterGroup.setStatus("current")
if mibBuilder.loadTexts:
snmpNotifyFilterGroup.setDescription("""\
A collection of objects providing remote configuration of notification filters.
""")
snmpNotifyBasicCompliance = ModuleCompliance(
(1, 3, 6, 1, 6, 3, 13, 3, 1, 1)
)
if mibBuilder.loadTexts:
snmpNotifyBasicCompliance.setStatus(
"current"
)
if mibBuilder.loadTexts:
snmpNotifyBasicCompliance.setDescription("""\
The compliance statement for minimal SNMP entities which implement only SNMP
Unconfirmed-Class notifications and read-create operations on only the
snmpTargetAddrTable.
""")
snmpNotifyBasicFiltersCompliance = ModuleCompliance(
(1, 3, 6, 1, 6, 3, 13, 3, 1, 2)
)
if mibBuilder.loadTexts:
snmpNotifyBasicFiltersCompliance.setStatus(
"current"
)
if mibBuilder.loadTexts:
snmpNotifyBasicFiltersCompliance.setDescription("""\
The compliance statement for SNMP entities which implement SNMP Unconfirmed-
Class notifications with filtering, and read-create operations on all related
tables.
""")
snmpNotifyFullCompliance = ModuleCompliance(
(1, 3, 6, 1, 6, 3, 13, 3, 1, 3)
)
if mibBuilder.loadTexts:
snmpNotifyFullCompliance.setStatus(
"current"
)
if mibBuilder.loadTexts:
snmpNotifyFullCompliance.setDescription("""\
The compliance statement for SNMP entities which either implement only SNMP
Confirmed-Class notifications, or both SNMP Unconfirmed-Class and Confirmed-
Class notifications, plus filtering and read-create operations on all related
tables.
""")
mibBuilder.exportSymbols(
"SNMP-NOTIFICATION-MIB",
**{"snmpNotificationMIB": snmpNotificationMIB,
"snmpNotifyObjects": snmpNotifyObjects,
"snmpNotifyTable": snmpNotifyTable,
"snmpNotifyEntry": snmpNotifyEntry,
"snmpNotifyName": snmpNotifyName,
"snmpNotifyTag": snmpNotifyTag,
"snmpNotifyType": snmpNotifyType,
"snmpNotifyStorageType": snmpNotifyStorageType,
"snmpNotifyRowStatus": snmpNotifyRowStatus,
"snmpNotifyFilterProfileTable": snmpNotifyFilterProfileTable,
"snmpNotifyFilterProfileEntry": snmpNotifyFilterProfileEntry,
"snmpNotifyFilterProfileName": snmpNotifyFilterProfileName,
"snmpNotifyFilterProfileStorType": snmpNotifyFilterProfileStorType,
"snmpNotifyFilterProfileRowStatus": snmpNotifyFilterProfileRowStatus,
"snmpNotifyFilterTable": snmpNotifyFilterTable,
"snmpNotifyFilterEntry": snmpNotifyFilterEntry,
"snmpNotifyFilterSubtree": snmpNotifyFilterSubtree,
"snmpNotifyFilterMask": snmpNotifyFilterMask,
"snmpNotifyFilterType": snmpNotifyFilterType,
"snmpNotifyFilterStorageType": snmpNotifyFilterStorageType,
"snmpNotifyFilterRowStatus": snmpNotifyFilterRowStatus,
"snmpNotifyConformance": snmpNotifyConformance,
"snmpNotifyCompliances": snmpNotifyCompliances,
"snmpNotifyBasicCompliance": snmpNotifyBasicCompliance,
"snmpNotifyBasicFiltersCompliance": snmpNotifyBasicFiltersCompliance,
"snmpNotifyFullCompliance": snmpNotifyFullCompliance,
"snmpNotifyGroups": snmpNotifyGroups,
"snmpNotifyGroup": snmpNotifyGroup,
"snmpNotifyFilterGroup": snmpNotifyFilterGroup}
)
|
|
import os
import collections
import datetime
import shutil
import subprocess
from uitools.qt import Qt, QtCore, QtGui
# Lightweight record describing one playblast discovered on disk.
PlayblastInfo = collections.namedtuple('PlayblastInfo', (
    'name',           # directory basename, used as the display name
    'directory',      # path to the playblast's frame folder
    'first_frame',    # path to the first .jpg/.jpeg frame (thumbnail source)
    'user_category',  # lower-cased approval_status contents, or 'none'
    'created_at',     # datetime taken from the first frame's ctime
    'audio',          # .wav/.aif path referenced by the .txt manifest, or None
    'maya_file'       # .ma/.mb path referenced by the .txt manifest, or None
))
def parse_audio_txt(path):
    """Parse a playblast audio manifest file.

    The manifest may contain comment lines (starting with ``#``), the word
    ``NTSC`` (case-insensitive, selecting 29.97 fps instead of the default
    24), and paths to the source audio (.wav/.aif) and Maya scene (.ma/.mb)
    files.  Paths that do not exist on disk are ignored; if several
    candidates exist, the last one listed wins.

    :param path: path to the manifest (``*.txt``) file.
    :return: ``(audio, maya_file, frame_rate)`` where ``audio`` and
        ``maya_file`` are ``None`` when not found.
    """
    audio = None
    maya_file = None
    frame_rate = 24
    with open(path) as f:
        # Iterate the file lazily instead of materializing readlines().
        for line in f:
            if line.startswith('#'):
                continue
            line = line.rstrip()
            if line.lower() == 'ntsc':
                frame_rate = 29.97
            if os.path.exists(line):
                ext = os.path.splitext(line)[1]
                # An extension can only match one branch, so use elif.
                if ext.lower() in ('.wav', '.aif'):
                    audio = line
                elif ext.lower() in ('.mb', '.ma'):
                    maya_file = line
    return audio, maya_file, frame_rate
class PlayblastThumbnail(QtGui.QLabel):
    """QLabel showing a 100x57 scaled thumbnail of a playblast frame."""

    def __init__(self, path):
        # Path to the source image; the pixmap is loaded eagerly below.
        self._path = path
        self._loaded = False
        super(PlayblastThumbnail, self).__init__()
        self.setAlignment(Qt.AlignCenter)
        # Fit the 100x57 table cell while preserving aspect ratio.
        self.setPixmap(QtGui.QPixmap(self._path).scaled(100, 57, Qt.KeepAspectRatio, Qt.SmoothTransformation))
        self._loaded = True

    def sizeHint(self):
        # Report the scaled pixmap size once loaded; before that, fall
        # back to the nominal cell size.
        if self._loaded:
            return self.pixmap().size()
        else:
            return QtCore.QSize(100, 57)
class PlayblastTable(QtGui.QTableWidget):
    """Table of playblasts: thumbnail, name, and creation time per row."""

    # Emitted when the user requests a re-scan via the context menu.
    refresh = QtCore.pyqtSignal()

    def __init__(self, parent = None):
        super(PlayblastTable, self).__init__(parent)
        self.setColumnCount(3)
        self.setColumnWidth(0, 100)
        self.verticalHeader().hide()
        self.horizontalHeader().setStretchLastSection(True)
        self.setHorizontalHeaderLabels(['First Frame', 'Name', 'Creation Time'])
        self.setAlternatingRowColors(True)
        self.setSelectionBehavior(self.SelectRows)
        self.setSortingEnabled(True)
        # Newest first (column 2 holds the creation timestamp).
        self.sortItems(2, Qt.DescendingOrder)
        self.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
        # Double-click anywhere in a row opens the flipbook viewer.
        self.itemDoubleClicked .connect(lambda x: self.flipbook_playblast())

    def add_playblasts(self, playblasts):
        """Append one row per playblast; ' None' sorts 'none' entries first."""
        for playblast in sorted(playblasts, key = lambda pb: (' None' if pb.user_category == 'none' else pb.user_category, pb.name)):
            row = self.rowCount()
            self.setRowCount(row + 1)
            self.setRowHeight(row, 57)
            thumb = PlayblastThumbnail(playblast.first_frame)
            # Stash the record on the widget so selection handlers can
            # recover it via cellWidget(row, 0).playblast.
            thumb.playblast = playblast
            self.setCellWidget(row, 0, thumb)
            name = QtGui.QTableWidgetItem(playblast.name)
            self.setItem(row, 1, name)
            date = QtGui.QTableWidgetItem(playblast.created_at.isoformat(' '))
            self.setItem(row, 2, date)

    def contextMenuEvent(self, event):
        """Right-click menu: flipbook, quicktime, refresh, delete."""
        menu = QtGui.QMenu(self)
        flipbook_action = menu.addAction("Flipbook")
        flipbook_action.triggered.connect(self.flipbook_playblast)
        qt_action = menu.addAction("Make Quicktime")
        qt_action.triggered.connect(self.make_quicktime)
        refresh_action = menu.addAction("Refresh")
        refresh_action.triggered.connect(self.refresh.emit)
        delete_action = menu.addAction("Delete")
        delete_action.triggered.connect(self.delete_playblasts)
        # Actions fire through their triggered signals; the returned
        # action object is not otherwise used.
        action = menu.exec_(event.globalPos())

    def current_playblast(self):
        """Return the PlayblastInfo of the current row (falsy if none)."""
        row = self.currentRow()
        thumb = self.cellWidget(row, 0) if row is not None else None
        playblast = thumb and thumb.playblast
        return playblast

    def current_path(self):
        """Return the selected playblast's directory if it still exists."""
        playblast = self.current_playblast()
        path = playblast and playblast.directory
        return path if path and os.path.exists(path) else None

    def delete_playblasts(self):
        """Remove the selected rows and delete their folders from disk."""
        rows = []
        for item in self.selectedItems():
            if not item.row() in rows:
                rows.append(item.row())
        # Walk bottom-up so earlier removals don't shift pending indices.
        for row in reversed(sorted(rows)):
            dirname = None
            thumb = self.cellWidget(row, 0) if row is not None else None
            if thumb and thumb.playblast and thumb.playblast.directory and os.path.exists(thumb.playblast.directory):
                dirname = thumb.playblast.directory
            print "rm", dirname, row
            self.removeRow(row)
            if dirname:
                shutil.rmtree(dirname)

    def flipbook_playblast(self):
        """Open the selected playblast's frames (and audio) in RV.

        NOTE(review): assumes a row is selected — current_playblast() may
        be falsy here, which would raise AttributeError; confirm callers.
        """
        playblast = self.current_playblast()
        cmd = ['rv', '[', os.path.join(playblast.directory, '*.jpg'), '-fps', str(24), ']']
        if playblast.audio:
            cmd.extend(['-over', '[', playblast.audio, ']'])
        # fix for launching rv from maya on mac
        # http://www.tweaksoftware.com/static/documentation/rv/current/html/maya_tools_help.html#_osx_maya_2014
        env = dict(os.environ)
        if 'QT_MAC_NO_NATIVE_MENUBAR' in env:
            del env['QT_MAC_NO_NATIVE_MENUBAR']
        print subprocess.list2cmdline(cmd)
        proc = subprocess.Popen(cmd, env = env)

    def make_quicktime(self):
        """Kick off an external quicktime render of the selected playblast."""
        playblast = self.current_playblast()
        cmd = ['make_quicktime', playblast.first_frame]
        if playblast.audio:
            cmd.extend(['--audio', playblast.audio])
        if playblast.maya_file:
            cmd.extend(['--shotdir', playblast.maya_file])
        print subprocess.list2cmdline(cmd)
        subprocess.Popen(cmd)
class Picker(QtGui.QTabWidget):
    """Tabbed browser of legacy playblasts, one PlayblastTable per category."""

    # Emitted with the selected playblast directory (or None) whenever the
    # row selection or the active tab changes.
    pathChanged = QtCore.pyqtSignal(object)

    def __init__(self, parent = None, selection_mode = QtGui.QTableWidget.SingleSelection,):
        super(Picker, self).__init__(parent)
        self._playblasts = []
        self._selection_mode = selection_mode
        self._find_legacy_playblasts()
        self._tables_by_name = {}
        self._setup_ui()

    def _setup_ui(self):
        """Create one tab per user category and populate its rows."""
        self.currentChanged.connect(self._current_tab_changed)
        tables = self._tables_by_name
        # ' None' sorts uncategorized playblasts ahead of named categories.
        for playblast in sorted(self._playblasts,
            key=lambda pb: (' None' if pb.user_category == 'none' else pb.user_category, pb.name)
        ):
            if playblast.user_category not in tables:
                table = PlayblastTable()
                table.itemSelectionChanged.connect(self._table_selection_changed)
                table.refresh.connect(self.refresh)
                if self._selection_mode:
                    table.setSelectionMode(self._selection_mode)
                tables[playblast.user_category] = table
                self.addTab(table, "Playblasts" if playblast.user_category == "none" else playblast.user_category.title())
            table = tables[playblast.user_category]
            table.add_playblasts([playblast])
        for table in tables.itervalues():
            table.resizeColumnToContents(1)
            table.resizeColumnToContents(2)

    def refresh(self):
        """Re-scan the playblast folder and rebuild every tab in place."""
        for table in self._tables_by_name.itervalues():
            table.clearContents()
            table.setRowCount(0)
        self._playblasts = []
        self._find_legacy_playblasts()
        self._setup_ui()

    def _find_legacy_playblasts(self):
        """Scan /var/tmp/srv_playblast and collect PlayblastInfo records."""
        # This is the folder that they are stored in.
        if not os.path.exists('/var/tmp/srv_playblast'):
            return
        for name in os.listdir('/var/tmp/srv_playblast'):
            directory = os.path.join('/var/tmp/srv_playblast', name)
            # Try to grab the first frame.
            try:
                file_names = os.listdir(directory)
            except OSError as e:
                if e.errno == 20:  # errno 20 == ENOTDIR: not a folder.
                    continue
                raise
            frame_gen = (x for x in sorted(file_names) if os.path.splitext(x)[1] in ('.jpg', '.jpeg'))
            first_frame = next(frame_gen, None)
            if first_frame is None:
                continue
            audio = None
            maya_file = None
            # BUGFIX: default to None so a directory with frames but no
            # .txt manifest is still listed instead of the bare next()
            # raising StopIteration and aborting the whole scan.
            audio_text = next((x for x in sorted(file_names) if os.path.splitext(x)[1] in ('.txt',)), None)
            if audio_text:
                # frame_rate is parsed but not stored on PlayblastInfo.
                audio, maya_file, frame_rate = parse_audio_txt(os.path.join(directory, audio_text))
            first_frame = os.path.join(directory, first_frame)
            user_category_path = os.path.join(directory, 'approval_status')
            user_category = open(user_category_path).read() if os.path.exists(user_category_path) else None
            # str(None).lower() deliberately yields 'none', the default category.
            user_category = str(user_category).lower()
            self._playblasts.append(PlayblastInfo(
                name = name,
                directory = directory,
                user_category = user_category,
                first_frame = first_frame,
                created_at = datetime.datetime.fromtimestamp(os.path.getctime(first_frame)),
                audio = audio,
                maya_file = maya_file
            ))

    def autoSetMinimumWidth(self):
        """Widen the widget to fit the widest table's combined columns."""
        width = 0
        for table in self._tables_by_name.itervalues():
            width = max(width, sum(table.columnWidth(i) for i in xrange(table.columnCount())))
        if width:
            self.setMinimumWidth(width)

    def _table_selection_changed(self):
        path = self.currentPath()
        self.pathChanged.emit(path)

    def _current_tab_changed(self):
        path = self.currentPath()
        self.pathChanged.emit(path)

    def currentPath(self):
        """Directory of the playblast selected in the active tab, or None."""
        table = self.currentWidget()
        return table.current_path()
if __name__ == '__main__':
    # Standalone smoke test: show the picker in its own Qt application.
    import sys
    app = QtGui.QApplication([])
    widget = Picker()
    widget.autoSetMinimumWidth()
    widget.show()
    widget.raise_()
    sys.exit(app.exec_())
|
|
import copy
import unittest
import warnings
import mock
import numpy as np
import pytest
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import optimizer
from chainer import optimizers
from chainer import serializer
from chainer import testing
from chainer.testing import attr
import chainerx
if chainerx.is_available():
import chainerx.testing
# Backend configurations injected into the device-parameterized tests:
# NumPy (plain and iDeep), CuPy on two devices, and ChainerX on native
# plus two CUDA devices.
_backend_params = [
    # NumPy
    {},
    {'use_ideep': 'always'},
    # CuPy
    {'use_cuda': True, 'cuda_device': 0},
    {'use_cuda': True, 'cuda_device': 1},
    # ChainerX
    {'use_chainerx': True, 'chainerx_device': 'native:0'},
    {'use_chainerx': True, 'chainerx_device': 'cuda:0'},
    {'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
class TestHyperparameter(unittest.TestCase):
    """Tests for optimizer.Hyperparameter's parent-fallback lookup."""

    def setUp(self):
        # Parent defines x and y; the child overrides y and adds z, so
        # child lookups should fall back to the parent only for x.
        self.parent = optimizer.Hyperparameter()
        self.parent.x = 1
        self.parent.y = 2
        self.child = optimizer.Hyperparameter(self.parent)
        self.child.y = 3
        self.child.z = 4

    def test_getattr(self):
        # The parent sees only its own attributes.
        self.assertTrue(hasattr(self.parent, 'x'))
        self.assertEqual(self.parent.x, 1)
        self.assertTrue(hasattr(self.parent, 'y'))
        self.assertEqual(self.parent.y, 2)
        self.assertFalse(hasattr(self.parent, 'z'))
        # The child inherits x, shadows y, and owns z.
        self.assertTrue(hasattr(self.child, 'x'))
        self.assertEqual(self.child.x, 1)
        self.assertTrue(hasattr(self.child, 'y'))
        self.assertEqual(self.child.y, 3)
        self.assertTrue(hasattr(self.child, 'z'))
        self.assertEqual(self.child.z, 4)

    def test_get_dict(self):
        # get_dict flattens the chain; child values win over the parent's.
        self.assertEqual(self.parent.get_dict(), {'x': 1, 'y': 2})
        self.assertEqual(self.child.get_dict(), {'x': 1, 'y': 3, 'z': 4})

    def test_repr(self):
        self.assertEqual(repr(self.parent), 'Hyperparameter(x=1, y=2)')
        self.assertEqual(repr(self.child), 'Hyperparameter(x=1, y=3, z=4)')

    def test_deep_copy(self):
        # Deep-copying parent and child together must relink the copies
        # to each other rather than duplicating the parent twice.
        parent_copy, child_copy = copy.deepcopy([self.parent, self.child])
        self.assertEqual(self.child.get_dict(), child_copy.get_dict())
        self.assertEqual(self.parent.get_dict(), parent_copy.get_dict())
        self.assertIs(child_copy.parent, parent_copy)
class DummyDeserializer(serializer.Deserializer):

    """Deserializer stub backed by a plain dict.

    ``None`` placeholders are replaced by the stored value, ndarrays are
    filled in place, and any other value is rebuilt from the stored data
    using its own type.
    """

    def __init__(self, target):
        super(DummyDeserializer, self).__init__()
        self.target = target

    def __getitem__(self, key):
        raise NotImplementedError

    def __call__(self, key, value):
        # Every branch reads the stored entry, so a missing key always
        # raises KeyError (relied on by strict-deserializer tests).
        stored = self.target[key]
        if value is None:
            return stored
        if isinstance(value, np.ndarray):
            np.copyto(value, stored)
            return value
        return type(value)(np.asarray(stored))
def _create_update_rule(has_states):
    # Build an UpdateRule whose update_core_{cpu,gpu,chainerx} methods are
    # wrapped in MagicMocks so tests can count which variant was dispatched.
    # When ``has_states`` is true, init_state fills a dummy state holding an
    # int entry 'a' and a float32 ndarray entry 'b'.
    class SimpleUpdateRule(optimizer.UpdateRule):
        def update_core_cpu(self, param):
            pass
        def update_core_gpu(self, param):
            pass
    def _init_state(data):
        # ``data`` is unused; the state dict lives on the enclosing rule.
        state = update_rule.state
        state['a'] = 0
        state['b'] = np.array([1, 2, 3], dtype=np.float32)
    update_rule = SimpleUpdateRule()
    # wraps=... keeps the original (no-op) behavior while recording calls.
    update_rule.update_core_cpu = mock.MagicMock(
        wraps=update_rule.update_core_cpu)
    update_rule.update_core_gpu = mock.MagicMock(
        wraps=update_rule.update_core_gpu)
    update_rule.update_core_chainerx = mock.MagicMock(
        wraps=update_rule.update_core_chainerx)
    if has_states:
        update_rule.init_state = _init_state
    return update_rule
def _create_var():
    """Return a (2, 3) float32 Variable of ones whose grad is also ones."""
    arr = np.ones((2, 3), np.float32)
    return chainer.Variable(arr, grad=np.ones_like(arr))
@testing.backend.inject_backend_tests(
    [
        'test_update',
        'test_add_hook',
        'test_add_hook_with_name',
        'test_add_hook_with_function_name',
    ],
    _backend_params)
class TestUpdateRule(unittest.TestCase):
    """Tests UpdateRule: device-dependent dispatch of update_core_* and
    add/remove of per-parameter hooks."""
    def setUp(self):
        self.update_rule = _create_update_rule(has_states=False)
        self.var = _create_var()
    def check_update(self, backend_config):
        # Run update() on a variable placed on the configured device and
        # assert that exactly the expected update_core_* variant ran once.
        var = self.var
        var.to_device(backend_config.device)
        update_rule = self.update_rule
        update_rule.update(var)
        xp = backend_config.xp
        # First check update_core_chainerx.
        # If xp is chainerx, fallback xp is assigned to it for the second
        # check.
        if xp is chainerx:
            self.assertEqual(
                self.update_rule.update_core_chainerx.call_count, 1)
            xp = backend_config.device.fallback_device.xp
        else:
            self.assertEqual(
                self.update_rule.update_core_chainerx.call_count, 0)
        # Secondly check update_core_cpu and _gpu.
        if xp is np:
            self.assertEqual(update_rule.update_core_cpu.call_count, 1)
            self.assertEqual(update_rule.update_core_gpu.call_count, 0)
        elif xp is cuda.cupy:
            self.assertEqual(self.update_rule.update_core_cpu.call_count, 0)
            self.assertEqual(self.update_rule.update_core_gpu.call_count, 1)
    def test_update(self, backend_config):
        self.check_update(backend_config)
    def test_add_hook(self, backend_config):
        # An anonymous hook is invoked once with (update_rule, param).
        hook = mock.MagicMock()
        self.update_rule.add_hook(hook)
        self.check_update(backend_config)
        self.assertEqual(hook.call_count, 1)
        args = hook.call_args_list[0][0]
        self.assertEqual(len(args), 2)
        self.assertIs(args[0], self.update_rule)
        self.assertIs(args[1], self.var)
    def test_add_hook_with_name(self, backend_config):
        # Same contract as test_add_hook, but registered under a name.
        hook = mock.MagicMock()
        self.update_rule.add_hook(hook, name='hook')
        self.check_update(backend_config)
        self.assertEqual(hook.call_count, 1)
        args = hook.call_args_list[0][0]
        self.assertEqual(len(args), 2)
        self.assertIs(args[0], self.update_rule)
        self.assertIs(args[1], self.var)
    def test_remove_hook(self, backend_config):
        hook = mock.MagicMock()
        self.update_rule.add_hook(hook, name='hook')
        self.update_rule.remove_hook('hook')
        self.check_update(backend_config)
        self.assertEqual(hook.call_count, 0)
    def test_add_hook_with_function_name(self, backend_config):
        # A plain function registered without a name can be removed by its
        # __name__ ('foo'), so the hook body must never run.
        hook_body = mock.MagicMock()
        def foo(update_rule, data, grad):
            hook_body(update_rule, data, grad)
        self.update_rule.add_hook(foo)
        self.update_rule.remove_hook('foo')
        self.check_update(backend_config)
        self.assertEqual(hook_body.call_count, 0)
    def test_add_hook_no_name(self):
        # A callable with neither an explicit name nor __name__ is rejected.
        class CallableWithoutName(object):
            def __call__(self, update_rule, param):
                pass
        with self.assertRaises(ValueError):
            self.update_rule.add_hook(CallableWithoutName())
    def test_add_hook_duplicated_name(self):
        self.update_rule.add_hook(mock.MagicMock(), name='foo')
        with self.assertRaises(KeyError):
            self.update_rule.add_hook(mock.MagicMock(), name='foo')
    def test_remove_hook_not_exist(self):
        with self.assertRaises(KeyError):
            self.update_rule.remove_hook('foo')
    def test_disabled_update_rule(self):
        # enabled=False suppresses update_core; re-enabling restores it.
        self.update_rule.update_core = mock.MagicMock()
        self.update_rule.enabled = False
        self.update_rule.update(self.var)
        self.assertEqual(self.update_rule.update_core.call_count, 0)
        self.update_rule.enabled = True
        self.update_rule.update(self.var)
        self.assertEqual(self.update_rule.update_core.call_count, 1)
@testing.backend.inject_backend_tests(None, _backend_params)
class TestOptimizerSerialize(unittest.TestCase):
    """Tests UpdateRule.serialize via a dict-backed DummyDeserializer."""
    def setUp(self):
        self.update_rule = _create_update_rule(has_states=True)
    def get_target(self, backend_config):
        # Serialized content: timestep 't' plus state entries 'a' and 'b';
        # 'b' is placed on the configured backend's device.
        target = {}
        target['t'] = 100
        target['a'] = 1
        target['b'] = (
            backend_config.get_array(np.array([2, 3, 4], dtype=np.float32)))
        return target
    def test_deserialize(self, backend_config):
        target = self.get_target(backend_config)
        self.update_rule.serialize(DummyDeserializer(target))
        self.assertEqual(self.update_rule.t, target['t'])
        self.assertIsNotNone(self.update_rule.state)
        self.assertEqual(self.update_rule.state['a'], target['a'])
        backend_config.xp.testing.assert_array_equal(
            self.update_rule.state['b'], target['b'])
    def test_deserialize_by_strict_deserializer(self, backend_config):
        # A missing state entry must surface as KeyError (strict behavior;
        # DummyDeserializer always reads target[key]).
        target = self.get_target(backend_config)
        del target['a']
        with self.assertRaises(KeyError):
            self.update_rule.serialize(DummyDeserializer(target))
    def test_deserialize_by_nonstrict_deserializer(self, backend_config):
        # With a None entry, 't' is still restored but the state ends unset.
        target = self.get_target(backend_config)
        target['a'] = None
        self.update_rule.serialize(DummyDeserializer(target))
        self.assertEqual(self.update_rule.t, target['t'])
        self.assertIsNone(self.update_rule.state)
    def test_deserialize_disabled_update_rule_by_strict_deserializer(
            self, backend_config):
        # A disabled rule skips state deserialization, so the missing 'a'
        # entry raises no KeyError and the state stays unset.
        self.update_rule.enabled = False
        target = self.get_target(backend_config)
        del target['a']
        self.update_rule.serialize(DummyDeserializer(target))
        self.assertEqual(self.update_rule.t, target['t'])
        self.assertIsNone(self.update_rule.state)
@testing.backend.inject_backend_tests(None, _backend_params)
@testing.backend.inject_backend_tests(None, _backend_params)
class TestUpdateRuleCopyState(unittest.TestCase):
    """Tests that UpdateRule state arrays follow the parameter's device.

    The decorator is applied twice on purpose: each test method receives
    two independently injected backend configurations.
    """
    def setUp(self):
        self.update_rule = _create_update_rule(has_states=True)
    def test_state_copy(self, backend_config, _):
        # The second injected configuration is unused in this test.
        def update_core(param):
            self.assertIsInstance(self.update_rule.state['a'], int)
            self.assertTrue(
                backend_config.device.is_array_supported(
                    self.update_rule.state['b']))
        self.update_rule.update_core = update_core
        var = _create_var()
        var.to_device(backend_config.device)
        self.update_rule.update(var)
    def test_state_copy_to_another_device(
            self, backend_config1, backend_config2):
        def update_core(param):
            # By the time update_core runs for var2, the state array 'b'
            # must already live on the second device.
            self.assertIsInstance(self.update_rule.state['a'], int)
            self.assertTrue(
                backend_config2.device.is_array_supported(
                    self.update_rule.state['b']))
        var1 = _create_var()
        var1.to_device(backend_config1.device)
        # call update with arrays on GPU 0 (tested by another method)
        self.update_rule.update_core = lambda param: None
        self.update_rule.update(var1)
        # check if it copies the states correctly when arrays on another device
        # are passed
        self.update_rule.update_core = update_core
        var2 = _create_var()
        var2.to_device(backend_config2.device)
        self.update_rule.update(var2)
class TestOptimizer(unittest.TestCase):

    """Tests epoch bookkeeping of the base Optimizer class."""

    def setUp(self):
        self.optimizer = optimizer.Optimizer()

    def test_new_epoch(self):
        # A manual new_epoch() call bumps the counter.
        self.optimizer.new_epoch()
        self.assertEqual(self.optimizer.epoch, 1)

    def test_invalid_new_epoch(self):
        # Manual calls are rejected while automatic mode is enabled.
        self.optimizer.use_auto_new_epoch = True
        with self.assertRaises(RuntimeError):
            self.optimizer.new_epoch()

    def test_auto_new_epoch(self):
        self.optimizer.use_auto_new_epoch = True
        self.optimizer.new_epoch(auto=True)
        self.assertEqual(self.optimizer.epoch, 1)

    def test_invalid_auto_new_epoch(self):
        # Automatic calls are rejected while automatic mode is disabled.
        with self.assertRaises(RuntimeError):
            self.optimizer.new_epoch(auto=True)
@attr.chainerx
class TestOptimizerWithChainerxImplementation(unittest.TestCase):
    # This test ensures an optimizer can update ChainerX array by overriding
    # update_core_chainerx().

    # Renamed from 'test_upate' (typo) so test reports use the right name;
    # both spellings match unittest's 'test*' discovery pattern.
    def test_update(self):
        initial_p = np.array([1., 2., 3.], np.float32)
        x = chainerx.array([2., 4., 6.], np.float32)
        # One step of the rule below: p' = p + (3p - 2*dy/dp).
        # With y = 3*x*p, dy/dp = 3x, so p' = 4p - 6x.
        expected_p = 4. * initial_p - 6. * backend.CpuDevice().send(x)

        class ChainerxUpdateRule(optimizer.UpdateRule):
            call_count = 0

            def update_core_chainerx(self, param):
                # p <= 3 * p - 2 * (dy/dp)
                array = param.array
                t1 = param.array.as_grad_stopped() * 3.
                t2 = param.grad.as_grad_stopped() * 2.
                delta = t1 - t2
                array += delta
                self.call_count += 1

        class ChainerxOptimizer(optimizer.GradientMethod):
            def create_update_rule(self):
                return ChainerxUpdateRule(self.hyperparam)

        class Link(chainer.Link):
            def __init__(self):
                super(Link, self).__init__()
                with self.init_scope():
                    self.p = chainer.Parameter(initial_p)

            def forward(self, x):
                return 3. * x * self.p

        link = Link()
        link.to_device('native:0')
        y = link(x)
        y.backward()
        optimizer_ = ChainerxOptimizer()
        optimizer_.setup(link)
        optimizer_.update()
        assert link.p.update_rule.call_count == 1
        np.testing.assert_array_equal(
            backend.CpuDevice().send(link.p.array), expected_p)
class TestOptimizerHook(unittest.TestCase):
    """Tests optimizer-level hook registration, dispatch and removal."""
    def setUp(self):
        self.optimizer = optimizer.Optimizer()
        self.target = SimpleLink(
            np.arange(6, dtype=np.float32).reshape(2, 3),
            np.arange(3, -3, -1, dtype=np.float32).reshape(2, 3))
    def test_add_hook(self):
        # call_for_each_param=False: the hook receives the optimizer itself.
        h1 = mock.MagicMock(timing='pre')
        h1.call_for_each_param = False
        self.optimizer.setup(self.target)
        self.optimizer.add_hook(h1, 'h1')
        self.optimizer.call_hooks()
        h1.assert_called_with(self.optimizer)
    def test_add_hook_call_for_each_param(self):
        # call_for_each_param=True: the hook receives (update_rule, param).
        h1 = mock.MagicMock(timing='pre')
        h1.call_for_each_param = True
        self.optimizer.setup(self.target)
        self.optimizer.add_hook(h1, 'h1')
        self.optimizer.call_hooks()
        h1.assert_called_with(self.target.param.update_rule, self.target.param)
    def test_remove_hook(self):
        h1 = mock.MagicMock(timing='pre')
        self.optimizer.setup(self.target)
        self.optimizer.add_hook(h1, 'h1')
        self.optimizer.remove_hook('h1')
        self.optimizer.call_hooks()
        self.assertFalse(h1.called)
    def test_duplicated_hook(self):
        # Registering a second hook under an existing name is an error.
        self.optimizer.setup(self.target)
        self.optimizer.add_hook(lambda s: None, 'h1', timing='pre')
        with self.assertRaises(KeyError):
            self.optimizer.add_hook(lambda s: None, 'h1', timing='pre')
    def test_invalid_hook(self):
        # Non-callable hooks are rejected.
        self.optimizer.setup(self.target)
        with self.assertRaises(TypeError):
            self.optimizer.add_hook(1)
    def test_add_hook_before_setup(self):
        # Hooks cannot be added before setup() attaches a target.
        with self.assertRaises(RuntimeError):
            self.optimizer.add_hook(lambda s: None, 'h1')
class SimpleLink(chainer.Link):
    """Link with a single parameter ``param`` initialized to ``w`` whose
    gradient is preset to ``g``."""
    def __init__(self, w, g):
        super(SimpleLink, self).__init__()
        with self.init_scope():
            self.param = chainer.Parameter(w)
        self.param.grad = g
@testing.backend.inject_backend_tests(['test_update'], _backend_params)
class TestGradientMethod(unittest.TestCase):
    """Tests GradientMethod.setup and update over a two-link target."""
    def setUp(self):
        self.optimizer = chainer.GradientMethod()
        self.target = chainer.ChainList(
            SimpleLink(np.arange(3).astype(np.float32),
                       np.arange(3).astype(np.float32)),
            SimpleLink(np.arange(3).astype(np.float32),
                       np.arange(3).astype(np.float32)))
        # The MagicMock class itself: every call yields a fresh mock rule.
        self.optimizer.create_update_rule = mock.MagicMock
    def test_setup(self):
        # setup() must create one update rule per parameter (two links,
        # one parameter each), called with no arguments.
        create_update_rule = mock.MagicMock()
        target = self.target
        optimizer = self.optimizer
        optimizer.create_update_rule = create_update_rule
        optimizer.setup(target)
        self.assertEqual(create_update_rule.call_count, 2)
        self.assertEqual(create_update_rule.call_args_list[0], [(), {}])
        self.assertEqual(create_update_rule.call_args_list[1], [(), {}])
    def test_update(self, backend_config):
        # update() increments t and invokes each parameter's rule once.
        target = self.target
        optimizer = self.optimizer
        target.to_device(backend_config.device)
        optimizer.setup(target)
        self.assertEqual(optimizer.t, 0)
        optimizer.update()
        self.assertEqual(optimizer.t, 1)
        param1 = target[0].param
        param2 = target[1].param
        param1.update_rule.update.assert_called_once_with(param1)
        param2.update_rule.update.assert_called_once_with(param2)
@testing.backend.inject_backend_tests(None, _backend_params)
@testing.parameterize(*testing.product({
    'override_pattern': [
        'generic',  # only update_core() is overridden
        'cpu_gpu',  # update_core_{cpu,gpu} are overridden
        'cpu_gpu_chx',  # update_core_{cpu,gpu,chainerx} are overridden
    ],
}))
class TestGradientMethodUpdate(unittest.TestCase):
    """Ensures UpdateRule's appropriate methods are called, for various
    override patterns and parameters with various conditions."""
    def create(self, device):
        # Builds an optimizer over a link with three parameters in distinct
        # conditions (uninitialized / with grad / without grad), plus a
        # shared call_record list the update rule appends to.
        class MyLink(chainer.Link):
            def __init__(self):
                super(MyLink, self).__init__()
                with self.init_scope():
                    self.p1 = chainer.Parameter()  # uninitialized
                    self.p2 = chainer.Parameter(  # initialized, with grad
                        np.array([3, 2], np.float32))
                    self.p2.grad = np.array([13, 12], np.float32)
                    self.p3 = chainer.Parameter(  # initialized, without grad
                        np.array([5, 7], np.float32))
        call_record = []
        override_pattern = self.override_pattern
        class MyUpdateRule(optimizer.UpdateRule):
            if override_pattern == 'generic':
                def update_core(self, param):
                    call_record.append(('update_core', param))
            elif override_pattern == 'cpu_gpu':
                def update_core_cpu(self, param):
                    call_record.append(('update_core_cpu', param))
                def update_core_gpu(self, param):
                    call_record.append(('update_core_gpu', param))
            elif override_pattern == 'cpu_gpu_chx':
                def update_core_cpu(self, param):
                    call_record.append(('update_core_cpu', param))
                def update_core_gpu(self, param):
                    call_record.append(('update_core_gpu', param))
                def update_core_chainerx(self, param):
                    call_record.append(('update_core_chainerx', param))
            else:
                assert False, override_pattern
        class MyOptimizer(optimizer.GradientMethod):
            def create_update_rule(self):
                return MyUpdateRule()
        optimizer_ = MyOptimizer()
        target = MyLink()
        target.to_device(device)
        optimizer_.setup(target)
        return optimizer_, call_record
    def test_update(self, backend_config):
        device = backend_config.device
        override_pattern = self.override_pattern
        optimizer, call_record = self.create(device)
        optimizer.update()
        # All three parameters must have been dispatched to the same method.
        self.assertEqual(len(call_record), 3)
        # Determine the expected method name that was called.
        if override_pattern == 'generic':
            method_name = 'update_core'
        elif override_pattern == 'cpu_gpu':
            # Without a chainerx override, a ChainerX device falls back to
            # its fallback device's xp for cpu/gpu dispatch.
            if isinstance(device, backend.ChainerxDevice):
                xp = device.fallback_device.xp
            else:
                xp = device.xp
            if xp is np:
                method_name = 'update_core_cpu'
            else:
                assert xp is cuda.cupy
                method_name = 'update_core_gpu'
        elif override_pattern == 'cpu_gpu_chx':
            if isinstance(device, backend.ChainerxDevice):
                method_name = 'update_core_chainerx'
            elif device.xp is np:
                method_name = 'update_core_cpu'
            else:
                assert device.xp is cuda.cupy
                method_name = 'update_core_gpu'
        else:
            assert False, override_pattern
        # Check call record.
        # TODO(niboshi): Check the param argument as well.
        self.assertEqual(call_record[0][0], method_name)
        self.assertEqual(call_record[1][0], method_name)
        self.assertEqual(call_record[2][0], method_name)
@testing.backend.inject_backend_tests(None, _backend_params)
@testing.parameterize(*testing.product({
    'shape': [(4, 3, 2)],
    'dtype': [np.float16, np.float32, np.float64],
    'loss_scale': [None, 1, 10],
}))
class TestGradientMethodLossScale(unittest.TestCase):
    """Tests that loss-scaled gradients are un-scaled during the update."""
    def setUp(self):
        # Each parameter's grad equals its data, so one SGD step with the
        # lr chosen below should drive every parameter to zero.
        param0_data = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        param0_grad = np.copy(param0_data)
        param1_data = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        param1_grad = np.copy(param1_data)
        self.target = chainer.ChainList(
            SimpleLink(param0_data, param0_grad),
            SimpleLink(param1_data, param1_grad))
        # lr = loss_scale compensates for the grad division by loss_scale.
        lr = 1.0
        if self.loss_scale is not None:
            lr = self.loss_scale
            for i in range(2):
                self.target[i].param._loss_scale = self.loss_scale
        # TODO(niboshi): Do not use SGD in GradientMethod test
        self.optimizer = chainer.optimizers.SGD(lr)
    def test_update(self, backend_config):
        if backend_config.device.name == '@cupy:1':
            # TODO(niboshi): Fix it
            raise unittest.SkipTest(
                'Loss scale does not work with cupy multi-device.')
        target = self.target
        optimizer = self.optimizer
        target.to_device(backend_config.device)
        optimizer.setup(target)
        optimizer.update()
        xp = backend.get_array_module(target[0].param)
        expected_data = xp.zeros(self.shape, dtype=self.dtype)
        # float16 needs far looser tolerances.
        rtol, atol = 1e-4, 1e-5
        if self.dtype is np.float16:
            rtol, atol = 1e-1, 1e-2
        for i in range(2):
            testing.assert_allclose(
                target[i].param.data, expected_data,
                rtol=rtol, atol=atol)
@testing.backend.inject_backend_tests(None, _backend_params)
class TestCleargradHook(unittest.TestCase):
    """Tests that an update survives a hook clearing all gradients."""
    def setUp(self):
        self.target = SimpleLink(
            np.arange(6, dtype=np.float32).reshape(2, 3),
            np.arange(3, -3, -1, dtype=np.float32).reshape(2, 3))
    def test_cleargrad(self, backend_config):
        class CleargradHook(object):
            name = 'Cleargrad'
            timing = 'pre'
            def __init__(self, _):
                pass
            def __call__(self, opt):
                for param in opt.target.params():
                    # Clear all grads
                    param.cleargrad()
        target = self.target
        target.to_device(backend_config.device)
        # TODO(niboshi): Do not use SGD in GradientMethod test
        opt = optimizers.SGD(lr=1)
        opt.setup(target)
        # DummyHook (module level, below) then asserts grads are non-None —
        # presumably the optimizer reallocates cleared grads between hooks;
        # verify against GradientMethod's hook handling.
        opt.add_hook(CleargradHook(self))
        opt.add_hook(DummyHook(self))
        opt.update()
class DummyOptimizer(chainer.GradientMethod):
    """GradientMethod whose update rules are MagicMocks (no real updates)."""
    def __init__(self, test):
        super(DummyOptimizer, self).__init__()
        # Owning TestCase, kept so helpers can use its assertions.
        self.test = test
    def create_update_rule(self):
        return mock.MagicMock()
class DummyHook(object):
    """Pre-update optimizer hook asserting every parameter has a grad."""
    name = 'Dummy'
    timing = 'pre'
    def __init__(self, test):
        # TestCase whose assertions are used inside the hook.
        self.test = test
    def __call__(self, opt):
        for param in opt.target.params():
            # Confirm all grads are not None
            self.test.assertIsNotNone(param.grad)
@testing.backend.inject_backend_tests(None, _backend_params)
class TestGradientMethodClearGrads(unittest.TestCase):
    """Tests update() after cleargrads(): DummyHook asserts grads exist,
    so cleared grads are presumably reallocated before hooks run —
    verify against GradientMethod's implementation."""
    def setUp(self):
        self.optimizer = DummyOptimizer(self)
        self.target = SimpleLink(
            np.arange(3).astype(np.float32),
            np.arange(3).astype(np.float32))
        self.optimizer.setup(self.target)
        self.optimizer.add_hook(DummyHook(self))
    def test_update(self, backend_config):
        target = self.target
        optimizer = self.optimizer
        target.to_device(backend_config.device)
        target.cleargrads()
        optimizer.update()
class TestDeprecatedOptimizerHooksEmitsWarning(unittest.TestCase):
    """Each deprecated chainer.optimizer hook class must emit exactly one
    DeprecationWarning when constructed."""
    def setUp(self):
        # Capture warnings for the duration of each test.
        self.context = warnings.catch_warnings(record=True)
        self.warnings = self.context.__enter__()
        warnings.filterwarnings(action='always', category=DeprecationWarning)
    def tearDown(self):
        self.context.__exit__()
    def test_gradient_clipping(self):
        chainer.optimizer.GradientClipping(1.)
        self.assertEqual(len(self.warnings), 1)
        self.assertIs(self.warnings[-1].category, DeprecationWarning)
    def test_gradient_hard_clipping(self):
        chainer.optimizer.GradientHardClipping(1., 2.)
        self.assertEqual(len(self.warnings), 1)
        self.assertIs(self.warnings[-1].category, DeprecationWarning)
    def test_gradient_noise(self):
        chainer.optimizer.GradientNoise(1.)
        self.assertEqual(len(self.warnings), 1)
        self.assertIs(self.warnings[-1].category, DeprecationWarning)
    def test_lasso(self):
        chainer.optimizer.Lasso(1.)
        self.assertEqual(len(self.warnings), 1)
        self.assertIs(self.warnings[-1].category, DeprecationWarning)
    def test_weight_decay(self):
        chainer.optimizer.WeightDecay(1.)
        self.assertEqual(len(self.warnings), 1)
        self.assertIs(self.warnings[-1].category, DeprecationWarning)
@testing.parameterize(*testing.product({
    # None: dtype is not given by initializer.
    # Otherwise: it's given by initializer.
    'dtype': [None, np.float16, np.float32, np.float64]
}))
class TestUpdateRuleUseFp32Update(unittest.TestCase):
    """Tests use_fp32_update() on an uninitialized parameter: for fp16 the
    rule must update a float32 surrogate; otherwise the original parameter
    is passed through unchanged."""
    def test_uninitialized_parameter(self):
        dtype = self.dtype
        def initializer(array):
            assert False  # never called
        # Set initializer.dtype to specify the parameter's dtype
        if dtype is not None:
            initializer.dtype = dtype
        # Create an uninitialized parameter
        param = chainer.Parameter(initializer)
        assert param.array is None
        if dtype is not None:
            assert param.dtype == dtype
        # Create an update rule with custom update_core
        record = []
        update_rule = chainer.UpdateRule()
        def update_core(param):
            # param.dtype may not be retrieved because it can be uninitialized
            # and dtype is not given (i.e. self.dtype is None)
            try:
                param_dtype = param.dtype
            except RuntimeError:
                param_dtype = None
            record.append({
                'param': param,
                'dtype': param_dtype,
            })
        update_rule.update_core = update_core
        # Enable fp32 update
        update_rule.use_fp32_update()
        # Call update_rule.update
        update_rule.update(param)
        if dtype == np.float16:
            # fp16 parameters are substituted by a float32 copy.
            assert record[0]['param'] is not param
            assert record[0]['dtype'] == np.float32
        else:
            assert record[0]['param'] is param
            assert record[0]['dtype'] == dtype
        # The original parameter is kept uninitialized and its dtype is
        # unchanged.
        assert param.array is None
        if dtype is not None:
            assert param.dtype == dtype
        else:
            with pytest.raises(RuntimeError):
                param.dtype
# Hook this module into Chainer's standard test runner.
testing.run_module(__name__, __file__)
|
|
import os
import re
import datetime
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse as urlreverse
from django.template.loader import render_to_string
from ietf.idtracker.models import (InternetDraft, PersonOrOrgInfo, IETFWG,
IDAuthor, EmailAddress, IESGLogin, BallotInfo)
from ietf.submit.models import TempIdAuthors, IdSubmissionDetail, Preapproval
from ietf.utils.mail import send_mail, send_mail_message
from ietf.utils.log import log
from ietf.utils import unaccent
from ietf.ietfauth.decorators import has_role
from ietf.doc.models import *
from ietf.person.models import Person, Alias, Email
from ietf.doc.utils import add_state_change_event
from ietf.message.models import Message
# Some useful states
# Numeric status codes stored in IdSubmissionDetail.status_id.  Negative
# values appear to mark completed/terminal submissions — confirm against
# the status table before relying on that.
UPLOADED = 1
AWAITING_AUTHENTICATION = 4
MANUAL_POST_REQUESTED = 5
POSTED = -1
POSTED_BY_SECRETARIAT = -2
CANCELLED = -4
INITIAL_VERSION_APPROVAL_REQUESTED = 10
# Not a real WG
# Sentinel group primary key used when a submission has no real WG.
NONE_WG = 1027
def request_full_url(request, submission):
    """E-mail the confirmation addresses the full status URL for a draft.

    The URL embeds the submission id and its hash, built from the
    'draft_status_by_hash' view.
    """
    subject = 'Full URL for managing submission of draft %s' % submission.filename
    from_email = settings.IDSUBMIT_FROM_EMAIL
    to_email = submission.confirmation_email_list()
    url = \
        'ftp://{0}:{1}@{2}{3}{4}'.format(self.feed.source_username,
            self.feed.source_password, self.feed.source_url,
            source_root_folder, s) if False else (
        settings.IDTRACKER_BASE_URL + urlreverse('draft_status_by_hash',
                       kwargs=dict(submission_id=submission.submission_id,
                                   submission_hash=submission.get_hash())))
    send_mail(request, to_email, from_email, subject, 'submit/request_full_url.txt',
              {'submission': submission,
               'url': url})
def perform_post(request, submission):
    """Post a submitted draft using the legacy schema.

    Updates (or creates) the InternetDraft row, syncs authors, advances
    the IESG sub-state when applicable, moves the files into place, marks
    the submission POSTED and sends the announcements.
    """
    group_id = submission.group_acronym and submission.group_acronym.pk or NONE_WG
    state_change_msg = ''
    try:
        # Existing draft: refresh its metadata from the submission.
        draft = InternetDraft.objects.get(filename=submission.filename)
        draft.title = submission.id_document_name
        draft.group_id = group_id
        draft.filename = submission.filename
        draft.revision = submission.revision
        draft.revision_date = submission.submission_date
        draft.file_type = submission.file_type
        draft.txt_page_count = submission.txt_page_count
        draft.last_modified_date = datetime.date.today()
        draft.abstract = submission.abstract
        draft.status_id = 1  # Active
        draft.expired_tombstone = 0
        draft.save()
    except InternetDraft.DoesNotExist:
        # First revision of this name: create the draft record.
        draft = InternetDraft.objects.create(
            title=submission.id_document_name,
            group_id=group_id,
            filename=submission.filename,
            revision=submission.revision,
            revision_date=submission.submission_date,
            file_type=submission.file_type,
            txt_page_count=submission.txt_page_count,
            start_date=datetime.date.today(),
            last_modified_date=datetime.date.today(),
            abstract=submission.abstract,
            status_id=1,  # Active
            intended_status_id=8,  # None
        )
    update_authors(draft, submission)
    if draft.idinternal:
        from ietf.idrfc.utils import add_document_comment
        add_document_comment(None, draft, "New version available")
        if draft.idinternal.cur_sub_state_id == 5 and draft.idinternal.rfc_flag == 0:  # Substate 5 Revised ID Needed
            draft.idinternal.prev_sub_state_id = draft.idinternal.cur_sub_state_id
            draft.idinternal.cur_sub_state_id = 2  # Substate 2 AD Followup
            draft.idinternal.save()
            state_change_msg = "Sub state has been changed to AD Follow up from New Id Needed"
            add_document_comment(None, draft, state_change_msg)
    move_docs(submission)
    submission.status_id = POSTED
    send_announcements(submission, draft, state_change_msg)
    submission.save()
def perform_postREDESIGN(request, submission):
    """Post a submitted draft using the redesigned Document schema.

    Creates/updates the Document, infers the stream from the name, records
    a NewRevision event, applies automatic state changes, archives the old
    revision's files, marks the submission POSTED and sends announcements.
    """
    system = Person.objects.get(name="(System)")
    group_id = submission.group_acronym_id or NONE_WG
    try:
        draft = Document.objects.get(name=submission.filename)
        save_document_in_history(draft)
    except Document.DoesNotExist:
        draft = Document(name=submission.filename)
        draft.intended_std_level = None
    prev_rev = draft.rev
    draft.type_id = "draft"
    draft.time = datetime.datetime.now()
    draft.title = submission.id_document_name
    if not (group_id == NONE_WG and draft.group and draft.group.type_id == "area"):
        # don't overwrite an assigned area if it's still an individual
        # submission
        draft.group_id = group_id
    draft.rev = submission.revision
    draft.pages = submission.txt_page_count
    draft.abstract = submission.abstract
    was_rfc = draft.get_state_slug() == "rfc"
    if not draft.stream:
        # Infer the stream from the draft name prefix.
        stream_slug = None
        if draft.name.startswith("draft-iab-"):
            stream_slug = "iab"
        elif draft.name.startswith("draft-irtf-"):
            stream_slug = "irtf"
        elif draft.name.startswith("draft-ietf-") and (draft.group.type_id != "individ" or was_rfc):
            stream_slug = "ietf"
        if stream_slug:
            draft.stream = StreamName.objects.get(slug=stream_slug)
    draft.expires = datetime.datetime.now() + datetime.timedelta(settings.INTERNET_DRAFT_DAYS_TO_EXPIRE)
    draft.save()
    # Author row with order 0 is the submitter (see update_authors).
    a = submission.tempidauthors_set.filter(author_order=0)
    if a:
        submitter = ensure_person_email_info_exists(a[0]).person
    else:
        submitter = system
    draft.set_state(State.objects.get(used=True, type="draft", slug="active"))
    DocAlias.objects.get_or_create(name=submission.filename, document=draft)
    update_authors(draft, submission)
    # new revision event
    e = NewRevisionDocEvent(type="new_revision", doc=draft, rev=draft.rev)
    e.time = draft.time  # submission.submission_date
    e.by = submitter
    e.desc = "New version available: <b>%s-%s.txt</b>" % (draft.name, draft.rev)
    e.save()
    if draft.stream_id == "ietf" and draft.group.type_id == "wg" and draft.rev == "00":
        # automatically set state "WG Document"
        draft.set_state(State.objects.get(used=True, type="draft-stream-%s" % draft.stream_id, slug="wg-doc"))
    if draft.get_state_slug("draft-iana-review") in ("ok-act", "ok-noact", "not-ok"):
        # A completed IANA review is reset to "changed" by the new revision.
        prev_state = draft.get_state("draft-iana-review")
        next_state = State.objects.get(used=True, type="draft-iana-review", slug="changed")
        draft.set_state(next_state)
        add_state_change_event(draft, submitter, prev_state, next_state)
    # clean up old files
    if prev_rev != draft.rev:
        from ietf.idrfc.expire import move_draft_files_to_archive
        move_draft_files_to_archive(draft, prev_rev)
    # automatic state changes
    state_change_msg = ""
    if not was_rfc and draft.tags.filter(slug="need-rev"):
        # "Revised ID Needed" is satisfied by this upload: flip to
        # "AD Followup" and record the change.
        draft.tags.remove("need-rev")
        draft.tags.add("ad-f-up")
        e = DocEvent(type="changed_document", doc=draft)
        e.desc = "Sub state has been changed to <b>AD Followup</b> from <b>Revised ID Needed</b>"
        e.by = system
        e.save()
        state_change_msg = e.desc
    move_docs(submission)
    submission.status_id = POSTED
    announce_to_lists(request, submission)
    announce_new_version(request, submission, draft, state_change_msg)
    announce_to_authors(request, submission)
    submission.save()
if settings.USE_DB_REDESIGN_PROXY_CLASSES:
    perform_post = perform_postREDESIGN
def send_announcements(submission, draft, state_change_msg, request=None):
    """Send all posting announcements for a newly posted draft.

    Bug fix: the body used the name ``request`` which was not defined in
    this scope, so any call raised NameError.  ``request`` is now an
    explicit parameter; it defaults to None so the existing caller
    (perform_post), which passes only three arguments, keeps working.
    """
    announce_to_lists(request, submission)
    # Only notify the IESG tracking contacts for tracked, non-RFC drafts.
    if draft.idinternal and not draft.idinternal.rfc_flag:
        announce_new_version(request, submission, draft, state_change_msg)
    announce_to_authors(request, submission)
def announce_to_lists(request, submission):
    """Announce the new I-D to the announcement list (cc'ing the WG list).

    Uses the Message model path under the redesigned schema, plain
    send_mail otherwise.
    """
    authors = []
    for i in submission.tempidauthors_set.order_by('author_order'):
        # Order 0 is the submitter entry; only real authors are listed.
        if not i.author_order:
            continue
        authors.append(i.get_full_name())
    if settings.USE_DB_REDESIGN_PROXY_CLASSES:
        m = Message()
        m.by = Person.objects.get(name="(System)")
        if request.user.is_authenticated():
            try:
                m.by = request.user.get_profile()
            except Person.DoesNotExist:
                pass
        m.subject = 'I-D Action: %s-%s.txt' % (submission.filename, submission.revision)
        m.frm = settings.IDSUBMIT_ANNOUNCE_FROM_EMAIL
        m.to = settings.IDSUBMIT_ANNOUNCE_LIST_EMAIL
        if submission.group_acronym:
            m.cc = submission.group_acronym.email_address
        m.body = render_to_string('submit/announce_to_lists.txt', dict(submission=submission,
                                                                       authors=authors,
                                                                       settings=settings,))
        m.save()
        m.related_docs.add(Document.objects.get(name=submission.filename))
        send_mail_message(request, m)
    else:
        subject = 'I-D Action: %s-%s.txt' % (submission.filename, submission.revision)
        from_email = settings.IDSUBMIT_ANNOUNCE_FROM_EMAIL
        to_email = [settings.IDSUBMIT_ANNOUNCE_LIST_EMAIL]
        if submission.group_acronym:
            cc = [submission.group_acronym.email_address]
        else:
            cc = None
        send_mail(request, to_email, from_email, subject, 'submit/announce_to_lists.txt',
                  {'submission': submission,
                   'authors': authors}, cc=cc, save_message=True)
def announce_new_version(request, submission, draft, state_change_msg):
    """Notify tracking contacts of a new revision (legacy schema).

    Recipients: the state-change notice address, the responsible AD, and
    any AD holding a Discuss position on the active ballot.
    """
    to_email = []
    if draft.idinternal.state_change_notice_to:
        to_email.append(draft.idinternal.state_change_notice_to)
    if draft.idinternal.job_owner:
        to_email.append(draft.idinternal.job_owner.person.email()[1])
    try:
        if draft.idinternal.ballot:
            for p in draft.idinternal.ballot.positions.all():
                # Discuss positions held by sitting ADs get a copy.
                if p.discuss == 1 and p.ad.user_level == IESGLogin.AD_LEVEL:
                    to_email.append(p.ad.person.email()[1])
    except BallotInfo.DoesNotExist:
        pass
    subject = 'New Version Notification - %s-%s.txt' % (submission.filename, submission.revision)
    from_email = settings.IDSUBMIT_ANNOUNCE_FROM_EMAIL
    send_mail(request, to_email, from_email, subject, 'submit/announce_new_version.txt',
              {'submission': submission,
               'msg': state_change_msg})
def announce_new_versionREDESIGN(request, submission, draft, state_change_msg):
    """Notify tracking contacts of a new revision (redesigned schema).

    Recipients: the draft's notify address, the responsible AD, the
    stream's manager address, the RFC Editor when already in their queue,
    and ADs with an active Discuss.  Nothing is sent if no recipient.
    """
    to_email = []
    if draft.notify:
        to_email.append(draft.notify)
    if draft.ad:
        to_email.append(draft.ad.role_email("ad").address)
    if draft.stream_id == "iab":
        to_email.append("IAB Stream <iab-stream@iab.org>")
    elif draft.stream_id == "ise":
        to_email.append("Independent Submission Editor <rfc-ise@rfc-editor.org>")
    elif draft.stream_id == "irtf":
        to_email.append("IRSG <irsg@irtf.org>")
    # if it has been sent to the RFC Editor, keep them in the loop
    if draft.get_state_slug("draft-iesg") in ("ann", "rfcqueue"):
        to_email.append("RFC Editor <rfc-editor@rfc-editor.org>")
    active_ballot = draft.active_ballot()
    if active_ballot:
        for ad, pos in active_ballot.active_ad_positions().iteritems():
            if pos and pos.pos_id == "discuss":
                to_email.append(ad.role_email("ad").address)
    if to_email:
        subject = 'New Version Notification - %s-%s.txt' % (submission.filename, submission.revision)
        from_email = settings.IDSUBMIT_ANNOUNCE_FROM_EMAIL
        send_mail(request, to_email, from_email, subject, 'submit/announce_new_version.txt',
                  {'submission': submission,
                   'msg': state_change_msg})
if settings.USE_DB_REDESIGN_PROXY_CLASSES:
    announce_new_version = announce_new_versionREDESIGN
def announce_to_authors(request, submission):
    """Send the new-version notification to the authors and confirmers."""
    authors = submission.tempidauthors_set.all()
    # i.email() yields a (name, address) pair formatted as 'Name <addr>'.
    to_email = list(set(submission.confirmation_email_list() + [u'%s <%s>' % i.email() for i in authors]))
    from_email = settings.IDSUBMIT_ANNOUNCE_FROM_EMAIL
    subject = 'New Version Notification for %s-%s.txt' % (submission.filename, submission.revision)
    if submission.group_acronym:
        wg = submission.group_acronym.group_acronym.acronym
    elif submission.filename.startswith('draft-iesg'):
        wg = 'IESG'
    else:
        wg = 'Individual Submission'
    # NOTE(review): authors[0] is assumed to be the submitter, but the
    # queryset is not explicitly ordered — confirm the default ordering.
    send_mail(request, to_email, from_email, subject, 'submit/announce_to_authors.txt',
              {'submission': submission,
               'submitter': authors[0].get_full_name(),
               'wg': wg})
def find_person(first_name, last_name, middle_initial, name_suffix, email):
    """Locate an existing PersonOrOrgInfo matching the given author data.

    A unique e-mail match wins outright.  Otherwise the candidate set
    (the e-mail matches when there were several, else all persons) is
    narrowed by the name parts.  Returns the first match or None.
    """
    candidates = None
    if email:
        candidates = PersonOrOrgInfo.objects.filter(
            emailaddress__address=email).distinct()
        if candidates and len(candidates) == 1:
            return candidates[0]
    if not candidates:
        candidates = PersonOrOrgInfo.objects.all()
    candidates = candidates.filter(first_name=first_name,
                                   last_name=last_name)
    if middle_initial:
        candidates = candidates.filter(middle_initial=middle_initial)
    if name_suffix:
        candidates = candidates.filter(name_suffix=name_suffix)
    return candidates[0] if candidates else None
def update_authors(draft, submission):
    """Synchronize the draft's IDAuthor rows with the submission's authors.

    Creates PersonOrOrgInfo/EmailAddress records for unknown authors and
    removes authorship rows for people no longer listed on the submission.
    """
    # TempAuthor of order 0 is submitter
    new_authors = list(submission.tempidauthors_set.filter(author_order__gt=0))
    person_pks = []
    for author in new_authors:
        person = find_person(author.first_name, author.last_name,
                             author.middle_initial, author.name_suffix,
                             author.email_address)
        if not person:
            # No existing match: register a brand-new person record.
            person = PersonOrOrgInfo(
                first_name=author.first_name,
                last_name=author.last_name,
                middle_initial=author.middle_initial or '',
                name_suffix=author.name_suffix or '',
            )
            person.save()
            if author.email_address:
                EmailAddress.objects.create(
                    address=author.email_address,
                    priority=1,
                    type='INET',
                    person_or_org=person,
                )
        person_pks.append(person.pk)
        try:
            # Reuse an existing authorship row, refreshing only its position.
            idauthor = IDAuthor.objects.get(
                document=draft,
                person=person,
            )
            idauthor.author_order = author.author_order
        except IDAuthor.DoesNotExist:
            idauthor = IDAuthor(
                document=draft,
                person=person,
                author_order=author.author_order,
            )
        idauthor.save()
    # Drop authorship rows for people not in the new author list.
    draft.authors.exclude(person__pk__in=person_pks).delete()
def get_person_from_author(author):
    """Map a temporary submission author to a Person, or None."""
    matches = None
    # try email
    if author.email_address:
        matches = Person.objects.filter(email__address=author.email_address).distinct()
        if len(matches) == 1:
            return matches[0]
    if not matches:
        matches = Person.objects.all()
    # try full name
    by_name = matches.filter(alias__name=author.get_full_name()).distinct()
    return by_name[0] if by_name else None
def ensure_person_email_info_exists(author):
    """Return an Email record for the given author, creating Person, Alias
    and Email rows as needed.

    The returned Email may belong to a different Person when the address is
    already registered to someone else. Newly created Email rows are marked
    inactive.
    """
    person = get_person_from_author(author)
    # make sure we got a person
    if not person:
        person = Person()
        person.name = author.get_full_name()
        person.ascii = unaccent.asciify(person.name)
        person.save()
        Alias.objects.create(name=person.name, person=person)
        if person.name != person.ascii:
            # Fix: the original passed the bare name `ascii`, which is not a
            # Python 2 builtin (NameError) -- the ASCII alias was intended.
            Alias.objects.create(name=person.ascii, person=person)
    # make sure we got an email address
    if author.email_address:
        addr = author.email_address.lower()
    else:
        # we're in trouble, use a fake one
        addr = u"unknown-email-%s" % person.name.replace(" ", "-")
    try:
        email = person.email_set.get(address=addr)
    except Email.DoesNotExist:
        try:
            # maybe it's pointing to someone else
            email = Email.objects.get(address=addr)
        except Email.DoesNotExist:
            # most likely we just need to create it
            email = Email(address=addr)
            email.active = False
            email.person = person
        email.save()
    return email
def update_authorsREDESIGN(draft, submission):
    """Rebuild the draft's DocumentAuthor rows from the submission authors."""
    # order 0 is submitter
    kept = []
    for temp_author in submission.tempidauthors_set.exclude(author_order=0).order_by('author_order'):
        email = ensure_person_email_info_exists(temp_author)
        existing = DocumentAuthor.objects.filter(document=draft, author=email)
        doc_author = existing[0] if existing else DocumentAuthor(document=draft, author=email)
        doc_author.order = temp_author.author_order
        doc_author.save()
        kept.append(email)
    # Remove authorships that are no longer listed.
    draft.documentauthor_set.exclude(author__in=kept).delete()
# Under the DB redesign, swap in the redesigned author-update implementation.
if settings.USE_DB_REDESIGN_PROXY_CLASSES:
    update_authors = update_authorsREDESIGN
def get_person_for_user(user):
    """Return the Person profile for a Django user, or None if unavailable.

    Returns None rather than raising when the user has no profile (or the
    profile lookup fails for any other reason).
    """
    try:
        return user.get_profile().person()
    except Exception:
        # Fix: the bare `except:` also swallowed KeyboardInterrupt/SystemExit;
        # catching Exception keeps the None-on-failure contract without that.
        return None
def is_secretariat(user):
    """True when the user is authenticated and in the Secretariat group."""
    if user and user.is_authenticated():
        return bool(user.groups.filter(name='Secretariat'))
    return False
# Under the DB redesign, the liaisons app provides these account helpers;
# shadow the local definitions above.
if settings.USE_DB_REDESIGN_PROXY_CLASSES:
    from ietf.liaisons.accounts import is_secretariat, get_person_for_user
def move_docs(submission):
    """Move each uploaded file for the submission from the staging area to
    the permanent repository.

    Raises ValueError when neither the staging copy nor the repository copy
    of a file exists.
    """
    for ext in submission.file_type.split(','):
        source = os.path.join(settings.IDSUBMIT_STAGING_PATH, '%s-%s%s' % (submission.filename, submission.revision, ext))
        dest = os.path.join(settings.IDSUBMIT_REPOSITORY_PATH, '%s-%s%s' % (submission.filename, submission.revision, ext))
        if os.path.exists(source):
            os.rename(source, dest)
        else:
            if os.path.exists(dest):
                # Fix: the original never applied arguments to these format
                # strings, so the messages showed literal '%s' placeholders.
                log("Intended to move '%s' to '%s', but found source missing while destination exists." % (source, dest))
            else:
                raise ValueError("Intended to move '%s' to '%s', but found source and destination missing." % (source, dest))
def remove_docs(submission):
    """Delete all staged files belonging to the submission, if present."""
    basename = '%s-%s' % (submission.filename, submission.revision)
    for ext in submission.file_type.split(','):
        staged = os.path.join(settings.IDSUBMIT_STAGING_PATH, basename + ext)
        if os.path.exists(staged):
            os.unlink(staged)
def get_approvable_submissions(user):
    """Submissions awaiting initial-version approval that `user` may act on."""
    if not user.is_authenticated():
        return []
    pending = IdSubmissionDetail.objects.filter(status=INITIAL_VERSION_APPROVAL_REQUESTED).order_by('-submission_date')
    # The Secretariat sees everything; others only groups they chair.
    if has_role(user, "Secretariat"):
        return pending
    return pending.filter(group_acronym__role__name="chair", group_acronym__role__person__user=user)
def get_preapprovals(user):
    """Outstanding draft-name preapprovals visible to `user`."""
    if not user.is_authenticated():
        return []
    # Names that already have a posted submission no longer need preapproval.
    posted_names = IdSubmissionDetail.objects.distinct().filter(status__in=[POSTED, POSTED_BY_SECRETARIAT]).values_list('filename', flat=True)
    pending = Preapproval.objects.exclude(name__in=posted_names).order_by("-time").select_related('by')
    if has_role(user, "Secretariat"):
        return pending
    # Non-secretariat users only see preapprovals for WGs they have a role in.
    acronyms = [g.acronym for g in Group.objects.filter(role__person__user=user, type="wg")]
    return pending.filter(name__regex="draft-[^-]+-(%s)-.*" % "|".join(acronyms))
def get_recently_approved(user, since):
    """Initial (-00) submissions posted since `since` that `user` may see."""
    if not user.is_authenticated():
        return []
    approved = IdSubmissionDetail.objects.distinct().filter(status__in=[POSTED, POSTED_BY_SECRETARIAT], submission_date__gte=since, revision="00").order_by('-submission_date')
    if has_role(user, "Secretariat"):
        return approved
    # those we can reach as chair
    return approved.filter(group_acronym__role__name="chair", group_acronym__role__person__user=user)
class DraftValidation(object):
    """Validates the metadata of a submitted draft, accumulating warnings.

    Warnings are stored in self.warnings keyed by the metadata field they
    concern; is_valid() additionally requires that idnits passed.
    """
    def __init__(self, draft):
        self.draft = draft
        # field-name -> warning message
        self.warnings = {}
        # NOTE: this rebinds the attribute over the method of the same name,
        # so passes_idnits() is only callable before this line runs.
        self.passes_idnits = self.passes_idnits()
        self.wg = self.get_working_group()
        self.authors = self.get_authors()
        self.submitter = self.get_submitter()
    def passes_idnits(self):
        """Return True when the stored idnits output reports no nits."""
        passes_idnits = self.check_idnits_success(self.draft.idnits_message)
        return passes_idnits
    def get_working_group(self):
        """Return the draft's group, or None for the 'none' placeholder group."""
        if self.draft.group_acronym and self.draft.group_acronym.pk == NONE_WG:
            return None
        return self.draft.group_acronym
    def check_idnits_success(self, idnits_message):
        """True when the idnits output indicates zero nits."""
        if not idnits_message:
            return False
        # Matches either a zero summary count or the explicit all-clear text.
        success_re = re.compile('\s+Summary:\s+0\s+|No nits found')
        if success_re.search(idnits_message):
            return True
        return False
    def is_valid_attr(self, key):
        """True when no warning has been recorded for the given field."""
        if key in self.warnings.keys():
            return False
        return True
    def is_valid(self):
        """Run all metadata checks; True when warning-free and idnits passed."""
        self.validate_metadata()
        return not bool(self.warnings.keys()) and self.passes_idnits
    def validate_metadata(self):
        """Run every field-level validation, populating self.warnings."""
        self.validate_revision()
        self.validate_title()
        self.validate_authors()
        self.validate_abstract()
        self.validate_creation_date()
        self.validate_wg()
        self.validate_files()
    def validate_files(self):
        """Warn when any uploaded file is missing from the staging area."""
        if self.draft.status_id in [POSTED, POSTED_BY_SECRETARIAT]:
            # Already posted: files have been moved out of staging.
            return
        for ext in self.draft.file_type.split(','):
            source = os.path.join(settings.IDSUBMIT_STAGING_PATH, '%s-%s%s' % (self.draft.filename, self.draft.revision, ext))
            if not os.path.exists(source):
                self.add_warning('document_files', '"%s" were not found in the staging area.<br />We recommend you that you cancel this submission and upload your files again.' % os.path.basename(source))
                break
    def validate_title(self):
        if not self.draft.id_document_name:
            self.add_warning('title', 'Title is empty or was not found')
    def validate_wg(self):
        if self.wg and not self.wg.status_id == IETFWG.ACTIVE:
            self.add_warning('group', 'Group exists but is not an active group')
    def validate_abstract(self):
        if not self.draft.abstract:
            self.add_warning('abstract', 'Abstract is empty or was not found')
    def add_warning(self, key, value):
        """Record (or overwrite) the warning for a metadata field."""
        self.warnings.update({key: value})
    def validate_revision(self):
        """Warn unless the revision is exactly one past the latest known one."""
        if self.draft.status_id in [POSTED, POSTED_BY_SECRETARIAT]:
            return
        revision = self.draft.revision
        existing_revisions = [int(i.revision_display()) for i in InternetDraft.objects.filter(filename=self.draft.filename)]
        expected = 0
        if existing_revisions:
            expected = max(existing_revisions) + 1
        try:
            if int(revision) != expected:
                self.add_warning('revision', 'Invalid Version Number (Version %02d is expected)' % expected)
        except ValueError:
            # The revision string was not numeric.
            self.add_warning('revision', 'Revision not found')
    def validate_authors(self):
        if not self.authors:
            self.add_warning('authors', 'No authors found')
            return
    def validate_creation_date(self):
        """Warn when the creation date is absent or >3 days from submission."""
        date = self.draft.creation_date
        if not date:
            self.add_warning('creation_date', 'Creation Date field is empty or the creation date is not in a proper format')
            return
        submit_date = self.draft.submission_date
        if (date + datetime.timedelta(days=3) < submit_date or
            date - datetime.timedelta(days=3) > submit_date):
            self.add_warning('creation_date', 'Creation Date must be within 3 days of submission date')
    def get_authors(self):
        """Non-submitter authors, ordered by author_order."""
        return self.draft.tempidauthors_set.exclude(author_order=0).order_by('author_order')
    def get_submitter(self):
        """Return the submitter (author_order 0), a PersonOrOrgInfo fallback,
        False when submitter_tag points at a missing person, or None."""
        submitter = self.draft.tempidauthors_set.filter(author_order=0)
        if submitter:
            return submitter[0]
        elif self.draft.submitter_tag:
            try:
                return PersonOrOrgInfo.objects.get(pk=self.draft.submitter_tag)
            except PersonOrOrgInfo.DoesNotExist:
                return False
        return None
|
|
import numpy as np
from ..core import *
from ..distributions import *
from ..tuning.starting import find_MAP
import patsy
import theano
import pandas as pd
from collections import defaultdict
from statsmodels.formula.api import glm as glm_sm
import statsmodels.api as sm
from pandas.tools.plotting import scatter_matrix
from . import links
from . import families
def linear_component(formula, data, priors=None,
                     intercept_prior=None,
                     regressor_prior=None,
                     init=True, init_vals=None, family=None,
                     model=None):
    """Create linear model according to patsy specification.

    Parameters
    ----------
    formula : str
        Patsy linear model descriptor.
    data : array
        Labeled array (e.g. pandas DataFrame, recarray).
    priors : dict
        Mapping prior name to prior distribution.
        E.g. {'Intercept': Normal.dist(mu=0, sd=1)}
    intercept_prior : pymc distribution
        Prior to use for the intercept.
        Default: Normal.dist(mu=0, tau=1.0E-12)
    regressor_prior : pymc distribution
        Prior to use for all regressor(s).
        Default: Normal.dist(mu=0, tau=1.0E-12)
    init : bool
        Whether to set the starting values via statsmodels
        Default: True
    init_vals : dict
        Set starting values externally: parameter -> value
        Default: None
    family : statsmodels.family
        Link function to pass to statsmodels (init has to be True).
        See `statsmodels.api.families`
        Default: identity

    Output
    ------
    (y_est, coeffs) : Estimate for y, list of coefficients

    Example
    -------
    # Logistic regression
    y_est, coeffs = glm('male ~ height + weight',
                        htwt_data,
                        family=glm.families.Binomial(link=glm.links.Logit))
    y_data = Bernoulli('y', y_est, observed=data.male)
    """
    if intercept_prior is None:
        intercept_prior = Normal.dist(mu=0, tau=1.0E-12)
    if regressor_prior is None:
        regressor_prior = Normal.dist(mu=0, tau=1.0E-12)
    if priors is None:
        priors = defaultdict(None)
    # Build patsy design matrix and get regressor names.
    _, dmatrix = patsy.dmatrices(formula, data)
    reg_names = dmatrix.design_info.column_names
    # Fix: only derive starting values when the caller did not supply any.
    # The original's `if init_vals is None and init: ... else: ...` clobbered
    # a user-provided init_vals with an empty defaultdict.
    if init_vals is None:
        if init:
            init_vals = glm_sm(formula, data, family=family).fit().params
        else:
            init_vals = defaultdict(lambda: None)
    # Create individual coefficients
    model = modelcontext(model)
    coeffs = []
    if reg_names[0] == 'Intercept':
        prior = priors.get('Intercept', intercept_prior)
        coeff = model.Var(reg_names.pop(0), prior)
        coeff.tag.test_value = init_vals['Intercept']
        coeffs.append(coeff)
    for reg_name in reg_names:
        prior = priors.get(reg_name, regressor_prior)
        coeff = model.Var(reg_name, prior)
        coeff.tag.test_value = init_vals[reg_name]
        coeffs.append(coeff)
    y_est = theano.dot(np.asarray(dmatrix), theano.tensor.stack(*coeffs)).reshape((1, -1))
    return y_est, coeffs
def glm(*args, **kwargs):
    """Build a GLM from a Patsy model specification string.

    Positional arguments are (formula, data); keyword arguments are those
    of linear_component() plus:

    family : pymc.glm family
        Distribution of the likelihood (init has to be True).
        Default: families.Normal()
    find_MAP : bool
        Whether to call find_MAP on non-initialized nodes.
        Default: True

    Output
    ------
    vars : List of created random variables (y_est, coefficients etc)

    Example
    -------
    # Logistic regression
    vars = glm('male ~ height + weight',
               data,
               family=glm.families.Binomial(link=glm.links.Logit))
    """
    model = modelcontext(kwargs.get('model'))
    family = kwargs.pop('family', families.Normal())
    should_find_map = kwargs.pop('find_MAP', True)
    formula = args[0]
    data = args[1]
    # Observed response values, transposed to match y_est's layout.
    y_data = np.asarray(patsy.dmatrices(formula, data)[0]).T
    # Create GLM
    kwargs['family'] = family.create_statsmodel_family()
    y_est, coeffs = linear_component(*args, **kwargs)
    family.create_likelihood(y_est, y_data)
    # Variables beyond the regression coefficients (e.g. likelihood scale
    # parameters) still need starting values; optionally use MAP estimation.
    remaining = set(model.vars).difference(set(coeffs))
    if len(remaining) != 0 and should_find_map:
        start = find_MAP(vars=remaining)
        for var in remaining:
            var.tag.test_value = start[var.name]
    return [y_est] + coeffs + list(remaining)
def plot_posterior_predictive(trace, eval=None, lm=None, samples=30, **kwargs):
    """Plot posterior predictive draws of a linear model.

    :Arguments:
        trace : <array>
            Array of posterior samples with columns
        eval : <array>
            Array over which to evaluate lm
        lm : function <default: linear function>
            Maps (point, sample) to the estimated value at that point.
        samples : int <default=30>
            How many posterior samples to draw.

    Additional keyword arguments are passed to pylab.plot().
    """
    import matplotlib.pyplot as plt

    if lm is None:
        lm = lambda x, sample: sample['Intercept'] + sample['x'] * x
    if eval is None:
        eval = np.linspace(0, 1, 100)

    # Thin black lines by default, unless the caller chose either spelling
    # of the width/color options.
    if 'lw' not in kwargs and 'linewidth' not in kwargs:
        kwargs['lw'] = .2
    if 'c' not in kwargs and 'color' not in kwargs:
        kwargs['c'] = 'k'

    for idx in np.random.randint(0, len(trace), samples):
        sample = trace[idx]
        plt.plot(eval, lm(eval, sample), **kwargs)
        # Make sure to not plot label multiple times
        kwargs.pop('label', None)

    plt.title('Posterior predictive')
|
|
#!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""Tests for memory related flows."""
import copy
import json
import os
import socket
import threading
from grr.client import vfs
from grr.client.client_actions import grr_rekall_test
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib.rdfvalues import crypto
class DummyLoadMemoryDriverFlow(flow.GRRFlow):
  """Fake LoadMemoryDriver flow: reports a test-data file as the device."""
  args_type = rdfvalue.LoadMemoryDriverArgs

  @flow.StateHandler()
  def Start(self):
    # Pretend the memory device is the auth.log fixture, with two runs
    # describing the readable regions of the "image".
    self.SendReply(rdfvalue.MemoryInformation(
        device=rdfvalue.PathSpec(
            path=os.path.join(config_lib.CONFIG["Test.data_dir"], "auth.log"),
            pathtype=rdfvalue.PathSpec.PathType.OS),
        runs=[rdfvalue.BufferReference(length=638976, offset=5),
              rdfvalue.BufferReference(length=145184, offset=643074)]))
class DummyDiskVolumeInfo(flow.GRRFlow):
  """Fake DiskVolumeInfo flow returning canned volume statistics.

  Reports a nearly-full /opt volume and a roomy /var volume so tests can
  exercise the free-disk-space check.
  """
  args_type = rdfvalue.DiskVolumeInfoArgs

  @flow.StateHandler()
  def Start(self):
    if "/opt" in self.args.path_list[0]:
      # /opt: 4 KiB units with only 10 of 100 free -- not enough space.
      mnt = rdfvalue.UnixVolume(mount_point="/opt")
      self.SendReply(rdfvalue.Volume(unix=mnt, bytes_per_sector=4096,
                                     sectors_per_allocation_unit=1,
                                     actual_available_allocation_units=10,
                                     total_allocation_units=100))
    else:
      # /var: plenty of free space.
      mnt = rdfvalue.UnixVolume(mount_point="/var")
      self.SendReply(rdfvalue.Volume(unix=mnt, bytes_per_sector=1,
                                     sectors_per_allocation_unit=1,
                                     actual_available_allocation_units=784165,
                                     total_allocation_units=78416500))
class MemoryTest(test_lib.FlowTestsBaseclass):
  """Shared base class for the memory-flow tests below."""
  pass
class TestMemoryCollector(MemoryTest):
  """Tests the MemoryCollector flow."""

  def setUp(self):
    """Installs dummy flows and a fake VFS handler around each test."""
    super(TestMemoryCollector, self).setUp()

    self.output_path = "analysis/memory_scanner"

    # Fixed AES key/IV so the send-to-socket tests can decrypt the stream.
    self.key = rdfvalue.AES128Key("1a5eafcc77d428863d4c2441ea26e5a5")
    self.iv = rdfvalue.AES128Key("2241b14c64874b1898dad4de7173d8c0")

    # auth.log doubles as the "memory image" throughout these tests.
    self.memory_file = os.path.join(config_lib.CONFIG["Test.data_dir"],
                                    "auth.log")
    with open(self.memory_file, "r") as f:
      self.memory_dump = f.read()
    self.assertTrue(self.memory_dump)

    self.client_mock = action_mocks.ActionMock("TransferBuffer", "HashBuffer",
                                               "StatFile", "CopyPathToFile",
                                               "SendFile", "DeleteGRRTempFiles",
                                               "GetConfiguration", "Find",
                                               "Grep")

    # Swap in the dummy flows defined above; restored in tearDown().
    self.old_driver_flow = flow.GRRFlow.classes["LoadMemoryDriver"]
    flow.GRRFlow.classes["LoadMemoryDriver"] = DummyLoadMemoryDriverFlow
    self.old_diskvolume_flow = flow.GRRFlow.classes["DiskVolumeInfo"]
    flow.GRRFlow.classes["DiskVolumeInfo"] = DummyDiskVolumeInfo

    vfs.VFS_HANDLERS[
        rdfvalue.PathSpec.PathType.MEMORY] = test_lib.FakeTestDataVFSHandler

  def tearDown(self):
    """Restores the real flows patched out in setUp()."""
    super(TestMemoryCollector, self).tearDown()

    flow.GRRFlow.classes["LoadMemoryDriver"] = self.old_driver_flow
    flow.GRRFlow.classes["DiskVolumeInfo"] = self.old_diskvolume_flow

  def testCallWithDefaultArgumentsDoesNothing(self):
    for _ in test_lib.TestFlowHelper(
        "MemoryCollector", action_mocks.ActionMock(), client_id=self.client_id,
        token=self.token):
      pass

  def RunWithDownload(self, dump_option, conditions=None):
    """Runs MemoryCollector with a DOWNLOAD action; returns the flow object."""
    download_action = rdfvalue.MemoryCollectorDownloadAction(
        dump_option=dump_option)

    flow_urn = flow.GRRFlow.StartFlow(
        client_id=self.client_id, flow_name="MemoryCollector",
        conditions=conditions or [],
        action=rdfvalue.MemoryCollectorAction(
            action_type=rdfvalue.MemoryCollectorAction.Action.DOWNLOAD,
            download=download_action
            ), token=self.token, output=self.output_path)

    for _ in test_lib.TestFlowHelper(
        flow_urn, self.client_mock,
        client_id=self.client_id,
        token=self.token):
      pass

    return aff4.FACTORY.Open(flow_urn, token=self.token)

  def testMemoryImageLocalCopyDownload(self):
    dump_option = rdfvalue.MemoryCollectorDumpOption(
        option_type=rdfvalue.MemoryCollectorDumpOption.Option.WITH_LOCAL_COPY,
        with_local_copy=rdfvalue.MemoryCollectorWithLocalCopyDumpOption(
            gzip=False, check_disk_free_space=False))

    flow_obj = self.RunWithDownload(dump_option)

    self.assertTrue(flow_obj.state.memory_src_path is not None)
    self.assertEqual(
        flow_obj.state.downloaded_file,
        self.client_id.Add("fs/os").Add(flow_obj.state.memory_src_path.path))

    # The downloaded AFF4 object must contain the full fixture content.
    fd = aff4.FACTORY.Open(flow_obj.state.downloaded_file, token=self.token)
    self.assertEqual(fd.Read(1024 * 1024), self.memory_dump)

  def testMemoryImageLocalCopyDiskCheck(self):
    # check_disk_free_space defaults on; /var (DummyDiskVolumeInfo) has room.
    dump_option = rdfvalue.MemoryCollectorDumpOption(
        option_type=rdfvalue.MemoryCollectorDumpOption.Option.WITH_LOCAL_COPY,
        with_local_copy=rdfvalue.MemoryCollectorWithLocalCopyDumpOption(
            gzip=False))

    flow_obj = self.RunWithDownload(dump_option)

    self.assertTrue(flow_obj.state.memory_src_path is not None)
    self.assertEqual(
        flow_obj.state.downloaded_file,
        self.client_id.Add("fs/os").Add(flow_obj.state.memory_src_path.path))

    fd = aff4.FACTORY.Open(flow_obj.state.downloaded_file, token=self.token)
    self.assertEqual(fd.Read(1024 * 1024), self.memory_dump)

  def testMemoryImageLocalCopyNoSpace(self):
    # /opt is reported nearly full by DummyDiskVolumeInfo, so the disk
    # space check must abort the flow.
    dump_option = rdfvalue.MemoryCollectorDumpOption(
        option_type=rdfvalue.MemoryCollectorDumpOption.Option.WITH_LOCAL_COPY,
        with_local_copy=rdfvalue.MemoryCollectorWithLocalCopyDumpOption(
            gzip=False, destdir="/opt/tmp/testing"))

    self.assertRaises(RuntimeError, self.RunWithDownload, dump_option)

  def testMemoryImageLocalCopyDownloadWithOffsetAndLength(self):
    dump_option = rdfvalue.MemoryCollectorDumpOption(
        option_type=rdfvalue.MemoryCollectorDumpOption.Option.WITH_LOCAL_COPY,
        with_local_copy=rdfvalue.MemoryCollectorWithLocalCopyDumpOption(
            offset=10, length=42, gzip=False))

    flow_obj = self.RunWithDownload(dump_option)

    self.assertTrue(flow_obj.state.memory_src_path is not None)
    self.assertEqual(
        flow_obj.state.downloaded_file,
        self.client_id.Add("fs/os").Add(flow_obj.state.memory_src_path.path))

    # Only the requested [offset, offset+length) window is copied.
    fd = aff4.FACTORY.Open(flow_obj.state.downloaded_file, token=self.token)
    self.assertEqual(fd.Read(1024 * 1024), self.memory_dump[10:52])

  def testMemoryImageWithoutLocalCopyDownload(self):
    dump_option = rdfvalue.MemoryCollectorDumpOption(
        option_type="WITHOUT_LOCAL_COPY")

    flow_obj = self.RunWithDownload(dump_option)

    # Without a local copy the source is the memory device itself.
    self.assertEqual(flow_obj.state.memory_src_path.path, self.memory_file)
    self.assertEqual(
        flow_obj.state.downloaded_file,
        self.client_id.Add("fs/os").Add(flow_obj.state.memory_src_path.path))

    fd = aff4.FACTORY.Open(flow_obj.state.downloaded_file, token=self.token)
    self.assertEqual(fd.Read(1024 * 1024), self.memory_dump)

  def RunWithSendToSocket(self, dump_option, conditions=None):
    """Runs MemoryCollector with a SEND_TO_SOCKET action.

    Listens on a local TCP socket in a background thread and returns
    (flow_urn, encrypted_bytes, decrypted_bytes).
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind((socket.gethostname(), 0))
    port = sock.getsockname()[1]

    send_to_socket_action = rdfvalue.MemoryCollectorSendToSocketAction(
        host=socket.gethostname(),
        port=port,
        key=self.key,
        iv=self.iv,
        dump_option=dump_option)

    flow_urn = flow.GRRFlow.StartFlow(
        client_id=self.client_id, flow_name="MemoryCollector",
        conditions=conditions or [],
        action=rdfvalue.MemoryCollectorAction(
            action_type=rdfvalue.MemoryCollectorAction.Action.SEND_TO_SOCKET,
            send_to_socket=send_to_socket_action),
        token=self.token, output=self.output_path)

    socket_data = []

    def ReadFromSocket():
      # Accept a single connection and drain it until EOF.
      sock.listen(1)
      client_socket, _ = sock.accept()
      while 1:
        data = client_socket.recv(1024)
        if not data: break
        socket_data.append(data)
      client_socket.close()
      sock.close()

    thread = threading.Thread(target=ReadFromSocket)
    thread.daemon = True
    thread.start()

    for _ in test_lib.TestFlowHelper(
        flow_urn, self.client_mock, client_id=self.client_id, token=self.token):
      pass
    thread.join()

    encrypted_data = "".join(socket_data)
    # Data should be encrypted, so they're not equal
    self.assertNotEqual(encrypted_data, self.memory_dump)

    # Decrypt with the same key/IV the action was configured with.
    cipher = crypto.AES128CBCCipher(key=self.key, iv=self.iv,
                                    mode=crypto.AES128CBCCipher.OP_DECRYPT)
    decrypted_data = cipher.Update(encrypted_data) + cipher.Final()

    return flow_urn, encrypted_data, decrypted_data

  def testMemoryImageLocalCopySendToSocket(self):
    dump_option = rdfvalue.MemoryCollectorDumpOption(
        option_type=rdfvalue.MemoryCollectorDumpOption.Option.WITH_LOCAL_COPY,
        with_local_copy=rdfvalue.MemoryCollectorWithLocalCopyDumpOption(
            gzip=False))
    flow_urn, encrypted, decrypted = self.RunWithSendToSocket(dump_option)

    flow_obj = aff4.FACTORY.Open(flow_urn, token=self.token)
    # There was a local file, so dest_path should not be empty
    self.assertTrue(flow_obj.state.memory_src_path is not None)

    # Data should be encrypted, so they're not equal
    self.assertNotEqual(encrypted, self.memory_dump)
    # Decrypted data should be equal to the memory dump
    self.assertEqual(decrypted, self.memory_dump)

  def testMemoryImageLocalCopySendToSocketWithOffsetAndLength(self):
    dump_option = rdfvalue.MemoryCollectorDumpOption(
        option_type=rdfvalue.MemoryCollectorDumpOption.Option.WITH_LOCAL_COPY,
        with_local_copy=rdfvalue.MemoryCollectorWithLocalCopyDumpOption(
            offset=10, length=42, gzip=False))
    flow_urn, encrypted, decrypted = self.RunWithSendToSocket(dump_option)

    flow_obj = aff4.FACTORY.Open(flow_urn, token=self.token)
    # There was a local file, so dest_path should not be empty
    self.assertTrue(flow_obj.state.memory_src_path is not None)

    # Data should be encrypted, so they're not equal
    self.assertNotEqual(encrypted, self.memory_dump)
    # Decrypted data should be equal to the memory dump
    self.assertEqual(decrypted, self.memory_dump[10:52])

  def testMemoryImageWithoutLocalCopySendToSocket(self):
    dump_option = rdfvalue.MemoryCollectorDumpOption(
        option_type="WITHOUT_LOCAL_COPY")
    (flow_urn, encrypted, decrypted) = self.RunWithSendToSocket(dump_option)

    flow_obj = aff4.FACTORY.Open(flow_urn, token=self.token)
    # There was a local file, so dest_path should not be empty
    self.assertTrue(flow_obj.state.memory_src_path is not None)

    # Data should be encrypted, so they're not equal
    self.assertNotEqual(encrypted, self.memory_dump)
    # Decrypted data should be equal to the memory dump
    self.assertEqual(decrypted, self.memory_dump)

  def RunWithNoAction(self, conditions=None):
    """Runs MemoryCollector with action NONE (conditions only)."""
    flow_urn = flow.GRRFlow.StartFlow(
        client_id=self.client_id, flow_name="MemoryCollector",
        conditions=conditions or [],
        action=rdfvalue.MemoryCollectorAction(
            action_type=rdfvalue.MemoryCollectorAction.Action.NONE),
        token=self.token, output=self.output_path)

    for _ in test_lib.TestFlowHelper(
        flow_urn, self.client_mock, client_id=self.client_id, token=self.token):
      pass

    return aff4.FACTORY.Open(flow_urn, token=self.token)

  def testMemoryImageLiteralMatchConditionWithNoAction(self):
    literal_condition = rdfvalue.MemoryCollectorCondition(
        condition_type=rdfvalue.MemoryCollectorCondition.Type.LITERAL_MATCH,
        literal_match=rdfvalue.FileFinderContentsLiteralMatchCondition(
            mode=rdfvalue.FileFinderContentsLiteralMatchCondition.Mode.ALL_HITS,
            literal="session opened for user dearjohn"))

    self.RunWithNoAction(conditions=[literal_condition])

    # The match (a BufferReference) is written to the output collection.
    output = aff4.FACTORY.Open(self.client_id.Add(self.output_path),
                               aff4_type="RDFValueCollection",
                               token=self.token)
    self.assertEqual(len(output), 1)
    self.assertEqual(output[0].offset, 350)
    self.assertEqual(output[0].length, 52)
    self.assertEqual(output[0].data, "session): session opened for user "
                     "dearjohn by (uid=0")

  def testMemoryImageRegexMatchConditionWithNoAction(self):
    regex_condition = rdfvalue.MemoryCollectorCondition(
        condition_type=rdfvalue.MemoryCollectorCondition.Type.REGEX_MATCH,
        regex_match=rdfvalue.FileFinderContentsRegexMatchCondition(
            mode=rdfvalue.FileFinderContentsLiteralMatchCondition.Mode.ALL_HITS,
            regex="session opened for user .*?john"))

    self.RunWithNoAction(conditions=[regex_condition])

    output = aff4.FACTORY.Open(self.client_id.Add(self.output_path),
                               aff4_type="RDFValueCollection",
                               token=self.token)
    self.assertEqual(len(output), 1)
    self.assertEqual(output[0].offset, 350)
    self.assertEqual(output[0].length, 52)
    self.assertEqual(output[0].data, "session): session opened for user "
                     "dearjohn by (uid=0")

  def testMemoryImageLiteralMatchConditionWithDownloadAction(self):
    literal_condition = rdfvalue.MemoryCollectorCondition(
        condition_type=rdfvalue.MemoryCollectorCondition.Type.LITERAL_MATCH,
        literal_match=rdfvalue.FileFinderContentsLiteralMatchCondition(
            mode=rdfvalue.FileFinderContentsLiteralMatchCondition.Mode.ALL_HITS,
            literal="session opened for user dearjohn"))
    dump_option = rdfvalue.MemoryCollectorDumpOption(
        option_type=rdfvalue.MemoryCollectorDumpOption.Option.WITH_LOCAL_COPY,
        with_local_copy=rdfvalue.MemoryCollectorWithLocalCopyDumpOption(
            gzip=False))
    flow_obj = self.RunWithDownload(dump_option, conditions=[literal_condition])

    # Check that matches are in the collection
    output = aff4.FACTORY.Open(self.client_id.Add(self.output_path),
                               aff4_type="RDFValueCollection",
                               token=self.token)
    # First item of the collection is the BufferReference, second is the
    # path of the downloaded
    self.assertEqual(len(output), 2)
    self.assertEqual(output[0].offset, 350)
    self.assertEqual(output[0].length, 52)
    self.assertEqual(output[0].data, "session): session opened for user "
                     "dearjohn by (uid=0")
    self.assertTrue(isinstance(output[1], rdfvalue.StatEntry))

    self.assertTrue(flow_obj.state.memory_src_path is not None)
    self.assertEqual(
        flow_obj.state.downloaded_file,
        self.client_id.Add("fs/os").Add(flow_obj.state.memory_src_path.path))

    fd = aff4.FACTORY.Open(flow_obj.state.downloaded_file, token=self.token)
    self.assertEqual(fd.Read(1024 * 1024), self.memory_dump)

  def testDoesNothingWhenConditionDoesNotMatch(self):
    literal_condition = rdfvalue.MemoryCollectorCondition(
        condition_type=rdfvalue.MemoryCollectorCondition.Type.LITERAL_MATCH,
        literal_match=rdfvalue.FileFinderContentsLiteralMatchCondition(
            mode=rdfvalue.FileFinderContentsLiteralMatchCondition.Mode.ALL_HITS,
            literal="session opened for user foobar"))
    dump_option = rdfvalue.MemoryCollectorDumpOption(
        option_type=rdfvalue.MemoryCollectorDumpOption.Option.WITH_LOCAL_COPY,
        with_local_copy=rdfvalue.MemoryCollectorWithLocalCopyDumpOption(
            gzip=False))
    flow_obj = self.RunWithDownload(dump_option, conditions=[literal_condition])

    # Check that there are no matches
    with self.assertRaises(aff4.InstantiationError):
      aff4.FACTORY.Open(self.client_id.Add(self.output_path),
                        aff4_type="RDFValueCollection",
                        token=self.token)

    # Assert nothing got downloaded
    self.assertTrue("dest_path" not in flow_obj.state)
    self.assertTrue("downloaded_file" not in flow_obj.state)

  def testMemoryImageLiteralMatchConditionWithSendToSocketAction(self):
    literal_condition = rdfvalue.MemoryCollectorCondition(
        condition_type=rdfvalue.MemoryCollectorCondition.Type.LITERAL_MATCH,
        literal_match=rdfvalue.FileFinderContentsLiteralMatchCondition(
            mode=rdfvalue.FileFinderContentsLiteralMatchCondition.Mode.ALL_HITS,
            literal="session opened for user dearjohn"))
    dump_option = rdfvalue.MemoryCollectorDumpOption(
        option_type=rdfvalue.MemoryCollectorDumpOption.Option.WITH_LOCAL_COPY,
        with_local_copy=rdfvalue.MemoryCollectorWithLocalCopyDumpOption(
            gzip=False))
    flow_urn, encrypted, decrypted = self.RunWithSendToSocket(
        dump_option, conditions=[literal_condition])

    # Check that matches are in the collection
    output = aff4.FACTORY.Open(self.client_id.Add(self.output_path),
                               aff4_type="RDFValueCollection",
                               token=self.token)
    self.assertEqual(len(output), 1)
    self.assertEqual(output[0].offset, 350)
    self.assertEqual(output[0].length, 52)
    self.assertEqual(output[0].data, "session): session opened for user "
                     "dearjohn by (uid=0")

    flow_obj = aff4.FACTORY.Open(flow_urn, token=self.token)
    # There was a local file, so dest_path should not be empty
    self.assertTrue(flow_obj.state.memory_src_path is not None)

    # Data should be encrypted, so they're not equal
    self.assertNotEqual(encrypted, self.memory_dump)
    # Decrypted data should be equal to the memory dump
    self.assertEqual(decrypted, self.memory_dump)
class TestMemoryAnalysis(MemoryTest, grr_rekall_test.RekallTestSuite):
  """Tests the memory analysis flows."""

  def testLoadDriverWindows(self):
    """Tests the memory driver deployment flow."""
    self.CreateSignedDriver()
    self.CreateClient()

    # Run the flow in the simulated way
    for _ in test_lib.TestFlowHelper("LoadMemoryDriver",
                                     action_mocks.MemoryClientMock(),
                                     token=self.token,
                                     client_id=self.client_id):
      pass

    device_urn = self.client_id.Add("devices/memory")
    fd = aff4.FACTORY.Open(device_urn, mode="r", token=self.token)
    runs = fd.Get(fd.Schema.LAYOUT).runs
    self.assertEqual(runs[0].offset, 0x1000)
    self.assertEqual(runs[0].length, 0x10000)
    self.assertEqual(runs[1].offset, 0x20000)
    # Fix: the original re-asserted runs[0].length here (copy/paste slip),
    # leaving the second run's length unverified. Expected value assumed
    # equal to run 0's per the mock layout -- confirm against
    # action_mocks.MemoryClientMock.
    self.assertEqual(runs[1].length, 0x10000)

  def testScanMemory(self):
    """Greps the fake memory device and verifies the reported hits."""
    # Use a file in place of a memory image for simplicity
    image_path = os.path.join(self.base_path, "numbers.txt")

    self.CreateClient()
    self.CreateSignedDriver()

    class ClientMock(action_mocks.MemoryClientMock):
      """A mock which returns the image as the driver path."""

      def GetMemoryInformation(self, _):
        """Mock out the driver loading code to pass the memory image."""
        reply = rdfvalue.MemoryInformation(
            device=rdfvalue.PathSpec(
                path=image_path,
                pathtype=rdfvalue.PathSpec.PathType.OS))
        reply.runs.Append(offset=0, length=1000000000)
        return [reply]

    args = dict(grep=rdfvalue.BareGrepSpec(
        literal="88",
        mode="ALL_HITS",
        ),
                output="analysis/grep/testing")

    # Run the flow.
    for _ in test_lib.TestFlowHelper(
        "ScanMemory", ClientMock("Grep"), client_id=self.client_id,
        token=self.token, **args):
      pass

    fd = aff4.FACTORY.Open(
        rdfvalue.RDFURN(self.client_id).Add("/analysis/grep/testing"),
        token=self.token)
    self.assertEqual(len(fd), 20)
    self.assertEqual(fd[0].offset, 252)
    self.assertEqual(fd[0].data, "\n85\n86\n87\n88\n89\n90\n91\n")
class LinuxKcoreMemoryMock(action_mocks.ActionMock):
  """Mock a linux client with kcore available.

  Validates that the kcore is used.
  """

  def StatFile(self, list_dir_req):
    # Only /proc/kcore "exists" on this fake client.
    if list_dir_req.pathspec.path == "/proc/kcore":
      result = rdfvalue.StatEntry(pathspec=list_dir_req.pathspec,
                                  st_mode=400)
      status = rdfvalue.GrrStatus(status=rdfvalue.GrrStatus.ReturnedStatus.OK)
      return [result, status]
    raise IOError("Not found.")

  def RekallAction(self, rekall_request):
    # Fail the flow if it analyzed anything other than /proc/kcore.
    if rekall_request.device.path != "/proc/kcore":
      return [rdfvalue.GrrStatus(
          status=rdfvalue.GrrStatus.ReturnedStatus.GENERIC_ERROR,
          error_message="Should use kcore device when present.")]

    response = rdfvalue.RekallResponse(json_messages="{}")
    return [response, rdfvalue.Iterator(state="FINISHED")]
class TestLinuxMemoryAnalysis(MemoryTest, grr_rekall_test.RekallTestSuite):
  """Tests the memory analysis flow using kcore."""

  def CreateClient(self):
    """Create a VFSGRRClient whose system is reported as Linux."""
    client = aff4.FACTORY.Create(self.client_id,
                                 "VFSGRRClient", token=self.token)
    client.Set(client.Schema.SYSTEM("Linux"))
    client.Close()

  # NOTE(review): "Memmory" is a typo, but renaming would change the public
  # test name picked up by the runner; left as-is.
  def testAnalyzeClientMemmoryKcore(self):
    """Tests the selection of /proc/kcore."""
    self.CreateClient()

    # Run the flow in the simulated way, with kcore present.  The mock raises
    # a GENERIC_ERROR status if any other device is used.
    for _ in test_lib.TestFlowHelper("AnalyzeClientMemory",
                                     LinuxKcoreMemoryMock(),
                                     token=self.token,
                                     client_id=self.client_id):
      pass
class ListVADBinariesActionMock(action_mocks.ActionMock):
"""Client with real file actions and mocked-out RekallAction."""
def __init__(self, process_list=None):
super(ListVADBinariesActionMock, self).__init__(
"TransferBuffer", "StatFile", "Find", "HashBuffer", "FingerprintFile",
"ListDirectory")
self.process_list = process_list or []
def RekallAction(self, _):
ps_list_file = os.path.join(config_lib.CONFIG["Test.data_dir"],
"rekall_vad_result.dat")
response = rdfvalue.RekallResponse(
json_messages=open(ps_list_file, "rb").read(),
plugin="pslist")
# If we are given process names here we need to craft a Rekall result
# containing them. This is so they point to valid files in the fixture.
if self.process_list:
json_data = json.loads(response.json_messages)
template = json_data[11]
if template[1]["filename"] != ur"\Windows\System32\ntdll.dll":
raise RuntimeError("Test data invalid.")
json_data = []
for process in self.process_list:
new_entry = copy.deepcopy(template)
new_entry[1]["filename"] = process
json_data.append(new_entry)
response.json_messages = json.dumps(json_data)
return [response, rdfvalue.Iterator(state="FINISHED")]
class ListVADBinariesTest(MemoryTest):
  """Tests the Rekall-powered "get processes binaries" flow."""

  def setUp(self):
    """Set up a Windows client, fake VFS handlers and a dummy driver flow."""
    super(ListVADBinariesTest, self).setUp()
    client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
    client.Set(client.Schema.SYSTEM("Windows"))
    client.Set(client.Schema.OS_VERSION("6.2"))
    client.Flush()

    # Back both OS and registry path types with in-memory fixtures.
    vfs.VFS_HANDLERS[
        rdfvalue.PathSpec.PathType.OS] = test_lib.ClientVFSHandlerFixture
    vfs.VFS_HANDLERS[
        rdfvalue.PathSpec.PathType.REGISTRY] = test_lib.FakeRegistryVFSHandler

    # Add some user accounts to this client.
    fd = aff4.FACTORY.Open(self.client_id, mode="rw", token=self.token)
    users = fd.Schema.USER()
    users.Append(rdfvalue.User(
        username="LocalService", domain="testing-PC",
        homedir=r"C:\Users\localservice", sid="S-1-5-20"))
    fd.Set(users)
    fd.Close()

    # Replace the real driver-loading flow with a no-op; restored in tearDown.
    self.old_driver_flow = flow.GRRFlow.classes["LoadMemoryDriver"]
    flow.GRRFlow.classes["LoadMemoryDriver"] = DummyLoadMemoryDriverFlow

  def tearDown(self):
    """Restore the real LoadMemoryDriver flow."""
    super(ListVADBinariesTest, self).tearDown()
    flow.GRRFlow.classes["LoadMemoryDriver"] = self.old_driver_flow

  def testListsBinaries(self):
    """Without fetch_binaries the flow outputs the VAD pathspecs only."""
    client_mock = ListVADBinariesActionMock()
    output_path = "analysis/ListVADBinariesTest1"

    for _ in test_lib.TestFlowHelper(
        "ListVADBinaries",
        client_mock,
        client_id=self.client_id,
        token=self.token,
        output=output_path):
      pass

    fd = aff4.FACTORY.Open(self.client_id.Add(output_path),
                           token=self.token)

    # Sorting output collection to make the test deterministic
    paths = sorted([x.CollapsePath() for x in fd])
    self.assertIn(u"C:\\Windows\\System32\\wintrust.dll", paths)
    self.assertIn(u"C:\\Program Files\\Internet Explorer\\ieproxy.dll", paths)

  def testFetchesAndStoresBinary(self):
    """With fetch_binaries=True the flow downloads each listed binary."""
    process1_exe = "\\WINDOWS\\bar.exe"
    process2_exe = "\\WINDOWS\\foo.exe"

    client_mock = ListVADBinariesActionMock([process1_exe, process2_exe])
    output_path = "analysis/ListVADBinariesTest1"

    for _ in test_lib.TestFlowHelper(
        "ListVADBinaries",
        client_mock,
        client_id=self.client_id,
        token=self.token,
        fetch_binaries=True,
        output=output_path):
      pass

    fd = aff4.FACTORY.Open(self.client_id.Add(output_path),
                           token=self.token)

    # Sorting output collection to make the test deterministic
    binaries = sorted(fd, key=lambda x: x.aff4path)
    self.assertEqual(len(binaries), 2)

    self.assertEqual(binaries[0].pathspec.CollapsePath(),
                     "/C:/WINDOWS/bar.exe")
    self.assertEqual(binaries[1].pathspec.CollapsePath(),
                     "/C:/WINDOWS/foo.exe")

    # The fetched file contents come from the VFS fixture.
    fd = aff4.FACTORY.Open(binaries[0].aff4path, token=self.token)
    self.assertEqual(fd.Read(1024), "just bar")
    fd = aff4.FACTORY.Open(binaries[1].aff4path, token=self.token)
    self.assertEqual(fd.Read(1024), "this is foo")

  def testDoesNotFetchDuplicates(self):
    """The same binary listed twice is only downloaded once."""
    process = "\\WINDOWS\\bar.exe"
    client_mock = ListVADBinariesActionMock([process, process])
    output_path = "analysis/ListVADBinariesTest1"

    for _ in test_lib.TestFlowHelper(
        "ListVADBinaries",
        client_mock,
        client_id=self.client_id,
        fetch_binaries=True,
        token=self.token,
        output=output_path):
      pass

    fd = aff4.FACTORY.Open(self.client_id.Add(output_path),
                           token=self.token)
    binaries = list(fd)

    self.assertEqual(len(binaries), 1)
    self.assertEqual(binaries[0].pathspec.CollapsePath(),
                     "/C:/WINDOWS/bar.exe")

    fd = aff4.FACTORY.Open(binaries[0].aff4path, token=self.token)
    self.assertEqual(fd.Read(1024), "just bar")

  def testConditionsOutBinariesUsingRegex(self):
    """filename_regex restricts which binaries are fetched."""
    process1_exe = "\\WINDOWS\\bar.exe"
    process2_exe = "\\WINDOWS\\foo.exe"

    client_mock = ListVADBinariesActionMock([process1_exe, process2_exe])
    output_path = "analysis/ListVADBinariesTest1"

    for _ in test_lib.TestFlowHelper(
        "ListVADBinaries",
        client_mock,
        client_id=self.client_id,
        token=self.token,
        output=output_path,
        filename_regex=".*bar\\.exe$",
        fetch_binaries=True):
      pass

    fd = aff4.FACTORY.Open(self.client_id.Add(output_path),
                           token=self.token)
    binaries = list(fd)

    self.assertEqual(len(binaries), 1)
    self.assertEqual(binaries[0].pathspec.CollapsePath(),
                     "/C:/WINDOWS/bar.exe")

    fd = aff4.FACTORY.Open(binaries[0].aff4path, token=self.token)
    self.assertEqual(fd.Read(1024), "just bar")

  def testIgnoresMissingFiles(self):
    """Binaries absent from the VFS fixture are skipped, not fatal."""
    process1_exe = "\\WINDOWS\\bar.exe"

    client_mock = ListVADBinariesActionMock([process1_exe])
    output_path = "analysis/ListVADBinariesTest1"

    # check_flow_errors=False: the flow may log errors for missing files.
    for _ in test_lib.TestFlowHelper(
        "ListVADBinaries",
        client_mock,
        check_flow_errors=False,
        client_id=self.client_id,
        token=self.token,
        output=output_path,
        fetch_binaries=True):
      pass

    fd = aff4.FACTORY.Open(self.client_id.Add(output_path),
                           token=self.token)
    binaries = list(fd)

    self.assertEqual(len(binaries), 1)
    self.assertEqual(binaries[0].pathspec.CollapsePath(),
                     "/C:/WINDOWS/bar.exe")
    fd = aff4.FACTORY.Open(binaries[0].aff4path, token=self.token)
    self.assertEqual(fd.Read(1024), "just bar")
def main(argv):
  """Entry point: run the full test suite."""
  # Run the full test suite
  test_lib.GrrTestProgram(argv=argv)

if __name__ == "__main__":
  flags.StartMain(main)
|
|
# Copyright 2008 Divmod, Inc. See LICENSE file for details
# -*- test-case-name: xmantissa.test.test_webapp.AuthenticatedWebViewerTests,xmantissa.test.test_publicweb.AnonymousWebViewerTests -*-
"""
A collection of fake versions of various objects used in tests.
There are a lot of classes here because many of them have model/view
interactions that are expressed through adapter registrations, so having
additional types is helpful.
"""
from zope.interface import implements
from twisted.python.components import registerAdapter
from epsilon.structlike import record
from nevow.athena import LiveElement, LiveFragment
from nevow.page import Element
from nevow import rend, loaders
from nevow.inevow import IResource
from xmantissa.ixmantissa import INavigableFragment
class FakeLoader(record('name')):
    """
    A fake Nevow loader object.

    @ivar name: an arbitrary label identifying this loader in tests.
    """
class FakeTheme(record('themeName loaders')):
    """
    A placeholder for theme lookup.

    @ivar loaders: A dict of strings to loader objects.

    @ivar themeName: A name that describes this theme.
    """
    def getDocFactory(self, name, default=None):
        """
        Look up a loader by name, falling back to C{default} when absent.

        @param name: A loader name.
        """
        return self.loaders.get(name, default)
class FakeModel(object):
    """
    A simple 'model' object that does nothing, for the purposes of adaptation.
    """

class ResourceViewForFakeModel(rend.Page):
    """
    Implementor of L{IResource} for L{FakeModel}.
    """

# Adapting a FakeModel to IResource produces a ResourceViewForFakeModel.
registerAdapter(ResourceViewForFakeModel, FakeModel, IResource)
class _HasModel(object):
    """
    A mixin that provides a 'model' attribute for Element subclasses.

    This is a simple hack that attempts to cooperatively invoke __init__ so
    that its numerous subclasses don't have to define a constructor.  If you
    want to use it you should read its implementation.
    """
    def __init__(self, model):
        """
        Set the model attribute and delegate to the other subclass.

        @param model: the adapted model object this view wraps.
        """
        self.model = model
        # Call __init__ on the *second* base of the concrete subclass (e.g.
        # Element or LiveElement); assumes _HasModel is always listed first.
        self.__class__.__bases__[1].__init__(self)
class FakeElementModel(object):
    """
    A simple 'model' object that does nothing, for the purposes of adaptation.
    """

class ElementViewForFakeModel(_HasModel, Element):
    """
    L{Element} implementor of L{INavigableFragment} for L{FakeElementModel}.
    """
    implements(INavigableFragment)
    docFactory = loaders.stan('')

# Adapting a FakeElementModel to INavigableFragment yields the view above.
registerAdapter(ElementViewForFakeModel, FakeElementModel, INavigableFragment)
class FakeElementModelWithTheme(object):
    """
    A simple 'model' object that does nothing, for the purposes of adaptation
    to L{ElementViewForFakeModelWithTheme}.
    """

class ElementViewForFakeModelWithTheme(_HasModel, Element):
    """
    L{Element} implementor of L{INavigableFragment} for
    L{FakeElementModelWithTheme}; has a fragmentName (so its docFactory is
    resolved through theme lookup) but no docFactory of its own.
    """
    implements(INavigableFragment)
    fragmentName = 'awesome_page'

registerAdapter(ElementViewForFakeModelWithTheme, FakeElementModelWithTheme,
                INavigableFragment)
class FakeElementModelWithDocFactory(record('loader')):
    """
    A simple 'model' object that does nothing, for the purposes of adaptation
    to L{ElementViewForFakeModelWithDocFactory}.

    @ivar loader: A loader object (to be used as the view's docFactory).
    """

class ElementViewForFakeModelWithDocFactory(_HasModel, Element):
    """
    L{Element} implementor of L{INavigableFragment} for
    L{FakeElementModelWithDocFactory}.
    """
    implements(INavigableFragment)

    def __init__(self, original):
        """
        Set docFactory from the model's loader and proceed as usual.
        """
        _HasModel.__init__(self, original)
        self.docFactory = original.loader

registerAdapter(ElementViewForFakeModelWithDocFactory, FakeElementModelWithDocFactory,
                INavigableFragment)
class FakeElementModelWithThemeAndDocFactory(record('fragmentName loader')):
    """
    A simple 'model' object that does nothing, for the purposes of adaptation
    to L{ElementViewForFakeModelWithThemeAndDocFactory}.

    @ivar fragmentName: the fragment name the view should advertise.

    @ivar loader: a loader object (to be used as the view's docFactory).
    """

class ElementViewForFakeModelWithThemeAndDocFactory(_HasModel, Element):
    """
    L{Element} implementor of L{INavigableFragment} for
    L{FakeElementModelWithThemeAndDocFactory}.
    """
    implements(INavigableFragment)

    def __init__(self, original):
        """
        Set docFactory and fragmentName from the model and proceed as usual.
        """
        _HasModel.__init__(self, original)
        self.docFactory = original.loader
        self.fragmentName = original.fragmentName

registerAdapter(ElementViewForFakeModelWithThemeAndDocFactory,
                FakeElementModelWithThemeAndDocFactory,
                INavigableFragment)
class FakeFragmentModel(object):
    """
    A simple 'model' object that does nothing, for the purposes of adaptation.
    """

class FragmentViewForFakeModel(rend.Fragment):
    """
    L{Fragment} implementor of L{INavigableFragment} for L{FakeFragmentModel}.
    """
    implements(INavigableFragment)
    docFactory = loaders.stan('')

# Legacy (non-Element) fragment adapter for FakeFragmentModel.
registerAdapter(FragmentViewForFakeModel, FakeFragmentModel, INavigableFragment)
class FakeLiveElementModel(object):
    """
    A simple 'model' object that does nothing, for the purposes of adaptation.
    """

class LiveElementViewForFakeModel(_HasModel, LiveElement):
    """
    L{LiveElement} Implementor of L{INavigableFragment} for
    L{FakeLiveElementModel}.
    """
    implements(INavigableFragment)
    docFactory = loaders.stan('')

# Athena (live) element adapter for FakeLiveElementModel.
registerAdapter(LiveElementViewForFakeModel, FakeLiveElementModel,
                INavigableFragment)
class FakeLiveFragmentModel(object):
    """
    A simple 'model' object that does nothing, for the purposes of adaptation.
    """

class LiveFragmentViewForFakeModel(LiveFragment):
    """
    L{LiveFragment} Implementor of L{INavigableFragment} for
    L{FakeLiveFragmentModel}.
    """
    implements(INavigableFragment)
    docFactory = loaders.stan('')

    @classmethod
    def wrap(cls, model):
        """
        Wrap the given model in this class.  Implement this as a method in this
        file so that the warning filename will match up...
        """
        return cls(model)

# Register the classmethod (not the class) so deprecation warnings point here.
registerAdapter(LiveFragmentViewForFakeModel.wrap, FakeLiveFragmentModel,
                INavigableFragment)
class FakeElementModelWithLocateChildView(object):
    """
    A simple 'model' object that does nothing, for the purposes of adaptation.
    """
    def __init__(self, children, beLive):
        """
        @param children: an iterable of children to be returned from the view's
        locateChild.

        @param beLive: if true, adapt to a live (Athena) view; otherwise a
        plain Element view.
        """
        self.childs = iter(children) # implemented this way because we want to
                                     # see an error if locateChild is called
                                     # too many times; often this will be a
                                     # sequence of length 1
        self.beLive = beLive

    def __conform__(self, interface):
        """
        Zope-interface adaptation hook: choose the view class by liveness.

        @param interface: IResource (for which there is no adapter) or
        INavigableFragment (for which there is one, depending on this model's
        liveness).
        """
        if interface is not INavigableFragment:
            return None
        if self.beLive:
            return LiveElementViewForModelWithLocateChild(self)
        else:
            return ElementViewForFakeModelWithLocateChild(self)
class _HasLocateChild(_HasModel):
    """
    Has a locateChild that delegates to its model.
    """
    implements(INavigableFragment)
    docFactory = loaders.stan('')

    def locateChild(self, ctx, segments):
        """
        Stub implementation that merely records whether it was called.
        """
        # Consumes the model's child iterator; raises StopIteration if called
        # more times than children were supplied.
        return self.model.childs.next()

class LiveElementViewForModelWithLocateChild(_HasLocateChild, LiveElement):
    """
    Live element with a locateChild.
    """

class ElementViewForFakeModelWithLocateChild(_HasLocateChild, Element):
    """
    Non-live element with a locateChild.
    """
class FakeCustomizableElementModel(object):
    """
    Stand-in model which remembers the username its view was customized for.
    """
    # No customization has happened yet.
    username = None

    def custom(self, username):
        """
        Remember C{username} as the viewer this model was customized for.
        """
        self.username = username
class ElementViewForFakeCustomizableElementModel(_HasModel, Element):
    """
    An L{Element} that delegates C{customizeFor} calls to its model.
    """
    implements(INavigableFragment)
    docFactory = loaders.stan('')

    def customizeFor(self, username):
        """
        Delegate to model.

        @param username: the viewer's username, recorded on the model.

        @return: this view, unchanged.
        """
        self.model.custom(username)
        return self

registerAdapter(ElementViewForFakeCustomizableElementModel,
                FakeCustomizableElementModel,
                INavigableFragment)
class FakeElementModelWithHead(record('head')):
    """
    A simple 'model' object that does nothing, for the purposes of adaptation.

    @ivar head: the value the adapted view's C{head} method should return.
    """
    def _head(self):
        return self.head

class ElementViewForFakeModelWithHead(_HasModel, Element):
    """
    L{Element} implementor of L{INavigableFragment} for
    L{FakeElementModelWithHead}; its C{head} is supplied by the model.
    """
    implements(INavigableFragment)
    docFactory = loaders.stan('')

    def head(self):
        return self.model._head()

registerAdapter(ElementViewForFakeModelWithHead, FakeElementModelWithHead,
                INavigableFragment)
|
|
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied
from django.core.paginator import Paginator
from django.db import transaction
from django.db.models.functions import Lower
from django.http import Http404, HttpResponseBadRequest
from django.shortcuts import get_object_or_404, redirect, render
from django.template.response import TemplateResponse
from django.urls import reverse
from django.utils.functional import cached_property
from django.utils.http import is_safe_url
from django.utils.translation import gettext_lazy as _
from django.utils.translation import ngettext
from django.views.decorators.http import require_POST
from wagtail.admin import messages
from wagtail.admin.auth import PermissionPolicyChecker
from wagtail.admin.forms.workflows import (
TaskChooserSearchForm, WorkflowPagesFormSet, get_task_form_class, get_workflow_edit_handler)
from wagtail.admin.modal_workflow import render_modal_workflow
from wagtail.admin.views.generic import CreateView, DeleteView, EditView, IndexView
from wagtail.core.models import (
Page, Task, TaskState, UserPagePermissionsProxy, Workflow, WorkflowState)
from wagtail.core.permissions import task_permission_policy, workflow_permission_policy
from wagtail.core.utils import resolve_model_string
from wagtail.core.workflows import get_task_types
# View-decorator factory guarding task-management views with the task policy.
task_permission_checker = PermissionPolicyChecker(task_permission_policy)
class Index(IndexView):
    """Listing of workflows; disabled workflows are hidden unless requested."""
    permission_policy = workflow_permission_policy
    model = Workflow
    context_object_name = 'workflows'
    template_name = 'wagtailadmin/workflows/index.html'
    add_url_name = 'wagtailadmin_workflows:add'
    edit_url_name = 'wagtailadmin_workflows:edit'
    page_title = _("Workflows")
    add_item_label = _("Add a workflow")
    header_icon = 'tasks'

    def show_disabled(self):
        """True when the querystring asks to include disabled workflows."""
        flag = self.request.GET.get('show_disabled', 'false')
        return flag == 'true'

    def get_queryset(self):
        workflows = super().get_queryset()
        if self.show_disabled():
            return workflows
        return workflows.filter(active=True)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context.update(showing_disabled=self.show_disabled())
        return context
class Create(CreateView):
    """Create a new Workflow together with its page assignments."""
    permission_policy = workflow_permission_policy
    model = Workflow
    page_title = _("New workflow")
    template_name = 'wagtailadmin/workflows/create.html'
    success_message = _("Workflow '{0}' created.")
    add_url_name = 'wagtailadmin_workflows:add'
    edit_url_name = 'wagtailadmin_workflows:edit'
    index_url_name = 'wagtailadmin_workflows:index'
    header_icon = 'tasks'
    edit_handler = None  # built lazily by get_edit_handler()

    def get_edit_handler(self):
        # Build the edit handler once per view instance, bound to the request.
        if not self.edit_handler:
            self.edit_handler = get_workflow_edit_handler().bind_to(request=self.request)
        return self.edit_handler

    def get_form_class(self):
        return self.get_edit_handler().get_form_class()

    def get_form(self, form_class=None):
        form = super().get_form(form_class)
        # NOTE(review): assumes get_form_class() already populated
        # self.edit_handler (the generic CreateView calls it first) — confirm.
        self.edit_handler = self.edit_handler.bind_to(form=form)
        return form

    def get_pages_formset(self):
        # Formset linking pages to this (new) workflow; bound on POST.
        if self.request.method == 'POST':
            return WorkflowPagesFormSet(self.request.POST, instance=self.object, prefix='pages')
        else:
            return WorkflowPagesFormSet(instance=self.object, prefix='pages')

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['edit_handler'] = self.edit_handler
        context['pages_formset'] = self.get_pages_formset()
        return context

    def form_valid(self, form):
        self.form = form
        with transaction.atomic():
            # Save the workflow first so the inline pages formset can point at it.
            self.object = self.save_instance()

            pages_formset = self.get_pages_formset()
            if pages_formset.is_valid():
                pages_formset.save()

                success_message = self.get_success_message(self.object)
                if success_message is not None:
                    messages.success(self.request, success_message, buttons=[
                        messages.button(reverse(self.edit_url_name, args=(self.object.id,)), _('Edit'))
                    ])

                return redirect(self.get_success_url())

            else:
                # Roll back the already-saved workflow when the formset fails.
                transaction.set_rollback(True)
                return self.form_invalid(form)
class Edit(EditView):
    """Edit an existing Workflow and manage the pages it is assigned to."""
    permission_policy = workflow_permission_policy
    model = Workflow
    page_title = _("Editing workflow")
    template_name = 'wagtailadmin/workflows/edit.html'
    success_message = _("Workflow '{0}' updated.")
    add_url_name = 'wagtailadmin_workflows:add'
    edit_url_name = 'wagtailadmin_workflows:edit'
    delete_url_name = 'wagtailadmin_workflows:disable'
    delete_item_label = _('Disable')
    index_url_name = 'wagtailadmin_workflows:index'
    enable_item_label = _('Enable')
    enable_url_name = 'wagtailadmin_workflows:enable'
    header_icon = 'tasks'
    edit_handler = None  # built lazily by get_edit_handler()
    MAX_PAGES = 5  # page size for the "assigned pages" listing

    def get_edit_handler(self):
        # Build once per view instance, bound to the current request.
        if not self.edit_handler:
            self.edit_handler = get_workflow_edit_handler().bind_to(request=self.request)
        return self.edit_handler

    def get_form_class(self):
        return self.get_edit_handler().get_form_class()

    def get_form(self, form_class=None):
        form = super().get_form(form_class)
        # NOTE(review): assumes get_form_class() already populated
        # self.edit_handler — confirm the generic EditView call order.
        self.edit_handler = self.edit_handler.bind_to(form=form)
        return form

    def get_pages_formset(self):
        # Formset linking pages to this workflow; bound on POST.
        if self.request.method == 'POST':
            return WorkflowPagesFormSet(self.request.POST, instance=self.get_object(), prefix='pages')
        else:
            return WorkflowPagesFormSet(instance=self.get_object(), prefix='pages')

    def get_paginated_pages(self):
        # Get the (paginated) list of Pages to which this Workflow is assigned.
        pages = Page.objects.filter(workflowpage__workflow=self.get_object())
        # The paginator is attached to the queryset so the template can reach it.
        pages.paginator = Paginator(pages, self.MAX_PAGES)
        page_number = int(self.request.GET.get('p', 1))
        paginated_pages = pages.paginator.page(page_number)
        return paginated_pages

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['edit_handler'] = self.edit_handler
        context['pages'] = self.get_paginated_pages()
        context['pages_formset'] = self.get_pages_formset()
        context['can_disable'] = (self.permission_policy is None or self.permission_policy.user_has_permission(self.request.user, 'delete')) and self.object.active
        context['can_enable'] = (self.permission_policy is None or self.permission_policy.user_has_permission(
            self.request.user, 'create')) and not self.object.active

        return context

    @property
    def get_enable_url(self):
        # Property (despite the get_ name) so templates can use it unadorned.
        return reverse(self.enable_url_name, args=(self.object.pk,))

    # NOTE(review): the decorator and the inner `with` block both open atomic
    # scopes; set_rollback(True) marks the enclosing transaction. The nesting
    # looks redundant — confirm before simplifying.
    @transaction.atomic()
    def form_valid(self, form):
        self.form = form
        with transaction.atomic():
            self.object = self.save_instance()
            successful = True

            # Save pages formset
            # Note: The pages formset is hidden when the page is inactive
            if self.object.active:
                pages_formset = self.get_pages_formset()
                if pages_formset.is_valid():
                    pages_formset.save()
                else:
                    # Undo the workflow save when the formset fails.
                    transaction.set_rollback(True)
                    successful = False

            if successful:
                success_message = self.get_success_message()
                if success_message is not None:
                    messages.success(self.request, success_message, buttons=[
                        messages.button(reverse(self.edit_url_name, args=(self.object.id,)), _('Edit'))
                    ])
                return redirect(self.get_success_url())

            return self.form_invalid(form)
class Disable(DeleteView):
    """Deactivate (rather than delete) a workflow, with an in-progress warning."""
    permission_policy = workflow_permission_policy
    model = Workflow
    page_title = _("Disable workflow")
    template_name = 'wagtailadmin/workflows/confirm_disable.html'
    success_message = _("Workflow '{0}' disabled.")
    add_url_name = 'wagtailadmin_workflows:add'
    edit_url_name = 'wagtailadmin_workflows:edit'
    delete_url_name = 'wagtailadmin_workflows:disable'
    index_url_name = 'wagtailadmin_workflows:index'
    header_icon = 'tasks'

    @property
    def get_edit_url(self):
        # Property (despite the get_ name) so templates can use it unadorned.
        return reverse(self.edit_url_name, args=(self.kwargs['pk'],))

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Count in-progress states for *this* workflow only. The previous
        # query omitted the workflow filter and so counted in-progress
        # moderation across all workflows, inflating the warning.
        states_in_progress = WorkflowState.objects.filter(
            workflow=self.get_object(),
            status=WorkflowState.STATUS_IN_PROGRESS,
        ).count()
        context['warning_message'] = ngettext(
            'This workflow is in progress on %(states_in_progress)d page. Disabling this workflow will cancel moderation on this page.',
            'This workflow is in progress on %(states_in_progress)d pages. Disabling this workflow will cancel moderation on these pages.',
            states_in_progress,
        ) % {
            'states_in_progress': states_in_progress,
        }
        return context

    def delete(self, request, *args, **kwargs):
        """Deactivate the workflow instead of deleting the database row."""
        self.object = self.get_object()
        self.object.deactivate(user=request.user)
        messages.success(request, self.get_success_message())
        return redirect(reverse(self.index_url_name))
def usage(request, pk):
    """Show the pages using this workflow that the current user may edit."""
    workflow = get_object_or_404(Workflow, id=pk)

    editable = UserPagePermissionsProxy(request.user).editable_pages()
    pages = workflow.all_pages() & editable
    pages = Paginator(pages, per_page=10).get_page(request.GET.get('p'))

    return render(request, 'wagtailadmin/workflows/usage.html', {
        'workflow': workflow,
        'used_by': pages,
    })
@require_POST
def enable_workflow(request, pk):
    """Reactivate an inactive workflow, then redirect back."""
    workflow = get_object_or_404(Workflow, id=pk)

    # Only users who may create workflows can re-enable one.
    if not workflow_permission_policy.user_has_permission(request.user, 'create'):
        raise PermissionDenied

    if not workflow.active:
        workflow.active = True
        workflow.save()
        messages.success(request, _("Workflow '{0}' enabled.").format(workflow.name))

    # Honour a safe 'next' URL if one was posted; otherwise go to the edit view.
    redirect_to = request.POST.get('next')
    if redirect_to and is_safe_url(url=redirect_to, allowed_hosts={request.get_host()}):
        return redirect(redirect_to)
    return redirect('wagtailadmin_workflows:edit', workflow.id)
@require_POST
def remove_workflow(request, page_pk, workflow_pk=None):
    """Detach a page's workflow; if workflow_pk is given, only when it matches."""
    page = get_object_or_404(Page, id=page_pk)

    if not workflow_permission_policy.user_has_permission(request.user, 'change'):
        raise PermissionDenied

    workflow_page = getattr(page, 'workflowpage', None)
    if workflow_page is not None:
        # The pk check prevents accidental removal of the wrong workflow via a
        # workflow edit page whose page listing is out of date.
        if not workflow_pk or workflow_pk == workflow_page.workflow.pk:
            workflow_page.delete()
            messages.success(request, _("Workflow removed from Page '{0}'.").format(page.get_admin_display_title()))

    # Honour a safe 'next' URL if one was posted; otherwise go to the explorer.
    redirect_to = request.POST.get('next')
    if redirect_to and is_safe_url(url=redirect_to, allowed_hosts={request.get_host()}):
        return redirect(redirect_to)
    return redirect('wagtailadmin_explore', page.id)
class TaskIndex(IndexView):
    """Listing of workflow tasks; disabled tasks are hidden unless requested."""
    permission_policy = task_permission_policy
    model = Task
    context_object_name = 'tasks'
    template_name = 'wagtailadmin/workflows/task_index.html'
    add_url_name = 'wagtailadmin_workflows:select_task_type'
    edit_url_name = 'wagtailadmin_workflows:edit_task'
    page_title = _("Workflow tasks")
    add_item_label = _("New workflow task")
    header_icon = 'thumbtack'

    def show_disabled(self):
        """True when the querystring asks to include disabled tasks."""
        flag = self.request.GET.get('show_disabled', 'false')
        return flag == 'true'

    def get_queryset(self):
        tasks = super().get_queryset()
        if self.show_disabled():
            return tasks
        return tasks.filter(active=True)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context.update(showing_disabled=self.show_disabled())
        return context
def select_task_type(request):
    """Show the task-type chooser, skipping it when only one type exists."""
    if not task_permission_policy.user_has_permission(request.user, 'add'):
        raise PermissionDenied

    # (verbose name, app label, model name, description) per registered type,
    # sorted by lower-cased verbose name.
    task_types = sorted(
        (
            (model.get_verbose_name(), model._meta.app_label, model._meta.model_name, model.get_description())
            for model in get_task_types()
        ),
        key=lambda task_type: task_type[0].lower(),
    )

    if len(task_types) == 1:
        # Only one task type is available - redirect straight to the create
        # form rather than making the user choose.
        _name, app_label, model_name, _description = task_types[0]
        return redirect('wagtailadmin_workflows:add_task', app_label, model_name)

    return render(request, 'wagtailadmin/workflows/select_task_type.html', {
        'task_types': task_types,
        'icon': 'thumbtack',
        'title': _("Workflows"),
    })
class CreateTask(CreateView):
    """Create a task of the concrete type named by URL kwargs app_label/model_name."""
    permission_policy = task_permission_policy
    # NOTE(review): this class attribute is shadowed by the `model`
    # cached_property below and is effectively dead.
    model = None
    page_title = _("New workflow task")
    template_name = 'wagtailadmin/workflows/create_task.html'
    success_message = _("Task '{0}' created.")
    add_url_name = 'wagtailadmin_workflows:add_task'
    edit_url_name = 'wagtailadmin_workflows:edit_task'
    index_url_name = 'wagtailadmin_workflows:task_index'
    header_icon = 'thumbtack'

    @cached_property
    def model(self):
        """Resolve the concrete Task subclass from the URL kwargs, or 404."""
        try:
            content_type = ContentType.objects.get_by_natural_key(self.kwargs['app_label'], self.kwargs['model_name'])
        except (ContentType.DoesNotExist, AttributeError):
            raise Http404

        # Get class
        model = content_type.model_class()

        # Make sure the class is a descendant of Task
        if not issubclass(model, Task) or model is Task:
            raise Http404

        return model

    def get_form_class(self):
        return get_task_form_class(self.model)

    def get_add_url(self):
        # The add URL needs the type kwargs so form errors re-post correctly.
        return reverse(self.add_url_name, kwargs={'app_label': self.kwargs.get('app_label'), 'model_name': self.kwargs.get('model_name')})
class EditTask(EditView):
    """Edit an existing workflow task, presented as its specific subclass."""
    permission_policy = task_permission_policy
    # NOTE: both attributes below are shadowed by the cached_properties of the
    # same name further down; kept for parity with CreateTask.
    model = None
    page_title = _("Editing workflow task")
    template_name = 'wagtailadmin/workflows/edit_task.html'
    success_message = _("Task '{0}' updated.")
    add_url_name = 'wagtailadmin_workflows:select_task_type'
    edit_url_name = 'wagtailadmin_workflows:edit_task'
    delete_url_name = 'wagtailadmin_workflows:disable_task'
    index_url_name = 'wagtailadmin_workflows:task_index'
    delete_item_label = _('Disable')
    enable_item_label = _('Enable')
    enable_url_name = 'wagtailadmin_workflows:enable_task'
    header_icon = 'thumbtack'

    @cached_property
    def model(self):
        """The concrete Task subclass of the object being edited."""
        return type(self.get_object())

    @cached_property
    def page_title(self):
        return _("Editing %(task_type)s") % {'task_type': self.get_object().content_type.name}

    def get_queryset(self):
        # Bug fix: this previously fell through and returned None (implicitly)
        # whenever self.queryset was set, breaking get_object().
        if self.queryset is None:
            return Task.objects.all()
        return self.queryset

    def get_object(self, queryset=None):
        # Return the specific (downcast) task instance, not the base Task row.
        return super().get_object().specific

    def get_form_class(self):
        return get_task_form_class(self.model, for_edit=True)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['can_disable'] = (self.permission_policy is None or self.permission_policy.user_has_permission(self.request.user, 'delete')) and self.object.active
        context['can_enable'] = (self.permission_policy is None or self.permission_policy.user_has_permission(self.request.user, 'create')) and not self.object.active

        # TODO: add warning msg when there are pages currently on this task in a workflow, add interaction like resetting task state when saved
        return context

    @property
    def get_enable_url(self):
        # Property (despite the get_ name) so templates can use it unadorned.
        return reverse(self.enable_url_name, args=(self.object.pk,))
class DisableTask(DeleteView):
    """Deactivate (rather than delete) a task, with an in-progress warning."""
    permission_policy = task_permission_policy
    model = Task
    page_title = _("Disable task")
    template_name = 'wagtailadmin/workflows/confirm_disable_task.html'
    success_message = _("Task '{0}' disabled.")
    add_url_name = 'wagtailadmin_workflows:add_task'
    edit_url_name = 'wagtailadmin_workflows:edit_task'
    delete_url_name = 'wagtailadmin_workflows:disable_task'
    index_url_name = 'wagtailadmin_workflows:task_index'
    header_icon = 'thumbtack'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Count in-progress states for *this* task only. The previous query
        # omitted the task filter and so counted in-progress states across
        # all tasks, inflating the warning.
        states_in_progress = TaskState.objects.filter(
            task=self.get_object(),
            status=TaskState.STATUS_IN_PROGRESS,
        ).count()
        context['warning_message'] = ngettext(
            'This task is in progress on %(states_in_progress)d page. Disabling this task will cause it to be skipped in the moderation workflow.',
            'This task is in progress on %(states_in_progress)d pages. Disabling this task will cause it to be skipped in the moderation workflow.',
            states_in_progress,
        ) % {
            'states_in_progress': states_in_progress,
        }
        return context

    @property
    def get_edit_url(self):
        # Property (despite the get_ name) so templates can use it unadorned.
        return reverse(self.edit_url_name, args=(self.kwargs['pk'],))

    def delete(self, request, *args, **kwargs):
        """Deactivate the task instead of deleting the database row."""
        self.object = self.get_object()
        self.object.deactivate(user=request.user)
        messages.success(request, self.get_success_message())
        return redirect(reverse(self.index_url_name))
@require_POST
def enable_task(request, pk):
    """Reactivate an inactive task, then redirect back."""
    task = get_object_or_404(Task, id=pk)

    # Only users who may create tasks can re-enable one.
    if not task_permission_policy.user_has_permission(request.user, 'create'):
        raise PermissionDenied

    if not task.active:
        task.active = True
        task.save()
        messages.success(request, _("Task '{0}' enabled.").format(task.name))

    # Honour a safe 'next' URL if one was posted; otherwise go to the edit view.
    redirect_to = request.POST.get('next')
    if redirect_to and is_safe_url(url=redirect_to, allowed_hosts={request.get_host()}):
        return redirect(redirect_to)
    return redirect('wagtailadmin_workflows:edit_task', task.id)
def get_chooser_context():
    """Construct the context variables needed by the chooser JS."""
    context = {'step': 'chooser'}
    context['error_label'] = _("Server Error")
    context['error_message'] = _("Report this error to your webmaster with the following information:")
    return context
def get_task_result_data(task):
    """
    Helper function: given a task, return the json data to pass back to the
    chooser panel.
    """
    edit_url = reverse('wagtailadmin_workflows:edit_task', args=[task.id])
    return {
        'id': task.id,
        'name': task.name,
        'edit_url': edit_url,
    }
def task_chooser(request):
    """Render the task-chooser modal.

    Serves three flavours of response:
    * the full chooser modal (GET without search params);
    * just the search-results partial (GET with ``q``/``p``/``task_type``);
    * the "task chosen" step with status 201 when a POSTed create form
      validates, or 400 when creation is not possible.
    """
    task_models = get_task_types()
    create_model = None
    can_create = False

    if task_permission_policy.user_has_permission(request.user, 'add'):
        can_create = len(task_models) != 0
        if len(task_models) == 1:
            # Only one task type is registered, so no "select type" step.
            create_model = task_models[0]
        elif 'create_model' in request.GET:
            # The user picked a type on the "select task type" view; reject
            # anything that is not a registered task model.
            create_model = resolve_model_string(request.GET['create_model'])

            if create_model not in task_models:
                raise Http404

    # Build task types list for "select task type" view
    task_types = [
        (model.get_verbose_name(), model._meta.app_label, model._meta.model_name, model.get_description())
        for model in task_models
    ]
    # sort by lower-cased version of verbose name
    task_types.sort(key=lambda task_type: task_type[0].lower())

    # Build task type choices for filter on "existing task" tab
    task_type_choices = [
        (model, model.get_verbose_name())
        for model in task_models
    ]
    task_type_choices.sort(key=lambda task_type: task_type[1].lower())

    if create_model:
        createform_class = get_task_form_class(create_model)
    else:
        createform_class = None

    q = None
    if 'q' in request.GET or 'p' in request.GET or 'task_type' in request.GET:
        # AJAX search/pagination/filter request: return only the results
        # partial, not the whole modal.
        searchform = TaskChooserSearchForm(request.GET, task_type_choices=task_type_choices)
        tasks = all_tasks = searchform.task_model.objects.order_by(Lower('name'))
        q = ''
        if searchform.is_searching():
            # Note: I decided not to use wagtailsearch here. This is because
            # wagtailsearch creates a new index for each model you make
            # searchable and this might affect someone's quota. I doubt there
            # would ever be enough tasks to require using anything more than
            # an icontains anyway.
            q = searchform.cleaned_data['q']
            tasks = tasks.filter(name__icontains=q)

        # Pagination
        paginator = Paginator(tasks, per_page=10)
        tasks = paginator.get_page(request.GET.get('p'))

        return TemplateResponse(request, "wagtailadmin/workflows/task_chooser/includes/results.html", {
            'task_types': task_types,
            'searchform': searchform,
            'tasks': tasks,
            'all_tasks': all_tasks,
            'query_string': q,
        })
    else:
        if createform_class:
            if request.method == 'POST':
                createform = createform_class(request.POST, request.FILES, prefix='create-task')

                if createform.is_valid():
                    task = createform.save()

                    response = render_modal_workflow(
                        request, None, None,
                        None, json_data={'step': 'task_chosen', 'result': get_task_result_data(task)}
                    )

                    # Use a different status code so we can tell the difference between validation errors and successful creations
                    response.status_code = 201
                    return response
            else:
                createform = createform_class(prefix='create-task')
        else:
            # POSTing when no task type can be created is a client error.
            if request.method == 'POST':
                return HttpResponseBadRequest()

            createform = None

        searchform = TaskChooserSearchForm(task_type_choices=task_type_choices)
        tasks = searchform.task_model.objects.order_by(Lower('name'))

        paginator = Paginator(tasks, per_page=10)
        tasks = paginator.get_page(request.GET.get('p'))

        return render_modal_workflow(request, 'wagtailadmin/workflows/task_chooser/chooser.html', None, {
            'task_types': task_types,
            'tasks': tasks,
            'searchform': searchform,
            'createform': createform,
            'can_create': can_create,
            'add_url': reverse('wagtailadmin_workflows:task_chooser') + '?' + request.GET.urlencode() if create_model else None
        }, json_data=get_chooser_context())
def task_chosen(request, task_id):
    """Return the "task chosen" modal-workflow step for an existing task."""
    task = get_object_or_404(Task, id=task_id)
    json_data = {'step': 'task_chosen', 'result': get_task_result_data(task)}
    return render_modal_workflow(request, None, None, None, json_data=json_data)
|
|
import os
from cuttsum.data import get_resource_manager
from cuttsum.salience import SaliencePredictionAggregator
import gzip
import numpy as np
import re
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.cluster import AffinityPropagation
from datetime import datetime, timedelta
import pandas as pd
from datetime import datetime
class APSummarizer(object):
    """Builds per-event summaries by clustering each hour's sentence latent
    vectors with Affinity Propagation and emitting one update per cluster
    exemplar.  Output is a gzipped TSV under ``$TREC_DATA/ap-summaries``.

    NOTE: Python 2 era code (``xrange``, u-literals) using legacy pandas
    APIs (``DataFrame.sort``, ``as_matrix``).
    """

    def __init__(self):
        # Root output directory; created eagerly so writers need not check.
        self.dir_ = os.path.join(
            os.getenv(u'TREC_DATA', u'.'), u'ap-summaries')
        if not os.path.exists(self.dir_):
            os.makedirs(self.dir_)

    # def get_txt_dir(self, prefix, feature_set):
    #     return os.path.join(self.dir_, prefix + "." + feature_set.fs_name())
    #
    # def get_txt_path(self, event, prefix, feature_set):
    #     txt_dir = self.get_txt_dir(prefix, feature_set)
    #     return os.path.join(txt_dir,
    #         "ap-sal-{}-txt.tsv.gz".format(event.fs_name()))
    #
    # def get_ssv_dir(self, prefix, feature_set):
    #     return os.path.join(self.dir_, prefix + "." + feature_set.fs_name())

    def get_tsv_path(self, event, prefix=None, feature_set=None):
        # ``prefix`` and ``feature_set`` are accepted only for call-site
        # parity with APSalienceSummarizer; they do not affect the path here.
        return os.path.join(self.dir_,
            "ap-{}.tsv.gz".format(event.fs_name()))

    def get_dataframe(self, event):
        """Load this event's summary TSV as a DataFrame, or None if absent."""
        tsv = self.get_tsv_path(event)
        if not os.path.exists(tsv):
            return None
        else:
            with gzip.open(tsv, u'r') as f:
                df = pd.io.parsers.read_csv(
                    f, sep='\t', quoting=3, header=0)
                return df

    def make_summary(self, event, corpus, prefix, feature_set):
        """Cluster each event hour's sentences and write the collected
        exemplar updates to this event's TSV path.

        ``corpus``, ``prefix`` and ``feature_set`` are unused here; they are
        kept for interface parity with APSalienceSummarizer.make_summary.
        """
        string_res = get_resource_manager(u'SentenceStringsResource')
        lvec_res = get_resource_manager(u'SentenceLatentVectorsResource')
        tsv_path = self.get_tsv_path(event)

        updates = []
        epoch = datetime.utcfromtimestamp(0)
        for hour in event.list_event_hours():
            # Updates are stamped with the END of the hour window (hour + 1h),
            # expressed as integer Unix seconds.
            hp1 = hour + timedelta(hours=1)
            timestamp = str(int((hp1 - epoch).total_seconds()))

            string_df = string_res.get_dataframe(event, hour)
            lvec_df = lvec_res.get_dataframe(event, hour)
            if string_df is None or lvec_df is None:
                continue

            # Align the string and latent-vector frames row-for-row on
            # (stream id, sentence id); the asserts below verify alignment.
            string_df = string_df.drop_duplicates(
                subset=[u'stream id', u'sentence id'])
            lvec_df = lvec_df.drop_duplicates(
                subset=[u'stream id', u'sentence id'])
            string_df.sort([u"stream id", u"sentence id"], inplace=True)
            lvec_df.sort([u"stream id", u"sentence id"], inplace=True)

            # Drop sentences whose latent vector is entirely zero.
            X = lvec_df.as_matrix()[:,2:].astype(np.float64)
            good_rows = np.where(X.any(axis=1))[0]
            string_df = string_df.iloc[good_rows]
            lvec_df = lvec_df.iloc[good_rows]
            assert len(string_df) == len(lvec_df)
            n_sents = len(string_df)
            for i in xrange(n_sents):
                assert string_df[u'stream id'].iloc[i] == \
                    lvec_df[u'stream id'].iloc[i]
                assert string_df[u'sentence id'].iloc[i] == \
                    lvec_df[u'sentence id'].iloc[i]
            lvec_df.reset_index(drop=True, inplace=True)
            string_df.reset_index(drop=True, inplace=True)

            # De-duplicate identical sentence text, then re-align lvec_df via
            # a synthetic "update id" key ("<stream id>-<sentence id>").
            string_df.drop_duplicates(subset=['streamcorpus'], inplace=True)
            string_df['update id'] = string_df['stream id'].map(str) + "-" + \
                string_df['sentence id'].map(str)
            good_uids = set(string_df['update id'].tolist())
            lvec_df['update id'] = lvec_df['stream id'].map(str) + "-" + \
                lvec_df['sentence id'].map(str)
            lvec_df = lvec_df[lvec_df['update id'].isin(good_uids)].copy()
            #sal_df['update id'] = sal_df['stream id'].map(str) + "-" + \
            #    sal_df['sentence id'].map(str)
            #sal_df = sal_df[sal_df['update id'].isin(good_uids)].copy()
            string_df.sort([u"stream id", u"sentence id"], inplace=True)
            lvec_df.sort([u"stream id", u"sentence id"], inplace=True)
            #sal_df.sort([u"stream id", u"sentence id"], inplace=True)
            lvec_df.reset_index(drop=True, inplace=True)
            string_df.reset_index(drop=True, inplace=True)
            #sal_df.reset_index(drop=True, inplace=True)
            n_sents = len(string_df)
            for i in xrange(n_sents):
                assert string_df[u'stream id'].iloc[i] == \
                    lvec_df[u'stream id'].iloc[i]
                #assert string_df[u'stream id'].iloc[i] == \
                #    sal_df[u'stream id'].iloc[i]
                assert string_df[u'sentence id'].iloc[i] == \
                    lvec_df[u'sentence id'].iloc[i]
                #assert string_df[u'sentence id'].iloc[i] == \
                #    sal_df[u'sentence id'].iloc[i]

            # Heuristic filter: keep sentences with >9 alphabetic words, from
            # documents shorter than 200 sentences, with little social-media
            # ("Digg", "Facebook", ...) or web-scripting boilerplate.
            good_rows = []
            for name, doc in string_df.groupby("stream id"):
                for rname, row in doc.iterrows():
                    scstring = row["streamcorpus"]
                    words = len(re.findall(r'\b[^\W\d_]+\b', scstring))
                    socs = len(re.findall(
                        r'Digg|del\.icio\.us|Facebook|Kwoff|Myspace',
                        scstring))
                    langs = len(re.findall(
                        r'Flash|JavaScript|CSS', scstring, re.I))

                    assert lvec_df.loc[rname][u'sentence id'] == \
                        row[u'sentence id']
                    assert lvec_df.loc[rname][u'stream id'] == \
                        row[u'stream id']
                    if words > 9 and len(doc) < 200 \
                        and socs < 2 and langs < 2:
                        good_rows.append(rname)

            lvec_df = lvec_df.loc[good_rows]
            string_df = string_df.loc[good_rows]
            n_sents = len(string_df)
            if n_sents < 10:
                # Too few sentences this hour to cluster meaningfully.
                continue

            for i in xrange(n_sents):
                assert string_df[u'stream id'].iloc[i] == \
                    lvec_df[u'stream id'].iloc[i]
                assert string_df[u'sentence id'].iloc[i] == \
                    lvec_df[u'sentence id'].iloc[i]
            del lvec_df['update id']
            #del sal_df['update id']

            # Standardise pairwise cosine similarities (z-score over the
            # upper triangle), then shift so the max off-diagonal entry is 0.
            X = lvec_df.as_matrix()[:,2:].astype(np.float64)
            A = cosine_similarity(X)
            Aupper = A[np.triu_indices_from(A, k=1)]
            Amu = np.mean(Aupper)
            Astd = np.std(Aupper)
            A = (A - Amu) / Astd
            A = A - np.max(A[np.triu_indices_from(A, k=1)])

            af = AffinityPropagation(
                preference=None, affinity='precomputed', max_iter=100,
                damping=.7, verbose=False).fit(A)
            II = np.arange(n_sents)
            if af.cluster_centers_indices_ is None:
                # AP did not converge to any exemplars for this hour.
                continue
            for cnum, cluster in enumerate(np.unique(af.labels_)):
                # ``e`` is the row index of this cluster's exemplar sentence.
                e = af.cluster_centers_indices_[cluster]
                cluster_size = II[cluster == af.labels_].shape[0]
                scstring = string_df.iloc[e][u'streamcorpus']
                stream_id = string_df.iloc[e][u'stream id']
                sentence_id = str(string_df.iloc[e][u'sentence id'])
                updates.append({"stream id": stream_id,
                                "sentence id": sentence_id,
                                "hour": hour,
                                "timestamp": timestamp,
                                "cluster size": cluster_size,
                                "string": scstring})

        df = pd.DataFrame(updates,
            columns=["stream id", "sentence id", "hour", "timestamp",
                     "cluster size", "string"])

        with gzip.open(tsv_path, u'w') as f:
            df.to_csv(f, sep='\t', index=False, index_label=False)
class APSalienceSummarizer(object):
    """Variant of APSummarizer that biases Affinity Propagation exemplar
    selection with per-sentence salience predictions (used as AP
    preferences).  Output TSVs live under
    ``$TREC_DATA/ap-sal-summaries/<prefix>.<feature_set>/``.
    """

    def __init__(self):
        # Root output directory; per-(prefix, feature set) subdirectories
        # are produced by get_tsv_dir.
        self.dir_ = os.path.join(
            os.getenv(u'TREC_DATA', u'.'), u'ap-sal-summaries')
        if not os.path.exists(self.dir_):
            os.makedirs(self.dir_)

    def get_tsv_dir(self, prefix, feature_set):
        # NOTE(review): this subdirectory is never created in this class —
        # presumably a caller makes it before make_summary writes; verify.
        return os.path.join(self.dir_, prefix + "." + feature_set.fs_name())

    def get_tsv_path(self, event, prefix, feature_set):
        tsv_dir = self.get_tsv_dir(prefix, feature_set)
        return os.path.join(tsv_dir,
            "ap-sal-{}.tsv.gz".format(event.fs_name()))

    def get_dataframe(self, event, prefix, feature_set):
        """Load this event's summary TSV as a DataFrame, or None if absent."""
        tsv = self.get_tsv_path(event, prefix, feature_set)
        if not os.path.exists(tsv):
            return None
        else:
            with gzip.open(tsv, u'r') as f:
                df = pd.io.parsers.read_csv(
                    f, sep='\t', quoting=3, header=0)
                return df

    def make_summary(self, event, corpus, prefix, feature_set):
        """Cluster each event hour's sentences with salience-weighted
        Affinity Propagation and write exemplar updates to the event's TSV.

        Unlike APSummarizer, a third frame of salience predictions is kept
        aligned with the string/latent-vector frames, and its per-sentence
        mean becomes the AP ``preference`` vector.  ``corpus`` is unused.
        """
        string_res = get_resource_manager(u'SentenceStringsResource')
        lvec_res = get_resource_manager(u'SentenceLatentVectorsResource')
        spa = SaliencePredictionAggregator()
        tsv_path = self.get_tsv_path(event, prefix, feature_set)

        updates = []
        epoch = datetime.utcfromtimestamp(0)
        for hour in event.list_event_hours():
            # Updates are stamped with the END of the hour window (hour + 1h).
            hp1 = hour + timedelta(hours=1)
            timestamp = str(int((hp1 - epoch).total_seconds()))

            string_df = string_res.get_dataframe(event, hour)
            lvec_df = lvec_res.get_dataframe(event, hour)
            sal_df = spa.get_dataframe(event, hour, prefix, feature_set)
            if string_df is None or lvec_df is None or sal_df is None:
                continue

            # Align all three frames row-for-row on (stream id, sentence id);
            # the asserts below verify the alignment at each stage.
            string_df = string_df.drop_duplicates(
                subset=[u'stream id', u'sentence id'])
            lvec_df = lvec_df.drop_duplicates(
                subset=[u'stream id', u'sentence id'])
            sal_df = sal_df.drop_duplicates(
                subset=[u'stream id', u'sentence id'])
            string_df.sort([u"stream id", u"sentence id"], inplace=True)
            lvec_df.sort([u"stream id", u"sentence id"], inplace=True)
            sal_df.sort([u"stream id", u"sentence id"], inplace=True)

            # Drop sentences whose latent vector is entirely zero.
            X = lvec_df.as_matrix()[:,2:].astype(np.float64)
            good_rows = np.where(X.any(axis=1))[0]
            string_df = string_df.iloc[good_rows]
            lvec_df = lvec_df.iloc[good_rows]
            sal_df = sal_df.iloc[good_rows]
            assert len(string_df) == len(lvec_df)
            assert len(string_df) == len(sal_df)
            n_sents = len(string_df)
            for i in xrange(n_sents):
                assert string_df[u'stream id'].iloc[i] == \
                    lvec_df[u'stream id'].iloc[i]
                assert string_df[u'stream id'].iloc[i] == \
                    sal_df[u'stream id'].iloc[i]
                assert string_df[u'sentence id'].iloc[i] == \
                    lvec_df[u'sentence id'].iloc[i]
                assert string_df[u'sentence id'].iloc[i] == \
                    sal_df[u'sentence id'].iloc[i]
            lvec_df.reset_index(drop=True, inplace=True)
            string_df.reset_index(drop=True, inplace=True)
            sal_df.reset_index(drop=True, inplace=True)

            # De-duplicate identical sentence text, then re-align the other
            # frames via a synthetic "update id" key.
            string_df.drop_duplicates(subset=['streamcorpus'], inplace=True)
            string_df['update id'] = string_df['stream id'].map(str) + "-" + \
                string_df['sentence id'].map(str)
            good_uids = set(string_df['update id'].tolist())
            lvec_df['update id'] = lvec_df['stream id'].map(str) + "-" + \
                lvec_df['sentence id'].map(str)
            lvec_df = lvec_df[lvec_df['update id'].isin(good_uids)].copy()
            sal_df['update id'] = sal_df['stream id'].map(str) + "-" + \
                sal_df['sentence id'].map(str)
            sal_df = sal_df[sal_df['update id'].isin(good_uids)].copy()
            string_df.sort([u"stream id", u"sentence id"], inplace=True)
            lvec_df.sort([u"stream id", u"sentence id"], inplace=True)
            sal_df.sort([u"stream id", u"sentence id"], inplace=True)
            lvec_df.reset_index(drop=True, inplace=True)
            string_df.reset_index(drop=True, inplace=True)
            sal_df.reset_index(drop=True, inplace=True)
            n_sents = len(string_df)
            for i in xrange(n_sents):
                assert string_df[u'stream id'].iloc[i] == \
                    lvec_df[u'stream id'].iloc[i]
                assert string_df[u'stream id'].iloc[i] == \
                    sal_df[u'stream id'].iloc[i]
                assert string_df[u'sentence id'].iloc[i] == \
                    lvec_df[u'sentence id'].iloc[i]
                assert string_df[u'sentence id'].iloc[i] == \
                    sal_df[u'sentence id'].iloc[i]

            # Heuristic filter: keep sentences with >9 alphabetic words, from
            # documents shorter than 200 sentences, with little social-media
            # or web-scripting boilerplate.
            good_rows = []
            for name, doc in string_df.groupby("stream id"):
                for rname, row in doc.iterrows():
                    scstring = row["streamcorpus"]
                    words = len(re.findall(r'\b[^\W\d_]+\b', scstring))
                    socs = len(re.findall(
                        r'Digg|del\.icio\.us|Facebook|Kwoff|Myspace',
                        scstring))
                    langs = len(re.findall(
                        r'Flash|JavaScript|CSS', scstring, re.I))

                    assert lvec_df.loc[rname][u'sentence id'] == \
                        row[u'sentence id']
                    assert lvec_df.loc[rname][u'stream id'] == \
                        row[u'stream id']
                    assert sal_df.loc[rname][u'sentence id'] == \
                        row[u'sentence id']
                    assert sal_df.loc[rname][u'stream id'] == \
                        row[u'stream id']
                    if words > 9 and len(doc) < 200 \
                        and socs < 2 and langs < 2:
                        good_rows.append(rname)

            lvec_df = lvec_df.loc[good_rows]
            string_df = string_df.loc[good_rows]
            sal_df = sal_df.loc[good_rows]
            n_sents = len(string_df)
            if n_sents < 10:
                # Too few sentences this hour to cluster meaningfully.
                continue

            for i in xrange(n_sents):
                assert string_df[u'stream id'].iloc[i] == \
                    lvec_df[u'stream id'].iloc[i]
                assert string_df[u'stream id'].iloc[i] == \
                    sal_df[u'stream id'].iloc[i]
                assert string_df[u'sentence id'].iloc[i] == \
                    lvec_df[u'sentence id'].iloc[i]
                assert string_df[u'sentence id'].iloc[i] == \
                    sal_df[u'sentence id'].iloc[i]
            del lvec_df['update id']
            del sal_df['update id']

            # s: per-sentence mean salience across prediction columns.
            X = lvec_df.as_matrix()[:,2:].astype(np.float64)
            S = sal_df.as_matrix()[:,2:].astype(np.float64)
            s = np.mean(S, axis=1)

            # Standardise pairwise cosine similarities, then shift both the
            # similarity matrix and the salience preferences by the same
            # maximum so the AP preference scale is comparable.
            A = cosine_similarity(X)
            Aupper = A[np.triu_indices_from(A, k=1)]
            Amu = np.mean(Aupper)
            Astd = np.std(Aupper)
            A = (A - Amu) / Astd
            max_sim = np.max(
                (np.max(A[np.triu_indices_from(A, k=1)]), np.max(s)))
            A = A - max_sim
            P = s - max_sim

            #P = MinMaxScaler(feature_range=(-9, -5)).fit_transform(s)
            #A = MinMaxScaler(feature_range=(-3, -1)).fit_transform(A)
            #std_s = StandardScaler().fit_transform(s)
            #assert X.shape[0] == s.shape[0]
            #period = (((hour + timedelta(hours=6)) - \
            #    event.start).total_seconds() // (6 * 3600))
            #cutoff = 2. * period / (1. + period)

            af = AffinityPropagation(
                preference=P, affinity='precomputed', max_iter=500,
                damping=.7, verbose=False).fit(A)
            if af.cluster_centers_indices_ is None:
                # AP did not converge to any exemplars for this hour.
                continue
            II = np.arange(n_sents)
            for cnum, cluster in enumerate(np.unique(af.labels_)):
                # ``e`` is the row index of this cluster's exemplar sentence.
                e = af.cluster_centers_indices_[cluster]
                cluster_size = II[cluster == af.labels_].shape[0]
                scstring = string_df.iloc[e][u'streamcorpus']
                stream_id = string_df.iloc[e][u'stream id']
                sentence_id = str(string_df.iloc[e][u'sentence id'])
                updates.append({"stream id": stream_id,
                                "sentence id": sentence_id,
                                "hour": hour,
                                "timestamp": timestamp,
                                "cluster size": cluster_size,
                                "string": scstring})

        df = pd.DataFrame(updates,
            columns=["stream id", "sentence id", "hour", "timestamp",
                     "cluster size", "string"])

        with gzip.open(tsv_path, u'w') as f:
            df.to_csv(f, sep='\t', index=False, index_label=False)
#if std_s[e] > cutoff:
# if Xcache is None:
# Xcache = X[e]
# else:
# if np.max(cosine_similarity(Xcache, X[e])) >= .5:
# continue
# else:
# Xcache = np.vstack((Xcache, X[e]))
# scstring = string_df.iloc[e][u'streamcorpus']
# sf.write(' '.join([
# str(event.query_id).split(".")[1],
# "cunlp", "apsal",
# string_df.iloc[e][u'stream id'],
# str(string_df.iloc[e][u'sentence id']),
# timestamp, str(s[e]) + "\n"]))
# tf.write('\t'.join([
# str(event.query_id).split(".")[1],
# "cunlp", "apsal",
# string_df.iloc[e][u'stream id'],
# str(string_df.iloc[e][u'sentence id']),
# timestamp, str(s[e]), scstring + "\n"]))
|
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.http import HttpResponse
from django.test import TestCase, override_settings
from django_auth_ldap.backend import _LDAPUser
from django.test.client import RequestFactory
from typing import Any, Callable, Dict, Optional, Text
from builtins import object
from oauth2client.crypt import AppIdentityError
from django.core import signing
from django.core.urlresolvers import reverse
import jwt
import mock
import re
from zerver.forms import HomepageForm
from zerver.lib.actions import do_deactivate_realm, do_deactivate_user, \
do_reactivate_realm, do_reactivate_user
from zerver.lib.initial_password import initial_password
from zerver.lib.session_user import get_session_dict_user
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.models import \
get_realm, get_user_profile_by_email, email_to_username, UserProfile, \
PreregistrationUser, Realm
from confirmation.models import Confirmation
from zproject.backends import ZulipDummyBackend, EmailAuthBackend, \
GoogleMobileOauth2Backend, ZulipRemoteUserBackend, ZulipLDAPAuthBackend, \
ZulipLDAPUserPopulator, DevAuthBackend, GitHubAuthBackend, ZulipAuthMixin, \
dev_auth_enabled, password_auth_enabled, github_auth_enabled, \
SocialAuthMixin, AUTH_BACKEND_NAME_MAP
from zerver.views.auth import maybe_send_to_registration
from social.exceptions import AuthFailed
from social.strategies.django_strategy import DjangoStrategy
from social.storage.django_orm import BaseDjangoStorage
from social.backends.github import GithubOrganizationOAuth2, GithubTeamOAuth2, \
GithubOAuth2
from six.moves import urllib
from six.moves.http_cookies import SimpleCookie
import ujson
from zerver.lib.test_helpers import MockLDAP
class AuthBackendTest(TestCase):
    """Exercises each Zulip authentication backend via a shared
    verify_backend() helper that covers good/bad credentials, user and
    realm deactivation, and per-server / per-realm backend disabling."""

    def verify_backend(self, backend, good_args=None,
                       good_kwargs=None, bad_kwargs=None,
                       email_to_username=None):
        # type: (Any, List[Any], Dict[str, Any], Dict[str, Any], Callable[[Text], Text]) -> None
        """Assert ``backend`` authenticates hamlet@zulip.com with the good
        arguments and rejects the bad ones, across deactivation states.

        ``email_to_username`` optionally maps the email to the username the
        backend expects (e.g. for SSO_APPEND_DOMAIN setups)."""
        if good_args is None:
            good_args = []
        if good_kwargs is None:
            good_kwargs = {}
        email = u"hamlet@zulip.com"
        user_profile = get_user_profile_by_email(email)
        username = email
        if email_to_username is not None:
            username = email_to_username(email)

        # If bad_kwargs was specified, verify auth fails in that case
        if bad_kwargs is not None:
            self.assertIsNone(backend.authenticate(username, **bad_kwargs))

        # Verify auth works
        result = backend.authenticate(username, *good_args, **good_kwargs)
        self.assertEqual(user_profile, result)

        # Verify auth fails with a deactivated user
        do_deactivate_user(user_profile)
        self.assertIsNone(backend.authenticate(username, *good_args, **good_kwargs))

        # Reactivate the user and verify auth works again
        do_reactivate_user(user_profile)
        result = backend.authenticate(username, *good_args, **good_kwargs)
        self.assertEqual(user_profile, result)

        # Verify auth fails with a deactivated realm
        do_deactivate_realm(user_profile.realm)
        self.assertIsNone(backend.authenticate(username, *good_args, **good_kwargs))

        # Verify auth works again after reactivating the realm
        do_reactivate_realm(user_profile.realm)
        result = backend.authenticate(username, *good_args, **good_kwargs)
        self.assertEqual(user_profile, result)

        # ZulipDummyBackend isn't a real backend so the remainder
        # doesn't make sense for it
        if isinstance(backend, ZulipDummyBackend):
            return

        # Verify auth fails if the auth backend is disabled on server
        with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipDummyBackend',)):
            self.assertIsNone(backend.authenticate(username, *good_args, **good_kwargs))

        # Verify auth fails if the auth backend is disabled for the realm
        # (find this backend's bit in the realm's authentication_methods
        # bitfield, clear it, check auth fails, then restore it).
        for backend_name in AUTH_BACKEND_NAME_MAP.keys():
            if isinstance(backend, AUTH_BACKEND_NAME_MAP[backend_name]):
                break
        index = getattr(user_profile.realm.authentication_methods, backend_name).number
        user_profile.realm.authentication_methods.set_bit(index, False)
        user_profile.realm.save()
        self.assertIsNone(backend.authenticate(username, *good_args, **good_kwargs))
        user_profile.realm.authentication_methods.set_bit(index, True)
        user_profile.realm.save()

    def test_dummy_backend(self):
        # type: () -> None
        """The dummy backend authenticates iff use_dummy_backend is True."""
        self.verify_backend(ZulipDummyBackend(),
                            good_kwargs=dict(use_dummy_backend=True),
                            bad_kwargs=dict(use_dummy_backend=False))

    def setup_subdomain(self, user_profile):
        # type: (UserProfile) -> None
        """Give the user's realm the 'zulip' subdomain for subdomain tests."""
        realm = user_profile.realm
        realm.string_id = 'zulip'
        realm.save()

    def test_email_auth_backend(self):
        # type: () -> None
        """Password auth works with the right password, with and without
        subdomain checking enabled."""
        email = "hamlet@zulip.com"
        user_profile = get_user_profile_by_email(email)
        password = "testpassword"
        user_profile.set_password(password)
        user_profile.save()
        self.setup_subdomain(user_profile)

        self.verify_backend(EmailAuthBackend(),
                            bad_kwargs=dict(password=''),
                            good_kwargs=dict(password=password))

        # Subdomain is ignored when feature is not enabled
        self.verify_backend(EmailAuthBackend(),
                            good_kwargs=dict(password=password,
                                             realm_subdomain='acme',
                                             return_data=dict()))

        with self.settings(REALMS_HAVE_SUBDOMAINS=True):
            # With subdomains, authenticating with the right subdomain
            # works; using the wrong subdomain doesn't
            self.verify_backend(EmailAuthBackend(),
                                good_kwargs=dict(password=password,
                                                 realm_subdomain='zulip',
                                                 return_data=dict()),
                                bad_kwargs=dict(password=password,
                                                realm_subdomain='acme',
                                                return_data=dict()))
            # Things work normally in the event that we're using a
            # non-subdomain login page, even if subdomains are enabled
            self.verify_backend(EmailAuthBackend(),
                                bad_kwargs=dict(password="wrong"),
                                good_kwargs=dict(password=password))

    def test_email_auth_backend_disabled_password_auth(self):
        # type: () -> None
        """Even a correct password is rejected when the realm disables
        password auth."""
        email = u"hamlet@zulip.com"
        user_profile = get_user_profile_by_email(email)
        password = "testpassword"
        user_profile.set_password(password)
        user_profile.save()
        # Verify if a realm has password auth disabled, correct password is rejected
        with mock.patch('zproject.backends.password_auth_enabled', return_value=False):
            self.assertIsNone(EmailAuthBackend().authenticate(email, password))

    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.GoogleMobileOauth2Backend',))
    def test_google_backend(self):
        # type: () -> None
        """Google OAuth2 backend: token verification outcomes are mocked and
        the resulting return_data flags are checked."""
        email = "hamlet@zulip.com"
        backend = GoogleMobileOauth2Backend()
        payload = dict(email_verified=True,
                       email=email)
        user_profile = get_user_profile_by_email(email)
        self.setup_subdomain(user_profile)

        with mock.patch('apiclient.sample_tools.client.verify_id_token', return_value=payload):
            self.verify_backend(backend)

        # With REALMS_HAVE_SUBDOMAINS off, subdomain is ignored
        with mock.patch('apiclient.sample_tools.client.verify_id_token', return_value=payload):
            self.verify_backend(backend,
                                good_kwargs=dict(realm_subdomain='acme'))

        with self.settings(REALMS_HAVE_SUBDOMAINS=True):
            # With subdomains, authenticating with the right subdomain
            # works; using the wrong subdomain doesn't
            with mock.patch('apiclient.sample_tools.client.verify_id_token', return_value=payload):
                self.verify_backend(backend,
                                    good_kwargs=dict(realm_subdomain="zulip"),
                                    bad_kwargs=dict(realm_subdomain='acme'))

        # Verify valid_attestation parameter is set correctly
        unverified_payload = dict(email_verified=False)
        with mock.patch('apiclient.sample_tools.client.verify_id_token', return_value=unverified_payload):
            ret = dict()  # type: Dict[str, str]
            result = backend.authenticate(return_data=ret)
            self.assertIsNone(result)
            self.assertFalse(ret["valid_attestation"])

        nonexistent_user_payload = dict(email_verified=True, email="invalid@zulip.com")
        with mock.patch('apiclient.sample_tools.client.verify_id_token',
                        return_value=nonexistent_user_payload):
            ret = dict()
            result = backend.authenticate(return_data=ret)
            self.assertIsNone(result)
            # Token was valid even though no such user exists.
            self.assertTrue(ret["valid_attestation"])
        with mock.patch('apiclient.sample_tools.client.verify_id_token',
                        side_effect=AppIdentityError):
            ret = dict()
            result = backend.authenticate(return_data=ret)
            self.assertIsNone(result)

    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
    def test_ldap_backend(self):
        # type: () -> None
        """LDAP backend with django_auth_ldap internals mocked out."""
        email = "hamlet@zulip.com"
        password = "test_password"
        user_profile = get_user_profile_by_email(email)
        self.setup_subdomain(user_profile)

        backend = ZulipLDAPAuthBackend()

        # Test LDAP auth fails when LDAP server rejects password
        with mock.patch('django_auth_ldap.backend._LDAPUser._authenticate_user_dn',
                        side_effect=_LDAPUser.AuthenticationFailed("Failed")), (
             mock.patch('django_auth_ldap.backend._LDAPUser._check_requirements')), (
             mock.patch('django_auth_ldap.backend._LDAPUser._get_user_attrs',
                        return_value=dict(full_name=['Hamlet']))):
            self.assertIsNone(backend.authenticate(email, password))

        # For this backend, we mock the internals of django_auth_ldap
        with mock.patch('django_auth_ldap.backend._LDAPUser._authenticate_user_dn'), (
             mock.patch('django_auth_ldap.backend._LDAPUser._check_requirements')), (
             mock.patch('django_auth_ldap.backend._LDAPUser._get_user_attrs',
                        return_value=dict(full_name=['Hamlet']))):
            self.verify_backend(backend, good_kwargs=dict(password=password))

        with mock.patch('django_auth_ldap.backend._LDAPUser._authenticate_user_dn'), (
             mock.patch('django_auth_ldap.backend._LDAPUser._check_requirements')), (
             mock.patch('django_auth_ldap.backend._LDAPUser._get_user_attrs',
                        return_value=dict(full_name=['Hamlet']))):
            self.verify_backend(backend, good_kwargs=dict(password=password,
                                                          realm_subdomain='acme'))

        with self.settings(REALMS_HAVE_SUBDOMAINS=True):
            # With subdomains, authenticating with the right subdomain
            # works; using the wrong subdomain doesn't
            with mock.patch('django_auth_ldap.backend._LDAPUser._authenticate_user_dn'), (
                 mock.patch('django_auth_ldap.backend._LDAPUser._check_requirements')), (
                 mock.patch('django_auth_ldap.backend._LDAPUser._get_user_attrs',
                            return_value=dict(full_name=['Hamlet']))):
                self.verify_backend(backend,
                                    bad_kwargs=dict(password=password,
                                                    realm_subdomain='acme'),
                                    good_kwargs=dict(password=password,
                                                     realm_subdomain='zulip'))

    def test_devauth_backend(self):
        # type: () -> None
        self.verify_backend(DevAuthBackend())

    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',))
    def test_remote_user_backend(self):
        # type: () -> None
        self.setup_subdomain(get_user_profile_by_email(u'hamlet@zulip.com'))
        self.verify_backend(ZulipRemoteUserBackend(),
                            good_kwargs=dict(realm_subdomain='acme'))

        with self.settings(REALMS_HAVE_SUBDOMAINS=True):
            # With subdomains, authenticating with the right subdomain
            # works; using the wrong subdomain doesn't
            self.verify_backend(ZulipRemoteUserBackend(),
                                good_kwargs=dict(realm_subdomain='zulip'),
                                bad_kwargs=dict(realm_subdomain='acme'))

    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',))
    def test_remote_user_backend_sso_append_domain(self):
        # type: () -> None
        """Remote-user backend where the username is the email's local part
        and SSO_APPEND_DOMAIN supplies the domain."""
        self.setup_subdomain(get_user_profile_by_email(u'hamlet@zulip.com'))
        with self.settings(SSO_APPEND_DOMAIN='zulip.com'):
            self.verify_backend(ZulipRemoteUserBackend(),
                                email_to_username=email_to_username,
                                good_kwargs=dict(realm_subdomain='acme'))

        with self.settings(REALMS_HAVE_SUBDOMAINS=True):
            # With subdomains, authenticating with the right subdomain
            # works; using the wrong subdomain doesn't
            with self.settings(SSO_APPEND_DOMAIN='zulip.com'):
                self.verify_backend(ZulipRemoteUserBackend(),
                                    email_to_username=email_to_username,
                                    good_kwargs=dict(realm_subdomain='zulip'),
                                    bad_kwargs=dict(realm_subdomain='acme'))

    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.GitHubAuthBackend',))
    def test_github_backend(self):
        # type: () -> None
        email = 'hamlet@zulip.com'
        self.setup_subdomain(get_user_profile_by_email(email))
        good_kwargs = dict(response=dict(email=email), return_data=dict(),
                           realm_subdomain='acme')
        self.verify_backend(GitHubAuthBackend(),
                            good_kwargs=good_kwargs,
                            bad_kwargs=dict())
        with self.settings(REALMS_HAVE_SUBDOMAINS=True):
            # With subdomains, authenticating with the right subdomain
            # works; using the wrong subdomain doesn't
            good_kwargs = dict(response=dict(email=email), return_data=dict(),
                               realm_subdomain='zulip')
            bad_kwargs = dict(response=dict(email=email), return_data=dict(),
                              realm_subdomain='acme')
            self.verify_backend(GitHubAuthBackend(),
                                good_kwargs=good_kwargs,
                                bad_kwargs=bad_kwargs)
class SocialAuthMixinTest(ZulipTestCase):
    """Verify the bare SocialAuthMixin leaves its hook methods abstract."""

    def test_social_auth_mixing(self):
        # type: () -> None
        mixin = SocialAuthMixin()
        # Both hooks must raise until a concrete backend overrides them.
        for hook in (mixin.get_email_address, mixin.get_full_name):
            with self.assertRaises(NotImplementedError):
                hook()
class GitHubAuthBackendTest(ZulipTestCase):
    def setUp(self):
        # type: () -> None
        """Wire a GitHubAuthBackend to a fake Django strategy and request for
        hamlet, so do_auth flows can run without a real OAuth exchange."""
        self.email = 'hamlet@zulip.com'
        self.name = 'Hamlet'
        self.backend = GitHubAuthBackend()
        self.backend.strategy = DjangoStrategy(storage=BaseDjangoStorage())
        self.user_profile = get_user_profile_by_email(self.email)
        self.user_profile.backend = self.backend

        rf = RequestFactory()
        request = rf.get('/complete')
        request.session = {}
        # Host implies the 'acme' subdomain for subdomain-aware code paths.
        request.get_host = lambda: 'acme.testserver'
        request.user = self.user_profile
        self.backend.strategy.request = request
    def do_auth(self, *args, **kwargs):
        # type: (*Any, **Any) -> UserProfile
        """Run the backend's authenticate() with GitHub auth enabled; used as
        a side_effect for mocked social-auth do_auth calls below."""
        with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.GitHubAuthBackend',)):
            return self.backend.authenticate(*args, **kwargs)
    def test_github_auth_enabled(self):
        # type: () -> None
        """github_auth_enabled() reflects the configured backends."""
        with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.GitHubAuthBackend',)):
            self.assertTrue(github_auth_enabled())
    def test_full_name_with_missing_key(self):
        # type: () -> None
        """get_full_name() falls back to '' when no name data is present."""
        self.assertEqual(self.backend.get_full_name(), '')
    def test_github_backend_do_auth_without_subdomains(self):
        # type: () -> None
        """Without subdomains, do_auth's redirect lacks the subdomain flag."""
        with mock.patch('social.backends.github.GithubOAuth2.do_auth',
                        side_effect=self.do_auth), \
                mock.patch('zerver.views.auth.login'):
            response = dict(email=self.email, name=self.name)
            result = self.backend.do_auth(response=response)
            self.assertNotIn('subdomain=1', result.url)
    def test_github_backend_do_auth_with_non_existing_subdomain(self):
        # type: () -> None
        """An unknown subdomain in the session yields a subdomain=1 redirect."""
        with mock.patch('social.backends.github.GithubOAuth2.do_auth',
                        side_effect=self.do_auth):
            with self.settings(REALMS_HAVE_SUBDOMAINS=True):
                self.backend.strategy.session_set('subdomain', 'test')
                response = dict(email=self.email, name=self.name)
                result = self.backend.do_auth(response=response)
                self.assertIn('subdomain=1', result.url)
    def test_github_backend_do_auth_with_subdomains(self):
        # type: () -> None
        """A valid session subdomain redirects to that subdomain's login URL."""
        with mock.patch('social.backends.github.GithubOAuth2.do_auth',
                        side_effect=self.do_auth):
            with self.settings(REALMS_HAVE_SUBDOMAINS=True):
                self.backend.strategy.session_set('subdomain', 'zulip')
                response = dict(email=self.email, name=self.name)
                result = self.backend.do_auth(response=response)
                self.assertEqual('http://zulip.testserver/accounts/login/subdomain/', result.url)
    def test_github_backend_do_auth_for_default(self):
        # type: () -> None
        """Plain GitHub OAuth hands the user off to process_do_auth."""
        with mock.patch('social.backends.github.GithubOAuth2.do_auth',
                        side_effect=self.do_auth), \
                mock.patch('zproject.backends.SocialAuthMixin.process_do_auth') as result:
            response = dict(email=self.email, name=self.name)
            self.backend.do_auth('fake-access-token', response=response)

            kwargs = {'realm_subdomain': 'acme',
                      'response': response,
                      'return_data': {}}
            result.assert_called_with(self.user_profile, 'fake-access-token', **kwargs)
    def test_github_backend_do_auth_for_team(self):
        # type: () -> None
        """With a team ID configured, the team OAuth flow is used and the
        authenticated profile reaches process_do_auth."""
        with mock.patch('social.backends.github.GithubTeamOAuth2.do_auth',
                        side_effect=self.do_auth), \
                mock.patch('zproject.backends.SocialAuthMixin.process_do_auth') as result:
            response = dict(email=self.email, name=self.name)
            with self.settings(SOCIAL_AUTH_GITHUB_TEAM_ID='zulip-webapp'):
                self.backend.do_auth('fake-access-token', response=response)

                kwargs = {'realm_subdomain': 'acme',
                          'response': response,
                          'return_data': {}}
                result.assert_called_with(self.user_profile, 'fake-access-token', **kwargs)
    def test_github_backend_do_auth_for_team_auth_failed(self):
        # type: () -> None
        # If the GitHub team membership check raises AuthFailed, no user is
        # authenticated: process_do_auth gets None instead of a profile.
        with mock.patch('social.backends.github.GithubTeamOAuth2.do_auth',
                        side_effect=AuthFailed('Not found')), \
                mock.patch('logging.info'), \
                mock.patch('zproject.backends.SocialAuthMixin.process_do_auth') as result:
            response = dict(email=self.email, name=self.name)
            with self.settings(SOCIAL_AUTH_GITHUB_TEAM_ID='zulip-webapp'):
                self.backend.do_auth('fake-access-token', response=response)
                kwargs = {'realm_subdomain': 'acme',
                          'response': response,
                          'return_data': {}}
                result.assert_called_with(None, 'fake-access-token', **kwargs)
    def test_github_backend_do_auth_for_org(self):
        # type: () -> None
        # With SOCIAL_AUTH_GITHUB_ORG_NAME set, the organization-scoped
        # GitHub backend is used; success passes the user to
        # process_do_auth.
        with mock.patch('social.backends.github.GithubOrganizationOAuth2.do_auth',
                        side_effect=self.do_auth), \
                mock.patch('zproject.backends.SocialAuthMixin.process_do_auth') as result:
            response = dict(email=self.email, name=self.name)
            with self.settings(SOCIAL_AUTH_GITHUB_ORG_NAME='Zulip'):
                self.backend.do_auth('fake-access-token', response=response)
                kwargs = {'realm_subdomain': 'acme',
                          'response': response,
                          'return_data': {}}
                result.assert_called_with(self.user_profile, 'fake-access-token', **kwargs)
    def test_github_backend_do_auth_for_org_auth_failed(self):
        # type: () -> None
        # If the organization membership check raises AuthFailed, no user
        # is authenticated: process_do_auth gets None.
        with mock.patch('social.backends.github.GithubOrganizationOAuth2.do_auth',
                        side_effect=AuthFailed('Not found')), \
                mock.patch('logging.info'), \
                mock.patch('zproject.backends.SocialAuthMixin.process_do_auth') as result:
            response = dict(email=self.email, name=self.name)
            with self.settings(SOCIAL_AUTH_GITHUB_ORG_NAME='Zulip'):
                self.backend.do_auth('fake-access-token', response=response)
                kwargs = {'realm_subdomain': 'acme',
                          'response': response,
                          'return_data': {}}
                result.assert_called_with(None, 'fake-access-token', **kwargs)
def test_github_backend_authenticate_nonexisting_user(self):
# type: () -> None
with mock.patch('zproject.backends.get_user_profile_by_email',
side_effect=UserProfile.DoesNotExist("Do not exist")):
response = dict(email=self.email, name=self.name)
return_data = dict() # type: Dict[str, Any]
user = self.backend.authenticate(return_data=return_data, response=response)
self.assertIs(user, None)
self.assertTrue(return_data['valid_attestation'])
    def test_github_backend_inactive_user(self):
        # type: () -> None
        # A deactivated account must not be logged in: the login helper is
        # never invoked and do_auth returns None.
        def do_auth_inactive(*args, **kwargs):
            # type: (*Any, **Any) -> UserProfile
            # Simulate the backend flagging the account as inactive.
            return_data = kwargs['return_data']
            return_data['inactive_user'] = True
            return self.user_profile
        with mock.patch('zerver.views.auth.login_or_register_remote_user') as result, \
                mock.patch('social.backends.github.GithubOAuth2.do_auth',
                           side_effect=do_auth_inactive):
            response = dict(email=self.email, name=self.name)
            user = self.backend.do_auth(response=response)
            result.assert_not_called()
            self.assertIs(user, None)
    def test_github_backend_new_user(self):
        # type: () -> None
        # An attested email with no existing account should land on the
        # registration form instead of logging anyone in.
        rf = RequestFactory()
        request = rf.get('/complete')
        request.session = {}
        request.user = self.user_profile
        self.backend.strategy.request = request
        def do_auth(*args, **kwargs):
            # type: (*Any, **Any) -> Optional[UserProfile]
            # Simulate a successful GitHub attestation for an unknown email.
            return_data = kwargs['return_data']
            return_data['valid_attestation'] = True
            return None
        with mock.patch('social.backends.github.GithubOAuth2.do_auth',
                        side_effect=do_auth):
            response = dict(email='nonexisting@phantom.com', name='Ghost')
            result = self.backend.do_auth(response=response)
            self.assert_in_response('action="/register/"', result)
            self.assert_in_response('Your email address does not correspond to any '
                                    'existing organization.', result)
def test_login_url(self):
# type: () -> None
result = self.client_get('/accounts/login/social/github')
self.assertIn(reverse('social:begin', args=['github']), result.url)
class ResponseMock(object):
    """Tiny test double for the pieces of ``requests.Response`` these
    tests touch: ``status_code``, ``json()`` and ``text``."""

    # Canned body returned by the ``text`` property; the Google OAuth
    # error paths assert on it appearing in log messages.
    _BODY = "Response text"

    def __init__(self, status_code, data):
        # type: (int, Any) -> None
        self.status_code = status_code
        self.data = data

    def json(self):
        # type: () -> Any
        """Return the payload supplied at construction time."""
        return self.data

    @property
    def text(self):
        # type: () -> str
        return self._BODY
class GoogleOAuthTest(ZulipTestCase):
    """Shared driver for the Google OAuth2 login tests below."""
    def google_oauth2_test(self, token_response, account_response, subdomain=None):
        # type: (ResponseMock, ResponseMock, Optional[str]) -> HttpResponse
        # Walk the Google OAuth2 flow with canned token/account responses,
        # returning the response from the /done/ endpoint (or the initial
        # redirect, if it never pointed at Google).
        url = "/accounts/login/google/send/"
        if subdomain is not None:
            url += "?subdomain=" + subdomain
        result = self.client_get(url)
        self.assertEqual(result.status_code, 302)
        if 'google' not in result.url:
            # A bad subdomain short-circuits before redirecting to Google;
            # hand the redirect back for the caller to inspect.
            return result
        self.client.cookies = result.cookies
        # Now extract the CSRF token from the redirect URL
        parsed_url = urllib.parse.urlparse(result.url)
        csrf_state = urllib.parse.parse_qs(parsed_url.query)['state']
        # Stub out both outbound HTTP calls: token exchange (POST) and the
        # account-info fetch (GET).
        with mock.patch("requests.post", return_value=token_response), (
                mock.patch("requests.get", return_value=account_response)):
            result = self.client_get("/accounts/login/google/done/",
                                     dict(state=csrf_state))
        return result
class GoogleSubdomainLoginTest(GoogleOAuthTest):
    """Google OAuth2 tests for the REALMS_HAVE_SUBDOMAINS deployment model,
    where login state is carried across subdomains in a signed cookie."""
    def get_signed_subdomain_cookie(self, data):
        # type: (Dict[str, str]) -> Dict[str, str]
        # Mirrors the signing scheme used by zerver.views.auth.
        key = 'subdomain.signature'
        salt = key + 'zerver.views.auth'
        value = ujson.dumps(data)
        return {key: signing.get_cookie_signer(salt=salt).sign(value)}
    def unsign_subdomain_cookie(self, result):
        # type: (HttpResponse) -> Dict[str, Any]
        # Decode the signed subdomain cookie set on `result`.
        key = 'subdomain.signature'
        salt = key + 'zerver.views.auth'
        cookie = result.cookies.get(key)
        value = signing.get_cookie_signer(salt=salt).unsign(cookie.value, max_age=15)
        return ujson.loads(value)
    def test_google_oauth2_start(self):
        # type: () -> None
        # The start URL must propagate the requesting subdomain as a query
        # parameter on the redirect.
        with mock.patch('zerver.views.auth.get_subdomain', return_value='zulip'):
            result = self.client_get('/accounts/login/google/')
        self.assertEqual(result.status_code, 302)
        parsed_url = urllib.parse.urlparse(result.url)
        subdomain = urllib.parse.parse_qs(parsed_url.query)['subdomain']
        self.assertEqual(subdomain, ['zulip'])
    def test_google_oauth2_success(self):
        # type: () -> None
        # A successful flow sets the signed subdomain cookie and redirects
        # to the realm's login-by-cookie endpoint.
        token_response = ResponseMock(200, {'access_token': "unique_token"})
        account_data = dict(name=dict(formatted="Full Name"),
                            emails=[dict(type="account",
                                         value="hamlet@zulip.com")])
        account_response = ResponseMock(200, account_data)
        with self.settings(REALMS_HAVE_SUBDOMAINS=True):
            result = self.google_oauth2_test(token_response, account_response, 'zulip')
        data = self.unsign_subdomain_cookie(result)
        self.assertEqual(data['email'], 'hamlet@zulip.com')
        self.assertEqual(data['name'], 'Full Name')
        self.assertEqual(data['subdomain'], 'zulip')
        self.assertEqual(result.status_code, 302)
        parsed_url = urllib.parse.urlparse(result.url)
        uri = "{}://{}{}".format(parsed_url.scheme, parsed_url.netloc,
                                 parsed_url.path)
        self.assertEqual(uri, 'http://zulip.testserver/accounts/login/subdomain/')
    def test_log_into_subdomain(self):
        # type: () -> None
        # Presenting a correctly-signed cookie for the current subdomain
        # logs the user in.
        data = {'name': 'Full Name',
                'email': 'hamlet@zulip.com',
                'subdomain': 'zulip'}
        self.client.cookies = SimpleCookie(self.get_signed_subdomain_cookie(data))
        with mock.patch('zerver.views.auth.get_subdomain', return_value='zulip'):
            result = self.client_get('/accounts/login/subdomain/')
        self.assertEqual(result.status_code, 302)
        user_profile = get_user_profile_by_email('hamlet@zulip.com')
        self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
    def test_user_cannot_log_into_nonexisting_realm(self):
        # type: () -> None
        token_response = ResponseMock(200, {'access_token': "unique_token"})
        account_data = dict(name=dict(formatted="Full Name"),
                            emails=[dict(type="account",
                                         value="hamlet@zulip.com")])
        account_response = ResponseMock(200, account_data)
        result = self.google_oauth2_test(token_response, account_response, 'acme')
        self.assertEqual(result.status_code, 302)
        # subdomain=1 marks the "no such realm" error redirect.
        self.assertIn('subdomain=1', result.url)
    def test_user_cannot_log_into_wrong_subdomain(self):
        # type: () -> None
        # A cookie signed for 'acme' must be rejected on 'zulip'.
        data = {'name': 'Full Name',
                'email': 'hamlet@zulip.com',
                'subdomain': 'acme'}
        self.client.cookies = SimpleCookie(self.get_signed_subdomain_cookie(data))
        with mock.patch('zerver.views.auth.get_subdomain', return_value='zulip'):
            result = self.client_get('/accounts/login/subdomain/')
            self.assertEqual(result.status_code, 400)
    def test_log_into_subdomain_when_signature_is_bad(self):
        # type: () -> None
        # A cookie that fails signature verification is rejected.
        self.client.cookies = SimpleCookie({'subdomain.signature': 'invlaid'})
        with mock.patch('zerver.views.auth.get_subdomain', return_value='zulip'):
            result = self.client_get('/accounts/login/subdomain/')
            self.assertEqual(result.status_code, 400)
    def test_log_into_subdomain_when_state_is_not_passed(self):
        # type: () -> None
        # No cookie at all is also a 400.
        with mock.patch('zerver.views.auth.get_subdomain', return_value='zulip'):
            result = self.client_get('/accounts/login/subdomain/')
            self.assertEqual(result.status_code, 400)
    def test_google_oauth2_registration(self):
        # type: () -> None
        """If the user doesn't exist yet, Google auth can be used to register an account"""
        with self.settings(REALMS_HAVE_SUBDOMAINS=True), (
                mock.patch('zerver.views.auth.get_subdomain', return_value='zulip')), (
                mock.patch('zerver.views.registration.get_subdomain', return_value='zulip')):
            email = "newuser@zulip.com"
            token_response = ResponseMock(200, {'access_token': "unique_token"})
            account_data = dict(name=dict(formatted="Full Name"),
                                emails=[dict(type="account",
                                             value=email)])
            account_response = ResponseMock(200, account_data)
            result = self.google_oauth2_test(token_response, account_response, 'zulip')
            data = self.unsign_subdomain_cookie(result)
            self.assertEqual(data['email'], email)
            self.assertEqual(data['name'], 'Full Name')
            self.assertEqual(data['subdomain'], 'zulip')
            self.assertEqual(result.status_code, 302)
            parsed_url = urllib.parse.urlparse(result.url)
            uri = "{}://{}{}".format(parsed_url.scheme, parsed_url.netloc,
                                     parsed_url.path)
            self.assertEqual(uri, 'http://zulip.testserver/accounts/login/subdomain/')
            result = self.client_get(result.url)
            result = self.client_get(result.url) # Call the confirmation url.
            # Scrape the confirmation key and prefilled name out of the form.
            key_match = re.search('value="(?P<key>[0-9a-f]+)" name="key"', result.content.decode("utf-8"))
            name_match = re.search('value="(?P<name>[^"]+)" name="full_name"', result.content.decode("utf-8"))
            # This goes through a brief stop on a page that auto-submits via JS
            result = self.client_post('/accounts/register/',
                                      {'full_name': name_match.group("name"),
                                       'key': key_match.group("key"),
                                       'from_confirmation': "1"})
            self.assertEqual(result.status_code, 200)
            result = self.client_post('/accounts/register/',
                                      {'full_name': "New User",
                                       'password': 'test_password',
                                       'key': key_match.group("key"),
                                       'terms': True})
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, "http://zulip.testserver/")
            user_profile = get_user_profile_by_email(email)
            self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
class GoogleLoginTest(GoogleOAuthTest):
    """Google OAuth2 tests for the single-realm (no subdomains) deployment,
    including the error paths of the token/account API calls."""
    def test_google_oauth2_success(self):
        # type: () -> None
        token_response = ResponseMock(200, {'access_token': "unique_token"})
        account_data = dict(name=dict(formatted="Full Name"),
                            emails=[dict(type="account",
                                         value="hamlet@zulip.com")])
        account_response = ResponseMock(200, account_data)
        self.google_oauth2_test(token_response, account_response)
        user_profile = get_user_profile_by_email('hamlet@zulip.com')
        self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
    def test_google_oauth2_registration(self):
        # type: () -> None
        """If the user doesn't exist yet, Google auth can be used to register an account"""
        email = "newuser@zulip.com"
        token_response = ResponseMock(200, {'access_token': "unique_token"})
        account_data = dict(name=dict(formatted="Full Name"),
                            emails=[dict(type="account",
                                         value=email)])
        account_response = ResponseMock(200, account_data)
        result = self.google_oauth2_test(token_response, account_response)
        self.assertEqual(result.status_code, 302)
        result = self.client_get(result.url)
        # Scrape the confirmation key and prefilled name out of the form.
        key_match = re.search('value="(?P<key>[0-9a-f]+)" name="key"', result.content.decode("utf-8"))
        name_match = re.search('value="(?P<name>[^"]+)" name="full_name"', result.content.decode("utf-8"))
        # This goes through a brief stop on a page that auto-submits via JS
        result = self.client_post('/accounts/register/',
                                  {'full_name': name_match.group("name"),
                                   'key': key_match.group("key"),
                                   'from_confirmation': "1"})
        self.assertEqual(result.status_code, 200)
        result = self.client_post('/accounts/register/',
                                  {'full_name': "New User",
                                   'password': 'test_password',
                                   'key': key_match.group("key"),
                                   'terms': True})
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result.url, "http://testserver/")
    def test_google_oauth2_wrong_subdomain(self):
        # type: () -> None
        # With subdomains enabled, this non-subdomain flow redirects with
        # the subdomain=1 error marker.
        token_response = ResponseMock(200, {'access_token': "unique_token"})
        account_data = dict(name=dict(formatted="Full Name"),
                            emails=[dict(type="account",
                                         value="hamlet@zulip.com")])
        account_response = ResponseMock(200, account_data)
        with self.settings(REALMS_HAVE_SUBDOMAINS=True):
            result = self.google_oauth2_test(token_response, account_response)
            self.assertIn('subdomain=1', result.url)
    def test_google_oauth2_400_token_response(self):
        # type: () -> None
        # A 4xx from the token exchange is treated as user error (warning).
        token_response = ResponseMock(400, {})
        with mock.patch("logging.warning") as m:
            result = self.google_oauth2_test(token_response, None)
        self.assertEqual(result.status_code, 400)
        self.assertEqual(m.call_args_list[0][0][0],
                         "User error converting Google oauth2 login to token: Response text")
    def test_google_oauth2_500_token_response(self):
        # type: () -> None
        # A 5xx from the token exchange is logged as an error.
        token_response = ResponseMock(500, {})
        with mock.patch("logging.error") as m:
            result = self.google_oauth2_test(token_response, None)
        self.assertEqual(result.status_code, 400)
        self.assertEqual(m.call_args_list[0][0][0],
                         "Could not convert google oauth2 code to access_token: Response text")
    def test_google_oauth2_400_account_response(self):
        # type: () -> None
        token_response = ResponseMock(200, {'access_token': "unique_token"})
        account_response = ResponseMock(400, {})
        with mock.patch("logging.warning") as m:
            result = self.google_oauth2_test(token_response, account_response)
        self.assertEqual(result.status_code, 400)
        self.assertEqual(m.call_args_list[0][0][0],
                         "Google login failed making info API call: Response text")
    def test_google_oauth2_500_account_response(self):
        # type: () -> None
        token_response = ResponseMock(200, {'access_token': "unique_token"})
        account_response = ResponseMock(500, {})
        with mock.patch("logging.error") as m:
            result = self.google_oauth2_test(token_response, account_response)
        self.assertEqual(result.status_code, 400)
        self.assertEqual(m.call_args_list[0][0][0],
                         "Google login failed making API call: Response text")
    def test_google_oauth2_no_fullname(self):
        # type: () -> None
        # When Google omits a formatted name, givenName/familyName are
        # used and login still succeeds.
        token_response = ResponseMock(200, {'access_token': "unique_token"})
        account_data = dict(name=dict(givenName="Test", familyName="User"),
                            emails=[dict(type="account",
                                         value="hamlet@zulip.com")])
        account_response = ResponseMock(200, account_data)
        self.google_oauth2_test(token_response, account_response)
        user_profile = get_user_profile_by_email('hamlet@zulip.com')
        self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
    def test_google_oauth2_account_response_no_email(self):
        # type: () -> None
        # An account payload without an "account"-type email is an error.
        token_response = ResponseMock(200, {'access_token': "unique_token"})
        account_data = dict(name=dict(formatted="Full Name"),
                            emails=[])
        account_response = ResponseMock(200, account_data)
        with mock.patch("logging.error") as m:
            result = self.google_oauth2_test(token_response, account_response)
        self.assertEqual(result.status_code, 400)
        self.assertIn("Google oauth2 account email not found:", m.call_args_list[0][0][0])
    def test_google_oauth2_error_access_denied(self):
        # type: () -> None
        # The user declining consent is not an error; redirect home.
        result = self.client_get("/accounts/login/google/done/?error=access_denied")
        self.assertEqual(result.status_code, 302)
        path = urllib.parse.urlparse(result.url).path
        self.assertEqual(path, "/")
    def test_google_oauth2_error_other(self):
        # type: () -> None
        with mock.patch("logging.warning") as m:
            result = self.client_get("/accounts/login/google/done/?error=some_other_error")
        self.assertEqual(result.status_code, 400)
        self.assertEqual(m.call_args_list[0][0][0],
                         "Error from google oauth2 login: some_other_error")
    def test_google_oauth2_missing_csrf(self):
        # type: () -> None
        with mock.patch("logging.warning") as m:
            result = self.client_get("/accounts/login/google/done/")
        self.assertEqual(result.status_code, 400)
        self.assertEqual(m.call_args_list[0][0][0],
                         'Missing Google oauth2 CSRF state')
    def test_google_oauth2_csrf_malformed(self):
        # type: () -> None
        with mock.patch("logging.warning") as m:
            result = self.client_get("/accounts/login/google/done/?state=badstate")
        self.assertEqual(result.status_code, 400)
        self.assertEqual(m.call_args_list[0][0][0],
                         'Missing Google oauth2 CSRF state')
    def test_google_oauth2_csrf_badstate(self):
        # type: () -> None
        with mock.patch("logging.warning") as m:
            result = self.client_get("/accounts/login/google/done/?state=badstate:otherbadstate:")
        self.assertEqual(result.status_code, 400)
        self.assertEqual(m.call_args_list[0][0][0],
                         'Google oauth2 CSRF error')
class FetchAPIKeyTest(ZulipTestCase):
    """Tests for the /api/v1/fetch_api_key endpoint (username/password ->
    API key exchange)."""
    def setUp(self):
        # type: () -> None
        self.email = "hamlet@zulip.com"
        self.user_profile = get_user_profile_by_email(self.email)
    def test_success(self):
        # type: () -> None
        result = self.client_post("/api/v1/fetch_api_key",
                                  dict(username=self.email,
                                       password=initial_password(self.email)))
        self.assert_json_success(result)
    def test_wrong_password(self):
        # type: () -> None
        result = self.client_post("/api/v1/fetch_api_key",
                                  dict(username=self.email,
                                       password="wrong"))
        self.assert_json_error(result, "Your username or password is incorrect.", 403)
    def test_password_auth_disabled(self):
        # type: () -> None
        with mock.patch('zproject.backends.password_auth_enabled', return_value=False):
            result = self.client_post("/api/v1/fetch_api_key",
                                      dict(username=self.email,
                                           password=initial_password(self.email)))
            self.assert_json_error_contains(result, "Password auth is disabled", 403)
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
    def test_ldap_auth_email_auth_disabled_success(self):
        # type: () -> None
        # Patch ldap.initialize so no real LDAP server is needed.
        ldap_patcher = mock.patch('django_auth_ldap.config.ldap.initialize')
        self.mock_initialize = ldap_patcher.start()
        # Bug fix: the old code called .stop() on the started *mock*
        # (self.mock_initialize), which is a no-op MagicMock call and left
        # the patch active for subsequent tests.  addCleanup stops the
        # actual patcher, even if an assertion below fails.
        self.addCleanup(ldap_patcher.stop)
        self.mock_ldap = MockLDAP()
        self.mock_initialize.return_value = self.mock_ldap
        self.backend = ZulipLDAPAuthBackend()
        self.mock_ldap.directory = {
            'uid=hamlet,ou=users,dc=zulip,dc=com': {
                'userPassword': 'testing'
            }
        }
        with self.settings(
                LDAP_APPEND_DOMAIN='zulip.com',
                AUTH_LDAP_BIND_PASSWORD='',
                AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
            result = self.client_post("/api/v1/fetch_api_key",
                                      dict(username=self.email,
                                           password="testing"))
            self.assert_json_success(result)
        self.mock_ldap.reset()
    def test_inactive_user(self):
        # type: () -> None
        do_deactivate_user(self.user_profile)
        result = self.client_post("/api/v1/fetch_api_key",
                                  dict(username=self.email,
                                       password=initial_password(self.email)))
        self.assert_json_error_contains(result, "Your account has been disabled", 403)
    def test_deactivated_realm(self):
        # type: () -> None
        do_deactivate_realm(self.user_profile.realm)
        result = self.client_post("/api/v1/fetch_api_key",
                                  dict(username=self.email,
                                       password=initial_password(self.email)))
        self.assert_json_error_contains(result, "Your realm has been deactivated", 403)
class DevFetchAPIKeyTest(ZulipTestCase):
    """Tests for /api/v1/dev_fetch_api_key (development-only, passwordless
    API-key fetch)."""
    def setUp(self):
        # type: () -> None
        self.email = "hamlet@zulip.com"
        self.user_profile = get_user_profile_by_email(self.email)
    def test_success(self):
        # type: () -> None
        result = self.client_post("/api/v1/dev_fetch_api_key",
                                  dict(username=self.email))
        self.assert_json_success(result)
        data = ujson.loads(result.content)
        self.assertEqual(data["email"], self.email)
        self.assertEqual(data['api_key'], self.user_profile.api_key)
    def test_inactive_user(self):
        # type: () -> None
        do_deactivate_user(self.user_profile)
        result = self.client_post("/api/v1/dev_fetch_api_key",
                                  dict(username=self.email))
        self.assert_json_error_contains(result, "Your account has been disabled", 403)
    def test_deactivated_realm(self):
        # type: () -> None
        do_deactivate_realm(self.user_profile.realm)
        result = self.client_post("/api/v1/dev_fetch_api_key",
                                  dict(username=self.email))
        self.assert_json_error_contains(result, "Your realm has been deactivated", 403)
    def test_dev_auth_disabled(self):
        # type: () -> None
        # Outside the dev environment the endpoint must refuse to work.
        with mock.patch('zerver.views.auth.dev_auth_enabled', return_value=False):
            result = self.client_post("/api/v1/dev_fetch_api_key",
                                      dict(username=self.email))
            self.assert_json_error_contains(result, "Dev environment not enabled.", 400)
class DevGetEmailsTest(ZulipTestCase):
    """Tests for the development-only /api/v1/dev_get_emails endpoint."""
    def test_success(self):
        # type: () -> None
        response = self.client_get("/api/v1/dev_get_emails")
        self.assert_json_success(response)
        for fragment in ("direct_admins", "direct_users"):
            self.assert_in_response(fragment, response)
    def test_dev_auth_disabled(self):
        # type: () -> None
        # Outside the dev environment the endpoint must refuse to work.
        with mock.patch('zerver.views.auth.dev_auth_enabled', return_value=False):
            response = self.client_get("/api/v1/dev_get_emails")
            self.assert_json_error_contains(response, "Dev environment not enabled.", 400)
class FetchAuthBackends(ZulipTestCase):
    """Tests for /api/v1/get_auth_backends, which advertises which login
    methods the server supports."""
    def test_fetch_auth_backend_format(self):
        # type: () -> None
        # Response has exactly the expected keys, and every backend flag
        # is a boolean.
        result = self.client_get("/api/v1/get_auth_backends")
        self.assert_json_success(result)
        data = ujson.loads(result.content)
        self.assertEqual(set(data.keys()),
                         {'msg', 'password', 'google', 'dev', 'result'})
        for backend in set(data.keys()) - {'msg', 'result'}:
            self.assertTrue(isinstance(data[backend], bool))
    def test_fetch_auth_backend(self):
        # type: () -> None
        # With only Google-mobile and Dev backends enabled, password auth
        # must be reported as off.
        backends = [GoogleMobileOauth2Backend(), DevAuthBackend()]
        with mock.patch('django.contrib.auth.get_backends', return_value=backends):
            result = self.client_get("/api/v1/get_auth_backends")
            self.assert_json_success(result)
            data = ujson.loads(result.content)
            self.assertEqual(data, {
                'msg': '',
                'password': False,
                'google': True,
                'dev': True,
                'result': 'success',
            })
class TestDevAuthBackend(ZulipTestCase):
    """Tests for DevAuthBackend's direct login (/accounts/login/local/)."""
    def test_login_success(self):
        # type: () -> None
        email = 'hamlet@zulip.com'
        user_profile = get_user_profile_by_email(email)
        data = {'direct_email': email}
        result = self.client_post('/accounts/login/local/', data)
        self.assertEqual(result.status_code, 302)
        self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
    def test_login_failure(self):
        # type: () -> None
        # With only EmailAuthBackend enabled, direct login must raise.
        email = 'hamlet@zulip.com'
        data = {'direct_email': email}
        with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.EmailAuthBackend',)):
            with self.assertRaisesRegex(Exception, 'Direct login not supported.'):
                # Silence the 500-error log; presumably the logger's module
                # path differs across Django versions, hence the
                # ImportError fallback — TODO confirm.
                try:
                    with mock.patch('django.core.handlers.exception.logger'):
                        self.client_post('/accounts/login/local/', data)
                except ImportError:
                    with mock.patch('django.core.handlers.base.logger'):
                        self.client_post('/accounts/login/local/', data)
    def test_login_failure_due_to_nonexistent_user(self):
        # type: () -> None
        # An unknown email raises rather than silently failing.
        email = 'nonexisting@zulip.com'
        data = {'direct_email': email}
        with self.assertRaisesRegex(Exception, 'User cannot login'):
            try:
                with mock.patch('django.core.handlers.exception.logger'):
                    self.client_post('/accounts/login/local/', data)
            except ImportError:
                with mock.patch('django.core.handlers.base.logger'):
                    self.client_post('/accounts/login/local/', data)
class TestZulipRemoteUserBackend(ZulipTestCase):
    """Tests for REMOTE_USER (SSO) authentication via /accounts/login/sso/."""
    def test_login_success(self):
        # type: () -> None
        email = 'hamlet@zulip.com'
        user_profile = get_user_profile_by_email(email)
        with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)):
            result = self.client_post('/accounts/login/sso/', REMOTE_USER=email)
            self.assertEqual(result.status_code, 302)
            self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
    def test_authenticate_with_missing_user(self):
        # type: () -> None
        backend = ZulipRemoteUserBackend()
        self.assertIs(backend.authenticate(None), None)
    def test_login_success_with_sso_append_domain(self):
        # type: () -> None
        # With SSO_APPEND_DOMAIN, a bare username is completed to an email.
        username = 'hamlet'
        email = 'hamlet@zulip.com'
        user_profile = get_user_profile_by_email(email)
        with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',),
                           SSO_APPEND_DOMAIN='zulip.com'):
            result = self.client_post('/accounts/login/sso/', REMOTE_USER=username)
            self.assertEqual(result.status_code, 302)
            self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
    def test_login_failure(self):
        # type: () -> None
        # Backend not enabled: nobody gets logged in.
        email = 'hamlet@zulip.com'
        result = self.client_post('/accounts/login/sso/', REMOTE_USER=email)
        self.assertEqual(result.status_code, 200) # This should ideally be not 200.
        self.assertIs(get_session_dict_user(self.client.session), None)
    def test_login_failure_due_to_nonexisting_user(self):
        # type: () -> None
        email = 'nonexisting@zulip.com'
        with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)):
            result = self.client_post('/accounts/login/sso/', REMOTE_USER=email)
            self.assertEqual(result.status_code, 302)
            self.assertIs(get_session_dict_user(self.client.session), None)
    def test_login_failure_due_to_missing_field(self):
        # type: () -> None
        with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)):
            result = self.client_post('/accounts/login/sso/')
            self.assert_json_error_contains(result, "No REMOTE_USER set.", 400)
    def test_login_failure_due_to_wrong_subdomain(self):
        # type: () -> None
        # A user from zulip.com must not be logged into the 'acme' realm.
        email = 'hamlet@zulip.com'
        with self.settings(REALMS_HAVE_SUBDOMAINS=True,
                           AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)):
            with mock.patch('zerver.views.auth.get_subdomain', return_value='acme'):
                result = self.client_post('http://testserver:9080/accounts/login/sso/', REMOTE_USER=email)
                self.assertEqual(result.status_code, 200)
                self.assertIs(get_session_dict_user(self.client.session), None)
                self.assertIn(b"Let's get started", result.content)
    def test_login_failure_due_to_empty_subdomain(self):
        # type: () -> None
        email = 'hamlet@zulip.com'
        with self.settings(REALMS_HAVE_SUBDOMAINS=True,
                           AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)):
            with mock.patch('zerver.views.auth.get_subdomain', return_value=''):
                result = self.client_post('http://testserver:9080/accounts/login/sso/', REMOTE_USER=email)
                self.assertEqual(result.status_code, 200)
                self.assertIs(get_session_dict_user(self.client.session), None)
                self.assertIn(b"Let's get started", result.content)
    def test_login_success_under_subdomains(self):
        # type: () -> None
        email = 'hamlet@zulip.com'
        user_profile = get_user_profile_by_email(email)
        with mock.patch('zerver.views.auth.get_subdomain', return_value='zulip'):
            with self.settings(
                    REALMS_HAVE_SUBDOMAINS=True,
                    AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)):
                result = self.client_post('/accounts/login/sso/', REMOTE_USER=email)
                self.assertEqual(result.status_code, 302)
                self.assertIs(get_session_dict_user(self.client.session), user_profile.id)
class TestJWTLogin(ZulipTestCase):
    """
    JWT uses ZulipDummyBackend.

    Tokens are signed with the per-subdomain key from JWT_AUTH_KEYS and
    posted to /accounts/login/jwt/.
    """
    def test_login_success(self):
        # type: () -> None
        payload = {'user': 'hamlet', 'realm': 'zulip.com'}
        with self.settings(JWT_AUTH_KEYS={'': 'key'}):
            email = 'hamlet@zulip.com'
            auth_key = settings.JWT_AUTH_KEYS['']
            web_token = jwt.encode(payload, auth_key).decode('utf8')
            user_profile = get_user_profile_by_email(email)
            data = {'json_web_token': web_token}
            result = self.client_post('/accounts/login/jwt/', data)
            self.assertEqual(result.status_code, 302)
            self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
    def test_login_failure_when_user_is_missing(self):
        # type: () -> None
        # A token without a 'user' claim is rejected.
        payload = {'realm': 'zulip.com'}
        with self.settings(JWT_AUTH_KEYS={'': 'key'}):
            auth_key = settings.JWT_AUTH_KEYS['']
            web_token = jwt.encode(payload, auth_key).decode('utf8')
            data = {'json_web_token': web_token}
            result = self.client_post('/accounts/login/jwt/', data)
            self.assert_json_error_contains(result, "No user specified in JSON web token claims", 400)
    def test_login_failure_when_realm_is_missing(self):
        # type: () -> None
        # A token without a 'realm' claim is rejected.
        payload = {'user': 'hamlet'}
        with self.settings(JWT_AUTH_KEYS={'': 'key'}):
            auth_key = settings.JWT_AUTH_KEYS['']
            web_token = jwt.encode(payload, auth_key).decode('utf8')
            data = {'json_web_token': web_token}
            result = self.client_post('/accounts/login/jwt/', data)
            self.assert_json_error_contains(result, "No realm specified in JSON web token claims", 400)
    def test_login_failure_when_key_does_not_exist(self):
        # type: () -> None
        # No JWT_AUTH_KEYS entry configured for this subdomain.
        data = {'json_web_token': 'not relevant'}
        result = self.client_post('/accounts/login/jwt/', data)
        self.assert_json_error_contains(result, "Auth key for this subdomain not found.", 400)
    def test_login_failure_when_key_is_missing(self):
        # type: () -> None
        with self.settings(JWT_AUTH_KEYS={'': 'key'}):
            result = self.client_post('/accounts/login/jwt/')
            self.assert_json_error_contains(result, "No JSON web token passed in request", 400)
    def test_login_failure_when_bad_token_is_passed(self):
        # type: () -> None
        with self.settings(JWT_AUTH_KEYS={'': 'key'}):
            result = self.client_post('/accounts/login/jwt/')
            self.assert_json_error_contains(result, "No JSON web token passed in request", 400)
            data = {'json_web_token': 'bad token'}
            result = self.client_post('/accounts/login/jwt/', data)
            self.assert_json_error_contains(result, "Bad JSON web token", 400)
    def test_login_failure_when_user_does_not_exist(self):
        # type: () -> None
        payload = {'user': 'nonexisting', 'realm': 'zulip.com'}
        with self.settings(JWT_AUTH_KEYS={'': 'key'}):
            auth_key = settings.JWT_AUTH_KEYS['']
            web_token = jwt.encode(payload, auth_key).decode('utf8')
            data = {'json_web_token': web_token}
            result = self.client_post('/accounts/login/jwt/', data)
            self.assertEqual(result.status_code, 302) # Redirects rather than returning an error; nobody is logged in.
            self.assertIs(get_session_dict_user(self.client.session), None)
    def test_login_failure_due_to_wrong_subdomain(self):
        # type: () -> None
        # Token realm 'zulip.com' does not match the 'acme' subdomain.
        payload = {'user': 'hamlet', 'realm': 'zulip.com'}
        with self.settings(REALMS_HAVE_SUBDOMAINS=True, JWT_AUTH_KEYS={'acme': 'key'}):
            with mock.patch('zerver.views.auth.get_subdomain', return_value='acme'):
                auth_key = settings.JWT_AUTH_KEYS['acme']
                web_token = jwt.encode(payload, auth_key).decode('utf8')
                data = {'json_web_token': web_token}
                result = self.client_post('/accounts/login/jwt/', data)
                self.assert_json_error_contains(result, "Wrong subdomain", 400)
                self.assertEqual(get_session_dict_user(self.client.session), None)
    def test_login_failure_due_to_empty_subdomain(self):
        # type: () -> None
        payload = {'user': 'hamlet', 'realm': 'zulip.com'}
        with self.settings(REALMS_HAVE_SUBDOMAINS=True, JWT_AUTH_KEYS={'': 'key'}):
            with mock.patch('zerver.views.auth.get_subdomain', return_value=''):
                auth_key = settings.JWT_AUTH_KEYS['']
                web_token = jwt.encode(payload, auth_key).decode('utf8')
                data = {'json_web_token': web_token}
                result = self.client_post('/accounts/login/jwt/', data)
                self.assert_json_error_contains(result, "Wrong subdomain", 400)
                self.assertEqual(get_session_dict_user(self.client.session), None)
    def test_login_success_under_subdomains(self):
        # type: () -> None
        payload = {'user': 'hamlet', 'realm': 'zulip.com'}
        with self.settings(REALMS_HAVE_SUBDOMAINS=True, JWT_AUTH_KEYS={'zulip': 'key'}):
            with mock.patch('zerver.views.auth.get_subdomain', return_value='zulip'):
                email = 'hamlet@zulip.com'
                auth_key = settings.JWT_AUTH_KEYS['zulip']
                web_token = jwt.encode(payload, auth_key).decode('utf8')
                data = {'json_web_token': web_token}
                result = self.client_post('/accounts/login/jwt/', data)
                self.assertEqual(result.status_code, 302)
                user_profile = get_user_profile_by_email(email)
                self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
class TestLDAP(ZulipTestCase):
    def setUp(self):
        # type: () -> None
        email = "hamlet@zulip.com"
        user_profile = get_user_profile_by_email(email)
        self.setup_subdomain(user_profile)
        # Patch ldap.initialize so no real LDAP server is needed.
        ldap_patcher = mock.patch('django_auth_ldap.config.ldap.initialize')
        # NOTE(review): the patcher object is discarded here, so tearDown
        # cannot call ldap_patcher.stop(); tearDown's
        # self.mock_initialize.stop() calls stop() on the *mock*, which
        # does not undo the patch — verify the patch is actually stopped.
        self.mock_initialize = ldap_patcher.start()
        self.mock_ldap = MockLDAP()
        self.mock_initialize.return_value = self.mock_ldap
        self.backend = ZulipLDAPAuthBackend()
def tearDown(self):
# type: () -> None
self.mock_ldap.reset()
self.mock_initialize.stop()
def setup_subdomain(self, user_profile):
# type: (UserProfile) -> None
realm = user_profile.realm
realm.string_id = 'zulip'
realm.save()
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
    def test_login_success(self):
        # type: () -> None
        # Populate the fake directory with a bindable entry for hamlet.
        self.mock_ldap.directory = {
            'uid=hamlet,ou=users,dc=zulip,dc=com': {
                'userPassword': 'testing'
            }
        }
        with self.settings(
            LDAP_APPEND_DOMAIN='zulip.com',
            AUTH_LDAP_BIND_PASSWORD='',
            AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
            user_profile = self.backend.authenticate('hamlet@zulip.com', 'testing')
            self.assertEqual(user_profile.email, 'hamlet@zulip.com')
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_failure_due_to_wrong_password(self):
# type: () -> None
self.mock_ldap.directory = {
'uid=hamlet,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing'
}
}
with self.settings(
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
user = self.backend.authenticate('hamlet@zulip.com', 'wrong')
self.assertIs(user, None)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_failure_due_to_nonexistent_user(self):
# type: () -> None
self.mock_ldap.directory = {
'uid=hamlet,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing'
}
}
with self.settings(
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
user = self.backend.authenticate('nonexistent@zulip.com', 'testing')
self.assertIs(user, None)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_ldap_permissions(self):
# type: () -> None
backend = self.backend
self.assertFalse(backend.has_perm(None, None))
self.assertFalse(backend.has_module_perms(None, None))
self.assertTrue(backend.get_all_permissions(None, None) == set())
self.assertTrue(backend.get_group_permissions(None, None) == set())
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_django_to_ldap_username(self):
# type: () -> None
backend = self.backend
with self.settings(LDAP_APPEND_DOMAIN='zulip.com'):
username = backend.django_to_ldap_username('"hamlet@test"@zulip.com')
self.assertEqual(username, '"hamlet@test"')
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_ldap_to_django_username(self):
# type: () -> None
backend = self.backend
with self.settings(LDAP_APPEND_DOMAIN='zulip.com'):
username = backend.ldap_to_django_username('"hamlet@test"')
self.assertEqual(username, '"hamlet@test"@zulip.com')
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_get_or_create_user_when_user_exists(self):
# type: () -> None
class _LDAPUser(object):
attrs = {'fn': ['Full Name'], 'sn': ['Short Name']}
backend = self.backend
email = 'hamlet@zulip.com'
user_profile, created = backend.get_or_create_user(email, _LDAPUser())
self.assertFalse(created)
self.assertEqual(user_profile.email, email)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_get_or_create_user_when_user_does_not_exist(self):
# type: () -> None
class _LDAPUser(object):
attrs = {'fn': ['Full Name'], 'sn': ['Short Name']}
ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}
with self.settings(AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map):
backend = self.backend
email = 'nonexisting@zulip.com'
user_profile, created = backend.get_or_create_user(email, _LDAPUser())
self.assertTrue(created)
self.assertEqual(user_profile.email, email)
self.assertEqual(user_profile.full_name, 'Full Name')
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_get_or_create_user_when_realm_is_deactivated(self):
# type: () -> None
class _LDAPUser(object):
attrs = {'fn': ['Full Name'], 'sn': ['Short Name']}
ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}
with self.settings(AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map):
backend = self.backend
email = 'nonexisting@zulip.com'
realm = get_realm('zulip')
do_deactivate_realm(realm)
with self.assertRaisesRegex(Exception, 'Realm has been deactivated'):
backend.get_or_create_user(email, _LDAPUser())
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_django_to_ldap_username_when_domain_does_not_match(self):
# type: () -> None
backend = self.backend
email = 'hamlet@zulip.com'
with self.assertRaisesRegex(Exception, 'Username does not match LDAP domain.'):
with self.settings(LDAP_APPEND_DOMAIN='acme.com'):
backend.django_to_ldap_username(email)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_failure_due_to_wrong_subdomain(self):
# type: () -> None
self.mock_ldap.directory = {
'uid=hamlet,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing'
}
}
with self.settings(
REALMS_HAVE_SUBDOMAINS=True,
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
user_profile = self.backend.authenticate('hamlet@zulip.com', 'testing',
realm_subdomain='acme')
self.assertIs(user_profile, None)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_failure_due_to_empty_subdomain(self):
# type: () -> None
self.mock_ldap.directory = {
'uid=hamlet,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing'
}
}
with self.settings(
REALMS_HAVE_SUBDOMAINS=True,
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
user_profile = self.backend.authenticate('hamlet@zulip.com', 'testing',
realm_subdomain='')
self.assertIs(user_profile, None)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_success_when_subdomain_is_none(self):
# type: () -> None
self.mock_ldap.directory = {
'uid=hamlet,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing'
}
}
with self.settings(
REALMS_HAVE_SUBDOMAINS=True,
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
user_profile = self.backend.authenticate('hamlet@zulip.com', 'testing',
realm_subdomain=None)
self.assertEqual(user_profile.email, 'hamlet@zulip.com')
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_success_with_valid_subdomain(self):
# type: () -> None
self.mock_ldap.directory = {
'uid=hamlet,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing'
}
}
with self.settings(
REALMS_HAVE_SUBDOMAINS=True,
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
user_profile = self.backend.authenticate('hamlet@zulip.com', 'testing',
realm_subdomain='zulip')
self.assertEqual(user_profile.email, 'hamlet@zulip.com')
class TestZulipLDAPUserPopulator(ZulipTestCase):
    def test_authenticate(self):
        # type: () -> None
        """The populator backend never authenticates anyone directly."""
        populator = ZulipLDAPUserPopulator()
        result = populator.authenticate('hamlet@zulip.com', 'testing')  # type: ignore # complains that the function does not return any value!
        self.assertIs(result, None)
class TestZulipAuthMixin(ZulipTestCase):
    def test_get_user(self):
        # type: () -> None
        """get_user returns None for an id that matches no UserProfile."""
        mixin = ZulipAuthMixin()
        missing = mixin.get_user(11111)
        self.assertIs(missing, None)
class TestPasswordAuthEnabled(ZulipTestCase):
    def test_password_auth_enabled_for_ldap(self):
        # type: () -> None
        """LDAP counts as a password-based authentication backend."""
        ldap_only = ('zproject.backends.ZulipLDAPAuthBackend',)
        with self.settings(AUTHENTICATION_BACKENDS=ldap_only):
            realm = Realm.objects.get(string_id='zulip')
            self.assertTrue(password_auth_enabled(realm))
class TestMaybeSendToRegistration(ZulipTestCase):
    """Tests for maybe_send_to_registration under SSO-only configurations."""

    def test_sso_only_when_preregistration_user_does_not_exist(self):
        # type: () -> None
        """A fresh email gets a PreregistrationUser plus a confirmation link."""
        request = RequestFactory().get('/')
        request.session = {}
        request.user = None

        # Stub standing in for HomepageForm: always valid, so the code under
        # test proceeds past form validation.
        class AlwaysValidForm(object):
            def is_valid(self):
                # type: () -> bool
                return True

        with self.settings(ONLY_SSO=True):
            with mock.patch('zerver.views.auth.HomepageForm',
                            return_value=AlwaysValidForm()):
                self.assertEqual(PreregistrationUser.objects.all().count(), 0)
                result = maybe_send_to_registration(request, 'hamlet@zulip.com')
                self.assertEqual(result.status_code, 302)
                confirmation = Confirmation.objects.all().first()
                confirmation_key = confirmation.confirmation_key
                self.assertIn('do_confirm/' + confirmation_key, result.url)
                self.assertEqual(PreregistrationUser.objects.all().count(), 1)

        result = self.client_get(result.url)
        self.assert_in_response('action="/accounts/register/"', result)
        self.assert_in_response('value="{0}" name="key"'.format(confirmation_key),
                                result)

    def test_sso_only_when_preregistration_user_exists(self):
        # type: () -> None
        """An existing PreregistrationUser is reused rather than duplicated."""
        request = RequestFactory().get('/')
        request.session = {}
        request.user = None

        email = 'hamlet@zulip.com'
        PreregistrationUser(email=email).save()

        # Same always-valid form stub as above.
        class AlwaysValidForm(object):
            def is_valid(self):
                # type: () -> bool
                return True

        with self.settings(ONLY_SSO=True):
            with mock.patch('zerver.views.auth.HomepageForm',
                            return_value=AlwaysValidForm()):
                self.assertEqual(PreregistrationUser.objects.all().count(), 1)
                result = maybe_send_to_registration(request, email)
                self.assertEqual(result.status_code, 302)
                confirmation = Confirmation.objects.all().first()
                confirmation_key = confirmation.confirmation_key
                self.assertIn('do_confirm/' + confirmation_key, result.url)
                self.assertEqual(PreregistrationUser.objects.all().count(), 1)
class TestAdminSetBackends(ZulipTestCase):
    """Tests for PATCH /json/realm updates to authentication_methods."""

    def test_change_enabled_backends(self):
        # type: () -> None
        # Log in as admin
        self.login("iago@zulip.com")
        payload = {'authentication_methods':
                   ujson.dumps({u'Email': False, u'Dev': True})}
        response = self.client_patch("/json/realm", payload)
        self.assert_json_success(response)
        realm = get_realm('zulip')
        self.assertFalse(password_auth_enabled(realm))
        self.assertTrue(dev_auth_enabled(realm))

    def test_disable_all_backends(self):
        # type: () -> None
        # Log in as admin
        self.login("iago@zulip.com")
        payload = {'authentication_methods':
                   ujson.dumps({u'Email': False, u'Dev': False})}
        response = self.client_patch("/json/realm", payload)
        self.assert_json_error(response, 'At least one authentication method must be enabled.', status_code=403)
        realm = get_realm('zulip')
        # The request was rejected, so both backends remain enabled.
        self.assertTrue(password_auth_enabled(realm))
        self.assertTrue(dev_auth_enabled(realm))

    def test_supported_backends_only_updated(self):
        # type: () -> None
        # Log in as admin
        self.login("iago@zulip.com")
        # Set some supported and unsupported backends
        payload = {'authentication_methods':
                   ujson.dumps({u'Email': False, u'Dev': True, u'GitHub': False})}
        response = self.client_patch("/json/realm", payload)
        self.assert_json_success(response)
        realm = get_realm('zulip')
        # Check that unsupported backend is not enabled
        self.assertFalse(github_auth_enabled(realm))
        self.assertTrue(dev_auth_enabled(realm))
        self.assertFalse(password_auth_enabled(realm))
|
|
import urllib.request
from pykml import parser
import pdb
import os
import sys
import time
import json
import math
scriptDir=os.path.dirname(os.path.realpath(__file__))
sys.path.append(scriptDir)
sys.path.append(os.path.join(scriptDir,'..','lib','python-zeromq-pubsub','src'))
import processNode
UPDATE_INTERVAL_SECS = 1
TIME_BUFFER = 30
REMOTE_DATA_URL = "http://data.cabq.gov/transit/realtime/route/allroutes.kml"
TOTAL_SECS_IN_DAY = 24*60*60
class AbqBusLocationInterface():
    """Polls Albuquerque's public realtime bus-location KML feed, converts it
    into a list of per-vehicle dicts, filters out implausible GPS jumps, and
    publishes the result through a ProcessNode."""

    def __init__(self, processName=None, fullConfigPath=None):
        self.gpsInterfaceNode = processNode.ProcessNode(fullConfigPath, processName)
        self.done = False
        self.masterList = []       # entries parsed during the current poll
        self.prevMasterList = []   # entries from the previous poll (jump detection)

    def run(self):
        '''
        Polls the public bus data that's in a KML format. The data is then parsed
        and stuffed into a data structure that's optimal to publish internally.
        '''
        while not self.done:
            kmlString = b''
            kmlDoc = None
            try:
                kmlString = urllib.request.urlopen(REMOTE_DATA_URL).read()
            except Exception:
                # was a bare except: don't swallow KeyboardInterrupt/SystemExit
                self.gpsInterfaceNode.log(logLevel=3, message="Unable to download remote data")
            # Replace undecodable bytes instead of crashing on bad feed data.
            fixedString = kmlString.decode('utf8', 'replace')
            if kmlString != b'':
                try:
                    kmlDoc = parser.fromstring(fixedString.encode())
                except Exception:
                    self.gpsInterfaceNode.log(logLevel=3, message="Unable to parse remote data")
            if kmlDoc is not None:
                documentChildren = kmlDoc.Document.getchildren()
                for item in documentChildren:
                    vehicleID = ''
                    routeTime = ''
                    routeName = ''
                    lat = None
                    lng = None
                    itemTag = item.tag
                    if itemTag.find('Placemark') != -1:
                        routeName = str(item.name)
                        # Vehicle id and message time live in an HTML table
                        # embedded in the Placemark description.
                        tableItems = item.description.table.getchildren()
                        for tableItem in tableItems:
                            tableChildren = tableItem.getchildren()
                            if tableItem.td == 'Vehicle #':
                                vehicleID = str(tableChildren[1])
                            elif tableItem.td == 'Msg Time':
                                routeTime = self.convertTime(str(tableChildren[1]))
                        routeLocation = item.Point.coordinates
                        valid, lat, lng = self.parseCoordinates(str(routeLocation))
                        if valid:
                            self.masterList.append({
                                'vehicleID': vehicleID,
                                'route': routeName,
                                'timeStamp': routeTime,
                                'latitude': lat,
                                'longitude': lng
                            })
            if len(self.prevMasterList) != 0 and self.prevMasterList != self.masterList:
                newIdx = 0
                # Note: alias of self.masterList — pops below also remove the
                # entry from the list that sendMsgs() publishes.
                filteredMasterList = self.masterList
                while newIdx < len(filteredMasterList):
                    newItem = filteredMasterList[newIdx]
                    removed = False
                    for oldItem in self.prevMasterList:
                        if newItem['vehicleID'] == oldItem['vehicleID']:
                            latList = [oldItem['latitude'], newItem['latitude']]
                            longList = [oldItem['longitude'], newItem['longitude']]
                            if self.detectAndSanitizeGPSJumps(latList, longList):
                                filteredMasterList.pop(newIdx)
                                removed = True
                            break
                    # Bug fix: only advance when nothing was popped; popping
                    # shifts the next entry into index newIdx, and the old code
                    # incremented unconditionally, skipping that entry.
                    if not removed:
                        newIdx += 1
                self.sendMsgs()
                self.gpsInterfaceNode.log(logLevel=0, message=filteredMasterList)
            self.prevMasterList = self.masterList
            self.masterList = []
            time.sleep(UPDATE_INTERVAL_SECS)

    def detectAndSanitizeGPSJumps(self, latList, longList):
        '''
        Checks to see if there are large GPS jumps and filter them out

        :return: True when any consecutive pair of points is more than 1 km
                 apart (treated as a bad fix).
        '''
        invalid = False
        for idx in range(len(latList) - 1):
            delta = self.calcRectDistanceKM(latList[idx], latList[idx+1], longList[idx], longList[idx+1])
            if delta > 1:
                self.gpsInterfaceNode.log(logLevel=0, message="Large GPS jump detected: " + str(delta))
                invalid = True
                break
        return invalid

    def calcRectDistanceKM(self, lat1, lat2, long1, long2):
        '''
        Calculate distance using equirectangular approximation
        '''
        EARTHRADIUSKM = 6371
        x = (math.radians(long2 - long1)) * math.cos(math.radians((lat1+lat2)/2))
        y = math.radians(lat2 - lat1)
        d = math.sqrt(x*x + y*y) * EARTHRADIUSKM
        return d

    def parseCoordinates(self, coordinatesString):
        '''
        Custom little parser that converts the comma delimted latitude and
        longitude coordinates into pythonic data

        :return: (valid, lat, lng); valid is False when the string could not
                 be parsed or the values are outside geographic range.
        '''
        validCoordinates = False
        lng = -360
        lat = -360
        firstIndex = coordinatesString.find(',')
        if firstIndex != -1:
            # KML order is "<longitude>,<latitude>".
            lng = float(coordinatesString[0:firstIndex])
            lat = float(coordinatesString[firstIndex+1:])
            if lat > -90 and lat < 90 and lng > -180 and lng < 180:
                validCoordinates = True
        return validCoordinates, lat, lng

    def sendMsgs(self):
        '''
        Wrapper for publishing/sending the parsed GPS data structure to
        subscribers
        '''
        self.gpsInterfaceNode.send('gpsData', self.masterList)

    def convertTime(self, timeStamp):
        '''
        Converts human time to time of day (in seconds)
        '''
        hour, minute, sec = self.parseHumanTime(timeStamp)
        return self.convertHumanToSecsInDay(hour, minute, sec)

    def parseHumanTime(self, timeStamp):
        '''
        Parse a 12-hour human-readable time stamp into 24-hour components.

        :param timeStamp: time in human readable "<hour>:<min>:<sec> AM|PM"
        :return: (hour, minute, sec); each component is -1 on parse failure.
        '''
        hour = -1
        minute = -1
        sec = -1
        firstIndex = timeStamp.find(':')
        if firstIndex != -1:
            hour = int(timeStamp[0:firstIndex])
            # Bug fix: str.find returns -1 (truthy!) when 'PM' is absent, so
            # the old `if timeStamp.find('PM'):` added 12 hours to AM times.
            is_pm = timeStamp.find('PM') != -1
            if is_pm and hour != 12:
                hour += 12
            elif not is_pm and hour == 12:
                hour = 0  # 12:xx AM is midnight in 24-hour time
            secondIndex = timeStamp.find(':', firstIndex+1)
            if secondIndex != -1:
                minute = int(timeStamp[firstIndex+1: secondIndex])
                thirdIndex = timeStamp.find(' ', secondIndex+1)
                if thirdIndex != -1:
                    sec = int(timeStamp[secondIndex+1: thirdIndex])
        if hour == -1 or minute == -1 or sec == -1:
            # Bug fix: was `gpsInterfaceNode.log(...)` (missing self.), which
            # raised NameError instead of logging the parse failure.
            self.gpsInterfaceNode.log(logLevel=3, message="Unable to parse time stamp")
        return hour, minute, sec

    def convertHumanToSecsInDay(self, hour, minute, sec):
        '''
        Convert hours, minutes, seconds to total seconds elapsed during the day
        '''
        return hour*3600 + minute*60 + sec

    def adjustTime(self, secsInDay):
        '''
        Need to calculate valid time window because there are garbage time stamps
        in the data and handle wrap condition
        '''
        newSecs = secsInDay - TIME_BUFFER
        if newSecs < 0:
            newSecs = TOTAL_SECS_IN_DAY + newSecs
        return newSecs
if __name__ == '__main__':
    # Optional CLI arguments: <processName> <fullConfigPath>
    cli_args = sys.argv[1:]
    if len(cli_args) == 2:
        gpsInterface = AbqBusLocationInterface(cli_args[0], cli_args[1])
    else:
        gpsInterface = AbqBusLocationInterface()
    gpsInterface.run()
|
|
import boto3
import logging
import requests
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from indra import get_config, has_config
from indra.util.nested_dict import NestedDict
logger = logging.getLogger(__name__)
def kill_all(job_queue, reason='None given', states=None):
    """Terminates/cancels all RUNNING, RUNNABLE, and STARTING jobs."""
    if states is None:
        states = ['STARTING', 'RUNNABLE', 'RUNNING']
    batch = boto3.client('batch')

    def ids_with_status(status):
        # jobIds of every job in the queue currently in the given status.
        listing = batch.list_jobs(jobQueue=job_queue, jobStatus=status)
        summaries = listing.get('jobSummaryList')
        if not summaries:
            return []
        return [summary['jobId'] for summary in summaries]

    # First cancel jobs that have not started running yet.
    for job_id in ids_with_status('RUNNABLE'):
        batch.cancel_job(jobId=job_id, reason=reason)

    # Then terminate everything in each of the requested states.
    res_list = []
    for status in states:
        for job_id in ids_with_status(status):
            logger.info('Killing %s' % job_id)
            res_list.append(batch.terminate_job(jobId=job_id, reason=reason))
    return res_list
def tag_instance(instance_id, **tags):
    """Tag a single ec2 instance."""
    logger.debug("Got request to add tags %s to instance %s."
                 % (str(tags), instance_id))
    instance = boto3.resource('ec2').Instance(instance_id)

    # Drop tags with an empty key or value.
    new_tags = {key: value for key, value in tags.items() if value and key}

    # Tags already present on the instance win; never overwrite them.
    if instance.tags is not None:
        existing_tags = {tag.get('Key'): tag.get('Value')
                         for tag in instance.tags}
        logger.debug("Ignoring existing tags; %s" % str(existing_tags))
        for tag_key in existing_tags.keys():
            new_tags.pop(tag_key, None)

    tag_list = [{'Key': key, 'Value': value} for key, value in new_tags.items()]
    if tag_list:
        logger.info('Adding project tags "%s" to instance %s'
                    % (new_tags, instance_id))
        instance.create_tags(Tags=tag_list)
    else:
        logger.info('No new tags from: %s' % str(tags))
    return
def tag_myself(project='cwc', **other_tags):
    """Function run when indra is used in an EC2 instance to apply tags."""
    # EC2 instance-metadata endpoint; only reachable from inside EC2.
    base_url = "http://169.254.169.254"
    try:
        resp = requests.get(base_url + "/latest/meta-data/instance-id")
    except requests.exceptions.ConnectionError:
        logger.warning("Could not connect to service. Note this should only "
                       "be run from within a batch job.")
        return
    tag_instance(resp.text, project=project, **other_tags)
    return
def get_batch_command(command_list, project=None, purpose=None):
    """Get the command appropriate for running something on batch."""
    if not project and has_config('DEFAULT_AWS_PROJECT'):
        # Fall back to the configured default project when none is given.
        project = get_config('DEFAULT_AWS_PROJECT')
    cmd = ['python', '-m', 'indra.util.aws', 'run_in_batch',
           ' '.join(command_list)]
    if project:
        cmd.extend(['--project', project])
    if purpose:
        cmd.extend(['--purpose', purpose])
    return cmd
def run_in_batch(command_list, project, purpose):
    """Tag this instance, run command_list as a subprocess, return its code."""
    from subprocess import call
    tag_myself(project, purpose=purpose)
    logger.info('\n' + 20*'=' + ' Begin Primary Command Output ' + 20*'=' + '\n')
    ret_code = call(command_list)
    logger.info('\n' + 21*'=' + ' End Primary Command Output ' + 21*'=' + '\n')
    return ret_code
def get_jobs(job_queue='run_reach_queue', job_status='RUNNING'):
    """Returns a list of dicts with jobName and jobId for each job with the
    given status."""
    listing = boto3.client('batch').list_jobs(jobQueue=job_queue,
                                              jobStatus=job_status)
    return listing.get('jobSummaryList')
def get_job_log(job_info, log_group_name='/aws/batch/job',
                write_file=True, verbose=False):
    """Gets the Cloudwatch log associated with the given job.

    Parameters
    ----------
    job_info : dict
        dict containing entries for 'jobName' and 'jobId', e.g., as returned
        by get_jobs()
    log_group_name : string
        Name of the log group; defaults to '/aws/batch/job'
    write_file : boolean
        If True, writes the downloaded log to a text file with the filename
        '%s_%s.log' % (job_name, job_id)
    verbose : boolean
        If True, logs progress while the log is downloaded.

    Returns
    -------
    list of strings
        The event messages in the log, with the earliest events listed first.
        Returns None when no log stream exists for the job.
    """
    job_name = job_info['jobName']
    job_id = job_info['jobId']
    logs = boto3.client('logs')
    batch = boto3.client('batch')
    # Reconstruct the stream name the way AWS Batch builds it:
    # <job definition name>/default/<ECS task id>.
    resp = batch.describe_jobs(jobs=[job_id])
    job_desc = resp['jobs'][0]
    job_def_name = job_desc['jobDefinition'].split('/')[-1].split(':')[0]
    task_arn_id = job_desc['container']['taskArn'].split('/')[-1]
    log_stream_name = '%s/default/%s' % (job_def_name, task_arn_id)
    stream_resp = logs.describe_log_streams(
        logGroupName=log_group_name,
        logStreamNamePrefix=log_stream_name)
    streams = stream_resp.get('logStreams')
    if not streams:
        logger.warning('No streams for job')
        return None
    elif len(streams) > 1:
        # Multiple streams matched the prefix; arbitrarily take the first.
        logger.warning('More than 1 stream for job, returning first')
    log_stream_name = streams[0]['logStreamName']
    if verbose:
        logger.info("Getting log for %s/%s" % (job_name, job_id))
    out_file = ('%s_%s.log' % (job_name, job_id)) if write_file else None
    lines = get_log_by_name(log_group_name, log_stream_name, out_file, verbose)
    return lines
def get_log_by_name(log_group_name, log_stream_name, out_file=None,
                    verbose=True):
    """Download a log given the log's group and stream name.

    Parameters
    ----------
    log_group_name : str
        The name of the log group, e.g. /aws/batch/job.
    log_stream_name : str
        The name of the log stream, e.g. run_reach_jobdef/default/<UUID>
    out_file : str or None
        If given, the downloaded lines are also written to this file.
    verbose : bool
        If True, log progress after each page of events.

    Returns
    -------
    lines : list[str]
        The lines of the log as a list.
    """
    logs = boto3.client('logs')
    kwargs = {'logGroupName': log_group_name,
              'logStreamName': log_stream_name,
              'startFromHead': True}
    lines = []
    # Page through the stream using nextForwardToken until it stops moving.
    while True:
        response = logs.get_log_events(**kwargs)
        # If we've gotten all the events already, the nextForwardToken for
        # this call will be the same as the last one
        if response.get('nextForwardToken') == kwargs.get('nextToken'):
            break
        else:
            events = response.get('events')
            if events:
                lines += ['%s: %s\n' % (evt['timestamp'], evt['message'])
                          for evt in events]
            kwargs['nextToken'] = response.get('nextForwardToken')
            if verbose:
                logger.info('%d %s' % (len(lines), lines[-1]))
    if out_file:
        with open(out_file, 'wt') as f:
            for line in lines:
                f.write(line)
    return lines
def dump_logs(job_queue='run_reach_queue', job_status='RUNNING'):
    """Write logs for all jobs with given the status to files."""
    for job in get_jobs(job_queue, job_status):
        get_job_log(job, write_file=True)
def get_s3_file_tree(s3, bucket, prefix):
    """Overcome s3 response limit and return NestedDict tree of paths.

    The NestedDict object also allows the user to search by the ends of a path.

    The tree mimics a file directory structure, with the leave nodes being the
    full unbroken key. For example, 'path/to/file.txt' would be retrieved by

        ret['path']['to']['file.txt']['key']

    The NestedDict object returned also has the capability to get paths that
    lead to a certain value. So if you wanted all paths that lead to something
    called 'file.txt', you could use

        ret.get_paths('file.txt')

    For more details, see the NestedDict docs.
    """
    def get_some_keys(keys, marker=None):
        # Fetch one page of keys (S3 caps list_objects responses) into
        # `keys`; returns True while more pages remain.
        if marker:
            relevant_files = s3.list_objects(Bucket=bucket, Prefix=prefix,
                                             Marker=marker)
        else:
            relevant_files = s3.list_objects(Bucket=bucket, Prefix=prefix)
        keys.extend([entry['Key'] for entry in relevant_files['Contents']
                     if entry['Key'] != marker])
        return relevant_files['IsTruncated']

    # Page through the listing, using the last key seen as the next Marker.
    file_keys = []
    marker = None
    while get_some_keys(file_keys, marker):
        marker = file_keys[-1]

    # Build the nested tree; each leaf stores its full key under 'key'.
    file_tree = NestedDict()
    pref_path = prefix.split('/')[:-1]  # avoid the trailing empty str.
    for key in file_keys:
        full_path = key.split('/')
        relevant_path = full_path[len(pref_path):]
        curr = file_tree
        for step in relevant_path:
            curr = curr[step]
        curr['key'] = key
    return file_tree
if __name__ == '__main__':
    # Top-level CLI: dispatches to run_in_batch or kill_all.
    parser = ArgumentParser(
        'aws.py',
        description=('Use some of INDRA\'s aws tools. For more specific help, '
                     'select one of the Methods with the `-h` option.')
    )
    subparsers = parser.add_subparsers(title='Task')
    subparsers.required = True
    subparsers.dest = 'task'

    # Create parent parser classes for second layer of options
    parent_run_parser = ArgumentParser(add_help=False)
    parent_run_parser.add_argument(
        'command',
        help=('Enter the command as a single string to be run as if in a '
              'batch environment.')
    )
    parent_run_parser.add_argument(
        '--project', '-P',
        default='cwc',
        help='Give a name for the project.'
    )
    parent_run_parser.add_argument(
        '--purpose', '-p',
        help='Give the task some meaning.'
    )
    parent_kill_parser = ArgumentParser(add_help=False)
    parent_kill_parser.add_argument(
        'queue_name',
        help='Select the batch queue in which all jobs should be terminated.'
    )
    parent_kill_parser.add_argument(
        '--reason', '-R',
        help='Give a reason for killing all the jobs.'
    )

    # Make non_db_parser and get subparsers
    run_parser = subparsers.add_parser(
        'run_in_batch',
        parents=[parent_run_parser],
        # Typo fix: "wtihin" -> "within" in user-facing help text.
        description=('This should be called to run any command within an aws '
                     'batch job instance.'),
        formatter_class=ArgumentDefaultsHelpFormatter
    )
    # Make db parser and get subparsers.
    kill_parser = subparsers.add_parser(
        'kill_all',
        parents=[parent_kill_parser],
        description='Kill all the jobs running in a given queue.',
        formatter_class=ArgumentDefaultsHelpFormatter
    )
    args = parser.parse_args()

    if args.task == 'run_in_batch':
        ret_code = run_in_batch(args.command.split(' '), args.project,
                                args.purpose)
        # Bug fix: was `ret_code is 0`, an identity check that only works by
        # accident of CPython small-int interning; use equality for the
        # exit-status test.  Also fixed the "endend" typo in the log message.
        if ret_code == 0:
            logger.info('Job ended well.')
        else:
            logger.error('Job failed!')
        import sys
        sys.exit(ret_code)
    elif args.task == 'kill_all':
        kill_all(args.queue_name, args.reason)
|
|
import copy
import os
import platform
from conans.client import join_arguments
from conans.client.build.compiler_flags import (architecture_flag, format_libraries,
format_library_paths, format_defines,
sysroot_flag, format_include_paths,
build_type_flags, libcxx_flag, build_type_define,
libcxx_define, pic_flag, rpath_flags)
from conans.client.build.cppstd_flags import cppstd_flag
from conans.client.tools.oss import OSInfo
from conans.client.tools.win import unix_path
from conans.tools import (environment_append, args_to_string, cpu_count, cross_building,
detected_architecture, get_gnu_triplet)
from conans.errors import ConanException
from conans.util.files import get_abs_path
class AutoToolsBuildEnvironment(object):
"""
- CPPFLAGS (C-PreProcesor-Flags NOT related with c++) (-I -D)
- CFLAGS (not CPPFLAGS nor LDFLAGS, used for optimization or debugging)
- CXXFLAGS (the CFLAGS for c++)
- LDFLAGS (-L, others like -m64 -m32) linker
"""
    def __init__(self, conanfile, win_bash=False, include_rpath_flags=False):
        """
        FIXME: include_rpath_flags CONAN 2.0 to default True? Could break many packages in center
        """
        self._conanfile = conanfile
        self._win_bash = win_bash
        self._include_rpath_flags = include_rpath_flags
        # Only probe for a Windows subsystem (msys2/cygwin/...) when running
        # the build under bash on Windows.
        self.subsystem = OSInfo().detect_windows_subsystem() if self._win_bash else None
        self._deps_cpp_info = conanfile.deps_cpp_info
        # Cache the relevant settings; get_safe returns None when unset.
        self._os = conanfile.settings.get_safe("os")
        self._arch = conanfile.settings.get_safe("arch")
        self._build_type = conanfile.settings.get_safe("build_type")
        self._compiler = conanfile.settings.get_safe("compiler")
        self._compiler_version = conanfile.settings.get_safe("compiler.version")
        self._libcxx = conanfile.settings.get_safe("compiler.libcxx")
        self._cppstd = conanfile.settings.get_safe("cppstd")

        # Set the generic objects before mapping to env vars to let the user
        # alter some value
        self.libs = copy.copy(self._deps_cpp_info.libs)
        self.include_paths = copy.copy(self._deps_cpp_info.include_paths)
        self.library_paths = copy.copy(self._deps_cpp_info.lib_paths)

        self.defines = self._configure_defines()
        # Will go to CFLAGS and CXXFLAGS ["-m64" "-m32", "-g", "-s"]
        self.flags = self._configure_flags()
        # Only c++ flags [-stdlib, -library], will go to CXXFLAGS
        self.cxx_flags = self._configure_cxx_flags()
        # cpp standard
        self.cppstd_flag = cppstd_flag(self._compiler, self._compiler_version, self._cppstd)
        # Not -L flags, ["-m64" "-m32"]
        self.link_flags = self._configure_link_flags()  # TEST!
        # Precalculate -fPIC
        self.fpic = self._configure_fpic()
        # Precalculate build, host, target triplets
        self.build, self.host, self.target = self._get_host_build_target_flags()
def _configure_fpic(self):
if str(self._os) not in ["Windows", "WindowsStore"]:
fpic = self._conanfile.options.get_safe("fPIC")
if fpic is not None:
shared = self._conanfile.options.get_safe("shared")
return True if (fpic or shared) else None
def _get_host_build_target_flags(self):
"""Based on google search for build/host triplets, it could need a lot
and complex verification"""
arch_detected = detected_architecture() or platform.machine()
os_detected = platform.system()
if os_detected is None or arch_detected is None or self._arch is None or self._os is None:
return False, False, False
if not cross_building(self._conanfile.settings, os_detected, arch_detected):
return False, False, False
try:
build = get_gnu_triplet(os_detected, arch_detected, self._compiler)
except ConanException:
build = None
try:
host = get_gnu_triplet(self._os, self._arch, self._compiler)
except ConanException:
host = None
return build, host, None
    def configure(self, configure_dir=None, args=None, build=None, host=None, target=None,
                  pkg_config_paths=None, vars=None):
        """
        :param pkg_config_paths: Optional paths to locate the *.pc files
        :param configure_dir: Absolute or relative path to the configure script
        :param args: Optional arguments to pass to configure.
        :param build: In which system the program will be built. "False" skips the --build flag
        :param host: In which system the generated program will run. "False" skips the --host flag
        :param target: This option is only used to build a cross-compiling toolchain.
                       "False" skips the --target flag
                       When the tool chain generates executable program, in which target system
                       the program will run.
        :param vars: Optional environment overrides; defaults to self.vars.

        :return: None
        http://jingfenghanmax.blogspot.com.es/2010/09/configure-with-host-target-and-build.html
        https://gcc.gnu.org/onlinedocs/gccint/Configure-Terms.html
        """
        if not self._conanfile.should_configure:
            return
        if configure_dir:
            configure_dir = configure_dir.rstrip("/")
        else:
            configure_dir = "."

        # For each triplet: explicit False skips the flag entirely; otherwise
        # a user-supplied value overrides the auto-detected one.
        triplet_args = []

        if build is not False:  # Skipped by user
            if build or self.build:  # User specified value or automatic
                triplet_args.append("--build=%s" % (build or self.build))

        if host is not False:  # Skipped by user
            if host or self.host:  # User specified value or automatic
                triplet_args.append("--host=%s" % (host or self.host))

        if target is not False:  # Skipped by user
            if target or self.target:  # User specified value or automatic
                triplet_args.append("--target=%s" % (target or self.target))

        if pkg_config_paths:
            pkg_env = {"PKG_CONFIG_PATH":
                       [os.pathsep.join(get_abs_path(f, self._conanfile.install_folder)
                                        for f in pkg_config_paths)]}
        else:
            # If we are using pkg_config generator automate the pcs location, otherwise it could
            # read wrong files
            pkg_env = {"PKG_CONFIG_PATH": [self._conanfile.install_folder]} \
                if "pkg_config" in self._conanfile.generators else {}

        # Default the install --prefix to the package folder unless the
        # caller already passed one.
        if self._conanfile.package_folder is not None:
            if not args:
                args = ["--prefix=%s" % self._conanfile.package_folder.replace("\\", "/")]
            elif not any(["--prefix=" in arg for arg in args]):
                args.append("--prefix=%s" % self._conanfile.package_folder.replace("\\", "/"))

        with environment_append(pkg_env):
            with environment_append(vars or self.vars):
                configure_dir = self._adjust_path(configure_dir)
                command = '%s/configure %s %s' % (configure_dir,
                                                  args_to_string(args), " ".join(triplet_args))
                self._conanfile.output.info("Calling:\n > %s" % command)
                self._conanfile.run(command,
                                    win_bash=self._win_bash,
                                    subsystem=self.subsystem)
def _adjust_path(self, path):
if self._win_bash:
path = unix_path(path, path_flavor=self.subsystem)
return '"%s"' % path if " " in path else path
def make(self, args="", make_program=None, target=None, vars=None):
if not self._conanfile.should_build:
return
make_program = os.getenv("CONAN_MAKE_PROGRAM") or make_program or "make"
with environment_append(vars or self.vars):
str_args = args_to_string(args)
cpu_count_option = ("-j%s" % cpu_count()) if "-j" not in str_args else None
self._conanfile.run("%s" % join_arguments([make_program, target, str_args,
cpu_count_option]),
win_bash=self._win_bash, subsystem=self.subsystem)
    def install(self, args="", make_program=None, vars=None):
        """Run ``make install``; respects ``conanfile.should_install``."""
        if not self._conanfile.should_install:
            return
        # Delegates to make() so CONAN_MAKE_PROGRAM and -j handling apply here too.
        self.make(args=args, make_program=make_program, target="install", vars=vars)
    def _configure_link_flags(self):
        """Linker flags, excluding the -L library paths (those are handled
        separately): dependency shared/exe link flags plus architecture,
        sysroot and (optionally) rpath flags."""
        ret = copy.copy(self._deps_cpp_info.sharedlinkflags)
        ret.extend(self._deps_cpp_info.exelinkflags)
        arch_flag = architecture_flag(compiler=self._compiler, arch=self._arch)
        if arch_flag:
            ret.append(arch_flag)
        sysf = sysroot_flag(self._deps_cpp_info.sysroot, win_bash=self._win_bash,
                            subsystem=self.subsystem,
                            compiler=self._compiler)
        if sysf:
            ret.append(sysf)
        if self._include_rpath_flags:
            # Rpath flags depend on the OS the binaries will be built on,
            # falling back to the target OS when os_build is not declared.
            the_os = self._conanfile.settings.get_safe("os_build") or self._os
            ret.extend(rpath_flags(the_os, self._compiler, self._deps_cpp_info.lib_paths))
        return ret
    def _configure_flags(self):
        """C compilation flags: dependency cflags plus architecture,
        build-type (e.g. optimization/debug) and sysroot flags."""
        ret = copy.copy(self._deps_cpp_info.cflags)
        arch_flag = architecture_flag(compiler=self._compiler, arch=self._arch)
        if arch_flag:
            ret.append(arch_flag)
        btfs = build_type_flags(compiler=self._compiler, build_type=self._build_type,
                                vs_toolset=self._conanfile.settings.get_safe("compiler.toolset"))
        if btfs:
            ret.extend(btfs)
        srf = sysroot_flag(self._deps_cpp_info.sysroot, win_bash=self._win_bash,
                           subsystem=self.subsystem,
                           compiler=self._compiler)
        if srf:
            ret.append(srf)
        return ret
def _configure_cxx_flags(self):
ret = copy.copy(self._deps_cpp_info.cppflags)
cxxf = libcxx_flag(compiler=self._compiler, libcxx=self._libcxx)
if cxxf:
ret.append(cxxf)
return ret
def _configure_defines(self):
# requires declared defines
ret = copy.copy(self._deps_cpp_info.defines)
# Debug definition for GCC
btf = build_type_define(build_type=self._build_type)
if btf:
ret.append(btf)
# CXX11 ABI
abif = libcxx_define(compiler=self._compiler, libcxx=self._libcxx)
if abif:
ret.append(abif)
return ret
    def _get_vars(self):
        """Compute the raw flag lists for the autotools environment.

        Returns a tuple (ld_flags, cpp_flags, libs, cxx_flags, c_flags),
        each a list of strings.
        """
        def append(*args):
            # Flatten the given arguments into one list, skipping falsy values.
            ret = []
            for arg in args:
                if arg:
                    if isinstance(arg, list):
                        ret.extend(arg)
                    else:
                        ret.append(arg)
            return ret
        lib_paths = format_library_paths(self.library_paths, win_bash=self._win_bash,
                                         subsystem=self.subsystem, compiler=self._compiler)
        include_paths = format_include_paths(self.include_paths, win_bash=self._win_bash,
                                             subsystem=self.subsystem, compiler=self._compiler)
        ld_flags = append(self.link_flags, lib_paths)
        cpp_flags = append(include_paths, format_defines(self.defines))
        libs = format_libraries(self.libs, compiler=self._compiler)
        # Flags shared by C and C++ (plus -fPIC when requested).
        tmp_compilation_flags = copy.copy(self.flags)
        if self.fpic:
            tmp_compilation_flags.append(pic_flag(self._compiler))
        # append() builds a fresh list, so cxx_flags does not alias
        # tmp_compilation_flags -- but c_flags does: mutating c_flags
        # also mutates tmp_compilation_flags (not cxx_flags).
        cxx_flags = append(tmp_compilation_flags, self.cxx_flags, self.cppstd_flag)
        c_flags = tmp_compilation_flags
        return ld_flags, cpp_flags, libs, cxx_flags, c_flags
@property
def vars_dict(self):
ld_flags, cpp_flags, libs, cxx_flags, c_flags = self._get_vars()
if os.environ.get("CPPFLAGS", None):
cpp_flags.append(os.environ.get("CPPFLAGS", None))
if os.environ.get("CXXFLAGS", None):
cxx_flags.append(os.environ.get("CXXFLAGS", None))
if os.environ.get("CFLAGS", None):
c_flags.append(os.environ.get("CFLAGS", None))
if os.environ.get("LDFLAGS", None):
ld_flags.append(os.environ.get("LDFLAGS", None))
if os.environ.get("LIBS", None):
libs.append(os.environ.get("LIBS", None))
ret = {"CPPFLAGS": cpp_flags,
"CXXFLAGS": cxx_flags,
"CFLAGS": c_flags,
"LDFLAGS": ld_flags,
"LIBS": libs,
}
return ret
@property
def vars(self):
ld_flags, cpp_flags, libs, cxx_flags, c_flags = self._get_vars()
cpp_flags = " ".join(cpp_flags) + _environ_value_prefix("CPPFLAGS")
cxx_flags = " ".join(cxx_flags) + _environ_value_prefix("CXXFLAGS")
cflags = " ".join(c_flags) + _environ_value_prefix("CFLAGS")
ldflags = " ".join(ld_flags) + _environ_value_prefix("LDFLAGS")
libs = " ".join(libs) + _environ_value_prefix("LIBS")
ret = {"CPPFLAGS": cpp_flags.strip(),
"CXXFLAGS": cxx_flags.strip(),
"CFLAGS": cflags.strip(),
"LDFLAGS": ldflags.strip(),
"LIBS": libs.strip(),
}
return ret
def _environ_value_prefix(var_name, prefix=" "):
if os.environ.get(var_name, ""):
return "%s%s" % (prefix, os.environ.get(var_name, ""))
else:
return ""
|
|
"""
Provides abstraction over low level coap protocol data structures(Option and Message).
"""
import struct
import gevent.event
import random
from enum import Enum
from datetime import datetime
from message_format import CoapMessage, CoapOption
from code_registry import MessageType, OptionNumber
# Protocol version number carried in every CoAP header.
COAP_VERSION = 1
# Message IDs are 16-bit unsigned values.
COAP_MAX_MESSAGE_ID = 0xFFFF
# Initial retransmission timeout, in seconds.
COAP_ACK_TIMEOUT = 2
# Randomization factor applied on top of COAP_ACK_TIMEOUT when picking a timeout.
COAP_ACK_RANDOM_FACTOR = 1.5
# Retransmission attempts before a message is marked failed.
COAP_MAX_RETRANSMIT = 4
# Smallest/largest payload sizes allowed by blockwise transfer.
COAP_BLOCK_MIN_SIZE = 16
COAP_BLOCK_MAX_SIZE = 1024
# Possible block payload sizes (powers of two, SZX 0..6).
_COAP_BLOCK_SIZES = [16, 32, 64, 128, 256, 512, 1024]
class MessageState(int, Enum):
    """States of the per-message state machine."""
    init = 0
    to_be_received = 1
    wait_for_send = 2
    wait_for_ack = 3
    wait_for_response = 4
    wait_for_free = 5
    wait_for_updates = 6

    @staticmethod
    def get_str(value):
        """Return the upper-cased state name for the given numeric value."""
        names = ('init',
                 'to_be_received',
                 'wait_for_send',
                 'wait_for_ack',
                 'wait_for_response',
                 'wait_for_free',
                 'wait_for_updates')
        return names[value].upper()
class MessageStatus(int, Enum):
    """Final status of a CoAP request/response transaction."""
    success = 0
    failed = 1
    ack_timeout = 2
    response_timeout = 3
    reset_received = 4
    observe_timeout = 5

    @staticmethod
    def get_str(value):
        """Return the upper-cased status name for the given numeric value.

        Fix: the original list was missing a comma after 'ack_timeout',
        which fused it with 'response_timeout' into a single element,
        shifting every later name by one and making get_str(5) raise
        IndexError.
        """
        names = ('success',
                 'failed',
                 'ack_timeout',
                 'response_timeout',
                 'reset_received',
                 'observe_timeout')
        return names[value].upper()
class Option(CoapOption):
    """ Subclass of CoapOption providing block option encode/decode helpers."""
    def __init__(self, option_number, option_value, last_option_number=0):
        CoapOption.__init__(self, option_number=option_number, option_value=option_value, last_option_number=last_option_number)

    @staticmethod
    def block_value_encode(block_number, more, size):
        """Encode block_number/more/size into a CoAP block option byte string.

        :raises ValueError: if *size* exceeds COAP_BLOCK_MAX_SIZE or the
            block number is too large for the 3-byte option encoding.

        Fixes vs. original:
        - raised bare strings (itself a TypeError at runtime); now raises
          ValueError.
        - a combined value of 0 (block 0, no more, SZX 0) fell through to
          the error branch; it now encodes as a single zero byte.
        - the middle byte of the 3-byte encoding was not masked
          (``value >> 8`` can exceed 255 and crash struct.pack).
        """
        if size > COAP_BLOCK_MAX_SIZE:
            raise ValueError('Invalid size {0}'.format(size))
        more_bit = 1 if more else 0
        szx = 0
        for szx, size_value in enumerate(_COAP_BLOCK_SIZES):
            if size <= size_value:
                break
        value = (block_number << 4) | (more_bit << 3) | szx
        bit_len = value.bit_length()
        # // keeps integer semantics on both Python 2 and 3.
        byte_len = (bit_len // 8) + (1 if bit_len % 8 != 0 else 0)
        if byte_len <= 1:
            return struct.pack('B', value)
        elif byte_len == 2:
            return struct.pack('BB', value >> 8, value & 0xff)
        elif byte_len == 3:
            return struct.pack('BBB', value >> 16, (value >> 8) & 0xff, value & 0xff)
        else:
            raise ValueError('Block number {0} too large to encode'.format(block_number))

    @staticmethod
    def block_value_decode(value):
        """Decode a CoAP block option byte string into (block_number, more, size).

        An empty value decodes to (0, False, 0).

        :raises ValueError: for values longer than 3 bytes (the original
            left ``option_value`` unbound in that case and crashed with
            UnboundLocalError).
        """
        if len(value) == 0:
            return 0, False, 0
        elif len(value) == 1:
            option_value, = struct.unpack('B', value)
        elif len(value) == 2:
            value1, value2 = struct.unpack('BB', value)
            option_value = (value1 << 8) | value2
        elif len(value) == 3:
            value1, value2, value3 = struct.unpack('BBB', value)
            option_value = (value1 << 16) | (value2 << 8) | value3
        else:
            raise ValueError('Block option value too long: {0} bytes'.format(len(value)))
        block_number = option_value >> 4
        more = ((option_value >> 3) & 1) != 0
        szx = option_value & 0b111
        size = 2 ** (szx + 4)
        return block_number, more, size
class Message(CoapMessage):
    """ Subclass of CoapMessage providing additional services: timeout
    tracking, retransmission counting, block1 payload slicing and a
    completion event that callers can wait on."""
    def __init__(self, message_id=0, message_type=MessageType.confirmable, class_code=0, class_detail=0,
                 token='', options=None, payload=None, block1_size=0):
        # A payload may only be supplied together with a valid block1 size.
        assert payload is None or block1_size in _COAP_BLOCK_SIZES
        if options is None:
            options = []
        # Original (full) payload which should be sent to the server (through PUT/POST request)
        self.block1_payload = payload
        # Preferred block size to be used in block1 requests
        self.block1_preferred_size = block1_size
        # Payload for this trip: only the first block is carried by this message.
        if payload and len(payload) > block1_size:
            payload = payload[:block1_size]
        CoapMessage.__init__(self, version=COAP_VERSION, message_type=message_type, message_id=message_id,
                             class_code=class_code, class_detail=class_detail,
                             token=token, token_length=len(token), options=options, payload=payload)
        #assert token is None or type(token) is bytearray
        # CoAP token lengths are restricted to these sizes.
        assert self.token_length in [0, 1, 2, 4, 8]
        # Current state machine state (see MessageState).
        self.state = MessageState.init
        # When the message state was last changed.
        self._state_change_timestamp = datetime.now()
        # Timeout for this message, randomized per the ACK_RANDOM_FACTOR rule.
        # Together with _state_change_timestamp and datetime.now() it is easy
        # to find whether a timeout happened or not.
        self.timeout = random.uniform(COAP_ACK_TIMEOUT, COAP_ACK_TIMEOUT * COAP_ACK_RANDOM_FACTOR)
        # How many times this message was retransmitted (because of timeout).
        # Once this count reaches COAP_MAX_RETRANSMIT the message will be set to failed state.
        self.retransmission_counter = 0
        # Status of the request/response (see MessageStatus).
        self.status = MessageStatus.success
        # Messages received from the other side as a reply.
        self.server_reply_list = []
        # An event on which callers can wait.
        # Triggered once the message is transmitted and a response or timeout occurred.
        self.transaction_complete_event = gevent.event.Event()
        # Observe-specific fields: notification callback, its argument, and max-age.
        self.callback = None
        self.callback_arg = None
        self.age = -1.0
        # Cache the request URL from the uri_path option (empty string if absent).
        opt = self.find_option(OptionNumber.uri_path)
        if len(opt) > 0:
            self.url = opt[0].value
        else:
            self.url = ''
    def recycle(self, msg_id):
        """ Recycle the given message so that it can be used to send copy/similar message again.
        Note - Options are not cleared and same token is used.
        """
        self.state = MessageState.init
        self.message_id = msg_id
        self.retransmission_counter = 0
    def change_state(self, new_state):
        """ Change the message's state to the given new state.
        Also records when this change happened (_state_change_timestamp);
        that timestamp is used in timeout calculation.
        """
        # From init, only sending or receiving transitions are valid.
        if self.state == MessageState.init:
            assert new_state == MessageState.wait_for_send or new_state == MessageState.to_be_received
        self.state = new_state
        self._state_change_timestamp = datetime.now()
    @staticmethod
    def parse(data):
        """ Parse raw bytes into a Message (via CoapMessage.parse)."""
        coap_msg = CoapMessage.parse(data)
        msg = Message()
        # Copy all the CoapMessage attributes to the Message object.
        msg.__dict__.update(coap_msg.__dict__)
        return msg
    def add_option(self, option):
        """ Adds given options to the option list."""
        self.coap_option.append(option)
    def find_option(self, option_number):
        """ Returns all options matching option_number as a list (possibly empty)."""
        return [option for option in self.coap_option if option.option_number == option_number]
        #return filter(lambda option: option.option_number == option_number, self.coap_option)
    def has_observe_option(self):
        """ Returns True if the message has Observe option set"""
        return len(self.find_option(OptionNumber.observe)) > 0
    def remove_option(self, option_number):
        """ Removes the given option(by options number) from the option list.
        Note - If more than one option found for the given option number, all of them are removed.
        """
        # NOTE(review): deleting while enumerating skips the element after each
        # removal, so consecutive duplicates may survive -- confirm intent.
        for index, option in enumerate(self.coap_option):
            if option.option_number == option_number:
                del self.coap_option[index]
    def get_age_from_option(self):
        """ Finds the max-age option in the message and returns its value,
        or -1 when the option is absent.
        """
        opt = self.find_option(OptionNumber.max_age)
        if len(opt) > 0:
            # Pick the struct format matching the option length (1, 2 or 4 bytes).
            # NOTE(review): unpacks in native byte order; CoAP integer options
            # are big-endian on the wire -- confirm the value was already converted.
            fmt = 'I'
            if opt[0].length == 1:
                fmt = 'B'
            elif opt[0].length == 2:
                fmt = 'H'
            return struct.unpack(fmt, opt[0].value)[0]
        return -1
    def get_timeout(self):
        """ Returns timeout remaining in seconds.
        +ve value means the timeout is in future.
        -ve value means it is already late.
        """
        passed_time = (datetime.now() - self._state_change_timestamp).total_seconds()
        # Observed resources use the max-age value instead of the ACK timeout.
        if self.age == -1:
            return self.timeout - passed_time
        else:
            return self.age - passed_time
class MessageIdGenerator():
    """ Abstract interface for message ID generators.
    See SequenceMessageIdGenerator() for a concrete implementation.
    """
    def __init__(self, start_number):
        pass

    def get_next_id(self):
        """ Return the next unique ID within the CoAP time span.
        Abstract: this base implementation returns None.
        """
        return None
class TokenGenerator():
    """ Abstract interface for message token generators.
    See SequenceTokenGenerator() for a concrete implementation.
    """
    def __init__(self, token_length=4):
        """ token_length - length of the tokens in bytes."""
        pass

    def get_next_id(self):
        """ Return the next unique ID within the CoAP time span.
        Abstract: this base implementation returns None.
        """
        return None
class SequenceMessageIdGenerator(MessageIdGenerator):
    """Generates message IDs sequentially, starting from a random value."""
    def __init__(self):
        # Starts with a random number as required by the CoAP spec.
        self._next_message_id = random.randint(1, COAP_MAX_MESSAGE_ID)

    def get_next_id(self):
        """ Returns the next sequence number, wrapping before it exceeds
        the 16-bit message ID range.

        Fix: the original compared with == after incrementing, so a
        generator seeded exactly at COAP_MAX_MESSAGE_ID stepped past
        0xFFFF and never wrapped, producing IDs that no longer fit the
        16-bit header field.
        """
        self._next_message_id += 1
        if self._next_message_id >= COAP_MAX_MESSAGE_ID:
            self._next_message_id = 1
        return self._next_message_id
class SequenceTokenGenerator(TokenGenerator):
    """Generates tokens sequentially, packed as fixed-length big-endian bytes."""
    def __init__(self, token_length=4):
        # CoAP token lengths are restricted to these sizes.
        assert token_length == 1 or token_length == 2 or token_length == 4 or token_length == 8
        max_number = (1 << (token_length * 8)) - 1
        self._next_token = random.randint(1, max_number)
        self._max_number = max_number
        self._token_length = token_length

    def get_next_token(self):
        """ Generates a new token as a big-endian byte string.

        Fix: the original wrap check used ==, so a counter seeded exactly
        at the maximum stepped past it and overflowed struct.pack on the
        next call; >= wraps in that case too.
        """
        self._next_token += 1
        if self._next_token >= self._max_number:
            self._next_token = 1
        if self._token_length == 1:
            fmt = 'B'
        elif self._token_length == 2:
            fmt = 'H'
        elif self._token_length == 4:
            fmt = 'I'
        elif self._token_length == 8:
            fmt = 'Q'
        return struct.pack('!' + fmt, self._next_token)
|
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.words.protocols.jabber.jid}.
"""
from twisted.python.compat import unicode
from twisted.trial import unittest
from twisted.words.protocols.jabber import jid
class JIDParsingTests(unittest.TestCase):
    """
    Tests for L{jid.parse} and the L{jid.prep} normalization helper.
    """
    def test_parse(self):
        """
        Test different forms of JIDs.
        """
        # Basic forms
        self.assertEqual(jid.parse("user@host/resource"),
                         ("user", "host", "resource"))
        self.assertEqual(jid.parse("user@host"),
                         ("user", "host", None))
        self.assertEqual(jid.parse("host"),
                         (None, "host", None))
        self.assertEqual(jid.parse("host/resource"),
                         (None, "host", "resource"))
        # More interesting forms: everything after the first '/' belongs
        # to the resource, including '@' and further '/' characters.
        self.assertEqual(jid.parse("foo/bar@baz"),
                         (None, "foo", "bar@baz"))
        self.assertEqual(jid.parse("boo@foo/bar@baz"),
                         ("boo", "foo", "bar@baz"))
        self.assertEqual(jid.parse("boo@foo/bar/baz"),
                         ("boo", "foo", "bar/baz"))
        self.assertEqual(jid.parse("boo/foo@bar@baz"),
                         (None, "boo", "foo@bar@baz"))
        self.assertEqual(jid.parse("boo/foo/bar"),
                         (None, "boo", "foo/bar"))
        self.assertEqual(jid.parse("boo//foo"),
                         (None, "boo", "/foo"))
    def test_noHost(self):
        """
        Test for failure on no host part.
        """
        self.assertRaises(jid.InvalidFormat, jid.parse, "user@")
    def test_doubleAt(self):
        """
        Test for failure on double @ signs.
        This should fail because @ is not a valid character for the host
        part of the JID.
        """
        self.assertRaises(jid.InvalidFormat, jid.parse, "user@@host")
    def test_multipleAt(self):
        """
        Test for failure on two @ signs.
        This should fail because @ is not a valid character for the host
        part of the JID.
        """
        self.assertRaises(jid.InvalidFormat, jid.parse, "user@host@host")
    # Basic tests for case mapping. These are fallback tests for the
    # prepping done in twisted.words.protocols.jabber.xmpp_stringprep
    def test_prepCaseMapUser(self):
        """
        Test case mapping of the user part of the JID.
        """
        self.assertEqual(jid.prep("UsEr", "host", "resource"),
                         ("user", "host", "resource"))
    def test_prepCaseMapHost(self):
        """
        Test case mapping of the host part of the JID.
        """
        self.assertEqual(jid.prep("user", "hoST", "resource"),
                         ("user", "host", "resource"))
    def test_prepNoCaseMapResource(self):
        """
        Test no case mapping of the resource part of the JID.
        """
        self.assertEqual(jid.prep("user", "hoST", "resource"),
                         ("user", "host", "resource"))
        self.assertNotEqual(jid.prep("user", "host", "Resource"),
                            ("user", "host", "resource"))
class JIDTests(unittest.TestCase):
    """
    Tests for the L{jid.JID} class: construction, attribute access,
    string forms, equality and hashing.
    """
    def test_noneArguments(self):
        """
        Test that using no arguments raises an exception.
        """
        self.assertRaises(RuntimeError, jid.JID)
    def test_attributes(self):
        """
        Test that the attributes correspond with the JID parts.
        """
        j = jid.JID("user@host/resource")
        self.assertEqual(j.user, "user")
        self.assertEqual(j.host, "host")
        self.assertEqual(j.resource, "resource")
    def test_userhost(self):
        """
        Test the extraction of the bare JID.
        """
        j = jid.JID("user@host/resource")
        self.assertEqual("user@host", j.userhost())
    def test_userhostOnlyHost(self):
        """
        Test the extraction of the bare JID of the full form host/resource.
        """
        j = jid.JID("host/resource")
        self.assertEqual("host", j.userhost())
    def test_userhostJID(self):
        """
        Test getting a JID object of the bare JID.
        """
        j1 = jid.JID("user@host/resource")
        j2 = jid.internJID("user@host")
        self.assertIdentical(j2, j1.userhostJID())
    def test_userhostJIDNoResource(self):
        """
        Test getting a JID object of the bare JID when there was no resource.
        """
        j = jid.JID("user@host")
        self.assertIdentical(j, j.userhostJID())
    def test_fullHost(self):
        """
        Test giving a string representation of the JID with only a host part.
        """
        j = jid.JID(tuple=(None, 'host', None))
        self.assertEqual('host', j.full())
    def test_fullHostResource(self):
        """
        Test giving a string representation of the JID with host, resource.
        """
        j = jid.JID(tuple=(None, 'host', 'resource'))
        self.assertEqual('host/resource', j.full())
    def test_fullUserHost(self):
        """
        Test giving a string representation of the JID with user, host.
        """
        j = jid.JID(tuple=('user', 'host', None))
        self.assertEqual('user@host', j.full())
    def test_fullAll(self):
        """
        Test giving a string representation of the JID.
        """
        j = jid.JID(tuple=('user', 'host', 'resource'))
        self.assertEqual('user@host/resource', j.full())
    def test_equality(self):
        """
        Test JID equality.
        """
        j1 = jid.JID("user@host/resource")
        j2 = jid.JID("user@host/resource")
        self.assertNotIdentical(j1, j2)
        self.assertEqual(j1, j2)
    def test_equalityWithNonJIDs(self):
        """
        Test JID equality against a plain string (should not be equal).
        """
        j = jid.JID("user@host/resource")
        self.assertFalse(j == 'user@host/resource')
    def test_inequality(self):
        """
        Test JID inequality.
        """
        j1 = jid.JID("user1@host/resource")
        j2 = jid.JID("user2@host/resource")
        self.assertNotEqual(j1, j2)
    def test_inequalityWithNonJIDs(self):
        """
        Test JID inequality against a plain string.
        """
        j = jid.JID("user@host/resource")
        self.assertNotEqual(j, 'user@host/resource')
    def test_hashable(self):
        """
        Test JID hashability.
        """
        j1 = jid.JID("user@host/resource")
        j2 = jid.JID("user@host/resource")
        self.assertEqual(hash(j1), hash(j2))
    def test_unicode(self):
        """
        Test unicode representation of JIDs.
        """
        j = jid.JID(tuple=('user', 'host', 'resource'))
        self.assertEqual(u"user@host/resource", unicode(j))
    def test_repr(self):
        """
        Test representation of JID objects.
        """
        j = jid.JID(tuple=('user', 'host', 'resource'))
        self.assertEqual("JID(%s)" % repr(u'user@host/resource'), repr(j))
class InternJIDTests(unittest.TestCase):
    """
    Tests for L{jid.internJID}.
    """
    def test_identity(self):
        """
        Interning the same JID string twice yields the very same object.
        """
        first = jid.internJID("user@host")
        second = jid.internJID("user@host")
        self.assertIdentical(first, second)
|
|
# coding: utf-8
from __future__ import unicode_literals
from collections import OrderedDict
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.core.urlresolvers import (
NoReverseMatch, Resolver404, get_script_prefix, resolve
)
from django.db.models import Manager
from django.db.models.query import QuerySet
from django.utils import six
from django.utils.encoding import smart_text
from django.utils.six.moves.urllib import parse as urlparse
from django.utils.translation import ugettext_lazy as _
from rest_framework.fields import (
Field, empty, get_attribute, is_simple_callable, iter_options
)
from rest_framework.reverse import reverse
from rest_framework.utils import html
class Hyperlink(six.text_type):
    """
    A string like object that additionally has an associated name.
    We use this for hyperlinked URLs that may render as a named link
    in some contexts, or render as a plain URL in others.
    """
    def __new__(cls, url, name):
        # Fix: the class argument of __new__ was misleadingly named `self`;
        # it receives the class, not an instance.
        ret = six.text_type.__new__(cls, url)
        ret.name = name
        return ret

    def __getnewargs__(self):
        # Support copying/pickling by reproducing the (url, name) pair.
        return (str(self), self.name)

    is_hyperlink = True
class PKOnlyObject(object):
    """
    Mock object carrying only a primary key.

    Used when just the pk of a related instance is needed, while still
    exposing a ``.pk`` attribute so it keeps the same interface as a
    regular model instance.
    """
    def __init__(self, pk):
        self.pk = pk
# We assume that 'validators' are intended for the child serializer,
# rather than the parent serializer.
# Keyword arguments that many_init() forwards from the child relation
# to the wrapping ManyRelatedField.
MANY_RELATION_KWARGS = (
    'read_only', 'write_only', 'required', 'default', 'initial', 'source',
    'label', 'help_text', 'style', 'error_messages', 'allow_empty'
)
class RelatedField(Field):
    """
    Base class for relational fields.

    Subclasses implement to_representation()/to_internal_value() against
    a queryset; `many=True` transparently wraps the field in a
    ManyRelatedField (see __new__/many_init).
    """
    # Default queryset; may be overridden per-instance via the kwarg.
    queryset = None
    # Limits for rendering choices in browsable-API HTML widgets.
    html_cutoff = 1000
    html_cutoff_text = _('More than {count} items...')
    def __init__(self, **kwargs):
        self.queryset = kwargs.pop('queryset', self.queryset)
        self.html_cutoff = kwargs.pop('html_cutoff', self.html_cutoff)
        self.html_cutoff_text = kwargs.pop('html_cutoff_text', self.html_cutoff_text)
        # A writable relation needs a queryset to validate input against;
        # a read-only one must not have one.
        assert self.queryset is not None or kwargs.get('read_only', None), (
            'Relational field must provide a `queryset` argument, '
            'or set read_only=`True`.'
        )
        assert not (self.queryset is not None and kwargs.get('read_only', None)), (
            'Relational fields should not provide a `queryset` argument, '
            'when setting read_only=`True`.'
        )
        # 'many' and 'allow_empty' are consumed by ManyRelatedField.
        kwargs.pop('many', None)
        kwargs.pop('allow_empty', None)
        super(RelatedField, self).__init__(**kwargs)
    def __new__(cls, *args, **kwargs):
        # We override this method in order to automagically create
        # `ManyRelatedField` classes instead when `many=True` is set.
        if kwargs.pop('many', False):
            return cls.many_init(*args, **kwargs)
        return super(RelatedField, cls).__new__(cls, *args, **kwargs)
    @classmethod
    def many_init(cls, *args, **kwargs):
        """
        This method handles creating a parent `ManyRelatedField` instance
        when the `many=True` keyword argument is passed.
        Typically you won't need to override this method.
        Note that we're over-cautious in passing most arguments to both parent
        and child classes in order to try to cover the general case. If you're
        overriding this method you'll probably want something much simpler, eg:
        @classmethod
        def many_init(cls, *args, **kwargs):
            kwargs['child'] = cls()
            return CustomManyRelatedField(*args, **kwargs)
        """
        list_kwargs = {'child_relation': cls(*args, **kwargs)}
        for key in kwargs.keys():
            if key in MANY_RELATION_KWARGS:
                list_kwargs[key] = kwargs[key]
        return ManyRelatedField(**list_kwargs)
    def run_validation(self, data=empty):
        # We force empty strings to None values for relational fields.
        if data == '':
            data = None
        return super(RelatedField, self).run_validation(data)
    def get_queryset(self):
        """Return the queryset to validate against, re-evaluated per call."""
        queryset = self.queryset
        if isinstance(queryset, (QuerySet, Manager)):
            # Ensure queryset is re-evaluated whenever used.
            # Note that actually a `Manager` class may also be used as the
            # queryset argument. This occurs on ModelSerializer fields,
            # as it allows us to generate a more expressive 'repr' output
            # for the field.
            # Eg: 'MyRelationship(queryset=ExampleModel.objects.all())'
            queryset = queryset.all()
        return queryset
    def use_pk_only_optimization(self):
        # Subclasses that only need the pk override this to return True.
        return False
    def get_attribute(self, instance):
        if self.use_pk_only_optimization() and self.source_attrs:
            # Optimized case, return a mock object only containing the pk attribute.
            try:
                instance = get_attribute(instance, self.source_attrs[:-1])
                value = instance.serializable_value(self.source_attrs[-1])
                if is_simple_callable(value):
                    # Handle edge case where the relationship `source` argument
                    # points to a `get_relationship()` method on the model
                    value = value().pk
                return PKOnlyObject(pk=value)
            except AttributeError:
                pass
        # Standard case, return the object instance.
        return get_attribute(instance, self.source_attrs)
    @property
    def choices(self):
        """Mapping of representation string -> display value for HTML forms."""
        queryset = self.get_queryset()
        if queryset is None:
            # Ensure that field.choices returns something sensible
            # even when accessed with a read-only field.
            return {}
        return OrderedDict([
            (
                six.text_type(self.to_representation(item)),
                self.display_value(item)
            )
            for item in queryset
        ])
    @property
    def grouped_choices(self):
        # No grouping for relational fields; same as plain choices.
        return self.choices
    def iter_options(self):
        """Iterate the HTML select options, truncated at html_cutoff."""
        return iter_options(
            self.grouped_choices,
            cutoff=self.html_cutoff,
            cutoff_text=self.html_cutoff_text
        )
    def display_value(self, instance):
        # Human-readable label for an instance in choice widgets.
        return six.text_type(instance)
class StringRelatedField(RelatedField):
    """
    A read only field that represents its targets using their
    plain string representation.
    """
    def __init__(self, **kwargs):
        # Always read-only: a string representation cannot be resolved back
        # to a model instance.
        kwargs['read_only'] = True
        super(StringRelatedField, self).__init__(**kwargs)
    def to_representation(self, value):
        return six.text_type(value)
class PrimaryKeyRelatedField(RelatedField):
    """
    A read-write relational field represented by the target's primary key.
    An optional `pk_field` serializes/deserializes the pk value itself.
    """
    default_error_messages = {
        'required': _('This field is required.'),
        'does_not_exist': _('Invalid pk "{pk_value}" - object does not exist.'),
        'incorrect_type': _('Incorrect type. Expected pk value, received {data_type}.'),
    }
    def __init__(self, **kwargs):
        self.pk_field = kwargs.pop('pk_field', None)
        super(PrimaryKeyRelatedField, self).__init__(**kwargs)
    def use_pk_only_optimization(self):
        # Only the pk is ever needed, so the lightweight PKOnlyObject suffices.
        return True
    def to_internal_value(self, data):
        if self.pk_field is not None:
            data = self.pk_field.to_internal_value(data)
        try:
            return self.get_queryset().get(pk=data)
        except ObjectDoesNotExist:
            self.fail('does_not_exist', pk_value=data)
        except (TypeError, ValueError):
            self.fail('incorrect_type', data_type=type(data).__name__)
    def to_representation(self, value):
        if self.pk_field is not None:
            return self.pk_field.to_representation(value.pk)
        return value.pk
class HyperlinkedRelatedField(RelatedField):
    """
    A read-write relational field represented by a hyperlink to the target,
    resolved through the URL conf (`view_name` + `lookup_field`).
    """
    lookup_field = 'pk'
    view_name = None
    default_error_messages = {
        'required': _('This field is required.'),
        'no_match': _('Invalid hyperlink - No URL match.'),
        'incorrect_match': _('Invalid hyperlink - Incorrect URL match.'),
        'does_not_exist': _('Invalid hyperlink - Object does not exist.'),
        'incorrect_type': _('Incorrect type. Expected URL string, received {data_type}.'),
    }
    def __init__(self, view_name=None, **kwargs):
        if view_name is not None:
            self.view_name = view_name
        assert self.view_name is not None, 'The `view_name` argument is required.'
        self.lookup_field = kwargs.pop('lookup_field', self.lookup_field)
        # URL kwarg name defaults to the model lookup field name.
        self.lookup_url_kwarg = kwargs.pop('lookup_url_kwarg', self.lookup_field)
        self.format = kwargs.pop('format', None)
        # We include this simply for dependency injection in tests.
        # We can't add it as a class attributes or it would expect an
        # implicit `self` argument to be passed.
        self.reverse = reverse
        super(HyperlinkedRelatedField, self).__init__(**kwargs)
    def use_pk_only_optimization(self):
        # Only safe when the URL is built from the pk alone.
        return self.lookup_field == 'pk'
    def get_object(self, view_name, view_args, view_kwargs):
        """
        Return the object corresponding to a matched URL.
        Takes the matched URL conf arguments, and should return an
        object instance, or raise an `ObjectDoesNotExist` exception.
        """
        lookup_value = view_kwargs[self.lookup_url_kwarg]
        lookup_kwargs = {self.lookup_field: lookup_value}
        return self.get_queryset().get(**lookup_kwargs)
    def get_url(self, obj, view_name, request, format):
        """
        Given an object, return the URL that hyperlinks to the object.
        May raise a `NoReverseMatch` if the `view_name` and `lookup_field`
        attributes are not configured to correctly match the URL conf.
        """
        # Unsaved objects will not yet have a valid URL.
        if hasattr(obj, 'pk') and obj.pk is None:
            return None
        lookup_value = getattr(obj, self.lookup_field)
        kwargs = {self.lookup_url_kwarg: lookup_value}
        return self.reverse(view_name, kwargs=kwargs, request=request, format=format)
    def get_name(self, obj):
        # Display name used when the hyperlink renders as a named link.
        return six.text_type(obj)
    def to_internal_value(self, data):
        """Resolve an incoming URL string back to a model instance."""
        request = self.context.get('request', None)
        try:
            http_prefix = data.startswith(('http:', 'https:'))
        except AttributeError:
            # Non-string input cannot be a URL.
            self.fail('incorrect_type', data_type=type(data).__name__)
        if http_prefix:
            # If needed convert absolute URLs to relative path
            data = urlparse.urlparse(data).path
            prefix = get_script_prefix()
            if data.startswith(prefix):
                data = '/' + data[len(prefix):]
        try:
            match = resolve(data)
        except Resolver404:
            self.fail('no_match')
        # Versioned APIs may rewrite the expected view name per-request.
        try:
            expected_viewname = request.versioning_scheme.get_versioned_viewname(
                self.view_name, request
            )
        except AttributeError:
            expected_viewname = self.view_name
        if match.view_name != expected_viewname:
            self.fail('incorrect_match')
        try:
            return self.get_object(match.view_name, match.args, match.kwargs)
        except (ObjectDoesNotExist, TypeError, ValueError):
            self.fail('does_not_exist')
    def to_representation(self, value):
        """Render the related object as a Hyperlink (URL + display name)."""
        request = self.context.get('request', None)
        format = self.context.get('format', None)
        assert request is not None, (
            "`%s` requires the request in the serializer"
            " context. Add `context={'request': request}` when instantiating "
            "the serializer." % self.__class__.__name__
        )
        # By default use whatever format is given for the current context
        # unless the target is a different type to the source.
        #
        # Eg. Consider a HyperlinkedIdentityField pointing from a json
        # representation to an html property of that representation...
        #
        # '/snippets/1/' should link to '/snippets/1/highlight/'
        # ...but...
        # '/snippets/1/.json' should link to '/snippets/1/highlight/.html'
        if format and self.format and self.format != format:
            format = self.format
        # Return the hyperlink, or error if incorrectly configured.
        try:
            url = self.get_url(value, self.view_name, request, format)
        except NoReverseMatch:
            msg = (
                'Could not resolve URL for hyperlinked relationship using '
                'view name "%s". You may have failed to include the related '
                'model in your API, or incorrectly configured the '
                '`lookup_field` attribute on this field.'
            )
            if value in ('', None):
                value_string = {'': 'the empty string', None: 'None'}[value]
                msg += (
                    " WARNING: The value of the field on the model instance "
                    "was %s, which may be why it didn't match any "
                    "entries in your URL conf." % value_string
                )
            raise ImproperlyConfigured(msg % self.view_name)
        if url is None:
            return None
        name = self.get_name(value)
        return Hyperlink(url, name)
class HyperlinkedIdentityField(HyperlinkedRelatedField):
    """
    A read-only field that represents the identity URL for an object, itself.
    This is in contrast to `HyperlinkedRelatedField` which represents the
    URL of relationships to other objects.
    """
    def __init__(self, view_name=None, **kwargs):
        assert view_name is not None, 'The `view_name` argument is required.'
        # Identity links are always read-only and point at the object itself.
        kwargs['read_only'] = True
        kwargs['source'] = '*'
        super(HyperlinkedIdentityField, self).__init__(view_name, **kwargs)
    def use_pk_only_optimization(self):
        # We have the complete object instance already. We don't need
        # to run the 'only get the pk for this relationship' code.
        return False
class SlugRelatedField(RelatedField):
    """
    A read-write field that represents the target of the relationship
    by a unique 'slug' attribute.
    """
    default_error_messages = {
        'does_not_exist': _('Object with {slug_name}={value} does not exist.'),
        'invalid': _('Invalid value.'),
    }
    def __init__(self, slug_field=None, **kwargs):
        assert slug_field is not None, 'The `slug_field` argument is required.'
        self.slug_field = slug_field
        super(SlugRelatedField, self).__init__(**kwargs)
    def to_internal_value(self, data):
        # Look up the target by its slug attribute; any lookup failure is
        # translated into a validation error via fail().
        try:
            return self.get_queryset().get(**{self.slug_field: data})
        except ObjectDoesNotExist:
            self.fail('does_not_exist', slug_name=self.slug_field, value=smart_text(data))
        except (TypeError, ValueError):
            self.fail('invalid')
    def to_representation(self, obj):
        return getattr(obj, self.slug_field)
class ManyRelatedField(Field):
    """
    Relationships with `many=True` transparently get coerced into instead being
    a ManyRelatedField with a child relationship.
    The `ManyRelatedField` class is responsible for handling iterating through
    the values and passing each one to the child relationship.
    This class is treated as private API.
    You shouldn't generally need to be using this class directly yourself,
    and should instead simply set 'many=True' on the relationship.
    """
    initial = []
    default_empty_html = []
    default_error_messages = {
        'not_a_list': _('Expected a list of items but got type "{input_type}".'),
        'empty': _('This list may not be empty.')
    }
    # Cap on how many choices are rendered for HTML select widgets; the
    # cutoff text is shown when the cap is hit.
    html_cutoff = 1000
    html_cutoff_text = _('More than {count} items...')
    def __init__(self, child_relation=None, *args, **kwargs):
        # `child_relation` handles each individual item; this field only
        # iterates and delegates to it.
        self.child_relation = child_relation
        self.allow_empty = kwargs.pop('allow_empty', True)
        self.html_cutoff = kwargs.pop('html_cutoff', self.html_cutoff)
        self.html_cutoff_text = kwargs.pop('html_cutoff_text', self.html_cutoff_text)
        assert child_relation is not None, '`child_relation` is a required argument.'
        super(ManyRelatedField, self).__init__(*args, **kwargs)
        # Bind with an empty field_name: the child reads values supplied by
        # this parent rather than looking them up itself.
        self.child_relation.bind(field_name='', parent=self)
    def get_value(self, dictionary):
        # We override the default field access in order to support
        # lists in HTML forms.
        if html.is_html_input(dictionary):
            # Don't return [] if the update is partial
            if self.field_name not in dictionary:
                if getattr(self.root, 'partial', False):
                    return empty
            return dictionary.getlist(self.field_name)
        return dictionary.get(self.field_name, empty)
    def to_internal_value(self, data):
        # Strings are iterable but must not be treated as lists of items.
        if isinstance(data, type('')) or not hasattr(data, '__iter__'):
            self.fail('not_a_list', input_type=type(data).__name__)
        if not self.allow_empty and len(data) == 0:
            self.fail('empty')
        return [
            self.child_relation.to_internal_value(item)
            for item in data
        ]
    def get_attribute(self, instance):
        # Can't have any relationships if not created
        if hasattr(instance, 'pk') and instance.pk is None:
            return []
        relationship = get_attribute(instance, self.source_attrs)
        # Managers/querysets expose `.all()`; plain iterables pass through.
        return relationship.all() if (hasattr(relationship, 'all')) else relationship
    def to_representation(self, iterable):
        # Delegate each item to the child relation.
        return [
            self.child_relation.to_representation(value)
            for value in iterable
        ]
    @property
    def choices(self):
        return self.child_relation.choices
    @property
    def grouped_choices(self):
        return self.choices
    def iter_options(self):
        # Used by HTML renderers to enumerate select-widget options.
        return iter_options(
            self.grouped_choices,
            cutoff=self.html_cutoff,
            cutoff_text=self.html_cutoff_text
        )
|
|
import webapp2
import jinja2
import logging
import json
import urllib2
import os
from google.appengine.api import urlfetch
from google.appengine.api import mail
from user.models import User
from lib import tools, handlers, conf
from github.github import latestChanges
# Allow up to 60 seconds for outbound urlfetch calls (used heavily by the
# handlers below to talk to this app's own REST API).
urlfetch.set_default_fetch_deadline(60)
# Shared template environment rooted at this file's directory.  Autoescaping
# is enabled, so template variables are HTML-escaped by default.
JINJA_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
    extensions=['jinja2.ext.autoescape'],
    autoescape=True)
class HomeHandler(handlers.BaseHandler):
    """Landing page: logged-in users are sent to /main, everyone else
    gets the public index template."""
    def get(self):
        logged_in = self.session.get('username') and self.session.get('user')
        if logged_in:
            self.redirect('/main')
            return
        template_values = {
            'title': 'Selection Naturelle',
            'user': False,
            'interface': 'web',
            'jquery': True,
            'latest_changes': latestChanges(5),
        }
        template = JINJA_ENVIRONMENT.get_template('templates/index.html')
        self.response.write(template.render(template_values))
class MainHandler(handlers.BaseHandler):
    """Main dashboard: the logged-in user's games (with owner and detail
    data) and monsters."""

    # Maps a monster's numeric species id to the picture name used by the
    # templates.  Ids outside this map leave 'pic' unset, matching the
    # original elif chain.
    SPECIES_PICS = {0: 'blob', 1: 'chimp', 2: 'mosquito', 3: 'pineapple'}

    def get(self):
        username = self.session.get('username')
        if username is None:
            self.redirect('/')
            return
        user = User.get_by_username(username)
        if user is None:
            logging.debug('Couldn\'t find user (%s).' % username)
            self.redirect('/')
            return
        url = 'http://%s/api/game/list?user=%s' % (tools.APP_HOSTNAME, user.username)
        result = urlfetch.fetch(url)
        gamelist = json.loads(result.content)
        for game in gamelist['games']:
            # Resolve the owner key to a user record, then attach the
            # per-game details from the API.
            url = 'http://%s/api/user/get?key=%s' % (tools.APP_HOSTNAME, game['owner'])
            result = urlfetch.fetch(url)
            owner = json.loads(result.content)['user']
            url = 'http://%s/api/game/details?owner=%s&name=%s' % (tools.APP_HOSTNAME, owner['username'], game['name'])
            result = urlfetch.fetch(url)
            game['details'] = json.loads(result.content)
        for monster in gamelist['monsters']:
            # BUG FIX: species 3 previously executed
            # `monster['pic' == 'pineapple']`, i.e. monster[False], which
            # raises KeyError instead of assigning the picture name.
            if monster['species'] in self.SPECIES_PICS:
                monster['pic'] = self.SPECIES_PICS[monster['species']]
        template_values = {
            'title': 'Selection Naturelle',
            'jquery': True,
            'user': tools.serialize_dict(user.to_dict()),
            'interface': 'web',
            'gamelist': gamelist,
            'latest_changes': latestChanges(5),
        }
        template = JINJA_ENVIRONMENT.get_template('templates/main.html')
        self.response.write(template.render(template_values))
class GamesHandler(handlers.BaseHandler):
    """Games page: lists the logged-in user's games plus the per-user
    game limit."""

    # Maps a monster's numeric species id to the picture name used by the
    # templates.  Ids outside this map leave 'pic' unset, matching the
    # original elif chain.
    SPECIES_PICS = {0: 'blob', 1: 'chimp', 2: 'mosquito', 3: 'pineapple'}

    def get(self):
        username = self.session.get('username')
        if username is None:
            self.redirect('/')
            return
        user = User.get_by_username(username)
        if user is None:
            logging.debug('Couldn\'t find user (%s).' % username)
            self.redirect('/')
            return
        url = 'http://%s/api/game/list?user=%s' % (tools.APP_HOSTNAME, user.username)
        result = urlfetch.fetch(url)
        gamelist = json.loads(result.content)
        for game in gamelist['games']:
            # Resolve the owner key to a user record, then attach the
            # per-game details from the API.
            url = 'http://%s/api/user/get?key=%s' % (tools.APP_HOSTNAME, game['owner'])
            result = urlfetch.fetch(url)
            owner = json.loads(result.content)['user']
            url = 'http://%s/api/game/details?owner=%s&name=%s' % (tools.APP_HOSTNAME, owner['username'], game['name'])
            result = urlfetch.fetch(url)
            game['details'] = json.loads(result.content)
        for monster in gamelist['monsters']:
            # BUG FIX: species 3 previously executed
            # `monster['pic' == 'pineapple']`, i.e. monster[False], which
            # raises KeyError instead of assigning the picture name.
            if monster['species'] in self.SPECIES_PICS:
                monster['pic'] = self.SPECIES_PICS[monster['species']]
        template_values = {
            'title': 'Selection Naturelle',
            'jquery': True,
            'user': tools.serialize_dict(user.to_dict()),
            'interface': 'web',
            'gamelist': gamelist,
            'max_games': conf.max_games_per_user(user.key.urlsafe())
        }
        template = JINJA_ENVIRONMENT.get_template('templates/games.html')
        self.response.write(template.render(template_values))
class GameHandler(handlers.BaseHandler):
    """Detail page for a single game, identified by its datastore key in
    the `key` query parameter."""

    # Maps a monster's numeric species id to the picture name used by the
    # templates.  Ids outside this map leave 'pic' unset, matching the
    # original elif chain.
    SPECIES_PICS = {0: 'blob', 1: 'chimp', 2: 'mosquito', 3: 'pineapple'}

    def get(self):
        username = self.session.get('username')
        key = self.request.get('key')
        if username is None or len(key) == 0:
            self.redirect('/')
            return
        user = User.get_by_username(username)
        if user is None:
            logging.debug('Couldn\'t find user (%s).' % username)
            self.redirect('/')
            return
        url = 'http://%s/api/game/get?key=%s' % (tools.APP_HOSTNAME, key)
        result = urlfetch.fetch(url)
        game = json.loads(result.content)['game']
        url = 'http://%s/api/user/get?key=%s' % (tools.APP_HOSTNAME, game['owner'])
        result = urlfetch.fetch(url)
        owner = json.loads(result.content)['user']
        url = 'http://%s/api/game/details?owner=%s&name=%s' % (tools.APP_HOSTNAME, owner['username'], game['name'])
        result = urlfetch.fetch(url)
        game_details = json.loads(result.content)
        game_details['monsters'] = []
        for player in game_details['players']:
            # Resolve each player's monster and the monster's owner.
            url = 'http://%s/api/monster/get?key=%s' % (tools.APP_HOSTNAME, player['monster'])
            result = urlfetch.fetch(url)
            monster = json.loads(result.content)['monster']
            url = 'http://%s/api/user/get?key=%s' % (tools.APP_HOSTNAME, monster['owner'])
            result = urlfetch.fetch(url)
            monster['user'] = json.loads(result.content)['user']
            # BUG FIX: species 3 previously executed
            # `monster['pic' == 'pineapple']`, i.e. monster[False], which
            # raises KeyError instead of assigning the picture name.
            if monster['species'] in self.SPECIES_PICS:
                monster['pic'] = self.SPECIES_PICS[monster['species']]
            game_details['monsters'].append(monster)
        if game is not None:
            template_values = {
                'title': 'Selection Naturelle',
                'jquery': True,
                'user': tools.serialize_dict(user.to_dict()),
                'owner': owner,
                'game': game_details,
                'interface': 'web',
            }
            template = JINJA_ENVIRONMENT.get_template('templates/game.html')
            logging.debug('Template values: %s' % template_values)
            self.response.write(template.render(template_values))
        else:
            logging.debug('Couldn\'t find user (%s).' % username)
            self.redirect('/main?username=%s' % username)
class MonstersHandler(handlers.BaseHandler):
    """Monsters page: lists the logged-in user's monsters plus the
    per-user monster limit."""

    # Maps a monster's numeric species id to the picture name used by the
    # templates.  Ids outside this map leave 'pic' unset, matching the
    # original elif chain.
    SPECIES_PICS = {0: 'blob', 1: 'chimp', 2: 'mosquito', 3: 'pineapple'}

    def get(self):
        username = self.session.get('username')
        user = self.session.get('user')
        if username is None or user is None:
            self.redirect('/')
            return
        url = 'http://%s/api/monster/fetch?owner=%s' % (tools.APP_HOSTNAME, user['username'])
        result = urlfetch.fetch(url)
        monsters = json.loads(result.content)['monsters']
        for monster in monsters:
            # BUG FIX: species 3 previously executed
            # `monster['pic' == 'pineapple']`, i.e. monster[False], which
            # raises KeyError instead of assigning the picture name.
            if monster['species'] in self.SPECIES_PICS:
                monster['pic'] = self.SPECIES_PICS[monster['species']]
        template_values = {
            'title': 'Selection Naturelle',
            'jquery': True,
            'user': tools.serialize_dict(user),
            'monsters': tools.serialize_dict(monsters),
            'interface': 'web',
            'max_monsters': conf.max_monsters_per_user(user['key'])
        }
        template = JINJA_ENVIRONMENT.get_template('templates/monsters.html')
        self.response.write(template.render(template_values))
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2020 The Fujicoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
import configparser
from enum import Enum
import argparse
import logging
import os
import pdb
import random
import re
import shutil
import subprocess
import sys
import tempfile
import time
from typing import List
from .address import ADDRESS_BCRT1_P2WSH_OP_TRUE
from .authproxy import JSONRPCException
from . import coverage
from .p2p import NetworkThread
from .test_node import TestNode
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
get_datadir_path,
initialize_datadir,
p2p_port,
wait_until_helper,
)
class TestStatus(Enum):
    """Outcome of a test run; stored on ``self.success`` by ``main()``."""
    PASSED = 1
    FAILED = 2
    SKIPPED = 3
# Process exit codes corresponding to each TestStatus; 77 is the automake
# convention for a skipped test.
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
# Prefix for the per-run temporary datadir created in setup().
TMPDIR_PREFIX = "fujicoin_func_test_"
class SkipTest(Exception):
    """This exception is raised to skip a test"""
    def __init__(self, message):
        # Human-readable reason; logged by main() when the test is skipped.
        self.message = message
class FujicoinTestMetaClass(type):
    """Metaclass for FujicoinTestFramework.

    Ensures that any attempt to register a subclass of `FujicoinTestFramework`
    adheres to a standard whereby the subclass overrides `set_test_params` and
    `run_test` but DOES NOT override either `__init__` or `main`. If any of
    those standards are violated, a ``TypeError`` is raised."""
    def __new__(cls, clsname, bases, dct):
        # The framework base class itself is exempt from the override checks.
        if clsname != 'FujicoinTestFramework':
            overrides_required = 'run_test' in dct and 'set_test_params' in dct
            if not overrides_required:
                raise TypeError("FujicoinTestFramework subclasses must override "
                                "'run_test' and 'set_test_params'")
            overrides_forbidden = '__init__' in dct or 'main' in dct
            if overrides_forbidden:
                raise TypeError("FujicoinTestFramework subclasses may not override "
                                "'__init__' or 'main'")
        return super().__new__(cls, clsname, bases, dct)
class FujicoinTestFramework(metaclass=FujicoinTestMetaClass):
    """Base class for a fujicoin test script.

    Individual fujicoin test scripts should subclass this class and override
    the set_test_params() and run_test() methods.

    Individual tests can also override the following methods to customize the
    test setup:

    - add_options()
    - setup_chain()
    - setup_network()
    - setup_nodes()

    The __init__() and main() methods should not be overridden
    (enforced by FujicoinTestMetaClass).

    This class also contains various public and private helper methods."""
    def __init__(self):
        """Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
        self.chain: str = 'regtest'
        self.setup_clean_chain: bool = False
        self.nodes: List[TestNode] = []
        self.network_thread = None
        self.rpc_timeout = 60  # Wait for up to 60 seconds for the RPC server to respond
        self.supports_cli = True
        self.bind_to_localhost_only = True
        # parse_args() must run before anything below reads self.options.
        self.parse_args()
        self.default_wallet_name = "default_wallet" if self.options.descriptors else ""
        self.wallet_data_filename = "wallet.dat"
        # Optional list of wallet names that can be set in set_test_params to
        # create and import keys to. If unset, default is len(nodes) *
        # [default_wallet_name]. If wallet names are None, wallet creation is
        # skipped. If list is truncated, wallet creation is skipped and keys
        # are not imported.
        self.wallet_names = None
        # By default the wallet is not required. Set to true by skip_if_no_wallet().
        # When False, we ignore wallet_names regardless of what it is.
        self.requires_wallet = False
        self.set_test_params()
        assert self.wallet_names is None or len(self.wallet_names) <= self.num_nodes
        # --timeout-factor 0 means "disable timeouts": substitute a huge factor.
        if self.options.timeout_factor == 0 :
            self.options.timeout_factor = 99999
        self.rpc_timeout = int(self.rpc_timeout * self.options.timeout_factor)  # optionally, increase timeout by a factor
    def main(self):
        """Main function. This should not be overridden by the subclass test scripts."""
        assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
        try:
            self.setup()
            self.run_test()
        except JSONRPCException:
            self.log.exception("JSONRPC error")
            self.success = TestStatus.FAILED
        except SkipTest as e:
            self.log.warning("Test Skipped: %s" % e.message)
            self.success = TestStatus.SKIPPED
        except AssertionError:
            self.log.exception("Assertion failed")
            self.success = TestStatus.FAILED
        except KeyError:
            self.log.exception("Key error")
            self.success = TestStatus.FAILED
        except subprocess.CalledProcessError as e:
            self.log.exception("Called Process failed with '{}'".format(e.output))
            self.success = TestStatus.FAILED
        except Exception:
            self.log.exception("Unexpected exception caught during testing")
            self.success = TestStatus.FAILED
        # KeyboardInterrupt derives from BaseException, so the generic
        # `except Exception` above does not swallow it.
        except KeyboardInterrupt:
            self.log.warning("Exiting after keyboard interrupt")
            self.success = TestStatus.FAILED
        finally:
            # Always stop nodes / clean up, then exit with the mapped code.
            exit_code = self.shutdown()
            sys.exit(exit_code)
    def parse_args(self):
        """Parse command-line options and the test framework config file into
        ``self.options`` and ``self.config``."""
        previous_releases_path = os.getenv("PREVIOUS_RELEASES_DIR") or os.getcwd() + "/releases"
        parser = argparse.ArgumentParser(usage="%(prog)s [options]")
        parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true",
                            help="Leave fujicoinds and test.* datadir on exit or error")
        parser.add_argument("--noshutdown", dest="noshutdown", default=False, action="store_true",
                            help="Don't stop fujicoinds after the test execution")
        parser.add_argument("--cachedir", dest="cachedir", default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
                            help="Directory for caching pregenerated datadirs (default: %(default)s)")
        parser.add_argument("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
        parser.add_argument("-l", "--loglevel", dest="loglevel", default="INFO",
                            help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
        parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true",
                            help="Print out all RPC calls as they are made")
        parser.add_argument("--portseed", dest="port_seed", default=os.getpid(), type=int,
                            help="The seed to use for assigning port numbers (default: current process id)")
        parser.add_argument("--previous-releases", dest="prev_releases", action="store_true",
                            default=os.path.isdir(previous_releases_path) and bool(os.listdir(previous_releases_path)),
                            help="Force test of previous releases (default: %(default)s)")
        parser.add_argument("--coveragedir", dest="coveragedir",
                            help="Write tested RPC commands into this directory")
        parser.add_argument("--configfile", dest="configfile",
                            default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../config.ini"),
                            help="Location of the test framework config file (default: %(default)s)")
        parser.add_argument("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
                            help="Attach a python debugger if test fails")
        parser.add_argument("--usecli", dest="usecli", default=False, action="store_true",
                            help="use fujicoin-cli instead of RPC for all commands")
        parser.add_argument("--perf", dest="perf", default=False, action="store_true",
                            help="profile running nodes with perf for the duration of the test")
        parser.add_argument("--valgrind", dest="valgrind", default=False, action="store_true",
                            help="run nodes under the valgrind memory error detector: expect at least a ~10x slowdown, valgrind 3.14 or later required")
        parser.add_argument("--randomseed", type=int,
                            help="set a random seed for deterministically reproducing a previous test run")
        parser.add_argument('--timeout-factor', dest="timeout_factor", type=float, default=1.0, help='adjust test timeouts by a factor. Setting it to 0 disables all timeouts')
        # --descriptors / --legacy-wallet are mutually exclusive; when neither
        # is given, the wallet type is chosen below from compiled-in support.
        group = parser.add_mutually_exclusive_group()
        group.add_argument("--descriptors", action='store_const', const=True,
                           help="Run test using a descriptor wallet", dest='descriptors')
        group.add_argument("--legacy-wallet", action='store_const', const=False,
                           help="Run test using legacy wallets", dest='descriptors')
        self.add_options(parser)
        # Running TestShell in a Jupyter notebook causes an additional -f argument
        # To keep TestShell from failing with an "unrecognized argument" error, we add a dummy "-f" argument
        # source: https://stackoverflow.com/questions/48796169/how-to-fix-ipykernel-launcher-py-error-unrecognized-arguments-in-jupyter/56349168#56349168
        parser.add_argument("-f", "--fff", help="a dummy argument to fool ipython", default="1")
        self.options = parser.parse_args()
        self.options.previous_releases_path = previous_releases_path
        config = configparser.ConfigParser()
        config.read_file(open(self.options.configfile))
        self.config = config
        if self.options.descriptors is None:
            # Prefer BDB unless it isn't available
            if self.is_bdb_compiled():
                self.options.descriptors = False
            elif self.is_sqlite_compiled():
                self.options.descriptors = True
            else:
                # If neither are compiled, tests requiring a wallet will be skipped and the value of self.options.descriptors won't matter
                # It still needs to exist and be None in order for tests to work however.
                self.options.descriptors = None
    def setup(self):
        """Call this method to start up the test framework object with options set."""
        PortSeed.n = self.options.port_seed
        check_json_precision()
        self.options.cachedir = os.path.abspath(self.options.cachedir)
        # Locate the fujicoind/fujicoin-cli binaries; environment variables
        # override the build-tree defaults from config.ini.
        config = self.config
        fname_fujicoind = os.path.join(
            config["environment"]["BUILDDIR"],
            "src",
            "fujicoind" + config["environment"]["EXEEXT"],
        )
        fname_fujicoincli = os.path.join(
            config["environment"]["BUILDDIR"],
            "src",
            "fujicoin-cli" + config["environment"]["EXEEXT"],
        )
        self.options.fujicoind = os.getenv("FUJICOIND", default=fname_fujicoind)
        self.options.fujicoincli = os.getenv("FUJICOINCLI", default=fname_fujicoincli)
        os.environ['PATH'] = os.pathsep.join([
            os.path.join(config['environment']['BUILDDIR'], 'src'),
            os.path.join(config['environment']['BUILDDIR'], 'src', 'qt'), os.environ['PATH']
        ])
        # Set up temp directory and start logging
        if self.options.tmpdir:
            self.options.tmpdir = os.path.abspath(self.options.tmpdir)
            os.makedirs(self.options.tmpdir, exist_ok=False)
        else:
            self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX)
        self._start_logging()
        # Seed the PRNG. Note that test runs are reproducible if and only if
        # a single thread accesses the PRNG. For more information, see
        # https://docs.python.org/3/library/random.html#notes-on-reproducibility.
        # The network thread shouldn't access random. If we need to change the
        # network thread to access randomness, it should instantiate its own
        # random.Random object.
        seed = self.options.randomseed
        if seed is None:
            seed = random.randrange(sys.maxsize)
        else:
            self.log.debug("User supplied random seed {}".format(seed))
        random.seed(seed)
        self.log.debug("PRNG seed is: {}".format(seed))
        self.log.debug('Setting up network thread')
        self.network_thread = NetworkThread()
        self.network_thread.start()
        if self.options.usecli:
            if not self.supports_cli:
                raise SkipTest("--usecli specified but test does not support using CLI")
            self.skip_if_no_cli()
        self.skip_test_if_missing_module()
        self.setup_chain()
        self.setup_network()
        self.success = TestStatus.PASSED
    def shutdown(self):
        """Call this method to shut down the test framework object.

        Stops nodes (unless --noshutdown), optionally cleans up the tmpdir,
        tears down logging handlers, and returns the process exit code."""
        if self.success == TestStatus.FAILED and self.options.pdbonfailure:
            print("Testcase failed. Attaching python debugger. Enter ? for help")
            pdb.set_trace()
        self.log.debug('Closing down network thread')
        self.network_thread.close()
        if not self.options.noshutdown:
            self.log.info("Stopping nodes")
            if self.nodes:
                self.stop_nodes()
        else:
            for node in self.nodes:
                node.cleanup_on_exit = False
            self.log.info("Note: fujicoinds were not stopped and may still be running")
        # Only remove the tmpdir on a clean, successful run without perf data.
        should_clean_up = (
            not self.options.nocleanup and
            not self.options.noshutdown and
            self.success != TestStatus.FAILED and
            not self.options.perf
        )
        if should_clean_up:
            self.log.info("Cleaning up {} on exit".format(self.options.tmpdir))
            cleanup_tree_on_exit = True
        elif self.options.perf:
            self.log.warning("Not cleaning up dir {} due to perf data".format(self.options.tmpdir))
            cleanup_tree_on_exit = False
        else:
            self.log.warning("Not cleaning up dir {}".format(self.options.tmpdir))
            cleanup_tree_on_exit = False
        if self.success == TestStatus.PASSED:
            self.log.info("Tests successful")
            exit_code = TEST_EXIT_PASSED
        elif self.success == TestStatus.SKIPPED:
            self.log.info("Test skipped")
            exit_code = TEST_EXIT_SKIPPED
        else:
            self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
            self.log.error("")
            self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
            self.log.error("")
            self.log.error("If this failure happened unexpectedly or intermittently, please file a bug and provide a link or upload of the combined log.")
            self.log.error(self.config['environment']['PACKAGE_BUGREPORT'])
            self.log.error("")
            exit_code = TEST_EXIT_FAILED
        # Logging.shutdown will not remove stream- and filehandlers, so we must
        # do it explicitly. Handlers are removed so the next test run can apply
        # different log handler settings.
        # See: https://docs.python.org/3/library/logging.html#logging.shutdown
        for h in list(self.log.handlers):
            h.flush()
            h.close()
            self.log.removeHandler(h)
        rpc_logger = logging.getLogger("FujicoinRPC")
        for h in list(rpc_logger.handlers):
            h.flush()
            rpc_logger.removeHandler(h)
        if cleanup_tree_on_exit:
            shutil.rmtree(self.options.tmpdir)
        self.nodes.clear()
        return exit_code
# Methods to override in subclass test scripts.
    def set_test_params(self):
        """Tests must override this method to change default values for number of nodes, topology, etc"""
        # Called from __init__ before any node exists; overrides must at
        # least set self.num_nodes (asserted in main()).
        raise NotImplementedError
    def add_options(self, parser):
        """Override this method to add command-line options to the test"""
        # Hook invoked by parse_args(); default implementation adds nothing.
        pass
    def skip_test_if_missing_module(self):
        """Override this method to skip a test if a module is not compiled"""
        # Hook invoked by setup() before the chain is initialized; overrides
        # typically raise SkipTest.
        pass
    def setup_chain(self):
        """Override this method to customize blockchain setup"""
        self.log.info("Initializing test directory " + self.options.tmpdir)
        # Clean chain: nodes start from genesis; otherwise a pregenerated
        # chain is set up (see setup_nodes(), which expects 199 blocks).
        if self.setup_clean_chain:
            self._initialize_chain_clean()
        else:
            self._initialize_chain()
    def setup_network(self):
        """Override this method to customize test network topology"""
        self.setup_nodes()
        # Connect the nodes as a "chain". This allows us
        # to split the network between nodes 1 and 2 to get
        # two halves that can work on competing chains.
        #
        # Topology looks like this:
        # node0 <-- node1 <-- node2 <-- node3
        #
        # If all nodes are in IBD (clean chain from genesis), node0 is assumed to be the source of blocks (miner). To
        # ensure block propagation, all nodes will establish outgoing connections toward node0.
        # See fPreferredDownload in net_processing.
        #
        # If further outbound connections are needed, they can be added at the beginning of the test with e.g.
        # self.connect_nodes(1, 2)
        for i in range(self.num_nodes - 1):
            self.connect_nodes(i + 1, i)
        # Ensure every node agrees on blocks and mempool before the test runs.
        self.sync_all()
    def setup_nodes(self):
        """Override this method to customize test node setup"""
        extra_args = [[]] * self.num_nodes
        if hasattr(self, "extra_args"):
            extra_args = self.extra_args
        self.add_nodes(self.num_nodes, extra_args)
        self.start_nodes()
        if self.requires_wallet:
            self.import_deterministic_coinbase_privkeys()
        if not self.setup_clean_chain:
            # The cached chain is expected to contain exactly 199 blocks.
            for n in self.nodes:
                assert_equal(n.getblockchaininfo()["blocks"], 199)
            # To ensure that all nodes are out of IBD, the most recent block
            # must have a timestamp not too old (see IsInitialBlockDownload()).
            self.log.debug('Generate a block with current time')
            block_hash = self.nodes[0].generate(1)[0]
            block = self.nodes[0].getblock(blockhash=block_hash, verbosity=0)
            # Submit the fresh block to every node directly rather than
            # waiting for P2P relay.
            for n in self.nodes:
                n.submitblock(block)
                chain_info = n.getblockchaininfo()
                assert_equal(chain_info["blocks"], 200)
                assert_equal(chain_info["initialblockdownload"], False)
    def import_deterministic_coinbase_privkeys(self):
        """Initialize a wallet (per self.wallet_names) on every node."""
        for i in range(self.num_nodes):
            self.init_wallet(i)
def init_wallet(self, i):
wallet_name = self.default_wallet_name if self.wallet_names is None else self.wallet_names[i] if i < len(self.wallet_names) else False
if wallet_name is not False:
n = self.nodes[i]
if wallet_name is not None:
n.createwallet(wallet_name=wallet_name, descriptors=self.options.descriptors, load_on_startup=True)
n.importprivkey(privkey=n.get_deterministic_priv_key().key, label='coinbase')
    def run_test(self):
        """Tests must override this method to define test logic"""
        raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
    def add_nodes(self, num_nodes: int, extra_args=None, *, rpchost=None, binary=None, binary_cli=None, versions=None):
        """Instantiate TestNode objects.

        Should only be called once after the nodes have been specified in
        set_test_params()."""
        def get_bin_from_version(version, bin_name, bin_default):
            # No version requested: use the binary from the current build.
            if not version:
                return bin_default
            # Otherwise map the integer version to a previous-release path,
            # e.g. 200100 -> v0.20.1.
            return os.path.join(
                self.options.previous_releases_path,
                re.sub(
                    r'\.0$',
                    '',  # remove trailing .0 for point releases
                    'v{}.{}.{}.{}'.format(
                        (version % 100000000) // 1000000,
                        (version % 1000000) // 10000,
                        (version % 10000) // 100,
                        (version % 100) // 1,
                    ),
                ),
                'bin',
                bin_name,
            )
        if self.bind_to_localhost_only:
            extra_confs = [["bind=127.0.0.1"]] * num_nodes
        else:
            extra_confs = [[]] * num_nodes
        if extra_args is None:
            extra_args = [[]] * num_nodes
        if versions is None:
            versions = [None] * num_nodes
        if binary is None:
            binary = [get_bin_from_version(v, 'fujicoind', self.options.fujicoind) for v in versions]
        if binary_cli is None:
            binary_cli = [get_bin_from_version(v, 'fujicoin-cli', self.options.fujicoincli) for v in versions]
        # Every per-node list must have exactly one entry per node.
        assert_equal(len(extra_confs), num_nodes)
        assert_equal(len(extra_args), num_nodes)
        assert_equal(len(versions), num_nodes)
        assert_equal(len(binary), num_nodes)
        assert_equal(len(binary_cli), num_nodes)
        for i in range(num_nodes):
            test_node_i = TestNode(
                i,
                get_datadir_path(self.options.tmpdir, i),
                chain=self.chain,
                rpchost=rpchost,
                timewait=self.rpc_timeout,
                timeout_factor=self.options.timeout_factor,
                fujicoind=binary[i],
                fujicoin_cli=binary_cli[i],
                version=versions[i],
                coverage_dir=self.options.coveragedir,
                cwd=self.options.tmpdir,
                extra_conf=extra_confs[i],
                extra_args=extra_args[i],
                use_cli=self.options.usecli,
                start_perf=self.options.perf,
                use_valgrind=self.options.valgrind,
                descriptors=self.options.descriptors,
            )
            self.nodes.append(test_node_i)
            if not test_node_i.version_is_at_least(170000):
                # adjust conf for pre 17: older releases do not understand
                # the [regtest] section header, so strip it from the config.
                conf_file = test_node_i.fujicoinconf
                with open(conf_file, 'r', encoding='utf8') as conf:
                    conf_data = conf.read()
                with open(conf_file, 'w', encoding='utf8') as conf:
                    conf.write(conf_data.replace('[regtest]', ''))
    def start_node(self, i, *args, **kwargs):
        """Start a fujicoind

        Extra positional/keyword arguments are forwarded to TestNode.start()."""
        node = self.nodes[i]
        node.start(*args, **kwargs)
        node.wait_for_rpc_connection()
        if self.options.coveragedir is not None:
            coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
    def start_nodes(self, extra_args=None, *args, **kwargs):
        """Start multiple fujicoinds

        `extra_args` is a per-node list; remaining arguments are forwarded
        to each TestNode.start()."""
        if extra_args is None:
            extra_args = [None] * self.num_nodes
        assert_equal(len(extra_args), self.num_nodes)
        try:
            # Launch all processes first, then wait on RPC, so startup
            # happens in parallel.
            for i, node in enumerate(self.nodes):
                node.start(extra_args[i], *args, **kwargs)
            for node in self.nodes:
                node.wait_for_rpc_connection()
        except:
            # If one node failed to start, stop the others
            self.stop_nodes()
            raise
        if self.options.coveragedir is not None:
            for node in self.nodes:
                coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
    def stop_node(self, i, expected_stderr='', wait=0):
        """Stop a fujicoind test node"""
        self.nodes[i].stop_node(expected_stderr, wait=wait)
    def stop_nodes(self, wait=0):
        """Stop multiple fujicoind test nodes

        Issues all stop RPCs before waiting, so shutdowns overlap."""
        for node in self.nodes:
            # Issue RPC to stop nodes
            node.stop_node(wait=wait, wait_until_stopped=False)
        for node in self.nodes:
            # Wait for nodes to stop
            node.wait_until_stopped()
    def restart_node(self, i, extra_args=None):
        """Stop and start a test node"""
        self.stop_node(i)
        self.start_node(i, extra_args)
    def wait_for_node_exit(self, i, timeout):
        """Block until node ``i``'s process exits, up to ``timeout`` seconds."""
        self.nodes[i].process.wait(timeout)
    def connect_nodes(self, a, b):
        """Open an outbound P2P connection from node ``a`` to node ``b`` and
        wait for the version/verack handshake to complete."""
        def connect_nodes_helper(from_connection, node_num):
            ip_port = "127.0.0.1:" + str(p2p_port(node_num))
            from_connection.addnode(ip_port, "onetry")
            # poll until version handshake complete to avoid race conditions
            # with transaction relaying
            # See comments in net_processing:
            # * Must have a version message before anything else
            # * Must have a verack message before anything else
            # (A full verack is expected to account for exactly 24 received
            # bytes; `pop` here operates on the dict returned by the RPC,
            # not on node state.)
            wait_until_helper(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
            wait_until_helper(lambda: all(peer['bytesrecv_per_msg'].pop('verack', 0) == 24 for peer in from_connection.getpeerinfo()))
        connect_nodes_helper(self.nodes[a], b)
    def disconnect_nodes(self, a, b):
        """Disconnect node ``a`` from node ``b`` (identified via the subver
        string) and wait for the disconnect to take effect."""
        def disconnect_nodes_helper(from_connection, node_num):
            def get_peer_ids():
                result = []
                for peer in from_connection.getpeerinfo():
                    if "testnode{}".format(node_num) in peer['subver']:
                        result.append(peer['id'])
                return result
            peer_ids = get_peer_ids()
            if not peer_ids:
                self.log.warning("disconnect_nodes: {} and {} were not connected".format(
                    from_connection.index,
                    node_num,
                ))
                return
            for peer_id in peer_ids:
                try:
                    from_connection.disconnectnode(nodeid=peer_id)
                except JSONRPCException as e:
                    # If this node is disconnected between calculating the peer id
                    # and issuing the disconnect, don't worry about it.
                    # This avoids a race condition if we're mass-disconnecting peers.
                    if e.error['code'] != -29:  # RPC_CLIENT_NODE_NOT_CONNECTED
                        raise
            # wait to disconnect
            wait_until_helper(lambda: not get_peer_ids(), timeout=5)
        disconnect_nodes_helper(self.nodes[a], b)
    def split_network(self):
        """
        Split the network of four nodes into nodes 0/1 and 2/3.
        """
        self.disconnect_nodes(1, 2)
        # Sync each half separately; the halves may now diverge.
        self.sync_all(self.nodes[:2])
        self.sync_all(self.nodes[2:])
    def join_network(self):
        """
        Join the (previously split) network halves together.
        """
        self.connect_nodes(1, 2)
        self.sync_all()
    def sync_blocks(self, nodes=None, wait=1, timeout=60):
        """
        Wait until everybody has the same tip.

        sync_blocks needs to be called with an rpc_connections set that has at
        least one node already synced to the latest, stable tip, otherwise
        there's a chance it might return before all nodes are stably synced.
        """
        rpc_connections = nodes or self.nodes
        timeout = int(timeout * self.options.timeout_factor)
        stop_time = time.time() + timeout
        while time.time() <= stop_time:
            best_hash = [x.getbestblockhash() for x in rpc_connections]
            if best_hash.count(best_hash[0]) == len(rpc_connections):
                return
            # Check that each peer has at least one connection
            assert (all([len(x.getpeerinfo()) for x in rpc_connections]))
            time.sleep(wait)
        raise AssertionError("Block sync timed out after {}s:{}".format(
            timeout,
            "".join("\n  {!r}".format(b) for b in best_hash),
        ))
def sync_mempools(self, nodes=None, wait=1, timeout=60, flush_scheduler=True):
    """
    Block until every connection holds the same set of mempool transactions.
    """
    rpc_connections = nodes or self.nodes
    effective_timeout = int(timeout * self.options.timeout_factor)
    deadline = time.time() + effective_timeout
    while time.time() <= deadline:
        pool = [set(conn.getrawmempool()) for conn in rpc_connections]
        if pool.count(pool[0]) == len(rpc_connections):
            if flush_scheduler:
                # Drain the validation interface queue so dependent state
                # (wallet, indexes) has caught up before returning.
                for conn in rpc_connections:
                    conn.syncwithvalidationinterfacequeue()
            return
        # Sync can never complete if a node has lost all its connections.
        assert all(len(conn.getpeerinfo()) for conn in rpc_connections)
        time.sleep(wait)
    raise AssertionError("Mempool sync timed out after {}s:{}".format(
        effective_timeout,
        "".join("\n {!r}".format(m) for m in pool),
    ))
def sync_all(self, nodes=None):
    """Convenience wrapper: sync blocks first, then mempools, across `nodes`."""
    self.sync_blocks(nodes)
    self.sync_mempools(nodes)
def wait_until(self, test_function, timeout=60):
    """Poll `test_function` until truthy, scaling `timeout` by --timeout-factor."""
    factor = self.options.timeout_factor
    return wait_until_helper(test_function, timeout=timeout, timeout_factor=factor)
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
    """Set up the framework logger: a DEBUG file handler writing to
    <tmpdir>/test_framework.log, a stdout handler at the user-selected
    --loglevel, and optional RPC tracing via --tracerpc."""
    # Add logger and logging handlers
    self.log = logging.getLogger('TestFramework')
    self.log.setLevel(logging.DEBUG)
    # Create file handler to log all messages
    fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log', encoding='utf-8')
    fh.setLevel(logging.DEBUG)
    # Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
    ch = logging.StreamHandler(sys.stdout)
    # User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
    ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
    ch.setLevel(ll)
    # Format logs the same as fujicoind's debug.log with microprecision (so log files can be concatenated and sorted)
    formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%dT%H:%M:%S')
    # Timestamps in UTC so interleaved logs sort consistently.
    formatter.converter = time.gmtime
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    # add the handlers to the logger
    self.log.addHandler(fh)
    self.log.addHandler(ch)
    if self.options.trace_rpc:
        # Mirror every RPC request/response to stdout for debugging.
        rpc_logger = logging.getLogger("FujicoinRPC")
        rpc_logger.setLevel(logging.DEBUG)
        rpc_handler = logging.StreamHandler(sys.stdout)
        rpc_handler.setLevel(logging.DEBUG)
        rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self):
    """Initialize a pre-mined blockchain for use by the test.
    Create a cache of a 199-block-long chain
    Afterward, create num_nodes copies from the cache."""
    CACHE_NODE_ID = 0  # Use node 0 to create the cache for all other nodes
    cache_node_dir = get_datadir_path(self.options.cachedir, CACHE_NODE_ID)
    assert self.num_nodes <= MAX_NODES
    if not os.path.isdir(cache_node_dir):
        # Cache miss: mine the chain once into the cache directory.
        self.log.debug("Creating cache directory {}".format(cache_node_dir))
        initialize_datadir(self.options.cachedir, CACHE_NODE_ID, self.chain)
        self.nodes.append(
            TestNode(
                CACHE_NODE_ID,
                cache_node_dir,
                chain=self.chain,
                extra_conf=["bind=127.0.0.1"],
                extra_args=['-disablewallet'],
                rpchost=None,
                timewait=self.rpc_timeout,
                timeout_factor=self.options.timeout_factor,
                fujicoind=self.options.fujicoind,
                fujicoin_cli=self.options.fujicoincli,
                coverage_dir=None,
                cwd=self.options.tmpdir,
                descriptors=self.options.descriptors,
            ))
        self.start_node(CACHE_NODE_ID)
        cache_node = self.nodes[CACHE_NODE_ID]
        # Wait for RPC connections to be ready
        cache_node.wait_for_rpc_connection()
        # Set a time in the past, so that blocks don't end up in the future
        cache_node.setmocktime(cache_node.getblockheader(cache_node.getbestblockhash())['time'])
        # Create a 199-block-long chain; each of the 3 first nodes
        # gets 25 mature blocks and 25 immature.
        # The 4th address gets 25 mature and only 24 immature blocks so that the very last
        # block in the cache does not age too much (have an old tip age).
        # This is needed so that we are out of IBD when the test starts,
        # see the tip age check in IsInitialBlockDownload().
        gen_addresses = [k.address for k in TestNode.PRIV_KEYS][:3] + [ADDRESS_BCRT1_P2WSH_OP_TRUE]
        assert_equal(len(gen_addresses), 4)
        # 8 rounds of 25 blocks (24 in the last) = 199 blocks total.
        for i in range(8):
            cache_node.generatetoaddress(
                nblocks=25 if i != 7 else 24,
                address=gen_addresses[i % len(gen_addresses)],
            )
        assert_equal(cache_node.getblockchaininfo()["blocks"], 199)
        # Shut it down, and clean up cache directories:
        self.stop_nodes()
        self.nodes = []

        def cache_path(*paths):
            # Build a path inside the cached node's chain directory.
            return os.path.join(cache_node_dir, self.chain, *paths)
        os.rmdir(cache_path('wallets'))  # Remove empty wallets dir
        for entry in os.listdir(cache_path()):
            if entry not in ['chainstate', 'blocks', 'indexes']:  # Only indexes, chainstate and blocks folders
                os.remove(cache_path(entry))
    # Clone the (possibly freshly built) cache into each test node's datadir.
    for i in range(self.num_nodes):
        self.log.debug("Copy cache directory {} to node {}".format(cache_node_dir, i))
        to_dir = get_datadir_path(self.options.tmpdir, i)
        shutil.copytree(cache_node_dir, to_dir)
        initialize_datadir(self.options.tmpdir, i, self.chain)  # Overwrite port/rpcport in fujicoin.conf
def _initialize_chain_clean(self):
    """Initialize empty blockchain for use by the test.
    Create an empty blockchain and num_nodes wallets.
    Useful if a test case wants complete control over initialization."""
    for node_index in range(self.num_nodes):
        initialize_datadir(self.options.tmpdir, node_index, self.chain)
def skip_if_no_py3_zmq(self):
    """Skip the test when the python3 zmq package cannot be imported."""
    try:
        import zmq  # noqa
    except ImportError:
        raise SkipTest("python3-zmq module not available.")
def skip_if_no_fujicoind_zmq(self):
    """Skip the running test unless fujicoind was built with zmq support."""
    if not self.is_zmq_compiled():
        raise SkipTest("fujicoind has not been built with zmq enabled.")
def skip_if_no_wallet(self):
    """Skip the running test unless a wallet backend was compiled in."""
    self.requires_wallet = True
    if not self.is_wallet_compiled():
        raise SkipTest("wallet has not been compiled.")
    # Descriptor wallets need sqlite; legacy wallets need BDB.
    if self.options.descriptors:
        self.skip_if_no_sqlite()
    else:
        self.skip_if_no_bdb()
def skip_if_no_sqlite(self):
    """Skip the running test unless sqlite wallet support was compiled."""
    if not self.is_sqlite_compiled():
        raise SkipTest("sqlite has not been compiled.")
def skip_if_no_bdb(self):
    """Skip the running test unless BDB wallet support was compiled."""
    if not self.is_bdb_compiled():
        raise SkipTest("BDB has not been compiled.")
def skip_if_no_wallet_tool(self):
    """Skip the running test unless the fujicoin-wallet tool was compiled."""
    if not self.is_wallet_tool_compiled():
        raise SkipTest("fujicoin-wallet has not been compiled")
def skip_if_no_cli(self):
    """Skip the running test unless fujicoin-cli was compiled."""
    if not self.is_cli_compiled():
        raise SkipTest("fujicoin-cli has not been compiled.")
def skip_if_no_previous_releases(self):
    """Skip the running test unless previous-release binaries are available."""
    if not self.has_previous_releases():
        raise SkipTest("previous releases not available or disabled")
def has_previous_releases(self):
    """Checks whether previous releases are present and enabled."""
    path_exists = os.path.isdir(self.options.previous_releases_path)
    # A forced previous-release run with no binaries present is a hard error,
    # not a skip.
    if self.options.prev_releases and not path_exists:
        raise AssertionError("Force test of previous releases but releases missing: {}".format(
            self.options.previous_releases_path))
    return self.options.prev_releases
def skip_if_no_external_signer(self):
    """Skip the running test unless external-signer support was compiled."""
    if not self.is_external_signer_compiled():
        raise SkipTest("external signer support has not been compiled.")
def is_cli_compiled(self):
    """Return True when fujicoin-cli was built (ENABLE_CLI in build config)."""
    components = self.config["components"]
    return components.getboolean("ENABLE_CLI")
def is_external_signer_compiled(self):
    """Return True when external-signer support was built (ENABLE_EXTERNAL_SIGNER)."""
    components = self.config["components"]
    return components.getboolean("ENABLE_EXTERNAL_SIGNER")
def is_wallet_compiled(self):
    """Return True when the wallet module was built (ENABLE_WALLET)."""
    components = self.config["components"]
    return components.getboolean("ENABLE_WALLET")
def is_wallet_tool_compiled(self):
    """Return True when fujicoin-wallet was built (ENABLE_WALLET_TOOL)."""
    components = self.config["components"]
    return components.getboolean("ENABLE_WALLET_TOOL")
def is_zmq_compiled(self):
    """Return True when the zmq module was built (ENABLE_ZMQ)."""
    components = self.config["components"]
    return components.getboolean("ENABLE_ZMQ")
def is_sqlite_compiled(self):
    """Return True when the wallet was built with Sqlite support (USE_SQLITE)."""
    components = self.config["components"]
    return components.getboolean("USE_SQLITE")
def is_bdb_compiled(self):
    """Return True when the wallet was built with BDB support (USE_BDB)."""
    components = self.config["components"]
    return components.getboolean("USE_BDB")
|
|
import datetime
import json
import re
from flask_jwt import current_identity, jwt_required
from flask_restplus import Namespace, Resource, fields, reqparse
from packr.models import (Address, Contact, DangerClass, Order, OrderStatus,
Package, ServiceType, StatusType)
api = Namespace('book',
                description='Operations related to creating a booking')

# Swagger model for the booking payload. All fields arrive as strings;
# 'pickup', 'delivery' and 'packages' carry JSON-encoded sub-documents
# that the endpoint decodes with json.loads.
# NOTE(review): flask-restplus' field keyword is lowercase `readonly`; the
# camelCase `readOnly` spellings here are likely ignored by the schema —
# verify which spelling is intended and unify.
booking = api.model('Book', {
    'type': fields.String(readOnly=True,
                          description='The service type'),  # fixed typo: was "descriptuon"
    'dangerous': fields.String(readOnly=True,
                               description='The danger type, if applicable'),
    'pickup': fields.String(readOnly=True,
                            description='The pickup data'),
    'delivery': fields.String(readOnly=True,
                              description='The delivery data'),
    'fragile': fields.String(readonly=True,
                             description="If the package is fragile"),
    'paymentType': fields.String(readOnly=True,
                                 description='The payment type'),
    'customerComments': fields.String(readOnly=True,
                                      description='The customer comments'),
    'packages': fields.String(readOnly=True,
                              description='A JSON map of the packages')
})
@api.route('/')
class BookItem(Resource):
    """Create a booking: validate the payload, persist contacts, addresses,
    packages and the order, and record the initial 'booked' status."""

    @api.expect(booking)
    @api.response(204, 'Created booking.')
    @jwt_required()
    def post(self):
        # NOTE(review): the decorator advertises 204 but the handler returns
        # 201 on success — confirm which status is intended.
        req_parse = reqparse.RequestParser(bundle_errors=True)
        req_parse.add_argument('type', type=str,
                               help='No service type provided',
                               required=True,
                               location='json')
        req_parse.add_argument('dangerous', type=str,
                               required=False,
                               location='json')
        req_parse.add_argument('delivery', type=str, required=True,
                               help='No delivery data provided',
                               location='json')
        req_parse.add_argument('pickup', type=str, required=True,
                               help='No pickup data provided',
                               location='json')
        req_parse.add_argument('fragile', type=str, required=True,
                               help='No fragile status provided',
                               location='json')
        req_parse.add_argument('paymentType', type=str, required=True,
                               help='No payment type provided',
                               location='json')
        req_parse.add_argument('customerComments', type=str, required=False,
                               location='json')
        req_parse.add_argument('packages', type=str, required=True,
                               help='No packages list provided',
                               location='json')
        args = req_parse.parse_args()
        service_type_name = args.get('type')
        dangerous = args.get('dangerous')
        pickup = json.loads(args.get('pickup'))
        delivery = json.loads(args.get('delivery'))
        fragile = args.get('fragile')
        payment_type_name = args.get('paymentType')
        comments = args.get('customerComments')
        packages = json.loads(args.get('packages'))

        # One shared email pattern for both the pickup and delivery checks.
        email_re = r"^[A-Za-z0-9\.\+_-]+@[A-Za-z0-9\._-]+\.[a-zA-Z]*$"

        # --- pickup validation -------------------------------------------
        if pickup['businessName'] == '':
            return {'message': {
                'email': 'No pickup business name provided'}}, 400
        if pickup['contactName'] == '':
            return {'message': {
                'email': 'No pickup contact name provided'}}, 400
        if pickup['phone'] == '':
            return {'message': {
                'email': 'No pickup phone provided'}}, 400
        if pickup['email'] == '':
            return {'message': {'email': 'No pickup email provided'}}, 400
        elif not re.match(email_re, pickup['email']):
            return {'message': {'email': 'Invalid pickup email provided'}}, 400
        if pickup['street'] == '':
            return {'message': {
                'email': 'No pickup street provided'}}, 400
        if pickup['suburb'] == '':
            return {'message': {
                'email': 'No pickup suburb provided'}}, 400
        if pickup['state'] == '':
            return {'message': {
                'email': 'No pickup state provided'}}, 400
        if pickup['postCode'] == '':
            return {'message': {
                'email': 'No pickup post code provided'}}, 400
        if pickup['dateTime'] == '':
            return {'message': {
                'email': 'No pickup date time provided'}}, 400

        # --- delivery validation -----------------------------------------
        if delivery['businessName'] == '':
            return {'message': {
                'email': 'No delivery business name provided'}}, 400
        if delivery['contactName'] == '':
            return {'message': {
                'email': 'No delivery contact name provided'}}, 400
        if delivery['phone'] == '':
            return {'message': {
                'email': 'No delivery phone provided'}}, 400
        if delivery['email'] == '':
            return {'message': {'email': 'No delivery email provided'}}, 400
        elif not re.match(email_re, delivery['email']):
            return {'message': {
                'email': 'Invalid delivery email provided'}}, 400
        if delivery['street'] == '':
            return {'message': {
                'email': 'No delivery street provided'}}, 400
        if delivery['suburb'] == '':
            return {'message': {
                'email': 'No delivery suburb provided'}}, 400
        if delivery['state'] == '':
            return {'message': {
                'email': 'No delivery state provided'}}, 400
        if delivery['postCode'] == '':
            return {'message': {
                'email': 'No delivery post code provided'}}, 400

        # --- remaining fields and authorization --------------------------
        if fragile == '':
            return {'message': {
                'email': 'No fragility provided'}}, 400
        if payment_type_name == '':
            return {'message': {
                'email': 'No payment type provided'}}, 400
        if comments == '':
            return {'message': {
                'email': 'No comments provided'}}, 400
        if not packages:
            return {'message': {'email': 'No packages provided'}}, 400
        if current_identity.role.role_name != 'user':
            return {'message': {'email': 'Only users can order packages'}}, 401

        # --- persist contacts and addresses ------------------------------
        pickup_contact = Contact(business_name=pickup['businessName'],
                                 contact_name=pickup['contactName'],
                                 phone=pickup['phone'],
                                 email=pickup['email'])
        delivery_contact = Contact(business_name=delivery['businessName'],
                                   contact_name=delivery['contactName'],
                                   phone=delivery['phone'],
                                   email=delivery['email'])
        pickup_address = Address(street=pickup['street'],
                                 suburb=pickup['suburb'],
                                 state=pickup['state'],
                                 post_code=pickup['postCode'])
        delivery_address = Address(street=delivery['street'],
                                   suburb=delivery['suburb'],
                                   state=delivery['state'],
                                   post_code=delivery['postCode'])
        pickup_contact.save()
        delivery_contact.save()
        pickup_address.save()
        delivery_address.save()

        # Build the packages and total up the order weight.
        package_list = list()
        weight = 0
        for package in packages:
            new_package = Package(weight=package['weight'],
                                  width=package['width'],
                                  height=package['height'],
                                  length=package['length'])
            package_list.append(new_package)
            weight += float(package['weight'])

        danger_class = DangerClass.query.filter_by(name=dangerous).first()
        service_type = ServiceType.query.filter_by(name=service_type_name).\
            first()
        date_format = "%Y-%m-%dT%H:%M:%S.%fZ"

        # Estimated delivery date depends on the chosen service tier.
        # (Compare on service_type_name consistently: the previous
        # `service_type.name` would raise AttributeError when the service
        # type was not found in the database.)
        eta = datetime.date.today()
        if service_type_name == 'overnight':
            eta += datetime.timedelta(days=1)
        elif service_type_name == 'express':
            eta += datetime.timedelta(days=3)
        else:
            eta += datetime.timedelta(days=5)

        new_order = Order(created_at=datetime.datetime.utcnow(),
                          cost=0,
                          delivery_address=delivery_address,
                          pickup_address=pickup_address,
                          delivery_contact=delivery_contact,
                          pickup_contact=pickup_contact,
                          package=package_list,
                          weight=weight,
                          fragile=(fragile == 'yes'),
                          danger=danger_class,
                          user_id=current_identity.id,
                          notes=comments,
                          service_type=service_type,
                          payment_type=payment_type_name,
                          eta=eta,
                          pickup_time=datetime.datetime.strptime(
                              pickup['dateTime'],
                              date_format))
        status_type = StatusType.query.filter_by(name='booked').first()
        # NOTE(review): new_order has not been saved yet, so new_order.id is
        # presumably None here; the append below relies on the relationship
        # to populate order_id on flush — verify against the ORM layer.
        order_status = OrderStatus(status=status_type,
                                   address='On The Spot Depot',
                                   time=datetime.datetime.utcnow(),
                                   order_id=new_order.id)
        order_status.save()
        new_order.status.append(order_status)
        new_order.save()
        # Fixed stray backtick that previously leaked into the response text.
        return {'message': {
            'description': 'Successfully booked pickup.'}}, 201
|
|
import logging
import json
import pygame
from knuckle import Window, GameState
from battle import BattleState
from scene import Scene
from texts import Texts
import stats
import ascii
logging.basicConfig(level=logging.INFO)
class Caucuses(GameState):
    """Caucasus mountain chapter: two dialogue pages, then the Prometheus
    battle (which transitions to VictoryState on success)."""

    def __init__(self, window, *args, **kwargs):
        super(Caucuses, self).__init__(
            window, 'Caucuses', *args, **kwargs)
        self.scene = Scene(self.window)
        self.scene.add_tiled_layer('assets/raw/caucuses.json')
        self.tl = self.scene.add_text_layer()
        self.tl.add_text(1, 1, Texts['mountain_intro'])
        self.tl.add_text(50, 13, 'Press space to continue...')
        # Tracks which scripted dialogue page is currently shown.
        self.current_scene = 'mountain_intro'

    def on_keydown(self, event):
        # Space advances through the dialogue pages in a fixed order.
        if event.key == pygame.K_SPACE:
            if self.current_scene == 'mountain_intro':
                # Swap the text layer for the next page.
                self.scene.remove_layer(self.tl)
                self.tl = self.scene.add_text_layer()
                self.tl.add_text(1, 1, Texts['mountain_suspense'])
                self.tl.add_text(50, 13, 'Press space to continue...')
                self.current_scene = 'mountain_suspense'
            elif self.current_scene == 'mountain_suspense':
                # Start the battle music before the ambush page.
                pygame.mixer.music.load('assets/music/musix-the-red-one.mod')
                pygame.mixer.music.play(-1)
                self.scene.remove_layer(self.tl)
                self.tl = self.scene.add_text_layer()
                self.tl.add_text(1, 1, Texts['mountain_ambush'])
                self.tl.add_text(50, 13, 'Press space to continue...')
                self.current_scene = 'mountain_ambush'
            elif self.current_scene == 'mountain_ambush':
                # Replace this state with the boss battle.
                # NOTE: VictoryState is defined later in the module; the name
                # is resolved at call time, so the forward reference is fine.
                self.window.pop_state()
                self.window.push_state(BattleState, backdrop='assets/raw/caucuses.json', monster_name='Prometheus', next_state=VictoryState, speed=650)
class DeadPlains(GameState):
    """Dead Plains chapter: intro dialogue, then the Revenant battle, whose
    victory leads on to the Caucuses chapter."""

    def __init__(self, window, *args, **kwargs):
        super(DeadPlains, self).__init__(
            window, 'DeadPlains', *args, **kwargs)
        self.scene = Scene(self.window)
        self.scene.add_tiled_layer('assets/raw/dead_plains.json')
        self.tl = self.scene.add_text_layer()
        self.tl.add_text(1, 1, Texts['dead_intro'])
        self.tl.add_text(50, 13, 'Press space to continue...')
        # Tracks which scripted dialogue page is currently shown.
        self.current_scene = 'dead_intro'

    def on_keydown(self, event):
        # Space advances through the dialogue pages in a fixed order.
        if event.key == pygame.K_SPACE:
            if self.current_scene == 'dead_intro':
                self.scene.remove_layer(self.tl)
                self.tl = self.scene.add_text_layer()
                self.tl.add_text(1, 1, Texts['dead_ambush'])
                self.tl.add_text(50, 13, 'Press space to continue...')
                self.current_scene = 'dead_ambush'
            elif self.current_scene == 'dead_ambush':
                # Replace this state with the battle.
                self.window.pop_state()
                self.window.push_state(BattleState, backdrop='assets/raw/dead_plains.json', monster_name='Revenant', next_state=Caucuses, speed=800)
class ForestState(GameState):
    """Ancient Forest chapter: intro dialogue, then the Forest Giant battle,
    whose victory leads on to the Dead Plains chapter."""

    def __init__(self, window, *args, **kwargs):
        super(ForestState, self).__init__(
            window, 'ForestState', *args, **kwargs)
        self.scene = Scene(self.window)
        self.scene.add_tiled_layer('assets/raw/ancient_forest.json')
        self.tl = self.scene.add_text_layer()
        self.tl.add_text(1, 1, Texts['forest_intro'])
        self.tl.add_text(50, 13, 'Press space to continue...')
        # Tracks which scripted dialogue page is currently shown.
        self.current_scene = 'forest_intro'

    def on_keydown(self, event):
        if event.key == pygame.K_SPACE:
            if self.current_scene == 'forest_intro':
                self.scene.remove_layer(self.tl)
                self.tl = self.scene.add_text_layer()
                self.tl.add_text(1, 1, Texts['forest_ambush'])
                self.tl.add_text(50, 13, 'Press space to continue...')
                # Note the state key is 'forest_giant' even though the text
                # shown is 'forest_ambush' — the elif below matches this key.
                self.current_scene = 'forest_giant'
            elif self.current_scene == 'forest_giant':
                self.window.pop_state()
                self.window.push_state(BattleState, backdrop='assets/raw/ancient_forest.json', monster_name='Forest Giant', next_state=DeadPlains, speed=1000)
class DelphiState(GameState):
    """Delphi valley chapter: quest intro dialogue, then the forest chapter.

    Entering this state also starts the quest timer on the stats singleton.
    """

    def __init__(self, window, *args, **kwargs):
        super(DelphiState, self).__init__(
            window, 'DelphiState', *args, **kwargs)
        self.scene = Scene(self.window)
        self.scene.add_tiled_layer('assets/raw/delphi_valley.json')
        self.tl = self.scene.add_text_layer()
        self.tl.add_text(1, 1, Texts['delphi_intro'])
        self.tl.add_text(50, 13, 'Press space to continue...')
        # Tracks which scripted dialogue page is currently shown.
        self.current_scene = 'delphi_intro'
        stats.get_instance().start_quest()

    def on_keydown(self, event):
        # Space advances through the dialogue pages in a fixed order.
        # (Removed dead UP/DOWN handlers copied from KnuckleState: this state
        # defines no self.menu, so they raised AttributeError when pressed.)
        if event.key == pygame.K_SPACE:
            if self.current_scene == 'delphi_intro':
                self.scene.remove_layer(self.tl)
                self.tl = self.scene.add_text_layer()
                self.tl.add_text(1, 1, Texts['delphi_quest'])
                self.tl.add_text(50, 13, 'Press space to continue...')
                self.current_scene = 'delphi_quest'
            elif self.current_scene == 'delphi_quest':
                self.window.pop_state()
                self.window.push_state(ForestState)
class VictoryState(GameState):
    """Ending chapter shown after the final battle: two victory pages, then
    the statistics screen. Entering it stops the quest timer."""

    def __init__(self, window, *args, **kwargs):
        super(VictoryState, self).__init__(
            window, 'VictoryState', *args, **kwargs)
        stats.get_instance().end_quest()
        pygame.mixer.music.load('assets/music/musix-wild-perspective.mod')
        pygame.mixer.music.play(-1)
        self.scene = Scene(self.window)
        # Keep a handle on the backdrop so it can be swapped mid-sequence.
        self.bg = self.scene.add_tiled_layer('assets/raw/caucuses.json')
        self.tl = self.scene.add_text_layer()
        self.tl.add_text(1, 1, Texts['mountain_victory'])
        self.tl.add_text(50, 13, 'Press space to continue...')
        self.current_scene = 'mountain_victory'

    def on_keydown(self, event):
        if event.key == pygame.K_SPACE:
            if self.current_scene == 'mountain_victory':
                # Swap both the text and the backdrop for the Delphi epilogue.
                self.scene.remove_layer(self.tl)
                self.scene.remove_layer(self.bg)
                self.bg = self.scene.add_tiled_layer('assets/raw/delphi_valley.json')
                self.tl = self.scene.add_text_layer()
                self.tl.add_text(1, 1, Texts['delphi_victory'])
                self.tl.add_text(50, 13, 'Press space to continue...')
                self.current_scene = 'delphi_victory'
            elif self.current_scene == 'delphi_victory':
                self.window.pop_state()
                self.window.push_state(stats.StatsState)
class KnuckleState(GameState):
    """Title screen: studio/title text plus a two-item menu (New Game / Quit)."""

    def __init__(self, window, *args, **kwargs):
        super(KnuckleState, self).__init__(
            window, 'KnuckleState', *args, **kwargs)
        self.scene = Scene(self.window)
        tl = self.scene.add_text_layer()
        tl.add_text(20, 12, 'Knuckledragger Games')
        tl.add_text(26, 14, 'presents')
        tl.add_text(19, 16, 'Retirement of Diomedes')
        self.menu = self.scene.add_menu_layer(56, 36, 15, 4)
        self.menu.add_menu_item('New Game', self.new_game)
        self.menu.add_menu_item('Quit', self.quit)

    def new_game(self):
        # Fresh stats for a new run, then jump into the first chapter.
        stats.get_instance().reset()
        self.window.push_state(DelphiState)

    def quit(self):
        # Popping the title state ends the game loop.
        self.window.pop_state()

    def on_keydown(self, event):
        # RETURN activates the highlighted item; UP/DOWN move the highlight.
        if event.key == pygame.K_RETURN:
            self.menu.activate()
        if event.key == pygame.K_UP:
            self.menu.prev()
        if event.key == pygame.K_DOWN:
            self.menu.next()

    def on_state_gain_focus(self):
        # Restart the title music and force a full redraw when returning
        # here from another state.
        pygame.mixer.music.load('assets/music/musix-after.mod')
        pygame.mixer.music.play(-1)
        self.window.screen.fill((0, 0, 0))
        self.scene.draw(self.window)
        self.scene.redraw_all()
        pygame.display.flip()
class TestState(GameState):
    """Developer sandbox state: exercises tiled, text, menu and ping-pong
    layers plus an FPS readout. Not part of the normal game flow."""

    def __init__(self, window, *args, **kwargs):
        super(TestState, self).__init__(window, 'TestState', *args, **kwargs)
        self.pause = False
        self.sheets = {}
        self.scene = Scene(self.window)
        self.scene.add_tiled_layer('assets/raw/ancient_forest.json')
        tl = self.scene.add_text_layer()
        tl.add_text(1, 1, Texts['forest_intro'])
        # Separate layer for debug output so it can overlay everything else.
        self.debug = self.scene.add_text_layer()
        self.fps_string = self.debug.add_string(1, 44, '', 'menu')
        self.menu = self.scene.add_menu_layer(4, 25, 30, 16)
        self.menu.add_menu_item('FPS timer bump', self.menu_item_one)
        self.menu.add_menu_item('Second one', self.update_fps_label)
        self.menu.add_menu_item('Third one', self.update_fps_label)
        self.menu.add_menu_item('Close menu', self.close_menu)
        self.ppl = self.scene.add_ping_pong_layer(10, 10, 20, 8, 1500)
        # Refresh the FPS label once a second.
        self.window.scheduler.new_cycle_timer(1000, self.update_fps_label)

    def on_keydown(self, event):
        # ESC exits the sandbox; UP/DOWN/RETURN drive the debug menu.
        if event.key == pygame.K_ESCAPE:
            self.window.pop_state()
        if event.key == pygame.K_UP:
            self.menu.prev()
        if event.key == pygame.K_DOWN:
            self.menu.next()
        if event.key == pygame.K_RETURN:
            self.menu.activate()

    def on_draw(self):
        # Only update the dirty rectangles reported by the scene.
        update_rects = []
        update_rects += self.scene.draw(self.window)
        # pygame.display.flip()
        # print len(update_rects)
        # print update_rects
        pygame.display.update(update_rects)

    def on_update(self, dt):
        pass

    def update_fps_label(self):
        # Shows frames-per-second and updates-per-second side by side.
        self.fps_string.set_text('FPS: {}({}) '.format(
            self.window.fps, self.window.ups))

    def menu_item_one(self):
        print 'activated item 1'

    def close_menu(self):
        self.scene.remove_layer(self.menu)

    def on_state_lose_focus(self):
        # Stop the ping-pong animation while this state is not visible.
        self.ppl.stop()
if __name__ == '__main__':
    # Bootstrap: audio mixer first, then the window, the ASCII color palette,
    # and finally the title state before entering the main loop.
    pygame.mixer.init()
    flags = 0  # pygame.FULLSCREEN
    window = Window(1280, 720, flags)
    ascii.load_colors()
    window.push_state(KnuckleState, True)
    # import cProfile as profile
    # profile.run('window.run()')
    window.run()
|
|
import calendar
import re
from time import struct_time
from datetime import date, datetime
from operator import add, sub
from dateutil.relativedelta import relativedelta
from edtf import appsettings
from edtf.convert import dt_to_struct_time, trim_struct_time, \
TIME_EMPTY_TIME, TIME_EMPTY_EXTRAS
# Lean directions used when collapsing a fuzzy/partial date to a concrete one.
EARLIEST = 'earliest'
LATEST = 'latest'
# Precision labels reported by parsed date objects.
# NOTE(review): "millenium" is misspelled (should be "millennium"), but both
# the name and the value are part of the public API, so they are kept as-is.
PRECISION_MILLENIUM = "millenium"
PRECISION_CENTURY = "century"
PRECISION_DECADE = "decade"
PRECISION_YEAR = "year"
PRECISION_MONTH = "month"
PRECISION_SEASON = "season"
PRECISION_DAY = "day"
def days_in_month(year, month):
    """
    Return the number of days in the given year and month, where month is
    1=January to 12=December, and respecting leap years as identified by
    `calendar.isleap()`
    """
    february_days = 29 if calendar.isleap(year) else 28
    return {
        1: 31, 2: february_days, 3: 31, 4: 30, 5: 31, 6: 30,
        7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31,
    }[month]
def apply_delta(op, time_struct, delta):
    """
    Apply a `relativedelta` to a `struct_time` data structure.
    `op` is an operator function, probably always `add` or `sub`tract to
    correspond to `a_date + a_delta` and `a_date - a_delta`.
    This function is required because we cannot use standard `datetime` module
    objects for conversion when the date/time is, or will become, outside the
    boundary years 1 AD to 9999 AD.
    """
    if not delta:
        return time_struct  # No work to do
    try:
        # Fast path: the date fits in datetime's supported range.
        dt_result = op(datetime(*time_struct[:6]), delta)
        return dt_to_struct_time(dt_result)
    except (OverflowError, ValueError):
        # Year is not within supported 1 to 9999 AD range
        pass
    # Here we fake the year to one in the acceptable range to avoid having to
    # write our own date rolling logic
    # Adjust the year to be close to the 2000 millenium in 1,000 year
    # increments to try and retain accurate relative leap years
    actual_year = time_struct.tm_year
    millenium = int(float(actual_year) / 1000)
    millenium_diff = (2 - millenium) * 1000
    adjusted_year = actual_year + millenium_diff
    # Apply delta to the date/time with adjusted year
    dt = datetime(*(adjusted_year,) + time_struct[1:6])
    dt_result = op(dt, delta)
    # Convert result year back to its original millenium
    final_year = dt_result.year - millenium_diff
    # Rebuild a struct_time; the trailing extras (weekday etc.) are filled
    # with the module's placeholder values rather than recomputed.
    return struct_time(
        (final_year,) + dt_result.timetuple()[1:6] + tuple(TIME_EMPTY_EXTRAS))
class EDTFObject(object):
    """
    Object to attact to a parser to become instantiated when the parser
    completes.
    """
    # NOTE(review): "attact" above is presumably a typo for "attach".
    parser = None

    @classmethod
    def set_parser(cls, p):
        # Bind this class to a parser expression: when the expression
        # matches, parse_action replaces the tokens with an instance of cls.
        cls.parser = p
        p.addParseAction(cls.parse_action)

    @classmethod
    def parse_action(cls, toks):
        kwargs = toks.asDict()
        try:
            return cls(**kwargs)  # replace the token list with the class
        except Exception as e:
            # Surface which constructor/arguments failed before re-raising.
            print("trying to %s.__init__(**%s)" % (cls.__name__, kwargs))
            raise e

    @classmethod
    def parse(cls, s):
        return cls.parser.parseString(s)[0]

    def __repr__(self):
        return "%s: '%s'" % (type(self).__name__, str(self))

    def __init__(self, *args, **kwargs):
        # Subclasses are expected to override __init__; note this shadows
        # the builtin `str` locally.
        str = "%s.__init__(*%s, **%s)" % (
            type(self).__name__,
            args, kwargs,
        )
        raise NotImplementedError("%s is not implemented." % str)

    def __str__(self):
        raise NotImplementedError

    def _strict_date(self, lean):
        raise NotImplementedError

    def lower_strict(self):
        # Earliest concrete date this (possibly fuzzy) value can denote.
        return self._strict_date(lean=EARLIEST)

    def upper_strict(self):
        # Latest concrete date this (possibly fuzzy) value can denote.
        return self._strict_date(lean=LATEST)

    def _get_fuzzy_padding(self, lean):
        """
        Subclasses should override this to pad based on how precise they are.
        """
        return relativedelta(0)

    def get_is_approximate(self):
        return getattr(self, '_is_approximate', False)

    def set_is_approximate(self, val):
        self._is_approximate = val
    is_approximate = property(get_is_approximate, set_is_approximate)

    def get_is_uncertain(self):
        return getattr(self, '_is_uncertain', False)

    def set_is_uncertain(self, val):
        self._is_uncertain = val
    is_uncertain = property(get_is_uncertain, set_is_uncertain)

    def lower_fuzzy(self):
        # Earliest strict date minus the uncertainty/approximation padding.
        strict_val = self.lower_strict()
        return apply_delta(sub, strict_val, self._get_fuzzy_padding(EARLIEST))

    def upper_fuzzy(self):
        # Latest strict date plus the uncertainty/approximation padding.
        strict_val = self.upper_strict()
        return apply_delta(add, strict_val, self._get_fuzzy_padding(LATEST))

    def __eq__(self, other):
        if isinstance(other, EDTFObject):
            return str(self) == str(other)
        elif isinstance(other, date):
            return str(self) == other.isoformat()
        elif isinstance(other, struct_time):
            # NOTE(review): _strict_date() requires a `lean` argument, so
            # this branch raises TypeError as written — confirm which lean
            # was intended before relying on struct_time equality.
            return self._strict_date() == trim_struct_time(other)
        return False

    def __ne__(self, other):
        if isinstance(other, EDTFObject):
            return str(self) != str(other)
        elif isinstance(other, date):
            return str(self) != other.isoformat()
        elif isinstance(other, struct_time):
            # NOTE(review): same missing-`lean` issue as in __eq__.
            return self._strict_date() != trim_struct_time(other)
        return True

    # Ordering comparisons all use the earliest strict date on both sides.
    def __gt__(self, other):
        if isinstance(other, EDTFObject):
            return self.lower_strict() > other.lower_strict()
        elif isinstance(other, date):
            return self.lower_strict() > dt_to_struct_time(other)
        elif isinstance(other, struct_time):
            return self.lower_strict() > trim_struct_time(other)
        raise TypeError("can't compare %s with %s" % (type(self).__name__, type(other).__name__))

    def __ge__(self, other):
        if isinstance(other, EDTFObject):
            return self.lower_strict() >= other.lower_strict()
        elif isinstance(other, date):
            return self.lower_strict() >= dt_to_struct_time(other)
        elif isinstance(other, struct_time):
            return self.lower_strict() >= trim_struct_time(other)
        raise TypeError("can't compare %s with %s" % (type(self).__name__, type(other).__name__))

    def __lt__(self, other):
        if isinstance(other, EDTFObject):
            return self.lower_strict() < other.lower_strict()
        elif isinstance(other, date):
            return self.lower_strict() < dt_to_struct_time(other)
        elif isinstance(other, struct_time):
            return self.lower_strict() < trim_struct_time(other)
        raise TypeError("can't compare %s with %s" % (type(self).__name__, type(other).__name__))

    def __le__(self, other):
        if isinstance(other, EDTFObject):
            return self.lower_strict() <= other.lower_strict()
        elif isinstance(other, date):
            return self.lower_strict() <= dt_to_struct_time(other)
        elif isinstance(other, struct_time):
            return self.lower_strict() <= trim_struct_time(other)
        raise TypeError("can't compare %s with %s" % (type(self).__name__, type(other).__name__))
# (* ************************** Level 0 *************************** *)
class Date(EDTFObject):
    """An EDTF level-0 date: a year with optional month and day.

    Components are stored as the parsed strings, which may contain the EDTF
    unspecified markers 'u'/'x' (e.g. "19uu"); the _precise_* helpers resolve
    those to concrete earliest/latest values.
    """

    def set_year(self, y):
        # Year is the only mandatory component of a date.
        if y is None:
            raise AttributeError("Year must not be None")
        self._year = y

    def get_year(self):
        return self._year
    year = property(get_year, set_year)

    def set_month(self, m):
        self._month = m
        # A date without a month cannot meaningfully carry a day.
        if m is None:  # idiom fix: was `m == None`
            self.day = None

    def get_month(self):
        return self._month
    month = property(get_month, set_month)

    def __init__(self, year=None, month=None, day=None, **kwargs):
        # The parser sometimes wraps the values in a 'date'/'lower'/'upper'
        # dict; unwrap and re-dispatch in that case.
        for param in ('date', 'lower', 'upper'):
            if param in kwargs:
                self.__init__(**kwargs[param])
                return
        self.year = year  # Year is required, but sometimes passed in as a 'date' dict.
        self.month = month
        self.day = day

    def __str__(self):
        r = self.year
        if self.month:
            r += "-%s" % self.month
        if self.day:
            r += "-%s" % self.day
        return r

    def isoformat(self, default=date.max):
        """ISO-8601 string, filling any missing month/day from `default`."""
        return "%s-%02d-%02d" % (
            self.year,
            int(self.month or default.month),
            int(self.day or default.day),
        )

    def _precise_year(self, lean):
        # Replace any ambiguous characters in the year string with 0s or 9s
        if lean == EARLIEST:
            return int(re.sub(r'[xu]', r'0', self.year))
        else:
            return int(re.sub(r'[xu]', r'9', self.year))

    def _precise_month(self, lean):
        if self.month and self.month != "uu":
            try:
                return int(self.month)
            except ValueError:  # removed unused `as e` binding
                raise ValueError("Couldn't convert %s to int (in %s)" % (self.month, self))
        else:
            # Unspecified month: earliest is January, latest is December.
            return 1 if lean == EARLIEST else 12

    def _precise_day(self, lean):
        if not self.day or self.day == 'uu':
            if lean == EARLIEST:
                return 1
            else:
                # Latest day depends on the resolved month and (leap) year.
                return days_in_month(
                    self._precise_year(LATEST), self._precise_month(LATEST)
                )
        else:
            return int(self.day)

    def _strict_date(self, lean):
        """
        Return a `time.struct_time` representation of the date.
        """
        return struct_time(
            (
                self._precise_year(lean),
                self._precise_month(lean),
                self._precise_day(lean),
            ) + tuple(TIME_EMPTY_TIME) + tuple(TIME_EMPTY_EXTRAS)
        )

    @property
    def precision(self):
        # Most specific populated component wins.
        if self.day:
            return PRECISION_DAY
        if self.month:
            return PRECISION_MONTH
        return PRECISION_YEAR
class DateAndTime(EDTFObject):
    """A level-0 date plus a time component, rendered as `<date>T<time>`."""

    def __init__(self, date, time):
        self.date = date  # a Date instance (provides isoformat/_strict_date)
        self.time = time  # the time portion, kept as its original string

    def __str__(self):
        return self.isoformat()

    def isoformat(self):
        return self.date.isoformat() + "T" + self.time

    def _strict_date(self, lean):
        # The time component is ignored for strict-date purposes.
        return self.date._strict_date(lean)

    def __eq__(self, other):
        if isinstance(other, datetime):
            return self.isoformat() == other.isoformat()
        elif isinstance(other, struct_time):
            # NOTE(review): _strict_date() requires a `lean` argument, so
            # this raises TypeError as written — confirm intended lean.
            return self._strict_date() == trim_struct_time(other)
        return super(DateAndTime, self).__eq__(other)

    def __ne__(self, other):
        if isinstance(other, datetime):
            return self.isoformat() != other.isoformat()
        elif isinstance(other, struct_time):
            # NOTE(review): same missing-`lean` issue as __eq__.
            return self._strict_date() != trim_struct_time(other)
        return super(DateAndTime, self).__ne__(other)
class Interval(EDTFObject):
    """An EDTF interval 'lower/upper', e.g. '2004-06/2006-08'."""

    def __init__(self, lower, upper):
        self.lower = lower
        self.upper = upper

    def __str__(self):
        return "%s/%s" % (self.lower, self.upper)

    def _strict_date(self, lean):
        # Ask the relevant endpoint for its strict date; if that endpoint is
        # a bare string ('unknown'/'open') or yields no date, fall back to
        # deriving a date from the opposite endpoint.
        if lean == EARLIEST:
            try:
                r = self.lower._strict_date(lean)
                if r is None:
                    raise AttributeError
                return r
            except AttributeError: # it's a string, or no date. Result depends on the upper date
                upper = self.upper._strict_date(LATEST)
                return apply_delta(sub, upper, appsettings.DELTA_IF_UNKNOWN)
        else:
            try:
                r = self.upper._strict_date(lean)
                if r is None:
                    raise AttributeError
                return r
            except AttributeError: # an 'unknown' or 'open' string - depends on the lower date
                if self.upper and (self.upper == "open" or self.upper.date == "open"):
                    return dt_to_struct_time(date.today()) # it's still happening
                else:
                    lower = self.lower._strict_date(EARLIEST)
                    return apply_delta(add, lower, appsettings.DELTA_IF_UNKNOWN)
# (* ************************** Level 1 *************************** *)
class UA(EDTFObject):
    """An uncertainty/approximation marker: '?', '~', or both."""

    @classmethod
    def parse_action(cls, toks):
        return cls(*toks.asList())

    def __init__(self, *args):
        assert len(args) == 1
        flags = args[0]
        self.is_uncertain = "?" in flags
        self.is_approximate = "~" in flags

    def __str__(self):
        rendered = "?" if self.is_uncertain else ""
        if self.is_approximate:
            rendered += "~"
        return rendered

    def _get_multiplier(self):
        # NOTE: implicitly returns None when neither flag is set.
        if self.is_uncertain and self.is_approximate:
            return appsettings.MULTIPLIER_IF_BOTH
        if self.is_uncertain:
            return appsettings.MULTIPLIER_IF_UNCERTAIN
        if self.is_approximate:
            return appsettings.MULTIPLIER_IF_APPROXIMATE
class UncertainOrApproximate(EDTFObject):
    """A date (or the strings 'open'/'unknown') plus an optional UA marker."""

    def __init__(self, date, ua):
        self.date = date
        self.ua = ua

    def __str__(self):
        if self.ua:
            return "%s%s" % (self.date, self.ua)
        else:
            return str(self.date)

    def _strict_date(self, lean):
        if self.date == "open":
            # Still happening: treat as today.
            return dt_to_struct_time(date.today())
        if self.date =="unknown":
            return None # depends on the other date
        return self.date._strict_date(lean)

    def _get_fuzzy_padding(self, lean):
        """Return a relativedelta of padding implied by the UA marker."""
        if not self.ua:
            return relativedelta(0)
        multiplier = self.ua._get_multiplier()
        # Pad at the granularity of the date's precision.
        # NOTE(review): implicitly returns None if precision matches none of
        # the three constants - confirm that cannot happen.
        if self.date.precision == PRECISION_DAY:
            return multiplier * appsettings.PADDING_DAY_PRECISION
        elif self.date.precision == PRECISION_MONTH:
            return multiplier * appsettings.PADDING_MONTH_PRECISION
        elif self.date.precision == PRECISION_YEAR:
            return multiplier * appsettings.PADDING_YEAR_PRECISION
class Unspecified(Date):
    # A date with unspecified ('u') digits; Date's _precise_* methods already
    # substitute 'u'/'x' placeholders, so no overrides are needed.
    pass
class Level1Interval(Interval):
    """An interval whose endpoints may carry uncertain/approximate markers."""

    def __init__(self, lower, upper):
        self.lower = UncertainOrApproximate(**lower)
        self.upper = UncertainOrApproximate(**upper)

    def _get_fuzzy_padding(self, lean):
        # Padding comes from whichever endpoint the lean refers to.
        endpoint = self.lower if lean == EARLIEST \
            else self.upper if lean == LATEST else None
        if endpoint is not None:
            return endpoint._get_fuzzy_padding(lean)
class LongYear(EDTFObject):
    """A year beyond four digits, written in EDTF as 'y<digits>'."""

    def __init__(self, year):
        self.year = year

    def __str__(self):
        return "y%s" % self.year

    def _precise_year(self):
        return int(self.year)

    def _strict_date(self, lean):
        precise = self._precise_year()
        # Earliest is Jan 1, latest is Dec 31 of the year.
        month_day = [1, 1] if lean == EARLIEST else [12, 31]
        return struct_time(
            [precise] + month_day + TIME_EMPTY_TIME + TIME_EMPTY_EXTRAS)
class Season(Date):
    """A year plus a season code (e.g. '2001-21'), mapped to a month range."""

    def __init__(self, year, season, **kwargs):
        # NOTE(review): Date.__init__ is not called and self.month is never
        # set, so reading .month on a Season looks like it would raise
        # AttributeError - confirm callers only use the _precise_* methods.
        self.year = year
        self.season = season # use season to look up month
        # day isn't part of the 'season' spec, but it helps the inherited
        # `Date` methods do their thing.
        self.day = None

    def __str__(self):
        return "%s-%s" % (self.year, self.season)

    def _precise_month(self, lean):
        # Seasons resolve to a (first_month, last_month) range from settings.
        rng = appsettings.SEASON_MONTHS_RANGE[int(self.season)]
        if lean == EARLIEST:
            return rng[0]
        else:
            return rng[1]
# (* ************************** Level 2 *************************** *)
class PartialUncertainOrApproximate(Date):
    """
    A Level 2 date where individual components (year, month, day, or groups
    of them, or a season) may each carry their own uncertain/approximate
    (UA) marker.
    """

    def set_year(self, y): # Year can be None.
        self._year = y
    year = property(Date.get_year, set_year)

    def __init__(
        self, year=None, month=None, day=None,
        year_ua=False, month_ua = False, day_ua = False,
        year_month_ua = False, month_day_ua = False,
        ssn=None, season_ua=False, all_ua=False
    ):
        # Each *_ua attribute is either False or a UA instance applying to
        # that component (or component group); all_ua applies to the whole.
        self.year = year
        self.month = month
        self.day = day
        self.year_ua = year_ua
        self.month_ua = month_ua
        self.day_ua = day_ua
        self.year_month_ua = year_month_ua
        self.month_day_ua = month_day_ua
        self.season = ssn
        self.season_ua = season_ua
        self.all_ua = all_ua

    def __str__(self):
        # Render each component with its own UA marker, bracketing where the
        # EDTF syntax requires it to disambiguate scope.
        if self.season_ua:
            return "%s%s" % (self.season, self.season_ua)
        if self.year_ua:
            y = "%s%s" % (self.year, self.year_ua)
        else:
            y = str(self.year)
        if self.month_ua:
            m = "(%s)%s" % (self.month, self.month_ua)
        else:
            m = str(self.month)
        if self.day:
            if self.day_ua:
                d = "(%s)%s" % (self.day, self.day_ua)
            else:
                d = str(self.day)
        else:
            d = None
        if self.year_month_ua: # year/month approximate. No brackets needed.
            ym = "%s-%s%s" % (y, m, self.year_month_ua)
            if d:
                result = "%s-%s" % (ym, d)
            else:
                result = ym
        elif self.month_day_ua:
            if self.year_ua: # we don't need the brackets round month and day
                result = "%s-%s-%s%s" % (y, m, d, self.month_day_ua)
            else:
                result = "%s-(%s-%s)%s" % (y, m, d, self.month_day_ua)
        else:
            if d:
                result = "%s-%s-%s" % (y, m, d)
            else:
                result = "%s-%s" % (y, m)
        if self.all_ua:
            result = "(%s)%s" % (result, self.all_ua)
        return result

    def _precise_year(self, lean):
        # Delegate to the season when present; otherwise behave like a Date.
        if self.season:
            return self.season._precise_year(lean)
        return super(PartialUncertainOrApproximate, self)._precise_year(lean)

    def _precise_month(self, lean):
        if self.season:
            return self.season._precise_month(lean)
        return super(PartialUncertainOrApproximate, self)._precise_month(lean)

    def _precise_day(self, lean):
        if self.season:
            return self.season._precise_day(lean)
        return super(PartialUncertainOrApproximate, self)._precise_day(lean)

    def _get_fuzzy_padding(self, lean):
        """
        Sum the padding implied by every UA marker present.

        This is not a perfect interpretation as fuzziness is introduced for
        redundant uncertainty modifiers e.g. (2006~)~ will get two sets of
        fuzziness.
        """
        result = relativedelta(0)
        if self.year_ua:
            result += appsettings.PADDING_YEAR_PRECISION * self.year_ua._get_multiplier()
        if self.month_ua:
            result += appsettings.PADDING_MONTH_PRECISION * self.month_ua._get_multiplier()
        if self.day_ua:
            result += appsettings.PADDING_DAY_PRECISION * self.day_ua._get_multiplier()
        # Grouped markers pad every component they cover.
        if self.year_month_ua:
            result += appsettings.PADDING_YEAR_PRECISION * self.year_month_ua._get_multiplier()
            result += appsettings.PADDING_MONTH_PRECISION * self.year_month_ua._get_multiplier()
        if self.month_day_ua:
            result += appsettings.PADDING_DAY_PRECISION * self.month_day_ua._get_multiplier()
            result += appsettings.PADDING_MONTH_PRECISION * self.month_day_ua._get_multiplier()
        if self.season_ua:
            result += appsettings.PADDING_SEASON_PRECISION * self.season_ua._get_multiplier()
        if self.all_ua:
            # Whole-date marker pads down to the date's own precision.
            multiplier = self.all_ua._get_multiplier()
            if self.precision == PRECISION_DAY:
                result += multiplier * appsettings.PADDING_DAY_PRECISION
                result += multiplier * appsettings.PADDING_MONTH_PRECISION
                result += multiplier * appsettings.PADDING_YEAR_PRECISION
            elif self.precision == PRECISION_MONTH:
                result += multiplier * appsettings.PADDING_MONTH_PRECISION
                result += multiplier * appsettings.PADDING_YEAR_PRECISION
            elif self.precision == PRECISION_YEAR:
                result += multiplier * appsettings.PADDING_YEAR_PRECISION
        return result
class PartialUnspecified(Unspecified):
    # Level 2 variant of Unspecified; inherits all behaviour unchanged.
    pass
class Consecutives(Interval):
    """A consecutive range 'lower..upper' where either bound is optional."""

    def __init__(self, lower=None, upper=None):
        # Bounds may arrive as raw strings; parse them into Date objects.
        if lower and not isinstance(lower, EDTFObject):
            lower = Date.parse(lower)
        if upper and not isinstance(upper, EDTFObject):
            upper = Date.parse(upper)
        self.lower = lower
        self.upper = upper

    def __str__(self):
        return "%s..%s" % (self.lower or '', self.upper or '')
class EarlierConsecutives(Consecutives):
    # Presumably a range with only an upper bound (e.g. '..2006'); the
    # grammar that distinguishes it is defined elsewhere - confirm there.
    pass
class LaterConsecutives(Consecutives):
    # Presumably a range with only a lower bound (e.g. '2006..'); the
    # grammar that distinguishes it is defined elsewhere - confirm there.
    pass
class OneOfASet(EDTFObject):
    """One date out of a bracketed set, rendered '[a, b, ...]'."""

    @classmethod
    def parse_action(cls, toks):
        # Keep only parsed EDTF objects; drop punctuation tokens.
        members = [t for t in toks.asList() if isinstance(t, EDTFObject)]
        return cls(*members)

    def __init__(self, *args):
        self.objects = args

    def __str__(self):
        return "[%s]" % (", ".join(str(o) for o in self.objects))

    def _strict_date(self, lean):
        # Latest member for LATEST, earliest member otherwise.
        pick = max if lean == LATEST else min
        return pick(o._strict_date(lean) for o in self.objects)
class MultipleDates(EDTFObject):
    """A braced collection of dates, rendered '{a, b, ...}'."""

    @classmethod
    def parse_action(cls, toks):
        # Keep only parsed EDTF objects; drop punctuation tokens.
        members = [t for t in toks.asList() if isinstance(t, EDTFObject)]
        return cls(*members)

    def __init__(self, *args):
        self.objects = args

    def __str__(self):
        return "{%s}" % (", ".join(str(o) for o in self.objects))

    def _strict_date(self, lean):
        # Latest member for LATEST, earliest member otherwise.
        pick = max if lean == LATEST else min
        return pick(o._strict_date(lean) for o in self.objects)
class MaskedPrecision(Date):
    # Level 2 masked-precision date (e.g. '196x'); Date's 'x' substitution
    # already covers the behaviour, so no overrides are needed.
    pass
class Level2Interval(Level1Interval):
    """An interval whose endpoints are partial uncertain/approximate dates."""

    def __init__(self, lower, upper):
        # Check whether incoming lower/upper values are single-item lists, and
        # if so take just the first item. This works around what I *think* is a
        # bug in the grammar that provides us with single-item lists of
        # `PartialUncertainOrApproximate` items for lower/upper values.
        if isinstance(lower, (tuple, list)) and len(lower) == 1:
            self.lower = lower[0]
        else:
            self.lower = lower
        # Bug fix: this previously tested isinstance(lower, ...), so a
        # single-item `upper` list was never unwrapped (and a plain lower
        # with an empty/absent upper could misbehave). Test `upper` itself.
        if isinstance(upper, (tuple, list)) and len(upper) == 1:
            self.upper = upper[0]
        else:
            self.upper = upper
class ExponentialYear(LongYear):
    """A year in scientific notation, e.g. '17e8' (optionally 'p' precision)."""

    def __init__(self, base, exponent, precision=None):
        self.base = base
        self.exponent = exponent
        self.precision = precision

    def _precise_year(self):
        # base * 10^exponent
        return int(self.base) * 10 ** int(self.exponent)

    def get_year(self):
        suffix = 'e%s' % self.exponent
        if self.precision:
            suffix += 'p%s' % self.precision
        return '%s%s' % (self.base, suffix)
    year = property(get_year)
|
|
from django import forms
from django.conf import settings
try:
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
except ImportError:
from django.contrib.contenttypes.generic import GenericForeignKey, GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.db.models.query import QuerySet
from django.forms.models import BaseModelFormSet
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
# Fall back for Django versions without the admin_static template tag module:
# build the URL from ADMIN_MEDIA_PREFIX by hand.
try:
    from django.contrib.admin.templatetags.admin_static import static
except ImportError:
    def static(somestring):
        # Prefix the relative path with the configured admin media prefix.
        return "%s%s" % (settings.ADMIN_MEDIA_PREFIX, somestring)
from contentrelations.settings import JS_PREFIX
class GenericRawIdWidget(forms.TextInput):
    """
    Raw-id text input for a generic foreign key, with a lookup link whose
    data attribute maps content-type ids to their admin changelist URLs.
    """

    def __init__(self, attrs=None):
        from django.core.urlresolvers import reverse, NoReverseMatch
        super(GenericRawIdWidget, self).__init__(attrs)
        ctypes = ContentType.objects.all().order_by('id').values_list('id', 'app_label', 'model')
        elements = {}
        for x, y, z in ctypes:
            try:
                elements[x] = reverse("admin:%s_%s_changelist" % (y, z))
            except NoReverseMatch:
                # Model not registered in the admin; skip it.
                continue
        # Serialize as a JS object literal consumed by genericlookup.js.
        self.content_types = "{%s}" % ",".join(["'%s': '%s'" % (k, v) for k, v in elements.items()])

    def render(self, name, value, attrs=None):
        if attrs is None:
            attrs = {}
        if 'class' not in attrs:
            attrs['class'] = 'vGenericRawIdAdminField'
        output = [super(GenericRawIdWidget, self).render(name, value, attrs)]
        output.append('<a id="lookup_id_%(name)s" class="gen-related-lookup" data-contenttypes="%(contenttypes)s" href="#">' %
            {'name': name, 'contenttypes': self.content_types})
        output.append(' <img src="%s" width="16" height="16" alt="%s" />' % (static('admin/img/selector-search.gif'), _('Lookup')))
        output.append('</a>')
        return mark_safe(u''.join(output))

    class Media:
        js = ('%scontentrelations/js/genericlookup.js' % JS_PREFIX, )
class InlineGenericForeignKeyHiddenInput(forms.MultiWidget):
    """Pair of hidden inputs holding 'content_type_id-object_id'."""

    def __init__(self, attrs=None):
        hidden = forms.widgets.HiddenInput
        super(InlineGenericForeignKeyHiddenInput, self).__init__(
            (hidden(), hidden()), attrs)

    def _has_changed(self, initial, data):
        # The value identifies the source instance and never counts as changed.
        return False

    def decompress(self, value):
        """Split 'ct-id' back into its two integer components."""
        if value:
            try:
                return [int(part) for part in value.split('-')]
            except ValueError:
                pass
        return [None, None]
class InlineGenericForeignKeyField(forms.MultiValueField):
    """
    A basic integer field that deals with validating the given value to a
    given source instance in an inline.
    """
    widget = InlineGenericForeignKeyHiddenInput
    hidden_widget = InlineGenericForeignKeyHiddenInput
    default_error_messages = {
        'invalid_choice': _('The inline generic foreign key did not match the '
                            'source instance.'),
        # Typo fix in the user-facing message: "foregin" -> "foreign".
        'invalid_type': _('The inline generic foreign key did not get a list of'
                          ' values.')
    }

    def __init__(self, source_instance, *args, **kwargs):
        self.source_instance = source_instance
        self.content_type = ContentType.objects.get_for_model(source_instance)
        self.object_id = source_instance.pk
        fields = (
            forms.models.InlineForeignKeyField(source_instance, required=False),
            forms.IntegerField(widget=forms.HiddenInput, required=False),
        )
        # Initial value in 'ct-id' form, matching the widget's decompress().
        kwargs['initial'] = "%s-%s" % (self.content_type.pk, self.object_id)
        super(InlineGenericForeignKeyField, self).__init__(fields, *args, **kwargs)

    def compress(self, data_list):
        """Fold [ct_id, obj_id] back into 'ct-id', updating self as we go."""
        if data_list:
            self.content_type = ContentType.objects.get(id=data_list[0])
            self.object_id = data_list[1]
            compressed = "-".join(data_list)
            return compressed
        return None

    def clean(self, value):
        """
        Validate that the posted (ct, id) pair still points at the source
        instance; raises ValidationError otherwise.
        """
        # Any empty component means "no value".
        for i in value:
            if not i:
                return None
        # Dead code removed: the previous try/except Exception wrapper only
        # re-raised and had no effect.
        if not isinstance(value, (tuple, list)):
            raise ValidationError(self.error_messages['invalid_type'])
        if len(value) == 2 and \
                int(value[0]) == self.content_type.pk and \
                int(value[1]) == self.object_id:
            return self.compress(value)
        elif len(value) == 2 and \
                int(value[0]) == self.content_type.pk and \
                self.object_id is None:
            # This is the case if you "save as new"
            return self.compress(value)
        else:
            raise ValidationError(self.error_messages['invalid_choice'])
class GenericM2MInlineFormSet(BaseModelFormSet):
    """
    Basic implementation of a formset with no ForeignKey

    Class attribute ``fk`` (a GenericRelation with .name/.verbose_name) is
    attached by genericm2m_inlineformset_factory.
    """
    def __init__(self, data=None, files=None, instance=None,
        save_as_new=False, prefix=None, queryset=None, **kwargs):
        if instance is None:
            # Fall back to an empty instance of the related model.
            self.instance = self.fk.rel.to()
        else:
            self.instance = instance
        self.save_as_new = save_as_new
        if queryset is None:
            queryset = self.model._default_manager
        # NOTE(review): the queryset computed above is immediately shadowed
        # by the related-manager queryset below - confirm this is intended.
        qs = getattr(self.instance, self.rel_name).all()
        super(GenericM2MInlineFormSet, self).__init__(data, files,
            prefix=prefix, queryset=qs, **kwargs)

    @classmethod
    def get_default_prefix(cls):
        return 'genericm2mform'

    def initial_form_count(self):
        # When saving as new, every form is treated as an "extra" (new) form.
        if self.save_as_new:
            return 0
        return super(GenericM2MInlineFormSet, self).initial_form_count()

    def _construct_form(self, i, **kwargs):
        form = super(GenericM2MInlineFormSet, self)._construct_form(i, **kwargs)
        if self.save_as_new:
            # Remove the primary key from the form's data, we are only
            # creating new instances
            form.data[form.add_prefix(self._pk_field.name)] = None
            # Remove the foreign key from the form's data
            form.data[form.add_prefix(self.fk.name)] = None
        # Set the fk value here so that the form can do it's validation.
        # setattr(form.instance, self.fk.get_attname(), self.instance.pk)
        return form

    def save_new(self, form, commit=True):
        """Create a new relation object pointing at the source instance."""
        # Use commit=False so we can assign the source key afterwards, then
        # save the object.
        obj = form.save(commit=False)
        obj.source = form.fields['source'].source_instance
        obj.object = form.cleaned_data['object_type'].get_object_for_this_type(
            id=form.cleaned_data['object_id']
        )
        if commit:
            obj.save()
        # form.save_m2m() can be called via the formset later on if commit=False
        if commit and hasattr(form, 'save_m2m'):
            form.save_m2m()
        return obj

    def save_new_objects(self, commit=True):
        self.new_objects = []
        for form in self.extra_forms:
            if not form.has_changed():
                continue
            # If someone has marked an add form for deletion, don't save the
            # object.
            # if self.can_delete and self._should_delete_form(form):
            #     continue
            self.new_objects.append(self.save_new(form, commit=commit))
            if not commit:
                self.saved_forms.append(form)
        return self.new_objects

    def save(self, commit=True):
        """
        Saves model instances for every form, adding and changing instances
        as necessary, and returns the list of instances.
        """
        if not commit:
            self.saved_forms = []
            def save_m2m():
                for form in self.saved_forms:
                    form.save_m2m()
            self.save_m2m = save_m2m
        return self.save_existing_objects(commit) + self.save_new_objects(commit)

    def add_fields(self, form, index):
        super(GenericM2MInlineFormSet, self).add_fields(form, index)
        # The foreign key field might not be on the form, so we poke at the
        # Model field to get the label, since we need that for error messages.
        name = self.fk.name
        default_label = capfirst(self.fk.verbose_name)
        kwargs = {
            'label': getattr(form.fields.get(name), 'label', default_label)
        }
        form.fields[name] = InlineGenericForeignKeyField(self.instance, **kwargs)
        # Add the generated field to form._meta.fields if it's defined to make
        # sure validation isn't skipped on that field.
        if form._meta.fields:
            if isinstance(form._meta.fields, tuple):
                form._meta.fields = list(form._meta.fields)
            form._meta.fields.append(self.fk.name)

    def get_unique_error_message(self, unique_check):
        # The generic FK is implied by the inline; exclude it from the message.
        unique_check = [field for field in unique_check if field != self.fk.name]
        return super(GenericM2MInlineFormSet, self).get_unique_error_message(unique_check)
def genericm2m_inlineformset_factory(source_model, model, form=forms.ModelForm,
                                     formset=GenericM2MInlineFormSet, fk_name=None,
                                     fields=None, exclude=None, extra=3, can_order=True,
                                     can_delete=True, max_num=None,
                                     formfield_callback=None):
    """
    Returns an ``InlineFormSet`` for the given kwargs.
    You must provide ``fk_name`` if ``model`` has more than one ``ForeignKey``
    to ``source_model``.
    """
    # Fake foreign-key descriptor the formset uses for labels and field names.
    fk = GenericRelation(
        source_model,
        verbose_name='source',
        related_query_name=fk_name or 'related')
    fk.name = 'source'
    kwargs = {
        'form': form,
        'formfield_callback': formfield_callback,
        # Bug fix: honour the ``formset`` argument; previously this was
        # hard-coded to GenericM2MInlineFormSet, so custom formset classes
        # passed by callers were silently ignored.
        'formset': formset,
        'extra': extra,
        'can_delete': can_delete,
        'can_order': can_order,
        'fields': fields,
        'exclude': exclude,
        'max_num': max_num,
    }
    FormSet = forms.models.modelformset_factory(model, **kwargs)  # NOQA
    FormSet.fk = fk
    FormSet.source_type_field = 'source_type'
    return FormSet
class GFKOptimizedQuerySet(QuerySet):
    """
    QuerySet that can resolve the targets of a generic foreign key in bulk,
    avoiding one query per row.
    """

    def __init__(self, *args, **kwargs):
        # pop the gfk_field from the kwargs if its passed in explicitly
        self._gfk_field = kwargs.pop('gfk_field', None)
        # call the source class' initializer
        super(GFKOptimizedQuerySet, self).__init__(*args, **kwargs)

    def _clone(self, *args, **kwargs):
        # Propagate the cached GFK field through Django's queryset cloning.
        clone = super(GFKOptimizedQuerySet, self)._clone(*args, **kwargs)
        clone._gfk_field = self._gfk_field
        return clone

    def get_gfk(self):
        """Return the model's GenericForeignKey, discovering it lazily."""
        if not self._gfk_field:
            for field in self.model._meta.virtual_fields:
                if isinstance(field, GenericForeignKey):
                    self._gfk_field = field
                    break
        return self._gfk_field

    def generic_objects(self):
        """
        Return the list of objects pointed to by each row's generic FK, in
        queryset order, fetched with one ``in_bulk`` query per content type.
        """
        clone = self._clone()
        ctypes_and_fks = {}
        gfk_field = self.get_gfk()
        ctype_field = '%s_id' % gfk_field.ct_field
        fk_field = gfk_field.fk_field
        # Group target pks by content type.
        for obj in clone:
            ctype = ContentType.objects.get_for_id(getattr(obj, ctype_field))
            obj_id = getattr(obj, fk_field)
            ctypes_and_fks.setdefault(ctype, [])
            ctypes_and_fks[ctype].append(obj_id)
        # One bulk fetch per content type.
        gfk_objects = {}
        for ctype, obj_ids in ctypes_and_fks.items():
            gfk_objects[ctype.pk] = ctype.model_class()._default_manager.in_bulk(obj_ids)
        # Re-assemble in the original row order.
        obj_list = []
        for obj in clone:
            obj_list.append(gfk_objects[getattr(obj, ctype_field)][getattr(obj, fk_field)])
        return obj_list
|
|
"""Various high level TF models."""
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.ops import dnn_ops
from tensorflow.contrib.learn.python.learn.ops import losses_ops
from tensorflow.contrib.learn.python.learn.ops import autoencoder_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops as array_ops_
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope as vs
def linear_regression_zero_init(X, y):
    """Creates a linear regression TensorFlow subgraph, in which weights and
    bias terms are initialized to exactly zero.

    Args:
        X: tensor or placeholder for input features.
        y: tensor or placeholder for target.

    Returns:
        Predictions and loss tensors.
    """
    # Zero mean and zero stddev collapse the normal initializer to zeros.
    return linear_regression(X, y, init_mean=0.0, init_stddev=0.0)
def logistic_regression_zero_init(X, y):
    """Creates a logistic regression TensorFlow subgraph, in which weights and
    bias terms are initialized to exactly zero.

    Args:
        X: tensor or placeholder for input features.
        y: tensor or placeholder for target.

    Returns:
        Predictions and loss tensors.
    """
    # Zero mean and zero stddev collapse the normal initializer to zeros.
    return logistic_regression(X, y, init_mean=0.0, init_stddev=0.0)
def linear_regression(X, y, init_mean=None, init_stddev=1.0):
    """Creates linear regression TensorFlow subgraph.

    Args:
        X: tensor or placeholder for input features.
        y: tensor or placeholder for target.
        init_mean: the mean value to use for initialization.
        init_stddev: the standard deviation to use for initialization.

    Returns:
        Predictions and loss tensors.

    Side effects:
        The variables linear_regression.weights and linear_regression.bias are
        initialized as follows. If init_mean is not None, then initialization
        will be done using a random normal initializer with the given init_mean
        and init_stddev. (These may be set to 0.0 each if a zero initialization
        is desirable for convex use cases.) If init_mean is None, then the
        uniform_unit_scaling_initializer will be used.
    """
    with vs.variable_scope('linear_regression'):
        logging_ops.histogram_summary('linear_regression.X', X)
        logging_ops.histogram_summary('linear_regression.y', y)
        # 1-D targets produce a single output column.
        y_shape = y.get_shape()
        if len(y_shape) == 1:
            output_shape = 1
        else:
            output_shape = y_shape[1]
        # Set up the requested initialization.
        if (init_mean is None):
            # Default variable initializer (uniform unit scaling).
            weights = vs.get_variable('weights', [X.get_shape()[1], output_shape])
            bias = vs.get_variable('bias', [output_shape])
        else:
            weights = vs.get_variable('weights', [X.get_shape()[1], output_shape],
                initializer=init_ops.random_normal_initializer(
                    init_mean, init_stddev))
            bias = vs.get_variable('bias', [output_shape],
                initializer=init_ops.random_normal_initializer(
                    init_mean, init_stddev))
        logging_ops.histogram_summary('linear_regression.weights', weights)
        logging_ops.histogram_summary('linear_regression.bias', bias)
        return losses_ops.mean_squared_error_regressor(X, y, weights, bias)
def logistic_regression(X,
                        y,
                        class_weight=None,
                        init_mean=None,
                        init_stddev=1.0):
    """Creates logistic regression TensorFlow subgraph.

    Args:
        X: tensor or placeholder for input features,
           shape should be [batch_size, n_features].
        y: tensor or placeholder for target,
           shape should be [batch_size, n_classes].
        class_weight: tensor, [n_classes], where for each class
                      it has weight of the class. If not provided
                      will check if graph contains tensor `class_weight:0`.
                      If that is not provided either all ones are used.
        init_mean: the mean value to use for initialization.
        init_stddev: the standard deviation to use for initialization.

    Returns:
        Predictions and loss tensors.

    Side effects:
        The variables linear_regression.weights and linear_regression.bias are
        initialized as follows. If init_mean is not None, then initialization
        will be done using a random normal initializer with the given init_mean
        and init_stddev. (These may be set to 0.0 each if a zero initialization
        is desirable for convex use cases.) If init_mean is None, then the
        uniform_unit_scaling_initializer will be used.
    """
    with vs.variable_scope('logistic_regression'):
        logging_ops.histogram_summary('logistic_regression.X', X)
        logging_ops.histogram_summary('logistic_regression.y', y)
        # Set up the requested initialization.
        if (init_mean is None):
            # Default variable initializer (uniform unit scaling).
            weights = vs.get_variable('weights',
                                      [X.get_shape()[1], y.get_shape()[-1]])
            bias = vs.get_variable('bias', [y.get_shape()[-1]])
        else:
            weights = vs.get_variable('weights',
                                      [X.get_shape()[1], y.get_shape()[-1]],
                                      initializer=init_ops.random_normal_initializer(
                                          init_mean, init_stddev))
            bias = vs.get_variable('bias', [y.get_shape()[-1]],
                                   initializer=init_ops.random_normal_initializer(
                                       init_mean, init_stddev))
        logging_ops.histogram_summary('logistic_regression.weights', weights)
        logging_ops.histogram_summary('logistic_regression.bias', bias)
        # If no class weight provided, try to retrieve one from pre-defined
        # tensor name in the graph.
        if not class_weight:
            try:
                class_weight = ops.get_default_graph().get_tensor_by_name(
                    'class_weight:0')
            except KeyError:
                pass
        return losses_ops.softmax_classifier(X,
                                             y,
                                             weights,
                                             bias,
                                             class_weight=class_weight)
def get_dnn_model(hidden_units, target_predictor_fn, dropout=None):
    """Build a factory for a DNN TensorFlow subgraph.

    Args:
        hidden_units: List of values of hidden units for layers.
        target_predictor_fn: Function that will predict target from input
                             features (e.g. logistic or linear regression);
                             takes X, y and returns predictions and loss
                             tensors.
        dropout: When not none, causes dropout regularization to be used,
                 with the specified probability of removing a given
                 coordinate.

    Returns:
        A function that creates the subgraph.
    """
    def dnn_estimator(X, y):
        """DNN estimator with target predictor function on top."""
        hidden = dnn_ops.dnn(X, hidden_units, dropout=dropout)
        return target_predictor_fn(hidden, y)
    return dnn_estimator
def get_autoencoder_model(hidden_units, target_predictor_fn,
                          activation, add_noise=None, dropout=None):
    """Build a factory for an autoencoder TensorFlow subgraph.

    Args:
        hidden_units: List of values of hidden units for layers.
        target_predictor_fn: Function that will predict target from input
                             features; takes X, y and returns predictions
                             and loss tensors.
        activation: activation function used to map inner latent layer onto
                    reconstruction layer.
        add_noise: a function that adds noise to tensor_in,
                   e.g. def add_noise(x):
                            return(x + np.random.normal(0, 0.1, (len(x), len(x[0]))))
        dropout: When not none, causes dropout regularization to be used,
                 with the specified probability of removing a given
                 coordinate.

    Returns:
        A function that creates the subgraph.
    """
    def dnn_autoencoder_estimator(X):
        """Autoencoder estimator with target predictor function on top."""
        encoder, decoder = autoencoder_ops.dnn_autoencoder(
            X, hidden_units, activation,
            add_noise=add_noise, dropout=dropout)
        # Predict the input back from its reconstruction.
        return encoder, decoder, target_predictor_fn(X, decoder)
    return dnn_autoencoder_estimator
## This will be in Tensorflow 0.7.
## TODO(ilblackdragon): Clean this up when it's released
def _reverse_seq(input_seq, lengths):
"""Reverse a list of Tensors up to specified lengths.
Args:
input_seq: Sequence of seq_len tensors of dimension (batch_size, depth)
lengths: A tensor of dimension batch_size, containing lengths for each
sequence in the batch. If "None" is specified, simply
reverses the list.
Returns:
time-reversed sequence
"""
if lengths is None:
return list(reversed(input_seq))
for input_ in input_seq:
input_.set_shape(input_.get_shape().with_rank(2))
# Join into (time, batch_size, depth)
s_joined = array_ops_.pack(input_seq)
# Reverse along dimension 0
s_reversed = array_ops_.reverse_sequence(s_joined, lengths, 0, 1)
# Split again into list
result = array_ops_.unpack(s_reversed)
return result
def bidirectional_rnn(cell_fw,
                      cell_bw,
                      inputs,
                      initial_state_fw=None,
                      initial_state_bw=None,
                      dtype=None,
                      sequence_length=None,
                      scope=None):
    """Creates a bidirectional recurrent neural network.

    Similar to the unidirectional case (rnn) but takes input and builds
    independent forward and backward RNNs with the final forward and backward
    outputs depth-concatenated, such that the output will have the format
    [time][batch][cell_fw.output_size + cell_bw.output_size]. The input_size of
    forward and backward cell must match. The initial state for both directions
    is zero by default (but can be set optionally) and no intermediate states
    are ever returned -- the network is fully unrolled for the given (passed in)
    length(s) of the sequence(s) or completely unrolled if length(s) is not
    given.

    Args:
        cell_fw: An instance of RNNCell, to be used for forward direction.
        cell_bw: An instance of RNNCell, to be used for backward direction.
        inputs: A length T list of inputs, each a tensor of shape
                [batch_size, cell.input_size].
        initial_state_fw: (optional) An initial state for the forward RNN.
                          This must be a tensor of appropriate type and shape
                          [batch_size x cell.state_size].
        initial_state_bw: (optional) Same as for initial_state_fw.
        dtype: (optional) The data type for the initial state. Required if
               either of the initial states are not provided.
        sequence_length: (optional) An int64 vector (tensor) of size
                         [batch_size],
                         containing the actual lengths for each of the
                         sequences.
        scope: VariableScope for the created subgraph; defaults to "BiRNN"

    Returns:
        A pair (outputs, state) where:
            outputs is a length T list of outputs (one for each input), which
            are depth-concatenated forward and backward outputs
            state is the concatenated final state of the forward and backward
            RNN

    Raises:
        TypeError: If "cell_fw" or "cell_bw" is not an instance of RNNCell.
        ValueError: If inputs is None or an empty list.
    """
    if not isinstance(cell_fw, nn.rnn_cell.RNNCell):
        raise TypeError('cell_fw must be an instance of RNNCell')
    if not isinstance(cell_bw, nn.rnn_cell.RNNCell):
        raise TypeError('cell_bw must be an instance of RNNCell')
    if not isinstance(inputs, list):
        raise TypeError('inputs must be a list')
    if not inputs:
        raise ValueError('inputs must not be empty')
    name = scope or 'BiRNN'
    # Forward direction
    with vs.variable_scope(name + '_FW'):
        output_fw, state_fw = nn.rnn(cell_fw, inputs, initial_state_fw, dtype,
                                     sequence_length)
    # Backward direction: run on the time-reversed inputs, then reverse the
    # outputs back so they align with the forward direction.
    with vs.variable_scope(name + '_BW'):
        tmp, state_bw = nn.rnn(cell_bw, _reverse_seq(inputs, sequence_length),
                               initial_state_bw, dtype, sequence_length)
    output_bw = _reverse_seq(tmp, sequence_length)
    # Concat each of the forward/backward outputs
    outputs = [array_ops_.concat(1, [fw, bw])
               for fw, bw in zip(output_fw, output_bw)]
    return outputs, array_ops_.concat(1, [state_fw, state_bw])
# End of Tensorflow 0.7
def get_rnn_model(rnn_size, cell_type, num_layers, input_op_fn, bidirectional,
                  target_predictor_fn, sequence_length, initial_state):
    """Returns a function that creates a RNN TensorFlow subgraph.

    Args:
        rnn_size: The size for rnn cell, e.g. size of your word embeddings.
        cell_type: The type of rnn cell, including rnn, gru, and lstm.
        num_layers: The number of layers of the rnn model.
        input_op_fn: Function that will transform the input tensor, such as
                     creating word embeddings, byte list, etc. This takes
                     an argument X for input and returns transformed X.
        bidirectional: boolean, Whether this is a bidirectional rnn.
        target_predictor_fn: Function that will predict target from input
                             features. This can be logistic regression,
                             linear regression or any other model,
                             that takes X, y and returns predictions and loss
                             tensors.
        sequence_length: If sequence_length is provided, dynamic calculation
                         is performed. This saves computational time when
                         unrolling past max sequence length. Required for
                         bidirectional RNNs.
        initial_state: An initial state for the RNN. This must be a tensor of
                       appropriate type and shape [batch_size x
                       cell.state_size].

    Returns:
        A function that creates the subgraph.
    """
    def rnn_estimator(X, y):
        """RNN estimator with target predictor function on top."""
        X = input_op_fn(X)
        # Map the cell_type string to a cell constructor.
        if cell_type == 'rnn':
            cell_fn = nn.rnn_cell.BasicRNNCell
        elif cell_type == 'gru':
            cell_fn = nn.rnn_cell.GRUCell
        elif cell_type == 'lstm':
            cell_fn = nn.rnn_cell.BasicLSTMCell
        else:
            raise ValueError('cell_type {} is not supported. '.format(cell_type))
        if bidirectional:
            # forward direction cell
            rnn_fw_cell = nn.rnn_cell.MultiRNNCell([cell_fn(rnn_size)] * num_layers)
            # backward direction cell
            rnn_bw_cell = nn.rnn_cell.MultiRNNCell([cell_fn(rnn_size)] * num_layers)
            # pylint: disable=unexpected-keyword-arg, no-value-for-parameter
            _, encoding = bidirectional_rnn(rnn_fw_cell,
                                            rnn_bw_cell,
                                            X,
                                            dtype=dtypes.float32,
                                            sequence_length=sequence_length,
                                            initial_state_fw=initial_state,
                                            initial_state_bw=initial_state)
        else:
            cell = nn.rnn_cell.MultiRNNCell([cell_fn(rnn_size)] * num_layers)
            _, encoding = nn.rnn(cell,
                                 X,
                                 dtype=dtypes.float32,
                                 sequence_length=sequence_length,
                                 initial_state=initial_state)
        # Predict from the final encoding only.
        return target_predictor_fn(encoding, y)
    return rnn_estimator
|
|
from octopus.modules.es import dao
class SpreadsheetJobDAO(dao.ESDAO):
    """DAO for spreadsheet upload jobs stored in the 'spreadsheet' index."""
    __type__ = 'spreadsheet'

    @classmethod
    def list_by_status(cls, status):
        """Return all spreadsheet jobs whose status code matches `status`."""
        return cls.object_query(SpreadsheetStatusQuery(status).query())

    @classmethod
    def query_by_filename(cls, filename):
        """Return spreadsheet jobs with an exact filename match."""
        return cls.object_query(terms={"filename.exact": filename})

    @classmethod
    def queue_length(cls, sheet_id, max=10):
        """Position of `sheet_id` among the oldest `max` submitted jobs.

        Returns max + 1 when the sheet is not within the first `max` entries.
        """
        submitted = cls.object_query(SpreadsheetStatusQuery("submitted", size=max).query())
        for pos, sheet in enumerate(submitted):
            if sheet.id == sheet_id:
                return pos
        return max + 1

    @property
    def pc_complete(self):
        """Percentage completeness of this upload across EPMC and OAG."""
        total, epmc, oag = RecordDAO.upload_completeness(self.id)
        if total == 0:
            # we will get a divide-by-zero error
            return 0.0  # 100% isn't right, but 0% isn't really right either
        done = float(epmc.get("T", 0.0)) + float(oag.get("T", 0.0))
        return ((done / 2) / float(total)) * 100.0

    def list_duplicate_identifiers(self):
        """Identifiers that occur more than once within this upload."""
        return RecordDAO.list_duplicates(self.id)
class SpreadsheetStatusQuery(object):
    """ES query for spreadsheet jobs with a given status code, oldest first."""

    def __init__(self, status, size=10):
        self.status = status
        self.size = size

    def query(self):
        """Return the raw ES query body."""
        return {
            "query": {"term": {"status.code.exact": self.status}},
            "sort": [{"created_date": {"order": "asc"}}],
            "size": self.size,
        }
######################################################
class RecordDAO(dao.ESDAO):
    """DAO for individual records belonging to a spreadsheet upload."""
    __type__ = "record"

    @classmethod
    def list_by_upload(cls, sheet_id, page_size=10000):
        """List records for an upload, ordered by their position in the sheet."""
        q = RecordSheetQuery(sheet_id, page_size)
        return cls.object_query(q.query())

    @classmethod
    def count_by_upload(cls, sheet_id):
        """Count the records attached to the given upload."""
        q = RecordSheetQuery(sheet_id, 0)
        return cls.count(q.query())

    @classmethod
    def get_by_identifier(cls, identifier, upload, type=None):
        """Iterate over records in `upload` matching `identifier`.

        If `type` is given (e.g. "pmcid", "pmid", "doi") only that identifier
        field is searched; otherwise any of the three may match.
        """
        if type is not None:
            q = RecordTypedIdentifierQuery(identifier, type, upload)
        else:
            q = RecordUntypedIdentifierQuery(identifier, upload)
        return cls.iterate(q.query())

    @classmethod
    def upload_completeness(cls, upload_id):
        """Return (total, epmc, oag) completeness data for an upload.

        `epmc` and `oag` map facet terms to their counts, as produced by
        RecordsCompleteQuery.
        """
        q = RecordsCompleteQuery(upload_id)
        res = cls.query(q=q.query())
        total = res.get("hits", {}).get("total", 0)
        facets = res.get("facets", {})
        epmc = {f.get("term"): f.get("count", 0)
                for f in facets.get("epmc", {}).get("terms", [])}
        oag = {f.get("term"): f.get("count", 0)
               for f in facets.get("oag", {}).get("terms", [])}
        return total, epmc, oag

    @classmethod
    def list_duplicates(cls, sheet_id):
        """Map each identifier type to the identifiers appearing more than once.

        Returns {"pmcid": [...], "pmid": [...], "doi": [...]}.
        """
        # Size the facets to the total record count so no terms are dropped.
        # (Previously this local was named `max`, shadowing the builtin.)
        record_count = RecordDAO.count_by_upload(sheet_id)
        q = RecordIdentifierFacetQuery(sheet_id, record_count)
        res = cls.query(q.query())
        duplicates = {}
        # One shared loop replaces three copy-pasted blocks.
        for id_type in ("pmcid", "pmid", "doi"):
            terms = res.get("facets", {}).get(id_type, {}).get("terms", [])
            dupes = []
            for term in terms:
                if term.get("count", 0) > 1:
                    dupes.append(term.get("term"))
                else:
                    break  # Saves us a few cycles, since the array is ordered by count
            duplicates[id_type] = dupes
        return duplicates
class RecordTypedIdentifierQuery(object):
    """ES query matching a specific identifier type within one upload."""

    def __init__(self, identifier, type, upload):
        self.type = type
        self.identifier = identifier
        self.upload = upload

    def query(self):
        """Return the raw ES query body."""
        ident_field = "identifiers." + self.type + ".exact"
        must_clauses = [
            {"term": {ident_field: self.identifier}},
            {"term": {"upload.id.exact": self.upload}},
        ]
        return {"query": {"bool": {"must": must_clauses}}}
class RecordUntypedIdentifierQuery(object):
    """ES query matching any identifier field (pmcid/pmid/doi) in one upload."""

    def __init__(self, identifier, upload):
        self.identifier = identifier
        self.upload = upload

    def query(self):
        """Return the raw ES query body; at least one identifier must match."""
        should_clauses = [
            {"term": {"identifiers." + field + ".exact": self.identifier}}
            for field in ("pmcid", "pmid", "doi")
        ]
        return {
            "query": {
                "bool": {
                    "must": [{"term": {"upload.id.exact": self.upload}}],
                    "should": should_clauses,
                    "minimum_should_match": 1,
                }
            }
        }
class RecordSheetQuery(object):
    """ES query for all records in an upload, ordered by sheet position."""

    def __init__(self, sheet_id, page_size=10000):
        self.sheet_id = sheet_id
        self.page_size = page_size

    def query(self):
        """Return the raw ES query body."""
        body = {"query": {"term": {"upload.id.exact": self.sheet_id}}}
        body["size"] = self.page_size
        body["sort"] = [{"upload.pos": {"order": "asc"}}]
        return body
class RecordsCompleteQuery(object):
    """Facet query over epmc/oag completeness flags for one upload."""

    def __init__(self, sheet_id):
        self.sheet_id = sheet_id

    def query(self):
        """Return the raw ES query body (size 0: facets only, no hits)."""
        facets = {}
        for name, field in (("epmc", "supporting_info.epmc_complete"),
                            ("oag", "supporting_info.oag_complete")):
            facets[name] = {"terms": {"field": field}}
        return {
            "query": {"term": {"upload.id.exact": self.sheet_id}},
            "size": 0,
            "facets": facets,
        }
class RecordIdentifierFacetQuery(object):
    """Facet query counting identifier occurrences within one upload."""

    def __init__(self, sheet_id, max_size=100):
        self.sheet_id = sheet_id
        self.max_size = max_size

    def query(self):
        """Return the raw ES query body (size 0: facets only, no hits)."""
        facets = {}
        for name in ("pmcid", "pmid", "doi"):
            facets[name] = {
                "terms": {
                    "field": "identifiers." + name + ".exact",
                    "size": self.max_size,
                }
            }
        return {
            "query": {"term": {"upload.id.exact": self.sheet_id}},
            "size": 0,
            "facets": facets,
        }
###############################################
class OAGRLinkDAO(dao.ESDAO):
    """DAO linking OAGR jobs to spreadsheet uploads."""
    __type__ = "oagrlink"

    @classmethod
    def by_oagr_id(cls, oagr_id):
        """Return the first link for an OAGR job id, or None if absent."""
        matches = cls.object_query(OAGRLinkQuery(oagr_id=oagr_id).query())
        return matches[0] if matches else None

    @classmethod
    def by_spreadsheet_id(cls, spreadsheet_id):
        """Return the first link for a spreadsheet id, or None if absent."""
        matches = cls.object_query(OAGRLinkQuery(spreadsheet_id=spreadsheet_id).query())
        return matches[0] if matches else None
class OAGRLinkQuery(object):
    """ES query for oagrlink documents, by OAGR job id and/or spreadsheet id."""

    def __init__(self, oagr_id=None, spreadsheet_id=None):
        self.oagr_id = oagr_id
        self.spreadsheet_id = spreadsheet_id

    def query(self):
        """Return the raw ES query body; only supplied ids are filtered on."""
        must_clauses = []
        if self.oagr_id is not None:
            must_clauses.append({"term": {"oagrjob_id.exact": self.oagr_id}})
        if self.spreadsheet_id is not None:
            must_clauses.append({"term": {"spreadsheet_id.exact": self.spreadsheet_id}})
        return {"query": {"bool": {"must": must_clauses}}}
|
|
#!/usr/bin/env python
import os
import subprocess
import re
import logging
# Public interface of this module.
__all__ = ['NanoCA']
__version__ = '0.8'
# NOTE(review): presumably meant to be the conventional dunder `__author__`
# (missing trailing underscores) — confirm before relying on it.
__author = 'Moritz Moeller <mm@mxs.de>'
# openssl.cnf contents, written verbatim into <root>/openssl.cnf the first
# time a CA directory is initialized (see NanoCA.initialize). $ENV::ROOT is
# supplied via the environment by NanoCA.exec_openssl().
openssl_cnf_template = """
RANDFILE = $ENV::ROOT/private/.rnd
[ ca ]
default_ca = CA_default
[ CA_default ]
dir = $ENV::ROOT
certs = $dir/certs
crl_dir = $dir/crl
new_certs_dir = $dir/newcerts
database = $dir/index.txt
serial = $dir/serial
RANDFILE = $dir/private/.rnd
private_key = $dir/private/ca.key.pem
certificate = $dir/certs/ca.cert.pem
crlnumber = $dir/crlnumber
crl = $dir/crl/ca.crl.pem
crl_extensions = crl_ext
default_crl_days = 30
default_md = sha256
name_opt = ca_default
cert_opt = ca_default
default_days = 375
preserve = no
policy = policy_loose
email_in_dn = no
copy_extensions = copy
[ policy_strict ]
countryName = match
stateOrProvinceName = match
organizationName = match
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ policy_loose ]
countryName = optional
stateOrProvinceName = optional
localityName = optional
organizationName = optional
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ req ]
default_bits = 2048
distinguished_name = req_distinguished_name
string_mask = utf8only
default_md = sha256
x509_extensions = v3_ca
[ req_distinguished_name ]
countryName = Country Name (2 letter code)
stateOrProvinceName = State or Province Name
localityName = Locality Name
0.organizationName = Organization Name
organizationalUnitName = Organizational Unit Name
commonName = Common Name
emailAddress = Email Address
#countryName_default = GB
#stateOrProvinceName_default = England
#localityName_default =
#0.organizationName_default = Alice Ltd
#organizationalUnitName_default =
#emailAddress_default =
[ v3_ca ]
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always,issuer
basicConstraints = critical, CA:true
keyUsage = critical, digitalSignature, cRLSign, keyCertSign
[ v3_intermediate_ca ]
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always,issuer
basicConstraints = critical, CA:true, pathlen:0
keyUsage = critical, digitalSignature, cRLSign, keyCertSign
[ user_cert ]
basicConstraints = CA:FALSE
nsCertType = client, email
nsComment = "OpenSSL Generated Client Certificate"
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer
keyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth, emailProtection
[ server_cert ]
basicConstraints = CA:FALSE
nsCertType = server
nsComment = "OpenSSL Generated Server Certificate"
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer:always
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth
crlDistributionPoints = URI:http://example.com/intermediate.crl.pem
[ crl_ext ]
authorityKeyIdentifier=keyid:always
[ ocsp ]
basicConstraints = CA:FALSE
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer
keyUsage = critical, digitalSignature
extendedKeyUsage = critical, OCSPSigning
"""
class NanoCA:
    """
    Minimal certificate authority driven by the ``openssl`` command line tool.

    All state lives under a single root directory (created on demand by
    :meth:`initialize`): the openssl.cnf, the CA key/cert, the index/serial
    files openssl's ``ca`` subcommand needs, and an ``all/`` directory of
    issued keys/certs keyed by commonName.
    """
    class Error(Exception):
        # Single exception type raised for all NanoCA failures.
        def __init__(self, message):
            self.message = message
        def __str__(self):
            return 'NanoCA.Error: ' + self.message
    # --
    def __init__(self, root):
        """
        initialize the CA
        :param root: base path for the CA
        :type root: str or unicode
        """
        self.root = root
        self.logger = logging.getLogger('%s.%s' % (self.__class__.__name__, root))
        # Creates the directory layout and CA key/cert on first use.
        self.initialize()
    # --
    def exec_openssl(self, *args, **kwargs):
        """Run ``openssl`` with *args*; return its stdout as text.

        Keyword-only ``stdin_data`` (str) is encoded as ASCII and piped to the
        process. ROOT/OPENSSL_CONF are passed via the environment so the
        openssl.cnf template's $ENV::ROOT resolves to this CA's root.
        Raises NanoCA.Error (with stderr attached) on a non-zero exit code.
        """
        stdin_data=kwargs.pop('stdin_data', None)
        if stdin_data:
            stdin_data = stdin_data.encode('ascii')
        args = ['openssl'] + list(args)
        self.logger.debug('exec_openssl: %r', args)
        if stdin_data:
            self.logger.debug('exec_openssl: stdin_data=%r', stdin_data)
        proc = subprocess.Popen(
            args=args,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            env={
                'ROOT': self.get_path(),
                'OPENSSL_CONF': self.get_path('openssl.cnf')
            }
        )
        (stdout, stderr) = proc.communicate(stdin_data)
        stderr = stderr.decode('utf-8')
        stdout = stdout.decode('utf-8')
        # communicate() already waited; this is a harmless no-op.
        proc.wait()
        self.logger.debug('exec_openssl: exitcode=%r stdout=%r stderr=%r', proc.returncode, stdout, stderr)
        if proc.returncode != 0:
            raise NanoCA.Error("error calling openssl:\n" + stderr)
        return stdout
    # --
    def get_path(self, *args):
        """Join *args* onto the CA root directory."""
        return os.path.join(self.root, *args)
    # --
    def validate_name(self, value):
        """Allow only [a-zA-Z0-9.-@] names (safe as file name components)."""
        if not re.match('^[a-zA-Z0-9\-\.\@]+$', value):
            raise NanoCA.Error('invalid name: %r' % (value, ))
        return value
    def validate_string(self, value):
        """Reject strings containing '/', the subject field separator."""
        if not re.match('^[^/]+$', value):
            raise NanoCA.Error('invalid string: %r' % (value, ))
        return value
    # --
    # Mapping between openssl subject keys and the friendly names used in
    # encode_subject()/decode_subject() dicts.
    subject_fields = {
        'C': 'country',
        'ST': 'state',
        'L': 'locality',
        'O': 'organization',
        'OU': 'organizationalUnit',
        'CN': 'commonName'
    }
    def encode_subject(self, args):
        """Encode a dict of friendly field names into an openssl -subj string."""
        extra_fields = set(args.keys()) - set(self.subject_fields.values())
        if extra_fields:
            raise NanoCA.Error('subject has extra field: %r' % (extra_fields, ))
        # Leading '' yields the required leading '/' after the join.
        subj = ['']
        for (k, v) in self.subject_fields.items():
            if v in args:
                #subj.append(k+'='+args[v].encode('string-escape').replace('/', '\\/'))
                subj.append(k+'='+args[v].replace('/', '\\/'))
        subj = '/'.join(subj)
        return subj
    def decode_subject(self, arg):
        """Decode an openssl subject string back into a friendly-name dict.

        Escaped slashes ('\\/') are temporarily replaced via '%'-placeholders
        so the split on '/' doesn't break values containing slashes.
        """
        res = {}
        for i in arg.replace('%', '%1').replace('\\/', '%2').split('/'):
            if i == '':
                continue
            k, v = i.replace('%2', '/').replace('%1', '%').split('=', 1)
            k = k.rstrip(' ')
            v = v.lstrip(' ')
            if k not in self.subject_fields:
                raise NanoCA.Error('cannot decode field: %r' % (k, ))
            res[self.subject_fields[k]] = v
        return res
    # --
    def initialize(self):
        """Create the CA directory layout, config, key and self-signed cert.

        Idempotent: every artefact is only created when missing.
        """
        # NOTE(review): bare except — also hides errors other than a missing
        # binary (e.g. a broken config).
        try:
            self.exec_openssl('version')
        except:
            raise NanoCA.Error('openssl binary not found')
        if not os.path.isdir(self.get_path()):
            os.mkdir(self.get_path())
        if not os.path.isfile(self.get_path('openssl.cnf')):
            with open(self.get_path('openssl.cnf'), 'w') as fp:
                fp.write(openssl_cnf_template)
        for i in ['certs', 'crl', 'newcerts', 'private', 'all']:
            d = self.get_path(i)
            if not os.path.isdir(d):
                os.mkdir(d)
        # Keys live in private/: restrict to the owner.
        os.chmod(self.get_path('private'), 0o700)
        if not os.path.isfile(self.get_path('private/.rnd')):
            with open(self.get_path('private/.rnd'), 'w') as fp:
                pass
        # index.txt / index.txt.attr: openssl ca's certificate database.
        if not os.path.isfile(self.get_path('index.txt')):
            with open(self.get_path('index.txt'), 'w') as fp:
                pass
        if not os.path.isfile(self.get_path('index.txt.attr')):
            with open(self.get_path('index.txt.attr'), 'w') as fp:
                pass
        if not os.path.isfile(self.get_path('serial')):
            with open(self.get_path('serial'), 'w') as fp:
                fp.write('1000\n')
        if not os.path.isfile(self.get_path('crlnumber')):
            with open(self.get_path('crlnumber'), 'w') as fp:
                fp.write('1000\n')
        # Generate the CA key and a ~20 year self-signed root certificate.
        if not os.path.isfile(self.get_path('private', 'ca.key.pem')):
            self.exec_openssl(
                'genrsa',
                '-out', self.get_path('private', 'ca.key.pem'),
                '4096'
            )
        if not os.path.isfile(self.get_path('certs', 'ca.cert.pem')):
            self.exec_openssl(
                'req',
                '-subj', '/C=DE/ST=Germany/L=Germany/O=private/CN=ca',
                '-key', self.get_path('private', 'ca.key.pem'),
                '-new', '-x509', '-days', '7300', '-sha256', '-extensions', 'v3_ca',
                '-out', self.get_path('certs', 'ca.cert.pem')
            )
    def get_ca_certificate(self):
        """
        :returns: the CA certificate in PEM format
        :rtype: str
        """
        with open(self.get_path('certs', 'ca.cert.pem'), 'r') as fp:
            return fp.read()
    def get_csr_info(self, csr):
        """Verify *csr* (PEM) and return its subject as a friendly-name dict."""
        res = self.exec_openssl(
            'req', '-subject', '-noout', '-text', '-verify', stdin_data=csr
        )
        m = re.search('subject=(.*)', res)
        if not m:
            raise NanoCA.Error('cannot parse response')
        return self.decode_subject(m.group(1))
    def extract_key(self, pem):
        """Extract the RSA private key part from a combined PEM blob."""
        return self.exec_openssl(
            'rsa',
            stdin_data=pem
        )
    def extract_cert(self, pem):
        """Extract the certificate part from a combined PEM blob."""
        return self.exec_openssl(
            'x509',
            stdin_data=pem
        )
    def extract_csr(self, pem):
        """Extract the certificate request part from a combined PEM blob."""
        return self.exec_openssl(
            'req',
            stdin_data=pem
        )
    def sign(self, csr, store=True, days=None, extensions=None):
        """
        sign csr (in pem format) and return certificate (in pem format)

        :param store: also write the cert to all/<commonName>.cert.pem
        :param days: validity in days (default 365)
        :param extensions: extension section names (default ['server_cert'])
        """
        days = int(days or 365)
        extensions = extensions or ['server_cert']
        self.logger.info('sign')
        info = self.get_csr_info(csr)
        self.logger.info('sign commonName=%r', info['commonName'])
        res = self.exec_openssl(
            'ca',
            '-notext',
            '-md', 'sha256',
            '-batch',
            '-in', '/dev/stdin',
            '-out', '/dev/stdout',
            '-days', str(days),
            '-extensions', ','.join(extensions),
            stdin_data=csr
        )
        if store:
            with open(self.get_path('all', info['commonName']+'.cert.pem'), 'w') as fp:
                fp.write(res)
        return res
    def get_crl(self):
        """Generate and return the current CRL in PEM format."""
        return self.exec_openssl('ca', '-gencrl')
    def revoke(self, cert):
        """Revoke *cert* (PEM) in the CA database."""
        self.exec_openssl(
            'ca', '-revoke', '/dev/stdin',
            stdin_data=cert
        )
    def create_and_sign(self, commonName, subj=None, store=True, days=None, extensions=None):
        """Generate a key + CSR for *commonName*, sign it, return cert + key.

        NOTE(review): mutates the caller's *subj* dict (sets 'commonName').
        """
        self.logger.info('create_and_sign commonName=%r subj=%r', commonName, subj)
        self.validate_name(commonName)
        if subj is None:
            subj = {}
        subj['commonName'] = commonName
        # -nodes: the private key is written unencrypted (to stdout here).
        csr_and_key = self.exec_openssl(
            'req',
            '-utf8', '-batch', '-new', '-sha256',
            '-subj', self.encode_subject(subj),
            '-newkey', 'rsa:2048', '-nodes'
        )
        csr = self.extract_csr(csr_and_key)
        key = self.extract_key(csr_and_key)
        cert = self.sign(csr, store=store, days=days, extensions=extensions)
        if store:
            with open(self.get_path('all', commonName+'.key.pem'), 'w') as fp:
                fp.write(key)
            with open(self.get_path('all', commonName+'.csr.pem'), 'w') as fp:
                fp.write(csr)
        return cert + key
    # --
    def get_certificate(self, commonName):
        """Return the stored certificate for *commonName* in PEM format."""
        p = self.get_path('all', commonName + '.cert.pem')
        if not os.path.isfile(p):
            raise NanoCA.Error('certificate for %r does not exist' % (commonName, ))
        with open(p, 'r') as fp:
            return fp.read()
    def get_key(self, commonName):
        """
        :param commonName: name of key to return
        :type commonName: str
        :returns: the key in PEM format
        :rtype: str
        """
        p = self.get_path('all', commonName + '.key.pem')
        if not os.path.isfile(p):
            raise NanoCA.Error('key for %r does not exist' % (commonName, ))
        with open(p, 'r') as fp:
            return fp.read()
    def get_key_and_certificate(self, commonName):
        """Return the stored certificate followed by the key, both PEM."""
        return self.get_certificate(commonName) + self.get_key(commonName)
def main():
    """Command line entry point: parse arguments and dispatch a subcommand."""
    import os
    import sys
    import argparse
    # The do_* handlers close over `ca`, which is bound near the end of this
    # function, before args.func(args) is invoked.
    def do_cacert(args):
        sys.stdout.write(ca.get_ca_certificate())
    def do_crl(args):
        sys.stdout.write(ca.get_crl())
    def do_cert(args):
        sys.stdout.write(ca.get_certificate(args.commonName))
    def do_key(args):
        sys.stdout.write(ca.get_key(args.commonName))
    def do_certkey(args):
        sys.stdout.write(ca.get_certificate(args.commonName))
        sys.stdout.write(ca.get_key(args.commonName))
    def do_sign(args):
        # CSR is read from stdin in PEM format.
        csr = sys.stdin.read()
        res = ca.sign(csr, days=args.days, extensions=[args.usage])
        if args.show:
            sys.stdout.write(res)
    def do_revoke(args):
        # Revoke either a stored certificate (by commonName) or one from stdin.
        if args.commonName:
            cert = ca.get_certificate(args.commonName)
        else:
            cert = sys.stdin.read()
        ca.revoke(cert)
    def do_create(args):
        ca.create_and_sign(args.commonName, days=args.days, extensions=[args.usage])
        if args.show:
            sys.stdout.write(ca.get_certificate(args.commonName))
            sys.stdout.write(ca.get_key(args.commonName))
    parser = argparse.ArgumentParser(description='NanoCA')
    parser.add_argument('--root', help='root directory for CA')
    parser.add_argument('--verbose', action='store_true', default=False, help='verbose mode')
    subparsers = parser.add_subparsers(help='sub-command help')
    parser_cacert = subparsers.add_parser('cacert', help='write ca cert to stdout in pem format')
    parser_cacert.set_defaults(func=do_cacert)
    parser_crl = subparsers.add_parser('crl', help='write crl to stdout in pem format')
    parser_crl.set_defaults(func=do_crl)
    parser_cert = subparsers.add_parser('cert', help='write certificate to stdout in pem format')
    parser_cert.add_argument('commonName', help='common name of certificate')
    parser_cert.set_defaults(func=do_cert)
    parser_key = subparsers.add_parser('key', help='write key to stdout in pem format')
    parser_key.add_argument('commonName', help='common name of certificate')
    parser_key.set_defaults(func=do_key)
    parser_certkey = subparsers.add_parser('certkey', help='write certificate and key to stdout in pem format')
    parser_certkey.add_argument('commonName', help='common name of certificate')
    parser_certkey.set_defaults(func=do_certkey)
    parser_sign = subparsers.add_parser('sign', help='sign a csr')
    parser_sign.add_argument('--days', default='365', help='days of validity')
    parser_sign.add_argument('--usage', default='server_cert', help='usage, server_cert or user_cert')
    parser_sign.add_argument('--show', default=False, action='store_true', help='print result')
    parser_sign.set_defaults(func=do_sign)
    parser_revoke = subparsers.add_parser('revoke', help='revoke a certificate')
    parser_revoke.add_argument('commonName', nargs='?', help='common name of certificate, otherwise stdin is used')
    parser_revoke.set_defaults(func=do_revoke)
    parser_create = subparsers.add_parser('create', help='create csr and sign')
    parser_create.add_argument('commonName', help='common name of certificate')
    parser_create.add_argument('--days', default='365', help='days of validity')
    parser_create.add_argument('--usage', default='server_cert', help='usage, server_cert or user_cert')
    parser_create.add_argument('--show', default=False, action='store_true', help='print result')
    parser_create.set_defaults(func=do_create)
    args = parser.parse_args()
    # No subcommand given: show usage and exit non-zero.
    if not hasattr(args, 'func'):
        parser.print_help()
        sys.exit(1)
    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.WARNING)
    # Root precedence: --root flag > NANOCA_ROOT env var > ./data next to
    # this script.
    root = os.path.abspath(os.path.join(__file__, '..', 'data'))
    if 'NANOCA_ROOT' in os.environ:
        root = os.environ['NANOCA_ROOT']
    if args.root:
        root = args.root
    ca = NanoCA(
        root=root
    )
    args.func(args)
|
|
import unittest
import types
import datetime
from flask import Flask
from pprint import pprint
from sqlalchemy import (Column, create_engine, DateTime, Date, Float,
ForeignKey, Integer, Boolean, Unicode, create_engine)
from lever import API, preprocess, postprocess, ModelBasedACL, ImpersonateMixin
from lever.tests.model_helpers import FlaskTestBase, TestUserACL
class ProcessTests(unittest.TestCase):
    """ Ensures our metaclasses and decorators operate as we want for assigning
    preprocessors and postprocessors """
    def test_basic_preprocess(self):
        # Decorated methods should be registered per-method and per-action.
        class APIAwesome(API):
            @preprocess(method='post')
            def preprocess_those(self):
                pass
            @preprocess(action='something')
            def preprocess_that(self):
                pass
        assert isinstance(APIAwesome._pre_method['post'][0],
                          types.FunctionType)
        assert isinstance(APIAwesome._pre_action['something'][0],
                          types.FunctionType)
    def test_inheritence_mixins(self):
        # Preprocessors declared on a plain mixin should be collected when the
        # mixin is combined with API.
        class APIParent(object):
            @preprocess(method='post')
            def preprocess_those(self):
                pass
        class APIAwesome(API, APIParent):
            pass
        assert isinstance(APIAwesome._pre_method['post'][0],
                          types.FunctionType)
    def test_inheritence(self):
        # Preprocessors declared on an API subclass should be inherited.
        class APIParent(API):
            @preprocess(method='post')
            def preprocess_those(self):
                pass
        class APIAwesome(APIParent):
            pass
        assert isinstance(APIAwesome._pre_method['post'][0],
                          types.FunctionType)
    def test_inheritence_reversal(self):
        # Declaring the preprocessor on the child of a bare API parent works too.
        class APIParent(API):
            pass
        class APIAwesome(APIParent):
            @preprocess(method='post')
            def preprocess_those(self):
                pass
        assert isinstance(APIAwesome._pre_method['post'][0],
                          types.FunctionType)
    def test_multi_preprocess(self):
        # A single decorator may register for several methods/actions at once.
        class APIAwesome(API):
            @preprocess(method=['post', 'get'])
            def preprocess_those(self):
                pass
            @preprocess(action=['create', 'other'])
            def preprocess_that(self):
                pass
        assert isinstance(APIAwesome._pre_method['post'][0],
                          types.FunctionType)
        assert isinstance(APIAwesome._pre_method['get'][0],
                          types.FunctionType)
        assert isinstance(APIAwesome._pre_action['other'][0],
                          types.FunctionType)
        assert isinstance(APIAwesome._pre_action['create'][0],
                          types.FunctionType)
    def test_basic_postprocess(self):
        # Same registration behaviour for postprocessors.
        class APIAwesome(API):
            @postprocess(method='post')
            def preprocess_those(self):
                pass
            @postprocess(action='something')
            def preprocess_that(self):
                pass
        assert isinstance(APIAwesome._post_method['post'][0],
                          types.FunctionType)
        assert isinstance(APIAwesome._post_action['something'][0],
                          types.FunctionType)
    def test_multi_postprocess(self):
        class APIAwesome(API):
            @postprocess(method=['post', 'get'])
            def preprocess_those(self):
                pass
            @postprocess(action=['create', 'other'])
            def preprocess_that(self):
                pass
        assert isinstance(APIAwesome._post_method['post'][0],
                          types.FunctionType)
        assert isinstance(APIAwesome._post_method['get'][0],
                          types.FunctionType)
        assert isinstance(APIAwesome._post_action['other'][0],
                          types.FunctionType)
        assert isinstance(APIAwesome._post_action['create'][0],
                          types.FunctionType)
    def test_preprocess_priority(self):
        # pri=0 should order preprocess_those ahead of the default priority.
        class APIAwesome(API):
            @postprocess(method='post', pri=0)
            def preprocess_those(self):
                pass
            @postprocess(method='post')
            def preprocess_that(self):
                pass
        self.assertEqual(
            APIAwesome._post_method['post'][0].__name__, 'preprocess_those')
    def test_none(self):
        # With no decorators the registries should stay empty.
        class APIAwesome(API):
            pass
        assert APIAwesome._pre_method == {}
        assert APIAwesome._pre_action == {}
class TestProcessorUsage(FlaskTestBase):
    """ These tests ensure that preprocessors and postprocessors are getting
    called when they should be """
    def test_methods_preprocess(self):
        # A raising preprocessor should fire before the HTTP method body runs.
        for meth in ['post', 'get', 'delete', 'put']:
            class APIAwesome(API):
                @preprocess(method=meth)
                def preprocessor_one(self):
                    raise SyntaxError # pick an obscure one to catch..
            inst = APIAwesome()
            self.assertRaises(SyntaxError, getattr(inst, meth))
    def test_methods_postprocess(self):
        obj = self.provision_single_asset()
        data = [('post', {'name': 'test'}),
                ('get', {}),
                ('put', {'id': obj.id, 'name': 'test2'}),
                ('delete', {'id': obj.id})]
        # Register one view per method, each with a raising postprocessor...
        for meth, vals in data:
            class APIAwesome(self.widget_api):
                @postprocess(method=meth)
                def postprocess_one(self, retval):
                    raise SyntaxError # pick an obscure one to catch..
            self.app.add_url_rule('/' + meth, view_func=APIAwesome.as_view(meth))
        # ...then exercise each view and expect the postprocessor to blow up.
        for meth, vals in data:
            self.assertRaises(SyntaxError, getattr(self, meth), meth, 500, params=vals)
class TestAPICreation(FlaskTestBase):
    """Tests around constructing API classes from models."""

    def test_create_bad_pkey(self):
        """ ensure that exception is thrown for invalid primary_key """
        # The model's primary key is not named `id`, so pkey resolution fails.
        class BadKeyModel(self.base):
            __tablename__ = "testing_table"
            bad_id = Column(Integer, primary_key=True)

        class BadKeyAPI(API):
            model = BadKeyModel
            session = self.session

        api = BadKeyAPI()
        self.assertRaises(AttributeError, lambda: api.pkey)
class TestGet(FlaskTestBase):
    """ Test facets of our get method """

    def test_get_pkey(self):
        # Fetching by primary key should return exactly that object.
        widget = self.provision_single_asset()
        resp = self.get('widget', 200, {'id': widget.id})
        objects = resp['objects']
        assert objects
        assert objects[0]['id'] == widget.id

    def test_many_query(self):
        # An unfiltered get should return every provisioned widget.
        self.provision_many_asset()
        resp = self.get('widget', 200)
        assert len(resp['objects']) >= 4
class TestPut(FlaskTestBase):
    """ Test facets of our put method """
    def test_update(self):
        """ can we change an object """
        obj = self.provision_single_asset()
        test_string = "testing this thing"
        p = {'id': obj.id, 'description': test_string}
        self.put('widget', 200, params=p)
        # refresh so we read the updated row back from the database
        self.session.refresh(obj)
        assert obj.description == test_string
    def test_cant_find(self):
        # a valid pkey matching no row should yield a 404
        self.basic_api()
        self.base.metadata.create_all(self.engine)
        ret = self.put('widget', 404, params={'id': 123})
        assert 'not be found' in ret['message']
    def test_cant_find_invalid_key(self):
        # an unknown key cannot identify an object to update
        self.basic_api()
        self.base.metadata.create_all(self.engine)
        ret = self.put('widget', 404, params={'tid': 123})
        assert 'any object to update' in ret['message']
class TestDelete(FlaskTestBase):
    """Tests for the delete method."""

    def test_delete(self):
        """ can we delete an object """
        widget = self.provision_single_asset()
        widget_id = widget.id
        self.delete('widget', 200, params={'id': widget_id})
        # The row must be gone from the database afterwards.
        remaining = (self.session.query(self.widget_model)
                     .filter_by(id=widget_id).first())
        assert remaining is None

    def test_cant_find_put_delete(self):
        # Valid pkey, but no matching row -> 404.
        self.basic_api()
        self.base.metadata.create_all(self.engine)
        resp = self.delete('widget', 404, params={'id': 123})
        assert 'Object could not be found' in resp['message']

    def test_cant_find(self):
        # Unknown key can't identify an object to delete -> 404.
        self.basic_api()
        self.base.metadata.create_all(self.engine)
        resp = self.delete('widget', 404, params={'tid': 123})
        assert 'object to delete' in resp['message']
class TestPost(FlaskTestBase):
    """Tests for the post (create) method."""
    def test_create_dup(self):
        """ make a duplicate entry and fail """
        # provision creates a widget named 'Testing'; a second one must 409
        obj = self.provision_single_asset()
        p = self.post('widget', 409, params={'name': u'Testing'})
        assert 'duplicate value already' in p['message']
    def test_create_new(self):
        """ try creating a new object """
        self.basic_api()
        self.base.metadata.create_all(self.engine)
        p = self.post('widget', 200, params={'name': u'Testing'})
        assert p['objects'][0]['name'] == 'Testing'
    def test_bad_action(self):
        # an unregistered __action should produce a 400
        obj = self.provision_single_asset()
        p = {'id': obj.id, '__action': 'dsflgjksdfglk'}
        ret = self.post('widget', 400, params=p)
        assert 'missing key' in ret['message']
class TestSearch(FlaskTestBase):
    """ Run a bunch of positive and negative tests on our searching system """
    def test_filter_by(self):
        # __filter_by: simple equality filter on a field
        obj = self.provision_single_asset()
        d = self.get('widget', 200, {'__filter_by': {'name': obj.name}})
        assert len(d['objects']) > 0
        assert d['objects'][0]['name'] == obj.name
    def test_single_failure(self):
        # __one with no matching row should 404
        self.basic_api()
        self.base.metadata.create_all(self.engine)
        ret = self.get('widget', 404, params={'__one': True})
        assert 'not be found' in ret['message']
    def test_query_bad_param_filter_by(self):
        # filtering on a nonexistent field is rejected with a 400
        self.basic_api()
        ret = self.get('widget', 400, params={'__filter_by': {'sdflgj': True}})
        assert 'invalid field' in ret['message']
    def test_query_filter(self):
        # __filter: operator-based filter (field op value)
        obj = self.provision_single_asset()
        ret = self.get('widget', 200,
                       params={'__filter': [
                           {'val': 'Testing', 'name': 'name', 'op': 'eq'}]})
        assert ret['objects'][0]['name'] == 'Testing'
    def test_query_filter_field(self):
        """ test filtering by comparing different fields to each other """
        obj = self.provision_single_asset()
        # TODO: Write a positive test for this
        ret = self.get('widget', 200,
                       params={'__filter': [
                           {'field': 'created_at', 'name': 'name', 'op': 'eq'}]})
        assert len(ret['objects']) == 0
    def test_query_bad_param_filter(self):
        # unknown field name in an operator filter -> 400
        self.basic_api()
        ret = self.get('widget', 400,
                       params={'__filter': [
                           {'val': True, 'name': 'dsflgjsdflgk', 'op': 'eq'}]})
        assert 'invalid field' in ret['message']
    def test_query_bad_param_op(self):
        # unknown operator -> 400
        self.basic_api()
        ret = self.get('widget', 400,
                       params={'__filter': [
                           {'val': True, 'name': 'name', 'op': 'fake'}]})
        assert 'operator specified in' in ret['message']
    def test_query_missing_param(self):
        # missing the 'op' key entirely -> 400
        self.basic_api()
        ret = self.get('widget', 400,
                       params={'__filter': [
                           {'val': True, 'name': 'name', 'op2': 'fake'}]})
        assert 'missing required arguments' in ret['message']
    def test_query_bad_param_count(self):
        # operator requiring a value, but no 'val'/'field' supplied -> 400
        self.basic_api()
        ret = self.get('widget', 400,
                       params={'__filter': [{'name': 'name', 'op': '=='}]})
        assert 'argument count' in ret['message']
    def test_order_by(self):
        # ascending sort on id
        self.provision_many_asset()
        ret = self.get('widget', 200, params={'__order_by': ['id']})
        assert len(ret['objects']) >= 4
        assert ret['objects'][0]['id'] < ret['objects'][1]['id']
        assert ret['objects'][2]['id'] < ret['objects'][3]['id']
    def test_order_by_desc(self):
        # '-' prefix requests a descending sort
        self.provision_many_asset()
        ret = self.get('widget', 200, params={'__order_by': ['-id']})
        assert len(ret['objects']) >= 4
        assert ret['objects'][0]['id'] > ret['objects'][1]['id']
        assert ret['objects'][2]['id'] > ret['objects'][3]['id']
    def test_order_by_bad_key(self):
        # sorting on a nonexistent field -> 400
        self.basic_api()
        ret = self.get('widget', 400, params={'__order_by': ['dflgjksdfgl']})
        assert 'Order_by operator' in ret['message']
class TestLogin(TestUserACL):
    """ Tests abilities of the User ACL mixin class """
    def test_login(self):
        """ can we login with a patch action """
        self.user_api()
        self.base.metadata.create_all(self.engine)
        people = self.provision_users()
        p = {'__action': 'login', 'id': people[0].id, 'password': "testing"}
        self.post('user', 200, params=p)

    def test_delete_fail(self):
        """ will delete fail with bad permissions? """
        self.user_api()
        self.base.metadata.create_all(self.engine)
        people = self.provision_users()
        p = {'id': people[2].id}
        self.delete('user', 403, params=p)

    def test_impersonate(self):
        """ can the admin properly impersonate someone for a create?"""
        self.user_api()

        class Widget(self.base):
            __tablename__ = 'testing'
            id = Column(Integer, primary_key=True)
            name = Column(Unicode, unique=True)
            owner = Column(Unicode)
            created_at = Column(DateTime, default=datetime.datetime.utcnow)
            acl = {'admin': set(['class_create_other', 'action_create'])}
            standard_join = ['name', 'created_at', 'id']

            @classmethod
            def create(cls, name, user=None):
                """Factory used by WidgetAPI (create_method) below."""
                inst = cls(name=name)
                if user:
                    # BUG FIX: previously `self.owner = user.username`, which
                    # (via closure) set an attribute on the enclosing test
                    # case instead of the new widget, so the owner column was
                    # never populated.
                    inst.owner = user.username
                # `self` here is the enclosing test case by closure; its
                # session is the same one handed to WidgetAPI below.
                self.session.add(inst)
                return inst

        class WidgetAPI(ImpersonateMixin, ModelBasedACL, API):
            model = Widget
            session = self.session
            create_method = "create"
            user_model = self.user_model

        self.app.add_url_rule('/widget', view_func=WidgetAPI.as_view('widget'))
        self.base.metadata.create_all(self.engine)
        people = self.provision_users()
        # log in as the admin, then create a widget on behalf of another user
        p = {'__action': 'login', 'id': self.admin.id, 'password': "testing"}
        self.post('user', 200, params=p)
        p = {'__user_id': people[2].id, 'name': 'testing'}
        self.post('widget', 200, params=p)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import shutil
import os
from os.path import join
import logging
from .input import pdftotext
from .input import pdfminer_wrapper
from .input import tesseract
from .input import tesseract4
from .input import gvision
from invoice2data.extract.loader import read_templates
from .output import to_csv
from .output import to_json
from .output import to_xml
logger = logging.getLogger(__name__)
# Registry of text-extraction backends selectable via --input-reader.
input_mapping = {
    "pdftotext": pdftotext,
    "tesseract": tesseract,
    "tesseract4": tesseract4,
    "pdfminer": pdfminer_wrapper,
    "gvision": gvision,
}
# Registry of writers selectable via --output-format; "none" disables output.
output_mapping = {"csv": to_csv, "json": to_json, "xml": to_xml, "none": None}
def extract_data(invoicefile, templates=None, input_module=pdftotext):
    """Extract structured data from a PDF/image invoice.

    Text is pulled out of *invoicefile* with *input_module* and matched
    against regex templates; the first matching template extracts the fields.

    Parameters
    ----------
    invoicefile : str
        Path of the electronic invoice file (PDF, JPEG, PNG).
    templates : list of instances of class `InvoiceTemplate`, optional
        Loaded with ``read_templates`` when omitted.
    input_module : {'pdftotext', 'pdfminer', 'tesseract'}, optional
        Library used to extract text from *invoicefile*.

    Returns
    -------
    dict or False
        Extracted and matched fields, or False if no template matches.

    Notes
    -----
    Import the required `input_module` when using invoice2data as a library.

    Examples
    --------
    >>> from invoice2data.input import pdftotext
    >>> extract_data("invoice2data/test/pdfs/oyo.pdf", None, pdftotext)
    {'issuer': 'OYO', 'amount': 1939.0, 'date': datetime.datetime(2017, 12, 31, 0, 0), 'invoice_number': 'IBZY2087',
    'currency': 'INR', 'desc': 'Invoice IBZY2087 from OYO'}
    """
    if templates is None:
        templates = read_templates()
    extracted_str = input_module.to_text(invoicefile).decode("utf-8")
    logger.debug("START pdftotext result ===========================")
    logger.debug(extracted_str)
    logger.debug("END pdftotext result =============================")
    # First template whose keywords match wins.
    for candidate in templates:
        prepared = candidate.prepare_input(extracted_str)
        if not candidate.matches_input(prepared):
            continue
        logger.info("Using %s template", candidate["template_name"])
        return candidate.extract(prepared)
    logger.error("No template for %s", invoicefile)
    return False
def create_parser():
    """Build and return the argparse CLI for the invoice2data tool."""
    cli = argparse.ArgumentParser(
        description="Extract structured data from PDF files and save to CSV or JSON."
    )
    # --- extraction and output selection ---------------------------------
    cli.add_argument(
        "--input-reader",
        choices=input_mapping.keys(),
        default="pdftotext",
        help="Choose text extraction function. Default: pdftotext",
    )
    cli.add_argument(
        "--output-format",
        choices=output_mapping.keys(),
        default="none",
        help="Choose output format. Default: none",
    )
    cli.add_argument(
        "--output-date-format",
        dest="output_date_format",
        default="%Y-%m-%d",
        help="Choose output date format. Default: %%Y-%%m-%%d (ISO 8601 Date)",
    )
    cli.add_argument(
        "--output-name",
        "-o",
        dest="output_name",
        default="invoices-output",
        help="Custom name for output file. Extension is added based on chosen format.",
    )
    cli.add_argument(
        "--debug", dest="debug", action="store_true", help="Enable debug information."
    )
    # --- post-processing of the input PDFs -------------------------------
    cli.add_argument(
        "--copy",
        "-c",
        dest="copy",
        help="Copy and rename processed PDFs to specified folder.",
    )
    cli.add_argument(
        "--move",
        "-m",
        dest="move",
        help="Move and rename processed PDFs to specified folder.",
    )
    cli.add_argument(
        "--filename-format",
        dest="filename",
        default="{date} {invoice_number} {desc}.pdf",
        help="Filename format to use when moving or copying processed PDFs."
        'Default: "{date} {invoice_number} {desc}.pdf"',
    )
    # --- template discovery ----------------------------------------------
    cli.add_argument(
        "--template-folder",
        "-t",
        dest="template_folder",
        help="Folder containing invoice templates in yml file. Always adds built-in templates.",
    )
    cli.add_argument(
        "--exclude-built-in-templates",
        dest="exclude_built_in_templates",
        default=False,
        help="Ignore built-in templates.",
        action="store_true",
    )
    cli.add_argument(
        "input_files",
        type=argparse.FileType("r"),
        nargs="+",
        help="File or directory to analyze.",
    )
    return cli
def _format_filename(template, res):
    """Render the --copy/--move filename template from one extraction result."""
    return template.format(
        date=res["date"].strftime("%Y-%m-%d"),
        invoice_number=res["invoice_number"],
        desc=res["desc"],
    )


def main(args=None):
    """Take folder or single file and analyze each.

    Parses the CLI (unless *args* is supplied), extracts data from every
    input file, optionally copies/moves the processed PDFs, and writes the
    aggregate results with the selected output module.
    """
    if args is None:
        parser = create_parser()
        args = parser.parse_args()
    if args.debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO, format="%(message)s")
    input_module = input_mapping[args.input_reader]
    output_module = output_mapping[args.output_format]
    templates = []
    # Load templates from external folder if set.
    if args.template_folder:
        templates += read_templates(os.path.abspath(args.template_folder))
    # Load internal templates, if not disabled.
    if not args.exclude_built_in_templates:
        templates += read_templates()
    output = []
    for f in args.input_files:
        res = extract_data(f.name, templates=templates, input_module=input_module)
        if res:
            logger.info(res)
            output.append(res)
            # The duplicated format expression was factored into
            # _format_filename; behavior is unchanged.
            if args.copy:
                shutil.copyfile(f.name, join(args.copy, _format_filename(args.filename, res)))
            if args.move:
                shutil.move(f.name, join(args.move, _format_filename(args.filename, res)))
        f.close()
    if output_module is not None:
        output_module.write_to_file(output, args.output_name, args.output_date_format)


if __name__ == "__main__":
    main()
|
|
# -:- coding:utf8 -:-
"""
Customer Parser
"""
import functools
import inspect
from datetime import datetime
from decimal import Decimal
import flask_restful.fields
from flask_kits.restful import post_parameter
from flask_restful import abort
from flask_restful.reqparse import Argument
from flask_restful.reqparse import RequestParser
from flask_restful_swagger import swagger
from six import add_metaclass
from six import iteritems
from six import text_type
from werkzeug.datastructures import MultiDict
def parameter(schema):
    """Decorator that parses the request body with *schema* and injects the
    result into the view as the ``entity`` keyword argument.

    :param EntityBase schema: entity class whose ``parse`` builds the instance
    """

    def wrapper(f):
        @functools.wraps(f)
        def wrapped(*args, **kwargs):
            entity = schema.parse()
            kwargs['entity'] = entity
            return f(*args, **kwargs)

        # Support swagger document: mirror the swagger metadata onto the
        # wrapper and register the schema as a body parameter.
        if '__swagger_attr' in f.__dict__:
            attr = wrapped.__dict__['__swagger_attr'] = f.__dict__['__swagger_attr']
            # BUG FIX: attr.get('parameters', []) returned a detached list
            # when the key was missing, so the appended parameter was lost;
            # setdefault stores the list back on the swagger metadata.
            params = attr.setdefault('parameters', [])
            params.append(post_parameter(schema))
        return wrapped

    return wrapper
# Native Python type -> flask_restful output field, used when the metaclass
# builds ``resource_fields`` for swagger models.
# NOTE(review): Decimal maps to String, presumably to keep exact values in
# JSON rather than lossy floats — confirm.
MAPPINGS = {
    int: flask_restful.fields.Integer,
    float: flask_restful.fields.Float,
    str: flask_restful.fields.String,
    datetime: flask_restful.fields.DateTime,
    Decimal: flask_restful.fields.String,
    bool: flask_restful.fields.Boolean
}
def get_field_type(class_type):
    """Return the flask_restful field class for *class_type*, or None."""
    try:
        return MAPPINGS[class_type]
    except KeyError:
        return None
class DeclarativeMeta(type):
    """Metaclass turning declared ``Field`` attributes into parser machinery.

    For every subclass of ``EntityBase`` it collects the ``Field`` class
    attributes, registers them on an ``entity_parser`` (RequestParser),
    records the names in ``entity_fields`` and a swagger-compatible
    ``resource_fields`` mapping, strips the ``Field`` objects from the class
    body, and registers the resulting class as a swagger model.
    """

    def __new__(cls, name, bases, attributes):
        # The abstract base itself gets no parser machinery.
        if name == 'EntityBase':
            return type.__new__(cls, name, bases, attributes)
        parser = RequestParser()
        fields = [(attr_name, field) for attr_name, field in iteritems(attributes) if isinstance(field, Field)]
        field_names = set()
        resource_fields = dict()
        # BUG FIX: the loop previously rebound the variable ``name``,
        # clobbering the class name passed to type.__new__ below, so every
        # generated class was named after its last declared field.
        for attr_name, field in fields:
            if inspect.isclass(field.type) and issubclass(field.type, EntityBase):
                # Nested entity: parse it recursively out of the JSON body.
                field.type = field.type.parse
                field.location = 'json'
            parser.add_argument(field)
            field_names.add(attr_name)
            # NOTE(review): for nested entities field.type is the bound
            # ``parse`` method at this point, so get_field_type returns
            # None — confirm that is intended for the swagger model.
            resource_fields[attr_name] = get_field_type(field.type)
            del attributes[attr_name]
        attributes['entity_parser'] = parser
        attributes['entity_fields'] = field_names
        attributes['resource_fields'] = resource_fields
        schema = type.__new__(cls, name, bases, attributes)
        # support swagger
        swagger.add_model(schema)
        return schema
class WrappedDict(dict):
    """Adapter letting a plain dict stand in for a request object.

    ``EntityBase.parse`` wraps explicit dict input in this class so the
    reqparse machinery can read it through ``.json``.
    """
    def __init__(self, source):
        super(WrappedDict, self).__init__(**source)
    def json(self):
        # Expose the content the way reqparse reads a request body.
        return MultiDict(self)
    def values(self):
        # NOTE(review): deliberately shadows dict.values() to return None —
        # presumably so reqparse skips form/values lookups; confirm.
        return None
@add_metaclass(DeclarativeMeta)
class EntityBase(dict):
    """Dict-backed entity with attribute-style access.

    Subclasses declare ``Field`` attributes; ``DeclarativeMeta`` compiles
    them into ``entity_parser`` / ``entity_fields`` used by :meth:`parse`.
    """
    def __getattr__(self, name):
        # Attribute reads fall back to dict item lookup.
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)
    def __setattr__(self, name, value):
        # Attribute writes are stored as dict items.
        self[name] = value
    @classmethod
    def parse(cls, req=None):
        """Parse request data into an instance of this entity.

        :param req: optional plain dict used instead of the flask request
        :rtype: EntityBase
        """
        if req is not None:
            # Wrap the dict so reqparse can treat it like a request object.
            req = WrappedDict(req)
        instance = cls() # type: EntityBase
        args = cls.entity_parser.parse_args(req)
        for field in cls.entity_fields:
            setattr(instance, field, args[field])
        # Only a ValueError result aborts with 400; any other return value
        # (including False) is accepted.
        success = instance.validate()
        if isinstance(success, ValueError):
            abort(400, message={'error': text_type(success)})
        return instance
    def validate(self):
        """Hook for whole-entity validation after field parsing.

        :rtype: (bool|ValueError)
        """
        return True
    def handle_error(self):
        # Placeholder hook; no caller visible in this module.
        pass
class Field(Argument):
    """reqparse ``Argument`` extended with a list of custom validators."""
    def __init__(self, name, *args, **kwargs):
        # ``validators`` is our extension; strip it before Argument.__init__
        # so the base class does not see an unknown keyword.
        self.validators = set()
        if 'validators' in kwargs:
            self.validators = kwargs.pop('validators')
        super(Field, self).__init__(name, *args, **kwargs)
    def parse(self, request, bundle_errors=False):
        """Parse the argument, then run each validator on the parsed value.

        A validator signals failure by returning a ValueError; on the first
        failure the error message is reported via ``found`` and the value is
        replaced with a ValueError sentinel, mirroring reqparse's internal
        error convention.
        """
        value, found = super(Field, self).parse(request, bundle_errors)
        if not isinstance(value, ValueError) and self.validators:
            for validator in self.validators:
                success = validator(value)
                if isinstance(success, ValueError):
                    # ``found`` doubles as the error payload here.
                    found = {self.name: text_type(success)}
                    value = ValueError()
                    break
        return value, found
class Validator(object):
    """Base class for field validators.

    Subclasses override :meth:`validate` and set ``help`` to a format
    template used when building error messages.
    """
    help = ""

    def __call__(self, value):
        """Make the validator callable; delegates to :meth:`validate`.

        :param value: parsed parameter value
        :rtype: bool
        """
        return self.validate(value)

    def validate(self, value):
        """Check *value*; return None when valid, ValueError when not.

        :rtype: (None|ValueError)
        """
        raise NotImplementedError()

    def handle_error(self, *args):
        """Build a ValueError from the class ``help`` template."""
        message = self.help.format(*args)
        return ValueError(message)
class CompareValidator(Validator):
    """Validator comparing a value against a fixed threshold."""

    def __init__(self, threshold):
        self.threshold = threshold

    def validate(self, value):
        """Reject None, then anything the subclass deems illegal."""
        if value is None:
            return self.handle_error(self.threshold)
        if self.illegal(value):
            return self.handle_error(self.threshold)

    def illegal(self, value):
        """Subclass hook: True when *value* violates the threshold."""
        raise NotImplementedError()
class LetterValidator(CompareValidator):
    """Upper-bound check: the value must not exceed the threshold."""
    help = "Must be less than {0}"

    def illegal(self, value):
        return self.threshold < value
class MoreValidator(CompareValidator):
    """Lower-bound check: the value must not fall below the threshold."""
    help = "Must be more than {0}"

    def illegal(self, value):
        return self.threshold > value
class MinLengthValidator(CompareValidator):
    """Minimum-length check for sized values (strings, sequences)."""
    help = "String length must be more than {0}"

    def illegal(self, value):
        return self.threshold > len(value)
class MaxLengthValidator(CompareValidator):
    """Maximum-length check for sized values (strings, sequences)."""
    help = "String length must be less than {0}"

    def illegal(self, value):
        return self.threshold < len(value)
class PrecisionValidator(CompareValidator):
    """Decimal precision check: at most ``threshold`` fractional digits."""
    help = "Must be less than {0} precision bit"

    def illegal(self, value):
        """
        :param Decimal value:
        """
        # Quantizing to the allowed precision must not change the value.
        quantum = Decimal('1.' + '0' * self.threshold)
        return value.quantize(quantum) != value
class Entity(EntityBase):
    """Example entity: one int field bounded to the range (1, 15)."""
    Name = Field('Name', location='json', type=int, validators=[MoreValidator(1), LetterValidator(15)])
    def validate(self):
        # NOTE(review): returns a bool, but EntityBase.parse only aborts on
        # a ValueError result, so False is silently accepted — confirm intent.
        return self.Name == 8
if __name__ == '__main__':
    # Manual smoke test: X mimics a request object exposing .json().
    # NOTE(review): Entity.parse wraps non-None input in WrappedDict, whose
    # __init__ does dict(**source) and so expects a mapping, not an X
    # instance — verify this demo actually runs.
    class X(object):
        def json(self):
            return {'Name': 8}
    entity2 = Entity.parse(X())
    print(entity2.Name)
    entity2.validate()
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import datetime
from helpers import unittest
from nose.plugins.attrib import attr
import luigi.notifications
from luigi.scheduler import DISABLED, DONE, FAILED, CentralPlannerScheduler
# Keep notifications in debug mode so tests do not send real emails
# (NOTE: exact effect defined in luigi.notifications — confirm).
luigi.notifications.DEBUG = True
# Default worker id for tests that only need a single worker.
WORKER = 'myworker'
@attr('scheduler')
class CentralPlannerTest(unittest.TestCase):
    def setUp(self):
        """Create a fresh scheduler and remember the real time.time for tearDown."""
        super(CentralPlannerTest, self).setUp()
        conf = self.get_scheduler_config()
        self.sch = CentralPlannerScheduler(**conf)
        self.time = time.time
    def get_scheduler_config(self):
        """Scheduler settings shared by all tests; timings are in seconds."""
        return {
            'retry_delay': 100,
            'remove_delay': 1000,
            'worker_disconnect_delay': 10,
            'disable_persist': 10,
            'disable_window': 10,
            'disable_failures': 3,
            'disable_hard_timeout': 60 * 60,
        }
    def tearDown(self):
        """Restore time.time if a test monkeypatched it via setTime()."""
        super(CentralPlannerTest, self).tearDown()
        if time.time != self.time:
            time.time = self.time
    def setTime(self, t):
        # Monkeypatch the global clock so the scheduler sees a fixed time t.
        time.time = lambda: t
    def test_dep(self):
        """B depends on A: A is served first, then B, then nothing."""
        self.sch.add_task(worker=WORKER, task_id='B', deps=('A',))
        self.sch.add_task(worker=WORKER, task_id='A')
        self.assertEqual(self.sch.get_work(worker=WORKER)['task_id'], 'A')
        self.sch.add_task(worker=WORKER, task_id='A', status=DONE)
        self.assertEqual(self.sch.get_work(worker=WORKER)['task_id'], 'B')
        self.sch.add_task(worker=WORKER, task_id='B', status=DONE)
        self.assertEqual(self.sch.get_work(worker=WORKER)['task_id'], None)
    def test_failed_dep(self):
        """A failed dependency blocks B until A eventually succeeds."""
        self.sch.add_task(worker=WORKER, task_id='B', deps=('A',))
        self.sch.add_task(worker=WORKER, task_id='A')
        self.assertEqual(self.sch.get_work(worker=WORKER)['task_id'], 'A')
        self.sch.add_task(worker=WORKER, task_id='A', status=FAILED)
        self.assertEqual(self.sch.get_work(worker=WORKER)['task_id'], None)  # can still wait and retry: TODO: do we want this?
        self.sch.add_task(worker=WORKER, task_id='A', status=DONE)
        self.assertEqual(self.sch.get_work(worker=WORKER)['task_id'], 'B')
        self.sch.add_task(worker=WORKER, task_id='B', status=DONE)
        self.assertEqual(self.sch.get_work(worker=WORKER)['task_id'], None)
    def test_broken_dep(self):
        """A non-runnable (external) dependency blocks B until marked DONE."""
        self.sch.add_task(worker=WORKER, task_id='B', deps=('A',))
        self.sch.add_task(worker=WORKER, task_id='A', runnable=False)
        self.assertEqual(self.sch.get_work(worker=WORKER)['task_id'], None)  # can still wait and retry: TODO: do we want this?
        self.sch.add_task(worker=WORKER, task_id='A', status=DONE)
        self.assertEqual(self.sch.get_work(worker=WORKER)['task_id'], 'B')
        self.sch.add_task(worker=WORKER, task_id='B', status=DONE)
        self.assertEqual(self.sch.get_work(worker=WORKER)['task_id'], None)
    def test_two_workers(self):
        """Shared dep A is run once; each worker then gets its own task."""
        # Worker X wants to build A -> B
        # Worker Y wants to build A -> C
        self.sch.add_task(worker='X', task_id='A')
        self.sch.add_task(worker='Y', task_id='A')
        self.sch.add_task(task_id='B', deps=('A',), worker='X')
        self.sch.add_task(task_id='C', deps=('A',), worker='Y')
        self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'A')
        self.assertEqual(self.sch.get_work(worker='Y')['task_id'], None)  # Worker Y is pending on A to be done
        self.sch.add_task(worker='X', task_id='A', status=DONE)
        self.assertEqual(self.sch.get_work(worker='Y')['task_id'], 'C')
        self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'B')
    def test_retry(self):
        """A failed task is not re-served until retry_delay (100s) elapses."""
        # Try to build A but fails, will retry after 100s
        self.setTime(0)
        self.sch.add_task(worker=WORKER, task_id='A')
        self.assertEqual(self.sch.get_work(worker=WORKER)['task_id'], 'A')
        self.sch.add_task(worker=WORKER, task_id='A', status=FAILED)
        for t in range(100):
            self.setTime(t)
            self.assertEqual(self.sch.get_work(worker=WORKER)['task_id'], None)
            self.sch.ping(worker=WORKER)
            if t % 10 == 0:
                self.sch.prune()
        self.setTime(101)
        self.sch.prune()
        self.assertEqual(self.sch.get_work(worker=WORKER)['task_id'], 'A')
    def test_disconnect_running(self):
        """A task running on a silent worker is reassigned after the timeout."""
        # X and Y wants to run A.
        # X starts but does not report back. Y does.
        # After some timeout, Y will build it instead
        self.setTime(0)
        self.sch.add_task(task_id='A', worker='X')
        self.sch.add_task(task_id='A', worker='Y')
        self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'A')
        for t in range(200):
            self.setTime(t)
            self.sch.ping(worker='Y')
            if t % 10 == 0:
                self.sch.prune()
        self.assertEqual(self.sch.get_work(worker='Y')['task_id'], 'A')
    def test_remove_dep(self):
        """Re-adding B with new deps replaces the old broken dependency A."""
        # X schedules A -> B, A is broken
        # Y schedules C -> B: this should remove A as a dep of B
        self.sch.add_task(task_id='A', worker='X', runnable=False)
        self.sch.add_task(task_id='B', deps=('A',), worker='X')
        # X can't build anything
        self.assertEqual(self.sch.get_work(worker='X')['task_id'], None)
        self.sch.add_task(task_id='B', deps=('C',), worker='Y')  # should reset dependencies for A
        self.sch.add_task(task_id='C', worker='Y', status=DONE)
        self.assertEqual(self.sch.get_work(worker='Y')['task_id'], 'B')
    def test_timeout(self):
        """Re-adding and completing a timed-out task must not raise."""
        # A bug that was earlier present when restarting the same flow
        self.setTime(0)
        self.sch.add_task(task_id='A', worker='X')
        self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'A')
        self.setTime(10000)
        self.sch.add_task(task_id='A', worker='Y')  # Will timeout X but not schedule A for removal
        for i in range(2000):
            self.setTime(10000 + i)
            self.sch.ping(worker='Y')
        self.sch.add_task(task_id='A', status=DONE, worker='Y')  # This used to raise an exception since A was removed
    def test_disallowed_state_changes(self):
        """A task already running on X is not handed to Y as well."""
        # Test that we can not schedule an already running task
        t = 'A'
        self.sch.add_task(task_id=t, worker='X')
        self.assertEqual(self.sch.get_work(worker='X')['task_id'], t)
        self.sch.add_task(task_id=t, worker='Y')
        self.assertEqual(self.sch.get_work(worker='Y')['task_id'], None)
    def test_two_worker_info(self):
        """get_work reports which other worker is running the wanted task."""
        # Make sure the scheduler returns info that some other worker is running task A
        self.sch.add_task(worker='X', task_id='A')
        self.sch.add_task(worker='Y', task_id='A')
        self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'A')
        r = self.sch.get_work(worker='Y')
        self.assertEqual(r['task_id'], None)  # Worker Y is pending on A to be done
        s = r['running_tasks'][0]
        self.assertEqual(s['task_id'], 'A')
        self.assertEqual(s['worker'], 'X')
    def test_assistant_get_work(self):
        """An assistant can pick up another worker's task and show as running it."""
        self.sch.add_task(worker='X', task_id='A')
        self.sch.add_worker('Y', [])
        self.assertEqual(self.sch.get_work(worker='Y', assistant=True)['task_id'], 'A')
        # check that the scheduler recognizes tasks as running
        running_tasks = self.sch.task_list('RUNNING', '')
        self.assertEqual(len(running_tasks), 1)
        self.assertEqual(list(running_tasks.keys()), ['A'])
        self.assertEqual(running_tasks['A']['worker_running'], 'Y')
    def test_assistant_get_work_external_task(self):
        """Assistants must not be handed non-runnable (external) tasks."""
        self.sch.add_task(worker='X', task_id='A', runnable=False)
        self.assertTrue(self.sch.get_work(worker='Y', assistant=True)['task_id'] is None)
    def test_task_fails_when_assistant_dies(self):
        """A task running on a disconnected assistant is marked FAILED."""
        self.setTime(0)
        self.sch.add_task(worker='X', task_id='A')
        self.sch.add_worker('Y', [])
        self.assertEqual(self.sch.get_work(worker='Y', assistant=True)['task_id'], 'A')
        self.assertEqual(list(self.sch.task_list('RUNNING', '').keys()), ['A'])
        # Y dies for 50 seconds, X stays alive
        self.setTime(50)
        self.sch.ping(worker='X')
        self.assertEqual(list(self.sch.task_list('FAILED', '').keys()), ['A'])
    def test_prune_with_live_assistant(self):
        """A done task of a dead worker is eventually pruned despite a live assistant."""
        self.setTime(0)
        self.sch.add_task(worker='X', task_id='A')
        self.sch.get_work(worker='Y', assistant=True)
        self.sch.add_task(worker='Y', task_id='A', status=DONE, assistant=True)
        # worker X stops communicating, A should be marked for removal
        self.setTime(600)
        self.sch.ping(worker='Y')
        self.sch.prune()
        # A will now be pruned
        self.setTime(2000)
        self.sch.prune()
        self.assertFalse(list(self.sch.task_list('', '')))
    def test_fail_job_from_dead_worker_with_live_assistant(self):
        """A task running on a dead worker fails even while an assistant lives."""
        self.setTime(0)
        self.sch.add_task(worker='X', task_id='A')
        self.assertEqual('A', self.sch.get_work(worker='X')['task_id'])
        self.sch.add_worker('Y', [('assistant', True)])
        self.setTime(600)
        self.sch.ping(worker='Y')
        self.sch.prune()
        self.assertEqual(['A'], list(self.sch.task_list('FAILED', '').keys()))
    def test_assistant_request_runnable_task(self):
        """A runnable task survives its owner's disconnect and goes to an assistant."""
        self.setTime(0)
        self.sch.add_task(worker='X', task_id='A', runnable=True)
        self.setTime(600)
        self.sch.prune()
        self.assertEqual('A', self.sch.get_work(worker='Y', assistant=True)['task_id'])
    def test_assistant_request_external_task(self):
        """Non-runnable (external) tasks are never handed to assistants."""
        self.sch.add_task(worker='X', task_id='A', runnable=False)
        self.assertIsNone(self.sch.get_work(worker='Y', assistant=True)['task_id'])
    def test_prune_done_tasks(self, expected=None):
        """Prune twice past the delays; only ids in `expected` must survive.

        Also reused as a helper by the test_keep_* variants, which first
        register an assistant and pass the ids they expect to be kept.
        """
        self.setTime(0)
        self.sch.add_task(worker=WORKER, task_id='A', status=DONE)
        self.sch.add_task(worker=WORKER, task_id='B', deps=['A'], status=DONE)
        self.sch.add_task(worker=WORKER, task_id='C', deps=['B'])
        self.setTime(600)
        self.sch.ping(worker='ASSISTANT')
        self.sch.prune()
        self.setTime(2000)
        self.sch.ping(worker='ASSISTANT')
        self.sch.prune()
        self.assertEqual(set(expected or ()), set(self.sch.task_list('', '').keys()))
    def test_keep_tasks_for_assistant(self):
        """With a live assistant, non-done tasks B and C survive pruning."""
        self.sch.get_work(worker='ASSISTANT', assistant=True)  # tell the scheduler this is an assistant
        self.test_prune_done_tasks(['B', 'C'])
    def test_keep_scheduler_disabled_tasks_for_assistant(self):
        """Scheduler-disabled D is kept for the assistant; worker-disabled E is pruned."""
        self.sch.get_work(worker='ASSISTANT', assistant=True)  # tell the scheduler this is an assistant
        # create a scheduler disabled task and a worker disabled task
        for i in range(10):
            self.sch.add_task(worker=WORKER, task_id='D', status=FAILED)
        self.sch.add_task(worker=WORKER, task_id='E', status=DISABLED)
        # scheduler prunes the worker disabled task
        self.assertEqual(set(['D', 'E']), set(self.sch.task_list(DISABLED, '')))
        self.test_prune_done_tasks(['B', 'C', 'D'])
    def test_keep_failed_tasks_for_assistant(self):
        """A failed task and its dependency chain survive pruning for an assistant."""
        self.sch.get_work(worker='ASSISTANT', assistant=True)  # tell the scheduler this is an assistant
        self.sch.add_task(worker=WORKER, task_id='D', status=FAILED, deps='A')
        self.test_prune_done_tasks(['A', 'B', 'C', 'D'])
    def test_scheduler_resources_none_allow_one(self):
        """With no declared resources, a task needing one unit still runs."""
        self.sch.add_task(worker='X', task_id='A', resources={'R1': 1})
        self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'A')
def test_scheduler_resources_none_disallow_two(self):
self.sch.add_task(worker='X', task_id='A', resources={'R1': 2})
self.assertFalse(self.sch.get_work(worker='X')['task_id'], 'A')
    def test_scheduler_with_insufficient_resources(self):
        """A task needing more units than declared (3 > 2) is not served."""
        self.sch.add_task(worker='X', task_id='A', resources={'R1': 3})
        self.sch.update_resources(R1=2)
        self.assertFalse(self.sch.get_work(worker='X')['task_id'])
    def test_scheduler_with_sufficient_resources(self):
        """A task runs when the declared resource pool exactly covers its need."""
        self.sch.add_task(worker='X', task_id='A', resources={'R1': 3})
        self.sch.update_resources(R1=3)
        self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'A')
    def test_scheduler_with_resources_used(self):
        """A resource consumed by a running task is unavailable to others."""
        self.sch.add_task(worker='X', task_id='A', resources={'R1': 1})
        self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'A')
        self.sch.add_task(worker='Y', task_id='B', resources={'R1': 1})
        self.sch.update_resources(R1=1)
        self.assertFalse(self.sch.get_work(worker='Y')['task_id'])
    def test_scheduler_overprovisioned_on_other_resource(self):
        """Overcommitting R1 (2 used of 1) must not block a task needing only R2."""
        self.sch.add_task(worker='X', task_id='A', resources={'R1': 2})
        self.sch.update_resources(R1=2)
        self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'A')
        self.sch.add_task(worker='Y', task_id='B', resources={'R2': 2})
        self.sch.update_resources(R1=1, R2=2)
        self.assertEqual(self.sch.get_work(worker='Y')['task_id'], 'B')
    def test_scheduler_with_priority_and_competing_resources(self):
        """R is saved for high-priority B; Y still gets resource-free D."""
        self.sch.add_task(worker='X', task_id='A')
        self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'A')
        self.sch.add_task(worker='X', task_id='B', resources={'R': 1}, priority=10)
        self.sch.add_task(worker='Y', task_id='C', resources={'R': 1}, priority=1)
        self.sch.update_resources(R=1)
        self.assertFalse(self.sch.get_work(worker='Y')['task_id'])
        self.sch.add_task(worker='Y', task_id='D', priority=0)
        self.assertEqual(self.sch.get_work(worker='Y')['task_id'], 'D')
    def test_do_not_lock_resources_when_not_ready(self):
        """ Test to make sure that resources won't go unused waiting on workers """
        self.sch.add_task(worker='X', task_id='A', priority=10)
        self.sch.add_task(worker='X', task_id='B', resources={'R': 1}, priority=5)
        self.sch.add_task(worker='Y', task_id='C', resources={'R': 1}, priority=1)
        self.sch.update_resources(R=1)
        # X can only run one task at a time, so R need not be held for B yet.
        self.sch.add_worker('X', [('workers', 1)])
        self.assertEqual('C', self.sch.get_work(worker='Y')['task_id'])
    def test_lock_resources_when_one_of_multiple_workers_is_ready(self):
        """With two X slots, R is held for higher-priority B and Y gets nothing."""
        self.sch.add_task(worker='X', task_id='A', priority=10)
        self.sch.add_task(worker='X', task_id='B', resources={'R': 1}, priority=5)
        self.sch.add_task(worker='Y', task_id='C', resources={'R': 1}, priority=1)
        self.sch.update_resources(R=1)
        self.sch.add_worker('X', [('workers', 2)])
        self.sch.add_worker('Y', [])
        self.assertFalse(self.sch.get_work(worker='Y')['task_id'])
    def test_do_not_lock_resources_while_running_higher_priority(self):
        """ Test to make sure that resources won't go unused waiting on workers """
        self.sch.add_task(worker='X', task_id='A', priority=10)
        self.sch.add_task(worker='X', task_id='B', resources={'R': 1}, priority=5)
        self.sch.add_task(worker='Y', task_id='C', resources={'R': 1}, priority=1)
        self.sch.update_resources(R=1)
        self.sch.add_worker('X', [('workers', 1)])
        # X's single slot is busy with A, so R is released to Y's C.
        self.assertEqual('A', self.sch.get_work(worker='X')['task_id'])
        self.assertEqual('C', self.sch.get_work(worker='Y')['task_id'])
    def test_lock_resources_while_running_lower_priority(self):
        """ Make sure resources will be made available while working on lower priority tasks """
        self.sch.add_task(worker='X', task_id='A', priority=4)
        self.assertEqual('A', self.sch.get_work(worker='X')['task_id'])
        self.sch.add_task(worker='X', task_id='B', resources={'R': 1}, priority=5)
        self.sch.add_task(worker='Y', task_id='C', resources={'R': 1}, priority=1)
        self.sch.update_resources(R=1)
        self.sch.add_worker('X', [('workers', 1)])
        # R stays reserved for higher-priority B, so Y gets nothing.
        self.assertFalse(self.sch.get_work(worker='Y')['task_id'])
    def test_lock_resources_for_second_worker(self):
        """X's second slot must not starve Y's higher-priority C of R."""
        self.sch.add_task(worker='X', task_id='A', resources={'R': 1})
        self.sch.add_task(worker='X', task_id='B', resources={'R': 1})
        self.sch.add_task(worker='Y', task_id='C', resources={'R': 1}, priority=10)
        # NOTE(review): add_worker is called with dicts here but with lists
        # of tuples elsewhere in this class — confirm both are accepted.
        self.sch.add_worker('X', {'workers': 2})
        self.sch.add_worker('Y', {'workers': 1})
        self.sch.update_resources(R=2)
        self.assertEqual('A', self.sch.get_work(worker='X')['task_id'])
        self.assertFalse(self.sch.get_work(worker='X')['task_id'])
    def test_can_work_on_lower_priority_while_waiting_for_resources(self):
        """While B waits on R, its worker can still run resource-free C."""
        self.sch.add_task(worker='X', task_id='A', resources={'R': 1}, priority=0)
        self.assertEqual('A', self.sch.get_work(worker='X')['task_id'])
        self.sch.add_task(worker='Y', task_id='B', resources={'R': 1}, priority=10)
        self.sch.add_task(worker='Y', task_id='C', priority=0)
        self.sch.update_resources(R=1)
        self.assertEqual('C', self.sch.get_work(worker='Y')['task_id'])
    def test_priority_update_with_pruning(self):
        """Priorities propagate through deps re-added after A was pruned away."""
        self.setTime(0)
        self.sch.add_task(task_id='A', worker='X')
        self.setTime(50)  # after worker disconnects
        self.sch.prune()
        self.sch.add_task(task_id='B', deps=['A'], worker='X')
        self.setTime(2000)  # after remove for task A
        self.sch.prune()
        # Here task A that B depends on is missing
        self.sch.add_task(worker=WORKER, task_id='C', deps=['B'], priority=100)
        self.sch.add_task(worker=WORKER, task_id='B', deps=['A'])
        self.sch.add_task(worker=WORKER, task_id='A')
        self.sch.add_task(worker=WORKER, task_id='D', priority=10)
        self.check_task_order('ABCD')
    def test_update_resources(self):
        """Lowering a task's resource demand makes it schedulable again."""
        self.sch.add_task(worker=WORKER, task_id='A', deps=['B'])
        self.sch.add_task(worker=WORKER, task_id='B', resources={'r': 2})
        self.sch.update_resources(r=1)
        # B requires too many resources, we can't schedule
        self.check_task_order([])
        self.sch.add_task(worker=WORKER, task_id='B', resources={'r': 1})
        # now we have enough resources
        self.check_task_order(['B', 'A'])
    # TODO(review): method name misspells "handle"; renaming would change the
    # public test name, so it is only flagged here.
    def test_hendle_multiple_resources(self):
        """While A holds r1+r2, only C (needing the remaining r1) can run."""
        self.sch.add_task(worker=WORKER, task_id='A', resources={'r1': 1, 'r2': 1})
        self.sch.add_task(worker=WORKER, task_id='B', resources={'r1': 1, 'r2': 1})
        self.sch.add_task(worker=WORKER, task_id='C', resources={'r1': 1})
        self.sch.update_resources(r1=2, r2=1)
        self.assertEqual('A', self.sch.get_work(worker=WORKER)['task_id'])
        self.check_task_order('C')
    def test_single_resource_lock(self):
        """Remaining r units are reserved for high-priority B, blocking C."""
        self.sch.add_task(worker='X', task_id='A', resources={'r': 1})
        self.assertEqual('A', self.sch.get_work(worker='X')['task_id'])
        self.sch.add_task(worker=WORKER, task_id='B', resources={'r': 2}, priority=10)
        self.sch.add_task(worker=WORKER, task_id='C', resources={'r': 1})
        self.sch.update_resources(r=2)
        # Should wait for 2 units of r to be available for B before scheduling C
        self.check_task_order([])
    def test_no_lock_if_too_many_resources_required(self):
        """A task that can never fit (r=2 of 1) must not block feasible B."""
        self.sch.add_task(worker=WORKER, task_id='A', resources={'r': 2}, priority=10)
        self.sch.add_task(worker=WORKER, task_id='B', resources={'r': 1})
        self.sch.update_resources(r=1)
        self.check_task_order('B')
    def test_multiple_resources_lock(self):
        """Both r1 and r2 are reserved for high-priority A, blocking B and C."""
        self.sch.add_task(worker='X', task_id='A', resources={'r1': 1, 'r2': 1}, priority=10)
        self.sch.add_task(worker=WORKER, task_id='B', resources={'r2': 1})
        self.sch.add_task(worker=WORKER, task_id='C', resources={'r1': 1})
        self.sch.update_resources(r1=1, r2=1)
        # should preserve both resources for worker 'X'
        self.check_task_order([])
    def test_multiple_resources_no_lock(self):
        """C's r2 unit is spare even with B pending, so C is not blocked."""
        self.sch.add_task(worker=WORKER, task_id='A', resources={'r1': 1}, priority=10)
        self.sch.add_task(worker=WORKER, task_id='B', resources={'r1': 1, 'r2': 1}, priority=10)
        self.sch.add_task(worker=WORKER, task_id='C', resources={'r2': 1})
        self.sch.update_resources(r1=1, r2=2)
        self.assertEqual('A', self.sch.get_work(worker=WORKER)['task_id'])
        # C doesn't block B, so it can go first
        self.check_task_order('C')
    def check_task_order(self, order):
        """Drain the scheduler, asserting tasks are served exactly in `order`
        (completing each one), and that nothing is served afterwards."""
        for expected_id in order:
            self.assertEqual(self.sch.get_work(worker=WORKER)['task_id'], expected_id)
            self.sch.add_task(worker=WORKER, task_id=expected_id, status=DONE)
        self.assertEqual(self.sch.get_work(worker=WORKER)['task_id'], None)
    def test_priorities(self):
        """Tasks are served strictly by descending priority."""
        self.sch.add_task(worker=WORKER, task_id='A', priority=10)
        self.sch.add_task(worker=WORKER, task_id='B', priority=5)
        self.sch.add_task(worker=WORKER, task_id='C', priority=15)
        self.sch.add_task(worker=WORKER, task_id='D', priority=9)
        self.check_task_order(['C', 'A', 'D', 'B'])
    def test_priorities_default_and_negative(self):
        """Default priority sorts between positive and negative priorities."""
        self.sch.add_task(worker=WORKER, task_id='A', priority=10)
        self.sch.add_task(worker=WORKER, task_id='B')
        self.sch.add_task(worker=WORKER, task_id='C', priority=15)
        self.sch.add_task(worker=WORKER, task_id='D', priority=-20)
        self.sch.add_task(worker=WORKER, task_id='E', priority=1)
        self.check_task_order(['C', 'A', 'E', 'B', 'D'])
    def test_priorities_and_dependencies(self):
        """Low-priority dep Z runs first because high-priority A needs it."""
        self.sch.add_task(worker=WORKER, task_id='A', deps=['Z'], priority=10)
        self.sch.add_task(worker=WORKER, task_id='B', priority=5)
        self.sch.add_task(worker=WORKER, task_id='C', deps=['Z'], priority=3)
        self.sch.add_task(worker=WORKER, task_id='D', priority=2)
        self.sch.add_task(worker=WORKER, task_id='Z', priority=1)
        self.check_task_order(['Z', 'A', 'B', 'C', 'D'])
    def test_priority_update_dependency_after_scheduling(self):
        """C's priority (10) propagates up its dep chain B -> A, beating D (6)."""
        self.sch.add_task(worker=WORKER, task_id='A', priority=1)
        self.sch.add_task(worker=WORKER, task_id='B', priority=5, deps=['A'])
        self.sch.add_task(worker=WORKER, task_id='C', priority=10, deps=['B'])
        self.sch.add_task(worker=WORKER, task_id='D', priority=6)
        self.check_task_order(['A', 'B', 'C', 'D'])
    def test_disable(self):
        """Three failures (disable_failures) move a task to DISABLED and stop serving it."""
        self.sch.add_task(worker=WORKER, task_id='A')
        self.sch.add_task(worker=WORKER, task_id='A', status=FAILED)
        self.sch.add_task(worker=WORKER, task_id='A', status=FAILED)
        self.sch.add_task(worker=WORKER, task_id='A', status=FAILED)
        # should be disabled at this point
        self.assertEqual(len(self.sch.task_list('DISABLED', '')), 1)
        self.assertEqual(len(self.sch.task_list('FAILED', '')), 0)
        self.sch.add_task(worker=WORKER, task_id='A')
        self.assertEqual(self.sch.get_work(worker=WORKER)['task_id'], None)
    def test_disable_and_reenable(self):
        """re_enable_task moves a disabled task back to FAILED and serves it again."""
        self.sch.add_task(worker=WORKER, task_id='A')
        self.sch.add_task(worker=WORKER, task_id='A', status=FAILED)
        self.sch.add_task(worker=WORKER, task_id='A', status=FAILED)
        self.sch.add_task(worker=WORKER, task_id='A', status=FAILED)
        # should be disabled at this point
        self.assertEqual(len(self.sch.task_list('DISABLED', '')), 1)
        self.assertEqual(len(self.sch.task_list('FAILED', '')), 0)
        self.sch.re_enable_task('A')
        # should be enabled at this point
        self.assertEqual(len(self.sch.task_list('DISABLED', '')), 0)
        self.assertEqual(len(self.sch.task_list('FAILED', '')), 1)
        self.sch.add_task(worker=WORKER, task_id='A')
        self.assertEqual(self.sch.get_work(worker=WORKER)['task_id'], 'A')
def test_disable_and_reenable_and_disable_again(self):
    """After a manual re-enable it takes a fresh set of failures to disable."""
    def status_counts():
        # (number of DISABLED tasks, number of FAILED tasks)
        return (len(self.sch.task_list('DISABLED', '')),
                len(self.sch.task_list('FAILED', '')))

    self.sch.add_task(worker=WORKER, task_id='A')
    for _ in range(3):
        self.sch.add_task(worker=WORKER, task_id='A', status=FAILED)
    # Three failures should have tripped the disable threshold.
    self.assertEqual((1, 0), status_counts())
    self.sch.re_enable_task('A')
    # Manual re-enable restores the FAILED status.
    self.assertEqual((0, 1), status_counts())
    self.sch.add_task(worker=WORKER, task_id='A')
    self.assertEqual('A', self.sch.get_work(worker=WORKER)['task_id'])
    self.sch.add_task(worker=WORKER, task_id='A', status=FAILED)
    # A single new failure must not disable the task again.
    self.assertEqual((0, 1), status_counts())
    self.sch.add_task(worker=WORKER, task_id='A')
    self.assertEqual('A', self.sch.get_work(worker=WORKER)['task_id'])
    self.sch.add_task(worker=WORKER, task_id='A', status=FAILED)
    self.sch.add_task(worker=WORKER, task_id='A', status=FAILED)
    # The failure threshold has been reached again.
    self.assertEqual((1, 0), status_counts())
    self.sch.add_task(worker=WORKER, task_id='A')
    self.assertIsNone(self.sch.get_work(worker=WORKER)['task_id'])
def test_disable_and_done(self):
    """A successful run re-enables a task that had been disabled."""
    self.sch.add_task(worker=WORKER, task_id='A')
    for _ in range(3):
        self.sch.add_task(worker=WORKER, task_id='A', status=FAILED)
    # Three failures should have tripped the disable threshold.
    self.assertEqual(1, len(self.sch.task_list('DISABLED', '')))
    self.assertEqual(0, len(self.sch.task_list('FAILED', '')))
    self.sch.add_task(worker=WORKER, task_id='A', status=DONE)
    # Completing the task clears the disabled state.
    self.assertEqual(0, len(self.sch.task_list('DISABLED', '')))
    self.assertEqual(1, len(self.sch.task_list('DONE', '')))
    self.sch.add_task(worker=WORKER, task_id='A')
    self.assertEqual('A', self.sch.get_work(worker=WORKER)['task_id'])
def test_disable_by_worker(self):
    """A worker-set DISABLED status is cleared by a plain re-add."""
    self.sch.add_task(worker=WORKER, task_id='A', status=DISABLED)
    self.assertEqual(1, len(self.sch.task_list('DISABLED', '')))
    self.sch.add_task(worker=WORKER, task_id='A')
    # Re-adding without an explicit status re-enables the task.
    self.assertEqual(0, len(self.sch.task_list('DISABLED', '')))
    self.sch.add_task(worker=WORKER, task_id='A')
    self.assertEqual('A', self.sch.get_work(worker=WORKER)['task_id'])
def test_task_list_beyond_limit(self):
    """Above max_shown_tasks, task_list returns only a count unless asked not to."""
    scheduler = CentralPlannerScheduler(max_shown_tasks=3)
    for task_id in 'ABCD':
        scheduler.add_task(worker=WORKER, task_id=task_id)
    # Passing False disables the cap and yields the full listing.
    full_listing = scheduler.task_list('PENDING', '', False)
    self.assertEqual(set('ABCD'), set(full_listing.keys()))
    # With the cap active, only a task count comes back.
    self.assertEqual({'num_tasks': 4}, scheduler.task_list('PENDING', ''))
def test_task_list_within_limit(self):
    """task_list returns full details when the count fits max_shown_tasks."""
    scheduler = CentralPlannerScheduler(max_shown_tasks=4)
    for task_id in 'ABCD':
        scheduler.add_task(worker=WORKER, task_id=task_id)
    self.assertEqual(set('ABCD'),
                     set(scheduler.task_list('PENDING', '').keys()))
def test_task_lists_some_beyond_limit(self):
    """The shown-task limit applies per status: small lists stay detailed."""
    scheduler = CentralPlannerScheduler(max_shown_tasks=3)
    for task_id in 'ABCD':
        scheduler.add_task(worker=WORKER, task_id=task_id, status=DONE)
    for task_id in 'EFG':
        scheduler.add_task(worker=WORKER, task_id=task_id)
    # Three pending tasks fit the limit; four DONE tasks collapse to a count.
    self.assertEqual(set('EFG'),
                     set(scheduler.task_list('PENDING', '').keys()))
    self.assertEqual({'num_tasks': 4}, scheduler.task_list('DONE', ''))
def test_task_list_filter_by_search(self):
    """The search parameter filters task ids by substring."""
    self.sch.add_task(worker=WORKER, task_id='test_match_task')
    self.sch.add_task(worker=WORKER, task_id='test_filter_task')
    matching = self.sch.task_list('PENDING', '', search='match')
    self.assertEqual(['test_match_task'], list(matching.keys()))
def test_task_list_filter_by_multiple_search_terms(self):
    """Whitespace-separated search terms must all match (AND semantics)."""
    for task_id in ('abcd', 'abd', 'acd', 'ad', 'bc'):
        self.sch.add_task(worker=WORKER, task_id=task_id)
    matching = self.sch.task_list('PENDING', '', search='b c')
    # Only ids containing both 'b' and 'c' survive the filter.
    self.assertEqual({'abcd', 'bc'}, set(matching.keys()))
def test_search_results_beyond_limit(self):
    """max_shown_tasks applies to the filtered result set, not the raw count."""
    scheduler = CentralPlannerScheduler(max_shown_tasks=3)
    for suffix in 'abcd':
        scheduler.add_task(worker=WORKER, task_id='task_' + suffix)
    # Four matches for 'a' exceed the limit; one match for '_a' does not.
    self.assertEqual({'num_tasks': 4},
                     scheduler.task_list('PENDING', '', search='a'))
    self.assertEqual(['task_a'],
                     list(scheduler.task_list('PENDING', '', search='_a').keys()))
def test_priority_update_dependency_chain(self):
    """Priority propagates down through a whole dependency chain."""
    add = self.sch.add_task
    add(worker=WORKER, task_id='A', priority=10, deps=['B'])
    add(worker=WORKER, task_id='B', priority=5, deps=['C'])
    add(worker=WORKER, task_id='C', priority=1)
    add(worker=WORKER, task_id='D', priority=6)
    # B and C inherit A's priority (10), so the chain beats D (6).
    self.check_task_order(['C', 'B', 'A', 'D'])
def test_priority_no_decrease_with_multiple_updates(self):
    """A dependency keeps the highest inherited priority it has seen."""
    add = self.sch.add_task
    add(worker=WORKER, task_id='A', priority=1)
    add(worker=WORKER, task_id='B', priority=10, deps=['A'])
    add(worker=WORKER, task_id='C', priority=5, deps=['A'])
    add(worker=WORKER, task_id='D', priority=6)
    # The later, lower-priority dependent C must not pull A back down.
    self.check_task_order(['A', 'B', 'D', 'C'])
def test_unique_tasks(self):
    """n_unique_pending counts tasks only this worker can run."""
    for task_id in 'ABC':
        self.sch.add_task(worker=WORKER, task_id=task_id)
    self.sch.add_task(worker=WORKER + "_2", task_id='B')
    response = self.sch.get_work(worker=WORKER)
    # B is shared with the second worker, so only two tasks are unique.
    self.assertEqual(3, response['n_pending_tasks'])
    self.assertEqual(2, response['n_unique_pending'])
def test_pending_downstream_disable(self):
    """Tasks downstream of a disabled dependency are not counted as pending."""
    self.sch.add_task(worker=WORKER, task_id='A', status=DISABLED)
    self.sch.add_task(worker=WORKER, task_id='B', deps=('A',))
    self.sch.add_task(worker=WORKER, task_id='C', deps=('B',))
    response = self.sch.get_work(worker=WORKER)
    self.assertIsNone(response['task_id'])
    self.assertEqual(0, response['n_pending_tasks'])
    self.assertEqual(0, response['n_unique_pending'])
def test_pending_downstream_failure(self):
    """Tasks downstream of a failed dependency still count as pending."""
    self.sch.add_task(worker=WORKER, task_id='A', status=FAILED)
    self.sch.add_task(worker=WORKER, task_id='B', deps=('A',))
    self.sch.add_task(worker=WORKER, task_id='C', deps=('B',))
    response = self.sch.get_work(worker=WORKER)
    # Nothing is runnable right now, but B and C remain pending.
    self.assertIsNone(response['task_id'])
    self.assertEqual(2, response['n_pending_tasks'])
    self.assertEqual(2, response['n_unique_pending'])
def test_task_list_no_deps(self):
    """Tasks without dependencies omit the 'deps' key in task_list output."""
    self.sch.add_task(worker=WORKER, task_id='B', deps=('A',))
    self.sch.add_task(worker=WORKER, task_id='A')
    listing = self.sch.task_list('PENDING', '')
    self.assertNotIn('deps', listing['A'])
def test_task_first_failure_time(self):
    """add_failure stamps first_failure_time with the current wall clock."""
    self.sch.add_task(worker=WORKER, task_id='A')
    task = self.sch._state.get_task('A')
    self.assertIsNone(task.failures.first_failure_time)
    before = time.time()
    task.add_failure()
    after = time.time()
    # The recorded timestamp must fall inside the bracketing window.
    self.assertLessEqual(before, task.failures.first_failure_time)
    self.assertGreaterEqual(after, task.failures.first_failure_time)
def test_task_first_failure_time_remains_constant(self):
    """Only the first failure sets first_failure_time; later ones keep it."""
    self.sch.add_task(worker=WORKER, task_id='A')
    task = self.sch._state.get_task('A')
    self.assertIsNone(task.failures.first_failure_time)
    task.add_failure()
    recorded = task.failures.first_failure_time
    task.add_failure()
    self.assertEqual(recorded, task.failures.first_failure_time)
def test_task_has_excessive_failures(self):
    """Failures count as excessive only once they span the failure window."""
    self.sch.add_task(worker=WORKER, task_id='A')
    task = self.sch._state.get_task('A')
    self.assertIsNone(task.failures.first_failure_time)
    self.assertFalse(task.has_excessive_failures())
    task.add_failure()
    # A fresh failure is not yet excessive.
    self.assertFalse(task.has_excessive_failures())
    # Pretend the first failure happened two hours ago.
    task.failures.first_failure_time -= 2 * 60 * 60
    self.assertTrue(task.has_excessive_failures())
def test_quadratic_behavior(self):
    """Test that get_work is not taking linear amount of time.

    This is of course impossible to test, however, doing reasonable
    assumptions about hardware. This time should finish in a timely
    manner.
    """
    # For 10000 it takes almost 1 second on my laptop. Prior to these
    # changes it was being slow already at NUM_TASKS=300
    NUM_TASKS = 10000
    task_ids = [str(i) for i in range(NUM_TASKS)]
    for task_id in task_ids:
        self.sch.add_task(worker=task_id, task_id=task_id, resources={})
    for task_id in task_ids:
        self.assertEqual(task_id,
                         self.sch.get_work(worker=task_id)['task_id'])
        self.sch.add_task(worker=task_id, task_id=task_id, status=DONE)
def test_get_work_speed(self):
    """Test that get_work is fast for few workers and many DONEs.

    In #986, @daveFNbuck reported that he got a slowdown.
    """
    # This took almost 4 minutes without optimization.
    # Now it takes 10 seconds on my machine.
    NUM_PENDING = 1000
    NUM_DONE = 200000
    assert NUM_DONE >= NUM_PENDING
    for i in range(NUM_PENDING):
        self.sch.add_task(worker=WORKER, task_id=str(i), resources={})
    for i in range(NUM_PENDING, NUM_DONE):
        self.sch.add_task(worker=WORKER, task_id=str(i), status=DONE)
    for _ in range(NUM_PENDING):
        # Every handout must be one of the still-pending ids.
        picked = int(self.sch.get_work(worker=WORKER)['task_id'])
        self.assertTrue(0 <= picked < NUM_PENDING)
        self.sch.add_task(worker=WORKER, task_id=str(picked), status=DONE)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCircuitPeeringsOperations(object):
    """ExpressRouteCircuitPeeringsOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2020_04_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def _delete_initial(
        self,
        resource_group_name,  # type: str
        circuit_name,  # type: str
        peering_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Send the initial DELETE request; long-running polling is handled by begin_delete."""
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Map well-known HTTP statuses to azure-core exceptions; callers may
        # extend or override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        accept = "application/json"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200/202/204 are all acceptable outcomes for an LRO delete.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'}  # type: ignore

    def begin_delete(
        self,
        resource_group_name,  # type: str
        circuit_name,  # type: str
        peering_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified peering from the specified express route circuit.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the express route circuit.
        :type circuit_name: str
        :param peering_name: The name of the peering.
        :type peering_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a saved token.
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                circuit_name=circuit_name,
                peering_name=peering_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # A DELETE has no body to deserialize; just honor a custom cls.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'}  # type: ignore

    def get(
        self,
        resource_group_name,  # type: str
        circuit_name,  # type: str
        peering_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.ExpressRouteCircuitPeering"
        """Gets the specified peering for the express route circuit.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the express route circuit.
        :type circuit_name: str
        :param peering_name: The name of the peering.
        :type peering_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ExpressRouteCircuitPeering, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitPeering
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitPeering"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'}  # type: ignore

    def _create_or_update_initial(
        self,
        resource_group_name,  # type: str
        circuit_name,  # type: str
        peering_name,  # type: str
        peering_parameters,  # type: "_models.ExpressRouteCircuitPeering"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.ExpressRouteCircuitPeering"
        """Send the initial PUT request; long-running polling is handled by begin_create_or_update."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitPeering"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(peering_parameters, 'ExpressRouteCircuitPeering')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 = updated existing peering, 201 = created a new one.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if response.status_code == 200:
            deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'}  # type: ignore

    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        circuit_name,  # type: str
        peering_name,  # type: str
        peering_parameters,  # type: "_models.ExpressRouteCircuitPeering"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.ExpressRouteCircuitPeering"]
        """Creates or updates a peering in the specified express route circuits.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the express route circuit.
        :type circuit_name: str
        :param peering_name: The name of the peering.
        :type peering_name: str
        :param peering_parameters: Parameters supplied to the create or update express route circuit
         peering operation.
        :type peering_parameters: ~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitPeering
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either ExpressRouteCircuitPeering or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitPeering]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitPeering"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a saved token.
        if cont_token is None:
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                circuit_name=circuit_name,
                peering_name=peering_name,
                peering_parameters=peering_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'}  # type: ignore

    def list(
        self,
        resource_group_name,  # type: str
        circuit_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.ExpressRouteCircuitPeeringListResult"]
        """Gets all peerings in a specified express route circuit.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the express route circuit.
        :type circuit_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ExpressRouteCircuitPeeringListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitPeeringListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitPeeringListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # First page: build the URL from the operation metadata.
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Subsequent pages: the service-provided next_link is complete.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            deserialized = self._deserialize('ExpressRouteCircuitPeeringListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings'}  # type: ignore
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.http import Http404
from django.utils.safestring import mark_safe
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import messages
from horizon import tables
from horizon_bsn.api import neutron
from openstack_dashboard.api import heat
import logging
LOG = logging.getLogger(__name__)
class RemoveTemplateAction(tables.LinkAction):
    """Table action linking to the network-template removal dialog."""
    name = "remove"
    url = "horizon:project:connections:network_template:remove"
    classes = ("ajax-modal", "btn-danger")
    verbose_name = _("Remove Network Template Instance")

    def allowed(self, request, datum):
        # Only offer removal when the tenant actually has a template
        # assignment; lookup errors are logged and treated as "not allowed".
        try:
            assignment = neutron.networktemplateassignment_get(
                request, request.user.tenant_id)
            if assignment:
                return True
        except Exception as exc:
            LOG.debug(exc)
        return False
class ApplyTemplateAction(tables.LinkAction):
    """Table action linking to the template-selection dialog."""
    name = "apply"
    verbose_name = _("Apply Network Template")
    url = "horizon:project:connections:network_template:select"
    classes = ("ajax-modal", "btn-create")

    def allowed(self, request, datum):
        # Applying a template only makes sense while the tenant has no
        # assignment; the lookup raising is taken to mean "none yet".
        try:
            if neutron.networktemplateassignment_get(request,
                                                     request.user.tenant_id):
                return False
        except Exception:
            return True
        # NOTE(review): falls through to None (falsy) when the lookup
        # returns an empty assignment without raising — preserved as-is.
class StacksUpdateRow(tables.Row):
    """Ajax-refreshed table row for a heat stack backing a network template."""
    ajax = True

    def can_be_selected(self, datum):
        # Deleted stacks may linger in the listing briefly; keep them
        # unselectable so users cannot act on them.
        return datum.stack_status != 'DELETE_COMPLETE'

    def get_data(self, request, stack_id):
        """Build the row dict for ``stack_id``.

        :param request: the current request, passed through to the API calls.
        :param stack_id: heat stack id whose row is being refreshed.
        :returns: dict of row fields consumed by the table.
        :raises Http404: once the stack is fully deleted, so the ajax table
            drops the row; the template assignment is cleaned up on the way
            out (best effort — cleanup errors are reported but do not stop
            the 404 from propagating).
        """
        assignment = None
        try:
            assignments = neutron.networktemplateassignment_list(
                request, **{'stack_id': stack_id})
            if not assignments:
                # Fixed typo in the error message (was "associationg").
                raise Exception('Network template association not found.')
            assignment = assignments[0]
            template = neutron.networktemplate_get(request,
                                                   assignment.template_id)
            stack = heat.stack_get(request, stack_id)
            resources = heat.resources_list(request, stack.stack_name)
            if stack.stack_status == 'DELETE_COMPLETE':
                # returning 404 to the ajax call removes the
                # row from the table on the ui
                raise Http404
            rowdata = {
                'template_id': assignment.template_id,
                'template_name': template.name,
                'stack_id': stack_id,
                'heat_stack_name': stack.stack_name,
                'description': stack.description,
                'status': stack.status,
                'stack_status': stack.stack_status,
                'stack_status_reason': stack.stack_status_reason,
                'resources': mark_safe('<br>'.join([
                    ('%s (%s)' % (r.resource_name,
                                  r.resource_type)).replace(' ', ' ')
                    for r in resources]))
            }
            return rowdata
        except Http404:
            try:
                # remove corresponding network template
                if assignment:
                    neutron.networktemplateassignment_delete(
                        request, assignment.id)
                msg = _('Removed template association for stack_id %s') % \
                    stack_id
                LOG.debug(msg)
                messages.success(request, msg)
            except Exception as e:
                msg = _('Network template association removal failed '
                        'due to %s') % e
                LOG.error(msg)
                messages.error(request, msg)
            # Always re-raise so the ajax layer removes the row.
            raise
        # NOTE: the previous `except Exception: raise` no-op handler was
        # removed — other exceptions propagate unchanged either way.
class NetworkTemplateTable(tables.DataTable):
    """Table of Heat stacks created from network templates.

    The hidden boolean ``status`` column drives ajax polling (rows keep
    refreshing until the status maps to True/False via STATUS_CHOICES);
    ``stack_status`` is the user-visible, translated status label.
    """
    # Maps the coarse stack ``status`` to a "polling finished" flag.
    STATUS_CHOICES = (
        ("Complete", True),
        ("Failed", False),
    )
    # Translatable display labels for every raw Heat stack_status value.
    STACK_STATUS_DISPLAY_CHOICES = (
        ("init_in_progress", pgettext_lazy("current status of stack",
                                           u"Init In Progress")),
        ("init_complete", pgettext_lazy("current status of stack",
                                        u"Init Complete")),
        ("init_failed", pgettext_lazy("current status of stack",
                                      u"Init Failed")),
        ("create_in_progress", pgettext_lazy("current status of stack",
                                             u"Create In Progress")),
        ("create_complete", pgettext_lazy("current status of stack",
                                          u"Create Complete")),
        ("create_failed", pgettext_lazy("current status of stack",
                                        u"Create Failed")),
        ("delete_in_progress", pgettext_lazy("current status of stack",
                                             u"Delete In Progress")),
        ("delete_complete", pgettext_lazy("current status of stack",
                                          u"Delete Complete")),
        ("delete_failed", pgettext_lazy("current status of stack",
                                        u"Delete Failed")),
        ("update_in_progress", pgettext_lazy("current status of stack",
                                             u"Update In Progress")),
        ("update_complete", pgettext_lazy("current status of stack",
                                          u"Update Complete")),
        ("update_failed", pgettext_lazy("current status of stack",
                                        u"Update Failed")),
        ("rollback_in_progress", pgettext_lazy("current status of stack",
                                               u"Rollback In Progress")),
        ("rollback_complete", pgettext_lazy("current status of stack",
                                            u"Rollback Complete")),
        ("rollback_failed", pgettext_lazy("current status of stack",
                                          u"Rollback Failed")),
        ("suspend_in_progress", pgettext_lazy("current status of stack",
                                              u"Suspend In Progress")),
        ("suspend_complete", pgettext_lazy("current status of stack",
                                           u"Suspend Complete")),
        ("suspend_failed", pgettext_lazy("current status of stack",
                                         u"Suspend Failed")),
        ("resume_in_progress", pgettext_lazy("current status of stack",
                                             u"Resume In Progress")),
        ("resume_complete", pgettext_lazy("current status of stack",
                                          u"Resume Complete")),
        ("resume_failed", pgettext_lazy("current status of stack",
                                        u"Resume Failed")),
        ("adopt_in_progress", pgettext_lazy("current status of stack",
                                            u"Adopt In Progress")),
        ("adopt_complete", pgettext_lazy("current status of stack",
                                         u"Adopt Complete")),
        ("adopt_failed", pgettext_lazy("current status of stack",
                                       u"Adopt Failed")),
        ("snapshot_in_progress", pgettext_lazy("current status of stack",
                                               u"Snapshot In Progress")),
        ("snapshot_complete", pgettext_lazy("current status of stack",
                                            u"Snapshot Complete")),
        ("snapshot_failed", pgettext_lazy("current status of stack",
                                          u"Snapshot Failed")),
        ("check_in_progress", pgettext_lazy("current status of stack",
                                            u"Check In Progress")),
        ("check_complete", pgettext_lazy("current status of stack",
                                         u"Check Complete")),
        ("check_failed", pgettext_lazy("current status of stack",
                                       u"Check Failed")),
    )
    template_name = tables.Column("template_name",
                                  verbose_name=_("Template Name"))
    # stack_id is the row key (see get_object_id) but not shown to users.
    stack_id = tables.Column("stack_id", hidden=True)
    heat_stack_name = tables.Column("heat_stack_name",
                                    verbose_name=_("Heat Stack Name"))
    description = tables.Column("description", verbose_name=_("Description"))
    # status = tables.Column("status", verbose_name=_("Status"))
    resources = tables.Column("resources", verbose_name=_("Resources"))
    # Hidden status column used solely to drive the ajax row refresh.
    status = tables.Column("status",
                           hidden=True,
                           status=True,
                           status_choices=STATUS_CHOICES)
    stack_status = tables.Column("stack_status",
                                 verbose_name=_("Status"),
                                 display_choices=STACK_STATUS_DISPLAY_CHOICES)

    def get_object_id(self, template_stack):
        # Rows are dicts built by StacksUpdateRow.get_data; key by stack id.
        return template_stack['stack_id']

    class Meta(object):
        multi_select = False
        name = "networktemplate"
        verbose_name = _("Network Template")
        table_actions = (ApplyTemplateAction, RemoveTemplateAction)
        status_columns = ["status", ]
        row_class = StacksUpdateRow
        # row_actions = tuple()
class DeleteTemplateAction(tables.DeleteAction):
    """Admin table action that deletes a network template definition."""
    data_type_singular = _("Network Template")
    data_type_plural = _("Network Templates")

    def delete(self, request, obj_id):
        # Best-effort delete: surface a friendly error instead of letting
        # the exception bubble up (the template may still be assigned).
        try:
            neutron.networktemplate_delete(request, obj_id)
        except Exception as exc:
            LOG.info(str(exc))
            messages.error(
                request, _("Unable to delete template. Template may "
                           "be in use by a tenant."))
class CreateTemplateAction(tables.LinkAction):
    """Link action opening the 'create network template' admin modal."""
    name = "create"
    verbose_name = _("Create Network Template")
    url = "horizon:admin:connections:network_template_admin:create"
    classes = ("ajax-modal", "btn-create")
class NetworkTemplateAdminTable(tables.DataTable):
    """Admin table listing network templates with create/delete actions."""
    id = tables.Column("id", verbose_name=_("Template ID"), hidden=True)
    # Template name links through to the admin detail view.
    name = tables.Column(
        "name", verbose_name=_("Name"),
        link="horizon:admin:connections:network_template_admin:detail")

    class Meta(object):
        name = "networktemplate_admin"
        verbose_name = _("Network Template Administration")
        table_actions = (CreateTemplateAction, DeleteTemplateAction)
        row_actions = (DeleteTemplateAction,)
|
|
# Copyright (c) 2010-2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2012-2014 Mark D. Hill and David A. Wood
# Copyright (c) 2009-2011 Advanced Micro Devices, Inc.
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
# Brad Beckmann
import optparse
import sys
import m5
from m5.defines import buildEnv
from m5.objects import *
from m5.util import addToPath, fatal
addToPath('../common')
addToPath('../ruby')
import Ruby
from FSConfig import *
from SysPaths import *
from Benchmarks import *
import Simulation
import CacheConfig
import MemConfig
from Caches import *
import Options
# Check if KVM support has been enabled, we might need to do VM
# configuration if that's the case.
# BaseKvmCPU only appears in this namespace (via `from m5.objects import *`)
# when gem5 was built with KVM support.
have_kvm_support = 'BaseKvmCPU' in globals()
def is_kvm_cpu(cpu_class):
    """Return True when *cpu_class* is a KVM-based CPU model.

    False when gem5 was built without KVM support or cpu_class is None.
    """
    # `is not None` replaces the original `!= None`: identity comparison
    # is the idiomatic (and safer) way to test for None.
    return have_kvm_support and cpu_class is not None and \
        issubclass(cpu_class, BaseKvmCPU)
def initO3params(options, num_cpus, testsys):
    # Copy the per-structure scaling switches (ROB, BTB, TLB, IQ, regfile,
    # LSQ, ALU, FPU, D-cache, I-cache) from the parsed command-line
    # options onto each CPU of the test system, echoing each value.
    # Python 2 print statements: this script is Python 2 only.
    for i in range(0, num_cpus):
        testsys.cpu[i].rob_scale_enabled = options.rob_scale_enabled
        print 'for cpu:%d rob_scale_enabled:%d' % (i, testsys.cpu[i].rob_scale_enabled)
        testsys.cpu[i].btb_scale_enabled = options.btb_scale_enabled
        print 'for cpu:%d btb_scale_enabled:%d' % (i, testsys.cpu[i].btb_scale_enabled)
        testsys.cpu[i].tlb_scale_enabled = options.tlb_scale_enabled
        print 'for cpu:%d tlb_scale_enabled:%d' % (i, testsys.cpu[i].tlb_scale_enabled)
        testsys.cpu[i].iq_scale_enabled = options.iq_scale_enabled
        print 'for cpu:%d iq_scale_enabled:%d' % (i, testsys.cpu[i].iq_scale_enabled)
        testsys.cpu[i].regfile_scale_enabled = options.regfile_scale_enabled
        print 'for cpu:%d regfile_scale_enabled:%d' % (i, testsys.cpu[i].regfile_scale_enabled)
        testsys.cpu[i].lsq_scale_enabled = options.lsq_scale_enabled
        print 'for cpu:%d lsq_scale_enabled:%d' % (i, testsys.cpu[i].lsq_scale_enabled)
        testsys.cpu[i].alu_scale_enabled = options.alu_scale_enabled
        print 'for cpu:%d alu_scale_enabled:%d' % (i, testsys.cpu[i].alu_scale_enabled)
        testsys.cpu[i].fpu_scale_enabled = options.fpu_scale_enabled
        print 'for cpu:%d fpu_scale_enabled:%d' % (i, testsys.cpu[i].fpu_scale_enabled)
        testsys.cpu[i].dcache_scale_enabled = options.dcache_scale_enabled
        print 'for cpu:%d dcache_scale_enabled:%d' % (i, testsys.cpu[i].dcache_scale_enabled)
        testsys.cpu[i].icache_scale_enabled = options.icache_scale_enabled
        print 'for cpu:%d icache_scale_enabled:%d' % (i, testsys.cpu[i].icache_scale_enabled)
def build_test_system(np):
    # Build the full-system "system under test": ISA-specific platform,
    # clock/voltage domains (one DVFS-capable clock domain per CPU socket),
    # CPUs, interconnect/caches (classic or Ruby) and memory controllers.
    # Reads the module-level `options`, `bm`, `test_mem_mode`,
    # `TestCPUClass`, `FutureClass` and `TestMemClass` globals.
    # NOTE(review): `np` is ignored for CPU construction below -- exactly
    # four CPUs are instantiated regardless; confirm callers always pass 4.
    if buildEnv['TARGET_ISA'] == "alpha":
        test_sys = makeLinuxAlphaSystem(test_mem_mode, bm[0], options.ruby)
    elif buildEnv['TARGET_ISA'] == "mips":
        test_sys = makeLinuxMipsSystem(test_mem_mode, bm[0])
    elif buildEnv['TARGET_ISA'] == "sparc":
        test_sys = makeSparcSystem(test_mem_mode, bm[0])
    elif buildEnv['TARGET_ISA'] == "x86":
        test_sys = makeLinuxX86System(test_mem_mode, options.num_cpus, bm[0],
                                      options.ruby)
    elif buildEnv['TARGET_ISA'] == "arm":
        test_sys = makeArmSystem(test_mem_mode, options.machine_type, bm[0],
                                 options.dtb_filename,
                                 bare_metal=options.bare_metal,
                                 sdcard_image=options.sdcard_image)
        if options.enable_context_switch_stats_dump:
            test_sys.enable_context_switch_stats_dump = True
    else:
        fatal("Incapable of building %s full system!", buildEnv['TARGET_ISA'])
    # Set the cache line size for the entire system
    test_sys.cache_line_size = options.cacheline_size
    # Create a top-level voltage domain
    test_sys.voltage_domain = VoltageDomain(voltage = options.sys_voltage)
    # Create a source clock for the system and set the clock period
    test_sys.clk_domain = SrcClockDomain(clock =  options.sys_clock,
            voltage_domain = test_sys.voltage_domain)
    #Create a clk running contantly at 1.4GHz for L2
    test_sys.clk_domain_const = SrcClockDomain(clock = ["1.4GHz"],
            voltage_domain = test_sys.voltage_domain)
    # Create a CPU voltage domain
    test_sys.cpu_voltage_domain = VoltageDomain()
    # Create a source clock for the CPUs and set the clock period
    #test_sys.cpu_clk_domain = SrcClockDomain(clock = options.cpu_clock,
    #                                         voltage_domain =
    #                                         test_sys.cpu_voltage_domain)
    #test_sys.cpu_clk_domain = SrcClockDomain(clock = ["3GHz","2GHz","1GHz"],
    #                                                      0      6     12
    # One DVFS clock domain per CPU socket, each offering the same 13
    # operating points from 1.4GHz down to 0.2GHz in 100MHz steps.
    test_sys.cpu_clk_domain = SrcClockDomain(clock = ["1.4GHz", "1.3GHz", "1.2GHz", "1.1GHz", "1GHz", "0.9GHz", "0.8GHz", "0.7GHz", "0.6GHz", "0.5GHz", "0.4GHz", "0.3GHz", "0.2GHz"],
                                             voltage_domain =
                                             test_sys.cpu_voltage_domain,
                                             domain_id=0)
    test_sys.cpu_clk_domain1 = SrcClockDomain(clock = ["1.4GHz", "1.3GHz", "1.2GHz", "1.1GHz", "1GHz", "0.9GHz", "0.8GHz", "0.7GHz", "0.6GHz", "0.5GHz", "0.4GHz", "0.3GHz", "0.2GHz"],
                                              voltage_domain =
                                              test_sys.cpu_voltage_domain,
                                              domain_id=1)
    test_sys.cpu_clk_domain2 = SrcClockDomain(clock = ["1.4GHz", "1.3GHz", "1.2GHz", "1.1GHz", "1GHz", "0.9GHz", "0.8GHz", "0.7GHz", "0.6GHz", "0.5GHz", "0.4GHz", "0.3GHz", "0.2GHz"],
                                              voltage_domain =
                                              test_sys.cpu_voltage_domain,
                                              domain_id=2)
    test_sys.cpu_clk_domain3 = SrcClockDomain(clock = ["1.4GHz", "1.3GHz", "1.2GHz", "1.1GHz", "1GHz", "0.9GHz", "0.8GHz", "0.7GHz", "0.6GHz", "0.5GHz", "0.4GHz", "0.3GHz", "0.2GHz"],
                                              voltage_domain =
                                              test_sys.cpu_voltage_domain,
                                              domain_id=3)
    if options.kernel is not None:
        test_sys.kernel = binary(options.kernel)
    if options.script is not None:
        test_sys.readfile = options.script
    if options.lpae:
        test_sys.have_lpae = True
    if options.virtualisation:
        test_sys.have_virtualization = True
    test_sys.init_param = options.init_param
    # For now, assign all the CPUs to the same clock domain
    #test_sys.cpu = [TestCPUClass(clk_domain=test_sys.cpu_clk_domain, cpu_id=i)
    #                for i in xrange(np)]
    # Four CPUs, one per socket, each on its own DVFS domain.
    test_sys.cpu = [TestCPUClass(clk_domain=test_sys.cpu_clk_domain, cpu_id=0, socket_id=0), TestCPUClass(clk_domain=test_sys.cpu_clk_domain1, cpu_id=1, socket_id=1), TestCPUClass(clk_domain=test_sys.cpu_clk_domain2, cpu_id=2, socket_id=2), TestCPUClass(clk_domain=test_sys.cpu_clk_domain3, cpu_id=3, socket_id=3)]
    if is_kvm_cpu(TestCPUClass) or is_kvm_cpu(FutureClass):
        test_sys.vm = KvmVM()
    # Enable the DVFS handler over all four per-socket clock domains.
    test_sys.dvfs_handler.enable = True
    test_sys.dvfs_handler.transform_enable = True # We do want O3 CPU to transform
    test_sys.dvfs_handler.domains = [test_sys.cpu_clk_domain, test_sys.cpu_clk_domain1, test_sys.cpu_clk_domain2, test_sys.cpu_clk_domain3]
    if options.ruby:
        # Check for timing mode because ruby does not support atomic accesses
        if not (options.cpu_type == "detailed" or options.cpu_type == "timing"):
            print >> sys.stderr, "Ruby requires TimingSimpleCPU or O3CPU!!"
            sys.exit(1)
        Ruby.create_system(options, test_sys, test_sys.iobus, test_sys._dma_ports)
        # Create a seperate clock domain for Ruby
        test_sys.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock,
                                        voltage_domain = test_sys.voltage_domain)
        for (i, cpu) in enumerate(test_sys.cpu):
            #
            # Tie the cpu ports to the correct ruby system ports
            #
            cpu.clk_domain = test_sys.cpu_clk_domain
            cpu.createThreads()
            cpu.createInterruptController()
            cpu.icache_port = test_sys.ruby._cpu_ports[i].slave
            cpu.dcache_port = test_sys.ruby._cpu_ports[i].slave
            if buildEnv['TARGET_ISA'] == "x86":
                cpu.itb.walker.port = test_sys.ruby._cpu_ports[i].slave
                cpu.dtb.walker.port = test_sys.ruby._cpu_ports[i].slave
                cpu.interrupts.pio = test_sys.ruby._cpu_ports[i].master
                cpu.interrupts.int_master = test_sys.ruby._cpu_ports[i].slave
                cpu.interrupts.int_slave = test_sys.ruby._cpu_ports[i].master
            test_sys.ruby._cpu_ports[i].access_phys_mem = True
        # Create the appropriate memory controllers
        # and connect them to the IO bus
        test_sys.mem_ctrls = [TestMemClass(range = r) for r in test_sys.mem_ranges]
        for i in xrange(len(test_sys.mem_ctrls)):
            test_sys.mem_ctrls[i].port = test_sys.iobus.master
    else:
        if options.caches or options.l2cache:
            # By default the IOCache runs at the system clock
            test_sys.iocache = IOCache(addr_ranges = test_sys.mem_ranges)
            test_sys.iocache.cpu_side = test_sys.iobus.master
            test_sys.iocache.mem_side = test_sys.membus.slave
        else:
            test_sys.iobridge = Bridge(delay='50ns', ranges = test_sys.mem_ranges)
            test_sys.iobridge.slave = test_sys.iobus.master
            test_sys.iobridge.master = test_sys.membus.slave
        # Sanity check
        if options.fastmem:
            if TestCPUClass != AtomicSimpleCPU:
                fatal("Fastmem can only be used with atomic CPU!")
            if (options.caches or options.l2cache):
                fatal("You cannot use fastmem in combination with caches!")
        for i in xrange(np):
            if options.fastmem:
                test_sys.cpu[i].fastmem = True
            if options.checker:
                test_sys.cpu[i].addCheckerCpu()
            test_sys.cpu[i].createThreads()
        CacheConfig.config_cache(options, test_sys)
        MemConfig.config_mem(options, test_sys)
    return test_sys
def build_drive_system(np):
    # Build the "drive" system for dual-system (client/server) runs.
    # Reads the module-level `options` and `bm` globals; `np` is only
    # forwarded to makeLinuxX86System.
    # driver system CPU is always simple, so is the memory
    # Note this is an assignment of a class, not an instance.
    DriveCPUClass = AtomicSimpleCPU
    drive_mem_mode = 'atomic'
    DriveMemClass = SimpleMemory
    if buildEnv['TARGET_ISA'] == 'alpha':
        drive_sys = makeLinuxAlphaSystem(drive_mem_mode, bm[1])
    elif buildEnv['TARGET_ISA'] == 'mips':
        drive_sys = makeLinuxMipsSystem(drive_mem_mode, bm[1])
    elif buildEnv['TARGET_ISA'] == 'sparc':
        drive_sys = makeSparcSystem(drive_mem_mode, bm[1])
    elif buildEnv['TARGET_ISA'] == 'x86':
        drive_sys = makeLinuxX86System(drive_mem_mode, np, bm[1])
    elif buildEnv['TARGET_ISA'] == 'arm':
        drive_sys = makeArmSystem(drive_mem_mode, options.machine_type, bm[1])
    # Create a top-level voltage domain
    drive_sys.voltage_domain = VoltageDomain(voltage = options.sys_voltage)
    # Create a source clock for the system and set the clock period
    drive_sys.clk_domain = SrcClockDomain(clock =  options.sys_clock,
            voltage_domain = drive_sys.voltage_domain)
    # Create a CPU voltage domain
    drive_sys.cpu_voltage_domain = VoltageDomain()
    # Create a source clock for the CPUs and set the clock period
    drive_sys.cpu_clk_domain = SrcClockDomain(clock = options.cpu_clock,
                                              voltage_domain =
                                              drive_sys.cpu_voltage_domain)
    # Single atomic CPU is enough to drive the workload.
    drive_sys.cpu = DriveCPUClass(clk_domain=drive_sys.cpu_clk_domain,
                                  cpu_id=0)
    drive_sys.cpu.createThreads()
    drive_sys.cpu.createInterruptController()
    drive_sys.cpu.connectAllPorts(drive_sys.membus)
    if options.fastmem:
        drive_sys.cpu.fastmem = True
    if options.kernel is not None:
        drive_sys.kernel = binary(options.kernel)
    if is_kvm_cpu(DriveCPUClass):
        drive_sys.vm = KvmVM()
    drive_sys.iobridge = Bridge(delay='50ns',
                                ranges = drive_sys.mem_ranges)
    drive_sys.iobridge.slave = drive_sys.iobus.master
    drive_sys.iobridge.master = drive_sys.membus.slave
    # Create the appropriate memory controllers and connect them to the
    # memory bus
    drive_sys.mem_ctrls = [DriveMemClass(range = r)
                           for r in drive_sys.mem_ranges]
    for i in xrange(len(drive_sys.mem_ctrls)):
        drive_sys.mem_ctrls[i].port = drive_sys.membus.master
    drive_sys.init_param = options.init_param
    return drive_sys
# ---------------------------------------------------------------------------
# Script entry: parse options, pick CPU/memory classes, build the system(s)
# and hand control to gem5's Simulation.run.  (Python 2 print statements.)
# ---------------------------------------------------------------------------
# Add options
parser = optparse.OptionParser()
Options.addCommonOptions(parser)
Options.addFSOptions(parser)
# Add the ruby specific and protocol specific options
if '--ruby' in sys.argv:
    Ruby.define_options(parser)
(options, args) = parser.parse_args()
if args:
    print "Error: script doesn't take any positional arguments"
    sys.exit(1)
# system under test can be any CPU
(TestCPUClass, test_mem_mode, FutureClass) = Simulation.setCPUClass(options)
#lokeshjindal15
if (TestCPUClass == DerivO3CPU):
    print ("**** TestCpuClass is: DerivO3CPU")
else:
    print ("**** TestCpuClass is NOT DerivO3CPU")
# Match the memories with the CPUs, based on the options for the test system
TestMemClass = Simulation.setMemClass(options)
# `bm` holds one SysConfig per simulated system (two for --dual runs).
if options.benchmark:
    try:
        bm = Benchmarks[options.benchmark]
    except KeyError:
        print "Error benchmark %s has not been defined." % options.benchmark
        print "Valid benchmarks are: %s" % DefinedBenchmarks
        sys.exit(1)
else:
    if options.dual:
        bm = [SysConfig(disk=options.disk_image, mem=options.mem_size),
              SysConfig(disk=options.disk_image, mem=options.mem_size)]
    else:
        bm = [SysConfig(disk=options.disk_image, mem=options.mem_size)]
np = options.num_cpus
test_sys = build_test_system(np)
print "cpu_type is: " + options.cpu_type
# Only detailed/O3-style (or atomic) CPU models carry the scaling switches.
if (options.cpu_type == "detailed" or options.cpu_type == "arm_detailed" or options.cpu_type == "DerivO3CPU" or options.cpu_type == "atomic"):
    print "########## Running initO3params for various scaling switches"
    initO3params(options, np, test_sys)
else:
    print "########## NOT Running initO3params for various scaling switches"
if len(bm) == 2:
    drive_sys = build_drive_system(np)
    root = makeDualRoot(True, test_sys, drive_sys, options.etherdump)
elif len(bm) == 1:
    root = Root(full_system=True, system=test_sys)
else:
    print "Error I don't know how to create more than 2 systems."
    sys.exit(1)
if options.timesync:
    root.time_sync_enable = True
if options.frame_capture:
    VncServer.frame_capture = True
#m5.disableAllListeners()#lokesh to suppress gdb read error
Simulation.setWorkCountOptions(test_sys, options)
Simulation.run(options, root, test_sys, FutureClass)
|
|
# Copyright 2018 Rackspace, US Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from octavia_lib.api.drivers import exceptions as lib_exceptions
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils
from octavia.common import constants
import octavia.common.context
from octavia.tests.functional.api.v2 import base
class TestProvider(base.BaseAPITest):
    """Functional tests for the provider listing API."""

    root_tag_list = 'providers'

    def setUp(self):
        super().setUp()

    def test_get_all_providers(self):
        # The API exposes 4 providers; we check the three whose full
        # description is known here.
        expected = [
            {u'description': u'Octavia driver.', u'name': u'octavia'},
            {u'description': u'Amp driver.', u'name': u'amphora'},
            {u'description': u'NoOp driver.', u'name': u'noop_driver'},
        ]
        providers = self.get(self.PROVIDERS_PATH).json.get(self.root_tag_list)
        self.assertEqual(4, len(providers))
        for entry in expected:
            self.assertIn(entry, providers)

    def test_get_all_providers_fields(self):
        # With ?fields=name each provider dict contains only its name.
        expected = [
            {u'name': u'octavia'},
            {u'name': u'amphora'},
            {u'name': u'noop_driver'},
        ]
        response = self.get(self.PROVIDERS_PATH, params={'fields': ['name']})
        providers_list = response.json.get(self.root_tag_list)
        self.assertEqual(4, len(providers_list))
        for entry in expected:
            self.assertIn(entry, providers_list)
class TestFlavorCapabilities(base.BaseAPITest):
    """Functional tests for the per-provider flavor-capabilities API."""

    root_tag = 'flavor_capabilities'

    def setUp(self):
        super().setUp()

    def test_nonexistent_provider(self):
        # Unknown provider names are rejected with 400.
        self.get(self.FLAVOR_CAPABILITIES_PATH.format(provider='bogus'),
                 status=400)

    def test_noop_provider(self):
        ref_capabilities = [{'description': 'The glance image tag to use for '
                             'this load balancer.', 'name': 'amp_image_tag'}]
        result = self.get(
            self.FLAVOR_CAPABILITIES_PATH.format(provider='noop_driver'))
        self.assertEqual(ref_capabilities, result.json.get(self.root_tag))

    def test_amphora_driver(self):
        ref_description = ("The load balancer topology. One of: SINGLE - One "
                           "amphora per load balancer. ACTIVE_STANDBY - Two "
                           "amphora per load balancer.")
        result = self.get(
            self.FLAVOR_CAPABILITIES_PATH.format(provider='amphora'))
        capabilities = result.json.get(self.root_tag)
        capability_dict = [i for i in capabilities if
                           i['name'] == 'loadbalancer_topology'][0]
        self.assertEqual(ref_description,
                         capability_dict['description'])

    # Some drivers might not have implemented this yet, test that case
    @mock.patch('octavia.api.drivers.noop_driver.driver.NoopProviderDriver.'
                'get_supported_flavor_metadata')
    def test_not_implemented(self, mock_get_metadata):
        # Drivers raising NotImplementedError surface as HTTP 501.
        mock_get_metadata.side_effect = lib_exceptions.NotImplementedError()
        self.get(self.FLAVOR_CAPABILITIES_PATH.format(provider='noop_driver'),
                 status=501)

    def test_authorized(self):
        # A member-role context with admin flags set can read capabilities;
        # auth_strategy is restored afterwards so other tests are unaffected.
        ref_capabilities = [{'description': 'The glance image tag to use '
                                            'for this load balancer.',
                             'name': 'amp_image_tag'}]
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
        project_id = uuidutils.generate_uuid()
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               project_id):
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None,
                'is_admin': True,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                result = self.get(self.FLAVOR_CAPABILITIES_PATH.format(
                    provider='noop_driver'))
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertEqual(ref_capabilities, result.json.get(self.root_tag))

    def test_not_authorized(self):
        # Without credentials the endpoint returns 403 under TESTING auth.
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
        self.get(self.FLAVOR_CAPABILITIES_PATH.format(provider='noop_driver'),
                 status=403)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)

    def test_amphora_driver_one_filter(self):
        ref_description = ("The compute driver flavor ID.")
        result = self.get(
            self.FLAVOR_CAPABILITIES_PATH.format(provider=constants.AMPHORA),
            params={constants.NAME: 'compute_flavor'})
        capabilities = result.json.get(self.root_tag)
        self.assertEqual(1, len(capabilities))
        self.assertEqual(2, len(capabilities[0]))
        self.assertEqual(ref_description,
                         capabilities[0][constants.DESCRIPTION])

    def test_amphora_driver_two_filters(self):
        ref_description = ("The compute driver flavor ID.")
        result = self.get(
            self.FLAVOR_CAPABILITIES_PATH.format(provider=constants.AMPHORA),
            params={constants.NAME: 'compute_flavor',
                    constants.DESCRIPTION: ref_description})
        capabilities = result.json.get(self.root_tag)
        self.assertEqual(1, len(capabilities))
        self.assertEqual(ref_description,
                         capabilities[0][constants.DESCRIPTION])

    def test_amphora_driver_filter_no_match(self):
        result = self.get(
            self.FLAVOR_CAPABILITIES_PATH.format(provider=constants.AMPHORA),
            params={constants.NAME: 'bogus'})
        capabilities = result.json.get(self.root_tag)
        self.assertEqual([], capabilities)

    def test_amphora_driver_one_filter_one_field(self):
        # Filtering plus ?fields=name trims each capability to one key.
        result = self.get(
            self.FLAVOR_CAPABILITIES_PATH.format(provider=constants.AMPHORA),
            params={constants.NAME: 'compute_flavor',
                    constants.FIELDS: constants.NAME})
        capabilities = result.json.get(self.root_tag)
        self.assertEqual(1, len(capabilities))
        self.assertEqual(1, len(capabilities[0]))
        self.assertEqual('compute_flavor', capabilities[0][constants.NAME])
class TestAvailabilityZoneCapabilities(base.BaseAPITest):
    """Functional tests for the per-provider AZ-capabilities API."""

    root_tag = 'availability_zone_capabilities'

    def setUp(self):
        super().setUp()

    def test_nonexistent_provider(self):
        # Unknown provider names are rejected with 400.
        self.get(self.AVAILABILITY_ZONE_CAPABILITIES_PATH.format(
            provider='bogus'), status=400)

    def test_noop_provider(self):
        ref_capabilities = [{'description': 'The compute availability zone to '
                                            'use for this loadbalancer.',
                             'name': constants.COMPUTE_ZONE}]
        result = self.get(
            self.AVAILABILITY_ZONE_CAPABILITIES_PATH.format(
                provider='noop_driver'))
        self.assertEqual(ref_capabilities, result.json.get(self.root_tag))

    def test_amphora_driver(self):
        # The amphora driver exposes both a compute zone and a management
        # network capability.
        ref_description1 = 'The compute availability zone.'
        ref_description2 = 'The management network ID for the amphora.'
        result = self.get(
            self.AVAILABILITY_ZONE_CAPABILITIES_PATH.format(
                provider='amphora'))
        capabilities = result.json.get(self.root_tag)
        capability_dict = [i for i in capabilities if
                           i['name'] == constants.COMPUTE_ZONE][0]
        self.assertEqual(ref_description1,
                         capability_dict['description'])
        capability_dict = [i for i in capabilities if
                           i['name'] == constants.MANAGEMENT_NETWORK][0]
        self.assertEqual(ref_description2,
                         capability_dict['description'])

    # Some drivers might not have implemented this yet, test that case
    @mock.patch('octavia.api.drivers.noop_driver.driver.NoopProviderDriver.'
                'get_supported_availability_zone_metadata')
    def test_not_implemented(self, mock_get_metadata):
        # Drivers raising NotImplementedError surface as HTTP 501.
        mock_get_metadata.side_effect = lib_exceptions.NotImplementedError()
        self.get(self.AVAILABILITY_ZONE_CAPABILITIES_PATH.format(
            provider='noop_driver'), status=501)

    def test_authorized(self):
        # A member-role context with admin flags set can read capabilities;
        # auth_strategy is restored afterwards so other tests are unaffected.
        ref_capabilities = [{'description': 'The compute availability zone to '
                                            'use for this loadbalancer.',
                             'name': constants.COMPUTE_ZONE}]
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
        project_id = uuidutils.generate_uuid()
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               project_id):
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None,
                'is_admin': True,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                result = self.get(
                    self.AVAILABILITY_ZONE_CAPABILITIES_PATH.format(
                        provider='noop_driver'))
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertEqual(ref_capabilities, result.json.get(self.root_tag))

    def test_not_authorized(self):
        # Without credentials the endpoint returns 403 under TESTING auth.
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
        self.get(self.AVAILABILITY_ZONE_CAPABILITIES_PATH.format(
            provider='noop_driver'), status=403)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)

    def test_amphora_driver_one_filter(self):
        ref_description = 'The compute availability zone.'
        result = self.get(
            self.AVAILABILITY_ZONE_CAPABILITIES_PATH.format(
                provider=constants.AMPHORA),
            params={constants.NAME: constants.COMPUTE_ZONE})
        capabilities = result.json.get(self.root_tag)
        self.assertEqual(1, len(capabilities))
        self.assertEqual(2, len(capabilities[0]))
        self.assertEqual(ref_description,
                         capabilities[0][constants.DESCRIPTION])

        ref_description = 'The management network ID for the amphora.'
        result = self.get(
            self.AVAILABILITY_ZONE_CAPABILITIES_PATH.format(
                provider=constants.AMPHORA),
            params={constants.NAME: constants.MANAGEMENT_NETWORK})
        capabilities = result.json.get(self.root_tag)
        self.assertEqual(1, len(capabilities))
        self.assertEqual(2, len(capabilities[0]))
        self.assertEqual(ref_description,
                         capabilities[0][constants.DESCRIPTION])

    def test_amphora_driver_two_filters(self):
        ref_description = 'The compute availability zone.'
        result = self.get(
            self.AVAILABILITY_ZONE_CAPABILITIES_PATH.format(
                provider=constants.AMPHORA),
            params={constants.NAME: constants.COMPUTE_ZONE,
                    constants.DESCRIPTION: ref_description})
        capabilities = result.json.get(self.root_tag)
        self.assertEqual(1, len(capabilities))
        self.assertEqual(ref_description,
                         capabilities[0][constants.DESCRIPTION])

    def test_amphora_driver_filter_no_match(self):
        result = self.get(
            self.AVAILABILITY_ZONE_CAPABILITIES_PATH.format(
                provider=constants.AMPHORA),
            params={constants.NAME: 'bogus'})
        capabilities = result.json.get(self.root_tag)
        self.assertEqual([], capabilities)

    def test_amphora_driver_one_filter_one_field(self):
        # Filtering plus ?fields=name trims each capability to one key.
        result = self.get(
            self.AVAILABILITY_ZONE_CAPABILITIES_PATH.format(
                provider=constants.AMPHORA),
            params={constants.NAME: constants.COMPUTE_ZONE,
                    constants.FIELDS: constants.NAME})
        capabilities = result.json.get(self.root_tag)
        self.assertEqual(1, len(capabilities))
        self.assertEqual(1, len(capabilities[0]))
        self.assertEqual(constants.COMPUTE_ZONE,
                         capabilities[0][constants.NAME])
|
|
try:
import ujson as json
except:
import json
from JumpScale import j
import re
#@review [kristof,incubaid] name:codereviewtools tools for codereview, check all new code
class CodeManager():
    """Scans a source tree for annotation statements (stories, tasks,
    owners, ...) and resolves user/group aliases from config text.

    Python 2 only: uses ``<>``, ``dict.has_key`` and print statements.
    """
    def __init__(self):
        self.ignoreDirs=["/.hg*"]   # glob-style directory patterns to skip
        self.users={}               # alias (lowercased) -> canonical userid
        self.groups={}              # groupid (lowercased) -> member list
    def setusers(self,config):
        # Config format: one "userid:alias1,alias2" per line; '#' starts a
        # comment (inline or whole-line).
        for line in config.split("\n"):
            line=line.strip()
            if line<>"":
                if line[0]<>"#":
                    if line.find("#")<>-1:
                        line=line.split("#")[0]
                    if line.find(":")<>-1:
                        userid,aliases=line.split(":")
                        # NOTE(review): the canonical id keeps its original
                        # case here while the aliases below are lowercased;
                        # getUserId lookups on the id itself are therefore
                        # case-sensitive -- confirm this is intended.
                        self.users[userid]=userid
                        aliases=aliases.split(",")
                        for alias in aliases:
                            self.users[alias.lower()]=userid.lower()
    def setgroups(self,config):
        # Config format: one "groupid:member1,member2" per line; members may
        # themselves be group ids (resolved below).
        for line in config.split("\n"):
            line=line.strip()
            if line<>"":
                if line[0]<>"#":
                    if line.find("#")<>-1:
                        line=line.split("#")[0]
                    if line.find(":")<>-1:
                        groupid,grouptext=line.split(":")
                        groups=grouptext.split(",")
                        groups=[group.lower() for group in groups]
                        self.groups[groupid.lower()]=groups
        #resolve groups in groups
        # Five expansion passes: supports group nesting up to 5 levels deep
        # (no cycle detection -- a cycle would just stop expanding after 5).
        for i in range(5):
            for groupid in self.groups.keys():
                result=[]
                for item in self.groups[groupid]:
                    if self.groups.has_key(item):
                        result.extend(self.groups[item])
                    else:
                        result.append(item)
                self.groups[groupid]=result
    def getUserId(self,username):
        # Returns the canonical userid for a known alias, False otherwise.
        if self.users.has_key(username):
            return self.users[username]
        else:
            return False
    def _pathIgnoreCheck(self,path):
        # True when path matches one of the ignoreDirs glob patterns
        # (patterns are converted to regexes: '.' escaped, '*' -> '.*').
        for item in self.ignoreDirs:
            item=item.replace(".","\\.")
            item=item.replace("*",".*")
            if j.codetools.regex.match(item,path):
                return True
        return False
    def getCodeManagerForFile(self,path):
        # Factory for a per-file manager bound to this CodeManager.
        return CodeManagerFile(self,path)
    def parse(self,path):
        """
        directory to walk over and find story, task, ... statements
        """
        self.rootpath=path
        files=[]
        files.extend(j.system.fs.listFilesInDir(path,True,filter="*.py")) #@todo P1 this is not very nice, should be done in one go, also make sure we don't descend in .hg dirs
        files.extend(j.system.fs.listFilesInDir(path,True,filter="*.txt"))
        files.extend(j.system.fs.listFilesInDir(path,True,filter="*.md"))
        files.extend(j.system.fs.listFilesInDir(path,True,filter="*.wiki"))
        for pathItem in files:
            if not self._pathIgnoreCheck(pathItem):
                # path2 is the path relative to the scanned root.
                path2=pathItem.replace(path,"")
                print "parse %s" % path2
                # Skip the parser's own source file to avoid self-matching.
                if not path2=="/apps/incubaiddevelopmentprocess/appserver/service_developmentprocess/extensions/codeparser/Parser.py":
                    if path2[0]=="/":
                        path3=path2[1:]
                        codemanagerfile=CodeManagerFile(self,path3)
                        codemanagerfile.process()
class CodeManagerFile():
    """
    manages code for one file

    Extracts project-management markers (@storydef, @sprintdef, @todo, ...)
    from the file's text and feeds them into the project model.

    NOTE(review): several names used below are not defined in this class or
    module -- ``self.findItem``, ``self.parseTaskQuestionRemark``,
    ``self.projectInfoObject``, ``self.service`` and ``self.reset`` --
    presumably supplied by a subclass or attached externally; confirm
    before relying on the _find* extractors.

    Fixed in this revision: Python-3-removed syntax (``<>``, ``has_key``,
    ``except E, e``, print statement) replaced with dual-compatible forms,
    plus the concrete bugs noted inline below.
    """

    def __init__(self, codemanager, path):
        # Alias/group tables come from the globally registered manager.
        self.users = j.codetools.codemanager.users
        self.groups = j.codetools.codemanager.groups
        self.path = path
        self.code = j.system.fs.fileGetContents(path)
        # NOTE(review): len() of the file *text* counts characters, not
        # lines, so nrlines is really a character count -- confirm intent.
        self.nrlines = len(self.code)
        self.codemanager = codemanager

    def process(self):
        """Run every marker extractor over this file's text."""
        if self.code.strip() != "":
            # Fixed: the original passed the undefined locals ``code``,
            # ``path3`` and ``pathItem``; the extractors take
            # (text, path, fullPath), all available on self.
            self._findUsers(self.code, self.path, self.path)
            self._findStories(self.code, self.path, self.path)
            self._findScrumteams(self.code, self.path, self.path)
            self._findSprints(self.code, self.path, self.path)
            self._findTasks(self.code, self.path, self.path)
            self._findRoadmapitems(self.code, self.path, self.path)
            self._findGroups(self.code, self.path, self.path)

    def findItems(self, item="@owner", maxitems=0):
        """Collect the text that follows each occurrence of ``item``.

        ``maxitems`` bounds the expected number of hits (0 = unlimited);
        with maxitems == 1 a single string (or "") is returned instead of
        a list.
        """
        result = []
        if maxitems == 0:
            maxitems = 10000000000
        def process(arg, line):
            # Keep what follows the marker; strip a trailing "(...)" note.
            line = line.split(item)[1].strip()
            if line.find("(") != -1:
                line = line.split("(")[0].strip()
            result.append(line)
            return ""
        j.codetools.regex.replaceLines(process, arg="", text=self.code, includes=["%s.*" % item], excludes='')
        if len(result) > maxitems:
            self.errorTrap("Error in text to parse, found more entities:%s than %s" % (item, maxitems))
        if maxitems == 1:
            if len(result) > 0:
                result = result[0]
            else:
                result = ""
        return result

    def findLine(self, text, item="@owner"):
        """Return the first line of ``text`` that starts with ``item``
        (case-insensitive, ignoring leading whitespace), or ""."""
        for line in text.split("\n"):
            if line.strip().lower().find(item) == 0:
                return line
        return ""

    def findId(self, text, path):
        """Extract a single ``((type: 123))`` id marker from ``text``.

        Returns (text, id); id is 0 when no marker is present.  Raises
        when more than one marker exists or the id is not an integer.
        """
        result = j.codetools.regex.findAll(r"\(\(.*: *\d* *\)\)", text)
        if len(result) > 1:
            raise RuntimeError("Found 2 id's in %s" % path)
        if len(result) == 1:
            result = result[0].split(":")[1]
            result = result.split(")")[0]
        else:
            result = ""
        if result.strip() == "":
            result = 0
        else:
            try:
                result = int(result)
            except Exception as e:
                # Fixed typo in the message: "parse if" -> "parse id".
                raise RuntimeError("Could not parse id, error:%s. \nPath = %s" % (e, path))
        return text, result

    def parseTimeInfo(self, timestring, modelobj, defaults=[8, 16, 8, 4, 8]):
        """Fill the time_* fields of ``modelobj`` from a spec like
        ``a2/c8/i4`` (architecture/coding/integration/doc/testing hours).

        Categories absent from the spec keep their ``defaults`` value but
        only explicitly given categories are added to ``modelobj.time``.
        NOTE(review): the mutable default list is read-only here, so the
        shared-default pitfall does not bite.
        """
        timeItems = timestring.split("/")
        modelobj.time_architecture = defaults[0]
        modelobj.time_coding = defaults[1]
        modelobj.time_integration = defaults[2]
        modelobj.time_doc = defaults[3]
        modelobj.time_testing = defaults[4]
        modelobj.timestr = timestring
        modelobj.time = 0
        for item in timeItems:
            if item != "":
                if item.lower()[0] == "a":
                    modelobj.time_architecture = int(item.lower().replace("a", ""))
                    modelobj.time += modelobj.time_architecture
                if item.lower()[0] == "c":
                    modelobj.time_coding = int(item.lower().replace("c", ""))
                    modelobj.time += modelobj.time_coding
                if item.lower()[0] == "i":
                    modelobj.time_integration = int(item.lower().replace("i", ""))
                    modelobj.time += modelobj.time_integration
                if item.lower()[0] == "d":
                    modelobj.time_doc = int(item.lower().replace("d", ""))
                    modelobj.time += modelobj.time_doc
                if item.lower()[0] == "t":
                    modelobj.time_testing = int(item.lower().replace("t", ""))
                    modelobj.time += modelobj.time_testing

    def _parseTaskInfo(self, storyTaskModelObject, info):
        """Apply ``s<N>`` (story), ``p<N>`` (priority) and ``m<N>``
        (sprint) codes from ``info`` to a task model object."""
        for item in info.split(" "):
            if item != "":
                if item.lower()[0] == "s":
                    # story
                    storyTaskModelObject.storyid = int(item.lower().replace("s", ""))
                elif item.lower()[0] == "p":
                    # priority
                    storyTaskModelObject.priority = int(item.lower().replace("p", ""))
                elif item.lower()[0] == "m":
                    # sprint
                    storyTaskModelObject.sprintid = int(item.lower().replace("m", ""))

    def _parseStoryInfo(self, storyTaskModelObject, info):
        """Like _parseTaskInfo, but ``s<N>`` sets the story's own id."""
        for item in info.split(" "):
            if item != "":
                if item.lower()[0] == "s":
                    # story
                    storyTaskModelObject.id = int(item.lower().replace("s", ""))
                elif item.lower()[0] == "p":
                    # priority
                    storyTaskModelObject.priority = int(item.lower().replace("p", ""))
                elif item.lower()[0] == "m":
                    # sprint
                    storyTaskModelObject.sprintid = int(item.lower().replace("m", ""))

    def getUsers(self, text):
        """
        return [$text,$users] with unique id and the usergroup construct is taken out of text, all groups are resolved to users
        """
        items = j.codetools.regex.findAll(r"\[[a-z, ]*\]", text)
        text = text.lower()
        if len(items) > 1:
            raise RuntimeError("Found to many users,groups items in string, needs to be one [ and one ] and users & groups inside, now %s" % text)
        if len(items) == 0:
            return text, []
        usergroups = items[0]
        # NOTE(review): reconstructed as a double-space collapse; the
        # original listing was whitespace-mangled here -- confirm.
        text = text.replace(usergroups, "").replace("  ", " ").strip()
        items = usergroups.replace("[", "").replace("]", "").split(",")
        users = []
        for item in items:
            item = item.strip()
            if item in self.groups:
                for user in self.groups[item]:
                    users.append(user)
            if item in self.users:
                users.append(item)
        # Map aliases to canonical ids, dropping duplicates.
        usersout = []
        for user in users:
            if user in self.users:
                if self.users[user] not in usersout:
                    usersout.append(self.users[user])
        return text, usersout

    def parseBasics(self, text):
        """
        @return [infoitems,timeitem,users,tags,descr]

        Splits a marker line into meta codes (P/S/M numbers), a ``t:<spec>``
        time item, ``tag:value`` pairs and the free-text description; meta
        is only recognised before the first plain word.
        """
        keys = ["P", "p", "S", "s", "M", "m"]
        timeitem = ""
        infoitems = ""
        descr = ""
        state = "start"
        tags = ""
        text, users = self.getUsers(text)
        # Collapse double spaces so split(" ") yields no empty items.
        text = text.replace("  ", " ")
        text = text.replace("  ", " ")
        if text.strip() == "":
            return ["", "", "", "", ""]
        for item in text.strip().split(" "):
            if state == "endofmeta":
                descr += item + " "
            if state == "start":
                if item[0] in keys:
                    try:
                        int(item[1:])
                        infoitems += item + " "
                    except ValueError:
                        # Not a meta code after all: start the description.
                        descr += item + " "
                        state = "endofmeta"
                elif item[0:2].lower() == "t:":
                    timeitem = item[2:]
                    if not re.match(r"[aAcCiIdDtT/\d:]*\Z", timeitem):
                        descr += item + " "
                        state = "endofmeta"
                        timeitem = ""
                elif item.find(":") != -1:
                    tags += "%s " % item
                else:
                    descr += item + " "
                    state = "endofmeta"
        return [infoitems, timeitem, users, tags, descr]

    def _getStoryName(self, info):
        """Return the words of ``info`` that are not s/p/m meta codes."""
        out = ""
        for item in info.split(" "):
            if not (item.lower()[0] == "s" or item.lower()[0] == "p" or item.lower()[0] == "m"):
                out += " %s" % item
        return out.strip()

    def _findStories(self, text, path, fullPath):
        """Extract @storydef blocks from triple-quoted sections of ``text``
        and register them on the project model."""
        found = j.codetools.regex.extractBlocks(text, includeMatchingLine=False, blockStartPatterns=["\"\"\""])
        for item in found:
            if item.lower().find("@storydef") != -1:
                # found a story
                lineFull = self.findLine(item, "@story")
                id1 = self.addUniqueId(lineFull, fullPath, ttype="story")
                text2 = item
                text2, owners = self.findItem(text2, "@owner")
                text2, priority = self.findItem(text2, "@priority")
                if priority == "":
                    priority = 2
                text2, roadmap = self.findItem(text2, "@roadmap")
                text2, sprint = self.findItem(text2, "@sprint")
                text2, storydependencies = self.findItem(text2, "@storydependencies")
                text2, roadmapdependencies = self.findItem(text2, "@roadmapdependencies")
                text2, time = self.findItem(text2, "@time")
                story, storyinfo2 = self.findItem(text2, "@story")
                storyinfo, timetextdonotuse, userdonotuse, groupdonotuse, descr = self.parseTaskQuestionRemark(storyinfo2)
                if len(descr) < 5:
                    raise RuntimeError("Story description is less than 5 for path %s" % fullPath)
                storyname = descr
                obj = self.projectInfoObject.stories.addStory(id=id1, name=storyname.strip(), description=story.strip())
                obj.model.owner = owners
                self._parseStoryInfo(obj.model, storyinfo)
                self.parseTimeInfo(time, obj.model)
                obj.model.path = fullPath
                obj.model.storydependencies = self._strToArrayInt(storydependencies)
                obj.model.roadmapdependencies = self._strToArrayInt(roadmapdependencies)
                obj.model.sprintid = self._strToInt(sprint)
                obj.model.priority = self._strToInt(priority)
                obj.model.roadmapid = self._strToInt(roadmap)

    def _findScrumteams(self, text, path, fullPath):
        """Extract @scrumteamdef blocks and register the teams."""
        found = j.codetools.regex.extractBlocks(text, includeMatchingLine=False, blockStartPatterns=["\"\"\""])
        for item in found:
            if item.lower().find("@scrumteamdef") != -1:
                # found a scrum team
                lineFull = self.findLine(item, "@scrumteamdef")
                id1 = self.addUniqueId(lineFull, fullPath, ttype="scrumteam")
                text2 = item
                text2, owners = self.findItem(text2, "@owner")
                descr, name = self.findItem(text2, "@scrumteamdef")
                if len(descr) < 5:
                    raise RuntimeError("Scrumteam description is less than 5 for path %s" % fullPath)
                obj = self.projectInfoObject.scrumteams.addScrumteam(id=id1, name=name.strip(), description=descr.strip())
                obj.model.owner = owners
                obj.model.path = fullPath
                obj.model.id = id1

    def _findSprints(self, text, path, fullPath):
        """Extract @sprintdef blocks and register the sprints."""
        found = j.codetools.regex.extractBlocks(text, includeMatchingLine=False, blockStartPatterns=["\"\"\""])
        for item in found:
            if item.lower().find("@sprintdef") != -1:
                # found a sprint
                lineFull = self.findLine(item, "@sprintdef")
                id1 = self.addUniqueId(lineFull, fullPath, ttype="sprint")
                text2 = item
                text2, owners = self.findItem(text2, "@owner")
                text2, scrumteam = self.findItem(text2, "@scrumteam")
                text2, company = self.findItem(text2, "@company")
                text2, deadline = self.findItem(text2, "@deadline")
                text2, start = self.findItem(text2, "@start")
                text2, goal = self.findItem(text2, "@goal")
                descr, line = self.findItem(text2, "@sprintdef")
                sprintinfo, timetextdonotuse, userdonotuse, groupdonotuse, name = self.parseTaskQuestionRemark(line)
                sprintinfo = sprintinfo.strip()
                obj = self.projectInfoObject.sprints.addSprint(id=id1, name=name.strip(), description=descr.strip())
                obj.model.ownername = owners
                obj.model.path = fullPath
                obj.model.goal = goal
                obj.model.id = id1
                obj.model.company = company
                # Dates are written d-m-y; the epoch helper wants d/m/y.
                obj.model.deadline = int(j.base.time.HRDatetoEpoch(deadline.replace("-", "/")))
                obj.model.start = int(j.base.time.HRDatetoEpoch(start.replace("-", "/")))

    def _strToArrayInt(self, items):
        """Parse a comma-separated string of ints into a list of ints."""
        if items == "":
            return []
        # Fixed: the accumulator was initialised to "" (a str), so the
        # .append below could never succeed.
        result = []
        for item in items.split(","):
            try:
                result.append(int(item))
            except ValueError:
                raise RuntimeError("Cannot convert str to array, item was %s" % item)
        return result

    def _strToInt(self, item):
        """Parse ``item`` as an int; "" maps to 0."""
        if item == "":
            return 0
        try:
            result = int(item)
        except (TypeError, ValueError):
            raise RuntimeError("Cannot convert str to int, item was %s" % item)
        return result

    def _findRoadmapitems(self, text, path, fullPath):
        """Extract @roadmapdef blocks and register the roadmap items."""
        found = j.codetools.regex.extractBlocks(text, includeMatchingLine=False, blockStartPatterns=["\"\"\""])
        for item in found:
            if item.lower().find("@roadmapdef") != -1:
                # found a roadmap item
                lineFull = self.findLine(item, "@roadmapdef")
                text2 = item
                text2, owners = self.findItem(text2, "@owner")
                text2, priority = self.findItem(text2, "@priority")
                if priority == "":
                    priority = 2
                text2, goal = self.findItem(text2, "@goal")
                text2, releasedate_int = self.findItem(text2, "@releasedate_int")
                text2, releasedate_pub = self.findItem(text2, "@releasedate_pub")
                text2, bugs = self.findItem(text2, "@bugs")
                text2, company = self.findItem(text2, "@company")
                text2, product = self.findItem(text2, "@product")
                text2, storydependencies = self.findItem(text2, "@storydependencies")
                text2, roadmapdependencies = self.findItem(text2, "@roadmapdependencies")
                text2, featurerequests = self.findItem(text2, "@featurerequests")
                descr, line = self.findItem(text2, "@roadmapdef")
                descr, remarks = self._descrToDescrAndRemarks(descr)
                sprintinfodonotuse, timetextdonotuse, userdonotuse, groupdonotuse, name = self.parseTaskQuestionRemark(line)
                if len(descr) < 5:
                    raise RuntimeError("Roadmap description is less than 5 for path %s" % fullPath)
                id1 = self.addUniqueId(lineFull, fullPath, ttype="roadmapitem")
                obj = self.projectInfoObject.roadmapitems.addRoadmapitem(id=id1, name=name.strip(), description=descr.strip())
                obj.model.id = id1
                obj.model.owner = owners
                obj.model.path = fullPath
                obj.model.goal = goal
                obj.model.priority = int(priority)
                obj.model.remarks = remarks
                obj.model.releasedate_int = int(j.base.time.HRDatetoEpoch(releasedate_int.replace("-", "/")))
                obj.model.releasedate_pub = int(j.base.time.HRDatetoEpoch(releasedate_pub.replace("-", "/")))
                obj.model.featurerequests = self._strToArrayInt(featurerequests)
                obj.model.bugs = self._strToArrayInt(bugs)
                obj.model.company = company
                obj.model.product = product
                obj.model.storydependencies = self._strToArrayInt(storydependencies)
                obj.model.roadmapdependencies = self._strToArrayInt(roadmapdependencies)

    def _findUsers(self, text, path, fullPath):
        """Extract @userdef blocks and register the users."""
        found = j.codetools.regex.extractBlocks(text, includeMatchingLine=False, blockStartPatterns=["\"\"\""])
        for item in found:
            if item.lower().find("@userdef") != -1:
                lineFull = self.findLine(item, "@userdef")
                id1 = self.addUniqueId(lineFull, fullPath, ttype="user")
                text2 = item
                text2, phone = self.findItem(text2, "@phone")
                text2, email = self.findItem(text2, "@email")
                text2, aliases = self.findItem(text2, "@aliases")
                descr, firstline = self.findItem(text2, "@userdef")
                sprintinfodonotuse, timetextdonotuse, userdonotuse, groupdonotuse, name = \
                    self.parseTaskQuestionRemark(firstline)
                obj = self.projectInfoObject.users.addUser(id=id1, name=name.strip(), description=descr.strip())
                obj.model.path = fullPath
                obj.model.aliases = [item.lower().strip() for item in aliases.split(",")]
                obj.model.phone = phone
                obj.model.email = email

    def _findGroups(self, text, path, fullPath):
        """Extract @groupdef blocks and register the groups."""
        found = j.codetools.regex.extractBlocks(text, includeMatchingLine=False, blockStartPatterns=["\"\"\""])
        for item in found:
            if item.lower().find("@groupdef") != -1:
                lineFull = self.findLine(item, "@groupdef")
                id1 = self.addUniqueId(lineFull, fullPath, ttype="group")
                text2 = item
                text2, email = self.findItem(text2, "@email")
                text2, aliases = self.findItem(text2, "@aliases")
                descr, firstline = self.findItem(text2, "@groupdef")
                sprintinfodonotuse, timetextdonotuse, userdonotuse, groupdonotuse, name = \
                    self.parseTaskQuestionRemark(firstline)
                obj = self.projectInfoObject.groups.addGroup(id=id1, name=name.strip(), descr=descr.strip())
                obj.model.path = fullPath
                obj.model.aliases = aliases.split(",")
                obj.model.email = email

    def _descrToDescrAndRemarks(self, text):
        """Split a description on a ``=======`` separator line into
        (public description, internal remarks)."""
        if text.find("=======") != -1:
            descr = ""
            intdescr = ""
            # Fixed: the original read ``sprint.model.description`` and the
            # bare name ``start`` -- both undefined in this scope.
            lines = text.split("\n")
            state = "start"
            for line in lines:
                if line.find("======") == -1 and state == "start":
                    descr += line + "\n"
                if state == "int":
                    intdescr += line + "\n"
                if line.find("======") != -1:
                    state = "int"
            return descr, intdescr
        else:
            return text, ""

    def _normalizeDescr(self, text):
        """Lower-case ``text`` and strip punctuation/markup so that two
        descriptions of the same item compare equal."""
        text = text.lower()
        splitat = ["{", "(", "[", "#", "%", "$", "'"]
        for tosplit in splitat:
            # Keep only what precedes the first occurrence of the marker.
            text = text.split(tosplit)[0]
        text = text.replace(",", "")
        text = text.replace(":", "")
        text = text.replace(";", "")
        text = text.replace("  ", " ")
        if text != "" and text[-1] == " ":
            text = text[:-1]
        text = text.replace("-", "")
        text = text.replace("_", "")
        return text

    def shortenDescr(self, text, maxnrchars=60):
        """Shorten ``text`` to a title of at most ``maxnrchars`` chars."""
        return j.codetools.textToTitle(text, maxnrchars)

    def _getLinesAround(self, path, tofind, nrabove, nrbelow):
        """Return the lines of ``path`` surrounding the first line that
        contains ``tofind`` (``nrabove`` before, ``nrbelow`` after), or ""."""
        text = j.system.fs.fileGetContents(path)
        nr = 0
        lines = text.split("\n")
        for line in lines:
            if line.find(tofind) != -1:
                if nr - nrabove < 0:
                    nrstart = 0
                else:
                    nrstart = nr - nrabove
                # Fixed: the original used nrabove for the upper bound too,
                # so the nrbelow argument was silently ignored.
                if nr + nrbelow > len(lines):
                    nrstop = len(lines)
                else:
                    nrstop = nr + nrbelow
                return "\n".join(lines[nrstart:nrstop])
            nr += 1
        return ""

    def addUniqueId(self, line, fullPath, ttype="sprint"):
        """Ensure ``line`` in ``fullPath`` carries a ``((ttype:id))`` marker,
        generating and writing a fresh id when it has none; return the id."""
        line, id1 = self.findId(line, fullPath)
        if id1 == 0:
            # create unique id and put it in the file
            id1 = j.base.idgenerator.generateIncrID("%sid" % ttype, self.service)
            tfe = j.codetools.getTextFileEditor(fullPath)
            tfe.addItemToFoundLineOnlyOnce(line, " ((%s:%s))" % (ttype, id1), r"\(+.*: *\d* *\)+", reset=self.reset)
        return id1

    def _findTasks(self, text, path, fullPath):
        """Turn every ``@todo`` line in ``text`` into a task on the model."""
        #@todo S2 do same for remarks & questions
        def findTodoVariants(line):
            # Return whichever spelling of the marker the line starts with.
            variants = ["@todo:", "@todo :", "@todo"]
            for variant in variants:
                if line.strip().find(variant) == 0:
                    return variant
        if text.lower().find("@todo") != -1:
            lines = j.codetools.regex.findAll("@todo.*", text)
            for line in lines:
                self.addUniqueId(line, fullPath, ttype="todo")
                line, id1 = self.findId(line, fullPath)
                todostatement = findTodoVariants(line)
                line1 = line.replace(todostatement, "")
                infotext, timetext, user, group, descr = self.parseTaskQuestionRemark(line1)
                obj = self.projectInfoObject.tasks.addTask(id=id1, descr=descr.strip())
                obj.model.storyid = 0
                obj.model.users = user
                obj.model.group = group
                obj.model.path = fullPath
                obj.model.context = self._getLinesAround(fullPath, line, 10, 20)
                obj.model.descrshort = self.shortenDescr(descr)
                print("tasktext:%s" % line)
                self._parseTaskInfo(obj.model, infotext)
                self.parseTimeInfo(timetext, obj.model, defaults=[0, 1, 0, 1, 0])
                if obj.model.storyid == 0:
                    obj.model.storyid = 999  # 999 is the unsorted story card

    def findLineNr(self, text):
        """Return the 1-based number of the first line of this file that
        contains ``text``, or False when absent."""
        linenr = 0
        for line in self.code.split("\n"):
            linenr += 1
            if line.find(text) != -1:
                return linenr
        # Fixed: the original's dangling else could return False on the
        # first non-matching line; only fail after scanning every line.
        return False

    def findReviews(self):
        """
        return [[name,description,users,linefrom,lineto]]
        """
        items = self.findItems("@review")
        result = []
        for item in items:
            linenr = self.findLineNr(item)
            [infoitems, timeitem, users, tags, descr] = self.parseBasics(item)
            tags = tags.lower()
            tt = j.core.tags.getObject(tags)
            # Fixed: defaults are now set per item; the original could leave
            # lineto unbound (or stale from a previous iteration) when a
            # line: tag supplied only a "-N" bound.
            linefrom = 1
            lineto = self.nrlines
            if tt.tagExists("line"):
                linesfromto = tt.tagGet("line")
                items = linesfromto.split(",")
                for item in items:
                    if item.strip()[0] == "-":
                        linefrom = linenr - int(item[1:].strip())
                        if linefrom < 1:
                            linefrom = 1
                    if item.strip()[0] == "+":
                        lineto = linenr + int(item[1:].strip())
            if tt.tagExists("name"):
                name = tt.tagGet("name")
            else:
                name = ""
            result.append([name, descr, users, linefrom, lineto, linenr])
        return result

    def errorTrap(self, msg):
        """Report a parse problem without aborting the scan."""
        j.console.echo("ERROR: %s" % msg)

    def __str__(self):
        # NOTE(review): self.model is never assigned in this class --
        # presumably attached externally; __str__ fails otherwise.
        ss = ""
        ss += "%s\n" % self.model.sprints
        ss += "%s\n" % self.model.stories
        ss += "%s\n" % self.model.tasks
        ss += "%s\n" % self.model.users
        ss += "%s\n" % self.model.groups
        ss += "%s\n" % self.model.remarks
        ss += "%s\n" % self.model.questions
        return ss

    def __repr__(self):
        return self.__str__()
|
|
import os
import sys
import CPUtimer
from sys import argv, exit
class Vertex:
    """One graph node: its id, a weight map of adjacent vertices, and the
    bookkeeping fields used by the bucket shortest-path algorithm."""

    def __init__(self, node):
        self.id = node
        self.adjacent = {}            # neighbour Vertex -> edge weight
        self.predecessor = {}         # set during relaxation
        self.position_in_buckets = -1
        self.distance_from_source = -1

    def __str__(self):
        neighbour_ids = [neighbour.id for neighbour in self.adjacent]
        return str(self.id) + ' adjacent: ' + str(neighbour_ids)

    def add_neighbor(self, neighbor, weight=0):
        """Record an edge to ``neighbor`` with the given weight."""
        self.adjacent[neighbor] = weight

    def get_connections(self):
        """All adjacent Vertex objects (a dict keys view)."""
        return self.adjacent.keys()

    def get_weight(self, neighbor):
        """Weight of the edge to ``neighbor`` (KeyError if absent)."""
        return self.adjacent[neighbor]

    def get_id(self):
        return self.id

    def get_predecessor(self):
        return self.predecessor

    def set_predecessor(self, predecessor):
        self.predecessor = predecessor

    def get_position_in_buckets(self):
        return self.position_in_buckets

    def set_position_in_buckets(self, position):
        self.position_in_buckets = position

    def get_distance_from_source(self):
        return self.distance_from_source

    def set_distance_from_source(self, distance):
        self.distance_from_source = distance
class Graph:
    """Undirected weighted graph with a Dial-style bucket implementation
    of Dijkstra's shortest path for small integer edge weights."""

    # Sentinel distance for "not reached yet" (value kept from the original).
    _UNREACHED = 999999999999999999999999999999999999

    def __init__(self):
        self.vert_dict = {}
        self.num_vertices = 0

    def __iter__(self):
        return iter(self.vert_dict.values())

    def add_vertex(self, node):
        """Create (or overwrite) the vertex named ``node`` and return it."""
        self.num_vertices = self.num_vertices + 1
        new_vertex = Vertex(node)
        self.vert_dict[node] = new_vertex
        return new_vertex

    def get_vertex(self, n):
        """Return the Vertex named ``n``, or None when unknown."""
        if n in self.vert_dict:
            return self.vert_dict[n]
        else:
            return None

    def add_edge(self, frm, to, cost=0):
        """Add an undirected edge, creating missing endpoints on demand."""
        if frm not in self.vert_dict:
            self.add_vertex(frm)
        if to not in self.vert_dict:
            self.add_vertex(to)
        self.vert_dict[frm].add_neighbor(self.vert_dict[to], cost)
        self.vert_dict[to].add_neighbor(self.vert_dict[frm], cost)

    def get_vertices(self):
        return self.vert_dict.keys()

    def buckets(self, startvertex, maxweigth, numberofvertexes):
        """Run bucket-based Dijkstra from vertex ``startvertex + 1`` and
        return the path (vertex ids) from vertex ``numberofvertexes - 1``
        back towards the source.

        ``maxweigth`` is the maximum edge weight; it bounds the number of
        distance buckets at ``numberofvertexes * maxweigth``.

        Fixed: the original referenced the module-global ``g`` instead of
        ``self`` throughout, so the method only worked when the caller
        happened to name its graph ``g`` at module scope.
        """
        buckets = []
        # Initialise every vertex as unreached and its own predecessor.
        for i in range(1, numberofvertexes):
            self.get_vertex(i).set_distance_from_source(self._UNREACHED)
            self.get_vertex(i).set_predecessor(i)
        for i in range(0, (numberofvertexes * maxweigth + 1)):
            buckets.append([])
        buckets_index = 0
        # NOTE(review): the source is startvertex + 1, matching the 1-based
        # vertex ids of the STP instance files -- confirm for other callers.
        buckets[0].append(startvertex + 1)
        self.get_vertex(buckets[0][0]).set_distance_from_source(0)
        while True:
            # Advance to the first non-empty bucket; stop when none remain.
            while (len(buckets[buckets_index]) == 0 and buckets_index < numberofvertexes * maxweigth):
                buckets_index = buckets_index + 1
            if buckets_index == numberofvertexes * maxweigth:
                break
            current_vertex = buckets[buckets_index].pop()
            for neigbhor in self.get_vertex(current_vertex).get_connections():
                current_vertex_dist = self.get_vertex(current_vertex).get_distance_from_source()
                neigbhor_dist = neigbhor.get_distance_from_source()
                if (neigbhor_dist > (current_vertex_dist + self.get_vertex(current_vertex).get_weight(neigbhor))):
                    # Move the neighbour out of its old bucket before relaxing.
                    if (neigbhor_dist != self._UNREACHED):
                        if (len(buckets[neigbhor_dist]) != 0):
                            buckets[neigbhor_dist].remove(neigbhor.get_id())
                    neigbhor.set_predecessor(self.get_vertex(current_vertex))
                    neigbhor.set_distance_from_source(
                        current_vertex_dist + self.get_vertex(current_vertex).get_weight(neigbhor))
                    neigbhor_dist = neigbhor.get_distance_from_source()
                    buckets[neigbhor_dist].insert(0, neigbhor.get_id())
        # Walk the predecessor chain back from the last vertex.
        caminho = []
        i = numberofvertexes - 1
        caminho.append(i)
        i = self.get_vertex(i)
        while True:
            pred = i.get_predecessor()
            if pred == 0:
                caminho.append(pred)
                break
            if pred == i.get_id():
                # Reached the source: its predecessor is its own id.
                break
            caminho.append(pred.get_id())
            i = pred
        return caminho
if __name__ == '__main__':
    # Solve every STP instance in DMXA/ with the bucket Dijkstra and write
    # the path plus CPU-timer statistics to Output/Question1d/<instance>.
    instance_path = "DMXA/"
    timer = CPUtimer.CPUTimer()
    file_list = [f for f in os.listdir(instance_path)
                 if f.endswith('.stp')]
    for filename in sorted(file_list):
        path = os.path.join(instance_path, filename)
        with open(path) as f:
            conteudo = f.readlines()
        lines = [t.strip() for t in conteudo]
        # Collect the lines of the "Section Graph" block up to "End".
        graph = []
        countlines = 1
        for i in lines:
            if ("Section Graph" in i):
                break
            countlines += 1
        while (lines[countlines] != "End"):
            graph.append(lines[countlines].split())
            countlines += 1
        numVertices = int(graph[0][1])
        numArestas = int(graph[1][1])  # edge count from the header (informational)
        g = Graph()
        for i in range(numVertices + 1):
            g.add_vertex(i)
        # Edge lines are "E <from> <to> <weight>".
        for i in range(2, len(graph)):
            g.add_edge(int(graph[i][1]), int(graph[i][2]), int(graph[i][3]))
        timer.reset()
        timer.start()
        # Run twice so the timer can report total/average/last-lap figures.
        for i in range(0, 2):
            caminho = g.buckets(1, 13, numVertices)
        # Fixed: sorted() returns a new list and the original discarded it,
        # so the path was written unsorted; sort in place instead.
        caminho.sort(reverse=True)
        timer.lap()
        timer.stop()
        if not os.path.exists("Output/Question" + str("1d")):
            try:
                os.makedirs("Output/Question" + str("1d"))
            except OSError:  # Guard against race condition
                raise
        # Fixed: the output handle was never closed; use a context manager.
        with open((os.getcwd() + "/Output/Question" + str("1d")) + "/" + filename, "w") as output:
            output.write("Caminho: " + str(caminho))
            output.write("\n\n")
            output.write("Total time: " + str(timer.get_time()) + " s")
            output.write("\nAverage time: " + str(timer.get_time("average", "seg")) + " s")
            output.write("\nLast call: " + str(timer.get_time("last")) + " s")
            output.write("\nStamp 1 of the total: " + str(timer.get_stamp("total", "si")))
            output.write("\nStamp 2 of the total: " + str(timer.get_stamp("total", "clock")))
            output.write("\nPattern that ignores zeros:")
            output.write("\n" + timer.get_stamp("total", "si", True))
            output.write("\n" + timer.get_stamp("total", "clock", True))
            output.write("\n\n")
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for layer wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
from tensorflow.python import keras
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.platform import test
from tensorflow.python.training.checkpointable import util as checkpointable_util
from tensorflow.python.training.rmsprop import RMSPropOptimizer
class _RNNCellWithConstants(keras.layers.Layer):
    """Minimal RNN cell combining the step input, the previous state and a
    constant tensor through three kernels; exercises the `constants`
    plumbing of the RNN wrappers under test."""

    def __init__(self, units, **kwargs):
        self.units = units
        self.state_size = units
        super(_RNNCellWithConstants, self).__init__(**kwargs)

    def build(self, input_shape):
        input_shape, constant_shape = input_shape
        # (attribute, weight name, shape) for the three kernels, created in
        # the same order as before so variable ordering is unchanged.
        specs = (
            ('input_kernel', 'kernel', (input_shape[-1], self.units)),
            ('recurrent_kernel', 'recurrent_kernel', (self.units, self.units)),
            ('constant_kernel', 'constant_kernel', (constant_shape[-1], self.units)),
        )
        for attr, weight_name, shape in specs:
            setattr(self, attr, self.add_weight(
                shape=shape,
                initializer='uniform',
                name=weight_name))
        self.built = True

    def call(self, inputs, states, constants):
        prev_output, = states
        constant, = constants
        dot = keras.backend.dot
        output = (dot(inputs, self.input_kernel)
                  + dot(prev_output, self.recurrent_kernel)
                  + dot(constant, self.constant_kernel))
        return output, [output]

    def get_config(self):
        # Merge this cell's config over the base layer's config.
        config = dict(super(_RNNCellWithConstants, self).get_config())
        config['units'] = self.units
        return config
class TimeDistributedTest(test.TestCase):
    """Unit tests for the keras.layers.TimeDistributed wrapper.

    Fixed: the deprecated ``assertEquals`` alias is replaced with
    ``assertEqual`` (same behavior, no deprecation warning).
    """

    @tf_test_util.run_in_graph_and_eager_modes
    def test_timedistributed_dense(self):
        """Wrapped Dense must train and round-trip through get_config."""
        model = keras.models.Sequential()
        model.add(
            keras.layers.TimeDistributed(
                keras.layers.Dense(2), input_shape=(3, 4)))
        model.compile(optimizer=RMSPropOptimizer(0.01), loss='mse')
        model.fit(
            np.random.random((10, 3, 4)),
            np.random.random((10, 3, 2)),
            epochs=1,
            batch_size=10)
        # test config
        model.get_config()
        # check whether the model variables are present in the
        # checkpointable list of objects
        checkpointed_objects = set(checkpointable_util.list_objects(model))
        for v in model.variables:
            self.assertIn(v, checkpointed_objects)

    def test_timedistributed_static_batch_size(self):
        """A statically-known batch size must not break the wrapper."""
        model = keras.models.Sequential()
        model.add(
            keras.layers.TimeDistributed(
                keras.layers.Dense(2), input_shape=(3, 4), batch_size=10))
        model.compile(optimizer=RMSPropOptimizer(0.01), loss='mse')
        model.fit(
            np.random.random((10, 3, 4)),
            np.random.random((10, 3, 2)),
            epochs=1,
            batch_size=10)

    def test_timedistributed_invalid_init(self):
        """Constructing with a non-Layer argument must raise ValueError."""
        x = constant_op.constant(np.zeros((1, 1)).astype('float32'))
        # NOTE(review): assertRaisesRegexp is the legacy alias kept for
        # compatibility; assertRaisesRegex is the modern spelling.
        with self.assertRaisesRegexp(
            ValueError,
            'Please initialize `TimeDistributed` layer with a `Layer` instance.'):
            keras.layers.TimeDistributed(x)

    def test_timedistributed_conv2d(self):
        """Wrapped Conv2D must train and survive a JSON round trip."""
        with self.cached_session():
            model = keras.models.Sequential()
            model.add(
                keras.layers.TimeDistributed(
                    keras.layers.Conv2D(5, (2, 2), padding='same'),
                    input_shape=(2, 4, 4, 3)))
            model.add(keras.layers.Activation('relu'))
            model.compile(optimizer='rmsprop', loss='mse')
            model.train_on_batch(
                np.random.random((1, 2, 4, 4, 3)), np.random.random((1, 2, 4, 4, 5)))
            model = keras.models.model_from_json(model.to_json())
            model.summary()

    def test_timedistributed_stacked(self):
        """Two stacked TimeDistributed(Dense) layers must train."""
        with self.cached_session():
            model = keras.models.Sequential()
            model.add(
                keras.layers.TimeDistributed(
                    keras.layers.Dense(2), input_shape=(3, 4)))
            model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
            model.add(keras.layers.Activation('relu'))
            model.compile(optimizer='rmsprop', loss='mse')
            model.fit(
                np.random.random((10, 3, 4)),
                np.random.random((10, 3, 3)),
                epochs=1,
                batch_size=10)

    def test_regularizers(self):
        """Regularizer losses of the wrapped layer must be exposed."""
        with self.cached_session():
            model = keras.models.Sequential()
            model.add(
                keras.layers.TimeDistributed(
                    keras.layers.Dense(2, kernel_regularizer='l1'),
                    input_shape=(3, 4)))
            model.add(keras.layers.Activation('relu'))
            model.compile(optimizer='rmsprop', loss='mse')
            self.assertEqual(len(model.losses), 1)

    def test_TimeDistributed_learning_phase(self):
        """The training flag must propagate to the wrapped Dropout."""
        with self.cached_session():
            # test layers that need learning_phase to be set
            np.random.seed(1234)
            x = keras.layers.Input(shape=(3, 2))
            y = keras.layers.TimeDistributed(
                keras.layers.Dropout(.999))(x, training=True)
            model = keras.models.Model(x, y)
            y = model.predict(np.random.random((10, 3, 2)))
            # With ~all units dropped the mean output is close to zero.
            self.assertAllClose(np.mean(y), 0., atol=1e-1, rtol=1e-1)

    def test_TimeDistributed_batchnorm(self):
        """BatchNormalization statistics must update through the wrapper."""
        with self.cached_session():
            # test that wrapped BN updates still work.
            model = keras.models.Sequential()
            model.add(keras.layers.TimeDistributed(
                keras.layers.BatchNormalization(center=True, scale=True),
                name='bn',
                input_shape=(10, 2)))
            model.compile(optimizer='rmsprop', loss='mse')
            # Assert that mean and variance are 0 and 1.
            td = model.layers[0]
            self.assertAllClose(td.get_weights()[2], np.array([0, 0]))
            assert np.array_equal(td.get_weights()[3], np.array([1, 1]))
            # Train
            model.train_on_batch(np.random.normal(loc=2, scale=2, size=(1, 10, 2)),
                                 np.broadcast_to(np.array([0, 1]), (1, 10, 2)))
            # Assert that mean and variance changed.
            assert not np.array_equal(td.get_weights()[2], np.array([0, 0]))
            assert not np.array_equal(td.get_weights()[3], np.array([1, 1]))
            # Verify input_map has one mapping from inputs to reshaped inputs.
            self.assertEqual(len(td._input_map.keys()), 1)

    def test_TimeDistributed_trainable(self):
        """Toggling trainable must add/remove updates and weights."""
        # test layers that need learning_phase to be set
        x = keras.layers.Input(shape=(3, 2))
        layer = keras.layers.TimeDistributed(keras.layers.BatchNormalization())
        _ = layer(x)
        self.assertEqual(len(layer.updates), 2)
        self.assertEqual(len(layer.trainable_weights), 2)
        layer.trainable = False
        assert not layer.updates
        assert not layer.trainable_weights
        layer.trainable = True
        assert len(layer.updates) == 2
        assert len(layer.trainable_weights) == 2

    def test_TimeDistributed_with_masked_embedding_and_unspecified_shape(self):
        """Masks from a wrapped Embedding must flow through stacked RNNs."""
        with self.cached_session():
            # test with unspecified shape and Embeddings with mask_zero
            model = keras.models.Sequential()
            model.add(keras.layers.TimeDistributed(
                keras.layers.Embedding(5, 6, mask_zero=True),
                input_shape=(None, None)))  # N by t_1 by t_2 by 6
            model.add(keras.layers.TimeDistributed(
                keras.layers.SimpleRNN(7, return_sequences=True)))
            model.add(keras.layers.TimeDistributed(
                keras.layers.SimpleRNN(8, return_sequences=False)))
            model.add(keras.layers.SimpleRNN(1, return_sequences=False))
            model.compile(optimizer='rmsprop', loss='mse')
            model_input = np.random.randint(low=1, high=5, size=(10, 3, 4),
                                            dtype='int32')
            for i in range(4):
                model_input[i, i:, i:] = 0
            model.fit(model_input,
                      np.random.random((10, 1)), epochs=1, batch_size=10)
            mask_outputs = [model.layers[0].compute_mask(model.input)]
            for layer in model.layers[1:]:
                mask_outputs.append(layer.compute_mask(layer.input, mask_outputs[-1]))
            func = keras.backend.function([model.input], mask_outputs[:-1])
            mask_outputs_val = func([model_input])
            ref_mask_val_0 = model_input > 0  # embedding layer
            ref_mask_val_1 = ref_mask_val_0  # first RNN layer
            ref_mask_val_2 = np.any(ref_mask_val_1, axis=-1)  # second RNN layer
            ref_mask_val = [ref_mask_val_0, ref_mask_val_1, ref_mask_val_2]
            for i in range(3):
                self.assertAllEqual(mask_outputs_val[i], ref_mask_val[i])
            self.assertIs(mask_outputs[-1], None)  # final layer

    def test_TimeDistributed_with_masking_layer(self):
        """A wrapped Masking layer must produce a propagating mask."""
        with self.cached_session():
            # test with Masking layer
            model = keras.models.Sequential()
            model.add(keras.layers.TimeDistributed(keras.layers.Masking(
                mask_value=0.,), input_shape=(None, 4)))
            model.add(keras.layers.TimeDistributed(keras.layers.Dense(5)))
            model.compile(optimizer='rmsprop', loss='mse')
            model_input = np.random.randint(low=1, high=5, size=(10, 3, 4))
            for i in range(4):
                model_input[i, i:, :] = 0.
            model.compile(optimizer='rmsprop', loss='mse')
            model.fit(model_input,
                      np.random.random((10, 3, 5)), epochs=1, batch_size=6)
            mask_outputs = [model.layers[0].compute_mask(model.input)]
            mask_outputs += [model.layers[1].compute_mask(model.layers[1].input,
                                                          mask_outputs[-1])]
            func = keras.backend.function([model.input], mask_outputs)
            mask_outputs_val = func([model_input])
            # NOTE(review): .all() collapses each array to one bool, so these
            # compare scalars -- a weak check, kept for compatibility.
            self.assertEqual((mask_outputs_val[0]).all(),
                             model_input.all())
            self.assertEqual((mask_outputs_val[1]).all(),
                             model_input.all())
class BidirectionalTest(test.TestCase):
  """Tests for the `keras.layers.Bidirectional` RNN wrapper: merge modes,
  serialization, state, masking interplay, updates/losses bookkeeping and
  constants support."""

  def test_bidirectional(self):
    """End-to-end smoke test of every merge mode with a Sequential model."""
    rnn = keras.layers.SimpleRNN
    samples = 2
    dim = 2
    timesteps = 2
    output_dim = 2
    with self.cached_session():
      for mode in ['sum', 'concat', 'ave', 'mul']:
        x = np.random.random((samples, timesteps, dim))
        # 'concat' stacks forward/backward outputs, doubling the last dim.
        target_dim = 2 * output_dim if mode == 'concat' else output_dim
        y = np.random.random((samples, target_dim))

        # test with Sequential model
        model = keras.models.Sequential()
        model.add(
            keras.layers.Bidirectional(
                rnn(output_dim), merge_mode=mode, input_shape=(timesteps, dim)))
        model.compile(optimizer=RMSPropOptimizer(0.01), loss='mse')
        model.fit(x, y, epochs=1, batch_size=1)

        # check whether the model variables are present in the
        # checkpointable list of objects
        checkpointed_objects = set(checkpointable_util.list_objects(model))
        for v in model.variables:
          self.assertIn(v, checkpointed_objects)

        # test compute output shape
        ref_shape = model.layers[-1].output.get_shape()
        shape = model.layers[-1].compute_output_shape(
            (None, timesteps, dim))
        self.assertListEqual(shape.as_list(), ref_shape.as_list())

        # test config
        model.get_config()
        model = keras.models.model_from_json(model.to_json())
        model.summary()

  def test_bidirectional_invalid_init(self):
    """Constructing Bidirectional with a non-Layer must raise ValueError."""
    x = constant_op.constant(np.zeros((1, 1)).astype('float32'))
    with self.assertRaisesRegexp(
        ValueError,
        'Please initialize `Bidirectional` layer with a `Layer` instance.'):
      keras.layers.Bidirectional(x)

  def test_bidirectional_weight_loading(self):
    """A get_weights/set_weights round trip must not change predictions."""
    rnn = keras.layers.SimpleRNN
    samples = 2
    dim = 2
    timesteps = 2
    output_dim = 2
    with self.cached_session():
      x = np.random.random((samples, timesteps, dim))
      model = keras.models.Sequential()
      model.add(
          keras.layers.Bidirectional(
              rnn(output_dim), input_shape=(timesteps, dim)))
      y_ref = model.predict(x)
      weights = model.layers[-1].get_weights()
      model.layers[-1].set_weights(weights)
      y = model.predict(x)
      self.assertAllClose(y, y_ref)

  def test_bidirectional_stacked(self):
    """Two stacked Bidirectional layers train in both Sequential and
    functional models."""
    # test stacked bidirectional layers
    rnn = keras.layers.SimpleRNN
    samples = 2
    dim = 2
    timesteps = 2
    output_dim = 2
    mode = 'sum'
    with self.cached_session():
      x = np.random.random((samples, timesteps, dim))
      target_dim = 2 * output_dim if mode == 'concat' else output_dim
      y = np.random.random((samples, target_dim))
      model = keras.models.Sequential()
      model.add(
          keras.layers.Bidirectional(
              rnn(output_dim, return_sequences=True),
              merge_mode=mode,
              input_shape=(timesteps, dim)))
      model.add(keras.layers.Bidirectional(rnn(output_dim), merge_mode=mode))
      model.compile(loss='mse', optimizer='sgd')
      model.fit(x, y, epochs=1, batch_size=1)

      # test with functional API
      inputs = keras.layers.Input((timesteps, dim))
      output = keras.layers.Bidirectional(
          rnn(output_dim), merge_mode=mode)(inputs)
      model = keras.models.Model(inputs, output)
      model.compile(loss='mse', optimizer='sgd')
      model.fit(x, y, epochs=1, batch_size=1)

  def test_bidirectional_statefulness(self):
    """A stateful wrapped RNN (fixed batch size) must still train."""
    # Bidirectional and stateful
    rnn = keras.layers.SimpleRNN
    samples = 2
    dim = 2
    timesteps = 2
    output_dim = 2
    mode = 'sum'
    with self.cached_session():
      x = np.random.random((samples, timesteps, dim))
      target_dim = 2 * output_dim if mode == 'concat' else output_dim
      y = np.random.random((samples, target_dim))
      inputs = keras.layers.Input(batch_shape=(1, timesteps, dim))
      output = keras.layers.Bidirectional(
          rnn(output_dim, stateful=True), merge_mode=mode)(inputs)
      model = keras.models.Model(inputs, output)
      model.compile(loss='mse', optimizer='sgd')
      model.fit(x, y, epochs=1, batch_size=1)

  def test_Bidirectional_merged_value(self):
    """The merged output must equal merge_func applied to the forward output
    and the time-reversed backward output, for every merge mode."""
    rnn = keras.layers.LSTM
    samples = 2
    dim = 5
    timesteps = 3
    units = 3
    x = [np.random.rand(samples, timesteps, dim)]
    with self.cached_session():
      for merge_mode in ['sum', 'mul', 'ave', 'concat', None]:
        if merge_mode == 'sum':
          merge_func = lambda y, y_rev: y + y_rev
        elif merge_mode == 'mul':
          merge_func = lambda y, y_rev: y * y_rev
        elif merge_mode == 'ave':
          merge_func = lambda y, y_rev: (y + y_rev) / 2
        elif merge_mode == 'concat':
          merge_func = lambda y, y_rev: np.concatenate((y, y_rev), axis=-1)
        else:
          merge_func = lambda y, y_rev: [y, y_rev]

        # basic case
        inputs = keras.Input((timesteps, dim))
        layer = keras.layers.Bidirectional(
            rnn(units, return_sequences=True), merge_mode=merge_mode)
        f_merged = keras.backend.function([inputs], _to_list(layer(inputs)))
        f_forward = keras.backend.function([inputs],
                                           [layer.forward_layer.call(inputs)])
        # The backward layer runs on reversed input, so reverse its output
        # along the time axis before comparing.
        f_backward = keras.backend.function(
            [inputs],
            [keras.backend.reverse(layer.backward_layer.call(inputs), 1)])
        y_merged = f_merged(x)
        y_expected = _to_list(merge_func(f_forward(x)[0], f_backward(x)[0]))
        assert len(y_merged) == len(y_expected)
        for x1, x2 in zip(y_merged, y_expected):
          self.assertAllClose(x1, x2, atol=1e-5)

        # test return_state
        inputs = keras.Input((timesteps, dim))
        layer = keras.layers.Bidirectional(
            rnn(units, return_state=True), merge_mode=merge_mode)
        f_merged = keras.backend.function([inputs], layer(inputs))
        f_forward = keras.backend.function([inputs],
                                           layer.forward_layer.call(inputs))
        f_backward = keras.backend.function([inputs],
                                            layer.backward_layer.call(inputs))
        n_states = len(layer.layer.states)
        y_merged = f_merged(x)
        y_forward = f_forward(x)
        y_backward = f_backward(x)
        y_expected = _to_list(merge_func(y_forward[0], y_backward[0]))
        # Merged output is followed by forward then backward states.
        assert len(y_merged) == len(y_expected) + n_states * 2
        for x1, x2 in zip(y_merged, y_expected):
          self.assertAllClose(x1, x2, atol=1e-5)
        y_merged = y_merged[-n_states * 2:]
        y_forward = y_forward[-n_states:]
        y_backward = y_backward[-n_states:]
        for state_birnn, state_inner in zip(y_merged, y_forward + y_backward):
          self.assertAllClose(state_birnn, state_inner, atol=1e-5)

  def test_Bidirectional_dropout(self):
    """Dropout flags learning-phase use; training=True pins it off and
    predictions become deterministic."""
    rnn = keras.layers.LSTM
    samples = 2
    dim = 5
    timesteps = 3
    units = 3
    merge_mode = 'sum'
    x = [np.random.rand(samples, timesteps, dim)]
    with self.cached_session():
      inputs = keras.Input((timesteps, dim))
      wrapped = keras.layers.Bidirectional(
          rnn(units, dropout=0.2, recurrent_dropout=0.2), merge_mode=merge_mode)
      outputs = _to_list(wrapped(inputs, training=True))
      assert all(not getattr(x, '_uses_learning_phase') for x in outputs)

      inputs = keras.Input((timesteps, dim))
      wrapped = keras.layers.Bidirectional(
          rnn(units, dropout=0.2, return_state=True), merge_mode=merge_mode)
      outputs = _to_list(wrapped(inputs))
      assert all(x._uses_learning_phase for x in outputs)

      model = keras.Model(inputs, outputs)
      assert model.uses_learning_phase
      y1 = _to_list(model.predict(x))
      y2 = _to_list(model.predict(x))
      for x1, x2 in zip(y1, y2):
        self.assertAllClose(x1, x2, atol=1e-5)

  def test_Bidirectional_state_reuse(self):
    """Returned states can seed another Bidirectional only as a list; a
    bare tensor must be rejected."""
    rnn = keras.layers.LSTM
    samples = 2
    dim = 5
    timesteps = 3
    units = 3
    with self.cached_session():
      input1 = keras.layers.Input((timesteps, dim))
      layer = keras.layers.Bidirectional(
          rnn(units, return_state=True, return_sequences=True))
      state = layer(input1)[1:]

      # test passing invalid initial_state: passing a tensor
      input2 = keras.layers.Input((timesteps, dim))
      with self.assertRaises(ValueError):
        output = keras.layers.Bidirectional(
            rnn(units))(input2, initial_state=state[0])

      # test valid usage: passing a list
      output = keras.layers.Bidirectional(rnn(units))(input2,
                                                      initial_state=state)
      model = keras.models.Model([input1, input2], output)
      assert len(model.layers) == 4
      assert isinstance(model.layers[-1].input, list)
      inputs = [np.random.rand(samples, timesteps, dim),
                np.random.rand(samples, timesteps, dim)]
      model.predict(inputs)

  def test_Bidirectional_trainable(self):
    """Toggling `trainable` hides and restores all six wrapped weights."""
    # test layers that need learning_phase to be set
    with self.cached_session():
      x = keras.layers.Input(shape=(3, 2))
      layer = keras.layers.Bidirectional(keras.layers.SimpleRNN(3))
      _ = layer(x)
      assert len(layer.trainable_weights) == 6
      layer.trainable = False
      assert not layer.trainable_weights
      layer.trainable = True
      assert len(layer.trainable_weights) == 6

  def test_Bidirectional_updates(self):
    """Updates added on the inner layers are aggregated by the wrapper and
    split between conditional (input-reachable) and unconditional."""
    with self.cached_session():
      x = keras.layers.Input(shape=(3, 2))
      x_reachable_update = x * x
      layer = keras.layers.Bidirectional(keras.layers.SimpleRNN(3))
      _ = layer(x)
      assert not layer.updates
      assert not layer.get_updates_for(None)
      assert not layer.get_updates_for(x)
      layer.forward_layer.add_update(x_reachable_update, inputs=x)
      layer.forward_layer.add_update(1, inputs=None)
      layer.backward_layer.add_update(x_reachable_update, inputs=x)
      layer.backward_layer.add_update(1, inputs=None)
      assert len(layer.updates) == 4
      assert len(layer.get_updates_for(None)) == 2
      assert len(layer.get_updates_for(x)) == 2

  def test_Bidirectional_losses(self):
    """Regularizer losses (2 per direction per regularizer) and manually
    added losses are aggregated by the wrapper."""
    with self.cached_session():
      x = keras.layers.Input(shape=(3, 2))
      x_reachable_loss = x * x
      layer = keras.layers.Bidirectional(
          keras.layers.SimpleRNN(
              3, kernel_regularizer='l1', bias_regularizer='l1'))
      _ = layer(x)
      assert len(layer.losses) == 4
      assert len(layer.get_losses_for(None)) == 4
      assert not layer.get_losses_for(x)
      layer.forward_layer.add_loss(x_reachable_loss, inputs=x)
      layer.forward_layer.add_loss(1, inputs=None)
      layer.backward_layer.add_loss(x_reachable_loss, inputs=x)
      layer.backward_layer.add_loss(1, inputs=None)
      assert len(layer.losses) == 8
      assert len(layer.get_losses_for(None)) == 6
      assert len(layer.get_losses_for(x)) == 2

  def test_Bidirectional_with_constants(self):
    """`constants=` is accepted, survives get_config/from_config, and works
    with both keyword and flat-list call signatures."""
    with self.cached_session():
      # Test basic case.
      x = keras.Input((5, 5))
      c = keras.Input((3,))
      cell = _RNNCellWithConstants(32)
      custom_objects = {'_RNNCellWithConstants': _RNNCellWithConstants}
      with keras.utils.CustomObjectScope(custom_objects):
        layer = keras.layers.Bidirectional(keras.layers.RNN(cell))
      y = layer(x, constants=c)
      model = keras.Model([x, c], y)
      model.compile(optimizer='rmsprop', loss='mse')
      model.train_on_batch(
          [np.zeros((6, 5, 5)), np.zeros((6, 3))],
          np.zeros((6, 64))
      )

      # Test basic case serialization.
      x_np = np.random.random((6, 5, 5))
      c_np = np.random.random((6, 3))
      y_np = model.predict([x_np, c_np])
      weights = model.get_weights()
      config = layer.get_config()

      with keras.utils.CustomObjectScope(custom_objects):
        layer = keras.layers.Bidirectional.from_config(copy.deepcopy(config))
      y = layer(x, constants=c)
      model = keras.Model([x, c], y)
      model.set_weights(weights)
      y_np_2 = model.predict([x_np, c_np])
      self.assertAllClose(y_np, y_np_2, atol=1e-4)

      # Test flat list inputs
      with keras.utils.CustomObjectScope(custom_objects):
        layer = keras.layers.Bidirectional.from_config(copy.deepcopy(config))
      y = layer([x, c])
      model = keras.Model([x, c], y)
      model.set_weights(weights)
      y_np_3 = model.predict([x_np, c_np])
      self.assertAllClose(y_np, y_np_3, atol=1e-4)

  def test_Bidirectional_with_constants_layer_passing_initial_state(self):
    """Constants plus explicit forward/backward initial states work, round-
    trip through serialization, and the states demonstrably affect output."""
    with self.cached_session():
      # Test basic case.
      x = keras.Input((5, 5))
      c = keras.Input((3,))
      s_for = keras.Input((32,))
      s_bac = keras.Input((32,))
      cell = _RNNCellWithConstants(32)
      custom_objects = {'_RNNCellWithConstants': _RNNCellWithConstants}
      with keras.utils.CustomObjectScope(custom_objects):
        layer = keras.layers.Bidirectional(keras.layers.RNN(cell))
      y = layer(x, initial_state=[s_for, s_bac], constants=c)
      model = keras.Model([x, s_for, s_bac, c], y)
      model.compile(optimizer='rmsprop', loss='mse')
      model.train_on_batch(
          [np.zeros((6, 5, 5)),
           np.zeros((6, 32)),
           np.zeros((6, 32)),
           np.zeros((6, 3))],
          np.zeros((6, 64))
      )

      # Test basic case serialization.
      x_np = np.random.random((6, 5, 5))
      s_fw_np = np.random.random((6, 32))
      s_bk_np = np.random.random((6, 32))
      c_np = np.random.random((6, 3))
      y_np = model.predict([x_np, s_fw_np, s_bk_np, c_np])
      weights = model.get_weights()
      config = layer.get_config()

      with keras.utils.CustomObjectScope(custom_objects):
        layer = keras.layers.Bidirectional.from_config(copy.deepcopy(config))
      y = layer(x, initial_state=[s_for, s_bac], constants=c)
      model = keras.Model([x, s_for, s_bac, c], y)
      model.set_weights(weights)
      y_np_2 = model.predict([x_np, s_fw_np, s_bk_np, c_np])
      self.assertAllClose(y_np, y_np_2, atol=1e-4)

      # Verify that state is used
      y_np_2_different_s = model.predict(
          [x_np, s_fw_np + 10., s_bk_np + 10., c_np])
      assert np.mean(y_np - y_np_2_different_s) != 0

      # Test flat list inputs
      with keras.utils.CustomObjectScope(custom_objects):
        layer = keras.layers.Bidirectional.from_config(copy.deepcopy(config))
      y = layer([x, s_for, s_bac, c])
      model = keras.Model([x, s_for, s_bac, c], y)
      model.set_weights(weights)
      y_np_3 = model.predict([x_np, s_fw_np, s_bk_np, c_np])
      self.assertAllClose(y_np, y_np_3, atol=1e-4)
def _to_list(ls):
if isinstance(ls, list):
return ls
else:
return [ls]
# Standard entry point so this test file can be executed directly.
if __name__ == '__main__':
  test.main()
|
|
import json
import os
import random
import time
from contextlib import contextmanager
from datetime import datetime, timedelta
from decimal import Decimal
from functools import partial
from urlparse import SplitResult, urlsplit, urlunsplit
from django import forms, test
from django.db import connections, transaction, DEFAULT_DB_ALIAS
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.cache import cache
from django.core.management import call_command
from django.core.urlresolvers import reverse
from django.test.client import Client, RequestFactory
from django.utils import translation
from django.utils.translation import activate, trans_real
import elasticsearch
import mock
from dateutil.parser import parse as dateutil_parser
from django_browserid.tests import mock_browserid
from nose.exc import SkipTest
from nose.tools import eq_
from post_request_task import task as post_request_task
from pyquery import PyQuery as pq
from waffle.models import Flag, Sample, Switch
import mkt
from lib.es.management.commands import reindex
from mkt.access.acl import check_ownership
from mkt.access.models import Group, GroupUser
from mkt.constants import regions
from mkt.constants.payments import PROVIDER_REFERENCE
from mkt.prices.models import AddonPremium, Price, PriceCurrency
from mkt.search.indexers import BaseIndexer
from mkt.site.fixtures import fixture
from mkt.site.storage_utils import (copy_stored_file, local_storage,
private_storage)
from mkt.site.utils import (app_factory, extension_factory, # NOQA
website_factory) # NOQA
from mkt.translations.hold import clean_translations
from mkt.translations.models import Translation
from mkt.users.models import UserProfile
from mkt.webapps.models import Webapp
# We might not have gettext available in jinja2.env.globals when running tests.
# It's only added to the globals when activating a language with tower (which
# is usually done in the middlewares). During tests, however, we might not be
# running middlewares, and thus not activating a language, and thus not
# installing gettext in the globals, and thus not have it in the context when
# rendering templates.
# Activate a default locale up front so gettext is installed in the jinja2
# globals even when tests bypass the locale-activating middleware.
activate('en-us')
class DynamicBoolFieldsTestMixin():
    """Shared assertions for models made of many `has_*` boolean columns.

    Subclasses point the mixin at a concrete model and flag vocabulary.
    """

    def setUp(self):
        """
        Create an instance of the DynamicBoolFields model and call super
        on the inheriting setUp.
        (e.g. RatingDescriptors.objects.create(addon=self.app))
        """
        self.app = app_factory()
        self.model = None
        self.related_name = ''  # Related name of the bool table on the Webapp.
        self.BOOL_DICT = []
        self.flags = []  # Flag names.
        self.expected = []  # Translation names.

    def _get_related_bool_obj(self):
        """Return the bool-table row hanging off self.app."""
        return getattr(self.app, self.related_name)

    def _flag(self):
        """Flag app with a handful of flags for testing."""
        updates = {'has_%s' % flag.lower(): True for flag in self.flags}
        self._get_related_bool_obj().update(**updates)

    def _check(self, obj=None):
        """Assert each has_* field is True exactly when its flag is set."""
        if not obj:
            obj = self._get_related_bool_obj()
        is_mapping = isinstance(obj, dict)
        for bool_name in self.BOOL_DICT:
            field = 'has_%s' % bool_name.lower()
            expected = bool_name in self.flags
            actual = obj[field] if is_mapping else getattr(obj, field)
            eq_(actual, expected, u'Unexpected value for field: %s' % field)

    def to_unicode(self, items):
        """
        Force unicode evaluation of lazy items in the passed list, for set
        comparison to a list of already-evaluated unicode strings.
        """
        return [unicode(item) for item in items]

    def test_bools_set(self):
        """Flags set via update() must read back as True."""
        self._flag()
        self._check()

    def test_to_dict(self):
        """to_dict() must mirror the per-field attribute values."""
        self._flag()
        self._check(self._get_related_bool_obj().to_dict())

    def test_default_false(self):
        """A freshly built row defaults every flag to False."""
        fresh = self.model(addon=self.app)
        eq_(getattr(fresh, 'has_%s' % self.flags[0].lower()), False)
def formset(*args, **kw):
    """
    Build up a formset-happy POST.

    *args is a sequence of dicts, one per form going into the formset.
    prefix, total_count and initial_count can be overridden via **kw; any
    remaining keywords are merged into the returned POST data verbatim.
    """
    prefix = kw.pop('prefix', 'form')
    total_count = kw.pop('total_count', len(args))
    initial_count = kw.pop('initial_count', len(args))
    data = {
        '%s-TOTAL_FORMS' % prefix: total_count,
        '%s-INITIAL_FORMS' % prefix: initial_count,
    }
    for form_idx, form_data in enumerate(args):
        for field, value in form_data.items():
            data['%s-%s-%s' % (prefix, form_idx, field)] = value
    data.update(kw)
    return data
def initial(form):
    """Gather initial data from the form into a dict."""
    data = {}
    for name, field in form.fields.items():
        if form.is_bound:
            value = form[name].data
        else:
            value = form.initial.get(name, field.initial)
        # The browser sends nothing for an unchecked checkbox.
        if isinstance(field, forms.BooleanField) and not field.to_python(value):
            continue
        data[name] = value
    return data
def check_links(expected, elements, selected=None, verify=True):
    """Useful for comparing an `expected` list of links against PyQuery
    `elements`. Expected format of links is a list of tuples, like so:

    [
        ('Home', '/'),
        ('Extensions', reverse('browse.extensions')),
        ...
    ]

    If you'd like to check if a particular item in the list is selected,
    pass as `selected` the title of the link.

    Links are verified by default.
    """
    for idx, item in enumerate(expected):
        # List item could be `(text, link)`.
        if isinstance(item, tuple):
            text, link = item
        # Or list item could be `link`.
        elif isinstance(item, basestring):
            text, link = None, item
        e = elements.eq(idx)
        if text is not None:
            eq_(e.text(), text)
        if link is not None:
            # If we passed an <li>, try to find an <a>.
            if not e.filter('a'):
                e = e.find('a')
            eq_(e.attr('href'), link)
            if verify and link != '#':
                # HEAD the link (following redirects) to prove it resolves.
                eq_(Client().head(link, follow=True).status_code, 200,
                    '%r is dead' % link)
        if text is not None and selected is not None:
            # Selected if the element (or an ancestor) carries .selected/.sel.
            e = e.filter('.selected, .sel') or e.parents('.selected, .sel')
            eq_(bool(e.length), text == selected)
class _JSONifiedResponse(object):
def __init__(self, response):
self._orig_response = response
def __getattr__(self, n):
return getattr(self._orig_response, n)
def __getitem__(self, n):
return self._orig_response[n]
def __iter__(self):
return iter(self._orig_response)
@property
def json(self):
"""Will return parsed JSON on response if there is any."""
if self.content and 'application/json' in self['Content-Type']:
if not hasattr(self, '_content_json'):
self._content_json = json.loads(self.content)
return self._content_json
class JSONClient(Client):
    """Test client whose responses always expose a `json` property."""

    def _with_json(self, response):
        # Responses that already have `json` pass through untouched.
        if hasattr(response, 'json'):
            return response
        return _JSONifiedResponse(response)

    def get(self, *args, **kw):
        response = super(JSONClient, self).get(*args, **kw)
        return self._with_json(response)

    def delete(self, *args, **kw):
        response = super(JSONClient, self).delete(*args, **kw)
        return self._with_json(response)

    def post(self, *args, **kw):
        response = super(JSONClient, self).post(*args, **kw)
        return self._with_json(response)

    def put(self, *args, **kw):
        response = super(JSONClient, self).put(*args, **kw)
        return self._with_json(response)

    def patch(self, *args, **kw):
        response = super(JSONClient, self).patch(*args, **kw)
        return self._with_json(response)

    def options(self, *args, **kw):
        response = super(JSONClient, self).options(*args, **kw)
        return self._with_json(response)
# Patchers covering every Elasticsearch touchpoint (client, indexers and
# reindexing helpers) so tests never talk to a real ES cluster.  They are
# activated/deactivated together via start_es_mock()/stop_es_mock().
ES_patchers = [mock.patch('elasticsearch.Elasticsearch'),
               mock.patch('mkt.extensions.indexers.ExtensionIndexer',
                          spec=True),
               mock.patch('mkt.websites.indexers.WebsiteIndexer', spec=True),
               mock.patch('mkt.webapps.indexers.HomescreenIndexer', spec=True),
               mock.patch('mkt.webapps.indexers.WebappIndexer', spec=True),
               mock.patch('mkt.search.indexers.index', spec=True),
               mock.patch('mkt.search.indexers.BaseIndexer.unindex'),
               mock.patch('mkt.search.indexers.Reindexing', spec=True,
                          side_effect=lambda i: [i]),
               ]
def start_es_mock():
    """Activate every Elasticsearch-related patcher."""
    for es_patch in ES_patchers:
        es_patch.start()
def stop_es_mock():
    """Deactivate the Elasticsearch patchers and drop cached ES clients."""
    for es_patch in ES_patchers:
        es_patch.stop()
    # Reset cached Elasticsearch objects.
    BaseIndexer._es = {}
def days_ago(days):
    """Return a second-precision datetime `days` days before now."""
    now = datetime.now().replace(microsecond=0)
    return now - timedelta(days=days)
class MockEsMixin(object):
    # Subclasses that need a real Elasticsearch set this to False.
    mock_es = True

    @classmethod
    def setUpClass(cls):
        """Start the ES patchers before any parent class setup runs."""
        if cls.mock_es:
            start_es_mock()
        try:
            super(MockEsMixin, cls).setUpClass()
        except Exception:
            # We need to unpatch here because tearDownClass will not be
            # called.
            if cls.mock_es:
                stop_es_mock()
            raise

    @classmethod
    def tearDownClass(cls):
        """Stop the ES patchers even if the parent teardown raises."""
        try:
            super(MockEsMixin, cls).tearDownClass()
        finally:
            if cls.mock_es:
                stop_es_mock()
class MockBrowserIdMixin(object):
    """Test mixin that swaps the client's login for a mocked BrowserID one."""

    def mock_browser_id(self):
        """Replace self.client.login with a fake-assertion wrapper."""
        cache.clear()
        original_login = self.client.login

        def fake_login(email, password=None):
            with mock_browserid(email=email):
                return original_login(email=email, assertion='test',
                                      audience='test')

        self.client.login = fake_login

    def login(self, profile):
        """Log the client in as `profile` (a user object or email string)."""
        email = getattr(profile, 'email', profile)
        if '@' not in email:
            email = email + '@mozilla.com'
        assert self.client.login(email=email, password='password')
# Process-wide guard so TestCase._pre_setup patches jinja2.Template.render
# only once (it flips this to True after instrumenting).
JINJA_INSTRUMENTED = False
class ClassFixtureTestCase(test.TestCase):
    """Backport of Django 1.8's class-level fixture handling.

    Based on the changes to TestCase (& TransactionTestCase) in Django 1.8.
    Fixtures are loaded once per class, and a class setUpTestData method is
    added to be overridden by subclasses. `transaction.atomic()` is used to
    achieve test isolation.

    See original code:
    https://github.com/django/django/blob/1.8b2/django/test/testcases.py
    #L747-990.

    A notable difference is that this class assumes the database supports
    transactions. This class will be obsolete on upgrade to 1.8.
    """
    fixtures = None

    @classmethod
    def _databases_names(cls, include_mirrors=True):
        # If the test case has a multi_db=True flag, act on all databases,
        # including mirrors or not. Otherwise, just on the default DB.
        if getattr(cls, 'multi_db', False):
            # Bug fix: without the `not`, include_mirrors=False returned
            # *only* the mirror databases -- the opposite of the upstream
            # Django 1.8 behaviour this class copies, which would make
            # loaddata below target mirrors instead of real databases.
            return [alias for alias in connections
                    if (include_mirrors or not
                        connections[alias].settings_dict['TEST']['MIRROR'])]
        else:
            return [DEFAULT_DB_ALIAS]

    @classmethod
    def _enter_atomics(cls):
        """Helper method to open atomic blocks for multiple databases"""
        atomics = {}
        for db_name in cls._databases_names():
            atomics[db_name] = transaction.atomic(using=db_name)
            atomics[db_name].__enter__()
        return atomics

    @classmethod
    def _rollback_atomics(cls, atomics):
        """Rollback atomic blocks opened through the previous method"""
        for db_name in reversed(cls._databases_names()):
            transaction.set_rollback(True, using=db_name)
            atomics[db_name].__exit__(None, None, None)

    @classmethod
    def setUpClass(cls):
        """Open class-level atomics and load `fixtures` exactly once."""
        super(ClassFixtureTestCase, cls).setUpClass()
        cls.cls_atomics = cls._enter_atomics()
        try:
            if cls.fixtures:
                for db_name in cls._databases_names(include_mirrors=False):
                    call_command('loaddata', *cls.fixtures, **{
                        'verbosity': 0,
                        'commit': False,
                        'database': db_name,
                    })
            # Invoke the subclass hook, as Django 1.8 does after fixture
            # loading; previously the hook was declared but never called.
            cls.setUpTestData()
        except Exception:
            # Roll back ourselves: tearDownClass won't run if setup fails.
            cls._rollback_atomics(cls.cls_atomics)
            raise

    @classmethod
    def tearDownClass(cls):
        """Roll back the class atomics and close all connections."""
        cls._rollback_atomics(cls.cls_atomics)
        for conn in connections.all():
            conn.close()
        super(ClassFixtureTestCase, cls).tearDownClass()

    @classmethod
    def setUpTestData(cls):
        """Load initial data for the TestCase"""
        pass

    def _should_reload_connections(self):
        # Connections are kept open across tests (see _post_teardown).
        return False

    def _fixture_setup(self):
        assert not self.reset_sequences, (
            'reset_sequences cannot be used on TestCase instances')
        self.atomics = self._enter_atomics()

    def _fixture_teardown(self):
        self._rollback_atomics(self.atomics)

    def _post_teardown(self):
        """Patch _post_teardown so connections don't get closed.

        In django 1.6's _post_teardown connections are closed and we don't
        want that to happen after each test anymore. This method isn't
        copied from Django 1.8 code.
        https://github.com/django/django/blob/1.6.10/django/test/testcases.py
        #L788
        """
        if not self._should_reload_connections():
            # Temporarily hide all connections from the parent teardown.
            real_connections_all = connections.all
            connections.all = lambda: []
        super(ClassFixtureTestCase, self)._post_teardown()
        if not self._should_reload_connections():
            connections.all = real_connections_all
class TestCase(MockEsMixin, MockBrowserIdMixin, ClassFixtureTestCase):
"""Base class for all mkt tests."""
client_class = Client
    def shortDescription(self):
        """Return None so test runners show the method name, not a doc."""
        # Stop nose using the test docstring and instead the test method name.
        pass
    def _pre_setup(self):
        """Reset caches/tasks/translations and instrument jinja2 rendering
        before each test."""
        super(TestCase, self)._pre_setup()
        # Clean the slate.
        cache.clear()
        post_request_task._discard_tasks()
        post_request_task._stop_queuing_tasks()
        trans_real.deactivate()
        trans_real._translations = {}  # Django fails to clear this cache.
        trans_real.activate(settings.LANGUAGE_CODE)
        self.mock_browser_id()
        # Patch jinja2 once per process so its renders also fire Django's
        # template_rendered signal (needed for assertTemplateUsed etc.).
        global JINJA_INSTRUMENTED
        if not JINJA_INSTRUMENTED:
            import jinja2
            old_render = jinja2.Template.render

            def instrumented_render(self, *args, **kwargs):
                context = dict(*args, **kwargs)
                test.signals.template_rendered.send(sender=self, template=self,
                                                    context=context)
                return old_render(self, *args, **kwargs)

            jinja2.Template.render = instrumented_render
            JINJA_INSTRUMENTED = True
    def _post_teardown(self):
        """Clear per-request user and queued translations after each test."""
        mkt.set_user(None)
        clean_translations(None)  # Make sure queued translations are removed.
        super(TestCase, self)._post_teardown()
@contextmanager
def activate(self, locale=None):
"""Active a locale."""
old_locale = translation.get_language()
if locale:
translation.activate(locale)
yield
translation.activate(old_locale)
def assertNoFormErrors(self, response):
"""Asserts that no form in the context has errors.
If you add this check before checking the status code of the response
you'll see a more informative error.
"""
# TODO(Kumar) liberate upstream to Django?
if response.context is None:
# It's probably a redirect.
return
if len(response.templates) == 1:
tpl = [response.context]
else:
# There are multiple contexts so iter all of them.
tpl = response.context
for ctx in tpl:
for k, v in ctx.iteritems():
if isinstance(v, (forms.BaseForm, forms.formsets.BaseFormSet)):
if isinstance(v, forms.formsets.BaseFormSet):
# Concatenate errors from each form in the formset.
msg = '\n'.join(f.errors.as_text() for f in v.forms)
else:
# Otherwise, just return the errors for this form.
msg = v.errors.as_text()
msg = msg.strip()
if msg != '':
self.fail('form %r had the following error(s):\n%s'
% (k, msg))
if hasattr(v, 'non_field_errors'):
self.assertEquals(v.non_field_errors(), [])
if hasattr(v, 'non_form_errors'):
self.assertEquals(v.non_form_errors(), [])
def assertLoginRedirects(self, response, to, status_code=302):
# Not using urlparams, because that escapes the variables, which
# is good, but bad for assertRedirects which will fail.
self.assert3xx(response,
'%s?to=%s' % (reverse('users.login'), to), status_code)
    def assert3xx(self, response, expected_url, status_code=302,
                  target_status_code=200):
        """Asserts redirect and final redirect matches expected URL.

        Similar to Django's `assertRedirects` but skips the final GET
        verification for speed.
        """
        if hasattr(response, 'redirect_chain'):
            # The request was a followed redirect
            self.assertTrue(len(response.redirect_chain) > 0,
                            "Response didn't redirect as expected: Response"
                            " code was %d (expected %d)" %
                            (response.status_code, status_code))
            # NOTE: deliberately rebinds `status_code` to the last hop's
            # code; only the final target status is asserted here.
            url, status_code = response.redirect_chain[-1]
            self.assertEqual(response.status_code, target_status_code,
                             "Response didn't redirect as expected: Final"
                             " Response code was %d (expected %d)" %
                             (response.status_code, target_status_code))
        else:
            # Not a followed redirect
            self.assertEqual(response.status_code, status_code,
                             "Response didn't redirect as expected: Response"
                             " code was %d (expected %d)" %
                             (response.status_code, status_code))
            url = response['Location']
        scheme, netloc, path, query, fragment = urlsplit(url)
        e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit(
            expected_url)
        # If the observed URL is absolute but the expected one is relative,
        # normalize the expectation onto the test server host.
        if (scheme and not e_scheme) and (netloc and not e_netloc):
            expected_url = urlunsplit(('http', 'testserver', e_path, e_query,
                                       e_fragment))
        self.assertEqual(
            url, expected_url,
            "Response redirected to '%s', expected '%s'" % (url, expected_url))
def assertLoginRequired(self, response, status_code=302):
"""
A simpler version of assertLoginRedirects that just checks that we
get the matched status code and bounced to the correct login page.
"""
assert response.status_code == status_code, (
'Response returned: %s, expected: %s'
% (response.status_code, status_code))
path = urlsplit(response['Location'])[2]
assert path == reverse('users.login'), (
'Redirected to: %s, expected: %s'
% (path, reverse('users.login')))
def assertSetEqual(self, a, b, message=None):
"""
This is a thing in unittest in 2.7,
but until then this is the thing.
Oh, and Django's `assertSetEqual` is lame and requires actual sets:
http://bit.ly/RO9sTr
"""
eq_(set(a), set(b), message)
eq_(len(a), len(b), message)
    def assertCloseToNow(self, dt, now=None):
        """
        Make sure the datetime is within a minute from `now`.
        """
        # Try parsing the string if it's not a datetime.
        if isinstance(dt, basestring):
            try:
                dt = dateutil_parser(dt)
            except ValueError, e:
                raise AssertionError(
                    'Expected valid date; got %s\n%s' % (dt, e))

        if not dt:
            raise AssertionError('Expected datetime; got %s' % dt)

        # Compare as POSIX timestamps with a +/- one-minute window.
        dt_later_ts = time.mktime((dt + timedelta(minutes=1)).timetuple())
        dt_earlier_ts = time.mktime((dt - timedelta(minutes=1)).timetuple())
        if not now:
            now = datetime.now()
        now_ts = time.mktime(now.timetuple())
        assert dt_earlier_ts < now_ts < dt_later_ts, (
            'Expected datetime to be within a minute of %s. Got %r.' % (now,
                                                                        dt))
def assertCORS(self, res, *verbs, **kw):
"""
Determines if a response has suitable CORS headers. Appends 'OPTIONS'
on to the list of verbs.
"""
headers = kw.pop('headers', None)
if not headers:
headers = ['X-HTTP-Method-Override', 'Content-Type']
eq_(res['Access-Control-Allow-Origin'], '*')
assert 'API-Status' in res['Access-Control-Expose-Headers']
assert 'API-Version' in res['Access-Control-Expose-Headers']
verbs = map(str.upper, verbs) + ['OPTIONS']
actual = res['Access-Control-Allow-Methods'].split(', ')
self.assertSetEqual(verbs, actual)
eq_(res['Access-Control-Allow-Headers'], ', '.join(headers))
    def assertApiUrlEqual(self, *args, **kwargs):
        """
        Allows equality comparison of two or more URLs agnostic of API version.

        This is done by prepending '/api/vx' (where x is equal to the `version`
        keyword argument or API_CURRENT_VERSION) to each string passed as a
        positional argument if that URL doesn't already start with that string.

        Also accepts 'netloc' and 'scheme' optional keyword arguments to
        compare absolute URLs.

        Example usage:

        url = '/api/v1/apps/app/bastacorp/'
        self.assertApiUrlEqual(url, '/apps/app/bastacorp1/')

        # settings.API_CURRENT_VERSION = 2
        url = '/api/v1/apps/app/bastacorp/'
        self.assertApiUrlEqual(url, '/apps/app/bastacorp/', version=1)
        """
        # Constants for the positions of the URL components in the tuple
        # returned by urlsplit. Only here for readability purposes.
        SCHEME = 0
        NETLOC = 1
        PATH = 2
        version = kwargs.get('version', settings.API_CURRENT_VERSION)
        scheme = kwargs.get('scheme', None)
        netloc = kwargs.get('netloc', None)
        urls = list(args)
        prefix = '/api/v%d' % version
        # Normalize every URL in place: split it, prefix the path if
        # necessary, fill in scheme/netloc, then rebuild the tuple.
        for idx, url in enumerate(urls):
            urls[idx] = list(urlsplit(url))
            if not urls[idx][PATH].startswith(prefix):
                urls[idx][PATH] = prefix + urls[idx][PATH]
            if scheme and not urls[idx][SCHEME]:
                urls[idx][SCHEME] = scheme
            if netloc and not urls[idx][NETLOC]:
                urls[idx][NETLOC] = netloc
            urls[idx] = SplitResult(*urls[idx])
        eq_(*urls)
    def make_price(self, price='1.00'):
        """Create (or fetch) a Price tier with USD PriceCurrency rows for
        the US and rest-of-world regions, and return the Price object."""
        price_obj, created = Price.objects.get_or_create(price=price,
                                                         name='1')
        for region in [regions.USA.id, regions.RESTOFWORLD.id]:
            PriceCurrency.objects.create(region=region, currency='USD',
                                         price=price, tier=price_obj,
                                         provider=PROVIDER_REFERENCE)
        # Call Price transformer in order to repopulate _currencies cache.
        Price.transformer([])
        return price_obj
    def make_premium(self, addon, price='1.00'):
        """Mark `addon` as premium at the given price tier and return the
        created AddonPremium row."""
        price_obj = self.make_price(price=Decimal(price))
        addon.update(premium_type=mkt.ADDON_PREMIUM)
        addon._premium = AddonPremium.objects.create(addon=addon,
                                                     price=price_obj)
        # Drop the class-level currency cache so later price lookups are
        # recomputed with the rows created above.
        if hasattr(Price, '_currencies'):
            del Price._currencies
        return addon._premium
    def create_sample(self, name=None, **kw):
        """Get or create a waffle Sample (100% by default); an existing
        Sample has its fields updated from kw."""
        if name is not None:
            kw['name'] = name
        kw.setdefault('percent', 100)
        sample, created = Sample.objects.get_or_create(name=name, defaults=kw)
        if not created:
            # Sample already existed: overwrite its fields and persist.
            sample.__dict__.update(kw)
            sample.save()
        return sample
    def create_switch(self, name=None, **kw):
        """Get or create a waffle Switch (active by default); an existing
        Switch has its fields updated from kw."""
        kw.setdefault('active', True)
        if name is not None:
            kw['name'] = name
        switch, created = Switch.objects.get_or_create(name=name, defaults=kw)
        if not created:
            # Switch already existed: overwrite its fields and persist.
            switch.__dict__.update(kw)
            switch.save()
        return switch
    def create_flag(self, name=None, **kw):
        """Get or create a waffle Flag (on for everyone by default); an
        existing Flag has its fields updated from kw."""
        if name is not None:
            kw['name'] = name
        kw.setdefault('everyone', True)
        flag, created = Flag.objects.get_or_create(name=name, defaults=kw)
        if not created:
            # Flag already existed: overwrite its fields and persist.
            flag.__dict__.update(kw)
            flag.save()
        return flag
    @staticmethod
    def grant_permission(user_obj, rules, name='Test Group'):
        """Creates group with rule, and adds user to group.

        Returns the created Group so callers can clean it up or reuse it.
        """
        group = Group.objects.create(name=name, rules=rules)
        GroupUser.objects.create(group=group, user=user_obj)
        return group
    def remove_permission(self, user_obj, rules):
        """Remove a permission from a user."""
        # Only the user's membership row is deleted; the Group itself stays.
        group = Group.objects.get(rules=rules)
        GroupUser.objects.filter(user=user_obj, group=group).delete()
    def days_ago(self, days):
        """Convenience wrapper around the module-level days_ago() helper."""
        return days_ago(days)
    def trans_eq(self, trans, locale, localized_string):
        """Assert that translation `trans` has `localized_string` for the
        given locale."""
        eq_(Translation.objects.get(id=trans.id,
                                    locale=locale).localized_string,
            localized_string)
    def extract_script_template(self, html, template_selector):
        """Extracts the inner JavaScript text/template from a html page.

        Example::

            >>> template = extract_script_template(res.content, '#template-id')
            >>> template('#my-jquery-selector')

        Returns a PyQuery object that you can refine using jQuery selectors.
        """
        # Outer pq() parses the page; inner call grabs the <script> node's
        # text, which is then re-parsed as its own queryable document.
        return pq(pq(html)(template_selector).html())
class MktPaths(object):
    """Mixin for getting common Marketplace Paths."""

    def manifest_path(self, name):
        """Absolute path to the named test webapp manifest."""
        return os.path.join(settings.ROOT,
                            'mkt/submit/tests/webapps/%s' % name)

    def manifest_copy_over(self, dest, name):
        """Copy the named test manifest from local to private storage."""
        copy_stored_file(
            self.manifest_path(name), dest,
            src_storage=local_storage, dst_storage=private_storage)

    @staticmethod
    def sample_key():
        """Absolute path to the sample signing key."""
        return os.path.join(settings.ROOT,
                            'mkt/webapps/tests/sample.key')

    def sample_packaged_key(self):
        """Absolute path to the sample packaged-app signing key (PEM)."""
        return os.path.join(settings.ROOT,
                            'mkt/webapps/tests/sample.packaged.pem')

    def mozball_image(self):
        """Absolute path to the 128px mozball test icon."""
        return os.path.join(settings.ROOT,
                            'mkt/developers/tests/addons/mozball-128.png')

    def packaged_app_path(self, name):
        """Absolute path to the named packaged test app."""
        return os.path.join(
            settings.ROOT, 'mkt/submit/tests/packaged/%s' % name)

    def packaged_copy_over(self, dest, name):
        """Copy the named packaged app from local to private storage."""
        copy_stored_file(
            self.packaged_app_path(name), dest,
            src_storage=local_storage, dst_storage=private_storage)
def assert_no_validation_errors(validation):
    """Assert that the validation (JSON) does not contain a traceback.

    Note that this does not test whether the addon passed
    validation or not.
    """
    if hasattr(validation, 'task_error'):
        # FileUpload object:
        error = validation.task_error
    else:
        # Upload detail - JSON output
        error = validation['error']
    if error:
        print '-' * 70
        print error
        print '-' * 70
        # Surface only the last line of the traceback in the assertion
        # message; the full text was printed above.
        raise AssertionError("Unexpected task error: %s" %
                             error.rstrip().split("\n")[-1])
def _get_created(created):
"""
Returns a datetime.
If `created` is "now", it returns `datetime.datetime.now()`. If `created`
is set use that. Otherwise generate a random datetime in the year 2011.
"""
if created == 'now':
return datetime.now()
elif created:
return created
else:
return datetime(2011,
random.randint(1, 12), # Month
random.randint(1, 28), # Day
random.randint(0, 23), # Hour
random.randint(0, 59), # Minute
random.randint(0, 59)) # Seconds
def req_factory_factory(url='', user=None, post=False, data=None, **kwargs):
    """Creates a request factory, logged in with the user.

    Remaining keyword arguments (after `region` is popped) are set as
    attributes on the request object.
    """
    req = RequestFactory()
    if post:
        req = req.post(url, data or {})
    else:
        req = req.get(url, data or {})
    if user:
        # Re-fetch so the request carries a fresh UserProfile instance.
        req.user = UserProfile.objects.get(id=user.id)
        req.groups = user.groups.all()
    else:
        req.user = AnonymousUser()
    req.check_ownership = partial(check_ownership, req)
    # Default region: first entry of REGIONS_CHOICES.
    req.REGION = kwargs.pop('region', mkt.regions.REGIONS_CHOICES[0][1])
    req.API_VERSION = 2
    for key in kwargs:
        setattr(req, key, kwargs[key])
    return req
# Module-level counter used by user_factory() to generate unique emails.
user_factory_counter = 0
def user_factory(**kw):
    """
    Create and return a UserProfile.

    If not provided, email will be 'factoryuser<number>@mozilla.com'.

    If email has no '@' it will be corrected to 'email@mozilla.com'
    """
    global user_factory_counter
    # Record whether the email was auto-generated BEFORE popping it: the
    # original checked `'email' not in kw` after the pop, which is always
    # true, so the counter was bumped even for caller-supplied emails.
    autogenerated = 'email' not in kw
    email = kw.pop('email', 'factoryuser%d' % user_factory_counter)
    if '@' not in email:
        email = '%s@mozilla.com' % email
    user = UserProfile.objects.create(email=email, **kw)
    if autogenerated:
        # Move the counter past this user's id so generated emails stay
        # unique across fixtures.
        user_factory_counter = user.id + 1
    return user
class ESTestCase(TestCase):
    """Base class for tests that require elasticsearch."""
    # ES is slow to set up so this uses class setup/teardown. That happens
    # outside Django transactions so be careful to clean up afterwards.
    test_es = True
    mock_es = False
    exempt_from_fixture_bundling = True  # ES doesn't support bundling (yet?)

    @classmethod
    def setUpClass(cls):
        # Skip the whole class cheaply when ES testing is disabled.
        if not settings.RUN_ES_TESTS:
            raise SkipTest('ES disabled')
        cls.es = elasticsearch.Elasticsearch(hosts=settings.ES_HOSTS)
        # The ES setting are set before we call super()
        # because we may have indexation occuring in upper classes.
        for key, index in settings.ES_INDEXES.items():
            if not index.startswith('test_'):
                settings.ES_INDEXES[key] = 'test_%s' % index
        # Shrink the analyzer map to two locales so indexing stays fast;
        # the original map is restored in tearDownClass().
        cls._SEARCH_ANALYZER_MAP = mkt.SEARCH_ANALYZER_MAP
        mkt.SEARCH_ANALYZER_MAP = {
            'english': ['en-us'],
            'spanish': ['es'],
        }
        super(ESTestCase, cls).setUpClass()

    @classmethod
    def setUpTestData(cls):
        # Fail early with a helpful hint when the ES server is unreachable.
        try:
            cls.es.cluster.health()
        except Exception, e:
            e.args = tuple([u'%s (it looks like ES is not running, '
                            'try starting it or set RUN_ES_TESTS=False)'
                            % e.args[0]] + list(e.args[1:]))
            raise
        # Drop every test index (and its alias) and rebuild the mappings
        # from scratch so each test class starts clean.
        for index in set(settings.ES_INDEXES.values()):
            # Get the index that's pointed to by the alias.
            try:
                indices = cls.es.indices.get_aliases(index=index)
                assert indices[index]['aliases']
            except (KeyError, AssertionError):
                # There's no alias, just use the index.
                print 'Found no alias for %s.' % index
            except elasticsearch.NotFoundError:
                pass
            # Remove any alias as well.
            try:
                cls.es.indices.delete(index=index)
            except elasticsearch.NotFoundError as e:
                print 'Could not delete index %r: %s' % (index, e)
        for indexer in reindex.INDEXERS:
            indexer.setup_mapping()

    @classmethod
    def tearDownClass(cls):
        # Restore the analyzer map patched in setUpClass().
        mkt.SEARCH_ANALYZER_MAP = cls._SEARCH_ANALYZER_MAP
        super(ESTestCase, cls).tearDownClass()

    def tearDown(self):
        # Flush any queued post-request indexing tasks between tests.
        post_request_task._send_tasks_and_stop_queuing()
        super(ESTestCase, self).tearDown()

    @classmethod
    def refresh(cls, doctypes=None):
        """
        Force an immediate refresh for the index(es) holding the given
        doctype(s) in ES. Both a string corresponding to a single doctypes or a
        list of multiple doctypes are accepted.

        If there are tasks in the post_request_task queue, they are processed
        first.
        """
        post_request_task._send_tasks_and_stop_queuing()
        if doctypes:
            if not isinstance(doctypes, (list, tuple)):
                doctypes = [doctypes]
            indexes = [settings.ES_INDEXES[doctype] for doctype in doctypes]
            try:
                cls.es.indices.refresh(index=indexes)
            except elasticsearch.NotFoundError as e:
                print "Could not refresh indexes '%s': %s" % (indexes, e)

    @classmethod
    def reindex(cls, model):
        """
        Convenience method that re-save all instances of the specified model
        and then refreshes the corresponding ES index.
        """
        # Emit post-save signal so all of the objects get reindexed.
        [o.save() for o in model.objects.all()]
        cls.refresh(doctypes=model.get_indexer().get_mapping_type_name())
class WebappTestCase(TestCase):
    """TestCase pre-loaded with the canonical fixture webapp (id 337141)."""
    fixtures = fixture('webapp_337141')

    def setUp(self):
        self.app = self.get_app()

    def get_app(self):
        """Fetch a fresh copy of the fixture webapp from the database."""
        return Webapp.objects.get(id=337141)

    def make_game(self, app=None, rated=False):
        """Turn `app` (default: self.app) into a game and return it.

        Fixes two defects in the original: the explicit `app` argument was
        ignored (`self.app or app` always picked self.app, since setUp sets
        it), and the converted app was assigned to a local but never
        returned.
        """
        return make_game(app or self.app, rated)
def make_game(app, rated):
    """Put `app` in the 'games' category, optionally give it content
    ratings, and return the reloaded app."""
    app.update(categories=['games'])
    if rated:
        make_rated(app)
    # Reload so the returned object reflects the updates made above.
    app = app.reload()
    return app
def make_rated(app):
    """Give `app` a full set of content ratings (the first/lowest rating of
    every ratings body) plus dummy IARC info, and clear its descriptors and
    interactive elements."""
    app.set_content_ratings(
        dict((body, body.ratings[0]) for body in
             mkt.ratingsbodies.ALL_RATINGS_BODIES))
    app.set_iarc_info(123, 'abc')
    app.set_descriptors([])
    app.set_interactives([])
|
|
#!/usr/bin/python
# Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
"""
import time
import json
from pybvc.controller.controller import Controller
from pybvc.netconfdev.vrouter.vrouter5600 import VRouter5600
from pybvc.netconfdev.vrouter.firewall import Firewall, Rule
from pybvc.common.status import STATUS
from pybvc.common.utils import load_dict_from_file
def vr_demo_7():
    """Firewall demo against a vRouter-5600 NETCONF node.

    Creates two firewall instances on the vRouter via the Controller,
    applies them to a dataplane interface (inbound/outbound), shows the
    resulting configuration, then removes everything it created.
    """
    # Load controller/node connection settings from the YAML config file.
    f = "cfg4.yml"
    d = {}
    if(load_dict_from_file(f, d) is False):
        print("Config file '%s' read error: " % f)
        exit()
    try:
        ctrlIpAddr = d['ctrlIpAddr']
        ctrlPortNum = d['ctrlPortNum']
        ctrlUname = d['ctrlUname']
        ctrlPswd = d['ctrlPswd']
        nodeName = d['nodeName']
        nodeIpAddr = d['nodeIpAddr']
        nodePortNum = d['nodePortNum']
        nodeUname = d['nodeUname']
        nodePswd = d['nodePswd']
        ifName = d['interfaceName']
        rundelay = d['rundelay']
    except:
        print ("Failed to get Controller device attributes")
        exit(0)
    print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
    print ("<<< Demo Start")
    print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
    print ("\n")
    ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd)
    vrouter = VRouter5600(ctrl, nodeName, nodeIpAddr, nodePortNum,
                          nodeUname, nodePswd)
    print ("<<< 'Controller': %s, '%s': %s"
           % (ctrlIpAddr, nodeName, nodeIpAddr))
    print ("\n")
    time.sleep(rundelay)
    # Ensure the vRouter is configured on the Controller, registering it
    # as a NETCONF node when it is not known yet.
    node_configured = False
    result = ctrl.check_node_config_status(nodeName)
    status = result.get_status()
    if(status.eq(STATUS.NODE_CONFIGURED)):
        node_configured = True
        print ("<<< '%s' is configured on the Controller" % nodeName)
    elif(status.eq(STATUS.DATA_NOT_FOUND)):
        node_configured = False
    else:
        print ("\n")
        print "Failed to get configuration status for the '%s'" % nodeName
        print ("!!!Demo terminated, reason: %s" % status.detailed())
        exit(0)
    if node_configured is False:
        result = ctrl.add_netconf_node(vrouter)
        status = result.get_status()
        if(status.eq(STATUS.OK)):
            print ("<<< '%s' added to the Controller" % nodeName)
        else:
            print ("\n")
            print ("!!!Demo terminated, reason: %s" % status.detailed())
            exit(0)
    print ("\n")
    time.sleep(rundelay)
    # Wait until the node is actually connected before talking to it.
    result = ctrl.check_node_conn_status(nodeName)
    status = result.get_status()
    if(status.eq(STATUS.NODE_CONNECTED)):
        print ("<<< '%s' is connected to the Controller" % nodeName)
    else:
        print ("\n")
        print ("!!!Demo terminated, reason: %s" % status.brief().lower())
        exit(0)
    # Show the firewall configuration before making any changes.
    print("\n")
    print ("<<< Show firewalls configuration on the '%s'" % nodeName)
    time.sleep(rundelay)
    result = vrouter.get_firewalls_cfg()
    status = result.get_status()
    if (status.eq(STATUS.OK)):
        print ("'%s' firewalls config:" % nodeName)
        cfg = result.get_data()
        data = json.loads(cfg)
        print json.dumps(data, indent=4)
    elif (status.eq(STATUS.DATA_NOT_FOUND)):
        print ("No firewalls configuration found")
    else:
        print ("\n")
        print ("!!!Demo terminated, reason: %s" % status.brief().lower())
        ctrl.delete_netconf_node(vrouter)
        exit(0)
    # Firewall #1: accept traffic from a specific source address.
    print "\n"
    fwName1 = "ACCEPT-SRC-IPADDR"
    print (">>> Create new firewall instance '%s' on '%s'"
           % (fwName1, nodeName))
    firewall1 = Firewall(fwName1)
    # Add a rule to the firewall instance
    rulenum = 30
    rule = Rule(rulenum)
    rule.add_action("accept")
    rule.add_source_address("172.22.17.108")
    firewall1.add_rule(rule)
    print firewall1.get_payload()
    time.sleep(rundelay)
    result = vrouter.add_modify_firewall_instance(firewall1)
    status = result.get_status()
    if(status.eq(STATUS.OK)):
        print ("Firewall instance '%s' was successfully created" % fwName1)
    else:
        print ("\n")
        print ("!!!Demo terminated, reason: %s" % status.detailed())
        exit(0)
    # Firewall #2: drop ICMP echo (ping) traffic.
    print "\n"
    fwName2 = "DROP-ICMP"
    print (">>> Create new firewall instance '%s' on '%s'"
           % (fwName2, nodeName))
    firewall2 = Firewall(fwName2)
    # Add a rule to the firewall instance
    rulenum = 40
    rule = Rule(rulenum)
    rule.add_action("drop")
    rule.add_icmp_typename("ping")
    firewall2.add_rule(rule)
    print firewall2.get_payload()
    time.sleep(rundelay)
    result = vrouter.add_modify_firewall_instance(firewall2)
    status = result.get_status()
    if(status.eq(STATUS.OK)):
        print ("Firewall instance '%s' was successfully created" % fwName2)
    else:
        print ("\n")
        print ("!!!Demo terminated, reason: %s" % status.brief().lower())
        print status.detailed()
        exit(0)
    # Show firewall configuration again (now with both instances).
    print("\n")
    print ("<<< Show firewalls configuration on the '%s'" % nodeName)
    time.sleep(rundelay)
    result = vrouter.get_firewalls_cfg()
    status = result.get_status()
    if (status.eq(STATUS.OK)):
        print ("'%s' firewalls config:" % nodeName)
        cfg = result.get_data()
        data = json.loads(cfg)
        print json.dumps(data, indent=4)
    else:
        print ("\n")
        print ("!!!Demo terminated, reason: %s" % status.brief().lower())
        exit(0)
    # Apply firewall #1 inbound and firewall #2 outbound on the interface.
    print("\n")
    print ("<<< Apply firewall '%s' to inbound traffic "
           "and '%s' to outbound traffic on the '%s' "
           "dataplane interface" % (fwName1, fwName2, ifName))
    time.sleep(rundelay)
    result = vrouter.set_dataplane_interface_firewall(ifName, fwName1, fwName2)
    status = result.get_status()
    if(status.eq(STATUS.OK)):
        print ("Firewall instances were successfully applied "
               "to the '%s' dataplane interface" % (ifName))
    else:
        print ("\n")
        print ("!!!Demo terminated, reason: %s" % status.brief().lower())
        exit(0)
    # Show the interface configuration with the firewalls attached.
    print("\n")
    print ("<<< Show '%s' dataplane interface configuration on the '%s'"
           % (ifName, nodeName))
    time.sleep(rundelay)
    result = vrouter.get_dataplane_interface_cfg(ifName)
    status = result.get_status()
    if(status.eq(STATUS.OK)):
        print ("Interfaces '%s' config:" % ifName)
        cfg = result.get_data()
        data = json.loads(cfg)
        print json.dumps(data, indent=4)
    else:
        print ("\n")
        print ("!!!Demo terminated, reason: %s" % status.brief().lower())
        exit(0)
    # Detach the firewalls from the interface again.
    print("\n")
    print ("<<< Remove firewall settings from the '%s' dataplane interface"
           % (ifName))
    time.sleep(rundelay)
    result = vrouter.delete_dataplane_interface_firewall(ifName)
    status = result.get_status()
    if(status.eq(STATUS.OK)):
        print ("Firewall settings successfully removed "
               "from '%s' dataplane interface" % ifName)
    else:
        print ("\n")
        print ("!!!Demo terminated, reason: %s" % status.brief().lower())
        exit(0)
    # Show the interface configuration after the firewalls were removed.
    print("\n")
    print ("<<< Show '%s' dataplane interface configuration on the '%s'"
           % (ifName, nodeName))
    time.sleep(rundelay)
    result = vrouter.get_dataplane_interface_cfg(ifName)
    status = result.get_status()
    if(status.eq(STATUS.OK)):
        print ("Interfaces '%s' config:" % ifName)
        cfg = result.get_data()
        data = json.loads(cfg)
        print json.dumps(data, indent=4)
    else:
        print ("\n")
        print ("!!!Demo terminated, reason: %s" % status.brief().lower())
        exit(0)
    # Delete both firewall instances.
    print "\n"
    print (">>> Remove firewall instance '%s' from '%s'"
           % (fwName1, nodeName))
    time.sleep(rundelay)
    result = vrouter.delete_firewall_instance(firewall1)
    status = result.get_status()
    if(status.eq(STATUS.OK)):
        print ("Firewall instance '%s' was successfully deleted" % fwName1)
    else:
        print ("\n")
        print ("!!!Demo terminated, reason: %s" % status.brief().lower())
        exit(0)
    print "\n"
    print (">>> Remove firewall instance '%s' from '%s'"
           % (fwName2, nodeName))
    time.sleep(rundelay)
    result = vrouter.delete_firewall_instance(firewall2)
    status = result.get_status()
    if(status.eq(STATUS.OK)):
        print ("Firewall instance '%s' was successfully deleted" % fwName2)
    else:
        print ("\n")
        print ("!!!Demo terminated, reason: %s" % status.brief().lower())
        exit(0)
    # Final firewall configuration listing (should be empty again).
    print("\n")
    print ("<<< Show firewalls configuration on the '%s'" % nodeName)
    time.sleep(rundelay)
    result = vrouter.get_firewalls_cfg()
    status = result.get_status()
    if (status.eq(STATUS.OK)):
        print ("'%s' firewalls config:" % nodeName)
        cfg = result.get_data()
        data = json.loads(cfg)
        print json.dumps(data, indent=4)
    else:
        print ("\n")
        print ("!!!Demo terminated, reason: %s" % status.brief().lower())
        exit(0)
    # Clean up: unregister the NETCONF node from the Controller.
    print "\n"
    print (">>> Remove '%s' NETCONF node from the Controller" % nodeName)
    time.sleep(rundelay)
    result = ctrl.delete_netconf_node(vrouter)
    status = result.get_status()
    if(status.eq(STATUS.OK)):
        print ("'%s' NETCONF node was successfully removed "
               "from the Controller" % nodeName)
    else:
        print ("\n")
        print ("!!!Demo terminated, reason: %s" % status.brief())
        exit(0)
    print ("\n")
    print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
    print (">>> Demo End")
    print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
# Script entry point: run the firewall demo when executed directly.
if __name__ == "__main__":
    vr_demo_7()
|
|
# Copyright (c) 2012 NetApp, Inc. All rights reserved.
# Copyright (c) 2014 Ben Swartzlander. All rights reserved.
# Copyright (c) 2014 Navneet Singh. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Andrew Kerr. All rights reserved.
# Copyright (c) 2014 Jeff Applewhite. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver library for NetApp 7/C-mode block storage systems.
"""
import math
import sys
import uuid
from oslo_utils import excutils
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
from cinder.volume.drivers.netapp.dataontap.client import api as na_api
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import utils as volume_utils
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
class NetAppLun(object):
    """Represents a LUN on NetApp storage."""

    def __init__(self, handle, name, size, metadata_dict):
        self.handle = handle
        self.name = name
        self.size = size
        # Normalize None to an empty dict; callers index into this freely.
        self.metadata = metadata_dict or {}

    def get_metadata_property(self, prop):
        """Get the metadata property of a LUN.

        Logs a debug message and returns None (implicitly) when the
        property is not defined for this LUN.
        """
        if prop in self.metadata:
            return self.metadata[prop]
        name = self.name
        msg = _("No metadata property %(prop)s defined for the LUN %(name)s")
        msg_fmt = {'prop': prop, 'name': name}
        # Lazy interpolation: let the logging framework render the message
        # only when debug logging is actually enabled.
        LOG.debug(msg, msg_fmt)

    def __str__(self, *args, **kwargs):
        return 'NetApp Lun[handle:%s, name:%s, size:%s, metadata:%s]'\
            % (self.handle, self.name, self.size, self.metadata)
class NetAppBlockStorageLibrary(object):
    """NetApp block storage library for Data ONTAP."""

    # do not increment this as it may be used in volume type definitions
    VERSION = "1.0.0"
    # Prefix identifying igroups owned/created by this driver.
    IGROUP_PREFIX = 'openstack-'
    # Config options that must be set before the driver can run.
    REQUIRED_FLAGS = ['netapp_login', 'netapp_password',
                      'netapp_server_hostname']
    def __init__(self, driver_name, driver_protocol, **kwargs):
        """Initialize driver state and register NetApp config options.

        :param driver_name: protocol-specific driver name used in reporting.
        :param driver_protocol: protocol the concrete subclass speaks.
        """
        na_utils.validate_instantiation(**kwargs)
        self.driver_name = driver_name
        self.driver_protocol = driver_protocol
        # The ZAPI client is created by the subclass in do_setup().
        self.zapi_client = None
        self._stats = {}
        # Cache of LUNs known to the driver, keyed by LUN name.
        self.lun_table = {}
        self.lookup_service = fczm_utils.create_lookup_service()
        self.app_version = kwargs.get("app_version", "unknown")
        self.db = kwargs.get('db')
        self.configuration = kwargs['configuration']
        self.configuration.append_config_values(na_opts.netapp_connection_opts)
        self.configuration.append_config_values(na_opts.netapp_basicauth_opts)
        self.configuration.append_config_values(na_opts.netapp_transport_opts)
        self.configuration.append_config_values(
            na_opts.netapp_provisioning_opts)
    def do_setup(self, context):
        """Validate that all required NetApp config flags are set."""
        na_utils.check_flags(self.REQUIRED_FLAGS, self.configuration)
    def check_for_setup_error(self):
        """Check that the driver is working and can communicate.

        Discovers the LUNs on the NetApp server.
        """
        # A successful listing doubles as a connectivity check.
        lun_list = self.zapi_client.get_lun_list()
        self._extract_and_populate_luns(lun_list)
        LOG.debug("Success getting list of LUNs from server.")
def get_pool(self, volume):
"""Return pool name where volume resides.
:param volume: The volume hosted by the driver.
:return: Name of the pool where given volume is hosted.
"""
name = volume['name']
metadata = self._get_lun_attr(name, 'metadata') or dict()
return metadata.get('Volume', None)
    def create_volume(self, volume):
        """Driver entry point for creating a new volume (Data ONTAP LUN)."""
        LOG.debug('create_volume on %s' % volume['host'])
        # get Data ONTAP volume name as pool name
        ontap_volume_name = volume_utils.extract_host(volume['host'],
                                                      level='pool')
        if ontap_volume_name is None:
            msg = _("Pool is not available in the volume host field.")
            raise exception.InvalidHost(reason=msg)
        lun_name = volume['name']
        # start with default size, get requested size
        default_size = units.Mi * 100  # 100 MB
        size = default_size if not int(volume['size'])\
            else int(volume['size']) * units.Gi
        metadata = {'OsType': 'linux',
                    'SpaceReserved': 'true',
                    'Path': '/vol/%s/%s' % (ontap_volume_name, lun_name)}
        extra_specs = na_utils.get_volume_extra_specs(volume)
        qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \
            if extra_specs else None
        # warn on obsolete extra specs
        na_utils.log_extra_spec_warnings(extra_specs)
        self._create_lun(ontap_volume_name, lun_name, size,
                         metadata, qos_policy_group)
        LOG.debug('Created LUN with name %s' % lun_name)
        # Augment the metadata with location info and cache the new LUN so
        # later map/extend/delete calls find it without a backend query.
        metadata['Path'] = '/vol/%s/%s' % (ontap_volume_name, lun_name)
        metadata['Volume'] = ontap_volume_name
        metadata['Qtree'] = None
        handle = self._create_lun_handle(metadata)
        self._add_lun_to_table(NetAppLun(handle, lun_name, size, metadata))
def delete_volume(self, volume):
"""Driver entry point for destroying existing volumes."""
name = volume['name']
metadata = self._get_lun_attr(name, 'metadata')
if not metadata:
msg = _LW("No entry in LUN table for volume/snapshot %(name)s.")
msg_fmt = {'name': name}
LOG.warning(msg % msg_fmt)
return
self.zapi_client.destroy_lun(metadata['Path'])
self.lun_table.pop(name)
def ensure_export(self, context, volume):
"""Driver entry point to get the export info for an existing volume."""
handle = self._get_lun_attr(volume['name'], 'handle')
return {'provider_location': handle}
def create_export(self, context, volume):
"""Driver entry point to get the export info for a new volume."""
handle = self._get_lun_attr(volume['name'], 'handle')
return {'provider_location': handle}
    def remove_export(self, context, volume):
        """Driver entry point to remove an export for a volume.

        Since exporting is idempotent in this driver, we have nothing
        to do for unexporting.
        """
        pass
def create_snapshot(self, snapshot):
"""Driver entry point for creating a snapshot.
This driver implements snapshots by using efficient single-file
(LUN) cloning.
"""
vol_name = snapshot['volume_name']
snapshot_name = snapshot['name']
lun = self._get_lun_from_table(vol_name)
self._clone_lun(lun.name, snapshot_name, 'false')
def delete_snapshot(self, snapshot):
"""Driver entry point for deleting a snapshot."""
self.delete_volume(snapshot)
LOG.debug("Snapshot %s deletion successful" % snapshot['name'])
    def create_volume_from_snapshot(self, volume, snapshot):
        """Driver entry point for creating a new volume from a snapshot.

        Many would call this "cloning" and in fact we use cloning to implement
        this feature.
        """
        vol_size = volume['size']
        snap_size = snapshot['volume_size']
        snapshot_name = snapshot['name']
        new_name = volume['name']
        self._clone_lun(snapshot_name, new_name, 'true')
        # Grow the clone when the requested volume is larger than the
        # snapshot; on resize failure delete the half-made volume before
        # re-raising.
        if vol_size != snap_size:
            try:
                self.extend_volume(volume, volume['size'])
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.error(
                        _LE("Resizing %s failed. Cleaning volume."), new_name)
                    self.delete_volume(volume)
    def _create_lun(self, volume_name, lun_name, size,
                    metadata, qos_policy_group=None):
        """Creates a LUN, handling Data ONTAP differences as needed."""
        # Implemented by the 7-mode / C-mode subclasses.
        raise NotImplementedError()
    def _create_lun_handle(self, metadata):
        """Returns LUN handle based on filer type."""
        # Implemented by the 7-mode / C-mode subclasses.
        raise NotImplementedError()
    def _extract_lun_info(self, lun):
        """Build and return a NetAppLun object from one API LUN element.

        (The previous docstring, copied from _extract_and_populate_luns,
        wrongly claimed this populates the LUN table; it does not.)
        """
        meta_dict = self._create_lun_meta(lun)
        path = lun.get_child_content('path')
        # The LUN name is the last component of its path.
        (_rest, _splitter, name) = path.rpartition('/')
        handle = self._create_lun_handle(meta_dict)
        size = lun.get_child_content('size')
        return NetAppLun(handle, name, size, meta_dict)
def _extract_and_populate_luns(self, api_luns):
"""Extracts the LUNs from API and populates the LUN table."""
for lun in api_luns:
discovered_lun = self._extract_lun_info(lun)
self._add_lun_to_table(discovered_lun)
def _map_lun(self, name, initiator_list, initiator_type, lun_id=None):
"""Maps LUN to the initiator(s) and returns LUN ID assigned."""
metadata = self._get_lun_attr(name, 'metadata')
os = metadata['OsType']
path = metadata['Path']
if self._check_allowed_os(os):
os = os
else:
os = 'default'
igroup_name = self._get_or_create_igroup(initiator_list,
initiator_type, os)
try:
return self.zapi_client.map_lun(path, igroup_name, lun_id=lun_id)
except na_api.NaApiError:
exc_info = sys.exc_info()
(_igroup, lun_id) = self._find_mapped_lun_igroup(path,
initiator_list)
if lun_id is not None:
return lun_id
else:
raise exc_info[0], exc_info[1], exc_info[2]
def _unmap_lun(self, path, initiator_list):
"""Unmaps a LUN from given initiator."""
(igroup_name, _lun_id) = self._find_mapped_lun_igroup(path,
initiator_list)
self.zapi_client.unmap_lun(path, igroup_name)
    def _find_mapped_lun_igroup(self, path, initiator_list):
        """Find an igroup for a LUN mapped to the given initiator(s)."""
        # Implemented by the 7-mode / C-mode subclasses.
        raise NotImplementedError()
    def _has_luns_mapped_to_initiators(self, initiator_list):
        """Checks whether any LUNs are mapped to the given initiator(s)."""
        # Straight delegation to the ZAPI client.
        return self.zapi_client.has_luns_mapped_to_initiators(initiator_list)
def _get_or_create_igroup(self, initiator_list, initiator_type,
os='default'):
"""Checks for an igroup for a set of one or more initiators.
Creates igroup if not found.
"""
igroups = self.zapi_client.get_igroup_by_initiators(initiator_list)
igroup_name = None
for igroup in igroups:
if igroup['initiator-group-os-type'] == os:
if igroup['initiator-group-type'] == initiator_type or \
igroup['initiator-group-type'] == 'mixed':
if igroup['initiator-group-name'].startswith(
self.IGROUP_PREFIX):
igroup_name = igroup['initiator-group-name']
break
if not igroup_name:
igroup_name = self.IGROUP_PREFIX + six.text_type(uuid.uuid4())
self.zapi_client.create_igroup(igroup_name, initiator_type, os)
for initiator in initiator_list:
self.zapi_client.add_igroup_initiator(igroup_name, initiator)
return igroup_name
def _check_allowed_os(self, os):
"""Checks if the os type supplied is NetApp supported."""
if os in ['linux', 'aix', 'hpux', 'windows', 'solaris',
'netware', 'vmware', 'openvms', 'xen', 'hyper_v']:
return True
else:
return False
def _add_lun_to_table(self, lun):
"""Adds LUN to cache table."""
if not isinstance(lun, NetAppLun):
msg = _("Object is not a NetApp LUN.")
raise exception.VolumeBackendAPIException(data=msg)
self.lun_table[lun.name] = lun
    def _get_lun_from_table(self, name):
        """Gets LUN from cache table.

        Refreshes cache if LUN not found in cache.

        :raises exception.VolumeNotFound: if the LUN is unknown even after
            re-discovering LUNs from the backend.
        """
        lun = self.lun_table.get(name)
        if lun is None:
            # Cache miss: re-discover all LUNs from the backend once.
            lun_list = self.zapi_client.get_lun_list()
            self._extract_and_populate_luns(lun_list)
            lun = self.lun_table.get(name)
            if lun is None:
                raise exception.VolumeNotFound(volume_id=name)
        return lun
    def _clone_lun(self, name, new_name, space_reserved='true',
                   src_block=0, dest_block=0, block_count=0):
        """Clone LUN with the given name to the new name."""
        # Implemented by the 7-mode / C-mode subclasses.
        raise NotImplementedError()
    def _get_lun_attr(self, name, attr):
        """Get the LUN attribute if found else None."""
        try:
            attr = getattr(self._get_lun_from_table(name), attr)
            return attr
        except exception.VolumeNotFound as e:
            LOG.error(_LE("Message: %s"), e.msg)
        except Exception as e:
            LOG.error(_LE("Error getting LUN attribute. Exception: %s"),
                      e.__str__())
        # Any lookup failure above falls through to "attribute unknown".
        return None
    def _create_lun_meta(self, lun):
        """Build the metadata dict for a LUN; filer-type specific."""
        raise NotImplementedError()
    def _get_fc_target_wwpns(self, include_partner=True):
        """Return FC target WWPNs; implemented by protocol subclasses."""
        raise NotImplementedError()
    def create_cloned_volume(self, volume, src_vref):
        """Creates a clone of the specified volume."""
        vol_size = volume['size']
        src_vol = self._get_lun_from_table(src_vref['name'])
        src_vol_size = src_vref['size']
        new_name = volume['name']
        self._clone_lun(src_vol.name, new_name, 'true')
        # Grow the clone when the new volume is larger than the source; on
        # resize failure delete the half-made volume before re-raising.
        # (Mirrors create_volume_from_snapshot.)
        if vol_size != src_vol_size:
            try:
                self.extend_volume(volume, volume['size'])
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.error(
                        _LE("Resizing %s failed. Cleaning volume."), new_name)
                    self.delete_volume(volume)
    def get_volume_stats(self, refresh=False):
        """Get volume stats.

        If 'refresh' is True, run update the stats first.
        Otherwise the cached stats dict is returned unchanged.
        """
        if refresh:
            self._update_volume_stats()
        return self._stats
    def _update_volume_stats(self):
        """Recompute self._stats; implemented by subclasses."""
        raise NotImplementedError()
    def extend_volume(self, volume, new_size):
        """Extend an existing volume to the new size."""
        name = volume['name']
        lun = self._get_lun_from_table(name)
        path = lun.metadata['Path']
        curr_size_bytes = six.text_type(lun.size)
        new_size_bytes = six.text_type(int(new_size) * units.Gi)
        # Reused by clone scenarios.
        # Hence comparing the stored size.
        if curr_size_bytes != new_size_bytes:
            # Prefer an in-place resize when the LUN geometry allows it;
            # otherwise fall back to the (slower) sub-clone resize.
            lun_geometry = self.zapi_client.get_lun_geometry(path)
            if (lun_geometry and lun_geometry.get("max_resize")
                    and int(lun_geometry.get("max_resize")) >=
                    int(new_size_bytes)):
                self.zapi_client.do_direct_resize(path, new_size_bytes)
            else:
                self._do_sub_clone_resize(path, new_size_bytes)
            self.lun_table[name].size = new_size_bytes
        else:
            LOG.info(_LI("No need to extend volume %s"
                         " as it is already the requested new size."), name)
def _get_vol_option(self, volume_name, option_name):
"""Get the value for the volume option."""
value = None
options = self.zapi_client.get_volume_options(volume_name)
for opt in options:
if opt.get_child_content('name') == option_name:
value = opt.get_child_content('value')
break
return value
    def _do_sub_clone_resize(self, path, new_size_bytes):
        """Does sub LUN clone after verification.
        Clones the block ranges and swaps
        the LUNs also deletes older LUN
        after a successful clone.
        """
        # path is of the form /vol/<volume>/<lun>, so seg[2] is the volume
        # name and seg[-1] the LUN name.
        seg = path.split("/")
        LOG.info(_LI("Resizing LUN %s to new size using clone operation."),
                 seg[-1])
        name = seg[-1]
        vol_name = seg[2]
        lun = self._get_lun_from_table(name)
        metadata = lun.metadata
        # Sub-clone resize is not supported on compressed volumes.
        compression = self._get_vol_option(vol_name, 'compression')
        if compression == "on":
            msg = _('%s cannot be resized using clone operation'
                    ' as it is hosted on compressed volume')
            raise exception.VolumeBackendAPIException(data=msg % name)
        else:
            block_count = self._get_lun_block_count(path)
            if block_count == 0:
                msg = _('%s cannot be resized using clone operation'
                        ' as it contains no blocks.')
                raise exception.VolumeBackendAPIException(data=msg % name)
            # Create a larger LUN, clone the blocks into it, then swap it
            # into place; destroy the new LUN if anything fails.
            new_lun = 'new-%s' % name
            self.zapi_client.create_lun(vol_name, new_lun, new_size_bytes,
                                        metadata)
            try:
                self._clone_lun(name, new_lun, block_count=block_count)
                self._post_sub_clone_resize(path)
            except Exception:
                with excutils.save_and_reraise_exception():
                    new_path = '/vol/%s/%s' % (vol_name, new_lun)
                    self.zapi_client.destroy_lun(new_path)
    def _post_sub_clone_resize(self, path):
        """Try post sub clone resize in a transactional manner."""
        # Three-step swap: stage the original LUN to a tmp name, move the
        # resized clone into place, then delete the staged original.  The
        # three status vars record how far we got so the except block can
        # roll back or report accordingly.
        st_tm_mv, st_nw_mv, st_del_old = None, None, None
        seg = path.split("/")
        LOG.info(_LI("Post clone resize LUN %s"), seg[-1])
        new_lun = 'new-%s' % (seg[-1])
        tmp_lun = 'tmp-%s' % (seg[-1])
        tmp_path = "/vol/%s/%s" % (seg[2], tmp_lun)
        new_path = "/vol/%s/%s" % (seg[2], new_lun)
        try:
            st_tm_mv = self.zapi_client.move_lun(path, tmp_path)
            st_nw_mv = self.zapi_client.move_lun(new_path, path)
            st_del_old = self.zapi_client.destroy_lun(tmp_path)
        except Exception as e:
            if st_tm_mv is None:
                # Nothing moved yet; surface the failure directly.
                msg = _("Failure staging LUN %s to tmp.")
                raise exception.VolumeBackendAPIException(data=msg % (seg[-1]))
            else:
                if st_nw_mv is None:
                    # Stage succeeded but swap failed: move the original back
                    # before raising.
                    self.zapi_client.move_lun(tmp_path, path)
                    msg = _("Failure moving new cloned LUN to %s.")
                    raise exception.VolumeBackendAPIException(
                        data=msg % (seg[-1]))
                elif st_del_old is None:
                    # Swap done; only tmp cleanup failed -- log and continue.
                    LOG.error(_LE("Failure deleting staged tmp LUN %s."),
                              tmp_lun)
                else:
                    LOG.error(_LE("Unknown exception in"
                                  " post clone resize LUN %s."), seg[-1])
                    LOG.error(_LE("Exception details: %s") % (e.__str__()))
def _get_lun_block_count(self, path):
"""Gets block counts for the LUN."""
LOG.debug("Getting LUN block count.")
lun_infos = self.zapi_client.get_lun_by_args(path=path)
if not lun_infos:
seg = path.split('/')
msg = _('Failure getting LUN info for %s.')
raise exception.VolumeBackendAPIException(data=msg % seg[-1])
lun_info = lun_infos[-1]
bs = int(lun_info.get_child_content('block-size'))
ls = int(lun_info.get_child_content('size'))
block_count = ls / bs
return block_count
    def _check_volume_type_for_lun(self, volume, lun, existing_ref):
        """Checks if lun satisfies the volume type; implemented by subclasses."""
        raise NotImplementedError()
    def manage_existing(self, volume, existing_ref):
        """Brings an existing storage object under Cinder management.
        existing_ref can contain source-id or source-name or both.
        source-id: lun uuid.
        source-name: complete lun path eg. /vol/vol0/lun.
        """
        lun = self._get_existing_vol_with_manage_ref(existing_ref)
        self._check_volume_type_for_lun(volume, lun, existing_ref)
        path = lun.get_metadata_property('Path')
        if lun.name == volume['name']:
            LOG.info(_LI("LUN with given ref %s need not be renamed "
                         "during manage operation."), existing_ref)
        else:
            # Rename the backend LUN to match the Cinder volume name, then
            # re-read it so the cached metadata reflects the new path.
            (rest, splitter, name) = path.rpartition('/')
            new_path = '%s/%s' % (rest, volume['name'])
            self.zapi_client.move_lun(path, new_path)
            lun = self._get_existing_vol_with_manage_ref(
                {'source-name': new_path})
        self._add_lun_to_table(lun)
        LOG.info(_LI("Manage operation completed for LUN with new path"
                     " %(path)s and uuid %(uuid)s."),
                 {'path': lun.get_metadata_property('Path'),
                  'uuid': lun.get_metadata_property('UUID')})
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing.
When calculating the size, round up to the next GB.
"""
lun = self._get_existing_vol_with_manage_ref(existing_ref)
return int(math.ceil(float(lun.size) / units.Gi))
def _get_existing_vol_with_manage_ref(self, existing_ref):
"""Get the corresponding LUN from the storage server."""
uuid = existing_ref.get('source-id')
path = existing_ref.get('source-name')
if not (uuid or path):
reason = _('Reference must contain either source-id'
' or source-name element.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=reason)
lun_info = {}
lun_info.setdefault('path', path) if path else None
if hasattr(self, 'vserver') and uuid:
lun_info['uuid'] = uuid
luns = self.zapi_client.get_lun_by_args(**lun_info)
if luns:
for lun in luns:
netapp_lun = self._extract_lun_info(lun)
storage_valid = self._is_lun_valid_on_storage(netapp_lun)
uuid_valid = True
if uuid:
if netapp_lun.get_metadata_property('UUID') == uuid:
uuid_valid = True
else:
uuid_valid = False
if storage_valid and uuid_valid:
return netapp_lun
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref,
reason=(_('LUN not found with given ref %s.') % existing_ref))
    def _is_lun_valid_on_storage(self, lun):
        """Validate lun specific to storage system."""
        # Base implementation accepts every LUN; subclasses may override
        # with backend-specific checks.
        return True
    def unmanage(self, volume):
        """Removes the specified volume from Cinder management.
        Does not delete the underlying backend storage object.
        """
        # Only logs the departure; the LUN is intentionally left intact on
        # the backend.
        managed_lun = self._get_lun_from_table(volume['name'])
        LOG.info(_LI("Unmanaged LUN with current path %(path)s and uuid "
                     "%(uuid)s."),
                 {'path': managed_lun.get_metadata_property('Path'),
                  'uuid': managed_lun.get_metadata_property('UUID')
                  or 'unknown'})
def initialize_connection_iscsi(self, volume, connector):
"""Driver entry point to attach a volume to an instance.
Do the LUN masking on the storage system so the initiator can access
the LUN on the target. Also return the iSCSI properties so the
initiator can find the LUN. This implementation does not call
_get_iscsi_properties() to get the properties because cannot store the
LUN number in the database. We only find out what the LUN number will
be during this method call so we construct the properties dictionary
ourselves.
"""
initiator_name = connector['initiator']
name = volume['name']
lun_id = self._map_lun(name, [initiator_name], 'iscsi', None)
msg = _("Mapped LUN %(name)s to the initiator %(initiator_name)s")
msg_fmt = {'name': name, 'initiator_name': initiator_name}
LOG.debug(msg % msg_fmt)
target_details_list = self.zapi_client.get_iscsi_target_details()
msg = _("Successfully fetched target details for LUN %(name)s and "
"initiator %(initiator_name)s")
msg_fmt = {'name': name, 'initiator_name': initiator_name}
LOG.debug(msg % msg_fmt)
if not target_details_list:
msg = _('Failed to get LUN target details for the LUN %s')
raise exception.VolumeBackendAPIException(data=msg % name)
target_details = None
for tgt_detail in target_details_list:
if tgt_detail.get('interface-enabled', 'true') == 'true':
target_details = tgt_detail
break
if not target_details:
target_details = target_details_list[0]
(address, port) = (target_details['address'], target_details['port'])
if not target_details['address'] and target_details['port']:
msg = _('Failed to get target portal for the LUN %s')
raise exception.VolumeBackendAPIException(data=msg % name)
iqn = self.zapi_client.get_iscsi_service_details()
if not iqn:
msg = _('Failed to get target IQN for the LUN %s')
raise exception.VolumeBackendAPIException(data=msg % name)
properties = na_utils.get_iscsi_connection_properties(lun_id, volume,
iqn, address,
port)
return properties
def terminate_connection_iscsi(self, volume, connector, **kwargs):
"""Driver entry point to unattach a volume from an instance.
Unmask the LUN on the storage system so the given initiator can no
longer access it.
"""
initiator_name = connector['initiator']
name = volume['name']
metadata = self._get_lun_attr(name, 'metadata')
path = metadata['Path']
self._unmap_lun(path, [initiator_name])
msg = _("Unmapped LUN %(name)s from the initiator %(initiator_name)s")
msg_fmt = {'name': name, 'initiator_name': initiator_name}
LOG.debug(msg % msg_fmt)
    def initialize_connection_fc(self, volume, connector):
        """Initializes the connection and returns connection info.
        Assign any created volume to a compute node/host so that it can be
        used from that host.
        The driver returns a driver_volume_type of 'fibre_channel'.
        The target_wwn can be a single entry or a list of wwns that
        correspond to the list of remote wwn(s) that will export the volume.
        Example return values:
            {
                'driver_volume_type': 'fibre_channel'
                'data': {
                    'target_discovered': True,
                    'target_lun': 1,
                    'target_wwn': '500a098280feeba5',
                    'access_mode': 'rw',
                    'initiator_target_map': {
                        '21000024ff406cc3': ['500a098280feeba5'],
                        '21000024ff406cc2': ['500a098280feeba5']
                    }
                }
            }
            or
            {
                'driver_volume_type': 'fibre_channel'
                'data': {
                    'target_discovered': True,
                    'target_lun': 1,
                    'target_wwn': ['500a098280feeba5', '500a098290feeba5',
                                   '500a098190feeba5', '500a098180feeba5'],
                    'access_mode': 'rw',
                    'initiator_target_map': {
                        '21000024ff406cc3': ['500a098280feeba5',
                                             '500a098290feeba5'],
                        '21000024ff406cc2': ['500a098190feeba5',
                                             '500a098180feeba5']
                    }
                }
            }
        """
        # Normalize the connector's WWPNs before mapping the LUN.
        initiators = [fczm_utils.get_formatted_wwn(wwpn)
                      for wwpn in connector['wwpns']]
        volume_name = volume['name']
        lun_id = self._map_lun(volume_name, initiators, 'fcp', None)
        msg = _("Mapped LUN %(name)s to the initiator(s) %(initiators)s")
        msg_fmt = {'name': volume_name, 'initiators': initiators}
        LOG.debug(msg % msg_fmt)
        target_wwpns, initiator_target_map, num_paths = \
            self._build_initiator_target_map(connector)
        if target_wwpns:
            msg = _("Successfully fetched target details for LUN %(name)s "
                    "and initiator(s) %(initiators)s")
            msg_fmt = {'name': volume_name, 'initiators': initiators}
            LOG.debug(msg % msg_fmt)
        else:
            msg = _('Failed to get LUN target details for the LUN %s')
            raise exception.VolumeBackendAPIException(data=msg % volume_name)
        target_info = {'driver_volume_type': 'fibre_channel',
                       'data': {'target_discovered': True,
                                'target_lun': int(lun_id),
                                'target_wwn': target_wwpns,
                                'access_mode': 'rw',
                                'initiator_target_map': initiator_target_map}}
        return target_info
    def terminate_connection_fc(self, volume, connector, **kwargs):
        """Disallow connection from connector.
        Return empty data if other volumes are in the same zone.
        The FibreChannel ZoneManager doesn't remove zones
        if there isn't an initiator_target_map in the
        return of terminate_connection.
        :returns: data - the target_wwns and initiator_target_map if the
                         zone is to be removed, otherwise the same map with
                         an empty dict for the 'data' key
        """
        initiators = [fczm_utils.get_formatted_wwn(wwpn)
                      for wwpn in connector['wwpns']]
        name = volume['name']
        metadata = self._get_lun_attr(name, 'metadata')
        path = metadata['Path']
        self._unmap_lun(path, initiators)
        msg = _("Unmapped LUN %(name)s from the initiator %(initiators)s")
        msg_fmt = {'name': name, 'initiators': initiators}
        LOG.debug(msg % msg_fmt)
        info = {'driver_volume_type': 'fibre_channel',
                'data': {}}
        if not self._has_luns_mapped_to_initiators(initiators):
            # No more exports for this host, so tear down zone.
            LOG.info(_LI("Need to remove FC Zone, building initiator "
                         "target map"))
            target_wwpns, initiator_target_map, num_paths = \
                self._build_initiator_target_map(connector)
            info['data'] = {'target_wwn': target_wwpns,
                            'initiator_target_map': initiator_target_map}
        return info
def _build_initiator_target_map(self, connector):
"""Build the target_wwns and the initiator target map."""
# get WWPNs from controller and strip colons
all_target_wwpns = self._get_fc_target_wwpns()
all_target_wwpns = [six.text_type(wwpn).replace(':', '')
for wwpn in all_target_wwpns]
target_wwpns = []
init_targ_map = {}
num_paths = 0
if self.lookup_service is not None:
# Use FC SAN lookup to determine which ports are visible.
dev_map = self.lookup_service.get_device_mapping_from_network(
connector['wwpns'],
all_target_wwpns)
for fabric_name in dev_map:
fabric = dev_map[fabric_name]
target_wwpns += fabric['target_port_wwn_list']
for initiator in fabric['initiator_port_wwn_list']:
if initiator not in init_targ_map:
init_targ_map[initiator] = []
init_targ_map[initiator] += fabric['target_port_wwn_list']
init_targ_map[initiator] = list(set(
init_targ_map[initiator]))
for target in init_targ_map[initiator]:
num_paths += 1
target_wwpns = list(set(target_wwpns))
else:
initiator_wwns = connector['wwpns']
target_wwpns = all_target_wwpns
for initiator in initiator_wwns:
init_targ_map[initiator] = target_wwpns
return target_wwpns, init_targ_map, num_paths
|
|
# pylint: disable=missing-docstring, invalid-name
import unittest
import numpy as np
from obsoper.horizontal import UnitSquare
from obsoper import (horizontal, tripolar, corners)
class TestHorizontal(unittest.TestCase):
    """Bilinear interpolation on a small rotated (trapezoid) 2x2 grid."""
    def setUp(self):
        # Trapezoid 2x2 grid represents most of the issues encountered with
        # rotated grids
        self.grid_lons = [[0, 1],
                          [3, 2]]
        self.grid_lats = [[0, 1],
                          [0, 1]]
        self.field = [[1, 2],
                      [3, 4]]
        self.obs_lons = [1.5]
        self.obs_lats = [0.5]
        self.fixture = horizontal.Horizontal(self.grid_lons,
                                             self.grid_lats,
                                             self.obs_lons,
                                             self.obs_lats)
    def test_interpolate(self):
        result = self.fixture.interpolate(self.field)
        expect = 2.5
        self.assertEqual(expect, result)
class TestTripolar(unittest.TestCase):
    """Tripolar-grid interpolation: dateline wrap, grid edges, masking,
    halos and cyclic longitudes."""
    def setUp(self):
        self.grid_lons = np.array([[10, 10],
                                   [20, 20]])
        self.grid_lats = np.array([[30, 40],
                                   [30, 40]])
        self.field = np.array([[1, 2],
                               [3, 4]])
    def test_interpolate_given_two_points(self):
        observed_longitudes = np.array([11, 19])
        observed_latitudes = np.array([31, 39])
        interpolator = tripolar.Tripolar(self.grid_lons,
                                         self.grid_lats,
                                         observed_longitudes,
                                         observed_latitudes)
        result = interpolator.interpolate(self.field)
        expect = np.array([1.3, 3.7])
        np.testing.assert_array_almost_equal(expect, result, decimal=2)
    def test_interpolate_given_point_west_of_dateline(self):
        grid_lons, grid_lats = np.meshgrid([179, -179],
                                           [10, 12],
                                           indexing="ij")
        observed_lons = np.array([179.2])
        observed_lats = np.array([10.2])
        interpolator = tripolar.Tripolar(grid_lons,
                                         grid_lats,
                                         observed_lons,
                                         observed_lats)
        result = interpolator.interpolate(self.field)
        expect = np.array([1.3])
        np.testing.assert_array_almost_equal(expect, result, decimal=3)
    def test_interpolate_given_point_south_of_grid_returns_masked(self):
        self.check_southern_edge([0], [-80], np.ma.masked_all(1))
    def test_interpolate_given_point_on_southern_edge_of_grid(self):
        self.check_southern_edge([-10], [-70], [1])
    def test_interpolate_given_two_points_one_south_of_grid(self):
        self.check_southern_edge([0, 0], [-80, -70],
                                 np.ma.MaskedArray([100, 4], [True, False]))
    def test_interpolate_given_point_north_of_grid_returns_masked(self):
        # NOTE(review): reuses the southern-edge helper; -49 lies north of
        # the helper grid's top latitude (-50).
        self.check_southern_edge([0], [-49], np.ma.masked_all(1))
    def test_interpolate_given_point_inside_cyclic_longitude_cell(self):
        grid_lons, grid_lats = np.meshgrid([70, 140, -150, -80, -10, 60],
                                           [-70, -60, -50],
                                           indexing="ij")
        lons, lats = [65], [-60]
        field = np.zeros((6, 3))
        field[[0, -1], :] = 1
        fixture = tripolar.Tripolar(grid_lons,
                                    grid_lats,
                                    lons,
                                    lats)
        result = fixture.interpolate(field)
        expect = [1]
        self.assertMaskedArrayAlmostEqual(expect, result)
    def test_interpolate_given_masked_value_returns_masked(self):
        grid_lons, grid_lats = np.meshgrid([0, 1],
                                           [0, 1],
                                           indexing="ij")
        observed_lons = np.array([0.5])
        observed_lats = np.array([0.5])
        interpolator = tripolar.Tripolar(grid_lons,
                                         grid_lats,
                                         observed_lons,
                                         observed_lats)
        field = np.ma.MaskedArray([[1, 2], [2, 1]],
                                  [[False, False], [True, False]])
        result = interpolator.interpolate(field)
        expect = np.ma.masked_all(1)
        self.assertMaskedArrayAlmostEqual(expect, result)
    def test_interpolate_given_predetermined_positions(self):
        grid_lons, grid_lats = np.meshgrid([0, 1],
                                           [0, 1],
                                           indexing="ij")
        observed_lons, observed_lats = [0.5], [0.5]
        fixture = tripolar.Tripolar(grid_lons,
                                    grid_lats,
                                    observed_lons,
                                    observed_lats)
        result = fixture.interpolate(self.field)
        expect = [2.5]
        self.assertMaskedArrayAlmostEqual(expect, result)
    def check_southern_edge(self, lons, lats, expect):
        # Helper: 3x3 grid spanning lons [-10, 10], lats [-70, -50].
        grid_lons, grid_lats = np.meshgrid([-10, 0, 10],
                                           [-70, -60, -50],
                                           indexing="ij")
        field = np.array([[1, 2, 3],
                          [4, 5, 6],
                          [7, 8, 9]])
        fixture = tripolar.Tripolar(grid_lons,
                                    grid_lats,
                                    lons,
                                    lats)
        result = fixture.interpolate(field)
        self.assertMaskedArrayAlmostEqual(expect, result)
    def assertMaskedArrayAlmostEqual(self, expect, result):
        # Compare shape plus unmasked values only.
        expect, result = np.ma.asarray(expect), np.ma.asarray(result)
        self.assertEqual(expect.shape,
                         result.shape)
        np.testing.assert_array_almost_equal(expect.compressed(),
                                             result.compressed(),
                                             decimal=3)
    def test_interpolate_given_unmasked_masked_array(self):
        grid_lons, grid_lats = np.meshgrid([0, 1], [0, 1], indexing="ij")
        obs_lons, obs_lats = np.array([0.1]), np.array([0.1])
        operator = tripolar.Tripolar(grid_lons,
                                     grid_lats,
                                     obs_lons,
                                     obs_lats)
        field = np.ma.masked_array([[1, 2], [3, 4]], dtype="d")
        result = operator.interpolate(field)
        expect = [1.3]
        np.testing.assert_array_almost_equal(expect, result, decimal=5)
    def test_interpolate_given_halo_flag_ignores_halo_locations(self):
        """should only consider non-halo grid cells during interpolation
        .. note:: Halo refers to extra columns East/West of the grid
                  and an extra row North of the grid.
        In the contrived example below, only longitudes [1, 2] and
        latitudes [0, 1] are considered inside the halo. Cyclic east/west
        interpolation should not see ones in halo cells.
        """
        grid_lons, grid_lats = np.meshgrid([0, 1, 2, 0, 1],
                                           [0, 1, 2], indexing="ij")
        obs_lons, obs_lats = np.array([0.5]), np.array([0.5])
        operator = tripolar.Tripolar(grid_lons,
                                     grid_lats,
                                     obs_lons,
                                     obs_lats,
                                     has_halo=True)
        field = np.ma.masked_array([[1, 1, 1],
                                    [0, 0, 1],
                                    [0, 0, 1],
                                    [0, 0, 1],
                                    [1, 1, 1]], dtype="d")
        result = operator.interpolate(field)
        expect = [0]
        np.testing.assert_array_equal(expect, result)
    def test_interpolate_given_cyclic_longitude_simple_case(self):
        """should consider [n-1, 0] cell as a continuous cell in longitude"""
        grid_lons, grid_lats = np.meshgrid([1, 2, 0],
                                           [0, 1], indexing="ij")
        obs_lons, obs_lats = np.array([0.5]), np.array([0.5])
        operator = tripolar.Tripolar(grid_lons,
                                     grid_lats,
                                     obs_lons,
                                     obs_lats)
        field = np.ma.masked_array([[1, 1],
                                    [7, 7],
                                    [3, 3]], dtype="d")
        result = operator.interpolate(field)
        expect = [2]
        np.testing.assert_array_equal(expect, result)
class TestTripolar3D(unittest.TestCase):
    """Tripolar interpolation of a field with a trailing depth dimension."""
    def test_interpolate_3d_data(self):
        no = 5
        grid_lons, grid_lats = np.meshgrid([0, 1],
                                           [0, 1],
                                           indexing="ij")
        lons, lats = np.zeros(no), np.zeros(no)
        field = np.array([[[0, 1, 2],
                           [0, 1, 2]],
                          [[0, 1, 2],
                           [0, 1, 2]]])
        fixture = tripolar.Tripolar(grid_lons,
                                    grid_lats,
                                    lons,
                                    lats)
        result = fixture.interpolate(field)
        # One (depth-profile) row per observation.
        expect = np.array([[0, 1, 2],
                           [0, 1, 2],
                           [0, 1, 2],
                           [0, 1, 2],
                           [0, 1, 2]])
        np.testing.assert_array_equal(expect, result)
class TestSelectCorners(unittest.TestCase):
    """horizontal.select_corners: pick cell corner positions by (i, j),
    for scalar and vector indices, with cyclic wrapping in i."""
    def setUp(self):
        self.ni = 4
        longitudes, latitudes = np.meshgrid([10, 20, 30, 40],
                                            [45, 50, 55],
                                            indexing="ij")
        self.grid = np.dstack((longitudes, latitudes))
        self.cell_00 = np.array([(10, 45),
                                 (20, 45),
                                 (20, 50),
                                 (10, 50)])
        self.cell_10 = np.array([(20, 45),
                                 (30, 45),
                                 (30, 50),
                                 (20, 50)])
        self.cell_01 = np.array([(10, 50),
                                 (20, 50),
                                 (20, 55),
                                 (10, 55)])
        # Vector fixture
        self.i = np.array([0, 1, 0])
        self.j = np.array([0, 0, 1])
        self.cells = np.transpose(np.array([self.cell_00,
                                            self.cell_10,
                                            self.cell_01]),
                                  (1, 2, 0))
    def test_grid_shape(self):
        x, y, dims = 4, 3, 2
        self.assertEqual((x, y, dims), self.grid.shape)
    def test_cells_shape(self):
        # Axis order is (corner, dim, cell) -- see the transpose in setUp.
        # Renamed from (corners, cells, dims), which both shadowed the
        # imported ``corners`` module and mislabelled the last two axes.
        n_corners, n_dims, n_cells = 4, 2, 3
        self.assertEqual((n_corners, n_dims, n_cells), self.cells.shape)
    def test_select_corners_given_i_0_j_0(self):
        self.check_select_corners(i=0, j=0, expect=self.cell_00)
    def test_select_corners_given_i_1_j_0(self):
        self.check_select_corners(i=1, j=0, expect=self.cell_10)
    def test_select_corners_given_i_0_j_1(self):
        self.check_select_corners(i=0, j=1, expect=self.cell_01)
    def test_select_corners_cyclic_i_coordinate(self):
        # i == ni wraps around to i == 0.
        self.check_select_corners(i=self.ni, j=0, expect=self.cell_00)
    def test_select_corners_given_array_ij(self):
        self.check_select_corners(i=self.i, j=self.j, expect=self.cells)
    def check_select_corners(self, i, j, expect):
        result = horizontal.select_corners(self.grid, i, j)
        np.testing.assert_array_almost_equal(expect, result)
class TestSelectCornersShape(unittest.TestCase):
    """Output shapes of horizontal.select_corners for scalar/vector indices
    and surface/full-depth inputs."""
    def setUp(self):
        self.nz = 5
        self.surface = np.ones((2, 2))
        self.full_depths = np.ones((2, 2, self.nz))
    def test_select_corners_given_surface_and_scalar_returns_1d_shape(self):
        self.check_shape(self.surface, 0, 0, (4,))
    def test_select_corners_given_surface_and_vector_returns_2d_shape(self):
        self.check_shape(self.surface, [0], [0], (4, 1))
    def test_select_corners_given_full_depth_and_scalar_returns_2d_shape(self):
        self.check_shape(self.full_depths, 0, 0, (4, self.nz))
    def test_select_corners_given_full_depth_and_vector_returns_3d_shape(self):
        self.check_shape(self.full_depths, [0], [0], (4, self.nz, 1))
    def check_shape(self, values, i, j, expect):
        result = horizontal.select_corners(values, i, j).shape
        self.assertEqual(expect, result)
class TestSelectField(unittest.TestCase):
    """horizontal.select_field: gather the four field values surrounding a
    cell index, with cyclic wrapping in i."""
    def setUp(self):
        self.ni = 4
        self.field = np.array([[0, 1, 2],
                               [3, 4, 5],
                               [6, 7, 8],
                               [9, 10, 11]])
        self.cell_00 = np.array([0, 3, 4, 1])
        self.cell_10 = np.array([3, 6, 7, 4])
        self.cell_01 = np.array([1, 4, 5, 2])
        # Vector fixture
        self.i = np.array([0, 1])
        self.j = np.array([0, 0])
        self.cells = np.dstack([self.cell_00,
                                self.cell_10])[0, :]
    def test_select_field_given_i_0_j_0(self):
        self.check_select_field(i=0, j=0, expect=self.cell_00)
    def test_select_field_given_i_1_j_0(self):
        self.check_select_field(i=1, j=0, expect=self.cell_10)
    def test_select_field_given_i_0_j_1(self):
        self.check_select_field(i=0, j=1, expect=self.cell_01)
    def test_select_field_cyclic_i_coordinate(self):
        # i == ni wraps around to i == 0.
        self.check_select_field(i=self.ni, j=0, expect=self.cell_00)
    def test_select_field_given_array_ij(self):
        self.check_select_field(i=self.i, j=self.j, expect=self.cells)
    def check_select_field(self, i, j, expect):
        result = horizontal.select_field(self.field, i, j)
        np.testing.assert_array_almost_equal(expect, result)
    def test_self_cells(self):
        """Assert test fixture shape (4, N)"""
        result = self.cells.shape
        expect = (4, 2)
        self.assertEqual(expect, result)
class TestCorrectCorners(unittest.TestCase):
    """horizontal.correct_corners: shift dateline-straddling cell corners
    so all four longitudes lie on the observation's side of +/-180."""
    def setUp(self):
        self.dateline_corners = [(+179, 0),
                                 (-179, 0),
                                 (-179, 1),
                                 (+179, 1)]
        self.ordinary_corners = [(0, 0),
                                 (1, 0),
                                 (1, 1),
                                 (0, 1)]
        self.east_adjusted = [(-181, 0),
                              (-179, 0),
                              (-179, 1),
                              (-181, 1)]
        self.west_adjusted = [(+179, 0),
                              (+181, 0),
                              (+181, 1),
                              (+179, 1)]
        self.eastern_longitude = -179
        self.western_longitude = +179
        # Many cells fixture
        self.many_dateline_cells = np.dstack([self.dateline_corners,
                                              self.dateline_corners,
                                              self.dateline_corners])
        self.many_ordinary_cells = np.dstack([self.ordinary_corners,
                                              self.ordinary_corners,
                                              self.ordinary_corners])
        self.many_longitudes = [self.eastern_longitude,
                                self.western_longitude,
                                self.eastern_longitude]
        self.many_adjusted_cells = np.dstack([self.east_adjusted,
                                              self.west_adjusted,
                                              self.east_adjusted])
    def test_correct_corners_given_eastern_longitude(self):
        self.check_correct_corners(self.dateline_corners,
                                   self.eastern_longitude,
                                   self.east_adjusted)
    def test_correct_corners_given_western_longitude(self):
        self.check_correct_corners(self.dateline_corners,
                                   self.western_longitude,
                                   self.west_adjusted)
    def test_correct_corners_given_ordinary_cell_returns_ordinary_cell(self):
        self.check_correct_corners(self.ordinary_corners,
                                   self.eastern_longitude,
                                   self.ordinary_corners)
    def test_correct_corners_given_multiple_longitudes(self):
        self.check_correct_corners(self.many_dateline_cells,
                                   self.many_longitudes,
                                   self.many_adjusted_cells)
    def test_correct_corners_given_multiple_ordinary_cells(self):
        self.check_correct_corners(self.many_ordinary_cells,
                                   self.many_longitudes,
                                   self.many_ordinary_cells)
    def check_correct_corners(self, vertices, longitudes, expect):
        result = horizontal.correct_corners(vertices, longitudes)
        np.testing.assert_array_almost_equal(expect, result)
class TestMaskCorners(unittest.TestCase):
    """horizontal.mask_corners: if any of a cell's four corner values is
    masked, mask all four (along the last axis)."""
    def setUp(self):
        self.ndarray = np.arange(4)
        self.all_masked = np.ma.masked_all(4)
        self.single_masked = np.ma.masked_array([1, 2, 3, 4],
                                                mask=[True, False, False, False])
        # 2D array (N, 4)
        values = [[1, 2, 3, 4],
                  [5, 6, 7, 8]]
        mask_before = [[True, False, False, False],
                       [False, False, False, False]]
        mask_after = [[True, True, True, True],
                      [False, False, False, False]]
        self.mask2d = np.ma.masked_array(values, mask_before)
        self.mask2dfill = np.ma.masked_array(values, mask_after)
        # 3D array (Z, N, 4)
        values = [[[1, 2, 3, 4],
                   [5, 6, 7, 8]],
                  [[1, 2, 3, 4],
                   [5, 6, 7, 8]]]
        mask_before = [[[True, False, False, False],
                        [False, False, False, False]],
                       [[False, False, False, False],
                        [True, False, False, False]]]
        mask_after = [[[True, True, True, True],
                       [False, False, False, False]],
                      [[False, False, False, False],
                       [True, True, True, True]]]
        self.mask3d = np.ma.masked_array(values, mask_before)
        self.mask3dfill = np.ma.masked_array(values, mask_after)
    def test_mask_corners_given_ndarray_returns_ndarray(self):
        # Plain ndarrays (no mask) pass through unchanged.
        result = horizontal.mask_corners(self.ndarray)
        expect = self.ndarray
        np.testing.assert_array_equal(expect, result)
    def test_mask_corners_given_all_masked_returns_all_masked(self):
        self.check_mask_corners(self.all_masked, self.all_masked)
    def test_mask_corners_given_single_masked_value_returns_all_masked(self):
        self.check_mask_corners(self.single_masked, self.all_masked)
    def test_mask_corners_given_masked_2d_returns_masks_axis_zero(self):
        self.check_mask_corners(self.mask2d, self.mask2dfill)
    def test_mask_corners_given_masked_3d_returns_masks_axis_zero(self):
        self.check_mask_corners(self.mask3d, self.mask3dfill)
    def check_mask_corners(self, given, expect):
        result = horizontal.mask_corners(given)
        self.assertMaskedArrayEqual(expect, result)
    def assertMaskedArrayEqual(self, expect, result):
        # Compare shape plus unmasked values only.
        self.assertEqual(expect.shape, result.shape)
        np.testing.assert_array_equal(expect.compressed(),
                                      result.compressed())
class TestIsDateline(unittest.TestCase):
    """corners.is_dateline: detect cells whose longitudes straddle +/-180."""
    def setUp(self):
        self.dateline_corners = [(+179, 0),
                                 (-179, 0),
                                 (-179, 1),
                                 (+179, 1)]
        self.ordinary_corners = [(0, 0),
                                 (1, 0),
                                 (1, 1),
                                 (0, 1)]
        self.sequence = np.dstack([self.ordinary_corners,
                                   self.dateline_corners,
                                   self.ordinary_corners])
    def test_is_dateline_given_dateline_corners_returns_true(self):
        self.check_is_dateline(self.dateline_corners, True)
    def test_is_dateline_given_ordinary_corners_returns_false(self):
        self.check_is_dateline(self.ordinary_corners, False)
    def test_is_dateline_given_sequence_returns_boolean_array(self):
        self.check_is_dateline(self.sequence, [False, True, False])
    def check_is_dateline(self, cell, expect):
        result = corners.is_dateline(cell)
        np.testing.assert_array_almost_equal(expect, result)
    def test_self_sequence_shape(self):
        """Assert test fixture shape (4, 2, N)"""
        result = self.sequence.shape
        expect = (4, 2, 3)
        self.assertEqual(expect, result)
class TestIsEast(unittest.TestCase):
    """corners.is_east: longitudes <= 0 (including Greenwich) count as east."""
    def test_is_east_given_east_longitude_returns_true(self):
        self.check_is_east(-1., True)
    def test_is_east_given_west_longitude_returns_false(self):
        self.check_is_east(+1., False)
    def test_is_east_given_greenwich_meridian_returns_true(self):
        self.check_is_east(0., True)
    def test_is_east_given_multiple_values_returns_boolean_array(self):
        self.check_is_east([-1, 0, 1], [True, True, False])
    def check_is_east(self, longitudes, expect):
        result = corners.is_east(longitudes)
        np.testing.assert_array_almost_equal(expect, result)
class TestIsWest(unittest.TestCase):
    """corners.is_west: longitudes > 0 count as west (complement of is_east)."""
    def test_is_west_given_east_longitude_returns_false(self):
        # Renamed: -1 is an *east* longitude; the old name claimed "west"
        # while asserting False, duplicating the next test's description.
        self.check_is_west(-1., False)
    def test_is_west_given_west_longitude_returns_true(self):
        self.check_is_west(+1., True)
    def test_is_west_given_greenwich_meridian_returns_false(self):
        self.check_is_west(0., False)
    def test_is_west_given_multiple_values_returns_boolean_array(self):
        self.check_is_west([-1, 0, 1], [False, False, True])
    def check_is_west(self, longitudes, expect):
        result = corners.is_west(longitudes)
        np.testing.assert_array_almost_equal(expect, result)
class TestUnitSquare(unittest.TestCase):
    """UnitSquare: bilinear weights, corner-value gathering and masking
    given cell indices (i, j) and unit-square fractions (x, y)."""
    def setUp(self):
        self.empty = []
        self.data = np.array([1, 2, 3, 4]).reshape(2, 2)
    def test_weights_given_lower_left_corner(self):
        self.check_weights([0], [0], [1, 0, 0, 0])
    def test_weights_given_lower_right_corner(self):
        self.check_weights([0], [1], [0, 1, 0, 0])
    def test_weights_given_upper_left_corner(self):
        self.check_weights([1], [0], [0, 0, 1, 0])
    def test_weights_given_upper_right_corner(self):
        self.check_weights([1], [1], [0, 0, 0, 1])
    def check_weights(self, x, y, expect):
        fixture = UnitSquare(self.empty, self.empty,
                             np.array(x), np.array(y))
        result = fixture.weights
        expect = np.array(expect).reshape(4, 1)
        np.testing.assert_array_equal(expect, result)
    def test_weights_given_n_fractions_returns_4_by_n_shape(self):
        x, y = np.array([0, 1]), np.array([0, 1])
        fixture = UnitSquare(self.empty, self.empty, x, y)
        result = fixture.weights.shape
        expect = (4, 2)
        self.assertEqual(expect, result)
    def test_values_given_index_and_data(self):
        i, j = np.array([0]), np.array([0])
        fixture = UnitSquare(i, j, self.empty, self.empty)
        data = np.array([1, 2, 3, 4]).reshape(2, 2)
        result = fixture.values(data)
        expect = np.array([1, 2, 3, 4]).reshape(4, 1)
        np.testing.assert_array_equal(expect, result)
    def test_interpolation_given_lower_left_corner(self):
        self.check_interpolation([0], [0], [1])
    def test_interpolation_given_lower_right_corner(self):
        self.check_interpolation([0], [1], [2])
    def test_interpolation_given_upper_left_corner(self):
        self.check_interpolation([1], [0], [3])
    def test_interpolation_given_upper_right_corner(self):
        self.check_interpolation([1], [1], [4])
    @staticmethod
    def check_interpolation(x, y, expect):
        x, y = np.array(x), np.array(y)
        i, j = np.array([0]), np.array([0])
        fixture = UnitSquare(i, j, x, y)
        data = np.array([1, 2, 3, 4]).reshape(2, 2)
        result = fixture(data)
        np.testing.assert_array_equal(expect, result)
    def test_masked_given_upper_left_masked_returns_true(self):
        i, j = np.array([0]), np.array([0])
        fixture = UnitSquare(i, j, self.empty, self.empty)
        data = np.ma.masked_array([[1, 2], [3, 4]])
        data[1, 0] = np.ma.masked
        result = fixture.masked(data)
        expect = np.array([True])
        np.testing.assert_array_equal(expect, result)
    def test_interpolation_given_3d_data(self):
        # Trailing (e.g. depth) dimension is carried through interpolation.
        data = np.zeros((3, 3, 2))
        data[..., 0] = 1
        data[..., 1] = 2
        x, y = np.array([0]), np.array([0])
        i, j = np.array([1]), np.array([1])
        fixture = UnitSquare(i, j, x, y)
        result = fixture(data)
        expect = np.array([1, 2]).reshape(1, 2)
        np.testing.assert_array_equal(expect, result)
class TestRegular(unittest.TestCase):
    """horizontal.Regular: interpolation on regular lat/lon grids, given
    either 1D axes or 2D coordinate arrays."""
    def setUp(self):
        self.grid_longitudes = np.arange(3)
        self.grid_latitudes = np.arange(3)
        self.observed_longitudes = np.array([0.9, 1.9])
        self.observed_latitudes = np.array([1.5, 0.1])
        self.fixture = horizontal.Regular(self.grid_longitudes,
                                          self.grid_latitudes,
                                          self.observed_longitudes,
                                          self.observed_latitudes)
        self.field = np.array([[1, 2, 3],
                               [4, 5, 6],
                               [7, 8, 9]])
        self.counterparts = np.array([5.2, 6.8])
    def test_constructor_given_lists(self):
        # Constructor accepts plain Python lists as well as arrays.
        horizontal.Regular([], [], [], [])
    def test_interpolate_given_2d_grid_lon_lat_arrays(self):
        grid_longitudes = np.array([[10, 10], [20, 20]])
        grid_latitudes = np.array([[30, 40], [30, 40]])
        field = np.array([[0, 0], [1, 1]])
        fixture = horizontal.Regular(grid_longitudes,
                                     grid_latitudes,
                                     [11],
                                     [31])
        result = fixture.interpolate(field)
        expect = [0.1]
        np.testing.assert_array_equal(expect, result)
    def test_interpolate(self):
        result = self.fixture.interpolate(self.field)
        expect = self.counterparts
        np.testing.assert_array_equal(expect, result)
|
|
"""
Huffman coding is an efficient method of compressing data without losing information.
This algorithm analyzes the symbols that appear in a message.
Symbols that appear more often are encoded with shorter bit strings,
while symbols that occur less frequently are encoded with longer bit strings.
"""
from collections import defaultdict, deque
import heapq
class Node:
    """A Huffman-tree node, ordered and compared by symbol frequency.

    Leaves carry a ``sign`` (the encoded symbol); internal nodes leave it
    as None.
    """

    def __init__(self, frequency=0, sign=None, left=None, right=None):
        self.frequency = frequency
        self.sign = sign
        self.left = left
        self.right = right

    def __lt__(self, other):
        return self.frequency < other.frequency

    def __gt__(self, other):
        return self.frequency > other.frequency

    def __eq__(self, other):
        return self.frequency == other.frequency

    def __str__(self):
        return "<ch: {0}: {1}>".format(self.sign, self.frequency)

    # repr and str intentionally share the same format.
    __repr__ = __str__
class HuffmanReader:
def __init__(self, file):
self.file = file
self.buffer = []
self.is_last_byte = False
def get_number_of_additional_bits_in_the_last_byte(self) -> int:
bin_num = self.get_bit() + self.get_bit() + self.get_bit()
return int(bin_num, 2)
def load_tree(self) -> Node:
"""
Load tree from file
:return:
"""
node_stack = deque()
queue_leaves = deque()
root = Node()
current_node = root
is_end_of_tree = False
while not is_end_of_tree:
current_bit = self.get_bit()
if current_bit == "0":
current_node.left = Node()
current_node.right = Node()
node_stack.append(current_node.right) # going to left node, right push on stack
current_node = current_node.left
else:
queue_leaves.append(current_node)
if node_stack:
current_node = node_stack.pop()
else:
is_end_of_tree = True
self._fill_tree(queue_leaves)
return root
def _fill_tree(self, leaves_queue):
"""
Load values to tree after reading tree
:param leaves_queue:
:return:
"""
leaves_queue.reverse()
while leaves_queue:
node = leaves_queue.pop()
s = int(self.get_byte(), 2)
node.sign = s
def _load_byte(self, buff_limit=8) -> bool:
"""
Load next byte is buffer is less than buff_limit
:param buff_limit:
:return: True if there is enough bits in buffer to read
"""
if len(self.buffer) <= buff_limit:
byte = self.file.read(1)
if not byte:
return False
i = int.from_bytes(byte, "big")
self.buffer.extend(list("{0:08b}".format(i)))
return True
def get_bit(self, buff_limit=8):
if self._load_byte(buff_limit):
bit = self.buffer.pop(0)
return bit
else:
return -1
def get_byte(self):
if self._load_byte():
byte_list = self.buffer[:8]
self.buffer = self.buffer[8:]
return "".join(byte_list)
else:
return -1
class HuffmanWriter:
    """Writes a bit stream to a binary file, flushing whole bytes as they form.

    The file must be opened "wb+" because ``close`` seeks back and rewrites
    the first byte to record the padding size.
    """

    def __init__(self, file):
        self.file = file     # binary file, readable+writable ("wb+")
        self.buffer = ""     # pending bits ('0'/'1' chars), always < 8 after a flush
        self.saved_bits = 0  # running total of bits handed to write_bits

    def write_char(self, char):
        """Write a single character as its 8-bit ordinal value."""
        self.write_int(ord(char))

    def write_int(self, num):
        """Write an integer as a fixed-width 8-bit pattern."""
        bin_int = "{0:08b}".format(num)
        self.write_bits(bin_int)

    def write_bits(self, bits):
        """Append a '0'/'1' string to the buffer, writing out every full byte."""
        self.saved_bits += len(bits)
        self.buffer += bits
        while len(self.buffer) >= 8:
            i = int(self.buffer[:8], 2)
            self.file.write(bytes([i]))
            self.buffer = self.buffer[8:]

    def save_tree(self, tree):
        """Serialize the tree: preorder shape bits ("0"=internal, "1"=leaf),
        a terminating "1", then each leaf's byte value in discovery order.

        :param tree: root Node of the Huffman tree
        """
        signs = []
        tree_code = ""

        def get_code_tree(T):
            nonlocal tree_code
            if T.sign is not None:
                signs.append(T.sign)
            if T.left:
                tree_code += "0"
                get_code_tree(T.left)
            if T.right:
                tree_code += "1"
                get_code_tree(T.right)

        get_code_tree(tree)
        self.write_bits(tree_code + "1")  # "1" indicates that tree ended (needed to load the tree)
        for int_sign in signs:
            self.write_int(int_sign)

    def _save_information_about_additional_bits(self, additional_bits: int):
        """Overwrite the first three bits of the file with the padding count.

        :param additional_bits: number of bits (0-7) appended to fill the last byte
        """
        self.file.seek(0)
        first_byte_raw = self.file.read(1)
        self.file.seek(0)
        first_byte = "{0:08b}".format(int.from_bytes(first_byte_raw, "big"))
        # keep the low 5 bits, replace the high 3 with the padding count
        first_byte = first_byte[3:]
        first_byte = "{0:03b}".format(additional_bits) + first_byte
        self.write_bits(first_byte)

    def close(self):
        """Pad the final partial byte with zeros and record the padding size.

        Bug fix: use ``% 8`` so a byte-aligned buffer yields 0, not 8.  The
        previous code passed 8 to the 3-bit header; "{0:03b}".format(8) is
        the FOUR-bit string "1000", which corrupted the first byte of the
        file and left an unflushed bit behind.
        """
        additional_bits = (8 - len(self.buffer)) % 8
        if additional_bits:  # buffer already aligned -> nothing to pad
            self.write_bits("0" * additional_bits)
        self._save_information_about_additional_bits(additional_bits)
class TreeFinder:
    """Walks a Huffman tree one bit at a time to decode symbols."""

    def __init__(self, tree):
        self.root = tree
        self.current_node = tree
        self.found = None  # last decoded sign; "" after a reset on bad input

    def find(self, bit):
        """Advance one step down the tree.

        :param bit: '0' (left), '1' (right); anything else resets the walk
        :return: True once a sign has been decoded (available in ``found``)
        """
        if bit == "0":
            self.current_node = self.current_node.left
        elif bit == "1":
            self.current_node = self.current_node.right
        else:
            # Unexpected input (e.g. the reader's -1 sentinel): abandon the walk.
            self._reset()
            return True
        sign = self.current_node.sign
        if sign is None:
            return False  # still on an internal node; need more bits
        self._reset(sign)
        return True

    def _reset(self, found=""):
        self.found = found
        self.current_node = self.root
class HuffmanCoding:
    """Static helpers tying reader, writer and tree together to (de)compress files."""

    def __init__(self):
        pass

    @staticmethod
    def decode_file(file_in_name, file_out_name):
        """Decompress a file produced by ``encode_file``.

        :param file_in_name: path of the compressed input
        :param file_out_name: path the decoded bytes are written to
        """
        with open(file_in_name, "rb") as file_in, open(file_out_name, "wb") as file_out:
            reader = HuffmanReader(file_in)
            additional_bits = reader.get_number_of_additional_bits_in_the_last_byte()
            tree = reader.load_tree()
            HuffmanCoding._decode_and_write_signs_to_file(file_out, reader, tree, additional_bits)
        print("File decoded.")

    @staticmethod
    def _decode_and_write_signs_to_file(file, reader: HuffmanReader, tree: Node, additional_bits: int):
        """Walk the bit stream through the tree, writing each decoded byte."""
        tree_finder = TreeFinder(tree)
        is_end_of_file = False
        while not is_end_of_file:
            bit = reader.get_bit()
            if bit != -1:
                while not tree_finder.find(bit):  # read whole code
                    bit = reader.get_bit(0)
                file.write(bytes([tree_finder.found]))
            else:  # the last byte remains in the reader's buffer
                is_end_of_file = True
                last_byte = reader.buffer
                # Bug fix: when no padding was added (additional_bits == 0),
                # the old slice last_byte[:-0] evaluated to [] and silently
                # dropped the remaining decoded output.
                if additional_bits:
                    last_byte = last_byte[:-additional_bits]  # strip "0" padding
                for bit in last_byte:
                    if tree_finder.find(bit):
                        file.write(bytes([tree_finder.found]))

    @staticmethod
    def encode_file(file_in_name, file_out_name):
        """Compress ``file_in_name`` into ``file_out_name``.

        The output is opened "wb+" because the writer's close() seeks back to
        record the padding size in the first three bits.
        """
        with open(file_in_name, "rb") as file_in, open(file_out_name, mode="wb+") as file_out:
            signs_frequency = HuffmanCoding._get_char_frequency(file_in)
            file_in.seek(0)  # the frequency pass consumed the file; rewind for encoding
            tree = HuffmanCoding._create_tree(signs_frequency)
            codes = HuffmanCoding._generate_codes(tree)
            writer = HuffmanWriter(file_out)
            writer.write_bits("000")  # placeholder for the padding-size field
            writer.save_tree(tree)
            HuffmanCoding._encode_and_write_signs_to_file(file_in, writer, codes)
            writer.close()
        print("File encoded.")

    @staticmethod
    def _encode_and_write_signs_to_file(file, writer: HuffmanWriter, codes: dict):
        """Emit the Huffman code of every byte of ``file`` through ``writer``."""
        sign = file.read(1)
        while sign:
            int_char = int.from_bytes(sign, "big")
            writer.write_bits(codes[int_char])
            sign = file.read(1)

    @staticmethod
    def _get_char_frequency(file) -> dict:
        """Count occurrences of each byte value in ``file`` (consumes the file).

        :return: mapping byte value -> frequency
        """
        signs_frequency = defaultdict(lambda: 0)
        # read(1) returns b"" (falsy) at EOF — no need for the tell() dance
        sign = file.read(1)
        while sign:
            signs_frequency[int.from_bytes(sign, "big")] += 1
            sign = file.read(1)
        return signs_frequency

    @staticmethod
    def _generate_codes(tree: Node) -> dict:
        """Return the mapping byte value -> '0'/'1' code string for ``tree``."""
        codes = dict()
        HuffmanCoding._go_through_tree_and_create_codes(tree, "", codes)
        return codes

    @staticmethod
    def _create_tree(signs_frequency: dict) -> Node:
        """Build the Huffman tree by repeatedly merging the two rarest nodes."""
        nodes = [Node(frequency=frequency, sign=char_int) for char_int, frequency in signs_frequency.items()]
        heapq.heapify(nodes)
        while len(nodes) > 1:
            left = heapq.heappop(nodes)
            right = heapq.heappop(nodes)
            new_node = Node(frequency=left.frequency + right.frequency, left=left, right=right)
            heapq.heappush(nodes, new_node)
        return nodes[0]  # root

    @staticmethod
    def _go_through_tree_and_create_codes(tree: Node, code: str, dict_codes: dict):
        """Depth-first walk accumulating "0" for left and "1" for right edges."""
        if tree.sign is not None:
            dict_codes[tree.sign] = code
        if tree.left:
            HuffmanCoding._go_through_tree_and_create_codes(tree.left, code + "0", dict_codes)
        if tree.right:
            HuffmanCoding._go_through_tree_and_create_codes(tree.right, code + "1", dict_codes)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.