gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
# Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import functools
import logging
import re
from datetime import datetime
from concurrent.futures import as_completed
from dateutil.tz import tzutc
from dateutil.parser import parse
from c7n.actions import (
ActionRegistry, BaseAction, ModifyVpcSecurityGroupsAction)
from c7n.filters import FilterRegistry, AgeFilter, OPERATORS
import c7n.filters.vpc as net_filters
from c7n.manager import resources
from c7n.query import QueryResourceManager
from c7n.tags import universal_augment
from c7n.utils import (
local_session, generate_arn,
get_retry, chunks, snapshot_identifier, type_schema)
# Module-level logger and the shared filter/action registries for the
# cache-cluster resource type.
log = logging.getLogger('custodian.elasticache')
filters = FilterRegistry('elasticache.filters')
actions = ActionRegistry('elasticache.actions')
# Matches burstable (t-family) cache node types, e.g. "cache.t2.micro".
# NOTE(review): the '.' is unescaped so it matches any character; presumably
# a literal dot was intended -- confirm.
TTYPE = re.compile('cache.t')
@resources.register('cache-cluster')
class ElastiCacheCluster(QueryResourceManager):
    """Resource manager for ElastiCache clusters (describe_cache_clusters)."""

    class resource_type(object):
        # Metadata consumed by c7n's generic query/filter/tag machinery.
        service = 'elasticache'
        type = 'cluster'
        enum_spec = ('describe_cache_clusters',
                     'CacheClusters[]', None)
        name = id = 'CacheClusterId'
        filter_name = 'CacheClusterId'
        filter_type = 'scalar'
        date = 'CacheClusterCreateTime'
        dimension = 'CacheClusterId'
        universal_taggable = True

    filter_registry = filters
    action_registry = actions
    # Cached functools.partial built lazily by the generate_arn property.
    _generate_arn = None
    retry = staticmethod(get_retry(('Throttled',)))
    permissions = ('elasticache:ListTagsForResource',)
    augment = universal_augment

    @property
    def generate_arn(self):
        # Lazily build and cache a partial for constructing cluster ARNs.
        if self._generate_arn is None:
            self._generate_arn = functools.partial(
                generate_arn,
                'elasticache',
                region=self.config.region,
                account_id=self.account_id,
                resource_type='cluster',
                separator=':')
        return self._generate_arn
@filters.register('security-group')
class SecurityGroupFilter(net_filters.SecurityGroupFilter):
    """Filter cache clusters by their attached VPC security groups."""

    # JMESPath into the cluster resource for the related group ids.
    RelatedIdsExpression = "SecurityGroups[].SecurityGroupId"
@filters.register('subnet')
class SubnetFilter(net_filters.SubnetFilter):
    """Filters elasticache clusters based on their associated subnet

    :example:

        .. code-block: yaml

            policies:
              - name: elasticache-in-subnet-x
                resource: cache-cluster
                filters:
                  - type: subnet
                    key: SubnetId
                    value: subnet-12ab34cd
    """

    # Clusters don't carry subnet ids directly; they are resolved through
    # the cluster's cache subnet group, so no expression is used.
    RelatedIdsExpression = ""

    def get_related_ids(self, resources):
        """Resolve the subnet ids behind each cluster's subnet group."""
        subnet_ids = set()
        for resource in resources:
            subnets = self.groups[resource['CacheSubnetGroupName']]['Subnets']
            subnet_ids.update(s['SubnetIdentifier'] for s in subnets)
        return subnet_ids

    def process(self, resources, event=None):
        # Cache subnet-group name -> subnet-group resource for lookups above.
        subnet_manager = self.manager.get_resource_manager('cache-subnet-group')
        self.groups = {
            g['CacheSubnetGroupName']: g for g in subnet_manager.resources()}
        return super(SubnetFilter, self).process(resources, event)
# Register the generic network-location filter for cache clusters.
filters.register('network-location', net_filters.NetworkLocation)
@actions.register('delete')
class DeleteElastiCacheCluster(BaseAction):
    """Action to delete an elasticache cluster

    To prevent unwanted deletion of elasticache clusters, it is recommended
    to include a filter

    :example:

        .. code-block: yaml

            policies:
              - name: elasticache-delete-stale-clusters
                resource: cache-cluster
                filters:
                  - type: value
                    value_type: age
                    key: CacheClusterCreateTime
                    op: ge
                    value: 90
                actions:
                  - type: delete
                    skip-snapshot: false
    """

    schema = type_schema(
        'delete', **{'skip-snapshot': {'type': 'boolean'}})
    permissions = ('elasticache:DeleteCacheCluster',
                   'elasticache:DeleteReplicationGroup')

    def process(self, clusters):
        """Delete standalone clusters directly; clusters in a replication
        group are removed by deleting the whole group."""
        skip = self.data.get('skip-snapshot', False)
        client = local_session(
            self.manager.session_factory).client('elasticache')

        standalone = [
            c for c in clusters if not c.get('ReplicationGroupId', '')]
        group_ids = {
            c['ReplicationGroupId'] for c in clusters
            if c.get('ReplicationGroupId', '')}

        # Snapshot parameters differ depending on whether a final
        # snapshot is taken, hence the branch per cluster.
        for cluster in standalone:
            params = {'CacheClusterId': cluster['CacheClusterId']}
            if not skip and _cluster_eligible_for_snapshot(cluster):
                params['FinalSnapshotIdentifier'] = snapshot_identifier(
                    'Final', cluster['CacheClusterId'])
                self.log.debug(
                    "Taking final snapshot of %s", cluster['CacheClusterId'])
            else:
                self.log.debug(
                    "Skipping final snapshot of %s", cluster['CacheClusterId'])
            client.delete_cache_cluster(**params)
            self.log.info(
                'Deleted ElastiCache cluster: %s',
                cluster['CacheClusterId'])

        for group_id in group_ids:
            params = {'ReplicationGroupId': group_id,
                      'RetainPrimaryCluster': False}
            if not skip:
                params['FinalSnapshotIdentifier'] = snapshot_identifier(
                    'Final', group_id)
            client.delete_replication_group(**params)
            self.log.info(
                'Deleted ElastiCache replication group: %s',
                group_id)
@actions.register('snapshot')
class SnapshotElastiCacheCluster(BaseAction):
    """Action to snapshot an elasticache cluster

    :example:

        .. code-block: yaml

            policies:
              - name: elasticache-cluster-snapshot
                resource: cache-cluster
                filters:
                  - type: value
                    key: CacheClusterStatus
                    op: not-in
                    value: ["deleted","deleting","creating"]
                actions:
                  - snapshot
    """

    schema = type_schema('snapshot')
    permissions = ('elasticache:CreateSnapshot',)

    def process(self, clusters):
        """Snapshot eligible clusters concurrently; log any failures."""
        eligible = [c for c in clusters if _cluster_eligible_for_snapshot(c)]
        with self.executor_factory(max_workers=3) as w:
            futures = [
                w.submit(self.process_cluster_snapshot, c) for c in eligible]
            for f in as_completed(futures):
                if f.exception():
                    self.log.error(
                        "Exception creating cache cluster snapshot \n %s",
                        f.exception())
        return clusters

    def process_cluster_snapshot(self, cluster):
        """Create a single backup snapshot for one cluster."""
        client = local_session(
            self.manager.session_factory).client('elasticache')
        client.create_snapshot(
            SnapshotName=snapshot_identifier(
                'Backup', cluster['CacheClusterId']),
            CacheClusterId=cluster['CacheClusterId'])
@actions.register('modify-security-groups')
class ElasticacheClusterModifyVpcSecurityGroups(ModifyVpcSecurityGroupsAction):
    """Modify security groups on an Elasticache cluster.

    Looks at the individual clusters and modifies the Replication
    Group's configuration for Security groups so all nodes get
    affected equally
    """

    permissions = ('elasticache:ModifyReplicationGroup',)

    def process(self, clusters):
        """Apply the computed security-group set to each replication group.

        :param clusters: cache cluster resources selected by the policy.
        """
        client = local_session(
            self.manager.session_factory).client('elasticache')
        # get_groups returns one security-group list per cluster, in order.
        groups = super(
            ElasticacheClusterModifyVpcSecurityGroups,
            self).get_groups(clusters, metadata_key='SecurityGroupId')
        # Map each replication group to the groups computed for its cluster;
        # if several clusters share a replication group the last one wins,
        # matching the original index-aligned behavior.
        replication_group_map = {}
        for cluster, security_groups in zip(clusters, groups):
            replication_group_map[cluster['ReplicationGroupId']] = security_groups
        for group_id, security_groups in replication_group_map.items():
            client.modify_replication_group(
                ReplicationGroupId=group_id,
                SecurityGroupIds=security_groups)
@resources.register('cache-subnet-group')
class ElastiCacheSubnetGroup(QueryResourceManager):
    """Resource manager for ElastiCache subnet groups."""

    class resource_type(object):
        # Metadata consumed by c7n's generic query machinery.
        service = 'elasticache'
        type = 'subnet-group'
        enum_spec = ('describe_cache_subnet_groups',
                     'CacheSubnetGroups', None)
        name = id = 'CacheSubnetGroupName'
        filter_name = 'CacheSubnetGroupName'
        filter_type = 'scalar'
        # Subnet groups expose no creation date or cloudwatch dimension.
        date = None
        dimension = None
@resources.register('cache-snapshot')
class ElastiCacheSnapshot(QueryResourceManager):
    """Resource manager for ElastiCache snapshots."""

    class resource_type(object):
        # Metadata consumed by c7n's generic query machinery.
        service = 'elasticache'
        type = 'snapshot'
        enum_spec = ('describe_snapshots', 'Snapshots', None)
        name = id = 'SnapshotName'
        filter_name = 'SnapshotName'
        filter_type = 'scalar'
        date = 'StartTime'
        dimension = None
        universal_taggable = True

    permissions = ('elasticache:ListTagsForResource',)
    # Snapshots get their own registries, distinct from the cluster ones.
    filter_registry = FilterRegistry('elasticache-snapshot.filters')
    action_registry = ActionRegistry('elasticache-snapshot.actions')
    # Cached functools.partial built lazily by the generate_arn property.
    _generate_arn = None
    retry = staticmethod(get_retry(('Throttled',)))
    augment = universal_augment

    @property
    def generate_arn(self):
        # Lazily build and cache a partial for constructing snapshot ARNs.
        if self._generate_arn is None:
            self._generate_arn = functools.partial(
                generate_arn,
                'elasticache',
                region=self.config.region,
                account_id=self.account_id,
                resource_type='snapshot',
                separator=':')
        return self._generate_arn
@ElastiCacheSnapshot.filter_registry.register('age')
class ElastiCacheSnapshotAge(AgeFilter):
    """Filters elasticache snapshots based on their age (in days)

    :example:

        .. code-block: yaml

            policies:
              - name: elasticache-stale-snapshots
                resource: cache-snapshot
                filters:
                  - type: age
                    days: 30
                    op: ge
    """

    schema = type_schema(
        'age', days={'type': 'number'},
        op={'type': 'string', 'enum': list(OPERATORS.keys())})

    # Placeholder only; get_resource_date below overrides the lookup.
    date_attribute = 'dummy'

    def get_resource_date(self, snapshot):
        """ Override superclass method as there is no single snapshot date attribute.
        """
        def as_aware_datetime(value):
            # Parse strings and force a UTC timezone on naive datetimes.
            if not isinstance(value, datetime):
                value = parse(value)
            if not value.tzinfo:
                value = value.replace(tzinfo=tzutc())
            return value

        # A snapshot holds one entry per node; age by the earliest of the
        # node snapshot creation times.
        return min(as_aware_datetime(ns['SnapshotCreateTime'])
                   for ns in snapshot['NodeSnapshots'])
@ElastiCacheSnapshot.action_registry.register('delete')
class DeleteElastiCacheSnapshot(BaseAction):
    """Action to delete elasticache snapshots

    To prevent unwanted deletion of elasticache snapshots, it is recommended to
    apply a filter

    :example:

        .. code-block: yaml

            policies:
              - name: elasticache-stale-snapshots
                resource: cache-snapshot
                filters:
                  - type: age
                    days: 30
                    op: ge
                actions:
                  - delete
    """

    schema = type_schema('delete')
    permissions = ('elasticache:DeleteSnapshot',)

    def process(self, snapshots):
        """Delete snapshots in batches of 50 across worker threads."""
        log.info("Deleting %d ElastiCache snapshots", len(snapshots))
        with self.executor_factory(max_workers=3) as w:
            futures = [
                w.submit(self.process_snapshot_set, batch)
                for batch in chunks(reversed(snapshots), size=50)]
            for f in as_completed(futures):
                if f.exception():
                    self.log.error(
                        "Exception deleting snapshot set \n %s",
                        f.exception())
        return snapshots

    def process_snapshot_set(self, snapshots_set):
        """Delete one batch of snapshots sequentially."""
        client = local_session(
            self.manager.session_factory).client('elasticache')
        for snapshot in snapshots_set:
            client.delete_snapshot(SnapshotName=snapshot['SnapshotName'])
@ElastiCacheSnapshot.action_registry.register('copy-cluster-tags')
class CopyClusterTags(BaseAction):
    """
    Copy specified tags from Elasticache cluster to Snapshot

    :example:

        .. code-block: yaml

            - name: elasticache-test
              resource: cache-snapshot
              filters:
                 - type: value
                   key: SnapshotName
                   op: in
                   value:
                    - test-tags-backup
              actions:
                - type: copy-cluster-tags
                  tags:
                    - tag1
                    - tag2
    """

    schema = type_schema(
        'copy-cluster-tags',
        tags={'type': 'array', 'items': {'type': 'string'}, 'minItems': 1},
        required=('tags',))

    def get_permissions(self):
        # Needs read access to clusters plus tag-write on snapshots.
        perms = self.manager.get_resource_manager('cache-cluster').get_permissions()
        perms.append('elasticache:AddTagsToResource')
        return perms

    def process(self, snapshots):
        """Copy the requested tag keys from each snapshot's source cluster.

        Only tags whose value is missing or different on the snapshot are
        written, and the API call is skipped entirely when there is nothing
        to update.
        """
        log.info("Modifying %d ElastiCache snapshots", len(snapshots))
        client = local_session(self.manager.session_factory).client('elasticache')
        clusters = {
            cluster['CacheClusterId']: cluster for cluster in
            self.manager.get_resource_manager('cache-cluster').resources()}
        only_tags = self.data.get('tags', [])  # tag keys to copy
        for s in snapshots:
            # Skip snapshots whose source cluster no longer exists.
            if s['CacheClusterId'] not in clusters:
                continue
            arn = self.manager.generate_arn(s['SnapshotName'])
            cluster_tags = clusters[s['CacheClusterId']]['Tags']
            extant_tags = {t['Key']: t['Value'] for t in s.get('Tags', ())}
            copy_tags = [
                t for t in cluster_tags
                if t['Key'] in only_tags and
                t['Value'] != extant_tags.get(t['Key'], "")]
            if not copy_tags:
                # Nothing changed; avoid a needless AddTagsToResource call.
                continue
            self.manager.retry(
                client.add_tags_to_resource, ResourceName=arn, Tags=copy_tags)
def _cluster_eligible_for_snapshot(cluster):
# added regex search to filter unsupported cachenode types
return (
cluster['Engine'] != 'memcached' and not
TTYPE.match(cluster['CacheNodeType'])
)
|
|
# -*- coding: utf-8 -*-
# (C) Copyright 2014 Voyager Search
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
import json
import itertools
import csv
import shutil
import datetime
import string
import xml.etree.cElementTree as et
import requests
import warnings
from requests.packages.urllib3.exceptions import InsecureRequestWarning
warnings.simplefilter('ignore', InsecureRequestWarning)
from utils import status
from utils import task_utils
# Shapefile (DBF) attribute field names are limited to 10 characters.
SHAPE_FIELD_LENGTH = slice(0, 10)
# Per-run bookkeeping shared by the export helpers below.
errors_reasons = {}
skipped_reasons = {}
exported_count = 0.
errors_count = 0.
status_writer = status.Writer()
# Printable ASCII plus common whitespace chars treated as "text" by is_text.
# NOTE: Python 2 only -- relies on map() returning a list and on
# string.maketrans, neither of which works this way on Python 3.
text_characters = "".join(map(chr, range(32, 127)) + list("\n\r\t\b"))
_null_trans = string.maketrans("", "")
def change(val, encoding_type):
    """Encode string values with *encoding_type*; pass other types through.

    Python 2: accepts both byte strings and unicode values.
    """
    if not isinstance(val, (str, unicode)):
        return val
    return val.encode(encoding_type)
def is_ascii(filename, blocksize=512):
    """Return 1 if the first *blocksize* bytes of *filename* look like text.

    :param filename: path of the file to sample
    :param blocksize: number of leading bytes to inspect
    """
    # Context manager closes the handle promptly; the original left the
    # file object to the garbage collector (a resource leak).
    with open(filename) as f:
        return is_text(f.read(blocksize))
def is_text(s):
    """Heuristically classify *s* as text (returns 1) or binary (returns 0).

    A string is binary if it contains a NUL byte or if more than 30% of
    its characters fall outside `text_characters`. Empty input counts
    as text.
    """
    if "\0" in s:
        return 0
    if not s:  # Empty files are considered text
        return 1
    # Get the non-text characters (maps a character to itself then
    # use the 'remove' option to get rid of the text characters.)
    t = s.translate(_null_trans, text_characters)
    # If more than 30% non-text characters, then
    # this is considered a binary file.
    # float() is essential: this file has no `from __future__ import
    # division`, so under Python 2 len(t) / len(s) was integer division
    # and evaluated to 0 for any partially-binary input, making the 30%
    # threshold unreachable.
    if float(len(t)) / len(s) > 0.30:
        return 0
    return 1
def export_to_shp(jobs, file_name, output_folder):
    """Exports results to a shapefile.

    :param jobs: list of jobs (a job contains the result information)
    :param file_name: the output file name
    :param output_folder: the output task folder
    """
    global exported_count
    global errors_count
    try:
        from osgeo import ogr
    except ImportError as ie:
        # OGR is required; without it the whole shapefile export fails.
        errors_count += 1
        exported_count = 0
        errors_reasons['Import Error'] = ie.message  # Python 2: .message
        status_writer.send_state(status.STAT_FAILED, repr(ie))
        return
    driver = ogr.GetDriverByName("ESRI Shapefile")
    for job in jobs:
        # Map the GeoJSON geometry type onto the matching OGR geometry type.
        try:
            geo_json = job['[geo]']
            if geo_json['type'].lower() == 'polygon':
                geometry_type = ogr.wkbPolygon
            elif geo_json['type'].lower() == 'geometrycollection':
                # For collections, infer the type from the geometry dimension.
                geom = ogr.CreateGeometryFromJson("{0}".format(job['[geo]']))
                if geom.GetDimension() == 0:
                    geometry_type = ogr.wkbPoint
                elif geom.GetDimension() == 1:
                    geometry_type = ogr.wkbLineString
                else:
                    geometry_type = ogr.wkbPolygon
            elif geo_json['type'].lower() == 'multipolygon':
                geometry_type = ogr.wkbMultiPolygon
            elif geo_json['type'].lower() == 'linestring':
                geometry_type = ogr.wkbLineString
            elif geo_json['type'].lower() == 'multilinestring':
                geometry_type = ogr.wkbMultiLineString
            elif geo_json['type'].lower() == 'point':
                geometry_type = ogr.wkbPoint
            elif geo_json['type'].lower() == 'multipoint':
                geometry_type = ogr.wkbMultiPoint
        except KeyError as ke:
            # Item has no '[geo]' field; record it and move on.
            errors_count += 1
            errors_reasons[job.values()[0]] = 'No Geometry field for this item.'
            status_writer.send_state(status.STAT_WARNING)
            continue
        except TypeError as te:
            errors_count += 1
            errors_reasons[job.values()[0]] = repr(te)
            status_writer.send_state(status.STAT_WARNING)
            continue
        # One shapefile per geometry type; append when it already exists.
        if os.path.exists(os.path.join(output_folder, '{0}_{1}.shp'.format(file_name, geo_json['type']))):
            shape_file = ogr.Open(os.path.join(output_folder, '{0}_{1}.shp'.format(file_name, geo_json['type'])), 1)
            layer = shape_file.GetLayer()
        else:
            shape_file = driver.CreateDataSource(os.path.join(output_folder, '{0}_{1}.shp'.format(file_name, geo_json['type'])))
            # Results are always written as WGS84 (EPSG:4326).
            epsg_code = 4326
            srs = ogr.osr.SpatialReference()
            srs.ImportFromEPSG(epsg_code)
            layer = shape_file.CreateLayer('{0}_{1}'.format(file_name, geo_json['type']), srs, geometry_type)
            # Create one attribute field per job key; the 'f?_' name prefix
            # selects the field type (presumably a Voyager field-naming
            # convention: fu_=real, fi_=int32, fl_=int64, fd_=datetime --
            # TODO confirm).
            for name in jobs[0].keys():
                if not name == '[geo]':
                    name = str(name)
                    if name.startswith('fu_'):
                        new_field = ogr.FieldDefn(name, ogr.OFTReal)
                    elif name.startswith('fi_'):
                        new_field = ogr.FieldDefn(name, ogr.OFTInteger)
                    elif name.startswith('fl_'):
                        new_field = ogr.FieldDefn(name, ogr.OFTInteger64)
                    elif name.startswith('fd_'):
                        new_field = ogr.FieldDefn(name, ogr.OFTDateTime)
                    else:
                        new_field = ogr.FieldDefn(name, ogr.OFTString)
                    layer.CreateField(new_field)
        try:
            layer_def = layer.GetLayerDefn()
            feature = ogr.Feature(layer_def)
            geom = ogr.CreateGeometryFromJson("{0}".format(job['[geo]']))
            if not geom:
                # Retry with strict JSON serialization if str() form failed.
                geom = ogr.CreateGeometryFromJson("{0}".format(json.dumps(job['[geo]'])))
            feature.SetGeometry(geom)
        except KeyError:
            feature.SetGeometry(None)
            pass
        try:
            # Geometry is written; drop it so only attributes remain below.
            job.pop('[geo]')
        except KeyError:
            pass
        try:
            for field, value in job.iteritems():  # Python 2 dict iteration
                field, value = str(field), str(value)
                # DBF field names are truncated to 10 characters by OGR.
                i = feature.GetFieldIndex(field[0:10])
                feature.SetField(i, value)
            layer.CreateFeature(feature)
            shape_file.Destroy()
            shape_file = None
            exported_count += 1
        except Exception as ex:
            errors_count += 1
            errors_reasons[job.values()[0]] = repr(ex)
            shape_file = None
            continue
def export_to_csv(jobs, file_name, output_folder, fields):
    """
    Exports result to a CSV file.

    :param jobs: list of jobs (a job contains the result information)
    :param file_name: the output file name
    :param output_folder: the output task folder
    :param fields: list of field names used as the CSV header columns
    """
    global exported_count
    global errors_count
    # Drop non-ascii characters from the name (Python 2 str.encode).
    file_name_new = file_name.encode('ascii', 'ignore')
    if not file_name_new:
        # Name was entirely non-ascii; fall back to the original name.
        file_path = output_folder + os.sep + file_name + '.csv'
    else:
        file_path = os.path.join(output_folder, '{0}.csv'.format(file_name_new))
    # Only write the header when creating a new file; chunks are appended.
    if os.path.exists(file_path):
        write_keys = False
    else:
        write_keys = True
    # 'ab' append-binary mode is the Python 2 csv idiom (avoids blank
    # rows on Windows).
    with open(file_path, 'ab') as csv_file:
        # Normalize derived/localized field names to the keys present in jobs.
        if 'location:[localize]' in fields:
            i = fields.index('location:[localize]')
            fields.remove('location:[localize]')
            fields.insert(i, 'location')
        if 'path[absolute]' in fields:
            i = fields.index('path[absolute]')
            fields.remove('path[absolute]')
            fields.insert(i, '[absolute]')
        writer = csv.DictWriter(csv_file, fieldnames=fields)
        if write_keys:
            writer.writeheader()
        for cnt, job in enumerate(jobs, 1):
            try:
                # UTF-8 encode string values so csv can write them.
                encoded_job = {k: change(v, 'utf-8') for (k, v) in job.items()}
                writer.writerow(encoded_job)
                exported_count += 1
            except Exception as ex:
                errors_count += 1
                errors_reasons[job.keys()[0]] = repr(ex)  # Python 2: keys() list
                continue
def export_to_xml(jobs, file_name, output_folder):
    """
    Exports results to a XML file.

    :param jobs: list of jobs (a job contains the result information)
    :param file_name: the output file name
    :param output_folder: the output task folder
    """
    global exported_count
    global errors_count
    comment = et.Comment('{0}'.format(datetime.datetime.today().strftime('Exported: %c')))
    if not os.path.exists(os.path.join(output_folder, "{0}.xml".format(file_name))):
        # First chunk: build a fresh <results> document.
        results = et.Element('results')
        for job in jobs:
            try:
                result = et.SubElement(results, 'result')
                for key, val in job.items():
                    if key == '[geo]':
                        # Serialize the geometry as nested <point> elements.
                        child = et.SubElement(result, 'geo')
                        if 'geometries' in val:
                            # GeometryCollection: one child per geometry.
                            geom_collection = et.SubElement(child, val['type'])
                            for geom in val['geometries']:
                                geom_part = et.SubElement(geom_collection, geom['type'])
                                for part in list(itertools.chain(*geom['coordinates'])):
                                    point = et.SubElement(geom_part, 'point')
                                    point.text = str(part).replace('[', '').replace(']', '')
                        else:
                            geom_parent = et.SubElement(child, val['type'])
                            try:
                                list_coords = list(itertools.chain(*val['coordinates']))
                            except TypeError:
                                # Coordinates are not nested (e.g. a point).
                                list_coords = [val['coordinates']]
                            if list_coords:
                                for coords in list_coords:
                                    point = et.SubElement(geom_parent, 'point')
                                    point.text = str(coords).replace('[', '').replace(']', '')
                            else:
                                # NOTE(review): 'point' is unbound on this
                                # path; reaching it would raise NameError.
                                for coords in val['coordinates']:
                                    point.text = str(coords).replace('[', '').replace(']', '')
                        continue
                    child = et.SubElement(result, key)
                    child.text = str(val)
            except Exception as ex:
                errors_count += 1
                errors_reasons[job.keys()[0]] = repr(ex)  # Python 2: keys() list
                continue
        # Count every <result> appended to the new document.
        exported_count += len(results)
        tree = et.ElementTree(results)
    else:
        # Subsequent chunks: append <result> nodes to the existing document.
        # NOTE(review): this branch duplicates the geometry serialization
        # above; a shared helper would remove the duplication.
        tree = et.parse(os.path.join(output_folder, "{0}.xml".format(file_name)))
        root = tree.getroot()
        for job in jobs:
            try:
                result = et.SubElement(root, 'result')
                for key, val in job.items():
                    if key == '[geo]':
                        child = et.SubElement(result, 'geo')
                        if 'geometries' in val:
                            geom_collection = et.SubElement(child, val['type'])
                            for geom in val['geometries']:
                                geom_part = et.SubElement(geom_collection, geom['type'])
                                for part in list(itertools.chain(*geom['coordinates'])):
                                    point = et.SubElement(geom_part, 'point')
                                    point.text = str(part).replace('[', '').replace(']', '')
                        else:
                            geom_parent = et.SubElement(child, val['type'])
                            try:
                                list_coords = list(itertools.chain(*val['coordinates']))
                            except TypeError:
                                list_coords = [val['coordinates']]
                            if list_coords:
                                for coords in list_coords:
                                    point = et.SubElement(geom_parent, 'point')
                                    point.text = str(coords).replace('[', '').replace(']', '')
                            else:
                                # NOTE(review): same unbound 'point' issue.
                                for coords in val['coordinates']:
                                    point.text = str(coords).replace('[', '').replace(']', '')
                        continue
                    child = et.SubElement(result, key)
                    child.text = str(val)
                # This branch counts per job rather than per document.
                exported_count += 1
            except Exception as ex:
                errors_count += 1
                errors_reasons[job.keys()[0]] = repr(ex)
                continue
    # Stamp the export time and (re)write the whole document.
    tree.getroot().insert(0, comment)
    tree.write(os.path.join(output_folder, "{0}.xml".format(file_name)), encoding='UTF-8')
def execute(request):
    """Exports search results a CSV, shapefile or XML document.

    :param request: json as a dict.
    """
    # Get SSL trust setting.
    verify_ssl = task_utils.get_ssl_mode()
    chunk_size = task_utils.CHUNK_SIZE
    file_name = task_utils.get_parameter_value(request['params'], 'file_name', 'value')
    fields = task_utils.get_parameter_value(request['params'], 'fields', 'value')
    out_format = task_utils.get_parameter_value(request['params'], 'output_format', 'value')
    # 'path:[absolute]' is derived from 'path', so ensure 'path' is fetched.
    if not 'path' in fields and 'path:[absolute]' in fields:
        fields.append('path')
    # The solr geometry field is named '[geo]'.
    if 'geo' in fields:
        i_geo = fields.index('geo')
        fields.remove('geo')
        fields.insert(i_geo, '[geo]')
    # Create the temporary workspace.
    task_folder = os.path.join(request['folder'], 'temp')
    if not os.path.exists(task_folder):
        os.makedirs(task_folder)
    headers = {'x-access-token': task_utils.get_security_token(request['owner'])}
    num_results, response_index = task_utils.get_result_count(request['params'])
    # Base solr select query; the endpoint comes from argv when provided.
    if len(sys.argv) == 2:
        query = '{0}/solr/v0/select?&wt=json&fl={1}'.format('http://localhost:8888', ','.join(fields))
    else:
        query = '{0}/select?&wt=json&fl={1}'.format(sys.argv[2].split('=')[1], ','.join(fields))
    if 'query' in request['params'][response_index]:
        # Voyager Search Traditional UI
        for p in request['params']:
            if 'query' in p:
                request_qry = p['query']
                break
        if 'voyager.list' in request_qry:
            query += '&voyager.list={0}'.format(request_qry['voyager.list'])
        # Replace spaces with %20 & remove \\ to avoid HTTP Error 400.
        if 'fq' in request_qry:
            try:
                if isinstance(request_qry['fq'], list):
                    for fq in request_qry['fq']:
                        try:
                            query += '&fq={0}'.format(str(fq))
                        except UnicodeEncodeError:
                            query += '&fq={0}'.format(str(fq.encode('utf-8')))
                else:
                    query += '&fq={0}'.format(request_qry['fq'])
                # Strip solr local-params ({!expand}, {!tag=...}) that the
                # export query does not need.
                if '{!expand}' in query:
                    query = query.replace('{!expand}', '')
                if '{!tag' in query:
                    tag = re.findall('{!(.*?)}', query)
                    if tag:
                        tag_str = "{!" + tag[0] + "}"
                        query = query.replace(tag_str, '')
                query = query.replace(' ', '%20')
            except AttributeError:
                # 'fq' items were not plain strings; fall back per item.
                for qry in request_qry['fq']:
                    query += '&fq={0}'.format(qry).replace("\\", "").replace(' ', '%20')
        if 'q' in request_qry:
            try:
                query += '&q={0}'.format(request_qry['q'].replace("\\", ""))
                query = query.replace(' ', '%20')
            except UnicodeEncodeError:
                query += '&q={0}'.format(request_qry['q'].encode('utf-8').replace("\\", ""))
                query = query.replace(' ', '%20')
            except AttributeError:
                # 'q' is a list of terms rather than a single string.
                for qry in request_qry['q']:
                    query += '&q={0}'.format(qry).replace("\\", "").replace(' ', '%20')
        if 'place' in request_qry:
            try:
                query += '&place={0}'.format(request_qry['place'].replace("\\", ""))
                query = query.replace(' ', '%20')
            except AttributeError:
                for qry in request_qry['place']:
                    query += '&place={0}'.format(qry).replace("\\", "").replace(' ', '%20')
        if 'place.op' in request_qry:
            query += '&place.op={0}'.format(request_qry['place.op'])
        # Paging placeholders filled in per chunk below.
        query += '&rows={0}&start={1}'
        exported_cnt = 0.
        for i in xrange(0, num_results, chunk_size):  # Python 2 xrange
            url = query.replace('{0}', str(chunk_size)).replace('{1}', str(i))
            res = requests.get(url, verify=verify_ssl, headers=headers)
            jobs = res.json()['response']['docs']
            if out_format == 'CSV':
                export_to_csv(jobs, file_name, task_folder, fields)
            elif out_format == 'XML':
                export_to_xml(jobs, file_name, task_folder)
            elif out_format == 'SHP':
                export_to_shp(jobs, file_name, task_folder)
            exported_cnt += chunk_size
            if exported_cnt > num_results:
                status_writer.send_percent(100, 'exported: 100%', 'export_results')
            else:
                # exported_cnt is a float (initialized 0.), so this is true
                # division even under Python 2.
                percent_done = exported_cnt / num_results
                status_writer.send_percent(percent_done, '{0}: {1:.0f}%'.format("exported", percent_done * 100), 'export_results')
    else:
        # Voyager Search Portal/Cart UI
        ids = []
        for p in request['params']:
            if 'ids' in p:
                ids = p['ids']
                break
        # Pad the last group with '' so every group has chunk_size entries.
        groups = task_utils.grouper(list(ids), chunk_size, '')
        i = 0
        for group in groups:
            i += len([v for v in group if not v == ''])
            results = requests.get(query + '&ids={0}'.format(','.join(group)), verify=verify_ssl, headers=headers)
            # NOTE(review): eval() on an HTTP response body is unsafe
            # (arbitrary code execution); the other branch uses res.json(),
            # so this should be json.loads(results.text) -- confirm and fix.
            jobs = eval(results.text)['response']['docs']
            if out_format == 'CSV':
                export_to_csv(jobs, file_name, task_folder, fields)
            elif out_format == 'XML':
                export_to_xml(jobs, file_name, task_folder)
            elif out_format == 'SHP':
                export_to_shp(jobs, file_name, task_folder)
            percent_done = float(i) / num_results
            status_writer.send_percent(percent_done, '{0}: {1:.0f}%'.format("exported", percent_done * 100), 'export_results')
    # Zip up outputs.
    if exported_count == 0:
        # Nothing exported at all: report failure with the collected reasons.
        status_writer.send_state(status.STAT_FAILED)
        task_utils.report(os.path.join(request['folder'], '__report.json'), exported_count, 0, errors_count, errors_reasons)
    else:
        task_utils.report(os.path.join(request['folder'], '__report.json'), exported_count, 0, errors_count, errors_reasons)
        zip_file = task_utils.zip_data(task_folder, 'output.zip')
        shutil.move(zip_file, os.path.join(os.path.dirname(task_folder), os.path.basename(zip_file)))
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=anomalous-backslash-in-string
"""Tests for tensorflow.kernels.functional_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
import tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
# pylint: disable=invalid-name
def simple_scoped_fn(a, x):
  """Simple function: (a, x) -> 2(x+a), but with "2" as a variable in scope."""
  with variable_scope.variable_scope("body"):
    # Dummy variable, just to check that scoping works as intended.
    two = variable_scope.get_variable(
        "two", [],
        dtype=dtypes.int32,
        initializer=init_ops.constant_initializer(2))
    # The multiply uses the scoped variable, so callers can exercise reuse.
    return math_ops.multiply(math_ops.add(a, x), two)
@test_util.with_control_flow_v2
class MapFnTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testMap_Simple(self):
  """map_fn applies an elementwise lambda over a 1-D tensor."""
  values = [1, 2, 3, 4, 5, 6]
  data = constant_op.constant(values, name="data")
  mapped = map_fn.map_fn(
      lambda x: math_ops.multiply(math_ops.add(x, 3), 2), data)
  expected = np.array([(v + 3) * 2 for v in values])
  self.assertAllEqual(expected, self.evaluate(mapped))
def testMapDtypeEager(self):
  """In eager mode, the dtype= argument fixes the output dtype."""
  with context.eager_mode():
    mapped = map_fn.map_fn(lambda x: constant_op.constant(""),
                           constant_op.constant([]),
                           dtype=dtypes.string)
    self.assertEqual(mapped.dtype, dtypes.string)
def testMapSparseTensor(self):
  """An identity fn passes SparseTensors through structurally intact."""
  with self.cached_session():
    source = sparse_tensor.SparseTensor(
        indices=[[0, 0], [0, 1], [1, 0]],
        values=constant_op.constant([0, 1, 2]),
        dense_shape=[2, 2])
    mapped = map_fn.map_fn(lambda x: x, source)
    self.assertAllEqual(mapped.indices, source.indices)
    self.assertAllEqual(mapped.values, source.values)
    self.assertAllEqual(mapped.dense_shape, source.dense_shape)
def testMapRaggedTensor(self):
  """map_fn maps over a RaggedTensor when given a RaggedTensorSpec."""
  # Note: there are additional tests in ragged/ragged_map_fn_op_test.py
  with self.cached_session():
    rt = ragged_factory_ops.constant([[1, 2], [3]])
    result = map_fn.map_fn(
        lambda x: x + 1,
        rt,
        fn_output_signature=ragged_tensor.RaggedTensorSpec([None], rt.dtype))
    self.assertAllEqual([[2, 3], [4]], result)
    # The ragged row dimension stays unknown in the static shape.
    self.assertEqual([2, None], result.shape.as_list())
@test_util.run_in_graph_and_eager_modes
def testMapOverScalarErrors(self):
  """map_fn rejects scalar elems (and lists of python scalars)."""
  with self.assertRaisesRegex(ValueError, "must be .* Tensor.* not scalar"):
    map_fn.map_fn(lambda x: x, [1, 2])
  # A bare scalar has no axis 0 to map over.
  with self.assertRaisesRegex(ValueError, "must be .* Tensor.* not scalar"):
    map_fn.map_fn(lambda x: x, 1)
@test_util.run_deprecated_v1
def testMap_Scoped(self):
  """Variables created inside the mapped fn live under the caller's
  variable scope and are reused across map_fn calls once reuse is on."""
  with self.cached_session() as sess:

    def double_scoped(x):
      """2x with a dummy 2 that is scoped."""
      with variable_scope.variable_scope("body"):
        # Dummy variable, just to check that scoping works as intended.
        two = variable_scope.get_variable(
            "two", [],
            dtype=dtypes.int32,
            initializer=init_ops.constant_initializer(2))
        return math_ops.multiply(x, two)

    with variable_scope.variable_scope("root") as varscope:
      elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
      doubles = np.array([2 * x for x in [1, 2, 3, 4, 5, 6]])

      r = map_fn.map_fn(double_scoped, elems)
      # Check that we have the one variable we asked for here.
      self.assertEqual(len(variables.trainable_variables()), 1)
      self.assertEqual(variables.trainable_variables()[0].name,
                       "root/body/two:0")
      sess.run([variables.global_variables_initializer()])
      self.assertAllEqual(doubles, self.evaluate(r))

      # Now let's reuse our single variable.
      varscope.reuse_variables()
      r = map_fn.map_fn(double_scoped, elems)
      # Still exactly one trainable variable: the second call reused it.
      self.assertEqual(len(variables.trainable_variables()), 1)
      self.assertAllEqual(doubles, self.evaluate(r))
@test_util.run_deprecated_v1
def testMap_Grad(self):
  """Gradients flow through map_fn to both the elems and captured tensors."""
  with self.cached_session():
    param = constant_op.constant(2.0)
    elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="elems")
    # y_i = x_i^2 * param, so dy/dparam = sum(x_i^2) = 91
    # and dy/dx_i = 2 * x_i * param = 4 * x_i.
    y = map_fn.map_fn(
        lambda x: math_ops.multiply(math_ops.square(x), param), elems)
    r_param = gradients_impl.gradients(y, param)[0]
    r_elems = gradients_impl.gradients(y, elems)[0]
    self.assertAllEqual(91.0, self.evaluate(r_param))
    self.assertAllEqual([4.0, 8.0, 12.0, 16.0, 20.0, 24.0],
                        self.evaluate(r_elems))
@test_util.run_in_graph_and_eager_modes
def testMap_SimpleNotTensor(self):
  """map_fn accepts a plain numpy array as its elems argument."""
  values = np.array([1, 2, 3, 4, 5, 6])
  mapped = map_fn.map_fn(
      lambda x: math_ops.multiply(math_ops.add(x, 3), 2), values)
  expected = np.array([(v + 3) * 2 for v in values])
  self.assertAllEqual(expected, self.evaluate(mapped))
@test_util.run_in_graph_and_eager_modes
def testMap_SingleInputMultiOutput(self):
  """A fn returning a tuple yields a tuple of stacked outputs."""
  values = np.array([1, 2, 3, 4, 5, 6])
  mapped = map_fn.map_fn(
      lambda x: ((x + 3) * 2, -(x + 3) * 2),
      values,
      dtype=(dtypes.int64, dtypes.int64))
  self.assertEqual(2, len(mapped))
  for part in mapped:
    self.assertEqual((6,), part.get_shape())
  evaluated = self.evaluate(mapped)
  self.assertAllEqual((values + 3) * 2, evaluated[0])
  self.assertAllEqual(-(values + 3) * 2, evaluated[1])
@test_util.run_in_graph_and_eager_modes
def testMap_MultiOutputMismatchedDtype(self):
nums = np.array([1, 2, 3, 4, 5, 6])
with self.assertRaisesRegex(
TypeError, r"two structures don't have the same nested structure"):
# lambda emits tuple, but dtype is a list
map_fn.map_fn(
lambda x: ((x + 3) * 2, -(x + 3) * 2),
nums,
dtype=[dtypes.int64, dtypes.int64])
@test_util.run_in_graph_and_eager_modes
def testMap_MultiInputSingleOutput(self):
nums = np.array([1, 2, 3, 4, 5, 6])
r = map_fn.map_fn(
lambda x: x[0] * x[1][0] + x[1][1], (nums, (nums, -nums)),
dtype=dtypes.int64)
self.assertEqual((6,), r.get_shape())
received = self.evaluate(r)
self.assertAllEqual(nums * nums + (-nums), received)
@test_util.run_in_graph_and_eager_modes
def testMap_MultiInputSameStructureOutput(self):
nums = np.array([1, 2, 3, 4, 5, 6])
r = map_fn.map_fn(lambda x: (x[1][0], (x[1][1], x[0])),
(nums, (2 * nums, -nums)))
r = [r[0], r[1][0], r[1][1]]
self.assertEqual((6,), r[0].get_shape())
self.assertEqual((6,), r[1].get_shape())
self.assertEqual((6,), r[2].get_shape())
received = self.evaluate(r)
self.assertAllEqual(2 * nums, received[0])
self.assertAllEqual(-nums, received[1])
self.assertAllEqual(nums, received[2])
  @test_util.run_in_graph_and_eager_modes
  def testMap_autograph_indirect(self):
    # Regression test: a Python function containing an `if` on a Tensor is
    # reached only *indirectly*, through map_fn inside a tf.function.
    # AutoGraph must still convert `test_function` for the Tensor `if` to
    # work in graph mode.
    def test_function(x):
      cond = constant_op.constant(-1)
      # Both branches return x unchanged on purpose: the test is about the
      # Tensor-dependent `if` needing conversion, not about the branches.
      if cond == 0:
        result = x
      else:
        result = x
      return result
    @def_function.function
    def map_call(x):
      return map_fn.map_fn(test_function, x)
    x = constant_op.constant([1])
    y = map_call(x)
    # Identity mapping: the output equals the input.
    self.assertAllEqual([1], self.evaluate(y))
@test_util.run_in_graph_and_eager_modes
def testMapShape(self):
x = constant_op.constant([[1, 2, 3], [4, 5, 6]])
y = map_fn.map_fn(lambda e: e, x)
self.assertAllEqual(y.get_shape(), self.evaluate(y).shape)
@test_util.run_deprecated_v1
def testMapUnknownShape(self):
x = array_ops.placeholder(dtypes.float32)
y = map_fn.map_fn(lambda e: e, x)
self.assertIs(None, y.get_shape().dims)
# TODO(b/124383826): this test fails in eager: the iterable is of length 0 so
# so the body of the while loop never executes
@test_util.run_v1_only("b/120545219")
def testMapEmptyScalar(self):
map_return = map_fn.map_fn(lambda x: 1,
constant_op.constant([], dtype=dtypes.int32))
self.assertAllEqual([0], map_return.get_shape().dims)
self.assertAllEqual([0], self.evaluate(map_return).shape)
# TODO(b/124383826): this test fails in eager: the iterable is of length 0 so
# so the body of the while loop never executes
@test_util.run_v1_only("b/120545219")
def testMapEmptyTensor(self):
with self.cached_session():
map_return = map_fn.map_fn(lambda x: array_ops.zeros([3, 2]),
constant_op.constant([]))
self.assertAllEqual([0, 3, 2], map_return.get_shape().dims)
self.assertAllEqual([0, 3, 2], self.evaluate(map_return).shape)
@test_util.run_in_graph_and_eager_modes
def testMapEmptyList(self):
x = []
with self.assertRaisesRegex(ValueError, r"elems must be a Tensor or"):
_ = map_fn.map_fn(lambda e: e, x)
if __name__ == "__main__":
  # Delegate to the TensorFlow test runner.
  test.main()
# pylint: enable=invalid-name
|
|
"""Test list, dict, etc."""
import unittest
from pytype.pytd import pytd
from pytype.tests import test_inference
class ContainerTest(test_inference.InferenceTest):
  """Type-inference tests for container types: tuple, list, set, dict.

  Each test runs a small Python snippet through ``self.Infer`` and checks
  the inferred signatures (``assertHasOnlySignatures``) or the whole typed
  module (``assertTypesMatchPytd``). The triple-quoted snippets are the
  code under analysis — do not reformat them casually.
  """

  def testTuplePassThrough(self):
    """An identity function preserves the tuple argument type."""
    with self.Infer("""
      def f(x):
        return x
      f((3, "str"))
    """, deep=False, solve_unknowns=False, extract_locals=False) as ty:
      self.assertHasOnlySignatures(
          ty.Lookup("f"),
          ((pytd.HomogeneousContainerType(self.tuple, (self.intorstr,)),),
           pytd.HomogeneousContainerType(self.tuple, (self.intorstr,))))

  def testTuple(self):
    """Indexing a tuple yields the union of its element types."""
    with self.Infer("""
      def f(x):
        return x[0]
      f((3, "str"))
    """, deep=False, solve_unknowns=False, extract_locals=False) as ty:
      self.assertHasOnlySignatures(
          ty.Lookup("f"),
          ((pytd.HomogeneousContainerType(self.tuple, (self.intorstr,)),),
           self.intorstr))

  def testTupleSwap(self):
    """Re-packing tuple elements keeps the homogeneous element union."""
    with self.Infer("""
      def f(x):
        return (x[1], x[0])
      f((3, "str"))
    """, deep=False, solve_unknowns=False, extract_locals=False) as ty:
      self.assertHasOnlySignatures(
          ty.Lookup("f"),
          ((pytd.HomogeneousContainerType(self.tuple, (self.intorstr,)),),
           pytd.HomogeneousContainerType(self.tuple, (self.intorstr,))))

  def testEmptyTuple(self):
    """An empty tuple is typed as tuple<nothing>."""
    with self.Infer("""
      def f():
        return ()
      f()
    """, deep=False, solve_unknowns=False, extract_locals=False) as ty:
      self.assertHasOnlySignatures(
          ty.Lookup("f"),
          ((), pytd.HomogeneousContainerType(self.tuple, (pytd.NothingType(),))))

  def testSetsSanity(self):
    """set([1]) plus .add(int) stays set<int>."""
    with self.Infer("""
      def f():
        x = set([1])
        x.add(10)
        return x
      f()
    """, deep=False, solve_unknowns=False, extract_locals=False) as ty:
      self.assertHasOnlySignatures(
          ty.Lookup("f"),
          ((), pytd.HomogeneousContainerType(pytd.ClassType("set"), (self.int,))))

  def testSetsAdd(self):
    """An initially-empty set picks up the type of added elements."""
    with self.Infer("""
      def f():
        x = set([])
        x.add(1)
        x.add(10)
        return x
      f()
    """, deep=False, solve_unknowns=False, extract_locals=False) as ty:
      self.assertHasOnlySignatures(
          ty.Lookup("f"),
          ((), pytd.HomogeneousContainerType(pytd.ClassType("set"), (self.int,))))

  def testSets(self):
    """Set type survives branching, union with set(), and .add."""
    with self.Infer("""
      def f():
        x = set([1,2,3])
        if x:
          x = x | set()
          y = x
          return x
        else:
          x.add(10)
          return x
      f()
    """, deep=False, solve_unknowns=False, extract_locals=False) as ty:
      self.assertHasOnlySignatures(
          ty.Lookup("f"),
          ((), pytd.HomogeneousContainerType(pytd.ClassType("set"), (self.int,))))

  def testListLiteral(self):
    """A homogeneous list literal is typed list<int>."""
    with self.Infer("""
      def f():
        return [1, 2, 3]
      f()
    """, deep=False, solve_unknowns=False, extract_locals=False) as ty:
      self.assertHasOnlySignatures(
          ty.Lookup("f"),
          ((), pytd.HomogeneousContainerType(self.list, (self.int,))))

  def testListAppend(self):
    """Appending ints to an empty list yields list<int>."""
    with self.Infer("""
      def f():
        x = []
        x.append(1)
        x.append(2)
        x.append(3)
        return x
      f()
    """, deep=False, solve_unknowns=False, extract_locals=False) as ty:
      self.assertHasOnlySignatures(
          ty.Lookup("f"),
          ((), pytd.HomogeneousContainerType(self.list, (self.int,))))

  def testListConcat(self):
    """Concatenating int lists with + yields list<int>."""
    with self.Infer("""
      def f():
        x = []
        x.append(1)
        x.append(2)
        x.append(3)
        return [0] + x
      f()
    """, deep=False, solve_unknowns=False, extract_locals=False) as ty:
      self.assertHasOnlySignatures(
          ty.Lookup("f"),
          ((), pytd.HomogeneousContainerType(self.list, (self.int,))))

  def testListConcatMultiType(self):
    """Concatenation unions the element types of all the operands."""
    with self.Infer("""
      def f():
        x = []
        x.append(1)
        x.append("str")
        return x + [1.3] + x
      f()
    """, deep=False, solve_unknowns=False, extract_locals=False) as ty:
      self.assertHasOnlySignatures(
          ty.Lookup("f"),
          ((),
           pytd.HomogeneousContainerType(
               self.list,
               (pytd.UnionType((self.int, self.float, self.str)),))))

  def testUnionIntoTypeParam(self):
    """Appending a union-typed value parameterizes the list with the union."""
    with self.Infer("""
      y = __any_object__
      if y:
        x = 3
      else:
        x = 3.1
      l = []
      l.append(x)
    """, deep=False, solve_unknowns=False, extract_locals=True) as ty:
      self.assertTypesMatchPytd(ty, """
        x: int or float
        y: ?
        l: list<int or float>
      """)

  def testListConcatUnlike(self):
    """Concatenating str and int lists yields list<int or str>."""
    with self.Infer("""
      def f():
        x = []
        x.append(1)
        x.append(2)
        x.append(3)
        return ["str"] + x
      f()
    """, deep=False, solve_unknowns=False, extract_locals=False) as ty:
      self.assertHasOnlySignatures(
          ty.Lookup("f"),
          ((), pytd.HomogeneousContainerType(self.list, (self.intorstr,))))

  def testAnyObject(self):
    """__any_object__ (and calling it) infers to the anything type."""
    with self.Infer("""
      def f():
        return __any_object__
      def g():
        return __any_object__()
      def h():
        return __any_object__("name")
      f(); g(); h()
    """, deep=False, solve_unknowns=False, extract_locals=True) as ty:
      self.assertHasOnlySignatures(ty.Lookup("f"), ((), self.anything))
      self.assertHasOnlySignatures(ty.Lookup("g"), ((), self.anything))
      self.assertHasOnlySignatures(ty.Lookup("h"), ((), self.anything))

  def testDictLiteral(self):
    """A str->int dict literal is typed dict<str, int>."""
    with self.Infer("""
      def f():
        return {"test": 1, "arg": 42}
      f()
    """, deep=False, solve_unknowns=False, extract_locals=False) as ty:
      self.assertHasOnlySignatures(
          ty.Lookup("f"),
          ((), self.str_int_dict))

  def testDictEmptyConstructor(self):
    """dict() with no arguments is dict<nothing, nothing>."""
    with self.Infer("""
      def f():
        return dict()
      f()
    """, deep=False, solve_unknowns=False, extract_locals=False) as ty:
      self.assertHasOnlySignatures(
          ty.Lookup("f"),
          ((), self.nothing_nothing_dict))

  def testDictConstructor(self):
    """dict built from (int, int) pairs is dict<int, int>."""
    with self.Infer("""
      def f():
        return dict([(1, 2), (3, 4)])
      f()
    """, deep=False, solve_unknowns=False, extract_locals=False) as ty:
      self.assertHasOnlySignatures(
          ty.Lookup("f"),
          ((), self.int_int_dict))

  @unittest.skip("Needs more precise support for tuples")
  def testDictConstructor2(self):
    """dict built from (int, str) pairs should be dict<int, str>."""
    with self.Infer("""
      def f():
        return dict([(1, "bar"), (2, "foo")])
      f()
    """, deep=False, solve_unknowns=False, extract_locals=False) as ty:
      self.assertHasOnlySignatures(
          ty.Lookup("f"),
          ((), self.int_str_dict))

  def testDictUpdate(self):
    """Item assignment parameterizes an initially-empty dict."""
    with self.Infer("""
      def f():
        d = {}
        d["test"] = 1
        d["arg"] = 42
        return d
      f()
    """, deep=False, solve_unknowns=False, extract_locals=False) as ty:
      self.assertHasOnlySignatures(
          ty.Lookup("f"),
          ((), self.str_int_dict))

  def testForIter(self):
    """Attributes written inside a for-loop join those set in __init__."""
    with self.Infer("""
      class A:
        def __init__(self):
          self.parent = "foo"
      def set_parent(l):
        for e in l:
          e.parent = 1
      def f():
        a = A()
        b = A()
        set_parent([a, b])
        return a.parent
      f()
    """, deep=False, solve_unknowns=False, extract_locals=False) as ty:
      self.assertHasOnlySignatures(ty.Lookup("f"),
                                   ((),
                                    self.intorstr))

  def testOverloading(self):
    """Attributes set through a base-class method resolve on subclasses."""
    with self.Infer("""
      class Base(object):
        parent = None
        children = ()
        def bar(self, new):
          for ch in self.parent.children:
            ch.foobar = 3
      class Node(Base):
        def __init__(self, children):
          self.children = list(children)
          for ch in self.children:
            ch.parent = self
      class Leaf(Base):
        def __init__(self):
          pass
      def f():
        l1 = Leaf()
        l2 = Leaf()
        n1 = Node([l1, l2])
        l2.bar(None)
        return l2.foobar
      f()
    """, deep=False, solve_unknowns=False, extract_locals=False) as ty:
      self.assertHasOnlySignatures(ty.Lookup("f"),
                                   ((),
                                    self.int))

  def testClassAttr(self):
    """A class attribute overwritten on an instance is tracked per use."""
    with self.Infer("""
      class Node(object):
        children = ()
      def f():
        n1 = Node()
        n1.children = [n1]
        for ch in n1.children:
          ch.foobar = 3
        return n1.foobar
      f()
    """, deep=False, solve_unknowns=False, extract_locals=False) as ty:
      self.assertHasOnlySignatures(ty.Lookup("f"),
                                   ((),
                                    self.int))

  def testHeterogeneous(self):
    """Indexing a mixed-type list yields the union of element types."""
    with self.Infer("""
      def f():
        x = list()
        x.append(3)
        x.append("str")
        return x[0]
      f()
    """, deep=False, solve_unknowns=False, extract_locals=False) as ty:
      self.assertHasOnlySignatures(ty.Lookup("f"),
                                   ((),
                                    self.intorstr))

  def testListComprehension(self):
    # uses byte_LIST_APPEND
    with self.Infer("""
      def f():
        return [i for i in (1,2,3)]
      f()
    """, deep=False, solve_unknowns=False, extract_locals=False) as ty:
      self.assertHasOnlySignatures(ty.Lookup("f"),
                                   ((),
                                    self.int_list))

  def testSetComprehension(self):
    # uses byte_SET_ADD
    with self.Infer("""
      def f():
        return {i for i in [1,2,3]}
      f()
    """, deep=False, solve_unknowns=False, extract_locals=False) as ty:
      self.assertHasOnlySignatures(ty.Lookup("f"),
                                   ((),
                                    self.int_set))

  def testDictComprehension(self):
    # uses byte_MAP_ADD
    with self.Infer("""
      def f():
        return {i: i for i in xrange(3)}
      f()
    """, deep=False, solve_unknowns=False, extract_locals=False) as ty:
      self.assertHasOnlySignatures(ty.Lookup("f"),
                                   ((),
                                    self.int_int_dict))

  def testLeakingType(self):
    """A comprehension variable is visible (as `type`) at module level."""
    with self.Infer("""
      import sys
      a = [str(ty) for ty in (int, bool)[:len(sys.argv)]]
    """, deep=True, solve_unknowns=False, extract_locals=True) as ty:
      self.assertTypesMatchPytd(ty, """
        sys: module
        a: list<str>
        ty: type
      """)

  def testEmptyOrString(self):
    """A dict filled with strings types its values as str, not empty."""
    with self.Infer("""
      d = dict()
      d["a"] = "queen"
      entry = d["a"]
      open('%s' % entry, 'w')
    """, deep=False, solve_unknowns=False, extract_locals=True) as ty:
      self.assertTypesMatchPytd(ty, """
        d: dict<str, str>
        entry: str
      """)

  def testDictInit(self):
    """dict([]) from an empty list is dict<nothing, nothing>."""
    with self.Infer("""
      def f():
        return dict([])
    """, deep=True, solve_unknowns=False, extract_locals=True) as ty:
      self.assertTypesMatchPytd(ty, """
        def f() -> dict<nothing, nothing>
      """)

  def testDictTupleInit(self):
    """dict from a list of (str, str) tuples is dict<str, str>."""
    with self.Infer("""
      def f():
        return dict([("foo", "foo")])
    """, deep=True, solve_unknowns=False, extract_locals=True) as ty:
      self.assertTypesMatchPytd(ty, """
        def f() -> dict<str, str>
      """)

  def testEmptyTupleAsArg(self):
    """Passing the empty tuple to isinstance() still infers bool."""
    with self.Infer("""
      def f(x):
        if x:
          return isinstance(1, ())
        else:
          return 3j
    """, deep=True, solve_unknowns=False, extract_locals=True) as ty:
      self.assertTypesMatchPytd(ty, """
        def f(x) -> bool or complex
      """)

  def testEmptyTypeParamAsArg(self):
    """map() over an empty tuple doesn't break the join() return type."""
    with self.Infer("""
      def f():
        return u"".join(map(unicode, ()))
    """, deep=True, solve_unknowns=False, extract_locals=True) as ty:
      self.assertTypesMatchPytd(ty, """
        def f() -> unicode
      """)

  def testAccessEmptyDictInIf(self):
    """Reading from an (always-empty) dict branch doesn't pollute the type."""
    with self.Infer("""
      class Foo(object):
        pass
      def f(key):
        d = {}
        if key is None:
          e = Foo()
        else:
          e = d[key]
        e.next = None
        return e
    """, deep=True, solve_unknowns=False, extract_locals=True) as ty:
      self.assertTypesMatchPytd(ty, """
        class Foo:
          next: NoneType
        def f(key) -> Foo
      """)

  def testCascade(self):
    """A union-typed variable flows through divmod into a union tuple."""
    with self.Infer("""
      if __any_object__:
        x = 3
      else:
        x = 3.14
      y = divmod(x, x)
    """, deep=True, solve_unknowns=False, extract_locals=True) as ty:
      self.assertTypesMatchPytd(ty, """
        x: float or int
        y: tuple<float or int>
      """)

  def testMaybeAny(self):
    """Solving unknowns narrows __any_object__ via its method usage."""
    with self.Infer("""
      x = __any_object__
      x.as_integer_ratio()
      if x:
        x = 1
      y = divmod(x, 3.14)
    """, deep=True, solve_unknowns=True) as ty:
      self.assertTypesMatchPytd(ty, """
        x: float or int
        y: tuple<complex or float>
      """)
if __name__ == "__main__":
  # Delegate to pytype's shared inference-test runner.
  test_inference.main()
|
|
# -*- coding: utf-8 -*-
"""Django settings for kitsune project."""
import logging
import os
import platform
import re
from datetime import date
from bundles import MINIFY_BUNDLES
from kitsune.lib.sumo_locales import LOCALES
DEBUG = True
TEMPLATE_DEBUG = DEBUG
STAGE = False
LOG_LEVEL = logging.INFO
SYSLOG_TAG = 'http_sumo_app'
# Repository directory.
ROOT = os.path.dirname(os.path.dirname(__file__))
# Django project directory.
PROJECT_ROOT = os.path.dirname(__file__)
PROJECT_MODULE = 'kitsune'


def path(*parts):
    """Return an absolute path built off of the repository ROOT.

    Was ``path = lambda *a: ...``; PEP 8 (E731) says to use ``def`` for
    named callables. Behavior and call sites are unchanged.
    """
    return os.path.abspath(os.path.join(ROOT, *parts))
ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASES = {
    'default': {
        # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3'
        # or 'oracle'.
        'ENGINE': 'django.db.backends.mysql',
        # Or path to database file if sqlite3.
        'NAME': 'kitsune',
        # Not used with sqlite3.
        'USER': '',
        # Not used with sqlite3.
        'PASSWORD': '',
        # Set to empty string for localhost. Not used with sqlite3.
        'HOST': '',
        # Set to empty string for default. Not used with sqlite3.
        'PORT': '',
        # Default new MySQL tables to the InnoDB storage engine.
        'OPTIONS': {'init_command': 'SET storage_engine=InnoDB'},
    }
}
# Route queries through multidb's pinning master/slave router.
DATABASE_ROUTERS = ('multidb.PinningMasterSlaveRouter',)
# Put the aliases for your slave databases in this list
SLAVE_DATABASES = []
# Cache Settings
# CACHES = {
#     'default': {
#         'BACKEND': 'caching.backends.memcached.MemcachedCache',
#         'LOCATION': ['localhost:11211'],
#         'PREFIX': 'sumo:',
#     },
# }
# Setting this to the Waffle version.
WAFFLE_CACHE_PREFIX = 'w0.7.7a:'
# Addresses email comes from
DEFAULT_FROM_EMAIL = 'notifications@support.mozilla.org'
DEFAULT_REPLY_TO_EMAIL = 'no-reply@mozilla.org'
SERVER_EMAIL = 'server-error@support.mozilla.org'
EMAIL_SUBJECT_PREFIX = '[support] '
# Hostname of the machine running this process.
PLATFORM_NAME = platform.node()
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'US/Pacific'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-US'
# Supported languages
# Note: We periodically add locales to this list and it is easier to
# review changes with one locale per line.
SUMO_LANGUAGES = (
    'af',
    'ar',
    'bg',
    'bn-BD',
    'bn-IN',
    'bs',
    'ca',
    'cs',
    'da',
    'de',
    'ee',
    'el',
    'en-US',
    'es',
    'et',
    'eu',
    'fa',
    'fi',
    'fr',
    'fy-NL',
    'gu-IN',
    'ha',
    'he',
    'hi-IN',
    'hr',
    'hu',
    'id',
    'ig',
    'it',
    'ja',
    'km',
    'ko',
    'ln',
    'lt',
    'ne-NP',
    'nl',
    'no',
    'pl',
    'pt-BR',
    'pt-PT',
    'ro',
    'ru',
    'si',
    'sk',
    'sl',
    'sq',
    'sr-Cyrl',
    'sw',
    'sv',
    'ta',
    'ta-LK',
    'te',
    'th',
    'tr',
    'uk',
    'ur',
    'vi',
    'wo',
    'xh',
    'xx',  # This is a test locale
    'yo',
    'zh-CN',
    'zh-TW',
    'zu',
)
# These languages won't show a warning about FxOS when contributors try
# to add content.
FXOS_LANGUAGES = [
    # BUG FIX: 'af' previously had no trailing comma, so Python's implicit
    # string concatenation silently fused it with the next entry into the
    # bogus locale 'afbn-BD', dropping both 'af' and 'bn-BD' from the list.
    'af',
    'bn-BD',
    'bn-IN',
    'cs',
    'de',
    'ee',
    'el',
    'en-US',
    'es',
    'fr',
    'ha',
    'hi-IN',
    'hr',
    'hu',
    'ig',
    'it',
    'ln',
    'nl',
    'pl',
    'pt-BR',
    'pt-PT',
    'ro',
    'ru',
    'sr',
    'ta',
    'sr-Cyrl',
    'sw',
    'tr',
    'wo',
    'xh',
    'yo',
    'zu',
]
# These languages will get a wiki page instead of the product and topic pages.
SIMPLE_WIKI_LANGUAGES = [
    'et',
]
# Languages that should show up in language switcher.
# (Generator expressions instead of the old intermediate list literals;
# same resulting tuples/dicts, one less throwaway list each.)
LANGUAGE_CHOICES = tuple(
    (lang, LOCALES[lang].native) for lang in SUMO_LANGUAGES if lang != 'xx')
LANGUAGE_CHOICES_ENGLISH = tuple(
    (lang, LOCALES[lang].english) for lang in SUMO_LANGUAGES if lang != 'xx')
# Lowercased locale code -> native language name.
LANGUAGES_DICT = dict((lang.lower(), LOCALES[lang].native)
                      for lang in SUMO_LANGUAGES)
LANGUAGES = LANGUAGES_DICT.items()
# Lowercased locale code -> canonical (mixed-case) locale code.
LANGUAGE_URL_MAP = dict((lang.lower(), lang) for lang in SUMO_LANGUAGES)
# Locales that are known but unsupported. Keys are the locale, values
# are an optional fallback locale, or None, to use the LANGUAGE_CODE.
NON_SUPPORTED_LOCALES = {
    'ach': None,
    'ak': None,
    'an': 'es',
    'as': None,
    'ast': 'es',
    'az': None,
    'be': 'ru',
    'bn': 'bn-BD',
    'br': 'fr',
    'csb': 'pl',
    'eo': None,
    'ff': None,
    'fur': 'it',
    'ga-IE': None,
    'gd': None,
    'gl': 'es',
    'hsb': 'de',
    'hy-AM': None,
    'ilo': None,
    'is': None,
    'kk': None,
    'kn': None,
    'lg': None,
    'lij': 'it',
    'mai': None,
    'mk': None,
    'ml': None,
    'mn': None,
    'mr': None,
    'ms': None,
    'my': None,
    'nb-NO': 'no',
    'nn-NO': 'no',
    'nso': None,
    'oc': 'fr',
    'pa-IN': None,
    'rm': None,
    'rw': None,
    'sah': None,
    'son': None,
    'sv-SE': 'sv',
}
# Elasticsearch analyzer to use per locale when indexing/searching text.
# Locales not listed here use the default analyzer.
ES_LOCALE_ANALYZERS = {
    'ar': 'arabic',
    'bg': 'bulgarian',
    'ca': 'snowball-catalan',
    'cs': 'czech',
    'da': 'snowball-danish',
    'de': 'snowball-german',
    'en-US': 'snowball-english',
    'es': 'snowball-spanish',
    'eu': 'snowball-basque',
    'fa': 'persian',
    'fi': 'snowball-finnish',
    'fr': 'snowball-french',
    'hi-IN': 'hindi',
    'hu': 'snowball-hungarian',
    'id': 'indonesian',
    'it': 'snowball-italian',
    'ja': 'cjk',
    'nl': 'snowball-dutch',
    'no': 'snowball-norwegian',
    'pl': 'polish',
    'pt-BR': 'snowball-portuguese',
    'pt-PT': 'snowball-portuguese',
    'ro': 'snowball-romanian',
    'ru': 'snowball-russian',
    'sv': 'snowball-swedish',
    'th': 'thai',
    'tr': 'snowball-turkish',
    'zh-CN': 'chinese',
    'zh-TW': 'chinese',
}
# Analyzers above that are only available via an Elasticsearch plugin.
ES_PLUGIN_ANALYZERS = [
    'polish'
]
# Whether the plugin-backed analyzers above may be used.
ES_USE_PLUGINS = False
# Gettext text domain for the main message catalog.
TEXT_DOMAIN = 'messages'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as
# not to load the internationalization machinery.
USE_I18N = True
USE_L10N = True
# Database content to extract for localization:
# app label -> model name -> attrs to extract (+ optional extraction comments).
DB_LOCALIZE = {
    'karma': {
        'Title': {
            'attrs': ['name'],
            'comments': ['This is a karma title.'],
        }
    },
    'products': {
        'Product': {
            'attrs': ['title', 'description'],
        },
        'Topic': {
            'attrs': ['title', 'description'],
        },
    },
    'badger': {
        'Badge': {
            'attrs': ['title', 'description'],
        },
    },
}
# locale is in the kitsune git repo project directory, so that's
# up one directory from the PROJECT_ROOT
LOCALE_PATHS = (
    path('locale'),
)
# Use the real robots.txt?
ENGAGE_ROBOTS = False
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = path('media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
STATIC_ROOT = path('static')
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'kitsune.sumo.static_finders.WTFinder')
# Paths that don't require a locale prefix.
SUPPORTED_NONLOCALES = (
    '1',
    'admin',
    'api',
    'favicon.ico',
    'media',
    'postcrash',
    'robots.txt',
    'services',
    'wafflejs',
    'geoip-suggestion',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): development placeholder -- production deployments should
# override this in settings_local.py.
SECRET_KEY = '#%tc(zja8j01!r#h_y)=hy!^k)9az74k+-ib&ij&+**s3-e^_z'
# List of callables that know how to import templates from various
# sources.
TEMPLATE_LOADERS = (
    'jingo.Loader',
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
# Because Jinja2 is the default template loader, add any non-Jinja templated
# apps here:
JINGO_EXCLUDE_APPS = [
    'admin',
    'adminplus',
    'authority',
    'kadmin',
    'rest_framework',
    'waffle',
]
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
    'django.core.context_processors.request',
    'session_csrf.context_processor',
    'django.contrib.messages.context_processors.messages',
    'kitsune.sumo.context_processors.global_settings',
    'kitsune.sumo.context_processors.i18n',
    'kitsune.sumo.context_processors.geoip_cache_detector',
    'kitsune.sumo.context_processors.aaq_languages',
    'jingo_minify.helpers.build_ids',
    'kitsune.messages.context_processors.unread_message_count',
)
# Middleware stack. NOTE: order matters -- see the inline comments below
# (and read_only_mode() splices an extra middleware into this tuple).
MIDDLEWARE_CLASSES = (
    'multidb.middleware.PinningRouterMiddleware',
    'django_statsd.middleware.GraphiteMiddleware',
    'commonware.request.middleware.SetRemoteAddrFromForwardedFor',
    # LocaleURLMiddleware requires access to request.user. These two must be
    # loaded before the LocaleURLMiddleware
    'commonware.middleware.NoVarySessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'kitsune.users.middleware.LogoutDeactivatedUsersMiddleware',
    # This should come before TokenLoginMiddleware, because
    # TokenLoginMiddleware uses this to tell users they have been
    # automatically logged. It also has to come after
    # NoVarySessionMiddleware.
    'django.contrib.messages.middleware.MessageMiddleware',
    # This middleware should come after AuthenticationMiddleware.
    'kitsune.users.middleware.TokenLoginMiddleware',
    # LocaleURLMiddleware must be before any middleware that uses
    # sumo.urlresolvers.reverse() to add locale prefixes to URLs:
    'kitsune.sumo.middleware.LocaleURLMiddleware',
    # Mobile detection should happen in Zeus.
    'kitsune.sumo.middleware.DetectMobileMiddleware',
    'mobility.middleware.XMobileMiddleware',
    'kitsune.sumo.middleware.MobileSwitchMiddleware',
    'kitsune.sumo.middleware.Forbidden403Middleware',
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'kitsune.sumo.middleware.RemoveSlashMiddleware',
    'kitsune.inproduct.middleware.EuBuildMiddleware',
    'kitsune.sumo.middleware.NoCacheHttpsMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'kitsune.sumo.anonymous.AnonymousIdentityMiddleware',
    'session_csrf.CsrfMiddleware',
    'kitsune.twitter.middleware.SessionMiddleware',
    'kitsune.sumo.middleware.PlusToSpaceMiddleware',
    'commonware.middleware.ScrubRequestOnException',
    'django_statsd.middleware.GraphiteRequestTimingMiddleware',
    'waffle.middleware.WaffleMiddleware',
    'commonware.middleware.ContentTypeOptionsHeader',
    'commonware.middleware.StrictTransportMiddleware',
    'commonware.middleware.XSSProtectionHeader',
    'commonware.middleware.RobotsTagHeader',
    # 'axes.middleware.FailedLoginMiddleware'
)
# Auth
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'kitsune.users.auth.TokenLoginBackend',
)
AUTH_PROFILE_MODULE = 'users.Profile'
USER_AVATAR_PATH = 'uploads/avatars/'
DEFAULT_AVATAR = 'img/avatar.png'
AVATAR_SIZE = 48  # in pixels
# 131072 == 2 ** 17 == 128k, in bytes. (The old "100k" comment was wrong.)
MAX_AVATAR_FILE_SIZE = 131072
GROUP_AVATAR_PATH = 'uploads/groupavatars/'
ACCOUNT_ACTIVATION_DAYS = 30
PASSWORD_HASHERS = (
    'kitsune.users.hashers.SHA256PasswordHasher',
)
# One forbidden username per line.
USERNAME_BLACKLIST = path('kitsune', 'configs', 'username-blacklist.txt')
ROOT_URLCONF = '%s.urls' % PROJECT_MODULE
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates"
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    # Check templates in the sumo apps first. There are overrides for the admin
    # templates.
    path('kitsune', 'sumo', 'templates'),
)
# TODO: Figure out why changing the order of apps (for example, moving
# taggit higher in the list) breaks tests.
INSTALLED_APPS = (
    # south needs to come early so tests don't fail
    'south',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'corsheaders',
    'kitsune.users',
    'dennis.django_dennis',
    'tower',
    'jingo_minify',
    'authority',
    'timezones',
    'waffle',
    'kitsune.access',
    'kitsune.sumo',
    'kitsune.search',
    'kitsune.forums',
    'djcelery',
    'badger',
    'cronjobs',
    'tidings',
    'rest_framework.authtoken',
    'kitsune.questions',
    'adminplus',
    'kitsune.kadmin',
    'kitsune.kbadge',
    'taggit',
    'kitsune.flagit',
    'kitsune.upload',
    'product_details',
    'kitsune.wiki',
    'kitsune.kbforums',
    'kitsune.dashboards',
    'kitsune.gallery',
    'kitsune.customercare',
    'kitsune.twitter',
    'kitsune.inproduct',
    'kitsune.postcrash',
    'kitsune.landings',
    'kitsune.announcements',
    'kitsune.community',
    'kitsune.messages',
    'commonware.response.cookies',
    'kitsune.groups',
    'kitsune.karma',
    'kitsune.tags',
    'kitsune.kpi',
    'kitsune.products',
    'kitsune.notifications',
    'rest_framework',
    'statici18n',
    # 'axes',
    # App for Sentry:
    'raven.contrib.django',
    # Extra apps for testing.
    'django_nose',
    'test_utils',
    # Extra app for python migrations.
    'django_extensions',
    # App for sample data
    'eadred',
)
TEST_RUNNER = 'kitsune.sumo.tests.TestSuiteRunner'
def JINJA_CONFIG():
    """Return the Jinja2 environment configuration used by jingo.

    Returns a fresh dict on every call, so callers may mutate the result
    safely. (The previous version imported django.conf.settings and never
    used it; the dead import has been removed.)
    """
    return {
        'extensions': ['tower.template.i18n', 'caching.ext.cache',
                       'jinja2.ext.autoescape', 'jinja2.ext.with_',
                       'jinja2.ext.do'],
        # Render None as the empty string instead of the literal "None".
        'finalize': lambda x: x if x is not None else '',
    }
# Let Tower know about our additional keywords.
# DO NOT import an ngettext variant as _lazy.
TOWER_KEYWORDS = {
    '_lazy': None,
}
# Tells the extract script what files to look for l10n in and what
# function handles the extraction. The Tower library expects this.
tower_tmpl = 'tower.management.commands.extract.extract_tower_template'
tower_python = 'tower.management.commands.extract.extract_tower_python'
# domain -> list of (glob, extractor) pairs; 'ignore' skips the files.
DOMAIN_METHODS = {
    'messages': [
        ('kitsune/forums/**.py', 'ignore'),
        ('kitsune/forums/**.html', 'ignore'),
        ('kitsune/**/tests/**.py', 'ignore'),
        ('kitsune/**/management/**.py', 'ignore'),
        ('kitsune/**.py', tower_python),
        ('kitsune/**/templates/**.html', tower_tmpl),
        ('vendor/src/django-tidings/**/templates/**.html', tower_tmpl),
        ('vendor/src/django-badger/badger/*.py', tower_python),
        ('vendor/src/django-badger/badger/templatetags/*.py', tower_python),
    ],
    'lhtml': [
        ('kitsune/forums/**.lhtml', 'ignore'),
        ('**/templates/**.lhtml', tower_tmpl)
    ],
    'ltxt': [
        ('**/templates/**.ltxt', tower_tmpl),
    ],
    'javascript': [
        # We can't say **.js because that would dive into any libraries.
        ('kitsune/**/static/js/*-all.js', 'ignore'),
        ('kitsune/**/static/js/*-min.js', 'ignore'),
        ('kitsune/**/static/js/*.js', 'javascript'),
    ],
}
# These domains will not be merged into messages.pot and will use
# separate PO files. See the following URL for an example of how to
# set these domains in DOMAIN_METHODS.
# http://github.com/jbalogh/zamboni/blob/d4c64239c24aa2f1e91276909823d1d1b290f0ee/settings.py#L254  # nopep8
STANDALONE_DOMAINS = [
    TEXT_DOMAIN,
    'javascript',
    'yaocho',
]
STATICI18N_DOMAIN = 'javascript'
STATICI18N_PACKAGES = ['kitsune.sumo']
# If you have trouble extracting strings with Tower, try setting this
# to True
TOWER_ADD_HEADERS = True
# Executables used by the asset pipeline (must be on $PATH).
LESS_BIN = 'lessc'
UGLIFY_BIN = 'uglifyjs'
CLEANCSS_BIN = 'cleancss'
NUNJUCKS_PRECOMPILE_BIN = 'nunjucks-precompile'
#
# Sessions
SESSION_COOKIE_AGE = 4 * 7 * 24 * 60 * 60  # 4 weeks, in seconds
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
SESSION_EXISTS_COOKIE = 'sumo_session'
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
#
# Connection information for Elastic
ES_URLS = ['http://127.0.0.1:9200']
# Indexes for reading
ES_INDEXES = {
    'default': 'sumo-20130913',
    'non-critical': 'sumo-non-critical',
    'metrics': 'sumo-metrics',
}
# Indexes for indexing--set this to ES_INDEXES if you want to read to
# and write to the same index.
ES_WRITE_INDEXES = ES_INDEXES
# This is prepended to index names to get the final read/write index
# names used by kitsune. This is so that you can have multiple
# environments pointed at the same ElasticSearch cluster and not have
# them bump into one another.
ES_INDEX_PREFIX = 'sumo'
# Keep indexes up to date as objects are made/deleted.
ES_LIVE_INDEXING = False
# Timeout for querying requests, in seconds.
ES_TIMEOUT = 5
SEARCH_MAX_RESULTS = 1000
SEARCH_RESULTS_PER_PAGE = 10
# Search default settings
SEARCH_DEFAULT_CATEGORIES = (10, 20,)
SEARCH_DEFAULT_MAX_QUESTION_AGE = 180 * 24 * 60 * 60  # 180 days, in seconds
# IA default settings
IA_DEFAULT_CATEGORIES = (10, 20,)
# The length for which we would like the user to cache search forms
# and results, in minutes.
SEARCH_CACHE_PERIOD = 15
# Maximum length of the filename. Forms should use this and raise
# ValidationError if the length is exceeded.
# @see http://code.djangoproject.com/ticket/9893
# Columns are 250 but this leaves 50 chars for the upload_to prefix
MAX_FILENAME_LENGTH = 200
MAX_FILEPATH_LENGTH = 250
# Default storage engine - ours does not preserve filenames
DEFAULT_FILE_STORAGE = 'kitsune.upload.storage.RenameFileStorage'
# Auth and permissions related constants
LOGIN_URL = '/users/login'
LOGOUT_URL = '/users/logout'
LOGIN_REDIRECT_URL = "/"
LOGOUT_REDIRECT_URL = "/"
REGISTER_URL = '/users/register'
# Video settings, hard coded here for now.
# TODO: figure out a way that doesn't need these values
WIKI_VIDEO_WIDTH = 640
WIKI_VIDEO_HEIGHT = 480
IMAGE_MAX_FILESIZE = 1048576  # 1 megabyte, in bytes
THUMBNAIL_SIZE = 120  # Thumbnail size, in pixels
THUMBNAIL_UPLOAD_PATH = 'uploads/images/thumbnails/'
IMAGE_UPLOAD_PATH = 'uploads/images/'
# A string listing image mime types to accept, comma separated.
# String must not contain double quotes!
IMAGE_ALLOWED_MIMETYPES = 'image/jpeg,image/png,image/gif'
# Topics
TOPIC_IMAGE_PATH = 'uploads/topics/'
# Products
PRODUCT_IMAGE_PATH = 'uploads/products/'
# Email
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Read-only mode setup.
READ_ONLY = False
# Turn on read-only mode in settings_local.py by putting this line
# at the VERY BOTTOM: read_only_mode(globals())
def read_only_mode(env):
    """Mutate a settings dict (typically ``globals()``) into read-only mode.

    Intended to be called from the very bottom of settings_local.py as
    ``read_only_mode(globals())``. Raises if no slave database is configured.
    """
    env['READ_ONLY'] = True
    # Point the default DB alias at the first slave so nothing writes to
    # the master.
    slaves = env.get('SLAVE_DATABASES')
    if not slaves:
        raise Exception("We need at least one slave database.")
    env['DATABASES']['default'] = env['DATABASES'][slaves[0]]
    # Sessions (and therefore normal auth) need DB writes, so swap in the
    # read-only auth backend.
    env['AUTHENTICATION_BACKENDS'] = (
        'kitsune.sumo.readonlyauth.ReadOnlyBackend',)
    # Splice the read-only middleware in immediately before CSRF handling.
    middleware = list(env['MIDDLEWARE_CLASSES'])
    csrf_index = middleware.index('session_csrf.CsrfMiddleware')
    middleware.insert(csrf_index, 'kitsune.sumo.middleware.ReadOnlyMiddleware')
    env['MIDDLEWARE_CLASSES'] = tuple(middleware)
# Celery
import djcelery
djcelery.setup_loader()
# AMQP broker connection (RabbitMQ defaults).
BROKER_HOST = 'localhost'
BROKER_PORT = 5672
BROKER_USER = 'kitsune'
BROKER_PASSWORD = 'kitsune'
BROKER_VHOST = 'kitsune'
CELERY_RESULT_BACKEND = 'amqp'
CELERY_IGNORE_RESULT = True
CELERY_ALWAYS_EAGER = True  # For tests. Set to False for use.
CELERY_SEND_TASK_ERROR_EMAILS = True
CELERYD_LOG_LEVEL = logging.INFO
CELERYD_CONCURRENCY = 4
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True  # Explode loudly during tests.
CELERYD_HIJACK_ROOT_LOGGER = False
# Wiki rebuild settings
WIKI_REBUILD_TOKEN = 'sumo:wiki:full-rebuild'
# Anonymous user cookie
ANONYMOUS_COOKIE_NAME = 'SUMO_ANONID'
ANONYMOUS_COOKIE_MAX_AGE = 30 * 86400  # 30 days, in seconds
# Do not change this without also deleting all wiki documents:
WIKI_DEFAULT_LANGUAGE = LANGUAGE_CODE
# Gallery settings
GALLERY_DEFAULT_LANGUAGE = WIKI_DEFAULT_LANGUAGE
GALLERY_IMAGE_PATH = 'uploads/gallery/images/'
GALLERY_IMAGE_THUMBNAIL_PATH = 'uploads/gallery/images/thumbnails/'
GALLERY_VIDEO_PATH = 'uploads/gallery/videos/'
GALLERY_VIDEO_URL = None
GALLERY_VIDEO_THUMBNAIL_PATH = 'uploads/gallery/videos/thumbnails/'
GALLERY_VIDEO_THUMBNAIL_PROGRESS_URL = MEDIA_URL + 'img/video-thumb.png'
THUMBNAIL_PROGRESS_WIDTH = 32  # width of the above image
THUMBNAIL_PROGRESS_HEIGHT = 32  # height of the above image
VIDEO_MAX_FILESIZE = 52428800  # 50 megabytes, in bytes
# Customer Care settings
CC_MAX_TWEETS = 500  # Max. no. of tweets in DB
CC_TWEETS_PERPAGE = 100  # How many tweets to collect in one go. Max: 100.
CC_SHOW_REPLIES = True  # Show replies to tweets?
CC_ALLOW_REMOVE = True  # Allow users to hide tweets?
CC_TOP_CONTRIB_CACHE_KEY = 'sumo-cc-top-contrib-stats'
CC_TOP_CONTRIB_SORT = '1w'
CC_TOP_CONTRIB_LIMIT = 10
CC_STATS_CACHE_TIMEOUT = 24 * 60 * 60  # 24 hours
CC_STATS_WARNING = 30 * 60 * 60  # Warn if JSON data is older than 30 hours
CC_REPLIES_GOAL = 175  # Goal # of replies in 24 hours.
CC_TWEETS_DAYS = 7  # Limit tweets to those from the last 7 days.
# If any of these words show up in a tweet, it probably isn't
# actionable, so don't add it to the AoA.
CC_WORD_BLACKLIST = [
'#UninstallFirefox',
]
BITLY_API_URL = 'http://api.bitly.com/v3/shorten?callback=?'
BITLY_LOGIN = None
BITLY_API_KEY = None
TWITTER_COOKIE_SECURE = True
TWITTER_CONSUMER_KEY = ''
TWITTER_CONSUMER_SECRET = ''
TWITTER_ACCESS_TOKEN = ''
TWITTER_ACCESS_TOKEN_SECRET = ''
TIDINGS_FROM_ADDRESS = 'notifications@support.mozilla.org'
# Anonymous watches must be confirmed.
TIDINGS_CONFIRM_ANONYMOUS_WATCHES = True
TIDINGS_MODEL_BASE = 'kitsune.sumo.models.ModelBase'
TIDINGS_REVERSE = 'kitsune.sumo.urlresolvers.reverse'
# Google Analytics settings.
GA_KEY = 'longkey' # Google API client key
GA_ACCOUNT = 'something@developer.gserviceaccount.com' # Google API Service Account email address
GA_PROFILE_ID = '12345678' # Google Analytics profile id for SUMO prod
GA_START_DATE = date(2012, 11, 10)
MOBILE_COOKIE = 'msumo'
# Directory of JavaScript test files for django_qunit to run
QUNIT_TEST_DIRECTORY = os.path.join('kitsune', 'sumo', 'static', 'js', 'tests')
# Key to access /services/version. Set to None to disallow.
VERSION_CHECK_TOKEN = None
REDIS_BACKENDS = {
#'default': 'redis://localhost:6379?socket_timeout=0.5&db=0',
#'karma': 'redis://localhost:6381?socket_timeout=0.5&db=0',
#'helpfulvotes': 'redis://localhost:6379?socket_timeout=0.5&db=1',
}
HELPFULVOTES_UNHELPFUL_KEY = 'helpfulvotes_topunhelpful'
LAST_SEARCH_COOKIE = 'last_search'
OPTIPNG_PATH = None
# Zendesk info. Fill in the prefix, email and password in settings_local.py.
ZENDESK_URL = 'https://appsmarket.zendesk.com'
ZENDESK_SUBJECT_PREFIX = '[TEST] ' # Set to '' in prod
ZENDESK_USER_EMAIL = ''
ZENDESK_USER_PASSWORD = ''
# Tasty Pie
API_LIMIT_PER_PAGE = 0
# Change the default for XFrameOptionsMiddleware.
X_FRAME_OPTIONS = 'DENY'
# Where to find the about:support troubleshooting addon.
# This is a link to the latest version, whatever that may be.
TROUBLESHOOTER_ADDON_URL = 'https://addons.mozilla.org/firefox/downloads/latest/426841/addon-426841-latest.xpi'
# SurveyGizmo API
SURVEYGIZMO_USER = ''
SURVEYGIZMO_PASSWORD = ''
# Django Rest Framework
REST_FRAMEWORK = {
'DEFAULT_FILTER_BACKENDS': (
'rest_framework.filters.DjangoFilterBackend',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
}
# Django-axes settings.
AXES_LOGIN_FAILURE_LIMIT = 10
AXES_LOCK_OUT_AT_FAILURE = True
AXES_USE_USER_AGENT = False
AXES_COOLOFF_TIME = 1 # hour
AXES_BEHIND_REVERSE_PROXY = True
AXES_REVERSE_PROXY_HEADER = 'HTTP_X_CLUSTER_CLIENT_IP'
# Set this to True to wrap each HTTP request in a transaction on this database.
ATOMIC_REQUESTS = True
# CORS Setup
CORS_ORIGIN_ALLOW_ALL = True
CORS_URLS_REGEX = [
r'^/api/1/gallery/.*$',
r'^/api/1/kb/.*$',
r'^/api/1/products/.*',
r'^/api/1/users/get_token$',
r'^/api/1/users/test_auth$',
r'^/api/2/answer/.*$',
r'^/api/2/pushnotification/.*$',
r'^/api/2/question/.*$',
r'^/api/2/user/.*$',
]
# Now combine all those regexes with one big "or".
CORS_URLS_REGEX = re.compile('|'.join('({0})'.format(r) for r in CORS_URLS_REGEX))
CORS_URLS_REGEX = '.*'
# XXX Fix this when Bug 1059545 is fixed
CC_IGNORE_USERS = []
|
|
import unittest
from unittest import mock
from . make import make
from bibliopixel.animation import animation
from bibliopixel.colors import gamma, tables
from bibliopixel.drivers.ledtype import LEDTYPE
BAD_JSON_ERROR = """
while parsing a flow node
expected the node content, but found ']'
in "<unicode string>", line 1, column 2:
{]
^
"""
class ProjectTest(unittest.TestCase):
    """Builds projects with ``make`` from the inline JSON/YAML fixtures
    defined at module scope below, and from on-disk fixture files."""

    @mock.patch('bibliopixel.util.data_file.ALWAYS_LOAD_YAML', False)
    def test_bad_project_json(self):
        # Malformed JSON must raise.
        with self.assertRaises(Exception):
            make('{]')

    @mock.patch('bibliopixel.util.data_file.ALWAYS_LOAD_YAML', True)
    def test_bad_project_yaml(self):
        # The same document parsed as YAML yields the YAML parser's message.
        with self.assertRaises(Exception) as e:
            make('{]')
        self.assertEqual(str(e.exception).strip(), BAD_JSON_ERROR.strip())

    def test_simple(self):
        make(PROJECT)

    def test_types(self):
        # Driver kwargs are given as human-readable strings in the fixture;
        # check they are coerced to their native types.
        animation = make(PROJECT_TYPES)
        kwds = animation.layout.drivers[0]._kwds
        self.assertEqual(kwds['c_order'], (1, 2, 0))
        self.assertEqual(kwds['color'], (0, 255, 0))
        self.assertEqual(kwds['duration'], 3720)
        self.assertEqual(kwds['gamma'].table, gamma.APA102.table)
        self.assertEqual(kwds['time'], 35000)
        self.assertEqual(kwds['ledtype'], LEDTYPE.GENERIC)

    def test_file(self):
        make('test/bibliopixel/project/project.json', False)

    def test_yaml_file(self):
        make('test/bibliopixel/project/project.yml', False)

    def test_super(self):
        animation = make('test/bibliopixel/project/super_project.json', False)
        self.assertEqual(animation.__class__.__name__, 'StripChannelTest')
        self.assertEqual(animation.layout.pixelWidth, 2)

    def test_multi(self):
        # Per-driver overrides from the "drivers" list are applied in order.
        animation = make(PROJECT_MULTI)
        k = [d._kwds for d in animation.layout.drivers]
        self.assertEqual(k[0]['device_id'], 10)
        self.assertEqual(k[1]['device_id'], 11)
        self.assertEqual(k[2]['device_id'], 12)

    def test_shared(self):
        make(PROJECT_SHARED)

    def test_sequence(self):
        animation = make(PROJECT_SEQUENCE, run_start=False)
        self.assertEqual(len(animation.animations), 3)
        self.assertIsNotNone(animation.animations[0])
        animation = animation.animations[1]
        self.assertEqual(animation.name, 'mt')
        self.assertEqual(animation.layout.rotation, 90)

    def test_sub_animation_names(self):
        # Unnamed duplicate sub-animations get "_<index>" suffixes and are
        # addressable both as attributes and by key.
        animation = make(PROJECT_SUB_ANIMATIONS, run_start=False)
        self.assertEqual(animation.name, 'Sequence')
        a, b, c, d = animation.animations
        self.assertEqual(a.name, 'StripChannelTest_0')
        self.assertEqual(d.name, 'StripChannelTest_3')
        animation.pre_run()
        self.assertEqual(animation.animations.StripChannelTest_2, c)
        self.assertEqual(animation.animations['StripChannelTest_1'], b)

    def test_numpy(self):
        make(PROJECT_NUMPY)

    def test_pixelwidth(self):
        make(PROJECT_PIXELWIDTH)

    def test_aliases(self):
        make(PROJECT_ALIASES)

    def test_simpixel(self):
        animation = make(PROJECT_SIM, run_start=False)
        self.assertEqual(animation.name, 'test name')
        self.assertEqual(animation.data, {'title': 'test title'})

    def test_project_from_animation_class(self):
        animation = make(PROJECT_ANIMATION)
        self.assertEqual(animation.layout.rotation, 90)

    def test_nested_animation(self):
        make(PROJECT_NESTED_ANIMATION, run_start=False)

    def test_nested_sequence(self):
        make(PROJECT_NESTED_SEQUENCE, run_start=False)

    def test_project_colors(self):
        try:
            make(PROJECT_COLORS, run_start=False)
            self.assertEqual(tables.get_color('bland'), (1, 2, 3))
            self.assertEqual(tables.get_name((3, 2, 1)), 'exciting!!')
            self.assertIs(tables.get_color('exciting'), None)
        finally:
            # Always clear the global user-color table so later tests are
            # not polluted.
            tables.set_user_colors({})
        self.assertEqual(tables.get_color('bland'), None)

    def test_test_example(self):
        make(PROJECT_TEST_EXAMPLE)
PROJECT = """
{
"driver": "dummy",
"shape": 12,
"layout": "strip",
"animation": ".tests.StripChannelTest",
"run": {
"max_steps": 2
}
}
"""
PROJECT_TYPES = """
{
"driver": {
"typename": "dummy",
"c_order": "GBR",
"color": "green",
"duration": "1 hour, 2 minutes",
"gamma": "APA102",
"time": "35ks",
"ledtype": "GENERIC"
},
"shape": 12,
"layout": "strip",
"animation": ".tests.StripChannelTest",
"run": {
"max_steps": 2
}
}
"""
PROJECT_MULTI = """
{
"driver": {
"typename": "dummy",
"num": 4096
},
"drivers": [
{"device_id": 10},
{"device_id": 11},
{"device_id": 12}
],
"layout": {
"typename": "matrix",
"width": 128,
"height": 32,
"gen_coord_map": [
{
"dx": 32,
"dy": 32
},
{
"dx": 32,
"dy": 32
},
{
"dx": 32,
"dy": 32
}
]
},
"animation": ".tests.MatrixChannelTest",
"run": {
"max_steps": 2
}
}
"""
PROJECT_SHARED = """
{
"driver": {
"typename": "dummy",
"num": 12
},
"layout": "strip",
"animation": ".tests.StripChannelTest",
"run": {
"max_steps": 2
},
"maker": {
"shared_memory": true
}
}
"""
PROJECT_NUMPY = """
{
"driver": {
"typename": "dummy",
"num": 12
},
"layout": "bibliopixel.layout.strip.Strip",
"animation": ".tests.StripChannelTest",
"run": {
"max_steps": 2
},
"maker": {
"numpy_dtype": "float"
}
}
"""
PROJECT_SIM = """
{
"driver": {
"typename": ".SimPixel",
"num": 12,
"port": 1338
},
"layout": {
"typename": ".strip"
},
"animation": {
"typename": ".tests.StripChannelTest",
"name": "test name",
"data": {"title": "test title"}
},
"run": {
"max_steps": 2
}
}
"""
PROJECT_SEQUENCE = """
{
"driver": "dummy",
"layout": {
"typename": "matrix",
"rotation": 92
},
"animation": {
"typename": "sequence",
"animations": [
".tests.MatrixChannelTest",
{
"typename": ".tests.MatrixChannelTest",
"name": "mt",
"data": {"title": "test title"}
},
{
"animation": {
"typename": ".tests.MatrixChannelTest",
"name": "mt2",
"data": {"title": "test title"}
}
}
]
}
}
"""
PROJECT_ALIASES = """
{
"aliases": {
"st": "bibliopixel.layout.strip",
"stc": ".tests.StripChannelTest"
},
"driver": {
"typename": "dummy",
"num": 12
},
"layout": "@st.Strip",
"animation": "stc",
"run": {
"max_steps": 2
}
}
"""
PROJECT_PIXELWIDTH = """
{
"driver": {
"typename": "dummy",
"num": 12
},
"layout": {
"typename": "strip",
"pixelWidth": 3
},
"animation": ".tests.StripChannelTest",
"run": {
"max_steps": 2
}
}
"""
PROJECT_SUB_ANIMATIONS = """
{
"driver": {
"typename": "dummy",
"num": 12
},
"layout": {
"typename": "strip",
"pixelWidth": 3
},
"animation": {
"typename": "sequence",
"animations": [".tests.StripChannelTest",
".tests.StripChannelTest",
".tests.StripChannelTest",
".tests.StripChannelTest"]
},
"run": {
"max_steps": 2
}
}
"""
PROJECT_ANIMATION = """
{"animation": "test.bibliopixel.project.project_test.AnimationTest"}
"""
PROJECT_NESTED_ANIMATION = """
shape: [32, 32]
animation:
typename: .wrapper
animation:
typename: .wrapper
animation: $bpa.matrix.bloom
"""
PROJECT_NESTED_SEQUENCE = """
shape: [32, 32]
animation:
typename: .sequence
animations:
- typename: .sequence
animations:
- $bpa.matrix.bloom
"""
PROJECT_COLORS = """
shape: 32
animation: .tests.StripChannelTest
colors:
bland: [1, 2, 3]
'exciting!!': [3, 2, 1]
"""
PROJECT_TEST_EXAMPLE = """
driver: dummy
shape: [48, 24]
animation: test.bibliopixel.animation.documentation_class.Example26
run:
max_steps: 4
"""
class AnimationTest(animation.Animation):
    """Animation subclass carrying its own default PROJECT description;
    exercised by test_project_from_animation_class via PROJECT_ANIMATION."""

    # Default project used when this class is named as the project's
    # animation.
    PROJECT = {
        "driver": "dummy",
        "layout": {
            "typename": "matrix",
            "rotation": 92},
        "run": {
            "max_steps": 2},
    }

    def step(self, amt=1):
        # One animation frame; this test animation draws nothing.
        pass
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Interface with command line GULP.
http://projects.ivec.org
WARNING: you need to have GULP installed on your system.
"""
# Module metadata.
__author__ = "Bharat Medasani, Wenhao Sun"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Bharat Medasani"
__email__ = "bkmedasani@lbl.gov,wenhao@mit.edu"
__status__ = "Production"
__date__ = "$Jun 22, 2013M$"
import os
import re
import subprocess
from monty.tempfile import ScratchDir
from pymatgen.analysis.bond_valence import BVAnalyzer
from pymatgen.core.lattice import Lattice
from pymatgen.core.periodic_table import Element
from pymatgen.core.structure import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
# Species treated as anions; these may receive a polarizable shell in
# GulpIO.structure_lines.
_anions = {Element(sym) for sym in "O S F Cl Br N P".split()}

# Species treated as cations, grouped roughly by chemistry.
_CATION_SYMBOLS = (
    "Li Na K "                                       # alkali metals
    "Be Mg Ca "                                      # alkaline metals
    "Al Sc Ti V Cr Mn Fe Co Ni Cu Zn Ge As "
    "Y Zr Nb Mo Tc Ru Rh Pd Ag Cd In Sn Sb "
    "Hf Ta W Re Os Ir Pt Au Hg Tl Pb Bi "
    "La Ce Pr Nd Pm Sm Eu Gd Tb Dy Ho Er Tm Yb Lu"   # lanthanides
)
_cations = {Element(sym) for sym in _CATION_SYMBOLS.split()}
# Recognized GULP first-line keywords, grouped by category.  Currently only
# documentary: the validation in GulpIO.keyword_line is commented out.
_gulp_kw = {
    # Control of calculation type
    "angle",
    "bond",
    "cosmo",
    "cosmic",
    "cost",
    "defect",
    "distance",
    "eem",
    "efg",
    "fit",
    "free_energy",
    "gasteiger",
    "genetic",
    "gradients",
    "md",
    "montecarlo",
    "noautobond",
    "noenergy",
    "optimise",
    "pot",
    "predict",
    "preserve_Q",
    "property",
    "phonon",
    "qeq",
    "qbond",
    "single",
    "sm",
    "static_first",
    "torsion",
    "transition_state",
    # Geometric variable specification
    "breathe",
    "bulk_noopt",
    "cellonly",
    "conp",
    "conv",
    "isotropic",
    "orthorhombic",
    "nobreathe",
    "noflgs",
    "shell",
    "unfix",
    # Algorithm
    "c6",
    "dipole",
    "fbfgs",
    "fix_molecule",
    "full",
    "hill",
    "kfull",
    "marvinSE",
    "madelung",
    "minimum_image",
    "molecule",
    "molmec",
    "molq",
    "newda",
    "noanisotropic_2b",
    "nod2sym",
    "nodsymmetry",
    "noelectrostatics",
    "noexclude",
    "nofcentral",
    "nofirst_point",
    "noksymmetry",
    "nolist_md",
    "nomcediff",
    "nonanal",
    "noquicksearch",
    "noreal",
    "norecip",
    "norepulsive",
    "nosasinitevery",
    "nosderv",
    "nozeropt",
    "numerical",
    "qiter",
    "qok",
    "spatial",
    "storevectors",
    "nomolecularinternalke",
    "voight",
    "zsisa",
    # Optimisation method
    "conjugate",
    "dfp",
    "lbfgs",
    "numdiag",
    "positive",
    "rfo",
    "unit",
    # Output control
    "average",
    "broaden_dos",
    "cartesian",
    "compare",
    "conserved",
    "dcharge",
    "dynamical_matrix",
    "eigenvectors",
    "global",
    "hessian",
    "hexagonal",
    "intensity",
    "linmin",
    "meanke",
    "nodensity_out",
    "nodpsym",
    "nofirst_point",
    "nofrequency",
    "nokpoints",
    "operators",
    "outcon",
    "prt_eam",
    "prt_two",
    "prt_regi_before",
    "qsas",
    "restore",
    "save",
    "terse",
    # Structure control
    "full",
    "hexagonal",
    "lower_symmetry",
    "nosymmetry",
    # PDF control
    "PDF",
    "PDFcut",
    "PDFbelow",
    "PDFkeep",
    "coreinfo",
    "nowidth",
    "nopartial",
    # Miscellaneous
    "nomodcoord",
    "oldunits",
    "zero_potential",
}
class GulpIO:
    """
    To generate GULP input and process output
    """

    @staticmethod
    def keyword_line(*args):
        r"""
        Checks if the input args are proper gulp keywords and
        generates the 1st line of gulp input. Full keywords are expected.

        Args:
            \\*args: 1st line keywords
        """
        # Keyword validation is disabled; the args are joined as given.
        # if len(list(filter(lambda x: x in _gulp_kw, args))) != len(args):
        #     raise GulpError("Wrong keywords given")
        gin = " ".join(args)
        gin += "\n"
        return gin

    @staticmethod
    def structure_lines(
        structure,
        cell_flg=True,
        frac_flg=True,
        anion_shell_flg=True,
        cation_shell_flg=False,
        symm_flg=True,
    ):
        """
        Generates GULP input string corresponding to pymatgen structure.

        Args:
            structure: pymatgen Structure object
            cell_flg (default = True): Option to use lattice parameters.
            fractional_flg (default = True): If True, fractional coordinates
                are used. Else, cartesian coodinates in Angstroms are used.
                ******
                GULP convention is to use fractional coordinates for periodic
                structures and cartesian coordinates for non-periodic
                structures.
                ******
            anion_shell_flg (default = True): If True, anions are considered
                polarizable.
            cation_shell_flg (default = False): If True, cations are
                considered polarizable.
            symm_flg (default = True): If True, symmetry information is also
                written.

        Returns:
            string containing structure for GULP input
        """
        gin = ""
        if cell_flg:
            gin += "cell\n"
            l = structure.lattice
            lat_str = "{0:6f} {1:6f} {2:6f} {3:6f} {4:6f} {5:6f}".format(l.a, l.b, l.c, l.alpha, l.beta, l.gamma)
            gin += lat_str + "\n"
        if frac_flg:
            gin += "frac\n"
            coord_attr = "frac_coords"
        else:
            gin += "cart\n"
            coord_attr = "coords"
        for site in structure.sites:
            coord = [str(i) for i in getattr(site, coord_attr)]
            specie = site.specie
            core_site_desc = specie.symbol + " core " + " ".join(coord) + "\n"
            gin += core_site_desc
            # Add a "shel" line for species flagged as polarizable.
            if (specie in _anions and anion_shell_flg) or (specie in _cations and cation_shell_flg):
                shel_site_desc = specie.symbol + " shel " + " ".join(coord) + "\n"
                gin += shel_site_desc
            else:
                pass
        if symm_flg:
            gin += "space\n"
            gin += str(SpacegroupAnalyzer(structure).get_space_group_number()) + "\n"
        return gin

    @staticmethod
    def specie_potential_lines(structure, potential, **kwargs):
        r"""
        Generates GULP input specie and potential string for pymatgen
        structure.

        Args:
            structure: pymatgen.core.structure.Structure object
            potential: String specifying the type of potential used
            \\*\\*kwargs: Additional parameters related to potential. For
                potential == "buckingham",
                anion_shell_flg (default = False):
                    If True, anions are considered polarizable.
                anion_core_chrg=float
                anion_shell_chrg=float
                cation_shell_flg (default = False):
                    If True, cations are considered polarizable.
                cation_core_chrg=float
                cation_shell_chrg=float

        Returns:
            string containing specie and potential specification for gulp
            input.
        """
        raise NotImplementedError("gulp_specie_potential not yet implemented." "\nUse library_line instead")

    @staticmethod
    def library_line(file_name):
        """
        Specifies GULP library file to read species and potential parameters.
        If using library don't specify species and potential
        in the input file and vice versa. Make sure the elements of
        structure are in the library file.

        Args:
            file_name: Name of GULP library file

        Returns:
            GULP input string specifying library option
        """
        gulplib_set = "GULP_LIB" in os.environ.keys()

        def readable(f):
            # True when f exists and the process may read it.
            return os.path.isfile(f) and os.access(f, os.R_OK)

        gin = ""
        dirpath, fname = os.path.split(file_name)
        if dirpath and readable(file_name):  # Full path specified
            gin = "library " + file_name
        else:
            fpath = os.path.join(os.getcwd(), file_name)  # Check current dir
            if readable(fpath):
                gin = "library " + fpath
            elif gulplib_set:  # Check the GULP_LIB path
                fpath = os.path.join(os.environ["GULP_LIB"], file_name)
                if readable(fpath):
                    # NOTE(review): the bare name is written here (not fpath);
                    # GULP resolves it against GULP_LIB itself.
                    gin = "library " + file_name
        if gin:
            return gin + "\n"
        raise GulpError("GULP Library not found")

    def buckingham_input(self, structure, keywords, library=None, uc=True, valence_dict=None):
        """
        Gets a GULP input for an oxide structure and buckingham potential
        from library.

        Args:
            structure: pymatgen.core.structure.Structure
            keywords: GULP first line keywords.
            library (Default=None): File containing the species and potential.
            uc (Default=True): Unit Cell Flag.
            valence_dict: {El: valence}
        """
        gin = self.keyword_line(*keywords)
        gin += self.structure_lines(structure, symm_flg=not uc)
        if not library:
            gin += self.buckingham_potential(structure, valence_dict)
        else:
            gin += self.library_line(library)
        return gin

    @staticmethod
    def buckingham_potential(structure, val_dict=None):
        """
        Generate species, buckingham, and spring options for an oxide structure
        using the parameters in default libraries.

        Ref:
            1. G.V. Lewis and C.R.A. Catlow, J. Phys. C: Solid State Phys.,
               18, 1149-1161 (1985)
            2. T.S.Bush, J.D.Gale, C.R.A.Catlow and P.D. Battle,
               J. Mater Chem., 4, 831-837 (1994)

        Args:
            structure: pymatgen.core.structure.Structure
            val_dict (Needed if structure is not charge neutral): {El:valence}
                dict, where El is element.
        """
        if not val_dict:
            try:
                # If structure is oxidation state decorated, use that first.
                el = [site.specie.symbol for site in structure]
                valences = [site.specie.oxi_state for site in structure]
                val_dict = dict(zip(el, valences))
            except AttributeError:
                # Fall back to bond-valence analysis.
                bv = BVAnalyzer()
                el = [site.specie.symbol for site in structure]
                valences = bv.get_valences(structure)
                val_dict = dict(zip(el, valences))
        # Try bush library first
        bpb = BuckinghamPotential("bush")
        bpl = BuckinghamPotential("lewis")
        gin = ""
        for key in val_dict.keys():
            use_bush = True
            # Strip digits and charge signs to get the bare element symbol.
            el = re.sub(r"[1-9,+,\-]", "", key)
            if el not in bpb.species_dict.keys():
                use_bush = False
            elif val_dict[key] != bpb.species_dict[el]["oxi"]:
                use_bush = False
            if use_bush:
                gin += "species \n"
                gin += bpb.species_dict[el]["inp_str"]
                gin += "buckingham \n"
                gin += bpb.pot_dict[el]
                gin += "spring \n"
                gin += bpb.spring_dict[el]
                continue
            # Try lewis library next if element is not in bush
            # use_lewis = True
            if el != "O":  # For metals the key is "Metal_OxiState+"
                k = el + "_" + str(int(val_dict[key])) + "+"
                if k not in bpl.species_dict.keys():
                    # use_lewis = False
                    raise GulpError("Element {} not in library".format(k))
                gin += "species\n"
                gin += bpl.species_dict[k]
                gin += "buckingham\n"
                gin += bpl.pot_dict[k]
            else:
                # Oxygen has separate core and shell entries plus a spring.
                gin += "species\n"
                k = "O_core"
                gin += bpl.species_dict[k]
                k = "O_shel"
                gin += bpl.species_dict[k]
                gin += "buckingham\n"
                gin += bpl.pot_dict[key]
                gin += "spring\n"
                gin += bpl.spring_dict[key]
        return gin

    def tersoff_input(self, structure, periodic=False, uc=True, *keywords):
        """
        Gets a GULP input with Tersoff potential for an oxide structure

        Args:
            structure: pymatgen.core.structure.Structure
            periodic (Default=False): Flag denoting whether periodic
                boundary conditions are used
            library (Default=None): File containing the species and potential.
            uc (Default=True): Unit Cell Flag.
            keywords: GULP first line keywords.
        """
        # gin="static noelectrostatics \n "
        gin = self.keyword_line(*keywords)
        gin += self.structure_lines(
            structure,
            cell_flg=periodic,
            frac_flg=periodic,
            anion_shell_flg=False,
            cation_shell_flg=False,
            symm_flg=not uc,
        )
        gin += self.tersoff_potential(structure)
        return gin

    @staticmethod
    def tersoff_potential(structure):
        """
        Generate the species, tersoff potential lines for an oxide structure

        Args:
            structure: pymatgen.core.structure.Structure
        """
        bv = BVAnalyzer()
        el = [site.specie.symbol for site in structure]
        valences = bv.get_valences(structure)
        el_val_dict = dict(zip(el, valences))
        gin = "species \n"
        qerfstring = "qerfc\n"
        for key in el_val_dict.keys():
            if key != "O" and el_val_dict[key] % 1 != 0:
                raise SystemError("Oxide has mixed valence on metal")
            specie_string = key + " core " + str(el_val_dict[key]) + "\n"
            gin += specie_string
            qerfstring += key + " " + key + " 0.6000 10.0000 \n"
        gin += "# noelectrostatics \n Morse \n"
        met_oxi_ters = TersoffPotential().data
        for key in el_val_dict.keys():
            if key != "O":
                metal = key + "(" + str(int(el_val_dict[key])) + ")"
                ters_pot_str = met_oxi_ters[metal]
                gin += ters_pot_str
        gin += qerfstring
        return gin

    @staticmethod
    def get_energy(gout):
        """
        Args:
            gout (str): GULP output string.

        Returns:
            Energy (float), taken from the last matching energy line.
        """
        energy = None
        for line in gout.split("\n"):
            if "Total lattice energy" in line and "eV" in line:
                energy = line.split()
            elif "Non-primitive unit cell" in line and "eV" in line:
                energy = line.split()
        if energy:
            # Token 4 is the numeric value in both line formats above.
            return float(energy[4])
        raise GulpError("Energy not found in Gulp output")

    @staticmethod
    def get_relaxed_structure(gout):
        """
        Args:
            gout (str): GULP output string.

        Returns:
            (Structure) relaxed structure.
        """
        # Find the structure lines
        structure_lines = []
        cell_param_lines = []
        output_lines = gout.split("\n")
        no_lines = len(output_lines)
        i = 0
        # Compute the input lattice parameters
        # NOTE(review): the column indices below assume GULP's fixed output
        # layout; if neither header is found, a/b/c stay unbound and the
        # Lattice construction below would raise NameError.
        while i < no_lines:
            line = output_lines[i]
            if "Full cell parameters" in line:
                i += 2
                line = output_lines[i]
                a = float(line.split()[8])
                alpha = float(line.split()[11])
                line = output_lines[i + 1]
                b = float(line.split()[8])
                beta = float(line.split()[11])
                line = output_lines[i + 2]
                c = float(line.split()[8])
                gamma = float(line.split()[11])
                i += 3
                break
            if "Cell parameters" in line:
                i += 2
                line = output_lines[i]
                a = float(line.split()[2])
                alpha = float(line.split()[5])
                line = output_lines[i + 1]
                b = float(line.split()[2])
                beta = float(line.split()[5])
                line = output_lines[i + 2]
                c = float(line.split()[2])
                gamma = float(line.split()[5])
                i += 3
                break
            i += 1
        while i < no_lines:
            line = output_lines[i]
            if "Final fractional coordinates of atoms" in line:
                # read the site coordinates in the following lines
                i += 6
                line = output_lines[i]
                while line[0:2] != "--":
                    structure_lines.append(line)
                    i += 1
                    line = output_lines[i]
                # read the cell parameters
                i += 9
                line = output_lines[i]
                if "Final cell parameters" in line:
                    i += 3
                    for del_i in range(6):
                        line = output_lines[i + del_i]
                        cell_param_lines.append(line)
                break
            i += 1
        # Process the structure lines
        if structure_lines:
            sp = []
            coords = []
            for line in structure_lines:
                fields = line.split()
                if fields[2] == "c":
                    sp.append(fields[1])
                    coords.append(list(float(x) for x in fields[3:6]))
        else:
            raise IOError("No structure found")
        # Relaxed cell parameters override the input ones when present.
        if cell_param_lines:
            a = float(cell_param_lines[0].split()[1])
            b = float(cell_param_lines[1].split()[1])
            c = float(cell_param_lines[2].split()[1])
            alpha = float(cell_param_lines[3].split()[1])
            beta = float(cell_param_lines[4].split()[1])
            gamma = float(cell_param_lines[5].split()[1])
        latt = Lattice.from_parameters(a, b, c, alpha, beta, gamma)
        return Structure(latt, sp, coords)
class GulpCaller:
    """
    Class to run gulp from commandline
    """

    def __init__(self, cmd="gulp"):
        """
        Initialize with the executable if not in the standard path

        Args:
            cmd: Command. Defaults to gulp.

        Raises:
            GulpError: if no executable can be located.
        """

        def is_exe(f):
            # Regular file with the execute bit set.
            return os.path.isfile(f) and os.access(f, os.X_OK)

        fpath, fname = os.path.split(cmd)
        if fpath:
            # An explicit path was given; accept it only if executable.
            if is_exe(cmd):
                self._gulp_cmd = cmd
                return
        else:
            # Bare command name: search every entry on PATH.
            for path in os.environ["PATH"].split(os.pathsep):
                path = path.strip('"')
                candidate = os.path.join(path, cmd)  # renamed from `file` (shadowed builtin)
                if is_exe(candidate):
                    self._gulp_cmd = candidate
                    return
        raise GulpError("Executable not found")

    def run(self, gin):
        """
        Run GULP using the gin as input

        Args:
            gin: GULP input string

        Returns:
            gout: GULP output string (stdout with a trailing newline)

        Raises:
            GulpError: if GULP reports an error on stderr or stdout.
            GulpConvergenceError: if optimisation did not converge.
        """
        with ScratchDir("."):
            p = subprocess.Popen(
                self._gulp_cmd,
                stdout=subprocess.PIPE,
                stdin=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            out, err = p.communicate(bytearray(gin, "utf-8"))
            out = out.decode("utf-8")
            err = err.decode("utf-8")
            if "Error" in err or "error" in err:
                # Dump the input and captured output to aid debugging.
                # NOTE(review): both sections print `out`; the second was
                # probably meant to show `err` — confirm before changing,
                # since `err` is also carried by the raised exception.
                print(gin)
                print("----output_0---------")
                print(out)
                print("----End of output_0------\n\n\n")
                print("----output_1--------")
                print(out)
                print("----End of output_1------")
                raise GulpError(err)
            # We may not need this
            if "ERROR" in out:
                raise GulpError(out)
            # Sometimes optimisation may fail to reach convergence
            conv_err_string = "Conditions for a minimum have not been satisfied"
            if conv_err_string in out:
                raise GulpConvergenceError()
            # The original rebuilt the output line by line; that is exactly
            # `out` plus one trailing newline, so do it directly.
            return out + "\n"
def get_energy_tersoff(structure, gulp_cmd="gulp"):
    """
    Compute the energy of a structure using Tersoff potential.

    Args:
        structure: pymatgen.core.structure.Structure
        gulp_cmd: GULP command if not in standard place
    """
    io = GulpIO()
    caller = GulpCaller(gulp_cmd)
    return io.get_energy(caller.run(io.tersoff_input(structure)))
def get_energy_buckingham(structure, gulp_cmd="gulp", keywords=("optimise", "conp", "qok"), valence_dict=None):
    """
    Compute the energy of a structure using Buckingham potential.

    Args:
        structure: pymatgen.core.structure.Structure
        gulp_cmd: GULP command if not in standard place
        keywords: GULP first line keywords
        valence_dict: {El: valence}. Needed if the structure is not charge
            neutral.
    """
    io = GulpIO()
    caller = GulpCaller(gulp_cmd)
    gin = io.buckingham_input(structure, keywords, valence_dict=valence_dict)
    return io.get_energy(caller.run(gin))
def get_energy_relax_structure_buckingham(structure, gulp_cmd="gulp", keywords=("optimise", "conp"), valence_dict=None):
    """
    Relax a structure and compute the energy using Buckingham potential.

    Args:
        structure: pymatgen.core.structure.Structure
        gulp_cmd: GULP command if not in standard place
        keywords: GULP first line keywords
        valence_dict: {El: valence}. Needed if the structure is not charge
            neutral.

    Returns:
        (energy, relaxed_structure) tuple.
    """
    io = GulpIO()
    caller = GulpCaller(gulp_cmd)
    gout = caller.run(io.buckingham_input(structure, keywords, valence_dict=valence_dict))
    return io.get_energy(gout), io.get_relaxed_structure(gout)
class GulpError(Exception):
    """
    Exception class for GULP.
    Raised when the GULP gives an error
    """

    def __init__(self, msg):
        """
        Args:
            msg (str): Message
        """
        self.msg = msg

    def __str__(self):
        return "GulpError : {}".format(self.msg)
class GulpConvergenceError(Exception):
    """
    Exception class for GULP.
    Raised when proper convergence is not reached in Mott-Littleton
    defect energy optimisation procedure in GULP
    """

    def __init__(self, msg=""):
        """
        Args:
            msg (str): Message; empty by default.
        """
        self.msg = msg

    def __str__(self):
        return self.msg
class BuckinghamPotential:
    """
    Generate the Buckingham Potential Table from the bush.lib and lewis.lib.

    Ref:
        T.S.Bush, J.D.Gale, C.R.A.Catlow and P.D. Battle, J. Mater Chem.,
        4, 831-837 (1994).
        G.V. Lewis and C.R.A. Catlow, J. Phys. C: Solid State Phys., 18,
        1149-1161 (1985)
    """

    def __init__(self, bush_lewis_flag):
        """
        Args:
            bush_lewis_flag (str): Flag for using Bush or Lewis potential.
                Must be "bush" or "lewis"; picks the library file read from
                the GULP_LIB directory.
        """
        assert bush_lewis_flag in {"bush", "lewis"}
        pot_file = "bush.lib" if bush_lewis_flag == "bush" else "lewis.lib"
        with open(os.path.join(os.environ["GULP_LIB"], pot_file), "rt") as f:
            # In lewis.lib there is no shell for cation
            species_dict, pot_dict, spring_dict = {}, {}, {}
            # Section flags: which block ("species"/"buckingham"/"spring")
            # the parser is currently inside.
            sp_flg, pot_flg, spring_flg = False, False, False
            for row in f:
                if row[0] == "#":
                    continue
                if row.split()[0] == "species":
                    sp_flg, pot_flg, spring_flg = True, False, False
                    continue
                if row.split()[0] == "buckingham":
                    sp_flg, pot_flg, spring_flg = False, True, False
                    continue
                if row.split()[0] == "spring":
                    sp_flg, pot_flg, spring_flg = False, False, True
                    continue
                elmnt = row.split()[0]
                if sp_flg:
                    if bush_lewis_flag == "bush":
                        if elmnt not in species_dict.keys():
                            species_dict[elmnt] = {"inp_str": "", "oxi": 0}
                        # Accumulate raw lines and sum the charge column.
                        species_dict[elmnt]["inp_str"] += row
                        species_dict[elmnt]["oxi"] += float(row.split()[2])
                    elif bush_lewis_flag == "lewis":
                        if elmnt == "O":
                            if row.split()[1] == "core":
                                species_dict["O_core"] = row
                            if row.split()[1] == "shel":
                                species_dict["O_shel"] = row
                        else:
                            metal = elmnt.split("_")[0]
                            # oxi_state = metaloxi.split('_')[1][0]
                            species_dict[elmnt] = metal + " core " + row.split()[2] + "\n"
                    continue
                if pot_flg:
                    if bush_lewis_flag == "bush":
                        pot_dict[elmnt] = row
                    elif bush_lewis_flag == "lewis":
                        if elmnt == "O":
                            pot_dict["O"] = row
                        else:
                            metal = elmnt.split("_")[0]
                            # oxi_state = metaloxi.split('_')[1][0]
                            pot_dict[elmnt] = metal + " " + " ".join(row.split()[1:]) + "\n"
                    continue
                if spring_flg:
                    spring_dict[elmnt] = row
            if bush_lewis_flag == "bush":
                # Fill the null keys in spring dict with empty strings
                for key in pot_dict.keys():
                    if key not in spring_dict.keys():
                        spring_dict[key] = ""
            # Parsed tables exposed to callers (see GulpIO.buckingham_potential).
            self.species_dict = species_dict
            self.pot_dict = pot_dict
            self.spring_dict = spring_dict
class TersoffPotential:
    """
    Generate Tersoff Potential Table from "OxideTersoffPotentialentials" file
    """

    def __init__(self):
        """
        Init TersoffPotential: parse the bundled data file into self.data,
        a {"Metal(oxi)": potential_string} dict.
        """
        module_dir = os.path.dirname(os.path.abspath(__file__))
        data = {}
        with open(os.path.join(module_dir, "OxideTersoffPotentials"), "r") as f:
            for row in f:
                # First token is the "Metal(oxi)" key; the potential string
                # follows the closing parenthesis.
                metaloxi = row.split()[0]
                data[metaloxi] = row.split(")")[1]
        self.data = data
|
|
#
# Data for analyzing causality.
# By Nick Cortale
#
# Classes:
# ccm
# embed
#
# Paper:
# Detecting Causality in Complex Ecosystems
# George Sugihara et al. 2012
#
# Thanks to Kenneth Ells and Dylan McNamara
#
# Notes:
# Originally I thought this can be made way faster by only calculting the
# distances once and then chopping it to a specific library length. It turns out
# that calculating the distances is cheaper than filtering the indices.
#
import numpy as np
from sklearn import neighbors
from sklearn import metrics
import skccm.utilities as ut
import pandas as pd
import time
class CCM:
    """
    Convergent cross mapping for two embedded time series.

    Implements the cross-mapping skill calculation from Sugihara et al. (2012),
    "Detecting Causality in Complex Ecosystems": each series is predicted from
    the near-neighbor structure of the *other* series, and growing skill with
    library length indicates causal influence.
    """
    def __init__(self, weights='exp', score_metric='corrcoef', verbose=False):
        """
        Parameters
        ----------
        weights : str
            weighting scheme for predictions
            - 'exp' : exponential weighting (the only scheme used below)
        score_metric : str
            how to score the predictions
            -'score'
            -'corrcoef'
        verbose : bool
            prints out calculation status
        """
        self.weights = weights
        self.score_metric = score_metric
        self.verbose = verbose
    def fit(self,X1,X2):
        """
        Fit the training data for ccm. Creates seperate near neighbor regressors
        for X1 and X2 independently. No distances are computed here; the
        regressors are (re)fit inside the predict methods.
        X1 : embedded time series of shape (num_samps,embed_dim)
        X2 : embedded time series of shape (num_samps,embed_dim)
        near_neighs : string
            - 'sorround' : this is what the paper uses
            - 'all' : calculate the distance to all near neighbors
        """
        # Save X1_train and X2_train for prediction later. Confusing,
        # but we need to make predictions about our testing set using these.
        self.X1 = X1
        self.X2 = X2
        #to sorround a point, there must be ndim + 1 points
        # we add two here because the closest neighbor is itself. so that is
        # going to be dropped.
        near_neighs = X1.shape[1] + 2
        self.knn1 = neighbors.KNeighborsRegressor(near_neighs)
        self.knn2 = neighbors.KNeighborsRegressor(near_neighs)
    def predict_no_drop(self,lib_lengths):
        """
        Make a prediction for each library length, allowing an embedded point
        to be matched with itself's index (only the literal self-neighbor,
        column 0, is dropped).
        Parameters
        ----------
        lib_lengths : list of library lengths to test
        Returns
        -------
        X1_pred, X2_pred : lists (one entry per library length) of arrays
            shaped like X1/X2; also stored on self.
        """
        X1_pred = []
        X2_pred = []
        for liblen in lib_lengths:
            x1_p = np.empty(self.X1.shape)
            x2_p = np.empty(self.X2.shape)
            #keep only the indices that are less than library length
            self.knn1.fit(self.X1[:liblen], self.X1[:liblen])
            self.knn2.fit(self.X2[:liblen], self.X2[:liblen])
            dist1,ind1 = self.knn1.kneighbors(self.X1)
            dist2,ind2 = self.knn2.kneighbors(self.X2)
            #drop indices and distances to themselves
            dist1 = dist1[:,1:]
            dist2 = dist2[:,1:]
            ind1 = ind1[:,1:]
            ind2 = ind2[:,1:]
            for j in range(self.X1.shape[1]):
                # NOTE(review): W1/W2 do not depend on j and could be hoisted
                # out of this loop; left as-is to preserve exact behavior.
                W1 = ut.exp_weight(dist1)
                W2 = ut.exp_weight(dist2)
                #flip the weights and indices: X1 is predicted from X2's
                # neighbor structure and vice versa (the core of CCM)
                x1_p[:, j] = np.sum(self.X1[ind2, j] * W2, axis=1)
                x2_p[:, j] = np.sum(self.X2[ind1, j] * W1, axis=1)
            X1_pred.append(x1_p)
            X2_pred.append(x2_p)
        self.X1_pred = X1_pred
        self.X2_pred = X2_pred
        return X1_pred, X2_pred
    def predict_drop_in_list(self,lib_lengths,emb_ind1,emb_ind2):
        """
        Make a prediction, but the same indices cant be matched with each other.
        Neighbors whose original time indices overlap the embedding of the
        query point are discarded before weighting.
        Parameters
        ----------
        lib_lengths : library lengths to Test
        emb_ind1 : indices of the first embed time series.
        emb_ind2 : indices of the second embed time series.
        Returns
        -------
        X1_pred, X2_pred : lists of prediction arrays, one per library length.
        """
        X1_pred = []
        X2_pred = []
        #need to reset the class ot use all neighbors so that the appropriate
        # neighbors can be dropped for each class
        self.knn1 = neighbors.KNeighborsRegressor(len(self.X1))
        self.knn2 = neighbors.KNeighborsRegressor(len(self.X2))
        self.knn1.fit(self.X1, self.X1)
        self.knn2.fit(self.X2, self.X2)
        dist1,ind1 = self.knn1.kneighbors(self.X1)
        dist2,ind2 = self.knn2.kneighbors(self.X2)
        #find the conflicting indices
        conf1 = ut.conflicting_indices(emb_ind1)
        conf2 = ut.conflicting_indices(emb_ind2)
        #throw out the indices that are in the embedding
        dist1, ind1 = ut.throw_out_nn_indices(dist1,ind1,conf1)
        dist2, ind2 = ut.throw_out_nn_indices(dist2,ind2,conf2)
        # embed_dim + 1 points are needed to surround a query point
        n_sorround = self.X1.shape[1] + 1
        #flipping allows for a faster implentation as we can feed
        # ut.in_libary_len smaller and smaller arrays
        for liblen in lib_lengths:
            #keep only the indices that are less than library length
            #t0 = time.time()
            i_1, d_1 = ut.in_library_len_keep(ind1, dist1, liblen,n_sorround)
            i_2, d_2 = ut.in_library_len_keep(ind2, dist2, liblen,n_sorround)
            #t1 = time.time()
            #t0 = time.time()
            W1 = ut.exp_weight(d_1)
            W2 = ut.exp_weight(d_2)
            x1_p = np.empty(self.X1.shape)
            x2_p = np.empty(self.X2.shape)
            for j in range(self.X1.shape[1]):
                #flip the weights and indices
                x1_p[:, j] = np.sum(self.X1[i_2, j] * W2, axis=1)
                x2_p[:, j] = np.sum(self.X2[i_1, j] * W1, axis=1)
            #t1 = time.time()
            #print('second_loop:',np.around(t1-t0,4))
            X1_pred.append(x1_p)
            X2_pred.append(x2_p)
        self.X1_pred = X1_pred
        self.X2_pred = X2_pred
        if self.verbose: print("predictions made")
        return X1_pred, X2_pred
    def score(self,how='corrcoef'):
        """
        Evalulate the predictions. Calculates the skill down each column
        and averages them together to get the total skill.
        how : how to score the predictions
            -'score'
            -'corrcoef'
        NOTE(review): the `how` parameter is never read; the metric actually
        used is self.score_metric set in __init__ — confirm intent upstream.
        Returns two lists (skill for X1 and X2 predictions), one value per
        library length used in the preceding predict call.
        """
        num_preds = self.X1.shape[1]
        score_1 = []
        score_2 = []
        for x1_p, x2_p in zip(self.X1_pred, self.X2_pred):
            sc1 = np.empty(num_preds)
            sc2 = np.empty(num_preds)
            for ii in range(num_preds):
                p1 = x1_p[:,ii]
                p2 = x2_p[:,ii]
                if self.score_metric == 'score':
                    sc1[ii] = ut.score(p1,self.X1[:,ii])
                    sc2[ii] = ut.score(p2,self.X2[:,ii])
                if self.score_metric == 'corrcoef':
                    sc1[ii] = ut.corrcoef(p1,self.X1[:,ii])
                    sc2[ii] = ut.corrcoef(p2,self.X2[:,ii])
            score_1.append( np.mean(sc1) )
            score_2.append( np.mean(sc2) )
        return score_1, score_2
class Embed:
    """Lag-embedding utilities for scalar (or DataFrame) time series.

    Provides mutual-information scans (to choose a lag) and time-delay
    embedding (to build the state-space vectors consumed by :class:`CCM`).
    """
    def __init__(self,X):
        """
        Parameters
        ----------
        X : 1-D array or pandas DataFrame
            A DataFrame is stored as ``self.df`` (use
            :meth:`df_mutual_information`); anything else is stored as
            ``self.X`` and treated as a single series.
        """
        # isinstance (instead of an exact type() check) also accepts
        # DataFrame subclasses; behavior is unchanged for plain DataFrames.
        if isinstance(X, pd.DataFrame):
            self.df = X
        else:
            self.X = X
    def df_mutual_information(self,max_lag):
        """
        Calculates the mutual information along each column of a dataframe.
        Ensure that the time series is continuous in time and sampled regularly.
        You can resample it hourly, daily, minutely etc. if needed.
        Parameters
        ----------
        max_lag : int
            maximum amount to shift the time series
        Returns
        -------
        mi : dataframe, shape(max_lag,num_cols)
            columns are the columns of the original dataframe with rows being
            the mutual information
        """
        cols = self.df.columns
        mi = np.empty((max_lag, len(cols)))
        for i,col in enumerate(cols):
            # mutual_information() reads self.X, so point it at each column.
            self.X = self.df[col].values
            mi[:,i] = self.mutual_information(max_lag)
        mi = pd.DataFrame(mi,columns=cols)
        return mi
    def mutual_information(self,max_lag):
        """
        Calculates the mutual information between the an unshifted time series
        and a shifted time series. Utilizes scikit-learn's implementation of
        the mutual information found in sklearn.metrics.
        Parameters
        ----------
        max_lag : integer
            maximum amount to shift the time series
        Returns
        -------
        m_score : 1-D array
            mutual information at between the unshifted time series and the
            shifted time series
        """
        #number of bins - say ~ 20 pts / bin for joint distribution
        #and that at least 4 bins are required
        N = max(self.X.shape)
        num_bins = int(max(4., np.floor(np.sqrt(N / 20))))
        # Bin edges are fixed from the *full* series so the shifted and
        # unshifted segments share one discretization (loop-invariant,
        # hoisted out of the lag loop).
        min_ts = np.min(self.X)
        max_ts = np.max(self.X) + .0001  # needed to bin them up
        bins = np.linspace(min_ts, max_ts, num_bins + 1)
        m_score = np.zeros(max_lag)
        for jj in range(max_lag):
            lag = jj + 1
            ts = self.X[0:-lag]
            ts_shift = self.X[lag::]
            bin_tracker = np.zeros_like(ts)
            bin_tracker_shift = np.zeros_like(ts_shift)
            for ii in range(num_bins):
                locs = np.logical_and(ts >= bins[ii], ts < bins[ii + 1])
                bin_tracker[locs] = ii
                locs_shift = np.logical_and(ts_shift >= bins[ii], ts_shift < bins[ii + 1])
                bin_tracker_shift[locs_shift] = ii
            m_score[jj] = metrics.mutual_info_score(bin_tracker, bin_tracker_shift)
        return m_score
    @staticmethod
    def _embed_sequence(series, lag, embed):
        """Time-delay embed a 1-D array.

        Row i of the result is series[i], series[i+lag], ...,
        series[i+lag*(embed-1)]; shape is (len(series)-lag*(embed-1), embed).
        """
        tsize = series.shape[0]
        t_iter = tsize - (lag * (embed - 1))
        features = np.zeros((t_iter, embed))
        for ii in range(t_iter):
            end_val = ii + lag * (embed - 1) + 1
            features[ii, :] = series[ii:end_val][::lag]
        return features
    def embed_indices(self,lag,embed):
        """
        Gets the indices of the embedded time series. This assumes that the
        time series is sequential. Non-sequential time series are currently
        not supported.
        Parameters
        ----------
        lag : int
            lag values as calculated from the first minimum of the mutual info.
        embed : int
            embedding dimension, how many lag values to take
        """
        return self._embed_sequence(np.arange(0, self.X.shape[0]), lag, embed)
    def embed_vectors_1d(self,lag,embed):
        """
        Embeds vectors from a one dimensional time series in
        m-dimensional space.
        Parameters
        ----------
        lag : int
            lag values as calculated from the first minimum of the mutual info.
        embed : int
            embedding dimension, how many lag values to take
        Returns
        -------
        features : array of shape [num_vectors,embed]
            A 2-D array containing all of the embedded vectors
        Example
        -------
        X = [0,1,2,3,4,5,6,7,8,9,10]
        embed = 3
        lag = 2
        returns:
        features = [[0,2,4], [1,3,5], [2,4,6], [3,5,7], [4,6,8], [5,7,9], [6,8,10]]
        """
        return self._embed_sequence(self.X, lag, embed)
|
|
# BSD 3-Clause License
#
# Copyright (c) 2016-21, University of Liverpool
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Internal utility functions"""
__author__ = "Felix Simkovic"
__date__ = "16 Feb 2017"
__version__ = "0.13.1"
import numpy as np
import os
from conkit.core.contact import Contact
from conkit.core.contactmap import ContactMap
from conkit.core.contactfile import ContactFile
from conkit.core.sequence import Sequence
from conkit.core.sequencefile import SequenceFile
from conkit.core.distogram import Distogram
from conkit.core.distancefile import DistanceFile
from conkit.misc import deprecate
# Map from class-name string to the corresponding ConKit core class; used by
# _isinstance() below to resolve type checks given by name.
HierarchyIndex = {
    "Contact": Contact,
    "ContactMap": ContactMap,
    "ContactFile": ContactFile,
    "Sequence": Sequence,
    "SequenceFile": SequenceFile,
    "Distogram": Distogram,
    "DistanceFile": DistanceFile
}
class ColorDefinitions(object):
    """A class storing all color definitions for the various plots
    for fast and easy handling
    """
    # Generic / contact-map colors (hex RGB)
    GENERAL = "#000000"
    MATCH = "#0F0B2C"
    MISMATCH = "#DC4869"
    STRUCTURAL = "#D8D6D6"
    # Precision/coverage cutoff colors; PRECISION50 and FACTOR1 deliberately
    # alias the two cutoff colors.
    L5CUTOFF = "#3F4587"
    L20CUTOFF = "#B5DD2B"
    PRECISION50 = L5CUTOFF
    FACTOR1 = L20CUTOFF
    # Model-validation plot colors
    SCORE = '#3299a8'
    ERROR = '#f54242'
    CORRECT = '#40eef7'
    ALIGNED = '#3d8beb'
    MISALIGNED = '#f7ba40'
    # One color per amino-acid single-letter code ("X" = unknown)
    AA_ENCODING = {
        "A": "#882D17",
        "C": "#F3C300",
        "D": "#875692",
        "E": "#F38400",
        "F": "#A1CAF1",
        "G": "#BE0032",
        "H": "#C2B280",
        "I": "#848482",
        "K": "#008856",
        "L": "#E68FAC",
        "M": "#0067A5",
        "N": "#F99379",
        "P": "#604E97",
        "Q": "#F6A600",
        "R": "#B3446C",
        "S": "#DCD300",
        "T": "#8DB600",
        "V": "#654522",
        "W": "#E25822",
        "Y": "#2B3D26",
        "X": "#000000",
    }
def find_minima(data, order=1):
    """Find the minima in a 1-D list

    A position is a minimum when it is strictly smaller than all of its
    ``order`` neighbors on each side; the first and last elements are never
    reported.

    Parameters
    ----------
    data : list, tuple
       A list of values
    order : int, optional
       The order, i.e. number of points next to point to consider

    Returns
    -------
    list
       A list of indices for minima

    Warning
    -------
    For multi-dimensional problems, see :func:`~scipy.signal.argrelmin`.

    Raises
    ------
    :exc:`ValueError`
       Order needs to be >= 1!
    :exc:`ValueError`
       More than two elements required!
    """
    if order < 1:
        raise ValueError("Order needs to be >= 1!")
    values = np.asarray(data)
    nelements = values.shape[0]
    if nelements < 2:
        raise ValueError("More than two elements required!")
    minima = []
    for i in range(1, nelements - 1):
        # Clamp the neighborhood to the array bounds.
        lo = max(0, i - order)
        hi = min(nelements, i + order + 1)
        left_ok = bool(np.all(values[lo:i] > values[i]))
        right_ok = bool(np.all(values[i] < values[i + 1:hi]))
        if left_ok and right_ok:
            minima.append(i)
    return minima
def get_adjusted_aspect(ax, aspect_ratio):
    """Adjust the aspect ratio

    Parameters
    ----------
    ax : :obj:`~matplotlib.axes.Axes`
       A :obj:`~matplotlib.axes.Axes` instance
    aspect_ratio : float
       The desired aspect ratio for :obj:`~matplotlib.axes.Axes`

    Returns
    -------
    float
       The required aspect ratio to achieve the desired one

    Warning
    -------
    This function only works for non-logarithmic axes.
    """
    # Ratio of the data ranges currently shown on the axes.
    x_lo, x_hi = ax.get_xlim()
    y_lo, y_hi = ax.get_ylim()
    data_ratio = (x_hi - x_lo) / (y_hi - y_lo)
    return float(data_ratio * aspect_ratio)
@deprecate("0.11", msg="Use get_points_on_circle instead")
def points_on_circle(*args, **kwargs):
    """Deprecated alias for :func:`get_points_on_circle`."""
    return get_points_on_circle(*args, **kwargs)
def get_points_on_circle(radius, h=0, k=0):
    """Calculate points on a circle with even spacing

    Note the integer ``radius`` doubles as the number of points generated.

    Parameters
    ----------
    radius : int
       The radius of the circle
    h : int, optional
       The x coordinate of the origin
    k : int, optional
       The y coordinate of the origin

    Returns
    -------
    list
       The list of coordinates for each point
    """
    if radius == 0:
        return [[]]
    step = 2 * np.pi / radius
    points = np.zeros((radius, 2))
    for idx in range(radius):
        angle = step * idx
        x = round(h + radius * np.cos(angle), 6)
        y = round(k + radius * np.sin(angle), 6)
        points[idx] = [x, y]
    return points.tolist()
def get_radius_around_circle(p1, p2):
    """Obtain the radius around a given circle

    Parameters
    ----------
    p1 : list, tuple
       Point 1
    p2 : list, tuple
       Point 2 adjacent `p1`

    Returns
    -------
    float
       The radius for points so p1 and p2 do not intersect
    """
    # Euclidean separation between the two adjacent points.
    separation = np.linalg.norm(np.array(p1) - np.array(p2))
    # Half the separation, shrunk by a further 10% so neighbors never touch.
    return separation / 2.0 - separation * 0.1
def _isinstance(hierarchy, hierarchy_type):
"""Confirm the data structure to be a ConKit definition"""
if isinstance(hierarchy_type, str) and hierarchy_type in HierarchyIndex:
return isinstance(hierarchy, HierarchyIndex[hierarchy_type])
else:
return isinstance(hierarchy, hierarchy_type)
def is_executable(executable):
    """Check if a given program can be executed

    Parameters
    ----------
    executable : str
       The path or name for an executable (``None`` returns ``None``)

    Returns
    -------
    str
       The absolute path to the executable

    Raises
    ------
    ValueError
       The executable cannot be accessed

    Credits
    -------
    https://stackoverflow.com/a/377028/3046533
    """
    if executable is None:
        return
    directory, _ = os.path.split(executable)
    if directory:
        # Explicit path given: accept it only if it is an executable file.
        if os.path.isfile(executable) and os.access(executable, os.X_OK):
            return executable
    else:
        # Bare name: search every directory on PATH.
        for candidate_dir in os.environ["PATH"].split(os.pathsep):
            candidate = os.path.join(candidate_dir, executable)
            if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
                return candidate
    raise ValueError('Executable {} cannot be accessed'.format(executable))
def convolution_smooth_values(x, window=5):
    """Use convolutions to smooth a list of numeric values

    Parameters
    ----------
    x : list, tuple
       A list with the numeric values to be smoothed
    window : int
       The residue window to be used to smooth values [default: 5]

    Returns
    -------
    list
       A list with the smoothed numeric values
    """
    # Moving-average kernel; 'same' keeps the output length equal to len(x).
    kernel = np.ones(window) / window
    return np.convolve(x, kernel, mode='same')
def get_rmsd(distogram_1, distogram_2, calculate_wrmsd=True):
    """Calculate the RMSD between two different distograms

    Parameters
    ----------
    distogram_1 : :obj:`~conkit.core.distogram.Distogram`
       Distogram 1
    distogram_2 : :obj:`~conkit.core.distogram.Distogram`
       Distogram 2
    calculate_wrmsd: bool
       If True then the WRMSD is calculated using the confidence scores from distogram 1

    Returns
    -------
    tuple
       Two lists with the raw/smoothed RMSD values at each residue position
    """
    raw = Distogram.calculate_rmsd(distogram_1, distogram_2, calculate_wrmsd=calculate_wrmsd)
    # NaNs (e.g. missing residues) are zeroed before the window-10 smoothing.
    smoothed = convolution_smooth_values(np.nan_to_num(raw), 10)
    return raw, smoothed
def get_cmap_validation_metrics(model_cmap_dict, predicted_cmap_dict, sequence, absent_residues):
    """For a given observed contact map and predicted contact map calculate a series of validation metrics at each
    residue position (Accuracy, FN, FNR, FP, FPR, Sensitivity, Specificity)

    Parameters
    ----------
    model_cmap_dict : dict
       Dictionary representation of the contact map observed in the model
    predicted_cmap_dict : dict
       Dictionary representation of the predicted contact map
    sequence: :obj:`~conkit.core.sequence.Sequence`
       The sequence of the model to be validated
    absent_residues: list, tuple, set
       The residues that are missing from the model

    Returns
    -------
    tuple
       Two lists with the raw/smoothed values of each validation metric at each residue position
    """
    # Each helper takes the (tp, fp, tn, fn) counts computed per residue and
    # returns one metric value, guarding against zero denominators.
    def accuracy(tp, fp, tn, fn):
        if (tp + fp + tn + fn) > 0:
            return (tp + tn) / (tp + fp + tn + fn)
        return 0
    def fn(*args):
        # Last positional argument is the FN count itself.
        return args[-1]
    def fn_rate(tp, fp, tn, fn):
        if (fn + tn) > 0:
            return fn / (fn + tn)
        return 0
    def fp(*args):
        # Second positional argument is the FP count itself.
        return args[1]
    def fp_rate(tp, fp, tn, fn):
        if (fp + tp) > 0:
            return fp / (fp + tp)
        return 0
    def sensitivity(tp, fp, tn, fn):
        if (fn + tp) > 0:
            return tp / (fn + tp)
        return 0
    def specificity(tp, fp, tn, fn):
        if (fp + tn) > 0:
            return tn / (fp + tn)
        return 0
    nresidues = len(sequence) - len(absent_residues)
    # One list per metric, filled in the order of metrics_list below.
    cmap_metrics = [[] for i in range(7)]
    metrics_list = (accuracy, fn, fn_rate, fp, fp_rate, sensitivity, specificity)
    for resnum in sorted(predicted_cmap_dict.keys()):
        # Missing residues contribute NaN to every metric so positions stay aligned.
        if absent_residues and resnum in absent_residues:
            for metric in cmap_metrics:
                metric.append(np.nan)
            continue
        # Contacts touching absent residues are excluded from both sets.
        predicted_contact_set = {c for c in predicted_cmap_dict[resnum] if
                                 c[0] not in absent_residues and c[1] not in absent_residues}
        model_contact_set = {c for c in model_cmap_dict[resnum] if
                             c[0] not in absent_residues and c[1] not in absent_residues}
        # NOTE(review): FN is defined here as predicted-but-not-in-model and FP
        # as in-model-but-not-predicted, the reverse of the usual convention —
        # confirm this matches the intended validation semantics.
        _fn = len(predicted_contact_set - model_contact_set)
        _tp = len(predicted_contact_set & model_contact_set)
        _fp = len(model_contact_set - predicted_contact_set)
        _tn = nresidues - _fn - _tp - _fp
        for idx, metric in enumerate(metrics_list):
            cmap_metrics[idx].append(metric(_tp, _fp, _tn, _fn))
    smooth_cmap_metrics = []
    for metric in cmap_metrics:
        smooth_cmap_metrics.append(convolution_smooth_values(np.nan_to_num(metric), 5))
    return cmap_metrics, smooth_cmap_metrics
def get_zscores(model_distogram, predicted_cmap_dict, absent_residues, *metrics):
    """Calculate the Z-Scores for a series of metrics at each residue position
    using the population of residues within 10A

    Parameters
    ----------
    model_distogram : :obj:`~conkit.core.distogram.Distogram`
       Distogram of the model that will be validated
    predicted_cmap_dict : dict
       Dictionary representation of the predicted contact map
    absent_residues: list, tuple, set
       The residues that are missing from the model
    *metrics: list
       The metrics for which the Z-Scores will be calculated

    Returns
    -------
    list
       A list of lists where each sublist contains the Z-Scores for the input metrics across all the residues. The
       sublists containing the Z-Scores are ordered in the same original order as in the input *metrics
    """
    results = [[] for _ in metrics]
    for resnum in sorted(predicted_cmap_dict.keys()):
        # Missing residues get NaN so every output column stays aligned.
        if absent_residues and resnum in absent_residues:
            for column in results:
                column.append(np.nan)
            continue
        # The reference population is all residues within 10A of this one.
        neighbours = model_distogram.find_residues_within(resnum, 10)
        for metric_values, column in zip(metrics, results):
            population = [metric_values[resid - 1] for resid in neighbours]
            column.append(calculate_zscore(metric_values[resnum - 1], population))
    return results
def calculate_zscore(observed_score, population_scores):
    """Calculate the Z-Score for a given population of values. Z-Score = (score - mean) / stdev

    Parameters
    ----------
    observed_score : float
       The observed score used to calculate the Z-Score
    population_scores : list
       A list containing the scores observed across the samples in the population

    Returns
    -------
    float
       The calculated Z-Score (0 for degenerate populations: fewer than two
       samples, or zero spread)
    """
    if len(population_scores) < 2:
        return 0
    spread = np.std(population_scores).astype(float)
    if spread == 0:
        return 0
    centre = np.mean(population_scores).astype(float)
    return (observed_score - centre) / spread
def get_residue_ranges(numbers):
    """Given a list of integers, creates a list of (start, end) ranges of the
    near-consecutive numbers found in the list.

    Two neighbors are kept in the same range unless they differ by more than 3
    (small gaps are bridged).

    Parameters
    ----------
    numbers: list
       A list of integers

    Returns
    ------
    list
       A list with the ranges of near-consecutive numbers found in the list
    """
    ordered = sorted(set(numbers))
    # Collect the boundary pair around every gap wider than 3.
    breaks = []
    for left, right in zip(ordered, ordered[1:]):
        if left + 3 < right:
            breaks.extend([left, right])
    # Interleave first element, gap boundaries, and last element, then pair
    # them up into (start, end) tuples.
    boundary_iter = iter(ordered[:1] + breaks + ordered[-1:])
    return list(zip(boundary_iter, boundary_iter))
def parse_map_align_stdout(stdout):
    """Parse the stdout of map_align and extract the alignment of residues.

    Parameters
    ----------
    stdout : str
       Standard output created with map_align

    Returns
    ------
    dict
       A dictionary where aligned residue numbers in map_b are the keys and residue numbers in map_a values. Only
       misaligned regions are included.
    """
    alignment = {}
    for raw_line in stdout.split('\n'):
        if not raw_line:
            continue
        if raw_line.split()[0] != "MAX":
            continue
        # Tokens from position 8 onward are "resA:resB" residue pairs.
        for token in raw_line.strip().split()[8:]:
            parts = token.split(":")
            # Identical pairs are aligned already and therefore skipped.
            if parts[0] != parts[1]:
                alignment[int(parts[1])] = int(parts[0])
    return alignment
|
|
import functools
import tensorflow as tf
import numpy as np
import time
def lazy_property(function):
    """Decorator turning a zero-argument method into a cached read-only property.

    The first access runs ``function`` and stores the result on the instance
    under ``_<name>``; later accesses return the stored value.
    """
    cache_name = '_' + function.__name__
    @property
    @functools.wraps(function)
    def wrapper(self):
        if not hasattr(self, cache_name):
            setattr(self, cache_name, function(self))
        return getattr(self, cache_name)
    return wrapper
class SequenceClassification:
    """Multi-layer LSTM sequence classifier built on the TF1 graph API.

    Each ``lazy_property`` below adds its ops to the graph once, on first
    access; ``__init__`` touches them all so the full graph is built eagerly.
    """
    def __init__(self, data, target, dropout=0.2, num_hidden=200, num_layers=2):
        # data: input sequence placeholder; target: one-hot label placeholder.
        self.data = data
        self.target = target
        self.dropout = dropout
        self._num_hidden = num_hidden
        self._num_layers = num_layers
        # Touch every lazy property so all graph ops are created up front.
        self.prediction
        self.prediction_label
        self.cost
        self.optimize
        self.error
        self.confusion_matrix
    def rnn_cell(self):
        # One LSTM cell with output dropout; keep probability = 1 - dropout.
        cell = tf.contrib.rnn.LSTMCell(self._num_hidden) # GRUCell/LSTMCell
        cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=1.0-self.dropout)
        return cell
    @lazy_property
    def prediction(self):
        """Softmax class probabilities from the last RNN time step."""
        # Recurrent network.
        # network = tf.contrib.rnn.LSTMCell(self._num_hidden)
        # network = tf.contrib.rnn.DropoutWrapper(network, output_keep_prob=self.dropout)
        network = tf.contrib.rnn.MultiRNNCell([self.rnn_cell() for _ in range(self._num_layers)])
        output, _ = tf.nn.dynamic_rnn(network, self.data, dtype=tf.float32)
        # Select last output.
        output = tf.transpose(output, [1, 0, 2])
        last = tf.gather(output, int(output.get_shape()[0]) - 1)
        # Softmax layer.
        # weight, bias = self._weight_and_bias(self._num_hidden, int(self.target.get_shape()[1]))
        # prediction = tf.nn.softmax(tf.matmul(last, weight) + bias)
        out_size = self.target.get_shape()[1].value
        # logit = tf.contrib.layers.fully_connected(last, out_size, activation_fn=None)
        logit = tf.layers.dense(last, out_size)
        prediction = tf.nn.softmax(logit)
        return prediction
    @lazy_property
    def prediction_label(self):
        """Predicted class index (argmax over the softmax output)."""
        return tf.argmax(self.prediction, 1)
    @lazy_property
    def cost(self):
        """Cross-entropy summed over the batch."""
        cross_entropy = -tf.reduce_sum(self.target * tf.log(self.prediction))
        return cross_entropy
    @lazy_property
    def optimize(self):
        """Training op minimizing :attr:`cost` with Adadelta."""
        learning_rate = 0.1 # 0.001
        optimizer = tf.train.AdadeltaOptimizer(learning_rate) # AdadeltaOptimizer,RMSPropOptimizer
        return optimizer.minimize(self.cost)
    @lazy_property
    def error(self):
        """Fraction of batch samples whose argmax label is wrong (error rate)."""
        mistakes = tf.not_equal(tf.argmax(self.target, 1), tf.argmax(self.prediction, 1))
        return tf.reduce_mean(tf.cast(mistakes, tf.float32))
    @lazy_property
    def confusion_matrix(self):
        """Confusion matrix op over true vs. predicted class indices."""
        cm = tf.confusion_matrix(tf.argmax(self.target, 1), tf.argmax(self.prediction, 1), int(self.target.get_shape()[1]))
        return cm
    @staticmethod
    def _weight_and_bias(in_size, out_size):
        # Unused by the current dense-layer path; kept for the commented-out
        # manual softmax variant above.
        weight = tf.truncated_normal([in_size, out_size], stddev=0.01)
        bias = tf.constant(0.1, shape=[out_size])
        return tf.Variable(weight), tf.Variable(bias)
def get_csv_data(source, size):
    """Build TF1 queue-based ops that read `size` CSV rows from `source`.

    Each row is: user_id, '>'-separated binary history string, class label
    (see the sample rows in the comment below). Returns two graph tensors:
    stacked float feature sequences and int labels — values are produced only
    when the ops run inside a session with queue runners started.
    """
    filename_queue = tf.train.string_input_producer([source])
    # reader = tf.TableRecordReader()
    reader = tf.TextLineReader()
    features = None
    labels = None
    for i in range(size):
        key, value = reader.read(filename_queue)
        # Three string columns with empty-string defaults.
        record_defaults = [[''], [''], ['']]
        user_id, travel_by_air_history, max_travel_by_air_in7days = tf.decode_csv(value, record_defaults=record_defaults)
        # Split the '>'-separated history into individual string tokens.
        str_travel_by_air_historys = tf.string_split([travel_by_air_history], '>')
        # tf.sparse_reorder(str_travel_by_air_historys)
        num_travel_historys = tf.string_to_number(str_travel_by_air_historys.values, out_type=tf.float32)
        num_max_travel_level_in7days = tf.string_to_number(max_travel_by_air_in7days, out_type=tf.int32)
        # Accumulate one row per read op by concatenating along axis 0.
        if features is None:
            features = [num_travel_historys]
        else:
            features = tf.concat([features, [num_travel_historys]], 0)
        if labels is None:
            labels = [num_max_travel_level_in7days]
        else:
            labels = tf.concat([labels, [num_max_travel_level_in7days]], 0)
    return features, labels
# user_id,binary_sequence(fixed sequence length),class
# 1,1>0>1>1>0>0>0>0>...>0,1
# 2,0>1>0>1>0>0>0>0>...>0,0
# ...
# 99,0>1>0>1>0>0>0>0>...>0,0
def print_tf_vars(tf_sess, names):
    """Print trainable variables and their current values.

    ``names`` is either None (print everything) or a string; a variable is
    printed when its name occurs as a substring of ``names``.
    """
    trainable = [variable.name for variable in tf.trainable_variables()]
    evaluated = tf_sess.run(trainable)
    for var_name, var_value in zip(trainable, evaluated):
        if names is None or names.find(var_name) >= 0:
            print(var_name, var_value)
def main():
    """Train and evaluate the RNN classifier on queue-fed CSV data (TF1)."""
    # Model and data-shape hyperparameters.
    num_classes = 3
    num_sequence = 120
    num_feature = 1
    num_epoch = 1
    batch_size = 100
    num_iteration_train = 10000
    num_iteration_valid = 10
    num_iteration_test = 100
    num_iteration_show = 100
    train_file = "tf_train.csv"
    test_file = "tf_test.csv"
    # Placeholders fed per batch; None = variable batch dimension.
    data = tf.placeholder(tf.float32, [None, num_sequence, num_feature])
    target = tf.placeholder(tf.float32, [None, num_classes])
    # dropout = tf.placeholder(tf.float32)
    model = SequenceClassification(data, target)  # , dropout
    train_feature_batch, train_label_batch = get_csv_data(train_file, batch_size)
    train_label_batch = tf.one_hot(train_label_batch, num_classes)
    test_feature_batch, test_label_batch = get_csv_data(test_file, batch_size)
    test_label_batch = tf.one_hot(test_label_batch, num_classes)
    with tf.Session() as sess:
        # Start populating the filename queue.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        sess.run(tf.global_variables_initializer())
        for epoch in range(num_epoch):
            for iteration in range(num_iteration_train):
                # Materialize one queue batch, reshape to (batch, seq, feature).
                _train_feature_batch, _train_label_batch = sess.run([train_feature_batch, train_label_batch])
                _train_feature_batch2 = np.reshape(_train_feature_batch, (batch_size, num_sequence, num_feature))
                sess.run(model.optimize, {data: _train_feature_batch2, target: _train_label_batch})
                if iteration % num_iteration_show == 0:
                    # Periodic progress report: training loss + validation accuracy.
                    now_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
                    loss = sess.run(model.cost, {data: _train_feature_batch2, target: _train_label_batch})
                    error_sum = 0.0
                    # predict = sess.run(model.prediction, {data: _train_feature_batch2, target: _train_label_batch})
                    for iteration_v in range(num_iteration_valid):
                        _test_feature_batch, _test_label_batch = sess.run([test_feature_batch, test_label_batch])
                        _test_feature_batch2 = np.reshape(_test_feature_batch, (batch_size, num_sequence, num_feature))
                        error_sum += sess.run(model.error, {data: _test_feature_batch2, target: _test_label_batch})
                    error_batch = error_sum / num_iteration_valid
                    print('{:s}: iteration:{:2d} loss:{:f} accuracy:{:f}'.format(now_time, iteration + 1, loss, 1 - error_batch))
                    # print_tf_vars(sess, "u'rnn/multi_rnn_cell/cell_1/gru_cell/candidate/weights:0'")
                    # print_tf_vars(sess, "u'rnn/multi_rnn_cell/cell_3/lstm_cell/weights:0")
            print('Epoch:{:d} training finished with {:d} iterations'.format(epoch + 1, iteration + 1))
        # Final evaluation: accumulate error rate and confusion matrix over the test set.
        error_sum = 0.0
        cm = None
        for iteration_t in range(num_iteration_test):
            _test_feature_batch, _test_label_batch = sess.run([test_feature_batch, test_label_batch])
            _test_feature_batch2 = np.reshape(_test_feature_batch, (batch_size, num_sequence, num_feature))
            error_sum += sess.run(model.error, {data: _test_feature_batch2, target: _test_label_batch})
            # predict = sess.run(model.prediction, {data: _test_feature_batch2, target: _test_label_batch})
            _cm = sess.run(model.confusion_matrix, {data: _test_feature_batch2, target: _test_label_batch})
            if cm is None:
                cm = _cm
            else:
                cm = np.add(cm, _cm)
        error_batch = error_sum / num_iteration_test
        # NOTE(review): now_time here is the timestamp of the *last* progress
        # report inside the training loop, not the evaluation time.
        print('{:s}: accuracy:{:f} confusion_matrix: '.format(now_time, 1 - error_batch))
        print(np.matrix(cm))
        coord.request_stop()
        coord.join(threads)
if __name__ == '__main__':
    main()
|
|
import io
import os
import sys
import imp
import time
import shutil
import struct
import zipfile
import unittest
from tempfile import TemporaryFile
from random import randint, random, getrandbits
from test.support import (TESTFN, findfile, unlink,
requires_zlib, requires_bz2, requires_lzma,
captured_stdout)
# Scratch-file names derived from test.support's TESTFN.
TESTFN2 = TESTFN + "2"
TESTFNDIR = TESTFN + "d"
# Number of generated payload lines shared by the source-file tests.
FIXEDTEST_SIZE = 1000
DATAFILES_DIR = 'zipfile_datafiles'
# (archive-relative path, payload) pairs covering nested directories.
SMALL_TEST_DATA = [('_ziptest1', '1q2w3e4r5t'),
                   ('ziptest2dir/_ziptest2', 'qawsedrftg'),
                   ('ziptest2dir/ziptest3dir/_ziptest3', 'azsxdcfvgb'),
                   ('ziptest2dir/ziptest3dir/ziptest4dir/_ziptest3', '6y7u8i9o0p')]
def get_files(test):
    """Yield the different kinds of archive targets the tests exercise:
    a plain file name, a real temporary file object, and an in-memory
    bytes buffer. After each file-object round-trip, verify the zipfile
    code did not close the caller's stream.
    """
    yield TESTFN2
    with TemporaryFile() as tmp:
        yield tmp
        test.assertFalse(tmp.closed)
    with io.BytesIO() as buf:
        yield buf
        test.assertFalse(buf.closed)
class AbstractTestsWithSourceFile:
    @classmethod
    def setUpClass(cls):
        # Build the shared payload once: FIXEDTEST_SIZE ascii lines, each
        # carrying its index and a random float so archives are non-trivial.
        cls.line_gen = [bytes("Zipfile test line %d. random float: %f\n" %
                              (i, random()), "ascii")
                        for i in range(FIXEDTEST_SIZE)]
        cls.data = b''.join(cls.line_gen)
    def setUp(self):
        """Write the shared payload to TESTFN before each test."""
        # Make a source file with some lines
        with open(TESTFN, "wb") as fp:
            fp.write(self.data)
    def make_test_archive(self, f, compression):
        """Create an archive on `f` holding TESTFN twice (under two names)
        plus the payload written via writestr."""
        # Create the ZIP archive
        with zipfile.ZipFile(f, "w", compression) as zipfp:
            zipfp.write(TESTFN, "another.name")
            zipfp.write(TESTFN, TESTFN)
            zipfp.writestr("strfile", self.data)
    def zip_test(self, f, compression):
        """Round-trip an archive and verify read(), printdir(), namelist(),
        infolist(), getinfo() and testzip() all agree on its contents."""
        self.make_test_archive(f, compression)
        # Read the ZIP archive
        with zipfile.ZipFile(f, "r", compression) as zipfp:
            self.assertEqual(zipfp.read(TESTFN), self.data)
            self.assertEqual(zipfp.read("another.name"), self.data)
            self.assertEqual(zipfp.read("strfile"), self.data)
            # Print the ZIP directory
            fp = io.StringIO()
            zipfp.printdir(file=fp)
            directory = fp.getvalue()
            lines = directory.splitlines()
            self.assertEqual(len(lines), 4) # Number of files + header
            self.assertIn('File Name', lines[0])
            self.assertIn('Modified', lines[0])
            self.assertIn('Size', lines[0])
            fn, date, time_, size = lines[1].split()
            self.assertEqual(fn, 'another.name')
            self.assertTrue(time.strptime(date, '%Y-%m-%d'))
            self.assertTrue(time.strptime(time_, '%H:%M:%S'))
            self.assertEqual(size, str(len(self.data)))
            # Check the namelist
            names = zipfp.namelist()
            self.assertEqual(len(names), 3)
            self.assertIn(TESTFN, names)
            self.assertIn("another.name", names)
            self.assertIn("strfile", names)
            # Check infolist
            infos = zipfp.infolist()
            names = [i.filename for i in infos]
            self.assertEqual(len(names), 3)
            self.assertIn(TESTFN, names)
            self.assertIn("another.name", names)
            self.assertIn("strfile", names)
            for i in infos:
                self.assertEqual(i.file_size, len(self.data))
            # check getinfo
            for nm in (TESTFN, "another.name", "strfile"):
                info = zipfp.getinfo(nm)
                self.assertEqual(info.filename, nm)
                self.assertEqual(info.file_size, len(self.data))
            # Check that testzip doesn't raise an exception
            zipfp.testzip()
    def test_basic(self):
        """Run zip_test against every target kind yielded by get_files()."""
        for f in get_files(self):
            self.zip_test(f, self.compression)
def zip_open_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r", compression) as zipfp:
zipdata1 = []
with zipfp.open(TESTFN) as zipopen1:
while True:
read_data = zipopen1.read(256)
if not read_data:
break
zipdata1.append(read_data)
zipdata2 = []
with zipfp.open("another.name") as zipopen2:
while True:
read_data = zipopen2.read(256)
if not read_data:
break
zipdata2.append(read_data)
self.assertEqual(b''.join(zipdata1), self.data)
self.assertEqual(b''.join(zipdata2), self.data)
    def test_open(self):
        """Run zip_open_test against every target kind yielded by get_files()."""
        for f in get_files(self):
            self.zip_open_test(f, self.compression)
def zip_random_open_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r", compression) as zipfp:
zipdata1 = []
with zipfp.open(TESTFN) as zipopen1:
while True:
read_data = zipopen1.read(randint(1, 1024))
if not read_data:
break
zipdata1.append(read_data)
self.assertEqual(b''.join(zipdata1), self.data)
def test_random_open(self):
for f in get_files(self):
self.zip_random_open_test(f, self.compression)
def zip_read1_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp, \
zipfp.open(TESTFN) as zipopen:
zipdata = []
while True:
read_data = zipopen.read1(-1)
if not read_data:
break
zipdata.append(read_data)
self.assertEqual(b''.join(zipdata), self.data)
def test_read1(self):
for f in get_files(self):
self.zip_read1_test(f, self.compression)
def zip_read1_10_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp, \
zipfp.open(TESTFN) as zipopen:
zipdata = []
while True:
read_data = zipopen.read1(10)
self.assertLessEqual(len(read_data), 10)
if not read_data:
break
zipdata.append(read_data)
self.assertEqual(b''.join(zipdata), self.data)
def test_read1_10(self):
for f in get_files(self):
self.zip_read1_10_test(f, self.compression)
def zip_readline_read_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp, \
zipfp.open(TESTFN) as zipopen:
data = b''
while True:
read = zipopen.readline()
if not read:
break
data += read
read = zipopen.read(100)
if not read:
break
data += read
self.assertEqual(data, self.data)
def test_readline_read(self):
# Issue #7610: calls to readline() interleaved with calls to read().
for f in get_files(self):
self.zip_readline_read_test(f, self.compression)
def zip_readline_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp:
with zipfp.open(TESTFN) as zipopen:
for line in self.line_gen:
linedata = zipopen.readline()
self.assertEqual(linedata, line)
def test_readline(self):
for f in get_files(self):
self.zip_readline_test(f, self.compression)
def zip_readlines_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp:
with zipfp.open(TESTFN) as zipopen:
ziplines = zipopen.readlines()
for line, zipline in zip(self.line_gen, ziplines):
self.assertEqual(zipline, line)
def test_readlines(self):
for f in get_files(self):
self.zip_readlines_test(f, self.compression)
def zip_iterlines_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp:
with zipfp.open(TESTFN) as zipopen:
for line, zipline in zip(self.line_gen, zipopen):
self.assertEqual(zipline, line)
def test_iterlines(self):
for f in get_files(self):
self.zip_iterlines_test(f, self.compression)
def test_low_compression(self):
"""Check for cases where compressed data is larger than original."""
# Create the ZIP archive
with zipfile.ZipFile(TESTFN2, "w", self.compression) as zipfp:
zipfp.writestr("strfile", '12')
# Get an open object for strfile
with zipfile.ZipFile(TESTFN2, "r", self.compression) as zipfp:
with zipfp.open("strfile") as openobj:
self.assertEqual(openobj.read(1), b'1')
self.assertEqual(openobj.read(1), b'2')
def test_writestr_compression(self):
zipfp = zipfile.ZipFile(TESTFN2, "w")
zipfp.writestr("b.txt", "hello world", compress_type=self.compression)
info = zipfp.getinfo('b.txt')
self.assertEqual(info.compress_type, self.compression)
def test_read_return_size(self):
# Issue #9837: ZipExtFile.read() shouldn't return more bytes
# than requested.
for test_size in (1, 4095, 4096, 4097, 16384):
file_size = test_size + 1
junk = getrandbits(8 * file_size).to_bytes(file_size, 'little')
with zipfile.ZipFile(io.BytesIO(), "w", self.compression) as zipf:
zipf.writestr('foo', junk)
with zipf.open('foo', 'r') as fp:
buf = fp.read(test_size)
self.assertEqual(len(buf), test_size)
def test_truncated_zipfile(self):
fp = io.BytesIO()
with zipfile.ZipFile(fp, mode='w') as zipf:
zipf.writestr('strfile', self.data, compress_type=self.compression)
end_offset = fp.tell()
zipfiledata = fp.getvalue()
fp = io.BytesIO(zipfiledata)
with zipfile.ZipFile(fp) as zipf:
with zipf.open('strfile') as zipopen:
fp.truncate(end_offset - 20)
with self.assertRaises(EOFError):
zipopen.read()
fp = io.BytesIO(zipfiledata)
with zipfile.ZipFile(fp) as zipf:
with zipf.open('strfile') as zipopen:
fp.truncate(end_offset - 20)
with self.assertRaises(EOFError):
while zipopen.read(100):
pass
fp = io.BytesIO(zipfiledata)
with zipfile.ZipFile(fp) as zipf:
with zipf.open('strfile') as zipopen:
fp.truncate(end_offset - 20)
with self.assertRaises(EOFError):
while zipopen.read1(100):
pass
def tearDown(self):
unlink(TESTFN)
unlink(TESTFN2)
class StoredTestsWithSourceFile(AbstractTestsWithSourceFile,
                                unittest.TestCase):
    """Source-file round-trip tests using the STORED (uncompressed) method."""
    compression = zipfile.ZIP_STORED
    # STORED output can never exceed the input, so the inherited
    # low-compression test does not apply; disable it.
    test_low_compression = None
    def zip_test_writestr_permissions(self, f, compression):
        # Make sure that writestr creates files with mode 0600,
        # when it is passed a name rather than a ZipInfo instance.
        self.make_test_archive(f, compression)
        with zipfile.ZipFile(f, "r") as zipfp:
            zinfo = zipfp.getinfo('strfile')
            self.assertEqual(zinfo.external_attr, 0o600 << 16)
    def test_writestr_permissions(self):
        """Run the permission check against every file variant."""
        for f in get_files(self):
            self.zip_test_writestr_permissions(f, zipfile.ZIP_STORED)
    def test_absolute_arcnames(self):
        """A leading '/' in the arcname must be stripped on write."""
        with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
            zipfp.write(TESTFN, "/absolute")
        with zipfile.ZipFile(TESTFN2, "r", zipfile.ZIP_STORED) as zipfp:
            self.assertEqual(zipfp.namelist(), ["absolute"])
    def test_append_to_zip_file(self):
        """Test appending to an existing zipfile."""
        with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
            zipfp.write(TESTFN, TESTFN)
        with zipfile.ZipFile(TESTFN2, "a", zipfile.ZIP_STORED) as zipfp:
            zipfp.writestr("strfile", self.data)
            self.assertEqual(zipfp.namelist(), [TESTFN, "strfile"])
    def test_append_to_non_zip_file(self):
        """Test appending to an existing file that is not a zipfile."""
        # NOTE: this test fails if len(d) < 22 because of the first
        # line "fpin.seek(-22, 2)" in _EndRecData
        data = b'I am not a ZipFile!'*10
        with open(TESTFN2, 'wb') as f:
            f.write(data)
        with zipfile.ZipFile(TESTFN2, "a", zipfile.ZIP_STORED) as zipfp:
            zipfp.write(TESTFN, TESTFN)
        # The archive should begin where the pre-existing data ends.
        with open(TESTFN2, 'rb') as f:
            f.seek(len(data))
            with zipfile.ZipFile(f, "r") as zipfp:
                self.assertEqual(zipfp.namelist(), [TESTFN])
    def test_ignores_newline_at_end(self):
        # Trailing newline/NUL garbage after the archive must not break
        # end-of-central-directory detection.
        with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
            zipfp.write(TESTFN, TESTFN)
        with open(TESTFN2, 'a') as f:
            f.write("\r\n\00\00\00")
        with zipfile.ZipFile(TESTFN2, "r") as zipfp:
            self.assertIsInstance(zipfp, zipfile.ZipFile)
    def test_ignores_stuff_appended_past_comments(self):
        # Data appended after the archive comment must be ignored and the
        # comment must still be recovered intact.
        with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
            zipfp.comment = b"this is a comment"
            zipfp.write(TESTFN, TESTFN)
        with open(TESTFN2, 'a') as f:
            f.write("abcdef\r\n")
        with zipfile.ZipFile(TESTFN2, "r") as zipfp:
            self.assertIsInstance(zipfp, zipfile.ZipFile)
            self.assertEqual(zipfp.comment, b"this is a comment")
    def test_write_default_name(self):
        """Check that calling ZipFile.write without arcname specified
        produces the expected result."""
        with zipfile.ZipFile(TESTFN2, "w") as zipfp:
            zipfp.write(TESTFN)
            with open(TESTFN, "rb") as f:
                self.assertEqual(zipfp.read(TESTFN), f.read())
    def test_write_to_readonly(self):
        """Check that trying to call write() on a readonly ZipFile object
        raises a RuntimeError."""
        with zipfile.ZipFile(TESTFN2, mode="w") as zipfp:
            zipfp.writestr("somefile.txt", "bogus")
        with zipfile.ZipFile(TESTFN2, mode="r") as zipfp:
            self.assertRaises(RuntimeError, zipfp.write, TESTFN)
    def test_add_file_before_1980(self):
        # ZIP timestamps cannot represent dates before 1980.
        # Set atime and mtime to 1970-01-01
        os.utime(TESTFN, (0, 0))
        with zipfile.ZipFile(TESTFN2, "w") as zipfp:
            self.assertRaises(ValueError, zipfp.write, TESTFN)
@requires_zlib
class DeflateTestsWithSourceFile(AbstractTestsWithSourceFile,
                                 unittest.TestCase):
    """Source-file round-trip tests using DEFLATE compression."""
    compression = zipfile.ZIP_DEFLATED

    def test_per_file_compression(self):
        """Check that files within a Zip archive can have different
        compression options."""
        with zipfile.ZipFile(TESTFN2, "w") as zipfp:
            zipfp.write(TESTFN, 'storeme', zipfile.ZIP_STORED)
            zipfp.write(TESTFN, 'deflateme', zipfile.ZIP_DEFLATED)
            expectations = (('storeme', zipfile.ZIP_STORED),
                            ('deflateme', zipfile.ZIP_DEFLATED))
            for arcname, method in expectations:
                self.assertEqual(zipfp.getinfo(arcname).compress_type, method)
@requires_bz2
class Bzip2TestsWithSourceFile(AbstractTestsWithSourceFile,
                               unittest.TestCase):
    """Source-file round-trip tests using BZIP2 compression."""
    compression = zipfile.ZIP_BZIP2
@requires_lzma
class LzmaTestsWithSourceFile(AbstractTestsWithSourceFile,
                              unittest.TestCase):
    """Source-file round-trip tests using LZMA compression."""
    compression = zipfile.ZIP_LZMA
class AbstractTestZip64InSmallFiles:
    # These tests test the ZIP64 functionality without using large files,
    # see test_zipfile64 for proper tests.
    @classmethod
    def setUpClass(cls):
        # Build a small multi-line payload shared by all tests.
        line_gen = (bytes("Test of zipfile line %d." % i, "ascii")
                    for i in range(0, FIXEDTEST_SIZE))
        cls.data = b'\n'.join(line_gen)
    def setUp(self):
        # Shrink the ZIP64 threshold so small files trigger the ZIP64
        # code paths; the original limit is restored in tearDown.
        self._limit = zipfile.ZIP64_LIMIT
        zipfile.ZIP64_LIMIT = 5
        # Make a source file with some lines
        with open(TESTFN, "wb") as fp:
            fp.write(self.data)
    def zip_test(self, f, compression):
        """Write three members with allowZip64 and verify reads, printdir
        output, namelist, infolist, getinfo and testzip."""
        # Create the ZIP archive
        with zipfile.ZipFile(f, "w", compression, allowZip64=True) as zipfp:
            zipfp.write(TESTFN, "another.name")
            zipfp.write(TESTFN, TESTFN)
            zipfp.writestr("strfile", self.data)
        # Read the ZIP archive
        with zipfile.ZipFile(f, "r", compression) as zipfp:
            self.assertEqual(zipfp.read(TESTFN), self.data)
            self.assertEqual(zipfp.read("another.name"), self.data)
            self.assertEqual(zipfp.read("strfile"), self.data)
            # Print the ZIP directory
            fp = io.StringIO()
            zipfp.printdir(fp)
            directory = fp.getvalue()
            lines = directory.splitlines()
            self.assertEqual(len(lines), 4) # Number of files + header
            self.assertIn('File Name', lines[0])
            self.assertIn('Modified', lines[0])
            self.assertIn('Size', lines[0])
            # First data row: name, ISO date, time, size.
            fn, date, time_, size = lines[1].split()
            self.assertEqual(fn, 'another.name')
            self.assertTrue(time.strptime(date, '%Y-%m-%d'))
            self.assertTrue(time.strptime(time_, '%H:%M:%S'))
            self.assertEqual(size, str(len(self.data)))
            # Check the namelist
            names = zipfp.namelist()
            self.assertEqual(len(names), 3)
            self.assertIn(TESTFN, names)
            self.assertIn("another.name", names)
            self.assertIn("strfile", names)
            # Check infolist
            infos = zipfp.infolist()
            names = [i.filename for i in infos]
            self.assertEqual(len(names), 3)
            self.assertIn(TESTFN, names)
            self.assertIn("another.name", names)
            self.assertIn("strfile", names)
            for i in infos:
                self.assertEqual(i.file_size, len(self.data))
            # check getinfo
            for nm in (TESTFN, "another.name", "strfile"):
                info = zipfp.getinfo(nm)
                self.assertEqual(info.filename, nm)
                self.assertEqual(info.file_size, len(self.data))
            # Check that testzip doesn't raise an exception
            zipfp.testzip()
    def test_basic(self):
        # Run the round trip against every supported file variant.
        for f in get_files(self):
            self.zip_test(f, self.compression)
    def tearDown(self):
        # Restore the patched module-level ZIP64 threshold.
        zipfile.ZIP64_LIMIT = self._limit
        unlink(TESTFN)
        unlink(TESTFN2)
class StoredTestZip64InSmallFiles(AbstractTestZip64InSmallFiles,
                                  unittest.TestCase):
    """ZIP64 small-file tests using the STORED method."""
    compression = zipfile.ZIP_STORED

    def large_file_exception_test(self, f, compression):
        """write() past ZIP64_LIMIT without allowZip64 must fail."""
        with zipfile.ZipFile(f, "w", compression) as zipfp:
            with self.assertRaises(zipfile.LargeZipFile):
                zipfp.write(TESTFN, "another.name")

    def large_file_exception_test2(self, f, compression):
        """writestr() past ZIP64_LIMIT without allowZip64 must fail."""
        with zipfile.ZipFile(f, "w", compression) as zipfp:
            with self.assertRaises(zipfile.LargeZipFile):
                zipfp.writestr("another.name", self.data)

    def test_large_file_exception(self):
        for archive in get_files(self):
            self.large_file_exception_test(archive, zipfile.ZIP_STORED)
            self.large_file_exception_test2(archive, zipfile.ZIP_STORED)

    def test_absolute_arcnames(self):
        """A leading '/' in the arcname is stripped even with ZIP64."""
        with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED,
                             allowZip64=True) as zipfp:
            zipfp.write(TESTFN, "/absolute")
        with zipfile.ZipFile(TESTFN2, "r", zipfile.ZIP_STORED) as zipfp:
            self.assertEqual(zipfp.namelist(), ["absolute"])
@requires_zlib
class DeflateTestZip64InSmallFiles(AbstractTestZip64InSmallFiles,
                                   unittest.TestCase):
    """ZIP64 small-file tests using DEFLATE compression."""
    compression = zipfile.ZIP_DEFLATED
@requires_bz2
class Bzip2TestZip64InSmallFiles(AbstractTestZip64InSmallFiles,
                                 unittest.TestCase):
    """ZIP64 small-file tests using BZIP2 compression."""
    compression = zipfile.ZIP_BZIP2
@requires_lzma
class LzmaTestZip64InSmallFiles(AbstractTestZip64InSmallFiles,
                                unittest.TestCase):
    """ZIP64 small-file tests using LZMA compression."""
    compression = zipfile.ZIP_LZMA
class PyZipFileTests(unittest.TestCase):
    """Tests for PyZipFile.writepy(): packaging Python sources as bytecode."""
    def assertCompiledIn(self, name, namelist):
        # Accept either a .pyc or a .pyo entry for the given source name,
        # since which one is produced depends on the interpreter's
        # optimization level.
        if name + 'o' not in namelist:
            self.assertIn(name + 'c', namelist)
    def test_write_pyfile(self):
        """writepy() on a single module must store bytecode, not source."""
        with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
            fn = __file__
            if fn.endswith('.pyc') or fn.endswith('.pyo'):
                # Map a cached bytecode path back to its source file;
                # fall back to stripping the trailing 'c'/'o' for the
                # legacy non-__pycache__ layout.
                path_split = fn.split(os.sep)
                if os.altsep is not None:
                    path_split.extend(fn.split(os.altsep))
                if '__pycache__' in path_split:
                    fn = imp.source_from_cache(fn)
                else:
                    fn = fn[:-1]
            zipfp.writepy(fn)
            bn = os.path.basename(fn)
            self.assertNotIn(bn, zipfp.namelist())
            self.assertCompiledIn(bn, zipfp.namelist())
        # Same check, but placing the module under a basename prefix.
        with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
            fn = __file__
            if fn.endswith(('.pyc', '.pyo')):
                fn = fn[:-1]
            zipfp.writepy(fn, "testpackage")
            bn = "%s/%s" % ("testpackage", os.path.basename(fn))
            self.assertNotIn(bn, zipfp.namelist())
            self.assertCompiledIn(bn, zipfp.namelist())
    def test_write_python_package(self):
        """writepy() on a package directory must recurse into subpackages."""
        import email
        packagedir = os.path.dirname(email.__file__)
        with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
            zipfp.writepy(packagedir)
            # Check for a couple of modules at different levels of the
            # hierarchy
            names = zipfp.namelist()
            self.assertCompiledIn('email/__init__.py', names)
            self.assertCompiledIn('email/mime/text.py', names)
    def test_write_with_optimization(self):
        """The optimize= flag must control the bytecode suffix written."""
        import email
        packagedir = os.path.dirname(email.__file__)
        # use .pyc if running test in optimization mode,
        # use .pyo if running test in debug mode
        optlevel = 1 if __debug__ else 0
        ext = '.pyo' if optlevel == 1 else '.pyc'
        with TemporaryFile() as t, \
             zipfile.PyZipFile(t, "w", optimize=optlevel) as zipfp:
            zipfp.writepy(packagedir)
            names = zipfp.namelist()
            self.assertIn('email/__init__' + ext, names)
            self.assertIn('email/mime/text' + ext, names)
    def test_write_python_directory(self):
        """writepy() on a plain directory must take .py files and skip others."""
        os.mkdir(TESTFN2)
        try:
            with open(os.path.join(TESTFN2, "mod1.py"), "w") as fp:
                fp.write("print(42)\n")
            with open(os.path.join(TESTFN2, "mod2.py"), "w") as fp:
                fp.write("print(42 * 42)\n")
            with open(os.path.join(TESTFN2, "mod2.txt"), "w") as fp:
                fp.write("bla bla bla\n")
            with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
                zipfp.writepy(TESTFN2)
                names = zipfp.namelist()
                self.assertCompiledIn('mod1.py', names)
                self.assertCompiledIn('mod2.py', names)
                self.assertNotIn('mod2.txt', names)
        finally:
            shutil.rmtree(TESTFN2)
    def test_write_non_pyfile(self):
        """writepy() must reject files that are not Python source."""
        with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
            with open(TESTFN, 'w') as f:
                f.write('most definitely not a python file')
            self.assertRaises(RuntimeError, zipfp.writepy, TESTFN)
            os.remove(TESTFN)
    def test_write_pyfile_bad_syntax(self):
        """A module that fails to compile is stored as source instead."""
        os.mkdir(TESTFN2)
        try:
            with open(os.path.join(TESTFN2, "mod1.py"), "w") as fp:
                fp.write("Bad syntax in python file\n")
            with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
                # syntax errors are printed to stdout
                with captured_stdout() as s:
                    zipfp.writepy(os.path.join(TESTFN2, "mod1.py"))
                self.assertIn("SyntaxError", s.getvalue())
                # as it will not have compiled the python file, it will
                # include the .py file not .pyc or .pyo
                names = zipfp.namelist()
                self.assertIn('mod1.py', names)
                self.assertNotIn('mod1.pyc', names)
                self.assertNotIn('mod1.pyo', names)
        finally:
            shutil.rmtree(TESTFN2)
class ExtractTests(unittest.TestCase):
    """Tests for extract()/extractall(), including hostile arcname handling."""
    def test_extract(self):
        """extract() must write each member to the expected path."""
        with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
            for fpath, fdata in SMALL_TEST_DATA:
                zipfp.writestr(fpath, fdata)
        with zipfile.ZipFile(TESTFN2, "r") as zipfp:
            for fpath, fdata in SMALL_TEST_DATA:
                writtenfile = zipfp.extract(fpath)
                # make sure it was written to the right place
                correctfile = os.path.join(os.getcwd(), fpath)
                correctfile = os.path.normpath(correctfile)
                self.assertEqual(writtenfile, correctfile)
                # make sure correct data is in correct file
                with open(writtenfile, "rb") as f:
                    self.assertEqual(fdata.encode(), f.read())
                os.remove(writtenfile)
        # remove the test file subdirectories
        shutil.rmtree(os.path.join(os.getcwd(), 'ziptest2dir'))
    def test_extract_all(self):
        """extractall() must reproduce every member under the cwd."""
        with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
            for fpath, fdata in SMALL_TEST_DATA:
                zipfp.writestr(fpath, fdata)
        with zipfile.ZipFile(TESTFN2, "r") as zipfp:
            zipfp.extractall()
            for fpath, fdata in SMALL_TEST_DATA:
                outfile = os.path.join(os.getcwd(), fpath)
                with open(outfile, "rb") as f:
                    self.assertEqual(fdata.encode(), f.read())
                os.remove(outfile)
        # remove the test file subdirectories
        shutil.rmtree(os.path.join(os.getcwd(), 'ziptest2dir'))
    def check_file(self, filename, content):
        # Helper: assert the file exists and holds exactly `content`.
        self.assertTrue(os.path.isfile(filename))
        with open(filename, 'rb') as f:
            self.assertEqual(f.read(), content)
    def test_sanitize_windows_name(self):
        """Illegal Windows filename characters must be replaced with '_'."""
        san = zipfile.ZipFile._sanitize_windows_name
        # Passing pathsep in allows this test to work regardless of platform.
        self.assertEqual(san(r',,?,C:,foo,bar/z', ','), r'_,C_,foo,bar/z')
        self.assertEqual(san(r'a\b,c<d>e|f"g?h*i', ','), r'a\b,c_d_e_f_g_h_i')
        self.assertEqual(san('../../foo../../ba..r', '/'), r'foo/ba..r')
    def test_extract_hackers_arcnames_common_cases(self):
        # Path-traversal arcnames that must be neutralized on every platform.
        common_hacknames = [
            ('../foo/bar', 'foo/bar'),
            ('foo/../bar', 'foo/bar'),
            ('foo/../../bar', 'foo/bar'),
            ('foo/bar/..', 'foo/bar'),
            ('./../foo/bar', 'foo/bar'),
            ('/foo/bar', 'foo/bar'),
            ('/foo/../bar', 'foo/bar'),
            ('/foo/../../bar', 'foo/bar'),
        ]
        self._test_extract_hackers_arcnames(common_hacknames)
    @unittest.skipIf(os.path.sep != '\\', 'Requires \\ as path separator.')
    def test_extract_hackers_arcnames_windows_only(self):
        """Test combination of path fixing and windows name sanitization."""
        windows_hacknames = [
            (r'..\foo\bar', 'foo/bar'),
            (r'..\/foo\/bar', 'foo/bar'),
            (r'foo/\..\/bar', 'foo/bar'),
            (r'foo\/../\bar', 'foo/bar'),
            (r'C:foo/bar', 'foo/bar'),
            (r'C:/foo/bar', 'foo/bar'),
            (r'C://foo/bar', 'foo/bar'),
            (r'C:\foo\bar', 'foo/bar'),
            (r'//conky/mountpoint/foo/bar', 'foo/bar'),
            (r'\\conky\mountpoint\foo\bar', 'foo/bar'),
            (r'///conky/mountpoint/foo/bar', 'conky/mountpoint/foo/bar'),
            (r'\\\conky\mountpoint\foo\bar', 'conky/mountpoint/foo/bar'),
            (r'//conky//mountpoint/foo/bar', 'conky/mountpoint/foo/bar'),
            (r'\\conky\\mountpoint\foo\bar', 'conky/mountpoint/foo/bar'),
            (r'//?/C:/foo/bar', 'foo/bar'),
            (r'\\?\C:\foo\bar', 'foo/bar'),
            (r'C:/../C:/foo/bar', 'C_/foo/bar'),
            (r'a:b\c<d>e|f"g?h*i', 'b/c_d_e_f_g_h_i'),
            ('../../foo../../ba..r', 'foo/ba..r'),
        ]
        self._test_extract_hackers_arcnames(windows_hacknames)
    @unittest.skipIf(os.path.sep != '/', r'Requires / as path separator.')
    def test_extract_hackers_arcnames_posix_only(self):
        # On POSIX, backslashes are ordinary filename characters.
        posix_hacknames = [
            ('//foo/bar', 'foo/bar'),
            ('../../foo../../ba..r', 'foo../ba..r'),
            (r'foo/..\bar', r'foo/..\bar'),
        ]
        self._test_extract_hackers_arcnames(posix_hacknames)
    def _test_extract_hackers_arcnames(self, hacknames):
        # For each (hostile arcname, sanitized result) pair, verify both
        # extract() and extractall() write to the sanitized path, with and
        # without an explicit target directory.
        for arcname, fixedname in hacknames:
            content = b'foobar' + arcname.encode()
            with zipfile.ZipFile(TESTFN2, 'w', zipfile.ZIP_STORED) as zipfp:
                zinfo = zipfile.ZipInfo()
                # preserve backslashes
                zinfo.filename = arcname
                zinfo.external_attr = 0o600 << 16
                zipfp.writestr(zinfo, content)
            arcname = arcname.replace(os.sep, "/")
            targetpath = os.path.join('target', 'subdir', 'subsub')
            correctfile = os.path.join(targetpath, *fixedname.split('/'))
            with zipfile.ZipFile(TESTFN2, 'r') as zipfp:
                writtenfile = zipfp.extract(arcname, targetpath)
                self.assertEqual(writtenfile, correctfile,
                                 msg='extract %r: %r != %r' %
                                 (arcname, writtenfile, correctfile))
            self.check_file(correctfile, content)
            shutil.rmtree('target')
            with zipfile.ZipFile(TESTFN2, 'r') as zipfp:
                zipfp.extractall(targetpath)
            self.check_file(correctfile, content)
            shutil.rmtree('target')
            correctfile = os.path.join(os.getcwd(), *fixedname.split('/'))
            with zipfile.ZipFile(TESTFN2, 'r') as zipfp:
                writtenfile = zipfp.extract(arcname)
                self.assertEqual(writtenfile, correctfile,
                                 msg="extract %r" % arcname)
            self.check_file(correctfile, content)
            shutil.rmtree(fixedname.split('/')[0])
            with zipfile.ZipFile(TESTFN2, 'r') as zipfp:
                zipfp.extractall()
            self.check_file(correctfile, content)
            shutil.rmtree(fixedname.split('/')[0])
            os.remove(TESTFN2)
class OtherTests(unittest.TestCase):
def test_open_via_zip_info(self):
# Create the ZIP archive
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
zipfp.writestr("name", "foo")
with self.assertWarns(UserWarning):
zipfp.writestr("name", "bar")
self.assertEqual(zipfp.namelist(), ["name"] * 2)
with zipfile.ZipFile(TESTFN2, "r") as zipfp:
infos = zipfp.infolist()
data = b""
for info in infos:
with zipfp.open(info) as zipopen:
data += zipopen.read()
self.assertIn(data, {b"foobar", b"barfoo"})
data = b""
for info in infos:
data += zipfp.read(info)
self.assertIn(data, {b"foobar", b"barfoo"})
def test_universal_readaheads(self):
f = io.BytesIO()
data = b'a\r\n' * 16 * 1024
with zipfile.ZipFile(f, 'w', zipfile.ZIP_STORED) as zipfp:
zipfp.writestr(TESTFN, data)
data2 = b''
with zipfile.ZipFile(f, 'r') as zipfp, \
zipfp.open(TESTFN, 'rU') as zipopen:
for line in zipopen:
data2 += line
self.assertEqual(data, data2.replace(b'\n', b'\r\n'))
def test_writestr_extended_local_header_issue1202(self):
with zipfile.ZipFile(TESTFN2, 'w') as orig_zip:
for data in 'abcdefghijklmnop':
zinfo = zipfile.ZipInfo(data)
zinfo.flag_bits |= 0x08 # Include an extended local header.
orig_zip.writestr(zinfo, data)
def test_close(self):
"""Check that the zipfile is closed after the 'with' block."""
with zipfile.ZipFile(TESTFN2, "w") as zipfp:
for fpath, fdata in SMALL_TEST_DATA:
zipfp.writestr(fpath, fdata)
self.assertIsNotNone(zipfp.fp, 'zipfp is not open')
self.assertIsNone(zipfp.fp, 'zipfp is not closed')
with zipfile.ZipFile(TESTFN2, "r") as zipfp:
self.assertIsNotNone(zipfp.fp, 'zipfp is not open')
self.assertIsNone(zipfp.fp, 'zipfp is not closed')
def test_close_on_exception(self):
"""Check that the zipfile is closed if an exception is raised in the
'with' block."""
with zipfile.ZipFile(TESTFN2, "w") as zipfp:
for fpath, fdata in SMALL_TEST_DATA:
zipfp.writestr(fpath, fdata)
try:
with zipfile.ZipFile(TESTFN2, "r") as zipfp2:
raise zipfile.BadZipFile()
except zipfile.BadZipFile:
self.assertIsNone(zipfp2.fp, 'zipfp is not closed')
def test_unsupported_version(self):
# File has an extract_version of 120
data = (b'PK\x03\x04x\x00\x00\x00\x00\x00!p\xa1@\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00xPK\x01\x02x\x03x\x00\x00\x00\x00'
b'\x00!p\xa1@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00\x00xPK\x05\x06'
b'\x00\x00\x00\x00\x01\x00\x01\x00/\x00\x00\x00\x1f\x00\x00\x00\x00\x00')
self.assertRaises(NotImplementedError, zipfile.ZipFile,
io.BytesIO(data), 'r')
@requires_zlib
def test_read_unicode_filenames(self):
# bug #10801
fname = findfile('zip_cp437_header.zip')
with zipfile.ZipFile(fname) as zipfp:
for name in zipfp.namelist():
zipfp.open(name).close()
def test_write_unicode_filenames(self):
with zipfile.ZipFile(TESTFN, "w") as zf:
zf.writestr("foo.txt", "Test for unicode filename")
zf.writestr("\xf6.txt", "Test for unicode filename")
self.assertIsInstance(zf.infolist()[0].filename, str)
with zipfile.ZipFile(TESTFN, "r") as zf:
self.assertEqual(zf.filelist[0].filename, "foo.txt")
self.assertEqual(zf.filelist[1].filename, "\xf6.txt")
def test_create_non_existent_file_for_append(self):
if os.path.exists(TESTFN):
os.unlink(TESTFN)
filename = 'testfile.txt'
content = b'hello, world. this is some content.'
try:
with zipfile.ZipFile(TESTFN, 'a') as zf:
zf.writestr(filename, content)
except IOError:
self.fail('Could not append data to a non-existent zip file.')
self.assertTrue(os.path.exists(TESTFN))
with zipfile.ZipFile(TESTFN, 'r') as zf:
self.assertEqual(zf.read(filename), content)
def test_close_erroneous_file(self):
# This test checks that the ZipFile constructor closes the file object
# it opens if there's an error in the file. If it doesn't, the
# traceback holds a reference to the ZipFile object and, indirectly,
# the file object.
# On Windows, this causes the os.unlink() call to fail because the
# underlying file is still open. This is SF bug #412214.
#
with open(TESTFN, "w") as fp:
fp.write("this is not a legal zip file\n")
try:
zf = zipfile.ZipFile(TESTFN)
except zipfile.BadZipFile:
pass
def test_is_zip_erroneous_file(self):
"""Check that is_zipfile() correctly identifies non-zip files."""
# - passing a filename
with open(TESTFN, "w") as fp:
fp.write("this is not a legal zip file\n")
self.assertFalse(zipfile.is_zipfile(TESTFN))
# - passing a file object
with open(TESTFN, "rb") as fp:
self.assertFalse(zipfile.is_zipfile(fp))
# - passing a file-like object
fp = io.BytesIO()
fp.write(b"this is not a legal zip file\n")
self.assertFalse(zipfile.is_zipfile(fp))
fp.seek(0, 0)
self.assertFalse(zipfile.is_zipfile(fp))
def test_damaged_zipfile(self):
"""Check that zipfiles with missing bytes at the end raise BadZipFile."""
# - Create a valid zip file
fp = io.BytesIO()
with zipfile.ZipFile(fp, mode="w") as zipf:
zipf.writestr("foo.txt", b"O, for a Muse of Fire!")
zipfiledata = fp.getvalue()
# - Now create copies of it missing the last N bytes and make sure
# a BadZipFile exception is raised when we try to open it
for N in range(len(zipfiledata)):
fp = io.BytesIO(zipfiledata[:N])
self.assertRaises(zipfile.BadZipFile, zipfile.ZipFile, fp)
def test_is_zip_valid_file(self):
"""Check that is_zipfile() correctly identifies zip files."""
# - passing a filename
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
zipf.writestr("foo.txt", b"O, for a Muse of Fire!")
self.assertTrue(zipfile.is_zipfile(TESTFN))
# - passing a file object
with open(TESTFN, "rb") as fp:
self.assertTrue(zipfile.is_zipfile(fp))
fp.seek(0, 0)
zip_contents = fp.read()
# - passing a file-like object
fp = io.BytesIO()
fp.write(zip_contents)
self.assertTrue(zipfile.is_zipfile(fp))
fp.seek(0, 0)
self.assertTrue(zipfile.is_zipfile(fp))
def test_non_existent_file_raises_IOError(self):
# make sure we don't raise an AttributeError when a partially-constructed
# ZipFile instance is finalized; this tests for regression on SF tracker
# bug #403871.
# The bug we're testing for caused an AttributeError to be raised
# when a ZipFile instance was created for a file that did not
# exist; the .fp member was not initialized but was needed by the
# __del__() method. Since the AttributeError is in the __del__(),
# it is ignored, but the user should be sufficiently annoyed by
# the message on the output that regression will be noticed
# quickly.
self.assertRaises(IOError, zipfile.ZipFile, TESTFN)
def test_empty_file_raises_BadZipFile(self):
f = open(TESTFN, 'w')
f.close()
self.assertRaises(zipfile.BadZipFile, zipfile.ZipFile, TESTFN)
with open(TESTFN, 'w') as fp:
fp.write("short file")
self.assertRaises(zipfile.BadZipFile, zipfile.ZipFile, TESTFN)
def test_closed_zip_raises_RuntimeError(self):
"""Verify that testzip() doesn't swallow inappropriate exceptions."""
data = io.BytesIO()
with zipfile.ZipFile(data, mode="w") as zipf:
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
# This is correct; calling .read on a closed ZipFile should raise
# a RuntimeError, and so should calling .testzip. An earlier
# version of .testzip would swallow this exception (and any other)
# and report that the first file in the archive was corrupt.
self.assertRaises(RuntimeError, zipf.read, "foo.txt")
self.assertRaises(RuntimeError, zipf.open, "foo.txt")
self.assertRaises(RuntimeError, zipf.testzip)
self.assertRaises(RuntimeError, zipf.writestr, "bogus.txt", "bogus")
with open(TESTFN, 'w') as f:
f.write('zipfile test data')
self.assertRaises(RuntimeError, zipf.write, TESTFN)
def test_bad_constructor_mode(self):
"""Check that bad modes passed to ZipFile constructor are caught."""
self.assertRaises(RuntimeError, zipfile.ZipFile, TESTFN, "q")
def test_bad_open_mode(self):
"""Check that bad modes passed to ZipFile.open are caught."""
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN, mode="r") as zipf:
# read the data to make sure the file is there
zipf.read("foo.txt")
self.assertRaises(RuntimeError, zipf.open, "foo.txt", "q")
def test_read0(self):
"""Check that calling read(0) on a ZipExtFile object returns an empty
string and doesn't advance file pointer."""
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
# read the data to make sure the file is there
with zipf.open("foo.txt") as f:
for i in range(FIXEDTEST_SIZE):
self.assertEqual(f.read(0), b'')
self.assertEqual(f.read(), b"O, for a Muse of Fire!")
def test_open_non_existent_item(self):
"""Check that attempting to call open() for an item that doesn't
exist in the archive raises a RuntimeError."""
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
self.assertRaises(KeyError, zipf.open, "foo.txt", "r")
def test_bad_compression_mode(self):
"""Check that bad compression methods passed to ZipFile.open are
caught."""
self.assertRaises(RuntimeError, zipfile.ZipFile, TESTFN, "w", -1)
def test_unsupported_compression(self):
# data is declared as shrunk, but actually deflated
data = (b'PK\x03\x04.\x00\x00\x00\x01\x00\xe4C\xa1@\x00\x00\x00'
b'\x00\x02\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00x\x03\x00PK\x01'
b'\x02.\x03.\x00\x00\x00\x01\x00\xe4C\xa1@\x00\x00\x00\x00\x02\x00\x00'
b'\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x80\x01\x00\x00\x00\x00xPK\x05\x06\x00\x00\x00\x00\x01\x00\x01\x00'
b'/\x00\x00\x00!\x00\x00\x00\x00\x00')
with zipfile.ZipFile(io.BytesIO(data), 'r') as zipf:
self.assertRaises(NotImplementedError, zipf.open, 'x')
def test_null_byte_in_filename(self):
"""Check that a filename containing a null byte is properly
terminated."""
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
zipf.writestr("foo.txt\x00qqq", b"O, for a Muse of Fire!")
self.assertEqual(zipf.namelist(), ['foo.txt'])
def test_struct_sizes(self):
"""Check that ZIP internal structure sizes are calculated correctly."""
self.assertEqual(zipfile.sizeEndCentDir, 22)
self.assertEqual(zipfile.sizeCentralDir, 46)
self.assertEqual(zipfile.sizeEndCentDir64, 56)
self.assertEqual(zipfile.sizeEndCentDir64Locator, 20)
def test_comments(self):
"""Check that comments on the archive are handled properly."""
# check default comment is empty
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
self.assertEqual(zipf.comment, b'')
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN, mode="r") as zipfr:
self.assertEqual(zipfr.comment, b'')
# check a simple short comment
comment = b'Bravely taking to his feet, he beat a very brave retreat.'
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
zipf.comment = comment
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN, mode="r") as zipfr:
self.assertEqual(zipf.comment, comment)
# check a comment of max length
comment2 = ''.join(['%d' % (i**3 % 10) for i in range((1 << 16)-1)])
comment2 = comment2.encode("ascii")
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
zipf.comment = comment2
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN, mode="r") as zipfr:
self.assertEqual(zipfr.comment, comment2)
# check a comment that is too long is truncated
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
with self.assertWarns(UserWarning):
zipf.comment = comment2 + b'oops'
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN, mode="r") as zipfr:
self.assertEqual(zipfr.comment, comment2)
# check that comments are correctly modified in append mode
with zipfile.ZipFile(TESTFN,mode="w") as zipf:
zipf.comment = b"original comment"
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN,mode="a") as zipf:
zipf.comment = b"an updated comment"
with zipfile.ZipFile(TESTFN,mode="r") as zipf:
self.assertEqual(zipf.comment, b"an updated comment")
# check that comments are correctly shortened in append mode
with zipfile.ZipFile(TESTFN,mode="w") as zipf:
zipf.comment = b"original comment that's longer"
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN,mode="a") as zipf:
zipf.comment = b"shorter comment"
with zipfile.ZipFile(TESTFN,mode="r") as zipf:
self.assertEqual(zipf.comment, b"shorter comment")
def test_unicode_comment(self):
    """Assigning a str (rather than bytes) comment must raise TypeError."""
    with zipfile.ZipFile(TESTFN, "w", zipfile.ZIP_STORED) as archive:
        archive.writestr("foo.txt", "O, for a Muse of Fire!")
        with self.assertRaises(TypeError):
            archive.comment = "this is an error"
def test_change_comment_in_empty_archive(self):
    """A comment set on an empty archive opened in append mode persists."""
    comment = b"this is a comment"
    with zipfile.ZipFile(TESTFN, "a", zipfile.ZIP_STORED) as archive:
        self.assertFalse(archive.filelist)
        archive.comment = comment
    with zipfile.ZipFile(TESTFN, "r") as archive:
        self.assertEqual(archive.comment, comment)
def test_change_comment_in_nonempty_archive(self):
    """A comment set while appending to a populated archive persists."""
    comment = b"this is a comment"
    with zipfile.ZipFile(TESTFN, "w", zipfile.ZIP_STORED) as archive:
        archive.writestr("foo.txt", "O, for a Muse of Fire!")
    with zipfile.ZipFile(TESTFN, "a", zipfile.ZIP_STORED) as archive:
        self.assertTrue(archive.filelist)
        archive.comment = comment
    with zipfile.ZipFile(TESTFN, "r") as archive:
        self.assertEqual(archive.comment, comment)
def test_empty_zipfile(self):
    # Check that creating a file in 'w' or 'a' mode and closing without
    # adding any files to the archives creates a valid empty ZIP file
    zipfile.ZipFile(TESTFN, mode="w").close()
    try:
        zipf = zipfile.ZipFile(TESTFN, mode="r")
    except zipfile.BadZipFile:
        self.fail("Unable to create empty ZIP file in 'w' mode")
    else:
        zipf.close()
    zipfile.ZipFile(TESTFN, mode="a").close()
    try:
        zipf = zipfile.ZipFile(TESTFN, mode="r")
    # Was a bare 'except:', which also swallowed unrelated errors such as
    # KeyboardInterrupt; only BadZipFile indicates an invalid archive here.
    except zipfile.BadZipFile:
        self.fail("Unable to create empty ZIP file in 'a' mode")
    else:
        zipf.close()
def test_open_empty_file(self):
    # Issue 1710703: Check that opening a file with less than 22 bytes
    # (the size of an end-of-central-directory record) raises a
    # BadZipFile exception (rather than the previously unhelpful IOError).
    # Use a context manager instead of the bare open()/close() pair.
    with open(TESTFN, 'w'):
        pass  # create an empty (zero-byte) file
    self.assertRaises(zipfile.BadZipFile, zipfile.ZipFile, TESTFN, 'r')
def test_create_zipinfo_before_1980(self):
self.assertRaises(ValueError,
zipfile.ZipInfo, 'seventies', (1979, 1, 1, 0, 0, 0))
def tearDown(self):
    # Remove both scratch files regardless of which test ran.
    for scratch in (TESTFN, TESTFN2):
        unlink(scratch)
class AbstractBadCrcTests:
    """Shared checks for archives whose members carry a wrong CRC-32.

    Concrete subclasses provide ``compression`` and ``zip_with_bad_crc``.
    """

    def test_testzip_with_bad_crc(self):
        """testzip() must report the name of the first corrupt member."""
        corrupt = io.BytesIO(self.zip_with_bad_crc)
        with zipfile.ZipFile(corrupt, mode="r") as zipf:
            # testzip returns the name of the first corrupt file, or None
            self.assertEqual('afile', zipf.testzip())

    def test_read_with_bad_crc(self):
        """Reading a member with a bad CRC must raise BadZipFile."""
        zipdata = self.zip_with_bad_crc
        # Whole-member read via ZipFile.read()
        with zipfile.ZipFile(io.BytesIO(zipdata), mode="r") as zipf:
            self.assertRaises(zipfile.BadZipFile, zipf.read, 'afile')
        # Whole-member read via ZipExtFile.read()
        with zipfile.ZipFile(io.BytesIO(zipdata), mode="r") as zipf:
            with zipf.open('afile', 'r') as corrupt_file:
                self.assertRaises(zipfile.BadZipFile, corrupt_file.read)
        # Small incremental reads, to exercise the buffering logic.
        with zipfile.ZipFile(io.BytesIO(zipdata), mode="r") as zipf:
            with zipf.open('afile', 'r') as corrupt_file:
                corrupt_file.MIN_READ_SIZE = 2
                with self.assertRaises(zipfile.BadZipFile):
                    while corrupt_file.read(2):
                        pass
# Concrete bad-CRC fixtures: each subclass supplies a hand-crafted archive
# containing a single member 'afile' whose CRC-32 field does not match the
# member's data.
class StoredBadCrcTests(AbstractBadCrcTests, unittest.TestCase):
    compression = zipfile.ZIP_STORED
    # Uncompressed archive bytes with a deliberately wrong CRC-32.
    zip_with_bad_crc = (
        b'PK\003\004\024\0\0\0\0\0 \213\212;:r'
        b'\253\377\f\0\0\0\f\0\0\0\005\0\0\000af'
        b'ilehello,AworldP'
        b'K\001\002\024\003\024\0\0\0\0\0 \213\212;:'
        b'r\253\377\f\0\0\0\f\0\0\0\005\0\0\0\0'
        b'\0\0\0\0\0\0\0\200\001\0\0\0\000afi'
        b'lePK\005\006\0\0\0\0\001\0\001\0003\000'
        b'\0\0/\0\0\0\0\0')

# DEFLATE variant; note the literal b'FAKE' occupying the CRC field.
@requires_zlib
class DeflateBadCrcTests(AbstractBadCrcTests, unittest.TestCase):
    compression = zipfile.ZIP_DEFLATED
    zip_with_bad_crc = (
        b'PK\x03\x04\x14\x00\x00\x00\x08\x00n}\x0c=FA'
        b'KE\x10\x00\x00\x00n\x00\x00\x00\x05\x00\x00\x00af'
        b'ile\xcbH\xcd\xc9\xc9W(\xcf/\xcaI\xc9\xa0'
        b'=\x13\x00PK\x01\x02\x14\x03\x14\x00\x00\x00\x08\x00n'
        b'}\x0c=FAKE\x10\x00\x00\x00n\x00\x00\x00\x05'
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00'
        b'\x00afilePK\x05\x06\x00\x00\x00\x00\x01\x00'
        b'\x01\x003\x00\x00\x003\x00\x00\x00\x00\x00')

# BZIP2 variant (same b'FAKE' CRC trick).
@requires_bz2
class Bzip2BadCrcTests(AbstractBadCrcTests, unittest.TestCase):
    compression = zipfile.ZIP_BZIP2
    zip_with_bad_crc = (
        b'PK\x03\x04\x14\x03\x00\x00\x0c\x00nu\x0c=FA'
        b'KE8\x00\x00\x00n\x00\x00\x00\x05\x00\x00\x00af'
        b'ileBZh91AY&SY\xd4\xa8\xca'
        b'\x7f\x00\x00\x0f\x11\x80@\x00\x06D\x90\x80 \x00 \xa5'
        b'P\xd9!\x03\x03\x13\x13\x13\x89\xa9\xa9\xc2u5:\x9f'
        b'\x8b\xb9"\x9c(HjTe?\x80PK\x01\x02\x14'
        b'\x03\x14\x03\x00\x00\x0c\x00nu\x0c=FAKE8'
        b'\x00\x00\x00n\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00'
        b'\x00 \x80\x80\x81\x00\x00\x00\x00afilePK'
        b'\x05\x06\x00\x00\x00\x00\x01\x00\x01\x003\x00\x00\x00[\x00'
        b'\x00\x00\x00\x00')

# LZMA variant (same b'FAKE' CRC trick).
@requires_lzma
class LzmaBadCrcTests(AbstractBadCrcTests, unittest.TestCase):
    compression = zipfile.ZIP_LZMA
    zip_with_bad_crc = (
        b'PK\x03\x04\x14\x03\x00\x00\x0e\x00nu\x0c=FA'
        b'KE\x1b\x00\x00\x00n\x00\x00\x00\x05\x00\x00\x00af'
        b'ile\t\x04\x05\x00]\x00\x00\x00\x04\x004\x19I'
        b'\xee\x8d\xe9\x17\x89:3`\tq!.8\x00PK'
        b'\x01\x02\x14\x03\x14\x03\x00\x00\x0e\x00nu\x0c=FA'
        b'KE\x1b\x00\x00\x00n\x00\x00\x00\x05\x00\x00\x00\x00\x00'
        b'\x00\x00\x00\x00 \x80\x80\x81\x00\x00\x00\x00afil'
        b'ePK\x05\x06\x00\x00\x00\x00\x01\x00\x01\x003\x00\x00'
        b'\x00>\x00\x00\x00\x00\x00')
class DecryptionTests(unittest.TestCase):
    """Check that ZIP decryption works. Since the library does not
    support encryption at the moment, we use a pre-generated encrypted
    ZIP file."""

    # Archive holding 'test.txt', ZipCrypto-encrypted; the matching
    # password is b"python" (see test_good_password).
    data = (
        b'PK\x03\x04\x14\x00\x01\x00\x00\x00n\x92i.#y\xef?&\x00\x00\x00\x1a\x00'
        b'\x00\x00\x08\x00\x00\x00test.txt\xfa\x10\xa0gly|\xfa-\xc5\xc0=\xf9y'
        b'\x18\xe0\xa8r\xb3Z}Lg\xbc\xae\xf9|\x9b\x19\xe4\x8b\xba\xbb)\x8c\xb0\xdbl'
        b'PK\x01\x02\x14\x00\x14\x00\x01\x00\x00\x00n\x92i.#y\xef?&\x00\x00\x00'
        b'\x1a\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x01\x00 \x00\xb6\x81'
        b'\x00\x00\x00\x00test.txtPK\x05\x06\x00\x00\x00\x00\x01\x00\x01\x006\x00'
        b'\x00\x00L\x00\x00\x00\x00\x00' )
    # Archive holding 'zero' (512 NUL bytes); password is b"12345".
    data2 = (
        b'PK\x03\x04\x14\x00\t\x00\x08\x00\xcf}38xu\xaa\xb2\x14\x00\x00\x00\x00\x02'
        b'\x00\x00\x04\x00\x15\x00zeroUT\t\x00\x03\xd6\x8b\x92G\xda\x8b\x92GUx\x04'
        b'\x00\xe8\x03\xe8\x03\xc7<M\xb5a\xceX\xa3Y&\x8b{oE\xd7\x9d\x8c\x98\x02\xc0'
        b'PK\x07\x08xu\xaa\xb2\x14\x00\x00\x00\x00\x02\x00\x00PK\x01\x02\x17\x03'
        b'\x14\x00\t\x00\x08\x00\xcf}38xu\xaa\xb2\x14\x00\x00\x00\x00\x02\x00\x00'
        b'\x04\x00\r\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa4\x81\x00\x00\x00\x00ze'
        b'roUT\x05\x00\x03\xd6\x8b\x92GUx\x00\x00PK\x05\x06\x00\x00\x00\x00\x01'
        b'\x00\x01\x00?\x00\x00\x00[\x00\x00\x00\x00\x00' )

    # Expected plaintexts of the two encrypted members.
    plain = b'zipfile.py encryption test'
    plain2 = b'\x00'*512

    def setUp(self):
        # Write both fixture archives to disk and open them for reading.
        with open(TESTFN, "wb") as fp:
            fp.write(self.data)
        self.zip = zipfile.ZipFile(TESTFN, "r")
        with open(TESTFN2, "wb") as fp:
            fp.write(self.data2)
        self.zip2 = zipfile.ZipFile(TESTFN2, "r")

    def tearDown(self):
        self.zip.close()
        os.unlink(TESTFN)
        self.zip2.close()
        os.unlink(TESTFN2)

    def test_no_password(self):
        # Reading the encrypted file without a password
        # must raise RuntimeError.
        self.assertRaises(RuntimeError, self.zip.read, "test.txt")
        self.assertRaises(RuntimeError, self.zip2.read, "zero")

    def test_bad_password(self):
        # A wrong password must also raise RuntimeError.
        self.zip.setpassword(b"perl")
        self.assertRaises(RuntimeError, self.zip.read, "test.txt")
        self.zip2.setpassword(b"perl")
        self.assertRaises(RuntimeError, self.zip2.read, "zero")

    @requires_zlib
    def test_good_password(self):
        # With the right passwords, both members decrypt to their plaintexts.
        self.zip.setpassword(b"python")
        self.assertEqual(self.zip.read("test.txt"), self.plain)
        self.zip2.setpassword(b"12345")
        self.assertEqual(self.zip2.read("zero"), self.plain2)

    def test_unicode_password(self):
        # Passwords must be bytes; str passwords raise TypeError everywhere.
        self.assertRaises(TypeError, self.zip.setpassword, "unicode")
        self.assertRaises(TypeError, self.zip.read, "test.txt", "python")
        self.assertRaises(TypeError, self.zip.open, "test.txt", pwd="python")
        self.assertRaises(TypeError, self.zip.extract, "test.txt", pwd="python")
class AbstractTestsWithRandomBinaryFiles:
    """Round-trip tests over a random binary payload.

    Subclasses provide ``compression``.
    """

    @classmethod
    def setUpClass(cls):
        # 16-64 KiB worth of packed floats, plus an odd-sized tail.
        count = randint(16, 64)*1024 + randint(1, 1024)
        cls.data = b''.join(struct.pack('<f', random()*randint(-1000, 1000))
                            for i in range(count))

    def setUp(self):
        # Make a source file holding the payload.
        with open(TESTFN, "wb") as fp:
            fp.write(self.data)

    def tearDown(self):
        for name in (TESTFN, TESTFN2):
            unlink(name)

    def make_test_archive(self, f, compression):
        # Archive the payload twice, under two member names.
        with zipfile.ZipFile(f, "w", compression) as zipfp:
            zipfp.write(TESTFN, "another.name")
            zipfp.write(TESTFN, TESTFN)

    def _drain(self, fp, next_size):
        """Read *fp* to EOF, sizing each read with next_size(); return bytes."""
        pieces = []
        while True:
            piece = fp.read(next_size())
            if not piece:
                return b''.join(pieces)
            pieces.append(piece)

    def zip_test(self, f, compression):
        self.make_test_archive(f, compression)
        # Whole-member reads must reproduce the payload exactly.
        with zipfile.ZipFile(f, "r", compression) as zipfp:
            testdata = zipfp.read(TESTFN)
            self.assertEqual(len(testdata), len(self.data))
            self.assertEqual(testdata, self.data)
            self.assertEqual(zipfp.read("another.name"), self.data)

    def test_read(self):
        for f in get_files(self):
            self.zip_test(f, self.compression)

    def zip_open_test(self, f, compression):
        self.make_test_archive(f, compression)
        # Fixed-size (256-byte) incremental reads through ZipFile.open().
        with zipfile.ZipFile(f, "r", compression) as zipfp:
            with zipfp.open(TESTFN) as member:
                testdata1 = self._drain(member, lambda: 256)
            with zipfp.open("another.name") as member:
                testdata2 = self._drain(member, lambda: 256)
        self.assertEqual(len(testdata1), len(self.data))
        self.assertEqual(testdata1, self.data)
        self.assertEqual(len(testdata2), len(self.data))
        self.assertEqual(testdata2, self.data)

    def test_open(self):
        for f in get_files(self):
            self.zip_open_test(f, self.compression)

    def zip_random_open_test(self, f, compression):
        self.make_test_archive(f, compression)
        # Randomly-sized incremental reads.
        with zipfile.ZipFile(f, "r", compression) as zipfp:
            with zipfp.open(TESTFN) as member:
                testdata = self._drain(member, lambda: randint(1, 1024))
        self.assertEqual(len(testdata), len(self.data))
        self.assertEqual(testdata, self.data)

    def test_random_open(self):
        for f in get_files(self):
            self.zip_random_open_test(f, self.compression)
# Concrete parameterizations of the random-binary round-trip tests,
# one per supported compression method.
class StoredTestsWithRandomBinaryFiles(AbstractTestsWithRandomBinaryFiles,
                                       unittest.TestCase):
    compression = zipfile.ZIP_STORED

@requires_zlib
class DeflateTestsWithRandomBinaryFiles(AbstractTestsWithRandomBinaryFiles,
                                        unittest.TestCase):
    compression = zipfile.ZIP_DEFLATED

@requires_bz2
class Bzip2TestsWithRandomBinaryFiles(AbstractTestsWithRandomBinaryFiles,
                                      unittest.TestCase):
    compression = zipfile.ZIP_BZIP2

@requires_lzma
class LzmaTestsWithRandomBinaryFiles(AbstractTestsWithRandomBinaryFiles,
                                     unittest.TestCase):
    compression = zipfile.ZIP_LZMA
@requires_zlib
class TestsWithMultipleOpens(unittest.TestCase):
    """Several concurrent ZipFile.open() handles must not interfere."""

    def setUp(self):
        # Two deflated members with easily distinguished contents.
        with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_DEFLATED) as zipfp:
            zipfp.writestr('ones', '1'*FIXEDTEST_SIZE)
            zipfp.writestr('twos', '2'*FIXEDTEST_SIZE)

    def _interleaved_reads(self, first, second):
        """Open two members and alternate 500-byte reads between them."""
        with zipfile.ZipFile(TESTFN2, mode="r") as zipf:
            with zipf.open(first) as h1, zipf.open(second) as h2:
                data1 = h1.read(500)
                data2 = h2.read(500)
                data1 += h1.read(500)
                data2 += h2.read(500)
        return data1, data2

    def test_same_file(self):
        # Two handles on the same member must see identical bytes.
        data1, data2 = self._interleaved_reads('ones', 'ones')
        self.assertEqual(data1, data2)

    def test_different_file(self):
        # Handles on different members must each see their own data.
        data1, data2 = self._interleaved_reads('ones', 'twos')
        self.assertEqual(data1, b'1'*FIXEDTEST_SIZE)
        self.assertEqual(data2, b'2'*FIXEDTEST_SIZE)

    def test_interleaved(self):
        # Alternating reads across two members must not cross-contaminate.
        data1, data2 = self._interleaved_reads('ones', 'twos')
        self.assertEqual(data1, b'1'*FIXEDTEST_SIZE)
        self.assertEqual(data2, b'2'*FIXEDTEST_SIZE)

    def tearDown(self):
        unlink(TESTFN2)
class TestWithDirectory(unittest.TestCase):
    """Behaviour of directory entries during extraction and storage."""

    def setUp(self):
        os.mkdir(TESTFN2)

    def test_extract_dir(self):
        # Extracting an archive with directory entries recreates the tree.
        with zipfile.ZipFile(findfile("zipdir.zip")) as zipf:
            zipf.extractall(TESTFN2)
        self.assertTrue(os.path.isdir(os.path.join(TESTFN2, "a")))
        self.assertTrue(os.path.isdir(os.path.join(TESTFN2, "a", "b")))
        self.assertTrue(os.path.exists(os.path.join(TESTFN2, "a", "b", "c")))

    def test_bug_6050(self):
        # Extraction should succeed if directories already exist
        os.mkdir(os.path.join(TESTFN2, "a"))
        self.test_extract_dir()

    def test_store_dir(self):
        # Storing a directory creates an entry whose name ends with '/'.
        os.mkdir(os.path.join(TESTFN2, "x"))
        # FIX: the original leaked the open ZipFile handle, which keeps
        # TESTFN locked on some platforms until interpreter shutdown.
        with zipfile.ZipFile(TESTFN, "w") as zipf:
            zipf.write(os.path.join(TESTFN2, "x"), "x")
            self.assertTrue(zipf.filelist[0].filename.endswith("x/"))

    def tearDown(self):
        shutil.rmtree(TESTFN2)
        if os.path.exists(TESTFN):
            unlink(TESTFN)
class AbstractUniversalNewlineTests:
    """Shared tests for reading members in universal-newline ('rU') mode.

    Subclasses provide ``compression``.
    """

    @classmethod
    def setUpClass(cls):
        # FIXEDTEST_SIZE sample lines, and one payload per newline style
        # (\r, \r\n, \n) with every line terminated by that style.
        cls.line_gen = [bytes("Test of zipfile line %d." % i, "ascii")
                        for i in range(FIXEDTEST_SIZE)]
        cls.seps = (b'\r', b'\r\n', b'\n')
        cls.arcdata = {}
        for n, s in enumerate(cls.seps):
            cls.arcdata[s] = s.join(cls.line_gen) + s

    def setUp(self):
        # One scratch source file per separator, named '<TESTFN>-<n>'.
        self.arcfiles = {}
        for n, s in enumerate(self.seps):
            self.arcfiles[s] = '%s-%d' % (TESTFN, n)
            with open(self.arcfiles[s], "wb") as f:
                f.write(self.arcdata[s])

    def make_test_archive(self, f, compression):
        # Create the ZIP archive with every separator variant as a member.
        with zipfile.ZipFile(f, "w", compression) as zipfp:
            for fn in self.arcfiles.values():
                zipfp.write(fn, fn)

    def read_test(self, f, compression):
        self.make_test_archive(f, compression)
        # A whole-member read in 'rU' mode round-trips the original bytes,
        # separators included.
        with zipfile.ZipFile(f, "r") as zipfp:
            for sep, fn in self.arcfiles.items():
                with zipfp.open(fn, "rU") as fp:
                    zipdata = fp.read()
                self.assertEqual(self.arcdata[sep], zipdata)

    def test_read(self):
        for f in get_files(self):
            self.read_test(f, self.compression)

    def readline_read_test(self, f, compression):
        self.make_test_archive(f, compression)
        # Interleave readline() and read(5); the reassembled stream must
        # equal the '\n'-separated payload (newlines normalized to '\n').
        with zipfile.ZipFile(f, "r") as zipfp:
            for sep, fn in self.arcfiles.items():
                with zipfp.open(fn, "rU") as zipopen:
                    data = b''
                    while True:
                        read = zipopen.readline()
                        if not read:
                            break
                        data += read
                        read = zipopen.read(5)
                        if not read:
                            break
                        data += read
                self.assertEqual(data, self.arcdata[b'\n'])

    def test_readline_read(self):
        for f in get_files(self):
            self.readline_read_test(f, self.compression)

    def readline_test(self, f, compression):
        self.make_test_archive(f, compression)
        # Each readline() yields one sample line, normalized to '\n'.
        with zipfile.ZipFile(f, "r") as zipfp:
            for sep, fn in self.arcfiles.items():
                with zipfp.open(fn, "rU") as zipopen:
                    for line in self.line_gen:
                        linedata = zipopen.readline()
                        self.assertEqual(linedata, line + b'\n')

    def test_readline(self):
        for f in get_files(self):
            self.readline_test(f, self.compression)

    def readlines_test(self, f, compression):
        self.make_test_archive(f, compression)
        # readlines() returns all lines, normalized to '\n'.
        with zipfile.ZipFile(f, "r") as zipfp:
            for sep, fn in self.arcfiles.items():
                with zipfp.open(fn, "rU") as fp:
                    ziplines = fp.readlines()
                for line, zipline in zip(self.line_gen, ziplines):
                    self.assertEqual(zipline, line + b'\n')

    def test_readlines(self):
        for f in get_files(self):
            self.readlines_test(f, self.compression)

    def iterlines_test(self, f, compression):
        self.make_test_archive(f, compression)
        # Iterating the file object yields normalized lines as well.
        with zipfile.ZipFile(f, "r") as zipfp:
            for sep, fn in self.arcfiles.items():
                with zipfp.open(fn, "rU") as fp:
                    for line, zipline in zip(self.line_gen, fp):
                        self.assertEqual(zipline, line + b'\n')

    def test_iterlines(self):
        for f in get_files(self):
            self.iterlines_test(f, self.compression)

    def tearDown(self):
        # Remove the per-separator source files and any scratch archives.
        for sep, fn in self.arcfiles.items():
            os.remove(fn)
        unlink(TESTFN)
        unlink(TESTFN2)
# Concrete parameterizations of the universal-newline tests,
# one per supported compression method.
class StoredUniversalNewlineTests(AbstractUniversalNewlineTests,
                                  unittest.TestCase):
    compression = zipfile.ZIP_STORED

@requires_zlib
class DeflateUniversalNewlineTests(AbstractUniversalNewlineTests,
                                   unittest.TestCase):
    compression = zipfile.ZIP_DEFLATED

@requires_bz2
class Bzip2UniversalNewlineTests(AbstractUniversalNewlineTests,
                                 unittest.TestCase):
    compression = zipfile.ZIP_BZIP2

@requires_lzma
class LzmaUniversalNewlineTests(AbstractUniversalNewlineTests,
                                unittest.TestCase):
    compression = zipfile.ZIP_LZMA
# Allow running this test module directly (outside the regrtest driver).
if __name__ == "__main__":
    unittest.main()
|
|
from __future__ import print_function

# Table marker for Boom "extended" linedef types.
EXTENDED = 'Ext'

# Trigger-column initial -> quoted TOML trigger name.
TRIGGERS = {
    'P': '"Push"',
    'S': '"Switch"',
    'W': '"WalkOver"',
    'G': '"Gun"',
}

# Speed-column label -> numeric speed value. 'Inst' is effectively
# instantaneous; exact units are whatever the consuming engine expects
# (presumably map units per tic -- unverified).
SPEEDS = {
    '----': 0,
    'Slow': 8,
    'Normal': 16,
    'Fast': 32,
    'Turbo': 64,
    'Inst': 16384,
}

# Lock-column label -> key index; None means the action is not locked.
LOCKS = {
    'No': None,
    'Blue': 0,
    'Red': 1,
    'Yell': 2,
}
def to_special_type(column):
    """Convert a special-type table cell to its integer value."""
    value = int(column)
    return value
def to_extended(column):
    """Return True when the cell flags a Boom-extended linedef."""
    # 'Ext' is the value of the module-level EXTENDED marker.
    return column == 'Ext'
def to_trigger_and_only_once(column):
    """Split a trigger cell like 'P1' into (trigger name, only-once flag)."""
    kind, once = column[0], column[1]
    return TRIGGERS[kind], once == '1'
def to_wait(column):
    """Convert a wait cell ('--' or e.g. '3s') to seconds as a float."""
    if column != '--':
        return float(column.rstrip('s'))
    return 0.0
def split_chunk(chunk, n_columns):
    """Lazily split each line of *chunk* into at most *n_columns* fields."""
    max_splits = n_columns - 1
    return (line.split(None, max_splits) for line in chunk)
def to_bool(column):
    """Map a Yes/No table cell to a boolean.

    'Yes' -> True; 'No' and '--' -> False.

    Raises:
        ValueError: for any other cell value. (The original used a bare
        ``assert``, which is silently stripped under ``python -O`` and
        would then return None.)
    """
    if column == 'Yes':
        return True
    if column in ('No', '--'):
        return False
    raise ValueError('unrecognized boolean cell: %r' % (column,))
def height(ref, off=None):
    """Format an inline TOML height table.

    Returns '{ to = "REF" }', or '{ to = "REF", off = N }' when an
    integer offset is supplied.
    """
    if off is None:
        return '{ to = "%s" }' % (ref,)
    return '{ to = "%s", off = %d }' % (ref, off)
def doors(chunk):
    """Emit [[linedef]] TOML blocks for the door table *chunk*.

    The ceiling movement is a (first, second) pair of height targets;
    second is None for one-way actions.
    """
    open_door = height('LowestCeiling', -4)
    close_door = height('Floor')
    # Action-column text -> (first, second) ceiling targets.
    ceilings = {
        'Open, Wait, Then Close': (open_door, close_door),
        'Open and Stay Open': (open_door, None),
        'Close and Stay Closed': (close_door, None),
        'Close, Wait, Then Open': (close_door, open_door),
    }
    print()
    print()
    print('### Doors ###')
    print()
    # Column layout: type, ext, trigger, lock, speed, wait, monsters, action.
    for row in split_chunk(chunk, 8):
        special_type = to_special_type(row[0])
        extended = to_extended(row[1])
        trigger, only_once = to_trigger_and_only_once(row[2])
        lock = LOCKS[row[3]]
        speed = SPEEDS[row[4]]
        wait = to_wait(row[5])
        monsters = to_bool(row[6])
        first, second = ceilings[row[7]]
        print('[[linedef]]')
        print(' special_type =', special_type)
        print(' trigger =', trigger)
        # Optional flags are only emitted when set.
        if extended:
            print(' extended = true')
        if only_once:
            print(' only_once = true')
        if monsters:
            print(' monsters = true')
        if lock is not None:
            print(' lock =', lock)
        print(' [linedef.move]')
        if wait > 0.0:
            print(' wait =', wait)
        if speed > 0.0:
            print(' speed =', speed)
        if second is None:
            # One-way action: single target fits in an inline table.
            print(' ceiling = { first =', first, '}')
        else:
            print(' [linedef.move.ceiling]')
            print(' first =', first)
            print(' second =', second)
        print()
def floors(chunk):
    """Emit [[linedef]] TOML blocks for the floor-movement table *chunk*.

    Rows whose target height cannot be expressed yet map to None and
    are skipped.
    """
    # Action-column text -> floor height target (or None = unsupported).
    first_floors = {
        'Absolute 24': height('Floor', 24),
        # FIX: this previously reused height('Floor', 24); an
        # "Absolute 512" action must move the floor by 512 units.
        'Absolute 512': height('Floor', 512),
        'Abs Shortest Lower Texture': None,
        'None': None,
        'Highest Neighbor Floor': height('HighestFloor'),
        'Highest Neighbor Floor + 8': height('HighestFloor', 8),
        'Lowest Neighbor Ceiling': height('LowestCeiling'),
        'Lowest Neighbor Ceiling - 8': height('LowestCeiling', - 8),
        'Lowest Neighbor Floor': height('LowestFloor'),
        'Next Neighbor Floor': height('NextFloor'),
    }
    print()
    print()
    print('### Floors ###')
    print()
    for row in split_chunk(chunk, 10):
        special_type = to_special_type(row[0])
        extended = to_extended(row[1])
        trigger, only_once = to_trigger_and_only_once(row[2])
        speed = SPEEDS[row[4]]
        monsters = to_bool(row[7])
        first_floor = first_floors[row[9]]
        if first_floor is None:
            # Unsupported target kinds are skipped entirely.
            continue
        print('[[linedef]]')
        print(' special_type =', special_type)
        print(' trigger =', trigger)
        if extended:
            print(' extended = true')
        if only_once:
            print(' only_once = true')
        if monsters:
            print(' monsters = true')
        print(' [linedef.move]')
        if speed > 0.0:
            print(' speed =', speed)
        print(' floor = { first =', first_floor, '}')
        print()
def ceilings(chunk):
    """Emit [[linedef]] TOML blocks for the ceiling-movement table *chunk*."""
    # Action-column text -> ceiling height target.
    first_ceilings = {
        '8 Above Floor': height('Floor', 8),
        'Floor': height('Floor'),
        'Highest Neighbor Ceiling': height('HighestCeiling'),
        'Highest Neighbor Floor': height('HighestFloor'),
        'Lowest Neighbor Ceiling': height('LowestCeiling'),
    }
    print()
    print()
    print('### Ceilings ###')
    print()
    for row in split_chunk(chunk, 10):
        special_type = to_special_type(row[0])
        extended = to_extended(row[1])
        trigger, only_once = to_trigger_and_only_once(row[2])
        speed = SPEEDS[row[4]]
        monsters = to_bool(row[7])
        first_ceiling = first_ceilings[row[9]]
        print('[[linedef]]')
        print(' special_type =', special_type)
        print(' trigger =', trigger)
        # Optional flags are only emitted when set.
        if extended:
            print(' extended = true')
        if only_once:
            print(' only_once = true')
        if monsters:
            print(' monsters = true')
        print(' [linedef.move]')
        if speed > 0.0:
            print(' speed =', speed)
        print(' ceiling = { first =', first_ceiling, '}')
        print()
def platforms(chunk):
    """Emit [[linedef]] TOML blocks for the platform/lift table *chunk*.

    Each supported action maps to a (first, second, repeat) triple:
    the target height(s) and whether the motion cycles perpetually.
    Unsupported actions map to None and are skipped.
    """
    floors = {
        'Ceiling (toggle)': None,
        'Lowest and Highest Floor (perpetual)': (height('LowestFloor'),
                                                 height('HighestFloor'),
                                                 True),
        'Lowest Neighbor Floor (lift)': (height('LowestFloor'),
                                         height('Floor'), False),
        'Raise 24 Units': (height('Floor', 24), None, False),
        'Raise 32 Units': (height('Floor', 32), None, False),
        'Raise Next Floor': (height('NextFloor'), None, False),
        'Stop': None,
    }
    print()
    print()
    print('### Platforms ###')
    print()
    for row in split_chunk(chunk, 9):
        special_type = to_special_type(row[0])
        extended = to_extended(row[1])
        trigger, only_once = to_trigger_and_only_once(row[2])
        wait = to_wait(row[3])
        speed = SPEEDS[row[4]]
        monsters = to_bool(row[7])
        triple = floors[row[8]]
        if triple is None:
            # Toggle/stop actions are not expressible yet; skip them.
            continue
        first, second, repeat = triple
        print('[[linedef]]')
        print(' special_type =', special_type)
        print(' trigger =', trigger)
        # Optional flags are only emitted when set.
        if extended:
            print(' extended = true')
        if only_once:
            print(' only_once = true')
        if monsters:
            print(' monsters = true')
        print(' [linedef.move]')
        if wait > 0.0:
            print(' wait =', wait)
        if speed > 0.0:
            print(' speed =', speed)
        if repeat:
            print(' repeat = true')
        if second is None:
            # Single target fits in an inline table.
            print(' floor = { first =', first, '}')
        else:
            print(' [linedef.move.floor]')
            print(' first =', first)
            print(' second =', second)
        print()
def exits(chunk):
    """Emit [[linedef]] TOML blocks for the exit table *chunk*."""
    kinds = {
        'Normal': '"Normal"',
        'Secret': '"Secret"',
    }
    print()
    print()
    print('### Exits ###')
    print()
    for fields in split_chunk(chunk, 4):
        special_type = to_special_type(fields[0])
        extended = to_extended(fields[1])
        trigger, only_once = to_trigger_and_only_once(fields[2])
        exit_kind = kinds[fields[3]]
        print('[[linedef]]')
        print(' special_type =', special_type)
        print(' trigger =', trigger)
        if extended:
            print(' extended = true')
        if only_once:
            print(' only_once = true')
        print(' exit = ', exit_kind)
        print()
def main():
    """Convert tables.txt into TOML [[linedef]] blocks on stdout."""
    # FIX: use a context manager; the original left the file object open.
    with open('tables.txt', 'r') as table_file:
        lines = [line.strip() for line in table_file]
    def gen_chunks():
        # Yield runs of non-blank lines; blank lines separate tables.
        # NOTE(review): a trailing chunk not followed by a blank line is
        # dropped, as in the original -- confirm tables.txt ends blank.
        chunk = []
        for line in lines:
            if line:
                chunk.append(line)
            else:
                yield chunk
                chunk = []
    chunks = gen_chunks()
    doors(next(chunks))
    floors(next(chunks))
    ceilings(next(chunks))
    platforms(next(chunks))
    next(chunks)  # crusher_ceilings(next(chunks))
    next(chunks)  # stair_builders(next(chunks))
    next(chunks)  # elevators(next(chunks))
    next(chunks)  # lighting(next(chunks))
    exits(next(chunks))
    next(chunks)  # teleporters(next(chunks))
    next(chunks)  # donuts(next(chunks))
# Run the conversion when executed as a script.
if __name__ == '__main__':
    main()
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.serialization import jsonutils
import six
import webob.dec
from keystone.common import authorization
from keystone.common import config
from keystone.common import utils
from keystone.common import wsgi
from keystone import exception
from keystone.i18n import _LW
from keystone.models import token_model
from keystone.openstack.common import log
# Global keystone configuration object.
CONF = config.CONF
# Module-level logger.
LOG = log.getLogger(__name__)
# Header used to transmit the auth token
AUTH_TOKEN_HEADER = 'X-Auth-Token'
# Header used to transmit the subject token (the token being operated on)
SUBJECT_TOKEN_HEADER = 'X-Subject-Token'
# Environment variable used to pass the request context
CONTEXT_ENV = wsgi.CONTEXT_ENV
# Environment variable used to pass the request params
PARAMS_ENV = wsgi.PARAMS_ENV
class TokenAuthMiddleware(wsgi.Middleware):
    """Stash the incoming auth token (and optional subject token) in the
    request context stored in the WSGI environment."""

    def process_request(self, request):
        context = request.environ.get(CONTEXT_ENV, {})
        context['token_id'] = request.headers.get(AUTH_TOKEN_HEADER)
        # The subject token is only recorded when the header is present.
        if SUBJECT_TOKEN_HEADER in request.headers:
            context['subject_token_id'] = (
                request.headers.get(SUBJECT_TOKEN_HEADER))
        request.environ[CONTEXT_ENV] = context
class AdminTokenAuthMiddleware(wsgi.Middleware):
    """A trivial filter that checks for a pre-defined admin token.

    Sets 'is_admin' to true in the context, expected to be checked by
    methods that are admin-only.
    """

    def process_request(self, request):
        supplied = request.headers.get(AUTH_TOKEN_HEADER)
        context = request.environ.get(CONTEXT_ENV, {})
        # Admin rights are granted purely by matching the configured token.
        context['is_admin'] = supplied == CONF.admin_token
        request.environ[CONTEXT_ENV] = context
class PostParamsMiddleware(wsgi.Middleware):
    """Middleware to allow method arguments to be passed as POST parameters.

    Filters out the parameters `self`, `context` and anything beginning with
    an underscore.
    """

    def process_request(self, request):
        # Drop reserved and private argument names before exposing them.
        filtered = dict(
            (name, value) for name, value in six.iteritems(request.params)
            if name not in ('self', 'context') and not name.startswith('_'))
        request.environ[PARAMS_ENV] = filtered
class JsonBodyMiddleware(wsgi.Middleware):
    """Middleware to allow method arguments to be passed as serialized JSON.

    Accepting arguments as JSON is useful for accepting data that may be more
    complex than simple primitives.

    Filters out the parameters `self`, `context` and anything beginning with
    an underscore.
    """
    def process_request(self, request):
        # Abort early if we don't have any work to do
        params_json = request.body
        if not params_json:
            return
        # Reject unrecognized content types. Empty string indicates
        # the client did not explicitly set the header
        if request.content_type not in ('application/json', ''):
            e = exception.ValidationError(attribute='application/json',
                                          target='Content-Type header')
            return wsgi.render_exception(e, request=request)
        params_parsed = {}
        try:
            params_parsed = jsonutils.loads(params_json)
        except ValueError:
            e = exception.ValidationError(attribute='valid JSON',
                                          target='request body')
            return wsgi.render_exception(e, request=request)
        finally:
            # NOTE: this runs before the except-branch return as well. Any
            # falsy parse result (e.g. a body of 'null', '0' or '""') is
            # coerced to {} here, so such bodies yield empty params rather
            # than failing the dict check below.
            if not params_parsed:
                params_parsed = {}
        if not isinstance(params_parsed, dict):
            e = exception.ValidationError(attribute='valid JSON object',
                                          target='request body')
            return wsgi.render_exception(e, request=request)
        params = {}
        # Strip reserved and private argument names before exposing them.
        for k, v in six.iteritems(params_parsed):
            if k in ('self', 'context'):
                continue
            if k.startswith('_'):
                continue
            params[k] = v
        request.environ[PARAMS_ENV] = params
class XmlBodyMiddleware(wsgi.Middleware):
    """De/serialize XML to/from JSON.

    Compatibility stub: XML support was removed in Kilo, so this class
    only logs a deprecation warning when instantiated.
    """
    def print_warning(self):
        # Warn deployers that this middleware no longer does anything.
        LOG.warning(_LW('XML support has been removed as of the Kilo release '
                        'and should not be referenced or used in deployment. '
                        'Please remove references to XmlBodyMiddleware from '
                        'your configuration. This compatibility stub will be '
                        'removed in the L release'))

    def __init__(self, *args, **kwargs):
        super(XmlBodyMiddleware, self).__init__(*args, **kwargs)
        self.print_warning()
class XmlBodyMiddlewareV2(XmlBodyMiddleware):
    """De/serialize XML to/from JSON for v2.0 API."""
    def __init__(self, *args, **kwargs):
        # Intentionally skips the parent __init__ (and therefore both the
        # Middleware setup and the deprecation warning) -- presumably so
        # these versioned stubs stay silent; confirm before changing.
        pass

class XmlBodyMiddlewareV3(XmlBodyMiddleware):
    """De/serialize XML to/from JSON for v3 API."""
    def __init__(self, *args, **kwargs):
        # Same intentional no-op as the v2 stub above.
        pass
class NormalizingFilter(wsgi.Middleware):
    """Middleware filter to handle URL normalization."""

    def process_request(self, request):
        """Normalizes URLs."""
        path = request.environ['PATH_INFO']
        if len(path) > 1 and path[-1] == '/':
            # Drop a single trailing slash (but never reduce '/' itself).
            request.environ['PATH_INFO'] = path[:-1]
        elif not path:
            # An empty path means the root resource.
            request.environ['PATH_INFO'] = '/'
class RequestBodySizeLimiter(wsgi.Middleware):
    """Limit the size of an incoming request."""

    def __init__(self, *args, **kwargs):
        super(RequestBodySizeLimiter, self).__init__(*args, **kwargs)

    @webob.dec.wsgify()
    def __call__(self, req):
        # Without a Content-Length we cannot pre-check the size, so wrap
        # the body stream in a reader that enforces the cap as it is read.
        if req.content_length is None:
            if req.is_body_readable:
                limiter = utils.LimitingReader(req.body_file,
                                               CONF.max_request_body_size)
                req.body_file = limiter
        # A declared Content-Length above the cap is rejected outright.
        elif req.content_length > CONF.max_request_body_size:
            raise exception.RequestTooLarge()
        # Returning the wrapped app tells wsgify to continue the pipeline.
        return self.application
class AuthContextMiddleware(wsgi.Middleware):
    """Build the authentication context from the request auth token."""

    def _build_auth_context(self, request):
        """Validate the X-Auth-Token header and return an auth context dict.

        Returns an empty dict for the special admin token; raises
        Unauthorized when the token cannot be validated.
        """
        token_id = request.headers.get(AUTH_TOKEN_HEADER).strip()
        if token_id == CONF.admin_token:
            # NOTE(gyee): no need to proceed any further as the special admin
            # token is being handled by AdminTokenAuthMiddleware. This code
            # will not be impacted even if AdminTokenAuthMiddleware is removed
            # from the pipeline as "is_admin" is default to "False". This code
            # is independent of AdminTokenAuthMiddleware.
            return {}
        context = {'token_id': token_id}
        context['environment'] = request.environ
        try:
            # Validate the token and wrap the result in the model object.
            token_ref = token_model.KeystoneToken(
                token_id=token_id,
                token_data=self.token_provider_api.validate_token(token_id))
            # TODO(gyee): validate_token_bind should really be its own
            # middleware
            wsgi.validate_token_bind(context, token_ref)
            return authorization.token_to_auth_context(token_ref)
        except exception.TokenNotFound:
            LOG.warning(_LW('RBAC: Invalid token'))
            raise exception.Unauthorized()

    def process_request(self, request):
        # Skip silently when no token was supplied; downstream code sees
        # no auth context and treats the request as unauthenticated.
        if AUTH_TOKEN_HEADER not in request.headers:
            LOG.debug(('Auth token not in the request header. '
                       'Will not build auth context.'))
            return
        # Never clobber a context another filter already built.
        if authorization.AUTH_CONTEXT_ENV in request.environ:
            msg = _LW('Auth context already exists in the request environment')
            LOG.warning(msg)
            return
        auth_context = self._build_auth_context(request)
        LOG.debug('RBAC: auth_context: %s', auth_context)
        request.environ[authorization.AUTH_CONTEXT_ENV] = auth_context
|
|
#----------------------------------------------------------------------
# Copyright (c) 2008 Board of Trustees, Princeton University
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
##
# Implements SFA Credentials
#
# Credentials are layered on top of certificates, and are essentially a
# certificate that stores a tuple of parameters.
##
import xmlrpclib
from openflow.optin_manager.sfa.util.faults import MissingDelegateBit, ChildRightsNotSubsetOfParent
from openflow.optin_manager.sfa.trust.certificate import Certificate
from openflow.optin_manager.sfa.trust.gid import GID
##
# Credential is a tuple:
# (GIDCaller, GIDObject, LifeTime, Privileges, Delegate)
#
# These fields are encoded using xmlrpc into the subjectAltName field of the
# x509 certificate. Note: Call encode() once the fields have been filled in
# to perform this encoding.
class CredentialLegacy(Certificate):
    """Legacy SFA credential.

    A credential is layered on top of a certificate and stores the tuple
    (GIDCaller, GIDObject, LifeTime, Privileges, Delegate).  These fields
    are xmlrpc-encoded into the subjectAltName field of the x509
    certificate.  Call encode() once the fields have been filled in to
    perform this encoding; the get_* accessors lazily decode() on first
    use.
    """

    gidCaller = None    # GID of the caller
    gidObject = None    # GID of the object the credential grants rights over
    lifeTime = None     # lifetime of the credential
    privileges = None   # Rights object describing the granted privileges
    delegate = False    # whether the holder may delegate this credential

    def __init__(self, create=False, subject=None, string=None, filename=None):
        """Create a Credential object.

        :param create: if True, create a blank x509 certificate
        :param subject: if not None, create an x509 cert with this subject name
        :param string: if not None, load the credential from this string
        :param filename: if not None, load the credential from this file
        """
        Certificate.__init__(self, create, subject, string, filename)

    def set_gid_caller(self, gid):
        """Set the GID of the caller."""
        self.gidCaller = gid
        # gid origin caller is the caller's gid by default
        self.gidOriginCaller = gid

    def get_gid_caller(self):
        """Return the GID of the caller, decoding the certificate if needed."""
        if not self.gidCaller:
            self.decode()
        return self.gidCaller

    def set_gid_object(self, gid):
        """Set the GID of the object."""
        self.gidObject = gid

    def get_gid_object(self):
        """Return the GID of the object, decoding the certificate if needed."""
        if not self.gidObject:
            self.decode()
        return self.gidObject

    def set_lifetime(self, lifeTime):
        """Set the lifetime of this credential."""
        self.lifeTime = lifeTime

    def get_lifetime(self):
        """Return the lifetime, decoding the certificate if needed."""
        if not self.lifeTime:
            self.decode()
        return self.lifeTime

    def set_delegate(self, delegate):
        """Set the delegate bit.

        :param delegate: boolean (True or False)
        """
        self.delegate = delegate

    def get_delegate(self):
        """Return the delegate bit, decoding the certificate if needed."""
        if not self.delegate:
            self.decode()
        return self.delegate

    def set_privileges(self, privs):
        """Set the privileges.

        :param privs: either a comma-separated string of privileges or a
            Rights object
        """
        if isinstance(privs, str):
            # NOTE(review): Rights is not imported in this module in the
            # original source -- it must be supplied by the surrounding
            # package; confirm.
            self.privileges = Rights(string=privs)
        else:
            self.privileges = privs

    def get_privileges(self):
        """Return the privileges as a Rights object."""
        if not self.privileges:
            self.decode()
        return self.privileges

    def can_perform(self, op_name):
        """Return True if the credential allows operation `op_name`.

        :param op_name: string naming the operation ("lookup", "update", ...)
        """
        rights = self.get_privileges()
        if not rights:
            return False
        return rights.can_perform(op_name)

    def encode(self):
        """Encode the credential attributes into the certificate.

        The attributes are xmlrpc-encoded into a string stored in the
        alt-subject-name field of the X509 object.  This should be done
        immediately before signing the credential.
        """
        attributes = {"gidCaller": None,
                      "gidObject": None,
                      "lifeTime": self.lifeTime,
                      "privileges": None,
                      "delegate": self.delegate}
        if self.gidCaller:
            attributes["gidCaller"] = self.gidCaller.save_to_string(save_parents=True)
        if self.gidObject:
            attributes["gidObject"] = self.gidObject.save_to_string(save_parents=True)
        if self.privileges:
            attributes["privileges"] = self.privileges.save_to_string()
        encoded = xmlrpclib.dumps((attributes,), allow_none=True)
        self.set_data('URI:http://' + encoded)

    def decode(self):
        """Retrieve the credential attributes from the certificate.

        Reads the alt-subject-name field of the X509 certificate.  This is
        automatically done by the various get_* methods of this class and
        should not need to be called explicitly.
        """
        data = self.get_data()
        # Strip the literal 'URI:http://' prefix added by encode().  The
        # previous implementation used lstrip('URI:http://'), which strips
        # any run of those *characters* (U, R, I, :, h, t, p, /) and could
        # therefore also eat the start of the payload.
        prefix = 'URI:http://'
        if data.startswith(prefix):
            data = data[len(prefix):]
        if data:
            attributes = xmlrpclib.loads(data)[0][0]
        else:
            attributes = {}
        self.lifeTime = attributes.get("lifeTime", None)
        self.delegate = attributes.get("delegate", None)
        privStr = attributes.get("privileges", None)
        if privStr:
            self.privileges = Rights(string=privStr)
        else:
            self.privileges = None
        gidCallerStr = attributes.get("gidCaller", None)
        if gidCallerStr:
            self.gidCaller = GID(string=gidCallerStr)
        else:
            self.gidCaller = None
        gidObjectStr = attributes.get("gidObject", None)
        if gidObjectStr:
            self.gidObject = GID(string=gidObjectStr)
        else:
            self.gidObject = None

    def verify_chain(self, trusted_certs=None):
        """Verify that a chain of credentials is valid (see cert.py:verify).

        In addition to the checks for ordinary certificates, verification
        also ensures that the delegate bit was set by each parent in the
        chain (raising MissingDelegateBit otherwise) and that each
        credential's rights are a subset of its parent's (raising
        ChildRightsNotSubsetOfParent otherwise).
        """
        # do the normal certificate verification stuff
        Certificate.verify_chain(self, trusted_certs)
        if self.parent:
            # make sure the parent delegated rights to the child
            if not self.parent.get_delegate():
                raise MissingDelegateBit(self.parent.get_subject())
            # make sure the rights given to the child are a subset of the
            # parent's rights
            if not self.parent.get_privileges().is_superset(self.get_privileges()):
                raise ChildRightsNotSubsetOfParent(
                    self.get_subject()
                    + " " + self.parent.get_privileges().save_to_string()
                    + " " + self.get_privileges().save_to_string())
        return

    def dump(self, *args, **kwargs):
        """Print the credential to stdout in human-readable format."""
        # parenthesized form works under both Python 2 and Python 3
        print(self.dump_string(*args, **kwargs))

    def dump_string(self, dump_parents=False):
        """Return a human-readable description of the credential.

        :param dump_parents: if True, also include the parent credentials
        """
        result = ""
        result += "CREDENTIAL %s\n" % self.get_subject()
        result += "  privs: %s\n" % self.get_privileges().save_to_string()
        gidCaller = self.get_gid_caller()
        if gidCaller:
            result += "  gidCaller:\n"
            # append the caller GID text; the old code called
            # gidCaller.dump(), which printed to stdout and discarded the
            # text instead of including it in the result
            result += gidCaller.dump_string(8, dump_parents)
        gidObject = self.get_gid_object()
        if gidObject:
            result += "  gidObject:\n"
            result += gidObject.dump_string(8, dump_parents)
        result += "  delegate: %s" % self.get_delegate()
        if self.parent and dump_parents:
            result += "PARENT\n"
            result += self.parent.dump_string(dump_parents)
        return result
|
|
# Copyright (c) 2013-2016 Christian Geier et al.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""contains a re-usable CalendarWidget for urwid"""
import calendar
from datetime import date
from locale import getlocale, setlocale, LC_ALL
import urwid
setlocale(LC_ALL, '')
def getweeknumber(day):
    """Return the ISO week number of `day`.

    :param day: the date in question
    :type day: datetime.date
    :return: ISO week number
    :rtype: int
    """
    _, isoweek, _ = day.isocalendar()
    return isoweek
class DatePart(urwid.Text):
    """One digit of a date cell; the Date widget combines two of these."""

    def __init__(self, digit):
        urwid.Text.__init__(self, digit)

    @classmethod
    def selectable(cls):
        # digits must be focusable so the calendar cursor can land on them
        return True

    def keypress(self, _, key):
        """Ignore every key so the containing widgets handle navigation."""
        return key
class Date(urwid.WidgetWrap):
    """A single day cell in the calendar grid.

    The day-of-month number is rendered as two one-character halves so that
    each half can be styled independently.
    """

    def __init__(self, date, get_styles=None):
        digits = str(date.day).rjust(2)
        left_half = urwid.AttrMap(DatePart(digits[:1]), None, None)
        right_half = urwid.AttrMap(DatePart(digits[1:]), None, None)
        self.halves = [left_half, right_half]
        self.date = date
        self._get_styles = get_styles
        super(Date, self).__init__(urwid.Columns(self.halves))

    def set_styles(self, styles):
        """Apply display attributes to this cell.

        If `styles` is a tuple, its two elements style the two halves
        separately; otherwise the same style is applied to both halves.
        """
        if type(styles) is tuple:
            attrs = styles
        else:
            attrs = (styles, styles)
        for half, attr in zip(self.halves, attrs):
            half.set_attr_map({None: attr})
            half.set_focus_map({None: attr})

    def reset_styles(self):
        """Restore the styles provided by the cell's get_styles function."""
        self.set_styles(self._get_styles(self.date, False))

    @property
    def marked(self):
        """True if either half currently carries the 'mark' attribute."""
        current = (self.halves[0].attr_map[None], self.halves[1].attr_map[None])
        return 'mark' in current

    @classmethod
    def selectable(cls):
        return True

    def keypress(self, _, key):
        """Ignore every key so the containing widgets handle navigation."""
        return key
class DateCColumns(urwid.Columns):
    """container for one week worth of dates
    which are horizontally aligned
    TODO: rename, awful name
    focus can only move away by pressing 'TAB',
    calls 'on_date_change' on every focus change (see below for details)

    Layout: column 0 is a 4-char month-name/week-number label; columns 1-7
    hold the `Date` widgets for the seven days of the week.
    """
    def __init__(self, widget_list, on_date_change, on_press, keybindings,
                 get_styles=None, **kwargs):
        # callback invoked with the newly focused datetime.date
        self.on_date_change = on_date_change
        # dict mapping keys to callbacks (dispatched higher up, in CListBox)
        self.on_press = on_press
        self.keybindings = keybindings
        # function mapping (date, focus) -> display attribute(s)
        self.get_styles = get_styles
        # we need the next two attributes for attribute resetting when a
        # cell regains focus after having lost it
        self._old_attr_map = False
        self._old_pos = 0
        # True while the widget tree is being built; suppresses callbacks
        self._init = True
        super(DateCColumns, self).__init__(widget_list, **kwargs)
    def __repr__(self):
        return '<DateCColumns from {} to {}>'.format(self[1].date, self[7].date)
    def _set_focus_position(self, position):
        """calls on_date_change before calling super()._set_focus_position"""
        # do not call when building up the interface, lots of potentially
        # expensive calls made here
        if self._init:
            self._init = False
        else:
            # restyle the newly focused cell, then notify listeners
            self.contents[position][0].set_styles(
                self.get_styles(self.contents[position][0].date, True))
            self.on_date_change(self.contents[position][0].date)
        super(DateCColumns, self)._set_focus_position(position)
    def set_focus_date(self, a_date):
        """move focus to `a_date`; raises ValueError if it is not in this week"""
        for num, day in enumerate(self.contents[1:8], 1):
            if day[0].date == a_date:
                self._set_focus_position(num)
                return None
        raise ValueError('%s not found in this week' % a_date)
    def get_date_column(self, a_date):
        """return the column `a_date` is in, raises ValueError if `a_date`
        cannot be found
        """
        for num, day in enumerate(self.contents[1:8], 1):
            if day[0].date == a_date:
                return num
        raise ValueError('%s not found in this week' % a_date)
    focus_position = property(
        urwid.Columns._get_focus_position,
        _set_focus_position,
        doc=('Index of child widget in focus. Raises IndexError if read when '
             'CColumns is empty, or when set to an invalid index.')
    )
    def keypress(self, size, key):
        """only leave calendar area on pressing 'tab' or 'enter'"""
        # translate the configured movement keys into urwid's canonical names
        if key in self.keybindings['left']:
            key = 'left'
        elif key in self.keybindings['up']:
            key = 'up'
        elif key in self.keybindings['right']:
            key = 'right'
        elif key in self.keybindings['down']:
            key = 'down'
        old_pos = self.focus_position
        key = super(DateCColumns, self).keypress(size, key)
        # make sure we don't leave the calendar
        if old_pos == 7 and key == 'right':
            # wrap: last day of the week -> first day of the next week,
            # expressed as a 'down' for the enclosing ListBox
            self.contents[old_pos][0].set_styles(
                self.get_styles(self.contents[old_pos][0].date, False))
            self.focus_position = 1
            self.contents[self.focus_position][0].set_styles(
                self.get_styles(self.contents[self.focus_position][0].date, False))
            return 'down'
        elif old_pos == 1 and key == 'left':
            # wrap: first day of the week -> last day of the previous week,
            # expressed as an 'up' for the enclosing ListBox
            self.contents[old_pos][0].set_styles(
                self.get_styles(self.contents[old_pos][0].date, False))
            self.focus_position = 7
            self.contents[self.focus_position][0].set_styles(
                self.get_styles(self.contents[self.focus_position][0].date, False))
            return 'up'
        if key in self.keybindings['view']:  # XXX make this more generic
            # leaving the calendar to the right: remember the position and
            # keep the current cell shown with its focused style
            self._old_pos = old_pos
            self.contents[old_pos][0].set_styles(
                self.get_styles(self.contents[old_pos][0].date, True))
            return 'right'
        if old_pos != self.focus_position:
            # focus moved within this week: restyle both affected cells
            self.contents[old_pos][0].set_styles(
                self.get_styles(self.contents[old_pos][0].date, False))
            self.contents[self.focus_position][0].set_styles(
                self.get_styles(self.contents[self.focus_position][0].date, True))
        if key in ['up', 'down']:
            # focus leaves this row entirely; drop the old cell's focus style
            self.contents[old_pos][0].set_styles(
                self.get_styles(self.contents[old_pos][0].date, False))
        return key
class CListBox(urwid.ListBox):
    """our custom version of ListBox containing a CalendarWalker instance
    it should contain a `CalendarWalker` instance which it autoextends on
    rendering, if needed

    Also implements a visual "marking" (range-selection) mode:
    ``self._marked`` is either False or a dict holding the anchor's
    'date' and 'pos' (row, column).
    """
    def __init__(self, walker):
        # True until the first render(); triggers auto-extend and centering
        self._init = True
        self.keybindings = walker.keybindings
        self.on_press = walker.on_press
        # False, or {'date': ..., 'pos': (row, col)}: anchor of a marked range
        self._marked = False
        # (row, col) of the focus at the end of the last _mark() call
        self._pos_old = False
        super(CListBox, self).__init__(walker)
    def render(self, size, focus=False):
        if self._init:
            # extend the walker until the visible area is filled, then center
            while 'bottom' in self.ends_visible(size):
                self.body._autoextend()
            self.set_focus_valign('middle')
            self._init = False
        return super(CListBox, self).render(size, focus)
    def _date(self, row, column):
        """return the date at row `row` and column `column`"""
        return self.body[row].contents[column][0].date
    def _unmark_one(self, row, column):
        """remove attribute *mark* from the date at row `row` and column
        `column`, returning it to the attributes given by the widget's
        get_styles function
        """
        self.body[row].contents[column][0].reset_styles()
    def _mark_one(self, row, column):
        """set attribute *mark* on the date at row `row` and column `column`"""
        self.body[row].contents[column][0].set_styles('mark')
    def _mark(self, a_date=None):
        """make sure everything between the marked entry and `a_date`
        is visually marked, and nothing else"""
        if a_date is None:
            a_date = self.body.focus_date
        def toggle(row, column):
            if self.body[row].contents[column][0].marked:
                self._mark_one(row, column)
            else:
                self._unmark_one(row, column)
        # only rows between the anchor and the focus can have changed;
        # two rows of slack on either side cover boundary cases
        start = min(self._marked['pos'][0], self.focus_position) - 2
        stop = max(self._marked['pos'][0], self.focus_position) + 2
        for row in range(start, stop):
            for col in range(1, 8):
                if a_date > self._marked['date']:
                    # marking forward in time from the anchor
                    if self._marked['date'] <= self._date(row, col) <= a_date:
                        self._mark_one(row, col)
                    else:
                        self._unmark_one(row, col)
                else:
                    # marking backward in time from the anchor
                    if self._marked['date'] >= self._date(row, col) >= a_date:
                        self._mark_one(row, col)
                    else:
                        self._unmark_one(row, col)
        toggle(self.focus_position, self.focus.focus_col)
        self._pos_old = self.focus_position, self.focus.focus_col
    def _unmark_all(self):
        """strip the *mark* attribute from every row the range touched"""
        start = min(self._marked['pos'][0], self.focus_position, self._pos_old[0])
        end = max(self._marked['pos'][0], self.focus_position, self._pos_old[0]) + 1
        for row in range(start, end):
            for col in range(1, 8):
                self._unmark_one(row, col)
    def set_focus_date(self, a_day):
        if self._marked:
            # re-anchor the visual marking around the new focus date
            self._unmark_all()
            self._mark(a_day)
        self.body.set_focus_date(a_day)
    def keypress(self, size, key):
        # leave marking mode (mark key again, or escape)
        if key in self.keybindings['mark'] + ['esc'] and self._marked:
            self._unmark_all()
            self._marked = False
            return
        # enter marking mode, anchored at the current focus
        if key in self.keybindings['mark']:
            self._marked = {'date': self.body.focus_date,
                            'pos': (self.focus_position, self.focus.focus_col)}
        # jump to the "other" end of the marked range
        # NOTE(review): 'other' is not among the default keybindings set up
        # in CalendarWidget -- confirm that callers always provide it
        if self._marked and key in self.keybindings['other']:
            row, col = self._marked['pos']
            self._marked = {'date': self.body.focus_date,
                            'pos': (self.focus_position, self.focus.focus_col)}
            self.focus.focus_col = col
            self.focus_position = row
        if key in self.on_press:
            # user callback: pass the focused date, or the marked range
            if self._marked:
                start = min(self.body.focus_date, self._marked['date'])
                end = max(self.body.focus_date, self._marked['date'])
            else:
                start = self.body.focus_date
                end = None
            return self.on_press[key](start, end)
        if key in self.keybindings['today']:
            # reset colors of currently focused Date widget
            self.focus.focus.set_styles(
                self.focus.get_styles(self.body.focus_date, False))
            self.set_focus_date(date.today())
            self.set_focus_valign(('relative', 10))
        key = super(CListBox, self).keypress(size, key)
        if self._marked:
            self._mark()
        return key
class CalendarWalker(urwid.SimpleFocusListWalker):
    """A list walker over calendar weeks (DateCColumns rows).

    Additional months are appended (`_autoextend`) or prepended
    (`_autoprepend`) on demand, so the calendar scrolls "infinitely".
    """
    def __init__(self, on_date_change, on_press, keybindings, firstweekday=0,
                 weeknumbers=False, get_styles=None):
        """
        :param on_date_change: called with the newly focused datetime.date
        :param on_press: dict mapping keys to callbacks (see CalendarWidget)
        :param keybindings: dict mapping commands to lists of keys
        :param firstweekday: first day of the week, 0 is Monday
        :type firstweekday: int
        :param weeknumbers: False, 'left' or 'right': where (if anywhere)
            ISO week numbers are displayed
        :param get_styles: function mapping (date, focus) to display attrs
        """
        self.firstweekday = firstweekday
        self.weeknumbers = weeknumbers
        self.on_date_change = on_date_change
        self.on_press = on_press
        self.keybindings = keybindings
        self.get_styles = get_styles
        weeks = self._construct_month()
        urwid.SimpleFocusListWalker.__init__(self, weeks)
    def set_focus(self, position):
        """set focus by item number, auto-extending at either end if needed"""
        while position >= len(self) - 1:
            self._autoextend()
        while position <= 0:
            no_additional_weeks = self._autoprepend()
            position += no_additional_weeks
        return urwid.SimpleFocusListWalker.set_focus(self, position)
    @property
    def focus_date(self):
        """return the date the focus is currently set to
        :rtype: datetime.date
        """
        return self[self.focus].focus.date
    def set_focus_date(self, a_day):
        """set the focus to `a_day`
        :type a_day: datetime.date
        """
        row, column = self.get_date_pos(a_day)
        self.set_focus(row)
        self[self.focus]._set_focus_position(column)
        return None
    def get_date_pos(self, a_day):
        """get row and column where `a_day` is located
        :type a_day: datetime.date
        :rtype: tuple(int, int)
        """
        # rough estimate of difference in lines, i.e. full weeks, we might be
        # off by as much as one week though
        week_diff = int((self.focus_date - a_day).days / 7)
        new_focus = self.focus - week_diff
        # in case new_focus is 1 we will later try set the focus to 0 which
        # will lead to an autoprepend which will mess up our estimation,
        # therefore better autoprepending anyway, even if it might not be
        # necessary
        if new_focus <= 1:
            self.set_focus(new_focus - 1)
            week_diff = int((self.focus_date - a_day).days / 7)
            new_focus = self.focus - week_diff
        for offset in [0, -1, 1]:  # we might be off by a week
            row = new_focus + offset
            self.set_focus(row)
            try:
                column = self[self.focus].get_date_column(a_day)
                return row, column
            except ValueError:
                pass
        # we didn't find the date we were looking for...
        raise ValueError('something is wrong')
    def _autoextend(self):
        """appends the next month"""
        date_last_month = self[-1][1].date  # a date from the last month
        last_month = date_last_month.month
        last_year = date_last_month.year
        month = last_month % 12 + 1
        year = last_year if not last_month == 12 else last_year + 1
        weeks = self._construct_month(year, month, clean_first_row=True)
        self.extend(weeks)
    def _autoprepend(self):
        """prepends the previous month
        :returns: number of weeks prepended
        :rtype: int
        """
        try:
            date_first_month = self[0][-1].date  # a date from the first month
        except AttributeError:
            # rightmost column is weeknumber
            date_first_month = self[0][-2].date
        first_month = date_first_month.month
        first_year = date_first_month.year
        if first_month == 1:
            month = 12
            year = first_year - 1
        else:
            month = first_month - 1
            year = first_year
        weeks = self._construct_month(year, month, clean_last_row=True)
        weeks.reverse()
        for one in weeks:
            self.insert(0, one)
        return len(weeks)
    def _construct_week(self, week):
        """
        constructs a DateCColumns week from a week of datetime.date objects.
        Also prepends the month name if the first day of the month is
        included in that week.
        :param week: list of datetime.date objects
        :returns: the week as a DateCColumns object
        :rtype: DateCColumns
        """
        if 1 in [day.day for day in week]:
            # the first of a month falls in this week: show the month name
            month_name = calendar.month_abbr[week[-1].month].ljust(4)
            attr = 'monthname'
        elif self.weeknumbers == 'left':
            month_name = ' {:2} '.format(getweeknumber(week[0]))
            attr = 'weeknumber_left'
        else:
            month_name = '    '
            attr = None
        this_week = [(4, urwid.AttrMap(urwid.Text(month_name), attr))]
        for day in week:
            new_date = Date(day, self.get_styles)
            this_week.append((2, new_date))
            new_date.set_styles(self.get_styles(new_date.date, False))
        if self.weeknumbers == 'right':
            this_week.append((2, urwid.AttrMap(
                urwid.Text('{:2}'.format(getweeknumber(week[0]))), 'weeknumber_right')))
        week = DateCColumns(this_week,
                            on_date_change=self.on_date_change,
                            on_press=self.on_press,
                            keybindings=self.keybindings,
                            dividechars=1,
                            get_styles=self.get_styles)
        return week
    def _construct_month(self, year=None, month=None, clean_first_row=False,
                         clean_last_row=False):
        """construct one month of DateCColumns

        :param year: the year this month is set in; defaults to the current
            year, evaluated at *call* time (the old default arguments
            ``date.today().year``/``.month`` were evaluated only once, at
            import time, and went stale in long-running processes)
        :type year: int or None
        :param month: the number of the month to be constructed; defaults
            to the current month
        :type month: int (1-12) or None
        :param clean_first_row: makes sure that the first element returned is
                                completely in `month` and not partly in the one
                                before (which might lead to that line occurring
                                twice)
        :type clean_first_row: bool
        :param clean_last_row: makes sure that the last element returned is
                               completely in `month` and not partly in the one
                               after (which might lead to that line occurring
                               twice)
        :type clean_last_row: bool
        :returns: list of DateCColumns covering `month`
        :rtype: list(DateCColumns)
        """
        today = date.today()
        if year is None:
            year = today.year
        if month is None:
            month = today.month
        plain_weeks = calendar.Calendar(
            self.firstweekday).monthdatescalendar(year, month)
        weeks = [self._construct_week(week) for week in plain_weeks]
        if clean_first_row and weeks[0][1].date.month != weeks[0][7].date.month:
            return weeks[1:]
        elif clean_last_row and \
                weeks[-1][1].date.month != weeks[-1][7].date.month:
            return weeks[:-1]
        else:
            return weeks
class CalendarWidget(urwid.WidgetWrap):
    """A re-usable, scrollable month calendar widget for urwid."""
    def __init__(self, on_date_change, keybindings, on_press, firstweekday=0,
                 weeknumbers=False, get_styles=None, initial=None):
        """
        :param on_date_change: a function that is called every time the
            selected date is changed, with the newly selected date as its
            first (and only) argument
        :type on_date_change: function
        :param keybindings: bind keys to specific functions, keys are
            commands (e.g. movement commands), values are lists of keys
            that should be bound to those commands. See below for the
            defaults.
            Available commands:
                'left', 'right', 'up', 'down': move cursor in direction
                'today': refocus on today
                'mark': toggles selection mode
        :type keybindings: dict
        :param on_press: dict of functions that are called when the key is
            pressed. These functions must accept at least two arguments. In
            the normal case the first argument is the currently selected date
            (datetime.date) and the second is *None*. When a date range is
            selected, the first argument is the earlier and the second
            argument is the later date. The functions' return values are
            interpreted as pressed keys.
        :type on_press: dict
        :param firstweekday: first day of the week, 0 is Monday
        :type firstweekday: int
        :param weeknumbers: False, 'left' or 'right': where ISO week
            numbers are shown
        :param get_styles: function mapping (date, focus) to display
            attributes; defaults to a built-in today/focus scheme
        :param initial: the date to focus initially; defaults to today,
            computed at call time.  (The old default of ``date.today()``
            was evaluated once at import time and therefore went stale
            across midnight in long-running processes.)
        :type initial: datetime.date or None
        """
        if initial is None:
            initial = date.today()
        default_keybindings = {
            'left': ['left'], 'down': ['down'], 'right': ['right'], 'up': ['up'],
            'today': ['t'],
            'view': [],
            'mark': ['v'],
        }
        from collections import defaultdict
        # unknown keys fall back to an identity callback
        on_press = defaultdict(lambda: lambda x: x, on_press)
        default_keybindings.update(keybindings)
        calendar.setfirstweekday(firstweekday)
        try:
            mylocale = '.'.join(getlocale())
        except TypeError:  # language code and encoding may be None
            mylocale = 'C'
        _calendar = calendar.LocaleTextCalendar(firstweekday, mylocale)
        weekheader = _calendar.formatweekheader(2)
        dnames = weekheader.split(' ')

        def _get_styles(day, focus):
            # default styling: highlight today and the focused cell
            # (parameter renamed from `date` so it no longer shadows
            # datetime.date)
            if focus:
                if day == date.today():
                    return 'today focus'
                else:
                    return 'reveal focus'
            else:
                if day == date.today():
                    return 'today'
                else:
                    return None

        if get_styles is None:
            get_styles = _get_styles
        if weeknumbers == 'right':
            dnames.append('#w')
        # header row with the (possibly localized) day names
        dnames = urwid.Columns(
            [(4, urwid.Text(' '))] +
            [(2, urwid.AttrMap(urwid.Text(name), 'dayname')) for name in dnames],
            dividechars=1)
        self.walker = CalendarWalker(
            on_date_change, on_press, default_keybindings, firstweekday,
            weeknumbers, get_styles)
        self.box = CListBox(self.walker)
        frame = urwid.Frame(self.box, header=dnames)
        urwid.WidgetWrap.__init__(self, frame)
        self.set_focus_date(initial)
    def focus_today(self):
        """refocus the calendar on today's date"""
        self.set_focus_date(date.today())
    @property
    def focus_date(self):
        """the currently focused date
        :rtype: datetime.date
        """
        return self.walker.focus_date
    def set_focus_date(self, a_day):
        """set the focus to `a_day`
        :type a_day: datetime.date
        """
        self.box.set_focus_date(a_day)
|
|
import os
import pickle
import time
import warnings
import matplotlib.pyplot as plt
import numpy as np
from algorithms import create_index, compute_feature_array, extract_pixel_feature, best_approximate_match, \
best_coherence_match, compute_distance
from config import setup_vars, save_metadata
from img_preprocess import convert_to_YIQ, convert_to_RGB, compute_gaussian_pyramid, initialize_Bp, remap_luminance, \
compress_values, ix2px, px2ix, Ap_ix2px, Ap_px2ix, savefig_noborder, pad_img_pair
def img_setup(A_fname, Ap_fname_list, B_fname, out_path, c):
    """Load the input images, normalize them, and build Gaussian pyramids.

    :param A_fname: filename of the source image A
    :param Ap_fname_list: list of filenames of the filtered sources A'
    :param B_fname: filename of the target image B
    :param out_path: output directory, created if it does not exist
    :param c: configuration object; its num_ch, padding_sm, padding_lg,
        weights and max_levels fields are filled in here
    :returns: tuple (A_pyr, Ap_pyr_list, B_pyr, Bp_pyr, color_pyr_list, c)
    """
    if not os.path.exists(out_path):
        os.makedirs(out_path)

    A_orig = plt.imread(A_fname)
    B_orig = plt.imread(B_fname)

    assert len(A_orig.shape) == len(B_orig.shape)  # same number of channels (for now)

    Ap_orig_list = []
    for Ap_fname in Ap_fname_list:
        Ap_orig = plt.imread(Ap_fname)
        assert A_orig.shape == Ap_orig.shape  # src alignment
        Ap_orig_list.append(Ap_orig)

    # Make sure all images are floats on a 0-to-1 scale.  The third scale
    # belongs to the A' images: inspect a complete A' image
    # (Ap_orig_list[0]).  The previous code looked at Ap_orig[0], i.e.
    # only the first *row* of the last loaded A' image, which could pick
    # the wrong scale.
    scales = []
    for img in [A_orig, B_orig, Ap_orig_list[0]]:
        if np.max(img) > 1.0:
            scales.append(255.)
        else:
            scales.append(1.0)

    # Convert to YIQ and keep only the luminance channel, if requested
    if c.convert:
        A_yiq = convert_to_YIQ(A_orig / scales[0])
        B_yiq = convert_to_YIQ(B_orig / scales[1])
        A = A_yiq[:, :, 0]
        B = B_yiq[:, :, 0]

        Ap_yiq_list = []
        Ap_list = []
        for Ap_orig in Ap_orig_list:
            Ap_yiq_list.append(convert_to_YIQ(Ap_orig / scales[2]))
            Ap_list.append(Ap_yiq_list[-1][:, :, 0])
    else:
        A = A_orig / scales[0]
        B = B_orig / scales[1]
        Ap_list = []
        for Ap_orig in Ap_orig_list:
            Ap_list.append(Ap_orig / scales[2])

    # Process input images
    if c.remap_lum:
        A, Ap_list = remap_luminance(A, Ap_list, B)

    if not c.init_rand:
        # keep an unmodified pyramid of B for the non-random initialization
        B_orig_pyr = compute_gaussian_pyramid(B, c.n_sm)

    A, B = compress_values(A, B, c.AB_weight)
    c.num_ch, c.padding_sm, c.padding_lg, c.weights = setup_vars(A)

    # Create pyramids
    A_pyr = compute_gaussian_pyramid(A, c.n_sm)
    B_pyr = compute_gaussian_pyramid(B, c.n_sm)
    Ap_pyr_list = [compute_gaussian_pyramid(Ap, c.n_sm) for Ap in Ap_list]

    # Color pyramids are used later to restore color in the outputs
    if c.convert:
        color_pyr_list = [compute_gaussian_pyramid(B_yiq, c.n_sm)]
    else:
        color_pyr_list = [compute_gaussian_pyramid(Ap_orig, c.n_sm) for Ap_orig in Ap_list]

    if len(A_pyr) != len(B_pyr):
        c.max_levels = min(len(A_pyr), len(B_pyr))
        warnings.warn('Warning: input images are very different sizes! The minimum number of levels will be used.')
    else:
        c.max_levels = len(B_pyr)

    # Create (possibly random) initialization of Bp
    if c.init_rand:
        Bp_pyr = initialize_Bp(B_pyr, init_rand=True)
    else:
        Bp_pyr = initialize_Bp(B_orig_pyr, init_rand=False)

    return A_pyr, Ap_pyr_list, B_pyr, Bp_pyr, color_pyr_list, c
def image_analogies_main(A_fname, Ap_fname_list, B_fname, out_path, c, debug=False):
    """Run the image-analogies synthesis A : A' :: B : B'.

    :param A_fname: filename of the source image A
    :param Ap_fname_list: list of filenames of the filtered sources A'
    :param B_fname: filename of the target image B
    :param out_path: output directory; paths are built by plain string
        concatenation below, so it is expected to end with a separator
    :param c: configuration object (see config.setup_vars / save_metadata)
    :param debug: if True, save per-level debugging figures and pickles
    """
    # # This is the setup code
    begin_time = time.time()
    start_time = time.time()
    # Load images
    A_pyr, Ap_pyr_list, B_pyr, Bp_pyr, color_pyr_list, c = img_setup(A_fname, Ap_fname_list, B_fname, out_path, c)
    # Save parameters for reference
    names = ['A_fname', 'Ap_fname_list', 'B_fname', 'c.convert', 'c.remap_lum', 'c.init_rand', 'c.AB_weight', 'c.k']
    vars = [ A_fname, Ap_fname_list, B_fname, c.convert, c.remap_lum, c.init_rand, c.AB_weight, c.k ]
    save_metadata(out_path, names, vars)
    # Pull features from B
    B_features = compute_feature_array(B_pyr, c, full_feat=True)
    stop_time = time.time()
    print 'Environment Setup: %f' % (stop_time - start_time)
    # Build Structures for ANN
    start_time = time.time()
    flann, flann_params, As, As_size = create_index(A_pyr, Ap_pyr_list, c)
    stop_time = time.time()
    ann_time_total = stop_time - start_time
    print 'ANN Index Creation: %f' % (ann_time_total)
    # ##########################################################################################
    # # This is the Algorithm Code
    # now we iterate per pixel in each level
    for level in range(1, c.max_levels):
        start_time = time.time()
        ann_time_level = 0
        print('Computing level %d of %d' % (level, c.max_levels - 1))
        imh, imw = Bp_pyr[level].shape[:2]
        color_im_out = np.nan * np.ones((imh, imw, 3))
        # s records the chosen source pixel (row, col) for each output pixel
        # in scan order; im records which A' image each choice came from
        s = []
        im = []
        if debug:
            # make debugging structures
            sa = []
            sc = []
            rstars = []
            p_src = np.nan * np.ones((imh, imw, 3))
            img_src = np.zeros((imh, imw))
            app_dist = np.zeros((imh, imw))
            coh_dist = np.zeros((imh, imw))
            # colors marking which match (approximate/coherence/neither) won
            app_color = np.array([1, 0, 0])
            coh_color = np.array([1, 1, 0])
            err_color = np.array([0, 0, 0])
            paths = ['%d_psrc.eps' % (level),
                     '%d_appdist.eps' % (level),
                     '%d_cohdist.eps' % (level),
                     '%d_output.eps' % (level),
                     '%d_imgsrc.eps' % (level)]
            vars = [p_src, app_dist, coh_dist, Bp_pyr[level], img_src]
        # synthesize Bp pixel by pixel, in raster-scan order
        for row in range(imh):
            for col in range(imw):
                px = np.array([row, col])
                # we pad on each iteration so Bp features will be more accurate
                Bp_pd = pad_img_pair(Bp_pyr[level - 1], Bp_pyr[level], c)
                BBp_feat = np.hstack([B_features[level][px2ix(px, imw), :],
                                      extract_pixel_feature(Bp_pd, px, c, full_feat=False)])
                assert(BBp_feat.shape == (As_size[level][1],))
                # Find Approx Nearest Neighbor
                ann_start_time = time.time()
                p_app_ix = best_approximate_match(flann[level], flann_params[level], BBp_feat)
                assert(p_app_ix < As_size[level][0])
                ann_stop_time = time.time()
                ann_time_level = ann_time_level + ann_stop_time - ann_start_time
                # translate p_app_ix back to row, col
                Ap_imh, Ap_imw = Ap_pyr_list[0][level].shape[:2]
                p_app, i_app = Ap_ix2px(p_app_ix, Ap_imh, Ap_imw)
                # is this the first iteration for this level?
                # then skip coherence step
                if len(s) < 1:
                    p = p_app
                    i = i_app
                # Find Coherence Match and Compare Distances
                else:
                    p_coh, i_coh, r_star = best_coherence_match(As[level], (Ap_imh, Ap_imw), BBp_feat, s, im, px, imw, c)
                    # (-1, -1) signals that no valid coherence match exists
                    if np.allclose(p_coh, np.array([-1, -1])):
                        p = p_app
                        i = i_app
                    else:
                        AAp_feat_app = As[level][p_app_ix]
                        AAp_feat_coh = As[level][Ap_px2ix(p_coh, i_coh, Ap_imh, Ap_imw)]
                        d_app = compute_distance(AAp_feat_app, BBp_feat, c.weights)
                        d_coh = compute_distance(AAp_feat_coh, BBp_feat, c.weights)
                        # prefer the coherence match unless it is worse than
                        # the approximate match by the level-dependent factor
                        if d_coh <= d_app * (1 + (2**(level - c.max_levels)) * c.k):
                            p = p_coh
                            i = i_coh
                        else:
                            p = p_app
                            i = i_app
                # Update Bp and s
                Bp_pyr[level][row, col] = Ap_pyr_list[i][level][tuple(p)]
                if not c.convert:
                    color_im_out[row, col, :] = color_pyr_list[i][level][tuple(p)]
                s.append(p)
                im.append(i)
                if debug:
                    sa.append(p_app)
                    # d_app/d_coh and p_coh are only defined when the
                    # coherence comparison actually ran, hence the guard
                    if len(s) > 1 and not np.allclose(p_coh, np.array([-1, -1])):
                        sc.append(p_coh)
                        rstars.append(r_star)
                        app_dist[row, col] = d_app
                        coh_dist[row, col] = d_coh
                        if np.allclose(p, p_coh):
                            p_src[row, col] = coh_color
                        elif np.allclose(p, p_app):
                            p_src[row, col] = app_color
                        else:
                            print('Look, a bug! Squash it!')
                            raise
                    else:
                        sc.append((0, 0))
                        rstars.append((0, 0))
                        p_src[row, col] = err_color
        ann_time_total = ann_time_total + ann_time_level
        if debug:
            assert(len(im) == np.product(img_src.shape))
            # grayscale map of which A' image each output pixel came from
            img_src[:, :] = (np.array(im).astype(np.float64)/np.max(im)).reshape(img_src.shape)
            # Save debugging structures
            for path, var in zip(paths, vars):
                fig = plt.imshow(var, interpolation='nearest', cmap='gray')
                savefig_noborder(out_path + path, fig)
                plt.close()
            # NOTE(review): text-mode 'w' pickle is Python-2 style; Python 3
            # would need 'wb' -- confirm the targeted interpreter
            with open(out_path + '%d_srcs.pickle' % level, 'w') as f:
                pickle.dump([sa, sc, rstars, s, im], f)
        # Save color output images
        if c.convert:
            # NOTE(review): color_pyr_list has a single element when
            # c.convert is True (see img_setup), but `i` here is the last
            # pixel's source index -- this indexes out of range when more
            # than one A' image is used; confirm intended behavior
            color_im_out = convert_to_RGB(np.dstack([Bp_pyr[level], color_pyr_list[i][level][:, :, 1:]]))
        color_im_out = np.clip(color_im_out, 0, 1)
        plt.imsave(out_path + 'level_%d_color.jpg' % level, color_im_out)
        plt.imsave(out_path + out_path.split('/')[-2] + '.jpg', color_im_out)
        stop_time = time.time()
        print 'Level %d time: %f' % (level, stop_time - start_time)
        print('Level %d ANN time: %f' % (level, ann_time_level))
    end_time = time.time()
    print 'Total time: %f' % (end_time - begin_time)
    print('ANN time: %f' % ann_time_total)
|
|
import pytest
import numpy as np
import pandas as pd
from dateutil.parser import parse as parse_date
from pdutils import df_compare, ndarray_compare, ts_compare, assert_, assert_not
# Example pandas DataFrame objects that are expected to be equal.
# Each entry is a (df1, df2) pair fed to df_compare via parametrize.
TEST_DF_SAME = [
    # Plain positional index, one float and one string column.
    (
        pd.DataFrame({'a': [1., 2., 3.], 'b': ['a', 'b', 'c']}),
        pd.DataFrame({'a': [1., 2., 3.], 'b': ['a', 'b', 'c']}),
    ),
    # Identical frames over a per-second DatetimeIndex.
    (
        pd.DataFrame({'a': [1., 2., 3.]}, pd.date_range('1970-01-01', periods=3, freq='S')),
        pd.DataFrame({'a': [1., 2., 3.]}, pd.date_range('1970-01-01', periods=3, freq='S')),
    ),
    # NOTE(review): identical to the previous pair -- looks like a
    # copy-paste duplicate; confirm whether a distinct case was intended.
    (
        pd.DataFrame({'a': [1., 2., 3.]}, pd.date_range('1970-01-01', periods=3, freq='S')),
        pd.DataFrame({'a': [1., 2., 3.]}, pd.date_range('1970-01-01', periods=3, freq='S')),
    ),
    # Mixed float and int columns.
    (
        pd.DataFrame({'a': [1., 2., 3.], 'b': [1, 2, 3]}, pd.date_range('1970-01-01', periods=3, freq='S')),
        pd.DataFrame({'a': [1., 2., 3.], 'b': [1, 2, 3]}, pd.date_range('1970-01-01', periods=3, freq='S')),
    ),
    # Same data with the columns declared in a different order; column
    # order should not affect equality.
    (
        pd.DataFrame({'a': [1., 2., 3.], 'b': [1, 2, 3]}, pd.date_range('1970-01-01', periods=3, freq='S')),
        pd.DataFrame({'a': [1., 2., 3.], 'b': [1, 2, 3]}, pd.date_range('1970-01-01', periods=3, freq='S'),
                     columns=['b', 'a']),
    ),
    # NaN in the same position should still compare as equal.
    (
        pd.DataFrame({'a': [np.nan, 2., 3.]}, pd.date_range('1970-01-01', periods=3, freq='S')),
        pd.DataFrame({'a': [np.nan, 2., 3.]}, pd.date_range('1970-01-01', periods=3, freq='S')),
    ),
]
# Example pandas DataFrame objects that are not expected to be equal.
# Each entry is a (df1, df2) pair fed to df_compare via parametrize.
TEST_DF_DIFFERENT = [
    # Differing string value.
    (
        pd.DataFrame({'a': [1., 2., 3.], 'b': ['a', 'b', 'c']}),
        pd.DataFrame({'a': [1., 2., 3.], 'b': ['a', 'b', 'd']}),
    ),
    # Differing float value.
    (
        pd.DataFrame({'a': [1., 2., 3.], 'b': ['a', 'b', 'c']}),
        pd.DataFrame({'a': [1., 2., 4.], 'b': ['a', 'b', 'c']}),
    ),
    # Positional index vs. DatetimeIndex (and a differing value).
    (
        pd.DataFrame({'a': [1., 2., 3.], 'b': ['a', 'b', 'c']}),
        pd.DataFrame({'a': [1., 2., 4.], 'b': ['a', 'b', 'c']}, pd.date_range('1970-01-01', periods=3, freq='S')),
    ),
    # Different lengths.
    (
        pd.DataFrame({'a': [1., 2.]}, pd.date_range('1970-01-01', periods=2, freq='S')),
        pd.DataFrame({'a': [np.nan, 2., 3.]}, pd.date_range('1970-01-01', periods=3, freq='S')),
    ),
    # Extra column on one side.
    (
        pd.DataFrame({'a': [1., 2., 3.]}, pd.date_range('1970-01-01', periods=3, freq='S')),
        pd.DataFrame({'a': [1., 2., 3.], 'b': [1., 2., 3.]}, pd.date_range('1970-01-01', periods=3, freq='S')),
    ),
    # Same values under different column names.
    (
        pd.DataFrame({'a': [1., 2., 3.], 'b': [1., 2., 3.]}, pd.date_range('1970-01-01', periods=3, freq='S')),
        pd.DataFrame({'a': [1., 2., 3.], 'c': [1., 2., 3.]}, pd.date_range('1970-01-01', periods=3, freq='S')),
    ),
    # Value vs. NaN in the same position.
    (
        pd.DataFrame({'a': [1., 2., 3.]}, pd.date_range('1970-01-01', periods=3, freq='S')),
        pd.DataFrame({'a': [np.nan, 2., 3.]}, pd.date_range('1970-01-01', periods=3, freq='S')),
    ),
    # NaN in different positions.
    (
        pd.DataFrame({'a': [2., np.nan, 3.]}, pd.date_range('1970-01-01', periods=3, freq='S')),
        pd.DataFrame({'a': [np.nan, 2., 3.]}, pd.date_range('1970-01-01', periods=3, freq='S')),
    ),
    # Float vs. bool dtype.
    (
        pd.DataFrame({'a': [1., 2., 3.]}, pd.date_range('1970-01-01', periods=3, freq='S')),
        pd.DataFrame({'a': [True, False, True]}, pd.date_range('1970-01-01', periods=3, freq='S')),
    ),
    # DatetimeIndex vs. integer index.
    (
        pd.DataFrame({'a': [1., 2., 3.]}, pd.date_range('1970-01-01', periods=3, freq='S')),
        pd.DataFrame({'a': [1., 2., 3.]}, [0, 1, 2]),
    ),
    # Same values but different index frequencies (seconds vs. days).
    (
        pd.DataFrame({'a': [1., 2., 3.]}, pd.date_range('1970-01-01', periods=3, freq='S')),
        pd.DataFrame({'a': [1., 2., 3.]}, pd.date_range('1970-01-01', periods=3, freq='D')),
    ),
]
# Example numpy ndarray objects that are expected to be equal.
# Each entry is an (a1, a2) pair fed to ndarray_compare via parametrize.
TEST_NDARRAY_SAME = [
    (np.array([]), np.array([])),        # empty 1-d arrays
    (np.array([[]]), np.array([[]])),    # empty 2-d arrays
    # same values reached via a transpose
    (np.array([[1, 2, 3], [4, 5, 6]]), np.array([[1, 4], [2, 5], [3, 6]]).T),
    (np.array([True, False, True]), np.array([True, False, True])),
    (np.array(['a', 'b', 'c']), np.array(['a', 'b', 'c'])),
    # object arrays of datetimes
    (
        np.array([parse_date('2013-01-01'), parse_date('2013-01-02'), parse_date('2013-01-03')]),
        np.array([parse_date('2013-01-01'), parse_date('2013-01-02'), parse_date('2013-01-03')]),
    ),
    (np.array([1., 2., 3.]), np.array([1., 2., 3.])),
    # NaN in the same position should still compare as equal
    (np.array([1., 2., np.nan]), np.array([1., 2., np.nan])),
]
# Example numpy ndarray objects that are not expected to be equal.
# Each entry is an (a1, a2) pair fed to ndarray_compare via parametrize.
TEST_NDARRAY_DIFFERENT = [
    (np.array([]), np.array([1])),       # empty vs. non-empty
    (np.array([[]]), np.array([1])),     # different shapes
    # same values but transposed shapes (2x3 vs. 3x2)
    (np.array([[1, 2, 3], [4, 5, 6]]), np.array([[1, 4], [2, 5], [3, 6]])),
    (np.array([1, 2, 3]), np.array([1, 2, 4])),
    (np.array([True, False, True]), np.array([True, True, True])),
    (np.array(['a', 'b', 'c']), np.array(['a', 'b', 'd'])),
    # object arrays of datetimes differing in the final element
    (
        np.array([parse_date('2013-01-01'), parse_date('2013-01-02'), parse_date('2013-01-03')]),
        np.array([parse_date('2013-01-01'), parse_date('2013-01-02'), parse_date('2013-01-04')]),
    ),
    (np.array([1., 2., 3.]), np.array([1, 2, 3])),          # float vs. int dtype
    (np.array([1., 2., np.nan]), np.array([1., np.nan, 2.])),  # NaN moved
    (np.array([1., 2., np.nan]), np.array([1., 3., np.nan])),  # value differs
]
# Example pandas Series (time series) objects that are expected to be equal.
# ``pd.TimeSeries`` was only ever an alias of ``pd.Series`` and has been
# removed from pandas, so ``pd.Series`` is the backward-compatible spelling.
# The lowercase ``'s'`` frequency alias is used because uppercase ``'S'``
# is deprecated in recent pandas.
TEST_TS_SAME = [
    # Identical float values over the same per-second DatetimeIndex.
    (
        pd.Series([1., 2., 3.], pd.date_range('1970-01-01', periods=3, freq='s')),
        pd.Series([1., 2., 3.], pd.date_range('1970-01-01', periods=3, freq='s')),
    ),
    # Identical integer values.
    (
        pd.Series([1, 2, 3], pd.date_range('1970-01-01', periods=3, freq='s')),
        pd.Series([1, 2, 3], pd.date_range('1970-01-01', periods=3, freq='s')),
    ),
    # NaN in the same position should still compare as equal.
    (
        pd.Series([np.nan, 2., 3.], pd.date_range('1970-01-01', periods=3, freq='s')),
        pd.Series([np.nan, 2., 3.], pd.date_range('1970-01-01', periods=3, freq='s')),
    ),
]
# Example pandas Series (time series) objects that are not expected to be
# equal.  ``pd.TimeSeries`` was only ever an alias of ``pd.Series`` and has
# been removed from pandas, so ``pd.Series`` is the backward-compatible
# spelling.  The lowercase ``'s'`` frequency alias is used because uppercase
# ``'S'`` is deprecated in recent pandas.
TEST_TS_DIFFERENT = [
    # Different lengths.
    (
        pd.Series([1., 2.], pd.date_range('1970-01-01', periods=2, freq='s')),
        pd.Series([np.nan, 2., 3.], pd.date_range('1970-01-01', periods=3, freq='s')),
    ),
    # Value vs. NaN in the same position.
    (
        pd.Series([1., 2., 3.], pd.date_range('1970-01-01', periods=3, freq='s')),
        pd.Series([np.nan, 2., 3.], pd.date_range('1970-01-01', periods=3, freq='s')),
    ),
    # NaN in different positions.
    (
        pd.Series([2., np.nan, 3.], pd.date_range('1970-01-01', periods=3, freq='s')),
        pd.Series([np.nan, 2., 3.], pd.date_range('1970-01-01', periods=3, freq='s')),
    ),
    # Float vs. bool dtype.
    (
        pd.Series([1., 2., 3.], pd.date_range('1970-01-01', periods=3, freq='s')),
        pd.Series([True, False, True], pd.date_range('1970-01-01', periods=3, freq='s')),
    ),
    # DatetimeIndex vs. integer index.
    (
        pd.Series([1., 2., 3.], pd.date_range('1970-01-01', periods=3, freq='s')),
        pd.Series([1., 2., 3.], [0, 1, 2]),
    ),
    # Same values but different index frequencies (seconds vs. days).
    (
        pd.Series([1., 2., 3.], pd.date_range('1970-01-01', periods=3, freq='s')),
        pd.Series([1., 2., 3.], pd.date_range('1970-01-01', periods=3, freq='D')),
    ),
]
@pytest.mark.parametrize(('df1', 'df2'), TEST_DF_SAME)
def test_df_compare_same(df1, df2):
    """Frames built from matching data must compare as equal."""
    outcome = df_compare(df1, df2)
    assert_(outcome)
@pytest.mark.parametrize(('df1', 'df2'), TEST_DF_DIFFERENT)
def test_df_compare_different(df1, df2):
    """Frames built from mismatched data must compare as unequal."""
    outcome = df_compare(df1, df2)
    assert_not(outcome)
@pytest.mark.parametrize(('a1', 'a2'), TEST_NDARRAY_SAME)
def test_ndarray_compare_same(a1, a2):
    """Arrays built from matching data must compare as equal."""
    outcome = ndarray_compare(a1, a2)
    assert_(outcome)
@pytest.mark.parametrize(('a1', 'a2'), TEST_NDARRAY_DIFFERENT)
def test_ndarray_compare_different(a1, a2):
    """Arrays built from mismatched data must compare as unequal."""
    outcome = ndarray_compare(a1, a2)
    assert_not(outcome)
@pytest.mark.parametrize(('ts1', 'ts2'), TEST_TS_SAME)
def test_ts_compare_same(ts1, ts2):
    """Series built from matching data must compare as equal."""
    outcome = ts_compare(ts1, ts2)
    assert_(outcome)
@pytest.mark.parametrize(('ts1', 'ts2'), TEST_TS_DIFFERENT)
def test_ts_compare_different(ts1, ts2):
    """Series built from mismatched data must compare as unequal."""
    outcome = ts_compare(ts1, ts2)
    assert_not(outcome)
@pytest.mark.parametrize(('df1', 'df2', 'tolerance'), [
    (
        pd.DataFrame({'a': [1., 2., 3.]}, pd.date_range('1970-01-01', periods=3, freq='S')),
        pd.DataFrame({'a': [1.0001, 2.0002, 3.]}, pd.date_range('1970-01-01', periods=3, freq='S')),
        1e-4,  # tolerance
    ),
    (
        pd.DataFrame({'a': [1., 2., 3.]}, pd.date_range('1970-01-01', periods=3, freq='S')),
        pd.DataFrame({'a': [1.01, 2.02, 3.]}, pd.date_range('1970-01-01', periods=3, freq='S')),
        1e-2,  # tolerance
    ),
])
def test_df_compare_same_with_custom_float_precision(df1, df2, tolerance):
    """Frames whose floats differ within ``rtol`` must compare as equal."""
    outcome = df_compare(df1, df2, verbose=True, rtol=tolerance)
    assert_(outcome)
# ``pd.TimeSeries`` was removed from pandas; ``pd.Series`` is the
# backward-compatible replacement (TimeSeries was just an alias).  The
# lowercase ``'s'`` frequency alias replaces the deprecated ``'S'``.
@pytest.mark.parametrize(('ts1', 'ts2', 'tolerance'), [
    (
        pd.Series([1., 2., 3.], pd.date_range('1970-01-01', periods=3, freq='s')),
        pd.Series([1.0001, 2.0002, 3.], pd.date_range('1970-01-01', periods=3, freq='s')),
        1e-4,  # tolerance
    ),
    (
        pd.Series([1., 2., 3.], pd.date_range('1970-01-01', periods=3, freq='s')),
        pd.Series([1.01, 2.02, 3.], pd.date_range('1970-01-01', periods=3, freq='s')),
        1e-2,  # tolerance
    ),
])
def test_ts_compare_same_with_custom_float_precision(ts1, ts2, tolerance):
    """Series whose floats differ within ``rtol`` must compare as equal."""
    assert_(ts_compare(ts1, ts2, verbose=True, rtol=tolerance))
|
|
# -*- coding: utf-8 -*-
from urllib.request import urlopen
from ipaddress import ip_address
from collections import deque
from .dcc import DCCManager
from .dcc import DCCChat
from .dec import dcc_event
from .dec import event
from .dec import extend
from .dec import plugin
from . import config
from . import utils
from . import rfc
from . import base
from .compat import asyncio
from .compat import Queue
import venusian
import time
class IrcConnection(asyncio.Protocol):
    """asyncio protocol handling an IRC connection.

    Incoming bytes are decoded and buffered until a complete
    CRLF-terminated line is available, then each line is dispatched to
    the owning factory (the bot).
    """
    def connection_made(self, transport):
        """Store the transport and reset the partial-line buffer."""
        self.transport = transport
        self.closed = False
        self.queue = deque()
    def decode(self, data):
        """Decode data with bot's encoding (default ascii), dropping
        undecodable bytes."""
        return data.decode(getattr(self, 'encoding', 'ascii'), 'ignore')
    def data_received(self, data):
        """Split a chunk into CRLF lines and dispatch the complete ones."""
        text = self.decode(data)
        if self.queue:
            # prepend the partial line left over from the previous chunk
            text = self.queue.popleft() + text
        *complete, partial = text.split('\r\n')
        # keep the trailing (possibly empty) fragment for the next chunk
        self.queue.append(partial)
        for line in complete:
            self.factory.dispatch(line)
    def write(self, data):
        """Encode and send data, ensuring CRLF termination."""
        if data is None:
            return
        payload = data.encode(self.encoding)
        if not payload.endswith(b'\r\n'):
            payload += b'\r\n'
        self.transport.write(payload)
    def connection_lost(self, exc):
        """Log the disconnect, close once, and schedule a reconnect."""
        self.factory.log.critical('connection lost (%s): %r',
                                  id(self.transport),
                                  exc)
        self.factory.notify('connection_lost')
        if not self.closed:
            self.close()
            # wait a few before reconnect
            self.factory.loop.call_later(
                2, self.factory.create_connection)
    def close(self):
        """Close the underlying transport exactly once."""
        if self.closed:
            return
        self.factory.log.critical('closing old transport (%r)',
                                  id(self.transport))
        try:
            self.transport.close()
        finally:
            self.closed = True
class IrcBot(base.IrcObject):
    """An IRC bot"""
    _pep8 = [dcc_event, event, extend, plugin, rfc, config]
    venusian = venusian
    venusian_categories = [
        'irc3',
        'irc3.dcc',
        'irc3.extend',
        'irc3.rfc1459',
        'irc3.plugins.cron',
        'irc3.plugins.command',
    ]
    logging_config = config.LOGGING
    defaults = dict(
        base.IrcObject.defaults,
        nick='irc3',
        username='irc3',
        realname='Irc bot based on irc3 http://irc3.readthedocs.io',
        host='localhost',
        mode=0,
        url='https://irc3.readthedocs.io/',
        passwords={},
        flood_burst=4,
        flood_rate=1,
        flood_rate_delay=1,
        ctcp=dict(
            version='irc3 {version} - {url}',
            userinfo='{realname}',
            time='{now:%c}',
        ),
        # freenode config as default for testing
        server_config=dict(
            STATUSMSG='+@',
            PREFIX='(ov)@+',
            CHANTYPES='#',
            CHANMODES='eIbq,k,flj,CFLMPQScgimnprstz',
        ),
        connection=IrcConnection,
    )
    def __init__(self, *ini, **config):
        """Initialize the bot from ini file(s) and/or keyword config."""
        update_config_needed = False
        if 'userinfo' in config or \
           ('realname' in config and 'username' not in config):
            update_config_needed = True  # pragma: no cover
        super(IrcBot, self).__init__(*ini, **config)
        if update_config_needed:  # pragma: no cover
            # Backward compat. Remove me in 2017
            self.log.fatal('realname has been renamed to username.')
            self.log.fatal('userinfo has been renamed to realname.')
            self.log.fatal('Please update your config with something like:.')
            if 'realname' in self.config:
                self.log.fatal('username = %(realname)s', self.config)
            if 'userinfo' in self.config:
                self.log.fatal('realname = %(userinfo)s', self.config)
            import sys
            sys.exit(-1)
        self.queue = None
        # NOTE: 'async' became a reserved keyword in Python 3.7, so the
        # flag must be read with dict-style access; the former
        # ``self.config.async`` attribute access is a SyntaxError there.
        if self.config.get('async'):
            self.queue = Queue(loop=self.loop)
            self.awaiting_queue = self.create_task(self.process_queue())
        self._ip = self._dcc = None
        # auto include the sasl plugin if needed
        if 'sasl_username' in self.config and \
                'irc3.plugins.sasl' not in self.registry.includes:
            self.include('irc3.plugins.sasl')
        # auto include the autojoins plugin if needed (for backward compat)
        if 'autojoins' in self.config and \
                'irc3.plugins.autojoins' not in self.registry.includes:
            self.include('irc3.plugins.autojoins')
    @property
    def server_config(self):
        """return server configuration (rfc rpl 005)::

            >>> bot = IrcBot()
            >>> print(bot.server_config['STATUSMSG'])
            +@

        The real values are only available after the server sent them.
        """
        return self.config.server_config
    def connection_made(self, f):  # pragma: no cover
        """Callback for create_connection(): wire up the new protocol, or
        retry in a few seconds when the connect attempt failed."""
        if getattr(self, 'protocol', None):
            self.protocol.close()
        try:
            transport, protocol = f.result()
        except Exception as e:
            self.log.exception(e)
            self.loop.call_later(3, self.create_connection)
        else:
            self.log.debug('Connected')
            self.protocol = protocol
            self.protocol.queue = deque()
            self.protocol.factory = self
            self.protocol.encoding = self.encoding
            if self.config.get('password'):
                self._send('PASS {password}'.format(**self.config))
            self.notify('connection_ready')
            # register with the server before anything else
            self.send((
                'USER {username} {mode} * :{realname}\r\n'
                'NICK {nick}\r\n'
            ).format(**self.config))
            self.notify('connection_made')
    def send_line(self, data, nowait=False):
        """send a line to the server. replace CR by spaces"""
        data = data.replace('\n', ' ').replace('\r', ' ')
        f = asyncio.Future(loop=self.loop)
        if self.queue is not None and nowait is False:
            # flood-controlled path: resolved by process_queue()
            self.queue.put_nowait((f, data))
        else:
            self.send(data.replace('\n', ' ').replace('\r', ' '))
            f.set_result(True)
        return f
    @asyncio.coroutine
    def process_queue(self):
        """Drain the outgoing queue, applying simple flood control:
        send up to ``flood_burst`` lines at once, then throttle to
        ``flood_rate`` lines per ``flood_rate_delay`` seconds."""
        flood_burst = self.config.flood_burst
        delay = float(self.config.flood_rate_delay)
        flood_rate = delay / float(self.config.flood_rate)
        while True:
            if flood_burst == 0:
                future, data = yield from self.queue.get()
                future.set_result(True)
                self.send(data)
                yield from asyncio.sleep(.001, loop=self.loop)
            else:
                lines = []
                for i in range(flood_burst):
                    future, data = yield from self.queue.get()
                    future.set_result(True)
                    lines.append(data)
                    if self.queue.empty():
                        break
                if lines:
                    self.send(u'\r\n'.join(lines))
                while not self.queue.empty():
                    yield from asyncio.sleep(flood_rate, loop=self.loop)
                    future, data = yield from self.queue.get()
                    future.set_result(True)
                    self.send(data)
    def send(self, data):
        """send data to the server"""
        self._send(data)
    def _send(self, data):
        self.protocol.write(data)
        self.dispatch(data, iotype='out')
    def privmsg(self, target, message, nowait=False):
        """send a privmsg to target"""
        if message:
            messages = utils.split_message(message, self.config.max_length)
            if isinstance(target, DCCChat):
                for message in messages:
                    target.send_line(message)
            elif target:
                f = None
                for message in messages:
                    f = self.send_line('PRIVMSG %s :%s' % (target, message),
                                       nowait=nowait)
                return f
    def action(self, target, message, nowait=False):
        """send an ACTION (/me) to target"""
        return self.privmsg(target, '\x01ACTION %s\x01' % message,
                            nowait=nowait)
    def notice(self, target, message, nowait=False):
        """send a notice to target"""
        if message:
            messages = utils.split_message(message, self.config.max_length)
            if isinstance(target, DCCChat):
                for message in messages:
                    target.action(message)
            elif target:
                f = None
                for message in messages:
                    f = self.send_line('NOTICE %s :%s' % (target, message),
                                       nowait=nowait)
                return f
    def ctcp(self, target, message, nowait=False):
        """send a ctcp to target"""
        if target and message:
            messages = utils.split_message(message, self.config.max_length)
            f = None
            for message in messages:
                f = self.send_line('PRIVMSG %s :\x01%s\x01' % (target,
                                                               message),
                                   nowait=nowait)
            return f
    def ctcp_reply(self, target, message, nowait=False):
        """send a ctcp reply to target"""
        if target and message:
            messages = utils.split_message(message, self.config.max_length)
            f = None
            for message in messages:
                f = self.send_line('NOTICE %s :\x01%s\x01' % (target, message),
                                   nowait=nowait)
            return f
    def mode(self, target, *data):
        """set user or channel mode"""
        self.send_line('MODE %s %s' % (target, ' '.join(data)), nowait=True)
    def join(self, target):
        """join a channel"""
        password = self.config.passwords.get(
            target.strip(self.server_config['CHANTYPES']))
        if password:
            target += ' ' + password
        self.send_line('JOIN %s' % target)
    def part(self, target, reason=None):
        """quit a channel"""
        if reason:
            target += ' :' + reason
        self.send_line('PART %s' % target)
    def kick(self, channel, target, reason=None):
        """kick target from channel"""
        if reason:
            target += ' :' + reason
        self.send_line('KICK %s %s' % (channel, target), nowait=True)
    def invite(self, target, channel):
        """invite target to a channel"""
        self.send_line('INVITE %s %s' % (target, channel))
    def topic(self, channel, topic=None):
        """change or request the topic of a channel"""
        if topic:
            channel += ' :' + topic
        self.send_line('TOPIC %s' % channel)
    def away(self, message=None):
        """mark ourself as away"""
        cmd = 'AWAY'
        if message:
            cmd += ' :' + message
        self.send_line(cmd)
    def unaway(self):
        """mask ourself as no longer away"""
        self.away()
    def quit(self, reason=None):
        """disconnect"""
        if not reason:
            reason = 'bye'
        self.send_line('QUIT :%s' % reason)
    def get_nick(self):
        """return the configured nickname"""
        return self.config.nick
    def set_nick(self, nick):
        """request a nickname change from the server"""
        self.send_line('NICK ' + nick, nowait=True)
    nick = property(get_nick, set_nick, doc='nickname get/set')
    @property
    def ip(self):
        """return bot's ip as an ``ip_address`` object"""
        if not self._ip:
            if 'ip' in self.config:
                ip = self.config['ip']
            else:
                ip = self.protocol.transport.get_extra_info('sockname')[0]
            ip = ip_address(ip)
            if ip.version == 4:
                self._ip = ip
            else:  # pragma: no cover
                # IPv6 local address: look up our public IPv4 address
                response = urlopen('http://ipv4.icanhazip.com/')
                ip = response.read().strip().decode()
                ip = ip_address(ip)
                self._ip = ip
        return self._ip
    @property
    def dcc(self):
        """return the :class:`~irc3.dcc.DCCManager`"""
        if self._dcc is None:
            self._dcc = DCCManager(self)
        return self._dcc
    @asyncio.coroutine
    def dcc_chat(self, mask, host=None, port=None):
        """Open a DCC CHAT whith mask. If host/port are specified then connect
        to a server. Else create a server"""
        return self.dcc.create(
            'chat', mask, host=host, port=port).ready
    @asyncio.coroutine
    def dcc_get(self, mask, host, port, filepath, filesize=None):
        """DCC GET a file from mask. filepath must be an absolute path with an
        existing directory. filesize is the expected file size."""
        return self.dcc.create(
            'get', mask, filepath=filepath, filesize=filesize,
            host=host, port=port).ready
    @asyncio.coroutine
    def dcc_send(self, mask, filepath):
        """DCC SEND a file to mask. filepath must be an absolute path to
        existing file"""
        return self.dcc.create('send', mask, filepath=filepath).ready
    @asyncio.coroutine
    def dcc_accept(self, mask, filepath, port, pos):
        """accept a DCC RESUME for an axisting DCC SEND. filepath is the
        filename to sent. port is the port opened on the server.
        pos is the expected offset"""
        return self.dcc.resume(mask, filepath, port, pos)
    def SIGHUP(self):
        """reload the bot on SIGHUP"""
        self.reload()
    def SIGINT(self):
        """notify plugins, say goodbye to the server and stop the loop"""
        self.notify('SIGINT')
        if getattr(self, 'protocol', None):
            self.quit('INT')
            time.sleep(1)
        self.loop.stop()
def run(argv=None):
    """Console entry point: build an :class:`IrcBot` from command line args."""
    bot = IrcBot.from_argv(argv)
    return bot
|
|
from __future__ import unicode_literals
import re
import tempfile
from django.contrib.gis import gdal
from django.contrib.gis.db.models import Extent, MakeLine, Union
from django.contrib.gis.geos import (
GeometryCollection, GEOSGeometry, LinearRing, LineString, Point, Polygon,
fromstr,
)
from django.core.management import call_command
from django.db import connection
from django.test import TestCase, ignore_warnings, skipUnlessDBFeature
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from ..utils import no_oracle, oracle, postgis, skipUnlessGISLookup, spatialite
from .models import (
City, Country, Feature, MinusOneSRID, NonConcreteModel, PennsylvaniaCity,
State, Track,
)
def postgis_bug_version():
    """Return a truthy value when the PostGIS version is 2.0.0 or 2.0.1.

    Those releases are the ones with the known left/right lookup bug
    checked by the callers of this helper.
    """
    version = getattr(connection.ops, "spatial_version", (0, 0, 0))
    if not version:
        # preserve the short-circuit semantics of ``version and <expr>``
        return version
    return (2, 0, 0) <= version <= (2, 0, 1)
@skipUnlessDBFeature("gis_enabled")
class GeoModelTest(TestCase):
    """Tests for CRUD, lazy-geometry access and (de)serialization of
    geographic models (City, State, Country, Feature, ...)."""
    fixtures = ['initial']
    def test_fixtures(self):
        "Testing geographic model initialization from fixtures."
        # Ensuring that data was loaded from initial data fixtures.
        self.assertEqual(2, Country.objects.count())
        self.assertEqual(8, City.objects.count())
        self.assertEqual(2, State.objects.count())
    def test_proxy(self):
        "Testing Lazy-Geometry support (using the GeometryProxy)."
        # Testing on a Point
        pnt = Point(0, 0)
        nullcity = City(name='NullCity', point=pnt)
        nullcity.save()
        # Making sure TypeError is thrown when trying to set with an
        # incompatible type.
        for bad in [5, 2.0, LineString((0, 0), (1, 1))]:
            try:
                nullcity.point = bad
            except TypeError:
                pass
            else:
                self.fail('Should throw a TypeError')
        # Now setting with a compatible GEOS Geometry, saving, and ensuring
        # the save took, notice no SRID is explicitly set.
        new = Point(5, 23)
        nullcity.point = new
        # Ensuring that the SRID is automatically set to that of the
        # field after assignment, but before saving.
        self.assertEqual(4326, nullcity.point.srid)
        nullcity.save()
        # Ensuring the point was saved correctly after saving
        self.assertEqual(new, City.objects.get(name='NullCity').point)
        # Setting the X and Y of the Point
        nullcity.point.x = 23
        nullcity.point.y = 5
        # Checking assignments pre & post-save.
        self.assertNotEqual(Point(23, 5), City.objects.get(name='NullCity').point)
        nullcity.save()
        self.assertEqual(Point(23, 5), City.objects.get(name='NullCity').point)
        nullcity.delete()
        # Testing on a Polygon
        shell = LinearRing((0, 0), (0, 100), (100, 100), (100, 0), (0, 0))
        inner = LinearRing((40, 40), (40, 60), (60, 60), (60, 40), (40, 40))
        # Creating a State object using a built Polygon
        ply = Polygon(shell, inner)
        nullstate = State(name='NullState', poly=ply)
        self.assertEqual(4326, nullstate.poly.srid)  # SRID auto-set from None
        nullstate.save()
        ns = State.objects.get(name='NullState')
        self.assertEqual(ply, ns.poly)
        # Testing the `ogr` and `srs` lazy-geometry properties.
        if gdal.HAS_GDAL:
            self.assertIsInstance(ns.poly.ogr, gdal.OGRGeometry)
            self.assertEqual(ns.poly.wkb, ns.poly.ogr.wkb)
            self.assertIsInstance(ns.poly.srs, gdal.SpatialReference)
            self.assertEqual('WGS 84', ns.poly.srs.name)
        # Changing the interior ring on the poly attribute.
        new_inner = LinearRing((30, 30), (30, 70), (70, 70), (70, 30), (30, 30))
        ns.poly[1] = new_inner
        ply[1] = new_inner
        self.assertEqual(4326, ns.poly.srid)
        ns.save()
        self.assertEqual(ply, State.objects.get(name='NullState').poly)
        ns.delete()
    @skipUnlessDBFeature("supports_transform")
    def test_lookup_insert_transform(self):
        "Testing automatic transform for lookups and inserts."
        # San Antonio in 'WGS84' (SRID 4326)
        sa_4326 = 'POINT (-98.493183 29.424170)'
        wgs_pnt = fromstr(sa_4326, srid=4326)  # Our reference point in WGS84
        # Oracle doesn't have SRID 3084, using 41157.
        if oracle:
            # San Antonio in 'Texas 4205, Southern Zone (1983, meters)' (SRID 41157)
            # Used the following Oracle SQL to get this value:
            #  SELECT SDO_UTIL.TO_WKTGEOMETRY(
            #    SDO_CS.TRANSFORM(SDO_GEOMETRY('POINT (-98.493183 29.424170)', 4326), 41157))
            #  )
            #  FROM DUAL;
            nad_wkt = 'POINT (300662.034646583 5416427.45974934)'
            nad_srid = 41157
        else:
            # San Antonio in 'NAD83(HARN) / Texas Centric Lambert Conformal' (SRID 3084)
            # Used ogr.py in gdal 1.4.1 for this transform
            nad_wkt = 'POINT (1645978.362408288754523 6276356.025927528738976)'
            nad_srid = 3084
        # Constructing & querying with a point from a different SRID. Oracle
        # `SDO_OVERLAPBDYINTERSECT` operates differently from
        # `ST_Intersects`, so contains is used instead.
        nad_pnt = fromstr(nad_wkt, srid=nad_srid)
        if oracle:
            tx = Country.objects.get(mpoly__contains=nad_pnt)
        else:
            tx = Country.objects.get(mpoly__intersects=nad_pnt)
        self.assertEqual('Texas', tx.name)
        # Creating San Antonio.  Remember the Alamo.
        sa = City.objects.create(name='San Antonio', point=nad_pnt)
        # Now verifying that San Antonio was transformed correctly
        sa = City.objects.get(name='San Antonio')
        self.assertAlmostEqual(wgs_pnt.x, sa.point.x, 6)
        self.assertAlmostEqual(wgs_pnt.y, sa.point.y, 6)
        # If the GeometryField SRID is -1, then we shouldn't perform any
        # transformation if the SRID of the input geometry is different.
        m1 = MinusOneSRID(geom=Point(17, 23, srid=4326))
        m1.save()
        self.assertEqual(-1, m1.geom.srid)
    def test_createnull(self):
        "Testing creating a model instance and the geometry being None"
        c = City()
        self.assertEqual(c.point, None)
    def test_geometryfield(self):
        "Testing the general GeometryField."
        Feature(name='Point', geom=Point(1, 1)).save()
        Feature(name='LineString', geom=LineString((0, 0), (1, 1), (5, 5))).save()
        Feature(name='Polygon', geom=Polygon(LinearRing((0, 0), (0, 5), (5, 5), (5, 0), (0, 0)))).save()
        Feature(name='GeometryCollection',
                geom=GeometryCollection(Point(2, 2), LineString((0, 0), (2, 2)),
                                        Polygon(LinearRing((0, 0), (0, 5), (5, 5), (5, 0), (0, 0))))).save()
        f_1 = Feature.objects.get(name='Point')
        self.assertIsInstance(f_1.geom, Point)
        self.assertEqual((1.0, 1.0), f_1.geom.tuple)
        f_2 = Feature.objects.get(name='LineString')
        self.assertIsInstance(f_2.geom, LineString)
        self.assertEqual(((0.0, 0.0), (1.0, 1.0), (5.0, 5.0)), f_2.geom.tuple)
        f_3 = Feature.objects.get(name='Polygon')
        self.assertIsInstance(f_3.geom, Polygon)
        f_4 = Feature.objects.get(name='GeometryCollection')
        self.assertIsInstance(f_4.geom, GeometryCollection)
        # the polygon stored alone equals the one inside the collection
        self.assertEqual(f_3.geom, f_4.geom[2])
    @skipUnlessDBFeature("supports_transform")
    def test_inherited_geofields(self):
        "Test GeoQuerySet methods on inherited Geometry fields."
        # Creating a Pennsylvanian city.
        PennsylvaniaCity.objects.create(name='Mansfield', county='Tioga', point='POINT(-77.071445 41.823881)')
        # All transformation SQL will need to be performed on the
        # _parent_ table.
        qs = PennsylvaniaCity.objects.transform(32128)
        self.assertEqual(1, qs.count())
        for pc in qs:
            self.assertEqual(32128, pc.point.srid)
    def test_raw_sql_query(self):
        "Testing raw SQL query."
        cities1 = City.objects.all()
        # Only PostGIS would support a 'select *' query because of its recognized
        # HEXEWKB format for geometry fields
        as_text = 'ST_AsText(%s)' if postgis else connection.ops.select
        # the template is %-formatted twice: once for the function wrapper,
        # then again to substitute the 'point' column name
        cities2 = City.objects.raw(
            'select id, name, %s from geoapp_city' % as_text % 'point'
        )
        self.assertEqual(len(cities1), len(list(cities2)))
        self.assertIsInstance(cities2[0].point, Point)
    def test_dumpdata_loaddata_cycle(self):
        """
        Test a dumpdata/loaddata cycle with geographic data.
        """
        out = six.StringIO()
        original_data = list(City.objects.all().order_by('name'))
        call_command('dumpdata', 'geoapp.City', stdout=out)
        result = out.getvalue()
        houston = City.objects.get(name='Houston')
        self.assertIn('"point": "%s"' % houston.point.ewkt, result)
        # Reload now dumped data
        with tempfile.NamedTemporaryFile(mode='w', suffix='.json') as tmp:
            tmp.write(result)
            tmp.seek(0)
            call_command('loaddata', tmp.name, verbosity=0)
        self.assertListEqual(original_data, list(City.objects.all().order_by('name')))
@skipUnlessDBFeature("gis_enabled")
class GeoLookupTest(TestCase):
fixtures = ['initial']
def test_disjoint_lookup(self):
"Testing the `disjoint` lookup type."
ptown = City.objects.get(name='Pueblo')
qs1 = City.objects.filter(point__disjoint=ptown.point)
self.assertEqual(7, qs1.count())
if connection.features.supports_real_shape_operations:
qs2 = State.objects.filter(poly__disjoint=ptown.point)
self.assertEqual(1, qs2.count())
self.assertEqual('Kansas', qs2[0].name)
def test_contains_contained_lookups(self):
"Testing the 'contained', 'contains', and 'bbcontains' lookup types."
# Getting Texas, yes we were a country -- once ;)
texas = Country.objects.get(name='Texas')
# Seeing what cities are in Texas, should get Houston and Dallas,
# and Oklahoma City because 'contained' only checks on the
# _bounding box_ of the Geometries.
if connection.features.supports_contained_lookup:
qs = City.objects.filter(point__contained=texas.mpoly)
self.assertEqual(3, qs.count())
cities = ['Houston', 'Dallas', 'Oklahoma City']
for c in qs:
self.assertIn(c.name, cities)
# Pulling out some cities.
houston = City.objects.get(name='Houston')
wellington = City.objects.get(name='Wellington')
pueblo = City.objects.get(name='Pueblo')
okcity = City.objects.get(name='Oklahoma City')
lawrence = City.objects.get(name='Lawrence')
# Now testing contains on the countries using the points for
# Houston and Wellington.
tx = Country.objects.get(mpoly__contains=houston.point) # Query w/GEOSGeometry
nz = Country.objects.get(mpoly__contains=wellington.point.hex) # Query w/EWKBHEX
self.assertEqual('Texas', tx.name)
self.assertEqual('New Zealand', nz.name)
# Testing `contains` on the states using the point for Lawrence.
ks = State.objects.get(poly__contains=lawrence.point)
self.assertEqual('Kansas', ks.name)
# Pueblo and Oklahoma City (even though OK City is within the bounding box of Texas)
# are not contained in Texas or New Zealand.
self.assertEqual(len(Country.objects.filter(mpoly__contains=pueblo.point)), 0) # Query w/GEOSGeometry object
self.assertEqual(len(Country.objects.filter(mpoly__contains=okcity.point.wkt)),
0 if connection.features.supports_real_shape_operations else 1) # Query w/WKT
# OK City is contained w/in bounding box of Texas.
if connection.features.supports_bbcontains_lookup:
qs = Country.objects.filter(mpoly__bbcontains=okcity.point)
self.assertEqual(1, len(qs))
self.assertEqual('Texas', qs[0].name)
@skipUnlessDBFeature("supports_crosses_lookup")
def test_crosses_lookup(self):
Track.objects.create(
name='Line1',
line=LineString([(-95, 29), (-60, 0)])
)
self.assertEqual(
Track.objects.filter(line__crosses=LineString([(-95, 0), (-60, 29)])).count(),
1
)
self.assertEqual(
Track.objects.filter(line__crosses=LineString([(-95, 30), (0, 30)])).count(),
0
)
@skipUnlessDBFeature("supports_left_right_lookups")
def test_left_right_lookups(self):
"Testing the 'left' and 'right' lookup types."
# Left: A << B => true if xmax(A) < xmin(B)
# Right: A >> B => true if xmin(A) > xmax(B)
# See: BOX2D_left() and BOX2D_right() in lwgeom_box2dfloat4.c in PostGIS source.
# The left/right lookup tests are known failures on PostGIS 2.0/2.0.1
# http://trac.osgeo.org/postgis/ticket/2035
if postgis_bug_version():
self.skipTest("PostGIS 2.0/2.0.1 left and right lookups are known to be buggy.")
# Getting the borders for Colorado & Kansas
co_border = State.objects.get(name='Colorado').poly
ks_border = State.objects.get(name='Kansas').poly
# Note: Wellington has an 'X' value of 174, so it will not be considered
# to the left of CO.
# These cities should be strictly to the right of the CO border.
cities = ['Houston', 'Dallas', 'Oklahoma City',
'Lawrence', 'Chicago', 'Wellington']
qs = City.objects.filter(point__right=co_border)
self.assertEqual(6, len(qs))
for c in qs:
self.assertIn(c.name, cities)
# These cities should be strictly to the right of the KS border.
cities = ['Chicago', 'Wellington']
qs = City.objects.filter(point__right=ks_border)
self.assertEqual(2, len(qs))
for c in qs:
self.assertIn(c.name, cities)
# Note: Wellington has an 'X' value of 174, so it will not be considered
# to the left of CO.
vic = City.objects.get(point__left=co_border)
self.assertEqual('Victoria', vic.name)
cities = ['Pueblo', 'Victoria']
qs = City.objects.filter(point__left=ks_border)
self.assertEqual(2, len(qs))
for c in qs:
self.assertIn(c.name, cities)
@skipUnlessGISLookup("strictly_above", "strictly_below")
def test_strictly_above_below_lookups(self):
dallas = City.objects.get(name='Dallas')
self.assertQuerysetEqual(
City.objects.filter(point__strictly_above=dallas.point).order_by('name'),
['Chicago', 'Lawrence', 'Oklahoma City', 'Pueblo', 'Victoria'],
lambda b: b.name
)
self.assertQuerysetEqual(
City.objects.filter(point__strictly_below=dallas.point).order_by('name'),
['Houston', 'Wellington'],
lambda b: b.name
)
def test_equals_lookups(self):
"Testing the 'same_as' and 'equals' lookup types."
pnt = fromstr('POINT (-95.363151 29.763374)', srid=4326)
c1 = City.objects.get(point=pnt)
c2 = City.objects.get(point__same_as=pnt)
c3 = City.objects.get(point__equals=pnt)
for c in [c1, c2, c3]:
self.assertEqual('Houston', c.name)
@skipUnlessDBFeature("supports_null_geometries")
def test_null_geometries(self):
    "Testing NULL geometry support, and the `isnull` lookup type."
    # A state created without a boundary gets a NULL geometry.
    State.objects.create(name='Puerto Rico')
    null_states = State.objects.filter(poly__isnull=True)
    valid_states = State.objects.filter(poly__isnull=False)
    # Only the newly created commonwealth should match isnull=True.
    self.assertEqual(1, len(null_states))
    self.assertEqual('Puerto Rico', null_states[0].name)
    # The fixture states (Colorado & Kansas) have real boundaries.
    self.assertEqual(2, len(valid_states))
    names = [state.name for state in valid_states]
    self.assertIn('Colorado', names)
    self.assertIn('Kansas', names)
    # Explicitly passing poly=None must also persist NULL.
    nmi = State.objects.create(name='Northern Mariana Islands', poly=None)
    self.assertEqual(nmi.poly, None)
    # Assign a real polygon, save, then UPDATE the column back to NULL.
    nmi.poly = 'POLYGON((0 0,1 0,1 1,1 0,0 0))'
    nmi.save()
    State.objects.filter(name='Northern Mariana Islands').update(poly=None)
    self.assertIsNone(State.objects.get(name='Northern Mariana Islands').poly)
@skipUnlessDBFeature("supports_relate_lookup")
def test_relate_lookup(self):
    "Testing the 'relate' lookup type."
    # To make things more interesting, we will have our Texas reference point in
    # different SRIDs.
    pnt1 = fromstr('POINT (649287.0363174 4177429.4494686)', srid=2847)
    pnt2 = fromstr('POINT(-98.4919715741052 29.4333344025053)', srid=4326)
    # Not passing in a geometry as first param should
    # raise a type error when initializing the GeoQuerySet
    self.assertRaises(ValueError, Country.objects.filter, mpoly__relate=(23, 'foo'))
    # Making sure the right exception is raised for the given
    # bad arguments.
    for bad_args, e in [((pnt1, 0), ValueError), ((pnt2, 'T*T***FF*', 0), ValueError)]:
        qs = Country.objects.filter(mpoly__relate=bad_args)
        self.assertRaises(e, qs.count)
    # Relate works differently for the different backends.
    # NOTE: the masks below are DE-9IM intersection-matrix strings for
    # PostGIS/SpatiaLite, but named relationships for Oracle.
    if postgis or spatialite:
        contains_mask = 'T*T***FF*'
        within_mask = 'T*F**F***'
        intersects_mask = 'T********'
    elif oracle:
        contains_mask = 'contains'
        within_mask = 'inside'
        # TODO: This is not quite the same as the PostGIS mask above
        intersects_mask = 'overlapbdyintersect'
    # Testing contains relation mask.
    self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt1, contains_mask)).name)
    self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt2, contains_mask)).name)
    # Testing within relation mask.
    ks = State.objects.get(name='Kansas')
    self.assertEqual('Lawrence', City.objects.get(point__relate=(ks.poly, within_mask)).name)
    # Testing intersection relation mask.
    if not oracle:
        self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt1, intersects_mask)).name)
        self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt2, intersects_mask)).name)
        self.assertEqual('Lawrence', City.objects.get(point__relate=(ks.poly, intersects_mask)).name)
@skipUnlessDBFeature("gis_enabled")
@ignore_warnings(category=RemovedInDjango20Warning)
class GeoQuerySetTest(TestCase):
    # Exercises the deprecated GeoQuerySet geometry-output methods and the
    # spatial aggregates (Extent, MakeLine, Union) against the fixture data.
    fixtures = ['initial']

    # Please keep the tests in GeoQuerySet method's alphabetic order

    @skipUnlessDBFeature("has_centroid_method")
    def test_centroid(self):
        "Testing the `centroid` GeoQuerySet method."
        qs = State.objects.exclude(poly__isnull=True).centroid()
        # Tolerance depends on the backend's coordinate precision.
        if oracle:
            tol = 0.1
        elif spatialite:
            tol = 0.000001
        else:
            tol = 0.000000001
        for s in qs:
            self.assertTrue(s.poly.centroid.equals_exact(s.centroid, tol))

    @skipUnlessDBFeature(
        "has_difference_method", "has_intersection_method",
        "has_sym_difference_method", "has_union_method")
    def test_diff_intersection_union(self):
        "Testing the `difference`, `intersection`, `sym_difference`, and `union` GeoQuerySet methods."
        geom = Point(5, 23)
        qs = Country.objects.all().difference(geom).sym_difference(geom).union(geom)
        # XXX For some reason SpatiaLite does something screwy with the Texas geometry here. Also,
        # XXX it doesn't like the null intersection.
        if spatialite:
            qs = qs.exclude(name='Texas')
        else:
            qs = qs.intersection(geom)
        for c in qs:
            if oracle:
                # Should be able to execute the queries; however, they won't be the same
                # as GEOS (because Oracle doesn't use GEOS internally like PostGIS or
                # SpatiaLite).
                pass
            else:
                self.assertEqual(c.mpoly.difference(geom), c.difference)
                if not spatialite:
                    self.assertEqual(c.mpoly.intersection(geom), c.intersection)
                # Ordering might differ in collections
                self.assertSetEqual(set(g.wkt for g in c.mpoly.sym_difference(geom)),
                                    set(g.wkt for g in c.sym_difference))
                self.assertSetEqual(set(g.wkt for g in c.mpoly.union(geom)),
                                    set(g.wkt for g in c.union))

    @skipUnlessDBFeature("has_envelope_method")
    def test_envelope(self):
        "Testing the `envelope` GeoQuerySet method."
        countries = Country.objects.all().envelope()
        for country in countries:
            self.assertIsInstance(country.envelope, Polygon)

    @skipUnlessDBFeature("supports_extent_aggr")
    def test_extent(self):
        """
        Testing the `Extent` aggregate.
        """
        # Reference query:
        # `SELECT ST_extent(point) FROM geoapp_city WHERE (name='Houston' or name='Dallas');`
        # => BOX(-96.8016128540039 29.7633724212646,-95.3631439208984 32.7820587158203)
        expected = (-96.8016128540039, 29.7633724212646, -95.3631439208984, 32.782058715820)
        qs = City.objects.filter(name__in=('Houston', 'Dallas'))
        extent = qs.aggregate(Extent('point'))['point__extent']
        for val, exp in zip(extent, expected):
            self.assertAlmostEqual(exp, val, 4)
        # Aggregating over an empty queryset yields None, not a box.
        self.assertIsNone(City.objects.filter(name=('Smalltown')).aggregate(Extent('point'))['point__extent'])

    @skipUnlessDBFeature("supports_extent_aggr")
    def test_extent_with_limit(self):
        """
        Testing if extent supports limit.
        """
        extent1 = City.objects.all().aggregate(Extent('point'))['point__extent']
        extent2 = City.objects.all()[:3].aggregate(Extent('point'))['point__extent']
        self.assertNotEqual(extent1, extent2)

    @skipUnlessDBFeature("has_force_rhr_method")
    def test_force_rhr(self):
        "Testing GeoQuerySet.force_rhr()."
        rings = (
            ((0, 0), (5, 0), (0, 5), (0, 0)),
            ((1, 1), (1, 3), (3, 1), (1, 1)),
        )
        # Expected coordinates after reorienting to the right-hand rule.
        rhr_rings = (
            ((0, 0), (0, 5), (5, 0), (0, 0)),
            ((1, 1), (3, 1), (1, 3), (1, 1)),
        )
        State.objects.create(name='Foo', poly=Polygon(*rings))
        s = State.objects.force_rhr().get(name='Foo')
        self.assertEqual(rhr_rings, s.force_rhr.coords)

    @skipUnlessDBFeature("has_geohash_method")
    def test_geohash(self):
        "Testing GeoQuerySet.geohash()."
        # Reference query:
        # SELECT ST_GeoHash(point) FROM geoapp_city WHERE name='Houston';
        # SELECT ST_GeoHash(point, 5) FROM geoapp_city WHERE name='Houston';
        ref_hash = '9vk1mfq8jx0c8e0386z6'
        h1 = City.objects.geohash().get(name='Houston')
        h2 = City.objects.geohash(precision=5).get(name='Houston')
        self.assertEqual(ref_hash, h1.geohash)
        self.assertEqual(ref_hash[:5], h2.geohash)

    def test_geojson(self):
        "Testing GeoJSON output from the database using GeoQuerySet.geojson()."
        # Only PostGIS and SpatiaLite support GeoJSON.
        if not connection.ops.geojson:
            self.assertRaises(NotImplementedError, Country.objects.all().geojson, field_name='mpoly')
            return
        pueblo_json = '{"type":"Point","coordinates":[-104.609252,38.255001]}'
        houston_json = (
            '{"type":"Point","crs":{"type":"name","properties":'
            '{"name":"EPSG:4326"}},"coordinates":[-95.363151,29.763374]}'
        )
        victoria_json = (
            '{"type":"Point","bbox":[-123.30519600,48.46261100,-123.30519600,48.46261100],'
            '"coordinates":[-123.305196,48.462611]}'
        )
        chicago_json = (
            '{"type":"Point","crs":{"type":"name","properties":{"name":"EPSG:4326"}},'
            '"bbox":[-87.65018,41.85039,-87.65018,41.85039],"coordinates":[-87.65018,41.85039]}'
        )
        # SpatiaLite renders the bbox with trimmed trailing zeros.
        if spatialite:
            victoria_json = (
                '{"type":"Point","bbox":[-123.305196,48.462611,-123.305196,48.462611],'
                '"coordinates":[-123.305196,48.462611]}'
            )
        # Precision argument should only be an integer
        self.assertRaises(TypeError, City.objects.geojson, precision='foo')
        # Reference queries and values.
        # SELECT ST_AsGeoJson("geoapp_city"."point", 8, 0)
        # FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Pueblo';
        self.assertEqual(pueblo_json, City.objects.geojson().get(name='Pueblo').geojson)
        # SELECT ST_AsGeoJson("geoapp_city"."point", 8, 2) FROM "geoapp_city"
        # WHERE "geoapp_city"."name" = 'Houston';
        # This time we want to include the CRS by using the `crs` keyword.
        self.assertEqual(houston_json, City.objects.geojson(crs=True, model_att='json').get(name='Houston').json)
        # SELECT ST_AsGeoJson("geoapp_city"."point", 8, 1) FROM "geoapp_city"
        # WHERE "geoapp_city"."name" = 'Houston';
        # This time we include the bounding box by using the `bbox` keyword.
        self.assertEqual(victoria_json, City.objects.geojson(bbox=True).get(name='Victoria').geojson)
        # SELECT ST_AsGeoJson("geoapp_city"."point", 5, 3) FROM "geoapp_city"
        # WHERE "geoapp_city"."name" = 'Chicago';
        # Finally, we set every available keyword.
        self.assertEqual(
            chicago_json,
            City.objects.geojson(bbox=True, crs=True, precision=5).get(name='Chicago').geojson
        )

    @skipUnlessDBFeature("has_gml_method")
    def test_gml(self):
        "Testing GML output from the database using GeoQuerySet.gml()."
        # Should throw a TypeError when trying to obtain GML from a
        # non-geometry field.
        qs = City.objects.all()
        self.assertRaises(TypeError, qs.gml, field_name='name')
        ptown1 = City.objects.gml(field_name='point', precision=9).get(name='Pueblo')
        ptown2 = City.objects.gml(precision=9).get(name='Pueblo')
        if oracle:
            # No precision parameter for Oracle :-/
            gml_regex = re.compile(
                r'^<gml:Point srsName="EPSG:4326" xmlns:gml="http://www.opengis.net/gml">'
                r'<gml:coordinates decimal="\." cs="," ts=" ">-104.60925\d+,38.25500\d+ '
                r'</gml:coordinates></gml:Point>'
            )
        else:
            gml_regex = re.compile(
                r'^<gml:Point srsName="EPSG:4326"><gml:coordinates>'
                r'-104\.60925\d+,38\.255001</gml:coordinates></gml:Point>'
            )
        for ptown in [ptown1, ptown2]:
            self.assertTrue(gml_regex.match(ptown.gml))
        if postgis:
            self.assertIn('<gml:pos srsDimension="2">', City.objects.gml(version=3).get(name='Pueblo').gml)

    @skipUnlessDBFeature("has_kml_method")
    def test_kml(self):
        "Testing KML output from the database using GeoQuerySet.kml()."
        # Should throw a TypeError when trying to obtain KML from a
        # non-geometry field.
        qs = City.objects.all()
        self.assertRaises(TypeError, qs.kml, 'name')
        # Ensuring the KML is as expected.
        ptown1 = City.objects.kml(field_name='point', precision=9).get(name='Pueblo')
        ptown2 = City.objects.kml(precision=9).get(name='Pueblo')
        for ptown in [ptown1, ptown2]:
            self.assertEqual('<Point><coordinates>-104.609252,38.255001</coordinates></Point>', ptown.kml)

    def test_make_line(self):
        """
        Testing the `MakeLine` aggregate.
        """
        if not connection.features.supports_make_line_aggr:
            self.assertRaises(
                NotImplementedError,
                City.objects.all().aggregate, MakeLine('point')
            )
            return
        # MakeLine on an inappropriate field returns simply None
        self.assertIsNone(State.objects.aggregate(MakeLine('poly'))['poly__makeline'])
        # Reference query:
        # SELECT AsText(ST_MakeLine(geoapp_city.point)) FROM geoapp_city;
        ref_line = GEOSGeometry(
            'LINESTRING(-95.363151 29.763374,-96.801611 32.782057,'
            '-97.521157 34.464642,174.783117 -41.315268,-104.609252 38.255001,'
            '-95.23506 38.971823,-87.650175 41.850385,-123.305196 48.462611)',
            srid=4326
        )
        # We check for equality with a tolerance of 10e-5 which is a lower bound
        # of the precisions of ref_line coordinates
        line = City.objects.aggregate(MakeLine('point'))['point__makeline']
        self.assertTrue(
            ref_line.equals_exact(line, tolerance=10e-5),
            "%s != %s" % (ref_line, line)
        )

    @skipUnlessDBFeature("has_num_geom_method")
    def test_num_geom(self):
        "Testing the `num_geom` GeoQuerySet method."
        # Both 'countries' only have two geometries.
        for c in Country.objects.num_geom():
            self.assertEqual(2, c.num_geom)
        for c in City.objects.filter(point__isnull=False).num_geom():
            # Oracle and PostGIS 2.0+ will return 1 for the number of
            # geometries on non-collections.
            self.assertEqual(1, c.num_geom)

    @skipUnlessDBFeature("supports_num_points_poly")
    def test_num_points(self):
        "Testing the `num_points` GeoQuerySet method."
        for c in Country.objects.num_points():
            self.assertEqual(c.mpoly.num_points, c.num_points)
        if not oracle:
            # Oracle cannot count vertices in Point geometries.
            for c in City.objects.num_points():
                self.assertEqual(1, c.num_points)

    @skipUnlessDBFeature("has_point_on_surface_method")
    def test_point_on_surface(self):
        "Testing the `point_on_surface` GeoQuerySet method."
        # Reference values.
        if oracle:
            # SELECT SDO_UTIL.TO_WKTGEOMETRY(SDO_GEOM.SDO_POINTONSURFACE(GEOAPP_COUNTRY.MPOLY, 0.05))
            # FROM GEOAPP_COUNTRY;
            ref = {'New Zealand': fromstr('POINT (174.616364 -36.100861)', srid=4326),
                   'Texas': fromstr('POINT (-103.002434 36.500397)', srid=4326),
                   }
        else:
            # Using GEOSGeometry to compute the reference point on surface values
            # -- since PostGIS also uses GEOS these should be the same.
            ref = {'New Zealand': Country.objects.get(name='New Zealand').mpoly.point_on_surface,
                   'Texas': Country.objects.get(name='Texas').mpoly.point_on_surface
                   }
        for c in Country.objects.point_on_surface():
            if spatialite:
                # XXX This seems to be a WKT-translation-related precision issue?
                tol = 0.00001
            else:
                tol = 0.000000001
            self.assertTrue(ref[c.name].equals_exact(c.point_on_surface, tol))

    @skipUnlessDBFeature("has_reverse_method")
    def test_reverse_geom(self):
        "Testing GeoQuerySet.reverse_geom()."
        coords = [(-95.363151, 29.763374), (-95.448601, 29.713803)]
        Track.objects.create(name='Foo', line=LineString(coords))
        t = Track.objects.reverse_geom().get(name='Foo')
        coords.reverse()
        self.assertEqual(tuple(coords), t.reverse_geom.coords)
        if oracle:
            self.assertRaises(TypeError, State.objects.reverse_geom)

    @skipUnlessDBFeature("has_scale_method")
    def test_scale(self):
        "Testing the `scale` GeoQuerySet method."
        xfac, yfac = 2, 3
        tol = 5  # XXX The low precision tolerance is for SpatiaLite
        qs = Country.objects.scale(xfac, yfac, model_att='scaled')
        for c in qs:
            # Compare every coordinate of every ring of every polygon.
            for p1, p2 in zip(c.mpoly, c.scaled):
                for r1, r2 in zip(p1, p2):
                    for c1, c2 in zip(r1.coords, r2.coords):
                        self.assertAlmostEqual(c1[0] * xfac, c2[0], tol)
                        self.assertAlmostEqual(c1[1] * yfac, c2[1], tol)

    @skipUnlessDBFeature("has_snap_to_grid_method")
    def test_snap_to_grid(self):
        "Testing GeoQuerySet.snap_to_grid()."
        # Let's try and break snap_to_grid() with bad combinations of arguments.
        for bad_args in ((), range(3), range(5)):
            self.assertRaises(ValueError, Country.objects.snap_to_grid, *bad_args)
        for bad_args in (('1.0',), (1.0, None), tuple(map(six.text_type, range(4)))):
            self.assertRaises(TypeError, Country.objects.snap_to_grid, *bad_args)
        # Boundary for San Marino, courtesy of Bjorn Sandvik of thematicmapping.org
        # from the world borders dataset he provides.
        wkt = ('MULTIPOLYGON(((12.41580 43.95795,12.45055 43.97972,12.45389 43.98167,'
               '12.46250 43.98472,12.47167 43.98694,12.49278 43.98917,'
               '12.50555 43.98861,12.51000 43.98694,12.51028 43.98277,'
               '12.51167 43.94333,12.51056 43.93916,12.49639 43.92333,'
               '12.49500 43.91472,12.48778 43.90583,12.47444 43.89722,'
               '12.46472 43.89555,12.45917 43.89611,12.41639 43.90472,'
               '12.41222 43.90610,12.40782 43.91366,12.40389 43.92667,'
               '12.40500 43.94833,12.40889 43.95499,12.41580 43.95795)))')
        Country.objects.create(name='San Marino', mpoly=fromstr(wkt))
        # Because floating-point arithmetic isn't exact, we set a tolerance
        # to pass into GEOS `equals_exact`.
        tol = 0.000000001
        # SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.1)) FROM "geoapp_country"
        # WHERE "geoapp_country"."name" = 'San Marino';
        ref = fromstr('MULTIPOLYGON(((12.4 44,12.5 44,12.5 43.9,12.4 43.9,12.4 44)))')
        self.assertTrue(ref.equals_exact(Country.objects.snap_to_grid(0.1).get(name='San Marino').snap_to_grid, tol))
        # SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.05, 0.23)) FROM "geoapp_country"
        # WHERE "geoapp_country"."name" = 'San Marino';
        ref = fromstr('MULTIPOLYGON(((12.4 43.93,12.45 43.93,12.5 43.93,12.45 43.93,12.4 43.93)))')
        self.assertTrue(
            ref.equals_exact(Country.objects.snap_to_grid(0.05, 0.23).get(name='San Marino').snap_to_grid, tol)
        )
        # SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.5, 0.17, 0.05, 0.23)) FROM "geoapp_country"
        # WHERE "geoapp_country"."name" = 'San Marino';
        ref = fromstr(
            'MULTIPOLYGON(((12.4 43.87,12.45 43.87,12.45 44.1,12.5 44.1,12.5 43.87,12.45 43.87,12.4 43.87)))'
        )
        self.assertTrue(
            ref.equals_exact(
                Country.objects.snap_to_grid(0.05, 0.23, 0.5, 0.17).get(name='San Marino').snap_to_grid,
                tol
            )
        )

    @skipUnlessDBFeature("has_svg_method")
    def test_svg(self):
        "Testing SVG output using GeoQuerySet.svg()."
        self.assertRaises(TypeError, City.objects.svg, precision='foo')
        # SELECT AsSVG(geoapp_city.point, 0, 8) FROM geoapp_city WHERE name = 'Pueblo';
        svg1 = 'cx="-104.609252" cy="-38.255001"'
        # Even though relative, only one point so it's practically the same except for
        # the 'c' letter prefix on the x,y values.
        svg2 = svg1.replace('c', '')
        self.assertEqual(svg1, City.objects.svg().get(name='Pueblo').svg)
        self.assertEqual(svg2, City.objects.svg(relative=5).get(name='Pueblo').svg)

    @skipUnlessDBFeature("has_transform_method")
    def test_transform(self):
        "Testing the transform() GeoQuerySet method."
        # Pre-transformed points for Houston and Pueblo.
        htown = fromstr('POINT(1947516.83115183 6322297.06040572)', srid=3084)
        ptown = fromstr('POINT(992363.390841912 481455.395105533)', srid=2774)
        prec = 3  # Precision is low due to version variations in PROJ and GDAL.
        # Asserting the result of the transform operation with the values in
        # the pre-transformed points. Oracle does not have the 3084 SRID.
        if not oracle:
            h = City.objects.transform(htown.srid).get(name='Houston')
            self.assertEqual(3084, h.point.srid)
            self.assertAlmostEqual(htown.x, h.point.x, prec)
            self.assertAlmostEqual(htown.y, h.point.y, prec)
        p1 = City.objects.transform(ptown.srid, field_name='point').get(name='Pueblo')
        p2 = City.objects.transform(srid=ptown.srid).get(name='Pueblo')
        for p in [p1, p2]:
            self.assertEqual(2774, p.point.srid)
            self.assertAlmostEqual(ptown.x, p.point.x, prec)
            self.assertAlmostEqual(ptown.y, p.point.y, prec)

    @skipUnlessDBFeature("has_translate_method")
    def test_translate(self):
        "Testing the `translate` GeoQuerySet method."
        xfac, yfac = 5, -23
        qs = Country.objects.translate(xfac, yfac, model_att='translated')
        for c in qs:
            for p1, p2 in zip(c.mpoly, c.translated):
                for r1, r2 in zip(p1, p2):
                    for c1, c2 in zip(r1.coords, r2.coords):
                        # XXX The low precision is for SpatiaLite
                        self.assertAlmostEqual(c1[0] + xfac, c2[0], 5)
                        self.assertAlmostEqual(c1[1] + yfac, c2[1], 5)

    # TODO: Oracle can be made to pass if
    # union1 = union2 = fromstr('POINT (-97.5211570000000023 34.4646419999999978)')
    # but this seems unexpected and should be investigated to determine the cause.
    @skipUnlessDBFeature("has_unionagg_method")
    @no_oracle
    def test_unionagg(self):
        """
        Testing the `Union` aggregate.
        """
        tx = Country.objects.get(name='Texas').mpoly
        # Houston, Dallas -- Ordering may differ depending on backend or GEOS version.
        union1 = fromstr('MULTIPOINT(-96.801611 32.782057,-95.363151 29.763374)')
        union2 = fromstr('MULTIPOINT(-95.363151 29.763374,-96.801611 32.782057)')
        qs = City.objects.filter(point__within=tx)
        self.assertRaises(ValueError, qs.aggregate, Union('name'))
        # Using `field_name` keyword argument in one query and specifying an
        # order in the other (which should not be used because this is
        # an aggregate method on a spatial column)
        u1 = qs.aggregate(Union('point'))['point__union']
        u2 = qs.order_by('name').aggregate(Union('point'))['point__union']
        tol = 0.00001
        self.assertTrue(union1.equals_exact(u1, tol) or union2.equals_exact(u1, tol))
        self.assertTrue(union1.equals_exact(u2, tol) or union2.equals_exact(u2, tol))
        # Aggregating over an empty queryset yields None.
        qs = City.objects.filter(name='NotACity')
        self.assertIsNone(qs.aggregate(Union('point'))['point__union'])

    def test_within_subquery(self):
        """
        Test that using a queryset inside a geo lookup is working (using a subquery)
        (#14483).
        """
        tex_cities = City.objects.filter(
            point__within=Country.objects.filter(name='Texas').values('mpoly')).order_by('name')
        expected = ['Dallas', 'Houston']
        if not connection.features.supports_real_shape_operations:
            expected.append('Oklahoma City')
        self.assertEqual(
            list(tex_cities.values_list('name', flat=True)),
            expected
        )

    def test_non_concrete_field(self):
        # Regression check: models with non-concrete fields must still be
        # creatable and iterable.
        NonConcreteModel.objects.create(point=Point(0, 0), name='name')
        list(NonConcreteModel.objects.all())

    def test_values_srid(self):
        # values() must preserve the SRID on the geometries it returns.
        for c, v in zip(City.objects.all(), City.objects.values()):
            self.assertEqual(c.point.srid, v['point'].srid)
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
WSGI middleware for OpenStack API controllers.
"""
from oslo_config import cfg
from oslo_log import log as logging
import routes
import stevedore
from report.api.openstack import extensions
from report.api.openstack import wsgi
from report import exception
from report.i18n import _
from report.i18n import _LC
from report.i18n import _LI
from report.i18n import _LW
from report import wsgi as base_wsgi
from report.api.openstack.wsgi_test import Resource
api_opts = [
cfg.BoolOpt('enabled',
default=False,
help='Whether the V3 API is enabled or not'),
cfg.ListOpt('extensions_blacklist',
default=[],
help='A list of v3 API extensions to never load. '
'Specify the extension aliases here.'),
cfg.ListOpt('extensions_whitelist',
default=[],
help='If the list is not empty then a v3 API extension '
'will only be loaded if it exists in this list. Specify '
'the extension aliases here.')
]
api_opts_group = cfg.OptGroup(name='osapi_v3', title='API v3 Options')
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_group(api_opts_group)
CONF.register_opts(api_opts, api_opts_group)
# List of v3 API extensions which are considered to form
# the core API and so must be present
# TODO(cyeoh): Expand this list as the core APIs are ported to V3
"""
API_V3_CORE_EXTENSIONS = set(['os-consoles',
'extensions',
'os-flavor-extra-specs',
'os-flavor-manage',
'flavors',
'ips',
'os-keypairs',
'os-flavor-access',
'server-metadata',
'servers',
'versions'])
"""
API_V3_CORE_EXTENSIONS = set(['versions'])
class APIMapper(routes.Mapper):
    """Mapper that tolerates empty URLs and constrains route formats."""

    def routematch(self, url=None, environ=None):
        # routes.Mapper cannot match a completely empty URL, so perform
        # that lookup manually.
        if url == "":
            match_result = self._match("", environ)
            return match_result[0], match_result[1]
        return routes.Mapper.routematch(self, url, environ)

    def connect(self, *args, **kargs):
        # NOTE(vish): Default the format part of a route to only accept json
        #             and xml so it doesn't eat all characters after a '.'
        #             in the url.
        requirements = kargs.setdefault('requirements', {})
        if not requirements.get('format'):
            requirements['format'] = 'json|xml'
        return routes.Mapper.connect(self, *args, **kargs)
class ProjectMapper(APIMapper):
    """APIMapper that prefixes every resource route with the project id."""

    def resource(self, member_name, collection_name, **kwargs):
        if 'parent_resource' not in kwargs:
            kwargs['path_prefix'] = '{project_id}/'
        else:
            # Nest under the parent collection, e.g.
            # {project_id}/<parent_collection>/:<parent_member>_id/...
            parent = kwargs['parent_resource']
            kwargs['path_prefix'] = '{project_id}/%s/:%s_id' % (
                parent['collection_name'], parent['member_name'])
        routes.Mapper.resource(self, member_name, collection_name, **kwargs)
class PlainMapper(APIMapper):
    """APIMapper without any project-id prefix on its routes."""

    def resource(self, member_name, collection_name, **kwargs):
        if 'parent_resource' in kwargs:
            # Nest under the parent collection, e.g.
            # <parent_collection>/:<parent_member>_id/...
            parent = kwargs['parent_resource']
            kwargs['path_prefix'] = '%s/:%s_id' % (
                parent['collection_name'], parent['member_name'])
        routes.Mapper.resource(self, member_name, collection_name, **kwargs)
class APIRouter(base_wsgi.Router):
    """Routes requests on the OpenStack API to the appropriate controller
    and method.
    """
    ExtensionManager = None  # override in subclasses

    @classmethod
    def factory(cls, global_config, **local_config):
        """Simple paste factory, :class:`nova.wsgi.Router` doesn't have one."""
        return cls()

    def __init__(self, ext_mgr=None, init_only=None):
        """Build the mapper from core routes plus extension routes.

        :param ext_mgr: extension manager; defaults to an instance of the
            class-level ``ExtensionManager`` (subclasses must set one).
        :param init_only: optional collection of resource collection names;
            when given, only those extension resources are registered.
        """
        if ext_mgr is None:
            if self.ExtensionManager:
                ext_mgr = self.ExtensionManager()
            else:
                raise Exception(_("Must specify an ExtensionManager class"))

        mapper = ProjectMapper()
        self.resources = {}
        self._setup_routes(mapper, ext_mgr, init_only)
        self._setup_ext_routes(mapper, ext_mgr, init_only)
        self._setup_extensions(ext_mgr)
        super(APIRouter, self).__init__(mapper)

    def _setup_ext_routes(self, mapper, ext_mgr, init_only):
        """Register the resources contributed by loaded extensions."""
        for resource in ext_mgr.get_resources():
            LOG.debug('Extending resource: %s',
                      resource.collection)

            if init_only is not None and resource.collection not in init_only:
                continue

            inherits = None
            if resource.inherits:
                inherits = self.resources.get(resource.inherits)
                if not resource.controller:
                    resource.controller = inherits.controller
            wsgi_resource = wsgi.Resource(resource.controller,
                                          inherits=inherits)
            self.resources[resource.collection] = wsgi_resource

            kargs = dict(
                controller=wsgi_resource,
                collection=resource.collection_actions,
                member=resource.member_actions)

            if resource.parent:
                kargs['parent_resource'] = resource.parent

            mapper.resource(resource.collection, resource.collection, **kargs)

            if resource.custom_routes_fn:
                resource.custom_routes_fn(mapper, wsgi_resource)

    def _setup_extensions(self, ext_mgr):
        """Attach controller extensions to already-registered resources."""
        for extension in ext_mgr.get_controller_extensions():
            collection = extension.collection
            controller = extension.controller

            msg_format_dict = {'collection': collection,
                               'ext_name': extension.extension.name}
            if collection not in self.resources:
                # _LW marks a warning-level translation, so emit it at
                # warning level (was incorrectly logged at debug).
                LOG.warning(_LW('Extension %(ext_name)s: Cannot extend '
                                'resource %(collection)s: No such resource'),
                            msg_format_dict)
                continue

            LOG.debug('Extension %(ext_name)s extended resource: '
                      '%(collection)s', msg_format_dict)

            resource = self.resources[collection]
            resource.register_actions(controller)
            resource.register_extensions(controller)

    def _setup_routes(self, mapper, ext_mgr, init_only):
        # Subclasses wire up their core (non-extension) routes here.
        raise NotImplementedError()
class ControllerTest(object):
    """Minimal controller used to exercise the routing machinery."""

    def __init__(self):
        LOG.debug("init ControllerTest!!")

    def test(self, req):
        """Return a static payload; logs the incoming request at debug.

        :param req: the incoming WSGI request object.
        :returns: a fixed dict used to verify routing end-to-end.
        """
        # Fix: the original called LOG.debug("request:", str(req)) -- an
        # argument with no %s placeholder, which the logging module rejects
        # when it formats the record. Use lazy %-formatting instead.
        LOG.debug("request: %s", req)
        return {'name': "test", 'properties': 'test'}
class APIRouterV21(base_wsgi.Router):
    """Routes requests on the OpenStack v2.1 API to the appropriate controller
    and method.
    """

    @classmethod
    def factory(cls, global_config, **local_config):
        """Simple paste factory, :class:`nova.wsgi.Router` doesn't have one."""
        LOG.debug("APIRouterV21 factory")
        return cls()

    @staticmethod
    def api_extension_namespace():
        """Return the stevedore namespace the v2.1 extensions live in."""
        # TODO(oomichi): This namespaces will be changed after moving all v3
        # APIs to v2.1.
        return 'report.api.v3.extensions'

    def __init__(self, init_only=None):
        """Load all API extensions and build the route mapper.

        :param init_only: optional collection of resource collection names;
            when given, the missing-core-extensions check is skipped.
        """
        # TODO(cyeoh): bp v3-api-extension-framework. Currently load
        # all extensions but eventually should be able to exclude
        # based on a config file
        # TODO(oomichi): We can remove v3mode argument after moving all v3 APIs
        # to v2.1.
        LOG.debug("APIRouterV21 init")

        def _check_load_extension(ext):
            # Delegate the load decision to the subclass hook.
            return self._register_extension(ext)

        self.init_only = init_only
        self.api_extension_manager = stevedore.enabled.EnabledExtensionManager(
            namespace=self.api_extension_namespace(),
            check_func=_check_load_extension,
            invoke_on_load=True,
            invoke_kwds={"extension_info": self.loaded_extension_info})

        mapper = PlainMapper()
        self.resources = {}

        # NOTE(cyeoh) Core API support is rewritten as extensions
        # but conceptually still have core
        if list(self.api_extension_manager):
            # NOTE(cyeoh): Stevedore raises an exception if there are
            # no plugins detected. I wonder if this is a bug.
            self._register_resources_check_inherits(mapper)
            self.api_extension_manager.map(self._register_controllers)

        missing_core_extensions = self.get_missing_core_extensions(
            self.loaded_extension_info.get_extensions().keys())
        if not self.init_only and missing_core_extensions:
            # Fix: _LC marks a critical-level translation and this condition
            # aborts startup, so log at critical (was logged at debug).
            LOG.critical(_LC("Missing core API extensions: %s"),
                         missing_core_extensions)
            raise exception.CoreAPIMissing(
                missing_apis=missing_core_extensions)

        # Fix: _LI marks an info-level translation (was logged at debug).
        LOG.info(_LI("Loaded extensions: %s"),
                 sorted(self.loaded_extension_info.get_extensions().keys()))

        for res in mapper.matchlist:
            # Lazy %-formatting: the string is only built when debug
            # logging is actually enabled.
            LOG.debug("%s:%s", res.conditions, res.routepath)

        super(APIRouterV21, self).__init__(mapper)

    def _register_resources_list(self, ext_list, mapper):
        """Register the resources of every extension in *ext_list*."""
        for ext in ext_list:
            self._register_resources(ext, mapper)

    def _register_resources_check_inherits(self, mapper):
        """Register resources, deferring those that inherit from others.

        Inheriting resources need the resource they inherit from to be
        registered first, so extensions without inheriting resources are
        processed before the rest.
        """
        ext_has_inherits = []
        ext_no_inherits = []

        for ext in self.api_extension_manager:
            for resource in ext.obj.get_resources():
                if resource.inherits:
                    ext_has_inherits.append(ext)
                    break
            else:
                ext_no_inherits.append(ext)

        self._register_resources_list(ext_no_inherits, mapper)
        self._register_resources_list(ext_has_inherits, mapper)

    @staticmethod
    def get_missing_core_extensions(extensions_loaded):
        """Return the core extension aliases absent from *extensions_loaded*."""
        extensions_loaded = set(extensions_loaded)
        missing_extensions = API_V3_CORE_EXTENSIONS - extensions_loaded
        return list(missing_extensions)

    @property
    def loaded_extension_info(self):
        """Subclasses must expose their extension-info registry here."""
        raise NotImplementedError()

    def _register_extension(self, ext):
        # Subclass hook deciding whether a discovered extension is loaded.
        raise NotImplementedError()

    def _register_resources(self, ext, mapper):
        """Register resources defined by the extensions

        Extensions define what resources they want to add through a
        get_resources function
        """
        handler = ext.obj
        LOG.debug("Running _register_resources on %s", ext.obj)

        for resource in handler.get_resources():
            LOG.debug('Extended resource: %s', resource.collection)

            inherits = None
            if resource.inherits:
                inherits = self.resources.get(resource.inherits)
                if not resource.controller:
                    resource.controller = inherits.controller
            wsgi_resource = wsgi.ResourceV21(resource.controller,
                                             inherits=inherits)
            self.resources[resource.collection] = wsgi_resource

            kargs = dict(
                controller=wsgi_resource,
                collection=resource.collection_actions,
                member=resource.member_actions)

            if resource.parent:
                kargs['parent_resource'] = resource.parent

            # non core-API plugins use the collection name as the
            # member name, but the core-API plugins use the
            # singular/plural convention for member/collection names
            if resource.member_name:
                member_name = resource.member_name
            else:
                member_name = resource.collection
            mapper.resource(member_name, resource.collection,
                            **kargs)

            if resource.custom_routes_fn:
                resource.custom_routes_fn(mapper, wsgi_resource)

    def _register_controllers(self, ext):
        """Register controllers defined by the extensions

        Extensions define what resources they want to add through
        a get_controller_extensions function
        """
        handler = ext.obj
        LOG.debug("Running _register_controllers on %s", ext.obj)

        for extension in handler.get_controller_extensions():
            ext_name = extension.extension.name
            collection = extension.collection
            controller = extension.controller

            if collection not in self.resources:
                LOG.warning(_LW('Extension %(ext_name)s: Cannot extend '
                                'resource %(collection)s: No such resource'),
                            {'ext_name': ext_name, 'collection': collection})
                continue

            LOG.debug('Extension %(ext_name)s extending resource: '
                      '%(collection)s',
                      {'ext_name': ext_name, 'collection': collection})

            resource = self.resources[collection]
            resource.register_actions(controller)
            resource.register_extensions(controller)
|
|
"""Undocumented Module"""
__all__ = ['MetaInterval', 'Sequence', 'Parallel', 'ParallelEndTogether', 'Track']
from panda3d.core import *
from panda3d.direct import *
from direct.directnotify.DirectNotifyGlobal import *
from .IntervalManager import ivalMgr
from . import Interval
from direct.task.Task import TaskManager
#if __debug__:
# import direct.showbase.PythonUtil as PythonUtil
# Relative-start constants re-exported from CMetaInterval: an interval
# added to a meta-interval may start relative to the previous interval's
# end, the previous interval's start, or the beginning of the track level.
PREVIOUS_END = CMetaInterval.RSPreviousEnd
PREVIOUS_START = CMetaInterval.RSPreviousBegin
TRACK_START = CMetaInterval.RSLevelBegin
class MetaInterval(CMetaInterval):
# This is a Python-C++ hybrid class. MetaInterval is a Python
# extension of the C++ class CMetaInterval, which adds some
# Python-specific features (like list management).
# This is the base class of Sequence, Parallel, and Track.
notify = directNotify.newCategory("MetaInterval")
SequenceNum = 1
def __init__(self, *ivals, **kw):
    """Build a MetaInterval from the given component intervals.

    Recognised keywords (each is removed from kw before the final check):
      name       -- interval name; defaults to "<ClassName>-<n>" using the
                    class-level SequenceNum counter.
      autoPause / autoFinish -- non-zero means the interval may be paused /
                    finished automatically by CIntervalManager::interrupt().
      duration   -- phony duration used when sequencing this interval
                    inside a parent track.
    Any leftover keyword is reported as an error via self.notify.
    """
    #if __debug__:
    #    self.debugInitTraceback = PythonUtil.StackTrace(
    #        "create interval", 1, 10)
    name = None
    #if len(ivals) == 2 and isinstance(ivals[1], str):
    #    # If the second parameter is a string, it's the name.
    #    name = ivals[1]
    #    ivals = ivals[0]
    #else:
    # Look for the name in the keyword params.
    if 'name' in kw:
        name = kw['name']
        del kw['name']

    # If the keyword "autoPause" or "autoFinish" is defined to
    # non-zero, it means the interval may be automatically paused
    # or finished when CIntervalManager::interrupt() is called.
    # This is generally called only on a catastrophic situation
    # (for instance, the connection to the server being lost) when
    # we have to exit right away; these keywords indicate
    # intervals that might not be cleaned up by their owners.
    autoPause = 0
    autoFinish = 0
    if 'autoPause' in kw:
        autoPause = kw['autoPause']
        del kw['autoPause']
    if 'autoFinish' in kw:
        autoFinish = kw['autoFinish']
        del kw['autoFinish']

    # A duration keyword specifies the duration the interval will
    # appear to have for the purposes of computing the start time
    # for subsequent intervals in a sequence or track.
    self.phonyDuration = -1
    if 'duration' in kw:
        self.phonyDuration = kw['duration']
        del kw['duration']

    if kw:
        self.notify.error("Unexpected keyword parameters: %s" % (list(kw.keys())))

    # We must allow the old style: Track([ival0, ival1, ...]) as
    # well as the new style: Track(ival0, ival1, ...)
    # Note: this breaks in the case of a Track with one tuple:
    # Track((0, ival0),).  We could go through some effort to fix
    # this case, but for now I prefer just to document it as a
    # bug, since it will go away when we eventually remove support
    # for the old interface.
    #if len(ivals) == 1 and \
    #   (isinstance(ivals[0], tuple) or \
    #    isinstance(ivals[0], list)):
    #    self.ivals = ivals[0]
    #else:
    self.ivals = ivals

    # Dirty flag: set whenever self.ivals changes; __updateIvals
    # recompiles the C++-level list before the next playback.
    self.__ivalsDirty = 1

    if name == None:
        name = self.__class__.__name__ + '-%d'

    if '%' in name:
        # NOTE(review): the counter bump is not atomic; assumed fine for
        # single-threaded interval creation.
        name = name % (self.SequenceNum)
        MetaInterval.SequenceNum += 1

    CMetaInterval.__init__(self, name)
    self.__manager = ivalMgr
    self.setAutoPause(autoPause)
    self.setAutoFinish(autoFinish)

    self.pstats = None
    if __debug__ and TaskManager.taskTimerVerbose:
        self.pname = name.split('-', 1)[0]
        self.pstats = PStatCollector("App:Show code:ivalLoop:%s" % (self.pname))

    # Python-level intervals referenced by index from the C++ list.
    self.pythonIvals = []

    # If we are running in debug mode, we validate the intervals
    # in the list right away.  There's no good reason to do this,
    # except that it makes it easier for the programmer to detect
    # when a MetaInterval is misdefined at creation time.
    assert self.validateComponents(self.ivals)
# Functions to make the MetaInterval object act just like a Python
# list of intervals:
def append(self, ival):
    """Append one interval to the end of the component list."""
    ivals = self.ivals
    if isinstance(ivals, tuple):
        # The constructor stores a tuple; switch to a list before mutating.
        ivals = list(ivals)
        self.ivals = ivals
    ivals.append(ival)
    self.__ivalsDirty = 1
    assert self.validateComponent(ival)
def extend(self, ivals):
    """Append a whole sequence of intervals at once."""
    # Delegate to the in-place add operator, which handles tuple->list
    # conversion, dirty-flagging and validation in one place.
    self.__iadd__(ivals)
def count(self, ival):
    """Return how many times ival occurs in the component list."""
    # tuple() accepts either storage form without mutating it.
    return tuple(self.ivals).count(ival)
def index(self, ival):
    """Return the position of the first occurrence of ival."""
    return tuple(self.ivals).index(ival)
def insert(self, index, ival):
    """Insert an interval at the given position in the list."""
    ivals = self.ivals
    if isinstance(ivals, tuple):
        ivals = list(ivals)
        self.ivals = ivals
    ivals.insert(index, ival)
    self.__ivalsDirty = 1
    assert self.validateComponent(ival)
def pop(self, index = None):
    """Remove and return the interval at index (default: the last one)."""
    if isinstance(self.ivals, tuple):
        self.ivals = list(self.ivals)
    self.__ivalsDirty = 1
    # "is None" instead of "== None": never invoke rich comparison on an
    # arbitrary index object.
    if index is None:
        return self.ivals.pop()
    else:
        return self.ivals.pop(index)
def remove(self, ival):
    """Delete the first occurrence of ival from the component list."""
    ivals = self.ivals
    if isinstance(ivals, tuple):
        ivals = list(ivals)
        self.ivals = ivals
    ivals.remove(ival)
    self.__ivalsDirty = 1
def reverse(self):
    """Reverse the playback order of the component intervals in place."""
    ivals = self.ivals
    if isinstance(ivals, tuple):
        ivals = list(ivals)
        self.ivals = ivals
    ivals.reverse()
    self.__ivalsDirty = 1
def sort(self, cmpfunc = None):
    """Sort the component intervals in place, optionally with a
    Python-2 style comparison function."""
    if isinstance(self.ivals, tuple):
        self.ivals = list(self.ivals)
    self.__ivalsDirty = 1
    # "is None" instead of "== None": identity test for the sentinel.
    if cmpfunc is None:
        self.ivals.sort()
    else:
        self.ivals.sort(cmpfunc)
def __len__(self):
    # Number of component intervals in the list.
    return len(self.ivals)
def __getitem__(self, index):
    # Plain positional access to the component list.
    return self.ivals[index]
def __setitem__(self, index, value):
    """Replace the interval stored at index."""
    ivals = self.ivals
    if isinstance(ivals, tuple):
        ivals = list(ivals)
        self.ivals = ivals
    ivals[index] = value
    self.__ivalsDirty = 1
    assert self.validateComponent(value)
def __delitem__(self, index):
    """Remove the interval stored at index."""
    ivals = self.ivals
    if isinstance(ivals, tuple):
        ivals = list(ivals)
        self.ivals = ivals
    del ivals[index]
    self.__ivalsDirty = 1
def __getslice__(self, i, j):
    # Python 2 slice protocol (not called under Python 3): return a new
    # interval of the same class built from the selected sub-list.
    # NOTE(review): the constructor's list-style form is commented out in
    # __init__, so the sliced list arrives as a single positional arg
    # (a 1-tuple of a list) -- confirm this still behaves as intended.
    if isinstance(self.ivals, tuple):
        self.ivals = list(self.ivals)
    return self.__class__(self.ivals[i: j])
def __setslice__(self, i, j, s):
    """Replace the sub-range [i:j] with the sequence s (Python 2 only)."""
    ivals = self.ivals
    if isinstance(ivals, tuple):
        ivals = list(ivals)
        self.ivals = ivals
    ivals[i: j] = s
    self.__ivalsDirty = 1
    assert self.validateComponents(s)
def __delslice__(self, i, j):
    """Remove the sub-range [i:j] (Python 2 only)."""
    ivals = self.ivals
    if isinstance(ivals, tuple):
        ivals = list(ivals)
        self.ivals = ivals
    del ivals[i: j]
    self.__ivalsDirty = 1
def __iadd__(self, other):
    """In-place concatenation: self += other.

    other may be another MetaInterval of the very same flavour, or any
    iterable of intervals.
    """
    if isinstance(self.ivals, tuple):
        self.ivals = list(self.ivals)
    if isinstance(other, MetaInterval):
        # Only identical MetaInterval flavours may be merged this way.
        assert self.__class__ == other.__class__
        newIvals = other.ivals
    else:
        newIvals = list(other)
    self.ivals.extend(newIvals)
    self.__ivalsDirty = 1
    assert self.validateComponents(newIvals)
    return self
def __add__(self, other):
    """Return a new interval of the same flavour holding both lists."""
    result = self[:]
    result += other
    return result
# Functions to define sequence, parallel, and track behaviors:
def addSequence(self, list, name, relTime, relTo, duration):
    """Compile the given intervals to play back-to-back (a Sequence):
    each one starts where the previous one ends."""
    self.pushLevel(name, relTime, relTo)
    for component in list:
        self.addInterval(component, 0.0, PREVIOUS_END)
    self.popLevel(duration)
def addParallel(self, list, name, relTime, relTo, duration):
    """Compile the given intervals to play simultaneously: every one
    starts at the beginning of the level."""
    self.pushLevel(name, relTime, relTo)
    for component in list:
        self.addInterval(component, 0.0, TRACK_START)
    self.popLevel(duration)
def addParallelEndTogether(self, list, name, relTime, relTo, duration):
    """Compile the given intervals so they all finish at the same moment:
    the longest one starts first, shorter ones are delayed to match."""
    longest = 0
    for component in list:
        longest = max(longest, component.getDuration())
    self.pushLevel(name, relTime, relTo)
    for component in list:
        self.addInterval(component, longest - component.getDuration(), TRACK_START)
    self.popLevel(duration)
def addTrack(self, list, name, relTime, relTo, duration):
    """Compile a "track list": a list of tuples of the form

        (<delay>, <Interval>,
         PREVIOUS_END | PREVIOUS_START | TRACK_START)

    where <delay> is a relative time, in seconds, for the <Interval> to
    start, relative to either the end of the previous interval
    (PREVIOUS_END), the start of the previous interval (PREVIOUS_START)
    or the start of the track list (TRACK_START).  If the relative code
    is omitted, the default is TRACK_START.
    """
    # The parameter is historically named "list" (shadowing the builtin),
    # so the accepted sequence types are captured from literals instead.
    seqTypes = (type(()), type([]))
    self.pushLevel(name, relTime, relTo)
    for entry in list:
        # Bug fix: the loop variable used to be named "tuple", which
        # shadowed the builtin and turned isinstance(tuple, tuple) into a
        # test of the entry against itself -- a TypeError for every
        # valid entry.
        if isinstance(entry, seqTypes):
            relTime = entry[0]
            ival = entry[1]
            if len(entry) >= 3:
                relTo = entry[2]
            else:
                relTo = TRACK_START
            self.addInterval(ival, relTime, relTo)
        else:
            self.notify.error("Not a tuple in Track: %s" % (entry,))
    self.popLevel(duration)
def addInterval(self, ival, relTime, relTo):
    """Append one component interval to the compiled C++ list.

    C++ intervals go in directly; nested MetaIntervals are flattened;
    Python intervals (and C++ intervals flagged inPython) are stored in
    self.pythonIvals and referenced from the C++ list by index.
    """
    if isinstance(ival, CInterval):
        # It's a C++-style Interval, so add it directly.
        if getattr(ival, "inPython", 0):
            # Actually, it's been flagged to run in Python, even
            # though it's a C++ Interval.  It's probably got some
            # Python functors that must be invoked at runtime to
            # define some of its parameters.  Treat it as a Python
            # interval.
            index = len(self.pythonIvals)
            self.pythonIvals.append(ival)
            self.addExtIndex(index, ival.getName(), ival.getDuration(),
                             ival.getOpenEnded(), relTime, relTo)
        elif isinstance(ival, MetaInterval):
            # It's another MetaInterval, so copy in its intervals
            # directly to this object.  We could just store the
            # MetaInterval itself, which would work, but we get a
            # performance advantage by flattening out the deeply
            # nested hierarchy into a linear list within the root
            # CMetaInterval object.
            ival.applyIvals(self, relTime, relTo)
        else:
            # Nope, a perfectly ordinary C++ interval.  Hooray!
            self.addCInterval(ival, relTime, relTo)
    elif isinstance(ival, Interval.Interval):
        # It's a Python-style Interval, so add it as an external.
        index = len(self.pythonIvals)
        self.pythonIvals.append(ival)
        if self.pstats:
            ival.pstats = PStatCollector(self.pstats, ival.pname)
        self.addExtIndex(index, ival.getName(), ival.getDuration(),
                         ival.getOpenEnded(), relTime, relTo)
    else:
        self.notify.error("Not an Interval: %s" % (ival,))
# Functions to support automatic playback of MetaIntervals along
# with all of their associated Python callbacks:
def setManager(self, manager):
    """Set the CIntervalManager that will drive this interval's playback,
    mirroring it at both the Python and C++ levels."""
    # Bug fix: a stray garbage token ("rogerroger") was left in the body,
    # raising NameError on every call.
    self.__manager = manager
    CMetaInterval.setManager(self, manager)
def getManager(self):
    # Return the manager previously stored by __init__ / setManager.
    return self.__manager
def setT(self, t):
    # Jump to time t, recompiling the C++ interval list first if dirty.
    self.__updateIvals()
    CMetaInterval.setT(self, t)
def start(self, startT = 0.0, endT = -1.0, playRate = 1.0):
    # Begin one-shot playback (loop flag 0) under the stored manager.
    self.__updateIvals()
    self.setupPlay(startT, endT, playRate, 0)
    self.__manager.addInterval(self)
def loop(self, startT = 0.0, endT = -1.0, playRate = 1.0):
    # Begin looping playback (loop flag 1) under the stored manager.
    self.__updateIvals()
    self.setupPlay(startT, endT, playRate, 1)
    self.__manager.addInterval(self)
def pause(self):
    # Interrupt playback (only if actually playing), detach from the
    # manager, flush pending Python callbacks, and return the current
    # interval time so the caller can resume from it later.
    if self.getState() == CInterval.SStarted:
        self.privInterrupt()
    self.__manager.removeInterval(self)
    self.privPostEvent()
    return self.getT()
def resume(self, startT = None):
    """Resume playback, optionally jumping to startT first."""
    self.__updateIvals()
    # "is not None" instead of "!= None": identity test for the sentinel.
    if startT is not None:
        self.setT(startT)
    self.setupResume()
    self.__manager.addInterval(self)
def resumeUntil(self, endT):
    # Resume playback, stopping automatically when endT is reached.
    self.__updateIvals()
    self.setupResumeUntil(endT)
    self.__manager.addInterval(self)
def finish(self):
    # Snap the interval to its final state and stop playback.
    self.__updateIvals()
    state = self.getState()
    if state == CInterval.SInitial:
        # Never started: run the whole thing as one instantaneous event.
        self.privInstant()
    elif state != CInterval.SFinal:
        self.privFinalize()
    self.__manager.removeInterval(self)
    self.privPostEvent()
def clearToInitial(self):
    # This is overloaded at the Python level to properly call
    # pause() at the Python level, then upcall to finish the job
    # at the C++ level.
    self.pause()
    CMetaInterval.clearToInitial(self)
# Internal functions:
def validateComponent(self, component):
    """Debug-mode check that a component is appropriate for this kind of
    MetaInterval: it must itself be an Interval, either the C++ kind
    (CInterval) or the Python kind (Interval.Interval).  Track overrides
    this with its tuple-based rule."""
    return isinstance(component, (CInterval, Interval.Interval))
def validateComponents(self, components):
    # Debug-mode check: 1 only if every entry passes validateComponent,
    # 0 as soon as one fails.
    for component in components:
        if not self.validateComponent(component):
            return 0
    return 1
def __updateIvals(self):
    """Lazily compile the Python interval list into the C++ list.

    The MetaInterval object does not create the C++ list of Intervals
    immediately; rather, it stores a Python list of Intervals that is
    compiled into the C++ list the first time it is needed.  This lets
    nested MetaIntervals be flattened into the root CMetaInterval for a
    performance benefit.  Called only on the root MetaInterval.
    """
    if self.__ivalsDirty:
        self.clearIntervals()
        self.applyIvals(self, 0, TRACK_START)
        self.__ivalsDirty = 0
def clearIntervals(self):
    # This overrides the function defined at the C++ level to
    # reset the inPython flag.  Clearing out the intervals list
    # allows us to run entirely in C++ again, at least until a new
    # Python interval gets added.
    CMetaInterval.clearIntervals(self)
    self.inPython = 0
def applyIvals(self, meta, relTime, relTo):
    """Copy this object's intervals into meta's C++-level list.

    Subclasses override this to define their interpretation of the
    list; on a bare MetaInterval it is only valid for zero or one
    components.
    """
    componentCount = len(self.ivals)
    if componentCount > 1:
        self.notify.error("Cannot build list from MetaInterval directly.")
    elif componentCount == 1:
        meta.addInterval(self.ivals[0], relTime, relTo)
def setPlayRate(self, playRate):
    """ Changes the play rate of the interval.  If the interval is
    already started, this changes its speed on-the-fly.  Note that
    since playRate is a parameter to start() and loop(), the next
    call to start() or loop() will reset this parameter. """
    wasPlaying = self.isPlaying()
    if wasPlaying:
        # Briefly pause so the rate change takes effect mid-play.
        self.pause()
    CMetaInterval.setPlayRate(self, playRate)
    if wasPlaying:
        self.resume()
def __doPythonCallbacks(self):
    """Invoke any Python-level Intervals pending at this point in time.

    Must be called after any call to setT() or setFinalT() or
    stepPlay(), or some such; basically any function that might invoke
    an interval.  The C++ base class will invoke whatever C++ intervals
    it can, and then indicate the Python intervals that must be invoked
    through this event-queue interface.
    """
    ival = None
    try:
        while (self.isEventReady()):
            index = self.getEventIndex()
            t = self.getEventT()
            eventType = self.getEventType()
            self.popEvent()

            ival = self.pythonIvals[index]
            ival.privDoEvent(t, eventType)
            ival.privPostEvent()
            ival = None
    except:
        # Deliberate bare except: annotate which interval blew up, then
        # re-raise the original exception unchanged.
        if ival != None:
            print("Exception occurred while processing %s of %s:" % (ival.getName(), self.getName()))
        else:
            print("Exception occurred while processing %s:" % (self.getName()))
        print(self)
        raise
def privDoEvent(self, t, event):
    # This function overrides the C++ function to initialize the
    # intervals first if necessary (and to bracket the call with pstats
    # timing when collection is enabled).
    if self.pstats:
        self.pstats.start()
    self.__updateIvals()
    CMetaInterval.privDoEvent(self, t, event)
    if self.pstats:
        self.pstats.stop()
def privPostEvent(self):
    # Flush pending Python callbacks before upcalling to C++, with
    # optional pstats timing.
    if self.pstats:
        self.pstats.start()
    self.__doPythonCallbacks()
    CMetaInterval.privPostEvent(self)
    if self.pstats:
        self.pstats.stop()
def setIntervalStartTime(self, *args, **kw):
    # This function overrides from the parent level to force it to
    # update the interval list first, if necessary.
    self.__updateIvals()
    # Once we have monkeyed with the interval timings, we'd better
    # run the whole thing as a monolithic Python interval, since
    # we can't extract the ivals list back out and append them
    # into a parent MetaInterval.
    self.inPython = 1
    return CMetaInterval.setIntervalStartTime(self, *args, **kw)
def getIntervalStartTime(self, *args, **kw):
    # Overrides the parent to recompile the interval list first.
    self.__updateIvals()
    return CMetaInterval.getIntervalStartTime(self, *args, **kw)
def getDuration(self):
    # Overrides the parent to recompile the interval list first.
    self.__updateIvals()
    return CMetaInterval.getDuration(self)
def __repr__(self, *args, **kw):
    # Overrides the parent to recompile the interval list first.
    self.__updateIvals()
    return CMetaInterval.__repr__(self, *args, **kw)
def __str__(self, *args, **kw):
    # Overrides the parent to recompile the interval list first.
    self.__updateIvals()
    return CMetaInterval.__str__(self, *args, **kw)
def timeline(self, out = None):
    """Dump a formatted timeline of the compiled intervals to out
    (default: the Panda3D global output stream 'ostream')."""
    self.__updateIvals()
    # "is None" instead of "== None": identity test for the sentinel.
    if out is None:
        out = ostream
    CMetaInterval.timeline(self, out)
class Sequence(MetaInterval):
    """MetaInterval whose components play one after another."""
    def applyIvals(self, meta, relTime, relTo):
        # Compile as a back-to-back sequence at the C++ level.
        meta.addSequence(self.ivals, self.getName(),
                         relTime, relTo, self.phonyDuration)
class Parallel(MetaInterval):
    """MetaInterval whose components all start at the same time."""
    def applyIvals(self, meta, relTime, relTo):
        # Compile as simultaneous starts at the C++ level.
        meta.addParallel(self.ivals, self.getName(),
                         relTime, relTo, self.phonyDuration)
class ParallelEndTogether(MetaInterval):
    """MetaInterval whose components all end at the same time."""
    def applyIvals(self, meta, relTime, relTo):
        # Compile with staggered starts so everything ends together.
        meta.addParallelEndTogether(self.ivals, self.getName(),
                                    relTime, relTo, self.phonyDuration)
class Track(MetaInterval):
    """MetaInterval whose components are (delay, ival[, relTo]) entries."""

    def applyIvals(self, meta, relTime, relTo):
        # Compile as a track list at the C++ level.
        meta.addTrack(self.ivals, self.getName(),
                      relTime, relTo, self.phonyDuration)

    def validateComponent(self, component):
        """Debug-mode check that an entry is a valid track tuple:
        (number, Interval[, PREVIOUS_END | PREVIOUS_START | TRACK_START]).
        """
        # Bug fix: the parameter used to be named "tuple", shadowing the
        # builtin so isinstance(tuple, tuple) compared the entry against
        # itself.  Renamed to match the base-class signature.
        if not isinstance(component, (tuple, list)):
            # It's not a tuple.
            return 0

        relTime = component[0]
        ival = component[1]
        if len(component) >= 3:
            relTo = component[2]
        else:
            relTo = TRACK_START
        if not isinstance(relTime, (float, int)):
            # First parameter is not a number.
            return 0
        if not MetaInterval.validateComponent(self, ival):
            # Second parameter is not an interval.
            return 0
        if relTo != PREVIOUS_END and \
           relTo != PREVIOUS_START and \
           relTo != TRACK_START:
            # Third parameter is an invalid value.
            return 0

        # Looks good.
        return 1
|
|
#!/usr/bin/python
"""
This module specifically deals with the dynamical quantities
related to Multi Gaussian Expansion models (Monnet et al. 1992, Emsellem et al. 1994).
It includes the derivation of projected and deprojected photometry, and
the derivation of velocity moments via the Jeans Equations.
"""
"""
Importing the most import modules
This MGE module requires numpy and scipy
"""
try:
import numpy as np
except ImportError:
raise Exception("numpy is required for pygme")
from numpy import shape
from numpy import cos, sin, exp, sqrt
try:
from scipy import special
except ImportError:
raise Exception("scipy is required for pygme")
from numpy import inf
from rwcfor import floatMGE, floatG
from pygme.photMGE import photMGE
from pygme.mge_miscfunctions import quadrat_ps_roots
__version__ = '1.1.2 (21/08/2013)'
#__version__ = '1.1.1 (22/01/2013)'
#__version__ = '1.1.0 (27/08/2012)'
#__version__ = '1.0.0 (08/01/2012)'
# Version 1.1.2 Changed imin,imax into ilist
# Version 1.1.1 Small minor fixes
# Version 1.1.0 Number of small changes including for visible modules
# Version 1.0.0 extracted from the older pygme.py
## This is a maximum value to include in the derivation of exponential(x^2) and erfc(x) functions
## Beyond this value, an analytic approximation is replacing the exact expression
_Maximum_Value_forEXPERFC = 20.0
class dynMGE(photMGE) :
"""
This class contains all the dynamics-related quantities, from circular velocities, epicycle
frequencies, and the Jeans Equations.
"""
def __init__(self, infilename=None, indir=None, saveMGE=None, **kwargs) :
    # Initialise the dynamics layer by delegating entirely to the
    # photometric MGE base class (which loads/holds the Gaussian model).
    photMGE.__init__(self, infilename=infilename, indir=indir, saveMGE=saveMGE, **kwargs)
##########################################################################################################
### Compute the terms for Jeans ###
######################################
## First order moment ===================================================================
## Two integrals: line of sight and adimensional one with variable T between 0 and 1
def _IntMu1(self, T, R2, Z2, ilist=None) :
    """Integrand (quadrature variable T in 0..1) for the first velocity
    moment; R2, Z2 are squared cylindrical coordinates in arcsec^2.

    NOTE(review): unlike most siblings this helper does not default
    ilist via _set_ilist, and it reads self.rhoL -- assumed to have been
    set beforehand (e.g. via _FluxDensity); confirm against callers.
    """
    T2 = T * T
    T2Bij_soft = 1. - self._dParam.Bij_soft * T2
    facdenom = 1. - self.e2 * T2
    Integrand = np.zeros_like(R2)
    for j in range(self.nStarGauss) :
        expfac = self._pParam.qParc[j] * exp(- (R2 + Z2 / facdenom[j]) * T2 / self._dParam.dSig3Darc2_soft[j]) / sqrt(facdenom[j])
        for i in ilist :
            Integrand += self.rhoL[i] * expfac * (self.e2[i] - self.e2[j] * T2) / T2Bij_soft[i,j]   # L*L*pc-4*arcsec-2
    return Integrand * T2
def _IntlosMu1(self, LOS, X2, Y, cosi, sini, Xquad, Wquad, ilist=None) :
    """Line-of-sight integrand for the first moment: rotate (Y, LOS) by
    the inclination into intrinsic (R, Z), then evaluate the inner T
    quadrature with the precomputed nodes Xquad / weights Wquad.

    NOTE(review): reads self.rhoLT, assumed set beforehand -- confirm.
    """
    R2 = -Y * cosi + LOS * sini
    R2 = X2 + R2 * R2
    Z2 = Y * sini + LOS * cosi
    Z2 = Z2 * Z2
    ## INTEGRAL via quadrature
    result = Wquad[0] * self._IntMu1(Xquad[0], R2, Z2, ilist)
    for i in xrange(1,len(Xquad)) :
        result += Wquad[i] * self._IntMu1(Xquad[i], R2, Z2, ilist)
    Integrand = sqrt(self.rhoLT * result)
    return Integrand
def _Mu1(self, X, Y, inclin=90., ilist=None) :
    """First-order projected velocity moment on the (X, Y) sky grid for
    inclination inclin (degrees); X, Y must be 2-D arrays in arcsec."""
    # Bug fix / local import: the module top level only imports
    # scipy.special, so scipy.integrate.quad raised NameError here.
    from scipy import integrate
    X2 = X * X
    Y2 = Y * Y
    inclin_rad = inclin * np.pi / 180.
    sini = sin(inclin_rad)
    cosi = cos(inclin_rad)
    [Xquad, Wquad] = quadrat_ps_roots(self.Nquad)
    rhop = self._rhoL2D_fromX2Y2(X2, Y2, ilist)
    result = np.zeros_like(X2)
    for i in xrange(shape(X2)[0]) :
        for j in xrange(shape(X2)[1]) :
            # Progress trace (print-function form works on Py2 and Py3).
            print("%f %f \n" % (X[i,j], Y[i,j]))
            ### INTEGRAL between -infinity and infinity along the line of sight
            result[i,j] = float(integrate.quad(self._IntlosMu1, -inf, inf, epsabs=1.e-01, epsrel=1.e-01, args=(X2[i,j], Y[i,j], cosi, sini, Xquad, Wquad, ilist))[0])
    return sqrt(4. * np.pi * self.G) * result * sini * X / rhop
###=============================================================================
## Second order moment ========================================================
## Only 1 integral: variable T between 0 and 1
def _IntMu2(self, T, X2, Y2, cosi2, sini2, ilist=None) :
    """Integrand (quadrature variable T in 0..1) for the second velocity
    moment; X2, Y2 are squared sky coordinates (arcsec^2), cosi2/sini2
    the squared cosine/sine of the inclination.  ilist must be an
    explicit index list (no _set_ilist default here).
    """
    T2 = T * T
    T4 = T2 * T2
    T2Bij_soft = 1. - self._dParam.Bij_soft * T2
    facdenom = 1. - self.e2 * T2
    e2T4 = T4 * self.e2 / (self._pParam.dSig3Darc2 * facdenom)
    Integrand = np.zeros_like(X2)
    for j in range(self.nGauss) :
        for i in ilist :
            A = T2 / self._dParam.dSig3Darc2_soft[j] + 1. / self._pParam.dSig3Darc2[i]   # in arcsec-2
            B = e2T4[j] + self.e2q2dSig3Darc2[i]   # in arcsec-2
            ABcosi2 = A + B * cosi2   # in arcsec-2
            varexp = -A * (X2 + Y2 * (A + B) / ABcosi2)  # adimensionless
            denom = T2Bij_soft[i,j] * sqrt(facdenom[j] * ABcosi2)  # in arcsec-1
            num = self._dParam.q2Sig3Darc2[i] + X2 * sini2 * (self.e2[i] - T2 * self.e2[j]) # in arcsec^2
            Integrand += self._pParam.qParc[j] * self.Imax3Darc[i] * num * exp(varexp) / denom  # L*M*pc-4*arcsec
    return Integrand * T2
def _Mu2(self, X, Y, inclin=90., ilist=None) :
    """Second-order projected velocity moment [km^2.s-2] on the (X, Y)
    sky grid for inclination inclin (degrees)."""
    ilist = self._set_ilist(ilist)
    ### First compute the gaussian quadrature points, and weights
    [Xquad, Wquad] = quadrat_ps_roots(self.Nquad)
    X2 = X * X
    Y2 = Y * Y
    # NOTE(review): np.sum here has no axis argument, so rhop collapses
    # to a scalar; the sibling code sums Gaussians along the last axis
    # (axis=-1) -- confirm whether that was intended.
    self.rhop = np.sum(self.Imax2D[ilist] * exp(- (X2[...,np.newaxis] + Y2[...,np.newaxis] / self.Q2D2[ilist]) / self.dSig2Darc2[ilist]))
    inclin_rad = inclin * np.pi / 180.
    sini = sin(inclin_rad)
    cosi = cos(inclin_rad)
    sini2 = sini * sini
    cosi2 = cosi * cosi
    result = np.zeros_like(X)
    for i in xrange(self.Nquad) :
        result += Wquad[i] * self._IntMu2(Xquad[i], X2, Y2, cosi2, sini2, ilist)
    return 4. * np.pi**1.5 * self.G * result / self.rhop    # en km^2.s-2
###=============================================================================
######################################################################################
### Compute the gravitational potential ###
############################################
def _IntPot(self, T, R2, Z2, ilist=None) :
    """ Integrand for the Gravitational potential

    T is the quadrature variable (0..1); R2, Z2 are squared cylindrical
    coordinates in arcsec^2.  The selected Gaussians are summed along
    the trailing axis, returning an array shaped like R2.
    """
    ilist = self._set_ilist(ilist)
    T2 = T * T
    denom = 1. - self.e2[ilist] * T2
    Integrand = self._dParam.Sig3Darc2_soft[ilist] * self._pParam.qParc[ilist] * exp(- (R2[...,np.newaxis] + Z2[...,np.newaxis] / denom) * T2 / self._dParam.dSig3Darc2_soft[ilist]) / sqrt(denom)
    return np.sum(Integrand, axis=-1)
def Potential(self, R, Z, ilist=None) :
    r"""
    Return, for a grid of R and Z the Gravitational potential in km^2.s^2
    R and Z should be in arcseconds

    :param R: cylindrical radius (float or array) in arcseconds
    :param Z: vertical height (float or array) in arcseconds
    :param ilist: list of indices for the Gaussians to consider (0 to Ngauss-1)

    :returns: Gravitational potential :math:`\Phi` in Units of km^2.s-2
    :rtype: float or array of float depending on input
    """
    ### Set the list of indices
    ilist = self._set_ilist(ilist)
    ### First compute the gaussian quadrature points, and weights
    [Xquad, Wquad] = quadrat_ps_roots(self.Nquad)
    R2 = R*R
    Z2 = Z*Z
    # Accumulate the quadrature explicitly: np.sum over a generator
    # expression relies on numpy's deprecated generator fallback.
    result = Wquad[0] * self._IntPot(Xquad[0], R2, Z2, ilist)
    for i in xrange(1, self.Nquad) :
        result += Wquad[i] * self._IntPot(Xquad[i], R2, Z2, ilist)
    if (self.Mbh > 0.) :
        # Central black-hole contribution, softened at the origin.
        mask = (R2 + Z2 == 0.)
        result[~mask] += self.facMbh / sqrt(R2[~mask] + Z2[~mask] + self.SoftarcMbh2)
    return -4. * np.pi * self.G * result      # en km^2.s-2
############# ESCAPE VELOCITY = SQRT(2 * PHI) ####################################
def Vescape(self, R, Z, ilist=None) :   # R and Z should be in arcsec
    """
    Return, for a grid of R and Z the escape velocity in Km/s
    R and Z should be in arcseconds

    :param R: cylindrical radius (float or array) in arcseconds
    :param Z: vertical height (float or array) in arcseconds
    :param ilist: list of indices of the Gaussians to consider

    :returns: float/array -- Escape velocity [km.s-1]
    """
    # v_esc = sqrt(2 |Phi|); Potential() returns negative values.
    return sqrt(-2. * self.Potential(R, Z, ilist))   # en km.s-1
############ CIRCULAR VELOCITY ##################################################
def _IntVc(self, T, R2, ilist=None) :
    """Integrand (quadrature variable T in 0..1) for the circular
    velocity; R2 is the squared radius in arcsec^2.  ilist must be an
    explicit index list (no _set_ilist default here).
    """
    T2 = T * T
    Integrand = np.zeros_like(R2)
    denom = 1. - self.e2 * T2
    for i in ilist :
        Integrand += self._pParam.qParc[i] * exp(- R2 * T2 / self._dParam.dSig3Darc2_soft[i]) / sqrt(denom[i])
    return Integrand * T2
def Vcirc(self, R, ilist=None) :
    """
    Derive the circular velocity for the MGE model taking into account
    Only the Gaussians from the indice list (ilist) - counting from 0
    A softening can be included (eps in pc)

    :param R: cylindrical radius (float or array) in arcseconds
    :param ilist: list of indices of Gaussians to count

    :returns: float/array -- Circular velocity [km.s-1]
    """
    ### Set the list of indices
    ilist = self._set_ilist(ilist)
    ### First compute the gaussian quadrature points, and weights
    [Xquad, Wquad] = quadrat_ps_roots(self.Nquad)
    R2 = R*R
    # Accumulate the quadrature explicitly: np.sum over a generator
    # expression relies on numpy's deprecated generator fallback.
    quadsum = Wquad[0] * self._IntVc(Xquad[0], R2, ilist)
    for i in xrange(1, self.Nquad) :
        quadsum += Wquad[i] * self._IntVc(Xquad[i], R2, ilist)
    result = R2 * quadsum
    if (self.Mbh > 0.) :
        # Softened black-hole term, with a floor right at the centre.
        mask = (R == 0.)
        result[mask] += self.facMbh / np.maximum(1.e-2, self.SoftarcMbh)
        result[~mask] += self.facMbh / sqrt(R2[~mask] + self.SoftarcMbh2)
    return sqrt(result * self.PIG)  # en km.s-1
### ==============================================================================
##################################################################
### Compute the acceleration in R and Z for orbit integration ###
##################################################################
def _IntaccR(self, T, R2, Z2, ilist=None) :
    """Integrand (quadrature variable T in 0..1) for the radial
    acceleration; R2, Z2 in arcsec^2, ilist an explicit index list."""
    T2 = T * T
    Integrand = np.zeros_like(R2)
    denom = 1. - self.e2 * T2
    for i in ilist :
        Integrand += self._pParam.qParc[i] * exp(- (R2 + Z2 / denom[i]) * T2 / self._dParam.dSig3Darc2_soft[i]) / sqrt(denom[i])
    return Integrand * T2
#===========================================================
#######################################################
def _IntaccZ(self, T, R2, Z2, ilist=None) :
    """Integrand (quadrature variable T in 0..1) for the vertical
    acceleration; differs from _IntaccR by the denom**1.5 factor."""
    T2 = T * T
    Integrand = np.zeros_like(R2)
    denom = 1. - self.e2 * T2
    for i in ilist :
        Integrand += self._pParam.qParc[i] * exp(- (R2 + Z2 / denom[i]) * T2 / self._dParam.dSig3Darc2_soft[i]) / (denom[i])**(1.5)
    return Integrand * T2
#===========================================================
#######################################################
def _accR(self, R, Z, ilist=None) :
    """Radial acceleration at (R, Z) [km^2.s-2.pc-1]; R, Z in arcsec."""
    # Quadrature nodes and weights for the inner T integral.
    [Xquad, Wquad] = quadrat_ps_roots(self.Nquad)
    R2 = R * R
    Z2 = Z * Z
    result = np.zeros_like(R)
    for k in xrange(self.Nquad) :
        result += Wquad[k] * self._IntaccR(Xquad[k], R2, Z2, ilist)
    return self.PIG * result * R / self.pc_per_arcsec   # en km^2.s-2.pc-1
#===========================================================
#######################################################
def _accZ(self, R, Z, ilist=None) :
    """Vertical acceleration at (R, Z) [km^2.s-2.pc-1]; R, Z in arcsec."""
    # Quadrature nodes and weights for the inner T integral.
    [Xquad, Wquad] = quadrat_ps_roots(self.Nquad)
    R2 = R * R
    Z2 = Z * Z
    result = np.zeros_like(R)
    for k in xrange(self.Nquad) :
        result += Wquad[k] * self._IntaccZ(Xquad[k], R2, Z2, ilist)
    return self.PIG * result * Z / self.pc_per_arcsec   # en km^2.s-2.pc-1
#===========================================================
##################################################################
### Compute the second derivative of the potential with R
##################################################################
def _Intd2Potd2R(self, T, R2, Z2, ilist=None) :
    """Integrand (quadrature variable T in 0..1) for the second radial
    derivative of the potential; R2, Z2 in arcsec^2."""
    T2 = T * T
    Integrand = np.zeros_like(R2)
    denom = 1. - self.e2 * T2
    for i in ilist :
        Integrand += self._pParam.qParc[i] * exp(- (R2 + Z2 / denom[i]) * T2 / self._dParam.dSig3Darc2_soft[i]) * (1. - R2 * T2 / self._dParam.Sig3Darc2_soft[i]) / sqrt(denom[i])
    return Integrand * T2
#===========================================================
#######################################################
def _d2Potd2R(self, R, Z, ilist=None) :
    """Second radial derivative of the potential at (R, Z)
    [km^2.s-2.pc-2]; R, Z in arcsec."""
    ### First compute the gaussian quadrature points, and weights
    [Xquad, Wquad] = quadrat_ps_roots(self.Nquad)
    result = np.zeros_like(R)
    R2 = R*R
    Z2 = Z*Z
    for i in xrange(self.Nquad) :
        result += Wquad[i] * self._Intd2Potd2R(Xquad[i], R2, Z2, ilist)
    # Bug fix: "self/pc_per_arcsec" (a TypeError at runtime) corrected
    # to the attribute access "self.pc_per_arcsec".
    return self.PIG * result / (self.pc_per_arcsec*self.pc_per_arcsec)  # en km^2.s-2.pc-2
#===========================================================
#######################################################
# FluxDensity for certain gaussians
#######################################################
def _FluxDensity(self, R2, Z2, ilist=None) :
    """
    Function useful for the integration of dynamical quantities

    Returns (rhoL, rhoLT): the per-Gaussian luminosity densities on the
    grid (rows for indices not in ilist stay zero) and their sum.
    WARNING: rho is in Mass/pc-2/arcsec-1 (see trailing module comment).
    """
    ### Compute .rho and .rhoT the individual and total M density on the grid
    rhoL = np.zeros((self.nGauss, len(R2)), floatMGE)
    rhoLT = np.zeros_like(R2)
    for i in ilist :
        rhoL[i] = self._rhoL3D_1G_fromR2Z2(R2, Z2, i)
    rhoLT = np.sum(rhoL, axis=0)
    return rhoL, rhoLT
## WARNING: rho is in Mass/pc-2/arcsec-1
#===========================================================
#######################################################
# MassDensity for certain gaussians
#######################################################
def _MassDensity(self, R2, Z2, ilist=None) :
    """
    Function useful for the integration of dynamical quantities: QToomre

    Returns (rho, rhoT): the per-Gaussian mass densities on the grid
    (rows for indices not in ilist stay zero) and their sum.
    WARNING: rho is in Mass/pc-2/arcsec-1 (see trailing module comment).
    """
    ### Compute .rho and .rhoT the individual and total M density on the grid
    rho = np.zeros((self.nGauss, len(R2)), floatMGE)
    rhoT = np.zeros_like(R2)
    for i in ilist :
        rho[i] = self._rho3D_1G_fromR2Z2(R2, Z2, i)
    rhoT = np.sum(rho, axis=0)
    return rho, rhoT
## WARNING: rho is in Mass/pc-2/arcsec-1
#===========================================================
############ OmegaR ##################################################
def _IntOmega(self, T, R2, ilist=None) :
    """Integrand (quadrature variable T in 0..1) for the circular
    frequency Omega.  Identical in form to _IntVc; kept separate so the
    two physical quantities have their own named integrands."""
    T2 = T * T
    Integrand = np.zeros_like(R2)
    denom = 1. - self.e2 * T2
    for i in ilist :
        Integrand += self._pParam.qParc[i] * exp(- R2 * T2 / self._dParam.dSig3Darc2_soft[i]) / sqrt(denom[i])
    return Integrand * T2
def Omega(self, R, ilist=None) :
    r""" Returns :math:`\Omega`, the circular frequency, for a grid of R.
    R should be in arcseconds

    :param R: cylindrical radius (float or array) in arcseconds

    :returns: :math:`\Omega` Circular frequency [km.s-1.pc-1]
    :rtype: float or array of float depending on input
    """
    ### Set the list of indices
    ilist = self._set_ilist(ilist)
    ### First compute the gaussian quadrature points, and weights
    [Xquad, Wquad] = quadrat_ps_roots(self.Nquad)
    R2 = R*R
    # Consistency fix: use the dedicated _IntOmega integrand (identical
    # body to _IntVc, which was called here), and accumulate explicitly
    # instead of np.sum over a generator (deprecated numpy fallback).
    result = Wquad[0] * self._IntOmega(Xquad[0], R2, ilist)
    for i in xrange(1, self.Nquad) :
        result += Wquad[i] * self._IntOmega(Xquad[i], R2, ilist)
    if (self.Mbh > 0.) :
        # Softened black-hole term, with a floor right at the centre.
        mask = (R == 0.)
        result[mask] += self.facMbh / np.maximum(1.e-4, self.SoftarcMbh**3)
        result[~mask] += self.facMbh / (R2[~mask] + self.SoftarcMbh2)**(3.0/2.0)
    return sqrt(self.PIG * result) / self.pc_per_arcsec    # en km.s-1.pc-1
### ==============================================================================
##################################################################
### Compute the epicycle frequency kappa (squared)
##################################################################
def _Intkappa(self, T, R2, ilist=None) :
    """
    Integrand for kappa - from an MGE model

    T is the quadrature variable (0..1), R2 the squared radius in
    arcsec^2.  NOTE(review): the exponential uses the unsoftened
    dSig3Darc2 while the (4 - ...) factor uses Sig3Darc2_soft -- confirm
    this mix is intentional (siblings use the _soft variant throughout).
    """
    T2 = T * T
    Integrand = np.zeros_like(R2)
    denom = 1. - self.e2 * T2
    for i in ilist :
        Integrand += self._pParam.qParc[i] * exp(- R2 * T2 / self._pParam.dSig3Darc2[i]) * (4. - R2 * T2 / self._dParam.Sig3Darc2_soft[i]) / sqrt(denom[i])
    return Integrand * T2
#===========================================================
#######################################################
def kappa(self, R, ilist=None) :
"""
Return :math:`\kappa`, the epicycle radial frequency for an MGE model
:param R: cylindrical radius (float or array) in arcseconds
:param Z: vertical height (float or array) in arcseconds
:returns: :math:`\kappa` Radial Epicycle frequency [km.s-1.pc-1]
:rtype: float or array of float depending on input
"""
### Set the list of indices
ilist = self._set_ilist(ilist)
### First compute the gaussian quadrature points, and weights
[Xquad, Wquad] = quadrat_ps_roots(self.Nquad)
result = np.zeros_like(R)
R2 = R*R
for i in xrange(self.Nquad) :
result += Wquad[i] * self._Intkappa(Xquad[i], R2, ilist)
return sqrt(self.PIG * result) / self.pc_per_arcsec # en km.s-1.pc-1
#===========================================================
#######################################################
def EpicycleRatio(self, R, ilist=None) :
""" Derive :math:`\Omega / \left(2 \\times \kappa\\right)` as the epicycle approximation
for the ratio between sigma_R and sigma_Z
:param R: cylindrical radius (float or array) in arcseconds
:returns: The Epicycle ratio :math:`\Omega / 2 \\times \kappa`
"""
### Set the list of indices
ilist = self._set_ilist(ilist)
### Then compute the two frequencies
k = self.kappa(R, ilist) # in km.s-1.pc-1
O = self.Omega(R, ilist) # in km.s-1.pc-1
return O / (2. * k)
#===========================================================
#######################################################
def QToomre(self, R, Zcut, ilist=None) :
""" Derive the Toomre criterion
:param R: cylindrical radius (float or array) in arcseconds
:param Zcut: cut in Z to evaluate QToomre (in arcsecconds)
:return : The Toomre parameter :math:`Q`
"""
### Set the list of indices
ilist = self._set_ilist(ilist)
### Get the frequencies
k = self.kappa(R) # in km.s-1.pc-1
O = self.Omega(R) # in km.s-1.pc-1
Z = np.zeros_like(R)
R2 = R*R
Z2 = Z*Z
rhoSigmaR2 = np.zeros_like(R)
SigmaZ = np.zeros_like(R)
rhoT = 0.
for i in ilist :
self.rho, self.rhoT = self._MassDensity(R2, Z2, ilist=[i])
rhoSigmaR2 += self._dParam.kRZ2[i] * self.rho[i] * self._sigma_z2_fromR2Z2(R2,Z2, ilist=[i]) # in km.s-1
rhoT += self.rho[i]
SigmaR = sqrt(rhoSigmaR2 / rhoT)
self.rho, self.rhoT = self._MassDensity(R2, Z2, ilist)
SigmaZ = sqrt(self._sigma_z2_fromR2Z2(R2,Z2, ilist)) # in km.s-1
SurfDensity = self._rhointMZ(R, Zcut, ilist)
## self.G in (km/s)2. Msun-1 . pc2 . arcsec-1
## SurfDensity in Msun.pc-2
## So QT in pc-1 * arcsec, so we multiply by pc_per_arcsec
QT = k * SigmaR * self.pc_per_arcsec / (3.36 * self.G * SurfDensity)
return SigmaR, SigmaZ, O, k, QT
#===========================================================
#######################################################
### Compute some components for the Jeans modelling ##
#######################################################
    def _intsigma_z2(self, T, R2, Z2, ilist=None) :
        """
        Integrand for SigmaZ**2 from an MGE model.

        :param T: quadrature integration variable
        :param R2: squared cylindrical radius (array)
        :param Z2: squared vertical height (array, same shape as R2)
        :param ilist: indices of the Gaussians to take into account
        :returns: integrand, in rho * M arcsec pc-2 / 4 PI G
        """
        T2 = T * T
        # Per-(i,j) softened denominators and per-Gaussian weights at this T
        T2Bij_soft = 1. - self._dParam.Bij_soft * T2
        q2Sig3Darc2T2 = self._dParam.q2Sig3Darc2 * T2
        Integrand = np.zeros_like(R2) # this has the dimension of the particules array
        denom = 1. - self.e2 * T2
        # expfac[..., j]: spatial exponential for luminous Gaussian j,
        # broadcast over the trailing Gaussian axis via np.newaxis
        expfac = self._pParam.qParc * exp(- (R2[...,np.newaxis] + Z2[...,np.newaxis] / denom) * T2 / self._dParam.dSig3Darc2_soft) / sqrt(denom)
        # Double sum: over luminous Gaussians j and (via np.sum) over the
        # selected mass Gaussians in ilist, weighted by the local density
        for j in range(self.nGauss) :
            # expfac = self._pParam.qParc[j] * exp(- (R2 + Z2 / denom[j]) * T2 / self._pParam.dSig3Darc2[j]) / sqrt(denom[j])
            # Integrand = Integrand + np.sum(self.rho * self._dParam.q2Sig3Darc2 * expfac / T2Bij_soft[:,j], axis=-1)
            Integrand += np.sum(self.rho[ilist].transpose() * (expfac[...,j])[...,np.newaxis] * q2Sig3Darc2T2[ilist] / T2Bij_soft[ilist,j], axis=-1)
        return Integrand # in rho * M arcsec pc-2 / 4 PI G
#===========================================================
#######################################################
    def _intvtheta2(self, T, R2, Z2, ilist=None) :
        """
        Integrand for Vtheta**2 from an MGE model.

        :param T: quadrature integration variable
        :param R2: squared cylindrical radius (array)
        :param Z2: squared vertical height (array, same shape as R2)
        :param ilist: indices of the Gaussians to take into account
        :returns: integrand, in rhoT * M arcsec pc-2 / 4 PI G
        """
        T2 = T * T
        T2Bij_soft = 1. - self._dParam.Bij_soft * T2
        Integrand = np.zeros_like(R2)
        denom = 1. - self.e2 * T2
        # T2e2j = T2 * self.e2
        qParcT2 = self._pParam.qParc * T2
        # expfac[..., j]: spatial exponential for luminous Gaussian j,
        # broadcast over the trailing Gaussian axis via np.newaxis
        expfac = qParcT2 * exp(- (R2[...,np.newaxis] + Z2[...,np.newaxis] / denom) * T2 / self._pParam.dSig3Darc2) / sqrt(denom)
        # Explicit double loop over luminous (j) and mass (i) Gaussians;
        # the R2-dependent term carries the flattening/anisotropy couplings
        for j in range(self.nGauss) :
            for i in ilist :
                # Integrand += self.rho[i] * (R2 * (self.e2[i] - T2e2j[j]) + self._dParam.q2Sig3Darc2[i]) * expfac[...,j] / T2Bij_soft[i,j]
                Integrand += self.rho[i] * (R2 * (self._dParam.mkRZ2q2[i] - self._dParam.Dij_soft[i,j] * T2) + self._dParam.kRZ2[i] \
                        * self._dParam.q2Sig3Darc2[i]) * expfac[...,j] / T2Bij_soft[i,j]
        return Integrand
#===========================================================
#######################################################
def _sigma_z2_fromR2Z2(self, R2, Z2, ilist=None) :
"""
Compute SigmaZ**2 : the second centred velocity moment from an MGE model
WARNING: this function takes R2 and Z2 as input, not R and Z
Input : R2 and Z2: squares of the R and Z coordinates
ilist: indices for Gaussians to take into account
"""
r2 = R2 + Z2
r = sqrt(r2)
r2soft = r2 + self.SoftarcMbh2
rsoft = sqrt(r2soft)
## Compute the mass density for individual gaussians as well as the sum
[Xquad, Wquad] = quadrat_ps_roots(self.Nquad)
sigz2 = np.sum(Wquad[i] * self._intsigma_z2(Xquad[i], R2, Z2, ilist) for i in xrange(self.Nquad))
# Contribution from the BH
if self.Mbh > 0. :
for i in ilist:
var = (r / self._pParam.dqSig3Darc[i]).astype(floatG)
mask = (var < _Maximum_Value_forEXPERFC)
lasterm = np.empty_like(var)
# facMbh in M arcsec2 pc-2 / 4PI G
lasterm[mask] = self._dParam.sqpi2s[i] * special.erfc(var[mask]) * np.exp(var[mask]**2)
lasterm[~mask] = 2. / (r[~mask] + sqrt(r2[~mask] + self._dParam.qq2s2[i]))
sigz2 += self.rho[i] * self.facMbh * (1. / rsoft - lasterm) # in rho * M arcsec pc-2 / 4 PI G
return sigz2 * self.PIG / self.rhoT
#===========================================================
#######################################################
def sigma_z2(self, R, Z, ilist=None) :
""" Compute SigmaZ^2 : the second centred velocity moment from an MGE model
:param R: input Radial coordinate
:param Z: input Vertical coordinate
:param ilist: indices for the Gaussians to take into account
"""
### Set the list of indices
ilist = self._set_ilist(ilist)
R2 = R*R
Z2 = Z*Z
r2 = R2 + Z2
r = sqrt(r2)
r2soft = r2 + self.SoftarcMbh2
rsoft = sqrt(r2soft)
## Compute the mass density for individual gaussians as well as the sum
[Xquad, Wquad] = quadrat_ps_roots(self.Nquad)
sigz2 = np.sum(Wquad[i] * self._intsigma_z2(Xquad[i], R2, Z2, ilist) for i in xrange(self.Nquad))
# Contribution from the BH
if self.Mbh > 0. :
for i in ilist :
# facMbh in M arcsec2 pc-2 / 4PI G
var = (r / self._pParam.dqSig3Darc[i]).astype(floatG)
mask = (var < _Maximum_Value_forEXPERFC)
lasterm = np.empty_like(var)
# facMbh in M arcsec2 pc-2 / 4PI G
lasterm[mask] = self._dParam.sqpi2s[i] * special.erfc(var[mask]) * np.exp(var[mask]**2)
lasterm[~mask] = 2. / (r[~mask] + sqrt(r2[~mask] + self._dParam.qq2s2[i]))
sigz2 += self.rho[i] * self.facMbh * (1. / rsoft - lasterm) # in rho * M arcsec pc-2 / 4 PI G
return sigz2 * self.PIG / self.rhoT
#===========================================================
#######################################################
def _vtheta2_fromR2Z2(self, R2, Z2, ilist=None) :
"""
Compute Vtheta**2 : the first velocity moment from an MGE model
WARNING: This function uses R2 and Z2 (squares) as input, not R and Z
Input : R2, Z2 as input coordinates
ilist: indices of the Gaussians
"""
### Set the list of indices
ilist = self._set_ilist(ilist)
r2 = R2 + Z2
r = sqrt(r2)
r2soft = r2 + self.SoftarcMbh2
rsoft = sqrt(r2soft)
## Compute the mass density for individual gaussians as well as the sum
[Xquad, Wquad] = quadrat_ps_roots(self.Nquad)
# MU2
VT2 = np.sum(Wquad[i] * self._intvtheta2(Xquad[i], R2, Z2, ilist) for i in xrange(self.Nquad))
# Contribution from the BH
if self.Mbh > 0. :
for i in ilist :
var = (r / self._pParam.dqSig3Darc[i]).astype(floatG)
mask = (var < _Maximum_Value_forEXPERFC)
lasterm = np.empty_like(var)
# facMbh in M arcsec2 pc-2 / 4PI G
lasterm[mask] = self._dParam.sqpi2s[i] * special.erfc(var[mask]) * np.exp(var[mask]**2)
lasterm[~mask] = 2. / (r[~mask] + sqrt(r2[~mask] + self._dParam.qq2s2[i]))
VT2 += (1. + self._dParam.e2q2Sig3Darc2[i] * R2) * self.rho[i] * self.facMbh \
* (1. / rsoft - lasterm) # in rhoT * M arcsec pc-2 / 4 PI G
return VT2 * self.PIG / self.rhoT
#===========================================================
#######################################################
def vtheta2(self, R, Z, ilist=None) :
"""
Compute Vtheta**2 : the first velocity moment from an MGE model
Input : R, Z as input coordinates
ilist: indices of the Gaussians
"""
### Set the list of indices
ilist = self._set_ilist(ilist)
R2 = R*R
Z2 = Z*Z
r2 = R2 + Z2
r = sqrt(r2)
r2soft = r2 + self.SoftarcMbh2
rsoft = sqrt(r2soft)
## Compute the mass density for individual gaussians as well as the sum
[Xquad, Wquad] = quadrat_ps_roots(self.Nquad)
# MU2
VT2 = np.sum(Wquad[i] * self._intvtheta2(Xquad[i], R2, Z2, ilist=ilist) for i in xrange(self.Nquad))
# Contribution from the BH
if self.Mbh > 0. :
for i in ilist :
var = (r / self._pParam.dqSig3Darc[i]).astype(floatG)
mask = (var < _Maximum_Value_forEXPERFC)
lasterm = np.empty_like(var)
# facMbh in M arcsec2 pc-2 / 4PI G
lasterm[mask] = self._dParam.sqpi2s[i] * special.erfc(var[mask]) * np.exp(var[mask]**2)
lasterm[~mask] = 2. / (r[~mask] + sqrt(r2[~mask] + self._dParam.qq2s2[i]))
VT2 += (1. + self._dParam.e2q2Sig3Darc2[i] * R2) * self.rho[i] * self.facMbh \
* (1. / rsoft - lasterm) # in rhoT * M arcsec pc-2 / 4 PI G
return VT2 * self.PIG / self.rhoT
#===========================================================
#######################################################
    def _sigmaz2_muTheta2_fromR2Z2(self, R2, Z2, ilist=None) :
        """
        Compute both Sigma_Z**2 and Mu_Theta**2, the centred and non-centred
        second order velocity moments from an MGE model.

        WARNING: takes R2 and Z2 (squares of R and Z) as input, and assumes
        the caller has already resolved ilist (no _set_ilist call here).

        :param R2: squared cylindrical radius
        :param Z2: squared vertical height
        :param ilist: list of indices of Gaussians to take into account
        :returns: (sigz2, muTheta2)
        """
        r2 = R2 + Z2
        r = sqrt(r2)
        r2soft = r2 + self.SoftarcMbh2
        rsoft = sqrt(r2soft)
        ## Compute the mass density for individual gaussians as well as the sum
        [Xquad, Wquad] = quadrat_ps_roots(self.Nquad)
        sigz2 = np.zeros_like(R2)
        # sigmaz2: quadrature sum of the SigmaZ^2 integrand
        for i in xrange(self.Nquad) :
            sigz2 += Wquad[i] * self._intsigma_z2(Xquad[i], R2, Z2, ilist)
        # MU2: quadrature sum of the Vtheta^2 integrand
        muTheta2 = np.zeros_like(R2)
        for i in xrange(self.Nquad) :
            muTheta2 += Wquad[i] * self._intvtheta2(Xquad[i], R2, Z2, ilist)
        # Contribution from the BH
        if self.Mbh > 0. :
            for i in ilist :
                var = (r / self._pParam.dqSig3Darc[i]).astype(floatG)
                mask = (var < _Maximum_Value_forEXPERFC)
                lasterm = np.empty_like(var)
                # facMbh in M arcsec2 pc-2 / 4PI G
                lasterm[mask] = self._dParam.sqpi2s[i] * special.erfc(var[mask]) * np.exp(var[mask]**2)
                # Large-var branch: closed-form limit avoids exp overflow
                lasterm[~mask] = 2. / (r[~mask] + sqrt(r2[~mask] + self._dParam.qq2s2[i]))
                sigz2 += self.rho[i] * self.facMbh * (1. / rsoft - lasterm) # in rho * M arcsec pc-2 / 4 PI G
                muTheta2 += (1. + self._dParam.e2q2Sig3Darc2[i] * R2) * self.rho[i] * self.facMbh \
                        * (1. / rsoft - lasterm) # in rhoT * M arcsec pc-2 / 4 PI G
        sigz2 *= self.PIG / self.rhoT
        muTheta2 *= self.PIG / self.rhoT
        return sigz2, muTheta2
#===========================================================
#######################################################
def sigmaz2_muTheta2(self, R, Z, ilist) :
"""
Compute both Sigma_Z**2 and Mu_Z**2 the centred and non-centred
second order velocity moments from an MGE model
op can be "all" or "sigma" or "mu" depending on which quantity is needed
Input : R and Z the coordinates
ilist : Gaussian indices to take into account
"""
### Set the list of indices
ilist = self._set_ilist(ilist)
R2 = R*R
Z2 = Z*Z
r2 = R2 + Z2
r = sqrt(r2)
r2soft = r2 + self.SoftarcMbh2
rsoft = sqrt(r2soft)
## Compute the mass density for individual gaussians as well as the sum
[Xquad, Wquad] = quadrat_ps_roots(self.Nquad)
sigz2 = np.zeros_like(R2)
# sigmaz2
for i in xrange(self.Nquad) :
sigz2 += Wquad[i] * self._intsigma_z2(Xquad[i], R2, Z2, ilist)
# MU2
muTheta2 = np.zeros_like(R2)
for i in xrange(self.Nquad) :
muTheta2 += Wquad[i] * self._intvtheta2(Xquad[i], R2, Z2, ilist)
# Contribution from the BH
if self.Mbh > 0. :
for i in ilist :
var = (r / self._pParam.dqSig3Darc[i]).astype(floatG)
mask = (var < _Maximum_Value_forEXPERFC)
lasterm = np.empty_like(var)
# facMbh in M arcsec2 pc-2 / 4PI G
lasterm[mask] = self._dParam.sqpi2s[i] * special.erfc(var[mask]) * np.exp(var[mask]**2)
lasterm[~mask] = 2. / (r [~mask]+ sqrt(r2[~mask] + self._dParam.qq2s2[i]))
sigz2 += self.rho[i] * self.facMbh * (1. / rsoft - lasterm) # in rho * M arcsec pc-2 / 4 PI G
muTheta2 += (1. + self._dParam.e2q2Sig3Darc2[i] * R2) * self.rho[i] * self.facMbh \
* (1. / rsoft - lasterm) # in rhoT * M arcsec pc-2 / 4 PI G
sigz2 *= self.PIG / self.rhoT
muTheta2 *= self.PIG / self.rhoT
return sigz2, muTheta2
#===========================================================
|
|
from __future__ import absolute_import
from .fixtures import *
from blitzdb.tests.helpers.movie_data import Actor, Director, Movie
import blitzdb
def test_basic_delete(backend, small_test_data):
    """Deleting every Actor leaves an empty result set."""
    backend.filter(Actor, {}).delete()
    backend.commit()
    remaining = backend.filter(Actor, {})
    assert len(remaining) == 0
def test_basic_storage(backend, small_test_data):
    """All fixture movies and actors are retrievable after setup."""
    movies, actors, _directors = small_test_data
    assert len(backend.filter(Movie, {})) == len(movies)
    assert len(backend.filter(Actor, {})) == len(actors)
#removed this functionality since it was misleading...
@pytest.mark.skipif(True, reason='Removed functionality')
def test_keys_with_dots(backend):
    """Dotted keys used to round-trip; kept only as a skipped regression stub."""
    document = {'some.key.with.nasty.dots': [{'some.more.nasty.dots': 100}], 'pk': 'test'}
    actor = Actor(document)
    backend.save(actor)
    backend.commit()
    assert actor == backend.get(Actor, {'pk': 'test'})
def test_delete(backend):
    """Deleting an attribute is persisted through save/commit."""
    actor = Actor({'foo' : 'bar'})
    backend.save(actor)
    backend.commit()
    assert actor.foo == 'bar'
    stored = backend.get(Actor, {'pk' : actor.pk})
    assert stored.foo == 'bar'
    # Remove the attribute: both attribute and item access must now fail
    del actor.foo
    with pytest.raises(AttributeError):
        actor.foo
    with pytest.raises(KeyError):
        actor['foo']
    backend.save(actor)
    backend.commit()
    # The deletion survives a round-trip through the backend
    with pytest.raises(AttributeError):
        backend.get(Actor, {'pk' : actor.pk}).foo
def test_negative_indexing(backend, small_test_data):
    """Negative indices and slices behave like their positive equivalents."""
    actors = backend.filter(Actor, {})
    n = len(actors)
    assert actors[-1] == actors[n - 1]
    assert actors[-10:-1] == actors[n - 10:n - 1]
    assert actors[-n:-1] == actors[0:n - 1]
    # To do: Make step tests for file backend (MongoDB does not support this)
    # assert actors[-10:-1:2] == actors[len(actors)-10:len(actors)-1:2]
def test_missing_keys_in_slice(backend, small_test_data):
    """Open-ended slices are equivalent to their fully explicit forms."""
    actors = backend.filter(Actor, {})
    n = len(actors)
    assert actors[:] == actors
    assert actors[1:] == actors[1:n]
    assert actors[:n] == actors[0:n]
def test_query_set(backend):
    """Iterating a filtered query set yields the matching document.

    Uses the builtin next(qs) rather than qs.next(): the builtin works on
    both Python 2 (calls .next()) and Python 3 (calls .__next__()), while
    the original .next() call is Python-2-only.
    """
    actors = [Actor({'foo': 'bar', 'value': 10}),
              Actor({'foo': 'baz', 'value': 10}),
              Actor({'foo': 'baz', 'value': 11}),
              Actor({'foo': 'bar', 'value': 11})
              ]
    for actor in actors:
        backend.save(actor)
    backend.commit()
    queryset = backend.filter(Actor, {'foo': 'bar','value' : 10})
    assert next(queryset) == actors[0]
def test_and_queries(backend):
    """Multiple keys in one query dict are implicitly AND-ed together."""
    for foo, value in (('bar', 10), ('baz', 10), ('baz', 11), ('bar', 11)):
        backend.save(Actor({'foo': foo, 'value': value}))
    backend.commit()
    # Single-key queries each match two documents
    assert len(backend.filter(Actor, {'foo': 'bar'})) == 2
    assert len(backend.filter(Actor, {'value': 10})) == 2
    # Two-key queries pin down exactly one document each
    for foo, value in (('bar', 10), ('baz', 10), ('bar', 11), ('baz', 11)):
        assert len(backend.filter(Actor, {'foo': foo, 'value': value})) == 1
def test_composite_queries(backend):
    """List equality, $all and $in queries -- without and with an index.

    Bug fix: the original loop iterated over the two lambdas but never
    invoked them, so backend.create_index was never called and the second
    pass re-ran the exact same un-indexed scenario.
    """
    backend.filter(Actor, {}).delete()
    backend.save(Actor({'values': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}))
    backend.save(Actor({'values': [7, 6, 5, 4, 3, 2, 1]}))
    backend.save(Actor({'values': [1, 2, 3, 4]}))
    backend.save(Actor({'values': [1, 2, 3, 4, {'foo': 'bar'}]}))
    backend.save(Actor({'values': 'foobar'}))
    backend.commit()
    for f in (lambda: True, lambda: backend.create_index(Actor, 'values')):
        f()  # first pass: no-op; second pass: create the index, then re-check
        assert len(backend.filter(Actor, {})) == 5
        assert len(backend.filter(Actor, {'values': [1, 2, 3, 4]})) == 1
        assert len(backend.filter(Actor, {'values': [1, 2, 3, 4, {'foo': 'bar'}]})) == 1
        assert len(backend.filter(Actor, {'values': [1, 2, 3, {'foo': 'bar'}, 4]})) == 0
        assert len(backend.filter(Actor, {'values': [1, 2, 3, 4, 5]})) == 0
        assert len(backend.filter(Actor, {'values': [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]})) == 0
        assert len(backend.filter(Actor, {'values': {'$all': [4, 3, 2, 1]}})) == 4
        assert len(backend.filter(Actor, {'values': {'$all': [4, 3, 2, 1, {'foo': 'bar'}]}})) == 1
        assert len(backend.filter(Actor, {'values': {'$all': [{'foo': 'bar'}]}})) == 1
        assert len(backend.filter(Actor, {'values': {'$all': [4, 3, 2, 1, 14]}})) == 0
        assert len(backend.filter(Actor, {'values': {'$all': [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]}})) == 1
        assert len(backend.filter(Actor, {'values': {'$in': [[1, 2, 3, 4], [7, 6, 5, 4, 3, 2, 1], [1, 2, 3, 5], 'foobar']}})) == 3
def test_operators(backend):
    # Exercises comparison ($gt/$gte/$lt/$lte), logical ($and/$or/$not),
    # membership ($in) and inequality ($ne) operators against four fully
    # known Actor documents.
    backend.filter(Actor, {}).delete()
    marlon_brando = Actor({'name': 'Marlon Brando', 'gross_income_m': 1.453, 'appearances': 78, 'is_funny': False, 'birth_year': 1924})
    leonardo_di_caprio = Actor({'name': 'Leonardo di Caprio', 'gross_income_m': 12.453, 'appearances': 34, 'is_funny': 'it depends', 'birth_year': 1974})
    david_hasselhoff = Actor({'name': 'David Hasselhoff', 'gross_income_m': 12.453, 'appearances': 173, 'is_funny': True, 'birth_year': 1952})
    charlie_chaplin = Actor({'name': 'Charlie Chaplin', 'gross_income_m': 0.371, 'appearances': 473, 'is_funny': True, 'birth_year': 1889})
    backend.save(marlon_brando)
    backend.save(leonardo_di_caprio)
    backend.save(david_hasselhoff)
    backend.save(charlie_chaplin)
    backend.commit()
    assert len(backend.filter(Actor, {})) == 4
    # $and combining a comparison with an equality; `results` lists the
    # expected matches for each operator
    for op, results in (('$gt', [david_hasselhoff]), ('$gte', [david_hasselhoff]), ('$lt', [charlie_chaplin]), ('$lte', [charlie_chaplin])):
        query = {
            '$and':
                [
                    {'gross_income_m': {op: 1.0}},
                    {'is_funny': True}
                ]
        }
        assert len(backend.filter(Actor, query)) == len(results)
        # NOTE(review): `results` is a *list* of documents; this relies on
        # the query set's __contains__ accepting a list -- confirm against
        # the backend's query-set implementation.
        assert results in backend.filter(Actor, query)
    # Nested $and of two $or groups
    for op, results in (('$gt', [david_hasselhoff, charlie_chaplin, marlon_brando]), ('$gte', [marlon_brando, david_hasselhoff, charlie_chaplin]), ('$lt', [charlie_chaplin]), ('$lte', [charlie_chaplin])):
        query = {
            '$and':
                [
                    {'$or': [
                        {'gross_income_m': {op: 1.0}},
                        {'birth_year': {'$lt': 1900}},
                    ]},
                    {'$or': [
                        {'is_funny': True},
                        {'name': 'Marlon Brando'},
                    ]},
                ]
        }
        assert len(backend.filter(Actor, query)) == len(results)
        assert results in backend.filter(Actor, query)
    # $ne / equality / $not+$in / $in on the name field
    assert len(backend.filter(Actor, {'name': {'$ne': 'David Hasselhoff'}})) == 3
    assert len(backend.filter(Actor, {'name': 'David Hasselhoff'})) == 1
    assert len(backend.filter(Actor, {'name': {'$not': {'$in': ['David Hasselhoff', 'Marlon Brando', 'Charlie Chaplin']}}})) == 1
    assert len(backend.filter(Actor, {'name': {'$in': ['Marlon Brando', 'Leonardo di Caprio']}})) == 2
def test_regex_operator(backend, small_test_data):
    """$regex queries match string fields with full regex semantics."""
    backend.filter(Actor, {}).delete()
    brando = Actor({'name': 'Marlon Brando', 'gross_income_m': 1.453, 'appearances': 78, 'is_funny': False, 'birth_year': 1924})
    wayans = Actor({'name': 'Marlon Wayans'})
    for candidate in (brando, wayans):
        backend.save(candidate)
    backend.commit()
    # Negative lookahead excludes Wayans; broader patterns match both/one
    assert backend.get(Actor, {'name': {'$regex': r'^Marlon\s+(?!Wayans)[\w]+$'}}) == brando
    assert len(backend.filter(Actor, {'name': {'$regex': r'^Marlon\s+.*$'}})) == 2
    assert len(backend.filter(Actor, {'name': {'$regex': r'^.*\s+Brando$'}})) == 1
def test_list_query(backend, small_test_data):
    """Querying a list field with one document tests membership."""
    movies, actors, directors = small_test_data
    # Find a movie with a reasonably large cast
    movie = None
    idx = 0
    while not movie or len(movie.cast) < 4:
        movie = movies[idx]
        idx += 1
    actor = movie.cast[0]['actor']
    # Find a movie this actor is NOT in
    other_movie = movies[idx % len(movies)]
    while other_movie in actor.movies:
        other_movie = movies[idx % len(movies)]
        idx += 1
    assert actor in backend.filter(Actor, {'movies': movie})
    assert actor not in backend.filter(Actor, {'movies': other_movie})
def test_list_query_multiple_items(backend, small_test_data):
    """Querying with a whole list value matches documents holding that list."""
    movies, actors, directors = small_test_data
    # Find an actor appearing in at least two movies
    candidate = None
    pos = 0
    while not candidate or len(candidate.movies) < 2:
        candidate = actors[pos]
        pos += 1
    assert candidate in backend.filter(Actor, {'movies': candidate.movies})
def test_indexed_delete(backend, small_test_data):
    """Deleting actors movie-by-movie leaves every remaining actor movieless."""
    for movie in backend.filter(Movie, {}):
        backend.filter(Actor, {'movies': movie}).delete()
    backend.commit()
    for actor in backend.filter(Actor, {}):
        assert actor.movies == []
def test_non_indexed_delete(backend, small_test_data):
    """Deleting via a $all query works on a non-indexed field."""
    movies, _actors, _directors = small_test_data
    for movie in movies:
        backend.filter(Director, {'movies': {'$all': [movie]}}).delete()
    backend.commit()
    for director in backend.filter(Director, {}):
        assert director.movies == []
def test_positional_query(backend, small_test_data):
    """
    We test a search query which explicitly references a given list item in an object
    """
    (movies, actors, directors) = small_test_data
    movie = None
    i = 0
    # Scan until we land on a movie with a cast of at least 3; `movie`,
    # `actor` and `index` keep being overwritten for every non-empty cast,
    # so on exit they describe the last movie examined (cast >= 3).
    # NOTE(review): assumes the fixture contains such a movie; otherwise
    # this loop raises IndexError -- confirm against the fixture data.
    while not movie or len(movie.cast) < 3:
        if len(movies[i].cast):
            movie = movies[i]
            actor = movie.cast[0]['actor']
            index = actor.movies.index(movie)
        i += 1
    # 'movies.%d' addresses one positional element of the actor's list field
    assert actor in backend.filter(Actor, {'movies.%d' % index: movie})
def test_default_backend(backend, small_test_data):
    """A document can delete itself via its default backend."""
    movies = backend.filter(Movie, {})
    count_before = len(movies)
    victim = movies[0]
    victim.delete()
    backend.commit()
    with pytest.raises(Movie.DoesNotExist):
        backend.get(Movie, {'pk': victim.pk})
    assert count_before == len(backend.filter(Movie, {})) + 1
def test_index_reloading(backend, small_test_data):
    """Index lookups stay consistent after a delete + commit."""
    first_movie = small_test_data[0][0]
    backend.filter(Actor, {'movies': first_movie}).delete()
    backend.commit()
    assert list(backend.filter(Actor, {'movies': first_movie})) == []
def test_query_function(backend):
    """Callable query values filter by predicate (file backend only)."""
    if isinstance(backend, blitzdb.backends.mongo.Backend):
        pytest.skip('Query by function is not supported for MongoDB')
    for name, year in (('The Godfather', 1972), ('Goodfellas', 1990), ('Star Wars', 1977)):
        Movie({'name': name, 'year': year}).save(backend)
    backend.commit()
    seventies = backend.filter(Movie, {
        'year': lambda year: year >= 1970 and year <= 1979,
    })
    assert sorted([m.name for m in seventies]) == ['Star Wars', 'The Godfather']
|
|
#!/usr/bin/python
# TODO: issues with new oauth2 stuff. Keep using older version of Python for now.
# #!/usr/bin/env python
from participantCollection import ParticipantCollection
import re
import datetime
import pyperclip
# Edit Me!
currentMonthTotalDays = 29

# Calendar-derived values used by the templates below.
currentMonthIndex = datetime.date.today().month
currentMonthPenultimateDayIndex = currentMonthTotalDays - 1
currentMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[currentMonthIndex]
nextMonthIndex = currentMonthIndex % 12 + 1
nextMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[nextMonthIndex]
currentDayOfMonthIndex = datetime.date.today().day
# TODO: testing...
# currentDayOfMonthIndex = 31
currentDayOfMonthName = {1: 'first', 2: 'second', 3: 'third', 4: 'fourth', 5: 'fifth', 6: 'sixth', 7: 'seventh', 8: 'eighth', 9: 'ninth', 10: 'tenth', 11: 'eleventh', 12: 'twelfth', 13: 'thirteenth', 14: 'fourteenth', 15: 'fifteenth', 16: 'sixteenth', 17: 'seventeenth', 18: 'eighteenth', 19: 'nineteenth', 20: 'twentieth', 21: 'twenty-first', 22: 'twenty-second', 23: 'twenty-third', 24: 'twenty-fourth', 25: 'twenty-fifth', 26: 'twenty-sixth', 27: 'twenty-seventh', 28: 'twenty-eighth', 29: 'twenty-ninth', 30: 'thirtieth', 31: 'thirty-first'}[currentDayOfMonthIndex]
currentDayOfWeekName = {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday', 4: 'Friday', 5: 'Saturday', 6: 'Sunday'}[datetime.date.today().weekday()]
participants = ParticipantCollection()
numberStillIn = participants.sizeOfParticipantsWhoAreStillIn()
initialNumber = participants.size()
# Force float division: this script runs on Python 2 (print statements),
# where 100 * numberStillIn / initialNumber truncates to an int *before*
# round() runs, silently flooring the percentage.
percentStillIn = int(round(100 * float(numberStillIn) / initialNumber, 0))
# print "There are currently **" + str(numberStillIn) + " out of " + str(initialNumber) +"** original participants. That's **" + str(int(round(100*numberStillIn/initialNumber,0))) + "%** Here is the list of participants still with the challenge:\n"
def stringToPrintLegacy():
    """Old-style summary: header with counts, then the participant list.

    '~' after a name marks a participant who has not yet checked in.
    """
    header = "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
    # Plain-text tokens, so str.replace is equivalent to re.sub here
    header = header.replace('NUMBER_STILL_IN', str(numberStillIn))
    header = header.replace('INITIAL_NUMBER', str(initialNumber))
    header = header.replace('PERCENT_STILL_IN', str(percentStillIn))
    entries = []
    for participant in participants.participantsWhoAreStillIn():
        line = "/u/" + participant.name
        if not participant.hasCheckedIn:
            line += " ~"
        entries.append(line + "\n\n")
    return header + "".join(entries)
def templateForParticipants():
    """One '/u/name' paragraph per remaining participant.

    A trailing ' ~' marks a participant who has not yet checked in.
    """
    chunks = []
    for participant in participants.participantsWhoAreStillIn():
        marker = "" if participant.hasCheckedIn else " ~"
        chunks.append("/u/" + participant.name + marker + "\n\n")
    return "".join(chunks)
def templateForParticipantsOnFinalDay():
    """Final-day listing: checked-in participants first, then at-risk ones."""
    parts = []
    parts.append("These participants have checked in at least once in the last 15 days:\n")
    parts.append("\n")
    for participant in participants.participantsWhoAreStillInAndHaveCheckedIn():
        parts.append("/u/" + participant.name + "\n")
    parts.append("\n")
    parts.append("These participants have not reported a relapse, so they are still in the running, but **if they do not check in by the end of today, they will be removed from the list, and will not be considered victorious**:\n")
    parts.append("\n")
    for participant in participants.participantsWhoAreStillInAndHaveNotCheckedIn():
        parts.append("/u/" + participant.name + " ~\n")
    parts.append("\n")
    return "".join(parts)
def templateFor1():
    # Template for day 1 of the month: announces the challenge rules and the
    # start of the 3-day late-signup grace period, then appends the full
    # participant list. Placeholder tokens (CURRENT_..., NEXT_...,
    # INITIAL_NUMBER) are substituted by the caller, not here.
    # The bare print statements are Python 2 debug output to the console.
    print '1\n\n'
    answer = ""
    print "============================================================="
    answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean CURRENT_MONTH_NAME challenge. ~~We will no longer be accepting new signups.~~ Good news! We will be be accepting late signups for the next 3 days. If you forgot to sign up for the CURRENT_MONTH_NAME challenge, just leave a \"sign me up\" comment below, and I'll add you. Best of luck to everyone here!\n"
    answer += "\n"
    answer += "Here's how this thing works:\n"
    answer += "\n"
    answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
    answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
    answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
    answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
    answer += "- ~~We will not be accepting any new participants~~, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
    answer += "\n"
    answer += "Good luck!\n"
    answer += "\n"
    answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
    answer += "\n"
    answer += "Here are our **INITIAL_NUMBER** original participants:\n\n"
    answer += templateForParticipants()
    print "============================================================="
    return answer
def templateFor2():
    # Template for day 2: second day of the 3-day late-signup grace period.
    # Same guideline boilerplate as templateFor1, but the header uses the
    # counts (NUMBER_STILL_IN / PERCENT_STILL_IN) instead of the roster
    # announcement. Tokens are substituted by the caller, not here.
    print '2\n\n'
    answer = ""
    answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean CURRENT_MONTH_NAME challenge. This is the second day of our 3 day late-signup grace period. If you forgot to sign up for the CURRENT_MONTH_NAME challenge, just leave a \"sign me up\" comment below, and I'll add you.\n"
    answer += "\n"
    answer += "Guidelines:\n"
    answer += "\n"
    answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
    answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
    answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
    answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
    answer += "- ~~We will not be accepting any new participants~~, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
    answer += "\n"
    answer += "Good luck!\n"
    answer += "\n"
    answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
    answer += "\n"
    answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
    answer += templateForParticipants()
    return answer
def templateFor3():
    # Template for day 3: last day of the 3-day late-signup grace period.
    # Identical structure to templateFor2 except for the header sentence.
    # Tokens are substituted by the caller, not here.
    print '3\n\n'
    answer = ""
    answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean CURRENT_MONTH_NAME challenge. This is the last day of our 3 day late-signup grace period. If you forgot to sign up for the CURRENT_MONTH_NAME challenge, just leave a \"sign me up\" comment below, and I'll add you. After today, further signup requests will be silently ignored.\n"
    answer += "\n"
    answer += "Guidelines:\n"
    answer += "\n"
    answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
    answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
    answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
    answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
    answer += "- ~~We will not be accepting any new participants~~, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
    answer += "\n"
    answer += "Good luck!\n"
    answer += "\n"
    answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
    answer += "\n"
    answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
    answer += templateForParticipants()
    return answer
def templateFor4():
print '4\n\n'
answer = ""
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean CURRENT_MONTH_NAME challenge. Our 3 day late-signup grace period is now over. If you forgot to sign up, it's too late for CURRENT_MONTH_NAME, but feel free to leave comments here anyway, and we'll see you in NEXT_MONTH_NAME.\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
return answer
def templateFor5to9():
print '5 to 9\n\n'
answer = ""
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
return answer
def templateFor10to14():
print '10 to 14\n\n'
answer = ""
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
answer += "\n"
answer += "**THE COUNTDOWN: Attention everyone!** You have " + str(15 - currentDayOfMonthIndex) + " days to make an update comment (if you haven't already) to be counted as an active participant! **Otherwise your name will be REMOVED from the list** on CURRENT_MONTH_INDEX/15!!\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
return answer
def templateFor15():
print '15\n\n'
answer = ""
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
answer += "\n"
answer += "**THIS IS YOUR LAST DAY TO CHECK IN** (if you haven't already) **BEFORE YOUR NAME IS REMOVED FROM THE LIST!** Check in by posting a brief comment.\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
return answer
def templateFor16toPenultimate():
print '16 to penultimate\n\n'
answer = ""
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
answer += "\n"
answer += "If you think you should still be on this list but aren't, you probably got removed in the great purge of CURRENT_MONTH_NAME 15th because you never checked in. However, if you let me know you're still with it I will re-add you.\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- If you have a "~" after your name, you have yet to check in on any update threads since CURRENT_MONTH_NAME 15. If it is still there by CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
return answer
def templateForUltimate():
print 'Ultimate\n\n'
answer = ""
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the last day of the Stay Clean CURRENT_MONTH_NAME challenge. This is it, folks, the day we've been waiting for... the final day of the challenge. I'll be making a congratulatory post tomorrow to honor the victors. I'm really proud of everyone who signed up for this challenge. Quitting porn is difficult, especially in an era where porn is always as close as a few keystrokes, and triggers are absolutely everywhere. Everybody who gave it their best shot deserves to take a minute right now to feel good about themselves.\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
# TODO: need to do the part where it lists the checked in and non-checked in participants separately.
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**.\n\n"
answer += templateForParticipantsOnFinalDay()
return answer
def templateToUse():
    """Pick the template function matching the current day of the month.

    Reads the module-level globals currentDayOfMonthIndex and
    currentMonthPenultimateDayIndex; any day past the penultimate day
    (i.e. the last day of the month) falls through to the final template.
    """
    if currentDayOfMonthIndex == 1:
        return templateFor1()
    elif currentDayOfMonthIndex == 2:
        return templateFor2()
    elif currentDayOfMonthIndex == 3:
        return templateFor3()
    elif currentDayOfMonthIndex == 4:
        return templateFor4()
    elif 5 <= currentDayOfMonthIndex <= 9:
        return templateFor5to9()
    elif 10 <= currentDayOfMonthIndex <= 14:
        return templateFor10to14()
    # FIX: this branch was a bare `if`, silently splitting the dispatch into
    # two statements. Behavior happened to be identical only because every
    # earlier branch returns; make the single elif chain explicit.
    elif currentDayOfMonthIndex == 15:
        return templateFor15()
    elif 16 <= currentDayOfMonthIndex <= currentMonthPenultimateDayIndex:
        return templateFor16toPenultimate()
    else:
        return templateForUltimate()
def stringToPrint():
    """Render today's post: pick the day-appropriate template and fill in
    every placeholder token from the module-level globals.

    Uses str.replace instead of re.sub: the tokens are literal strings, so
    plain replacement avoids re.sub's regex metacharacter and
    backslash-escape handling in the replacement text (a month/day name
    containing a backslash would corrupt the output under re.sub).
    """
    answer = templateToUse()
    # Order preserved from the original: no token here is a substring of a
    # token that appears after it, so each replacement is unambiguous.
    substitutions = [
        ('NUMBER_STILL_IN', str(numberStillIn)),
        ('INITIAL_NUMBER', str(initialNumber)),
        ('PERCENT_STILL_IN', str(percentStillIn)),
        ('CURRENT_MONTH_INDEX', str(currentMonthIndex)),
        ('CURRENT_MONTH_TOTAL_DAYS', str(currentMonthTotalDays)),
        ('CURRENT_MONTH_PENULTIMATE_DAY_INDEX', str(currentMonthPenultimateDayIndex)),
        ('CURRENT_MONTH_NAME', currentMonthName),
        ('NEXT_MONTH_INDEX', str(nextMonthIndex)),
        ('NEXT_MONTH_NAME', nextMonthName),
        ('CURRENT_DAY_OF_MONTH_INDEX', str(currentDayOfMonthIndex)),
        ('CURRENT_DAY_OF_MONTH_NAME', currentDayOfMonthName),
        ('CURRENT_DAY_OF_WEEK_NAME', currentDayOfWeekName),
    ]
    for token, value in substitutions:
        answer = answer.replace(token, value)
    return answer
# Build the post, echo it between separator rules for a quick visual check,
# and copy it to the system clipboard (via pyperclip) for pasting to reddit.
outputString = stringToPrint()
print "============================================================="
print outputString
print "============================================================="
pyperclip.copy(outputString)
|
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Standard Ansible module metadata: community-supported, interface still
# "preview" (may change between releases).
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_snmp_user
version_added: "2.4"
short_description: Manages SNMP user configuration on HUAWEI CloudEngine switches.
description:
- Manages SNMP user configurations on CloudEngine switches.
author:
- wangdezhuang (@CloudEngine-Ansible)
options:
acl_number:
description:
- Access control list number.
required: false
default: null
usm_user_name:
description:
- Unique name to identify the USM user.
required: false
default: null
aaa_local_user:
description:
- Unique name to identify the local user.
required: false
default: null
remote_engine_id:
description:
- Remote engine id of the USM user.
required: false
default: null
user_group:
description:
- Name of the group where user belongs to.
required: false
default: null
auth_protocol:
description:
- Authentication protocol.
required: false
default: null
choices: ['noAuth', 'md5', 'sha']
auth_key:
description:
- The authentication password. Password length, 8-255 characters.
required: false
default: null
priv_protocol:
description:
- Encryption protocol.
required: false
default: null
choices: ['noPriv', 'des56', '3des168', 'aes128', 'aes192', 'aes256']
priv_key:
description:
- The encryption password. Password length 8-255 characters.
required: false
default: null
'''
EXAMPLES = '''
- name: CloudEngine snmp user test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Config SNMP usm user"
ce_snmp_user:
state: present
usm_user_name: wdz_snmp
remote_engine_id: 800007DB03389222111200
acl_number: 2000
user_group: wdz_group
provider: "{{ cli }}"
- name: "Undo SNMP usm user"
ce_snmp_user:
state: absent
usm_user_name: wdz_snmp
remote_engine_id: 800007DB03389222111200
acl_number: 2000
user_group: wdz_group
provider: "{{ cli }}"
- name: "Config SNMP local user"
ce_snmp_user:
state: present
aaa_local_user: wdz_user
auth_protocol: md5
auth_key: huawei123
priv_protocol: des56
priv_key: huawei123
provider: "{{ cli }}"
- name: "Config SNMP local user"
ce_snmp_user:
state: absent
aaa_local_user: wdz_user
auth_protocol: md5
auth_key: huawei123
priv_protocol: des56
priv_key: huawei123
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"acl_number": "2000", "remote_engine_id": "800007DB03389222111200",
"state": "present", "user_group": "wdz_group",
"usm_user_name": "wdz_snmp"}
existing:
description: k/v pairs of existing aaa server
returned: always
type: dict
sample: {"snmp local user": {"local_user_info": []},
"snmp usm user": {"usm_user_info": []}}
end_state:
description: k/v pairs of aaa params after module execution
returned: always
type: dict
sample: {"snmp local user": {"local_user_info": []},
"snmp local user": {"local_user_info": [{"aclNumber": "2000", "engineID": "800007DB03389222111200",
"groupName": "wdz_group", "userName": "wdz_snmp"}]}}
updates:
description: command sent to the device
returned: always
type: list
sample: ["snmp-agent remote-engineid 800007DB03389222111200 usm-user v3 wdz_snmp wdz_group acl 2000"]
'''
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import get_nc_config, set_nc_config, ce_argument_spec, get_config
# get snmp v3 USM user
CE_GET_SNMP_V3_USM_USER_HEADER = """
<filter type="subtree">
<snmp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<usmUsers>
<usmUser>
<userName></userName>
<remoteEngineID></remoteEngineID>
<engineID></engineID>
"""
CE_GET_SNMP_V3_USM_USER_TAIL = """
</usmUser>
</usmUsers>
</snmp>
</filter>
"""
# merge snmp v3 USM user
CE_MERGE_SNMP_V3_USM_USER_HEADER = """
<config>
<snmp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<usmUsers>
<usmUser operation="merge">
<userName>%s</userName>
<remoteEngineID>%s</remoteEngineID>
<engineID>%s</engineID>
"""
CE_MERGE_SNMP_V3_USM_USER_TAIL = """
</usmUser>
</usmUsers>
</snmp>
</config>
"""
# create snmp v3 USM user
CE_CREATE_SNMP_V3_USM_USER_HEADER = """
<config>
<snmp xmlns="http://www.huawei.com/netconf/vrp" format-version="1.0" content-version="1.0">
<usmUsers>
<usmUser operation="create">
<userName>%s</userName>
<remoteEngineID>%s</remoteEngineID>
<engineID>%s</engineID>
"""
CE_CREATE_SNMP_V3_USM_USER_TAIL = """
</usmUser>
</usmUsers>
</snmp>
</config>
"""
# delete snmp v3 USM user
CE_DELETE_SNMP_V3_USM_USER_HEADER = """
<config>
<snmp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<usmUsers>
<usmUser operation="delete">
<userName>%s</userName>
<remoteEngineID>%s</remoteEngineID>
<engineID>%s</engineID>
"""
CE_DELETE_SNMP_V3_USM_USER_TAIL = """
</usmUser>
</usmUsers>
</snmp>
</config>
"""
# get snmp v3 aaa local user
CE_GET_SNMP_V3_LOCAL_USER = """
<filter type="subtree">
<snmp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<localUsers>
<localUser>
<userName></userName>
<authProtocol></authProtocol>
<authKey></authKey>
<privProtocol></privProtocol>
<privKey></privKey>
</localUser>
</localUsers>
</snmp>
</filter>
"""
# merge snmp v3 aaa local user
CE_MERGE_SNMP_V3_LOCAL_USER = """
<config>
<snmp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<localUsers>
<localUser operation="merge">
<userName>%s</userName>
<authProtocol>%s</authProtocol>
<authKey>%s</authKey>
<privProtocol>%s</privProtocol>
<privKey>%s</privKey>
</localUser>
</localUsers>
</snmp>
</config>
"""
# create snmp v3 aaa local user
CE_CREATE_SNMP_V3_LOCAL_USER = """
<config>
<snmp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<localUsers>
<localUser operation="create">
<userName>%s</userName>
<authProtocol>%s</authProtocol>
<authKey>%s</authKey>
<privProtocol>%s</privProtocol>
<privKey>%s</privKey>
</localUser>
</localUsers>
</snmp>
</config>
"""
# delete snmp v3 aaa local user
CE_DELETE_SNMP_V3_LOCAL_USER = """
<config>
<snmp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<localUsers>
<localUser operation="delete">
<userName>%s</userName>
<authProtocol>%s</authProtocol>
<authKey>%s</authKey>
<privProtocol>%s</privProtocol>
<privKey>%s</privKey>
</localUser>
</localUsers>
</snmp>
</config>
"""
class SnmpUser(object):
""" Manages SNMP user configuration """
def netconf_get_config(self, **kwargs):
""" Get configure by netconf """
module = kwargs["module"]
conf_str = kwargs["conf_str"]
xml_str = get_nc_config(module, conf_str)
return xml_str
def netconf_set_config(self, **kwargs):
""" Set configure by netconf """
module = kwargs["module"]
conf_str = kwargs["conf_str"]
xml_str = set_nc_config(module, conf_str)
return xml_str
def check_snmp_v3_usm_user_args(self, **kwargs):
""" Check snmp v3 usm user invalid args """
module = kwargs["module"]
result = dict()
result["usm_user_info"] = []
need_cfg = False
state = module.params['state']
usm_user_name = module.params['usm_user_name']
remote_engine_id = module.params['remote_engine_id']
acl_number = module.params['acl_number']
user_group = module.params['user_group']
auth_protocol = module.params['auth_protocol']
auth_key = module.params['auth_key']
priv_protocol = module.params['priv_protocol']
priv_key = module.params['priv_key']
local_user_name = module.params['aaa_local_user']
if usm_user_name:
if len(usm_user_name) > 32 or len(usm_user_name) == 0:
module.fail_json(
msg='Error: The length of usm_user_name %s is out of [1 - 32].' % usm_user_name)
if remote_engine_id:
if len(remote_engine_id) > 64 or len(remote_engine_id) < 10:
module.fail_json(
msg='Error: The length of remote_engine_id %s is out of [10 - 64].' % remote_engine_id)
conf_str = CE_GET_SNMP_V3_USM_USER_HEADER
if acl_number:
if acl_number.isdigit():
if int(acl_number) > 2999 or int(acl_number) < 2000:
module.fail_json(
msg='Error: The value of acl_number %s is out of [2000 - 2999].' % acl_number)
else:
if not acl_number[0].isalpha() or len(acl_number) > 32 or len(acl_number) < 1:
module.fail_json(
msg='Error: The length of acl_number %s is out of [1 - 32].' % acl_number)
conf_str += "<aclNumber></aclNumber>"
if user_group:
if len(user_group) > 32 or len(user_group) == 0:
module.fail_json(
msg='Error: The length of user_group %s is out of [1 - 32].' % user_group)
conf_str += "<groupName></groupName>"
if auth_protocol:
conf_str += "<authProtocol></authProtocol>"
if auth_key:
if len(auth_key) > 255 or len(auth_key) == 0:
module.fail_json(
msg='Error: The length of auth_key %s is out of [1 - 255].' % auth_key)
conf_str += "<authKey></authKey>"
if priv_protocol:
if not auth_protocol:
module.fail_json(
msg='Error: Please input auth_protocol at the same time.')
conf_str += "<privProtocol></privProtocol>"
if priv_key:
if len(priv_key) > 255 or len(priv_key) == 0:
module.fail_json(
msg='Error: The length of priv_key %s is out of [1 - 255].' % priv_key)
conf_str += "<privKey></privKey>"
conf_str += CE_GET_SNMP_V3_USM_USER_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
if state == "present":
need_cfg = True
else:
xml_str = recv_xml.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
root = ElementTree.fromstring(xml_str)
usm_user_info = root.findall("data/snmp/usmUsers/usmUser")
if usm_user_info:
for tmp in usm_user_info:
tmp_dict = dict()
for site in tmp:
if site.tag in ["userName", "remoteEngineID", "engineID", "groupName", "authProtocol",
"authKey", "privProtocol", "privKey", "aclNumber"]:
tmp_dict[site.tag] = site.text
result["usm_user_info"].append(tmp_dict)
if result["usm_user_info"]:
for tmp in result["usm_user_info"]:
if "userName" in tmp.keys():
if state == "present":
if tmp["userName"] != usm_user_name:
need_cfg = True
else:
if tmp["userName"] == usm_user_name:
need_cfg = True
if "remoteEngineID" in tmp.keys():
if remote_engine_id:
enable = "true"
else:
enable = "false"
if state == "present":
if tmp["remoteEngineID"] != enable:
need_cfg = True
else:
if tmp["remoteEngineID"] == enable:
need_cfg = True
if remote_engine_id:
if "engineID" in tmp.keys():
if state == "present":
if tmp["engineID"] != remote_engine_id:
need_cfg = True
else:
if tmp["engineID"] == remote_engine_id:
need_cfg = True
if user_group:
if "groupName" in tmp.keys():
if state == "present":
if tmp["groupName"] != user_group:
need_cfg = True
else:
if tmp["groupName"] == user_group:
need_cfg = True
if auth_protocol:
if "authProtocol" in tmp.keys():
if state == "present":
if tmp["authProtocol"] != auth_protocol:
need_cfg = True
else:
if tmp["authProtocol"] == auth_protocol:
need_cfg = True
if auth_key:
if "authKey" in tmp.keys():
if state == "present":
if tmp["authKey"] != auth_key:
need_cfg = True
else:
if tmp["authKey"] == auth_key:
need_cfg = True
if priv_protocol:
if "privProtocol" in tmp.keys():
if state == "present":
if tmp["privProtocol"] != priv_protocol:
need_cfg = True
else:
if tmp["privProtocol"] == priv_protocol:
need_cfg = True
if priv_key:
if "privKey" in tmp.keys():
if state == "present":
if tmp["privKey"] != priv_key:
need_cfg = True
else:
if tmp["privKey"] == priv_key:
need_cfg = True
if acl_number:
if "aclNumber" in tmp.keys():
if state == "present":
if tmp["aclNumber"] != acl_number:
need_cfg = True
else:
if tmp["aclNumber"] == acl_number:
need_cfg = True
result["need_cfg"] = need_cfg
return result
    def check_snmp_v3_local_user_args(self, **kwargs):
        """ Check snmp v3 local user invalid args.

        Validates the aaa_local_user parameter set, reads the existing local
        users from the device over NETCONF, and returns a dict with
        "local_user_info" (parsed users) and "need_cfg" (True when the device
        differs from the requested present/absent state). Calls
        module.fail_json() (which exits) on any invalid argument. Everything
        is skipped when aaa_local_user was not supplied.
        """
        module = kwargs["module"]
        result = dict()
        result["local_user_info"] = []
        need_cfg = False

        state = module.params['state']
        local_user_name = module.params['aaa_local_user']
        auth_protocol = module.params['auth_protocol']
        auth_key = module.params['auth_key']
        priv_protocol = module.params['priv_protocol']
        priv_key = module.params['priv_key']
        usm_user_name = module.params['usm_user_name']

        if local_user_name:
            # USM and local user configuration are mutually exclusive, and a
            # local user requires the full auth/priv parameter set.
            if usm_user_name:
                module.fail_json(
                    msg='Error: Please do not input usm_user_name and local_user_name at the same time.')
            if not auth_protocol or not auth_key or not priv_protocol or not priv_key:
                module.fail_json(
                    msg='Error: Please input auth_protocol auth_key priv_protocol priv_key for local user.')

            if len(local_user_name) > 32 or len(local_user_name) == 0:
                module.fail_json(
                    msg='Error: The length of local_user_name %s is out of [1 - 32].' % local_user_name)
            if len(auth_key) > 255 or len(auth_key) == 0:
                module.fail_json(
                    msg='Error: The length of auth_key %s is out of [1 - 255].' % auth_key)
            if len(priv_key) > 255 or len(priv_key) == 0:
                module.fail_json(
                    msg='Error: The length of priv_key %s is out of [1 - 255].' % priv_key)

            conf_str = CE_GET_SNMP_V3_LOCAL_USER
            recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)

            if "<data/>" in recv_xml:
                # Empty reply: no local users exist on the device.
                if state == "present":
                    need_cfg = True
            else:
                # Strip newlines and namespace declarations so the findall()
                # element path below stays simple.
                xml_str = recv_xml.replace('\r', '').replace('\n', '').\
                    replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
                    replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
                root = ElementTree.fromstring(xml_str)
                local_user_info = root.findall(
                    "data/snmp/localUsers/localUser")
                if local_user_info:
                    for tmp in local_user_info:
                        tmp_dict = dict()
                        for site in tmp:
                            if site.tag in ["userName", "authProtocol", "authKey", "privProtocol", "privKey"]:
                                tmp_dict[site.tag] = site.text
                        result["local_user_info"].append(tmp_dict)

                if result["local_user_info"]:
                    # Any mismatch (state present) or match (state absent)
                    # against an existing user flags that config is needed.
                    for tmp in result["local_user_info"]:
                        if "userName" in tmp.keys():
                            if state == "present":
                                if tmp["userName"] != local_user_name:
                                    need_cfg = True
                            else:
                                if tmp["userName"] == local_user_name:
                                    need_cfg = True
                        if auth_protocol:
                            if "authProtocol" in tmp.keys():
                                if state == "present":
                                    if tmp["authProtocol"] != auth_protocol:
                                        need_cfg = True
                                else:
                                    if tmp["authProtocol"] == auth_protocol:
                                        need_cfg = True
                        if auth_key:
                            if "authKey" in tmp.keys():
                                if state == "present":
                                    if tmp["authKey"] != auth_key:
                                        need_cfg = True
                                else:
                                    if tmp["authKey"] == auth_key:
                                        need_cfg = True
                        if priv_protocol:
                            if "privProtocol" in tmp.keys():
                                if state == "present":
                                    if tmp["privProtocol"] != priv_protocol:
                                        need_cfg = True
                                else:
                                    if tmp["privProtocol"] == priv_protocol:
                                        need_cfg = True
                        if priv_key:
                            if "privKey" in tmp.keys():
                                if state == "present":
                                    if tmp["privKey"] != priv_key:
                                        need_cfg = True
                                else:
                                    if tmp["privKey"] == priv_key:
                                        need_cfg = True

        result["need_cfg"] = need_cfg
        return result
    def merge_snmp_v3_usm_user(self, **kwargs):
        """ Merge snmp v3 usm user operation.

        Builds one NETCONF merge payload for the USM user and pushes it to
        the device; in parallel it assembles the equivalent CLI commands
        (with keys masked as ******) which are returned for the module's
        "updates" output. Calls module.fail_json() (which exits) when no
        engine id is available or the device does not answer <ok/>.
        """
        module = kwargs["module"]
        usm_user_name = module.params['usm_user_name']
        remote_engine_id = module.params['remote_engine_id']
        acl_number = module.params['acl_number']
        user_group = module.params['user_group']
        auth_protocol = module.params['auth_protocol']
        auth_key = module.params['auth_key']
        priv_protocol = module.params['priv_protocol']
        priv_key = module.params['priv_key']

        cmds = []

        # Header: remoteEngineID is a "true"/"false" flag; the engineID field
        # carries either the remote id or the device's local engine id.
        if remote_engine_id:
            conf_str = CE_MERGE_SNMP_V3_USM_USER_HEADER % (
                usm_user_name, "true", remote_engine_id)
            cmd = "snmp-agent remote-engineid %s usm-user v3 %s" % (
                remote_engine_id, usm_user_name)
        else:
            if not self.local_engine_id:
                module.fail_json(
                    msg='Error: The local engine id is null, please input remote_engine_id.')
            conf_str = CE_MERGE_SNMP_V3_USM_USER_HEADER % (
                usm_user_name, "false", self.local_engine_id)
            cmd = "snmp-agent usm-user v3 %s" % usm_user_name

        if user_group:
            conf_str += "<groupName>%s</groupName>" % user_group
            cmd += " %s" % user_group

        if acl_number:
            conf_str += "<aclNumber>%s</aclNumber>" % acl_number
            cmd += " acl %s" % acl_number

        # First CLI line: user / group / acl.
        cmds.append(cmd)

        # Second CLI line: authentication settings (key shown masked).
        if remote_engine_id:
            cmd = "snmp-agent remote-engineid %s usm-user v3 %s" % (
                remote_engine_id, usm_user_name)
        else:
            cmd = "snmp-agent usm-user v3 %s" % usm_user_name

        if auth_protocol:
            conf_str += "<authProtocol>%s</authProtocol>" % auth_protocol
            if auth_protocol != "noAuth":
                cmd += " authentication-mode %s" % auth_protocol

        if auth_key:
            conf_str += "<authKey>%s</authKey>" % auth_key
            if auth_protocol != "noAuth":
                cmd += " cipher %s" % "******"

        cmds.append(cmd)

        # Third CLI line: privacy settings (key shown masked).
        if remote_engine_id:
            cmd = "snmp-agent remote-engineid %s usm-user v3 %s" % (
                remote_engine_id, usm_user_name)
        else:
            cmd = "snmp-agent usm-user v3 %s" % usm_user_name

        if priv_protocol:
            conf_str += "<privProtocol>%s</privProtocol>" % priv_protocol
            if auth_protocol != "noAuth" and priv_protocol != "noPriv":
                cmd += " privacy-mode %s" % priv_protocol

        if priv_key:
            conf_str += "<privKey>%s</privKey>" % priv_key
            if auth_protocol != "noAuth" and priv_protocol != "noPriv":
                cmd += " cipher %s" % "******"

        cmds.append(cmd)

        conf_str += CE_MERGE_SNMP_V3_USM_USER_TAIL
        recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)

        if "<ok/>" not in recv_xml:
            module.fail_json(msg='Error: Merge snmp v3 usm user failed.')

        return cmds
def create_snmp_v3_usm_user(self, **kwargs):
    """Create an SNMPv3 USM user on the device via NETCONF.

    Builds the <create> XML payload from the module parameters and, in
    parallel, the equivalent CLI command strings that are reported back
    in the module's ``updates`` output.

    Keyword Args:
        module: the AnsibleModule instance (parameters are read from
            ``module.params``).

    Returns:
        list of str: CLI commands describing the applied change.

    Calls ``module.fail_json()`` if the device does not answer <ok/>.
    """
    module = kwargs["module"]
    usm_user_name = module.params['usm_user_name']
    remote_engine_id = module.params['remote_engine_id']
    acl_number = module.params['acl_number']
    user_group = module.params['user_group']
    auth_protocol = module.params['auth_protocol']
    auth_key = module.params['auth_key']
    priv_protocol = module.params['priv_protocol']
    priv_key = module.params['priv_key']
    cmds = []
    # The XML header embeds whether the user is bound to a remote engine
    # ("true") or the device's local engine id ("false").
    if remote_engine_id:
        conf_str = CE_CREATE_SNMP_V3_USM_USER_HEADER % (
            usm_user_name, "true", remote_engine_id)
        cmd = "snmp-agent remote-engineid %s usm-user v3 %s" % (
            remote_engine_id, usm_user_name)
    else:
        # local_engine_id is discovered by get_snmp_local_engine(); without
        # either engine id the user cannot be addressed.
        if not self.local_engine_id:
            module.fail_json(
                msg='Error: The local engine id is null, please input remote_engine_id.')
        conf_str = CE_CREATE_SNMP_V3_USM_USER_HEADER % (
            usm_user_name, "false", self.local_engine_id)
        cmd = "snmp-agent usm-user v3 %s" % usm_user_name
    if user_group:
        conf_str += "<groupName>%s</groupName>" % user_group
        cmd += " %s" % user_group
    if acl_number:
        conf_str += "<aclNumber>%s</aclNumber>" % acl_number
        cmd += " acl %s" % acl_number
    cmds.append(cmd)
    # Each CLI line starts again from the base user command; the XML
    # payload keeps accumulating into the single conf_str.
    if remote_engine_id:
        cmd = "snmp-agent remote-engineid %s usm-user v3 %s" % (
            remote_engine_id, usm_user_name)
    else:
        cmd = "snmp-agent usm-user v3 %s" % usm_user_name
    if auth_protocol:
        conf_str += "<authProtocol>%s</authProtocol>" % auth_protocol
        if auth_protocol != "noAuth":
            cmd += " authentication-mode %s" % auth_protocol
    if auth_key:
        conf_str += "<authKey>%s</authKey>" % auth_key
        if auth_protocol != "noAuth":
            # Keys are masked in the reported CLI output.
            cmd += " cipher %s" % "******"
    cmds.append(cmd)
    if remote_engine_id:
        cmd = "snmp-agent remote-engineid %s usm-user v3 %s" % (
            remote_engine_id, usm_user_name)
    else:
        cmd = "snmp-agent usm-user v3 %s" % usm_user_name
    if priv_protocol:
        conf_str += "<privProtocol>%s</privProtocol>" % priv_protocol
        # Privacy only makes sense when authentication is enabled.
        if auth_protocol != "noAuth" and priv_protocol != "noPriv":
            cmd += " privacy-mode %s" % priv_protocol
    if priv_key:
        conf_str += "<privKey>%s</privKey>" % priv_key
        if auth_protocol != "noAuth" and priv_protocol != "noPriv":
            cmd += " cipher %s" % "******"
    cmds.append(cmd)
    conf_str += CE_CREATE_SNMP_V3_USM_USER_TAIL
    recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
    if "<ok/>" not in recv_xml:
        module.fail_json(msg='Error: Create snmp v3 usm user failed.')
    return cmds
def delete_snmp_v3_usm_user(self, **kwargs):
    """Remove an SNMPv3 USM user from the device via NETCONF.

    Returns the single CLI "undo" command describing the change;
    fails the module if the device does not reply with <ok/>.
    """
    module = kwargs["module"]
    params = module.params
    user = params['usm_user_name']
    engine = params['remote_engine_id']

    if engine:
        conf_str = CE_DELETE_SNMP_V3_USM_USER_HEADER % (
            user, "true", engine)
        cmd = "undo snmp-agent remote-engineid %s usm-user v3 %s" % (
            engine, user)
    else:
        # No remote engine supplied: fall back to the discovered local one.
        if not self.local_engine_id:
            module.fail_json(
                msg='Error: The local engine id is null, please input remote_engine_id.')
        conf_str = CE_DELETE_SNMP_V3_USM_USER_HEADER % (
            user, "false", self.local_engine_id)
        cmd = "undo snmp-agent usm-user v3 %s" % user

    # Optional leaf elements; the order must match the device schema.
    optional_leaves = (
        ('groupName', params['user_group']),
        ('aclNumber', params['acl_number']),
        ('authProtocol', params['auth_protocol']),
        ('authKey', params['auth_key']),
        ('privProtocol', params['priv_protocol']),
        ('privKey', params['priv_key']),
    )
    for tag, value in optional_leaves:
        if value:
            conf_str += "<%s>%s</%s>" % (tag, value, tag)
    conf_str += CE_DELETE_SNMP_V3_USM_USER_TAIL

    recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
    if "<ok/>" not in recv_xml:
        module.fail_json(msg='Error: Delete snmp v3 usm user failed.')
    return cmd
def merge_snmp_v3_local_user(self, **kwargs):
    """Merge (update) SNMPv3 local-user configuration via NETCONF.

    Returns the equivalent CLI command string with keys masked;
    fails the module if the device does not reply with <ok/>.
    """
    module = kwargs["module"]
    p = module.params
    name = p['aaa_local_user']
    auth = p['auth_protocol']
    priv = p['priv_protocol']

    conf_str = CE_MERGE_SNMP_V3_LOCAL_USER % (
        name, auth, p['auth_key'], priv, p['priv_key'])
    recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
    if "<ok/>" not in recv_xml:
        module.fail_json(msg='Error: Merge snmp v3 local user failed.')

    # Keys are never echoed back; "******" stands in for both ciphers.
    cmd = ("snmp-agent local-user v3 %s " % name
           + "authentication-mode %s " % auth
           + "cipher ****** "
           + "privacy-mode %s " % priv
           + "cipher ******")
    return cmd
def create_snmp_v3_local_user(self, **kwargs):
    """Create SNMPv3 local-user configuration via NETCONF.

    Returns the equivalent CLI command string with keys masked;
    fails the module if the device does not reply with <ok/>.
    """
    module = kwargs["module"]
    p = module.params
    name = p['aaa_local_user']
    auth = p['auth_protocol']
    priv = p['priv_protocol']

    conf_str = CE_CREATE_SNMP_V3_LOCAL_USER % (
        name, auth, p['auth_key'], priv, p['priv_key'])
    recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
    if "<ok/>" not in recv_xml:
        module.fail_json(msg='Error: Create snmp v3 local user failed.')

    # Keys are never echoed back; "******" stands in for both ciphers.
    cmd = ("snmp-agent local-user v3 %s " % name
           + "authentication-mode %s " % auth
           + "cipher ****** "
           + "privacy-mode %s " % priv
           + "cipher ******")
    return cmd
def delete_snmp_v3_local_user(self, **kwargs):
    """Delete SNMPv3 local-user configuration via NETCONF.

    Returns the CLI "undo" command describing the change; fails the
    module if the device does not reply with <ok/>.
    """
    module = kwargs["module"]
    p = module.params
    name = p['aaa_local_user']

    # The delete payload still carries the full attribute tuple so the
    # device can match the exact user entry.
    conf_str = CE_DELETE_SNMP_V3_LOCAL_USER % (
        name, p['auth_protocol'], p['auth_key'],
        p['priv_protocol'], p['priv_key'])
    recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
    if "<ok/>" not in recv_xml:
        module.fail_json(msg='Error: Delete snmp v3 local user failed.')
    return "undo snmp-agent local-user v3 %s" % name
def get_snmp_local_engine(self, **kwargs):
    """Read the device's SNMP local engine id from the running config.

    Filters the configuration down to the "snmp-agent local-engineid"
    line and caches the id on ``self.local_engine_id``. Leaves the
    attribute untouched when no matching line is present.
    """
    module = kwargs["module"]
    regular = "| include snmp | include local-engineid"
    flags = list()
    flags.append(regular)
    tmp_cfg = get_config(module, flags)
    if tmp_cfg:
        tmp_data = tmp_cfg.split(r"snmp-agent local-engineid ")
        # BUGFIX: the filter can match "snmp" without containing the
        # "local-engineid" prefix, in which case split() yields a single
        # element and the original tmp_data[1] raised IndexError.
        if len(tmp_data) > 1:
            self.local_engine_id = tmp_data[1]
def main():
    """Module entry point: reconcile SNMPv3 USM/local user config.

    Gathers parameters, compares them with the device state and then
    creates, merges or deletes the SNMPv3 users as needed, finally
    exiting with proposed/existing/end_state/updates facts.
    """
    argument_spec = dict(
        state=dict(choices=['present', 'absent'], default='present'),
        acl_number=dict(type='str'),
        usm_user_name=dict(type='str'),
        remote_engine_id=dict(type='str'),
        user_group=dict(type='str'),
        auth_protocol=dict(choices=['noAuth', 'md5', 'sha']),
        auth_key=dict(type='str', no_log=True),
        priv_protocol=dict(
            choices=['noPriv', 'des56', '3des168', 'aes128', 'aes192', 'aes256']),
        priv_key=dict(type='str', no_log=True),
        aaa_local_user=dict(type='str')
    )
    # BUGFIX: the original constraint referenced "local_user_name", which is
    # not a declared option, so the mutual exclusion never took effect.
    # The actual option name is "aaa_local_user".
    mutually_exclusive = [("usm_user_name", "aaa_local_user")]
    argument_spec.update(ce_argument_spec)
    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=mutually_exclusive,
        supports_check_mode=True
    )

    changed = False
    proposed = dict()
    existing = dict()
    end_state = dict()
    updates = []

    state = module.params['state']

    snmp_user_obj = SnmpUser()
    # NOTE: a class instance is always truthy; kept for parity with the
    # original defensive check.
    if not snmp_user_obj:
        module.fail_json(msg='Error: Init module failed.')

    # Record every explicitly supplied parameter as "proposed".
    proposed["state"] = state
    for key in ('acl_number', 'usm_user_name', 'remote_engine_id',
                'user_group', 'auth_protocol', 'auth_key',
                'priv_protocol', 'priv_key', 'aaa_local_user'):
        if module.params[key]:
            proposed[key] = module.params[key]

    snmp_v3_usm_user_rst = snmp_user_obj.check_snmp_v3_usm_user_args(
        module=module)
    snmp_v3_local_user_rst = snmp_user_obj.check_snmp_v3_local_user_args(
        module=module)
    snmp_user_obj.get_snmp_local_engine(module=module)

    # Snapshot existing snmp v3 user config (everything but the
    # "need_cfg" marker key).
    exist_tmp = dict()
    for item in snmp_v3_usm_user_rst:
        if item != "need_cfg":
            exist_tmp[item] = snmp_v3_usm_user_rst[item]
    if exist_tmp:
        existing["snmp usm user"] = exist_tmp

    exist_tmp = dict()
    for item in snmp_v3_local_user_rst:
        if item != "need_cfg":
            exist_tmp[item] = snmp_v3_local_user_rst[item]
    if exist_tmp:
        existing["snmp local user"] = exist_tmp

    if state == "present":
        if snmp_v3_usm_user_rst["need_cfg"]:
            # Merge when the user already exists, otherwise create it.
            if len(snmp_v3_usm_user_rst["usm_user_info"]) != 0:
                cmd = snmp_user_obj.merge_snmp_v3_usm_user(module=module)
            else:
                cmd = snmp_user_obj.create_snmp_v3_usm_user(module=module)
            changed = True
            updates.append(cmd)
        if snmp_v3_local_user_rst["need_cfg"]:
            if len(snmp_v3_local_user_rst["local_user_info"]) != 0:
                cmd = snmp_user_obj.merge_snmp_v3_local_user(
                    module=module)
            else:
                cmd = snmp_user_obj.create_snmp_v3_local_user(
                    module=module)
            changed = True
            updates.append(cmd)
    else:
        if snmp_v3_usm_user_rst["need_cfg"]:
            cmd = snmp_user_obj.delete_snmp_v3_usm_user(module=module)
            changed = True
            updates.append(cmd)
        if snmp_v3_local_user_rst["need_cfg"]:
            cmd = snmp_user_obj.delete_snmp_v3_local_user(module=module)
            changed = True
            updates.append(cmd)

    # Re-read the device to report the post-change state.
    snmp_v3_usm_user_rst = snmp_user_obj.check_snmp_v3_usm_user_args(
        module=module)
    end_tmp = dict()
    for item in snmp_v3_usm_user_rst:
        if item != "need_cfg":
            end_tmp[item] = snmp_v3_usm_user_rst[item]
    if end_tmp:
        end_state["snmp usm user"] = end_tmp

    snmp_v3_local_user_rst = snmp_user_obj.check_snmp_v3_local_user_args(
        module=module)
    end_tmp = dict()
    for item in snmp_v3_local_user_rst:
        if item != "need_cfg":
            end_tmp[item] = snmp_v3_local_user_rst[item]
    if end_tmp:
        end_state["snmp local user"] = end_tmp

    results = dict()
    results['proposed'] = proposed
    results['existing'] = existing
    results['changed'] = changed
    results['end_state'] = end_state
    results['updates'] = updates

    module.exit_json(**results)
# Standard Ansible module entry point: run only when executed directly.
if __name__ == '__main__':
    main()
|
|
import shutil
import os
import pwd
import json
from urlparse import urljoin
os.getlogin = lambda: pwd.getpwuid(os.getuid())[0] # noqa
import requests
from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.db.models.signals import post_save
from django.dispatch import receiver
from unicoremc import constants, exceptions, mappings
from unicoremc.managers import (
NginxManager, SettingsManager, DbManager, ProjectInfrastructureManager)
from unicoremc.websites.managers import (
UnicoreCmsWebsiteManager, SpringboardWebsiteManager,
AggregatorWebsiteManager)
from git import Repo
from elasticgit.storage import StorageManager
from elasticgit import EG
from unicore.content.models import (
Category, Page, Localisation as EGLocalisation)
from unicoremc.utils import get_hub_app_client
from ws4redis.publisher import RedisPublisher
from ws4redis.redis_store import RedisMessage
class Localisation(models.Model):
    """
    A language/country pair identifying one site localisation.

    Stolen from praekelt/unicore-cms-django.git :: models.Localisation
    """
    # ISO 3166 two-letter country code.
    country_code = models.CharField(
        _('2 letter country code'), max_length=2,
        help_text=(
            'See http://www.worldatlas.com/aatlas/ctycodes.htm '
            'for reference.'))
    # ISO 639-2 three-letter language code.
    language_code = models.CharField(
        _('3 letter language code'), max_length=3,
        help_text=(
            'See http://www.loc.gov/standards/iso639-2/php/code_list.php '
            'for reference.'))

    class Meta:
        ordering = ('language_code', )

    @classmethod
    def _for(cls, language):
        """Get or create the Localisation for a 'lang_COUNTRY' code."""
        lang, _sep, country = language.partition('_')
        localisation, _created = cls.objects.get_or_create(
            language_code=lang, country_code=country)
        return localisation

    def get_code(self):
        """Return the combined 'lang_COUNTRY' code."""
        return u'%s_%s' % (self.language_code, self.country_code)

    def get_display_name(self):
        """Human-readable language name (looked up in constants)."""
        return unicode(constants.LANGUAGES.get(self.language_code))

    def __unicode__(self):
        name = constants.LANGUAGES.get(self.language_code)
        place = constants.COUNTRIES.get(self.country_code)
        return u'%s (%s)' % (name, place)
class AppType(models.Model):
    """A deployable application flavour (unicore-cms or springboard)."""

    UNICORE_CMS = 'unicore-cms'
    SPRINGBOARD = 'springboard'
    PROJECT_TYPES = (
        (UNICORE_CMS, 'unicore-cms'),
        (SPRINGBOARD, 'springboard'),
    )

    name = models.CharField(max_length=256, blank=True, null=True)
    docker_image = models.CharField(max_length=256, blank=True, null=True)
    title = models.TextField(blank=True, null=True)
    project_type = models.CharField(
        choices=PROJECT_TYPES, max_length=256, default=UNICORE_CMS)

    class Meta:
        ordering = ('title', )

    @classmethod
    def _for(cls, name, title, project_type, docker_image):
        """Get or create an AppType matching all given attributes."""
        application_type, _created = cls.objects.get_or_create(
            name=name, title=title, project_type=project_type,
            docker_image=docker_image)
        return application_type

    def to_dict(self):
        """Plain-dict representation for serialization."""
        return {
            'name': self.name,
            'title': self.title,
            'docker_image': self.docker_image,
            'project_type': self.project_type,
        }

    def get_qualified_name(self):
        """Return '<project_type>-<name>'."""
        return "%(project_type)s-%(app_type)s" % {
            'project_type': self.project_type,
            'app_type': self.name,
        }

    def __unicode__(self):
        return u'%s (%s)' % (self.title, self.project_type)
class ProjectRepo(models.Model):
    """Git repository holding a project's content."""

    project = models.OneToOneField(
        'Project', primary_key=True, related_name='repo')
    base_url = models.URLField()
    git_url = models.URLField(blank=True, null=True)
    url = models.URLField(blank=True, null=True)

    def __unicode__(self):
        # NOTE(review): returns None when no url is set; stringifying a
        # url-less repo will then fail — confirm this is intended.
        if self.url:
            return os.path.basename(self.url)
        return None

    def name(self):
        """Repository name derived from the owning project and settings."""
        return constants.NEW_REPO_NAME_FORMAT % {
            'app_type': self.project.app_type,
            'country': self.project.country.lower(),
            'suffix': settings.GITHUB_REPO_NAME_SUFFIX}
class ProjectManager(models.Manager):
    '''
    Manager that eagerly loads related rows for performance:
    select_related for application_type/repo/organization and
    prefetch_related for external_repos.
    '''
    def get_queryset(self):
        base = super(ProjectManager, self).get_queryset()
        base = base.select_related('application_type', 'repo', 'organization')
        return base.prefetch_related('external_repos')
class Project(models.Model):
    """A deployed Unicore site for one country and application type.

    Owns the full site lifecycle: content repo(s) on GitHub, local git
    clones, Elasticsearch indexes, generated nginx/settings files, the
    per-project database, the Marathon app, and the hub registration.
    Filesystem/DB/nginx work is delegated to the manager objects created
    in __init__.
    """
    objects = ProjectManager()
    application_type = models.ForeignKey(AppType, blank=True, null=True)
    country = models.CharField(
        choices=constants.COUNTRY_CHOICES, max_length=256)
    # Repos owned by other projects but served by this one (aggregator).
    external_repos = models.ManyToManyField(
        ProjectRepo, blank=True, null=True, related_name='external_projects')
    state = models.CharField(max_length=50, default='initial')
    project_version = models.PositiveIntegerField(default=0)
    available_languages = models.ManyToManyField(
        Localisation, blank=True, null=True)
    default_language = models.ForeignKey(
        Localisation, blank=True, null=True,
        related_name='default_language')
    ga_profile_id = models.TextField(blank=True, null=True)
    ga_account_id = models.TextField(blank=True, null=True)
    # Space-separated extra domains; see get_*_custom_domain_list().
    frontend_custom_domain = models.TextField(
        blank=True, null=True, default='')
    cms_custom_domain = models.TextField(
        blank=True, null=True, default='')
    hub_app_id = models.CharField(blank=True, null=True, max_length=32)
    # Marathon/Mesos resource allocation for the frontend container.
    marathon_cpus = models.FloatField(
        default=settings.MESOS_DEFAULT_CPU_SHARE)
    marathon_mem = models.FloatField(
        default=settings.MESOS_DEFAULT_MEMORY_ALLOCATION)
    marathon_instances = models.IntegerField(
        default=settings.MESOS_DEFAULT_INSTANCES)
    marathon_health_check_path = models.CharField(
        max_length=255, blank=True, null=True)
    docker_cmd = models.TextField(blank=True, null=True)
    custom_frontend_settings = models.TextField(blank=True, null=True)
    # Ownership and auth fields
    owner = models.ForeignKey('auth.User')
    team_id = models.IntegerField(blank=True, null=True)
    organization = models.ForeignKey(
        'organizations.Organization', blank=True, null=True)

    class Meta:
        ordering = ('application_type__title', 'country')

    def __init__(self, *args, **kwargs):
        # Attach per-instance infrastructure helpers (not persisted).
        super(Project, self).__init__(*args, **kwargs)
        self.nginx_manager = NginxManager()
        self.settings_manager = SettingsManager()
        self.db_manager = DbManager()
        self.infra_manager = ProjectInfrastructureManager(self)

    @property
    def app_type(self):
        # Short app-type name, or '' when no application_type is linked.
        if self.application_type:
            return self.application_type.name
        return ''

    @property
    def app_id(self):
        # Unique identifier used for domains and the Marathon app id.
        return "%(app_type)s-%(country)s-%(id)s" % {
            'app_type': self.app_type,
            'country': self.country.lower(),
            'id': self.id,
        }

    def own_repo(self):
        """Return this project's own ProjectRepo, or None if it has none."""
        try:
            return self.repo
        except ProjectRepo.DoesNotExist:
            return None

    def all_repos(self):
        """All repos served by this project: own repo first, then external."""
        external_repos = list(self.external_repos.all())
        own_repo = self.own_repo()
        if own_repo:
            return [own_repo] + external_repos
        return external_repos

    def get_state_display(self):
        # Delegate to the website manager's workflow state machine.
        return self.get_website_manager().workflow.get_state()

    def get_generic_domain(self):
        """Frontend domain under the hub subdomain."""
        return '%(app_id)s.%(hub)s.unicore.io' % {
            'app_id': self.app_id,
            'hub': settings.HUB_SUBDOMAIN
        }

    def get_generic_content_domain(self):
        """CMS/content domain under the CMS subdomain."""
        return '%(app_id)s.%(hub)s.unicore.io' % {
            'app_id': self.app_id,
            'hub': settings.CMS_SUBDOMAIN
        }

    def get_country_domain(self):
        """Country-scoped frontend domain (no project id)."""
        return "%(country)s-%(app_type)s.%(hub)s.unicore.io" % {
            'country': self.country.lower(),
            'app_type': self.app_type,
            'hub': settings.HUB_SUBDOMAIN
        }

    def get_frontend_custom_domain_list(self):
        """Custom frontend domains as full http:// URLs."""
        return [
            'http://%s' % url
            for url in (self.frontend_custom_domain.split(' ')
                        if self.frontend_custom_domain else [])
        ]

    def get_cms_custom_domain_list(self):
        """Custom CMS domains as full http:// URLs."""
        return [
            'http://%s' % url
            for url in (self.cms_custom_domain.split(' ')
                        if self.cms_custom_domain else [])
        ]

    def to_dict(self):
        """Serializable snapshot used by the API and websocket broadcast."""
        return {
            'id': self.id,
            'app_id': self.app_id,
            'app_type': self.app_type,
            'application_type': self.application_type.to_dict()
            if self.application_type else None,
            'base_repo_urls': [r.base_url for r in self.all_repos()],
            'country': self.country,
            'country_display': self.get_country_display(),
            'state': self.state,
            'state_display': self.get_state_display(),
            'repo_urls': [r.url for r in self.all_repos()],
            'repo_git_urls': [r.git_url for r in self.all_repos()],
            'team_id': self.team_id,
            'available_languages': [
                lang.get_code() for lang in self.available_languages.all()],
            'default_language': self.default_language.get_code()
            if self.default_language else None,
            'ga_profile_id': self.ga_profile_id or '',
            'ga_account_id': self.ga_account_id or '',
            'frontend_custom_domain': self.frontend_custom_domain or '',
            'cms_custom_domain': self.cms_custom_domain or '',
            'hub_app_id': self.hub_app_id or '',
            'docker_cmd': self.docker_cmd or '',
            'custom_frontend_settings': self.custom_frontend_settings or '',
        }

    def get_website_manager(self):
        """Pick the website manager matching this project's type.

        Raises ProjectTypeRequiredException / ProjectTypeUnknownException
        when the type is missing or unrecognised.
        """
        if not (self.application_type and self.application_type.project_type):
            raise exceptions.ProjectTypeRequiredException(
                'project_type is required')
        # No own repo means this project only aggregates external repos.
        if not self.own_repo():
            return AggregatorWebsiteManager(self)
        if self.application_type.project_type == AppType.UNICORE_CMS:
            return UnicoreCmsWebsiteManager(self)
        if self.application_type.project_type == AppType.SPRINGBOARD:
            return SpringboardWebsiteManager(self)
        raise exceptions.ProjectTypeUnknownException(
            'project_type is unknown')

    def frontend_url(self):
        return 'http://%s' % self.get_generic_domain()

    def content_url(self):
        # NOTE(review): unlike frontend_url/cms_url, no scheme prefix here.
        return "%(country)s-%(app_type)s.%(hub)s.unicore.io" % {
            'country': self.country.lower(),
            'app_type': self.app_type,
            'hub': settings.CMS_SUBDOMAIN
        }

    def cms_url(self):
        return 'http://cms.%s' % self.get_generic_domain()

    def repo_path(self):
        """Local checkout path of the CMS content repo."""
        repo_folder_name = '%(app_type)s-%(country)s' % {
            'app_type': self.app_type,
            'country': self.country.lower()
        }
        return os.path.join(settings.CMS_REPO_PATH, repo_folder_name)

    def frontend_repo_path(self):
        """Local checkout path of the frontend content repo."""
        repo_folder_name = '%(app_type)s-%(country)s' % {
            'app_type': self.app_type,
            'country': self.country.lower()
        }
        return os.path.join(settings.FRONTEND_REPO_PATH, repo_folder_name)

    def hub_app_title(self):
        return '%s - %s' % (
            self.application_type.title, self.get_country_display())

    def hub_app(self):
        """Return the hub app for this project, fetching it lazily.

        Returns None when no hub app id is stored or no client is
        configured; caches the fetched app on the instance.
        """
        if self.hub_app_id is None:
            return None
        if not getattr(self, '_hub_app', None):
            client = get_hub_app_client()
            if client is None:
                return None
            self._hub_app = client.get_app(self.hub_app_id)
        return self._hub_app

    def create_or_update_hub_app(self):
        """Register this project with the hub, or refresh its record.

        Returns the hub app object, or None when no client is configured.
        """
        client = get_hub_app_client()
        if client is None:
            return None
        if self.hub_app_id:
            app = client.get_app(self.hub_app_id)
            app.set('title', self.hub_app_title())
            app.set('url', self.frontend_url())
            app.save()
        else:
            app = client.create_app({
                'title': self.hub_app_title(),
                'url': self.frontend_url()
            })
            # Persist the new hub id immediately.
            self.hub_app_id = app.get('uuid')
            self.save()
        self._hub_app = app
        return app

    def create_repo(self):
        """Create the GitHub repo for this project via the GitHub API.

        Raises GithubApiException on any non-201 response; stores the
        resulting clone/git URLs on the ProjectRepo row.
        """
        repo_db = self.own_repo()
        new_repo_name = repo_db.name()
        post_data = {
            "name": new_repo_name,
            "description": "A Unicore CMS content repo for %s %s" % (
                self.app_type, self.country),
            "homepage": "https://github.com",
            "private": False,
            "has_issues": True,
            "auto_init": True,
            "team_id": self.team_id,
        }
        resp = requests.post(
            urljoin(settings.GITHUB_API, 'repos'),
            json=post_data,
            auth=(settings.GITHUB_USERNAME, settings.GITHUB_TOKEN))
        if resp.status_code != 201:
            raise exceptions.GithubApiException(
                'Create repo failed with response: %s - %s' %
                (resp.status_code, resp.json().get('message')))
        repo_db.url = resp.json().get('clone_url')
        repo_db.git_url = resp.json().get('git_url')
        repo_db.save()

    def clone_repo(self):
        """Clone the project's repo locally and initialise elasticgit storage."""
        repo = Repo.clone_from(self.own_repo().url, self.repo_path())
        sm = StorageManager(repo)
        sm.create_storage()
        sm.write_config('user', {
            'name': self.owner.username,
            'email': self.owner.email,
        })
        # Github creates a README.md when initializing a repo
        # We need to remove this to avoid conflicts
        readme_path = os.path.join(self.repo_path(), 'README.md')
        if os.path.exists(readme_path):
            repo.index.remove([readme_path])
            repo.index.commit('remove initial readme')
            os.remove(readme_path)

    def create_remote(self):
        # Point "upstream" at the base repo this project was forked from.
        repo = Repo(self.repo_path())
        repo.create_remote('upstream', self.own_repo().base_url)

    def merge_remote(self):
        """Fast-forward the local clone from the upstream remote."""
        index_prefix = 'unicore_cms_%(app_type)s_%(country)s' % {
            'app_type': self.app_type,
            'country': self.country.lower(),
        }
        workspace = self.setup_workspace(self.repo_path(), index_prefix)
        workspace.fast_forward(remote_name='upstream')

    def push_repo(self):
        repo = Repo(self.repo_path())
        origin = repo.remote(name='origin')
        origin.push()

    def setup_workspace(self, repo_path, index_prefix):
        """Create a fresh elasticgit workspace for repo_path.

        Destroys any existing index for the active branch, waits for the
        new index, and installs the custom mappings.
        """
        workspace = EG.workspace(
            repo_path, index_prefix=index_prefix,
            es={'urls': settings.ELASTICSEARCH_HOST})
        branch = workspace.sm.repo.active_branch
        if workspace.im.index_exists(branch.name):
            workspace.im.destroy_index(branch.name)
        workspace.setup(self.owner.username, self.owner.email)
        # Busy-wait until Elasticsearch reports the index ready.
        while not workspace.index_ready():
            pass
        workspace.setup_custom_mapping(Category, mappings.CategoryMapping)
        workspace.setup_custom_mapping(Page, mappings.PageMapping)
        workspace.setup_custom_mapping(EGLocalisation,
                                       mappings.LocalisationMapping)
        return workspace

    def sync_cms_index(self):
        """Sync content models into the CMS Elasticsearch index."""
        index_prefix = 'unicore_cms_%(app_type)s_%(country)s' % {
            'app_type': self.app_type,
            'country': self.country.lower(),
        }
        workspace = EG.workspace(
            self.repo_path(), index_prefix=index_prefix,
            es={'urls': settings.ELASTICSEARCH_HOST})
        workspace.sync(Category)
        workspace.sync(Page)
        workspace.sync(EGLocalisation)

    def sync_frontend_index(self):
        """Rebuild and sync the frontend Elasticsearch index."""
        index_prefix = 'unicore_frontend_%(app_type)s_%(country)s' % {
            'app_type': self.app_type,
            'country': self.country.lower(),
        }
        ws = self.setup_workspace(self.frontend_repo_path(), index_prefix)
        ws.sync(Category)
        ws.sync(Page)
        ws.sync(EGLocalisation)

    def init_workspace(self):
        self.sync_cms_index()
        self.create_unicore_distribute_repo()

    def create_nginx(self):
        # NOTE(review): cms_custom_domain is null=True — a None value would
        # make ' '.join raise TypeError; confirm callers always store ''.
        domain = ' '.join([
            self.get_generic_content_domain(), self.cms_custom_domain])
        self.nginx_manager.write_cms_nginx(
            self.app_type, self.country, domain.strip())

    def create_pyramid_settings(self):
        """Write frontend settings for the project's website flavour.

        Raises ProjectTypeRequiredException for unknown project types.
        """
        if self.application_type.project_type == AppType.UNICORE_CMS:
            self.settings_manager.write_frontend_settings(
                self.app_type,
                self.country,
                self.available_languages.all(),
                self.default_language or Localisation._for('eng_GB'),
                self.ga_profile_id,
                self.hub_app(),
                self.all_repos()[0].name(),
                self.custom_frontend_settings
            )
        elif self.application_type.project_type == AppType.SPRINGBOARD:
            self.settings_manager.write_springboard_settings(
                self.app_type,
                self.country,
                self.available_languages.all(),
                self.default_language or Localisation._for('eng_GB'),
                self.ga_profile_id,
                self.hub_app(),
                [repo.name() for repo in self.all_repos()],
                self.custom_frontend_settings
            )
        else:
            raise exceptions.ProjectTypeRequiredException(
                'project_type is required')

    def create_cms_settings(self):
        """Write CMS settings and config files for this project."""
        self.settings_manager.write_cms_settings(
            self.app_type,
            self.country,
            self.own_repo().url,
            self.repo_path()
        )
        self.settings_manager.write_cms_config(
            self.app_type,
            self.country,
            self.own_repo().url,
            self.repo_path()
        )

    def create_webhook(self):
        """Register a GitHub push webhook that notifies the frontend.

        Raises GithubApiException on any non-201 response.
        """
        repo_name = self.own_repo().name()
        post_data = {
            "name": "web",
            "active": True,
            "events": ["push"],
            "config": {
                "url": "%s/api/notify/" % self.frontend_url(),
                "content_type": "json"
            }
        }
        resp = requests.post(
            settings.GITHUB_HOOKS_API % {'repo': repo_name},
            json=post_data,
            auth=(settings.GITHUB_USERNAME, settings.GITHUB_TOKEN))
        if resp.status_code != 201:
            raise exceptions.GithubApiException(
                'Create hooks failed with response: %s - %s' %
                (resp.status_code, resp.json().get('message')))

    def create_unicore_distribute_repo(self):
        """Ask unicore-distribute to clone this project's repo.

        Raises UnicoreDistributeApiException on any non-200 response.
        """
        post_data = {
            "repo_url": self.own_repo().git_url
        }
        resp = requests.post(
            '%s/repos.json' % settings.UNICORE_DISTRIBUTE_HOST,
            json=post_data)
        if resp.status_code != 200:
            raise exceptions.UnicoreDistributeApiException(
                'Clone repo failed with response: %s - %s' %
                (resp.status_code, resp.json().get('errors')))

    def create_db(self):
        self.db_manager.create_db(self.app_type, self.country)

    def init_db(self):
        self.db_manager.init_db(
            self.app_type, self.country, push_to_git=True)

    def create_marathon_app(self):
        self.initiate_create_marathon_app()

    def get_marathon_app_data(self):
        """Build the Marathon app definition for this project's container.

        Raises ProjectTypeRequiredException when no project type is set.
        """
        if not (self.application_type and self.application_type.project_type):
            raise exceptions.ProjectTypeRequiredException(
                'project_type is required')
        # NOTE(review): frontend_custom_domain is null=True — a None value
        # would render as the string "None" here; confirm upstream stores ''.
        domain = "%(generic_domain)s %(custom)s" % {
            'generic_domain': self.get_generic_domain(),
            'custom': self.frontend_custom_domain
        }
        app_data = {
            "id": self.app_id,
            "cmd": self.docker_cmd,
            "cpus": self.marathon_cpus,
            "mem": self.marathon_mem,
            "instances": self.marathon_instances,
            "labels": {
                "domain": domain.strip(),
                "country": self.get_country_display(),
                "project_type": self.application_type.project_type,
                "HAPROXY_GROUP": "external",
                "HAPROXY_0_VHOST": domain.strip(),
            },
            "container": {
                "type": "DOCKER",
                "docker": {
                    "image": self.application_type.docker_image,
                    "forcePullImage": True,
                    "network": "BRIDGE",
                    "portMappings": [{"containerPort": 5656, "hostPort": 0}],
                    "parameters": [{
                        "key": "add-host",
                        "value": "servicehost:%s" % settings.SERVICE_HOST_IP}]
                },
                "volumes": [{
                    "containerPath": "/var/unicore-configs",
                    "hostPath": settings.UNICORE_CONFIGS_INSTALL_DIR,
                    "mode": "RO"
                }]
            }
        }
        # Only attach health checks when a path has been configured.
        if self.marathon_health_check_path:
            app_data.update({
                "ports": [0],
                "healthChecks": [{
                    "gracePeriodSeconds": 3,
                    "intervalSeconds": 10,
                    "maxConsecutiveFailures": 3,
                    "path": self.marathon_health_check_path,
                    "portIndex": 0,
                    "protocol": "HTTP",
                    "timeoutSeconds": 5
                }]
            })
        return app_data

    def initiate_create_marathon_app(self):
        """POST the app definition to Marathon; raise on non-201."""
        post_data = self.get_marathon_app_data()
        resp = requests.post(
            '%s/v2/apps' % settings.MESOS_MARATHON_HOST,
            json=post_data)
        if resp.status_code != 201:
            raise exceptions.MarathonApiException(
                'Create Marathon app failed with response: %s - %s' %
                (resp.status_code, resp.json().get('message')))

    def update_marathon_app(self):
        """PUT the updated app definition to Marathon; raise on failure."""
        post_data = self.get_marathon_app_data()
        # Marathon expects the id in the URL, not the payload, for PUT.
        app_id = post_data.pop('id')
        resp = requests.put(
            '%(host)s/v2/apps/%(id)s' % {
                'host': settings.MESOS_MARATHON_HOST,
                'id': app_id
            },
            json=post_data)
        if resp.status_code not in [200, 201]:
            raise exceptions.MarathonApiException(
                'Update Marathon app failed with response: %s - %s' %
                (resp.status_code, resp.json().get('message')))

    def marathon_restart_app(self):
        """Trigger a rolling restart of the Marathon app; raise on failure."""
        resp = requests.post(
            '%(host)s/v2/apps/%(id)s/restart' % {
                'host': settings.MESOS_MARATHON_HOST,
                'id': self.app_id
            },
            json={})
        if resp.status_code != 200:
            raise exceptions.MarathonApiException(
                'Restart Marathon app failed with response: %s - %s' %
                (resp.status_code, resp.json().get('message')))

    def exists_on_marathon(self):
        """Return True if Marathon knows an app with this project's id."""
        resp = requests.get(
            '%(host)s/v2/apps/%(id)s' % {
                'host': settings.MESOS_MARATHON_HOST,
                'id': self.app_id
            },
            json={})
        return resp.status_code == 200

    def destroy(self):
        """Tear down local artifacts: clone, nginx, settings and database."""
        shutil.rmtree(self.repo_path(), ignore_errors=True)
        self.nginx_manager.destroy(self.app_type, self.country)
        self.settings_manager.destroy(self.app_type, self.country)
        if self.application_type.project_type == AppType.UNICORE_CMS:
            self.settings_manager.destroy_unicore_cms_settings(
                self.app_type, self.country)
        if self.application_type.project_type == AppType.SPRINGBOARD:
            self.settings_manager.destroy_springboard_settings(
                self.app_type, self.country)
        self.db_manager.destroy(self.app_type, self.country)
@receiver(post_save, sender=Project)
def publish_to_websocket(sender, instance, created, **kwargs):
    '''
    Broadcasts the state of a project when it is saved.
    broadcast channel: progress
    '''
    # TODO: apply permissions here?
    payload = instance.to_dict()
    payload['is_created'] = created
    publisher = RedisPublisher(facility='progress', broadcast=True)
    publisher.publish_message(RedisMessage(json.dumps(payload)))
|
|
# This file is part of fesom_viz
#
################################################################################
#
# Interpolates FESOM data to regular grid. The output is netCDF4 files
# with CMOR compliant attributes.
#
# Original code by Nikolay Koldunov, 2016
#
# TODO:
# Add possibility to define curvilinear grid.
# Add ESMPy as regridding tool.
# Find official CMOR descriptions for some of the variables
# Modifications:
#
################################################################################
import sys
import ConfigParser
from netCDF4 import Dataset, num2date
import numpy as np
import json
from collections import OrderedDict
import os
import datetime
# Read configuration file
try:
    config_file = sys.argv[1]
except IndexError:
    # BUGFIX: only catch a missing argv entry; the original bare
    # "except:" also swallowed SystemExit/KeyboardInterrupt.
    print("You have to provide configuration file. Example config located in \"./configs/fesom2geo_example\"")
    sys.exit(1)

config = ConfigParser.RawConfigParser()
config.read(config_file)

# There is an option to provide path to the pyfesom folder
pfpath = config.get('main', 'pfpath')
sys.path.append(pfpath)
import pyfesom as pf
# Read options from configuration file. See fesom2geo_example for
# explination and possible values
left_lon = config.getfloat('main', 'left_lon')
right_lon = config.getfloat('main', 'right_lon')
number_of_lons = config.getint('main', 'number_of_lons')
lower_lat = config.getfloat('main', 'lower_lat')
upper_lat = config.getfloat('main', 'upper_lat')
number_of_lats = config.getint('main', 'number_of_lats')
meshpath = config.get('main', 'meshpath')
path_to_data = config.get('main', 'path_to_data')
path_to_output = config.get('main', 'path_to_output')
zlib = config.getboolean('main', 'zlib')
radius_of_influence = config.getint('main', 'radius_of_influence')
k = config.getint('main', 'neighboring_points')
out_vars = config.get('main', 'out_vars').split(',')
out_vars = [w.strip() for w in out_vars]
print('='*50)
print("Variables that will be converted: {}".format(out_vars))
levels = np.asarray(config.get('main', 'levels').split(','), dtype='float')
angles_for_mesh = list(map(int,config.get('main','angles_for_mesh').split(',')))
angles_for_rotation = list(map(int,config.get('main','angles_for_rotation').split(',')))
start_year = config.getint('main','start_year')
end_year = config.getint('main','end_year')
ifile_template = config.get('main','ifile_template')
ifile_template_ice = config.get('main','ifile_template_ice')
ofile_template =config.get('main','ofile_template')
distribute_timesteps = config.getboolean('main', 'distribute_timesteps')
# Generate regular grid
lon = np.linspace(left_lon, right_lon, number_of_lons)
lat = np.linspace(lower_lat, upper_lat, number_of_lats)
lons, lats = np.meshgrid(lon,lat)
# read the FESOM mesh
print('='*50)
mesh = pf.load_mesh(meshpath,abg=angles_for_mesh, get3d=True, usepickle=True)
# Open CMOR variable descriptions
with open('CMIP6_Omon.json') as data_file:
cmore_table = json.load(data_file, object_pairs_hook=OrderedDict)
with open('CMIP6_SIday.json') as data_file:
cmore_table_ice = json.load(data_file, object_pairs_hook=OrderedDict)
# Add some variables that are missing in CMOR tables
cmore_table['variable_entry']['wo']= OrderedDict([(u'modeling_realm', u'ocean'),
(u'standard_name', u'sea_water_z_velocity'),
(u'units', u'm s-1'),
(u'cell_methods', u'time: mean'),
(u'cell_measures', u'--OPT'),
(u'long_name', u'Sea Water Z Velocity'),
(u'comment',
u'Not standard CMORE variable'),
(u'dimensions', u'longitude latitude olevel time'),
(u'out_name', u'wo'),
(u'type', u'real'),
(u'positive', u''),
(u'valid_min', u''),
(u'valid_max', u''),
(u'ok_min_mean_abs', u''),
(u'ok_max_mean_abs', u'')])
cmore_table['variable_entry']['wpot']= OrderedDict([(u'modeling_realm', u'ocean'),
(u'standard_name', u'sea_water_z_velocity'),
(u'units', u'm s-1'),
(u'cell_methods', u'time: mean'),
(u'cell_measures', u'--OPT'),
(u'long_name', u'Vertical Velocity Potential'),
(u'comment',
u'Not standard CMORE variable'),
(u'dimensions', u'longitude latitude olevel time'),
(u'out_name', u'wpot'),
(u'type', u'real'),
(u'positive', u''),
(u'valid_min', u''),
(u'valid_max', u''),
(u'ok_min_mean_abs', u''),
(u'ok_max_mean_abs', u'')])
cmore_table_ice['variable_entry']['esithick'] = OrderedDict([(u'modeling_realm', u'seaIce'),
(u'standard_name', u'effective_sea_ice_thickness'),
(u'units', u'm'),
(u'cell_methods', u'area: mean where sea_ice time: mean'),
(u'cell_measures', u'area: areacella'),
(u'long_name', u'Effective Sea-ice thickness'),
(u'comment',
u'Effective thickness of sea ice (volume divided by grid area as was done in CMIP5)'),
(u'dimensions', u'longitude latitude time'),
(u'out_name', u'esithick'),
(u'type', u''),
(u'positive', u''),
(u'valid_min', u''),
(u'valid_max', u''),
(u'ok_min_mean_abs', u''),
(u'ok_max_mean_abs', u'')])
#combine ocean and ice variables
cmore_table['variable_entry'].update(cmore_table_ice['variable_entry']) #
# Map FESOM variable names to CMOR output names plus the metadata the
# conversion loop needs:
#   dims        -- '2D' or '3D' field
#   cname       -- CMOR variable name used in the output file
#   realm       -- 'ocean' or 'seaice' (2D fields only; selects input file)
#   rotate_with -- partner component for vector rotation (u/v pairs)
vardir = {
    'temp': {'dims': '3D', 'cname': 'thetao'},
    'salt': {'dims': '3D', 'cname': 'so'},
    'u':    {'dims': '3D', 'cname': 'uo', 'rotate_with': 'v'},
    'v':    {'dims': '3D', 'cname': 'vo', 'rotate_with': 'u'},
    'w':    {'dims': '3D', 'cname': 'wo'},
    'wpot': {'dims': '3D', 'cname': 'wpot'},
    'ssh':  {'dims': '2D', 'cname': 'zos', 'realm': 'ocean'},
    'area': {'dims': '2D', 'cname': 'siconc', 'realm': 'seaice'},
    'hice': {'dims': '2D', 'cname': 'esithick', 'realm': 'seaice'},
    'uice': {'dims': '2D', 'cname': 'siu', 'realm': 'seaice',
             'rotate_with': 'vice'},
    'vice': {'dims': '2D', 'cname': 'siv', 'realm': 'seaice',
             'rotate_with': 'uice'},
}
def noempty_dict(d):
    '''
    Removes keys with empty string values from dictionary.

    Parameters
    ----------
    d : OrderedDict
        input dictionary

    Returns
    -------
    d_out : OrderedDict
        output dict with empty strings removed (original key order kept)
    '''
    # d.items() instead of the Python-2-only d.iteritems(): this keeps the
    # helper working unchanged under both Python 2 and Python 3.
    return OrderedDict(
        (key, value) for key, value in d.items() if value != u'')
def progressbar(progress_total, progress_passed, year, variable, \
                timestep, level, time):
    """Print a two-line, in-place progress report to stdout.

    First line names the variable, the timestep date and the level; the
    second draws a 50-character bar with the completion percentage.
    `progress_total`/`progress_passed` are the total and completed work
    item counts.  `time` is the netCDF time variable (values + units)
    used to format the timestep date.
    NOTE(review): the `year` argument is currently unused.
    """
    formated_time = num2date(time[timestep], time.units).strftime('%Y-%m-%d')
    sys.stdout.write('{}\n'.format('Variable: '+variable+\
                     ', Timestep: '+formated_time+\
                     ', Level: '+str(level)))
    tdif = progress_total
    tpassed = progress_passed
    # float() keeps the ratio fractional under Python 2 integer division.
    ratio = tpassed/float(tdif)
    filled = '=' * int( ratio * 50)
    rest = '-' * ( 50 - int( ratio * 50) )
    sys.stdout.write('|' + filled+'>'+rest+ '| {:.2f}%'.format(ratio*100))
    # ANSI "cursor up" so the next call overwrites this report in place.
    sys.stdout.write('\r\033[1A')
    sys.stdout.flush()
# Calculate distances and indeces that will be used for interpolation:
# the k nearest FESOM nodes for every regular-grid point.  Computed once
# here and reused for every year/variable/level in the main loop below.
distances, inds = pf.create_indexes_and_distances(mesh, lons, lats,\
                                                  k=k, n_jobs=8)
# The main loop: one pass (and one output file) per year.
print('='*50)
for year in range(start_year, end_year+1):
    # Open input and output netCDF files
    ifile = os.path.join(path_to_data, ifile_template.format(str(year)))
    ofile = os.path.join(path_to_output, ofile_template.format(str(year)))
    print('Open {}'.format(ifile))
    fl = Dataset(ifile)
    # NOTE(review): netCDF4.Dataset documents a ``format=`` keyword for the
    # file format; confirm ``data_model=`` is accepted by the installed
    # netCDF4 version.
    fw = Dataset(ofile, mode='w',data_model='NETCDF4_CLASSIC', )
    # Count work items (per level for 3D, per timestep for 2D) so the
    # progress bar has a total to report against.
    var2d = 0
    var3d = 0
    for varname in out_vars:
        if vardir[varname]['dims'] == '2D':
            var2d += 1
        elif vardir[varname]['dims'] == '3D':
            var3d += 1
    var3d = var3d*len(levels)*fl.variables['time'].shape[0]
    var2d = var2d*fl.variables['time'].shape[0]
    progress_total = var3d+var2d
    progress_passed = 0
    # create dimensions
    # (lons and lats share shape (number_of_lats, number_of_lons), so
    # lons.shape[0] is the latitude count and lats.shape[1] the longitude
    # count -- correct, if confusingly cross-named)
    fw.createDimension('latitude', lons.shape[0])
    fw.createDimension('longitude', lats.shape[1])
    fw.createDimension('time', None)
    fw.createDimension('depth_coord', levels.shape[0] )
    lat = fw.createVariable('latitude', 'd', ('latitude'))
    lat.setncatts(noempty_dict(cmore_table['axis_entry']['latitude']))
    lat[:] = lats[:,0].flatten()
    lon = fw.createVariable('longitude', 'd', ('longitude'))
    lon.setncatts(noempty_dict(cmore_table['axis_entry']['longitude']))
    lon[:] = lons[0,:].flatten()
    depth = fw.createVariable('depth_coord','d',('depth_coord'))
    depth.setncatts(noempty_dict(cmore_table['axis_entry']['depth_coord']))
    depth[:] = levels
    time = fw.createVariable('time','d',('time'))
    time.setncatts(cmore_table['axis_entry']['time'])
    # Build the time axis; four strategies depending on config/input file.
    if distribute_timesteps:
        # Spread the timesteps evenly across the year.
        nsteps = fl.variables['time'].shape[0]
        # NOTE(review): Python-2 integer division truncates 365/nsteps;
        # under Python 3 this becomes a float timedelta -- confirm intended.
        td = datetime.timedelta(days = 365/nsteps)
        sdate = datetime.datetime(year,1,1,0,0,0)
        seconds = []
        for i in range(1,nsteps+1):
            workdate = sdate + td*i
            seconds.append( (workdate-sdate).total_seconds() )
        time.units = 'seconds since {}-01-01 00:00:00'.format(str(year))
        time[:] = seconds
    elif fl.variables['time'].units.strip().startswith('seconds since'):
        # Input already carries usable units -- copy them through.
        time.units = fl.variables['time'].units
        time[:] = fl.variables['time'][:]
    elif fl.variables['time'].shape[0] == 12:
        # Twelve records: assume monthly means, stamped mid-month.
        sdate = datetime.datetime(year,1,1,0,0,0)
        td = datetime.timedelta(days = 14.5)
        seconds = []
        for i in range(1,13):
            workdate = datetime.datetime(year,i,1,0,0,0)+td
            seconds.append( (workdate-sdate).total_seconds() )
        time.units = 'seconds since {}-01-01 00:00:00'.format(str(year))
        time[:] = seconds
    else:
        # Fall back: reinterpret raw values as seconds since Jan 1st.
        time.units = 'seconds since {}-01-01 00:00:00'.format(str(year))
        time[:] = fl.variables['time'][:]
    # Store processed variables (to not repeat
    # processing for vector variables)
    completed = []
    # variables loop
    for varname in out_vars:
        # check if we have to convert two variables at once
        # for vector variables.
        # NOTE(review): dict.has_key() is Python-2 only.
        do_two_vars = (vardir[varname].has_key('rotate_with') is True)
        #print("Converting {}.".format(varname))
        # skip if the variable was already converted
        if varname in completed:
            pass
        # 3D variables processing
        elif vardir[varname]['dims']=='3D':
            # Create netCDF variable
            temp = fw.createVariable(vardir[varname]['cname'],'d',\
                ('time','depth_coord','latitude','longitude'), \
                fill_value=-99999, zlib=zlib, complevel=1)
            # add CMOR complient attributes
            temp.setncatts(noempty_dict(cmore_table['variable_entry'][vardir[varname]['cname']]))
            # If we have two convert two variables at once, create netCDF variable for
            # the second variable
            if do_two_vars is True:
                varname2 = vardir[varname]['rotate_with']
                temp2 = fw.createVariable(vardir[varname2]['cname'],'d',('time','depth_coord','latitude','longitude'), fill_value=-99999, zlib=zlib, complevel=1)
                temp2.setncatts(noempty_dict(cmore_table['variable_entry'][vardir[varname2]['cname']]))
            # Loop over timesteps for 3D variables
            for i in range(fl.variables[varname].shape[0]):
                #for i in range(2):
                # Get the whole 3D field in to memory. It turns out that this is more
                # effective than to select individual levels from the file located on the disk.
                all_layers = fl.variables[varname][i,:]
                # Get the data for the second variable if needed
                if do_two_vars is True:
                    #print("Also converting {}, triggered by {}.".format(varname2, varname))
                    all_layers2 = fl.variables[varname2][i,:]
                # Loop over vertical levels
                for dlev, llev in enumerate(levels):
                    # get indeces of the gridpoints that corespond to the level
                    ind_depth, ind_noempty, ind_empty = pf.ind_for_depth(llev, mesh)
                    # get the data for the level
                    level_data=np.zeros(shape=(mesh.n2d))
                    level_data[ind_noempty]=all_layers[ind_depth[ind_noempty]]
                    level_data[ind_empty] = np.nan
                    # Spetial treatment of the vector variables that need rotation
                    if do_two_vars is True:
                        # get the data for the level of the second variable
                        level_data2=np.zeros(shape=(mesh.n2d))
                        level_data2[ind_noempty]=all_layers2[ind_depth[ind_noempty]]
                        level_data2[ind_empty] = np.nan
                        #print('Rotate {} and {}'.format(varname, varname2))
                        # Rotate vector variables to geographical grid
                        uunr,vunr = pf.vec_rotate_r2g(angles_for_rotation[0],angles_for_rotation[1], \
                            angles_for_rotation[2], mesh.x2, mesh.y2,\
                            level_data, level_data2, 1)
                        # Interpolate rotated variables
                        #print('interpolation, layer {}'.format(str(llev)))
                        air_nearest = pf.fesom2regular(uunr, mesh, lons, lats, distances=distances,\
                            inds=inds, radius_of_influence=radius_of_influence, n_jobs=1)
                        air_nearest2 = pf.fesom2regular(vunr, mesh, lons, lats, distances=distances,\
                            inds=inds, radius_of_influence=radius_of_influence, n_jobs=1)
                        # Put values to the netCDF variables
                        temp[i,dlev,:,:] = air_nearest[:,:].filled(-99999)
                        temp2[i,dlev,:,:] = air_nearest2[:,:].filled(-99999)
                    else:
                        # Interpolate scalar variable
                        #print('interpolation, layer {}'.format(str(llev)))
                        air_nearest = pf.fesom2regular(level_data, mesh, lons, lats, distances=distances,\
                            inds=inds, radius_of_influence=radius_of_influence, n_jobs=1)
                        # Put values to the netCDF variable
                        temp[i,dlev,:,:] = air_nearest[:,:].filled(-99999)
                    progress_passed += 1
                    if do_two_vars is True:
                        progress_passed += 1
                    progressbar(progress_total, progress_passed, year,\
                                varname, i, llev, time)
            # END Loop over timesteps for 3D variables
            # add variable to the list of processed variables
            completed.append(varname)
            if do_two_vars is True:
                completed.append(varname2)
        # End 3D variables processing
        # 2D variables processing
        elif vardir[varname]['dims']=='2D':
            # Create netCDF variable
            temp = fw.createVariable(vardir[varname]['cname'],'d',\
                ('time','latitude','longitude'), \
                fill_value=-99999, zlib=zlib, complevel=1)
            # add CMOR complient attributes
            temp.setncatts(noempty_dict(cmore_table['variable_entry'][vardir[varname]['cname']]))
            # If we have two convert two variables at once, create netCDF variable for
            # the second variable
            if do_two_vars is True:
                varname2 = vardir[varname]['rotate_with']
                temp2 = fw.createVariable(vardir[varname2]['cname'],'d',\
                    ('time','latitude','longitude'), \
                    fill_value=-99999, zlib=zlib, complevel=1)
                temp2.setncatts(noempty_dict(cmore_table['variable_entry'][vardir[varname2]['cname']]))
            # For sea ice variables we have to open different file, so
            # open ether ocean or sea ice input file.
            # NOTE(review): the setncatts calls below repeat the one above,
            # and the per-year ice Dataset is never closed -- worth a look.
            if vardir[varname]['realm']=='ocean':
                temp.setncatts(noempty_dict(cmore_table['variable_entry'][vardir[varname]['cname']]))
                ncfile_handler = fl
            elif vardir[varname]['realm']=='seaice':
                temp.setncatts(noempty_dict(cmore_table['variable_entry'][vardir[varname]['cname']]))
                ifile_ice = os.path.join(path_to_data, ifile_template_ice.format(str(year)))
                ncfile_handler = Dataset(ifile_ice)
            # Loop over timesteps for 2D variables
            for i in range(ncfile_handler.variables[varname].shape[0]):
                #for i in range(2):
                # Get the whole 3D field in to memory. It turns out that this is more
                # effective than to select individual levels from the file located on the disk.
                all_layers = ncfile_handler.variables[varname][i,:]
                # Get the data for the second variable if needed
                if do_two_vars is True:
                    print("Also converting {}, triggered by {}.".format(varname2, varname))
                    all_layers2 = ncfile_handler.variables[varname2][i,:]
                # get indeces of the gridpoints that corespond to the surface level
                ind_depth, ind_noempty, ind_empty = pf.ind_for_depth(0, mesh)
                # get the data for the surface level
                level_data=np.zeros(shape=(mesh.n2d))
                level_data[ind_noempty]=all_layers[ind_depth[ind_noempty]]
                level_data[ind_empty] = np.nan
                # Spetial treatment of the vector variables that need rotation
                if do_two_vars is True:
                    # get the data for the surface level of the second variable
                    level_data2=np.zeros(shape=(mesh.n2d))
                    level_data2[ind_noempty]=all_layers2[ind_depth[ind_noempty]]
                    level_data2[ind_empty] = np.nan
                    # Rotate vector variables to geographical grid
                    print('Rotate {} and {}'.format(varname, varname2))
                    uunr,vunr = pf.vec_rotate_r2g(angles_for_rotation[0],angles_for_rotation[1], \
                        angles_for_rotation[2], mesh.x2, mesh.y2,\
                        level_data, level_data2, 1)
                    # Interpolate rotated variables )
                    air_nearest = pf.fesom2regular(uunr, mesh, lons, lats, distances=distances,\
                        inds=inds, radius_of_influence=radius_of_influence, n_jobs=1)
                    air_nearest2 = pf.fesom2regular(vunr, mesh, lons, lats, distances=distances,\
                        inds=inds, radius_of_influence=radius_of_influence, n_jobs=1)
                    # fill in netCDF variables
                    temp[i,:,:] = air_nearest[:,:].filled(-99999)
                    temp2[i,:,:] = air_nearest2[:,:].filled(-99999)
                else:
                    # Interpolate scalar variable and fill in netCDF variable.
                    air_nearest = pf.fesom2regular(level_data, mesh, lons, lats, distances=distances,\
                        inds=inds, radius_of_influence=radius_of_influence, n_jobs=1)
                    temp[i,:,:] = air_nearest[:,:].filled(-99999)
                progress_passed += 1
                if do_two_vars is True:
                    progress_passed += 1
                progressbar(progress_total, progress_passed, year,\
                            varname, i, 0, time)
            # END Loop over timesteps for 2D variables
            completed.append(varname)
            if do_two_vars is True:
                completed.append(varname2)
    # end variables loop
    fw.close()
    print('The {} is ready'.format(ofile))
# end of the main loop
|
|
"""setuptools.command.bdist_egg
Build .egg distributions"""
# This module should be kept compatible with Python 2.3
import sys, os, marshal
from setuptools import Command
from distutils.dir_util import remove_tree, mkpath
try:
# Python 2.7 or >=3.2
from sysconfig import get_path, get_python_version
def _get_purelib():
return get_path("purelib")
except ImportError:
from distutils.sysconfig import get_python_lib, get_python_version
def _get_purelib():
return get_python_lib(False)
from distutils import log
from distutils.errors import DistutilsSetupError
from pkg_resources import get_build_platform, Distribution, ensure_directory
from pkg_resources import EntryPoint
from types import CodeType
from setuptools.compat import basestring, next
from setuptools.extension import Library
def strip_module(filename):
    """Drop the extension and a trailing ``module`` suffix from *filename*."""
    base = os.path.splitext(filename)[0] if '.' in filename else filename
    return base[:-6] if base.endswith('module') else base
def write_stub(resource, pyfile):
    """Write a stub loader module to *pyfile*.

    The generated module, when imported, locates *resource* inside the
    egg via pkg_resources and loads it as a dynamic extension module.
    """
    # 'with' guarantees the handle is closed even if the write raises
    # (the original relied on an unconditional explicit close()).
    with open(pyfile, 'w') as f:
        f.write('\n'.join([
            "def __bootstrap__():",
            "   global __bootstrap__, __loader__, __file__",
            "   import sys, pkg_resources, imp",
            "   __file__ = pkg_resources.resource_filename(__name__,%r)"
            % resource,
            "   __loader__ = None; del __bootstrap__, __loader__",
            "   imp.load_dynamic(__name__,__file__)",
            "__bootstrap__()",
            "" # terminal \n
        ]))
class bdist_egg(Command):
    """distutils/setuptools command that builds a project into a ``.egg``."""

    description = "create an \"egg\" distribution"

    user_options = [
        ('bdist-dir=', 'b',
            "temporary directory for creating the distribution"),
        ('plat-name=', 'p',
            "platform name to embed in generated filenames "
            "(default: %s)" % get_build_platform()),
        ('exclude-source-files', None,
            "remove all .py files from the generated egg"),
        ('keep-temp', 'k',
            "keep the pseudo-installation tree around after " +
            "creating the distribution archive"),
        ('dist-dir=', 'd',
            "directory to put final built distributions in"),
        ('skip-build', None,
            "skip rebuilding everything (for testing/debugging)"),
    ]

    boolean_options = [
        'keep-temp', 'skip-build', 'exclude-source-files'
    ]

    def initialize_options (self):
        # Defaults only; real values are resolved in finalize_options().
        self.bdist_dir = None
        self.plat_name = None
        self.keep_temp = 0
        self.dist_dir = None
        self.skip_build = 0
        self.egg_output = None
        self.exclude_source_files = None

    def finalize_options(self):
        """Resolve unset options from the egg_info/bdist commands."""
        ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
        self.egg_info = ei_cmd.egg_info
        if self.bdist_dir is None:
            bdist_base = self.get_finalized_command('bdist').bdist_base
            self.bdist_dir = os.path.join(bdist_base, 'egg')
        if self.plat_name is None:
            self.plat_name = get_build_platform()
        self.set_undefined_options('bdist',('dist_dir', 'dist_dir'))
        if self.egg_output is None:
            # Compute filename of the output egg
            basename = Distribution(
                None, None, ei_cmd.egg_name, ei_cmd.egg_version,
                get_python_version(),
                self.distribution.has_ext_modules() and self.plat_name
            ).egg_name()
            self.egg_output = os.path.join(self.dist_dir, basename+'.egg')

    def do_install_data(self):
        # Hack for packages that install data to install's --install-lib
        self.get_finalized_command('install').install_lib = self.bdist_dir
        site_packages = os.path.normcase(os.path.realpath(_get_purelib()))
        old, self.distribution.data_files = self.distribution.data_files,[]
        for item in old:
            if isinstance(item,tuple) and len(item)==2:
                if os.path.isabs(item[0]):
                    realpath = os.path.realpath(item[0])
                    normalized = os.path.normcase(realpath)
                    if normalized==site_packages or normalized.startswith(
                        site_packages+os.sep
                    ):
                        item = realpath[len(site_packages)+1:], item[1]
                    # XXX else: raise ???
            self.distribution.data_files.append(item)
        try:
            log.info("installing package data to %s" % self.bdist_dir)
            self.call_command('install_data', force=0, root=None)
        finally:
            # Restore the original data_files regardless of outcome.
            self.distribution.data_files = old

    def get_outputs(self):
        return [self.egg_output]

    def call_command(self,cmdname,**kw):
        """Invoke reinitialized command `cmdname` with keyword args"""
        for dirname in INSTALL_DIRECTORY_ATTRS:
            kw.setdefault(dirname,self.bdist_dir)
        kw.setdefault('skip_build',self.skip_build)
        kw.setdefault('dry_run', self.dry_run)
        cmd = self.reinitialize_command(cmdname, **kw)
        self.run_command(cmdname)
        return cmd

    def run(self):
        """Build the egg: install the tree, write metadata, zip it up."""
        # Generate metadata first
        self.run_command("egg_info")
        # We run install_lib before install_data, because some data hacks
        # pull their data path from the install_lib command.
        log.info("installing library code to %s" % self.bdist_dir)
        instcmd = self.get_finalized_command('install')
        old_root = instcmd.root; instcmd.root = None
        if self.distribution.has_c_libraries() and not self.skip_build:
            self.run_command('build_clib')
        cmd = self.call_command('install_lib', warn_dir=0)
        instcmd.root = old_root
        all_outputs, ext_outputs = self.get_ext_outputs()
        self.stubs = []
        to_compile = []
        # Write a .py stub loader next to every C extension.
        for (p,ext_name) in enumerate(ext_outputs):
            filename,ext = os.path.splitext(ext_name)
            pyfile = os.path.join(self.bdist_dir, strip_module(filename)+'.py')
            self.stubs.append(pyfile)
            log.info("creating stub loader for %s" % ext_name)
            if not self.dry_run:
                write_stub(os.path.basename(ext_name), pyfile)
            to_compile.append(pyfile)
            ext_outputs[p] = ext_name.replace(os.sep,'/')
        if to_compile:
            cmd.byte_compile(to_compile)
        if self.distribution.data_files:
            self.do_install_data()
        # Make the EGG-INFO directory
        archive_root = self.bdist_dir
        egg_info = os.path.join(archive_root,'EGG-INFO')
        self.mkpath(egg_info)
        if self.distribution.scripts:
            script_dir = os.path.join(egg_info, 'scripts')
            log.info("installing scripts to %s" % script_dir)
            self.call_command('install_scripts',install_dir=script_dir,no_ep=1)
        self.copy_metadata_to(egg_info)
        native_libs = os.path.join(egg_info, "native_libs.txt")
        if all_outputs:
            log.info("writing %s" % native_libs)
            if not self.dry_run:
                ensure_directory(native_libs)
                libs_file = open(native_libs, 'wt')
                libs_file.write('\n'.join(all_outputs))
                libs_file.write('\n')
                libs_file.close()
        elif os.path.isfile(native_libs):
            log.info("removing %s" % native_libs)
            if not self.dry_run:
                os.unlink(native_libs)
        write_safety_flag(
            os.path.join(archive_root,'EGG-INFO'), self.zip_safe()
        )
        if os.path.exists(os.path.join(self.egg_info,'depends.txt')):
            log.warn(
                "WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
                "Use the install_requires/extras_require setup() args instead."
            )
        if self.exclude_source_files:
            self.zap_pyfiles()
        # Make the archive
        make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
                     dry_run=self.dry_run, mode=self.gen_header())
        if not self.keep_temp:
            remove_tree(self.bdist_dir, dry_run=self.dry_run)
        # Add to 'Distribution.dist_files' so that the "upload" command works
        getattr(self.distribution,'dist_files',[]).append(
            ('bdist_egg',get_python_version(),self.egg_output))

    def zap_pyfiles(self):
        """Delete all .py sources from the build tree (metadata is kept)."""
        log.info("Removing .py files from temporary directory")
        for base,dirs,files in walk_egg(self.bdist_dir):
            for name in files:
                if name.endswith('.py'):
                    path = os.path.join(base,name)
                    log.debug("Deleting %s", path)
                    os.unlink(path)

    def zip_safe(self):
        """Return the project's zip_safe flag, scanning the egg if unset."""
        safe = getattr(self.distribution,'zip_safe',None)
        if safe is not None:
            return safe
        log.warn("zip_safe flag not set; analyzing archive contents...")
        return analyze_egg(self.bdist_dir, self.stubs)

    def gen_header(self):
        """Write an eggsecutable sh header if configured; return file mode.

        Returns 'w' for a plain egg, or 'a' after writing the shell
        header so the zip archive gets appended to it.
        """
        epm = EntryPoint.parse_map(self.distribution.entry_points or '')
        ep = epm.get('setuptools.installation',{}).get('eggsecutable')
        if ep is None:
            return 'w'  # not an eggsecutable, do it the usual way.
        if not ep.attrs or ep.extras:
            raise DistutilsSetupError(
                "eggsecutable entry point (%r) cannot have 'extras' "
                "or refer to a module" % (ep,)
            )
        pyver = sys.version[:3]
        pkg = ep.module_name
        full = '.'.join(ep.attrs)
        base = ep.attrs[0]
        basename = os.path.basename(self.egg_output)
        header = (
            "#!/bin/sh\n"
            'if [ `basename $0` = "%(basename)s" ]\n'
            'then exec python%(pyver)s -c "'
            "import sys, os; sys.path.insert(0, os.path.abspath('$0')); "
            "from %(pkg)s import %(base)s; sys.exit(%(full)s())"
            '" "$@"\n'
            'else\n'
            '  echo $0 is not the correct name for this egg file.\n'
            '  echo Please rename it back to %(basename)s and try again.\n'
            '  exec false\n'
            'fi\n'
        ) % locals()
        if not self.dry_run:
            mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run)
            f = open(self.egg_output, 'w')
            f.write(header)
            f.close()
        return 'a'

    def copy_metadata_to(self, target_dir):
        "Copy metadata (egg info) to the target_dir"
        # normalize the path (so that a forward-slash in egg_info will
        # match using startswith below)
        norm_egg_info = os.path.normpath(self.egg_info)
        prefix = os.path.join(norm_egg_info,'')
        for path in self.ei_cmd.filelist.files:
            if path.startswith(prefix):
                target = os.path.join(target_dir, path[len(prefix):])
                ensure_directory(target)
                self.copy_file(path, target)

    def get_ext_outputs(self):
        """Get a list of relative paths to C extensions in the output distro"""
        all_outputs = []
        ext_outputs = []
        # paths maps each visited directory to its egg-relative prefix.
        paths = {self.bdist_dir:''}
        for base, dirs, files in os.walk(self.bdist_dir):
            for filename in files:
                if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
                    all_outputs.append(paths[base]+filename)
            for filename in dirs:
                paths[os.path.join(base,filename)] = paths[base]+filename+'/'
        if self.distribution.has_ext_modules():
            build_cmd = self.get_finalized_command('build_ext')
            for ext in build_cmd.extensions:
                if isinstance(ext,Library):
                    continue
                fullname = build_cmd.get_ext_fullname(ext.name)
                filename = build_cmd.get_ext_filename(fullname)
                if not os.path.basename(filename).startswith('dl-'):
                    if os.path.exists(os.path.join(self.bdist_dir,filename)):
                        ext_outputs.append(filename)
        return all_outputs, ext_outputs
# File extensions treated as native (compiled) libraries when scanning.
NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
def walk_egg(egg_dir):
    """Walk an unpacked egg's contents, skipping the metadata directory"""
    walker = os.walk(egg_dir)
    top_base, top_dirs, top_files = next(walker)
    # Prune EGG-INFO from the first level so os.walk never descends into it.
    try:
        top_dirs.remove('EGG-INFO')
    except ValueError:
        pass
    yield top_base, top_dirs, top_files
    for entry in walker:
        yield entry
def analyze_egg(egg_dir, stubs):
    """Scan an unpacked egg and report whether it appears zip-safe."""
    # An explicit flag file in EGG-INFO overrides any scanning.
    for flag, fn in safety_flags.items():
        if os.path.exists(os.path.join(egg_dir, 'EGG-INFO', fn)):
            return flag
    if not can_scan():
        return False
    safe = True
    for base, dirs, files in walk_egg(egg_dir):
        for name in files:
            if name.endswith('.py') or name.endswith('.pyw'):
                continue
            if name.endswith('.pyc') or name.endswith('.pyo'):
                # always scan, even if we already know we're not safe
                safe = scan_module(egg_dir, base, name, stubs) and safe
    return safe
def write_safety_flag(egg_dir, safe):
    """Create or remove the zip-safety marker file(s) in *egg_dir*."""
    for flag, fn in safety_flags.items():
        marker_path = os.path.join(egg_dir, fn)
        if os.path.exists(marker_path):
            # Drop a marker that is stale or contradicts the verdict.
            if safe is None or bool(safe) != flag:
                os.unlink(marker_path)
        elif safe is not None and bool(safe) == flag:
            with open(marker_path, 'wt') as marker:
                marker.write('\n')
# Marker file names dropped into EGG-INFO to record the zip-safety verdict.
safety_flags = {
    True: 'zip-safe',
    False: 'not-zip-safe',
}
def scan_module(egg_dir, base, name, stubs):
    """Check whether module possibly uses unsafe-for-zipfile stuff.

    Reads the compiled bytecode file `name` under `base`, unmarshals its
    code object and looks for symbols (``__file__``, ``__path__``,
    filesystem-dependent ``inspect`` helpers) that break when the module
    lives inside a zip archive.  Returns True when the module looks safe.
    """
    filename = os.path.join(base,name)
    if filename[:-1] in stubs:
        return True     # Extension module stub loader -- always safe
    pkg = base[len(egg_dir)+1:].replace(os.sep,'.')
    module = pkg+(pkg and '.' or '')+os.path.splitext(name)[0]
    # .pyc header size differs: 3.3+ added a source-size field.
    if sys.version_info < (3, 3):
        skip = 8   # skip magic & date
    else:
        skip = 12  # skip magic & date & file size
    f = open(filename,'rb'); f.read(skip)
    code = marshal.load(f); f.close()
    safe = True
    symbols = dict.fromkeys(iter_symbols(code))
    for bad in ['__file__', '__path__']:
        if bad in symbols:
            log.warn("%s: module references %s", module, bad)
            safe = False
    if 'inspect' in symbols:
        # BUGFIX: the original list read `'getfile' 'getsourcelines'` with a
        # missing comma -- implicit string concatenation fused them into the
        # bogus name 'getfilegetsourcelines', so neither was ever detected.
        for bad in [
            'getsource', 'getabsfile', 'getsourcefile', 'getfile',
            'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
            'getinnerframes', 'getouterframes', 'stack', 'trace'
        ]:
            if bad in symbols:
                log.warn("%s: module MAY be using inspect.%s", module, bad)
                safe = False
    if '__name__' in symbols and '__main__' in symbols and '.' not in module:
        if sys.version[:3]=="2.4": # -m works w/zipfiles in 2.5
            log.warn("%s: top-level module may be 'python -m' script", module)
            safe = False
    return safe
def iter_symbols(code):
    """Yield names and strings used by `code` and its nested code objects"""
    # Identifiers referenced directly by this code object.
    for symbol in code.co_names:
        yield symbol
    # String constants, plus a recursive walk into nested code objects
    # (function bodies, comprehensions, ...).
    for constant in code.co_consts:
        if isinstance(constant, basestring):
            yield constant
        elif isinstance(constant, CodeType):
            for symbol in iter_symbols(constant):
                yield symbol
def can_scan():
    """Return True if compiled bytecode can be analyzed on this platform.

    CPython and PyPy marshal code objects this module can read; Jython
    ('java*' platforms) and IronPython ('cli') do not, so scanning is
    skipped there.
    """
    if not sys.platform.startswith('java') and sys.platform != 'cli':
        # CPython, PyPy, etc.
        return True
    log.warn("Unable to analyze compiled code on this platform.")
    log.warn("Please ask the author to include a 'zip_safe'"
             " setting (either True or False) in the package's setup.py")
    # The original fell through and returned None implicitly; an explicit
    # False (equally falsy for callers) makes the contract obvious.
    return False
# Attribute names of options for commands that might need to be convinced to
# install to the egg build directory
INSTALL_DIRECTORY_ATTRS = [
    'install_lib', 'install_dir', 'install_data', 'install_base'
]
def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=None,
                 mode='w'):
    """Create a zip file from all the files under 'base_dir'. The output
    zip file will be named 'base_dir' + ".zip". Uses either the "zipfile"
    Python module (if available) or the InfoZIP "zip" utility (if installed
    and found on the default search path). If neither tool is available,
    raises DistutilsExecError. Returns the name of the output zip file.
    """
    import zipfile

    mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
    log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)

    def visit(z, dirname, names):
        # Add every regular file under *dirname* to archive *z*, using
        # paths relative to *base_dir* as the archive names.
        for name in names:
            path = os.path.normpath(os.path.join(dirname, name))
            if os.path.isfile(path):
                p = path[len(base_dir)+1:]
                if not dry_run:
                    z.write(path, p)
                log.debug("adding '%s'" % p)

    if compress is None:
        # BUGFIX: compare version tuples, not strings -- the original
        # `sys.version >= "2.4"` is a fragile lexicographic comparison.
        compress = (sys.version_info >= (2, 4)) # avoid 2.3 zipimport bug when 64 bits

    compression = [zipfile.ZIP_STORED, zipfile.ZIP_DEFLATED][bool(compress)]
    if not dry_run:
        z = zipfile.ZipFile(zip_filename, mode, compression=compression)
        for dirname, dirs, files in os.walk(base_dir):
            visit(z, dirname, files)
        z.close()
    else:
        for dirname, dirs, files in os.walk(base_dir):
            visit(None, dirname, files)
    return zip_filename
#
|
|
"""
Inception V4, suitable for images with around 299 x 299 (original)
Implemented the following paper:
Szegedy C, Ioffe S, Vanhoucke V. Inception-v4, inception-resnet and the impact of residual connections on learning[J]. arXiv preprint arXiv:1602.07261, 2016.
Jie Hu, Li Shen, Gang Sun. "Squeeze-and-Excitation Networks" https://arxiv.org/pdf/1709.01507v1.pdf
This modification version is based on Inception-v4 original but change to 224 x 224 size of input data.
Modified by Lin Xiong, May-27, 2017
Added Squeeze-and-Excitation block by Lin Xiong Oct-30, 2017
Thanks to Cher Keng Heng
"""
#import find_mxnet
import mxnet as mx
def Conv(data, num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=None, suffix='', withRelu=True, withBn=False, bn_mom=0.9, workspace=256):
    """Convolution building block: Conv2D, optionally followed by
    BatchNorm (withBn) and a ReLU activation (withRelu).  Symbol names
    are derived from `name` + `suffix`."""
    out = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=kernel,
                             stride=stride, pad=pad,
                             name='%s%s_conv2d' % (name, suffix),
                             workspace=workspace)
    if withBn:
        out = mx.sym.BatchNorm(data=out, fix_gamma=False, eps=2e-5,
                               momentum=bn_mom,
                               name='%s%s_bn' % (name, suffix))
    if withRelu:
        out = mx.sym.Activation(data=out, act_type='relu',
                                name='%s%s_relu' % (name, suffix))
    return out
# Input Shape is 299*299*3 (old)
# Input Shape is 224*224*3 (new)
def inception_stem(name, data,
                   num_1_1=32, num_1_2=32, num_1_3=64,
                   num_2_1=96,
                   num_3_1=64, num_3_2=96,
                   num_4_1=64, num_4_2=64, num_4_3=64, num_4_4=96,
                   num_5_1=192,
                   bn_mom=0.9):
    """Inception-v4 stem: initial reduction stack applied to the input.

    Three 3x3 convolutions, then two stages of parallel branches whose
    outputs are concatenated, finishing with BatchNorm + ReLU.  The
    ``num_*`` arguments set the filter counts of the individual
    convolutions; `name` prefixes all generated symbol names.
    """
    stem_3x3 = Conv(data=data, num_filter=num_1_1, kernel=(3, 3), stride=(2, 2), name=('%s_conv' % name), bn_mom=bn_mom, workspace=256)
    stem_3x3 = Conv(data=stem_3x3, num_filter=num_1_2, kernel=(3, 3), name=('%s_stem' % name), suffix='_conv', bn_mom=bn_mom, workspace=256)
    stem_3x3 = Conv(data=stem_3x3, num_filter=num_1_3, kernel=(3, 3), pad=(1, 1), name=('%s_stem' % name),
                    suffix='_conv_1', bn_mom=bn_mom, workspace=256)
    # Stage 1: strided max-pool branch alongside a strided 3x3 conv branch.
    pool1 = mx.sym.Pooling(data=stem_3x3, kernel=(3, 3), stride=(2, 2), pad=(0, 0), pool_type='max',
                           name=('%s_%s_pool1' % ('max', name)))
    stem_1_3x3 = Conv(data=stem_3x3, num_filter=num_2_1, kernel=(3, 3), stride=(2, 2), name=('%s_stem_1' % name),
                      suffix='_conv_1', bn_mom=bn_mom, workspace=256)
    concat1 = mx.sym.Concat(*[pool1, stem_1_3x3], name=('%s_concat_1' % name))
    # Stage 2: short 1x1->3x3 branch next to a 1x1->7x1->1x7->3x3 branch.
    stem_1_1x1 = Conv(data=concat1, num_filter=num_3_1, name=('%s_stem_1' % name), suffix='_conv_2', bn_mom=bn_mom, workspace=256)
    stem_1_3x3 = Conv(data=stem_1_1x1, num_filter=num_3_2, kernel=(3, 3), name=('%s_stem_1' % name), suffix='_conv_3', bn_mom=bn_mom, workspace=256)
    stem_2_1x1 = Conv(data=concat1, num_filter=num_4_1, name=('%s_stem_2' % name), suffix='_conv_1', bn_mom=bn_mom, workspace=256)
    stem_2_7x1 = Conv(data=stem_2_1x1, num_filter=num_4_2, kernel=(7, 1), pad=(3, 0), name=('%s_stem_2' % name),
                      suffix='_conv_2', bn_mom=bn_mom, workspace=256)
    stem_2_1x7 = Conv(data=stem_2_7x1, num_filter=num_4_3, kernel=(1, 7), pad=(0, 3), name=('%s_stem_2' % name),
                      suffix='_conv_3', bn_mom=bn_mom, workspace=256)
    stem_2_3x3 = Conv(data=stem_2_1x7, num_filter=num_4_4, kernel=(3, 3), name=('%s_stem_2' % name), suffix='_conv_4', bn_mom=bn_mom, workspace=256)
    concat2 = mx.sym.Concat(*[stem_1_3x3, stem_2_3x3], name=('%s_concat_2' % name))
    # Stage 3: final reduction (max-pool next to a strided conv), then BN+ReLU.
    pool2 = mx.sym.Pooling(data=concat2, kernel=(3, 3), stride=(2, 2), pad=(0, 0), pool_type='max',
                           name=('%s_%s_pool2' % ('max', name)))
    stem_3_3x3 = Conv(data=concat2, num_filter=num_5_1, kernel=(3, 3), stride=(2, 2), name=('%s_stem_3' % name),
                      suffix='_conv_1', withRelu=False, bn_mom=bn_mom, workspace=256)
    concat3 = mx.sym.Concat(*[pool2, stem_3_3x3], name=('%s_concat_3' % name))
    bn1 = mx.sym.BatchNorm(data=concat3, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=('%s_bn1' % name))
    act1 = mx.sym.Activation(data=bn1, act_type='relu', name=('%s_relu1' % name))
    return act1
# Output Shape is 25*25*384
# Input Shape is 25*25*384
def InceptionA(name, data,
               num_1_1=96,
               num_2_1=96,
               num_3_1=64, num_3_2=96,
               num_4_1=64, num_4_2=96, num_4_3=96,
               bn_mom=0.9):
    """Inception-A block: four parallel branches concatenated on the
    channel axis, followed by BatchNorm and ReLU. All branches use
    stride 1 with matching padding, so the spatial size is preserved.

    NOTE(review): bn_mom is forwarded to the Conv helper only; the
    trailing BatchNorm uses MXNet's default momentum -- confirm intended.
    """
    # Branch 1: 3x3 average pooling, then a 1x1 projection.
    branch_pool = mx.sym.Pooling(data=data, kernel=(3, 3), stride=(1, 1), pad=(1, 1), pool_type='avg',
                                 name=('%s_%s_pool1' % ('avg', name)))
    branch_pool = Conv(data=branch_pool, num_filter=num_1_1, name=('%s_a_1' % name), suffix='_conv',
                       withRelu=False, bn_mom=bn_mom, workspace=256)
    # Branch 2: plain 1x1 convolution.
    branch_1x1 = Conv(data=data, num_filter=num_2_1, name=('%s_a_2' % name), suffix='_conv',
                      withRelu=False, bn_mom=bn_mom, workspace=256)
    # Branch 3: 1x1 reduction followed by a 3x3 convolution.
    branch_3x3 = Conv(data=data, num_filter=num_3_1, name=('%s_a_3' % name), suffix='_conv_1', bn_mom=bn_mom, workspace=256)
    branch_3x3 = Conv(data=branch_3x3, num_filter=num_3_2, kernel=(3, 3), pad=(1, 1), name=('%s_a_3' % name), suffix='_conv_2',
                      withRelu=False, bn_mom=bn_mom, workspace=256)
    # Branch 4: 1x1 reduction followed by two stacked 3x3 convolutions.
    branch_dbl = Conv(data=data, num_filter=num_4_1, name=('%s_a_4' % name), suffix='_conv_1', bn_mom=bn_mom, workspace=256)
    branch_dbl = Conv(data=branch_dbl, num_filter=num_4_2, kernel=(3, 3), pad=(1, 1), name=('%s_a_4' % name),
                      suffix='_conv_2', bn_mom=bn_mom, workspace=256)
    branch_dbl = Conv(data=branch_dbl, num_filter=num_4_3, kernel=(3, 3), pad=(1, 1), name=('%s_a_4' % name), suffix='_conv_3',
                      withRelu=False, bn_mom=bn_mom, workspace=256)
    # Merge the branches channel-wise, then BN + ReLU.
    merged = mx.sym.Concat(branch_pool, branch_1x1, branch_3x3, branch_dbl, name=('%s_a_concat1' % name))
    merged = mx.sym.BatchNorm(data=merged, fix_gamma=False, eps=2e-5, name=('%s_a_bn1' % name))
    return mx.sym.Activation(data=merged, act_type='relu', name=('%s_a_relu1' % name))
# Output Shape is 25*25*384
# Input Shape is 12*12*1024
def InceptionB(name, data,
               num_1_1=128,
               num_2_1=384,
               num_3_1=192, num_3_2=224, num_3_3=256,
               num_4_1=192, num_4_2=192, num_4_3=224, num_4_4=224, num_4_5=256,
               bn_mom=0.9):
    """Inception-B block: four parallel stride-1 branches (avg-pool+1x1,
    1x1, 1x1->1x7->7x1, and 1x1 followed by two 1x7/7x1 pairs), channel
    concatenated and finished with BatchNorm + ReLU. Spatial size is
    preserved; the 1x7/7x1 pairs factorize a 7x7 convolution.

    NOTE(review): bn_mom is only forwarded to Conv; the trailing
    BatchNorm uses MXNet's default momentum -- confirm this is intended.
    """
    # Branch 1: 3x3 average pool + 1x1 projection.
    b1 = mx.sym.Pooling(data=data, kernel=(3, 3), stride=(1, 1), pad=(1, 1), pool_type='avg',
                        name=('%s_%s_pool1' % ('avg', name)))
    b1 = Conv(data=b1, num_filter=num_1_1, name=('%s_b_1' % name), suffix='_conv', withRelu=False, bn_mom=bn_mom, workspace=256)
    # Branch 2: plain 1x1 convolution.
    b2 = Conv(data=data, num_filter=num_2_1, name=('%s_b_2' % name), suffix='_conv', withRelu=False, bn_mom=bn_mom, workspace=256)
    # Branch 3: 1x1 -> 1x7 -> 7x1 (factorized 7x7).
    b3 = Conv(data=data, num_filter=num_3_1, name=('%s_b_3' % name), suffix='_conv_1', bn_mom=bn_mom, workspace=256)
    b3 = Conv(data=b3, num_filter=num_3_2, kernel=(1, 7), pad=(0, 3), name=('%s_b_3' % name), suffix='_conv_2', bn_mom=bn_mom, workspace=256)
    b3 = Conv(data=b3, num_filter=num_3_3, kernel=(7, 1), pad=(3, 0), name=('%s_b_3' % name), suffix='_conv_3',
              withRelu=False, bn_mom=bn_mom, workspace=256)
    # Branch 4: 1x1 followed by two stacked 1x7/7x1 pairs.
    b4 = Conv(data=data, num_filter=num_4_1, name=('%s_b_4' % name), suffix='_conv_1', bn_mom=bn_mom, workspace=256)
    b4 = Conv(data=b4, num_filter=num_4_2, kernel=(1, 7), pad=(0, 3), name=('%s_b_4' % name), suffix='_conv_2', bn_mom=bn_mom, workspace=256)
    b4 = Conv(data=b4, num_filter=num_4_3, kernel=(7, 1), pad=(3, 0), name=('%s_b_4' % name), suffix='_conv_3', bn_mom=bn_mom, workspace=256)
    b4 = Conv(data=b4, num_filter=num_4_4, kernel=(1, 7), pad=(0, 3), name=('%s_b_4' % name), suffix='_conv_4', bn_mom=bn_mom, workspace=256)
    b4 = Conv(data=b4, num_filter=num_4_5, kernel=(7, 1), pad=(3, 0), name=('%s_b_4' % name), suffix='_conv_5',
              withRelu=False, bn_mom=bn_mom, workspace=256)
    # Channel-wise merge, then BN + ReLU over the concatenation.
    m = mx.sym.Concat(*[b1, b2, b3, b4], name=('%s_b_concat1' % name))
    m = mx.sym.BatchNorm(data=m, fix_gamma=False, eps=2e-5, name=('%s_b_bn1' % name))
    m = mx.sym.Activation(data=m, act_type='relu', name=('%s_b_relu1' % name))
    return m
# Output Shape is 12*12*1024
# Input Shape is 5*5*1536
def InceptionC(name, data,
               num_1_1=256,
               num_2_1=256,
               num_3_1=384, num_3_2=256, num_3_3=256,
               num_4_1=384, num_4_2=448, num_4_3=512, num_4_4=256, num_4_5=256,
               bn_mom=0.9):
    """Inception-C block: branches 3 and 4 each split into a parallel
    3x1 and 1x3 pair whose outputs are concatenated separately, giving a
    6-way channel concat in total, finished with BatchNorm + ReLU.
    All branches use stride 1 with matching padding.

    NOTE(review): bn_mom is only forwarded to Conv; the trailing
    BatchNorm uses MXNet's default momentum -- confirm this is intended.
    """
    # Branch 1: 3x3 average pool + 1x1 projection.
    c1 = mx.sym.Pooling(data=data, kernel=(3, 3), stride=(1, 1), pad=(1, 1), pool_type='avg',
                        name=('%s_%s_pool1' % ('avg', name)))
    c1 = Conv(data=c1, num_filter=num_1_1, name=('%s_c_1' % name), suffix='_conv', withRelu=False, bn_mom=bn_mom, workspace=256)
    # Branch 2: plain 1x1 convolution.
    c2 = Conv(data=data, num_filter=num_2_1, name=('%s_c_2' % name), suffix='_conv', withRelu=False, bn_mom=bn_mom, workspace=256)
    # Branch 3: 1x1, then a parallel 3x1 / 1x3 split.
    c3 = Conv(data=data, num_filter=num_3_1, name=('%s_c_3' % name), suffix='_conv_1', bn_mom=bn_mom, workspace=256)
    c3_1 = Conv(data=c3, num_filter=num_3_2, kernel=(3, 1), pad=(1, 0), name=('%s_c_3' % name), suffix='_conv_1_1',
                withRelu=False, bn_mom=bn_mom, workspace=256)
    c3_2 = Conv(data=c3, num_filter=num_3_3, kernel=(1, 3), pad=(0, 1), name=('%s_c_3' % name), suffix='_conv_1_2',
                withRelu=False, bn_mom=bn_mom, workspace=256)
    # Branch 4: 1x1 -> 1x3 -> 3x1, then a parallel 3x1 / 1x3 split.
    c4 = Conv(data=data, num_filter=num_4_1, name=('%s_c_4' % name), suffix='_conv_1', bn_mom=bn_mom, workspace=256)
    c4 = Conv(data=c4, num_filter=num_4_2, kernel=(1, 3), pad=(0, 1), name=('%s_c_4' % name), suffix='_conv_2', bn_mom=bn_mom, workspace=256)
    c4 = Conv(data=c4, num_filter=num_4_3, kernel=(3, 1), pad=(1, 0), name=('%s_c_4' % name), suffix='_conv_3', bn_mom=bn_mom, workspace=256)
    c4_1 = Conv(data=c4, num_filter=num_4_4, kernel=(3, 1), pad=(1, 0), name=('%s_c_4' % name), suffix='_conv_3_1',
                withRelu=False, bn_mom=bn_mom, workspace=256)
    c4_2 = Conv(data=c4, num_filter=num_4_5, kernel=(1, 3), pad=(0, 1), name=('%s_c_4' % name), suffix='_conv_3_2',
                withRelu=False, bn_mom=bn_mom, workspace=256)
    # 6-way channel-wise merge, then BN + ReLU.
    m = mx.sym.Concat(*[c1, c2, c3_1, c3_2, c4_1, c4_2], name=('%s_c_concat1' % name))
    m = mx.sym.BatchNorm(data=m, fix_gamma=False, eps=2e-5, name=('%s_c_bn1' % name))
    m = mx.sym.Activation(data=m, act_type='relu', name=('%s_c_relu1' % name))
    return m
# Output Shape is 5*5*1536
# Input Shape is 25*25*384
def ReductionA(name, data,
               num_2_1=384,
               num_3_1=192, num_3_2=224, num_3_3=256,
               bn_mom=0.9):
    """Reduction-A block: three stride-2 branches (max-pool, single 3x3,
    and 1x1->3x3->3x3) concatenated on the channel axis, then BatchNorm
    and ReLU. Halves the spatial resolution.
    """
    # Branch 1: parameter-free stride-2 max pooling.
    pool_branch = mx.sym.Pooling(data=data, kernel=(3, 3), stride=(2, 2), pad=(0, 0), pool_type='max', name=('%s_%s_pool1' % ('max', name)))
    # Branch 2: single stride-2 3x3 convolution.
    conv_branch = Conv(data=data, num_filter=num_2_1, kernel=(3, 3), stride=(2, 2), name=('%s_ra_2' % name), suffix='_conv',
                       withRelu=False, bn_mom=bn_mom, workspace=256)
    # Branch 3: 1x1 reduction -> 3x3 -> stride-2 3x3.
    deep_branch = Conv(data=data, num_filter=num_3_1, name=('%s_ra_3' % name), suffix='_conv_1', bn_mom=bn_mom, workspace=256)
    deep_branch = Conv(data=deep_branch, num_filter=num_3_2, kernel=(3, 3), pad=(1, 1), name=('%s_ra_3' % name),
                       suffix='_conv_2', bn_mom=bn_mom, workspace=256)
    deep_branch = Conv(data=deep_branch, num_filter=num_3_3, kernel=(3, 3), stride=(2, 2), name=('%s_ra_3' % name), suffix='_conv_3',
                       withRelu=False, bn_mom=bn_mom, workspace=256)
    # Merge branches channel-wise, then BN + ReLU.
    out = mx.sym.Concat(pool_branch, conv_branch, deep_branch, name=('%s_ra_concat1' % name))
    out = mx.sym.BatchNorm(data=out, fix_gamma=False, eps=2e-5, name=('%s_ra_bn1' % name))
    out = mx.sym.Activation(data=out, act_type='relu', name=('%s_ra_relu1' % name))
    return out
# Output Shape is 12*12*1024
# Input Shape is 12*12*1024
def ReductionB(name, data,
               num_2_1=192, num_2_2=192,
               num_3_1=256, num_3_2=256, num_3_3=320, num_3_4=320,
               bn_mom=0.9):
    """Reduction-B block: three stride-2 branches (max-pool, 1x1->3x3,
    and 1x1->1x7->7x1->3x3) concatenated on the channel axis, then
    BatchNorm and ReLU. Halves the spatial resolution.

    NOTE(review): bn_mom is only forwarded to Conv; the trailing
    BatchNorm uses MXNet's default momentum -- confirm this is intended.
    """
    # Branch 1: parameter-free stride-2 max pooling.
    rb1 = mx.sym.Pooling(data=data, kernel=(3, 3), stride=(2, 2), pad=(0, 0), pool_type='max', name=('%s_%s_pool1' % ('max', name)))
    # Branch 2: 1x1 reduction then stride-2 3x3.
    rb2 = Conv(data=data, num_filter=num_2_1, name=('%s_rb_2' % name), suffix='_conv_1', bn_mom=bn_mom, workspace=256)
    rb2 = Conv(data=rb2, num_filter=num_2_2, kernel=(3, 3), stride=(2, 2), name=('%s_rb_2' % name), suffix='_conv_2',
               withRelu=False, bn_mom=bn_mom, workspace=256)
    # Branch 3: 1x1 -> factorized 7x7 (1x7 then 7x1) -> stride-2 3x3.
    rb3 = Conv(data=data, num_filter=num_3_1, name=('%s_rb_3' % name), suffix='_conv_1', bn_mom=bn_mom, workspace=256)
    rb3 = Conv(data=rb3, num_filter=num_3_2, kernel=(1, 7), pad=(0, 3), name=('%s_rb_3' % name), suffix='_conv_2', bn_mom=bn_mom, workspace=256)
    rb3 = Conv(data=rb3, num_filter=num_3_3, kernel=(7, 1), pad=(3, 0), name=('%s_rb_3' % name), suffix='_conv_3', bn_mom=bn_mom, workspace=256)
    rb3 = Conv(data=rb3, num_filter=num_3_4, kernel=(3, 3), stride=(2, 2), name=('%s_rb_3' % name), suffix='_conv_4',
               withRelu=False, bn_mom=bn_mom, workspace=256)
    # Merge branches channel-wise, then BN + ReLU.
    m = mx.sym.Concat(*[rb1, rb2, rb3], name=('%s_rb_concat1' % name))
    m = mx.sym.BatchNorm(data=m, fix_gamma=False, eps=2e-5, name=('%s_rb_bn1' % name))
    m = mx.sym.Activation(data=m, act_type='relu', name=('%s_rb_relu1' % name))
    return m
# Output Shape is 5*5*1536
# Squeeze and excitation block
def squeeze_excitation_block(name, data, num_filter, ratio):
    """Squeeze-and-Excitation block: reweights each of `num_filter`
    channels of `data` by a learned, globally-pooled sigmoid gate.

    ratio sizes the bottleneck FC layer as int(num_filter * ratio).
    """
    # Squeeze: global average pooling to one value per channel.
    pooled = mx.sym.Pooling(data=data, global_pool=True, kernel=(7, 7), pool_type='avg', name=name + '_squeeze')
    pooled = mx.sym.Flatten(data=pooled, name=name + '_flatten')
    # Excitation: bottleneck FC -> ReLU -> FC back to num_filter -> sigmoid.
    gate = mx.sym.FullyConnected(data=pooled, num_hidden=int(num_filter * ratio), name=name + '_excitation1')
    gate = mx.sym.Activation(data=gate, act_type='relu', name=name + '_excitation1_relu')
    gate = mx.sym.FullyConnected(data=gate, num_hidden=num_filter, name=name + '_excitation2')
    gate = mx.sym.Activation(data=gate, act_type='sigmoid', name=name + '_excitation2_sigmoid')
    # Scale: broadcast the per-channel gate over the spatial dimensions.
    return mx.sym.broadcast_mul(data, mx.sym.reshape(data=gate, shape=(-1, num_filter, 1, 1)))
def circle_in4a(name, data, ratio,
                num_1_1=96,
                num_2_1=96,
                num_3_1=64, num_3_2=96,
                num_4_1=64, num_4_2=96, num_4_3=96,
                bn_mom=0.9,
                round=4):
    """Stack `round` InceptionA blocks, each followed by a
    squeeze-excitation block sized to the InceptionA output channels.

    `round` shadows the builtin but is kept for interface compatibility.
    Returns the symbol after the final SE block.
    """
    in4a = data
    # range() instead of xrange(): identical iteration here, and keeps
    # the code working on both Python 2 and Python 3.
    for i in range(round):
        in4a = InceptionA(name + ('_%d' % i),
                          in4a,
                          num_1_1,
                          num_2_1,
                          num_3_1, num_3_2,
                          num_4_1, num_4_2, num_4_3,
                          bn_mom)
        # Infer the block's output channel count so the SE block can size
        # its fully-connected layers. Assumes a 1x3x224x224 input --
        # TODO confirm against the network's actual input resolution.
        _, out_shapes, _ = in4a.infer_shape(data=(1, 3, 224, 224))
        num_filter = int(out_shapes[0][1])
        in4a = squeeze_excitation_block(name + ('_%d' % i), in4a, num_filter, ratio)
    return in4a
def circle_in7b(name, data, ratio,
                num_1_1=128,
                num_2_1=384,
                num_3_1=192, num_3_2=224, num_3_3=256,
                num_4_1=192, num_4_2=192, num_4_3=224, num_4_4=224, num_4_5=256,
                bn_mom=0.9,
                round=7):
    """Stack `round` InceptionB blocks, each followed by a
    squeeze-excitation block sized to the InceptionB output channels.

    `round` shadows the builtin but is kept for interface compatibility.
    Returns the symbol after the final SE block.
    """
    in7b = data
    # range() instead of xrange(): identical iteration here, and keeps
    # the code working on both Python 2 and Python 3.
    for i in range(round):
        in7b = InceptionB(name + ('_%d' % i),
                          in7b,
                          num_1_1,
                          num_2_1,
                          num_3_1, num_3_2, num_3_3,
                          num_4_1, num_4_2, num_4_3, num_4_4, num_4_5,
                          bn_mom)
        # Infer the block's output channel count so the SE block can size
        # its fully-connected layers. Assumes a 1x3x224x224 input --
        # TODO confirm against the network's actual input resolution.
        _, out_shapes, _ = in7b.infer_shape(data=(1, 3, 224, 224))
        num_filter = int(out_shapes[0][1])
        in7b = squeeze_excitation_block(name + ('_%d' % i), in7b, num_filter, ratio)
    return in7b
def circle_in3c(name, data, ratio,
                num_1_1=256,
                num_2_1=256,
                num_3_1=384, num_3_2=256, num_3_3=256,
                num_4_1=384, num_4_2=448, num_4_3=512, num_4_4=256, num_4_5=256,
                bn_mom=0.9,
                round=3):
    """Stack `round` InceptionC blocks, each followed by a
    squeeze-excitation block sized to the InceptionC output channels.

    `round` shadows the builtin but is kept for interface compatibility.
    Returns the symbol after the final SE block.
    """
    in3c = data
    # range() instead of xrange(): identical iteration here, and keeps
    # the code working on both Python 2 and Python 3.
    for i in range(round):
        in3c = InceptionC(name + ('_%d' % i),
                          in3c,
                          num_1_1,
                          num_2_1,
                          num_3_1, num_3_2, num_3_3,
                          num_4_1, num_4_2, num_4_3, num_4_4, num_4_5,
                          bn_mom)
        # Infer the block's output channel count so the SE block can size
        # its fully-connected layers. Assumes a 1x3x224x224 input --
        # TODO confirm against the network's actual input resolution.
        _, out_shapes, _ = in3c.infer_shape(data=(1, 3, 224, 224))
        num_filter = int(out_shapes[0][1])
        in3c = squeeze_excitation_block(name + ('_%d' % i), in3c, num_filter, ratio)
    return in3c
# create SE inception-v4
def get_symbol(ratio, num_classes=1000):
    """Build the full SE-Inception-v4 symbol.

    ratio: squeeze-excitation bottleneck ratio, forwarded to every
        circle_in* stage.
    num_classes: size of the final fully-connected / softmax layer.
    Returns the SoftmaxOutput symbol.
    """
    # input shape 229*229*3 (old)
    # input shape 224*224*3 (new)
    data = mx.symbol.Variable(name="data")
    bn_mom = 0.9
    # import pdb
    # pdb.set_trace()
    # stage stem
    (num_1_1, num_1_2, num_1_3) = (32, 32, 64)
    num_2_1 = 96
    (num_3_1, num_3_2) = (64, 96)
    (num_4_1, num_4_2, num_4_3, num_4_4) = (64, 64, 64, 96)
    num_5_1 = 192
    in_stem = inception_stem('stem_stage', data,
                             num_1_1, num_1_2, num_1_3,
                             num_2_1,
                             num_3_1, num_3_2,
                             num_4_1, num_4_2, num_4_3, num_4_4,
                             num_5_1,
                             bn_mom)
    # stage 4 x InceptionA
    num_1_1 = 96
    num_2_1 = 96
    (num_3_1, num_3_2) = (64, 96)
    (num_4_1, num_4_2, num_4_3) = (64, 96, 96)
    in4a = circle_in4a('in4a',
                       in_stem,
                       ratio,
                       num_1_1,
                       num_2_1,
                       num_3_1, num_3_2,
                       num_4_1, num_4_2, num_4_3,
                       bn_mom,
                       4)
    # stage ReductionA
    # NOTE(review): the local names below map POSITIONALLY onto
    # ReductionA's (num_2_1, num_3_1, num_3_2, num_3_3) parameters;
    # the num_1_1/num_2_* naming here is misleading but harmless.
    num_1_1 = 384
    (num_2_1, num_2_2, num_2_3) = (192, 224, 256)
    re_a = ReductionA('re_a', in4a,
                      num_1_1,
                      num_2_1, num_2_2, num_2_3,
                      bn_mom)
    # stage 7 x InceptionB
    num_1_1 = 128
    num_2_1 = 384
    (num_3_1, num_3_2, num_3_3) = (192, 224, 256)
    (num_4_1, num_4_2, num_4_3, num_4_4, num_4_5) = (192, 192, 224, 224, 256)
    in7b = circle_in7b('in7b', re_a, ratio,
                       num_1_1,
                       num_2_1,
                       num_3_1, num_3_2, num_3_3,
                       num_4_1, num_4_2, num_4_3, num_4_4, num_4_5,
                       bn_mom,
                       7)
    # stage ReductionB
    # NOTE(review): as above, these map positionally onto ReductionB's
    # (num_2_1, num_2_2, num_3_1..num_3_4) parameters.
    (num_1_1, num_1_2) = (192, 192)
    (num_2_1, num_2_2, num_2_3, num_2_4) = (256, 256, 320, 320)
    re_b = ReductionB('re_b', in7b,
                      num_1_1, num_1_2,
                      num_2_1, num_2_2, num_2_3, num_2_4,
                      bn_mom)
    # stage 3 x InceptionC
    num_1_1 = 256
    num_2_1 = 256
    (num_3_1, num_3_2, num_3_3) = (384, 256, 256)
    (num_4_1, num_4_2, num_4_3, num_4_4, num_4_5) = (384, 448, 512, 256, 256)
    in3c = circle_in3c('in3c', re_b, ratio,
                       num_1_1,
                       num_2_1,
                       num_3_1, num_3_2, num_3_3,
                       num_4_1, num_4_2, num_4_3, num_4_4, num_4_5,
                       bn_mom,
                       3)
    # stage Average Pooling
    #pool = mx.sym.Pooling(data=in3c, kernel=(8, 8), stride=(1, 1), pool_type="avg", name="global_pool")
    pool = mx.sym.Pooling(data=in3c, global_pool=True, kernel=(5, 5), stride=(1, 1), pad=(0, 0), pool_type="avg", name="global_pool")
    # stage Dropout
    #dropout = mx.sym.Dropout(data=pool, p=0.5) #modified for vggface data
    dropout = mx.sym.Dropout(data=pool, p=0.2) #original
    # dropout = mx.sym.Dropout(data=pool, p=0.8)
    flatten = mx.sym.Flatten(data=dropout)
    # output
    fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=num_classes, name='fc1')
    softmax = mx.symbol.SoftmaxOutput(data=fc1, name='softmax')
    return softmax
# if __name__ == '__main__':
# net = get_symbol(1000)
# shape = {'softmax_label': (32, 1000), 'data': (32, 3, 299, 299)}
# mx.viz.plot_network(net, title='inception-v4', format='png', shape=shape).render('inception-v4')
|
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Jobs for queries personalized to individual users."""
import ast
from core import jobs
from core.domain import rights_manager
from core.domain import subscription_services
from core.domain import user_services
from core.platform import models
import utils
(exp_models, collection_models, feedback_models, user_models) = (
models.Registry.import_models([
models.NAMES.exploration, models.NAMES.collection,
models.NAMES.feedback, models.NAMES.user]))
class UserContributionsOneOffJob(jobs.BaseMapReduceJobManager):
    """One-off job for creating and populating UserContributionsModels for
    all registered users that have contributed.
    """
    @classmethod
    def entity_classes_to_map_over(cls):
        # Iterate over every exploration commit record.
        return [exp_models.ExplorationSnapshotMetadataModel]
    @staticmethod
    def map(item):
        # Emit one (committer, commit-info) pair per exploration commit.
        yield (item.committer_id, {
            'exploration_id': item.get_unversioned_instance_id(),
            'version_string': item.get_version_string(),
        })
    @staticmethod
    def reduce(key, version_and_exp_ids):
        """Aggregate a single user's commits into created/edited id sets.

        key: the committer's user id.
        version_and_exp_ids: stringified dicts emitted by map(); parsed
            back with ast.literal_eval.
        """
        created_exploration_ids = set()
        edited_exploration_ids = set()
        edits = [ast.literal_eval(v) for v in version_and_exp_ids]
        for edit in edits:
            edited_exploration_ids.add(edit['exploration_id'])
            # A commit at version 1 is the creation of the exploration.
            if edit['version_string'] == '1':
                created_exploration_ids.add(edit['exploration_id'])
        # Update the existing contributions model if one exists,
        # otherwise create it.
        if user_services.get_user_contributions(key, strict=False) is not None:
            user_services.update_user_contributions(
                key, list(created_exploration_ids), list(
                    edited_exploration_ids))
        else:
            user_services.create_user_contributions(
                key, list(created_exploration_ids), list(
                    edited_exploration_ids))
class DashboardSubscriptionsOneOffJob(jobs.BaseMapReduceJobManager):
    """One-off job for subscribing users to explorations, collections, and
    feedback threads.
    """
    @classmethod
    def entity_classes_to_map_over(cls):
        return [
            exp_models.ExplorationRightsModel,
            collection_models.CollectionRightsModel,
            feedback_models.FeedbackMessageModel,
        ]
    @staticmethod
    def map(item):
        """Emit (user_id, {'type': ..., 'id': ...}) pairs for each
        activity or thread the user should be subscribed to.

        For community-owned activities, ownership/editorship is recovered
        by walking every historical version of the rights model.
        """
        if isinstance(item, feedback_models.FeedbackMessageModel):
            # Subscribe message authors to their feedback threads.
            if item.author_id:
                yield (item.author_id, {
                    'type': 'feedback',
                    'id': item.thread_id
                })
        elif isinstance(item, exp_models.ExplorationRightsModel):
            if item.deleted:
                return
            if not item.community_owned:
                for owner_id in item.owner_ids:
                    yield (owner_id, {
                        'type': 'exploration',
                        'id': item.id
                    })
                for editor_id in item.editor_ids:
                    yield (editor_id, {
                        'type': 'exploration',
                        'id': item.id
                    })
            else:
                # Go through the history.
                current_version = item.version
                for version in range(1, current_version + 1):
                    model = exp_models.ExplorationRightsModel.get_version(
                        item.id, version)
                    if not model.community_owned:
                        for owner_id in model.owner_ids:
                            yield (owner_id, {
                                'type': 'exploration',
                                'id': item.id
                            })
                        for editor_id in model.editor_ids:
                            yield (editor_id, {
                                'type': 'exploration',
                                'id': item.id
                            })
        elif isinstance(item, collection_models.CollectionRightsModel):
            # NOTE TO DEVELOPERS: Although the code handling subscribing to
            # collections is very similar to the code above for explorations,
            # it is not abstracted out due to the majority of the coding being
            # yield statements. These must happen inside the generator method
            # (which is this method) and, as a result, there is little common
            # code between the two code blocks which can be effectively
            # abstracted.
            if item.deleted:
                return
            if not item.community_owned:
                for owner_id in item.owner_ids:
                    yield (owner_id, {
                        'type': 'collection',
                        'id': item.id
                    })
                for editor_id in item.editor_ids:
                    yield (editor_id, {
                        'type': 'collection',
                        'id': item.id
                    })
            else:
                # Go through the history.
                current_version = item.version
                for version in range(1, current_version + 1):
                    model = (
                        collection_models.CollectionRightsModel.get_version(
                            item.id, version))
                    if not model.community_owned:
                        for owner_id in model.owner_ids:
                            yield (owner_id, {
                                'type': 'collection',
                                'id': item.id
                            })
                        for editor_id in model.editor_ids:
                            yield (editor_id, {
                                'type': 'collection',
                                'id': item.id
                            })
    @staticmethod
    def reduce(key, stringified_values):
        """Create the subscriptions emitted by map() for user `key`.

        stringified_values: stringified dicts from map(); parsed back
            with ast.literal_eval.
        """
        values = [ast.literal_eval(v) for v in stringified_values]
        for item in values:
            if item['type'] == 'feedback':
                subscription_services.subscribe_to_thread(key, item['id'])
            elif item['type'] == 'exploration':
                subscription_services.subscribe_to_exploration(key, item['id'])
            elif item['type'] == 'collection':
                subscription_services.subscribe_to_collection(key, item['id'])
class DashboardStatsOneOffJob(jobs.BaseMapReduceJobManager):
    """One-off job for populating weekly dashboard stats for all registered
    users who have a non-None value of UserStatsModel.
    """
    @classmethod
    def entity_classes_to_map_over(cls):
        return [user_models.UserSettingsModel]
    @staticmethod
    def map(item):
        # The update happens entirely as a side effect of map(); nothing
        # is yielded, so reduce() is never invoked.
        user_services.update_dashboard_stats_log(item.id)
    @staticmethod
    def reduce(key, stringified_values):
        # Fixed: reduce previously took a single `item` argument, which
        # does not match the (key, stringified_values) signature used by
        # the MapReduce framework and every sibling job in this file.
        # It is a no-op because map() yields nothing.
        pass
class UserFirstContributionMsecOneOffJob(jobs.BaseMapReduceJobManager):
    """One-off job that updates first contribution time in milliseconds for
    current users. This job makes the assumption that once an exploration is
    published, it remains published. This job is not completely precise in that
    (1) we ignore explorations that have been published in the past but are now
    unpublished, and (2) commits that were made during an interim unpublished
    period are counted against the first publication date instead of the second
    publication date.
    """
    @classmethod
    def entity_classes_to_map_over(cls):
        # Iterate over every exploration-rights commit record.
        return [exp_models.ExplorationRightsSnapshotMetadataModel]
    @staticmethod
    def map(item):
        """Emit (committer_id, candidate contribution time in msec) for
        commits to currently-published explorations.
        """
        exp_id = item.get_unversioned_instance_id()
        exp_rights = rights_manager.get_exploration_rights(
            exp_id, strict=False)
        if exp_rights is None:
            return
        exp_first_published_msec = exp_rights.first_published_msec
        # First contribution time in msec is only set from contributions to
        # explorations that are currently published.
        if not rights_manager.is_exploration_private(exp_id):
            created_on_msec = utils.get_time_in_millisecs(item.created_on)
            # max() clamps commits made before publication to the first
            # publication date (see the class docstring, point 2).
            yield (
                item.committer_id,
                max(exp_first_published_msec, created_on_msec)
            )
    @staticmethod
    def reduce(user_id, stringified_commit_times_msec):
        """Record the earliest candidate time as the user's first
        contribution, if not already set.
        """
        commit_times_msec = [
            ast.literal_eval(commit_time_string) for
            commit_time_string in stringified_commit_times_msec]
        first_contribution_msec = min(commit_times_msec)
        user_services.update_first_contribution_msec_if_not_set(
            user_id, first_contribution_msec)
class UserProfilePictureOneOffJob(jobs.BaseMapReduceJobManager):
    """One-off job that updates profile pictures for users which do not
    currently have them. Users who already have profile pictures are
    unaffected.
    """
    @classmethod
    def entity_classes_to_map_over(cls):
        return [user_models.UserSettingsModel]
    @staticmethod
    def map(item):
        # Skip deleted users and users who already have a picture.
        if item.deleted:
            return
        if item.profile_picture_data_url is not None:
            return
        user_services.generate_initial_profile_picture(item.id)
    @staticmethod
    def reduce(key, stringified_values):
        # Nothing to aggregate: map() performs the update as a side
        # effect and yields nothing.
        pass
|
|
#!/usr/bin/env python2
# coding: utf-8
# RSHELL is a remote interactive access to Stash (a bash like shell for Pythonista).
# It is based on Guido Wesdorp's work on his remote interactive shell (ripshell).
from __future__ import print_function
import socket
import sys
import traceback
import thread
import time
class config:
    # Default connection settings. server_ip is overwritten with the
    # target address when run as a client; port may be overridden via -p.
    server_ip = '0.0.0.0'
    port = 10101
__version__ = '1.0'  # tool version, interpolated into the server banner
# get the local ip address of the current device
def get_local_ip_addr():
    """Return the local IP address of the current device.

    "Connects" a UDP socket to a public address (for UDP no packet is
    actually sent) and reads back the local endpoint the routing table
    selected. Raises socket.error if no route is available.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('8.8.8.8', 80))  # pick the outbound interface via routing
        return s.getsockname()[0]
    finally:
        # Fixed: previously the socket leaked if connect() raised.
        s.close()
class STDFilePointers:
    """File-like adapter that redirects std stream reads/writes over a
    socket connection, so sys.stdin/stdout/stderr can be pointed at a
    remote client.
    """
    def __init__(self, conn):
        # conn: a connected socket object.
        self.conn = conn
    def write(self, s):
        # Forward writes straight to the peer.
        self.conn.send(s)
    def read(self, l):
        """Read up to l bytes from the peer.

        Returns a single space when recv() yields nothing (peer closed),
        presumably to keep callers from looping on empty reads --
        NOTE(review): confirm this sentinel is intended.
        """
        r = self.conn.recv(l)
        if r:
            return r
        return ' '
    def readlines(self):
        """Read one newline-terminated line from the peer.

        Despite the name, this returns a single line (with its trailing
        newline), reading byte by byte.
        """
        data = []
        while 1:
            c = self.read(1)
            if c == '\n':
                line = ''.join(data)
                # well..here a little hack in order to intercept a quit command from the client....
                if line == 'quit':
                    raise SystemExit('quit')
                line += '\n'
                return line
            data.append(c)
#
# The server
#
# Launch Pythonista on your iOS device
# Execute launch_stash.py
# Type rshell.py
# Look at the printed ip address on the console ;o)
#
class RSHELLServer:
    """Blocking TCP server that exposes the local StaSh shell to one
    remote client at a time. Constructing the object runs the accept
    loop until a client sends 'quit'.
    """
    banner = ( 'RShell Server v%s\n'
               'Type "help", "version" for more information.\n\n'
               '**To stop the server: quit\n'
               % ( __version__)
               )
    # open a socket and start waiting for connections
    def __init__(self, config):
        self.config = config
        self.sock = s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.bind((config.server_ip, config.port))
            s.listen(1)
            # Serve clients sequentially; SystemExit('quit') from a
            # handler propagates through and shuts the server down.
            while 1:
                conn, addr = s.accept()
                print('Connection from', addr)
                self.handle(conn, addr)
                conn.close()
                print('Connection closed')
        finally:
            print('Closing')
            s.close()
    # handle a new connection
    def handle(self, conn, addr):
        """Serve one client: redirect std streams over the socket,
        dispatch on the first command byte, and always restore the
        original streams afterwards.
        """
        backup_stdin = sys.stdin
        backup_stdout = sys.stdout
        backup_stderr = sys.stderr
        stdfps = STDFilePointers(conn)
        sys.stdin = stdfps
        sys.stdout = stdfps
        sys.stderr = stdfps
        try:
            try:
                command = conn.recv(1)
                # dispatch depending on command (first char sent should be '-'
                # for the interactive interpreter loop, 'x' for executing code)
                if command == '-':
                    self.interpreterloop(conn, addr)
                else:
                    print('Unexpected input, exiting...')
            except SystemExit as e:
                # raise a SystemExit with message 'quit' to stop the server
                # from a client
                if str(e) == 'quit':
                    print('Stopping server') # this string will be intercepted by the client...
                    raise # kill the server
                print('SystemExit')
            except:
                # Report any other error to the client (via the redirected
                # streams) and to the real console, but keep the server up.
                exc, e, tb = sys.exc_info()
                try:
                    print('%s - %s' % (exc, e))
                    print('\n'.join(traceback.format_tb(tb)))
                except:
                    pass
                del tb
                print('Exception:', exc, '-', e, file=sys.__stdout__)
        finally:
            sys.stdin = backup_stdin
            sys.stdout = backup_stdout #sys.__stdout__
            sys.stderr = backup_stderr #sys.__stderr__
    # interpreter loop
    def interpreterloop(self, conn, addr):
        """Feed lines from the client into StaSh until it disconnects.

        Assumes StaSh has injected `_stash` into this module's globals
        (i.e. the script is run from inside StaSh) -- TODO confirm.
        """
        _stash = globals()['_stash']
        _, current_state = _stash.runtime.get_current_worker_and_state()
        print(self.banner)
        while (1):
            # sys.stdin is the socket adapter here; readlines() yields
            # one command line at a time.
            _stash(sys.stdin.readlines(), persistent_level=1)
            time.sleep(0.5)
#
# The Client
#
# On your desktop, execute rshell.py ###.###.###.### (with the ip address of your server)
# Then, you can type stash commands on the console and wait for their completion on your iOS device.
# Their output is automatically displayed on your console.
# Your desktop and your iOS device need to be on the same local network.
#
class RSHELLClient:
    """Blocking TCP client: forwards console keystrokes to the server and
    prints the server's output, line-buffered, until either side quits.
    """
    # connect to the server and start the session
    def __init__(self, server_ip, config):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.settimeout(10.0) # 10 second timeout for connect
            try:
                s.connect((server_ip, config.port))
            except socket.error as msg:
                print("Couldn't connect with the socket-server: %s" % msg)
                sys.exit(1)
            # Back to blocking mode for the interactive session.
            s.settimeout(None)
            self.interpreterloop(s)
        except SystemExit as e:
            if str(e) == 'quit':
                print('Stopping client')
                raise # kill the client
            print('SystemExit')
        finally:
            s.close()
    def interpreterloop(self, sock):
        """Start the session: keystrokes are pumped to the server on a
        background thread while this thread prints server output.
        """
        sock.send(b'-') # tell the server we want to get a prompt
        thread.start_new_thread(self.readloop, (sock,))
        self.writeloop(sock)
    def readloop(self, sock):
        # Forward local stdin to the server one character at a time;
        # stops silently once the socket goes away.
        while 1:
            try:
                sock.send(sys.stdin.read(1))
            except socket.error:
                return
        def writeloop(self, sock):
        """Receive server output byte by byte, buffer it into lines, and
        echo it to the local console.
        """
        while 1:
            c = sock.recv(1)
            if not c:
                break
            # try to decode ANSI color sequences
            # need to use a buffer and apply a string replace before display the string
            end_main_loop = False
            buffer = ""
            while c != '\n':
                buffer += c
                c = sock.recv(1)
                if not c:
                    end_main_loop = True
                    break
            if not end_main_loop:
                # here an other hack in order to intercept the end of the server
                if buffer == "Stopping server": # sent from the server
                    raise SystemExit('quit') # kill the client
                buffer += '\n'
                # Rewrite the UTF-8-encoded CSI byte pair into the ESC[
                # escape so ANSI colors render -- presumably how StaSh
                # encodes them; verify against the server output.
                buffer = buffer.replace('\xc2\x9b', '\033[')
                sys.stdout.write(buffer)
                sys.stdout.flush()
            if end_main_loop:
                break
if __name__ == '__main__':
    from optparse import OptionParser
    usage = ('usage: %prog [-p] -l | remote_server\n'
             '\n'
             'RSHELL allows you to access STASH running on a remote\n'
             'iOS device over an unencrypted socket. Use RSHELL in listening mode\n'
             '(use -l) or to connect to a RSHELL server')
    parser = OptionParser(usage)
    parser.add_option('-l', '--listen', action='store_true', dest='listen', default=False,
                      help='Listen for a remote connection')
    parser.add_option('-p', '--port', action='store', type='int', dest='port', default=config.port,
                      help='port for rshell to use')
    (options, args) = parser.parse_args()
    # Valid TCP ports are 1-65535 inclusive. (The previous check used
    # `>= 65535`, wrongly rejecting the legal port 65535.)
    if options.port <= 0 or options.port > 65535:
        parser.error('Invalid port specified')
    if options.listen:
        if len(args) > 0:
            # (Fixed garbled message: "cannot use be specified".)
            parser.error("-l cannot be specified with a hostname")
        local_ip = get_local_ip_addr()
        print('RShell server listening on %s:%d' % (local_ip, options.port))
        config.port = options.port
        # blocks until done
        RSHELLServer(config)
    elif len(args) == 1:
        config.server_ip = args[0]
        config.port = options.port
        # blocks until done
        RSHELLClient(config.server_ip, config)
    else:
        parser.error('Invalid arguments specified.')
|
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the user notification dashboard and 'my explorations' pages."""
from core.domain import feedback_services
from core.domain import rights_manager
from core.domain import user_jobs_continuous
from core.tests import test_utils
import feconf
class HomePageTest(test_utils.GenericTestBase):
    """Tests for the home page and the notifications dashboard routing."""
    def test_logged_out_homepage(self):
        """Test the logged-out version of the home page."""
        response = self.testapp.get('/')
        self.assertEqual(response.status_int, 200)
        response.mustcontain(
            'Your personal tutor',
            'Oppia - Gallery', 'About', 'Login', no=['Logout'])
    def test_notifications_dashboard_redirects_for_logged_out_users(self):
        """Test the logged-out view of the notifications dashboard."""
        response = self.testapp.get('/notifications_dashboard')
        self.assertEqual(response.status_int, 302)
        # This should redirect to the login page.
        self.assertIn('signup', response.headers['location'])
        self.assertIn('notifications_dashboard', response.headers['location'])
        self.login('reader@example.com')
        response = self.testapp.get('/notifications_dashboard')
        # This should redirect the user to complete signup.
        self.assertEqual(response.status_int, 302)
        self.logout()
    def test_logged_in_notifications_dashboard(self):
        """Test the logged-in view of the notifications dashboard."""
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.login(self.EDITOR_EMAIL)
        response = self.testapp.get('/notifications_dashboard')
        self.assertEqual(response.status_int, 200)
        response.mustcontain(
            'Notifications', 'Logout',
            self.get_expected_logout_url('/'),
            no=['Login', 'Your personal tutor',
                self.get_expected_login_url('/')])
        self.logout()
class MyExplorationsHandlerTest(test_utils.GenericTestBase):
    """Tests for the 'my explorations' data handler: which explorations
    appear for owners, collaborators and viewers, and the feedback-thread
    counts attached to them.
    """
    MY_EXPLORATIONS_DATA_URL = '/myexplorationshandler/data'
    COLLABORATOR_EMAIL = 'collaborator@example.com'
    COLLABORATOR_USERNAME = 'collaborator'
    EXP_ID = 'exp_id'
    EXP_TITLE = 'Exploration title'
    def setUp(self):
        """Register owner, collaborator and viewer users."""
        super(MyExplorationsHandlerTest, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(self.COLLABORATOR_EMAIL, self.COLLABORATOR_USERNAME)
        self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.collaborator_id = self.get_user_id_from_email(
            self.COLLABORATOR_EMAIL)
        self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)
    def test_no_explorations(self):
        """A user with no explorations gets an empty list."""
        self.login(self.OWNER_EMAIL)
        response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
        self.assertEqual(response['explorations_list'], [])
        self.logout()
    def test_managers_can_see_explorations(self):
        """Owners see their exploration across all publication states."""
        self.save_new_default_exploration(
            self.EXP_ID, self.owner_id, title=self.EXP_TITLE)
        self.set_admins([self.OWNER_EMAIL])
        self.login(self.OWNER_EMAIL)
        # Private.
        response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 1)
        self.assertEqual(
            response['explorations_list'][0]['status'],
            rights_manager.ACTIVITY_STATUS_PRIVATE)
        # Public.
        rights_manager.publish_exploration(self.owner_id, self.EXP_ID)
        response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 1)
        self.assertEqual(
            response['explorations_list'][0]['status'],
            rights_manager.ACTIVITY_STATUS_PUBLIC)
        # Publicized.
        rights_manager.publicize_exploration(self.owner_id, self.EXP_ID)
        response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 1)
        self.assertEqual(
            response['explorations_list'][0]['status'],
            rights_manager.ACTIVITY_STATUS_PUBLICIZED)
        self.logout()
    def test_collaborators_can_see_explorations(self):
        """Editors see the exploration across all publication states."""
        self.save_new_default_exploration(
            self.EXP_ID, self.owner_id, title=self.EXP_TITLE)
        rights_manager.assign_role_for_exploration(
            self.owner_id, self.EXP_ID, self.collaborator_id,
            rights_manager.ROLE_EDITOR)
        self.set_admins([self.OWNER_EMAIL])
        self.login(self.COLLABORATOR_EMAIL)
        # Private.
        response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 1)
        self.assertEqual(
            response['explorations_list'][0]['status'],
            rights_manager.ACTIVITY_STATUS_PRIVATE)
        # Public.
        rights_manager.publish_exploration(self.owner_id, self.EXP_ID)
        response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 1)
        self.assertEqual(
            response['explorations_list'][0]['status'],
            rights_manager.ACTIVITY_STATUS_PUBLIC)
        # Publicized.
        rights_manager.publicize_exploration(self.owner_id, self.EXP_ID)
        response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 1)
        self.assertEqual(
            response['explorations_list'][0]['status'],
            rights_manager.ACTIVITY_STATUS_PUBLICIZED)
        self.logout()
    def test_viewer_cannot_see_explorations(self):
        """View-only access never surfaces the exploration in the list."""
        self.save_new_default_exploration(
            self.EXP_ID, self.owner_id, title=self.EXP_TITLE)
        rights_manager.assign_role_for_exploration(
            self.owner_id, self.EXP_ID, self.viewer_id,
            rights_manager.ROLE_VIEWER)
        self.set_admins([self.OWNER_EMAIL])
        self.login(self.VIEWER_EMAIL)
        response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
        self.assertEqual(response['explorations_list'], [])
        rights_manager.publish_exploration(self.owner_id, self.EXP_ID)
        response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
        self.assertEqual(response['explorations_list'], [])
        rights_manager.publicize_exploration(self.owner_id, self.EXP_ID)
        response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
        self.assertEqual(response['explorations_list'], [])
        self.logout()
    def test_can_see_feedback_thread_counts(self):
        """Thread counts default to zero and reflect the analytics service."""
        self.save_new_default_exploration(
            self.EXP_ID, self.owner_id, title=self.EXP_TITLE)
        self.login(self.OWNER_EMAIL)
        response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 1)
        self.assertEqual(
            response['explorations_list'][0]['num_open_threads'], 0)
        self.assertEqual(
            response['explorations_list'][0]['num_total_threads'], 0)
        def mock_get_thread_analytics(unused_exploration_id):
            return {
                'num_open_threads': 2,
                'num_total_threads': 3,
            }
        # Swap in fake analytics and check the counts are passed through.
        with self.swap(
            feedback_services, 'get_thread_analytics',
            mock_get_thread_analytics):
            response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
            self.assertEqual(len(response['explorations_list']), 1)
            self.assertEqual(
                response['explorations_list'][0]['num_open_threads'], 2)
            self.assertEqual(
                response['explorations_list'][0]['num_total_threads'], 3)
        self.logout()
class NotificationsDashboardHandlerTest(test_utils.GenericTestBase):
    """Tests for the notifications dashboard data handler."""

    DASHBOARD_DATA_URL = '/notificationsdashboardhandler/data'

    def setUp(self):
        super(NotificationsDashboardHandlerTest, self).setUp()
        self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
        self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)

    def _make_notification(self, author_id):
        """Builds a single feedback-message notification dict."""
        return {
            'activity_id': 'exp_id',
            'activity_title': 'exp_title',
            'author_id': author_id,
            'last_updated_ms': 100000,
            'subject': 'Feedback Message Subject',
            'type': feconf.UPDATE_TYPE_FEEDBACK_MESSAGE,
        }

    def _get_recent_notifications_mock_by_viewer(self, unused_user_id):
        """Returns a single feedback thread by VIEWER_ID."""
        return (100000, [self._make_notification(self.viewer_id)])

    def _get_recent_notifications_mock_by_anonymous_user(self, unused_user_id):
        """Returns a single feedback thread by an anonymous user."""
        return (200000, [self._make_notification(None)])

    def test_author_ids_are_handled_correctly(self):
        """Test that author ids are converted into author usernames
        and that anonymous authors are handled correctly.
        """
        with self.swap(
            user_jobs_continuous.DashboardRecentUpdatesAggregator,
            'get_recent_notifications',
            self._get_recent_notifications_mock_by_viewer):
            self.login(self.VIEWER_EMAIL)
            response = self.get_json(self.DASHBOARD_DATA_URL)
            self.assertEqual(len(response['recent_notifications']), 1)
            self.assertEqual(
                response['recent_notifications'][0]['author_username'],
                self.VIEWER_USERNAME)
            # The raw author id must never be exposed to the frontend.
            self.assertNotIn('author_id', response['recent_notifications'][0])

        with self.swap(
            user_jobs_continuous.DashboardRecentUpdatesAggregator,
            'get_recent_notifications',
            self._get_recent_notifications_mock_by_anonymous_user):
            self.login(self.VIEWER_EMAIL)
            response = self.get_json(self.DASHBOARD_DATA_URL)
            self.assertEqual(len(response['recent_notifications']), 1)
            self.assertEqual(
                response['recent_notifications'][0]['author_username'], '')
            self.assertNotIn('author_id', response['recent_notifications'][0])
|
|
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script is intended for use as a GYP_GENERATOR. It takes as input (by way of
the generator flag config_path) the path of a json file that dictates the files
and targets to search for. The following keys are supported:
files: list of paths (relative) of the files to search for.
test_targets: unqualified target names to search for. Any target in this list
that depends upon a file in |files| is output regardless of the type of target
or chain of dependencies.
additional_compile_targets: Unqualified targets to search for in addition to
test_targets. Targets in the combined list that depend upon a file in |files|
are not necessarily output. For example, if the target is of type none then the
target is not output (but one of the descendants of the target will be).
The following is output:
error: only supplied if there is an error.
compile_targets: minimal set of targets that directly or indirectly (for
targets of type none) depend on the files in |files| and is one of the
supplied targets or a target that one of the supplied targets depends on.
The expectation is this set of targets is passed into a build step. This list
always contains the output of test_targets as well.
test_targets: set of targets from the supplied |test_targets| that either
directly or indirectly depend upon a file in |files|. This list is useful
if additional processing needs to be done for certain targets after the
build, such as running tests.
status: outputs one of three values: none of the supplied files were found,
one of the include files changed so that it should be assumed everything
changed (in this case test_targets and compile_targets are not output) or at
least one file was found.
invalid_targets: list of supplied targets that were not found.
Example:
Consider a graph like the following:
A D
/ \
B C
A depends upon both B and C, A is of type none and B and C are executables.
D is an executable, has no dependencies and nothing depends on it.
If |additional_compile_targets| = ["A"], |test_targets| = ["B", "C"] and
files = ["b.cc", "d.cc"] (B depends upon b.cc and D depends upon d.cc), then
the following is output:
|compile_targets| = ["B"] B must be built as it depends upon the changed file b.cc
and the supplied target A depends upon it. A is not output as a build_target
as it is of type none with no rules and actions.
|test_targets| = ["B"] B directly depends upon the changed file b.cc.
Even though the file d.cc, which D depends upon, has changed D is not output
as it was not supplied by way of |additional_compile_targets| or |test_targets|.
If the generator flag analyzer_output_path is specified, output is written
there. Otherwise output is written to stdout.
In Gyp the "all" target is shorthand for the root targets in the files passed
to gyp. For example, if file "a.gyp" contains targets "a1" and
"a2", and file "b.gyp" contains targets "b1" and "b2" and "a2" has a dependency
on "b2" and gyp is supplied "a.gyp" then "all" consists of "a1" and "a2".
Notice that "b1" and "b2" are not in the "all" target as "b.gyp" was not
directly supplied to gyp. OTOH if both "a.gyp" and "b.gyp" are supplied to gyp
then the "all" target includes "b1" and "b2".
"""
from __future__ import print_function
import json
import os
import posixpath
import gyp.msvs_emulation
import gyp.common
# Set True for verbose tracing of the analysis.
debug = False

# Status strings emitted in the 'status' field of the JSON output.
found_dependency_string = 'Found dependency'
no_dependency_string = 'No dependencies'
# Status when it should be assumed that everything has changed.
all_changed_string = 'Found dependency (all)'

# MatchStatus is used indicate if and how a target depends upon the supplied
# sources.
# The target's sources contain one of the supplied paths.
MATCH_STATUS_MATCHES = 1
# The target has a dependency on another target that contains one of the
# supplied paths.
MATCH_STATUS_MATCHES_BY_DEPENDENCY = 2
# The target's sources weren't in the supplied paths and none of the target's
# dependencies depend upon a target that matched.
MATCH_STATUS_DOESNT_MATCH = 3
# The target doesn't contain the source, but the dependent targets have not yet
# been visited to determine a more specific status yet.
MATCH_STATUS_TBD = 4

generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested()

generator_wants_static_library_dependencies_adjusted = False

# This generator emits analysis results rather than build files, so the
# default variables only need recognizable placeholder values.
generator_default_variables = {
}
# Directory placeholders use '!!!' so _AddSources() can detect and skip
# paths that still contain them.
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
                'LIB_DIR', 'SHARED_LIB_DIR']:
  generator_default_variables[dirname] = '!!!'

for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
               'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
               'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
               'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
               'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
               'CONFIGURATION_NAME']:
  generator_default_variables[unused] = ''
def _ToGypPath(path):
"""Converts a path to the format used by gyp."""
if os.sep == '\\' and os.altsep == '/':
return path.replace('\\', '/')
return path
def _ResolveParent(path, base_path_components):
"""Resolves |path|, which starts with at least one '../'. Returns an empty
string if the path shouldn't be considered. See _AddSources() for a
description of |base_path_components|."""
depth = 0
while path.startswith('../'):
depth += 1
path = path[3:]
# Relative includes may go outside the source tree. For example, an action may
# have inputs in /usr/include, which are not in the source tree.
if depth > len(base_path_components):
return ''
if depth == len(base_path_components):
return path
return '/'.join(base_path_components[0:len(base_path_components) - depth]) + \
'/' + path
def _AddSources(sources, base_path, base_path_components, result):
  """Extracts valid sources from |sources| and adds them to |result|. Each
  source file is relative to |base_path|, but may contain '..'. To make
  resolving '..' easier |base_path_components| contains each of the
  directories in |base_path|. Additionally each source may contain variables.
  Such sources are ignored as it is assumed dependencies on them are expressed
  and tracked in some other means."""
  # NOTE: gyp paths are always posix style.
  for source in sources:
    # Skip empty entries, generator placeholders ('!!!', see
    # generator_default_variables) and unexpanded variables ('$...').
    if not source or source.startswith('!!!') or source.startswith('$'):
      continue
    # variable expansion may lead to //.
    org_source = source
    source = source[0] + source[1:].replace('//', '/')
    if source.startswith('../'):
      source = _ResolveParent(source, base_path_components)
      # An empty result means the path escaped the source tree; drop it.
      if source:
        result.append(source)
      continue
    result.append(base_path + source)
    if debug:
      print('AddSource', org_source, result[len(result) - 1])
def _ExtractSourcesFromAction(action, base_path, base_path_components,
                              results):
  """Adds the 'inputs' of |action| (if any) to |results| via _AddSources()."""
  if 'inputs' in action:
    _AddSources(action['inputs'], base_path, base_path_components, results)
def _ToLocalPath(toplevel_dir, path):
"""Converts |path| to a path relative to |toplevel_dir|."""
if path == toplevel_dir:
return ''
if path.startswith(toplevel_dir + '/'):
return path[len(toplevel_dir) + len('/'):]
return path
def _ExtractSources(target, target_dict, toplevel_dir):
  """Returns the list of source paths of |target|: its 'sources' plus the
  inputs of its actions and rules, relative to |toplevel_dir|."""
  # |target| is either absolute or relative and in the format of the OS. Gyp
  # source paths are always posix. Convert |target| to a posix path relative
  # to |toplevel_dir| to make it easy to build source paths.
  base_path = posixpath.dirname(_ToLocalPath(toplevel_dir, _ToGypPath(target)))
  base_path_components = base_path.split('/')

  # Add a trailing '/' so that _AddSources() can easily build paths.
  if base_path:
    base_path += '/'

  if debug:
    print('ExtractSources', target, base_path)

  results = []
  if 'sources' in target_dict:
    _AddSources(target_dict['sources'], base_path, base_path_components,
                results)
  # Include the inputs from any actions and rules. Any changes to these
  # affect the resulting output.
  for key in ('actions', 'rules'):
    for action in target_dict.get(key, []):
      _ExtractSourcesFromAction(action, base_path, base_path_components,
                                results)
  return results
class Target(object):
  """Holds information about a particular target:
  deps: set of Targets this Target depends upon. This is not recursive, only the
  direct dependent Targets.
  match_status: one of the MatchStatus values.
  back_deps: set of Targets that have a dependency on this Target.
  visited: used during iteration to indicate whether we've visited this target.
  This is used for two iterations, once in building the set of Targets and
  again in _GetBuildTargets().
  name: fully qualified name of the target.
  requires_build: True if the target type is such that it needs to be built.
  See _DoesTargetTypeRequireBuild for details.
  added_to_compile_targets: used when determining if the target was added to the
  set of targets that needs to be built.
  in_roots: true if this target is a descendant of one of the root nodes.
  is_executable: true if the type of target is executable.
  is_static_library: true if the type of target is static_library.
  is_or_has_linked_ancestor: true if the target does a link (eg executable), or
  if there is a target in back_deps that does a link."""

  def __init__(self, name):
    self.deps = set()
    # Unknown until the dependency graph has been walked.
    self.match_status = MATCH_STATUS_TBD
    self.back_deps = set()
    self.name = name
    # TODO(sky): I don't like hanging this off Target. This state is specific
    # to certain functions and should be isolated there.
    self.visited = False
    self.requires_build = False
    self.added_to_compile_targets = False
    self.in_roots = False
    self.is_executable = False
    self.is_static_library = False
    self.is_or_has_linked_ancestor = False
class Config(object):
  """Details what we're looking for:
  files: set of files to search for.
  targets: see file description for details."""

  def __init__(self):
    self.files = []
    self.targets = set()
    # Unqualified target names supplied by way of the config file.
    self.additional_compile_target_names = set()
    self.test_target_names = set()

  def Init(self, params):
    """Initializes Config from the config_path generator flag in |params|.
    This is a separate method (rather than __init__) as it raises an
    exception if there is a parse error. Does nothing when no config_path
    was supplied."""
    generator_flags = params.get('generator_flags', {})
    config_path = generator_flags.get('config_path', None)
    if not config_path:
      return
    try:
      # 'with' guarantees the file is closed even if json.load() raises.
      with open(config_path, 'r') as f:
        config = json.load(f)
    except IOError:
      raise Exception('Unable to open file ' + config_path)
    except ValueError as e:
      raise Exception('Unable to parse config file ' + config_path + str(e))
    if not isinstance(config, dict):
      raise Exception('config_path must be a JSON file containing a dictionary')
    self.files = config.get('files', [])
    self.additional_compile_target_names = set(
        config.get('additional_compile_targets', []))
    self.test_target_names = set(config.get('test_targets', []))
def _WasBuildFileModified(build_file, data, files, toplevel_dir):
  """Returns true if the build file |build_file| is either in |files| or
  one of the files included by |build_file| is in |files|. |toplevel_dir| is
  the root of the source tree."""
  if _ToLocalPath(toplevel_dir, _ToGypPath(build_file)) in files:
    if debug:
      print('gyp file modified', build_file)
    return True

  # First element of included_files is the file itself.
  included_files = data[build_file]['included_files']
  if len(included_files) <= 1:
    return False

  for include_file in included_files[1:]:
    # |included_files| are relative to the directory of the |build_file|.
    rel_include_file = \
        _ToGypPath(gyp.common.UnrelativePath(include_file, build_file))
    if _ToLocalPath(toplevel_dir, rel_include_file) in files:
      if debug:
        print('included gyp file modified, gyp_file=', build_file, 'included file=', rel_include_file)
      return True
  return False
def _GetOrCreateTargetByName(targets, target_name):
  """Creates or returns the Target at targets[target_name]. If there is no
  Target for |target_name| one is created. Returns a tuple of whether a new
  Target was created and the Target."""
  existing = targets.get(target_name)
  if existing is not None:
    return False, existing
  created = Target(target_name)
  targets[target_name] = created
  return True, created
def _DoesTargetTypeRequireBuild(target_dict):
"""Returns true if the target type is such that it needs to be built."""
# If a 'none' target has rules or actions we assume it requires a build.
return bool(target_dict['type'] != 'none' or
target_dict.get('actions') or target_dict.get('rules'))
def _GenerateTargets(data, target_list, target_dicts, toplevel_dir, files,
                     build_files):
  """Returns a tuple of the following:
  . A dictionary mapping from fully qualified name to Target.
  . A list of the targets that have a source file in |files|.
  . Targets that constitute the 'all' target. See description at top of file
    for details on the 'all' target.
  This sets the |match_status| of the targets that contain any of the source
  files in |files| to MATCH_STATUS_MATCHES.
  |toplevel_dir| is the root of the source tree."""
  # Maps from target name to Target.
  name_to_target = {}

  # Targets that matched.
  matching_targets = []

  # Queue of targets to visit.
  targets_to_visit = target_list[:]

  # Maps from build file to a boolean indicating whether the build file is in
  # |files|.
  build_file_in_files = {}

  # Root targets across all files.
  roots = set()

  # Set of Targets in |build_files|.
  build_file_targets = set()

  while len(targets_to_visit) > 0:
    target_name = targets_to_visit.pop()
    created_target, target = _GetOrCreateTargetByName(name_to_target,
                                                      target_name)
    if created_target:
      # Tentatively a root; demoted below once something depends on it.
      roots.add(target)
    elif target.visited:
      continue

    target.visited = True
    target.requires_build = _DoesTargetTypeRequireBuild(
        target_dicts[target_name])
    target_type = target_dicts[target_name]['type']
    target.is_executable = target_type == 'executable'
    target.is_static_library = target_type == 'static_library'
    target.is_or_has_linked_ancestor = (target_type == 'executable' or
                                        target_type == 'shared_library')

    build_file = gyp.common.ParseQualifiedTarget(target_name)[0]
    if not build_file in build_file_in_files:
      # Cache the modified check per build file; many targets share one.
      build_file_in_files[build_file] = \
          _WasBuildFileModified(build_file, data, files, toplevel_dir)

    if build_file in build_files:
      build_file_targets.add(target)

    # If a build file (or any of its included files) is modified we assume all
    # targets in the file are modified.
    if build_file_in_files[build_file]:
      print('matching target from modified build file', target_name)
      target.match_status = MATCH_STATUS_MATCHES
      matching_targets.append(target)
    else:
      sources = _ExtractSources(target_name, target_dicts[target_name],
                                toplevel_dir)
      for source in sources:
        if _ToGypPath(os.path.normpath(source)) in files:
          print('target', target_name, 'matches', source)
          target.match_status = MATCH_STATUS_MATCHES
          matching_targets.append(target)
          break

    # Add dependencies to visit as well as updating back pointers for deps.
    for dep in target_dicts[target_name].get('dependencies', []):
      targets_to_visit.append(dep)

      created_dep_target, dep_target = _GetOrCreateTargetByName(name_to_target,
                                                                dep)
      if not created_dep_target:
        # A depended-upon target cannot be a root.
        roots.discard(dep_target)

      target.deps.add(dep_target)
      dep_target.back_deps.add(target)

  return name_to_target, matching_targets, roots & build_file_targets
def _GetUnqualifiedToTargetMapping(all_targets, to_find):
  """Returns a tuple of the following:
  . mapping (dictionary) from unqualified name to Target for all the
    Targets in |to_find|.
  . any target names not found. If this is empty all targets were found."""
  result = {}
  if not to_find:
    return {}, []
  to_find = set(to_find)
  # Iterate the dict directly; .keys() would build an intermediate view/list.
  for target_name in all_targets:
    extracted = gyp.common.ParseQualifiedTarget(target_name)
    if len(extracted) > 1 and extracted[1] in to_find:
      to_find.remove(extracted[1])
      result[extracted[1]] = all_targets[target_name]
      if not to_find:
        return result, []
  return result, list(to_find)
def _DoesTargetDependOnMatchingTargets(target):
  """Returns true if |target| or any of its dependencies is one of the
  targets containing the files supplied as input to analyzer. This updates
  |match_status| of the Targets as it recurses.
  target: the Target to look for."""
  if target.match_status == MATCH_STATUS_DOESNT_MATCH:
    return False
  if target.match_status in (MATCH_STATUS_MATCHES,
                             MATCH_STATUS_MATCHES_BY_DEPENDENCY):
    return True
  # Status is TBD: recurse into dependencies and memoize the answer.
  for dep in target.deps:
    if _DoesTargetDependOnMatchingTargets(dep):
      target.match_status = MATCH_STATUS_MATCHES_BY_DEPENDENCY
      print('\t', target.name, 'matches by dep', dep.name)
      return True
  target.match_status = MATCH_STATUS_DOESNT_MATCH
  return False
def _GetTargetsDependingOnMatchingTargets(possible_targets):
  """Returns the list of Targets in |possible_targets| that depend (either
  directly or indirectly) on at least one of the targets containing the files
  supplied as input to analyzer.
  possible_targets: targets to search from."""
  print('Targets that matched by dependency:')
  return [target for target in possible_targets
          if _DoesTargetDependOnMatchingTargets(target)]
def _AddCompileTargets(target, roots, add_if_no_ancestor, result):
  """Recurses through all targets that depend on |target|, adding all targets
  that need to be built (and are in |roots|) to |result|.
  roots: set of root targets.
  add_if_no_ancestor: If true and there are no ancestors of |target| then add
  |target| to |result|. |target| must still be in |roots|.
  result: targets that need to be built are added here."""
  if target.visited:
    return

  target.visited = True
  target.in_roots = target in roots

  for back_dep_target in target.back_deps:
    # Visit ancestors first so their aggregate state can be folded in below.
    _AddCompileTargets(back_dep_target, roots, False, result)
    target.added_to_compile_targets |= back_dep_target.added_to_compile_targets
    target.in_roots |= back_dep_target.in_roots
    target.is_or_has_linked_ancestor |= (
        back_dep_target.is_or_has_linked_ancestor)

  # Always add 'executable' targets. Even though they may be built by other
  # targets that depend upon them it makes detection of what is going to be
  # built easier.
  # And always add static_libraries that have no dependencies on them from
  # linkables. This is necessary as the other dependencies on them may be
  # static libraries themselves, which are not compile time dependencies.
  if target.in_roots and \
     (target.is_executable or
      (not target.added_to_compile_targets and
       (add_if_no_ancestor or target.requires_build)) or
      (target.is_static_library and add_if_no_ancestor and
       not target.is_or_has_linked_ancestor)):
    print('\t\tadding to compile targets', target.name, 'executable',
          target.is_executable, 'added_to_compile_targets',
          target.added_to_compile_targets, 'add_if_no_ancestor',
          add_if_no_ancestor, 'requires_build', target.requires_build,
          'is_static_library', target.is_static_library,
          'is_or_has_linked_ancestor', target.is_or_has_linked_ancestor
          )
    result.add(target)
    target.added_to_compile_targets = True
def _GetCompileTargets(matching_targets, supplied_targets):
  """Returns the set of Targets that require a build.
  matching_targets: targets that changed and need to be built.
  supplied_targets: set of targets supplied to analyzer to search from."""
  result = set()
  for changed_target in matching_targets:
    print('finding compile targets for match', changed_target.name)
    _AddCompileTargets(changed_target, supplied_targets, True, result)
  return result
def _WriteOutput(params, **values):
"""Writes the output, either to stdout or a file is specified."""
if 'error' in values:
print('Error:', values['error'])
if 'status' in values:
print(values['status'])
if 'targets' in values:
values['targets'].sort()
print('Supplied targets that depend on changed files:')
for target in values['targets']:
print('\t', target)
if 'invalid_targets' in values:
values['invalid_targets'].sort()
print('The following targets were not found:')
for target in values['invalid_targets']:
print('\t', target)
if 'build_targets' in values:
values['build_targets'].sort()
print('Targets that require a build:')
for target in values['build_targets']:
print('\t', target)
if 'compile_targets' in values:
values['compile_targets'].sort()
print('Targets that need to be built:')
for target in values['compile_targets']:
print('\t', target)
if 'test_targets' in values:
values['test_targets'].sort()
print('Test targets:')
for target in values['test_targets']:
print('\t', target)
output_path = params.get('generator_flags', {}).get(
'analyzer_output_path', None)
if not output_path:
print(json.dumps(values))
return
try:
f = open(output_path, 'w')
f.write(json.dumps(values) + '\n')
f.close()
except IOError as e:
print('Error writing to output file', output_path, str(e))
def _WasGypIncludeFileModified(params, files):
  """Returns true if one of the files in |files| is in the set of included
  files."""
  includes = params['options'].includes
  if not includes:
    return False
  for include in includes:
    if _ToGypPath(os.path.normpath(include)) in files:
      print('Include file modified, assuming all changed', include)
      return True
  return False
def _NamesNotIn(names, mapping):
"""Returns a list of the values in |names| that are not in |mapping|."""
return [name for name in names if name not in mapping]
def _LookupTargets(names, mapping):
"""Returns a list of the mapping[name] for each value in |names| that is in
|mapping|."""
return [mapping[name] for name in names if name in mapping]
def CalculateVariables(default_variables, params):
  """Calculate additional variables for use in the build (called by gyp)."""
  flavor = gyp.common.GetFlavor(params)
  if flavor == 'mac':
    default_variables.setdefault('OS', 'mac')
  elif flavor == 'win':
    default_variables.setdefault('OS', 'win')
    # Shared generator configuration (also used by the Windows Ninja
    # generator) is calculated by the msvs emulation layer.
    gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
  else:
    # 'android' is reported as 'linux' to keep legacy behavior.
    os_name = 'linux' if flavor == 'android' else flavor
    default_variables.setdefault('OS', os_name)
class TargetCalculator(object):
  """Calculates the matching test_targets and matching compile_targets."""

  def __init__(self, files, additional_compile_target_names, test_target_names,
               data, target_list, target_dicts, toplevel_dir, build_files):
    self._additional_compile_target_names = set(additional_compile_target_names)
    self._test_target_names = set(test_target_names)
    # Walk the dependency graph once up front; everything else is derived
    # from these collections.
    self._name_to_target, self._changed_targets, self._root_targets = (
        _GenerateTargets(data, target_list, target_dicts, toplevel_dir,
                         frozenset(files), build_files))
    self._unqualified_mapping, self.invalid_targets = (
        _GetUnqualifiedToTargetMapping(self._name_to_target,
                                       self._supplied_target_names_no_all()))

  def _supplied_target_names(self):
    """Returns all supplied target names (compile and test)."""
    return self._additional_compile_target_names | self._test_target_names

  def _supplied_target_names_no_all(self):
    """Returns the supplied test targets without 'all'."""
    result = self._supplied_target_names()
    result.discard('all')
    return result

  def is_build_impacted(self):
    """Returns true if the supplied files impact the build at all."""
    return self._changed_targets

  def find_matching_test_target_names(self):
    """Returns the set of output test targets."""
    assert self.is_build_impacted()
    # Find the test targets first. 'all' is special cased to mean all the
    # root targets. To deal with all the supplied |test_targets| are expanded
    # to include the root targets during lookup. If any of the root targets
    # match, we remove it and replace it with 'all'.
    test_target_names_no_all = set(self._test_target_names)
    test_target_names_no_all.discard('all')
    test_targets_no_all = _LookupTargets(test_target_names_no_all,
                                         self._unqualified_mapping)
    test_target_names_contains_all = 'all' in self._test_target_names
    if test_target_names_contains_all:
      test_targets = [x for x in (set(test_targets_no_all) |
                                  set(self._root_targets))]
    else:
      test_targets = [x for x in test_targets_no_all]
    print('supplied test_targets')
    for target_name in self._test_target_names:
      print('\t', target_name)
    print('found test_targets')
    for target in test_targets:
      print('\t', target.name)
    print('searching for matching test targets')
    matching_test_targets = _GetTargetsDependingOnMatchingTargets(test_targets)
    matching_test_targets_contains_all = (test_target_names_contains_all and
                                          set(matching_test_targets) &
                                          set(self._root_targets))
    if matching_test_targets_contains_all:
      # Remove any of the targets for all that were not explicitly supplied,
      # 'all' is subsequently added to the matching names below.
      matching_test_targets = [x for x in (set(matching_test_targets) &
                                           set(test_targets_no_all))]
    print('matched test_targets')
    for target in matching_test_targets:
      print('\t', target.name)
    matching_target_names = [gyp.common.ParseQualifiedTarget(target.name)[1]
                             for target in matching_test_targets]
    if matching_test_targets_contains_all:
      matching_target_names.append('all')
      print('\tall')
    return matching_target_names

  def find_matching_compile_target_names(self):
    """Returns the set of output compile targets."""
    assert self.is_build_impacted()
    # Compile targets are found by searching up from changed targets.
    # Reset the visited status for _GetBuildTargets.
    for target in self._name_to_target.values():
      target.visited = False
    supplied_targets = _LookupTargets(self._supplied_target_names_no_all(),
                                      self._unqualified_mapping)
    if 'all' in self._supplied_target_names():
      supplied_targets = [x for x in (set(supplied_targets) |
                                      set(self._root_targets))]
    print('Supplied test_targets & compile_targets')
    for target in supplied_targets:
      print('\t', target.name)
    print('Finding compile targets')
    compile_targets = _GetCompileTargets(self._changed_targets,
                                         supplied_targets)
    return [gyp.common.ParseQualifiedTarget(target.name)[1]
            for target in compile_targets]
def GenerateOutput(target_list, target_dicts, data, params):
  """Called by gyp as the final stage. Outputs results."""
  config = Config()
  try:
    config.Init(params)

    if not config.files:
      raise Exception('Must specify files to analyze via config_path generator '
                      'flag')

    toplevel_dir = _ToGypPath(os.path.abspath(params['options'].toplevel_dir))
    if debug:
      print('toplevel_dir', toplevel_dir)

    if _WasGypIncludeFileModified(params, config.files):
      # A shared include changed: assume everything changed and report all
      # supplied targets without walking the dependency graph.
      result_dict = { 'status': all_changed_string,
                      'test_targets': list(config.test_target_names),
                      'compile_targets': list(
                          config.additional_compile_target_names |
                          config.test_target_names) }
      _WriteOutput(params, **result_dict)
      return

    calculator = TargetCalculator(config.files,
                                  config.additional_compile_target_names,
                                  config.test_target_names, data,
                                  target_list, target_dicts, toplevel_dir,
                                  params['build_files'])
    if not calculator.is_build_impacted():
      # None of the supplied files affect any target.
      result_dict = { 'status': no_dependency_string,
                      'test_targets': [],
                      'compile_targets': [] }
      if calculator.invalid_targets:
        result_dict['invalid_targets'] = calculator.invalid_targets
      _WriteOutput(params, **result_dict)
      return

    test_target_names = calculator.find_matching_test_target_names()
    compile_target_names = calculator.find_matching_compile_target_names()
    found_at_least_one_target = compile_target_names or test_target_names
    result_dict = { 'test_targets': test_target_names,
                    'status': found_dependency_string if
                              found_at_least_one_target else no_dependency_string,
                    'compile_targets': list(
                        set(compile_target_names) |
                        set(test_target_names)) }
    if calculator.invalid_targets:
      result_dict['invalid_targets'] = calculator.invalid_targets
    _WriteOutput(params, **result_dict)
  except Exception as e:
    # Deliberately broad: any failure is reported through the normal output
    # channel as an 'error' field rather than an uncaught traceback.
    _WriteOutput(params, error=str(e))
|
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
import unittest
import requests_mock
from mock import Mock
from urlparse import urlparse, parse_qs
from cartodb_services.here import HereMapsRoutingIsoline
from cartodb_services.here.exceptions import BadGeocodingParams
from cartodb_services.here.exceptions import NoGeocodingParams
from cartodb_services.here.exceptions import MalformedResult
requests_mock.Mocker.TEST_PREFIX = 'test_'
@requests_mock.Mocker()
class HereMapsRoutingIsolineTestCase(unittest.TestCase):
EMPTY_RESPONSE = """{
"response": {
"metaInfo": {
"timestamp": "2016-02-10T10:42:21Z",
"mapVersion": "8.30.61.107",
"moduleVersion": "7.2.65.0-1222",
"interfaceVersion": "2.6.20"
},
"center": {
"latitude": 33,
"longitude": 0.9999999
},
"isoline": [
{
"range": 1000,
"component": [
{
"id": 0,
"shape": []
}
]
}
],
"start": {
"linkId": "+1025046831",
"mappedPosition": {
"latitude": 32.968725,
"longitude": 0.9993629
},
"originalPosition": {
"latitude": 33,
"longitude": 0.9999999
}
}
}
}"""
ERROR_RESPONSE = """{
"_type": "ns2:RoutingServiceErrorType",
"type": "ApplicationError",
"subtype": "InitIsolineSearchFailed",
"details": "Error is NGEO_ERROR_UNKNOWN",
"additionalData": [
{
"key": "error_code",
"value": "NGEO_ERROR_UNKNOWN"
}
],
"metaInfo": {
"timestamp": "2016-02-10T10:39:35Z",
"mapVersion": "8.30.61.107",
"moduleVersion": "7.2.65.0-1222",
"interfaceVersion": "2.6.20"
}
}"""
GOOD_RESPONSE = """{
"response": {
"metaInfo": {
"timestamp": "2016-02-10T10:42:21Z",
"mapVersion": "8.30.61.107",
"moduleVersion": "7.2.65.0-1222",
"interfaceVersion": "2.6.20"
},
"center": {
"latitude": 33,
"longitude": 0.9999999
},
"isoline": [
{
"range": 1000,
"component": [
{
"id": 0,
"shape": [
"32.9699707,0.9462833",
"32.9699707,0.9458542",
"32.9699707,0.9462833"
]
}
]
}, {
"range": 2000,
"component": [
{
"id": 0,
"shape": [
"32.9699707,0.9462833",
"32.9699707,0.9750366",
"32.9699707,0.9462833"
]
}
]
}
],
"start": {
"linkId": "+1025046831",
"mappedPosition": {
"latitude": 32.968725,
"longitude": 0.9993629
},
"originalPosition": {
"latitude": 33,
"longitude": 0.9999999
}
}
}
}"""
MALFORMED_RESPONSE = """{"manolo": "escobar"}"""
def setUp(self):
self.logger = Mock()
self.routing = HereMapsRoutingIsoline(None, None, self.logger)
self.isoline_url = "{0}{1}".format(HereMapsRoutingIsoline.PRODUCTION_ROUTING_BASE_URL,
HereMapsRoutingIsoline.ISOLINE_PATH)
def test_calculate_isodistance_with_valid_params(self, req_mock):
url = "{0}?start=geo%2133.0%2C1.0&mode=shortest%3Bcar".format(self.isoline_url)
req_mock.register_uri('GET', url, text=self.GOOD_RESPONSE)
response = self.routing.calculate_isodistance('geo!33.0,1.0', 'car',
['1000', '2000'])
self.assertEqual(len(response), 2)
self.assertEqual(response[0]['range'], 1000)
self.assertEqual(response[1]['range'], 2000)
self.assertEqual(response[0]['geom'], [u'32.9699707,0.9462833',
u'32.9699707,0.9458542',
u'32.9699707,0.9462833'])
self.assertEqual(response[1]['geom'], [u'32.9699707,0.9462833',
u'32.9699707,0.9750366',
u'32.9699707,0.9462833'])
def test_calculate_isochrone_with_valid_params(self, req_mock):
url = "{0}?start=geo%2133.0%2C1.0&mode=shortest%3Bcar".format(self.isoline_url)
req_mock.register_uri('GET', url, text=self.GOOD_RESPONSE)
response = self.routing.calculate_isochrone('geo!33.0,1.0', 'car',
['1000', '2000'])
self.assertEqual(len(response), 2)
self.assertEqual(response[0]['range'], 1000)
self.assertEqual(response[1]['range'], 2000)
self.assertEqual(response[0]['geom'], [u'32.9699707,0.9462833',
u'32.9699707,0.9458542',
u'32.9699707,0.9462833'])
self.assertEqual(response[1]['geom'], [u'32.9699707,0.9462833',
u'32.9699707,0.9750366',
u'32.9699707,0.9462833'])
    def test_calculate_isolines_empty_response(self, req_mock):
        # EMPTY_RESPONSE contains a single isoline whose shape list is
        # empty; the client should surface it as one result with an empty
        # geometry rather than dropping it or raising.
        url = "{0}?start=geo%2133.0%2C1.0&mode=shortest%3Bcar".format(
            self.isoline_url)
        req_mock.register_uri('GET', url, text=self.EMPTY_RESPONSE)
        response = self.routing.calculate_isochrone('geo!33.0,1.0', 'car',
                                                    ['1000', '2000'])
        self.assertEqual(len(response), 1)
        self.assertEqual(response[0]['range'], 1000)
        self.assertEqual(response[0]['geom'], [])
def test_non_listed_parameters_filter_works_properly(self, req_mock):
url = "{0}?start=geo%2133.0%2C1.0&mode=shortest%3Bcar".format(
self.isoline_url)
req_mock.register_uri('GET', url, text=self.GOOD_RESPONSE)
response = self.routing.calculate_isochrone('geo!33.0,1.0', 'car',
['1000', '2000'],
['singlecomponent=true',
'resolution=3',
'maxpoints=1000',
'quality=2',
'false_option=true'])
parsed_url = urlparse(req_mock.request_history[0].url)
url_params = parse_qs(parsed_url.query)
self.assertEqual(len(url_params), 8)
self.assertEqual('false_option' in url_params, False)
def test_mode_parameters_works_properly(self, req_mock):
req_mock.register_uri('GET', requests_mock.ANY,
text=self.GOOD_RESPONSE)
response = self.routing.calculate_isochrone('geo!33.0,1.0', 'car',
['1000', '2000'],
['mode_type=fastest',
'mode_feature=motorway',
'mode_feature_weight=-1',
'mode_traffic=false'])
parsed_url = urlparse(req_mock.request_history[0].url)
url_params = parse_qs(parsed_url.query)
self.assertEqual(url_params['mode'][0],
'fastest;car;traffic:false;motorway:-1')
def test_source_parameters_works_properly(self, req_mock):
req_mock.register_uri('GET', requests_mock.ANY,
text=self.GOOD_RESPONSE)
response = self.routing.calculate_isochrone('geo!33.0,1.0', 'car',
['1000', '2000'],
['is_destination=false'])
parsed_url = urlparse(req_mock.request_history[0].url)
url_params = parse_qs(parsed_url.query)
self.assertEqual(url_params['start'][0], 'geo!33.0,1.0')
def test_destination_parameters_works_properly(self, req_mock):
req_mock.register_uri('GET', requests_mock.ANY,
text=self.GOOD_RESPONSE)
response = self.routing.calculate_isochrone('geo!33.0,1.0', 'car',
['1000', '2000'],
['is_destination=true'])
parsed_url = urlparse(req_mock.request_history[0].url)
url_params = parse_qs(parsed_url.query)
self.assertEqual(url_params['destination'][0], 'geo!33.0,1.0')
    def test_isodistance_with_nonstandard_url(self, req_mock):
        # A 'base_url' entry in the service config must override the
        # production routing base URL; results are otherwise identical to
        # the standard-URL case above.
        base_url = 'http://nonstandard_base'
        url = "{0}{1}".format(base_url, HereMapsRoutingIsoline.ISOLINE_PATH)
        routing = HereMapsRoutingIsoline(None, None, Mock(), { 'base_url': base_url })
        req_mock.register_uri('GET', url, text=self.GOOD_RESPONSE)
        response = routing.calculate_isodistance('geo!33.0,1.0', 'car',
                                                 ['1000', '2000'])
        self.assertEqual(len(response), 2)
        self.assertEqual(response[0]['range'], 1000)
        self.assertEqual(response[1]['range'], 2000)
        self.assertEqual(response[0]['geom'], [u'32.9699707,0.9462833',
                                               u'32.9699707,0.9458542',
                                               u'32.9699707,0.9462833'])
        self.assertEqual(response[1]['geom'], [u'32.9699707,0.9462833',
                                               u'32.9699707,0.9750366',
                                               u'32.9699707,0.9462833'])
|
|
# Copyright 2015-2017 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import importlib
import json
import logging
import os
import platform
import py_compile
import shutil
import site
import sys
import tempfile
import time
import unittest
import zipfile
import mock
from c7n.config import Config
from c7n.mu import (
custodian_archive,
generate_requirements,
get_exec_options,
LambdaFunction,
LambdaManager,
PolicyLambda,
PythonPackageArchive,
SNSSubscription,
SQSSubscription,
CloudWatchEventSource
)
from .common import (
BaseTest, event_data, functional, Bag, ACCOUNT_ID)
from .data import helloworld
ROLE = "arn:aws:iam::644160558196:role/custodian-mu"
def test_get_exec_options():
    """Only non-default execution options survive into the lambda config."""
    # An empty config yields just the default tracer.
    base = Config().empty()
    assert get_exec_options(base) == {'tracer': 'default'}
    # Local output directories are not carried over.
    local_out = Config().empty(output_dir='/tmp/xyz')
    assert get_exec_options(local_out) == {'tracer': 'default'}
    # Remote output locations and log groups are preserved.
    remote = Config().empty(log_group='gcp', output_dir='gs://mybucket/myprefix')
    assert get_exec_options(remote) == {
        'tracer': 'default',
        'output_dir': 'gs://mybucket/myprefix',
        'log_group': 'gcp'}
def test_generate_requirements():
    """generate_requirements pins transitive deps as ``name==version`` lines.

    Versions vary by environment, so only the package-name set is compared;
    ignored/excluded packages must not appear.
    """
    lines = generate_requirements(
        'boto3', ignore=('docutils', 's3transfer', 'six'), exclude=['urllib3'])
    # Keep only the name half of each pinned line (the version was an
    # unused local in the original loop).
    packages = [line.split('==')[0] for line in lines.split('\n')]
    assert set(packages) == {'botocore', 'jmespath', 'python-dateutil'}
class Publish(BaseTest):
    """Exercise LambdaManager.publish against recorded AWS interactions."""

    def make_func(self, **kw):
        # Minimal lambda definition; kw lets individual tests override
        # fields (concurrency, layers, runtime, ...).
        func_data = dict(
            name="test-foo-bar",
            handler="index.handler",
            memory_size=128,
            timeout=3,
            role='custodian-mu',
            runtime="python2.7",
            description="test",
        )
        func_data.update(kw)
        archive = PythonPackageArchive()
        archive.add_contents(
            "index.py", """def handler(*a, **kw):\n    print("Greetings, program!")"""
        )
        archive.close()
        self.addCleanup(archive.remove)
        return LambdaFunction(func_data, archive)

    def test_publishes_a_lambda(self):
        session_factory = self.replay_flight_data("test_publishes_a_lambda")
        mgr = LambdaManager(session_factory)
        func = self.make_func()
        self.addCleanup(mgr.remove, func)
        result = mgr.publish(func)
        # CodeSize matches the zipped one-file archive in the flight data.
        self.assertEqual(result["CodeSize"], 169)

    def test_publish_a_lambda_with_layer_and_concurrency(self):
        factory = self.replay_flight_data('test_lambda_layer_concurrent_publish')
        mgr = LambdaManager(factory)
        layers = ['arn:aws:lambda:us-east-1:644160558196:layer:CustodianLayer:2']
        func = self.make_func(
            concurrency=5,
            layers=layers)
        self.addCleanup(mgr.remove, func)
        result = mgr.publish(func)
        self.assertEqual(result['Layers'][0]['Arn'], layers[0])
        state = mgr.get(func.name)
        self.assertEqual(state['Concurrency']['ReservedConcurrentExecutions'], 5)
        # Republish without concurrency: layers stay unchanged (no config
        # update logged) but the reserved concurrency must be removed.
        func = self.make_func(layers=layers)
        output = self.capture_logging("custodian.serverless", level=logging.DEBUG)
        result = mgr.publish(func)
        self.assertEqual(result['Layers'][0]['Arn'], layers[0])
        lines = output.getvalue().strip().split("\n")
        self.assertFalse('Updating function: test-foo-bar config Layers' in lines)
        self.assertTrue('Removing function: test-foo-bar concurrency' in lines)

    def test_can_switch_runtimes(self):
        session_factory = self.replay_flight_data("test_can_switch_runtimes")
        func = self.make_func()
        mgr = LambdaManager(session_factory)
        self.addCleanup(mgr.remove, func)
        result = mgr.publish(func)
        self.assertEqual(result["Runtime"], "python2.7")
        # Changing func_data after the first publish should surface as a
        # runtime config update on republish.
        func.func_data["runtime"] = "python3.6"
        result = mgr.publish(func)
        self.assertEqual(result["Runtime"], "python3.6")
class PolicyLambdaProvision(BaseTest):
role = "arn:aws:iam::644160558196:role/custodian-mu"
def assert_items(self, result, expected):
for k, v in expected.items():
self.assertEqual(v, result[k])
def test_config_rule_provision(self):
session_factory = self.replay_flight_data("test_config_rule")
p = self.load_policy(
{
"resource": "security-group",
"name": "sg-modified",
"mode": {"type": "config-rule"},
},
session_factory=session_factory
)
pl = PolicyLambda(p)
mgr = LambdaManager(session_factory)
result = mgr.publish(pl, "Dev", role=ROLE)
self.assertEqual(result["FunctionName"], "custodian-sg-modified")
self.addCleanup(mgr.remove, pl)
def test_config_poll_rule_evaluation(self):
session_factory = self.record_flight_data("test_config_poll_rule_provision")
p = self.load_policy({
'name': 'configx',
'resource': 'aws.kinesis',
'mode': {
'schedule': 'Three_Hours',
'type': 'config-poll-rule'}})
mu_policy = PolicyLambda(p)
mu_policy.arn = "arn:aws:lambda:us-east-1:644160558196:function:CloudCustodian"
events = mu_policy.get_events(session_factory)
self.assertEqual(len(events), 1)
config_rule = events.pop()
self.assertEqual(
config_rule.get_rule_params(mu_policy),
{'ConfigRuleName': 'custodian-configx',
'Description': 'cloud-custodian lambda policy',
'MaximumExecutionFrequency': 'Three_Hours',
'Scope': {'ComplianceResourceTypes': ['AWS::Kinesis::Stream']},
'Source': {
'Owner': 'CUSTOM_LAMBDA',
'SourceDetails': [{'EventSource': 'aws.config',
'MessageType': 'ScheduledNotification'}],
'SourceIdentifier': 'arn:aws:lambda:us-east-1:644160558196:function:CloudCustodian'} # noqa
})
def test_config_rule_evaluation(self):
session_factory = self.replay_flight_data("test_config_rule_evaluate")
p = self.load_policy(
{
"resource": "ec2",
"name": "ec2-modified",
"mode": {"type": "config-rule"},
"filters": [{"InstanceId": "i-094bc87c84d56c589"}],
},
session_factory=session_factory,
)
mode = p.get_execution_mode()
event = event_data("event-config-rule-instance.json")
resources = mode.run(event, None)
self.assertEqual(len(resources), 1)
def test_phd_account_mode(self):
factory = self.replay_flight_data('test_phd_event_mode')
p = self.load_policy(
{'name': 'ec2-retire',
'resource': 'account',
'mode': {
'categories': ['scheduledChange'],
'events': ['AWS_EC2_PERSISTENT_INSTANCE_RETIREMENT_SCHEDULED'],
'type': 'phd'}}, session_factory=factory)
mode = p.get_execution_mode()
event = event_data('event-phd-ec2-retire.json')
resources = mode.run(event, None)
self.assertEqual(len(resources), 1)
self.assertTrue('c7n:HealthEvent' in resources[0])
def test_phd_mode(self):
factory = self.replay_flight_data('test_phd_event_mode')
p = self.load_policy(
{'name': 'ec2-retire',
'resource': 'ec2',
'mode': {
'categories': ['scheduledChange'],
'events': ['AWS_EC2_PERSISTENT_INSTANCE_RETIREMENT_SCHEDULED'],
'type': 'phd'}}, session_factory=factory)
mode = p.get_execution_mode()
event = event_data('event-phd-ec2-retire.json')
resources = mode.run(event, None)
self.assertEqual(len(resources), 1)
p_lambda = PolicyLambda(p)
events = p_lambda.get_events(factory)
self.assertEqual(
json.loads(events[0].render_event_pattern()),
{'detail': {
'eventTypeCategory': ['scheduledChange'],
'eventTypeCode': ['AWS_EC2_PERSISTENT_INSTANCE_RETIREMENT_SCHEDULED']},
'source': ['aws.health']}
)
def test_cloudtrail_delay(self):
p = self.load_policy({
'name': 'aws-account',
'resource': 'aws.account',
'mode': {
'type': 'cloudtrail',
'delay': 32,
'role': 'CustodianRole',
'events': ['RunInstances']}})
from c7n import policy
class time:
invokes = []
@classmethod
def sleep(cls, duration):
cls.invokes.append(duration)
self.patch(policy, 'time', time)
trail_mode = p.get_execution_mode()
results = trail_mode.run({
'detail': {
'eventSource': 'ec2.amazonaws.com',
'eventName': 'RunInstances'}},
None)
self.assertEqual(len(results), 0)
self.assertEqual(time.invokes, [32])
def test_user_pattern_merge(self):
p = self.load_policy({
'name': 'ec2-retire',
'resource': 'ec2',
'mode': {
'type': 'cloudtrail',
'pattern': {
'detail': {
'userIdentity': {
'userName': [{'anything-but': 'deputy'}]}}},
'events': [{
'ids': 'responseElements.subnet.subnetId',
'source': 'ec2.amazonaws.com',
'event': 'CreateSubnet'}]}})
p_lambda = PolicyLambda(p)
events = p_lambda.get_events(None)
self.assertEqual(
json.loads(events[0].render_event_pattern()),
{'detail': {'eventName': ['CreateSubnet'],
'eventSource': ['ec2.amazonaws.com'],
'userIdentity': {'userName': [{'anything-but': 'deputy'}]}},
'detail-type': ['AWS API Call via CloudTrail']})
@functional
def test_sqs_subscriber(self):
session_factory = self.replay_flight_data('test_mu_sqs_subscriber')
func_name = 'c7n-hello-sqs'
queue_name = "my-dev-test-3"
# Setup Queues
session = session_factory()
client = session.client('sqs')
queue_url = client.create_queue(QueueName=queue_name).get('QueueUrl')
queue_arn = client.get_queue_attributes(
QueueUrl=queue_url,
AttributeNames=['QueueArn'])['Attributes']['QueueArn']
self.addCleanup(client.delete_queue, QueueUrl=queue_url)
# Setup Function
params = dict(
session_factory=session_factory,
name=func_name,
role="arn:aws:iam::644160558196:role/custodian-mu",
events=[SQSSubscription(session_factory, [queue_arn])])
func = helloworld.get_function(**params)
manager = LambdaManager(session_factory)
manager.publish(func)
self.addCleanup(manager.remove, func)
# Send and Receive Check
client.send_message(
QueueUrl=queue_url, MessageBody=json.dumps({'jurassic': 'block'}))
if self.recording:
time.sleep(60)
# log_events = list(manager.logs(func, "1970-1-1 UTC", "2037-1-1"))
# messages = [
# e["message"] for e in log_events if e["message"].startswith('{"Records')
# ]
self.addCleanup(
session.client("logs").delete_log_group,
logGroupName="/aws/lambda/%s" % func_name)
# self.assertIn(
# 'jurassic',
# json.loads(messages[0])["Records"][0]["body"])
@functional
def test_sns_subscriber_and_ipaddress(self):
self.patch(SNSSubscription, "iam_delay", 0.01)
session_factory = self.replay_flight_data("test_sns_subscriber_and_ipaddress")
session = session_factory()
client = session.client("sns")
# create an sns topic
tname = "custodian-test-sns-sub"
topic_arn = client.create_topic(Name=tname)["TopicArn"]
self.addCleanup(client.delete_topic, TopicArn=topic_arn)
# provision a lambda via mu
params = dict(
session_factory=session_factory,
name="c7n-hello-world",
role="arn:aws:iam::644160558196:role/custodian-mu",
events=[SNSSubscription(session_factory, [topic_arn])],
)
func = helloworld.get_function(**params)
manager = LambdaManager(session_factory)
manager.publish(func)
self.addCleanup(manager.remove, func)
# now publish to the topic and look for lambda log output
client.publish(TopicArn=topic_arn, Message="Greetings, program!")
if self.recording:
time.sleep(30)
# log_events = manager.logs(func, "1970-1-1 UTC", "2037-1-1")
# messages = [
# e["message"] for e in log_events if e["message"].startswith('{"Records')
# ]
# self.addCleanup(
# session.client("logs").delete_log_group,
# logGroupName="/aws/lambda/c7n-hello-world",
# )
# self.assertEqual(
# json.loads(messages[0])["Records"][0]["Sns"]["Message"],
# "Greetings, program!",
# )
def test_cwe_update_config_and_code(self):
# Originally this was testing the no update case.. but
# That is tricky to record, any updates to the code end up
# causing issues due to checksum mismatches which imply updating
# the function code / which invalidate the recorded data and
# the focus of the test.
session_factory = self.replay_flight_data("test_cwe_update", zdata=True)
p = self.load_policy({
"resource": "s3",
"name": "s3-bucket-policy",
"mode": {"type": "cloudtrail",
"events": ["CreateBucket"], 'runtime': 'python2.7'},
"filters": [
{"type": "missing-policy-statement",
"statement_ids": ["RequireEncryptedPutObject"]},
],
"actions": ["no-op"],
})
pl = PolicyLambda(p)
mgr = LambdaManager(session_factory)
result = mgr.publish(pl, "Dev", role=ROLE)
self.addCleanup(mgr.remove, pl)
p = self.load_policy(
{
"resource": "s3",
"name": "s3-bucket-policy",
"mode": {
"type": "cloudtrail",
"memory": 256,
'runtime': 'python2.7',
"events": [
"CreateBucket",
{
"event": "PutBucketPolicy",
"ids": "requestParameters.bucketName",
"source": "s3.amazonaws.com",
},
],
},
"filters": [
{
"type": "missing-policy-statement",
"statement_ids": ["RequireEncryptedPutObject"],
}
],
"actions": ["no-op"],
},
)
output = self.capture_logging("custodian.serverless", level=logging.DEBUG)
result2 = mgr.publish(PolicyLambda(p), "Dev", role=ROLE)
lines = output.getvalue().strip().split("\n")
self.assertTrue("Updating function custodian-s3-bucket-policy code" in lines)
self.assertTrue(
"Updating function: custodian-s3-bucket-policy config MemorySize" in lines)
self.assertEqual(result["FunctionName"], result2["FunctionName"])
# drive by coverage
functions = [
i
for i in mgr.list_functions()
if i["FunctionName"] == "custodian-s3-bucket-policy"
]
self.assertTrue(len(functions), 1)
def test_cwe_trail(self):
session_factory = self.replay_flight_data("test_cwe_trail", zdata=True)
p = self.load_policy({
"resource": "s3",
"name": "s3-bucket-policy",
"mode": {"type": "cloudtrail", "events": ["CreateBucket"]},
"filters": [
{
"type": "missing-policy-statement",
"statement_ids": ["RequireEncryptedPutObject"],
}
],
"actions": ["no-op"]},
session_factory=session_factory)
pl = PolicyLambda(p)
mgr = LambdaManager(session_factory)
self.addCleanup(mgr.remove, pl)
result = mgr.publish(pl, "Dev", role=ROLE)
events = pl.get_events(session_factory)
self.assertEqual(len(events), 1)
event = events.pop()
self.assertEqual(
json.loads(event.render_event_pattern()),
{
u"detail": {
u"eventName": [u"CreateBucket"],
u"eventSource": [u"s3.amazonaws.com"],
},
u"detail-type": ["AWS API Call via CloudTrail"],
},
)
self.assert_items(
result,
{
"Description": "cloud-custodian lambda policy",
"FunctionName": "custodian-s3-bucket-policy",
"Handler": "custodian_policy.run",
"MemorySize": 512,
"Runtime": "python2.7",
"Timeout": 60,
},
)
def test_cwe_instance(self):
session_factory = self.replay_flight_data("test_cwe_instance", zdata=True)
p = self.load_policy({
"resource": "s3",
"name": "ec2-encrypted-vol",
"mode": {"type": "ec2-instance-state", "events": ["pending"]}},
session_factory=session_factory)
pl = PolicyLambda(p)
mgr = LambdaManager(session_factory)
self.addCleanup(mgr.remove, pl)
result = mgr.publish(pl, "Dev", role=ROLE)
self.assert_items(
result,
{
"Description": "cloud-custodian lambda policy",
"FunctionName": "custodian-ec2-encrypted-vol",
"Handler": "custodian_policy.run",
"MemorySize": 512,
"Runtime": "python2.7",
"Timeout": 60,
},
)
events = session_factory().client("events")
result = events.list_rules(NamePrefix="custodian-ec2-encrypted-vol")
self.assert_items(
result["Rules"][0],
{"State": "ENABLED", "Name": "custodian-ec2-encrypted-vol"},
)
self.assertEqual(
json.loads(result["Rules"][0]["EventPattern"]),
{
"source": ["aws.ec2"],
"detail": {"state": ["pending"]},
"detail-type": ["EC2 Instance State-change Notification"],
},
)
def test_cwe_asg_instance(self):
session_factory = self.replay_flight_data("test_cwe_asg", zdata=True)
p = self.load_policy(
{
"resource": "asg",
"name": "asg-spin-detector",
"mode": {"type": "asg-instance-state", "events": ["launch-failure"]},
}, session_factory=session_factory)
pl = PolicyLambda(p)
mgr = LambdaManager(session_factory)
self.addCleanup(mgr.remove, pl)
result = mgr.publish(pl, "Dev", role=ROLE)
self.assert_items(
result,
{
"FunctionName": "custodian-asg-spin-detector",
"Handler": "custodian_policy.run",
"MemorySize": 512,
"Runtime": "python2.7",
"Timeout": 60,
},
)
events = session_factory().client("events")
result = events.list_rules(NamePrefix="custodian-asg-spin-detector")
self.assert_items(
result["Rules"][0],
{"State": "ENABLED", "Name": "custodian-asg-spin-detector"},
)
self.assertEqual(
json.loads(result["Rules"][0]["EventPattern"]),
{
"source": ["aws.autoscaling"],
"detail-type": ["EC2 Instance Launch Unsuccessful"],
},
)
def test_cwe_security_hub_action(self):
factory = self.replay_flight_data('test_mu_cwe_sechub_action')
p = self.load_policy({
'name': 'sechub',
'resource': 'account',
'mode': {
'type': 'hub-action'}},
session_factory=factory,
config={'account_id': ACCOUNT_ID})
mu_policy = PolicyLambda(p)
events = mu_policy.get_events(factory)
self.assertEqual(len(events), 1)
hub_action = events.pop()
self.assertEqual(
json.loads(hub_action.cwe.render_event_pattern()),
{'resources': [
'arn:aws:securityhub:us-east-1:644160558196:action/custom/sechub'],
'source': ['aws.securityhub'],
'detail-type': [
'Security Hub Findings - Custom Action', 'Security Hub Insight Results'
]})
hub_action.cwe = cwe = mock.Mock(CloudWatchEventSource)
cwe.get.return_value = False
cwe.update.return_value = True
cwe.add.return_value = True
self.assertEqual(repr(hub_action), "<SecurityHub Action sechub>")
self.assertEqual(
hub_action._get_arn(),
"arn:aws:securityhub:us-east-1:644160558196:action/custom/sechub")
self.assertEqual(
hub_action.get(mu_policy.name), {'event': False, 'action': None})
hub_action.add(mu_policy)
self.assertEqual(
{'event': False,
'action': {
'ActionTargetArn': ('arn:aws:securityhub:us-east-1:'
'644160558196:action/custom/sechub'),
'Name': 'Account sechub', 'Description': 'sechub'}},
hub_action.get(mu_policy.name))
hub_action.update(mu_policy)
hub_action.remove(mu_policy)
self.assertEqual(
hub_action.get(mu_policy.name),
{'event': False, 'action': None})
def test_cwe_schedule(self):
session_factory = self.replay_flight_data("test_cwe_schedule", zdata=True)
p = self.load_policy(
{
"resource": "ec2",
"name": "periodic-ec2-checker",
"mode": {"type": "periodic", "schedule": "rate(1 day)"},
}, session_factory=session_factory)
pl = PolicyLambda(p)
mgr = LambdaManager(session_factory)
self.addCleanup(mgr.remove, pl)
result = mgr.publish(pl, "Dev", role=ROLE)
self.assert_items(
result,
{
"FunctionName": "custodian-periodic-ec2-checker",
"Handler": "custodian_policy.run",
"MemorySize": 512,
"Runtime": "python2.7",
"Timeout": 60,
},
)
events = session_factory().client("events")
result = events.list_rules(NamePrefix="custodian-periodic-ec2-checker")
self.assert_items(
result["Rules"][0],
{
"State": "ENABLED",
"ScheduleExpression": "rate(1 day)",
"Name": "custodian-periodic-ec2-checker",
},
)
key_arn = "arn:aws:kms:us-west-2:644160558196:key/" "44d25a5c-7efa-44ed-8436-b9511ea921b3"
sns_arn = "arn:aws:sns:us-west-2:644160558196:config-topic"
def create_a_lambda(self, flight, **extra):
session_factory = self.replay_flight_data(flight, zdata=True)
mode = {
"type": "config-rule", "role": "arn:aws:iam::644160558196:role/custodian-mu"
}
mode.update(extra)
p = self.load_policy({
"resource": "s3",
"name": "hello-world",
"actions": ["no-op"],
"mode": mode},
session_factory=session_factory)
pl = PolicyLambda(p)
mgr = LambdaManager(session_factory)
def cleanup():
mgr.remove(pl)
if self.recording:
time.sleep(60)
self.addCleanup(cleanup)
return mgr, mgr.publish(pl)
def create_a_lambda_with_lots_of_config(self, flight):
extra = {
"environment": {"Variables": {"FOO": "bar"}},
"kms_key_arn": self.key_arn,
"dead_letter_config": {"TargetArn": self.sns_arn},
"tracing_config": {"Mode": "Active"},
"tags": {"Foo": "Bar"},
}
return self.create_a_lambda(flight, **extra)
def update_a_lambda(self, mgr, **config):
mode = {
"type": "config-rule", "role": "arn:aws:iam::644160558196:role/custodian-mu"
}
mode.update(config)
p = self.load_policy({
"resource": "s3",
"name": "hello-world",
"actions": ["no-op"],
"mode": mode,
})
pl = PolicyLambda(p)
return mgr.publish(pl)
def test_config_coverage_for_lambda_creation(self):
mgr, result = self.create_a_lambda_with_lots_of_config(
"test_config_coverage_for_lambda_creation"
)
self.assert_items(
result,
{
"Description": "cloud-custodian lambda policy",
"FunctionName": "custodian-hello-world",
"Handler": "custodian_policy.run",
"MemorySize": 512,
"Runtime": "python2.7",
"Timeout": 60,
"DeadLetterConfig": {"TargetArn": self.sns_arn},
"Environment": {"Variables": {"FOO": "bar"}},
"KMSKeyArn": self.key_arn,
"TracingConfig": {"Mode": "Active"},
},
)
tags = mgr.client.list_tags(Resource=result["FunctionArn"])["Tags"]
self.assert_items(tags, {"Foo": "Bar"})
def test_config_coverage_for_lambda_update_from_plain(self):
mgr, result = self.create_a_lambda(
"test_config_coverage_for_lambda_update_from_plain"
)
result = self.update_a_lambda(
mgr,
**{
"environment": {"Variables": {"FOO": "bloo"}},
"kms_key_arn": self.key_arn,
"dead_letter_config": {"TargetArn": self.sns_arn},
"tracing_config": {"Mode": "Active"},
"tags": {"Foo": "Bloo"},
}
)
self.assert_items(
result,
{
"Description": "cloud-custodian lambda policy",
"FunctionName": "custodian-hello-world",
"Handler": "custodian_policy.run",
"MemorySize": 512,
"Runtime": "python2.7",
"Timeout": 60,
"DeadLetterConfig": {"TargetArn": self.sns_arn},
"Environment": {"Variables": {"FOO": "bloo"}},
"TracingConfig": {"Mode": "Active"},
},
)
tags = mgr.client.list_tags(Resource=result["FunctionArn"])["Tags"]
self.assert_items(tags, {"Foo": "Bloo"})
def test_config_coverage_for_lambda_update_from_complex(self):
mgr, result = self.create_a_lambda_with_lots_of_config(
"test_config_coverage_for_lambda_update_from_complex"
)
result = self.update_a_lambda(
mgr,
**{
"runtime": "python3.6",
"environment": {"Variables": {"FOO": "baz"}},
"kms_key_arn": "",
"dead_letter_config": {},
"tracing_config": {},
"tags": {"Foo": "Baz", "Bah": "Bug"},
}
)
self.assert_items(
result,
{
"Description": "cloud-custodian lambda policy",
"FunctionName": "custodian-hello-world",
"Handler": "custodian_policy.run",
"MemorySize": 512,
"Runtime": "python3.6",
"Timeout": 60,
"DeadLetterConfig": {"TargetArn": self.sns_arn},
"Environment": {"Variables": {"FOO": "baz"}},
"TracingConfig": {"Mode": "Active"},
},
)
tags = mgr.client.list_tags(Resource=result["FunctionArn"])["Tags"]
self.assert_items(tags, {"Foo": "Baz", "Bah": "Bug"})
def test_optional_packages(self):
data = {
"name": "s3-lambda-extra",
"resource": "s3",
"mode": {
"type": "cloudtrail",
"packages": ["boto3"],
"events": ["CreateBucket"],
},
}
p = self.load_policy(data)
pl = PolicyLambda(p)
pl.archive.close()
self.assertTrue("boto3/utils.py" in pl.archive.get_filenames())
    def test_delta_config_diff(self):
        # delta_function reports whether a lambda's desired config differs
        # from the deployed one (truthy => an update call is needed).
        delta = LambdaManager.delta_function
        # VpcConfig comparison must be order-insensitive for subnet and
        # security-group id lists...
        self.assertFalse(
            delta(
                {
                    "VpcConfig": {
                        "SubnetIds": ["s-1", "s-2"],
                        "SecurityGroupIds": ["sg-1", "sg-2"],
                    }
                },
                {
                    "VpcConfig": {
                        "SubnetIds": ["s-2", "s-1"],
                        "SecurityGroupIds": ["sg-2", "sg-1"],
                    }
                },
            )
        )
        # ...but a genuinely different security group is a change.
        self.assertTrue(
            delta(
                {
                    "VpcConfig": {
                        "SubnetIds": ["s-1", "s-2"],
                        "SecurityGroupIds": ["sg-1", "sg-2"],
                    }
                },
                {
                    "VpcConfig": {
                        "SubnetIds": ["s-2", "s-1"],
                        "SecurityGroupIds": ["sg-3", "sg-1"],
                    }
                },
            )
        )
        # Empty/default-shaped settings count as "no change"; populated
        # ones count as a change.
        self.assertFalse(delta({}, {"DeadLetterConfig": {}}))
        self.assertTrue(delta({}, {"DeadLetterConfig": {"TargetArn": "arn"}}))
        self.assertFalse(delta({}, {"Environment": {"Variables": {}}}))
        self.assertTrue(delta({}, {"Environment": {"Variables": {"k": "v"}}}))
        self.assertFalse(delta({}, {"KMSKeyArn": ""}))
        self.assertFalse(
            delta({}, {"VpcConfig": {"SecurityGroupIds": [], "SubnetIds": []}})
        )
    def test_config_defaults(self):
        # A policy with an empty mode block should produce exactly these
        # lambda config defaults (note python3.8 runtime, 900s timeout).
        p = PolicyLambda(Bag({"name": "hello", "data": {"mode": {}}}))
        self.maxDiff = None
        self.assertEqual(
            p.get_config(),
            {
                "DeadLetterConfig": {},
                "Description": "cloud-custodian lambda policy",
                "FunctionName": "custodian-hello",
                "Handler": "custodian_policy.run",
                "KMSKeyArn": "",
                "MemorySize": 512,
                "Role": "",
                "Runtime": "python3.8",
                "Tags": {},
                "Timeout": 900,
                "TracingConfig": {"Mode": "PassThrough"},
                "VpcConfig": {"SecurityGroupIds": [], "SubnetIds": []},
            },
        )
class PythonArchiveTest(unittest.TestCase):
def make_archive(self, modules=(), cache_file=None):
archive = self.make_open_archive(modules, cache_file=cache_file)
archive.close()
return archive
def make_open_archive(self, modules=(), cache_file=None):
archive = PythonPackageArchive(modules=modules, cache_file=cache_file)
self.addCleanup(archive.remove)
return archive
def get_filenames(self, modules=()):
return self.make_archive(modules).get_filenames()
def test_handles_stdlib_modules(self):
filenames = self.get_filenames(["webbrowser"])
self.assertTrue("webbrowser.py" in filenames)
def test_handles_third_party_modules(self):
filenames = self.get_filenames(["botocore"])
self.assertTrue("botocore/__init__.py" in filenames)
def test_handles_packages(self):
filenames = self.get_filenames(["c7n"])
self.assertTrue("c7n/__init__.py" in filenames)
self.assertTrue("c7n/resources/s3.py" in filenames)
self.assertTrue("c7n/ufuncs/s3crypt.py" in filenames)
def _install_namespace_package(self, tmp_sitedir):
# Install our test namespace package in such a way that both py27 and
# py36 can find it.
from setuptools import namespaces
installer = namespaces.Installer()
class Distribution:
namespace_packages = ["namespace_package"]
installer.distribution = Distribution()
installer.target = os.path.join(tmp_sitedir, "namespace_package.pth")
installer.outputs = []
installer.dry_run = False
installer.install_namespaces()
site.addsitedir(tmp_sitedir, known_paths=site._init_pathinfo())
def test_handles_namespace_packages(self):
bench = tempfile.mkdtemp()
def cleanup():
while bench in sys.path:
sys.path.remove(bench)
shutil.rmtree(bench)
self.addCleanup(cleanup)
subpackage = os.path.join(bench, "namespace_package", "subpackage")
os.makedirs(subpackage)
open(os.path.join(subpackage, "__init__.py"), "w+").write("foo = 42\n")
def _():
from namespace_package.subpackage import foo
assert foo # dodge linter
self.assertRaises(ImportError, _)
self._install_namespace_package(bench)
from namespace_package.subpackage import foo
self.assertEqual(foo, 42)
filenames = self.get_filenames(["namespace_package"])
self.assertTrue("namespace_package/__init__.py" not in filenames)
self.assertTrue("namespace_package/subpackage/__init__.py" in filenames)
self.assertTrue(filenames[-1].endswith("-nspkg.pth"))
def test_excludes_non_py_files(self):
filenames = self.get_filenames(["ctypes"])
self.assertTrue("README.ctypes" not in filenames)
def test_cant_get_bytes_when_open(self):
archive = self.make_open_archive()
self.assertRaises(AssertionError, archive.get_bytes)
def test_cant_add_files_when_closed(self):
archive = self.make_archive()
self.assertRaises(AssertionError, archive.add_file, __file__)
def test_cant_add_contents_when_closed(self):
archive = self.make_archive()
self.assertRaises(AssertionError, archive.add_contents, "foo", "bar")
def test_can_add_additional_files_while_open(self):
archive = self.make_open_archive()
archive.add_file(__file__)
archive.close()
filenames = archive.get_filenames()
self.assertTrue(os.path.basename(__file__) in filenames)
def test_can_set_path_when_adding_files(self):
archive = self.make_open_archive()
archive.add_file(__file__, "cheese/is/yummy.txt")
archive.close()
filenames = archive.get_filenames()
self.assertTrue(os.path.basename(__file__) not in filenames)
self.assertTrue("cheese/is/yummy.txt" in filenames)
def test_can_add_a_file_with_contents_from_a_string(self):
archive = self.make_open_archive()
archive.add_contents("cheese.txt", "So yummy!")
archive.close()
self.assertTrue("cheese.txt" in archive.get_filenames())
with archive.get_reader() as reader:
self.assertEqual(b"So yummy!", reader.read("cheese.txt"))
def test_custodian_archive_creates_a_custodian_archive(self):
archive = custodian_archive()
self.addCleanup(archive.remove)
archive.close()
filenames = archive.get_filenames()
self.assertTrue("c7n/__init__.py" in filenames)
def make_file(self):
bench = tempfile.mkdtemp()
path = os.path.join(bench, "foo.txt")
open(path, "w+").write("Foo.")
self.addCleanup(lambda: shutil.rmtree(bench))
return path
def check_world_readable(self, archive):
world_readable = 0o004 << 16
for info in zipfile.ZipFile(archive.path).filelist:
self.assertEqual(info.external_attr & world_readable, world_readable)
def test_files_are_all_readable(self):
self.check_world_readable(self.make_archive(["c7n"]))
def test_even_unreadable_files_become_readable(self):
path = self.make_file()
os.chmod(path, 0o600)
archive = self.make_open_archive()
archive.add_file(path)
archive.close()
self.check_world_readable(archive)
def test_unless_you_make_your_own_zipinfo(self):
    """A caller-supplied ZipInfo is stored verbatim, permissions included."""
    info = zipfile.ZipInfo(self.make_file())
    archive = self.make_open_archive()
    archive.add_contents(info, "foo.txt")
    archive.close()
    with self.assertRaises(AssertionError):
        self.check_world_readable(archive)
def test_cache_zip_file(self):
    """A pre-built zip passed as cache_file is used as the archive body."""
    cache = os.path.join(os.path.dirname(__file__), "data", "test.zip")
    archive = self.make_archive(cache_file=cache)
    members = archive.get_filenames()
    self.assertIn("cheese.txt", members)
    self.assertIn("cheese/is/yummy.txt", members)
    with archive.get_reader() as reader:
        self.assertEqual(reader.read("cheese.txt"), b"So yummy!")
        self.assertEqual(reader.read("cheese/is/yummy.txt"), b"True!")
class PycCase(unittest.TestCase):
    """Base fixture providing a temp directory on sys.path for
    import-machinery experiments."""

    def setUp(self):
        self.bench = tempfile.mkdtemp()
        sys.path.insert(0, self.bench)

    def tearDown(self):
        sys.path.remove(self.bench)
        shutil.rmtree(self.bench)

    def py_with_pyc(self, name):
        """Write a tiny module *name* into the bench directory,
        byte-compile it, and return the path of the .py source."""
        source = os.path.join(self.bench, name)
        with open(source, "w+") as handle:
            handle.write("42")
        py_compile.compile(source)
        return source
class Constructor(PycCase):
    """Tests PythonPackageArchive's module-resolution rules around
    *.py vs *.pyc files."""

    def test_class_constructor_only_accepts_py_modules_not_pyc(self):
        # Create a module with both *.py and *.pyc.
        self.py_with_pyc("foo.py")
        # Create another with a *.pyc but no *.py behind it.
        os.unlink(self.py_with_pyc("bar.py"))
        # Now: *.py takes precedence over *.pyc ...
        def get(name):
            # Basename of the file a module was actually imported from.
            return os.path.basename(importlib.import_module(name).__file__)
        # NOTE(review): assertTrue(x, msg) only checks truthiness of x;
        # assertEqual was probably intended here — confirm before changing.
        self.assertTrue(get("foo"), "foo.py")
        try:
            # ... and while *.pyc is importable ...
            self.assertTrue(get("bar"), "bar.pyc")
        except ImportError:
            try:
                # (except on PyPy)
                # http://doc.pypy.org/en/latest/config/objspace.lonepycfiles.html
                self.assertEqual(platform.python_implementation(), "PyPy")
            except AssertionError:
                # (... aaaaaand Python 3)
                # CPython 3 keeps bytecode in __pycache__, so a lone
                # top-level *.pyc is not importable either.
                self.assertEqual(platform.python_version_tuple()[0], "3")
        else:
            # ... we refuse it.
            with self.assertRaises(ValueError) as raised:
                PythonPackageArchive(modules=["bar"])
            msg = raised.exception.args[0]
            self.assertTrue(msg.startswith("Could not find a *.py source file"))
            self.assertTrue(msg.endswith("bar.pyc"))
        # We readily ignore a *.pyc if a *.py exists.
        archive = PythonPackageArchive(modules=["foo"])
        archive.close()
        self.assertEqual(archive.get_filenames(), ["foo.py"])
        with archive.get_reader() as reader:
            self.assertEqual(b"42", reader.read("foo.py"))
class AddPyFile(PycCase):
    """Behaviour of PythonPackageArchive.add_py_file."""

    def test_can_add_py_file(self):
        archive = PythonPackageArchive()
        archive.add_py_file(self.py_with_pyc("foo.py"))
        archive.close()
        self.assertEqual(["foo.py"], archive.get_filenames())

    def test_reverts_to_py_if_available(self):
        # Handing over the .pyc path should silently swap in the .py source.
        source = self.py_with_pyc("foo.py")
        archive = PythonPackageArchive()
        archive.add_py_file(source + "c")
        archive.close()
        self.assertEqual(["foo.py"], archive.get_filenames())

    def test_fails_if_py_not_available(self):
        source = self.py_with_pyc("foo.py")
        os.unlink(source)
        archive = PythonPackageArchive()
        self.assertRaises(IOError, archive.add_py_file, source + "c")
class DiffTags(unittest.TestCase):
    """LambdaManager.diff_tags returns (tags_to_set, keys_to_remove)."""

    def test_empty(self):
        self.assertEqual(LambdaManager.diff_tags({}, {}), ({}, []))

    def test_removal(self):
        self.assertEqual(
            LambdaManager.diff_tags({"Foo": "Bar"}, {}), ({}, ["Foo"]))

    def test_addition(self):
        self.assertEqual(
            LambdaManager.diff_tags({}, {"Foo": "Bar"}), ({"Foo": "Bar"}, []))

    def test_update(self):
        self.assertEqual(
            LambdaManager.diff_tags({"Foo": "Bar"}, {"Foo": "Baz"}),
            ({"Foo": "Baz"}, []))
|
|
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# An alternate Python Minecraft library for the Raspberry Pi
# Copyright (c) 2013-2015 Dave Jones <dave@waveform.org.uk>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
The events module defines the :class:`Events` class, which provides methods for
querying events in the Minecraft world, and :class:`BlockHitEvent` which is the
only event type currently supported.
.. note::
All items in this module are available from the :mod:`picraft` namespace
without having to import :mod:`picraft.events` directly.
The following items are defined in the module:
Events
======
.. autoclass:: Events
:members:
BlockHitEvent
=============
.. autoclass:: BlockHitEvent(pos, face, player)
:members:
PlayerPosEvent
==============
.. autoclass:: PlayerPosEvent(old_pos, new_pos, player)
:members:
IdleEvent
=========
.. autoclass:: IdleEvent()
:members:
"""
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
str = type('')
import logging
import threading
import time
from collections import namedtuple, Container
from .exc import ConnectionClosed
from .vector import Vector
from .player import Player
logger = logging.getLogger('picraft')
class BlockHitEvent(namedtuple('BlockHitEvent', ('pos', 'face', 'player'))):
    """
    Event generated when a player strikes a block with their sword.

    Instances are normally built by :meth:`Events.poll` rather than by user
    code. Note that only *right* clicks with the sword register as hits.

    .. attribute:: pos

        A :class:`~picraft.vector.Vector` giving the position of the struck
        block.

    .. attribute:: face

        Which side of the block was hit, as one of the six strings 'x+',
        'x-', 'y+', 'y-', 'z+', 'z-' (axis plus direction along that axis).

    .. attribute:: player

        The :class:`~picraft.player.Player` that hit the block.
    """

    @classmethod
    def from_string(cls, connection, s):
        # Server payload is "<vector>,<face-index>,<player-id>"; the vector
        # itself contains commas, so split from the right.
        vector_part, face_part, player_part = s.rsplit(',', 2)
        faces = {0: 'y-', 1: 'y+', 2: 'z-', 3: 'z+', 4: 'x-', 5: 'x+'}
        return cls(
            Vector.from_string(vector_part),
            faces[int(face_part)],
            Player(connection, int(player_part)))

    @property
    def __dict__(self):
        # Ensure __dict__ property works in Python 3.3 and above.
        return super(BlockHitEvent, self).__dict__

    def __repr__(self):
        return '<BlockHitEvent pos=%s face=%r player=%d>' % (
            self.pos, self.face, self.player.player_id)
class PlayerPosEvent(namedtuple('PlayerPosEvent', ('old_pos', 'new_pos', 'player'))):
    """
    Event generated when a tracked player moves.

    Instances are normally built by :meth:`Events.poll` rather than by user
    code.

    .. attribute:: old_pos

        A :class:`~picraft.vector.Vector` giving the player's position prior
        to this event (the actual position, decimals included, not the tile
        position).

    .. attribute:: new_pos

        A :class:`~picraft.vector.Vector` giving the player's position as of
        this event (actual position, decimals included).

    .. attribute:: player

        The :class:`~picraft.player.Player` that moved.
    """

    @property
    def __dict__(self):
        # Ensure __dict__ property works in Python 3.3 and above.
        return super(PlayerPosEvent, self).__dict__

    def __repr__(self):
        return '<PlayerPosEvent old_pos=%s new_pos=%s player=%d>' % (
            self.old_pos, self.new_pos, self.player.player_id)
class IdleEvent(namedtuple('IdleEvent', ())):
    """
    Event generated when a poll produced no other events; only emitted when
    :attr:`Events.include_idle` is ``True``.
    """

    @property
    def __dict__(self):
        # Ensure __dict__ property works in Python 3.3 and above.
        return super(IdleEvent, self).__dict__

    def __repr__(self):
        return '<IdleEvent>'
class Events(object):
    """
    This class implements the :attr:`~picraft.world.World.events` attribute.

    There are two ways of responding to picraft's events: the first is to
    :meth:`poll` for them manually, and process each event in the resulting
    list::

        >>> for event in world.events.poll():
        ...     print(repr(event))
        ...
        <BlockHitEvent pos=1,1,1 face="y+" player=1>,
        <PlayerPosEvent old_pos=0.2,1.0,0.7 new_pos=0.3,1.0,0.7 player=1>

    The second is to "tag" functions as event handlers with the decorators
    provided and then call the :meth:`main_loop` function which will handle
    polling the server for you, and call all the relevant functions as
    needed::

        @world.events.on_block_hit(pos=Vector(1,1,1))
        def hit_block(event):
            print('You hit the block at %s' % event.pos)

        world.events.main_loop()

    By default, only block hit events will be tracked. This is because it is
    the only type of event that the Minecraft server provides information
    about itself, and thus the only type of event that can be processed
    relatively efficiently. If you wish to track player positions, assign a
    set of player ids to the :attr:`track_players` attribute. If you wish to
    include idle events (which fire when nothing else is produced in response
    to :meth:`poll`) then set :attr:`include_idle` to ``True``.

    Finally, the :attr:`poll_gap` attribute specifies how long to pause
    during each iteration of :meth:`main_loop` to permit event handlers some
    time to interact with the server. Setting this to 0 will provide the
    fastest response to events, but will result in event handlers having to
    fight with event polling for access to the server.
    """

    def __init__(self, connection):
        self._connection = connection
        self._handlers = []
        self._poll_gap = 0.1
        self._include_idle = False
        # Maps tracked player id -> last seen position (rounded to 1dp).
        self._track_players = {}

    def _get_poll_gap(self):
        return self._poll_gap

    def _set_poll_gap(self, value):
        self._poll_gap = float(value)

    poll_gap = property(_get_poll_gap, _set_poll_gap, doc="""\
        The length of time (in seconds) to pause during :meth:`main_loop`.

        This property specifies the length of time to wait at the end of each
        iteration of :meth:`main_loop`. By default this is 0.1 seconds.

        The purpose of the pause is to give event handlers executing in the
        background time to communicate with the Minecraft server. Setting
        this to 0.0 will result in faster response to events, but also
        starves threaded event handlers of time to communicate with the
        server, resulting in "choppy" performance.
        """)

    def _get_track_players(self):
        return self._track_players.keys()

    def _set_track_players(self, value):
        try:
            self._track_players = {
                pid: Player(self._connection, pid).pos.round(1)
                for pid in value
                }
        except TypeError:
            if not isinstance(value, int):
                raise ValueError(
                    'track_players value must be a player id '
                    'or a sequence of player ids')
            # Round to 1dp here as well: the iterable branch above stores
            # rounded positions, and poll() compares against rounded values.
            # Storing the raw position caused a spurious PlayerPosEvent on
            # the first poll after assigning a single player id.
            self._track_players = {
                value: Player(self._connection, value).pos.round(1)
                }

    track_players = property(_get_track_players, _set_track_players, doc="""\
        The set of player ids for which movement should be tracked.

        By default the :meth:`poll` method will not produce player position
        events (:class:`PlayerPosEvent`). Producing these events requires
        extra interactions with the Minecraft server (one for each player
        tracked) which slow down response to block hit events.

        If you wish to track player positions, set this attribute to the set
        of player ids you wish to track and their positions will be stored.
        The next time :meth:`poll` is called it will query the positions for
        all specified players and fire player position events if they have
        changed.

        Given that the :attr:`~picraft.world.World.players` attribute
        represents a dictionary mapping player ids to players, if you wish to
        track all players you can simply do::

            >>> world.events.track_players = world.players
        """)

    def _get_include_idle(self):
        return self._include_idle

    def _set_include_idle(self, value):
        self._include_idle = bool(value)

    include_idle = property(_get_include_idle, _set_include_idle, doc="""\
        If ``True``, generate an idle event when no other events would be
        generated by :meth:`poll`. This attribute defaults to ``False``.
        """)

    def clear(self):
        """
        Forget all pending events that have not yet been retrieved with
        :meth:`poll`.

        This method is used to clear the list of events that have occurred
        since the last call to :meth:`poll` without retrieving them. This is
        useful for ensuring that events subsequently retrieved definitely
        occurred *after* the call to :meth:`clear`.
        """
        # Re-assigning track_players refreshes each tracked player's stored
        # position so stale moves aren't reported by the next poll().
        self._set_track_players(self._get_track_players())
        self._connection.send('events.clear()')

    def poll(self):
        """
        Return a list of all events that have occurred since the last call to
        :meth:`poll`.

        For example::

            >>> w = World()
            >>> w.events.track_players = w.players
            >>> w.events.include_idle = True
            >>> w.events.poll()
            [<PlayerPosEvent old_pos=0.2,1.0,0.7 new_pos=0.3,1.0,0.7 player=1>,
             <BlockHitEvent pos=1,1,1 face="x+" player=1>,
             <BlockHitEvent pos=1,1,1 face="x+" player=1>]
            >>> w.events.poll()
            [<IdleEvent>]
        """
        def player_pos_events(positions):
            # Compare the current (rounded) position of each tracked player
            # against the stored one, yielding an event on change.
            for pid, old_pos in positions.items():
                player = Player(self._connection, pid)
                new_pos = player.pos.round(1)
                if old_pos != new_pos:
                    yield PlayerPosEvent(old_pos, new_pos, player)
                positions[pid] = new_pos

        def block_hit_events():
            s = self._connection.transact('events.block.hits()')
            if s:
                for e in s.split('|'):
                    yield BlockHitEvent.from_string(self._connection, e)

        events = list(player_pos_events(self._track_players)) + list(block_hit_events())
        if events:
            return events
        elif self._include_idle:
            return [IdleEvent()]
        else:
            return []

    def main_loop(self):
        """
        Starts the event polling loop when using the decorator style of event
        handling (see :meth:`on_block_hit`).

        This method will not return, so be sure that you have specified all
        your event handlers before calling it. The event loop can only be
        broken by an unhandled exception, or by closing the world's
        connection (in the latter case the resulting
        :exc:`~picraft.exc.ConnectionClosed` exception will be suppressed as
        it is assumed that you want to end the script cleanly).
        """
        logger.info('Entering event loop')
        try:
            while True:
                self.process()
                time.sleep(self.poll_gap)
        except ConnectionClosed:
            logger.info('Connection closed; exiting event loop')

    def process(self):
        """
        Poll the server for events and call any relevant event handlers
        registered with :meth:`on_block_hit`.

        This method is called repeatedly by the event handler loop
        implemented by :meth:`main_loop`; developers should only call this
        method when their (presumably non-threaded) event handler is engaged
        in a long operation and they wish to permit events to be processed in
        the meantime.
        """
        for event in self.poll():
            for handler in self._handlers:
                if handler.matches(event):
                    handler.execute(event)

    def on_idle(self, thread=False, multi=True):
        """
        Decorator for registering a function as an idle handler.

        This decorator is used to mark a function as an event handler which
        will be called when no other event handlers have been called in an
        iteration of :meth:`main_loop`. The function will be called with the
        corresponding :class:`IdleEvent` as the only argument.

        Note that idle events will only be generated if :attr:`include_idle`
        is set to ``True``.
        """
        def decorator(f):
            self._handlers.append(IdleHandler(f, thread, multi))
            return f
        return decorator

    def on_player_pos(self, thread=False, multi=True, old_pos=None, new_pos=None):
        """
        Decorator for registering a function as a position change handler.

        This decorator is used to mark a function as an event handler which
        will be called for any events indicating that a player's position has
        changed while :meth:`main_loop` is executing. The function will be
        called with the corresponding :class:`PlayerPosEvent` as the only
        argument.

        The *old_pos* and *new_pos* attributes can be used to specify vectors
        or sequences of vectors (including a
        :class:`~picraft.vector.vector_range`) that the player position
        events must match in order to activate the associated handler. For
        example, to fire a handler every time any player enters or walks over
        blocks within (-10, 0, -10) to (10, 0, 10)::

            from picraft import World, Vector, vector_range

            world = World()
            world.events.track_players = world.players

            from_pos = Vector(-10, 0, -10)
            to_pos = Vector(10, 0, 10)
            @world.events.on_player_pos(new_pos=vector_range(from_pos, to_pos + 1))
            def in_box(event):
                world.say('Player %d stepped in the box' % event.player.player_id)

            world.events.main_loop()

        Various effects can be achieved by combining *old_pos* and *new_pos*
        filters. For example, one could detect when a player crosses a
        boundary in a particular direction, or decide when a player enters or
        leaves a particular area.

        Note that only players specified in :attr:`track_players` will
        generate player position events.
        """
        def decorator(f):
            self._handlers.append(PlayerPosHandler(f, thread, multi, old_pos, new_pos))
            return f
        return decorator

    def on_block_hit(self, thread=False, multi=True, pos=None, face=None):
        """
        Decorator for registering a function as an event handler.

        This decorator is used to mark a function as an event handler which
        will be called for any events indicating a block has been hit while
        :meth:`main_loop` is executing. The function will be called with the
        corresponding :class:`BlockHitEvent` as the only argument.

        The *pos* attribute can be used to specify a vector or sequence of
        vectors (including a :class:`~picraft.vector.vector_range`); in this
        case the event handler will only be called for block hits on matching
        vectors.

        The *face* attribute can be used to specify a face or sequence of
        faces for which the handler will be called.

        For example, to specify that one handler should be called for hits
        on the top of any blocks, and another should be called only for hits
        on any face of block at the origin one could use the following code::

            from picraft import World, Vector

            world = World()

            @world.events.on_block_hit(pos=Vector(0, 0, 0))
            def origin_hit(event):
                world.say('You hit the block at the origin')

            @world.events.on_block_hit(face="y+")
            def top_hit(event):
                world.say('You hit the top of a block at %d,%d,%d' % event.pos)

            world.events.main_loop()

        The *thread* parameter (which defaults to ``False``) can be used to
        specify that the handler should be executed in its own background
        thread, in parallel with other handlers.

        Finally, the *multi* parameter (which only applies when *thread* is
        ``True``) specifies whether multi-threaded handlers should be allowed
        to execute in parallel. When ``True`` (the default), threaded
        handlers execute as many times as activated in parallel. When
        ``False``, a single instance of a threaded handler is allowed to
        execute at any given time; simultaneous activations are ignored (but
        not queued, as with unthreaded handlers).
        """
        def decorator(f):
            self._handlers.append(BlockHitHandler(f, thread, multi, pos, face))
            return f
        return decorator
class EventHandler(object):
"""
This is an internal object used to associate event handlers with their
activation restrictions.
The *action* parameter specifies the function to be run when a matching
event is received from the server.
The *thread* parameter specifies whether the *action* will be launched in
its own background thread. If *multi* is ``False``, then the
:meth:`execute` method will ensure that any prior execution has finished
before launching another one.
"""
def __init__(self, action, thread, multi):
self.action = action
self.thread = thread
self.multi = multi
self._thread = None
def execute(self, event):
"""
Launches the *action* in a background thread if necessary. If required,
this method also ensures threaded actions don't overlap.
"""
if self.thread:
if self.multi:
threading.Thread(target=self.action, args=(event,)).start()
elif not self._thread:
self._thread = threading.Thread(target=self.execute_single, args=(event,))
self._thread.start()
else:
self.action(event)
def execute_single(self, event):
try:
self.action(event)
finally:
self._thread = None
def matches(self, event):
"""
Tests whether or not *event* match all the filters for the handler that
this object represents.
"""
return False
class PlayerPosHandler(EventHandler):
    """
    Associates a handler with player-position events, optionally filtered by
    the player's old and/or new tile position (each filter may be ``None``,
    a single Vector, or a container of Vectors).
    """

    def __init__(self, action, thread, multi, old_pos, new_pos):
        super(PlayerPosHandler, self).__init__(action, thread, multi)
        self.old_pos = old_pos
        self.new_pos = new_pos

    def matches(self, event):
        if not isinstance(event, PlayerPosEvent):
            return False
        return (
            self.matches_pos(self.old_pos, event.old_pos) and
            self.matches_pos(self.new_pos, event.new_pos))

    def matches_pos(self, test, pos):
        # None means "no filter"; a Vector matches the floored tile
        # position; any Container is tested for membership.
        if test is None:
            return True
        if isinstance(test, Vector):
            return test == pos.floor()
        if isinstance(test, Container):
            return pos.floor() in test
        return False
class BlockHitHandler(EventHandler):
    """
    Associates a handler with block-hit events, optionally filtered by block
    position (*pos*: ``None``, a Vector, or a container of Vectors) and by
    struck face (*face*: ``None``, a face string, or a container of faces).
    Both filters must match for the action to fire.
    """

    def __init__(self, action, thread, multi, pos, face):
        super(BlockHitHandler, self).__init__(action, thread, multi)
        self.pos = pos
        # Normalize bytes faces (e.g. b'y+') to text for comparison.
        if isinstance(face, bytes):
            face = face.decode('ascii')
        self.face = face

    def matches(self, event):
        if not isinstance(event, BlockHitEvent):
            return False
        return self.matches_pos(event.pos) and self.matches_face(event.face)

    def matches_pos(self, pos):
        if self.pos is None:
            return True
        if isinstance(self.pos, Vector):
            return self.pos == pos
        if isinstance(self.pos, Container):
            return pos in self.pos
        return False

    def matches_face(self, face):
        if self.face is None:
            return True
        if isinstance(self.face, str):
            return self.face == face
        if isinstance(self.face, Container):
            return face in self.face
        return False
class IdleHandler(EventHandler):
    """
    Associates a handler with idle events; no additional filters apply.
    """

    def matches(self, event):
        return isinstance(event, IdleEvent)
|
|
# coding:utf-8
# GANs model
import random
import numpy as np
from ...configure import BACKEND
class WGAN_TF:
    """Wasserstein GAN (weight-clipping variant) built on a TensorFlow 1.x
    backend obtained from the project's BACKEND registry.

    NOTE(review): relies on TF1-style placeholders/sessions/variable scopes;
    presumably not compatible with TF2 eager mode — confirm against the
    configured backend.
    """

    def __init__(self, x_size, z_size, net_G, net_D, lr=5e-5, clip=0.01, d_loss_addition=0, g_loss_addition=0,
                 batch_size=32):
        """
        :param x_size: the size of every input sample.
        :param z_size: the size of every noise sample.
        :param net_G: a function that builds the generator graph from the noise placeholder.
        :param net_D: a function that builds the discriminator graph from an input tensor.
        :param lr: learning rate
        :param clip: the value used for clipping the discriminator weights
        :param d_loss_addition: a tensorflow graph node/number added to the loss function of D
        :param g_loss_addition: a tensorflow graph node/number added to the loss function of G
        :param batch_size: mini-batch size used by :meth:`fit`.
        """
        self.tf = BACKEND["tf"]
        self._z_size = z_size
        self._lr = lr
        self._clip = clip
        self._d_loss_addition = d_loss_addition
        self._g_loss_addition = g_loss_addition
        # TF1 placeholders: x holds real samples, z holds generator noise.
        self._x = self.tf.placeholder("float32", [None, x_size])
        self._z = self.tf.placeholder("float32", [None, z_size])
        with self.tf.variable_scope("G"):
            self._G = net_G(self.z)
        with self.tf.variable_scope("D"):
            self._fake_D = net_D(self.G)
        # Same scope with reuse=True so D shares weights on real data.
        with self.tf.variable_scope("D", reuse=True):
            self._real_D = net_D(self.x)
        self._batch_size = batch_size
        self._session = None
        self._netG = net_G
        self._netD = net_D
        self._build_loss_and_opt()
        # Ops run per discriminator step; see _get_train_dlist().
        self._dlist = self._get_train_dlist()

    @property
    def x(self):
        # Placeholder for real input samples.
        return self._x

    @property
    def z(self):
        # Placeholder for noise samples.
        return self._z

    @property
    def G(self):
        # Generator output tensor.
        return self._G

    @property
    def fake_D(self):
        # Discriminator score on generated samples.
        return self._fake_D

    @property
    def real_D(self):
        # Discriminator score on real samples.
        return self._real_D

    @property
    def d_clip(self):
        # Grouped weight-clipping op for the discriminator variables.
        return self._d_clip

    @property
    def wd(self):
        # Estimated Wasserstein distance (monitoring metric).
        return self._wd

    @property
    def d_loss(self):
        return self._d_loss

    @property
    def g_loss(self):
        return self._g_loss

    def _build_loss_and_opt(self):
        """Build the WGAN losses, the clip op, and RMSProp optimizers."""
        # Partition trainable variables by their scope-name prefix.
        vars = self.tf.trainable_variables()
        D_PARAMS = [var for var in vars if var.name.startswith("D")]
        G_PARAMS = [var for var in vars if var.name.startswith("G")]
        # Weight clipping keeps D approximately Lipschitz (WGAN paper).
        d_clip = [self.tf.assign(var, self.tf.clip_by_value(var, -self._clip, self._clip)) for var in D_PARAMS]
        self._d_clip = self.tf.group(*d_clip)
        self._wd = self.tf.reduce_mean(self.real_D) - self.tf.reduce_mean(self.fake_D)
        self._d_loss = self.tf.reduce_mean(self.fake_D) - self.tf.reduce_mean(self.real_D) + self._d_loss_addition
        self._g_loss = self.tf.reduce_mean(-self.fake_D) + self._g_loss_addition
        self.d_opt = self.tf.train.RMSPropOptimizer(self._lr).minimize(
            self.d_loss,
            global_step=self.tf.Variable(0),
            var_list=D_PARAMS
        )
        self.g_opt = self.tf.train.RMSPropOptimizer(self._lr).minimize(
            self.g_loss,
            global_step=self.tf.Variable(0),
            var_list=G_PARAMS
        )

    def open_session(self, path=None, is_restore=False):
        """Create the TF session (if absent), initialize variables, and
        optionally restore a checkpoint from *path*."""
        if self._session is None:
            self._session = self.tf.Session()
            self._session.run(self.tf.global_variables_initializer())
            if is_restore:
                saver = self.tf.train.Saver()
                saver.restore(self._session, path)

    def save_model(self, path):
        """
        save the model in your path
        :param path: your path
        :return:
        """
        if self._session is None:
            raise ValueError("session is None")
        saver = self.tf.train.Saver()
        saver.save(self._session, path)

    def close_session(self):
        # Close and drop the session; open_session() may be called again.
        self._session.close()
        self._session = None

    def _get_train_dlist(self):
        # Ops fetched for each discriminator update; the clip op runs last.
        return [self.wd, self.d_loss, self.d_opt, self.d_clip]

    def fit(self, x, epoch, visual=True, callbacks=None):
        """
        Train the GAN: 5 discriminator steps per 1 generator step.

        :param x: your input samples (indexable sequence).
        :param epoch: the number of training epochs.
        :param visual: print training progress if True, otherwise be silent.
        :param callbacks: a list of functions called after each epoch with a
            context dict (keys: predict, ep, g_loss, wd, d_loss).
        :return:
        """
        if self._session is None:
            raise ValueError("session is None")
        def predict(n):
            # Bound helper passed to callbacks for sampling the generator.
            return self.predict(n)
        length = len(x)
        batch_size = self._batch_size
        for ep in range(epoch):
            # Shuffle via an index list so x itself is not reordered.
            index = [_ for _ in range(length)]  # will be repeated if shuffle x directly
            random.shuffle(index)
            train_image = [x[i] for i in index]
            # random.shuffle(x)
            step = 0
            i = 0
            j = i + batch_size
            g_loss = np.inf
            d_loss = np.inf
            wd = np.inf
            while step < length:
                # WGAN schedule: several critic (D) updates per G update.
                for _ in range(5):
                    noise = np.random.normal(size=(batch_size, self._z_size))
                    # Back i up so the final slice still spans batch_size
                    # samples when the tail of the data is short.
                    if abs(j - i) < batch_size:
                        i = i - (batch_size - (j - i))
                    wd, d_loss, _, _ = self._session.run(self._dlist, feed_dict={
                        self.x: train_image[i:j],
                        self.z: noise
                    })
                    i = j
                    j += batch_size
                    if j > length:
                        j = length
                    step += batch_size
                    if i == length:
                        break
                    if visual:
                        print("\rep:%d/%d %d/%d d_loss:%.4f g_loss:%.4f wd:%.4f" % (
                            ep + 1, epoch, step, length, d_loss, g_loss, wd), end="")
                # One generator update per outer iteration.
                for _ in range(1):
                    noise = np.random.normal(size=(batch_size, self._z_size))
                    g_loss, _ = self._session.run([self.g_loss, self.g_opt], feed_dict={
                        self.z: noise
                    })
                if visual:
                    print("\rep:%d/%d %d/%d d_loss:%.4f g_loss:%.4f wd:%.4f" % (
                        ep + 1, epoch, step, length, d_loss, g_loss, wd), end="")
            if visual:
                print()
            context = {
                "predict": predict,
                "ep": ep,
                "g_loss": g_loss,
                "wd": wd,
                "d_loss": d_loss
            }
            if callbacks is not None:
                for callback in callbacks:
                    callback(context)

    def predict(self, n):
        """
        generate the sample with noise
        :param n: the number of samples.
        :return: the generator output for n random noise vectors.
        """
        if self._session is None:
            raise ValueError("session is None")
        noise = np.random.normal(size=(n, self._z_size))
        gen = self._session.run(self.G, feed_dict={
            self.z: noise
        })
        return gen
class WGAN_GP_TF(WGAN_TF):
    """Wasserstein GAN with gradient penalty (WGAN-GP).

    Replaces the parent's weight clipping with a gradient-penalty term on
    the discriminator loss and switches the optimizers to Adam. All tensor
    properties (x, z, G, fake_D, real_D, wd, d_loss, g_loss, ...) are
    inherited unchanged from WGAN_TF — the original re-declared nine of
    them with byte-identical bodies; that dead duplication is removed.
    """

    def __init__(self, x_size, z_size, net_G, net_D, lr=1e-4, d_loss_addition=0, g_loss_addition=0,
                 batch_size=32, LAMBDA=1, K=1):
        """
        :param x_size: the size of every input sample.
        :param z_size: the size of every noise sample.
        :param net_G: a function that builds the generator graph.
        :param net_D: a function that builds the discriminator graph.
        :param lr: learning rate.
        :param d_loss_addition: a graph node/number added to the D loss.
        :param g_loss_addition: a graph node/number added to the G loss.
        :param LAMBDA: weight of the gradient-penalty term.
        :param K: target gradient norm (usually 1).
        """
        self._K = K
        self._LAMBDA = LAMBDA
        # clip=np.inf effectively disables the parent's weight clipping.
        super(WGAN_GP_TF, self).__init__(x_size, z_size, net_G, net_D, lr=lr,
                                         d_loss_addition=d_loss_addition,
                                         g_loss_addition=g_loss_addition, clip=np.inf,
                                         batch_size=batch_size)

    def _build_loss_and_opt(self):
        """Build WGAN-GP losses (critic loss + gradient penalty) and Adam
        optimizers; no clip op is created."""
        vars = self.tf.trainable_variables()
        D_PARAMS = [var for var in vars if var.name.startswith("D")]
        G_PARAMS = [var for var in vars if var.name.startswith("G")]
        self._wd = self.tf.reduce_mean(self.real_D) - self.tf.reduce_mean(self.fake_D)
        self._d_loss = self.tf.reduce_mean(self.fake_D) - self.tf.reduce_mean(self.real_D) + self._d_loss_addition
        self._g_loss = self.tf.reduce_mean(-self.fake_D) + self._g_loss_addition
        ####### GP-METHOD
        # Random interpolation between real and generated samples, at which
        # the critic's gradient norm is penalized towards K.
        alpha = self.tf.random_uniform(
            shape=[self._batch_size, 1],
            minval=0.,
            maxval=1.
        )  # sampling
        insert_value = self.G - alpha * (self.x - self.G)
        with self.tf.variable_scope("D", reuse=True):
            gradients = self.tf.gradients(self._netD(insert_value), [insert_value])[0]
        slopes = self.tf.sqrt(self.tf.reduce_sum(self.tf.square(gradients), reduction_indices=[1]))
        gradient_penalty = self.tf.reduce_mean((slopes - self._K) ** 2)
        #######
        self._d_loss += self._LAMBDA * gradient_penalty
        self.d_opt = self.tf.train.AdamOptimizer(self._lr, beta1=0.4, beta2=0.9).minimize(
            self._d_loss,
            global_step=self.tf.Variable(0),
            var_list=D_PARAMS
        )
        self.g_opt = self.tf.train.AdamOptimizer(self._lr, beta1=0.4, beta2=0.9).minimize(
            self._g_loss,
            global_step=self.tf.Variable(0),
            var_list=G_PARAMS
        )

    def _get_train_dlist(self):
        # There is no clip op in WGAN-GP; a dummy variable keeps the
        # 4-element shape that fit()'s unpacking expects.
        return [self.wd, self.d_loss, self.d_opt, self.tf.Variable(0)]
|
|
# Standard Python packages
import subprocess, os
try:
    # cElementTree is the fast C accelerator on Python 2; it was deprecated
    # in Python 3.3 and removed in 3.9.
    import xml.etree.cElementTree as ElementTree
except ImportError:
    # Fall back to the pure/merged implementation. The original fallback
    # re-imported cElementTree (identical to the try branch), so the
    # ImportError was simply re-raised on modern interpreters.
    import xml.etree.ElementTree as ElementTree
# Special dependencies
import numpy # sudo apt-get install python-numpy
# Cassius interdependencies
import cassius.mathtools
import cassius.utilities
import cassius.color
import cassius.containers
class ScorePlane:
    """Calls AugustusPMMLConsumer on all points in a plane of feature-space and returns the result for plotting.

    Arguments:
       pmmlModel (string): name of the PMML model to evaluate
       unitable (InspectTable): dataset to overlay on the scored plane
       featureX (string or `None`): name of the feature to plot on the
          X axis (attempts to guess if `None`)
       xmin, xmax (numbers or `None`): minimum and maximum X values
          (guesses from the data if `None`)
       featureY (string or `None`): name of the feature to plot on the
          Y axis (attempts to guess if `None`)
       ymin, ymax (numbers or `None`): minimum and maximum Y values
          (guesses from the data if `None`)
       othervalue (dict): option values of the features that are not
          plotted as {"feature1": value1, "feature2": value2} (if
          unspecified, the mean value is used)
       cuts (string or `None`): cuts to apply as an arbitrary
          expression or `None` for no cuts
       numbins (int): number of bins in x and y (total number is bins**2)
       limit (int or `None`): limit the number of data points in each
          category to plot
       configFileName (string): configuration file for AugustusPMMLConsumer
       planeFileName (string): file of values to score
       outputFileName (string): output file from AugustusPMMLConsumer

    Behavior:
       Constructor only configures the object; no action is taken
       until `configure` or `score` are called.
    """

    def __init__(self, pmmlModel, unitable, featureX=None, xmin=None, xmax=None, featureY=None, ymin=None, ymax=None, othervalue={}, cuts=None, numbins=300, limit=1000, configFileName="/tmp/consume.xml", planeFileName="/tmp/plane.csv", outputFileName="/tmp/scores.xml"):
        # NOTE(review): `othervalue={}` is a mutable default argument; it is
        # only read in score(), but a single shared default dict is a classic
        # pitfall — confirm no caller mutates it.
        self.pmmlModel, self.unitable, self.featureX, self.xmin, self.xmax, self.featureY, self.ymin, self.ymax, self.othervalue, self.cuts, self.numbins, self.limit, self.configFileName, self.planeFileName, self.outputFileName = pmmlModel, unitable, featureX, xmin, xmax, featureY, ymin, ymax, othervalue, cuts, numbins, limit, configFileName, planeFileName, outputFileName

    def score(self, dryrun=False):
        """Creates configuration files and runs AugustusPMMLConsumer.

        Arguments:
           dryrun (bool): if True, only set up the configuration
              files; don't run AugustusPMMLConsumer

        Sets the following member data:
           plot (Overlay): the final plot, call `view(scoreplane.plot)`
           regionplot (RegionMap): just the pixel-map of scored regions
           scatterplots (list of Scatter): the data points with each
              category in a separate plot
           legend (Legend): plot legend
           stdout (string): standard output from AugustusPMMLConsumer
           stderr (string): standard error from AugustusPMMLConsumer
           scores (ElementTree): output of AugustusPMMLConsumer,
              parsed from the XML
        """
        # 0. Preliminaries

        def findModel(pmml):
            # Return the first supported model element in the PMML tree
            # (implicitly None if no known model tag is present).
            for m in "MiningModel", "TreeModel", "ClusteringModel":
                if pmml.find(m) is not None:
                    return pmml.find(m)

        def findFeatures(pmmlModel):
            # Split the model's active MiningSchema fields into continuous
            # and categorical lists, using the optype declared for each
            # DataField in the DataDictionary.
            pmml = ElementTree.ElementTree(file=pmmlModel)
            optype = {}
            for child in pmml.find("DataDictionary"):
                if child.tag == "DataField":
                    optype[child.attrib["name"]] = child.attrib["optype"]
            continuous_features, categorical_features = [], []
            for child in findModel(pmml).find("MiningSchema"):
                if child.tag == "MiningField" and child.attrib.get("usageType") != "predicted":
                    if optype[child.attrib["name"]] == "continuous":
                        continuous_features.append(child.attrib["name"])
                    elif optype[child.attrib["name"]] == "categorical":
                        categorical_features.append(child.attrib["name"])
            return continuous_features, categorical_features

        continuous_features, categorical_features = findFeatures(self.pmmlModel)

        # Guess the plotted axes when unspecified: the first two distinct
        # continuous features found in the model.
        if self.featureX is None:
            for feature in continuous_features:
                if feature != self.featureY:
                    self.featureX = feature
                    break
        if self.featureY is None:
            for feature in continuous_features:
                if feature != self.featureX:
                    self.featureY = feature
                    break
        if self.featureX is None or self.featureY is None:
            raise ValueError, "There must be at least two continuous features in the PMML file."

        # For each feature that is NOT on an axis, pick one representative
        # value: the caller-supplied one from `othervalue`, otherwise the
        # mean (continuous) or most common value (categorical) within a
        # quartile-based cut region.
        otherfields = []
        othervalues = []
        for feature in continuous_features + categorical_features:
            if feature != self.featureX and feature != self.featureY:
                if feature in self.othervalue:
                    otherfields.append(feature)
                    othervalues.append(self.othervalue[feature])
                else:
                    otherfields.append(feature)
                    if self.cuts is None:
                        low, high = cassius.utilities.calcrange_quartile(self.unitable.field(feature))
                        if low == high:
                            # Quartile range collapsed; fall back to full range.
                            low, high = min(self.unitable.field(feature)), max(self.unitable.field(feature))
                        if low == high:
                            raise ValueError, "Feature %s does not have any dynamic range: %g %g" % (feature, low, high)
                        cuts = "(%g < %s) & (%s < %g)" % (low, feature, feature, high)
                    else:
                        low, high = cassius.utilities.calcrange_quartile(self.unitable(feature, self.cuts))
                        if low == high:
                            low, high = min(self.unitable.field(feature)), max(self.unitable(feature, self.cuts))
                        if low == high:
                            raise ValueError, "Feature %s with cuts \"%s\" does not have any dynamic range: %g %g" % (feature, self.cuts, low, high)
                        cuts = "(%g < %s) & (%s < %g)" % (low, feature, feature, high)
                        cuts += " & (%s)" % self.cuts
                    if feature in continuous_features:
                        othervalues.append(cassius.mathtools.mean(self.unitable(feature, cuts)))
                    else:
                        othervalues.append(cassius.mathtools.ubiquitous(self.unitable(feature, cuts)))

        def categoryLabel(pmmlModel):
            # Find the name of the "predicted" MiningField anywhere in the
            # tree. Uses StopIteration as a non-local early exit (a Python 2
            # idiom; StopIteration carries the field name as its payload).
            pmml = ElementTree.ElementTree(file=pmmlModel)
            def recurse(node):
                if node.tag == "MiningField" and node.attrib.get("usageType") == "predicted":
                    raise StopIteration, node.attrib.get("name")
                for child in node.getchildren():
                    recurse(child)
            try:
                recurse(pmml.getroot())
            except StopIteration, name:
                return str(name)
            raise RuntimeError, "Predicted field not found in PMML."

        # Clustering models have no predicted field in the data, so labels
        # are synthesized per cluster; also look one level down inside a
        # segmented MiningModel.
        clusteringModel = ElementTree.ElementTree(file=self.pmmlModel).getroot().find("ClusteringModel")
        if clusteringModel is None:
            try:
                clusteringModel = ElementTree.ElementTree(file=self.pmmlModel).getroot().find("MiningModel").find("Segmentation").find("Segment").find("ClusteringModel")
            except AttributeError:
                pass
        if clusteringModel is not None:
            numberOfClusters = int(clusteringModel.attrib["numberOfClusters"])
            categories = ["category_%02d" % i for i in range(numberOfClusters)]
        else:
            predictedField = categoryLabel(self.pmmlModel)
            categories = numpy.unique(self.unitable.field(predictedField))
            categories.sort()
        # Map category label -> index, for filling the region map later.
        category_lookup = dict(map(lambda (x, y): (y, x), enumerate(categories)))

        # 1. Create scatter-plots of the original data to overlay on the Augustus output
        self.scatterplots = []
        fillcolors = cassius.color.lightseries(len(categories))
        # NOTE(review): `if clusteringModel:` tests Element truthiness, which
        # for ElementTree is False when the element has no children; the
        # checks above use `is not None` — confirm this difference is intended.
        if clusteringModel:
            self.scatterplots.append(self.unitable.scatter("%s, %s" % (self.featureX, self.featureY),
                                                           (self.cuts if self.cuts is not None else ""),
                                                           markercolor="black",
                                                           markeroutline=None,
                                                           limit=self.limit,
                                                           xlabel=self.featureX,
                                                           ylabel=self.featureY))
        else:
            # One scatter per category so each gets its own marker color.
            for category, color in zip(categories, fillcolors):
                self.scatterplots.append(self.unitable.scatter("%s, %s" % (self.featureX, self.featureY),
                                                               "(%s) & (%s == '%s')" % ((self.cuts if self.cuts is not None else True), predictedField, category),
                                                               markercolor=cassius.color.darken(color),
                                                               markeroutline="black",
                                                               limit=self.limit,
                                                               xlabel=self.featureX,
                                                               ylabel=self.featureY,
                                                               ))
        fillcolors = map(cassius.color.lighten, fillcolors) # even lighter

        # 2. Make a configuration file for AugustusPMMLConsumer
        def addelement(base, childtag, **attrib):
            # Append a child element with the given attributes; return it.
            child = ElementTree.Element(childtag)
            for name, value in attrib.items():
                child.set(name, value)
            base.append(child)
            return child

        pmmlDeployment = ElementTree.Element("pmmlDeployment")
        logging = addelement(pmmlDeployment, "logging")
        addelement(logging, "toStandardOutput")
        inputModel = addelement(pmmlDeployment, "inputModel")
        addelement(inputModel, "fromFile", name=self.pmmlModel)
        inputData = addelement(pmmlDeployment, "inputData")
        addelement(inputData, "fromFile", name=self.planeFileName, type="UniTable")
        addelement(inputData, "readOnce")
        output = addelement(pmmlDeployment, "output")
        report = addelement(output, "report", name="Scores")
        addelement(report, "toFile", name=self.outputFileName)
        outputRow = addelement(report, "outputRow", name="Event")
        addelement(outputRow, "outputColumn", name="x", fieldName=self.featureX)
        addelement(outputRow, "outputColumn", name="y", fieldName=self.featureY)
        addelement(outputRow, "score", name="Score")
        ElementTree.ElementTree(pmmlDeployment).write(self.configFileName)

        # 3. Create a RegionMap object to represent the output
        if self.xmin is None or self.xmax is None or self.ymin is None or self.ymax is None:
            # Derive missing bounds from the scatter data, then pad the
            # guessed edges so points don't sit on the frame.
            xmin, ymin, xmax, ymax = cassius.containers.Overlay(*self.scatterplots).ranges()
            if self.xmin is not None: xmin = self.xmin
            if self.xmax is not None: xmax = self.xmax
            if self.ymin is not None: ymin = self.ymin
            if self.ymax is not None: ymax = self.ymax
            width = xmax - xmin; height = ymax - ymin
            if self.xmin is None: xmin -= 0.2*width
            if self.xmax is None: xmax += 0.2*width
            if self.ymin is None: ymin -= 0.2*height
            # NOTE(review): the top edge gets 0.4 padding vs 0.2 elsewhere —
            # possibly deliberate (room for a legend); confirm.
            if self.ymax is None: ymax += 0.4*height
        else:
            xmin, ymin, xmax, ymax = self.xmin, self.ymin, self.xmax, self.ymax
        self.regionplot = cassius.containers.RegionMap(self.numbins, xmin, xmax, self.numbins, ymin, ymax, categories, None, colors=fillcolors, bordercolor="black")

        # 4. Create a data file of points on the plane to score
        # (one CSV row per region-map bin center; the predicted column gets
        # a dummy value because the consumer expects it to exist).
        planeFile = file(self.planeFileName, "w")
        planeFile.write("%s,%s," % (self.featureX, self.featureY))
        planeFile.write(",".join(otherfields))
        if clusteringModel is not None:
            planeFile.write("\n")
        else:
            planeFile.write("," + predictedField + "\n")
        for i in xrange(self.regionplot.xbins):
            for j in xrange(self.regionplot.ybins):
                planeFile.write("%g,%g," % self.regionplot.center(i, j))
                planeFile.write(",".join(map(str, othervalues)))
                if clusteringModel is not None:
                    planeFile.write("\n")
                else:
                    planeFile.write(",dummy\n")
        # Dropping the last reference closes the file (CPython refcounting).
        del planeFile

        if not dryrun:
            # 5. Run AugustusPMMLConsumer
            try:
                os.remove(self.outputFileName)
            except OSError:
                pass
            # NOTE(review): wait() with both pipes attached can deadlock if
            # the consumer fills either pipe buffer before exiting — confirm
            # output volume is small, or switch to communicate().
            AugustusPMMLConsumer = subprocess.Popen(["AugustusPMMLConsumer", "-c", self.configFileName], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            if AugustusPMMLConsumer.wait() != 0:
                raise RuntimeError, "AugustusPMMLConsumer failed: (stderr shown below)\n%s" % AugustusPMMLConsumer.stderr.read()
            self.stdout = AugustusPMMLConsumer.stdout.read()
            self.stderr = AugustusPMMLConsumer.stderr.read()

            # 6. Read the Augustus output
            self.scores = ElementTree.ElementTree(file=self.outputFileName)
            if clusteringModel is not None:
                # Cluster labels only exist in the consumer output; rebuild
                # the category list and lookup from what was actually scored.
                categories = set()
                for score in self.scores.getroot().getchildren():
                    categories.add(score.find("Score").text)
                categories = list(categories)
                categories.sort()
                category_lookup = dict(map(lambda (x, y): (y, x), enumerate(categories)))
                self.regionplot.categories = categories
            # Fill the bin grid in the same (i, j) order the plane file was
            # written, relying on the consumer preserving row order.
            scoreValues = numpy.empty((self.regionplot.xbins, self.regionplot.ybins), dtype=numpy.int)
            i = 0; j = 0
            for score in self.scores.getroot().getchildren():
                # x1, y1 = float(score.find("x").text), float(score.find("y").text)
                # x2, y2 = self.regionplot.center(i, j)
                # if abs(x1 - x2) > 1e-5 or abs(y1 - y2) > 1e-5:
                #     raise Exception, "Mismatch with input data: x = %g or %g, y = %g or %g" % (x1, x2, y1, y2)
                scoreValues[i,j] = category_lookup[score.find("Score").text]
                j += 1
                if j == self.regionplot.ybins:
                    j = 0
                    i += 1
            self.regionplot.categorizer = scoreValues

            # 7. Plot the results
            self.regionplot.xlabel = self.featureX
            self.regionplot.ylabel = self.featureY
            legend_data = []
            if clusteringModel is None:
                for i, category in enumerate(categories):
                    legend_data.append([category, cassius.containers.Style(marker="circle",
                                                                           markercolor=self.scatterplots[i].markercolor,
                                                                           markeroutline="black",
                                                                           fillcolor=fillcolors[i])])
            for field, value in zip(otherfields, othervalues):
                try:
                    legend_data.append([field, cassius.mathtools.str_sigfigs(value, 2)])
                except TypeError:
                    # Non-numeric value (categorical): show it verbatim.
                    legend_data.append([field, value])
            if len(legend_data) > 0:
                self.legend = cassius.containers.Legend(legend_data, justify="lr", colwid=[0.7, 0.3])
                plots = [self.regionplot] + self.scatterplots + [self.legend]
            else:
                self.legend = None
                plots = [self.regionplot] + self.scatterplots
            self.plot = cassius.containers.Overlay(*plots, frame=0)
|
|
# -*- coding: utf-8 -*-
#
# This file is part of Karesansui.
#
# Copyright (C) 2009-2012 HDE, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import web
import string
import re
import karesansui
from karesansui.lib.rest import Rest, auth
from karesansui.db.model._2pysilhouette import Job, JobGroup, JOBGROUP_TYPE
from karesansui.db.access.machine import findbyhost1
from karesansui.db.access._2pysilhouette import jg_findby1, jg_save, corp
from karesansui.db.access._2pysilhouette import save_job_collaboration
from karesansui.db.access.machine2jobgroup import new as m2j_new
from pysilhouette.command import dict2command
from karesansui.lib.utils import is_param, generate_phrase, create_file, \
get_filelist, symlink2real
from karesansui.lib.const import ISCSI_COMMAND_GET, ISCSI_COMMAND_ADD, \
ISCSI_CONFIG_VALUE_AUTH_METHOD_CHAP, PORT_MIN_NUMBER, PORT_MAX_NUMBER, \
CHAP_USER_MIN_LENGTH, CHAP_USER_MAX_LENGTH, \
CHAP_PASSWORD_MIN_LENGTH, CHAP_PASSWORD_MAX_LENGTH, \
ISCSI_DEVICE_DIR, ISCSI_DEVICE_NAME_TPL
from karesansui.lib.checker import Checker, CHECK_EMPTY, CHECK_VALID, CHECK_LENGTH, \
CHECK_MIN, CHECK_MAX
def get_network_storages(data):
    """Parse the 'get iSCSI list' command output into storage dicts.

    Each non-empty line of ``data`` must contain six space-separated
    fields: host, port, tpgt, iqn, activity, autostart. For active
    targets (activity == '1') the matching device symlinks under
    ISCSI_DEVICE_DIR are collected into the node's disk_list; partition
    symlinks (…-partN) are marked non-blockable and their parent device
    is marked partitionable.

    Args:
        data: raw stdout of the list command, one target per line.
    Returns:
        list of dicts, one per network storage node.
    """
    network_storages = []
    dev_symlink_list = get_filelist(ISCSI_DEVICE_DIR)
    dev_symlink_list.sort()
    unmountable_regexp = re.compile("-part[0-9]+$")
    for line in data.split('\n'):
        if not line:
            continue
        # Six fields -> maxsplit must be 5 (was 6, an off-by-one that let a
        # stray seventh token break the tuple unpacking below).
        (host,port,tpgt,iqn,activity,autostart) = line.split(' ', 5)
        node = {
            'type' : "iSCSI",
            'hostname' : host,
            'port' : port,
            'tpgt' : tpgt,
            'iqn' : iqn,
            # int() replaces the long-deprecated string.atoi (same result
            # for base-10 strings, and portable to Python 3).
            'activity' : int(activity),
            'autostart' : int(autostart),
            'disk_list' : [],
        }
        if activity == '1':
            disk_list = []
            symlink_regexp = re.compile("^%s" % (re.escape(ISCSI_DEVICE_NAME_TPL % (host, port, iqn))))
            unmountable_flag = {}
            for sym_link in dev_symlink_list:
                if symlink_regexp.search(sym_link):
                    real_path = symlink2real("%s/%s" % (ISCSI_DEVICE_DIR, sym_link))
                    is_blockable = True
                    if unmountable_regexp.search(sym_link):
                        is_blockable = False
                        # Remember the parent device name of this partition.
                        unmountable_flag[unmountable_regexp.sub("", sym_link)] = True
                    disk_list.append({'symlink_name' : sym_link,
                                      'realpath_list' : real_path,
                                      'is_blockable' : is_blockable,
                                      'is_partitionable' : False,
                                      })
            # A device that has partition symlinks is itself partitionable.
            for disk in disk_list:
                for key in unmountable_flag.keys():
                    if disk['symlink_name'] == key:
                        disk['is_partitionable'] = True
            node['disk_list'] = disk_list
        network_storages.append(node)
    return network_storages
def get_iscsi_cmd(obj, host_id):
    """Run the 'Get iSCSI List' job for a host and return the parsed
    network storages, or False if the job collaboration failed."""
    cmd_name = u'Get iSCSI List'
    command_path = "%s/%s" % (karesansui.config['application.bin.dir'], ISCSI_COMMAND_GET)

    job_group = JobGroup(cmd_name, karesansui.sheconf['env.uniqkey'])
    job_group.jobs.append(Job('%s command' % cmd_name, 0, command_path))
    job_group.type = JOBGROUP_TYPE['PARALLEL']

    target_host = findbyhost1(obj.orm, host_id)
    machine2jobgroup = m2j_new(machine=target_host,
                               jobgroup_id=-1,
                               uniq_key=karesansui.sheconf['env.uniqkey'],
                               created_user=obj.me,
                               modified_user=obj.me,
                               )
    # corp() runs the job synchronously; an explicit False means failure.
    if corp(obj.orm, obj.pysilhouette.orm, machine2jobgroup, job_group) is False:
        return False
    return get_network_storages(job_group.jobs[0].action_stdout)
def validates_network_storage(obj):
    """Validate the POST input for adding an iSCSI network storage.

    Every failed check appends a message to checker.errors (exposed to the
    template through obj.view.alert); returns True only if all checks pass.
    """
    checker = Checker()
    ok = True
    _ = obj._
    checker.errors = []

    # Target hostname: mandatory, must be a valid domain name.
    if not is_param(obj.input, 'network_storage_host_name'):
        ok = False
        checker.add_error(_('"%s" is required.') % _('Target Hostname'))
    else:
        ok = checker.check_domainname(_('Target Hostname'),
                                      obj.input.network_storage_host_name,
                                      CHECK_EMPTY | CHECK_VALID,
                                      ) and ok

    # Port number: optional, but must be in range when present.
    if is_param(obj.input, 'network_storage_port_number'):
        ok = checker.check_number(_('Target Port Number'),
                                  obj.input.network_storage_port_number,
                                  CHECK_VALID | CHECK_MIN | CHECK_MAX,
                                  PORT_MIN_NUMBER,
                                  PORT_MAX_NUMBER,
                                  ) and ok

    # Authentication type: mandatory; CHAP additionally requires
    # user and password fields of bounded length.
    if not is_param(obj.input, 'network_storage_authentication'):
        ok = False
        checker.add_error(_('"%s" is required.') % _('iSCSI Authentication Type'))
    else:
        ok = checker.check_empty(_('iSCSI Authentication Type'),
                                 obj.input.network_storage_authentication,
                                 ) and ok
        if obj.input.network_storage_authentication == ISCSI_CONFIG_VALUE_AUTH_METHOD_CHAP:
            if not is_param(obj.input, 'network_storage_user'):
                ok = False
                checker.add_error(_('"%s" is required.') % _('iSCSI Authentication User'))
            else:
                ok = checker.check_username_with_num(_('iSCSI Authentication User'),
                                                     obj.input.network_storage_user,
                                                     CHECK_VALID | CHECK_LENGTH,
                                                     CHAP_USER_MIN_LENGTH,
                                                     CHAP_USER_MAX_LENGTH,
                                                     ) and ok
            if not is_param(obj.input, 'network_storage_password'):
                ok = False
                checker.add_error(_('"%s" is required.') % _('iSCSI Authentication Password'))
            else:
                ok = checker.check_password(_('iSCSI Authentication Password'),
                                            obj.input.network_storage_password,
                                            obj.input.network_storage_password,
                                            CHECK_LENGTH,
                                            CHAP_PASSWORD_MIN_LENGTH,
                                            CHAP_PASSWORD_MAX_LENGTH,
                                            ) and ok

    obj.view.alert = checker.errors
    return ok
class HostBy1NetworkStorage(Rest):
    """REST handler for /host/<id>/networkstorage: list iSCSI targets
    (GET) and enqueue an 'add iSCSI target' job (POST)."""

    @auth
    def _GET(self, *param, **params):
        # List the host's iSCSI network storages, or return the defaults
        # used to pre-populate the "add" input form.
        host_id = self.chk_hostby1(param)
        if host_id is None: return web.notfound()
        if self.is_mode_input() is True:
            self.view.host_id = host_id
            # Template defaults for the input form (3260 = standard iSCSI port).
            self.view.info = {
                'type' : "iSCSI",
                'hostname' : "",
                'port' : "3260",
                'tpgt' : "",
                'iqn' : "",
                'activity' : "",
                'autostart' : "",
                'auth' : "",
                'user' : "",
            }
            return True
        network_storages = get_iscsi_cmd(self, host_id)
        if network_storages is False:
            self.logger.debug("Get iSCSI List command failed. Return to timeout")
            #return web.internalerror('Internal Server Error. (Timeout)')
        # NOTE(review): on failure `network_storages` is False and is still
        # handed to the view (the internalerror return above is commented
        # out) — confirm the template tolerates a non-list value.
        self.view.network_storages = network_storages
        return True

    @auth
    def _POST(self, *param, **params):
        # Validate form input, build the iscsi-add command line, and save
        # it as an asynchronous job collaboration.
        host_id = self.chk_hostby1(param)
        if host_id is None: return web.notfound()
        if not validates_network_storage(self):
            self.logger.debug("Network storage add failed. Did not validate.")
            return web.badrequest(self.view.alert)
        hostname = self.input.network_storage_host_name
        port = self.input.network_storage_port_number
        # NOTE(review): this local `auth` shadows the imported @auth
        # decorator inside the method body (harmless here, but confusing).
        auth = self.input.network_storage_authentication
        user = self.input.network_storage_user
        password = self.input.network_storage_password
        auto_start = False
        if is_param(self.input, 'network_storage_auto_start'):
            auto_start = True
        options = {'auth' : auth}
        if port:
            options['target'] = "%s:%s" % (hostname, port)
        else:
            options['target'] = hostname
        if auth == ISCSI_CONFIG_VALUE_AUTH_METHOD_CHAP:
            options['user'] = user
            try:
                # Pass the CHAP secret via a temp file so it does not appear
                # on the command line; fall back to a plain --password option
                # if the file cannot be created.
                password_file_name = '/tmp/' + generate_phrase(12,'abcdefghijklmnopqrstuvwxyz')
                create_file(password_file_name, password)
                options['password-file'] = password_file_name
            except Exception, e:
                self.logger.error('Failed to create tmp password file. - file=%s' % (password_file_name))
                options['password'] = password
        _cmd = dict2command(
            "%s/%s" % (karesansui.config['application.bin.dir'], ISCSI_COMMAND_ADD), options)
        if auto_start:
            _cmd = _cmd + " --autostart"
        cmd_name = u'Add iSCSI'
        jobgroup = JobGroup(cmd_name, karesansui.sheconf['env.uniqkey'])
        jobgroup.jobs.append(Job('%s command' % cmd_name, 0, _cmd))
        host = findbyhost1(self.orm, host_id)
        _machine2jobgroup = m2j_new(machine=host,
                                    jobgroup_id=-1,
                                    uniq_key=karesansui.sheconf['env.uniqkey'],
                                    created_user=self.me,
                                    modified_user=self.me,
                                    )
        save_job_collaboration(self.orm,
                               self.pysilhouette.orm,
                               _machine2jobgroup,
                               jobgroup,
                               )
        return web.accepted()
# URL routing table consumed by the web framework. The pattern is now a raw
# string: '\d' in a plain string is an invalid escape sequence (a
# DeprecationWarning, and eventually an error, on modern Pythons); the
# matched text is identical.
urls = (
    r'/host/(\d+)/networkstorage[/]?(\.part)?$', HostBy1NetworkStorage,
)
|
|
# Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_config import cfg
from neutron.plugins.sriovnicagent.common import config # noqa
from neutron.plugins.sriovnicagent import sriov_nic_agent
from neutron.tests import base
# Canonical fake device identifier used by the tests below.
DEVICE_MAC = '11:22:33:44:55:66'
class TestSriovAgent(base.BaseTestCase):
    """Unit tests for SriovNicSwitchAgent: device add/remove handling and
    the scan_devices bookkeeping."""

    def setUp(self):
        super(TestSriovAgent, self).setUp()
        # disable setting up periodic state reporting
        cfg.CONF.set_override('report_interval', 0, 'AGENT')
        cfg.CONF.set_default('firewall_driver',
                             'neutron.agent.firewall.NoopFirewallDriver',
                             group='SECURITYGROUP')
        cfg.CONF.set_default('enable_security_group',
                             False,
                             group='SECURITYGROUP')

        class MockFixedIntervalLoopingCall(object):
            # Replacement looping call that runs its callback exactly once,
            # synchronously, instead of scheduling it.
            def __init__(self, f):
                self.f = f

            def start(self, interval=0):
                self.f()

        # Bug fix: the patch object was previously created but never
        # started, so FixedIntervalLoopingCall was not actually replaced
        # while the agent below was being constructed.
        mock.patch('neutron.openstack.common.loopingcall.'
                   'FixedIntervalLoopingCall',
                   new=MockFixedIntervalLoopingCall).start()
        self.addCleanup(mock.patch.stopall)

        self.agent = sriov_nic_agent.SriovNicSwitchAgent({}, {}, 0)

    def test_treat_devices_removed_with_existed_device(self):
        # Known device reported down: two info logs, no resync requested.
        agent = sriov_nic_agent.SriovNicSwitchAgent({}, {}, 0)
        devices = [DEVICE_MAC]
        with mock.patch.object(agent.plugin_rpc,
                               "update_device_down") as fn_udd:
            fn_udd.return_value = {'device': DEVICE_MAC,
                                   'exists': True}
            with mock.patch.object(sriov_nic_agent.LOG,
                                   'info') as log:
                resync = agent.treat_devices_removed(devices)
                self.assertEqual(2, log.call_count)
                self.assertFalse(resync)
            self.assertTrue(fn_udd.called)

    def test_treat_devices_removed_with_not_existed_device(self):
        # Unknown device: one debug log, still no resync.
        agent = sriov_nic_agent.SriovNicSwitchAgent({}, {}, 0)
        devices = [DEVICE_MAC]
        with mock.patch.object(agent.plugin_rpc,
                               "update_device_down") as fn_udd:
            fn_udd.return_value = {'device': DEVICE_MAC,
                                   'exists': False}
            with mock.patch.object(sriov_nic_agent.LOG,
                                   'debug') as log:
                resync = agent.treat_devices_removed(devices)
                self.assertEqual(1, log.call_count)
                self.assertFalse(resync)
            self.assertTrue(fn_udd.called)

    def test_treat_devices_removed_failed(self):
        # RPC failure while reporting the device down must request a resync.
        agent = sriov_nic_agent.SriovNicSwitchAgent({}, {}, 0)
        devices = [DEVICE_MAC]
        with mock.patch.object(agent.plugin_rpc,
                               "update_device_down") as fn_udd:
            fn_udd.side_effect = Exception()
            with mock.patch.object(sriov_nic_agent.LOG,
                                   'debug') as log:
                resync = agent.treat_devices_removed(devices)
                self.assertEqual(1, log.call_count)
                self.assertTrue(resync)
            self.assertTrue(fn_udd.called)

    def mock_scan_devices(self, expected, mock_current,
                          registered_devices, updated_devices):
        # Helper: stub the eswitch manager's current device set and check
        # scan_devices() classifies devices as expected.
        self.agent.eswitch_mgr = mock.Mock()
        self.agent.eswitch_mgr.get_assigned_devices.return_value = mock_current

        results = self.agent.scan_devices(registered_devices, updated_devices)
        self.assertEqual(expected, results)

    def test_scan_devices_returns_empty_sets(self):
        registered = set()
        updated = set()
        mock_current = set()
        expected = {'current': set(),
                    'updated': set(),
                    'added': set(),
                    'removed': set()}
        self.mock_scan_devices(expected, mock_current, registered, updated)

    def test_scan_devices_no_changes(self):
        registered = set(['1', '2'])
        updated = set()
        mock_current = set(['1', '2'])
        expected = {'current': set(['1', '2']),
                    'updated': set(),
                    'added': set(),
                    'removed': set()}
        self.mock_scan_devices(expected, mock_current, registered, updated)

    def test_scan_devices_new_and_removed(self):
        registered = set(['1', '2'])
        updated = set()
        mock_current = set(['2', '3'])
        expected = {'current': set(['2', '3']),
                    'updated': set(),
                    'added': set(['3']),
                    'removed': set(['1'])}
        self.mock_scan_devices(expected, mock_current, registered, updated)

    def test_scan_devices_new_updates(self):
        registered = set(['1'])
        updated = set(['2'])
        mock_current = set(['1', '2'])
        expected = {'current': set(['1', '2']),
                    'updated': set(['2']),
                    'added': set(['2']),
                    'removed': set()}
        self.mock_scan_devices(expected, mock_current, registered, updated)

    def test_scan_devices_updated_missing(self):
        # An "updated" device no longer present must be dropped silently.
        registered = set(['1'])
        updated = set(['2'])
        mock_current = set(['1'])
        expected = {'current': set(['1']),
                    'updated': set(),
                    'added': set(),
                    'removed': set()}
        self.mock_scan_devices(expected, mock_current, registered, updated)

    def test_process_network_devices(self):
        agent = self.agent
        device_info = {'current': set(),
                       'added': set(['mac3', 'mac4']),
                       'updated': set(['mac2', 'mac3']),
                       'removed': set(['mac1'])}
        agent.sg_agent.prepare_devices_filter = mock.Mock()
        agent.sg_agent.refresh_firewall = mock.Mock()
        agent.treat_devices_added_updated = mock.Mock(return_value=False)
        agent.treat_devices_removed = mock.Mock(return_value=False)

        agent.process_network_devices(device_info)

        # Added devices get filters prepared; added+updated are treated
        # together; removed devices are treated separately.
        agent.sg_agent.prepare_devices_filter.assert_called_with(
            set(['mac3', 'mac4']))
        self.assertTrue(agent.sg_agent.refresh_firewall.called)
        agent.treat_devices_added_updated.assert_called_with(set(['mac2',
                                                                  'mac3',
                                                                  'mac4']))
        agent.treat_devices_removed.assert_called_with(set(['mac1']))

    def test_treat_devices_added_updated_admin_state_up_true(self):
        agent = self.agent
        mock_details = {'device': 'aa:bb:cc:dd:ee:ff',
                        'port_id': 'port123',
                        'network_id': 'net123',
                        'admin_state_up': True,
                        'network_type': 'vlan',
                        'segmentation_id': 100,
                        'profile': {'pci_slot': '1:2:3.0'},
                        'physical_network': 'physnet1'}
        agent.plugin_rpc = mock.Mock()
        agent.plugin_rpc.get_devices_details_list.return_value = [mock_details]
        agent.eswitch_mgr = mock.Mock()
        agent.eswitch_mgr.device_exists.return_value = True
        agent.set_device_state = mock.Mock()

        resync_needed = agent.treat_devices_added_updated(
                            set(['aa:bb:cc:dd:ee:ff']))

        self.assertFalse(resync_needed)
        agent.eswitch_mgr.device_exists.assert_called_with('aa:bb:cc:dd:ee:ff',
                                                           '1:2:3.0')
        # admin_state_up=True -> device switched on and reported up.
        agent.eswitch_mgr.set_device_state.assert_called_with(
                                                        'aa:bb:cc:dd:ee:ff',
                                                        '1:2:3.0',
                                                        True)
        self.assertTrue(agent.plugin_rpc.update_device_up.called)

    def test_treat_devices_added_updated_admin_state_up_false(self):
        agent = self.agent
        mock_details = {'device': 'aa:bb:cc:dd:ee:ff',
                        'port_id': 'port123',
                        'network_id': 'net123',
                        'admin_state_up': False,
                        'network_type': 'vlan',
                        'segmentation_id': 100,
                        'profile': {'pci_slot': '1:2:3.0'},
                        'physical_network': 'physnet1'}
        agent.plugin_rpc = mock.Mock()
        agent.plugin_rpc.get_devices_details_list.return_value = [mock_details]
        agent.remove_port_binding = mock.Mock()

        resync_needed = agent.treat_devices_added_updated(
                            set(['aa:bb:cc:dd:ee:ff']))

        self.assertFalse(resync_needed)
        # admin_state_up=False -> the device must NOT be reported up.
        self.assertFalse(agent.plugin_rpc.update_device_up.called)
|
|
from .element import Element
from .constants import LABEL_CLASS_MARK, ALIGN_JUSTIFY
class TextAtom(Element):
    """Smallest unit of laid-out text: a run of characters with one style.

    Style parameters that are None here are filled in from the parent
    paragraph during Paragraph.calculate(); the boolean flags mark word
    and line boundaries used by TextLine when spacing and justifying.
    """

    def init(self):
        # NOTE(review): the parameter keys mix hyphens ('font-family') and
        # underscores ('base_line', 'line_height') — presumably intentional
        # input syntax, but confirm before normalizing.
        self.content = self._params.get('content', '')
        self.font_family = self._params.get('font-family')
        self.font_style = self._params.get('font-style')
        self.font_size = self._params.get('font-size')
        self.text_color = self._params.get('text-color')
        self.base_line = self._params.get('base_line')
        self.line_height = self._params.get('line_height')
        self.rise = self._params.get('rise')
        self.offset = self._params.get('offset')
        self.gray = self._params.get('gray')
        self.indent = self._params.get('indent')
        self.scale = self._params.get('scale')
        self.beginning_of_line = self._params.get('beginning-of-line', False)
        self.end_of_line = self._params.get('end-of-line', False)
        self.end_of_word = self._params.get('end-of-word', True)
        self.word_spacing = self._params.get('word-spacing', 0)
        self.char_spacing = self._params.get('char-spacing', 0)
        self.length = len(self.content)

    def __repr__(self):
        # Boundary markers appended to the repr: α = beginning of line,
        # '.' = end of word, ¶ = end of line.
        specials = ''
        if self.beginning_of_line:
            specials += "\u03B1"
        if self.end_of_word:
            specials += "."
        if self.end_of_line:
            specials += "\u00B6"
        if specials:
            specials = ' ' + specials
        return '<Atom %s%i _%s |%i w%i "%s"%s>' % ('+' if self.indent is not None and self.indent >= 0 else '',
                                                   int(self.indent or 0),
                                                   '?' if self.base_line is None else str(int(self.base_line)),
                                                   self.line_height or 0,
                                                   self.width or 0,
                                                   self.content[:20],
                                                   specials)

    @property
    def font_key(self):
        # Cache/lookup key for the font family+style pair.
        return "%s-%s" % (self.font_family, self.font_style)

    @property
    def font_key_ext(self):
        # Extended key that also pins the point size.
        return "%s-%s-%i" % (self.font_family, self.font_style, self.font_size)

    def calculate(self):
        # Measure the rendered content via the document's font object
        # (PIL-style getsize -> (width, height) — confirm).
        font = self.doc.get_font(self.font_family, self.font_style, self.font_size)
        self.width, self.height = font.getsize(self.content)
        # The parent's line_height acts as a multiplier on the font size.
        self.line_height = self.font_size * self.parent.line_height

    @property
    def absolute_base_line(self):
        # Base line in page coordinates (parent offset + local base line).
        px, py = self.parent.absolute_position
        return py + self.base_line
class TextLine:
    """One laid-out line of TextAtom objects.

    Collects atoms, maintains inter-word spaces and line-boundary flags,
    computes the line height / base line, and can justify the line to its
    full width.
    """

    def __init__(self, top, width, indent=0):
        self.children = []            # atoms placed on this line, in order
        self.max_char_spacing = 3.0   # spacing limits (not enforced here)
        self.max_word_spacing = .25
        self.line_height = None
        self.top = top                # y offset of the line's top edge
        self.width = width            # available horizontal space
        self.indent = indent          # first-line indent (may be negative)
        self.base_line = None

    def add_atom(self, atom):
        """Append `atom` and return the horizontal space still free."""
        self.children.append(atom)
        self.add_spaces()
        all_words_width = float(sum([x.width for x in self.children]))
        return self.width - self.indent - all_words_width

    def add_spaces(self):
        """Normalize trailing spaces and beginning/end-of-line flags."""
        for atom in self.children[:-1]:
            # Every word-ending atom except the last on the line carries a
            # trailing space; re-measure the atom after appending it.
            if atom.end_of_word and not atom.content.endswith(' '):
                atom.content += ' '
                atom.calculate()
            atom.end_of_line = False
        self.children[0].beginning_of_line = True
        self.children[0].indent = self.indent
        self.children[-1].end_of_line = True

    def calculate_line_height(self):
        """Compute the line height, position atoms vertically, return it."""
        if len(self.children) == 0:
            self.line_height = 0
            return 0
        self.line_height = max([x.line_height for x in self.children])
        self.children[0].line_height = self.line_height
        self.base_line = self.top + self.line_height
        for atom in self.children:
            atom.base_line = self.base_line
            atom.y = self.base_line - atom.height
        return self.line_height

    def justify(self):
        """Stretch word spacing so the line fills its full width.

        The extra space is distributed via the first atom's word_spacing.
        Fix: a line with fewer than two words used to raise
        ZeroDivisionError (nr_of_words - 1 == 0); it is now left as-is.
        """
        width = float(self.width) - float(self.indent)
        all_words_width = float(sum([x.width for x in self.children]))
        nr_of_words = len([x for x in self.children
                           if x.end_of_word or x.end_of_line])
        if nr_of_words < 2:
            # Nothing to stretch between; leave spacing untouched.
            return
        missing = width - all_words_width
        self.children[0].word_spacing = missing / float(nr_of_words - 1)
class Paragraph(Element):
    """A paragraph of TextAtoms: performs word-wrap into TextLines, then
    flattens the positioned atoms back into self.children.

    Style/layout parameters default to the parent element's values; the
    split() method supports breaking a paragraph across a page boundary.
    """

    def init(self):
        self.label_class = LABEL_CLASS_MARK
        self.calculated = False
        self.word_wrap = self._params.get(
            'word-wrap', self.parent.word_wrap)
        self.font_family = self._params.get(
            'font-family', self.parent.font_family)
        self.font_style = self._params.get(
            'font-style', self.parent.font_style)
        self.font_size = self._params.get(
            'font-size', self.parent.font_size)
        self.text_color = self._params.get(
            'text-color', self.parent.text_color)
        self.gray = self._params.get(
            'gray')
        self.scale = self._params.get(
            'scale')
        self.align = self._params.get(
            'align', self.parent.align)
        self.content = self._params.get(
            'content', '')
        # NOTE(review): self.margin_left/right/top/bottom are read here but
        # only (re)assigned further down — this relies on the Element base
        # class having set them beforehand; confirm.
        self.max_width = self._params.get(
            'max-width', self.parent.width - (self.margin_left or 0) -
            (self.margin_right or 0))
        self.max_height = self._params.get(
            'max-height', self.parent.height - (self.margin_top or 0) -
            (self.margin_bottom or 0))
        self.line_height = self._params.get(
            'line-height', self.parent.line_height)
        self.space_width = self._params.get(
            'space-width', self.parent.space_width)
        self.margin_top = self._params.get(
            'margin-top', 0)
        self.margin_right = self._params.get(
            'margin-right', 0)
        self.margin_bottom = self._params.get(
            'margin-bottom', 0)
        self.margin_left = self._params.get(
            'margin-left', 0)
        self.first_indent = self._params.get(
            'first-indent', self.parent.first_indent)
        self.preformatted = self._params.get(
            'preformatted', False)
        self.split_min_height = self._params.get(
            'split-min-height', 0)
        self.min_after = self._params.get(
            'min-after', 0)
        self.no_split = self._params.get(
            'no-split', False)
        self.base_line = 0

    def __repr__(self):
        return '<Paragraph (%i, _%i/%i) %i+%i+%i ay%i a_%i w%i "%s">' % (
            self.x,
            self.base_line,
            self.y,
            self.margin_top,
            self.height,
            self.margin_bottom,
            int(self.absolute_position[1]),
            int(self.absolute_base_line),
            int(self.width),
            self.content[:20].replace('\n', '')
        )

    @property
    def stream(self):
        # Iterate the paragraph's children (atoms after calculate()).
        for obj in self.children:
            yield obj

    def calculate(self):
        """Word-wrap the atoms into TextLines, compute total height, then
        flatten the positioned atoms back into self.children."""
        self.width = self.max_width
        self.height = 0
        x = 0
        y = 0
        lines = []
        line_no = 0
        # First line honors the paragraph's first-line indent.
        current_line = TextLine(0, self.width, indent=self.first_indent)
        left = self.width
        for atom in self.children:
            # Inherit any unset style attributes from the paragraph before
            # measuring the atom.
            atom.doc = self.doc
            atom.parent = self
            atom.font_family = atom.font_family or self.font_family
            atom.font_style = atom.font_style or self.font_style
            atom.font_size = atom.font_size or self.font_size
            atom.text_color = atom.text_color or self.text_color
            atom.gray = atom.gray or self.gray
            atom.scale = atom.scale or self.scale
            atom.calculate()
            if atom.width <= left:
                # Atom fits on the current line.
                left = current_line.add_atom(atom)
                if self.preformatted:
                    # Preformatted text: never wrap by width.
                    left = 0
            else:
                # Close the current line and start a new one with this atom.
                line_height = current_line.calculate_line_height()
                self.height += line_height
                last_top = current_line.top
                lines.append(current_line)
                current_line = TextLine(last_top + line_height, self.width)
                line_no += 1
                if line_no == 1 and self.first_indent > 0: # second line
                    # Compensate for the first line's positive indent.
                    current_line.indent = -self.first_indent
                left = current_line.add_atom(atom)
                if self.preformatted:
                    left = 0
        # Flush the final (possibly partial) line.
        self.height += current_line.calculate_line_height()
        lines.append(current_line)
        # NOTE(review): justification is applied to every line, including
        # the last/partial one — confirm that is the intended typography.
        for line in lines:
            if self.align == ALIGN_JUSTIFY:
                line.justify()
        self.children = []
        for line in lines:
            for atom in line.children:
                self.children.append(atom)
        self.base_line = lines[0].base_line or 0
        self.calculated = True

    @property
    def atoms(self):
        # Iterate the laid-out atoms (valid after calculate()).
        for atom in self.children:
            yield atom

    @property
    def absolute_base_line(self):
        # First base line in page coordinates.
        ax, ay = self.absolute_position
        return ay + self.base_line

    def split(self, height_left):
        """Split the paragraph at `height_left`; return the overflow as a
        new Paragraph, or None if no split is possible/needed."""
        if self.no_split:
            return None
        latest_split_point = None
        for n, atom in enumerate(self.children):
            if atom.base_line <= height_left:
                latest_split_point = n
            else:
                break
        if latest_split_point is None:
            return None
        else:
            # NOTE(review): this slices at the loop variable `n`, not at
            # `latest_split_point + 1`. When the loop breaks, n is the first
            # non-fitting atom and the two coincide; but when ALL atoms fit
            # (no break), n is the last index and the final atom is split
            # off spuriously — confirm split() is only called on overflow.
            left_over = self.children[n:]
            self.children = self.children[:n]
            self.height = self.children[-1].base_line
            lop = Paragraph(self.doc, self.parent, {
                'margin-top' : self.margin_top,
                'margin-bottom': self.margin_bottom,
            })
            lop.children = left_over
            # Rebase the carried-over atoms relative to the new paragraph.
            for atom in lop.atoms:
                atom.base_line -= self.height
                atom.y -= self.height
            lop.height = lop.children[-1].base_line
            lop.width = self.width
            lop.calculated = True
            return lop
|
|
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Common routines shared by gen_parse_tree.py and gen_resolved_ast.py.
"""
import re
def Trim(s):
    """Remove double blank lines and trailing spaces from string."""
    without_trailing_spaces = re.sub(r' *\n', r'\n', s)
    return re.sub(r'\n\n\n*', r'\n\n', without_trailing_spaces)
def CleanIndent(text, prefix=''):
    """Remove extra indentation from comments or code.

    This allows code to be written as triple-quoted strings with natural
    indentation in the python file; that indentation is removed and
    replaced with the provided prefix.

    Args:
      text: Input code
      prefix: will be added to the start of each line.

    Returns:
      Code text with indentation regenerated.
    """
    if not text:
        return text
    lines = text.strip().split('\n')

    def LeadingSpaceCount(line):
        # Count contiguous leading whitespace characters.
        count = 0
        while count < len(line) and line[count].isspace():
            count += 1
        return count

    # The indentation to strip is the minimum indent over the non-empty
    # lines after the first one (the first line was stripped already).
    # Indentation beyond that minimum is preserved.
    indented_lines = [line for line in lines[1:] if line.strip()]
    if indented_lines:
        strip_prefix = ' ' * min(
            LeadingSpaceCount(line) for line in indented_lines)
    else:
        strip_prefix = ''

    def StripIndent(line):
        return line[len(strip_prefix):] if line.startswith(strip_prefix) else line

    # Right-strip, unindent, and re-prefix every line.
    return '\n'.join(prefix + StripIndent(line.rstrip()) for line in lines)
class ScalarType(object):
    """Class used for scalar types as Field ctype parameters."""

    def __init__(self,
                 ctype,
                 proto_type=None,
                 java_type=None,
                 java_reference_type=None,
                 passed_by_reference=False,
                 has_proto_setter=False,
                 is_enum=False,
                 scoped_ctype=None,
                 java_default=None,
                 cpp_default=None,
                 not_serialize_if_default=None):
        """Create a ScalarType.

        Args:
          ctype: C type name for this ScalarType.
          proto_type: Proto field type name used to store this field.
            Defaults to ctype; when defaulted, the type is assumed to have
            a set_X() proto setter (has_proto_setter forced True).
          java_type: Java type name. Defaults to ctype.
          java_reference_type: Java type name when a reference type is
            needed. Defaults to java_type.
          passed_by_reference: Pass by reference (class-like types) rather
            than by value (PODs) in constructors and getter methods.
          has_proto_setter: True if fields of this type have a set_X(value)
            method in C++ (e.g. enum, int64, string; message fields don't).
            Ignored (forced True) when proto_type is left defaulted.
          is_enum: True for enums persisted as integers in proto form; the
            generated serialization casts between the enum and int.
          scoped_ctype: C type, possibly with scope prepended, for inner
            types that must be referenced from outside the class.
            Defaults to ctype.
          java_default: Explicit Java default value for non-constructor and
            optional constructor args.
          cpp_default: Explicit C++ default value; empty string means
            "use the C++ default".
          not_serialize_if_default: Skip serializing the field when it
            holds its default value, and restore the default during
            deserialization when the proto field is empty. Defaults False.
        """
        self.ctype = ctype
        self.is_enum = is_enum
        self.passed_by_reference = passed_by_reference
        self.java_type = ctype if java_type is None else java_type
        self.java_reference_type = (
            self.java_type if java_reference_type is None
            else java_reference_type)
        if proto_type is None:
            # Same wire type as the C type implies a plain set_X() setter.
            self.proto_type = ctype
            self.has_proto_setter = True
        else:
            self.proto_type = proto_type
            self.has_proto_setter = has_proto_setter
        self.scoped_ctype = ctype if scoped_ctype is None else scoped_ctype
        self.java_default = java_default
        self.cpp_default = '' if cpp_default is None else cpp_default
        self.not_serialize_if_default = (
            False if not_serialize_if_default is None
            else not_serialize_if_default)
def JavaDoc(text, indent=0):
    """Returns text for a JavaDoc comment from the given text.

    Args:
      text: comment text
      indent: indent level

    The comment is indented by the specified number of spaces.
    """
    if not text:
        return text
    pad = ' ' * indent
    body = CleanIndent(text, '%s * ' % pad)
    # Prefix <p> to lines that start a new paragraph. The regex finds
    # lines that follow an empty line.
    add_paragraph_re = re.compile(r'\* \n( *\* )(\S)')
    body = add_paragraph_re.sub(r'* \n\1<p> \2', body)
    # Wrap with the opening (/**) and closing (*/) lines.
    return '%s/**\n%s\n%s */' % (pad, body, pad)
def LowerCamelCase(value):
    """Turns some_string or SOME_STRING into someString."""
    parts = value.lower().split('_')
    return parts[0] + ''.join(word.capitalize() for word in parts[1:])
def UpperCamelCase(value):
    """Turns some_string or SOME_STRING into SomeString."""
    return ''.join(word.capitalize() for word in value.lower().split('_'))
def NameToNodeKindName(name, prefix):
    """Returns the name of a node kind, suitable for a tree dump.

    This is simply the name, minus the "Resolved" or "AST" prefix
    (if present).

    Args:
      name: name of node class (in Java/C++).
      prefix: prefix to remove.
    """
    return name[len(prefix):] if name.startswith(prefix) else name
|
|
import time
import xbmc
import xbmcaddon
import denon
from datetime import datetime
from datetime import timedelta
# Kodi add-on identifier for this plugin.
__PLUGIN_ID__ = "plugin.audio.denon-dra-f109-remote"
# Add-on settings store; also used below to persist service state.
settings = xbmcaddon.Addon(id=__PLUGIN_ID__);
# Filesystem path of the installed add-on (used for the notification icon).
addon_dir = xbmc.translatePath( settings.getAddonInfo('path') )
class XBMCPlayer(xbmc.Player):
    """Kodi player hook driving a Denon DRA-F109 amplifier.

    Switches the amplifier to the configured input when Kodi starts
    playing and turns it off after playback has been idle for a
    configurable time.  State (last switch/stop time, whether anything
    has played) is persisted in the add-on settings so it survives
    service restarts.
    """

    # Starts within this many seconds of a stop are treated as navigation
    # (e.g. track change), not a new playback session.
    __MIN_STOP_START_INTERVAL = 10
    # Do not re-send the input-switch command more often than this (seconds).
    __RESWITCH_INTERVAL = 3600

    # Command words for denon.sendto_denon, indexed by the
    # "kodi_input_source" setting.
    __sources = [["analog", "1"],
                 ["analog", "2"],
                 ["optical"],
                 ["cd"],
                 ["net"]]

    def __init__(self, *args):
        # Reset persisted state on service start.
        self.__set_has_played(False)
        self.__set_last_switch("")
        self.__set_last_stop(time.localtime())

    def check_idle(self):
        """Turn the amplifier off once playback has been idle long enough."""
        __now = time.time()
        # Setting is in minutes; 0 disables idle power-off.
        __turn_off_on_idle = 60 * int(
            settings.getSetting("turn_off_on_idle"))
        if __turn_off_on_idle == 0 or self.isPlaying() \
                or not self.__get_has_played():
            return
        if self.__get_last_stop() + __turn_off_on_idle > __now:
            # Not idle long enough yet.
            return
        self.__send_to_denon(["off"])
        self.__set_last_switch("")
        self.__set_last_stop(time.localtime())
        self.__set_has_played(False)

    def __set_last_switch(self, v):
        # v is "" to clear, or a time.struct_time to record.
        if v == "":
            settings.setSetting("smart_last_switch", "")
        else:
            settings.setSetting("smart_last_switch",
                                time.strftime("%Y-%m-%d %H:%M:%S", v))

    def __get_last_switch(self):
        # Returns seconds since the epoch, or 0 when unset.
        s = settings.getSetting("smart_last_switch")
        if s == "":
            return 0
        else:
            return time.mktime(time.strptime(s, "%Y-%m-%d %H:%M:%S"))

    def __set_last_stop(self, v):
        # v is "" to clear, or a time.struct_time to record.
        if v == "":
            settings.setSetting("smart_last_stop", "")
        else:
            settings.setSetting("smart_last_stop",
                                time.strftime("%Y-%m-%d %H:%M:%S", v))

    def __get_last_stop(self):
        # Returns seconds since the epoch, or 0 when unset.
        s = settings.getSetting("smart_last_stop")
        if s == "":
            return 0
        else:
            return time.mktime(time.strptime(s, "%Y-%m-%d %H:%M:%S"))

    def __set_has_played(self, v):
        settings.setSetting("smart_has_played", str(v).lower())

    def __get_has_played(self):
        return settings.getSetting("smart_has_played") == "true"

    def __is_navigation_event(self):
        # A start shortly after a stop is the user navigating, not a new
        # playback session.
        return time.time() < self.__get_last_stop() \
            + self.__MIN_STOP_START_INTERVAL

    def __is_switch_fresh(self):
        # Input was switched recently; no need to send the command again.
        return time.time() < self.__get_last_switch() \
            + self.__RESWITCH_INTERVAL

    def _now(self):
        """Current wall-clock time of day as a timedelta since midnight."""
        t_now = time.localtime()
        td_now = timedelta(hours=t_now.tm_hour,
                           minutes=t_now.tm_min,
                           seconds=t_now.tm_sec)
        return td_now

    def _parse_time(self, s_time):
        """Parse "HH:MM" into a timedelta; invalid input yields 0 seconds."""
        try:
            t_time = time.strptime(s_time, "%H:%M")
            return timedelta(
                hours=t_time.tm_hour,
                minutes=t_time.tm_min)
        except ValueError:
            # BUG FIX: was a bare ``except`` that also hid unrelated
            # errors; strptime signals bad input with ValueError.
            return timedelta(seconds=0)

    def _is_no_kodi_period(self):
        """True while automatic switching is disabled by the time window."""
        not_before = self._parse_time(settings.getSetting("auto_kodi_not_before"))
        not_after = self._parse_time(settings.getSetting("auto_kodi_not_after"))
        now = self._now()
        if not_before < not_after:
            # Normal window, e.g. 08:00-22:00.
            return now < not_before or now > not_after
        else:
            # Window wraps midnight, e.g. 22:00-08:00.
            return not_after < now < not_before

    def onPlayBackStarted(self):
        if self._is_no_kodi_period():
            return
        __now = time.time()
        if self.__is_navigation_event():
            # Track change right after a stop: just refresh the timestamp.
            self.__set_last_switch(time.localtime())
            return
        if self.__is_switch_fresh():
            return
        self.__send_to_denon(self.__sources[int(
            settings.getSetting("kodi_input_source"))])
        self.__set_last_switch(time.localtime())
        self.__set_has_played(True)

    def onPlayBackStopped(self):
        self.__set_last_switch("")
        self.__set_last_stop(time.localtime())
        self.__set_has_played(True)

    def onPlayBackEnded(self):
        self.__set_last_switch("")
        self.__set_last_stop(time.localtime())
        self.__set_has_played(True)
        if settings.getSetting("turn_off_on_end") == "true":
            self.__send_to_denon(["off"])

    def __send_to_denon(self, send_params):
        """Send a command to the amplifier and show a Kodi notification."""
        params = [settings.getSetting("device")]
        params += send_params
        xbmc.executebuiltin("Notification(Send to Denon, "
                            + " ".join(send_params)
                            + ", 5000, " + addon_dir + "/icon.png)")
        denon.sendto_denon(params)
# Service entry point: only run when executed directly and automatic
# amplifier control is enabled in the add-on settings.
if __name__ == "__main__" \
        and settings.getSetting("auto_kodi") == "true":
    xbmc.log('[Denon] Service started', xbmc.LOGNOTICE)
    monitor = xbmc.Monitor()
    player = XBMCPlayer()
    # Poll every 10 seconds until Kodi asks the service to shut down.
    while not monitor.abortRequested():
        if monitor.waitForAbort(10):
            break
        player.check_idle()
|
|
# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import itertools
import json
import os
import re
import shutil
import tempfile
import time
import unittest
import urllib
# Find the best implementation available
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from bs4 import BeautifulSoup
import flask
import mock
import PIL.Image
from urlparse import urlparse
from digits.config import config_value
import digits.dataset.images.classification.test_views
import digits.test_views
import digits.webapp
# Must import after importing digit.config
import caffe_pb2
# May be too short on a slow system
TIMEOUT_DATASET = 45  # seconds to wait for dataset jobs to complete
TIMEOUT_MODEL = 60  # seconds to wait for model jobs to complete
################################################################################
# Base classes (they don't start with "Test" so nose won't run them)
################################################################################
class BaseViewsTest(digits.test_views.BaseViewsTest):
    """
    Provides some functions
    """
    # Minimal Caffe network for the classification tests: one InnerProduct
    # layer, with loss/accuracy heads for train/val and softmax for deploy.
    CAFFE_NETWORK = \
"""
layer {
name: "hidden"
type: 'InnerProduct'
bottom: "data"
top: "output"
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "output"
bottom: "label"
top: "loss"
exclude { stage: "deploy" }
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "output"
bottom: "label"
top: "accuracy"
include { stage: "val" }
}
layer {
name: "softmax"
type: "Softmax"
bottom: "output"
top: "softmax"
include { stage: "deploy" }
}
"""
    # Equivalent minimal Torch network; weights/biases zeroed to speed up
    # learning on the toy problem used by these tests.
    TORCH_NETWORK = \
"""
return function(p)
-- adjust to number of classes
local nclasses = p.nclasses or 1
-- model should adjust to any 3D input
local nDim = 1
if p.inputShape then p.inputShape:apply(function(x) nDim=nDim*x end) end
local model = nn.Sequential()
model:add(nn.View(-1):setNumInputDims(3)) -- c*h*w -> chw (flattened)
-- set all weights and biases to zero as this speeds learning up
-- for the type of problem we're trying to solve in this test
local linearLayer = nn.Linear(nDim, nclasses)
linearLayer.weight:fill(0)
linearLayer.bias:fill(0)
model:add(linearLayer) -- chw -> nclasses
model:add(nn.LogSoftMax())
return {
model = model
}
end
"""

    @classmethod
    def setUpClass(cls):
        # Skip the whole class when testing Torch without an installation.
        super(BaseViewsTest, cls).setUpClass()
        if cls.FRAMEWORK=='torch' and not config_value('torch_root'):
            raise unittest.SkipTest('Torch not found')

    # The following classmethods are thin wrappers forwarding to the
    # generic job helpers with job_type='models'.
    @classmethod
    def model_exists(cls, job_id):
        return cls.job_exists(job_id, 'models')

    @classmethod
    def model_status(cls, job_id):
        return cls.job_status(job_id, 'models')

    @classmethod
    def abort_model(cls, job_id):
        return cls.abort_job(job_id, job_type='models')

    @classmethod
    def model_wait_completion(cls, job_id, **kwargs):
        # Default the timeout so slow systems still pass.
        kwargs['job_type'] = 'models'
        if 'timeout' not in kwargs:
            kwargs['timeout'] = TIMEOUT_MODEL
        return cls.job_wait_completion(job_id, **kwargs)

    @classmethod
    def delete_model(cls, job_id):
        return cls.delete_job(job_id, job_type='models')

    @classmethod
    def network(cls):
        # Pick the network definition matching the framework under test.
        return cls.TORCH_NETWORK if cls.FRAMEWORK=='torch' else cls.CAFFE_NETWORK
class BaseViewsTestWithDataset(BaseViewsTest,
        digits.dataset.images.classification.test_views.BaseViewsTestWithDataset):
    """
    Provides a dataset
    """

    # Inherited classes may want to override these attributes
    CROP_SIZE = None  # square crop size forwarded to model creation
    TRAIN_EPOCHS = 1  # keep training short for tests
    SHUFFLE = False
    LR_POLICY = None
    LR_MULTISTEP_VALUES = None
    LEARNING_RATE = None

    @classmethod
    def setUpClass(cls):
        super(BaseViewsTestWithDataset, cls).setUpClass()
        # Track created models so tearDownClass can delete them.
        cls.created_models = []

    @classmethod
    def tearDownClass(cls):
        # delete any created datasets
        for job_id in cls.created_models:
            cls.delete_model(job_id)
        super(BaseViewsTestWithDataset, cls).tearDownClass()

    @classmethod
    def create_model(cls, network=None, **kwargs):
        """
        Create a model
        Returns the job_id
        Raise RuntimeError if job fails to create

        Keyword arguments:
        **kwargs -- data to be sent with POST request
        """
        if network is None:
            network = cls.network()
        # Baseline form fields; **kwargs may override any of them below.
        data = {
            'model_name': 'test_model',
            'dataset': cls.dataset_id,
            'method': 'custom',
            'custom_network': network,
            'batch_size': 10,
            'train_epochs': cls.TRAIN_EPOCHS,
            'framework' : cls.FRAMEWORK,
            'random_seed': 0xCAFEBABE,
            'shuffle': 'true' if cls.SHUFFLE else 'false'
        }
        if cls.CROP_SIZE is not None:
            data['crop_size'] = cls.CROP_SIZE
        if cls.LR_POLICY is not None:
            data['lr_policy'] = cls.LR_POLICY
        if cls.LEARNING_RATE is not None:
            data['learning_rate'] = cls.LEARNING_RATE
        if cls.LR_MULTISTEP_VALUES is not None:
            data['lr_multistep_values'] = cls.LR_MULTISTEP_VALUES
        data.update(kwargs)
        # 'json': True targets the .json endpoint instead of the HTML one.
        request_json = data.pop('json', False)
        url = '/models/images/classification'
        if request_json:
            url += '.json'
        rv = cls.app.post(url, data=data)
        if request_json:
            if rv.status_code != 200:
                print json.loads(rv.data)
                raise RuntimeError('Model creation failed with %s' % rv.status_code)
            data = json.loads(rv.data)
            # Multi-GPU creation returns a list of job ids.
            if 'jobs' in data.keys():
                return [j['id'] for j in data['jobs']]
            else:
                return data['id']
        # expect a redirect
        if not 300 <= rv.status_code <= 310:
            print 'Status code:', rv.status_code
            s = BeautifulSoup(rv.data, 'html.parser')
            div = s.select('div.alert-danger')
            if div:
                print div[0]
            else:
                print rv.data
            # NOTE(review): message says "dataset" but this creates a model.
            raise RuntimeError('Failed to create dataset - status %s' % rv.status_code)
        job_id = cls.job_id_from_response(rv)
        assert cls.model_exists(job_id), 'model not found after successful creation'
        cls.created_models.append(job_id)
        return job_id
class BaseViewsTestWithModel(BaseViewsTestWithDataset):
    """
    Provides a model
    """
    @classmethod
    def setUpClass(cls):
        # Create one fully-trained model shared by every test in the class.
        super(BaseViewsTestWithModel, cls).setUpClass()
        cls.model_id = cls.create_model(json=True)
        assert cls.model_wait_completion(cls.model_id) == 'Done', 'create failed'
class BaseTestViews(BaseViewsTest):
    """
    Tests which don't require a dataset or a model
    """
    def test_page_model_new(self):
        # The "new model" page should render successfully.
        rv = self.app.get('/models/images/classification/new')
        assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
        assert 'New Image Classification Model' in rv.data, 'unexpected page format'

    def test_nonexistent_model(self):
        assert not self.model_exists('foo'), "model shouldn't exist"

    def test_visualize_network(self):
        rv = self.app.post('/models/visualize-network?framework='+self.FRAMEWORK,
                data = {'custom_network': self.network()}
                )
        s = BeautifulSoup(rv.data, 'html.parser')
        if rv.status_code != 200:
            body = s.select('body')[0]
            # GraphViz is an optional dependency of the visualization.
            if 'InvocationException' in str(body):
                raise unittest.SkipTest('GraphViz not installed')
            raise AssertionError('POST failed with %s\n\n%s' % (rv.status_code, body))
        image = s.select('img')
        # BUG FIX: select() returns a list (never None), so the original
        # ``image is not None`` assertion was vacuously true.  Assert that
        # at least one <img> element was actually found.
        assert image, "didn't return an image"

    def test_customize(self):
        rv = self.app.post('/models/customize?network=lenet&framework='+self.FRAMEWORK)
        s = BeautifulSoup(rv.data, 'html.parser')
        body = s.select('body')
        assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
class BaseTestCreation(BaseViewsTestWithDataset):
    """
    Model creation tests
    """
    def test_create_json(self):
        job_id = self.create_model(json=True)
        self.abort_model(job_id)

    def test_create_delete(self):
        job_id = self.create_model()
        assert self.delete_model(job_id) == 200, 'delete failed'
        assert not self.model_exists(job_id), 'model exists after delete'

    def test_create_wait_delete(self):
        job_id = self.create_model()
        assert self.model_wait_completion(job_id) == 'Done', 'create failed'
        assert self.delete_model(job_id) == 200, 'delete failed'
        assert not self.model_exists(job_id), 'model exists after delete'

    def test_create_abort_delete(self):
        job_id = self.create_model()
        assert self.abort_model(job_id) == 200, 'abort failed'
        assert self.delete_model(job_id) == 200, 'delete failed'
        assert not self.model_exists(job_id), 'model exists after delete'

    def test_snapshot_interval_2(self):
        # NOTE(review): this test uses interval 0.5 while the next one uses
        # 2 — the names of the two tests appear to be swapped.
        job_id = self.create_model(snapshot_interval=0.5)
        assert self.model_wait_completion(job_id) == 'Done', 'create failed'
        rv = self.app.get('/models/%s.json' % job_id)
        assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
        content = json.loads(rv.data)
        assert len(content['snapshots']) > 1, 'should take >1 snapshot'

    def test_snapshot_interval_0_5(self):
        # 4 epochs with a snapshot every 2 epochs -> exactly 2 snapshots.
        job_id = self.create_model(train_epochs=4, snapshot_interval=2)
        assert self.model_wait_completion(job_id) == 'Done', 'create failed'
        rv = self.app.get('/models/%s.json' % job_id)
        assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
        content = json.loads(rv.data)
        assert len(content['snapshots']) == 2, 'should take 2 snapshots'

    @unittest.skipIf(
            not config_value('gpu_list'),
            'no GPUs selected')
    @unittest.skipIf(
            not config_value('caffe_root')['cuda_enabled'],
            'CUDA disabled')
    @unittest.skipIf(
            config_value('caffe_root')['multi_gpu'],
            'multi-GPU enabled')
    def test_select_gpu(self):
        # Generator test: one sub-test per configured GPU.
        for index in config_value('gpu_list').split(','):
            yield self.check_select_gpu, index

    def check_select_gpu(self, gpu_index):
        job_id = self.create_model(select_gpu=gpu_index)
        assert self.model_wait_completion(job_id) == 'Done', 'create failed'

    @unittest.skipIf(
            not config_value('gpu_list'),
            'no GPUs selected')
    @unittest.skipIf(
            not config_value('caffe_root')['cuda_enabled'],
            'CUDA disabled')
    @unittest.skipIf(
            not config_value('caffe_root')['multi_gpu'],
            'multi-GPU disabled')
    def test_select_gpus(self):
        # test all possible combinations
        gpu_list = config_value('gpu_list').split(',')
        for i in xrange(len(gpu_list)):
            for combination in itertools.combinations(gpu_list, i+1):
                yield self.check_select_gpus, combination

    def check_select_gpus(self, gpu_list):
        # Batch size must be a multiple of the GPU count.
        job_id = self.create_model(select_gpus_list=','.join(gpu_list), batch_size=len(gpu_list))
        assert self.model_wait_completion(job_id) == 'Done', 'create failed'

    def classify_one_for_job(self, job_id, test_misclassification = True):
        # carry out one inference test per category in dataset
        for category in self.imageset_paths.keys():
            image_path = self.imageset_paths[category][0]
            image_path = os.path.join(self.imageset_folder, image_path)
            with open(image_path,'rb') as infile:
                # StringIO wrapping is needed to simulate POST file upload.
                image_upload = (StringIO(infile.read()), 'image.png')
            rv = self.app.post(
                    '/models/images/classification/classify_one?job_id=%s' % job_id,
                    data = {
                        'image_file': image_upload,
                        }
                    )
            s = BeautifulSoup(rv.data, 'html.parser')
            body = s.select('body')
            assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
            # gets an array of arrays [[confidence, label],...]
            predictions = [p.get_text().split() for p in s.select('ul.list-group li')]
            if test_misclassification:
                assert predictions[0][1] == category, 'image misclassified'

    def test_classify_one_mean_image(self):
        # test the creation
        job_id = self.create_model(use_mean = 'image')
        assert self.model_wait_completion(job_id) == 'Done', 'job failed'
        self.classify_one_for_job(job_id)

    def test_classify_one_mean_pixel(self):
        # test the creation
        job_id = self.create_model(use_mean = 'pixel')
        assert self.model_wait_completion(job_id) == 'Done', 'job failed'
        self.classify_one_for_job(job_id)

    def test_classify_one_mean_none(self):
        # test the creation; without mean subtraction the classification
        # result is not checked (False disables the misclassification test).
        job_id = self.create_model(use_mean = 'none')
        assert self.model_wait_completion(job_id) == 'Done', 'job failed'
        self.classify_one_for_job(job_id, False)

    def test_retrain(self):
        # Train once, then retrain from the last snapshot of the first job.
        job1_id = self.create_model()
        assert self.model_wait_completion(job1_id) == 'Done', 'first job failed'
        rv = self.app.get('/models/%s.json' % job1_id)
        assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
        content = json.loads(rv.data)
        assert len(content['snapshots']), 'should have at least snapshot'
        options = {
            'method': 'previous',
            'previous_networks': job1_id,
        }
        options['%s-snapshot' % job1_id] = content['snapshots'][-1]
        job2_id = self.create_model(**options)
        assert self.model_wait_completion(job2_id) == 'Done', 'second job failed'

    def test_retrain_twice(self):
        # retrain from a job which already had a pretrained model
        job1_id = self.create_model()
        assert self.model_wait_completion(job1_id) == 'Done', 'first job failed'
        rv = self.app.get('/models/%s.json' % job1_id)
        assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
        content = json.loads(rv.data)
        assert len(content['snapshots']), 'should have at least snapshot'
        options_2 = {
            'method': 'previous',
            'previous_networks': job1_id,
        }
        options_2['%s-snapshot' % job1_id] = content['snapshots'][-1]
        job2_id = self.create_model(**options_2)
        assert self.model_wait_completion(job2_id) == 'Done', 'second job failed'
        options_3 = {
            'method': 'previous',
            'previous_networks': job2_id,
        }
        # -1 selects the most recent snapshot of the previous job.
        options_3['%s-snapshot' % job2_id] = -1
        job3_id = self.create_model(**options_3)
        assert self.model_wait_completion(job3_id) == 'Done', 'third job failed'

    def test_bad_network_definition(self):
        # NOTE(review): assumes FRAMEWORK is 'caffe' or 'torch'; any other
        # value would leave bogus_net undefined (NameError below).
        if self.FRAMEWORK == 'caffe':
            bogus_net = """
layer {
name: "hidden"
type: 'BogusCode'
bottom: "data"
top: "output"
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "output"
bottom: "label"
top: "loss"
exclude { stage: "deploy" }
}
layer {
name: "softmax"
type: "Softmax"
bottom: "output"
top: "softmax"
include { stage: "deploy" }
}
"""
        elif self.FRAMEWORK == 'torch':
            bogus_net = """
local model = BogusCode(0)
return function(params)
return {
model = model
}
end
"""
        job_id = self.create_model(json=True, network=bogus_net)
        assert self.model_wait_completion(job_id) == 'Error', 'job should have failed'
        job_info = self.job_info_html(job_id=job_id, job_type='models')
        assert 'BogusCode' in job_info, "job_info: \n%s" % str(job_info)

    def test_clone(self):
        # Create a job with many non-default options, clone it, and verify
        # the clone's stored content and form data match the original.
        options_1 = {
            'shuffle': True,
            'snapshot_interval': 2.0,
            'lr_step_size': 33.0,
            'lr_inv_power': 0.5,
            'lr_inv_gamma': 0.1,
            'lr_poly_power': 3.0,
            'lr_exp_gamma': 0.9,
            'use_mean': 'image',
            'lr_multistep_gamma': 0.5,
            'lr_policy': 'exp',
            'val_interval': 3.0,
            'random_seed': 123,
            'learning_rate': 0.0125,
            'lr_step_gamma': 0.1,
            'lr_sigmoid_step': 50.0,
            'lr_sigmoid_gamma': 0.1,
            'lr_multistep_values': '50,85',
        }
        job1_id = self.create_model(**options_1)
        assert self.model_wait_completion(job1_id) == 'Done', 'first job failed'
        rv = self.app.get('/models/%s.json' % job1_id)
        assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
        content1 = json.loads(rv.data)

        ## Clone job1 as job2
        options_2 = {
            'clone': job1_id,
        }
        job2_id = self.create_model(**options_2)
        assert self.model_wait_completion(job2_id) == 'Done', 'second job failed'
        rv = self.app.get('/models/%s.json' % job2_id)
        assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
        content2 = json.loads(rv.data)

        ## These will be different
        content1.pop('id')
        content2.pop('id')
        content1.pop('directory')
        content2.pop('directory')
        assert (content1 == content2), 'job content does not match'
        job1 = digits.webapp.scheduler.get_job(job1_id)
        job2 = digits.webapp.scheduler.get_job(job2_id)
        assert (job1.form_data == job2.form_data), 'form content does not match'
class BaseTestCreated(BaseViewsTestWithModel):
"""
Tests on a model that has already been created
"""
def test_save(self):
job = digits.webapp.scheduler.get_job(self.model_id)
assert job.save(), 'Job failed to save'
def test_download(self):
for extension in ['tar', 'zip', 'tar.gz', 'tar.bz2']:
yield self.check_download, extension
def check_download(self, extension):
url = '/models/%s/download.%s' % (self.model_id, extension)
rv = self.app.get(url)
assert rv.status_code == 200, 'download "%s" failed with %s' % (url, rv.status_code)
def test_index_json(self):
rv = self.app.get('/index.json')
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
content = json.loads(rv.data)
found = False
for m in content['models']:
if m['id'] == self.model_id:
found = True
break
assert found, 'model not found in list'
def test_model_json(self):
rv = self.app.get('/models/%s.json' % self.model_id)
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
content = json.loads(rv.data)
assert content['id'] == self.model_id, 'id %s != %s' % (content['id'], self.model_id)
assert content['dataset_id'] == self.dataset_id, 'dataset_id %s != %s' % (content['dataset_id'], self.dataset_id)
assert len(content['snapshots']) > 0, 'no snapshots in list'
def test_edit_name(self):
status = self.edit_job(
self.dataset_id,
name='new name'
)
assert status == 200, 'failed with %s' % status
def test_edit_notes(self):
status = self.edit_job(
self.dataset_id,
notes='new notes'
)
assert status == 200, 'failed with %s' % status
def test_classify_one(self):
# test first image in first category
category = self.imageset_paths.keys()[0]
image_path = self.imageset_paths[category][0]
image_path = os.path.join(self.imageset_folder, image_path)
with open(image_path,'rb') as infile:
# StringIO wrapping is needed to simulate POST file upload.
image_upload = (StringIO(infile.read()), 'image.png')
rv = self.app.post(
'/models/images/classification/classify_one?job_id=%s' % self.model_id,
data = {
'image_file': image_upload,
'show_visualizations': 'y',
}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
# gets an array of arrays [[confidence, label],...]
predictions = [p.get_text().split() for p in s.select('ul.list-group li')]
assert predictions[0][1] == category, 'image misclassified'
def test_classify_one_json(self):
# test last image in last category
category = self.imageset_paths.keys()[-1]
image_path = self.imageset_paths[category][-1]
image_path = os.path.join(self.imageset_folder, image_path)
with open(image_path,'rb') as infile:
# StringIO wrapping is needed to simulate POST file upload.
image_upload = (StringIO(infile.read()), 'image.png')
rv = self.app.post(
'/models/images/classification/classify_one.json?job_id=%s' % self.model_id,
data = {
'image_file': image_upload,
'show_visualizations': 'y',
}
)
assert rv.status_code == 200, 'POST failed with %s' % rv.status_code
data = json.loads(rv.data)
assert data['predictions'][0][0] == category, 'image misclassified'
def test_classify_many(self):
textfile_images = ''
label_id = 0
for label, images in self.imageset_paths.iteritems():
for image in images:
image_path = image
image_path = os.path.join(self.imageset_folder, image_path)
textfile_images += '%s %d\n' % (image_path, label_id)
label_id += 1
# StringIO wrapping is needed to simulate POST file upload.
file_upload = (StringIO(textfile_images), 'images.txt')
rv = self.app.post(
'/models/images/classification/classify_many?job_id=%s' % self.model_id,
data = {'image_list': file_upload}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
def test_classify_many_from_folder(self):
textfile_images = ''
label_id = 0
for label, images in self.imageset_paths.iteritems():
for image in images:
image_path = image
textfile_images += '%s %d\n' % (image_path, label_id)
label_id += 1
# StringIO wrapping is needed to simulate POST file upload.
file_upload = (StringIO(textfile_images), 'images.txt')
rv = self.app.post(
'/models/images/classification/classify_many?job_id=%s' % self.model_id,
data = {'image_list': file_upload, 'image_folder': self.imageset_folder}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
def test_classify_many_invalid_ground_truth(self):
textfile_images = ''
label_id = 0
for label, images in self.imageset_paths.iteritems():
for image in images:
image_path = image
image_path = os.path.join(self.imageset_folder, image_path)
# test label_id with -1 and >len(labels)
textfile_images += '%s %s\n' % (image_path, 3*label_id-1)
label_id += 1
# StringIO wrapping is needed to simulate POST file upload.
file_upload = (StringIO(textfile_images), 'images.txt')
rv = self.app.post(
'/models/images/classification/classify_many?job_id=%s' % self.model_id,
data = {'image_list': file_upload}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
def test_classify_many_json(self):
textfile_images = ''
label_id = 0
for label, images in self.imageset_paths.iteritems():
for image in images:
image_path = image
image_path = os.path.join(self.imageset_folder, image_path)
textfile_images += '%s %d\n' % (image_path, label_id)
label_id += 1
# StringIO wrapping is needed to simulate POST file upload.
file_upload = (StringIO(textfile_images), 'images.txt')
rv = self.app.post(
'/models/images/classification/classify_many.json?job_id=%s' % self.model_id,
data = {'image_list': file_upload}
)
assert rv.status_code == 200, 'POST failed with %s' % rv.status_code
data = json.loads(rv.data)
assert 'classifications' in data, 'invalid response'
# verify classification of first image in each category
for category in self.imageset_paths.keys():
image_path = self.imageset_paths[category][0]
image_path = os.path.join(self.imageset_folder, image_path)
prediction = data['classifications'][image_path][0][0]
assert prediction == category, 'image misclassified- predicted %s - expected %s' % (prediction, category)
def test_top_n(self):
    """POST the labeled image list to top_n and check every category shows up.

    Bug fix: the per-key assertion message lacked its '%' argument, so a
    failure printed a literal '"%s"' instead of the missing key name.
    """
    textfile_images = ''
    label_id = 0
    for label, images in self.imageset_paths.iteritems():
        for image in images:
            image_path = image
            image_path = os.path.join(self.imageset_folder, image_path)
            textfile_images += '%s %d\n' % (image_path, label_id)
        label_id += 1
    # StringIO wrapping is needed to simulate POST file upload.
    file_upload = (StringIO(textfile_images), 'images.txt')
    rv = self.app.post(
        '/models/images/classification/top_n?job_id=%s' % self.model_id,
        data={'image_list': file_upload}
    )
    s = BeautifulSoup(rv.data, 'html.parser')
    body = s.select('body')
    assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
    keys = self.imageset_paths.keys()
    for key in keys:
        assert key in rv.data, '"%s" not found in the response' % key
def test_top_n_from_folder(self):
    """Same as test_top_n but paths are relative and image_folder is supplied.

    Bug fix: the per-key assertion message lacked its '%' argument, so a
    failure printed a literal '"%s"' instead of the missing key name.
    """
    textfile_images = ''
    label_id = 0
    for label, images in self.imageset_paths.iteritems():
        for image in images:
            image_path = image
            textfile_images += '%s %d\n' % (image_path, label_id)
        label_id += 1
    # StringIO wrapping is needed to simulate POST file upload.
    file_upload = (StringIO(textfile_images), 'images.txt')
    rv = self.app.post(
        '/models/images/classification/top_n?job_id=%s' % self.model_id,
        data={'image_list': file_upload, 'image_folder': self.imageset_folder}
    )
    s = BeautifulSoup(rv.data, 'html.parser')
    body = s.select('body')
    assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
    keys = self.imageset_paths.keys()
    for key in keys:
        assert key in rv.data, '"%s" not found in the response' % key
class BaseTestDatasetModelInteractions(BaseViewsTestWithDataset):
    """
    Test the interactions between datasets and models
    """
    # If you try to create a model using a deleted dataset, it should fail
    def test_create_model_deleted_dataset(self):
        dataset_id = self.create_dataset()
        assert self.delete_dataset(dataset_id) == 200, 'delete failed'
        assert not self.dataset_exists(dataset_id), 'dataset exists after delete'
        try:
            # model_id intentionally unused - only the raise matters here
            model_id = self.create_model(dataset=dataset_id)
        except RuntimeError:
            return
        assert False, 'Should have failed'

    # If you try to create a model using a running dataset,
    # it should wait to start until the dataset is completed
    def test_create_model_running_dataset(self):
        dataset_id = self.create_dataset()
        model_id = self.create_model(dataset=dataset_id)

        # Model should be in WAIT status while dataset is running
        # Copying functionality from job_wait_completion ...
        start_time = time.time()
        timeout = TIMEOUT_DATASET
        dataset_status = self.dataset_status(dataset_id)
        while dataset_status != 'Done':
            model_status = self.model_status(model_id)
            if model_status == 'Initialized':
                # give it some time ...
                pass
            elif model_status == 'Waiting':
                # That's what we were waiting for
                break
            else:
                raise Exception('Model not waiting - "%s"' % model_status)
            assert (time.time() - start_time) < timeout, 'Job took more than %s seconds' % timeout
            time.sleep(0.5)
            dataset_status = self.dataset_status(dataset_id)

        # Model should switch to RUN status after dataset is DONE
        assert self.dataset_wait_completion(dataset_id) == 'Done', 'dataset creation failed'
        time.sleep(1)
        assert self.model_status(model_id) in ['Running', 'Done'], "model didn't start"
        self.abort_model(model_id)

    # If you try to delete a completed dataset with a dependent model, it should fail
    def test_delete_dataset_dependent_model(self):
        dataset_id = self.create_dataset()
        model_id = self.create_model(dataset=dataset_id)
        assert self.dataset_wait_completion(dataset_id) == 'Done', 'dataset creation failed'
        assert self.delete_dataset(dataset_id) == 403, 'dataset deletion should not have succeeded'
        self.abort_model(model_id)

    # If you try to delete a running dataset with a dependent model, it should fail
    def test_delete_running_dataset_dependent_model(self):
        dataset_id = self.create_dataset()
        model_id = self.create_model(dataset=dataset_id)
        assert self.delete_dataset(dataset_id) == 403, 'dataset deletion should not have succeeded'
        self.abort_dataset(dataset_id)
        self.abort_model(model_id)
class BaseTestCreatedWide(BaseTestCreated):
    """Re-run the created-model checks with a wider (20px) input image."""
    IMAGE_WIDTH = 20
class BaseTestCreatedTall(BaseTestCreated):
    """Re-run the created-model checks with a taller (20px) input image."""
    IMAGE_HEIGHT = 20
class BaseTestCreatedCropInForm(BaseTestCreated):
    """Created-model checks where the crop size is requested via the web form."""
    CROP_SIZE = 8
class BaseTestCreatedCropInNetwork(BaseTestCreated):
    """Created-model checks where cropping (to 8px) is requested inside the
    network definition itself rather than through the form."""
    CAFFE_NETWORK = \
"""
layer {
name: "data"
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
crop_size: 8
}
}
layer {
name: "data"
type: "Data"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
crop_size: 8
}
}
layer {
name: "hidden"
type: 'InnerProduct'
bottom: "data"
top: "output"
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "output"
bottom: "label"
top: "loss"
exclude { stage: "deploy" }
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "output"
bottom: "label"
top: "accuracy"
include { stage: "val" }
}
layer {
name: "softmax"
type: "Softmax"
bottom: "output"
top: "softmax"
include { stage: "deploy" }
}
"""
    # Torch equivalent: crops to croplen=8 and returns it so the framework
    # crops inputs accordingly.
    TORCH_NETWORK = \
"""
return function(p)
local nclasses = p.nclasses or 1
local croplen = 8, channels
if p.inputShape then channels=p.inputShape[1] else channels=1 end
local model = nn.Sequential()
model:add(nn.View(-1):setNumInputDims(3)) -- flatten
local linLayer = nn.Linear(channels*croplen*croplen, nclasses)
linLayer.weight:fill(0)
linLayer.bias:fill(0)
model:add(linLayer) -- chw -> nclasses
model:add(nn.LogSoftMax())
return {
model = model,
croplen = croplen
}
end
"""
################################################################################
# Test classes
################################################################################
class TestCaffeViews(BaseTestViews):
    """Run the shared view tests against the Caffe framework."""
    FRAMEWORK = 'caffe'
class TestCaffeCreation(BaseTestCreation):
    """Run the shared model-creation tests against the Caffe framework."""
    FRAMEWORK = 'caffe'
class TestCaffeCreatedWideMoreNumOutput(BaseTestCreatedWide):
    """Wide-image created-model checks with a network whose inner-product layer
    has far more outputs (1000) than there are classes."""
    FRAMEWORK = 'caffe'
    CAFFE_NETWORK = \
"""
layer {
name: "hidden"
type: 'InnerProduct'
bottom: "data"
top: "output"
inner_product_param {
num_output: 1000
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "output"
bottom: "label"
top: "loss"
exclude { stage: "deploy" }
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "output"
bottom: "label"
top: "accuracy"
include { stage: "val" }
}
layer {
name: "softmax"
type: "Softmax"
bottom: "output"
top: "softmax"
include { stage: "deploy" }
}
"""
class TestCaffeDatasetModelInteractions(BaseTestDatasetModelInteractions):
    """Dataset/model interaction tests for the Caffe framework."""
    FRAMEWORK = 'caffe'
class TestCaffeCreatedCropInForm(BaseTestCreatedCropInForm):
    """Form-requested crop tests for the Caffe framework."""
    FRAMEWORK = 'caffe'
class TestCaffeCreatedCropInNetwork(BaseTestCreatedCropInNetwork):
    """Network-requested crop tests for the Caffe framework."""
    FRAMEWORK = 'caffe'
class TestCaffeCreatedTallMultiStepLR(BaseTestCreatedTall):
    """Tall-image Caffe tests using a multistep learning-rate policy."""
    FRAMEWORK = 'caffe'
    LR_POLICY = 'multistep'
    # Epoch percentages at which the learning rate steps down.
    LR_MULTISTEP_VALUES = '50,75,90'
class TestTorchViews(BaseTestViews):
    """Run the shared view tests against the Torch framework."""
    FRAMEWORK = 'torch'
class TestTorchCreation(BaseTestCreation):
    """Run the shared model-creation tests against the Torch framework."""
    FRAMEWORK = 'torch'
class TestTorchCreatedUnencodedShuffle(BaseTestCreated):
    """Torch created-model checks with unencoded images and shuffled data."""
    FRAMEWORK = 'torch'
    ENCODING = 'none'
    SHUFFLE = True
class TestTorchCreatedHdf5(BaseTestCreated):
    """Torch created-model checks with an HDF5 data backend."""
    FRAMEWORK = 'torch'
    BACKEND = 'hdf5'
class TestTorchCreatedTallHdf5Shuffle(BaseTestCreatedTall):
    """Tall-image Torch checks with HDF5 backend and shuffled data."""
    FRAMEWORK = 'torch'
    BACKEND = 'hdf5'
    SHUFFLE = True
class TestTorchDatasetModelInteractions(BaseTestDatasetModelInteractions):
    """Dataset/model interaction tests for the Torch framework."""
    FRAMEWORK = 'torch'
class TestCaffeLeNet(BaseTestCreated):
    """Train Caffe's standard LeNet network (28x28 inputs) and run the
    created-model checks against it."""
    FRAMEWORK = 'caffe'
    IMAGE_WIDTH = 28
    IMAGE_HEIGHT = 28

    # NOTE(review): relies on CPython refcounting to close the file handle;
    # a with-block would be safer.
    CAFFE_NETWORK=open(
        os.path.join(
            os.path.dirname(digits.__file__),
            'standard-networks', 'caffe', 'lenet.prototxt')
        ).read()
class TestTorchCreatedCropInForm(BaseTestCreatedCropInForm):
    """Form-requested crop tests for the Torch framework."""
    FRAMEWORK = 'torch'
class TestTorchCreatedCropInNetwork(BaseTestCreatedCropInNetwork):
    """Network-requested crop tests for the Torch framework."""
    FRAMEWORK = 'torch'
class TestTorchCreatedWideMultiStepLR(BaseTestCreatedWide):
    """Wide-image Torch tests using a multistep learning-rate policy."""
    FRAMEWORK = 'torch'
    LR_POLICY = 'multistep'
    # Epoch percentages at which the learning rate steps down.
    LR_MULTISTEP_VALUES = '50,75,90'
class TestTorchLeNet(BaseTestCreated):
    """Train Torch's standard LeNet network (28x28 inputs) and run the
    created-model checks against it."""
    FRAMEWORK = 'torch'
    IMAGE_WIDTH = 28
    IMAGE_HEIGHT = 28
    TRAIN_EPOCHS = 20
    # need more aggressive learning rate
    # on such a small dataset
    LR_POLICY = 'fixed'
    LEARNING_RATE = 0.1

    # standard lenet model will adjust to color
    # or grayscale images
    # NOTE(review): relies on CPython refcounting to close the file handle;
    # a with-block would be safer.
    TORCH_NETWORK=open(
        os.path.join(
            os.path.dirname(digits.__file__),
            'standard-networks', 'torch', 'lenet.lua')
        ).read()
class TestTorchLeNetHdf5Shuffle(TestTorchLeNet):
    """Torch LeNet tests with HDF5 backend and shuffled data."""
    BACKEND = 'hdf5'
    SHUFFLE = True
class TestPythonLayer(BaseViewsTestWithDataset):
    """Train a Caffe network that includes a custom Python layer."""
    FRAMEWORK = 'caffe'
    CAFFE_NETWORK = """\
layer {
name: "hidden"
type: 'InnerProduct'
inner_product_param {
num_output: 500
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
bottom: "data"
top: "output"
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "output"
bottom: "label"
top: "loss"
exclude { stage: "deploy" }
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "output"
bottom: "label"
top: "accuracy"
include { stage: "val" }
}
layer {
name: "py_test"
type: "Python"
bottom: "output"
top: "py_test"
python_param {
module: "digits_python_layers"
layer: "PythonLayer"
}
}
layer {
name: "softmax"
type: "Softmax"
bottom: "output"
top: "softmax"
include { stage: "deploy" }
}
"""

    def write_python_layer_script(self, filename):
        # Writes the PythonLayer implementation referenced by CAFFE_NETWORK
        # (module "digits_python_layers") to *filename*.
        with open(filename, 'w') as f:
            f.write("""\
import caffe
import numpy as np

class PythonLayer(caffe.Layer):

    def setup(self, bottom, top):
        print 'PythonLayer::setup'
        if len(bottom) != 1:
            raise Exception("Need one input.")

    def reshape(self, bottom, top):
        print 'PythonLayer::reshape'
        top[0].reshape(1)

    def forward(self, bottom, top):
        print 'PythonLayer::forward'
        top[0].data[...] = np.sum(bottom[0].data) / 2. / bottom[0].num
""")

    ## This test makes a temporary python layer file whose path is set
    ## as py_layer_server_file. The job creation process copies that
    ## file to the job_dir. The CAFFE_NETWORK above, requires that
    ## python script to be in the correct spot. If there is an error
    ## in the script or if the script is named incorrectly, or does
    ## not exist in the job_dir, then the test will fail.
    def test_python_layer(self):
        tmpdir = tempfile.mkdtemp()
        py_file = tmpdir + '/py_test.py'
        self.write_python_layer_script(py_file)

        job_id = self.create_model(python_layer_server_file=py_file)

        # remove the temporary python script.
        shutil.rmtree(tmpdir)

        assert self.model_wait_completion(job_id) == 'Done', 'first job failed'
        rv = self.app.get('/models/%s.json' % job_id)
        assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
        content = json.loads(rv.data)
        assert len(content['snapshots']), 'should have at least snapshot'
class TestSweepCreation(BaseViewsTestWithDataset):
    """
    Model creation tests
    """
    # Fix: the class docstring was previously placed *after* FRAMEWORK, where
    # it is just a no-op string expression instead of the class __doc__.
    FRAMEWORK = 'caffe'

    def test_sweep(self):
        # A hyper-parameter sweep creates one job per (learning_rate, batch_size)
        # combination; each must finish and be deletable.
        job_ids = self.create_model(json=True, learning_rate='[0.01, 0.02]', batch_size='[8, 10]')
        for job_id in job_ids:
            assert self.model_wait_completion(job_id) == 'Done', 'create failed'
            assert self.delete_model(job_id) == 200, 'delete failed'
            assert not self.model_exists(job_id), 'model exists after delete'
|
|
import os
import sys
import glob
import shutil
import errno
import logging
from contextlib import contextmanager
from plumbum.lib import _setdoc, IS_WIN32
from plumbum.path.base import Path, FSUser
from plumbum.path.remote import RemotePath
try:
    from pwd import getpwuid, getpwnam
    from grp import getgrgid, getgrnam
except ImportError:
    # Windows has no pwd/grp: id-based lookups degrade gracefully to an
    # unknown name, name-based lookups fail loudly.
    def getpwuid(x):
        return (None,)

    def getgrgid(x):
        return (None,)

    def getpwnam(x):
        raise OSError("`getpwnam` not supported")

    def getgrnam(x):
        raise OSError("`getgrnam` not supported")

try:  # Py3
    import urllib.parse as urlparse
    import urllib.request as urllib
except ImportError:  # Py2
    import urlparse
    import urllib
logger = logging.getLogger("plumbum.local")
#===================================================================================================
# Local Paths
#===================================================================================================
class LocalPath(Path):
    """The class implementing local-machine paths"""

    CASE_SENSITIVE = not IS_WIN32

    def __new__(cls, *parts):
        # Identity shortcut: wrapping a single LocalPath (but not the mutable
        # LocalWorkdir) returns it unchanged.
        if len(parts) == 1 and \
                isinstance(parts[0], cls) and \
                not isinstance(parts[0], LocalWorkdir):
            return parts[0]
        if not parts:
            raise TypeError("At least one path part is required (none given)")
        if any(isinstance(path, RemotePath) for path in parts):
            raise TypeError("LocalPath cannot be constructed from %r" % (parts,))
        self = super(LocalPath, cls).__new__(cls, os.path.normpath(os.path.join(*(str(p) for p in parts))))
        return self

    @property
    def _path(self):
        return str(self)

    def _get_info(self):
        return self._path

    def _form(self, *parts):
        return LocalPath(*parts)

    @property
    @_setdoc(Path)
    def name(self):
        return os.path.basename(str(self))

    @property
    @_setdoc(Path)
    def dirname(self):
        return LocalPath(os.path.dirname(str(self)))

    @property
    @_setdoc(Path)
    def suffix(self):
        return os.path.splitext(str(self))[1]

    @property
    def suffixes(self):
        # All extensions, outermost last: "a.tar.gz" -> [".tar", ".gz"]
        exts = []
        base = str(self)
        while True:
            base, ext = os.path.splitext(base)
            if ext:
                exts.append(ext)
            else:
                return list(reversed(exts))

    @property
    @_setdoc(Path)
    def uid(self):
        uid = self.stat().st_uid
        name = getpwuid(uid)[0]
        return FSUser(uid, name)

    @property
    @_setdoc(Path)
    def gid(self):
        gid = self.stat().st_gid
        name = getgrgid(gid)[0]
        return FSUser(gid, name)

    @_setdoc(Path)
    def join(self, *others):
        return LocalPath(self, *others)

    @_setdoc(Path)
    def list(self):
        return [self / fn for fn in os.listdir(str(self))]

    @_setdoc(Path)
    def iterdir(self):
        # os.scandir (3.5+) avoids a stat per entry; fall back to listdir.
        try:
            return (self / fn.name for fn in os.scandir(str(self)))
        except AttributeError:
            return (self / fn for fn in os.listdir(str(self)))

    @_setdoc(Path)
    def is_dir(self):
        return os.path.isdir(str(self))

    @_setdoc(Path)
    def is_file(self):
        return os.path.isfile(str(self))

    @_setdoc(Path)
    def is_symlink(self):
        return os.path.islink(str(self))

    @_setdoc(Path)
    def exists(self):
        return os.path.exists(str(self))

    @_setdoc(Path)
    def stat(self):
        return os.stat(str(self))

    @_setdoc(Path)
    def with_name(self, name):
        return LocalPath(self.dirname) / name

    @property
    @_setdoc(Path)
    def stem(self):
        # NOTE(review): drops *all* suffixes ("a.tar.gz" -> "a"), unlike
        # pathlib's stem which drops only the last one - confirm intended.
        return self.name.rsplit(os.path.extsep)[0]

    @_setdoc(Path)
    def with_suffix(self, suffix, depth=1):
        # A valid suffix is "" or starts with the extension separator and is
        # longer than just the separator itself.
        if (suffix and not suffix.startswith(os.path.extsep) or suffix == os.path.extsep):
            raise ValueError("Invalid suffix %r" % (suffix))
        name = self.name
        # depth=None means replace all suffixes.
        depth = len(self.suffixes) if depth is None else min(depth, len(self.suffixes))
        for i in range(depth):
            name, ext = os.path.splitext(name)
        return LocalPath(self.dirname) / (name + suffix)

    @_setdoc(Path)
    def glob(self, pattern):
        fn = lambda pat: [LocalPath(m) for m in glob.glob(str(self / pat))]
        return self._glob(pattern, fn)

    @_setdoc(Path)
    def delete(self):
        if not self.exists():
            return
        if self.is_dir():
            shutil.rmtree(str(self))
        else:
            try:
                os.remove(str(self))
            except OSError:  # pragma: no cover
                # file might already been removed (a race with other threads/processes)
                _, ex, _ = sys.exc_info()
                if ex.errno != errno.ENOENT:
                    raise

    @_setdoc(Path)
    def move(self, dst):
        if isinstance(dst, RemotePath):
            raise TypeError("Cannot move local path %s to %r" % (self, dst))
        shutil.move(str(self), str(dst))
        return LocalPath(dst)

    @_setdoc(Path)
    def copy(self, dst, override = False, overwrite = True):
        # NOTE(review): the error message mentions `override` but the guard
        # tests `overwrite`; the two flags' interaction looks suspect - verify.
        if isinstance(dst, RemotePath):
            raise TypeError("Cannot copy local path %s to %r" % (self, dst))
        dst = LocalPath(dst)
        if not overwrite and dst.exists():
            raise TypeError("File exists and override was not specified")
        if override:
            dst.delete()
        if self.is_dir():
            shutil.copytree(str(self), str(dst))
        else:
            dst_dir = LocalPath(dst).dirname
            if not dst_dir.exists():
                dst_dir.mkdir()
            shutil.copy2(str(self), str(dst))
        return dst

    @_setdoc(Path)
    def mkdir(self):
        if not self.exists():
            try:
                os.makedirs(str(self))
            except OSError:  # pragma: no cover
                # directory might already exist (a race with other threads/processes)
                _, ex, _ = sys.exc_info()
                if ex.errno != errno.EEXIST:
                    raise

    @_setdoc(Path)
    def open(self, mode = "rb"):
        return open(str(self), mode)

    @_setdoc(Path)
    def read(self, encoding=None):
        # Returns bytes, or text when an encoding is given.
        with self.open("rb") as f:
            data = f.read()
            if encoding:
                data = data.decode(encoding)
            return data

    @_setdoc(Path)
    def write(self, data, encoding=None):
        if encoding:
            data = data.encode(encoding)
        with self.open("wb") as f:
            f.write(data)

    @_setdoc(Path)
    def touch(self):
        # Create if missing, then bump mtime to now.
        with open(str(self), 'a'):
            os.utime(str(self), None)

    @_setdoc(Path)
    def chown(self, owner = None, group = None, recursive = None):
        if not hasattr(os, "chown"):
            raise OSError("os.chown() not supported")
        # Accept numeric ids or names; default to the current values.
        uid = self.uid if owner is None else (owner if isinstance(owner, int) else getpwnam(owner)[2])
        gid = self.gid if group is None else (group if isinstance(group, int) else getgrnam(group)[2])
        os.chown(str(self), uid, gid)
        # recursive=None means "recurse iff this is a directory".
        if recursive or (recursive is None and self.is_dir()):
            for subpath in self.walk():
                os.chown(str(subpath), uid, gid)

    @_setdoc(Path)
    def chmod(self, mode):
        if not hasattr(os, "chmod"):
            raise OSError("os.chmod() not supported")
        os.chmod(str(self), mode)

    @_setdoc(Path)
    def access(self, mode = 0):
        return os.access(str(self), self._access_mode_to_flags(mode))

    @_setdoc(Path)
    def link(self, dst):
        if isinstance(dst, RemotePath):
            raise TypeError("Cannot create a hardlink from local path %s to %r" % (self, dst))
        if hasattr(os, "link"):
            os.link(str(self), str(dst))
        else:
            from plumbum.machines.local import local
            # windows: use mklink
            if self.is_dir():
                local["cmd"]("/C", "mklink", "/D", "/H", str(dst), str(self))
            else:
                local["cmd"]("/C", "mklink", "/H", str(dst), str(self))

    @_setdoc(Path)
    def symlink(self, dst):
        if isinstance(dst, RemotePath):
            raise TypeError("Cannot create a symlink from local path %s to %r" % (self, dst))
        if hasattr(os, "symlink"):
            os.symlink(str(self), str(dst))
        else:
            from plumbum.machines.local import local
            # windows: use mklink
            if self.is_dir():
                local["cmd"]("/C", "mklink", "/D", str(dst), str(self))
            else:
                local["cmd"]("/C", "mklink", str(dst), str(self))

    @_setdoc(Path)
    def unlink(self):
        try:
            if hasattr(os, "symlink") or not self.is_dir():
                os.unlink(str(self))
            else:
                # windows: use rmdir for directories and directory symlinks
                os.rmdir(str(self))
        except OSError:  # pragma: no cover
            # file might already been removed (a race with other threads/processes)
            _, ex, _ = sys.exc_info()
            if ex.errno != errno.ENOENT:
                raise

    @_setdoc(Path)
    def as_uri(self, scheme='file'):
        return urlparse.urljoin(str(scheme)+':', urllib.pathname2url(str(self)))

    @property
    @_setdoc(Path)
    def drive(self):
        return os.path.splitdrive(str(self))[0]

    @property
    @_setdoc(Path)
    def root(self):
        return os.path.sep
class LocalWorkdir(LocalPath):
    """Working directory manipulator"""

    def __hash__(self):
        # The working directory is mutable state; it must never be used as a
        # dict key or set member.
        raise TypeError("unhashable type")

    def __new__(cls):
        return super(LocalWorkdir, cls).__new__(cls, os.getcwd())

    def chdir(self, newdir):
        """Changes the current working directory to the given one

        :param newdir: The destination directory (a string or a ``LocalPath``)
        """
        if isinstance(newdir, RemotePath):
            raise TypeError("newdir cannot be %r" % (newdir,))
        logger.debug("Chdir to %s", newdir)
        os.chdir(str(newdir))
        # Return a fresh instance reflecting the new cwd.
        return self.__class__()

    def getpath(self):
        """Returns the current working directory as a ``LocalPath`` object"""
        return LocalPath(self._path)

    @contextmanager
    def __call__(self, newdir):
        """A context manager used to ``chdir`` into a directory and then ``chdir`` back to
        the previous location; much like ``pushd``/``popd``.

        :param newdir: The destination directory (a string or a ``LocalPath``)
        """
        prev = self._path
        newdir = self.chdir(newdir)
        try:
            yield newdir
        finally:
            self.chdir(prev)
|
|
import os
import re
import random
import hashlib
import hmac
import string
from collections import namedtuple
from time import sleep
from functools import wraps
import webapp2
import jinja2
from google.appengine.ext import db
# Jinja2 environment rendering from ./templates with autoescaping enabled.
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir),
                               autoescape=True)
# Secret used to HMAC-sign cookie values.
# NOTE(review): hard-coded secret committed to source; load from config/env
# before deploying.
SECRET = "Write your secret key here! :3"
def datetimeformat(value, format='%d %B %Y'):
    """Jinja filter: render a datetime as text using *format* (strftime codes)."""
    rendered = value.strftime(format)
    return rendered
def formattext(value):
    """Jinja filter: turn newlines into HTML line breaks."""
    return '<br>'.join(value.split('\n'))
# Register the custom filters on the shared Jinja environment.
jinja_env.filters['datetimeformat'] = datetimeformat
jinja_env.filters['formattext'] = formattext
# Decorators
def user_logged_in(function):
    """ Decorator that checks if user is logged in. """
    @wraps(function)
    def wrapper(self, *args, **kwargs):
        # Guard clause: anonymous visitors are bounced to the signup page.
        if not self.user:
            self.redirect("/signup")
            return
        return function(self, *args, **kwargs)
    return wrapper
def post_exists(function):
    """ Decorator that checks if post with specific id exists in db. """
    @wraps(function)
    def wrapper(self, post_id, *args, **kwargs):
        pid = int(post_id)
        post = db.get(db.Key.from_path('BlogPost', pid))
        if post is None:
            self.error(404)
            return
        return function(self, pid, post, *args, **kwargs)
    return wrapper
def comment_exists(function):
    """ Decorator that checks if comment with specific id exists in db. """
    @wraps(function)
    def wrapper(self, comment_id, *args, **kwargs):
        cid = int(comment_id)
        comment = db.get(db.Key.from_path('Comment', cid))
        if comment is None:
            self.error(404)
            return
        return function(self, cid, comment, *args, **kwargs)
    return wrapper
def user_owns_post(function):
    """ Decorator that checks that post was created by current user. """
    @wraps(function)
    def wrapper(self, post_id, post, *args, **kwargs):
        # Guard clause: anyone but the author gets a 404.
        if self.user_id != post.author_id:
            self.error(404)
            return
        return function(self, post_id, post, *args, **kwargs)
    return wrapper
def user_owns_comment(function):
    """ Decorator that checks that comment was created by current user. """
    @wraps(function)
    def wrapper(self, comment_id, comment, *args, **kwargs):
        # Guard clause: anyone but the comment's author gets a 404.
        if self.user_id != comment.user_id:
            self.error(404)
            return
        return function(self, comment_id, comment, *args, **kwargs)
    return wrapper
class BlogHandler(webapp2.RequestHandler):
    """ Parent handler class for all pages. """

    def write(self, *a, **kw):
        self.response.out.write(*a, **kw)

    def render(self, template, **kw):
        self.write(self.render_str(template, **kw))

    def render_str(self, template, **params):
        t = jinja_env.get_template(template)
        params["user"] = self.user
        if self.user:
            # Pass user id to View
            params["user_id"] = User.get_user_id(self.user)
        return t.render(params)

    def set_secure_cookie(self, name, val):
        # Value is HMAC-signed (make_secure_val) before being stored.
        cookie_val = make_secure_val(val)
        self.response.headers.add_header(
            'Set-Cookie',
            '%s=%s; Path=/' % (name, cookie_val))

    def read_secure_cookie(self, name):
        # Returns the verified value, or a falsy value if missing/tampered.
        cookie_val = self.request.cookies.get(name)
        return cookie_val and check_secure_val(cookie_val)

    def initialize(self, *a, **kw):
        # Runs on every request: resolve the current user from the signed cookie.
        webapp2.RequestHandler.initialize(self, *a, **kw)
        uid = self.read_secure_cookie('user_id')
        self.user = uid and User.get_by_id(int(uid))
        # NOTE(review): user_id/username are only set when logged in, so code
        # must check self.user before touching them.
        if self.user:
            self.user_id = User.get_user_id(self.user)
            self.username = self.user.username

    def error(self, code):
        super(BlogHandler, self).error(code)
        if code == 404:
            self.write("Oh no! 404!!!")
class MainPage(BlogHandler):
    """ Handles main page. """

    def get(self):
        # Front page: all posts, newest first.
        posts = BlogPost.get_all_posts()
        self.render("main.html",
                    posts=posts)
class NewPost(BlogHandler):
    """ Allows logged user to create a post. """

    @user_logged_in
    def get(self):
        self.render("create_post.html")

    @user_logged_in
    def post(self):
        subject = self.request.get("subject")
        content = self.request.get("content")
        if subject and content:
            # write to db
            new_post_id = BlogPost.create_post(title=subject,
                                               content=content,
                                               author_id=self.user_id)
            # Permalink page of the freshly created post.
            self.redirect("/" + str(new_post_id))
        else:
            error = "Fill all fields"
            self.render("create_post.html",
                        subject=subject,
                        content=content,
                        error=error)
class EditPost(BlogHandler):
    """ Allows the author of a blog post to edit it. """

    @user_logged_in
    @post_exists
    @user_owns_post
    def get(self, post_id, post):
        subject = post.title
        content = post.content
        self.render("edit_post.html",
                    subject=subject,
                    content=content,
                    post_id=post_id)

    @user_logged_in
    @post_exists
    @user_owns_post
    def post(self, post_id, post):
        subject = self.request.get("subject")
        content = self.request.get("content")
        if subject and content:
            # write to db
            BlogPost.update_post(post_id=post_id,
                                 title=subject,
                                 content=content)
            self.redirect("/%s" % post_id)
        else:
            error = "Fill all fields"
            # Bug fix: re-render with post_id so the edit form still targets
            # this post (the GET path passes it; the error path did not).
            self.render("edit_post.html",
                        subject=subject,
                        content=content,
                        post_id=post_id,
                        error=error)
class DeletePost(BlogHandler):
    """ Allows the author of a blog post to delete it. """

    @user_logged_in
    @post_exists
    @user_owns_post
    def get(self, post_id, post):
        BlogPost.delete_post(post_id)
        # Brief pause so the eventually-consistent datastore reflects the
        # deletion before the redirect re-queries the post list.
        sleep(0.1)
        self.redirect("/")
class ViewPost(BlogHandler):
    """
    Shows certain blog post in separate page with all its likes and comments.
    """

    @post_exists
    def get(self, post_id, post):
        self.render("view_post.html", post=post)

    @post_exists
    def post(self, post_id, post):
        """ Handles form for comments in permalink. """
        # Bug fix: BlogHandler.initialize() only sets self.user_id when a user
        # is logged in, so `if self.user_id:` raised AttributeError for
        # anonymous visitors; self.user is always set, so test that instead.
        if self.user:
            content = self.request.get("content")
            error = "Write your comment"
            if content:
                Comment.create_comment(user_id=self.user_id,
                                       post_id=post_id,
                                       content=content)
                # Pause for datastore consistency before re-rendering.
                sleep(0.1)
                self.redirect("/%s" % post_id)
            else:
                self.render("view_post.html", post=post, error=error)
        else:
            self.redirect("/login")
class Signup(BlogHandler):
    """ Handles registration page. """

    def get(self):
        self.render("signup.html")

    def post(self):
        # Validate the form; collect per-field error messages in params.
        # (valid_username/valid_password/valid_email/is_username_exist and the
        # hashing helpers are defined elsewhere in this module.)
        have_error = False
        input_username = self.request.get('username')
        input_password = self.request.get('password')
        input_verify = self.request.get('verify')
        input_email = self.request.get('email')
        params = dict(username=input_username,
                      email=input_email)
        if not valid_username(input_username):
            params['error_username'] = "That's not a valid username"
            have_error = True
        if is_username_exist(input_username):
            params['error_username'] = "Such name already exists"
            have_error = True
        if not valid_password(input_password):
            params['error_password'] = "That wasn't a valid password"
            have_error = True
        elif input_password != input_verify:
            params['error_verify'] = "Your passwords didn't match"
            have_error = True
        if not valid_email(input_email):
            params['error_email'] = "That's not a valid email"
            have_error = True
        if have_error:
            self.render('signup.html', **params)
        else:
            # write name to db and redirect to welcome page
            salt = make_salt()
            pw_hash = make_pw_hash(input_username, input_password, salt)
            new_user_id = str(
                User.create_user(
                    username=input_username, pw_hash=pw_hash, salt=salt
                )
            )
            # set cookie and redirect
            self.set_secure_cookie("user_id", new_user_id)
            self.redirect("/welcome")
class Login(BlogHandler):
    """ Handles log in page. """

    def get(self):
        self.render("login.html")

    def post(self):
        input_username = self.request.get('username')
        input_password = self.request.get('password')
        # valid_pw (defined elsewhere) returns the matching User or a falsy value.
        user = valid_pw(input_username, input_password)
        if user:
            self.set_secure_cookie("user_id", str(user.key().id()))
            self.redirect("/welcome")
            # Fix: removed a stray no-op `pass` that followed the redirect.
        else:
            error = "Invalid login"
            params = dict(username=input_username, error=error)
            self.render('login.html', **params)
class Logout(BlogHandler):
    """ Handles link for log out. """

    def get(self):
        # Clear the session by overwriting the cookie with an empty value.
        self.response.headers.add_header('Set-Cookie', 'user_id=; Path=/')
        self.redirect("/signup")
class Welcome(BlogHandler):
    """ Handles the welcome page. """

    @user_logged_in
    def get(self):
        # Show only the current user's posts.
        posts = BlogPost.get_all_user_posts(int(self.user_id))
        self.render("welcome.html", username=self.username, posts=posts)
class LikePost(BlogHandler):
    """ Handles like link. """

    @user_logged_in
    def get(self):
        post_id = int(self.request.get("post_id"))
        # Remember to which page to return.
        source = self.request.get("source")
        post = BlogPost.get_post_by_id(post_id)
        likes = Like.get_all_likes(post_id)
        liker_ids = [like.user_id for like in likes]
        # If user already liked this post - remove like, otherwise add.
        # Authors cannot like their own posts.
        if self.user_id != post.author_id:
            if self.user_id in liker_ids:
                Like.delete_like(self.user_id, likes)
            else:
                Like.create_like(self.user_id, post_id)
        # Pause for datastore consistency, then return to the originating page.
        sleep(0.1)
        self.redirect("/" + source)
class EditComment(BlogHandler):
    """ Allows user to edit his/her comment on the separate page. """

    @user_logged_in
    @comment_exists
    @user_owns_comment
    def get(self, comment_id, comment):
        content = comment.content
        self.render("edit_comment.html",
                    content=content,
                    comment_id=comment_id,
                    post_id=comment.post_id)

    @user_logged_in
    @comment_exists
    @user_owns_comment
    def post(self, comment_id, comment):
        content = self.request.get("content")
        if content and comment_id:
            # write to db
            Comment.update_comment(comment_id, content)
            # Pause for datastore consistency before the redirect re-reads.
            sleep(0.1)
            self.redirect("/%s" %
                          Comment.get_comment_by_id(comment_id).post_id)
        else:
            error = "Comment can't be empty"
            # Bug fix: re-render with comment_id/post_id so the form still
            # targets this comment (the GET path passes both; the error path
            # did not).
            self.render("edit_comment.html",
                        content=content,
                        comment_id=comment_id,
                        post_id=comment.post_id,
                        error=error)
class DeleteComment(BlogHandler):
    """ Handles link for removing user's comment. """

    @user_logged_in
    @comment_exists
    @user_owns_comment
    def get(self, comment_id, comment):
        # Capture the parent post id before the comment is gone.
        post_id = comment.post_id
        Comment.delete_comment(comment_id)
        # Pause for datastore consistency before re-rendering the post page.
        sleep(0.1)
        self.redirect("/%s" % post_id)
# DB ENTITIES
class BlogPost(db.Model):
    """ Represents user's blog post. """
    title = db.StringProperty(required=True)
    content = db.TextProperty(required=True)
    # Foreign key to User (numeric datastore id).
    author_id = db.IntegerProperty(required=True)
    created = db.DateTimeProperty(auto_now_add=True)
    last_modified = db.DateTimeProperty(auto_now=True)

    @property
    def id(self):
        """ Contains post id. """
        return BlogPost.get_post_id(self)

    @property
    def author(self):
        """ Contains author nickname. """
        return User.get_name_by_id(self.author_id)

    @property
    def comments(self):
        """ Contains all comments for certain blog post. """
        return list(Comment.get_all_comments(self.id))

    @property
    def likes(self):
        """ Contains all likes for certain blog post. """
        return list(Like.get_all_likes(self.id))

    @property
    def users_liked(self):
        """ Contains ids of all users that liked this post. """
        return [x.user_id for x in self.likes]

    @classmethod
    def get_all_posts(cls):
        """ Returns all posts ordered by date created. """
        return BlogPost.all().order("-created")

    @classmethod
    def get_all_user_posts(cls, user_id):
        """
        Returns all posts owned by certain user.
        Args:
            user_id: Integer, id of the author.
        Returns:
            List of all user's posts.
        """
        return BlogPost.all().filter("author_id =", user_id).order("-created")

    @classmethod
    def create_post(cls, title, content, author_id):
        """
        Adds new post in db.
        Args:
            title: String, the topic of blog post.
            content: String, text of blog post.
            author_id: Integer, associated with author id from User table.
        Returns:
            Integer, generated id of created blog post.
        """
        new_post = BlogPost(title=title, content=content, author_id=author_id)
        new_post.put()
        return BlogPost.get_post_id(new_post)

    @classmethod
    def update_post(cls, post_id, title, content):
        """
        Changes specific post in db.
        Args:
            post_id: Integer, id of blog post.
            title: String, the topic of blog post.
            content: String, text of blog post.
        """
        post = BlogPost.get_post_by_id(post_id)
        if post:
            post.title = title
            post.content = content
            post.put()

    @classmethod
    def delete_post(cls, post_id):
        """
        Delete post and all associated with it likes and comments
        Args:
            post_id: Integer that represents id of blog post.
        """
        post = BlogPost.get_post_by_id(post_id)
        if post:
            post.delete()
            # Cascade: remove dependent likes and comments as well.
            likes = Like.get_all_likes(post_id)
            for like in likes:
                like.delete()
            comments = Comment.get_all_comments(post_id)
            for comment in comments:
                comment.delete()

    @classmethod
    def get_post_id(cls, post):
        """ Returns id for specific post. """
        return post.key().id()

    @classmethod
    def get_post_by_id(cls, post_id):
        """
        Finds specific post in db by it's id.
        Args:
            post_id: Integer, id of the post given by db.
        Returns:
            Specific blog post.
        """
        return BlogPost.get_by_id(post_id)
class User(db.Model):
    """ Represents a user. """
    username = db.StringProperty(required=True)
    # Salted password hash. NOTE(review): the property name `hash` shadows the
    # builtin, but renaming it would change the datastore schema.
    hash = db.StringProperty(required=True)
    salt = db.StringProperty(required=True)

    @classmethod
    def get_name_by_id(cls, user_id):
        """
        Finds user's nickname in db.
        Args:
            user_id: Integer, user's id in db.
        Returns:
            String that represents user's nickname.
        """
        return User.get_by_id(user_id).username

    @classmethod
    def get_user_by_name(cls, name):
        """ Returns user by his/her nickname. """
        return User.all().filter('username =', name).get()

    @classmethod
    def create_user(cls, username, pw_hash, salt):
        """
        Create a new user in db.
        Args:
            username: String with unique user's nickname.
            pw_hash: String, generated has of user's password.
            salt: String, auto generated secret word.
        Returns:
            Integer, generated by db user's id.
        """
        new_user = User(username=username, hash=pw_hash, salt=salt)
        new_user.put()
        return User.get_user_id(new_user)

    @classmethod
    def get_user_id(cls, user):
        """ Find specific user id. """
        return user.key().id()
class Like(db.Model):
    """A single user's "like" of a single blog post."""
    user_id = db.IntegerProperty(required=True)
    post_id = db.IntegerProperty(required=True)

    @classmethod
    def create_like(cls, user_id, post_id):
        """
        Adds new like to db.

        Args:
            user_id: Integer, id of the user who liked post.
            post_id: Integer, id of the post that was liked.
        """
        new_like = Like(user_id=user_id, post_id=post_id)
        new_like.put()

    @classmethod
    def delete_like(cls, user_id, likes):
        """
        Delete specific like from db.

        Args:
            user_id: Integer, id of the user who removed his/her like.
            likes: Query of likes for specific blog post.
        """
        like_to_delete = likes.filter("user_id =", user_id).get()
        # Guard: the query returns None when this user never liked the
        # post (e.g. a double-submitted unlike request). The previous
        # code crashed here with AttributeError on None.delete().
        if like_to_delete is not None:
            like_to_delete.delete()

    @classmethod
    def get_all_likes(cls, post_id):
        """
        Retrieves all likes associated with certain blog post.

        Args:
            post_id: Integer that represents blog post id.
        Returns:
            Query of likes for certain blog post.
        """
        return Like.all().filter("post_id =", post_id)
class Comment(db.Model):
    """A user's comment attached to a blog post."""
    content = db.TextProperty(required=True)
    user_id = db.IntegerProperty(required=True)
    post_id = db.IntegerProperty(required=True)
    created = db.DateTimeProperty(auto_now_add=True)
    last_modified = db.DateTimeProperty(auto_now=True)

    @property
    def id(self):
        """Datastore id of this comment."""
        return Comment.get_comment_id(self)

    @property
    def author(self):
        """Nickname of the user who wrote this comment."""
        return User.get_name_by_id(self.user_id)

    @classmethod
    def get_comment_id(cls, comment):
        """Return the datastore id of *comment*."""
        key = comment.key()
        return key.id()

    @classmethod
    def get_all_comments(cls, post_id):
        """
        Retrieve all comments attached to one blog post.

        Args:
            post_id: Integer that represents blog post id.
        Returns:
            Query of comments, newest first.
        """
        query = Comment.all().filter("post_id =", post_id)
        return query.order("-created")

    @classmethod
    def create_comment(cls, user_id, post_id, content):
        """
        Store a new comment in the datastore.

        Args:
            user_id: Integer, id of the user who commented the post.
            post_id: Integer, id of the post which was commented.
            content: String, text of the comment.
        """
        comment = Comment(user_id=user_id,
                          post_id=post_id,
                          content=content)
        comment.put()

    @classmethod
    def update_comment(cls, comment_id, content):
        """
        Replace the text of an existing comment.

        Args:
            comment_id: Integer, id of the specific comment.
            content: String, updated text of the comment.
        """
        comment = Comment.get_comment_by_id(comment_id)
        if not comment:
            return
        comment.content = content
        comment.put()

    @classmethod
    def get_comment_by_id(cls, comment_id):
        """Fetch a comment by its datastore id, or None."""
        return Comment.get_by_id(comment_id)

    @classmethod
    def delete_comment(cls, comment_id):
        """
        Remove one comment from the datastore.

        Args:
            comment_id: Integer, id of comment that should be removed.
        """
        comment = Comment.get_comment_by_id(comment_id)
        if comment:
            comment.delete()
# Cookie security
def hash_str(s):
    """
    Return the keyed HMAC hex digest of *s* using the module SECRET.

    Args:
        s: String that should be authenticated.
    Returns:
        Hex digest string.
    """
    mac = hmac.new(SECRET, s)
    return mac.hexdigest()
def make_secure_val(s):
    """Return *s* joined with its keyed hash as "value|hash"."""
    return "{0}|{1}".format(s, hash_str(s))
def check_secure_val(h):
    """
    Return the embedded value if *h* is a valid "value|hash" pair.

    Args:
        h: String, combination of value and its hash.
    Returns:
        The value when it matches its hash, otherwise None.
    """
    val, _sep, _digest = h.partition("|")
    if make_secure_val(val) == h:
        return val
# Password security
def make_salt():
    """
    Generate a random 15-character alphabetic salt.

    Uses string.ascii_letters instead of the locale-dependent
    string.letters (which was removed in Python 3), so the alphabet is
    stable regardless of locale settings.
    NOTE(review): the ``random`` module is not cryptographically secure;
    consider ``os.urandom``-based generation for salts.
    Returns:
        String of 15 random ASCII letters.
    """
    return ''.join(random.choice(string.ascii_letters) for _ in range(15))
def make_pw_hash(name, pw, salt):
    """
    Return the SHA-256 hex digest of name + password + salt.

    Args:
        name: String, user's nickname.
        pw: String, user's password.
        salt: String, auto generated secret word.
    Returns:
        Hex digest string.
    """
    material = name + pw + salt
    return hashlib.sha256(material).hexdigest()
def valid_pw(name, pw):
    """
    Return the user whose stored hash matches *pw*, if any.

    Args:
        name: String, user's nickname.
        pw: String, entered password.
    Returns:
        The user entity when the password matches, otherwise None.
    """
    user = User.get_user_by_name(name)
    if not user:
        return None
    if make_pw_hash(name, pw, user.salt) == user.hash:
        return user
# Validate entries
# Username: 3-20 characters drawn from letters, digits, underscore, hyphen.
USER_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
# Password: any 3-20 characters.
PASS_RE = re.compile(r"^.{3,20}$")
# Email: loose "something@something.something" shape (not RFC-complete).
EMAIL_RE = re.compile(r'^[\S]+@[\S]+\.[\S]+$')
def valid_username(username):
    """Return a truthy match when *username* is 3-20 allowed characters."""
    if not username:
        return username
    return USER_RE.match(username)
def is_username_exist(name):
    """
    Return *name* when a user with that nickname already exists.

    Args:
        name: String nickname to look for.
    Returns:
        The name when taken, otherwise None.
    """
    # Let the datastore filter on username instead of fetching every
    # User entity and comparing in Python (the original did a full
    # table scan per signup attempt); reuses the existing query helper.
    if User.get_user_by_name(name):
        return name
def valid_password(password):
    """Return a truthy match when *password* is 3-20 characters long."""
    if not password:
        return password
    return PASS_RE.match(password)
def valid_email(email):
    """Accept an empty email, or one shaped like address@host.tld."""
    if not email:
        return True
    return EMAIL_RE.match(email)
# Routing
# URL table mapping request paths to their webapp2 handler classes
# (handlers are defined earlier in this module). Regex routes use raw
# strings so the `\d` escapes are passed to the regex engine verbatim
# (non-raw `\d` triggers invalid-escape warnings on modern Python).
# NOTE(review): debug=True exposes tracebacks — disable in production.
app = webapp2.WSGIApplication([('/', MainPage),
                               ('/signup', Signup),
                               ('/welcome', Welcome),
                               ('/logout', Logout),
                               ('/login', Login),
                               ('/newpost', NewPost),
                               (r'/(\d+)', ViewPost),
                               (r'/deletepost/(\d+)', DeletePost),
                               (r'/editpost/(\d+)', EditPost),
                               ('/like', LikePost),
                               (r'/deletecomment/(\d+)', DeleteComment),
                               (r'/editcomment/(\d+)', EditComment)
                               ],
                              debug=True)
|
|
#! /usr/local/bin/python
"""
See LICENSE file for copyright and license details.
"""
from decimal import Decimal
from database.databaseaccess import DatabaseAccess
from database.mappings import *
from modules.core_module import CoreModule
from modules.statement import Statement
from modules.constant import *
from modules.function import *
from generic.modules.function import *
from database.mappings import T_TRADE
from generic.modules.calculator_finance import CalculatorFinance
class Trade(CoreModule):
    """
    Trade class.

    Builds the INSERT/UPDATE statement records for the T_TRADE table from
    parsed input rows, delegating the financial math (risk, stop-loss,
    profit/loss) to CalculatorFinance. Python 2 module (print statements).
    """
    def __init__(self, config):
        """
        Initialisation

        Args:
            config: Application configuration, handed to DatabaseAccess.
        """
        self.config = config
        self.statement_trade = Statement(T_TRADE)
        # Whether the current row becomes an INSERT or an UPDATE statement.
        self.flag_insupdel = StatementType.INSERT
        # Identifiers resolved from the database.
        self.trade_id = DEFAULT_INT
        self.market_id = DEFAULT_INT
        self.commodity_id = DEFAULT_INT
        self.commodity_name = ''
        # Buy-side date and its split-out parts.
        self.date_buy = DEFAULT_DATE
        self.year_buy = DEFAULT_INT
        self.month_buy = DEFAULT_INT
        self.day_buy = DEFAULT_INT
        # Sell-side date and its split-out parts.
        self.date_sell = DEFAULT_DATE
        self.year_sell = DEFAULT_INT
        self.month_sell = DEFAULT_INT
        self.day_sell = DEFAULT_INT
        self.long_flag = DEFAULT_INT
        # Prices: converted (base currency) and _orig (input currency).
        self.price_buy = DEFAULT_DECIMAL
        self.price_buy_orig = DEFAULT_DECIMAL
        self.price_sell = DEFAULT_DECIMAL
        self.price_sell_orig = DEFAULT_DECIMAL
        self.shares_buy = DEFAULT_DECIMAL
        self.shares_sell = DEFAULT_DECIMAL
        self.commission_buy = DEFAULT_DECIMAL
        self.commission_sell = DEFAULT_DECIMAL
        self.tax_buy = DEFAULT_DECIMAL
        self.tax_sell = DEFAULT_DECIMAL
        # Risk figures, absolute and as percentages.
        self.risk_input = DEFAULT_DECIMAL
        self.risk_input_percent = DEFAULT_DECIMAL
        self.risk_initial = DEFAULT_DECIMAL
        self.risk_initial_percent = DEFAULT_DECIMAL
        self.risk_actual = DEFAULT_DECIMAL
        self.risk_actual_percent = DEFAULT_DECIMAL
        self.cost_total = DEFAULT_DECIMAL
        self.cost_other = DEFAULT_DECIMAL
        # Amounts: as given in the input, and the "simple" price*quantity.
        self.amount_buy = DEFAULT_DECIMAL
        self.amount_sell = DEFAULT_DECIMAL
        self.amount_buy_simple = DEFAULT_DECIMAL
        self.amount_sell_simple = DEFAULT_DECIMAL
        self.stoploss = DEFAULT_DECIMAL
        self.stoploss_orig = DEFAULT_DECIMAL
        self.profit_loss = DEFAULT_DECIMAL
        self.profit_loss_orig = DEFAULT_DECIMAL
        self.profit_loss_total = DEFAULT_DECIMAL
        self.profit_loss_total_percent = DEFAULT_DECIMAL
        self.r_multiple = DEFAULT_DECIMAL
        self.win_flag = DEFAULT_DECIMAL
        # Foreign keys into the finance table for each half of the trade.
        self.id_buy = DEFAULT_INT
        self.id_sell = DEFAULT_INT
        self.drawdown_id = DEFAULT_INT
        self.pool_at_start = DEFAULT_DECIMAL
        self.date_expiration = DEFAULT_DATE
        self.expired_flag = DEFAULT_INT
        self.spread = DEFAULT_INT
        self.active = DEFAULT_INT
        self.date_created = DEFAULT_DATE
        self.date_modified = DEFAULT_DATE
        # Currently open trade record (if any) and its position; -1 means
        # no open position for the market/commodity at hand.
        self.trade_record = []
        self.open_trade_position = -1
    def create_statements(self, input_fields, statements_finance):
        """
        Creates the records needed for Table.TRADE and returns them as a
        Statement object.

        Args:
            input_fields: Iterable of parsed input rows, indexed by Input.*.
            statements_finance: Finance statements (not read in this body).
        Returns:
            The Statement object for T_TRADE, or None when an error was
            caught and printed.
        """
        #NOTE: price_buy will be fields['i_price']
        #When we buy more, it will be overwritten!
        #Trading without adding to positions is assumed by this code!
        try:
            dba = DatabaseAccess(self.config)
            calc = CalculatorFinance()
            self.date_created = current_date()
            self.date_modified = current_date()
            records = 0
            self.finance_id = dba.first_finance_id_from_latest()
            if self.finance_id != -1:
                for fields in input_fields:
                    # Only rows that move commodities become trade records.
                    if deals_with_commodities(
                        fields[Input.ACCOUNT_FROM],
                        fields[Input.ACCOUNT_TO]
                    ):
                        records = records + 1
                        # GENERAL INFO AT START
                        self.general_info_at_start(dba, calc, fields)
                        # UPDATE/INSERT
                        # NOTE(review): debug trace (Python 2 print). In the
                        # original source the comparison appeared on its own
                        # continuation line after the trailing comma —
                        # confirm it was meant to be printed as done here.
                        print "test: invade_started = ", (self.open_trade_position > -1)
                        if self.open_trade_position > -1:
                            self.update_info(dba, calc, fields)
                        else:
                            self.insert_info(dba, calc, fields)
                        # GENERAL VARIABLES THAT CAN BE CALCULATED
                        # ON THE DATA WE HAVE
                        self.general_info_at_end(dba, fields, self.trade_record)
                        # TEST INFO
                        #self.print_test_info()
                        # ADDING THE STATEMENTS
                        self.add_to_statement(records)
                        self.finance_id = self.finance_id + 1
            return self.statement_trade
        except Exception as ex:
            print Error.CREATE_STATEMENTS_TABLE_TRADE, ex
        finally:
            calc = None
            dba = None
    def general_info_at_start(self, dba, calc, fields):
        """
        General info at the start of the trade.

        Resolves market/commodity ids, the open-position record (if any),
        the long/short flag and the spread for the current input row.
        """
        try:
            self.market_id = dba.market_id_from_market(
                fields[Input.MARKET_CODE])
            self.commodity_id = dba.commodity_id_from_commodity_name(
                fields[Input.COMMODITY_NAME], self.market_id)
            # -1 means "no open position": this row starts a new trade.
            self.open_trade_position = dba.open_trade_position(
                self.market_id,
                self.commodity_id,
                T_TRADE)
            self.finance_record = dba.get_finance_record(self.finance_id)
            self.trade_record = dba.get_trade_record(self.open_trade_position)
            self.long_flag = dba.get_long_flag_value(fields[Input.ACCOUNT_FROM],
                fields[Input.ACCOUNT_TO], self.trade_record)
            self.spread = fields[Input.SPREAD]
        except Exception as ex:
            print Error.CREATE_STATEMENTS_TABLE_TRADE, ex
    def update_info(self, dba, calc, fields):
        """
        Update info.

        Combines the already stored half of the trade (self.trade_record)
        with the current input row, closing the open position via an
        UPDATE statement.
        """
        #NOTE: Correct way of updating = Supplier.query.filter(<your stuff here, or user filter_by, or whatever is in your where clause>).update(values)
        #e.g.: session.query(Supplier).filter_by(id=2).update({"name": u"Mayowa"})
        #TABLE_TRADE.query.filter(market_name=...,commodity_name=...).update({"date_...": date_... etc.})
        try:
            self.flag_insupdel = StatementType.UPDATE
            self.trade_id = self.trade_record['trade_id']
            ## buy/sell related fields
            # Case 1: this row is the buy half; the sell half is stored.
            if (
                we_are_buying(
                    fields[Input.ACCOUNT_FROM],
                    fields[Input.ACCOUNT_TO]
                )
                and self.trade_record['id_buy'] == -1
            ):
                self.id_buy = self.finance_id
                self.id_sell = self.trade_record['id_sell']
                self.date_buy = fields[Input.DATE]
                self.date_sell = self.trade_record['date_sell']
                self.price_buy = calc.convert_from_orig(
                    fields[Input.PRICE],
                    fields[Input.EXCHANGE_RATE])
                self.price_buy_orig = fields[Input.PRICE]
                self.price_sell = self.trade_record['price_sell']
                self.price_sell_orig = self.trade_record['price_sell_orig']
                self.shares_buy = fields[Input.QUANTITY]
                self.shares_sell = self.trade_record['shares_sell']
                self.commission_buy = fields[Input.COMMISSION]
                self.commission_sell = self.trade_record['commission_sell']
                self.tax_buy = fields[Input.TAX]
                self.tax_sell = self.trade_record['tax_sell']
                self.amount_buy = fields[Input.AMOUNT]
                self.amount_buy_simple = calc.calculate_amount_simple(
                    calc.convert_from_orig(
                        fields[Input.PRICE],
                        fields[Input.EXCHANGE_RATE]
                    ),
                    fields[Input.QUANTITY]
                )
                self.amount_sell = self.trade_record['amount_sell']
                self.amount_sell_simple = self.trade_record['amount_sell_simple']
            # Case 2: this row is the sell half; the buy half is stored.
            elif (
                not we_are_buying(
                    fields[Input.ACCOUNT_FROM],
                    fields[Input.ACCOUNT_TO]
                )
                and self.trade_record['id_sell'] == -1
            ):
                self.id_buy = self.trade_record['id_buy']
                self.id_sell = self.finance_id
                self.date_buy = self.trade_record['date_buy']
                self.date_sell = fields[Input.DATE]
                self.price_buy = self.trade_record['price_buy']
                self.price_buy_orig = self.trade_record['price_buy_orig']
                self.price_sell = calc.convert_from_orig(
                    fields[Input.PRICE],
                    fields[Input.EXCHANGE_RATE]
                )
                self.price_sell_orig = fields[Input.PRICE]
                self.shares_buy = self.trade_record['shares_buy']
                self.shares_sell = fields[Input.QUANTITY]
                self.commission_buy = self.trade_record['commission_buy']
                self.commission_sell = fields[Input.COMMISSION]
                self.tax_buy = self.trade_record['tax_buy']
                self.tax_sell = fields[Input.TAX]
                self.amount_buy = self.trade_record['amount_buy']
                self.amount_buy_simple = self.trade_record['amount_buy_simple']
                self.amount_sell = fields[Input.AMOUNT]
                self.amount_sell_simple = calc.calculate_amount_simple(
                    calc.convert_from_orig(
                        fields[Input.PRICE],
                        fields[Input.EXCHANGE_RATE]
                    ),
                    fields[Input.QUANTITY]
                )
            else:
                raise Exception(
                    "{0} already contains a sell or buy record"
                    " and you are trying to add one like it"
                    " again?".format(T_TRADE))
            # Both halves are now known: derive totals, risk and flags.
            self.stoploss = self.trade_record['stoploss']
            self.stoploss_orig = self.trade_record['stoploss_orig']
            self.profit_loss = calc.calculate_profit_loss(
                self.price_buy,
                self.shares_buy,
                self.price_sell,
                self.shares_sell,
                self.tax_buy,
                self.tax_sell,
                self.commission_buy,
                self.commission_sell,
                self.long_flag)
            self.profit_loss_orig = calc.convert_to_orig(
                self.profit_loss,
                fields[Input.EXCHANGE_RATE]
            )
            self.profit_loss_total = calc.calculate_profit_loss_total(
                self.price_buy,
                self.shares_buy,
                self.price_sell,
                self.shares_sell,
                self.tax_buy,
                self.tax_sell,
                self.commission_buy,
                self.commission_sell,
                self.long_flag)
            # NOTE(review): divides by amount_buy_simple — confirm it can
            # never still be the default (zero) value here.
            self.profit_loss_total_percent = (
                self.profit_loss_total / self.amount_buy_simple
            ) * Decimal(100.0)
            self.pool_at_start = self.trade_record['pool_at_start']
            self.date_created = self.trade_record['date_created']
            self.risk_input = self.trade_record['risk_input']
            self.risk_input_percent = self.trade_record['risk_input_percent']
            self.risk_initial = self.trade_record['risk_initial']
            self.risk_initial_percent = (
                self.risk_initial / self.amount_buy_simple
            ) * Decimal(100.0)
            self.risk_actual = calc.calculate_risk_actual(
                self.price_buy,
                self.shares_buy,
                self.tax_buy,
                self.commission_buy,
                self.price_sell,
                self.shares_sell,
                self.tax_sell,
                self.commission_sell,
                self.stoploss,
                self.risk_initial,
                self.profit_loss,
                self.long_flag)
            self.risk_actual_percent = (
                self.risk_actual /
                get_pool_without_margin(
                    fields[Input.POOL],
                    dba.get_margin_pool())
            ) * Decimal(100.0)
            self.cost_total = calc.calculate_cost_total(
                self.tax_buy,
                self.commission_buy,
                self.tax_sell,
                self.commission_sell)
            self.cost_other = calc.calculate_cost_other(
                self.cost_total,
                self.profit_loss)
            self.win_flag = dba.get_win_flag_value(
                self.price_buy,
                self.price_sell)
            self.drawdown_id = self.trade_record['drawdown_id']
            self.r_multiple = calc.calculate_r_multiple(
                self.profit_loss,
                self.risk_initial)
            self.date_expiration = self.trade_record['date_expiration']
            # TODO: for investing, id_buy/sell is id_firstbuy and id_firstsell
            # and expiration flag should only be set at the end of the trade
            # when
            # the trade is closed. This means that date_buy and date_sell is not
            # enough to determine if a trade is closed or not. The total shares
            # should also be 0 when added up OR shares_buy = shares_sell.
            # So add:
            #if trade_closed: (or something like that)
            self.expired_flag = (
                1 if (self.date_sell > self.date_expiration)
                and (self.date_expiration != DEFAULT_DATE)
                else 0
            )
        except Exception as ex:
            print Error.CREATE_STATEMENTS_TABLE_TRADE, ex
    def insert_info(self, dba, calc, fields):
        """
        Insert info.

        Fills the object from the current input row alone, opening a new
        position as an INSERT statement.
        """
        try:
            self.flag_insupdel = StatementType.INSERT
            self.trade_id = None # insert: new one created automatically
            ## buy/sell related fields
            if we_are_buying(
                fields[Input.ACCOUNT_FROM],
                fields[Input.ACCOUNT_TO]
            ):
                self.id_buy = self.finance_id
                self.id_sell = -1
                self.date_buy = fields[Input.DATE]
                self.date_sell = DEFAULT_DATE
                self.price_buy = calc.convert_from_orig(
                    fields[Input.PRICE],
                    fields[Input.EXCHANGE_RATE]
                )
                self.price_buy_orig = fields[Input.PRICE]
                self.price_sell = DEFAULT_DECIMAL
                self.price_sell_orig = DEFAULT_DECIMAL
                self.shares_buy = fields[Input.QUANTITY]
                self.shares_sell = DEFAULT_INT
                # TODO: commission and tax from T_RATE,
                # when fields[Input.AUTOMATIC_FLAG] is 1
                self.commission_buy = fields[Input.COMMISSION]
                self.commission_sell = DEFAULT_DECIMAL
                self.amount_buy = fields[Input.AMOUNT]
                self.amount_sell = DEFAULT_DECIMAL
                self.amount_buy_simple = calc.calculate_amount_simple(
                    calc.convert_from_orig(
                        fields[Input.PRICE],
                        fields[Input.EXCHANGE_RATE]
                    ),
                    fields[Input.QUANTITY]
                )
                self.amount_sell_simple = DEFAULT_DECIMAL
                self.tax_buy = fields[Input.TAX]
                self.tax_sell = DEFAULT_DECIMAL
                # NOTE(review): self.risk_initial still holds its default
                # value here — it is only computed further below in this
                # method. Presumably the percentage was meant to use the
                # computed risk_initial; confirm the intended ordering.
                self.risk_initial_percent = (
                    self.risk_initial / self.amount_buy_simple
                ) * Decimal(100.0)
            else:
                self.id_buy = -1
                self.id_sell = self.finance_id
                self.date_sell = fields[Input.DATE]
                self.date_buy = DEFAULT_DATE
                self.price_buy = DEFAULT_DECIMAL
                self.price_buy_orig = DEFAULT_DECIMAL
                self.price_sell = calc.convert_from_orig(fields[Input.PRICE], fields[Input.EXCHANGE_RATE])
                self.price_sell_orig = fields[Input.PRICE]
                self.shares_buy = DEFAULT_INT
                self.shares_sell = fields[Input.QUANTITY]
                self.commission_buy = DEFAULT_DECIMAL
                # TODO: commission and tax from T_RATE (see also higher)
                self.commission_sell = fields[Input.COMMISSION]
                self.amount_buy = DEFAULT_DECIMAL
                self.amount_sell = fields[Input.AMOUNT]
                self.amount_buy_simple = DEFAULT_DECIMAL
                self.amount_sell_simple = calc.calculate_amount_simple(
                    calc.convert_from_orig(fields[Input.PRICE], fields[Input.EXCHANGE_RATE])
                    , fields[Input.QUANTITY])
                self.tax_buy = DEFAULT_DECIMAL
                self.tax_sell = fields[Input.TAX]
                # NOTE(review): same ordering concern as the buy branch —
                # risk_initial is still the default value at this point.
                self.risk_initial_percent = Decimal(100.0)*self.risk_initial/self.amount_sell_simple
            self.stoploss = calc.calculate_stoploss(
                calc.convert_from_orig(fields[Input.PRICE], fields[Input.EXCHANGE_RATE]),
                fields[Input.QUANTITY],
                fields[Input.TAX],
                fields[Input.COMMISSION],
                fields[Input.RISK],
                get_pool_without_margin(
                    fields[Input.POOL],
                    dba.get_margin_pool()),
                self.long_flag)
            self.stoploss_orig = calc.convert_to_orig(self.stoploss, fields[Input.EXCHANGE_RATE])
            self.profit_loss = DEFAULT_DECIMAL #Only calculated at end of trade.
            self.profit_loss_orig = DEFAULT_DECIMAL
            self.profit_loss_total = DEFAULT_DECIMAL
            self.profit_loss_total_percent = DEFAULT_DECIMAL
            self.pool_at_start = fields[Input.POOL]
            self.risk_input = calc.calculate_risk_input(
                get_pool_without_margin(
                    fields[Input.POOL],
                    dba.get_margin_pool()),
                fields[Input.RISK])
            self.risk_input_percent = fields[Input.RISK]
            self.risk_initial = calc.calculate_risk_initial(
                calc.convert_from_orig(
                    fields[Input.PRICE],
                    fields[Input.EXCHANGE_RATE]
                ),
                fields[Input.QUANTITY],
                fields[Input.TAX],
                fields[Input.COMMISSION],
                self.stoploss,
                self.long_flag)
            self.risk_actual = DEFAULT_DECIMAL
            self.risk_actual_percent = DEFAULT_DECIMAL
            self.cost_total = DEFAULT_DECIMAL
            self.cost_other = DEFAULT_DECIMAL
            self.win_flag = -1 # not yet finished, we can not know it yet.
            self.drawdown_id = dba.new_drawdown_record()
            self.r_multiple = DEFAULT_DECIMAL
            self.date_expiration = fields[Input.DATE_EXPIRATION]
            self.expired_flag = DEFAULT_INT
        except Exception as ex:
            print Error.CREATE_STATEMENTS_TABLE_TRADE, ex
    def general_info_at_end(self, dba, fields, trade_record):
        """
        General info at the end of the trade.

        Derives the per-pool profit/loss percentage and splits the buy and
        sell dates into year/month/day parts for reporting.
        """
        try:
            self.profit_loss_percent = (
                self.profit_loss / get_pool_without_margin(
                    self.pool_at_start,
                    dba.get_margin_pool())
            )
            self.year_buy = self.date_buy.year
            self.month_buy = self.date_buy.month
            self.day_buy = self.date_buy.day
            self.year_sell = self.date_sell.year
            self.month_sell = self.date_sell.month
            self.day_sell = self.date_sell.day
        except Exception as ex:
            print Error.CREATE_STATEMENTS_TABLE_TRADE, ex
    def add_to_statement(self, records):
        """
        Add the data to the statement list.

        Packs every computed field into a row dict and registers it on
        statement_trade together with the INSERT/UPDATE flag.
        """
        self.statement_trade.add(
            records,
            {
                'trade_id': self.trade_id,
                'market_id': int(self.market_id),
                'commodity_id': int(self.commodity_id),
                'date_buy': self.date_buy,
                'year_buy': self.year_buy,
                'month_buy': self.month_buy,
                'day_buy': self.day_buy,
                'date_sell': self.date_sell,
                'year_sell': self.year_sell,
                'month_sell': self.month_sell,
                'day_sell': self.day_sell,
                'long_flag': int(self.long_flag),
                'price_buy': Decimal(self.price_buy),
                'price_buy_orig': Decimal(self.price_buy_orig),
                'price_sell': Decimal(self.price_sell),
                'price_sell_orig': Decimal(self.price_sell_orig),
                'shares_buy': int(self.shares_buy),
                'shares_sell': int(self.shares_sell),
                'commission_buy': Decimal(self.commission_buy),
                'commission_sell': Decimal(self.commission_sell),
                'tax_buy': Decimal(self.tax_buy),
                'tax_sell': Decimal(self.tax_sell),
                'amount_buy': Decimal(self.amount_buy),
                'amount_sell': Decimal(self.amount_sell),
                'amount_buy_simple': Decimal(self.amount_buy_simple),
                'amount_sell_simple': Decimal(self.amount_sell_simple),
                'risk_input': Decimal(self.risk_input),
                'risk_input_percent': Decimal(self.risk_input_percent),
                'risk_initial': Decimal(self.risk_initial),
                'risk_initial_percent': Decimal(self.risk_initial_percent),
                'risk_actual': Decimal(self.risk_actual),
                'risk_actual_percent': Decimal(self.risk_actual_percent),
                'cost_total': Decimal(self.cost_total),
                'cost_other': Decimal(self.cost_other),
                'stoploss': Decimal(self.stoploss),
                'stoploss_orig': Decimal(self.stoploss_orig),
                'profit_loss': Decimal(self.profit_loss),
                'profit_loss_orig': Decimal(self.profit_loss_orig),
                'profit_loss_total': Decimal(self.profit_loss_total),
                'profit_loss_total_percent': Decimal(
                    self.profit_loss_total_percent),
                'r_multiple': Decimal(self.r_multiple),
                'win_flag': int(self.win_flag),
                'id_buy': int(self.id_buy),
                'id_sell': int(self.id_sell),
                'drawdown_id': int(self.drawdown_id),
                'pool_at_start': Decimal(self.pool_at_start),
                'date_expiration': self.date_expiration,
                'expired_flag': self.expired_flag,
                'spread': self.spread,
                'active': 1,
                'date_created': self.date_created,
                'date_modified': self.date_modified
            },
            self.flag_insupdel
        )
    def print_test_info(self):
        """
        Print test info.

        NOTE(review): these are function-call style prints in a Python 2
        module that elsewhere uses print statements — here each call
        prints a tuple like ('market_id =', 1). Confirm whether a
        __future__ print_function import was intended.
        """
        print('<print>')
        print('market_id =', self.market_id)
        print('commodity_id =', self.commodity_id)
        print('date_buy =', self.date_buy)
        print('date_sell =', self.date_sell)
        print('long_flag =', self.long_flag)
        print('price_buy =', self.price_buy)
        print('price_sell =', self.price_sell)
        print('price_buy_orig =', self.price_buy_orig)
        print('price_sell_orig =', self.price_sell_orig)
        print('amount_buy_simple =', self.amount_buy_simple)
        print('amount_sell_simple =', self.amount_sell_simple)
        print('risk_input =', self.risk_input)
        print('risk_input_percent =', self.risk_input_percent)
        print('risk_initial =', self.risk_initial)
        print('risk_initial_percent =', self.risk_initial_percent)
        print('risk_actual =', self.risk_actual)
        print('risk_actual_percent =', self.risk_actual_percent)
        print('cost_total =', self.cost_total)
        print('cost_other =', self.cost_other)
        print('stoploss =', self.stoploss)
        print('profit_loss =', self.profit_loss)
        print('profit_loss_orig =', self.profit_loss_orig)
        print('profit_loss_total =', self.profit_loss_total)
        print('profit_loss_total_percent =', self.profit_loss_total_percent)
        print('r_multiple =', self.r_multiple)
        print('win_flag =', self.win_flag)
        print('id_buy =', self.id_buy)
        print('id_sell =', self.id_sell)
        print('drawdown_id =', self.drawdown_id)
        print('pool_at_start =', self.pool_at_start)
        print('spread=', self.spread)
        print('date_expiration =', self.date_expiration)
        print('expired_flag =', self.expired_flag)
        print('<\print>')
|
|
# Natural Language Toolkit: Text Segmentation Metrics
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
# Steven Bird <stevenbird1@gmail.com>
# David Doukhan <david.doukhan@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Text Segmentation Metrics
1. Windowdiff
Pevzner, L., and Hearst, M., A Critique and Improvement of
an Evaluation Metric for Text Segmentation,
Computational Linguistics 28, 19-36
2. Generalized Hamming Distance
Bookstein A., Kulyukin V.A., Raita T.
Generalized Hamming Distance
Information Retrieval 5, 2002, pp 353-375
Baseline implementation in C++
http://digital.cs.usu.edu/~vkulyukin/vkweb/software/ghd/ghd.html
Study describing benefits of Generalized Hamming Distance Versus
WindowDiff for evaluating text segmentation tasks
Begsten, Y. Quel indice pour mesurer l'efficacite en segmentation de textes ?
TALN 2009
3. Pk text segmentation metric
Beeferman D., Berger A., Lafferty J. (1999)
Statistical Models for Text Segmentation
Machine Learning, 34, 177-210
"""
try:
import numpy as np
except ImportError:
pass
from nltk.compat import xrange
def windowdiff(seg1, seg2, k, boundary="1", weighted=False):
    """
    Compute the windowdiff score for a pair of segmentations.  A
    segmentation is any sequence over a vocabulary of two items
    (e.g. "0", "1"), where the specified boundary value is used to
    mark the edge of a segmentation.

        >>> s1 = "000100000010"
        >>> s2 = "000010000100"
        >>> s3 = "100000010000"
        >>> '%.2f' % windowdiff(s1, s1, 3)
        '0.00'
        >>> '%.2f' % windowdiff(s1, s2, 3)
        '0.30'
        >>> '%.2f' % windowdiff(s2, s3, 3)
        '0.80'

    :param seg1: a segmentation
    :type seg1: str or list
    :param seg2: a segmentation
    :type seg2: str or list
    :param k: window width
    :type k: int
    :param boundary: boundary value
    :type boundary: str or int or bool
    :param weighted: use the weighted variant of windowdiff
    :type weighted: boolean
    :rtype: float
    """
    if len(seg1) != len(seg2):
        raise ValueError("Segmentations have unequal length")
    if k > len(seg1):
        raise ValueError("Window width k should be smaller or equal than segmentation lengths")
    n_windows = len(seg1) - k + 1
    total = 0
    # Slide a width-k window over both segmentations and compare the
    # number of boundaries seen in each.
    for start in range(n_windows):
        count1 = seg1[start:start + k].count(boundary)
        count2 = seg2[start:start + k].count(boundary)
        mismatch = abs(count1 - count2)
        total += mismatch if weighted else min(1, mismatch)
    return total / float(n_windows)
# Generalized Hamming Distance
def _init_mat(nrows, ncols, ins_cost, del_cost):
mat = np.empty((nrows, ncols))
mat[0, :] = ins_cost * np.arange(ncols)
mat[:, 0] = del_cost * np.arange(nrows)
return mat
def _ghd_aux(mat, rowv, colv, ins_cost, del_cost, shift_cost_coeff):
for i, rowi in enumerate(rowv):
for j, colj in enumerate(colv):
shift_cost = shift_cost_coeff * abs(rowi - colj) + mat[i, j]
if rowi == colj:
# boundaries are at the same location, no transformation required
tcost = mat[i, j]
elif rowi > colj:
# boundary match through a deletion
tcost = del_cost + mat[i, j + 1]
else:
# boundary match through an insertion
tcost = ins_cost + mat[i + 1, j]
mat[i + 1, j + 1] = min(tcost, shift_cost)
def ghd(ref, hyp, ins_cost=2.0, del_cost=2.0, shift_cost_coeff=1.0, boundary='1'):
    """
    Compute the Generalized Hamming Distance for a reference and a hypothetical
    segmentation, corresponding to the cost related to the transformation
    of the hypothetical segmentation into the reference segmentation
    through boundary insertion, deletion and shift operations.

    A segmentation is any sequence over a vocabulary of two items
    (e.g. "0", "1"), where the specified boundary value is used to
    mark the edge of a segmentation.

    Recommended parameter values are a shift_cost_coeff of 2.
    Associated with a ins_cost, and del_cost equal to the mean segment
    length in the reference segmentation.

        >>> # Same examples as Kulyukin C++ implementation
        >>> ghd('1100100000', '1100010000', 1.0, 1.0, 0.5)
        0.5
        >>> ghd('1100100000', '1100000001', 1.0, 1.0, 0.5)
        2.0
        >>> ghd('011', '110', 1.0, 1.0, 0.5)
        1.0
        >>> ghd('1', '0', 1.0, 1.0, 0.5)
        1.0
        >>> ghd('111', '000', 1.0, 1.0, 0.5)
        3.0
        >>> ghd('000', '111', 1.0, 2.0, 0.5)
        6.0

    :param ref: the reference segmentation
    :type ref: str or list
    :param hyp: the hypothetical segmentation
    :type hyp: str or list
    :param ins_cost: insertion cost
    :type ins_cost: float
    :param del_cost: deletion cost
    :type del_cost: float
    :param shift_cost_coeff: constant used to compute the cost of a shift.
        shift cost = shift_cost_coeff * |i - j| where i and j are
        the positions indicating the shift
    :type shift_cost_coeff: float
    :param boundary: boundary value
    :type boundary: str or int or bool
    :rtype: float
    """
    ref_idx = [pos for pos, item in enumerate(ref) if item == boundary]
    hyp_idx = [pos for pos, item in enumerate(hyp) if item == boundary]
    nref_bound = len(ref_idx)
    nhyp_bound = len(hyp_idx)
    # Degenerate cases: one or both sides carry no boundaries at all,
    # so the distance is a pure run of insertions or deletions.
    if nref_bound == 0 and nhyp_bound == 0:
        return 0.0
    if nhyp_bound == 0:
        return nref_bound * ins_cost
    if nref_bound == 0:
        return nhyp_bound * del_cost
    mat = _init_mat(nhyp_bound + 1, nref_bound + 1, ins_cost, del_cost)
    _ghd_aux(mat, hyp_idx, ref_idx, ins_cost, del_cost, shift_cost_coeff)
    return mat[-1, -1]
# Beeferman's Pk text segmentation evaluation metric
def pk(ref, hyp, k=None, boundary='1'):
    """
    Compute the Pk metric for a pair of segmentations.  A segmentation
    is any sequence over a vocabulary of two items (e.g. "0", "1"),
    where the specified boundary value is used to mark the edge of a
    segmentation.

        >>> '%.2f' % pk('0100'*100, '1'*400, 2)
        '0.50'
        >>> '%.2f' % pk('0100'*100, '0'*400, 2)
        '0.50'
        >>> '%.2f' % pk('0100'*100, '0100'*100, 2)
        '0.00'

    :param ref: the reference segmentation
    :type ref: str or list
    :param hyp: the segmentation to evaluate
    :type hyp: str or list
    :param k: window size, if None, set to half of the average reference segment length
    :type k: int
    :param boundary: boundary value
    :type boundary: str or int or bool
    :rtype: float
    """
    if k is None:
        # Half of the mean reference segment length. Note this raises
        # ZeroDivisionError when ref contains no boundary at all.
        k = int(round(len(ref) / (ref.count(boundary) * 2.)))
    n_windows = len(ref) - k + 1
    err = 0
    # Count windows where exactly one of the two segmentations
    # contains a boundary. Uses the builtin range: the original went
    # through the nltk.compat xrange shim, which is unnecessary.
    for i in range(n_windows):
        r = ref[i:i + k].count(boundary) > 0
        h = hyp[i:i + k].count(boundary) > 0
        if r != h:
            err += 1
    return err / float(n_windows)
# skip doctests if numpy is not installed
def setup_module(module):
    """Nose setup hook: skip this module's doctests when numpy is missing."""
    from nose import SkipTest

    try:
        import numpy  # presence check only
    except ImportError:
        raise SkipTest("numpy is required for nltk.metrics.segmentation")
|
|
#-*- coding:utf-8 -*-
from __future__ import division
from __future__ import absolute_import
from __future__ import with_statement
from __future__ import print_function
from __future__ import unicode_literals
from UserDict import IterableUserDict
from contextlib import contextmanager
from datetime import datetime
from uuid import uuid4
from werkzeug import cached_property, LocalProxy
from ZODB.DB import DB
from flask import g, current_app
import transaction
from persistent import Persistent
from persistent.list import PersistentList
from persistent.mapping import PersistentMapping
from BTrees.OOBTree import OOBTree
__all__ = ('ZODB', 'Model', 'Factory',
'List', 'Mapping', 'Timestamp', 'UUID4', 'current_db')
class ZODB(IterableUserDict):
"""ZODB extension for Flask: persistence of native Python objects.
Basic setup::
from ZODB.FileStorage import FileStorage
from flask import Flask, redirect, render_template_string
from flaskext.zodb import ZODB
ZODB_STORAGE = lambda: FileStorage('app.fs')
app = Flask(__name__)
app.config.from_object(__name__)
db = ZODB(app)
@app.route('/')
@app.route('/<message>')
def index(message=None):
if message is not None:
db['message'] = message
return redirect('/')
return render_template_string('Latest message: {{ message }}',
message=db['message'])
During requests the ``db`` object acts like a Python ``dict``. Any changes
made to it *directly* will persist, changes made to mutable objects
within will have to be marked as mutated. This is done for you if you
inherit :class:`Model` for your own classes and use :attr:`List` and
:attr:`Mapping` as substitutes for Python's ``list`` and ``dict``.
Outside of requests, the object can be used as a context manager if
called, yielding the root object to be used inside the context.
See :meth:`__call__`.
"""
cb = None
def __init__(self, app=None):
self.app = app
if app is not None:
self.init_app(app)
def init(self, cb):
"""Register a callback to be called with the root mapping when the
database is first loaded. Useful for setting defaults.
"""
self.cb = cb
return cb
@cached_property
def db(self):
db = DB(self.app.config['ZODB_STORAGE']())
if self.cb is not None:
with self.transaction(db) as root:
self.cb(root)
return db
@property
def connection(self):
"""Request-local database connection."""
return g._zodb_connection
@connection.setter
def connection(self, new):
g._zodb_connection = new
@property
def root(self):
"""Root object for the request-local connection."""
return self.connection.root()
@property
def data(self):
return self.root
def init_app(self, app):
assert 'ZODB_STORAGE' in app.config, \
'ZODB_STORAGE must be configured.'
self.app = app
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['zodb'] = self
@app.before_request
def open_db():
self.connection = self.db.open()
transaction.begin()
@app.after_request
def close_db(response):
transaction.commit()
self.connection.close()
return response
@contextmanager
def transaction(self, db=None):
    """Context manager yielding the root mapping inside a transaction.

    Opens a connection on *db* (defaulting to :attr:`db`), begins a
    transaction, yields the root object, then always commits and closes.

    Fix: the connection is opened *before* entering ``try``.  Previously
    a failing ``db.open()`` left ``connection`` unbound, so the ``finally``
    clause raised UnboundLocalError and masked the original error.
    """
    if db is None:
        db = self.db
    connection = db.open()
    try:
        transaction.begin()
        yield connection.root()
    finally:
        transaction.commit()
        connection.close()
def __call__(self):
    """Transactional context, for database access outside of requests.
    ::

        with db() as root:
            root['this'] = 'is committed at the end of the context.'

    Simply delegates to :meth:`transaction` on the default database.
    """
    return self.transaction()
class Factory(object):
    """Set a :class:`Model` attribute with a callable on instantiation.

    Useful for delaying initiation of mutable or dynamic objects.
    ::

        class Dice(Model):
            side = Factory(random.randint, 1, 6)

    ::

        >>> Dice()
        Dice(side=3)
        >>> Dice()
        Dice(side=5)
    """

    def __init__(self, callable, *args, **kwargs):
        # Capture the factory callable and the arguments to apply later.
        self.callable, self.args, self.kwargs = callable, args, kwargs

    def __call__(self):
        # Produce a fresh value on every invocation.
        return self.callable(*self.args, **self.kwargs)
#: UTC timestamp factory
Timestamp = Factory(datetime.utcnow)
#: UUID4 factory
UUID4 = Factory(uuid4)
#: Factory for :func:`PersistentList` -- use instead of ``list`` so ZODB
#: notices in-place mutations.
List = Factory(PersistentList)
#: Factory for :func:`PersistentMapping` -- use instead of ``dict``.
Mapping = Factory(PersistentMapping)
#: Factory for an object-to-object balance tree mapping,
#: a :class:`~BTrees.OOBTree.OOBTree`.
BTree = Factory(OOBTree)
class Model(Persistent):
    """Convenience model base.

    You can subclass :class:`persistent.Persistent` directly if you prefer,
    but this base provides some conveniences.

    Set attributes in instantiation::

        >>> Model(title='Hello!')
        Model(title='Hello!')
        >>> Model(title='Hello!').title
        'Hello!'

    Declare mutable and dynamic attributes in the class definition::

        class Post(Model):
            id = UUID4
            posted_on = Timestamp
            comments = List

    ::

        >>> Post()
        Post(id=UUID('c3f043a8-8f1f-4381-89b3-fd1f35265925'),
             posted_on=datetime.datetime(2010, 10, 20, 15, 42, 34, 138015),
             comments=[])
        >>> type(Post().comments)
        <class 'persistent.list.PersistentList'>
    """

    def __init__(self, **kwargs):
        # Materialize Factory-declared class attributes into per-instance
        # values, so each instance gets its own list/mapping/timestamp.
        for name in dir(self):
            value = getattr(self, name)
            if isinstance(value, Factory):
                setattr(self, name, value())
        # Apply constructor keyword arguments.  Values aimed at persistent
        # containers are merged in place so the attribute stays a
        # PersistentList/PersistentMapping and mutation tracking keeps working.
        # Fix: use items() rather than the Python 2-only iteritems() so the
        # code also runs under Python 3.
        for name, value in kwargs.items():
            try:
                attribute = getattr(self, name)
            except AttributeError:
                attribute = None
            if isinstance(attribute, PersistentList):
                attribute.extend(value)
            elif isinstance(attribute, (PersistentMapping, OOBTree)):
                attribute.update(value)
            else:
                setattr(self, name, value)

    def __repr__(self):
        # Render every instance attribute, e.g. ``Model(title='Hello!')``.
        attributes = ', '.join('{0}={1!r}'.format(name, value)
                               for (name, value) in vars(self).items())
        return '{0}({1})'.format(self.__class__.__name__, attributes)
#: The :class:`ZODB` instance for the current :class:`~flask.Flask`
#: application.  Resolved lazily through the app's ``extensions`` registry,
#: so it is only usable inside an application context.
current_db = LocalProxy(lambda: current_app.extensions['zodb'])
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
import threading
import multiprocessing
from multiprocessing.pool import ThreadPool
import os
import signal
import sys
from pants.reporting.report import Report
class Work(object):
    """Represents multiple concurrent calls to the same callable."""

    def __init__(self, func, args_tuples, workunit_name=None):
        # A callable.
        self.func = func
        # A list of tuples of args. func will be called once per tuple, concurrently.
        # The length of this list is the cardinality of the work.
        self.args_tuples = args_tuples
        # If specified, each invocation will be executed in a workunit of this name.
        self.workunit_name = workunit_name
class WorkerPool(object):
    """A pool of workers.

    Workers are threads, and so are subject to GIL constraints. Submitting
    CPU-bound work may not be effective. Use this class primarily for
    IO-bound work.
    """

    def __init__(self, parent_workunit, run_tracker, num_workers):
        """
        :param parent_workunit: workunit that all worker threads register under.
        :param run_tracker: provides register_thread / new_workunit_under_parent / log.
        :param num_workers: number of worker threads.
        """
        self._run_tracker = run_tracker
        # All workers accrue work to the same root.
        self._pool = ThreadPool(processes=num_workers,
                                initializer=self._run_tracker.register_thread,
                                initargs=(parent_workunit, ))
        # We mustn't shutdown when there are pending workchains, as they may need
        # to submit work in the future, and the pool doesn't know about this yet.
        self._pending_workchains = 0
        self._pending_workchains_cond = threading.Condition()  # Protects self._pending_workchains.
        self._shutdown_hooks = []

    def add_shutdown_hook(self, hook):
        """Register a no-arg callable to run after the pool has fully shut down."""
        self._shutdown_hooks.append(hook)

    def submit_async_work(self, work, workunit_parent=None, on_success=None, on_failure=None):
        """Submit work to be executed in the background.

        - work: The work to execute.
        - workunit_parent: If specified, work is accounted for under this workunit.
        - on_success: If specified, a callable taking a single argument, which will be a list
          of return values of each invocation, in order. Called only if all work succeeded.
        - on_failure: If specified, a callable taking a single argument, which is an exception
          thrown in the work.

        Don't do work in on_success: not only will it block the result handling thread, but
        that thread is not a worker and doesn't have a logging context etc. Use it just to
        submit further work to the pool.
        """
        if work is None or len(work.args_tuples) == 0:  # map_async hangs on 0-length iterables.
            if on_success:
                on_success([])
        else:
            def do_work(*args):
                # Fix: propagate the invocation's return value so on_success
                # actually receives the list of results its contract promises
                # (previously it always received a list of Nones).
                return self._do_work(work.func, *args, workunit_name=work.workunit_name,
                                     workunit_parent=workunit_parent, on_failure=on_failure)
            self._pool.map_async(do_work, work.args_tuples, chunksize=1, callback=on_success)

    def submit_async_work_chain(self, work_chain, workunit_parent, done_hook=None):
        """Submit work to be executed in the background.

        - work_chain: An iterable of Work instances. Will be invoked serially. Each instance
          may have a different cardinality. There is no output-input chaining: the argument
          tuples must already be present in each work instance. If any work throws an
          exception no subsequent work in the chain will be attempted.
        - workunit_parent: Work is accounted for under this workunit.
        - done_hook: If not None, invoked with no args after all work is done, or on error.
        """
        def done():
            if done_hook:
                done_hook()
            with self._pending_workchains_cond:
                self._pending_workchains -= 1
                self._pending_workchains_cond.notify()

        def error(e):
            done()
            self._run_tracker.log(Report.ERROR, '%s' % e)

        # We filter out Nones defensively. There shouldn't be any, but if a bug causes one,
        # Pants might hang indefinitely without this filtering.
        work_iter = iter(filter(None, work_chain))

        def submit_next():
            try:
                # Fix: use the builtin next() instead of the Python 2-only
                # iterator method .next(), so this also runs on Python 3.
                self.submit_async_work(next(work_iter), workunit_parent=workunit_parent,
                                       on_success=lambda x: submit_next(), on_failure=error)
            except StopIteration:
                done()  # The success case.

        with self._pending_workchains_cond:
            self._pending_workchains += 1
        try:
            submit_next()
        except Exception as e:  # Handles errors in the submission code.
            done()
            self._run_tracker.log(Report.ERROR, '%s' % e)
            raise

    def submit_work_and_wait(self, work, workunit_parent=None):
        """Submit work to be executed on this pool, but wait for it to complete.

        - work: The work to execute.
        - workunit_parent: If specified, work is accounted for under this workunit.

        Returns a list of return values of each invocation, in order. Throws if
        any invocation does.
        """
        if work is None or len(work.args_tuples) == 0:  # map hangs on 0-length iterables.
            return []
        else:
            def do_work(*args):
                return self._do_work(work.func, *args, workunit_name=work.workunit_name,
                                     workunit_parent=workunit_parent)
            # We need to specify a timeout explicitly, because otherwise python ignores
            # SIGINT when waiting on a condition variable, so we won't be able to ctrl-c out.
            return self._pool.map_async(do_work, work.args_tuples, chunksize=1).get(timeout=1000000000)

    def _do_work(self, func, args_tuple, workunit_name, workunit_parent, on_failure=None):
        """Run one invocation, optionally inside a named workunit; reports failures."""
        try:
            if workunit_name:
                with self._run_tracker.new_workunit_under_parent(name=workunit_name,
                                                                 parent=workunit_parent):
                    return func(*args_tuple)
            else:
                return func(*args_tuple)
        except Exception as e:
            if on_failure:
                # Note that here the work's workunit is closed. So, e.g., it's OK to use
                # on_failure() to close an ancestor workunit.
                on_failure(e)
            raise

    def shutdown(self):
        """Block until all pending work chains finish, then drain the pool and run hooks."""
        with self._pending_workchains_cond:
            while self._pending_workchains > 0:
                self._pending_workchains_cond.wait()
        self._pool.close()
        self._pool.join()
        for hook in self._shutdown_hooks:
            hook()

    def abort(self):
        """Terminate the workers immediately, abandoning queued work."""
        self._pool.terminate()
class SubprocPool(object):
    """Singleton for managing multiprocessing.Pool instances

    Subprocesses (including multiprocessing.Pool workers) can inherit locks in poorly
    written libraries (eg zlib) if other threads in the parent process happen to be
    holding them at the moment the worker is fork()'ed. Thus it is important to create
    any subprocesses BEFORE starting any threads, or they may deadlock mysteriously
    when sent a particular piece of work.

    This is accomplished in pants by these initializing pools early, when creating the
    RunTracker.

    However, in tests, RunTrackers are created repeatedly, as part of creating Contexts
    that are used briefly and discarded. Creating a new subprocess pool every time is
    expensive, and will lead to os.fork failing once too many processes are spawned.

    To avoid this, the pools themselves are kept in this singleton and new RunTrackers
    re-use them.
    """
    # The shared pool instance and the lock guarding its creation/teardown.
    _pool = None
    _lock = threading.Lock()

    @staticmethod
    def worker_init():
        # Exit quietly on sigint, otherwise we get {num_procs} keyboardinterrupt
        # stacktraces spewn.
        signal.signal(signal.SIGINT, lambda *args: os._exit(0))

    @classmethod
    def foreground(cls):
        """Return the shared pool, lazily creating it on first use."""
        with cls._lock:
            if cls._pool is None:
                cls._pool = multiprocessing.Pool(initializer=SubprocPool.worker_init)
            return cls._pool

    @classmethod
    def shutdown(cls, force):
        """Dispose of the shared pool.

        force=True terminates outstanding work immediately; otherwise the pool
        is closed and drained before joining.  No-op when no pool exists.
        """
        with cls._lock:
            old, cls._pool = cls._pool, None
        if old:
            if force:
                old.terminate()
            else:
                old.close()
            old.join()
|
|
import utils.decisions_constants as log
from game.ai.strategies.main import BaseStrategy
from mahjong.constants import HONOR_INDICES, TERMINAL_INDICES
from mahjong.tile import TilesConverter
from mahjong.utils import is_honor, is_tile_strictly_isolated
from utils.test_helpers import tiles_to_string
class TanyaoStrategy(BaseStrategy):
    # Strategy for opening the hand towards tanyao (all simples): a hand with
    # no terminal (1/9) and no honor tiles anywhere.

    # Don't consider the strategy when we are further than 3 from tempai.
    min_shanten = 3

    # Tiles a tanyao hand may never contain (34-tile-format indices).
    not_suitable_tiles = TERMINAL_INDICES + HONOR_INDICES

    def get_open_hand_han(self):
        # Tanyao is still worth one han on an open hand.
        return 1

    def should_activate_strategy(self, tiles_136, meld_tile=None):
        """
        Tanyao hand is a hand without terminal and honor tiles, to achieve this
        we will use different approaches
        :return: boolean
        """
        result = super(TanyaoStrategy, self).should_activate_strategy(tiles_136)
        if not result:
            return False

        tiles = TilesConverter.to_34_array(self.player.tiles)
        closed_hand_34 = TilesConverter.to_34_array(self.player.closed_hand)

        # Tiles we could shed painlessly: strictly isolated tiles and honors.
        isolated_tiles = [
            x // 4 for x in self.player.tiles if is_tile_strictly_isolated(closed_hand_34, x // 4) or is_honor(x // 4)
        ]

        count_of_terminal_pon_sets = 0
        count_of_terminal_pairs = 0
        count_of_valued_pairs = 0
        count_of_not_suitable_tiles = 0
        count_of_not_suitable_not_isolated_tiles = 0
        for x in range(0, 34):
            tile = tiles[x]
            if not tile:
                continue

            if x in self.not_suitable_tiles and tile == 3:
                count_of_terminal_pon_sets += 1

            if x in self.not_suitable_tiles and tile == 2:
                count_of_terminal_pairs += 1

            if x in self.player.valued_honors:
                # NOTE(review): increments once per valued-honor kind present,
                # regardless of copies held -- confirm that is intended.
                count_of_valued_pairs += 1

            if x in self.not_suitable_tiles:
                count_of_not_suitable_tiles += tile

            if x in self.not_suitable_tiles and x not in isolated_tiles:
                count_of_not_suitable_not_isolated_tiles += tile

        # we have too many terminals and honors
        if count_of_not_suitable_tiles >= 5:
            return False

        # if we already have pon of honor\terminal tiles
        # we don't need to open hand for tanyao
        if count_of_terminal_pon_sets > 0:
            return False

        # with valued pair (yakuhai wind or dragon)
        # we don't need to go for tanyao
        if count_of_valued_pairs > 0:
            return False

        # one pair is ok in tanyao pair
        # but 2+ pairs can't be suitable
        if count_of_terminal_pairs > 1:
            return False

        # 3 or more not suitable tiles that
        # are not isolated is too much
        if count_of_not_suitable_not_isolated_tiles >= 3:
            return False

        # if we are 1 shanten, even 2 tiles
        # that are not suitable and not isolated
        # is too much
        if count_of_not_suitable_not_isolated_tiles >= 2 and self.player.ai.shanten == 1:
            return False

        # TODO: don't open from good 1-shanten into tanyao 1-shanten with same ukeire or worse

        # 123 and 789 indices for each of the three suits
        indices = [[0, 1, 2], [6, 7, 8], [9, 10, 11], [15, 16, 17], [18, 19, 20], [24, 25, 26]]

        for index_set in indices:
            first = tiles[index_set[0]]
            second = tiles[index_set[1]]
            third = tiles[index_set[2]]
            # a completed 123/789 sequence would have to be broken up -- skip tanyao
            if first >= 1 and second >= 1 and third >= 1:
                return False

        # if we have 2 or more non-central doras
        # we don't want to go for tanyao
        if self.dora_count_not_central >= 2:
            return False

        # if we have less than two central doras
        # let's not consider open tanyao
        if self.dora_count_central < 2:
            return False

        # if we have only two central doras let's
        # wait for 5th turn before opening our hand
        if self.dora_count_central == 2 and self.player.round_step < 5:
            return False

        return True

    def determine_what_to_discard(self, discard_options, hand, open_melds):
        """For an opened tanyao hand: force terminals/honors out first and,
        in tempai, avoid atodzuke (yaku-less) waits."""
        is_open_hand = len(open_melds) > 0

        # our hand is closed, we don't need to discard terminal tiles here
        if not is_open_hand:
            return discard_options

        first_option = sorted(discard_options, key=lambda x: x.shanten)[0]
        shanten = first_option.shanten

        if shanten > 1:
            return super(TanyaoStrategy, self).determine_what_to_discard(discard_options, hand, open_melds)

        results = []
        not_suitable_tiles = []
        for item in discard_options:
            if not self.is_tile_suitable(item.tile_to_discard_136):
                item.had_to_be_discarded = True
                not_suitable_tiles.append(item)
                continue

            # there is no sense to wait 1-4 if we have open hand
            # but let's only avoid atodzuke tiles in tempai, the rest will be dealt with in
            # generic logic
            if item.shanten == 0:
                # a wait is acceptable if it is a simple tile, or a dead wait (0 ukeire)
                all_waiting_are_fine = all(
                    [(self.is_tile_suitable(x * 4) or item.wait_to_ukeire[x] == 0) for x in item.waiting]
                )
                if all_waiting_are_fine:
                    results.append(item)

        if not_suitable_tiles:
            return not_suitable_tiles

        # we don't have a choice:
        # we have to stay on a bad wait
        if not results:
            return discard_options

        return results

    def is_tile_suitable(self, tile):
        """
        We can use only simples tiles (2-8) in any suit
        :param tile: 136 tiles format
        :return: True
        """
        tile //= 4
        return tile not in self.not_suitable_tiles

    def validate_meld(self, chosen_meld_dict):
        """Decide whether calling the chosen meld is worth opening the hand."""
        # if we have already opened our hand, let's go by default rules
        if self.player.is_open_hand:
            return True

        # check if base method requires us to keep hand closed
        if not super(TanyaoStrategy, self).validate_meld(chosen_meld_dict):
            return False

        # otherwise let's not open hand if that does not improve our ukeire
        closed_tiles_34 = TilesConverter.to_34_array(self.player.closed_hand)

        waiting, shanten = self.player.ai.hand_builder.calculate_waits(
            closed_tiles_34, closed_tiles_34, use_chiitoitsu=False
        )
        wait_to_ukeire = dict(
            zip(waiting, [self.player.ai.hand_builder.count_tiles([x], closed_tiles_34) for x in waiting])
        )
        old_ukeire = sum(wait_to_ukeire.values())
        selected_tile = chosen_meld_dict["discard_tile"]

        logger_context = {
            "hand": tiles_to_string(self.player.closed_hand),
            "meld": chosen_meld_dict,
            "old_shanten": shanten,
            "old_ukeire": old_ukeire,
            "new_shanten": selected_tile.shanten,
            "new_ukeire": selected_tile.ukeire,
        }

        if selected_tile.shanten > shanten:
            self.player.logger.debug(
                log.MELD_DEBUG, "Opening into tanyao increases number of shanten, let's not do that", logger_context
            )
            return False

        if selected_tile.shanten == shanten:
            if old_ukeire >= selected_tile.ukeire:
                self.player.logger.debug(
                    log.MELD_DEBUG,
                    "Opening into tanyao keeps same number of shanten and does not improve ukeire, let's not do that",
                    logger_context,
                )
                return False

            # relative ukeire improvement; a zero baseline counts every new
            # ukeire tile as a full improvement
            if old_ukeire != 0:
                improvement_percent = ((selected_tile.ukeire - old_ukeire) / old_ukeire) * 100
            else:
                improvement_percent = selected_tile.ukeire * 100

            if improvement_percent < 30:
                self.player.logger.debug(
                    log.MELD_DEBUG,
                    "Opening into tanyao keeps same number of shanten and ukeire improvement is low, don't open",
                    logger_context,
                )
                return False

            self.player.logger.debug(
                log.MELD_DEBUG,
                "Opening into tanyao keeps same number of shanten and ukeire improvement is good, let's call meld",
                logger_context,
            )
            return True

        self.player.logger.debug(
            log.MELD_DEBUG, "Opening into tanyao improves number of shanten, let's call meld", logger_context
        )
        return True
|
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
# Standard Ansible module metadata: community-supported, preview maturity.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_fm
short_description: Configure FM in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify system feature and fm category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
system_fm:
description:
- Configure FM.
default: null
type: dict
suboptions:
auto_backup:
description:
- Enable/disable automatic backup.
type: str
choices:
- enable
- disable
id:
description:
- ID.
type: str
ip:
description:
- IP address.
type: str
ipsec:
description:
- Enable/disable IPsec.
type: str
choices:
- enable
- disable
scheduled_config_restore:
description:
- Enable/disable scheduled configuration restore.
type: str
choices:
- enable
- disable
status:
description:
- Enable/disable FM.
type: str
choices:
- enable
- disable
vdom:
description:
- VDOM. Source system.vdom.name.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure FM.
fortios_system_fm:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
system_fm:
auto_backup: "enable"
id: "4"
ip: "<your_own_value>"
ipsec: "enable"
scheduled_config_restore: "enable"
status: "enable"
vdom: "<your_own_value> (source system.vdom.name)"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Authenticate against the FortiGate via the legacy fortiosapi client.

    Pulls host/username/password/ssl_verify from the module parameters,
    enables client debugging, toggles HTTPS according to the ``https``
    flag (defaulting to on), and performs the login.
    """
    host = data['host']
    username = data['username']
    password = data['password']
    ssl_verify = data['ssl_verify']

    fos.debug('on')
    use_https = data.get('https', True)
    fos.https('on' if use_https else 'off')

    fos.login(host, username, password, verify=ssl_verify)
def filter_system_fm_data(json):
    """Project the playbook-supplied dict onto the options the
    ``system fm`` endpoint accepts, dropping unset (None) values."""
    option_list = ['auto_backup', 'id', 'ip',
                   'ipsec', 'scheduled_config_restore', 'status',
                   'vdom']

    return dict((attribute, json[attribute])
                for attribute in option_list
                if attribute in json and json[attribute] is not None)
def underscore_to_hyphen(data):
    """Recursively rewrite dict keys from underscore_style to hyphen-style.

    The FortiOS REST API expects hyphenated attribute names, while the
    Ansible argument spec uses underscores.  Handles dicts, lists and
    scalars; scalars are returned unchanged.

    Bug fix: list elements are now actually converted.  The previous code
    only rebound the loop variable (``elem = underscore_to_hyphen(elem)``),
    so dicts nested inside lists were silently left with underscore keys.
    """
    if isinstance(data, list):
        return [underscore_to_hyphen(elem) for elem in data]
    elif isinstance(data, dict):
        new_data = {}
        for k, v in data.items():
            new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
        return new_data
    return data
def system_fm(data, fos):
    """Push the ``system fm`` configuration to the device.

    Filters the playbook parameters, converts key names to the API's
    hyphenated form, and issues a ``set`` against the system/fm endpoint
    in the requested vdom.
    """
    vdom = data['vdom']
    filtered_data = underscore_to_hyphen(
        filter_system_fm_data(data['system_fm']))

    return fos.set('system',
                   'fm',
                   data=filtered_data,
                   vdom=vdom)
def is_successful_status(status):
    """A call succeeded if FortiOS reports success, or if a DELETE came
    back 404 (the object was already gone -- the desired end state)."""
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_system(data, fos):
    """Dispatch to the system_fm handler and normalize the result into the
    (is_error, has_changed, response) triple main() expects.

    NOTE(review): if 'system_fm' is absent/None, ``resp`` is unbound and
    this raises -- same shape as the upstream generated modules.
    """
    if data['system_fm']:
        resp = system_fm(data, fos)

    is_error = not is_successful_status(resp)
    has_changed = resp['status'] == "success"
    return is_error, has_changed, resp
def main():
    """Module entry point: build the argument spec, connect to the device
    (HTTPAPI socket or legacy fortiosapi), apply the configuration and
    exit with changed/failed status."""
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "system_fm": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "auto_backup": {"required": False, "type": "str",
                                "choices": ["enable", "disable"]},
                "id": {"required": False, "type": "str"},
                "ip": {"required": False, "type": "str"},
                "ipsec": {"required": False, "type": "str",
                          "choices": ["enable", "disable"]},
                "scheduled_config_restore": {"required": False, "type": "str",
                                             "choices": ["enable", "disable"]},
                "status": {"required": False, "type": "str",
                           "choices": ["enable", "disable"]},
                "vdom": {"required": False, "type": "str"}
            }
        }
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)

    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None

    if not legacy_mode:
        # HTTPAPI transport: reuse the persistent connection Ansible opened.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)

            is_error, has_changed, result = fortios_system(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy transport: direct connection through the fortiosapi package.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")

        fos = FortiOSAPI()

        login(module.params, fos)
        is_error, has_changed, result = fortios_system(module.params, fos)
        fos.logout()

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
# Script entry point when executed directly by Ansible.
if __name__ == '__main__':
    main()
|
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from oslo_log import log as logging
from designate import exceptions
from designate import utils
from designate.api.v2.controllers import rest
from designate.objects import RecordSet
from designate.objects import RecordSetList
from designate.objects.adapters import DesignateAdapter
LOG = logging.getLogger(__name__)
class RecordSetsController(rest.RestController):
    """REST controller for /v2/zones/<zone_id>/recordsets endpoints."""

    # Columns clients may sort result pages by.
    SORT_KEYS = ['created_at', 'id', 'updated_at', 'zone_id', 'tenant_id',
                 'name', 'type', 'ttl', 'records']

    @pecan.expose(template='json:', content_type='application/json')
    @utils.validate_uuid('zone_id', 'recordset_id')
    def get_one(self, zone_id, recordset_id):
        """Get RecordSet"""
        request = pecan.request
        context = request.environ['context']

        return DesignateAdapter.render(
            'API_v2',
            self.central_api.get_recordset(
                context, zone_id, recordset_id),
            request=request)

    @pecan.expose(template='json:', content_type='application/json')
    @utils.validate_uuid('zone_id')
    def get_all(self, zone_id, **params):
        """List RecordSets"""
        request = pecan.request
        context = request.environ['context']

        # NOTE: We need to ensure the zone actually exists, otherwise we may
        # return deleted recordsets instead of a zone not found
        self.central_api.get_zone(context, zone_id)

        # Extract the pagination params
        marker, limit, sort_key, sort_dir = utils.get_paging_params(
            params, self.SORT_KEYS)

        # Extract any filter params.
        accepted_filters = (
            'name', 'type', 'ttl', 'data', 'status', 'description', )
        criterion = self._apply_filter_params(
            params, accepted_filters, {})
        criterion['zone_id'] = zone_id

        # Data must be filtered separately, through the Records table
        data = criterion.pop('data', None)
        status = criterion.pop('status', None)

        # Retrieve recordsets
        recordsets = self.central_api.find_recordsets(
            context, criterion, marker, limit, sort_key, sort_dir)

        # 'data' filter param: only return recordsets with matching data
        if data:
            records = self.central_api.find_records(
                context, criterion={'data': data, 'zone_id': zone_id})

            recordset_with_data_ids = set(record.recordset_id
                                          for record in records)

            new_rsets = RecordSetList()
            for recordset in recordsets:
                if recordset.id in recordset_with_data_ids:
                    new_rsets.append(recordset)
            recordsets = new_rsets
            # total_count reflects the whole filtered population, not the page
            recordsets.total_count = len(recordset_with_data_ids)

        # 'status' filter param: only return recordsets with matching status
        if status:
            new_rsets = RecordSetList()
            for recordset in recordsets:
                if recordset.status == status:
                    new_rsets.append(recordset)
            recordsets = new_rsets

        return DesignateAdapter.render('API_v2', recordsets, request=request)

    @pecan.expose(template='json:', content_type='application/json')
    @utils.validate_uuid('zone_id')
    def post_all(self, zone_id):
        """Create RecordSet"""
        request = pecan.request
        response = pecan.response
        context = request.environ['context']

        body = request.body_dict

        recordset = DesignateAdapter.parse('API_v2', body, RecordSet())

        recordset.validate()

        # SOA recordsets cannot be created manually
        if recordset.type == 'SOA':
            raise exceptions.BadRequest(
                "Creating a SOA recordset is not allowed")

        # Create the recordset
        recordset = self.central_api.create_recordset(
            context, zone_id, recordset)

        # Prepare the response headers
        # 202 while the change is still propagating; 201 once live
        if recordset['status'] == 'PENDING':
            response.status_int = 202
        else:
            response.status_int = 201

        recordset = DesignateAdapter.render(
            'API_v2', recordset, request=request)

        response.headers['Location'] = recordset['links']['self']

        # Prepare and return the response body
        return recordset

    @pecan.expose(template='json:', content_type='application/json')
    @utils.validate_uuid('zone_id', 'recordset_id')
    def put_one(self, zone_id, recordset_id):
        """Update RecordSet"""
        request = pecan.request
        context = request.environ['context']
        body = request.body_dict
        response = pecan.response

        # Fetch the existing recordset
        recordset = self.central_api.get_recordset(context, zone_id,
                                                   recordset_id)

        # TODO(graham): Move this further down the stack
        if recordset.managed and not context.edit_managed_records:
            raise exceptions.BadRequest('Managed records may not be updated')

        # SOA recordsets cannot be updated manually
        if recordset['type'] == 'SOA':
            raise exceptions.BadRequest(
                'Updating SOA recordsets is not allowed')

        # NS recordsets at the zone root cannot be manually updated
        if recordset['type'] == 'NS':
            zone = self.central_api.get_zone(context, zone_id)
            if recordset['name'] == zone['name']:
                raise exceptions.BadRequest(
                    'Updating a root zone NS record is not allowed')

        # Convert to APIv2 Format
        recordset = DesignateAdapter.parse('API_v2', body, recordset)

        recordset.validate()

        # Persist the resource
        recordset = self.central_api.update_recordset(context, recordset)

        # 202 while the change is still propagating; 200 once applied
        if recordset['status'] == 'PENDING':
            response.status_int = 202
        else:
            response.status_int = 200

        return DesignateAdapter.render('API_v2', recordset, request=request)

    @pecan.expose(template='json:', content_type='application/json')
    @utils.validate_uuid('zone_id', 'recordset_id')
    def delete_one(self, zone_id, recordset_id):
        """Delete RecordSet"""
        request = pecan.request
        response = pecan.response
        context = request.environ['context']

        # Fetch the existing recordset so type checks run before deletion
        recordset = self.central_api.get_recordset(context, zone_id,
                                                   recordset_id)
        if recordset['type'] == 'SOA':
            raise exceptions.BadRequest(
                'Deleting a SOA recordset is not allowed')

        recordset = self.central_api.delete_recordset(
            context, zone_id, recordset_id)
        # Deletion is asynchronous -- always report 202 Accepted
        response.status_int = 202

        return DesignateAdapter.render('API_v2', recordset, request=request)
|
|
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2014 TrilioData, Inc
# Copyright (c) 2015 EMC Corporation
# Copyright (C) 2015 Kevin Fox <kevin@efox.cc>
# Copyright (C) 2015 Tom Barron <tpb@dyncloud.net>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic base class to implement metadata, compression and chunked data
operations
"""
import abc
import hashlib
import json
import os
import eventlet
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
import six
from cinder.backup import driver
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import loopingcall
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)

# Driver-tunable option: which compression to apply to chunks before upload
# ('zlib'/'gzip', 'bz2'/'bzip2', or a "none" value to disable).
chunkedbackup_service_opts = [
    cfg.StrOpt('backup_compression_algorithm',
               default='zlib',
               help='Compression algorithm (None to disable)'),
]

CONF = cfg.CONF
CONF.register_opts(chunkedbackup_service_opts)
@six.add_metaclass(abc.ABCMeta)
class ChunkedBackupDriver(driver.BackupDriver):
    """Abstract chunked backup driver.

    Implements common functionality for backup drivers that store volume
    data in multiple "chunks" in a backup repository when the size of
    the backed up cinder volume exceeds the size of a backup repository
    "chunk."

    Provides abstract methods to be implemented in concrete chunking drivers.
    """
    # Version stamp written into every metadata object by _write_metadata().
    DRIVER_VERSION = '1.0.0'
    # Maps a metadata 'version' value to the restore method that handles it.
    DRIVER_VERSION_MAPPING = {'1.0.0': '_restore_v1'}
def _get_compressor(self, algorithm):
try:
if algorithm.lower() in ('none', 'off', 'no'):
return None
elif algorithm.lower() in ('zlib', 'gzip'):
import zlib as compressor
return compressor
elif algorithm.lower() in ('bz2', 'bzip2'):
import bz2 as compressor
return compressor
except ImportError:
pass
err = _('unsupported compression algorithm: %s') % algorithm
raise ValueError(err)
    def __init__(self, context, chunk_size_bytes, sha_block_size_bytes,
                 backup_default_container, enable_progress_timer,
                 db_driver=None):
        """Initialize chunk sizing and progress-notification settings.

        :param chunk_size_bytes: size of each object written to the backup
            repository; must be a multiple of sha_block_size_bytes (this is
            enforced later, in backup())
        :param sha_block_size_bytes: granularity of the SHA-256 change
            tracking used for incremental backups
        :param backup_default_container: container used when the backup
            record does not name one
        :param enable_progress_timer: when true, start a periodic progress
            notification timer during backup()
        """
        super(ChunkedBackupDriver, self).__init__(context, db_driver)
        self.chunk_size_bytes = chunk_size_bytes
        self.sha_block_size_bytes = sha_block_size_bytes
        self.backup_default_container = backup_default_container
        self.enable_progress_timer = enable_progress_timer
        # Remaining knobs come from global cinder configuration.
        self.backup_timer_interval = CONF.backup_timer_interval
        self.data_block_num = CONF.backup_object_number_per_notification
        self.az = CONF.storage_availability_zone
        self.backup_compression_algorithm = CONF.backup_compression_algorithm
        self.compressor = \
            self._get_compressor(CONF.backup_compression_algorithm)
    # To create your own "chunked" backup driver, implement the following
    # abstract methods.
    @abc.abstractmethod
    def put_container(self, container):
        """Create the container if needed. No failure if it pre-exists."""
        return
    @abc.abstractmethod
    def get_container_entries(self, container, prefix):
        """Get container entry names whose names start with prefix."""
        return
    @abc.abstractmethod
    def get_object_writer(self, container, object_name, extra_metadata=None):
        """Returns a writer object which stores the chunk data in backup repository.

        The object returned should be a context handler that can be used
        in a "with" context.
        """
        return
    @abc.abstractmethod
    def get_object_reader(self, container, object_name, extra_metadata=None):
        """Returns a reader object for the backed up chunk.

        Also used as a context manager (see _read_metadata/_restore_v1).
        """
        return
    @abc.abstractmethod
    def delete_object(self, container, object_name):
        """Delete object from container."""
        return
    @abc.abstractmethod
    def _generate_object_name_prefix(self, backup):
        # Prefix for every object name of this backup; stored in the
        # backup record as 'service_metadata'.
        return
    @abc.abstractmethod
    def update_container_name(self, backup, container):
        """This method exists so that sub-classes can override the container name

        as it comes in to the driver in the backup object. Implementations
        should return None if no change to the container name is desired.
        """
        return
    @abc.abstractmethod
    def get_extra_metadata(self, backup, volume):
        """This method allows for collection of extra metadata in prepare_backup()

        which will be passed to get_object_reader() and get_object_writer().
        Subclass extensions can use this extra information to optimize
        data transfers. Return a json serializable object.
        """
        return
    def _create_container(self, context, backup):
        """Resolve, persist and create the backup's container.

        Falls back to backup_default_container when no container name is
        resolved, updates the backup DB record, and asks the concrete
        driver to create the container.
        """
        backup_id = backup['id']
        # NOTE(review): update_container_name() documents that it returns
        # None when "no change is desired", but assigning that None here
        # discards any caller-supplied container and falls back to the
        # default below — confirm the concrete drivers echo the name back.
        backup['container'] = self.update_container_name(backup,
                                                         backup['container'])
        container = backup['container']
        LOG.debug('_create_container started, container: %(container)s,'
                  'backup: %(backup_id)s.',
                  {'container': container, 'backup_id': backup_id})
        if container is None:
            container = self.backup_default_container
        self.db.backup_update(context, backup_id, {'container': container})
        self.put_container(container)
        return container
def _generate_object_names(self, backup):
prefix = backup['service_metadata']
object_names = self.get_container_entries(backup['container'], prefix)
LOG.debug('generated object list: %s.', object_names)
return object_names
def _metadata_filename(self, backup):
object_name = backup['service_metadata']
filename = '%s_metadata' % object_name
return filename
def _sha256_filename(self, backup):
object_name = backup['service_metadata']
filename = '%s_sha256file' % object_name
return filename
    def _write_metadata(self, backup, volume_id, container, object_list,
                        volume_meta, extra_metadata=None):
        """Serialize backup bookkeeping to the '<prefix>_metadata' object.

        The written JSON is what _read_metadata()/restore() consume; its
        'version' key selects the restore routine.
        """
        filename = self._metadata_filename(backup)
        LOG.debug('_write_metadata started, container name: %(container)s,'
                  ' metadata filename: %(filename)s.',
                  {'container': container, 'filename': filename})
        metadata = {}
        metadata['version'] = self.DRIVER_VERSION
        metadata['backup_id'] = backup['id']
        metadata['volume_id'] = volume_id
        metadata['backup_name'] = backup['display_name']
        metadata['backup_description'] = backup['display_description']
        # NOTE(review): str() here vs six.text_type() in _write_sha256file —
        # equivalent for ASCII timestamps, but inconsistent.
        metadata['created_at'] = str(backup['created_at'])
        metadata['objects'] = object_list
        metadata['parent_id'] = backup['parent_id']
        metadata['volume_meta'] = volume_meta
        if extra_metadata:
            metadata['extra_metadata'] = extra_metadata
        metadata_json = json.dumps(metadata, sort_keys=True, indent=2)
        with self.get_object_writer(container, filename) as writer:
            writer.write(metadata_json)
        LOG.debug('_write_metadata finished. Metadata: %s.', metadata_json)
    def _write_sha256file(self, backup, volume_id, container, sha256_list):
        """Serialize the per-block SHA-256 list used by incremental backups.

        A later incremental backup compares its freshly computed hashes
        against this file (see backup()) to find changed extents.
        """
        filename = self._sha256_filename(backup)
        LOG.debug('_write_sha256file started, container name: %(container)s,'
                  ' sha256file filename: %(filename)s.',
                  {'container': container, 'filename': filename})
        sha256file = {}
        sha256file['version'] = self.DRIVER_VERSION
        sha256file['backup_id'] = backup['id']
        sha256file['volume_id'] = volume_id
        sha256file['backup_name'] = backup['display_name']
        sha256file['backup_description'] = backup['display_description']
        sha256file['created_at'] = six.text_type(backup['created_at'])
        # chunk_size records the hashing granularity; a mismatch on a later
        # incremental backup forces a full backup.
        sha256file['chunk_size'] = self.sha_block_size_bytes
        sha256file['sha256s'] = sha256_list
        sha256file_json = json.dumps(sha256file, sort_keys=True, indent=2)
        with self.get_object_writer(container, filename) as writer:
            writer.write(sha256file_json)
        LOG.debug('_write_sha256file finished.')
def _read_metadata(self, backup):
container = backup['container']
filename = self._metadata_filename(backup)
LOG.debug('_read_metadata started, container name: %(container)s, '
'metadata filename: %(filename)s.',
{'container': container, 'filename': filename})
with self.get_object_reader(container, filename) as reader:
metadata_json = reader.read()
metadata = json.loads(metadata_json)
LOG.debug('_read_metadata finished. Metadata: %s.', metadata_json)
return metadata
def _read_sha256file(self, backup):
container = backup['container']
filename = self._sha256_filename(backup)
LOG.debug('_read_sha256file started, container name: %(container)s, '
'sha256 filename: %(filename)s.',
{'container': container, 'filename': filename})
with self.get_object_reader(container, filename) as reader:
sha256file_json = reader.read()
sha256file = json.loads(sha256file_json)
LOG.debug('_read_sha256file finished (%s).', sha256file)
return sha256file
    def _prepare_backup(self, backup):
        """Prepare the backup process and return the backup metadata.

        Validates the volume, creates the container, generates and
        persists the object-name prefix, and builds the mutable
        bookkeeping dicts threaded through _backup_chunk().

        :returns: (object_meta, object_sha256, extra_metadata, container,
                   volume_size_bytes)
        :raises InvalidVolume: if the volume size is not positive
        """
        backup_id = backup['id']
        volume_id = backup['volume_id']
        volume = self.db.volume_get(self.context, volume_id)
        if volume['size'] <= 0:
            err = _('volume size %d is invalid.') % volume['size']
            raise exception.InvalidVolume(reason=err)
        container = self._create_container(self.context, backup)
        object_prefix = self._generate_object_name_prefix(backup)
        backup['service_metadata'] = object_prefix
        self.db.backup_update(self.context, backup_id, {'service_metadata':
                                                        object_prefix})
        volume_size_bytes = volume['size'] * units.Gi
        availability_zone = self.az
        LOG.debug('starting backup of volume: %(volume_id)s,'
                  ' volume size: %(volume_size_bytes)d, object names'
                  ' prefix %(object_prefix)s, availability zone:'
                  ' %(availability_zone)s',
                  {
                      'volume_id': volume_id,
                      'volume_size_bytes': volume_size_bytes,
                      'object_prefix': object_prefix,
                      'availability_zone': availability_zone,
                  })
        # 'id' is the 1-based index of the next chunk object to write;
        # 'list' accumulates one {name: {offset, length, ...}} per chunk.
        object_meta = {'id': 1, 'list': [], 'prefix': object_prefix,
                       'volume_meta': None}
        object_sha256 = {'id': 1, 'sha256s': [], 'prefix': object_prefix}
        extra_metadata = self.get_extra_metadata(backup, volume)
        if extra_metadata is not None:
            object_meta['extra_metadata'] = extra_metadata
        return (object_meta, object_sha256, extra_metadata, container,
                volume_size_bytes)
    def _backup_chunk(self, backup, container, data, data_offset,
                      object_meta, extra_metadata):
        """Backup data chunk based on the object metadata and offset.

        Writes one (optionally compressed) object to the repository and
        records its offset/length/compression/md5 in object_meta, which is
        mutated in place (the chunk counter is advanced).
        """
        object_prefix = object_meta['prefix']
        object_list = object_meta['list']
        object_id = object_meta['id']
        object_name = '%s-%05d' % (object_prefix, object_id)
        obj = {}
        obj[object_name] = {}
        obj[object_name]['offset'] = data_offset
        obj[object_name]['length'] = len(data)
        LOG.debug('Backing up chunk of data from volume.')
        algorithm, output_data = self._prepare_output_data(data)
        obj[object_name]['compression'] = algorithm
        LOG.debug('About to put_object')
        with self.get_object_writer(
                container, object_name, extra_metadata=extra_metadata
        ) as writer:
            writer.write(output_data)
        # The md5 is computed over the *uncompressed* data.
        md5 = hashlib.md5(data).hexdigest()
        obj[object_name]['md5'] = md5
        LOG.debug('backup MD5 for %(object_name)s: %(md5)s',
                  {'object_name': object_name, 'md5': md5})
        object_list.append(obj)
        object_id += 1
        object_meta['list'] = object_list
        object_meta['id'] = object_id
        LOG.debug('Calling eventlet.sleep(0)')
        # Yield to other greenthreads between chunks.
        eventlet.sleep(0)
def _prepare_output_data(self, data):
if self.compressor is None:
return 'none', data
data_size_bytes = len(data)
compressed_data = self.compressor.compress(data)
comp_size_bytes = len(compressed_data)
algorithm = CONF.backup_compression_algorithm.lower()
if comp_size_bytes >= data_size_bytes:
LOG.debug('Compression of this chunk was ineffective: '
'original length: %(data_size_bytes)d, '
'compressed length: %(compressed_size_bytes)d. '
'Using original data for this chunk.',
{'data_size_bytes': data_size_bytes,
'comp_size_bytes': comp_size_bytes,
})
return 'none', data
LOG.debug('Compressed %(data_size_bytes)d bytes of data '
'to %(comp_size_bytes)d bytes using %(algorithm)s.',
{'data_size_bytes': data_size_bytes,
'comp_size_bytes': comp_size_bytes,
'algorithm': algorithm,
})
return algorithm, compressed_data
    def _finalize_backup(self, backup, container, object_meta, object_sha256):
        """Write the backup's metadata to the backup repository.

        Persists both the SHA-256 block list (for future incrementals)
        and the JSON metadata object, then records the chunk count in
        the backup DB row.
        """
        object_list = object_meta['list']
        object_id = object_meta['id']
        volume_meta = object_meta['volume_meta']
        sha256_list = object_sha256['sha256s']
        extra_metadata = object_meta.get('extra_metadata')
        self._write_sha256file(backup,
                               backup['volume_id'],
                               container,
                               sha256_list)
        self._write_metadata(backup,
                             backup['volume_id'],
                             container,
                             object_list,
                             volume_meta,
                             extra_metadata)
        # object_meta['id'] is the *next* chunk index, i.e. chunks written + 1.
        self.db.backup_update(self.context, backup['id'],
                              {'object_count': object_id})
        LOG.debug('backup %s finished.', backup['id'])
    def _backup_metadata(self, backup, object_meta):
        """Backup volume metadata.

        NOTE(dosaboy): the metadata we are backing up is obtained from a
                       versioned api so we should not alter it in any way here.
                       We must also be sure that the service that will perform
                       the restore is compatible with version used.
        """
        # get_metadata() is provided by the base BackupDriver.
        json_meta = self.get_metadata(backup['volume_id'])
        if not json_meta:
            LOG.debug("No volume metadata to backup.")
            return
        # Stored verbatim; written out later by _finalize_backup().
        object_meta["volume_meta"] = json_meta
def _send_progress_end(self, context, backup, object_meta):
object_meta['backup_percent'] = 100
volume_utils.notify_about_backup_usage(context,
backup,
"createprogress",
extra_usage_info=
object_meta)
def _send_progress_notification(self, context, backup, object_meta,
total_block_sent_num, total_volume_size):
backup_percent = total_block_sent_num * 100 / total_volume_size
object_meta['backup_percent'] = backup_percent
volume_utils.notify_about_backup_usage(context,
backup,
"createprogress",
extra_usage_info=
object_meta)
    def backup(self, backup, volume_file, backup_metadata=True):
        """Backup the given volume.

        If backup['parent_id'] is given, then an incremental backup
        is performed: only extents whose SHA-256 block hashes differ
        from the parent backup's recorded hashes are written.

        :param volume_file: open file-like object positioned at the start
            of the volume data
        :param backup_metadata: also back up the volume's metadata
        :raises InvalidBackup: on bad chunk/hash-block sizing, a hash
            block size change, or volume growth since the parent backup
        """
        if self.chunk_size_bytes % self.sha_block_size_bytes:
            err = _('Chunk size is not multiple of '
                    'block size for creating hash.')
            raise exception.InvalidBackup(reason=err)
        # Read the shafile of the parent backup if backup['parent_id']
        # is given.
        parent_backup_shafile = None
        parent_backup = None
        if backup['parent_id']:
            parent_backup = self.db.backup_get(self.context,
                                               backup['parent_id'])
            parent_backup_shafile = self._read_sha256file(parent_backup)
            parent_backup_shalist = parent_backup_shafile['sha256s']
            if (parent_backup_shafile['chunk_size'] !=
                    self.sha_block_size_bytes):
                err = (_('Hash block size has changed since the last '
                         'backup. New hash block size: %(new)s. Old hash '
                         'block size: %(old)s. Do a full backup.')
                       % {'old': parent_backup_shafile['chunk_size'],
                          'new': self.sha_block_size_bytes})
                raise exception.InvalidBackup(reason=err)
            # If the volume size increased since the last backup, fail
            # the incremental backup and ask user to do a full backup.
            if backup['size'] > parent_backup['size']:
                err = _('Volume size increased since the last '
                        'backup. Do a full backup.')
                raise exception.InvalidBackup(reason=err)
        (object_meta, object_sha256, extra_metadata, container,
         volume_size_bytes) = self._prepare_backup(backup)
        counter = 0
        total_block_sent_num = 0
        # There are two mechanisms to send the progress notification.
        # 1. The notifications are periodically sent in a certain interval.
        # 2. The notifications are sent after a certain number of chunks.
        # Both of them are working simultaneously during the volume backup,
        # when swift is taken as the backup backend.
        def _notify_progress():
            self._send_progress_notification(self.context, backup,
                                             object_meta,
                                             total_block_sent_num,
                                             volume_size_bytes)
        timer = loopingcall.FixedIntervalLoopingCall(
            _notify_progress)
        if self.enable_progress_timer:
            timer.start(interval=self.backup_timer_interval)
        sha256_list = object_sha256['sha256s']
        # shaindex walks the parent backup's hash list in lockstep with the
        # blocks of the current volume.
        shaindex = 0
        while True:
            data_offset = volume_file.tell()
            data = volume_file.read(self.chunk_size_bytes)
            # NOTE(review): Python 2 str comparison; under Python 3 a binary
            # stream returns b'' at EOF and this would loop forever —
            # confirm before porting.
            if data == '':
                break
            # Calculate new shas with the datablock.
            shalist = []
            off = 0
            datalen = len(data)
            while off < datalen:
                chunk_start = off
                chunk_end = chunk_start + self.sha_block_size_bytes
                if chunk_end > datalen:
                    chunk_end = datalen
                chunk = data[chunk_start:chunk_end]
                sha = hashlib.sha256(chunk).hexdigest()
                shalist.append(sha)
                off += self.sha_block_size_bytes
            sha256_list.extend(shalist)
            # If parent_backup is not None, that means an incremental
            # backup will be performed.
            if parent_backup:
                # Find the extent that needs to be backed up.
                extent_off = -1
                for idx, sha in enumerate(shalist):
                    if sha != parent_backup_shalist[shaindex]:
                        if extent_off == -1:
                            # Start of new extent.
                            extent_off = idx * self.sha_block_size_bytes
                    else:
                        if extent_off != -1:
                            # We've reached the end of extent.
                            extent_end = idx * self.sha_block_size_bytes
                            segment = data[extent_off:extent_end]
                            self._backup_chunk(backup, container, segment,
                                               data_offset + extent_off,
                                               object_meta,
                                               extra_metadata)
                            extent_off = -1
                    shaindex += 1
                # The last extent extends to the end of data buffer.
                if extent_off != -1:
                    extent_end = datalen
                    segment = data[extent_off:extent_end]
                    self._backup_chunk(backup, container, segment,
                                       data_offset + extent_off,
                                       object_meta, extra_metadata)
                    extent_off = -1
            else:  # Do a full backup.
                self._backup_chunk(backup, container, data, data_offset,
                                   object_meta, extra_metadata)
            # Notifications
            # NOTE(review): this adds data_block_num per chunk rather than 1,
            # which looks odd for a "blocks sent" counter — confirm against
            # the notification consumers before changing.
            total_block_sent_num += self.data_block_num
            counter += 1
            if counter == self.data_block_num:
                # Send the notification to Ceilometer when the chunk
                # number reaches the data_block_num.  The backup percentage
                # is put in the metadata as the extra information.
                self._send_progress_notification(self.context, backup,
                                                 object_meta,
                                                 total_block_sent_num,
                                                 volume_size_bytes)
                # Reset the counter
                counter = 0
        # Stop the timer.
        timer.stop()
        # All the data have been sent, the backup_percent reaches 100.
        self._send_progress_end(self.context, backup, object_meta)
        object_sha256['sha256s'] = sha256_list
        if backup_metadata:
            try:
                self._backup_metadata(backup, object_meta)
            # Whatever goes wrong, we want to log, cleanup, and re-raise.
            except Exception as err:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE("Backup volume metadata failed: %s."),
                                  err)
                    self.delete(backup)
        self._finalize_backup(backup, container, object_meta, object_sha256)
    def _restore_v1(self, backup, volume_id, metadata, volume_file):
        """Restore a v1 volume backup.

        Verifies that the repository's object list matches the recorded
        metadata, then writes each (optionally compressed) chunk back to
        volume_file at its recorded offset.

        :raises InvalidBackup: when the repository contents disagree with
            the stored metadata
        """
        backup_id = backup['id']
        LOG.debug('v1 volume backup restore of %s started.', backup_id)
        extra_metadata = metadata.get('extra_metadata')
        container = backup['container']
        metadata_objects = metadata['objects']
        # NOTE(review): Python 2 only — dict.keys() must be a list for
        # sum(..., []) and the .keys()[0] / .values()[0] below.
        metadata_object_names = sum((obj.keys() for obj in metadata_objects),
                                    [])
        LOG.debug('metadata_object_names = %s.', metadata_object_names)
        # The metadata and sha256 bookkeeping objects are not data chunks.
        prune_list = [self._metadata_filename(backup),
                      self._sha256_filename(backup)]
        object_names = [object_name for object_name in
                        self._generate_object_names(backup)
                        if object_name not in prune_list]
        if sorted(object_names) != sorted(metadata_object_names):
            err = _('restore_backup aborted, actual object list '
                    'does not match object list stored in metadata.')
            raise exception.InvalidBackup(reason=err)
        for metadata_object in metadata_objects:
            # Each entry is a single-key dict: {object_name: {offset, ...}}.
            object_name = metadata_object.keys()[0]
            LOG.debug('restoring object. backup: %(backup_id)s, '
                      'container: %(container)s, object name: '
                      '%(object_name)s, volume: %(volume_id)s.',
                      {
                          'backup_id': backup_id,
                          'container': container,
                          'object_name': object_name,
                          'volume_id': volume_id,
                      })
            with self.get_object_reader(
                    container, object_name,
                    extra_metadata=extra_metadata) as reader:
                body = reader.read()
            compression_algorithm = metadata_object[object_name]['compression']
            decompressor = self._get_compressor(compression_algorithm)
            # Seek to the chunk's original offset so incremental restores
            # patch the right region.
            volume_file.seek(metadata_object.values()[0]['offset'])
            if decompressor is not None:
                LOG.debug('decompressing data using %s algorithm',
                          compression_algorithm)
                decompressed = decompressor.decompress(body)
                volume_file.write(decompressed)
            else:
                volume_file.write(body)
            # force flush every write to avoid long blocking write on close
            volume_file.flush()
            # Be tolerant to IO implementations that do not support fileno()
            try:
                fileno = volume_file.fileno()
            except IOError:
                LOG.info(_LI("volume_file does not support "
                             "fileno() so skipping "
                             "fsync()"))
            else:
                os.fsync(fileno)
            # Restoring a backup to a volume can take some time. Yield so other
            # threads can run, allowing for among other things the service
            # status to be updated
            eventlet.sleep(0)
        LOG.debug('v1 volume backup restore of %s finished.',
                  backup_id)
    def restore(self, backup, volume_id, volume_file):
        """Restore the given volume backup from backup repository.

        For an incremental backup, first restores the full (root) backup
        and then layers each incremental on top of it, oldest first.

        :raises InvalidBackup: when the metadata version is unsupported
        :raises BackupOperationError: when volume metadata cannot be
            restored due to an incompatible version
        """
        backup_id = backup['id']
        container = backup['container']
        object_prefix = backup['service_metadata']
        LOG.debug('starting restore of backup %(object_prefix)s '
                  'container: %(container)s, to volume %(volume_id)s, '
                  'backup: %(backup_id)s.',
                  {
                      'object_prefix': object_prefix,
                      'container': container,
                      'volume_id': volume_id,
                      'backup_id': backup_id,
                  })
        metadata = self._read_metadata(backup)
        metadata_version = metadata['version']
        LOG.debug('Restoring backup version %s', metadata_version)
        try:
            # getattr(self, None) raises TypeError for an unknown version.
            restore_func = getattr(self, self.DRIVER_VERSION_MAPPING.get(
                metadata_version))
        except TypeError:
            err = (_('No support to restore backup version %s')
                   % metadata_version)
            raise exception.InvalidBackup(reason=err)
        # Build a list of backups based on parent_id. A full backup
        # will be the last one in the list.
        backup_list = []
        backup_list.append(backup)
        current_backup = backup
        while current_backup['parent_id']:
            prev_backup = (self.db.backup_get(
                self.context, current_backup['parent_id']))
            backup_list.append(prev_backup)
            current_backup = prev_backup
        # Do a full restore first, then layer the incremental backups
        # on top of it in order.
        index = len(backup_list) - 1
        while index >= 0:
            backup1 = backup_list[index]
            index = index - 1
            metadata = self._read_metadata(backup1)
            restore_func(backup1, volume_id, metadata, volume_file)
            volume_meta = metadata.get('volume_meta', None)
            try:
                if volume_meta:
                    self.put_metadata(volume_id, volume_meta)
                else:
                    LOG.debug("No volume metadata in this backup.")
            except exception.BackupMetadataUnsupportedVersion:
                msg = _("Metadata restore failed due to incompatible version.")
                LOG.error(msg)
                raise exception.BackupOperationError(msg)
        LOG.debug('restore %(backup_id)s to %(volume_id)s finished.',
                  {'backup_id': backup_id, 'volume_id': volume_id})
    def delete(self, backup):
        """Delete the given backup.

        Best-effort: a failure to list the container's objects is logged
        and the delete continues (there may simply be nothing to remove).
        """
        container = backup['container']
        LOG.debug('delete started, backup: %(id)s, container: %(cont)s, '
                  'prefix: %(pre)s.',
                  {'id': backup['id'],
                   'cont': container,
                   'pre': backup['service_metadata']})
        if container is not None:
            object_names = []
            try:
                object_names = self._generate_object_names(backup)
            except Exception:
                LOG.warning(_LW('swift error while listing objects, continuing'
                                ' with delete.'))
            for object_name in object_names:
                self.delete_object(container, object_name)
                LOG.debug('deleted object: %(object_name)s'
                          ' in container: %(container)s.',
                          {
                              'object_name': object_name,
                              'container': container
                          })
                # Deleting a backup's objects can take some time.
                # Yield so other threads can run
                eventlet.sleep(0)
        LOG.debug('delete %s finished.', backup['id'])
|
|
import ast
import json
import threading
import os
import shutil
from copy import deepcopy
from util import user_dir, print_error, print_msg, print_stderr, PrintError
from bitcoin import MAX_FEE_RATE, FEE_TARGETS
SYSTEM_CONFIG_PATH = "/etc/electron-cash.conf"
config = None
def get_config():
    """Return the process-wide SimpleConfig singleton (None before set_config)."""
    global config
    return config
def set_config(c):
    """Install *c* as the process-wide SimpleConfig singleton."""
    global config
    config = c
class SimpleConfig(PrintError):
    """
    The SimpleConfig class is responsible for handling operations involving
    configuration files.

    There are 3 different sources of possible configuration values:
        1. Command line options.
        2. User configuration (in the user's config directory)
        3. System configuration (in /etc/)
    They are taken in order (1. overrides config options set in 2., that
    override config set in 3.)
    """
    # NOTE(review): the mutable default `options={}` is safe here only
    # because it is deepcopy'd below and never mutated — still an
    # anti-pattern; prefer options=None.
    def __init__(self, options={}, read_system_config_function=None,
                 read_user_config_function=None, read_user_dir_function=None):
        """Build the layered config; the *_function params exist for tests."""
        # This lock needs to be acquired for updating and reading the config in
        # a thread-safe way.
        self.lock = threading.RLock()
        # Dynamic fee estimates, populated externally (keyed by conf target).
        self.fee_estimates = {}
        # The following two functions are there for dependency injection when
        # testing.
        if read_system_config_function is None:
            read_system_config_function = read_system_config
        if read_user_config_function is None:
            read_user_config_function = read_user_config
        if read_user_dir_function is None:
            self.user_dir = user_dir
        else:
            self.user_dir = read_user_dir_function
        # The command line options
        self.cmdline_options = deepcopy(options)
        # Portable wallets don't use a system config
        if self.cmdline_options.get('portable', False):
            self.system_config = {}
        else:
            self.system_config = read_system_config_function()
        # Set self.path and read the user config
        self.user_config = {}  # for self.get in electrum_path()
        self.path = self.electrum_path()
        self.user_config = read_user_config_function(self.path)
        # Upgrade obsolete keys
        self.fixup_keys({'auto_cycle': 'auto_connect'})
        # Make a singleton instance of 'self'
        set_config(self)
    def electrum_path(self):
        """Return (and create if needed) the data directory path."""
        # Read electrum_cash_path from command line / system configuration
        # Otherwise use the user's default data directory.
        path = self.get('electron_cash_path')
        if path is None:
            path = self.user_dir()
        # Testnet data lives in a 'testnet' subdirectory.
        if self.get('testnet'):
            path = os.path.join(path, 'testnet')
        # Make directory if it does not yet exist.
        if not os.path.exists(path):
            if os.path.islink(path):
                raise BaseException('Dangling link: ' + path)
            self.print_error("Making directory {}".format(path))
            os.makedirs(path)
        # Clean up a file format no longer used.
        obsolete_file = os.path.join(path, 'recent_servers')
        if os.path.exists(obsolete_file):
            os.remove(obsolete_file)
        self.print_error("electron-cash directory", path)
        return path
    def fixup_config_keys(self, config, keypairs):
        """Rename old_key -> new_key in *config*; return True if changed.

        The new key is only written when it is not already present; the
        old key is removed either way.
        """
        updated = False
        # NOTE: Python 2 — dict.iteritems().
        for old_key, new_key in keypairs.iteritems():
            if old_key in config:
                if not new_key in config:
                    config[new_key] = config[old_key]
                del config[old_key]
                updated = True
        return updated
    def fixup_keys(self, keypairs):
        '''Migrate old key names to new ones'''
        self.fixup_config_keys(self.cmdline_options, keypairs)
        self.fixup_config_keys(self.system_config, keypairs)
        # Only the user config is persisted, and only when it changed.
        if self.fixup_config_keys(self.user_config, keypairs):
            self.save_user_config()
def set_key(self, key, value, save = True):
if not self.is_modifiable(key):
print_stderr("Warning: not changing config key '%s' set on the command line" % key)
return
with self.lock:
self.user_config[key] = value
if save:
self.save_user_config()
return
def get(self, key, default=None):
with self.lock:
out = self.cmdline_options.get(key)
if out is None:
out = self.user_config.get(key)
if out is None:
out = self.system_config.get(key, default)
return out
def is_modifiable(self, key):
return not key in self.cmdline_options
def save_user_config(self):
if not self.path:
return
path = os.path.join(self.path, "config")
s = json.dumps(self.user_config, indent=4, sort_keys=True)
f = open(path, "w")
f.write(s)
f.close()
if 'ANDROID_DATA' not in os.environ:
import stat
os.chmod(path, stat.S_IREAD | stat.S_IWRITE)
    def get_wallet_path(self):
        """Set the path of the wallet."""
        # command line -w option
        if self.get('wallet_path'):
            return os.path.join(self.get('cwd'), self.get('wallet_path'))
        # path in config file
        path = self.get('default_wallet_path')
        if path and os.path.exists(path):
            return path
        # default path
        dirpath = os.path.join(self.path, "wallets")
        if not os.path.exists(dirpath):
            if os.path.islink(dirpath):
                raise BaseException('Dangling link: ' + dirpath)
            os.mkdir(dirpath)
        new_path = os.path.join(self.path, "wallets", "default_wallet")
        # default path in pre 1.9 versions
        old_path = os.path.join(self.path, "electrum.dat")
        # One-time migration of the legacy wallet file location.
        if os.path.exists(old_path) and not os.path.exists(new_path):
            os.rename(old_path, new_path)
        return new_path
def remove_from_recently_open(self, filename):
recent = self.get('recently_open', [])
if filename in recent:
recent.remove(filename)
self.set_key('recently_open', recent)
    def set_session_timeout(self, seconds):
        """Persist the session timeout (in seconds)."""
        self.print_error("session timeout -> %d seconds" % seconds)
        self.set_key('session_timeout', seconds)
    def get_session_timeout(self):
        """Session timeout in seconds (default 300)."""
        return self.get('session_timeout', 300)
    def open_last_wallet(self):
        """Default to the last GUI wallet when -w was not given and it exists."""
        if self.get('wallet_path') is None:
            last_wallet = self.get('gui_last_wallet')
            if last_wallet is not None and os.path.exists(last_wallet):
                # Injected as a cmdline option so it takes top priority.
                self.cmdline_options['default_wallet_path'] = last_wallet
    def save_last_wallet(self, wallet):
        """Remember the wallet's storage path unless -w pinned one."""
        if self.get('wallet_path') is None:
            path = wallet.storage.path
            self.set_key('gui_last_wallet', path)
def max_fee_rate(self):
f = self.get('max_fee_rate', MAX_FEE_RATE)
if f==0:
f = MAX_FEE_RATE
return f
    def dynfee(self, i):
        """Dynamic fee estimate for slider position i (0..4), or None.

        Positions 0-3 map through FEE_TARGETS into fee_estimates;
        position 4 takes the estimate keyed 2 (presumably a 2-block
        confirmation target — confirm) increased by half.
        """
        if i < 4:
            j = FEE_TARGETS[i]
            fee = self.fee_estimates.get(j)
        else:
            assert i == 4
            fee = self.fee_estimates.get(2)
            if fee is not None:
                fee += fee/2
        if fee is not None:
            # Cap at five times the maximum static fee rate.
            fee = min(5*MAX_FEE_RATE, fee)
        return fee
    def reverse_dynfee(self, fee_per_kb):
        """Return the estimate target whose fee is closest to fee_per_kb.

        Returns -1 to mean "below even half of the slowest estimate".
        NOTE: Python 2 — dict.items() and map() return lists here.
        """
        import operator
        l = self.fee_estimates.items() + [(1, self.dynfee(4))]
        dist = map(lambda x: (x[0], abs(x[1] - fee_per_kb)), l)
        min_target, min_value = min(dist, key=operator.itemgetter(1))
        # NOTE(review): .get(25) returns None when that estimate is missing,
        # making the division raise — presumably fee_estimates is always
        # fully populated before this is called; confirm.
        if fee_per_kb < self.fee_estimates.get(25)/2:
            min_target = -1
        return min_target
def has_fee_estimates(self):
return len(self.fee_estimates)==4
    def fee_per_kb(self):
        """Static fee per kB; defaults to half of max_fee_rate()."""
        return self.get('fee_per_kb', self.max_fee_rate()/2)
def get_video_device(self):
device = self.get("video_device", "default")
if device == 'default':
device = ''
return device
# NOTE: Python 2 module — the `print` statement below is py2-only syntax.
def read_system_config(path=SYSTEM_CONFIG_PATH):
    """Parse and return the system config settings in /etc/electrum.conf."""
    result = {}
    if os.path.exists(path):
        try:
            import ConfigParser
        except ImportError:
            print "cannot parse electron-cash.conf. please install ConfigParser"
            # NOTE(review): returns None here instead of {} — callers store
            # this as self.system_config; confirm intended.
            return
        p = ConfigParser.ConfigParser()
        try:
            p.read(path)
            # Only the [client] section is honoured.
            for k, v in p.items('client'):
                result[k] = v
        except (ConfigParser.NoSectionError, ConfigParser.MissingSectionHeaderError):
            pass
    return result
def read_user_config(path):
    """Parse and return the user config stored as JSON in <path>/config.

    Returns {} when path is falsy, the file is missing or unreadable,
    the contents are not valid JSON, or the JSON value is not an object.
    """
    if not path:
        return {}
    config_path = os.path.join(path, "config")
    if not os.path.exists(config_path):
        return {}
    try:
        with open(config_path, "r") as f:
            data = f.read()
        result = json.loads(data)
    # Narrowed from a bare `except:`: only I/O failures and malformed
    # JSON (json raises ValueError / its subclass JSONDecodeError) are
    # "cannot read config"; anything else should surface.
    except (IOError, OSError, ValueError):
        print_error("Warning: Cannot read config file.", config_path)
        return {}
    if not isinstance(result, dict):
        return {}
    return result
|
|
"""
Introduce some basic refactoring functions to |jedi|. This module is still in a
very early development stage and needs much testing and improvement.
.. warning:: I won't do too much here, but if anyone wants to step in, please
do. Refactoring is none of my priorities
It uses the |jedi| `API <plugin-api.html>`_ and supports currently the
following functions (sometimes bug-prone):
- rename
- extract variable
- inline variable
"""
import difflib
from jedi import common
from jedi.evaluate import helpers
from jedi.parser import representation as pr
class Refactoring(object):
    def __init__(self, change_dct):
        """
        :param change_dct: dict(old_path=(new_path, old_lines, new_lines))
        """
        self.change_dct = change_dct

    def old_files(self):
        """Map each original path to its original (pre-change) content."""
        dct = {}
        for old_path, (new_path, old_l, new_l) in self.change_dct.items():
            # BUGFIX: previously keyed on new_path and joined new_l,
            # making old_files() return the same thing as new_files().
            dct[old_path] = '\n'.join(old_l)
        return dct

    def new_files(self):
        """Map each new path to its refactored content."""
        dct = {}
        for old_path, (new_path, old_l, new_l) in self.change_dct.items():
            dct[new_path] = '\n'.join(new_l)
        return dct

    def diff(self):
        """Unified diff of all changes, concatenated across files."""
        texts = []
        for old_path, (new_path, old_l, new_l) in self.change_dct.items():
            if old_path:
                udiff = difflib.unified_diff(old_l, new_l)
            else:
                # NOTE(review): filenames are only attached when old_path
                # is falsy — this condition looks inverted; left as-is.
                udiff = difflib.unified_diff(old_l, new_l, old_path, new_path)
            texts.append('\n'.join(udiff))
        return '\n'.join(texts)
def rename(script, new_name):
    """Rename every usage of the name under the script's cursor.

    :param script: api.Script positioned on the name to rename.
    :param new_name: replacement identifier.
    :return: Refactoring describing the changed files.
    """
    return Refactoring(_rename(script.usages(), new_name))
def _rename(names, replace_str):
    """ For both rename and inline.

    Rewrites every usage in *names* to *replace_str* and returns
    dict(old_path=(new_path, old_lines, new_lines)).
    """
    # Process bottom-up within each file so earlier edits do not shift
    # the line/column positions of later ones.
    order = sorted(names, key=lambda x: (x.module_path, x.line, x.column),
                   reverse=True)

    def process(path, old_lines, new_lines):
        if new_lines is not None:   # goto next file, save last
            dct[path] = path, old_lines, new_lines
    dct = {}
    # Sentinel that compares unequal to any real path (including None).
    current_path = object()
    new_lines = old_lines = None
    for name in order:
        if name.in_builtin_module():
            continue
        if current_path != name.module_path:
            current_path = name.module_path
            # Flush the previously accumulated file before loading the next.
            process(current_path, old_lines, new_lines)
            if current_path is not None:
                # None means take the source that is a normal param.
                with open(current_path) as f:
                    source = f.read()
                new_lines = common.source_to_unicode(source).splitlines()
                old_lines = new_lines[:]
        nr, indent = name.line, name.column
        line = new_lines[nr - 1]
        # Splice replace_str over the old identifier on its line.
        new_lines[nr - 1] = line[:indent] + replace_str + \
            line[indent + len(name.name):]
    # Flush the last file.
    process(current_path, old_lines, new_lines)
    return dct
def extract(script, new_name):
    """Extract the expression at the script's cursor into a variable.

    Replaces the expression with *new_name* and inserts an assignment
    ``new_name = <expression>`` on the line above the statement.

    :param script: api.Script positioned on the expression to extract.
    :param new_name: name of the variable to introduce.
    :return: Refactoring describing the changed file.
    """
    new_lines = common.source_to_unicode(script.source).splitlines()
    old_lines = new_lines[:]
    user_stmt = script._parser.user_stmt()
    # TODO care for multiline extracts
    dct = {}
    if user_stmt:
        pos = script._pos
        line_index = pos[0] - 1
        arr, index = helpers.array_for_pos(user_stmt, pos)
        if arr is not None:
            start_pos = arr[index].start_pos
            end_pos = arr[index].end_pos
            # take full line if the start line is different from end line
            e = end_pos[1] if end_pos[0] == start_pos[0] else None
            start_line = new_lines[start_pos[0] - 1]
            text = start_line[start_pos[1]:e]
            for l in range(start_pos[0], end_pos[0] - 1):
                # BUGFIX: was `text += '\n' + l`, concatenating the int
                # loop index onto a string (TypeError on any multiline
                # extract). Append the intermediate source line instead.
                text += '\n' + new_lines[l]
            if e is None:
                end_line = new_lines[end_pos[0] - 1]
                text += '\n' + end_line[:end_pos[1]]
            # remove code from new lines
            t = text.lstrip()
            del_start = start_pos[1] + len(text) - len(t)
            text = t.rstrip()
            del_end = len(t) - len(text)
            if e is None:
                new_lines[end_pos[0] - 1] = end_line[end_pos[1] - del_end:]
                e = len(start_line)
            else:
                e = e - del_end
            start_line = start_line[:del_start] + new_name + start_line[e:]
            new_lines[start_pos[0] - 1] = start_line
            new_lines[start_pos[0]:end_pos[0] - 1] = []
            # add parentheses in multiline case
            open_brackets = ['(', '[', '{']
            close_brackets = [')', ']', '}']
            if '\n' in text and not (text[0] in open_brackets and text[-1] ==
                                     close_brackets[open_brackets.index(text[0])]):
                text = '(%s)' % text
            # add new line before statement
            indent = user_stmt.start_pos[1]
            new = "%s%s = %s" % (' ' * indent, new_name, text)
            new_lines.insert(line_index, new)
    dct[script.path] = script.path, old_lines, new_lines
    return Refactoring(dct)
def inline(script):
    """Inline the variable assigned at the cursor into all of its usages.

    Replaces every usage of the variable outside its defining statement with
    the assigned expression and, when the variable is the statement's only
    defined name, removes the assignment statement itself.  Silently does
    nothing (via the ignored AssertionError) when the cursor does not
    resolve to exactly one single-line assignment.

    :type script: api.Script
    :return: a Refactoring object describing the changed files/lines.
    """
    new_lines = common.source_to_unicode(script.source).splitlines()

    dct = {}

    definitions = script.goto_assignments()
    with common.ignored(AssertionError):
        assert len(definitions) == 1
        stmt = definitions[0]._definition
        usages = script.usages()
        # Usages inside the defining statement itself must not be rewritten.
        inlines = [r for r in usages
                   if not stmt.start_pos <= (r.line, r.column) <= stmt.end_pos]
        # Rewrite back-to-front so earlier positions stay valid.
        inlines = sorted(inlines, key=lambda x: (x.module_path, x.line, x.column),
                         reverse=True)
        expression_list = stmt.expression_list()
        # don't allow multiline refactorings for now.
        assert stmt.start_pos[0] == stmt.end_pos[0]
        index = stmt.start_pos[0] - 1

        line = new_lines[index]
        # The replacement text is the right-hand side of the assignment.
        replace_str = line[expression_list[0].start_pos[1]:stmt.end_pos[1] + 1]
        replace_str = replace_str.strip()
        # tuples need parentheses
        # NOTE(review): `pr` is the parser-representation module imported
        # elsewhere in this file -- pr.Array covers tuple/list/dict literals.
        if expression_list and isinstance(expression_list[0], pr.Array):
            arr = expression_list[0]
            if replace_str[0] not in ['(', '[', '{'] and len(arr) > 1:
                replace_str = '(%s)' % replace_str

        # if it's the only assignment, remove the statement
        if len(stmt.get_defined_names()) == 1:
            line = line[:stmt.start_pos[1]] + line[stmt.end_pos[1]:]

        dct = _rename(inlines, replace_str)
        # remove the empty line
        new_lines = dct[script.path][2]
        if line.strip():
            new_lines[index] = line
        else:
            new_lines.pop(index)

    return Refactoring(dct)
|
|
# -*- coding: utf-8 -*-
from ccxt.async.base.exchange import Exchange
import math
from ccxt.base.errors import ExchangeError
class bitstamp (Exchange):
    """ccxt async adapter for the Bitstamp exchange (https://www.bitstamp.net).

    Uses the v2 HTTP API for most endpoints, plus a small legacy 'v1'
    section for BTC/XRP deposit and withdrawal calls.
    """

    def describe(self):
        """Return static exchange metadata (endpoints, credentials, fees),
        merged over the base Exchange description."""
        return self.deep_extend(super(bitstamp, self).describe(), {
            'id': 'bitstamp',
            'name': 'Bitstamp',
            'countries': 'GB',
            'rateLimit': 1000,
            'version': 'v2',
            'hasCORS': False,
            'hasFetchOrder': True,
            'hasWithdraw': True,
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/27786377-8c8ab57e-5fe9-11e7-8ea4-2b05b6bcceec.jpg',
                'api': 'https://www.bitstamp.net/api',
                'www': 'https://www.bitstamp.net',
                'doc': 'https://www.bitstamp.net/api',
            },
            # Bitstamp's private API needs a customer id (uid) in addition
            # to the usual apiKey/secret pair; it goes into the signature.
            'requiredCredentials': {
                'apiKey': True,
                'secret': True,
                'uid': True,
            },
            'api': {
                'public': {
                    'get': [
                        'order_book/{pair}/',
                        'ticker_hour/{pair}/',
                        'ticker/{pair}/',
                        'transactions/{pair}/',
                        'trading-pairs-info/',
                    ],
                },
                'private': {
                    'post': [
                        'balance/',
                        'balance/{pair}/',
                        'bch_withdrawal/',
                        'bch_address/',
                        'user_transactions/',
                        'user_transactions/{pair}/',
                        'open_orders/all/',
                        'open_orders/{pair}/',
                        'order_status/',
                        'cancel_order/',
                        'buy/{pair}/',
                        'buy/market/{pair}/',
                        'sell/{pair}/',
                        'sell/market/{pair}/',
                        'ltc_withdrawal/',
                        'ltc_address/',
                        'eth_withdrawal/',
                        'eth_address/',
                        'transfer-to-main/',
                        'transfer-from-main/',
                        'withdrawal/open/',
                        'withdrawal/status/',
                        'withdrawal/cancel/',
                        'liquidation_address/new/',
                        'liquidation_address/info/',
                    ],
                },
                # Legacy v1 endpoints (BTC/XRP deposit and withdrawal only).
                'v1': {
                    'post': [
                        'bitcoin_deposit_address/',
                        'unconfirmed_btc/',
                        'bitcoin_withdrawal/',
                        'ripple_withdrawal/',
                        'ripple_address/',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'tierBased': True,
                    'percentage': True,
                    'taker': 0.25 / 100,
                    'maker': 0.25 / 100,
                    # Fee tiers as [volume threshold, fee rate] pairs.
                    'tiers': {
                        'taker': [
                            [0, 0.25 / 100],
                            [20000, 0.24 / 100],
                            [100000, 0.22 / 100],
                            [400000, 0.20 / 100],
                            [600000, 0.15 / 100],
                            [1000000, 0.14 / 100],
                            [2000000, 0.13 / 100],
                            [4000000, 0.12 / 100],
                            [20000000, 0.11 / 100],
                            [20000001, 0.10 / 100],
                        ],
                        'maker': [
                            [0, 0.25 / 100],
                            [20000, 0.24 / 100],
                            [100000, 0.22 / 100],
                            [400000, 0.20 / 100],
                            [600000, 0.15 / 100],
                            [1000000, 0.14 / 100],
                            [2000000, 0.13 / 100],
                            [4000000, 0.12 / 100],
                            [20000000, 0.11 / 100],
                            [20000001, 0.10 / 100],
                        ],
                    },
                },
                'funding': {
                    'tierBased': False,
                    'percentage': False,
                    'withdraw': {
                        'BTC': 0,
                        'LTC': 0,
                        'ETH': 0,
                        'XRP': 0,
                        'USD': 25,
                        'EUR': 0.90,
                    },
                    'deposit': {
                        'BTC': 0,
                        'LTC': 0,
                        'ETH': 0,
                        'XRP': 0,
                        'USD': 25,
                        'EUR': 0,
                    },
                },
            },
        })

    async def fetch_markets(self):
        """Fetch the trading pairs and normalize them into ccxt market dicts
        (id, symbol, precision, limits, lot, active, ...)."""
        markets = await self.publicGetTradingPairsInfo()
        result = []
        for i in range(0, len(markets)):
            market = markets[i]
            symbol = market['name']
            base, quote = symbol.split('/')
            baseId = base.lower()
            quoteId = quote.lower()
            symbolId = baseId + '_' + quoteId
            id = market['url_symbol']
            precision = {
                'amount': market['base_decimals'],
                'price': market['counter_decimals'],
            }
            # 'minimum_order' is "<amount> <currency>", e.g. "5.0 USD".
            cost, currency = market['minimum_order'].split(' ')
            active = (market['trading'] == 'Enabled')
            # Smallest tradable amount step, derived from the precision.
            lot = math.pow(10, -precision['amount'])
            result.append({
                'id': id,
                'symbol': symbol,
                'base': base,
                'quote': quote,
                'baseId': baseId,
                'quoteId': quoteId,
                'symbolId': symbolId,
                'info': market,
                'lot': lot,
                'active': active,
                'precision': precision,
                'limits': {
                    'amount': {
                        'min': lot,
                        'max': None,
                    },
                    'price': {
                        'min': math.pow(10, -precision['price']),
                        'max': None,
                    },
                    'cost': {
                        'min': float(cost),
                        'max': None,
                    },
                },
            })
        return result

    async def fetch_order_book(self, symbol, params={}):
        """Fetch and parse the order book for *symbol*."""
        await self.load_markets()
        orderbook = await self.publicGetOrderBookPair(self.extend({
            'pair': self.market_id(symbol),
        }, params))
        # Bitstamp timestamps are in seconds; ccxt uses milliseconds.
        timestamp = int(orderbook['timestamp']) * 1000
        return self.parse_order_book(orderbook, timestamp)

    async def fetch_ticker(self, symbol, params={}):
        """Fetch the ticker for *symbol* as a unified ccxt ticker dict."""
        await self.load_markets()
        ticker = await self.publicGetTickerPair(self.extend({
            'pair': self.market_id(symbol),
        }, params))
        timestamp = int(ticker['timestamp']) * 1000
        vwap = float(ticker['vwap'])
        baseVolume = float(ticker['volume'])
        # Only base volume is reported; derive quote volume via the vwap.
        quoteVolume = baseVolume * vwap
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': float(ticker['high']),
            'low': float(ticker['low']),
            'bid': float(ticker['bid']),
            'ask': float(ticker['ask']),
            'vwap': vwap,
            'open': float(ticker['open']),
            'close': None,
            'first': None,
            'last': float(ticker['last']),
            'change': None,
            'percentage': None,
            'average': None,
            'baseVolume': baseVolume,
            'quoteVolume': quoteVolume,
            'info': ticker,
        }

    def parse_trade(self, trade, market=None):
        """Normalize a trade record into a unified ccxt trade dict.

        Handles both public trades ('date'/'tid' keys) and private user
        transactions ('datetime'/'id'/'order_id' keys).
        """
        timestamp = None
        if 'date' in trade:
            timestamp = int(trade['date']) * 1000
        elif 'datetime' in trade:
            timestamp = self.parse8601(trade['datetime'])
        # Bitstamp encodes side as '0' = buy, anything else = sell.
        side = 'buy' if (trade['type'] == '0') else 'sell'
        order = None
        if 'order_id' in trade:
            order = str(trade['order_id'])
        if 'currency_pair' in trade:
            marketId = trade['currency_pair']
            if marketId in self.markets_by_id:
                market = self.markets_by_id[marketId]
        # Private records key price/amount under market-specific field names
        # (symbolId/baseId), hence the second safe_float lookups.
        # NOTE(review): market can still be None here (no/unknown
        # 'currency_pair'); the subscripts below would then raise -- confirm
        # callers always supply a resolved market in that case.
        price = self.safe_float(trade, 'price')
        price = self.safe_float(trade, market['symbolId'], price)
        amount = self.safe_float(trade, 'amount')
        amount = self.safe_float(trade, market['baseId'], amount)
        id = self.safe_value(trade, 'tid')
        id = self.safe_value(trade, 'id', id)
        if id:
            id = str(id)
        return {
            'id': id,
            'info': trade,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': market['symbol'],
            'order': order,
            'type': None,
            'side': side,
            'price': float(price),
            'amount': float(amount),
        }

    async def fetch_trades(self, symbol, since=None, limit=None, params={}):
        """Fetch recent public trades (last minute) for *symbol*."""
        await self.load_markets()
        market = self.market(symbol)
        response = await self.publicGetTransactionsPair(self.extend({
            'pair': market['id'],
            'time': 'minute',
        }, params))
        return self.parse_trades(response, market, since, limit)

    async def fetch_balance(self, params={}):
        """Fetch account balances; Bitstamp reports them as flat
        '<currency>_balance/_available/_reserved' fields."""
        await self.load_markets()
        balance = await self.privatePostBalance()
        result = {'info': balance}
        currencies = list(self.currencies.keys())
        for i in range(0, len(currencies)):
            currency = currencies[i]
            lowercase = currency.lower()
            total = lowercase + '_balance'
            free = lowercase + '_available'
            used = lowercase + '_reserved'
            account = self.account()
            if free in balance:
                account['free'] = float(balance[free])
            if used in balance:
                account['used'] = float(balance[used])
            if total in balance:
                account['total'] = float(balance[total])
            result[currency] = account
        return self.parse_balance(result)

    async def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Place a limit or market order; dispatches to e.g.
        privatePostBuyPair / privatePostBuyMarketPair by side and type."""
        await self.load_markets()
        method = 'privatePost' + self.capitalize(side)
        order = {
            'pair': self.market_id(symbol),
            'amount': amount,
        }
        if type == 'market':
            method += 'Market'
        else:
            # Limit orders carry an explicit price.
            order['price'] = price
        method += 'Pair'
        response = await getattr(self, method)(self.extend(order, params))
        return {
            'info': response,
            'id': response['id'],
        }

    async def cancel_order(self, id, symbol=None, params={}):
        """Cancel the order with the given id (symbol is not required)."""
        await self.load_markets()
        return await self.privatePostCancelOrder({'id': id})

    def parse_order_status(self, order):
        """Map Bitstamp order statuses to the unified 'open'/'closed' values,
        passing unknown statuses through unchanged."""
        if (order['status'] == 'Queue') or (order['status'] == 'Open'):
            return 'open'
        if order['status'] == 'Finished':
            return 'closed'
        return order['status']

    async def fetch_order_status(self, id, symbol=None):
        """Fetch and normalize the status of the order with the given id."""
        await self.load_markets()
        response = await self.privatePostOrderStatus({'id': id})
        return self.parse_order_status(response)

    async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
        """Fetch the account's own trades, optionally filtered by symbol
        (pair defaults to 'all')."""
        await self.load_markets()
        market = None
        if symbol:
            market = self.market(symbol)
        pair = market['id'] if market else 'all'
        request = self.extend({'pair': pair}, params)
        response = await self.privatePostUserTransactionsPair(request)
        return self.parse_trades(response, market, since, limit)

    async def fetch_order(self, id, symbol=None, params={}):
        """Fetch the raw order-status record for the given order id.

        NOTE(review): unlike fetch_order_status, the response is returned
        unparsed -- confirm whether callers expect a unified order dict.
        """
        await self.load_markets()
        return await self.privatePostOrderStatus({'id': id})

    def get_currency_name(self, code):
        """Map a currency code to the name used in withdrawal endpoint paths
        (e.g. 'BTC' -> 'bitcoin', 'XRP' -> 'ripple', 'LTC' -> 'ltc')."""
        if code == 'BTC':
            return 'bitcoin'
        if code == 'XRP':
            return 'ripple'
        return code.lower()

    def is_fiat(self, code):
        """Return True for fiat currency codes (USD/EUR)."""
        if code == 'USD':
            return True
        if code == 'EUR':
            return True
        return False

    async def withdraw(self, code, amount, address, params={}):
        """Withdraw *amount* of currency *code* to *address*.

        Fiat withdrawals are not implemented.  BTC and XRP go through the
        legacy v1 endpoints; XRP additionally requires a 'destination_tag'
        in params.
        """
        isFiat = self.is_fiat(code)
        if isFiat:
            raise ExchangeError(self.id + ' fiat withdraw() for ' + code + ' is not implemented yet')
        name = self.get_currency_name(code)
        request = {
            'amount': amount,
            'address': address,
        }
        v1 = (code == 'BTC') or (code == 'XRP')
        method = 'v1' if v1 else 'private'  # v1 or v2
        method += 'Post' + self.capitalize(name) + 'Withdrawal'
        query = params
        if code == 'XRP':
            tag = self.safe_string(params, 'destination_tag')
            if tag:
                request['destination_tag'] = tag
                query = self.omit(params, 'destination_tag')
            else:
                raise ExchangeError(self.id + ' withdraw() requires a destination_tag param for ' + code)
        response = await getattr(self, method)(self.extend(request, query))
        return {
            'info': response,
            'id': response['id'],
        }

    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the request dict (url/method/body/headers) for an API call.

        Private calls are authenticated with an uppercase HMAC of
        (nonce + uid + apiKey) computed with the account secret, sent as
        form-encoded 'key'/'signature'/'nonce' fields.
        """
        url = self.urls['api'] + '/'
        # v1 endpoints live directly under /api/, v2 under /api/v2/.
        if api != 'v1':
            url += self.version + '/'
        url += self.implode_params(path, params)
        query = self.omit(params, self.extract_params(path))
        if api == 'public':
            if query:
                url += '?' + self.urlencode(query)
        else:
            self.check_required_credentials()
            nonce = str(self.nonce())
            auth = nonce + self.uid + self.apiKey
            signature = self.encode(self.hmac(self.encode(auth), self.encode(self.secret)))
            query = self.extend({
                'key': self.apiKey,
                'signature': signature.upper(),
                'nonce': nonce,
            }, query)
            body = self.urlencode(query)
            headers = {
                'Content-Type': 'application/x-www-form-urlencoded',
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}

    async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Issue the HTTP request and raise ExchangeError when the API
        answers with an application-level {'status': 'error'} payload."""
        response = await self.fetch2(path, api, method, params, headers, body)
        if 'status' in response:
            if response['status'] == 'error':
                raise ExchangeError(self.id + ' ' + self.json(response))
        return response
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""mel conversion ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.signal import shape_ops
from tensorflow.python.util.tf_export import tf_export
# mel spectrum constants.
# These define the mel scale used below: mel = 1127 * ln(1 + hz / 700).
_MEL_BREAK_FREQUENCY_HERTZ = 700.0  # break frequency of the log region
_MEL_HIGH_FREQUENCY_Q = 1127.0  # scale factor on the log term
def _mel_to_hertz(mel_values, name=None):
  """Converts frequencies in `mel_values` from the mel scale to linear scale.

  Inverts the mel formula: hz = 700 * (exp(mel / 1127) - 1).

  Args:
    mel_values: A `Tensor` of frequencies in the mel scale.
    name: An optional name for the operation.

  Returns:
    A `Tensor` of the same shape and type as `mel_values` containing linear
    scale frequencies in Hertz.
  """
  with ops.name_scope(name, 'mel_to_hertz', [mel_values]):
    mel_values = ops.convert_to_tensor(mel_values)
    scaled = mel_values / _MEL_HIGH_FREQUENCY_Q
    return _MEL_BREAK_FREQUENCY_HERTZ * (math_ops.exp(scaled) - 1.0)
def _hertz_to_mel(frequencies_hertz, name=None):
  """Converts frequencies in `frequencies_hertz` in Hertz to the mel scale.

  Applies the mel formula: mel = 1127 * ln(1 + hz / 700).

  Args:
    frequencies_hertz: A `Tensor` of frequencies in Hertz.
    name: An optional name for the operation.

  Returns:
    A `Tensor` of the same shape and type of `frequencies_hertz` containing
    frequencies in the mel scale.
  """
  with ops.name_scope(name, 'hertz_to_mel', [frequencies_hertz]):
    frequencies_hertz = ops.convert_to_tensor(frequencies_hertz)
    ratio = frequencies_hertz / _MEL_BREAK_FREQUENCY_HERTZ
    return _MEL_HIGH_FREQUENCY_Q * math_ops.log(1.0 + ratio)
def _validate_arguments(num_mel_bins, sample_rate,
lower_edge_hertz, upper_edge_hertz, dtype):
"""Checks the inputs to linear_to_mel_weight_matrix."""
if num_mel_bins <= 0:
raise ValueError('num_mel_bins must be positive. Got: %s' % num_mel_bins)
if sample_rate <= 0.0:
raise ValueError('sample_rate must be positive. Got: %s' % sample_rate)
if lower_edge_hertz < 0.0:
raise ValueError('lower_edge_hertz must be non-negative. Got: %s' %
lower_edge_hertz)
if lower_edge_hertz >= upper_edge_hertz:
raise ValueError('lower_edge_hertz %.1f >= upper_edge_hertz %.1f' %
(lower_edge_hertz, upper_edge_hertz))
if upper_edge_hertz > sample_rate / 2:
raise ValueError('upper_edge_hertz must not be larger than the Nyquist '
'frequency (sample_rate / 2). Got: %s for sample_rate: %s'
% (upper_edge_hertz, sample_rate))
if not dtype.is_floating:
raise ValueError('dtype must be a floating point type. Got: %s' % dtype)
@tf_export('signal.linear_to_mel_weight_matrix')
def linear_to_mel_weight_matrix(num_mel_bins=20,
                                num_spectrogram_bins=129,
                                sample_rate=8000,
                                lower_edge_hertz=125.0,
                                upper_edge_hertz=3800.0,
                                dtype=dtypes.float32,
                                name=None):
  """Returns a matrix to warp linear scale spectrograms to the [mel scale][mel].

  Returns a weight matrix that can be used to re-weight a `Tensor` containing
  `num_spectrogram_bins` linearly sampled frequency information from
  `[0, sample_rate / 2]` into `num_mel_bins` frequency information from
  `[lower_edge_hertz, upper_edge_hertz]` on the [mel scale][mel].

  For example, the returned matrix `A` can be used to right-multiply a
  spectrogram `S` of shape `[frames, num_spectrogram_bins]` of linear
  scale spectrum values (e.g. STFT magnitudes) to generate a "mel spectrogram"
  `M` of shape `[frames, num_mel_bins]`.

      # `S` has shape [frames, num_spectrogram_bins]
      # `M` has shape [frames, num_mel_bins]
      M = tf.matmul(S, A)

  The matrix can be used with `tf.tensordot` to convert an arbitrary rank
  `Tensor` of linear-scale spectral bins into the mel scale.

      # S has shape [..., num_spectrogram_bins].
      # M has shape [..., num_mel_bins].
      M = tf.tensordot(S, A, 1)
      # tf.tensordot does not support shape inference for this case yet.
      M.set_shape(S.shape[:-1].concatenate(A.shape[-1:]))

  Args:
    num_mel_bins: Python int. How many bands in the resulting mel spectrum.
    num_spectrogram_bins: An integer `Tensor`. How many bins there are in the
      source spectrogram data, which is understood to be `fft_size // 2 + 1`,
      i.e. the spectrogram only contains the nonredundant FFT bins.
    sample_rate: Python float. Samples per second of the input signal used to
      create the spectrogram. We need this to figure out the actual frequencies
      for each spectrogram bin, which dictates how they are mapped into the mel
      scale.
    lower_edge_hertz: Python float. Lower bound on the frequencies to be
      included in the mel spectrum. This corresponds to the lower edge of the
      lowest triangular band.
    upper_edge_hertz: Python float. The desired top edge of the highest
      frequency band.
    dtype: The `DType` of the result matrix. Must be a floating point type.
    name: An optional name for the operation.

  Returns:
    A `Tensor` of shape `[num_spectrogram_bins, num_mel_bins]`.

  Raises:
    ValueError: If `num_mel_bins`/`num_spectrogram_bins`/`sample_rate` are not
      positive, `lower_edge_hertz` is negative, frequency edges are incorrectly
      ordered, or `upper_edge_hertz` is larger than the Nyquist frequency.

  [mel]: https://en.wikipedia.org/wiki/Mel_scale
  """
  with ops.name_scope(name, 'linear_to_mel_weight_matrix') as name:
    # Note: As num_spectrogram_bins is passed to `math_ops.linspace`
    # and the validation is already done in linspace (both in shape function
    # and in kernel), there is no need to validate num_spectrogram_bins here.
    _validate_arguments(num_mel_bins, sample_rate,
                        lower_edge_hertz, upper_edge_hertz, dtype)

    # This function can be constant folded by graph optimization since there are
    # no Tensor inputs.
    sample_rate = ops.convert_to_tensor(
        sample_rate, dtype, name='sample_rate')
    lower_edge_hertz = ops.convert_to_tensor(
        lower_edge_hertz, dtype, name='lower_edge_hertz')
    upper_edge_hertz = ops.convert_to_tensor(
        upper_edge_hertz, dtype, name='upper_edge_hertz')
    zero = ops.convert_to_tensor(0.0, dtype)

    # HTK excludes the spectrogram DC bin.
    bands_to_zero = 1
    nyquist_hertz = sample_rate / 2.0
    # Center frequency (in Hz, then mel) of each retained spectrogram bin.
    linear_frequencies = math_ops.linspace(
        zero, nyquist_hertz, num_spectrogram_bins)[bands_to_zero:]
    spectrogram_bins_mel = array_ops.expand_dims(
        _hertz_to_mel(linear_frequencies), 1)

    # Compute num_mel_bins triples of (lower_edge, center, upper_edge). The
    # center of each band is the lower and upper edge of the adjacent bands.
    # Accordingly, we divide [lower_edge_hertz, upper_edge_hertz] into
    # num_mel_bins + 2 pieces.
    band_edges_mel = shape_ops.frame(
        math_ops.linspace(_hertz_to_mel(lower_edge_hertz),
                          _hertz_to_mel(upper_edge_hertz),
                          num_mel_bins + 2), frame_length=3, frame_step=1)

    # Split the triples up and reshape them into [1, num_mel_bins] tensors.
    lower_edge_mel, center_mel, upper_edge_mel = tuple(array_ops.reshape(
        t, [1, num_mel_bins]) for t in array_ops.split(
            band_edges_mel, 3, axis=1))

    # Calculate lower and upper slopes for every spectrogram bin.
    # Line segments are linear in the mel domain, not Hertz.
    lower_slopes = (spectrogram_bins_mel - lower_edge_mel) / (
        center_mel - lower_edge_mel)
    upper_slopes = (upper_edge_mel - spectrogram_bins_mel) / (
        upper_edge_mel - center_mel)

    # Intersect the line segments with each other and zero.
    mel_weights_matrix = math_ops.maximum(
        zero, math_ops.minimum(lower_slopes, upper_slopes))

    # Re-add the zeroed lower bins we sliced out above.
    return array_ops.pad(
        mel_weights_matrix, [[bands_to_zero, 0], [0, 0]], name=name)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for gmm_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.factorization.python.ops import gmm_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed as random_seed_lib
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class GmmOpsTest(test.TestCase):
  """Tests for gmm_ops: covariance computation and GMM training updates."""

  def setUp(self):
    # Fix both the TF and numpy seeds so generated data and training
    # outcomes (and hence the numeric expectations below) are reproducible.
    self.num_examples = 1000
    self.iterations = 40
    self.seed = 4
    random_seed_lib.set_random_seed(self.seed)
    np.random.seed(self.seed * 2)
    self.data, self.true_assignments = self.make_data(self.num_examples)
    # Generate more complicated data.
    self.centers = [[1, 1], [-1, 0.5], [2, 1]]
    self.more_data, self.more_true_assignments = self.make_data_from_centers(
        self.num_examples, self.centers)

  @staticmethod
  def make_data(num_vectors):
    """Generates 2-dimensional data centered on (2,2), (-1,-1).

    Args:
      num_vectors: number of training examples.

    Returns:
      A tuple containing the data as a numpy array and the cluster ids.
    """
    vectors = []
    classes = []
    for _ in xrange(num_vectors):
      # Draw each example from one of the two Gaussian blobs at random.
      if np.random.random() > 0.5:
        vectors.append([np.random.normal(2.0, 0.6), np.random.normal(2.0, 0.9)])
        classes.append(0)
      else:
        vectors.append(
            [np.random.normal(-1.0, 0.4), np.random.normal(-1.0, 0.5)])
        classes.append(1)
    return np.asarray(vectors), classes

  @staticmethod
  def make_data_from_centers(num_vectors, centers):
    """Generates 2-dimensional data with random centers.

    Args:
      num_vectors: number of training examples.
      centers: a list of random 2-dimensional centers.

    Returns:
      A tuple containing the data as a numpy array and the number of centers.
    """
    vectors = []
    classes = []
    for _ in xrange(num_vectors):
      current_class = np.random.random_integers(0, len(centers) - 1)
      vectors.append([
          np.random.normal(centers[current_class][0],
                           np.random.random_sample()),
          np.random.normal(centers[current_class][1], np.random.random_sample())
      ])
      classes.append(current_class)
    # NOTE(review): the per-example `classes` list built above is discarded
    # and the number of centers is returned instead -- confirm intended.
    return np.asarray(vectors), len(centers)

  def test_covariance(self):
    """Compares gmm_ops._covariance (full and diagonal) against np.cov."""
    start_time = time.time()
    data = self.data.T
    np_cov = np.cov(data)
    logging.info('Numpy took %f', time.time() - start_time)
    start_time = time.time()
    with self.test_session() as sess:
      op = gmm_ops._covariance(
          constant_op.constant(
              data.T, dtype=dtypes.float32), False)
      op_diag = gmm_ops._covariance(
          constant_op.constant(
              data.T, dtype=dtypes.float32), True)
      variables.global_variables_initializer().run()
      tf_cov = sess.run(op)
      np.testing.assert_array_almost_equal(np_cov, tf_cov)
      logging.info('Tensorflow took %f', time.time() - start_time)
      # The diagonal variant must match the diagonal of the full matrix.
      tf_cov = sess.run(op_diag)
      np.testing.assert_array_almost_equal(
          np.diag(np_cov), np.ravel(tf_cov), decimal=5)

  def test_simple_cluster(self):
    """Tests that the clusters are correct."""
    num_classes = 2
    graph = ops.Graph()
    with graph.as_default() as g:
      g.seed = 5
      with self.test_session() as sess:
        data = constant_op.constant(self.data, dtype=dtypes.float32)
        _, assignments, _, training_op, init_op, _ = gmm_ops.gmm(
            data, 'random', num_classes, random_seed=self.seed)
        variables.global_variables_initializer().run()
        sess.run(init_op)
        for _ in xrange(self.iterations):
          sess.run(training_op)
        assignments = sess.run(assignments)
        # Cluster labels happen to line up with the generated class ids.
        accuracy = np.mean(
            np.asarray(self.true_assignments) == np.squeeze(assignments))
        logging.info('Accuracy: %f', accuracy)
        self.assertGreater(accuracy, 0.98)

  def testParams(self):
    """Tests that the params work as intended."""
    num_classes = 2
    with self.test_session() as sess:
      # Experiment 1. Update weights only ('w').
      data = constant_op.constant(self.data, dtype=dtypes.float32)
      gmm_tool = gmm_ops.GmmAlgorithm([data], num_classes,
                                      [[3.0, 3.0], [0.0, 0.0]], 'w')
      training_ops = gmm_tool.training_ops()
      variables.global_variables_initializer().run()
      sess.run(gmm_tool.init_ops())
      for _ in xrange(self.iterations):
        sess.run(training_ops)
      # Only the probability to each class is updated.
      alphas = sess.run(gmm_tool.alphas())
      self.assertGreater(alphas[1], 0.6)
      # Means keep their initial values and covariances stay identical.
      means = sess.run(gmm_tool.clusters())
      np.testing.assert_almost_equal(
          np.expand_dims([[3.0, 3.0], [0.0, 0.0]], 1), means)
      covs = sess.run(gmm_tool.covariances())
      np.testing.assert_almost_equal(covs[0], covs[1])
      # Experiment 2. Update means and covariances ('mc').
      gmm_tool = gmm_ops.GmmAlgorithm([data], num_classes,
                                      [[3.0, 3.0], [0.0, 0.0]], 'mc')
      training_ops = gmm_tool.training_ops()
      variables.global_variables_initializer().run()
      sess.run(gmm_tool.init_ops())
      for _ in xrange(self.iterations):
        sess.run(training_ops)
      alphas = sess.run(gmm_tool.alphas())
      self.assertAlmostEqual(alphas[0], alphas[1])
      # Means converge to the true data centers (2,2) and (-1,-1).
      means = sess.run(gmm_tool.clusters())
      np.testing.assert_almost_equal(
          np.expand_dims([[2.0, 2.0], [-1.0, -1.0]], 1), means, decimal=1)
      covs = sess.run(gmm_tool.covariances())
      np.testing.assert_almost_equal(
          [[0.371111, -0.0050774], [-0.0050774, 0.8651744]], covs[0], decimal=4)
      np.testing.assert_almost_equal(
          [[0.146976, 0.0259463], [0.0259463, 0.2543971]], covs[1], decimal=4)
      # Experiment 3. Update covariances only ('c').
      gmm_tool = gmm_ops.GmmAlgorithm([data], num_classes,
                                      [[-1.0, -1.0], [1.0, 1.0]], 'c')
      training_ops = gmm_tool.training_ops()
      variables.global_variables_initializer().run()
      sess.run(gmm_tool.init_ops())
      for _ in xrange(self.iterations):
        sess.run(training_ops)
      alphas = sess.run(gmm_tool.alphas())
      self.assertAlmostEqual(alphas[0], alphas[1])
      # Means remain at their (deliberately wrong) initial values.
      means = sess.run(gmm_tool.clusters())
      np.testing.assert_almost_equal(
          np.expand_dims([[-1.0, -1.0], [1.0, 1.0]], 1), means)
      covs = sess.run(gmm_tool.covariances())
      np.testing.assert_almost_equal(
          [[0.1299582, 0.0435872], [0.0435872, 0.2558578]], covs[0], decimal=5)
      np.testing.assert_almost_equal(
          [[3.195385, 2.6989155], [2.6989155, 3.3881593]], covs[1], decimal=5)
# Allow running this test module directly.
if __name__ == '__main__':
  test.main()
|
|
"""Posts JSON with basic information for each completed build to the URL specified
in the config variable ANALYTICS_POST_URL, if that value is set.
The data posted to ANALYTICS_POST_URL will be a JSON array of objects, with each
object representing a completed build.
"""
import logging
import json
import re
import requests
from sqlalchemy import distinct
from collections import defaultdict
from datetime import datetime
from flask import current_app
from changes.config import db, statsreporter
from changes.constants import Result
from changes.models import Build, FailureReason, Job, JobStep, LogSource, LogChunk
from changes.experimental import categorize
# Module-level logger used for POST failures and categorization warnings.
logger = logging.getLogger('analytics_notifier')
def _datetime_to_timestamp(dt):
"""Convert a datetime to unix epoch time in seconds."""
return int((dt - datetime.utcfromtimestamp(0)).total_seconds())
_REV_URL_RE = re.compile(r'^\s*Differential Revision:\s+(http.*/D[0-9]+)\s*$', re.MULTILINE)
def _get_phabricator_revision_url(build):
"""Returns the Phabricator Revision URL for a Build.
Args:
build (Build): The Build.
Returns:
A str with the Phabricator Revision URL, or None if we couldn't find one (or found
multiple).
"""
source_data = build.source.data or {}
rev_url = source_data.get('phabricator.revisionURL')
if rev_url:
return rev_url
if build.message:
matches = _REV_URL_RE.findall(build.message)
# only return if there's a clear choice.
if matches and len(matches) == 1:
return matches[0]
return None
def _get_build_failure_reasons(build):
    """Return the names of all the FailureReasons associated with a build.

    Args:
        build (Build): The build to return reasons for.
    Returns:
        list: A sorted list of the distinct FailureReason.reason values
        associated with the build.
    """
    rows = db.session.query(
        distinct(FailureReason.reason)
    ).filter(
        FailureReason.build_id == build.id
    ).all()
    # The order isn't particularly meaningful; the sorting is primarily
    # to make the same set of reasons reliably result in the same JSON.
    return sorted(reason for (reason,) in rows)
def maybe_ts(dt):
    """Convert *dt* to a unix timestamp, passing None (or falsy) through."""
    return _datetime_to_timestamp(dt) if dt else None
def build_finished_handler(build_id, **kwargs):
    """Post a one-record JSON summary of a finished build to ANALYTICS_POST_URL.

    No-op when the URL is not configured or the build cannot be found.
    """
    post_url = current_app.config.get('ANALYTICS_POST_URL')
    if not post_url:
        return
    build = Build.query.get(build_id)
    if build is None:
        return

    reasons = _get_build_failure_reasons(build)

    record = {
        'build_id': build.id.hex,
        'result': unicode(build.result),
        'project_slug': build.project.slug,
        'is_commit': build.source.is_commit(),
        'label': build.label,
        'number': build.number,
        'duration': build.duration,
        'target': build.target,
        'date_created': maybe_ts(build.date_created),
        'date_started': maybe_ts(build.date_started),
        'date_finished': maybe_ts(build.date_finished),
        # Revision URL rather than just revision id because the URL should
        # be globally unique, whereas the id is only certain to be unique for
        # a single Phabricator instance.
        'phab_revision_url': _get_phabricator_revision_url(build),
        'failure_reasons': reasons,
    }
    if build.author:
        record['author'] = build.author.email
    post_analytics_data(post_url, [record])
def post_analytics_data(url, data):
    """POST *data* as JSON to *url*, logging (and swallowing) any failure.

    Args:
        url (str): HTTP URL to POST to.
        data (list): Records to POST as JSON.
    """
    try:
        response = requests.post(
            url,
            headers={'Content-Type': 'application/json'},
            data=json.dumps(data),
            timeout=10,
        )
        response.raise_for_status()
        # Should probably retry here so that transient failures don't result
        # in missing data.
    except Exception:
        logger.exception("Failed to post to Analytics")
def job_finished_handler(job_id, **kwargs):
    """Post one JSON record per JobStep of a finished job to
    ANALYTICS_JOBSTEP_POST_URL (when configured)."""
    job = Job.query.get(job_id)
    if job is None:
        return
    # Note: runs before the URL check, so its stats counters and warnings
    # fire even when the POST URL is not configured.
    tags_by_step = _categorize_step_logs(job)
    post_url = current_app.config.get('ANALYTICS_JOBSTEP_POST_URL')
    if not post_url:
        return

    reasons_by_step = _get_job_failure_reasons_by_jobstep(job)
    records = []
    for step in JobStep.query.filter(JobStep.job == job):
        records.append({
            'jobstep_id': step.id.hex,
            'job_id': step.job_id.hex,
            'phase_id': step.phase_id.hex,
            'build_id': job.build_id.hex,
            'label': step.label,
            'result': unicode(step.result),
            'date_started': maybe_ts(step.date_started),
            'date_finished': maybe_ts(step.date_finished),
            'date_created': maybe_ts(step.date_created),
            # step.data is a changes.db.types.json.MutableDict; it is not
            # directly jsonable but its value should be a jsonable dict.
            'data': step.data.value,
            'log_categories': sorted(list(tags_by_step[step.id])),
            'failure_reasons': reasons_by_step[step.id],
            # TODO: Node? Duration (to match build, for efficiency)?
        })
    post_analytics_data(post_url, records)
def _categorize_step_logs(job):
    """Categorize the logs of *job*'s failing steps.

    Args:
        job (Job): The Job to categorize logs for.
    Returns:
        Dict[UUID, Set[str]]: Mapping from JobStep ID to the categories
        observed for its logs.
    """
    tags_by_step = defaultdict(set)
    rules = _get_rules()
    if not rules:
        return tags_by_step
    for source in _get_failing_log_sources(job):
        log_text = _get_log_data(source)
        tags, applicable = categorize.categorize(job.project.slug, rules, log_text)
        tags_by_step[source.step_id].update(tags)
        _incr("failing-log-processed")
        if not tags and applicable:
            _incr("failing-log-uncategorized")
            logger.warning("Uncategorized log", extra={
                # Supplying the 'data' this way makes it available in log
                # handlers like Sentry while keeping the warnings grouped
                # together.  See
                # https://github.com/getsentry/raven-python/blob/master/docs/integrations/logging.rst#usage
                # for Sentry's interpretation.
                'data': {
                    'logsource.id': source.id.hex,
                    'log.url': source.get_url(),
                }
            })
        else:
            for tag in tags:
                _incr("failing-log-category-{}".format(tag))
    return tags_by_step
def _get_job_failure_reasons_by_jobstep(job):
    """Return dict mapping jobstep ids to names of all associated FailureReasons.

    Args:
        job (Job): The job to return failure reasons for.
    Returns:
        dict: A dict mapping from jobstep id to a sorted list of failure
        reasons.
    """
    rows = db.session.query(
        FailureReason.reason, FailureReason.step_id
    ).filter(
        FailureReason.job_id == job.id
    ).all()

    by_step = defaultdict(list)
    for reason, step_id in rows:
        by_step[step_id].append(reason)

    # The order isn't particularly meaningful; the sorting is primarily
    # to make the same set of reasons reliably result in the same JSON.
    for step_id in by_step:
        by_step[step_id].sort()
    return by_step
def _get_failing_log_sources(job):
    """Return the LogSources belonging to the job's failing JobSteps.

    Args:
        job (Job): The Job whose log sources to fetch.
    Returns:
        List[LogSource]: Log sources whose JobStep result is failed or
        infra_failed, ordered by the step's creation time.
    """
    return list(LogSource.query.filter(
        LogSource.job_id == job.id,
    ).join(
        JobStep, LogSource.step_id == JobStep.id,
    ).filter(
        JobStep.result.in_([Result.failed, Result.infra_failed]),
    ).order_by(JobStep.date_created))
def _get_log_data(source):
    """Return the full text of the given LogSource, assembled from its
    LogChunks in ascending offset order.
    """
    ordered_chunks = LogChunk.query.filter(
        LogChunk.source_id == source.id,
    ).order_by(LogChunk.offset.asc())
    return ''.join(chunk.text for chunk in ordered_chunks)
def _get_rules():
    """Return the current rules to be used with categorize.categorize.

    NB: Reloads the rules file at each call.
    Returns None when no CATEGORIZE_RULES_FILE is configured.
    """
    path = current_app.config.get('CATEGORIZE_RULES_FILE')
    return categorize.load_rules(path) if path else None
def _incr(name):
    """Helper to increment a stats counter.

    Mostly exists to ease mocking in tests.

    Args:
        name (str): Name of counter to increment.
    """
    statsreporter.stats().incr(name)
|
|
#
# wcsmod.py -- module wrapper for WCS calculations.
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
"""
We are fortunate to have several possible choices for a python WCS package
compatible with Ginga: astlib, kapteyn, starlink and astropy.
kapteyn and astropy wrap Mark Calabretta's "WCSLIB", astLib wraps
Doug Mink's "wcstools", and starlink wraps its own AST library.
Note that astlib requires pyfits (or astropy) in order to create a WCS
object from a FITS header.
To force the use of one, do:
from ginga.util import wcsmod
wcsmod.use('kapteyn')
before you load any images. Otherwise Ginga will try to pick one for
you.
"""
import math
import re
import numpy
from ginga.util.six.moves import map, zip
# Module variables that get configured at module load time
# or when use() is called
coord_types = []
display_types = ['sexagesimal', 'degrees']
wcs_configured = False
have_kapteyn = False
have_astlib = False
have_pywcs = False
have_astropy = False
have_starlink = False
WCS = None
class WCSError(Exception):
    """Raised when a WCS calculation fails or no usable WCS is configured."""
    pass
def use(wcspkg, raise_err=True):
global coord_types, wcs_configured, WCS, \
have_kapteyn, kapwcs, \
have_astlib, astWCS, astCoords, \
have_starlink, Ast, Atl, \
have_astropy, pywcs, pyfits, astropy, coordinates, units
if wcspkg == 'kapteyn':
try:
from kapteyn import wcs as kapwcs
coord_types = ['icrs', 'fk5', 'fk4', 'galactic', 'ecliptic']
have_kapteyn = True
wcs_configured = True
WCS = KapteynWCS
return True
except ImportError as e:
if raise_err:
raise
return False
elif wcspkg == 'starlink':
try:
import starlink.Ast as Ast
import starlink.Atl as Atl
coord_types = ['icrs', 'fk5', 'fk4', 'galactic', 'ecliptic']
have_starlink = True
wcs_configured = True
WCS = StarlinkWCS
return True
except ImportError as e:
if raise_err:
raise
return False
elif wcspkg == 'astlib':
try:
from astLib import astWCS, astCoords
# astlib requires pyfits (or astropy) in order
# to create a WCS object from a FITS header.
try:
from astropy.io import fits as pyfits
except ImportError:
try:
import pyfits
except ImportError:
raise ImportError("Need pyfits module to use astLib WCS")
astWCS.NUMPY_MODE = True
coord_types = ['j2000', 'b1950', 'galactic']
have_astlib = True
wcs_configured = True
WCS = AstLibWCS
return True
except ImportError as e:
if raise_err:
raise
return False
elif wcspkg == 'astropy2':
try:
import astropy
except ImportError:
if raise_err:
raise
from distutils.version import LooseVersion
if LooseVersion(astropy.__version__) <= LooseVersion('1'):
return False
import astropy.coordinates
import astropy.wcs as pywcs
from astropy.io import fits as pyfits
import astropy.units as u
from astropy.version import version
have_pywcs = True
have_astropy = True
wcs_configured = True
WCS = AstropyWCS2
try:
import sunpy.coordinates
except ImportError:
pass
coord_types = [f.name for f in astropy.coordinates.frame_transform_graph.frame_set]
return True
elif wcspkg == 'astropy':
try:
import astropy.wcs as pywcs
from astropy.io import fits as pyfits
have_pywcs = True
except ImportError:
try:
import pywcs
have_pywcs = True
except ImportError as e:
if raise_err:
raise
try:
from astropy import coordinates
from astropy import units
have_astropy = True
wcs_configured = True
WCS = AstropyWCS
if hasattr(coordinates, 'SkyCoord'):
try:
import sunpy.coordinates
except ImportError:
pass
coord_types = [f.name for f in coordinates.frame_transform_graph.frame_set]
else:
coord_types = ['icrs', 'fk5', 'fk4', 'galactic']
return True
except ImportError as e:
if raise_err:
raise
return False
elif wcspkg == 'barebones':
coord_types = ['fk5']
WCS = BareBonesWCS
wcs_configured = True
return True
return False
class BaseWCS(object):
    """Abstract base class for the WCS wrappers in this module.

    Subclasses are expected to set ``self.header`` (a dict-like FITS
    header) and ``self.wcs`` (the underlying package's WCS object, or
    None when construction failed).
    """

    def get_keyword(self, key):
        """Return the value of FITS keyword *key* from the loaded header.

        Raises KeyError if the keyword is not present.
        """
        return self.header[key]

    def get_keywords(self, *args):
        """Return a list of the values of the given FITS keywords."""
        return list(map(lambda key: self.header[key], args))

    def fix_bad_headers(self):
        """Fix up bad headers that cause problems for WCSLIB.

        Subclass can override this method to fix up issues with the
        header for problem FITS files.
        """
        # WCSLIB doesn't like "nonstandard" units
        unit = self.header.get('CUNIT1', 'deg')
        if unit.upper() == 'DEGREE':
            self.header['CUNIT1'] = 'deg'
        unit = self.header.get('CUNIT2', 'deg')
        if unit.upper() == 'DEGREE':
            self.header['CUNIT2'] = 'deg'

    def has_valid_wcs(self):
        """Return True if a usable WCS object was successfully created."""
        # Idiom fix: identity comparison with None (PEP 8) instead of '!='.
        return self.wcs is not None
class AstropyWCS2(BaseWCS):
    """
    Astropy 1.0+ WCS / Coordinate System

    Uses astropy.wcs for pixel<->world transforms and the astropy 1.0+
    frame machinery (frame_transform_graph) for sky-system conversions.
    """

    def __init__(self, logger):
        super(AstropyWCS2, self).__init__()
        self.kind = 'astropy/WCSLIB'
        self.logger = logger
        self.header = None      # pyfits.Header, set by load_header()
        self.wcs = None         # astropy.wcs.WCS, or None if creation failed
        self.coordframe = None  # celestial frame derived from the WCS

    def load_header(self, header, fobj=None):
        """Build the WCS object and matching celestial frame from a FITS
        header.  On failure self.wcs is left as None.
        """
        from astropy.wcs.utils import wcs_to_celestial_frame
        # reconstruct a pyfits header, because otherwise we take an
        # incredible performance hit in astropy.wcs
        self.header = pyfits.Header(header.items())
        try:
            self.logger.debug("Trying to make astropy wcs object")
            self.wcs = pywcs.WCS(self.header, fobj=fobj, relax=True)
            self.coordframe = wcs_to_celestial_frame(self.wcs)
        except Exception as e:
            self.logger.error("Error making WCS object: %s" % (str(e)))
            self.wcs = None

    def vaild_transform_frames(self):
        # NOTE(review): the method name looks like a typo for
        # "valid_transform_frames"; preserved for backward compatibility.
        # Narrows the module-level coord_types to frames reachable from
        # this header's frame.
        global coord_types
        frames = [f.name for f in astropy.coordinates.frame_transform_graph.frame_set
                  if self.coordframe.is_transformable_to(f)]
        coord_types = frames

    def realize_frame(self, data):
        """
        Wrap frame.realize_frame, modify self.coordframe to reflect the
        new coords.

        Parameters
        ----------
        data : tuple of `astropy.units.Quantity`
            The coordinate data (assumed unit spherical)

        Returns
        -------
        None

        Notes
        -----
        This is really an ugly hack, which should be in BaseFrame. What it is
        doing is only changing the internal representation of the data in a Frame.
        This means that a new frame is not initilized, which is a substantial
        speed improvement.
        """
        # If the representation is a subclass of Spherical we need to check for
        # the new _unitrep attr to give the corresponding unit spherical subclass.
        if (issubclass(self.coordframe.representation,
                       astropy.coordinates.SphericalRepresentation) and
                hasattr(self.coordframe.representation, '_unitrep')):
            rep = self.coordframe.representation._unitrep(*data)
        elif issubclass(self.coordframe.representation,
                        astropy.coordinates.UnitSphericalRepresentation):
            rep = self.coordframe.representation(*data)
        else:
            self.logger.info("Falling back to UnitSphericalRepresentation"
                             " from {}".format(self.coordframe.representation))
            rep = astropy.coordinates.UnitSphericalRepresentation(*data)

        # NOTE(review): hasattr() is applied to the bound method object here;
        # this was probably meant to be hasattr(self.coordframe, '_set_data')
        # -- confirm against the targeted astropy version.  As written the
        # else branch (poking the private _data/_rep_cache) always runs.
        if hasattr(self.coordframe._set_data, '_set_data'):
            self.coordframe._set_data(rep)
        else:
            self.coordframe._data = rep
            self.coordframe._rep_cache[self.coordframe._data.__class__.__name__,
                                       False] = self.coordframe._data
        # This will eventually work, once upstream PR is complete.
        # self.coordframe = self.coordframe.realize_frame(rep, copy=False)

    def spectral_coord(self, idxs, coords='data'):
        """Return the world value of the third (spectral) axis at pixel *idxs*.

        *coords* selects the pixel origin: 'data' = 0-based, anything
        else = 1-based (traditional FITS).
        """
        if coords == 'data':
            origin = 0
        else:
            origin = 1
        pixcrd = numpy.array([idxs], numpy.float_)
        try:
            sky = self.wcs.all_pix2world(pixcrd, origin)
            return float(sky[0, 2])
        except Exception as e:
            self.logger.error("Error calculating spectral coordinate: %s" % (str(e)))
            raise WCSError(e)

    def pixtoradec(self, idxs, coords='data'):
        """Return (lon_deg, lat_deg) floats for pixel *idxs* in the native frame."""
        return self._frametofloats(self.pixtonative(idxs, coords=coords))

    def pixtonative(self, idxs, coords='data'):
        """
        Convert the pixel value to the native coordinate frame of the header
        """
        import astropy.units as u
        if coords == 'data':
            origin = 0
        else:
            origin = 1
        pixcrd = numpy.array([idxs], numpy.float_)
        try:
            sky = self.wcs.all_pix2world(pixcrd, origin)[0] * u.deg
        except Exception as e:
            self.logger.error("Error calculating pixtonative: %s" % (str(e)))
            raise WCSError(e)
        # Update our frame with the new data
        self.realize_frame(sky)
        return self.coordframe

    def radectopix(self, ra_deg, dec_deg, coords='data', naxispath=None):
        """Map (ra_deg, dec_deg) back to (x, y) pixel coordinates."""
        import astropy.units as u
        args = [ra_deg, dec_deg]
        if naxispath:
            # pad any trailing (e.g. spectral) axes with zeros
            args += [0] * len(naxispath)
        skycrd = u.Quantity(args, unit=u.deg)
        self.realize_frame(skycrd)
        return self.nativetopix(coords=coords, naxispath=naxispath)

    def nativetopix(self, coords='data', naxispath=None):
        """
        Take a frame in native coords and transform to pixel coordinates.
        """
        import astropy.units as u
        if coords == 'data':
            origin = 0
        else:
            origin = 1
        r = self.coordframe.data
        # first two components are the spatial axes, converted to degrees
        data = list([getattr(r, component).to(u.deg).value
                     for component in r.components[:2]])
        if naxispath:
            data += [0] * len(naxispath)
        data = numpy.array([data])
        pixels = self.wcs.wcs_world2pix(data, origin)[0][:2]
        return pixels

    def pixtocoords(self, idxs, system=None, coords='data'):
        """Return an astropy frame for pixel *idxs*, optionally transformed
        to the frame named *system*.
        """
        # NOTE(review): self.coordsys is never assigned in this class
        # (unlike the other wrappers, which set it in load_header); this
        # check likely raises AttributeError -- confirm intended attribute.
        if self.coordsys == 'raw':
            raise WCSError("No usable WCS")

        coord = self.pixtonative(idxs, coords=coords)
        if system is None:
            return coord

        toclass = astropy.coordinates.frame_transform_graph.lookup_name(system)

        transform = self.coordframe.is_transformable_to(toclass)
        if transform and transform != 'same':
            coord = coord.transform_to(toclass)
        else:
            self.logger.error("Frame {} is not Transformable to {}, falling back to {}".format(self.coordframe.name, toclass.name, self.coordframe.name))
            # self.prefs.set("wcs_coords", self.coordframe.name)

        return coord

    def pixtosystem(self, idxs, system=None, coords='data'):
        """Return (lon_deg, lat_deg) floats for pixel *idxs* in *system*."""
        if self.coordsys == 'pixel':
            x, y = self.pixtoradec(idxs, coords=coords)
            return (x, y)

        c = self.pixtocoords(idxs, system=system, coords=coords)
        return self._frametofloats(c)

    def _frametofloats(self, frame):
        """
        Take any astropy coord frame and return the first two components as
        floats in a tuple.
        """
        r = frame.data
        return tuple([getattr(r, component).value for component in r.components[:2]])
class AstropyWCS(BaseWCS):
    """A WCS interface for astropy.wcs

    You need to install python module 'astropy'

        http://pypi.python.org/pypi/astropy

    if you want to use this version.
    """

    def __init__(self, logger):
        super(AstropyWCS, self).__init__()
        if not have_astropy:
            raise WCSError("Please install module 'astropy' first!")
        self.logger = logger
        self.header = None
        self.wcs = None
        self.coordsys = 'raw'
        self.new_coords = False  # True when the v0.4+ frame API is available
        # Detect which generation of the astropy coordinates API is present
        # and, for the older ones, build a name -> class lookup table.
        if hasattr(coordinates, 'SkyCoord'):
            # v0.4 series astropy and later
            self.new_coords = True
        elif hasattr(coordinates, 'ICRS'):
            # v0.3 series astropy
            self.coord_table = {
                'icrs': coordinates.ICRS,
                'fk5': coordinates.FK5,
                'fk4': coordinates.FK4,
                'galactic': coordinates.Galactic,
                }
        else:
            # v0.2 series astropy
            self.coord_table = {
                'icrs': coordinates.ICRSCoordinates,
                'fk5': coordinates.FK5Coordinates,
                'fk4': coordinates.FK4Coordinates,
                'galactic': coordinates.GalacticCoordinates,
                }
        self.kind = 'astropy/WCSLIB'

    def load_header(self, header, fobj=None):
        """Build an astropy WCS object from a FITS header.

        On failure self.wcs is left as None and self.coordsys stays 'raw'.
        """
        # reconstruct a pyfits header, because otherwise we take an
        # incredible performance hit in astropy.wcs
        self.logger.debug("Reconstructing PyFITS header")
        self.header = pyfits.Header(header.items())

        try:
            self.logger.debug("Trying to make astropy-- wcs object")
            self.wcs = pywcs.WCS(self.header, fobj=fobj, relax=True)
            self.logger.debug("made astropy wcs object")

            self.coordsys = choose_coord_system(self.header)
            self.logger.debug("Coordinate system is: %s" % (self.coordsys))

        except Exception as e:
            self.logger.error("Error making WCS object: %s" % (str(e)))
            self.wcs = None

    def spectral_coord(self, idxs, coords='data'):
        """Return the world value of the third (spectral) axis at pixel *idxs*.

        *coords* selects the pixel origin: 'data' = 0-based, else 1-based.
        """
        if coords == 'data':
            origin = 0
        else:
            origin = 1
        pixcrd = numpy.array([idxs], numpy.float_)
        try:
            sky = self.wcs.all_pix2world(pixcrd, origin)
            return float(sky[0, 2])

        except Exception as e:
            self.logger.error("Error calculating spectral coordinate: %s" % (str(e)))
            raise WCSError(e)

    def pixtoradec(self, idxs, coords='data'):
        """Return (ra_deg, dec_deg) for pixel *idxs*.

        *coords* selects the pixel origin: 'data' = 0-based, else 1-based.
        """
        if coords == 'data':
            origin = 0
        else:
            origin = 1
        pixcrd = numpy.array([idxs], numpy.float_)
        try:
            #sky = self.wcs.wcs_pix2sky(pixcrd, origin)
            #sky = self.wcs.all_pix2sky(pixcrd, origin)
            # astropy only?
            sky = self.wcs.all_pix2world(pixcrd, origin)

        except Exception as e:
            self.logger.error("Error calculating pixtoradec: %s" % (str(e)))
            raise WCSError(e)

        ra_deg = float(sky[0, 0])
        dec_deg = float(sky[0, 1])

        return ra_deg, dec_deg

    def radectopix(self, ra_deg, dec_deg, coords='data', naxispath=None):
        """Map (ra_deg, dec_deg) back to (x, y) pixel coordinates.

        *naxispath*, if given, pads trailing (e.g. spectral) axes with zeros.
        """
        if coords == 'data':
            origin = 0
        else:
            origin = 1

        args = [ra_deg, dec_deg]
        if naxispath:
            args += [0] * len(naxispath)
        skycrd = numpy.array([args], numpy.float_)

        try:
            #pix = self.wcs.wcs_sky2pix(skycrd, origin)
            # Doesn't seem to be a all_sky2pix
            #pix = self.wcs.all_sky2pix(skycrd, origin)
            # astropy only?
            pix = self.wcs.wcs_world2pix(skycrd, origin)

        except Exception as e:
            self.logger.error("Error calculating radectopix: %s" % (str(e)))
            raise WCSError(e)

        x = float(pix[0, 0])
        y = float(pix[0, 1])
        return (x, y)

    def pixtocoords(self, idxs, system=None, coords='data'):
        """Return an astropy coordinate object for pixel *idxs*, transformed
        to *system* (defaults to 'icrs').
        """
        if self.coordsys == 'raw':
            raise WCSError("No usable WCS")

        if system is None:
            system = 'icrs'

        # Get a coordinates object based on ra/dec wcs transform
        ra_deg, dec_deg = self.pixtoradec(idxs, coords=coords)
        self.logger.debug("ra, dec = %f, %f" % (ra_deg, dec_deg))

        if not self.new_coords:
            # pre-v0.4 astropy: use the class lookup table
            # convert to astropy coord
            try:
                fromclass = self.coord_table[self.coordsys]
            except KeyError:
                raise WCSError("No such coordinate system available: '%s'" % (
                    self.coordsys))

            coord = fromclass(ra_deg, dec_deg,
                              unit=(units.degree, units.degree))

            # NOTE: 'system is None' is always False here, since system was
            # defaulted to 'icrs' above; only the coordsys equality matters.
            if (system is None) or (system == self.coordsys):
                return coord

            # Now give it back to the user in the system requested
            try:
                toclass = self.coord_table[system]
            except KeyError:
                raise WCSError("No such coordinate system available: '%s'" % (
                    system))

            coord = coord.transform_to(toclass)
        else:
            # v0.4+ astropy: look frames up in the transform graph
            frameClass = coordinates.frame_transform_graph.lookup_name(self.coordsys)
            coord = frameClass(ra_deg * units.degree, dec_deg * units.degree)
            toClass = coordinates.frame_transform_graph.lookup_name(system)
            # Skip in input and output is the same (no realize_frame
            # call in astropy)
            if toClass != frameClass:
                coord = coord.transform_to(toClass)

        return coord

    def _deg(self, coord):
        """Return the degree value of an angle, tolerating both API names."""
        # AstroPy changed the API so now we have to support more
        # than one--we don't know what version the user has installed!
        if hasattr(coord, 'degrees'):
            return coord.degrees
        else:
            return coord.degree

    def pixtosystem(self, idxs, system=None, coords='data'):
        """Return (lon_deg, lat_deg) floats for pixel *idxs* in *system*."""
        if self.coordsys == 'pixel':
            x, y = self.pixtoradec(idxs, coords=coords)
            return (x, y)

        c = self.pixtocoords(idxs, system=system, coords=coords)
        if not self.new_coords:
            # older astropy
            return (self._deg(c.lonangle), self._deg(c.latangle))
        else:
            r = c.data
            return tuple(map(self._deg, [getattr(r, component)
                                         for component in r.components[:2]]))
class AstLibWCS(BaseWCS):
    """A WCS interface for astLib.astWCS.WCS

    You need to install python module 'astLib'

        http://sourceforge.net/projects/astlib

    if you want to use this version.
    """

    def __init__(self, logger):
        super(AstLibWCS, self).__init__()
        if not have_astlib:
            raise WCSError("Please install package 'astLib' first!")
        self.logger = logger
        self.header = None
        self.wcs = None
        self.coordsys = 'raw'
        self.kind = 'astlib/wcstools'

    def load_header(self, header, fobj=None):
        """Build an astLib WCS object from a FITS header.

        On failure self.wcs is left as None and self.coordsys stays 'raw'.
        """
        self.header = {}
        self.header.update(header.items())
        self.fix_bad_headers()

        # reconstruct a pyfits header
        hdr = pyfits.Header(header.items())

        try:
            self.logger.debug("Trying to make astLib wcs object")
            self.wcs = astWCS.WCS(hdr, mode='pyfits')

            self.coordsys = self.choose_coord_system(self.header)
            self.logger.debug("Coordinate system is: %s" % (self.coordsys))

        except Exception as e:
            self.logger.error("Error making WCS object: %s" % (str(e)))
            self.wcs = None

    def choose_coord_system(self, header):
        """Map the generic coordinate-system key to astLib's naming
        ('b1950'/'j2000'/'pixel'), defaulting to 'j2000'.
        """
        coordsys = choose_coord_system(header)
        coordsys = coordsys.upper()
        if coordsys in ('FK4',):
            return 'b1950'
        elif coordsys in ('FK5', 'ICRS'):
            return 'j2000'
        elif coordsys in ('PIXEL',):
            return 'pixel'
        #raise WCSError("Cannot determine appropriate coordinate system from FITS header")
        return 'j2000'

    def spectral_coord(self, idxs, coords='data'):
        raise WCSError("This feature not supported by astWCS")

    def pixtoradec(self, idxs, coords='data'):
        """Return (ra_deg, dec_deg) for pixel *idxs*.

        *coords*='fits' means 1-based pixels; they are shifted to 0-based
        because astWCS.NUMPY_MODE is enabled at import time.
        """
        if coords == 'fits':
            # Via astWCS.NUMPY_MODE, we've forced pixels referenced from 0
            idxs = tuple(map(lambda x: x-1, idxs))

        try:
            ra_deg, dec_deg = self.wcs.pix2wcs(idxs[0], idxs[1])

        except Exception as e:
            self.logger.error("Error calculating pixtoradec: %s" % (str(e)))
            raise WCSError(e)

        return ra_deg, dec_deg

    def radectopix(self, ra_deg, dec_deg, coords='data', naxispath=None):
        """Map (ra_deg, dec_deg) back to (x, y) pixel coordinates."""
        try:
            x, y = self.wcs.wcs2pix(ra_deg, dec_deg)

        except Exception as e:
            self.logger.error("Error calculating radectopix: %s" % (str(e)))
            raise WCSError(e)

        if coords == 'fits':
            # Via astWCS.NUMPY_MODE, we've forced pixels referenced from 0
            x, y = x+1, y+1

        return (x, y)

    def pixtosystem(self, idxs, system=None, coords='data'):
        """Return (lon_deg, lat_deg) for pixel *idxs*, converted to *system*
        (defaults to 'j2000') via astCoords.
        """
        if self.coordsys == 'raw':
            raise WCSError("No usable WCS")

        if system is None:
            system = 'j2000'

        # Get a coordinates object based on ra/dec wcs transform
        ra_deg, dec_deg = self.pixtoradec(idxs, coords=coords)
        self.logger.debug("ra, dec = %f, %f" % (ra_deg, dec_deg))

        # convert to alternate coord
        try:
            fromsys = self.coordsys.upper()

            if fromsys == 'PIXEL':
                # these are really pixel values
                return (ra_deg, dec_deg)

            tosys = system.upper()

            if fromsys == 'B1950':
                equinox = 1950.0
            else:
                equinox = 2000.0

            lon_deg, lat_deg = astCoords.convertCoords(fromsys, tosys,
                                                       ra_deg, dec_deg,
                                                       equinox)
        except Exception as e:
            raise WCSError("Error converting between coordinate systems '%s' and '%s': %s" % (
                fromsys, tosys, str(e)))

        return (lon_deg, lat_deg)
class KapteynWCS(BaseWCS):
    """A WCS interface for kapteyn.wcs.Projection

    You need to install python module 'kapteyn'

        http://www.astro.rug.nl/software/kapteyn/

    if you want to use this version.
    """

    def __init__(self, logger):
        super(KapteynWCS, self).__init__()
        if not have_kapteyn:
            raise WCSError("Please install package 'kapteyn' first!")
        self.logger = logger
        self.header = None
        self.wcs = None
        self.coordsys = 'raw'
        self.kind = 'kapteyn/WCSLIB'
        # All pixel->world output from the Projection is reported in this
        # sky system (ICRS), regardless of the header's native system.
        self._skyout = "equatorial icrs J2000.0"

        # see: https://github.com/astropy/coordinates-benchmark/blob/master/kapteyn/convert.py
        self.conv_d = dict(fk5='fk5', fk4='fk4,J2000_OBS', icrs='icrs',
                           galactic='galactic', ecliptic='ecliptic,J2000')

    def load_header(self, header, fobj=None):
        """Build a kapteyn Projection from a FITS header.

        On failure self.wcs is left as None and self.coordsys stays 'raw'.
        """
        # For kapteyn, header just needs to be duck-typed like a dict
        self.header = {}
        self.header.update(header.items())
        self.fix_bad_headers()

        try:
            self.logger.debug("Trying to make kapteyn wcs object")
            self.wcs = kapwcs.Projection(self.header,
                                         skyout=self._skyout)

            self.coordsys = choose_coord_system(self.header)
            self.logger.debug("Coordinate system is: %s" % (self.coordsys))

        except Exception as e:
            self.logger.error("Error making WCS object: %s" % (str(e)))
            self.wcs = None

    def spectral_coord(self, idxs, coords='data'):
        """Return the world value of the spectral axis at pixel *idxs*.

        Returns None implicitly if toworld() yields an empty result.
        """
        # Kapteyn's WCS needs pixels referenced from 1
        if coords == 'data':
            idxs = tuple(map(lambda x: x+1, idxs))
        else:
            idxs = tuple(idxs)

        try:
            res = self.wcs.toworld(idxs)
            if len(res) > 0:
                return res[self.wcs.specaxnum-1]

        except Exception as e:
            self.logger.error("Error calculating spectral coordinate: %s" % (str(e)))
            raise WCSError(e)

    def pixtoradec(self, idxs, coords='data'):
        """Return (ra_deg, dec_deg) for pixel *idxs*.

        *coords*='data' means 0-based pixels (shifted to kapteyn's 1-based).
        """
        # Kapteyn's WCS needs pixels referenced from 1
        if coords == 'data':
            idxs = tuple(map(lambda x: x+1, idxs))
        else:
            idxs = tuple(idxs)

        try:
            res = self.wcs.toworld(idxs)
            # use the projection's axis numbers when available, else assume
            # the first two results are the spatial axes
            if (self.wcs.lonaxnum is not None) and (self.wcs.lataxnum is not None):
                ra_deg, dec_deg = res[self.wcs.lonaxnum-1], res[self.wcs.lataxnum-1]
            else:
                ra_deg, dec_deg = res[0], res[1]

        except Exception as e:
            self.logger.error("Error calculating pixtoradec: %s" % (str(e)))
            raise WCSError(e)

        return ra_deg, dec_deg

    def radectopix(self, ra_deg, dec_deg, coords='data', naxispath=None):
        """Map (ra_deg, dec_deg) back to (x, y) pixel coordinates.

        *naxispath*, if given, pads trailing axes with zeros.
        """
        args = [ra_deg, dec_deg]
        if naxispath:
            args += [0] * len(naxispath)
        args = tuple(args)

        try:
            pix = self.wcs.topixel(args)

        except Exception as e:
            self.logger.error("Error calculating radectopix: %s" % (str(e)))
            raise WCSError(e)

        if coords == 'data':
            # Kapteyn's WCS returns pixels referenced from 1
            pix = tuple(map(lambda x: x-1, pix))

        x, y = pix[0], pix[1]
        return (x, y)

    def pixtosystem(self, idxs, system=None, coords='data'):
        """Return (lon_deg, lat_deg) for pixel *idxs*, converted from the
        ICRS skyout to *system* (defaults to 'icrs') via kapteyn's
        Transformation.
        """
        if self.coordsys == 'raw':
            raise WCSError("No usable WCS")

        if system is None:
            system = 'icrs'

        # Get a coordinates object based on ra/dec wcs transform
        ra_deg, dec_deg = self.pixtoradec(idxs, coords=coords)
        self.logger.debug("ra, dec = %f, %f" % (ra_deg, dec_deg))

        if self.coordsys == 'pixel':
            return (ra_deg, dec_deg)

        # convert to alternate coord
        spec = self.conv_d[system]
        tran = kapwcs.Transformation(self._skyout, spec)
        lon_deg, lat_deg = tran((ra_deg, dec_deg))

        return lon_deg, lat_deg
class StarlinkWCS(BaseWCS):
    """A WCS interface for Starlink

    You need to install python module 'starlink-pyast'

        https://pypi.org/project/starlink-pyast/

    if you want to use this version.
    """

    def __init__(self, logger):
        super(StarlinkWCS, self).__init__()
        if not have_starlink:
            raise WCSError("Please install package 'starlink-pyast' first!")
        self.logger = logger
        self.header = None
        self.wcs = None
        self.coordsys = 'raw'
        self.kind = 'starlink'

    def load_header(self, header, fobj=None):
        """Build an Ast FrameSet from a FITS header, plus a transform from
        the header's frame to ICRS/J2000.

        On failure self.wcs is left as None and self.coordsys stays 'raw'.
        """
        self.header = {}
        self.header.update(header.items())
        self.fix_bad_headers()

        # serialize the header into 80-column FITS-style "cards" that
        # Ast.FitsChan can parse
        source = []
        for key, value in header.items():
            source.append("%-8.8s= %-70.70s" % (key, repr(value)))

        # following https://gist.github.com/dsberry/4171277 to get a
        # usable WCS in Ast
        try:
            self.logger.debug("Trying to make starlink wcs object")
            # read in the header and create the default WCS transform
            #adapter = Atl.PyFITSAdapter(hdu)
            #fitschan = Ast.FitsChan(adapter)
            fitschan = Ast.FitsChan(source)
            self.wcs = fitschan.read()
            # self.wcs is a FrameSet, with a Mapping
            #self.wcs.Report = True

            self.coordsys = choose_coord_system(self.header)
            self.logger.debug("Coordinate system is: %s" % (self.coordsys))

        except Exception as e:
            self.logger.error("Error making WCS object: %s" % (str(e)))
            self.wcs = None

        try:
            # define a transform from this destination frame to icrs/j2000
            refframe = self.wcs.getframe(2)
            toframe = Ast.SkyFrame("System=ICRS, Equinox=J2000")
            self.icrs_trans = refframe.convert(toframe)

        except Exception as e:
            self.logger.error("Error making ICRS transform: %s" % (str(e)))

    def spectral_coord(self, idxs, coords='data'):
        """Return the world value of the third (spectral) axis at pixel *idxs*."""
        # Starlink's WCS needs pixels referenced from 1
        if coords == 'data':
            # BUGFIX: this used numpy.array(map(...)), but 'map' here is the
            # iterator version (imported from six.moves at the top of the
            # file), so numpy.array() produced a useless 0-d object array.
            # Materialize the map into a list first, as pixtoradec() does.
            idxs = numpy.array(list(map(lambda x: x+1, idxs)))
        else:
            idxs = numpy.array(idxs)

        try:
            # pixel to sky coords (in the WCS specified transform)
            arrs = [ [idxs[i]] for i in range(len(idxs)) ]
            res = self.wcs.tran(arrs, 1)
            return res[2][0]

        except Exception as e:
            self.logger.error("Error calculating spectral coordinate: %s" % (str(e)))
            raise WCSError(e)

    def pixtoradec(self, idxs, coords='data'):
        """Return (ra_deg, dec_deg) for pixel *idxs*.

        *coords*='data' means 0-based pixels (shifted to Ast's 1-based).
        """
        # Starlink's WCS needs pixels referenced from 1
        if coords == 'data':
            idxs = numpy.array(list(map(lambda x: x+1, idxs)))
        else:
            idxs = numpy.array(idxs)

        try:
            # pixel to sky coords (in the WCS specified transform)
            arrs = [ [idxs[i]] for i in range(len(idxs)) ]
            res = self.wcs.tran(arrs, 1)
            if self.coordsys not in ('pixel', 'raw'):
                # whatever sky coords to icrs coords
                res = self.icrs_trans.tran(res, 1)
            # TODO: what if axes are inverted?
            ra_rad, dec_rad = res[0][0], res[1][0]
            # Ast works in radians; convert to degrees for our callers
            ra_deg, dec_deg = math.degrees(ra_rad), math.degrees(dec_rad)

        except Exception as e:
            self.logger.error("Error calculating pixtoradec: %s" % (str(e)))
            raise WCSError(e)

        return ra_deg, dec_deg

    def radectopix(self, ra_deg, dec_deg, coords='data', naxispath=None):
        """Map (ra_deg, dec_deg) back to (x, y) pixel coordinates.

        *naxispath*, if given, pads trailing axes with zeros.
        """
        try:
            # sky coords to pixel (in the WCS specified transform)
            ra_rad, dec_rad = math.radians(ra_deg), math.radians(dec_deg)
            # TODO: what if spatial axes are inverted?
            args = [ra_rad, dec_rad]
            if naxispath:
                args += [0] * len(naxispath)
            arrs = [ [args[i]] for i in range(len(args)) ]
            # 0 as second arg -> inverse transform
            res = self.wcs.tran(arrs, 0)
            x, y = res[0][0], res[1][0]

        except Exception as e:
            self.logger.error("Error calculating radectopix: %s" % (str(e)))
            raise WCSError(e)

        if coords == 'data':
            # Starlink's WCS returns pixels referenced from 1
            x, y = x-1, y-1
        return (x, y)

    def pixtosystem(self, idxs, system=None, coords='data'):
        """Return (lon_deg, lat_deg) for pixel *idxs*, converted from ICRS
        to *system* (defaults to 'icrs').
        """
        if self.coordsys == 'raw':
            raise WCSError("No usable WCS")

        if system is None:
            system = 'icrs'

        # Get a coordinates object based on ra/dec wcs transform
        ra_deg, dec_deg = self.pixtoradec(idxs, coords=coords)
        self.logger.debug("ra, dec = %f, %f" % (ra_deg, dec_deg))

        if self.coordsys == 'pixel':
            # these will actually be x, y pixel values
            return (ra_deg, dec_deg)

        # define a transform from reference (icrs/j2000) to user's end choice
        refframe = self.icrs_trans.getframe(2)
        toframe = Ast.SkyFrame("System=%s, Epoch=2000.0" % (system.upper()))
        end_trans = refframe.convert(toframe)

        # convert to alternate coord
        ra_rad, dec_rad = math.radians(ra_deg), math.radians(dec_deg)
        res = end_trans.tran([[ra_rad], [dec_rad]], 1)
        lon_rad, lat_rad = res[0][0], res[1][0]
        lon_deg, lat_deg = math.degrees(lon_rad), math.degrees(lat_rad)

        return lon_deg, lat_deg
class BareBonesWCS(BaseWCS):
    """A very basic WCS. Assumes J2000, units in degrees, projection TAN.

    ***** NOTE *****:
    We strongly recommend that you install one of the 3rd party python
    WCS modules referred to at the top of this module, all of which are
    much more capable than BareBonesWCS.
    ****************
    """

    def __init__(self, logger):
        super(BareBonesWCS, self).__init__()
        self.logger = logger
        self.header = {}
        self.coordsys = 'raw'
        self.kind = 'barebones'
        # There is no underlying WCS object here; a truthy sentinel keeps
        # has_valid_wcs() working.
        self.wcs = True

    def load_header(self, header, fobj=None):
        """Copy the FITS header into a plain dict and pick a coordinate system."""
        self.header = {}
        self.header.update(header.items())
        self.fix_bad_headers()
        self.coordsys = choose_coord_system(self.header)

    # WCS calculations
    def get_reference_pixel(self):
        """Return the reference pixel (CRPIX1, CRPIX2) as floats."""
        x = float(self.get_keyword('CRPIX1'))
        y = float(self.get_keyword('CRPIX2'))
        return x, y

    def get_physical_reference_pixel(self):
        """Return (CRVAL1, CRVAL2) in degrees, sanity-checking their ranges.

        NOTE: the range checks use `assert` and are therefore skipped when
        Python runs with optimization (-O).
        """
        xv = float(self.get_keyword('CRVAL1'))
        yv = float(self.get_keyword('CRVAL2'))
        assert 0.0 <= xv < 360.0, \
            WCSError("CRVAL1 out of range: %f" % (xv))
        assert -90.0 <= yv <= 90.0, \
            WCSError("CRVAL2 out of range: %f" % (yv))
        return xv, yv

    def get_pixel_coordinates(self):
        """Return the CD matrix (cd11, cd12, cd21, cd22).

        Falls back from the CDi_j keywords to PCi_j * CDELTi, and then to
        the older PC00i00j convention.
        """
        try:
            cd11 = float(self.get_keyword('CD1_1'))
            cd12 = float(self.get_keyword('CD1_2'))
            cd21 = float(self.get_keyword('CD2_1'))
            cd22 = float(self.get_keyword('CD2_2'))

        except Exception as e:
            cdelt1 = float(self.get_keyword('CDELT1'))
            cdelt2 = float(self.get_keyword('CDELT2'))
            try:
                cd11 = float(self.get_keyword('PC1_1')) * cdelt1
                cd12 = float(self.get_keyword('PC1_2')) * cdelt1
                cd21 = float(self.get_keyword('PC2_1')) * cdelt2
                cd22 = float(self.get_keyword('PC2_2')) * cdelt2
            except KeyError:
                cd11 = float(self.get_keyword('PC001001')) * cdelt1
                cd12 = float(self.get_keyword('PC001002')) * cdelt1
                cd21 = float(self.get_keyword('PC002001')) * cdelt2
                cd22 = float(self.get_keyword('PC002002')) * cdelt2

        return (cd11, cd12, cd21, cd22)

    def spectral_coord(self, idxs, coords='data'):
        raise WCSError("This feature not supported by BareBonesWCS")

    def pixtoradec(self, idxs, coords='data'):
        """Convert a (x, y) pixel coordinate on the image to a (ra, dec)
        coordinate in space.

        Parameter (coords):
        - if 'data' then x, y coordinates are interpreted as 0-based
        - otherwise coordinates are interpreted as 1-based (traditional FITS)
        """
        x, y = idxs[:2]

        # account for DATA->FITS coordinate space
        if coords == 'data':
            x, y = x + 1, y + 1

        crpix1, crpix2 = self.get_reference_pixel()
        crval1, crval2 = self.get_physical_reference_pixel()
        cd11, cd12, cd21, cd22 = self.get_pixel_coordinates()

        ra_deg = (cd11 * (x - crpix1) + cd12 *
                  (y - crpix2)) / math.cos(math.radians(crval2)) + crval1
        dec_deg = cd21 * (x - crpix1) + cd22 * (y - crpix2) + crval2
        return ra_deg, dec_deg

    def radectopix(self, ra_deg, dec_deg, coords='data', naxispath=None):
        """Convert a (ra_deg, dec_deg) space coordinates to (x, y) pixel
        coordinates on the image.  ra and dec are expected as floats in
        degrees.

        Parameter (coords):
        - if 'data' then x, y coordinates are returned as 0-based
        - otherwise coordinates are returned as 1-based (traditional FITS)
        """
        crpix1, crpix2 = self.get_reference_pixel()
        crval1, crval2 = self.get_physical_reference_pixel()
        cd11, cd12, cd21, cd22 = self.get_pixel_coordinates()

        # reverse matrix (determinant of the CD matrix)
        rmatrix = (cd11 * cd22) - (cd12 * cd21)

        # BUGFIX: the original tested the determinant with the Python 2
        # builtin cmp(), which does not exist in Python 3; compare directly.
        if rmatrix == 0.0:
            raise WCSError("WCS Matrix Error: check values")

        # Adjust RA as necessary
        if (ra_deg - crval1) > 180.0:
            ra_deg -= 360.0
        elif (ra_deg - crval1) < -180.0:
            ra_deg += 360.0

        try:
            x = (cd22 * math.cos(crval2 * math.pi/180.0) *
                 (ra_deg - crval1) - cd12 *
                 (dec_deg - crval2))/rmatrix + crpix1
            y = (cd11 * (dec_deg - crval2) - cd21 *
                 math.cos(crval2 * math.pi/180.0) *
                 (ra_deg - crval1))/rmatrix + crpix2

        except Exception as e:
            raise WCSError("radectopix calculation error: %s" % str(e))

        # account for FITS->DATA space
        if coords == 'data':
            x, y = x - 1, y - 1
        return (x, y)

    def pixtosystem(self, idxs, system=None, coords='data'):
        """Same as pixtoradec(); this basic WCS supports only its native system."""
        return self.pixtoradec(idxs, coords=coords)
class WcslibWCS(AstropyWCS):
    """DO NOT USE--this class name to be deprecated."""
    # Backwards-compatibility alias for AstropyWCS, kept only so existing
    # code referring to WcslibWCS continues to work.
    pass
################## Help functions ##################
def choose_coord_units(header):
    """Return the appropriate key code for the units value for the axes by
    examining the FITS header.

    Raises KeyError if CUNIT1 is absent; unrecognized units fall back to
    'degree' rather than raising an error.
    """
    if re.match(r'^deg\s*$', header['CUNIT1']):
        return 'degree'
    # Unrecognized units are treated as degrees.
    return 'degree'
def _radecsys_from_equinox(header):
    """Pick a RA/DEC system name from the EQUINOX keyword (helper for
    choose_coord_system): FK4 prior to 1984, FK5 from 1984 on, and ICRS
    when EQUINOX is absent.
    """
    try:
        if float(header['EQUINOX']) < 1984.0:
            return 'FK4'
        return 'FK5'
    except KeyError:
        return 'ICRS'

def choose_coord_system(header):
    """Return an appropriate key code for the axes coordinate system by
    examining the FITS header.
    """
    try:
        ctype = header['CTYPE1'].strip().upper()
    except KeyError:
        # No CTYPE1: probe for an "RA" keyword and derive the frame from
        # EQUINOX; with neither present we cannot tell -> 'raw'.
        try:
            header['RA']
        except KeyError:
            return 'raw'
        return _radecsys_from_equinox(header).lower()

    if re.match(r'^GLON\-.*$', ctype):
        return 'galactic'
    if re.match(r'^ELON\-.*$', ctype):
        return 'ecliptic'
    if re.match(r'^RA\-\-\-.*$', ctype):
        # Prefer an explicit RADECSYS (or its FITS-standard spelling
        # RADESYS); otherwise fall back to the EQUINOX-based default.
        radecsys = None
        for hdkey in ('RADECSYS', 'RADESYS'):
            try:
                radecsys = header[hdkey]
                break
            except KeyError:
                continue
        if radecsys is None:
            # RADESYS defaults to ICRS unless EQUINOX is given alone,
            # in which case it defaults to FK4 prior to 1984 and FK5
            # after 1984.
            radecsys = _radecsys_from_equinox(header)
        return radecsys.strip().lower()
    if re.match(r'^HPLN\-.*$', ctype):
        return 'helioprojective'
    if re.match(r'^HGLT\-.*$', ctype):
        return 'heliographicstonyhurst'
    if re.match(r'^PIXEL$', ctype):
        return 'pixel'
    # Unrecognized CTYPE1: assume ICRS rather than raising an error.
    return 'icrs'
if not wcs_configured:
    # use() was not called explicitly before now, so fall back to the
    # built-in minimal WCS and then try to auto-detect an installed
    # third-party package.
    # default
    WCS = BareBonesWCS

    # try to use them in this order
    order = ('kapteyn', 'starlink', 'astlib', 'astropy', 'astropy2')
    for name in order:
        try:
            if use(name, raise_err=False):
                break
        except Exception as e:
            # a broken installation of one candidate should not prevent
            # trying the remaining ones
            continue

#END
|
|
#
# Test script for the curses module
#
# This script doesn't actually display anything very coherent, but it
# does call (nearly) every method and function.
#
# Functions not tested: {def,reset}_{shell,prog}_mode, getch(), getstr(),
# init_color()
# Only called, not tested: getmouse(), ungetmouse()
#
import os
import string
import sys
import tempfile
import unittest
from test.support import requires, import_module, verbose, SaveSignals
# Optionally test curses module. This currently requires that the
# 'curses' resource be given on the regrtest command line using the -u
# option. If not available, nothing after this line will be executed.
import inspect
requires('curses')
# If either of these don't exist, skip the tests.
curses = import_module('curses')
import_module('curses.ascii')
import_module('curses.textpad')
try:
import curses.panel
except ImportError:
pass
def requires_curses_func(name):
    """Return a skip-decorator that runs the test only when the curses
    module exposes the attribute *name*."""
    available = hasattr(curses, name)
    reason = 'requires curses.%s' % name
    return unittest.skipUnless(available, reason)
term = os.environ.get('TERM')
# If newterm was supported we could use it instead of initscr and not exit
@unittest.skipIf(not term or term == 'unknown',
                 "$TERM=%r, calling initscr() may cause exit" % term)
@unittest.skipIf(sys.platform == "cygwin",
                 "cygwin's curses mostly just hangs")
class TestCurses(unittest.TestCase):
    """Smoke-tests for the curses extension: call (nearly) every window
    method and module-level function once and check a handful of known
    error conditions.  Requires a real terminal (initscr/endwin)."""
    @classmethod
    def setUpClass(cls):
        if not sys.__stdout__.isatty():
            # Temporary skip tests on non-tty
            raise unittest.SkipTest('sys.__stdout__ is not a tty')
            # NOTE(review): the two lines below are unreachable after the
            # raise above; they are the original non-tty setup path that
            # the temporary skip disabled.
            cls.tmp = tempfile.TemporaryFile()
            fd = cls.tmp.fileno()
        else:
            cls.tmp = None
            fd = sys.__stdout__.fileno()
        # testing setupterm() inside initscr/endwin
        # causes terminal breakage
        curses.setupterm(fd=fd)
    @classmethod
    def tearDownClass(cls):
        # Close the temp file only if the non-tty path created one.
        if cls.tmp:
            cls.tmp.close()
            del cls.tmp
    def setUp(self):
        # Save signal handlers: initscr() installs its own.
        self.save_signals = SaveSignals()
        self.save_signals.save()
        if verbose:
            # just to make the test output a little more readable
            print()
        self.stdscr = curses.initscr()
        curses.savetty()
    def tearDown(self):
        # Restore terminal modes and signal handlers saved in setUp().
        curses.resetty()
        curses.endwin()
        self.save_signals.restore()
    def test_window_funcs(self):
        "Test the methods of windows"
        stdscr = self.stdscr
        win = curses.newwin(10,10)
        win = curses.newwin(5,5, 5,5)
        win2 = curses.newwin(15,15, 5,5)
        # Exercise text-writing methods with and without coordinates/attrs.
        for meth in [stdscr.addch, stdscr.addstr]:
            for args in [('a',), ('a', curses.A_BOLD),
                         (4,4, 'a'), (5,5, 'a', curses.A_BOLD)]:
                with self.subTest(meth=meth.__qualname__, args=args):
                    meth(*args)
        # Methods that take no arguments.
        for meth in [stdscr.clear, stdscr.clrtobot,
                     stdscr.clrtoeol, stdscr.cursyncup, stdscr.delch,
                     stdscr.deleteln, stdscr.erase, stdscr.getbegyx,
                     stdscr.getbkgd, stdscr.getkey, stdscr.getmaxyx,
                     stdscr.getparyx, stdscr.getyx, stdscr.inch,
                     stdscr.insertln, stdscr.instr, stdscr.is_wintouched,
                     win.noutrefresh, stdscr.redrawwin, stdscr.refresh,
                     stdscr.standout, stdscr.standend, stdscr.syncdown,
                     stdscr.syncup, stdscr.touchwin, stdscr.untouchwin]:
            with self.subTest(meth=meth.__qualname__):
                meth()
        stdscr.addnstr('1234', 3)
        stdscr.addnstr('1234', 3, curses.A_BOLD)
        stdscr.addnstr(4,4, '1234', 3)
        stdscr.addnstr(5,5, '1234', 3, curses.A_BOLD)
        stdscr.attron(curses.A_BOLD)
        stdscr.attroff(curses.A_BOLD)
        stdscr.attrset(curses.A_BOLD)
        stdscr.bkgd(' ')
        stdscr.bkgd(' ', curses.A_REVERSE)
        stdscr.bkgdset(' ')
        stdscr.bkgdset(' ', curses.A_REVERSE)
        # border() accepts both ints and one-char strings/bytes.
        win.border(65, 66, 67, 68,
                   69, 70, 71, 72)
        win.border('|', '!', '-', '_',
                   '+', '\\', '#', '/')
        with self.assertRaises(TypeError,
                               msg="Expected win.border() to raise TypeError"):
            win.border(65, 66, 67, 68,
                       69, [], 71, 72)
        win.box(65, 67)
        win.box('!', '_')
        win.box(b':', b'~')
        self.assertRaises(TypeError, win.box, 65, 66, 67)
        self.assertRaises(TypeError, win.box, 65)
        win.box()
        stdscr.clearok(1)
        win4 = stdscr.derwin(2,2)
        win4 = stdscr.derwin(1,1, 5,5)
        win4.mvderwin(9,9)
        stdscr.echochar('a')
        stdscr.echochar('a', curses.A_BOLD)
        stdscr.hline('-', 5)
        stdscr.hline('-', 5, curses.A_BOLD)
        stdscr.hline(1,1,'-', 5)
        stdscr.hline(1,1,'-', 5, curses.A_BOLD)
        stdscr.idcok(1)
        stdscr.idlok(1)
        # immedok is not available on every curses build.
        if hasattr(stdscr, 'immedok'):
            stdscr.immedok(1)
            stdscr.immedok(0)
        stdscr.insch('c')
        stdscr.insdelln(1)
        stdscr.insnstr('abc', 3)
        stdscr.insnstr('abc', 3, curses.A_BOLD)
        stdscr.insnstr(5, 5, 'abc', 3)
        stdscr.insnstr(5, 5, 'abc', 3, curses.A_BOLD)
        stdscr.insstr('def')
        stdscr.insstr('def', curses.A_BOLD)
        stdscr.insstr(5, 5, 'def')
        stdscr.insstr(5, 5, 'def', curses.A_BOLD)
        stdscr.is_linetouched(0)
        stdscr.keypad(1)
        stdscr.leaveok(1)
        stdscr.move(3,3)
        win.mvwin(2,2)
        stdscr.nodelay(1)
        stdscr.notimeout(1)
        win2.overlay(win)
        win2.overwrite(win)
        win2.overlay(win, 1, 2, 2, 1, 3, 3)
        win2.overwrite(win, 1, 2, 2, 1, 3, 3)
        stdscr.redrawln(1,2)
        stdscr.scrollok(1)
        stdscr.scroll()
        stdscr.scroll(2)
        stdscr.scroll(-3)
        stdscr.move(12, 2)
        stdscr.setscrreg(10,15)
        win3 = stdscr.subwin(10,10)
        win3 = stdscr.subwin(10,10, 5,5)
        # syncok is known to be broken on SunOS curses.
        if hasattr(stdscr, 'syncok') and not sys.platform.startswith("sunos"):
            stdscr.syncok(1)
        stdscr.timeout(5)
        stdscr.touchline(5,5)
        stdscr.touchline(5,5,0)
        stdscr.vline('a', 3)
        stdscr.vline('a', 3, curses.A_STANDOUT)
        if hasattr(stdscr, 'chgat'):
            stdscr.chgat(5, 2, 3, curses.A_BLINK)
            stdscr.chgat(3, curses.A_BOLD)
            stdscr.chgat(5, 8, curses.A_UNDERLINE)
            stdscr.chgat(curses.A_BLINK)
        stdscr.refresh()
        stdscr.vline(1,1, 'a', 3)
        stdscr.vline(1,1, 'a', 3, curses.A_STANDOUT)
        if hasattr(stdscr, 'resize'):
            stdscr.resize(25, 80)
        if hasattr(stdscr, 'enclose'):
            stdscr.enclose(10, 10)
        # Negative lengths must be rejected.
        self.assertRaises(ValueError, stdscr.getstr, -400)
        self.assertRaises(ValueError, stdscr.getstr, 2, 3, -400)
        self.assertRaises(ValueError, stdscr.instr, -2)
        self.assertRaises(ValueError, stdscr.instr, 2, 3, -2)
    def test_embedded_null_chars(self):
        # reject embedded null bytes and characters
        stdscr = self.stdscr
        for arg in ['a', b'a']:
            with self.subTest(arg=arg):
                self.assertRaises(ValueError, stdscr.addstr, 'a\0')
                self.assertRaises(ValueError, stdscr.addnstr, 'a\0', 1)
                self.assertRaises(ValueError, stdscr.insstr, 'a\0')
                self.assertRaises(ValueError, stdscr.insnstr, 'a\0', 1)
    def test_module_funcs(self):
        "Test module-level functions"
        # Zero-argument module functions, called once each.
        for func in [curses.baudrate, curses.beep, curses.can_change_color,
                     curses.cbreak, curses.def_prog_mode, curses.doupdate,
                     curses.flash, curses.flushinp,
                     curses.has_colors, curses.has_ic, curses.has_il,
                     curses.isendwin, curses.killchar, curses.longname,
                     curses.nocbreak, curses.noecho, curses.nonl,
                     curses.noqiflush, curses.noraw,
                     curses.reset_prog_mode, curses.termattrs,
                     curses.termname, curses.erasechar]:
            with self.subTest(func=func.__qualname__):
                func()
        if hasattr(curses, 'filter'):
            curses.filter()
        if hasattr(curses, 'getsyx'):
            curses.getsyx()
        # Functions that actually need arguments
        if curses.tigetstr("cnorm"):
            curses.curs_set(1)
        curses.delay_output(1)
        curses.echo() ; curses.echo(1)
        # Round-trip a window through putwin/getwin.
        with tempfile.TemporaryFile() as f:
            self.stdscr.putwin(f)
            f.seek(0)
            curses.getwin(f)
        curses.halfdelay(1)
        curses.intrflush(1)
        curses.meta(1)
        curses.napms(100)
        curses.newpad(50,50)
        win = curses.newwin(5,5)
        win = curses.newwin(5,5, 1,1)
        curses.nl() ; curses.nl(1)
        curses.putp(b'abc')
        curses.qiflush()
        curses.raw() ; curses.raw(1)
        curses.set_escdelay(25)
        self.assertEqual(curses.get_escdelay(), 25)
        curses.set_tabsize(4)
        self.assertEqual(curses.get_tabsize(), 4)
        if hasattr(curses, 'setsyx'):
            curses.setsyx(5,5)
        curses.tigetflag('hc')
        curses.tigetnum('co')
        curses.tigetstr('cr')
        curses.tparm(b'cr')
        if hasattr(curses, 'typeahead'):
            curses.typeahead(sys.__stdin__.fileno())
        curses.unctrl('a')
        curses.ungetch('a')
        if hasattr(curses, 'use_env'):
            curses.use_env(1)
    # Functions only available on a few platforms
    def test_colors_funcs(self):
        if not curses.has_colors():
            self.skipTest('requires colors support')
        curses.start_color()
        curses.init_pair(2, 1,1)
        curses.color_content(1)
        curses.color_pair(2)
        curses.pair_content(curses.COLOR_PAIRS - 1)
        curses.pair_number(0)
        if hasattr(curses, 'use_default_colors'):
            curses.use_default_colors()
    @requires_curses_func('keyname')
    def test_keyname(self):
        curses.keyname(13)
    @requires_curses_func('has_key')
    def test_has_key(self):
        curses.has_key(13)
    @requires_curses_func('getmouse')
    def test_getmouse(self):
        (availmask, oldmask) = curses.mousemask(curses.BUTTON1_PRESSED)
        if availmask == 0:
            self.skipTest('mouse stuff not available')
        curses.mouseinterval(10)
        # just verify these don't cause errors
        curses.ungetmouse(0, 0, 0, 0, curses.BUTTON1_PRESSED)
        m = curses.getmouse()
    @requires_curses_func('panel')
    def test_userptr_without_set(self):
        w = curses.newwin(10, 10)
        p = curses.panel.new_panel(w)
        # try to access userptr() before calling set_userptr() -- segfaults
        with self.assertRaises(curses.panel.error,
                               msg='userptr should fail since not set'):
            p.userptr()
    @requires_curses_func('panel')
    def test_userptr_memory_leak(self):
        w = curses.newwin(10, 10)
        p = curses.panel.new_panel(w)
        obj = object()
        nrefs = sys.getrefcount(obj)
        # Repeated set/clear must not accumulate references to obj.
        for i in range(100):
            p.set_userptr(obj)
        p.set_userptr(None)
        self.assertEqual(sys.getrefcount(obj), nrefs,
                         "set_userptr leaked references")
    @requires_curses_func('panel')
    def test_userptr_segfault(self):
        w = curses.newwin(10, 10)
        panel = curses.panel.new_panel(w)
        # A destructor that re-enters set_userptr must not crash.
        class A:
            def __del__(self):
                panel.set_userptr(None)
        panel.set_userptr(A())
        panel.set_userptr(None)
    @requires_curses_func('panel')
    def test_new_curses_panel(self):
        w = curses.newwin(10, 10)
        panel = curses.panel.new_panel(w)
        # The panel type must not be directly instantiable.
        self.assertRaises(TypeError, type(panel))
    @requires_curses_func('is_term_resized')
    def test_is_term_resized(self):
        curses.is_term_resized(*self.stdscr.getmaxyx())
    @requires_curses_func('resize_term')
    def test_resize_term(self):
        curses.resize_term(*self.stdscr.getmaxyx())
    @requires_curses_func('resizeterm')
    def test_resizeterm(self):
        stdscr = self.stdscr
        lines, cols = curses.LINES, curses.COLS
        new_lines = lines - 1
        new_cols = cols + 1
        curses.resizeterm(new_lines, new_cols)
        # resizeterm() must update the module-level LINES/COLS globals.
        self.assertEqual(curses.LINES, new_lines)
        self.assertEqual(curses.COLS, new_cols)
    def test_issue6243(self):
        curses.ungetch(1025)
        self.stdscr.getkey()
    @requires_curses_func('unget_wch')
    @unittest.skipIf(getattr(curses, 'ncurses_version', (99,)) < (5, 8),
                     "unget_wch is broken in ncurses 5.7 and earlier")
    def test_unget_wch(self):
        stdscr = self.stdscr
        encoding = stdscr.encoding
        for ch in ('a', '\xe9', '\u20ac', '\U0010FFFF'):
            try:
                ch.encode(encoding)
            except UnicodeEncodeError:
                # Skip characters the terminal encoding cannot represent.
                continue
            try:
                curses.unget_wch(ch)
            except Exception as err:
                self.fail("unget_wch(%a) failed with encoding %s: %s"
                          % (ch, stdscr.encoding, err))
            read = stdscr.get_wch()
            self.assertEqual(read, ch)
            # unget_wch also accepts the integer code point.
            code = ord(ch)
            curses.unget_wch(code)
            read = stdscr.get_wch()
            self.assertEqual(read, ch)
    def test_issue10570(self):
        b = curses.tparm(curses.tigetstr("cup"), 5, 3)
        self.assertIs(type(b), bytes)
    def test_encoding(self):
        stdscr = self.stdscr
        import codecs
        encoding = stdscr.encoding
        codecs.lookup(encoding)
        # encoding must be a string and must not be deletable.
        with self.assertRaises(TypeError):
            stdscr.encoding = 10
        stdscr.encoding = encoding
        with self.assertRaises(TypeError):
            del stdscr.encoding
    def test_issue21088(self):
        stdscr = self.stdscr
        #
        # http://bugs.python.org/issue21088
        #
        # the bug:
        # when converting curses.window.addch to Argument Clinic
        # the first two parameters were switched.
        # if someday we can represent the signature of addch
        # we will need to rewrite this test.
        try:
            signature = inspect.signature(stdscr.addch)
            self.assertFalse(signature)
        except ValueError:
            # not generating a signature is fine.
            pass
        # So. No signature for addch.
        # But Argument Clinic gave us a human-readable equivalent
        # as the first line of the docstring. So we parse that,
        # and ensure that the parameters appear in the correct order.
        # Since this is parsing output from Argument Clinic, we can
        # be reasonably certain the generated parsing code will be
        # correct too.
        human_readable_signature = stdscr.addch.__doc__.split("\n")[0]
        self.assertIn("[y, x,]", human_readable_signature)
    def test_issue13051(self):
        stdscr = self.stdscr
        if not hasattr(stdscr, 'resize'):
            raise unittest.SkipTest('requires curses.window.resize')
        box = curses.textpad.Textbox(stdscr, insert_mode=True)
        lines, cols = stdscr.getmaxyx()
        stdscr.resize(lines-2, cols-2)
        # this may cause infinite recursion, leading to a RuntimeError
        box._insert_printable_char('a')
class MiscTests(unittest.TestCase):
    """Tests for curses module attributes that do not need a terminal."""
    @requires_curses_func('update_lines_cols')
    def test_update_lines_cols(self):
        # this doesn't actually test that LINES and COLS are updated,
        # because we can't automate changing them. See Issue #4254 for
        # a manual test script. We can only test that the function
        # can be called.
        curses.update_lines_cols()
    @requires_curses_func('ncurses_version')
    def test_ncurses_version(self):
        # ncurses_version is a named tuple of three non-negative ints,
        # accessible both by index and by major/minor/patch attributes.
        v = curses.ncurses_version
        self.assertIsInstance(v[:], tuple)
        self.assertEqual(len(v), 3)
        self.assertIsInstance(v[0], int)
        self.assertIsInstance(v[1], int)
        self.assertIsInstance(v[2], int)
        self.assertIsInstance(v.major, int)
        self.assertIsInstance(v.minor, int)
        self.assertIsInstance(v.patch, int)
        self.assertEqual(v[0], v.major)
        self.assertEqual(v[1], v.minor)
        self.assertEqual(v[2], v.patch)
        self.assertGreaterEqual(v.major, 0)
        self.assertGreaterEqual(v.minor, 0)
        self.assertGreaterEqual(v.patch, 0)
class TestAscii(unittest.TestCase):
    """Tests for the pure-Python curses.ascii helper module."""
    def test_controlnames(self):
        for name in curses.ascii.controlnames:
            self.assertTrue(hasattr(curses.ascii, name), name)
    def test_ctypes(self):
        # Each classifier must accept both an int code point and the
        # corresponding one-character string, and agree with the bytes
        # classification methods for all 256 byte values.
        def check(func, expected):
            with self.subTest(ch=c, func=func):
                self.assertEqual(func(i), expected)
                self.assertEqual(func(c), expected)
        for i in range(256):
            c = chr(i)
            b = bytes([i])
            check(curses.ascii.isalnum, b.isalnum())
            check(curses.ascii.isalpha, b.isalpha())
            check(curses.ascii.isdigit, b.isdigit())
            check(curses.ascii.islower, b.islower())
            check(curses.ascii.isspace, b.isspace())
            check(curses.ascii.isupper, b.isupper())
            check(curses.ascii.isascii, i < 128)
            check(curses.ascii.ismeta, i >= 128)
            check(curses.ascii.isctrl, i < 32)
            check(curses.ascii.iscntrl, i < 32 or i == 127)
            check(curses.ascii.isblank, c in ' \t')
            check(curses.ascii.isgraph, 32 < i <= 126)
            check(curses.ascii.isprint, 32 <= i <= 126)
            check(curses.ascii.ispunct, c in string.punctuation)
            check(curses.ascii.isxdigit, c in string.hexdigits)
        # Out-of-range code points classify as False everywhere.
        for i in (-2, -1, 256, sys.maxunicode, sys.maxunicode+1):
            self.assertFalse(curses.ascii.isalnum(i))
            self.assertFalse(curses.ascii.isalpha(i))
            self.assertFalse(curses.ascii.isdigit(i))
            self.assertFalse(curses.ascii.islower(i))
            self.assertFalse(curses.ascii.isspace(i))
            self.assertFalse(curses.ascii.isupper(i))
            self.assertFalse(curses.ascii.isascii(i))
            self.assertFalse(curses.ascii.isctrl(i))
            self.assertFalse(curses.ascii.iscntrl(i))
            self.assertFalse(curses.ascii.isblank(i))
            self.assertFalse(curses.ascii.isgraph(i))
            self.assertFalse(curses.ascii.isprint(i))
            self.assertFalse(curses.ascii.ispunct(i))
            self.assertFalse(curses.ascii.isxdigit(i))
        self.assertFalse(curses.ascii.ismeta(-1))
    def test_ascii(self):
        # ascii() strips the meta (high) bit.
        ascii = curses.ascii.ascii
        self.assertEqual(ascii('\xc1'), 'A')
        self.assertEqual(ascii('A'), 'A')
        self.assertEqual(ascii(ord('\xc1')), ord('A'))
    def test_ctrl(self):
        # ctrl() maps a character to its control equivalent.
        ctrl = curses.ascii.ctrl
        self.assertEqual(ctrl('J'), '\n')
        self.assertEqual(ctrl('\n'), '\n')
        self.assertEqual(ctrl('@'), '\0')
        self.assertEqual(ctrl(ord('J')), ord('\n'))
    def test_alt(self):
        # alt() sets the meta (high) bit.
        alt = curses.ascii.alt
        self.assertEqual(alt('\n'), '\x8a')
        self.assertEqual(alt('A'), '\xc1')
        self.assertEqual(alt(ord('A')), 0xc1)
    def test_unctrl(self):
        # unctrl() produces the printable ^X / !X representation.
        unctrl = curses.ascii.unctrl
        self.assertEqual(unctrl('a'), 'a')
        self.assertEqual(unctrl('A'), 'A')
        self.assertEqual(unctrl(';'), ';')
        self.assertEqual(unctrl(' '), ' ')
        self.assertEqual(unctrl('\x7f'), '^?')
        self.assertEqual(unctrl('\n'), '^J')
        self.assertEqual(unctrl('\0'), '^@')
        self.assertEqual(unctrl(ord('A')), 'A')
        self.assertEqual(unctrl(ord('\n')), '^J')
        # Meta-bit characters
        self.assertEqual(unctrl('\x8a'), '!^J')
        self.assertEqual(unctrl('\xc1'), '!A')
        self.assertEqual(unctrl(ord('\x8a')), '!^J')
        self.assertEqual(unctrl(ord('\xc1')), '!A')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
|
#-------------------------------------------------------------------------------
# Name: socks.py
# Purpose: This module handle SOCKS messages parsing and generating.
#
#
# Author: Nethanel Coppenhagen
#
#-------------------------------------------------------------------------------
import socket
from struct import pack, unpack, unpack_from
from struct import error as struct_error
# Package modules.
from constants import *
from constants import _VERSIONS, _AUTH_METHODS, _CMD, _ADD_TYPES, \
_SOCKS5_REPLIES, _SOCKS4_REPLIES
from socksexception import SocksException
class _BaseSocks(object):
    """Base SOCKS object with general methods for SOCKS operation.

    NOTE: this module is written for Python 2 (str.decode("hex"),
    byte-string indexing returning characters)."""
    def __init__(self, version = V_SOCKS5):
        """Initialize SOCKS parameters."""
        # SOCKS parameter check.
        if version not in _VERSIONS:
            raise SocksException("Wrong SOCKS version")
        # Instance attributes initialization.
        self.version = version
    def validate_version(self, rep_version, sndr, version = None):
        """Check SOCKS version in the response.

        Raises SocksException naming *sndr* on mismatch; *version*
        overrides self.version (some messages use version 0 or 1)."""
        # Set default version if needed.
        version = version if version is not None else self.version
        if rep_version != version:
            raise SocksException("{0} has invalid SOCKS version".format(sndr))
    def _get_ipv6_hex(self, addr):
        """Return the hexadecimal representaion for ipv6.
        If the addres is invalid return None."""
        if not isinstance(addr, str):
            return None
        # Split to groups and padd with zeros.
        l_addr = [i.zfill(4) if i else i for i in addr.split(":")]
        # Grater than 8 groups is not a valid address.
        if len(l_addr) > 8:
            return None
        # Add zero groups if the address is zero compressed.
        # Each "" produced by "::" is expanded with enough "0000" groups
        # to bring the total to 8; the loop re-checks because "::" at the
        # edges produces two adjacent empty strings.
        for i in l_addr:
            if "" in l_addr:
                index = l_addr.index("")
                l_addr[index:index+1] = ["0000"]*(9-len(l_addr))
            else:
                break
        # Check each group.
        for i in l_addr:
            try:
                if not 0 <= int(i, 16) <= 65535:
                    return None
            except ValueError:
                return None
        # Python 2 only: str.decode("hex") converts to the 16 raw bytes.
        return "".join(l_addr).decode("hex")
    def validate_addr(self, addr_type, addr):
        """Validate address by addr_type."""
        # NOTE(review): `and` here means an unknown addr_type with a str
        # address falls through and returns True — looks like it should
        # be `or`; confirm against callers before changing.
        if type(addr) != str and addr_type not in _ADD_TYPES:
            return False
        if addr_type == ADD_IPV4:
            try:
                socket.inet_aton(addr)
            except:
                return False
        if addr_type == ADD_IPV6 and not self._get_ipv6_hex(addr):
            return False
        # The addres is valid or a domain name.
        # If it is a domain the socks server need to check access to this
        # domain therefore return true anyway.
        return True
    def validate_msg_params(self, addr_type, addr, port):
        """Validate message parameters."""
        # Port must fit in an unsigned 16-bit field (0..65535).
        if (addr_type in _ADD_TYPES and
            self.validate_addr(addr_type, addr) and -1 < port < 65536):
            return True
        return False
    def get_addr_type(self, addr):
        """Return the address type of given address and None if the
        address is invalid."""
        for addr_type in _ADD_TYPES:
            if self.validate_addr(addr_type, addr):
                return addr_type
        return None
class Socks5(_BaseSocks):
    """This class handle all SOCKS5 packets and messages format (RFC 1928,
    with RFC 1929 username/password authentication)."""
    # Class consts: reply-code -> human readable text.
    _REPLIES = {
        REP_SUCCESS : "succeeded",
        REP_GENERAL_FAILURE : "general SOCKS server failure",
        REP_CON_UNALLOWED : "connection not allowed by ruleset",
        REP_NET_UNREACH : "network unreachable",
        REP_HOST_UNREACH : "host unreachable",
        REP_CON_REFUSED : "connection refused",
        REP_TTL_EXPIRED : "TTL expired",
        REP_CMD_NOT_SUPPORT : "command not supported",
        REP_ADD_TYPE_UNSUPPORT : "address type not supported",
        REP_UNKNOWN_ERR : "unknown error"
    }
    def __init__(self):
        """Initialize SOCKS5 parameters."""
        _BaseSocks.__init__(self, V_SOCKS5)
    def connect_message(self, auth = AUTH_NONE):
        """Generate message for connecting the SOCKS5 server.

        Layout: VER NMETHODS METHODS...  When an auth method other than
        AUTH_NONE is requested, AUTH_NONE is still offered as fallback."""
        # Set authentication message.
        conn_msg = pack("BBB", self.version , 1, auth)
        if auth != AUTH_NONE:
            conn_msg = pack("BBBB", self.version, 2, AUTH_NONE, auth)
        return conn_msg
    def parse_connect(self, request):
        """Parse connect message from SOCKS5 client. Returns a list
        with all SOCKS client supported authentication methods."""
        try:
            ver, nmethod = unpack_from("BB", request)
        except struct_error as e:
            raise SocksException("Invalid connect message from client")
        self.validate_version(ver, "Client")
        # Parse authentication methods (one byte each, after VER NMETHODS).
        methods = []
        for i in xrange(nmethod):
            if ord(request[i+2]) in _AUTH_METHODS:
                methods.append(ord(request[i+2]))
        if not methods:
            methods.append(AUTH_NO_ACCEPTABLE)
        return methods
    def connect_reply(self, method):
        """Generate SOCKS5 server reply for connect requset."""
        return pack("BB", self.version, method)
    def parse_conn_reply(self, reply):
        """Parse SOCKS5 server reply for connect message.

        Returns the selected authentication method."""
        if len(reply) != 2:
            raise SocksException("Server response is invalid")
        # Parse reply.
        self.validate_version(ord(reply[0]), "Server")
        if ord(reply[1]) == AUTH_NONE:
            return AUTH_NONE
        elif ord(reply[1]) == AUTH_UPASS:
            return AUTH_UPASS
        return AUTH_NO_ACCEPTABLE
    def validate_user_pwd(self, user, pwd):
        """Validate RFC 1929 user/password fields.

        Each field is length-prefixed by a single byte, so the username
        must be 1..255 bytes and the password at most 255 bytes.
        (This method was referenced by client_auth but previously not
        defined anywhere in the module.)"""
        if not isinstance(user, str) or not isinstance(pwd, str):
            return False
        return 0 < len(user) < 256 and len(pwd) < 256
    def client_auth(self, user, pwd):
        """Generate authentication message with user and password for
        connecting SOCKS5 server.

        Layout: VER(1) ULEN USER [PLEN PASS]."""
        # Validate user and password.
        if not self.validate_user_pwd(user, pwd):
            raise SocksException("Invalid client authentication parameters")
        msg = pack("BB", 1, len(user)) + user
        if not pwd:
            return msg
        return msg + pack("B", len(pwd)) + pwd
    def parse_auth(self, msg):
        """Parse authentication message with user and password from
        SOCKS5 client.  Returns (user, pwd)."""
        try:
            version, user_len = unpack_from("BB", msg)
        except struct_error as e:
            raise SocksException("Invalid authentication message from client")
        self.validate_version(version, "Client", 1)
        # Layout: VER ULEN USER PLEN PASS.  The username occupies user_len
        # bytes starting at offset 2 (the previous slice msg[2:user_len]
        # truncated the username); the PLEN byte follows it and the
        # password is everything after that.
        user = msg[2:2 + user_len]
        pwd = msg[3 + user_len:]
        return user, pwd
    def auth_status(self, status):
        """Generates status message for client's authentication request."""
        return pack("BB", 1, status)
    def parse_server_auth(self, reply):
        """Parse server response for user:password authentication message.

        Returns True when the status byte is 0 (success)."""
        if len(reply) != 2:
            raise SocksException("Server response is invalid")
        # Parse response. Sub-negotiation version must be 1.
        if ord(reply[0]) != 1:
            raise SocksException("Invalid data from the server")
        # Fixed NameError: the parameter is `reply`, not `response`.
        elif ord(reply[1]) != 0:
            return False
        # Status is 0.
        return True
    def _generate_message(self, cmd, addr, port, addr_type,
                            version = None, frag = 0):
        """Generate a SOCKS5 message.

        Layout: VER CMD/REP FRAG/RSV ATYP ADDR PORT."""
        # Set default version of needed. Sometimes needed version 0.
        if version is None:
            version = self.version
        # Create message.
        msg = pack("BBBB", version, cmd, frag, addr_type)
        # Add address to the message.
        if addr_type == ADD_IPV4:
            msg += socket.inet_aton(addr)
        elif addr_type == ADD_IPV6:
            msg += self._get_ipv6_hex(addr)
        # Not IP then it is a domain name: one length byte then the name.
        else:
            msg += pack("B", len(addr)) + addr
        # Add port in network octet order.
        msg += pack("!H", port)
        return msg
    def generate_request(self, cmd, dst_addr, dst_port, addr_type):
        """Generate a SOCKS5 request that will be sent to the server."""
        # Parameters check.
        if (cmd not in _CMD or
            not self.validate_msg_params(addr_type, dst_addr, dst_port)):
            raise SocksException("Invalid client request parameters")
        return self._generate_message(cmd, dst_addr, dst_port, addr_type)
    def generate_udp(self, frag, dst_addr, dst_port, addr_type, data):
        """Generate UDP request header for sending UDP datagram.."""
        # Parameters check.
        if not self.validate_msg_params(addr_type, dst_addr, dst_port):
            raise SocksException("Invalid client request parameters")
        # Create message (version field is 0 for UDP headers) and add data.
        udp_msg = self._generate_message(0, dst_addr, dst_port, addr_type,
                                         0, frag)
        udp_msg += data
        return udp_msg
    def generate_reply(self, rep, bnd_addr, bnd_port, addr_type):
        """Generate a SOCKS5 reply that will be sent to the client."""
        # Parameters check.
        if (rep not in _SOCKS5_REPLIES or
            not self.validate_msg_params(addr_type, bnd_addr, bnd_port)):
            raise SocksException("Invalid server reply parameters")
        return self._generate_message(rep, bnd_addr, bnd_port, addr_type)
    def _parse_message(self, message):
        """Parse SOCKS5 messages.

        Returns (version, cmd, rsv, addr_type, addr, port)."""
        # Parse the first 4 bytes: VER CMD RSV ATYP.
        try:
            version, cmd, rsv, addr_type = unpack_from("BBBB", message)
        except struct_error as e:
            raise SocksException("Invalid SOCKS message received")
        # Get address.
        if addr_type not in _ADD_TYPES:
            raise SocksException("Server using unknown address type")
        if addr_type == ADD_IPV4:
            addr = socket.inet_ntoa(message[4:8])
        elif addr_type == ADD_IPV6:
            # Get every group of number from address remove hex "0x" prefix,
            # add zero padding and join with ":".
            # The address starts right after the 4-byte header (offset 4,
            # matching the IPv4 branch above); offset 5 skipped a byte.
            addr = ":".join(hex(i).replace("0x","").zfill(4)
                    for i in unpack_from("!HHHHHHHH", message, 4))
        else:
            domain_len = ord(message[4])
            addr = message[5:5+domain_len]
        # Get port (last two bytes, network order).
        port = unpack("!H", message[-2:])[0]
        # Validate message parameter.
        if not self.validate_msg_params(addr_type, addr, port):
            raise SocksException("Invalid message parameters received")
        return version, cmd, rsv, addr_type, addr, port
    def parse_reply(self, reply):
        """Parse the SOCKS5 server reply for a request.

        Returns (rep, reply-text, addr_type, bound-addr, bound-port)."""
        # Parse reply.
        ver, rep, rsv, addr_type, b_addr, b_port = self._parse_message(reply)
        # Version check.
        self.validate_version(ver, "Server")
        # Get Reply.
        msg = self._REPLIES.get(rep, self._REPLIES[REP_UNKNOWN_ERR])
        return rep, msg, addr_type, b_addr, b_port
    def parse_request(self, req):
        """Parse the SOCKS5 client request."""
        # Parse request.
        ver, cmd, rsv, addr_type, dst_addr, dst_port = self._parse_message(req)
        # Version check.
        self.validate_version(ver, "Client")
        return cmd, addr_type, dst_addr, dst_port
    def parse_udp(self, dgram):
        """Parse the SOCKS5 client UDP datagram.

        Returns (frag, addr_type, dst_addr, dst_port, data)."""
        # For parsing request need to find where data starts.
        try:
            addr_type = ord(dgram[3])
            if addr_type == ADD_IPV4:
                data = dgram[10:]
            elif addr_type == ADD_IPV6:
                data = dgram[22:]
            else:
                data = dgram[7+ord(dgram[4]):]
        except IndexError as e:
            raise SocksException("Invalid SOCKS datagram received")
        # Parse the header.  Use an explicit header length: with an empty
        # payload, dgram[:-len(data)] would have been dgram[:0] == "".
        header_len = len(dgram) - len(data)
        t_msg = self._parse_message(dgram[:header_len])
        ver, cmd, frag, addr_type, dst_addr, dst_port = t_msg
        # Version check (UDP headers carry version 0 in the first byte).
        self.validate_version(ver, "Client", 0)
        # The cmd should be zero for UDP.
        if cmd != 0:
            raise SocksException("Invalid data from the server")
        return frag, addr_type, dst_addr, dst_port, data
class Socks4(_BaseSocks):
    """This class handle all SOCKS4 packets and messages format."""
    # Reply-code -> human readable text ("because" typo fixed).
    _REPLIES = {
        REP_REQ_GRANT : "request granted",
        REP_REQ_REJECT : "request rejected or failed",
        REP_CANT_CONN_IDENTD : "request rejected because SOCKS server "\
                               "cannot connect to identd on the client",
        REP_DIFF_USERID : "request rejected because the client program "\
                          "and identd report different user-ids"
    }
    def __init__(self):
        """Initialize SOCKS4 parameters."""
        _BaseSocks.__init__(self, V_SOCKS4)
    def _generate_message(self, cmd, addr, port, version = None):
        """Generate a SOCKS4 message.

        Layout: VER CMD PORT(2, network order) IPV4(4)."""
        # Set default version of needed. Sometime need version 0 so check None.
        if version is None:
            version = self.version
        # Create message.
        msg = pack("!BBH", version, cmd, port)
        msg += socket.inet_aton(addr)
        return msg
    def generate_request(self, cmd, dst_addr, dst_port, userid):
        """Generate a SOCKS4 request that will be sent to the server.

        SOCKS4 supports only IPv4 and no UDP command."""
        # Parameters check.
        if (cmd not in _CMD or cmd == CMD_UDP or
            not self.validate_msg_params(ADD_IPV4, dst_addr, dst_port)):
            raise SocksException("Invalid client request parameters")
        # Create request message: header, optional userid, NUL terminator.
        req_msg = self._generate_message(cmd, dst_addr, dst_port)
        if userid:
            req_msg += userid
        req_msg += pack("B", 0)
        return req_msg
    def generate_reply(self, rep, dst_addr, dst_port):
        """Generate a SOCKS4 reply that will be sent to the client.

        SOCKS4 replies carry version 0."""
        # Parameters check.
        if (rep not in _SOCKS4_REPLIES or
            not self.validate_msg_params(ADD_IPV4, dst_addr, dst_port)):
            raise SocksException("Invalid server reply parameters")
        return self._generate_message(rep, dst_addr, dst_port, 0)
    def _parse_message(self, messsage):
        """Parse SOCKS4 messages.

        Returns (version, cmd, addr, port)."""
        try:
            version, cmd, port = unpack_from("!BBH", messsage)
        except struct_error as e:
            raise SocksException("Invalid SOCKS message received")
        addr = socket.inet_ntoa(messsage[4:8])
        # Validate message parameter.
        if not self.validate_msg_params(ADD_IPV4, addr, port):
            raise SocksException("Invalid message parameters received")
        return version, cmd, addr, port
    def parse_reply(self, reply):
        """Parse the SOCKS4 server reply for a request.

        Returns (rep, reply-text, dst_addr, dst_port)."""
        version, rep, dst_addr, dst_port = self._parse_message(reply)
        # Version check. SOCKS4 reply version 0.
        self.validate_version(version, "Server", 0)
        # Get reply text.
        msg = self._REPLIES.get(rep, self._REPLIES[REP_REQ_REJECT])
        return rep, msg, dst_addr, dst_port
    def parse_request(self, request):
        """Parse the SOCKS4 client request.

        Returns (cmd, dst_addr, dst_port, userid)."""
        version, cmd, dst_addr, dst_port = self._parse_message(request)
        # Version check.
        self.validate_version(version, "Client")
        # Get userid without Null in the end.  The fixed header is 8 bytes
        # (VER CMD PORT IP), so the userid starts at offset 8; the previous
        # slice [7:-1] wrongly included the last IP byte.
        userid = request[8:-1]
        return cmd, dst_addr, dst_port, userid
# Factory function.
def socks(version = V_SOCKS5, *args, **kwargs):
    """Returns SOCKS object that support the required version.

    Raises ValueError for unknown versions.  (Fixed "Invallid" typo in
    the error message.)"""
    if version not in _VERSIONS:
        raise ValueError("Invalid SOCKS version")
    return Socks5() if version == V_SOCKS5 else Socks4()
|
|
'''
datetime.tzinfo timezone definitions generated from the
Olson timezone database:
ftp://elsie.nci.nih.gov/pub/tz*.tar.gz
See the datetime section of the Python Library Reference for information
on how to use these modules.
'''
# The IANA (nee Olson) database is updated several times a year.
OLSON_VERSION = '2016j'
VERSION = '2016.10'  # Switching to pip compatible version numbering.
__version__ = VERSION
OLSEN_VERSION = OLSON_VERSION # Old releases had this misspelling
# Public API of the pytz package.
__all__ = [
    'timezone', 'utc', 'country_timezones', 'country_names',
    'AmbiguousTimeError', 'InvalidTimeError',
    'NonExistentTimeError', 'UnknownTimeZoneError',
    'all_timezones', 'all_timezones_set',
    'common_timezones', 'common_timezones_set',
    ]
import sys, datetime, os.path, gettext
from pytz.exceptions import AmbiguousTimeError
from pytz.exceptions import InvalidTimeError
from pytz.exceptions import NonExistentTimeError
from pytz.exceptions import UnknownTimeZoneError
from pytz.lazy import LazyDict, LazyList, LazySet
from pytz.tzinfo import unpickler
from pytz.tzfile import build_tzinfo, _byte_string
# Python 2/3 compatibility shim: define an ascii() helper that raises
# UnicodeEncodeError on non-ASCII input and returns a native str.
try:
    unicode
except NameError: # Python 3.x
    # Python 3.x doesn't have unicode(), making writing code
    # for Python 2.3 and Python 3.x a pain.
    unicode = str
    def ascii(s):
        r"""
        >>> ascii('Hello')
        'Hello'
        >>> ascii('\N{TRADE MARK SIGN}') #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        UnicodeEncodeError: ...
        """
        s.encode('ASCII') # Raise an exception if not ASCII
        return s # But return the original string - not a byte string.
else: # Python 2.x
    def ascii(s):
        r"""
        >>> ascii('Hello')
        'Hello'
        >>> ascii(u'Hello')
        'Hello'
        >>> ascii(u'\N{TRADE MARK SIGN}') #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        UnicodeEncodeError: ...
        """
        return s.encode('ASCII')
def open_resource(name):
    """Open a resource from the zoneinfo subdir for reading.

    Uses the pkg_resources module if available and no standard file
    found at the calculated location.  Raises ValueError for path
    segments that would escape the zoneinfo directory.
    """
    segments = name.lstrip('/').split('/')
    for segment in segments:
        if segment == os.path.pardir or os.path.sep in segment:
            raise ValueError('Bad path segment: %r' % segment)
    filename = os.path.join(os.path.dirname(__file__),
                            'zoneinfo', *segments)
    if not os.path.exists(filename):
        # http://bugs.launchpad.net/bugs/383171 - we avoid using this
        # unless absolutely necessary to help when a broken version of
        # pkg_resources is installed.
        try:
            from pkg_resources import resource_stream
        except ImportError:
            pass
        else:
            return resource_stream(__name__, 'zoneinfo/' + name)
    return open(filename, 'rb')
def resource_exists(name):
    """Return true if the given resource exists"""
    try:
        # Probe by actually opening the resource, then release it.
        open_resource(name).close()
    except IOError:
        return False
    return True
# Enable this when we get some translations?
# We want an i18n API that is useful to programs using Python's gettext
# module, as well as the Zope3 i18n package. Perhaps we should just provide
# the POT file and translations, and leave it up to callers to make use
# of them.
#
# t = gettext.translation(
# 'pytz', os.path.join(os.path.dirname(__file__), 'locales'),
# fallback=True
# )
# def _(timezone_name):
# """Translate a timezone name using the current locale, returning Unicode"""
# return t.ugettext(timezone_name)
# Cache of already-built tzinfo instances, keyed by canonical zone name.
_tzinfo_cache = {}
def timezone(zone):
    r''' Return a datetime.tzinfo implementation for the given timezone
    >>> from datetime import datetime, timedelta
    >>> utc = timezone('UTC')
    >>> eastern = timezone('US/Eastern')
    >>> eastern.zone
    'US/Eastern'
    >>> timezone(unicode('US/Eastern')) is eastern
    True
    >>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
    >>> loc_dt = utc_dt.astimezone(eastern)
    >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
    >>> loc_dt.strftime(fmt)
    '2002-10-27 01:00:00 EST (-0500)'
    >>> (loc_dt - timedelta(minutes=10)).strftime(fmt)
    '2002-10-27 00:50:00 EST (-0500)'
    >>> eastern.normalize(loc_dt - timedelta(minutes=10)).strftime(fmt)
    '2002-10-27 01:50:00 EDT (-0400)'
    >>> (loc_dt + timedelta(minutes=10)).strftime(fmt)
    '2002-10-27 01:10:00 EST (-0500)'
    Raises UnknownTimeZoneError if passed an unknown zone.
    >>> try:
    ...     timezone('Asia/Shangri-La')
    ... except UnknownTimeZoneError:
    ...     print('Unknown')
    Unknown
    >>> try:
    ...     timezone(unicode('\N{TRADE MARK SIGN}'))
    ... except UnknownTimeZoneError:
    ...     print('Unknown')
    Unknown
    '''
    # 'UTC' (any case) short-circuits to the optimized singleton.
    if zone.upper() == 'UTC':
        return utc
    try:
        zone = ascii(zone)
    except UnicodeEncodeError:
        # All valid timezones are ASCII
        raise UnknownTimeZoneError(zone)
    # Undo old-style name munging (e.g. 'GMT_plus_5' -> 'GMT+5').
    zone = _unmunge_zone(zone)
    if zone not in _tzinfo_cache:
        if zone in all_timezones_set:
            # Build the tzinfo from the compiled zoneinfo resource and
            # cache it; subsequent calls return the same instance.
            fp = open_resource(zone)
            try:
                _tzinfo_cache[zone] = build_tzinfo(zone, fp)
            finally:
                fp.close()
        else:
            raise UnknownTimeZoneError(zone)
    return _tzinfo_cache[zone]
def _unmunge_zone(zone):
    """Undo the time zone name munging done by older versions of pytz."""
    # Old releases rewrote '+' and '-' in zone names (e.g. 'Etc/GMT+8'
    # became 'Etc/GMT_plus_8'); translate both markers back.
    for munged, plain in (('_plus_', '+'), ('_minus_', '-')):
        zone = zone.replace(munged, plain)
    return zone
ZERO = datetime.timedelta(0)
HOUR = datetime.timedelta(hours=1)


class UTC(datetime.tzinfo):
    """UTC

    Optimized UTC implementation. It unpickles using the single module global
    instance defined beneath this class declaration.
    """
    zone = "UTC"

    _utcoffset = ZERO
    _dst = ZERO
    _tzname = zone

    def fromutc(self, dt):
        # Attach this tzinfo to naive datetimes; defer to the standard
        # datetime.tzinfo.fromutc for aware ones.  `utc.__class__` is used
        # instead of a bare class name because the name `UTC` is rebound
        # to the singleton instance just below this class.
        if dt.tzinfo is None:
            return self.localize(dt)
        return super(utc.__class__, self).fromutc(dt)

    def utcoffset(self, dt):
        return ZERO

    def tzname(self, dt):
        return "UTC"

    def dst(self, dt):
        # UTC never observes daylight saving time.
        return ZERO

    def __reduce__(self):
        # Pickle via the _UTC factory so unpickling returns the singleton.
        return _UTC, ()

    def localize(self, dt, is_dst=False):
        '''Convert naive time to local time'''
        if dt.tzinfo is not None:
            raise ValueError('Not naive datetime (tzinfo is already set)')
        return dt.replace(tzinfo=self)

    def normalize(self, dt, is_dst=False):
        '''Correct the timezone information on the given datetime'''
        if dt.tzinfo is self:
            return dt
        if dt.tzinfo is None:
            raise ValueError('Naive time - no tzinfo set')
        return dt.astimezone(self)

    def __repr__(self):
        return "<UTC>"

    def __str__(self):
        return "UTC"


UTC = utc = UTC()  # UTC is a singleton
def _UTC():
    """Factory function for utc unpickling.

    Makes sure that unpickling a utc instance always returns the same
    module global.

    These examples belong in the UTC class above, but it is obscured; or in
    the README.txt, but we are not depending on Python 2.4 so integrating
    the README.txt examples with the unit tests is not trivial.

    >>> import datetime, pickle
    >>> dt = datetime.datetime(2005, 3, 1, 14, 13, 21, tzinfo=utc)
    >>> naive = dt.replace(tzinfo=None)
    >>> p = pickle.dumps(dt, 1)
    >>> naive_p = pickle.dumps(naive, 1)
    >>> len(p) - len(naive_p)
    17
    >>> new = pickle.loads(p)
    >>> new == dt
    True
    >>> new is dt
    False
    >>> new.tzinfo is dt.tzinfo
    True
    >>> utc is UTC is timezone('UTC')
    True
    >>> utc is timezone('GMT')
    False
    """
    # Always hand back the module-level singleton created above.
    return utc


# Consulted by old pickle protocols (copy_reg) to permit this callable
# to run during unpickling.
_UTC.__safe_for_unpickling__ = True
def _p(*args):
    """Factory function for unpickling pytz tzinfo instances.

    Just a wrapper around tzinfo.unpickler to save a few bytes in each pickle
    by shortening the path.
    """
    # `unpickler` is defined elsewhere in this module (not visible in this
    # chunk); _p only forwards its arguments.
    return unpickler(*args)


# Consulted by old pickle protocols (copy_reg) to permit this callable
# to run during unpickling.
_p.__safe_for_unpickling__ = True
class _CountryTimezoneDict(LazyDict):
    """Map ISO 3166 country code to a list of timezone names commonly used
    in that country.

    iso3166_code is the two letter code used to identify the country.

    >>> def print_list(list_of_strings):
    ...     'We use a helper so doctests work under Python 2.3 -> 3.x'
    ...     for s in list_of_strings:
    ...         print(s)

    >>> print_list(country_timezones['nz'])
    Pacific/Auckland
    Pacific/Chatham
    >>> print_list(country_timezones['ch'])
    Europe/Zurich
    >>> print_list(country_timezones['CH'])
    Europe/Zurich
    >>> print_list(country_timezones[unicode('ch')])
    Europe/Zurich
    >>> print_list(country_timezones['XXX'])
    Traceback (most recent call last):
        ...
    KeyError: 'XXX'

    Previously, this information was exposed as a function rather than a
    dictionary. This is still supported::

    >>> print_list(country_timezones('nz'))
    Pacific/Auckland
    Pacific/Chatham
    """
    def __call__(self, iso3166_code):
        """Backwards compatibility."""
        return self[iso3166_code]

    def _fill(self):
        # Populate self.data on first access (LazyDict contract) by
        # parsing the bundled zone.tab resource.
        data = {}
        zone_tab = open_resource('zone.tab')
        try:
            for line in zone_tab:
                line = line.decode('UTF-8')
                if line.startswith('#'):
                    continue
                # Data lines are "<code> <coordinates> <zone> [comment]".
                code, coordinates, zone = line.split(None, 4)[:3]
                # Skip zones not shipped with this distribution.
                if zone not in all_timezones_set:
                    continue
                try:
                    data[code].append(zone)
                except KeyError:
                    data[code] = [zone]
            self.data = data
        finally:
            zone_tab.close()


country_timezones = _CountryTimezoneDict()
class _CountryNameDict(LazyDict):
    '''Dictionary proving ISO3166 code -> English name.

    >>> print(country_names['au'])
    Australia
    '''
    def _fill(self):
        # Populate self.data on first access (LazyDict contract) by
        # parsing the bundled iso3166.tab resource.
        data = {}
        zone_tab = open_resource('iso3166.tab')
        try:
            for line in zone_tab.readlines():
                line = line.decode('UTF-8')
                if line.startswith('#'):
                    continue
                # Data lines are "<code><whitespace><country name>".
                code, name = line.split(None, 1)
                data[code] = name.strip()
            self.data = data
        finally:
            zone_tab.close()


country_names = _CountryNameDict()
# Time-zone info based solely on fixed offsets
class _FixedOffset(datetime.tzinfo):
    """A tzinfo with a constant UTC offset and no DST transitions.

    Instances are created and interned by the FixedOffset factory below;
    they are not meant to be constructed directly.
    """

    zone = None  # to match the standard pytz API

    def __init__(self, minutes):
        # Offsets of a full day (1440 minutes) or more are unrepresentable.
        if not -1440 < minutes < 1440:
            raise ValueError("absolute offset is too large", minutes)
        self._minutes = minutes
        self._offset = datetime.timedelta(minutes=minutes)

    def utcoffset(self, dt):
        return self._offset

    def dst(self, dt):
        return ZERO

    def tzname(self, dt):
        # Fixed offsets carry no human-readable abbreviation.
        return None

    def __reduce__(self):
        # Unpickle through the FixedOffset factory so interning survives
        # a pickle round-trip.
        return FixedOffset, (self._minutes, )

    def __repr__(self):
        return 'pytz.FixedOffset(%d)' % self._minutes

    def localize(self, dt, is_dst=False):
        '''Convert naive time to local time'''
        if dt.tzinfo is not None:
            raise ValueError('Not naive datetime (tzinfo is already set)')
        return dt.replace(tzinfo=self)

    def normalize(self, dt, is_dst=False):
        '''Correct the timezone information on the given datetime'''
        if dt.tzinfo is None:
            raise ValueError('Naive time - no tzinfo set')
        if dt.tzinfo is self:
            return dt
        return dt.astimezone(self)
def FixedOffset(offset, _tzinfos = {}):
    """return a fixed-offset timezone based off a number of minutes.

    >>> one = FixedOffset(-330)
    >>> one
    pytz.FixedOffset(-330)
    >>> one.utcoffset(datetime.datetime.now())
    datetime.timedelta(-1, 66600)
    >>> one.dst(datetime.datetime.now())
    datetime.timedelta(0)

    >>> two = FixedOffset(1380)
    >>> two
    pytz.FixedOffset(1380)
    >>> two.utcoffset(datetime.datetime.now())
    datetime.timedelta(0, 82800)
    >>> two.dst(datetime.datetime.now())
    datetime.timedelta(0)

    The datetime.timedelta must be between the range of -1 and 1 day,
    non-inclusive.

    >>> FixedOffset(1440)
    Traceback (most recent call last):
        ...
    ValueError: ('absolute offset is too large', 1440)

    >>> FixedOffset(-1440)
    Traceback (most recent call last):
        ...
    ValueError: ('absolute offset is too large', -1440)

    An offset of 0 is special-cased to return UTC.

    >>> FixedOffset(0) is UTC
    True

    There should always be only one instance of a FixedOffset per timedelta.
    This should be true for multiple creation calls.

    >>> FixedOffset(-330) is one
    True
    >>> FixedOffset(1380) is two
    True

    It should also be true for pickling.

    >>> import pickle
    >>> pickle.loads(pickle.dumps(one)) is one
    True
    >>> pickle.loads(pickle.dumps(two)) is two
    True
    """
    # NOTE: the mutable default `_tzinfos` is deliberate - it is the
    # process-wide intern cache that makes repeated calls (and pickle
    # round-trips) return the identical instance.
    if offset == 0:
        return UTC

    info = _tzinfos.get(offset)
    if info is None:
        # We haven't seen this one before. we need to save it.

        # Use setdefault to avoid a race condition and make sure we have
        # only one
        info = _tzinfos.setdefault(offset, _FixedOffset(offset))

    return info


# Consulted by old pickle protocols (copy_reg) to permit this callable
# to run during unpickling (see _FixedOffset.__reduce__).
FixedOffset.__safe_for_unpickling__ = True
def _test():
    """Run the module's doctests and return the doctest result tuple."""
    import doctest
    import os
    import sys
    # Make the parent directory importable so `import pytz` finds the
    # package this file belongs to.
    sys.path.insert(0, os.pardir)
    import pytz
    return doctest.testmod(pytz)


if __name__ == '__main__':
    _test()
all_timezones = \
['Africa/Abidjan',
'Africa/Accra',
'Africa/Addis_Ababa',
'Africa/Algiers',
'Africa/Asmara',
'Africa/Asmera',
'Africa/Bamako',
'Africa/Bangui',
'Africa/Banjul',
'Africa/Bissau',
'Africa/Blantyre',
'Africa/Brazzaville',
'Africa/Bujumbura',
'Africa/Cairo',
'Africa/Casablanca',
'Africa/Ceuta',
'Africa/Conakry',
'Africa/Dakar',
'Africa/Dar_es_Salaam',
'Africa/Djibouti',
'Africa/Douala',
'Africa/El_Aaiun',
'Africa/Freetown',
'Africa/Gaborone',
'Africa/Harare',
'Africa/Johannesburg',
'Africa/Juba',
'Africa/Kampala',
'Africa/Khartoum',
'Africa/Kigali',
'Africa/Kinshasa',
'Africa/Lagos',
'Africa/Libreville',
'Africa/Lome',
'Africa/Luanda',
'Africa/Lubumbashi',
'Africa/Lusaka',
'Africa/Malabo',
'Africa/Maputo',
'Africa/Maseru',
'Africa/Mbabane',
'Africa/Mogadishu',
'Africa/Monrovia',
'Africa/Nairobi',
'Africa/Ndjamena',
'Africa/Niamey',
'Africa/Nouakchott',
'Africa/Ouagadougou',
'Africa/Porto-Novo',
'Africa/Sao_Tome',
'Africa/Timbuktu',
'Africa/Tripoli',
'Africa/Tunis',
'Africa/Windhoek',
'America/Adak',
'America/Anchorage',
'America/Anguilla',
'America/Antigua',
'America/Araguaina',
'America/Argentina/Buenos_Aires',
'America/Argentina/Catamarca',
'America/Argentina/ComodRivadavia',
'America/Argentina/Cordoba',
'America/Argentina/Jujuy',
'America/Argentina/La_Rioja',
'America/Argentina/Mendoza',
'America/Argentina/Rio_Gallegos',
'America/Argentina/Salta',
'America/Argentina/San_Juan',
'America/Argentina/San_Luis',
'America/Argentina/Tucuman',
'America/Argentina/Ushuaia',
'America/Aruba',
'America/Asuncion',
'America/Atikokan',
'America/Atka',
'America/Bahia',
'America/Bahia_Banderas',
'America/Barbados',
'America/Belem',
'America/Belize',
'America/Blanc-Sablon',
'America/Boa_Vista',
'America/Bogota',
'America/Boise',
'America/Buenos_Aires',
'America/Cambridge_Bay',
'America/Campo_Grande',
'America/Cancun',
'America/Caracas',
'America/Catamarca',
'America/Cayenne',
'America/Cayman',
'America/Chicago',
'America/Chihuahua',
'America/Coral_Harbour',
'America/Cordoba',
'America/Costa_Rica',
'America/Creston',
'America/Cuiaba',
'America/Curacao',
'America/Danmarkshavn',
'America/Dawson',
'America/Dawson_Creek',
'America/Denver',
'America/Detroit',
'America/Dominica',
'America/Edmonton',
'America/Eirunepe',
'America/El_Salvador',
'America/Ensenada',
'America/Fort_Nelson',
'America/Fort_Wayne',
'America/Fortaleza',
'America/Glace_Bay',
'America/Godthab',
'America/Goose_Bay',
'America/Grand_Turk',
'America/Grenada',
'America/Guadeloupe',
'America/Guatemala',
'America/Guayaquil',
'America/Guyana',
'America/Halifax',
'America/Havana',
'America/Hermosillo',
'America/Indiana/Indianapolis',
'America/Indiana/Knox',
'America/Indiana/Marengo',
'America/Indiana/Petersburg',
'America/Indiana/Tell_City',
'America/Indiana/Vevay',
'America/Indiana/Vincennes',
'America/Indiana/Winamac',
'America/Indianapolis',
'America/Inuvik',
'America/Iqaluit',
'America/Jamaica',
'America/Jujuy',
'America/Juneau',
'America/Kentucky/Louisville',
'America/Kentucky/Monticello',
'America/Knox_IN',
'America/Kralendijk',
'America/La_Paz',
'America/Lima',
'America/Los_Angeles',
'America/Louisville',
'America/Lower_Princes',
'America/Maceio',
'America/Managua',
'America/Manaus',
'America/Marigot',
'America/Martinique',
'America/Matamoros',
'America/Mazatlan',
'America/Mendoza',
'America/Menominee',
'America/Merida',
'America/Metlakatla',
'America/Mexico_City',
'America/Miquelon',
'America/Moncton',
'America/Monterrey',
'America/Montevideo',
'America/Montreal',
'America/Montserrat',
'America/Nassau',
'America/New_York',
'America/Nipigon',
'America/Nome',
'America/Noronha',
'America/North_Dakota/Beulah',
'America/North_Dakota/Center',
'America/North_Dakota/New_Salem',
'America/Ojinaga',
'America/Panama',
'America/Pangnirtung',
'America/Paramaribo',
'America/Phoenix',
'America/Port-au-Prince',
'America/Port_of_Spain',
'America/Porto_Acre',
'America/Porto_Velho',
'America/Puerto_Rico',
'America/Rainy_River',
'America/Rankin_Inlet',
'America/Recife',
'America/Regina',
'America/Resolute',
'America/Rio_Branco',
'America/Rosario',
'America/Santa_Isabel',
'America/Santarem',
'America/Santiago',
'America/Santo_Domingo',
'America/Sao_Paulo',
'America/Scoresbysund',
'America/Shiprock',
'America/Sitka',
'America/St_Barthelemy',
'America/St_Johns',
'America/St_Kitts',
'America/St_Lucia',
'America/St_Thomas',
'America/St_Vincent',
'America/Swift_Current',
'America/Tegucigalpa',
'America/Thule',
'America/Thunder_Bay',
'America/Tijuana',
'America/Toronto',
'America/Tortola',
'America/Vancouver',
'America/Virgin',
'America/Whitehorse',
'America/Winnipeg',
'America/Yakutat',
'America/Yellowknife',
'Antarctica/Casey',
'Antarctica/Davis',
'Antarctica/DumontDUrville',
'Antarctica/Macquarie',
'Antarctica/Mawson',
'Antarctica/McMurdo',
'Antarctica/Palmer',
'Antarctica/Rothera',
'Antarctica/South_Pole',
'Antarctica/Syowa',
'Antarctica/Troll',
'Antarctica/Vostok',
'Arctic/Longyearbyen',
'Asia/Aden',
'Asia/Almaty',
'Asia/Amman',
'Asia/Anadyr',
'Asia/Aqtau',
'Asia/Aqtobe',
'Asia/Ashgabat',
'Asia/Ashkhabad',
'Asia/Atyrau',
'Asia/Baghdad',
'Asia/Bahrain',
'Asia/Baku',
'Asia/Bangkok',
'Asia/Barnaul',
'Asia/Beirut',
'Asia/Bishkek',
'Asia/Brunei',
'Asia/Calcutta',
'Asia/Chita',
'Asia/Choibalsan',
'Asia/Chongqing',
'Asia/Chungking',
'Asia/Colombo',
'Asia/Dacca',
'Asia/Damascus',
'Asia/Dhaka',
'Asia/Dili',
'Asia/Dubai',
'Asia/Dushanbe',
'Asia/Famagusta',
'Asia/Gaza',
'Asia/Harbin',
'Asia/Hebron',
'Asia/Ho_Chi_Minh',
'Asia/Hong_Kong',
'Asia/Hovd',
'Asia/Irkutsk',
'Asia/Istanbul',
'Asia/Jakarta',
'Asia/Jayapura',
'Asia/Jerusalem',
'Asia/Kabul',
'Asia/Kamchatka',
'Asia/Karachi',
'Asia/Kashgar',
'Asia/Kathmandu',
'Asia/Katmandu',
'Asia/Khandyga',
'Asia/Kolkata',
'Asia/Krasnoyarsk',
'Asia/Kuala_Lumpur',
'Asia/Kuching',
'Asia/Kuwait',
'Asia/Macao',
'Asia/Macau',
'Asia/Magadan',
'Asia/Makassar',
'Asia/Manila',
'Asia/Muscat',
'Asia/Nicosia',
'Asia/Novokuznetsk',
'Asia/Novosibirsk',
'Asia/Omsk',
'Asia/Oral',
'Asia/Phnom_Penh',
'Asia/Pontianak',
'Asia/Pyongyang',
'Asia/Qatar',
'Asia/Qyzylorda',
'Asia/Rangoon',
'Asia/Riyadh',
'Asia/Saigon',
'Asia/Sakhalin',
'Asia/Samarkand',
'Asia/Seoul',
'Asia/Shanghai',
'Asia/Singapore',
'Asia/Srednekolymsk',
'Asia/Taipei',
'Asia/Tashkent',
'Asia/Tbilisi',
'Asia/Tehran',
'Asia/Tel_Aviv',
'Asia/Thimbu',
'Asia/Thimphu',
'Asia/Tokyo',
'Asia/Tomsk',
'Asia/Ujung_Pandang',
'Asia/Ulaanbaatar',
'Asia/Ulan_Bator',
'Asia/Urumqi',
'Asia/Ust-Nera',
'Asia/Vientiane',
'Asia/Vladivostok',
'Asia/Yakutsk',
'Asia/Yangon',
'Asia/Yekaterinburg',
'Asia/Yerevan',
'Atlantic/Azores',
'Atlantic/Bermuda',
'Atlantic/Canary',
'Atlantic/Cape_Verde',
'Atlantic/Faeroe',
'Atlantic/Faroe',
'Atlantic/Jan_Mayen',
'Atlantic/Madeira',
'Atlantic/Reykjavik',
'Atlantic/South_Georgia',
'Atlantic/St_Helena',
'Atlantic/Stanley',
'Australia/ACT',
'Australia/Adelaide',
'Australia/Brisbane',
'Australia/Broken_Hill',
'Australia/Canberra',
'Australia/Currie',
'Australia/Darwin',
'Australia/Eucla',
'Australia/Hobart',
'Australia/LHI',
'Australia/Lindeman',
'Australia/Lord_Howe',
'Australia/Melbourne',
'Australia/NSW',
'Australia/North',
'Australia/Perth',
'Australia/Queensland',
'Australia/South',
'Australia/Sydney',
'Australia/Tasmania',
'Australia/Victoria',
'Australia/West',
'Australia/Yancowinna',
'Brazil/Acre',
'Brazil/DeNoronha',
'Brazil/East',
'Brazil/West',
'CET',
'CST6CDT',
'Canada/Atlantic',
'Canada/Central',
'Canada/East-Saskatchewan',
'Canada/Eastern',
'Canada/Mountain',
'Canada/Newfoundland',
'Canada/Pacific',
'Canada/Saskatchewan',
'Canada/Yukon',
'Chile/Continental',
'Chile/EasterIsland',
'Cuba',
'EET',
'EST',
'EST5EDT',
'Egypt',
'Eire',
'Etc/GMT',
'Etc/GMT+0',
'Etc/GMT+1',
'Etc/GMT+10',
'Etc/GMT+11',
'Etc/GMT+12',
'Etc/GMT+2',
'Etc/GMT+3',
'Etc/GMT+4',
'Etc/GMT+5',
'Etc/GMT+6',
'Etc/GMT+7',
'Etc/GMT+8',
'Etc/GMT+9',
'Etc/GMT-0',
'Etc/GMT-1',
'Etc/GMT-10',
'Etc/GMT-11',
'Etc/GMT-12',
'Etc/GMT-13',
'Etc/GMT-14',
'Etc/GMT-2',
'Etc/GMT-3',
'Etc/GMT-4',
'Etc/GMT-5',
'Etc/GMT-6',
'Etc/GMT-7',
'Etc/GMT-8',
'Etc/GMT-9',
'Etc/GMT0',
'Etc/Greenwich',
'Etc/UCT',
'Etc/UTC',
'Etc/Universal',
'Etc/Zulu',
'Europe/Amsterdam',
'Europe/Andorra',
'Europe/Astrakhan',
'Europe/Athens',
'Europe/Belfast',
'Europe/Belgrade',
'Europe/Berlin',
'Europe/Bratislava',
'Europe/Brussels',
'Europe/Bucharest',
'Europe/Budapest',
'Europe/Busingen',
'Europe/Chisinau',
'Europe/Copenhagen',
'Europe/Dublin',
'Europe/Gibraltar',
'Europe/Guernsey',
'Europe/Helsinki',
'Europe/Isle_of_Man',
'Europe/Istanbul',
'Europe/Jersey',
'Europe/Kaliningrad',
'Europe/Kiev',
'Europe/Kirov',
'Europe/Lisbon',
'Europe/Ljubljana',
'Europe/London',
'Europe/Luxembourg',
'Europe/Madrid',
'Europe/Malta',
'Europe/Mariehamn',
'Europe/Minsk',
'Europe/Monaco',
'Europe/Moscow',
'Europe/Nicosia',
'Europe/Oslo',
'Europe/Paris',
'Europe/Podgorica',
'Europe/Prague',
'Europe/Riga',
'Europe/Rome',
'Europe/Samara',
'Europe/San_Marino',
'Europe/Sarajevo',
'Europe/Saratov',
'Europe/Simferopol',
'Europe/Skopje',
'Europe/Sofia',
'Europe/Stockholm',
'Europe/Tallinn',
'Europe/Tirane',
'Europe/Tiraspol',
'Europe/Ulyanovsk',
'Europe/Uzhgorod',
'Europe/Vaduz',
'Europe/Vatican',
'Europe/Vienna',
'Europe/Vilnius',
'Europe/Volgograd',
'Europe/Warsaw',
'Europe/Zagreb',
'Europe/Zaporozhye',
'Europe/Zurich',
'GB',
'GB-Eire',
'GMT',
'GMT+0',
'GMT-0',
'GMT0',
'Greenwich',
'HST',
'Hongkong',
'Iceland',
'Indian/Antananarivo',
'Indian/Chagos',
'Indian/Christmas',
'Indian/Cocos',
'Indian/Comoro',
'Indian/Kerguelen',
'Indian/Mahe',
'Indian/Maldives',
'Indian/Mauritius',
'Indian/Mayotte',
'Indian/Reunion',
'Iran',
'Israel',
'Jamaica',
'Japan',
'Kwajalein',
'Libya',
'MET',
'MST',
'MST7MDT',
'Mexico/BajaNorte',
'Mexico/BajaSur',
'Mexico/General',
'NZ',
'NZ-CHAT',
'Navajo',
'PRC',
'PST8PDT',
'Pacific/Apia',
'Pacific/Auckland',
'Pacific/Bougainville',
'Pacific/Chatham',
'Pacific/Chuuk',
'Pacific/Easter',
'Pacific/Efate',
'Pacific/Enderbury',
'Pacific/Fakaofo',
'Pacific/Fiji',
'Pacific/Funafuti',
'Pacific/Galapagos',
'Pacific/Gambier',
'Pacific/Guadalcanal',
'Pacific/Guam',
'Pacific/Honolulu',
'Pacific/Johnston',
'Pacific/Kiritimati',
'Pacific/Kosrae',
'Pacific/Kwajalein',
'Pacific/Majuro',
'Pacific/Marquesas',
'Pacific/Midway',
'Pacific/Nauru',
'Pacific/Niue',
'Pacific/Norfolk',
'Pacific/Noumea',
'Pacific/Pago_Pago',
'Pacific/Palau',
'Pacific/Pitcairn',
'Pacific/Pohnpei',
'Pacific/Ponape',
'Pacific/Port_Moresby',
'Pacific/Rarotonga',
'Pacific/Saipan',
'Pacific/Samoa',
'Pacific/Tahiti',
'Pacific/Tarawa',
'Pacific/Tongatapu',
'Pacific/Truk',
'Pacific/Wake',
'Pacific/Wallis',
'Pacific/Yap',
'Poland',
'Portugal',
'ROC',
'ROK',
'Singapore',
'Turkey',
'UCT',
'US/Alaska',
'US/Aleutian',
'US/Arizona',
'US/Central',
'US/East-Indiana',
'US/Eastern',
'US/Hawaii',
'US/Indiana-Starke',
'US/Michigan',
'US/Mountain',
'US/Pacific',
'US/Pacific-New',
'US/Samoa',
'UTC',
'Universal',
'W-SU',
'WET',
'Zulu']
all_timezones = LazyList(
tz for tz in all_timezones if resource_exists(tz))
all_timezones_set = LazySet(all_timezones)
common_timezones = \
['Africa/Abidjan',
'Africa/Accra',
'Africa/Addis_Ababa',
'Africa/Algiers',
'Africa/Asmara',
'Africa/Bamako',
'Africa/Bangui',
'Africa/Banjul',
'Africa/Bissau',
'Africa/Blantyre',
'Africa/Brazzaville',
'Africa/Bujumbura',
'Africa/Cairo',
'Africa/Casablanca',
'Africa/Ceuta',
'Africa/Conakry',
'Africa/Dakar',
'Africa/Dar_es_Salaam',
'Africa/Djibouti',
'Africa/Douala',
'Africa/El_Aaiun',
'Africa/Freetown',
'Africa/Gaborone',
'Africa/Harare',
'Africa/Johannesburg',
'Africa/Juba',
'Africa/Kampala',
'Africa/Khartoum',
'Africa/Kigali',
'Africa/Kinshasa',
'Africa/Lagos',
'Africa/Libreville',
'Africa/Lome',
'Africa/Luanda',
'Africa/Lubumbashi',
'Africa/Lusaka',
'Africa/Malabo',
'Africa/Maputo',
'Africa/Maseru',
'Africa/Mbabane',
'Africa/Mogadishu',
'Africa/Monrovia',
'Africa/Nairobi',
'Africa/Ndjamena',
'Africa/Niamey',
'Africa/Nouakchott',
'Africa/Ouagadougou',
'Africa/Porto-Novo',
'Africa/Sao_Tome',
'Africa/Tripoli',
'Africa/Tunis',
'Africa/Windhoek',
'America/Adak',
'America/Anchorage',
'America/Anguilla',
'America/Antigua',
'America/Araguaina',
'America/Argentina/Buenos_Aires',
'America/Argentina/Catamarca',
'America/Argentina/Cordoba',
'America/Argentina/Jujuy',
'America/Argentina/La_Rioja',
'America/Argentina/Mendoza',
'America/Argentina/Rio_Gallegos',
'America/Argentina/Salta',
'America/Argentina/San_Juan',
'America/Argentina/San_Luis',
'America/Argentina/Tucuman',
'America/Argentina/Ushuaia',
'America/Aruba',
'America/Asuncion',
'America/Atikokan',
'America/Bahia',
'America/Bahia_Banderas',
'America/Barbados',
'America/Belem',
'America/Belize',
'America/Blanc-Sablon',
'America/Boa_Vista',
'America/Bogota',
'America/Boise',
'America/Cambridge_Bay',
'America/Campo_Grande',
'America/Cancun',
'America/Caracas',
'America/Cayenne',
'America/Cayman',
'America/Chicago',
'America/Chihuahua',
'America/Costa_Rica',
'America/Creston',
'America/Cuiaba',
'America/Curacao',
'America/Danmarkshavn',
'America/Dawson',
'America/Dawson_Creek',
'America/Denver',
'America/Detroit',
'America/Dominica',
'America/Edmonton',
'America/Eirunepe',
'America/El_Salvador',
'America/Fort_Nelson',
'America/Fortaleza',
'America/Glace_Bay',
'America/Godthab',
'America/Goose_Bay',
'America/Grand_Turk',
'America/Grenada',
'America/Guadeloupe',
'America/Guatemala',
'America/Guayaquil',
'America/Guyana',
'America/Halifax',
'America/Havana',
'America/Hermosillo',
'America/Indiana/Indianapolis',
'America/Indiana/Knox',
'America/Indiana/Marengo',
'America/Indiana/Petersburg',
'America/Indiana/Tell_City',
'America/Indiana/Vevay',
'America/Indiana/Vincennes',
'America/Indiana/Winamac',
'America/Inuvik',
'America/Iqaluit',
'America/Jamaica',
'America/Juneau',
'America/Kentucky/Louisville',
'America/Kentucky/Monticello',
'America/Kralendijk',
'America/La_Paz',
'America/Lima',
'America/Los_Angeles',
'America/Lower_Princes',
'America/Maceio',
'America/Managua',
'America/Manaus',
'America/Marigot',
'America/Martinique',
'America/Matamoros',
'America/Mazatlan',
'America/Menominee',
'America/Merida',
'America/Metlakatla',
'America/Mexico_City',
'America/Miquelon',
'America/Moncton',
'America/Monterrey',
'America/Montevideo',
'America/Montserrat',
'America/Nassau',
'America/New_York',
'America/Nipigon',
'America/Nome',
'America/Noronha',
'America/North_Dakota/Beulah',
'America/North_Dakota/Center',
'America/North_Dakota/New_Salem',
'America/Ojinaga',
'America/Panama',
'America/Pangnirtung',
'America/Paramaribo',
'America/Phoenix',
'America/Port-au-Prince',
'America/Port_of_Spain',
'America/Porto_Velho',
'America/Puerto_Rico',
'America/Rainy_River',
'America/Rankin_Inlet',
'America/Recife',
'America/Regina',
'America/Resolute',
'America/Rio_Branco',
'America/Santarem',
'America/Santiago',
'America/Santo_Domingo',
'America/Sao_Paulo',
'America/Scoresbysund',
'America/Sitka',
'America/St_Barthelemy',
'America/St_Johns',
'America/St_Kitts',
'America/St_Lucia',
'America/St_Thomas',
'America/St_Vincent',
'America/Swift_Current',
'America/Tegucigalpa',
'America/Thule',
'America/Thunder_Bay',
'America/Tijuana',
'America/Toronto',
'America/Tortola',
'America/Vancouver',
'America/Whitehorse',
'America/Winnipeg',
'America/Yakutat',
'America/Yellowknife',
'Antarctica/Casey',
'Antarctica/Davis',
'Antarctica/DumontDUrville',
'Antarctica/Macquarie',
'Antarctica/Mawson',
'Antarctica/McMurdo',
'Antarctica/Palmer',
'Antarctica/Rothera',
'Antarctica/Syowa',
'Antarctica/Troll',
'Antarctica/Vostok',
'Arctic/Longyearbyen',
'Asia/Aden',
'Asia/Almaty',
'Asia/Amman',
'Asia/Anadyr',
'Asia/Aqtau',
'Asia/Aqtobe',
'Asia/Ashgabat',
'Asia/Atyrau',
'Asia/Baghdad',
'Asia/Bahrain',
'Asia/Baku',
'Asia/Bangkok',
'Asia/Barnaul',
'Asia/Beirut',
'Asia/Bishkek',
'Asia/Brunei',
'Asia/Chita',
'Asia/Choibalsan',
'Asia/Colombo',
'Asia/Damascus',
'Asia/Dhaka',
'Asia/Dili',
'Asia/Dubai',
'Asia/Dushanbe',
'Asia/Famagusta',
'Asia/Gaza',
'Asia/Hebron',
'Asia/Ho_Chi_Minh',
'Asia/Hong_Kong',
'Asia/Hovd',
'Asia/Irkutsk',
'Asia/Jakarta',
'Asia/Jayapura',
'Asia/Jerusalem',
'Asia/Kabul',
'Asia/Kamchatka',
'Asia/Karachi',
'Asia/Kathmandu',
'Asia/Khandyga',
'Asia/Kolkata',
'Asia/Krasnoyarsk',
'Asia/Kuala_Lumpur',
'Asia/Kuching',
'Asia/Kuwait',
'Asia/Macau',
'Asia/Magadan',
'Asia/Makassar',
'Asia/Manila',
'Asia/Muscat',
'Asia/Nicosia',
'Asia/Novokuznetsk',
'Asia/Novosibirsk',
'Asia/Omsk',
'Asia/Oral',
'Asia/Phnom_Penh',
'Asia/Pontianak',
'Asia/Pyongyang',
'Asia/Qatar',
'Asia/Qyzylorda',
'Asia/Riyadh',
'Asia/Sakhalin',
'Asia/Samarkand',
'Asia/Seoul',
'Asia/Shanghai',
'Asia/Singapore',
'Asia/Srednekolymsk',
'Asia/Taipei',
'Asia/Tashkent',
'Asia/Tbilisi',
'Asia/Tehran',
'Asia/Thimphu',
'Asia/Tokyo',
'Asia/Tomsk',
'Asia/Ulaanbaatar',
'Asia/Urumqi',
'Asia/Ust-Nera',
'Asia/Vientiane',
'Asia/Vladivostok',
'Asia/Yakutsk',
'Asia/Yangon',
'Asia/Yekaterinburg',
'Asia/Yerevan',
'Atlantic/Azores',
'Atlantic/Bermuda',
'Atlantic/Canary',
'Atlantic/Cape_Verde',
'Atlantic/Faroe',
'Atlantic/Madeira',
'Atlantic/Reykjavik',
'Atlantic/South_Georgia',
'Atlantic/St_Helena',
'Atlantic/Stanley',
'Australia/Adelaide',
'Australia/Brisbane',
'Australia/Broken_Hill',
'Australia/Currie',
'Australia/Darwin',
'Australia/Eucla',
'Australia/Hobart',
'Australia/Lindeman',
'Australia/Lord_Howe',
'Australia/Melbourne',
'Australia/Perth',
'Australia/Sydney',
'Canada/Atlantic',
'Canada/Central',
'Canada/Eastern',
'Canada/Mountain',
'Canada/Newfoundland',
'Canada/Pacific',
'Europe/Amsterdam',
'Europe/Andorra',
'Europe/Astrakhan',
'Europe/Athens',
'Europe/Belgrade',
'Europe/Berlin',
'Europe/Bratislava',
'Europe/Brussels',
'Europe/Bucharest',
'Europe/Budapest',
'Europe/Busingen',
'Europe/Chisinau',
'Europe/Copenhagen',
'Europe/Dublin',
'Europe/Gibraltar',
'Europe/Guernsey',
'Europe/Helsinki',
'Europe/Isle_of_Man',
'Europe/Istanbul',
'Europe/Jersey',
'Europe/Kaliningrad',
'Europe/Kiev',
'Europe/Kirov',
'Europe/Lisbon',
'Europe/Ljubljana',
'Europe/London',
'Europe/Luxembourg',
'Europe/Madrid',
'Europe/Malta',
'Europe/Mariehamn',
'Europe/Minsk',
'Europe/Monaco',
'Europe/Moscow',
'Europe/Oslo',
'Europe/Paris',
'Europe/Podgorica',
'Europe/Prague',
'Europe/Riga',
'Europe/Rome',
'Europe/Samara',
'Europe/San_Marino',
'Europe/Sarajevo',
'Europe/Saratov',
'Europe/Simferopol',
'Europe/Skopje',
'Europe/Sofia',
'Europe/Stockholm',
'Europe/Tallinn',
'Europe/Tirane',
'Europe/Ulyanovsk',
'Europe/Uzhgorod',
'Europe/Vaduz',
'Europe/Vatican',
'Europe/Vienna',
'Europe/Vilnius',
'Europe/Volgograd',
'Europe/Warsaw',
'Europe/Zagreb',
'Europe/Zaporozhye',
'Europe/Zurich',
'GMT',
'Indian/Antananarivo',
'Indian/Chagos',
'Indian/Christmas',
'Indian/Cocos',
'Indian/Comoro',
'Indian/Kerguelen',
'Indian/Mahe',
'Indian/Maldives',
'Indian/Mauritius',
'Indian/Mayotte',
'Indian/Reunion',
'Pacific/Apia',
'Pacific/Auckland',
'Pacific/Bougainville',
'Pacific/Chatham',
'Pacific/Chuuk',
'Pacific/Easter',
'Pacific/Efate',
'Pacific/Enderbury',
'Pacific/Fakaofo',
'Pacific/Fiji',
'Pacific/Funafuti',
'Pacific/Galapagos',
'Pacific/Gambier',
'Pacific/Guadalcanal',
'Pacific/Guam',
'Pacific/Honolulu',
'Pacific/Johnston',
'Pacific/Kiritimati',
'Pacific/Kosrae',
'Pacific/Kwajalein',
'Pacific/Majuro',
'Pacific/Marquesas',
'Pacific/Midway',
'Pacific/Nauru',
'Pacific/Niue',
'Pacific/Norfolk',
'Pacific/Noumea',
'Pacific/Pago_Pago',
'Pacific/Palau',
'Pacific/Pitcairn',
'Pacific/Pohnpei',
'Pacific/Port_Moresby',
'Pacific/Rarotonga',
'Pacific/Saipan',
'Pacific/Tahiti',
'Pacific/Tarawa',
'Pacific/Tongatapu',
'Pacific/Wake',
'Pacific/Wallis',
'US/Alaska',
'US/Arizona',
'US/Central',
'US/Eastern',
'US/Hawaii',
'US/Mountain',
'US/Pacific',
'UTC']
common_timezones = LazyList(
tz for tz in common_timezones if tz in all_timezones)
common_timezones_set = LazySet(common_timezones)
|
|
import lexeme.Library as Library
import csv
import lexeme.IOHelper as IOHelper
import sys
import os
from tabulate import tabulate
# Module-level state shared by the command handlers below.
# NOTE(review): these start empty/blank here; they are presumably filled
# in by configuration-loading code elsewhere in this module (not visible
# in this chunk) - confirm before relying on their contents.
wordgensettings = {}   # options passed through to Library.generateWord()
formrules = {}         # assumed: per-form generation rules - TODO confirm
phonotactics = {}      # assumed: phonotactic constraints - TODO confirm
formatString = ""      # assumed: output formatting template - TODO confirm
def add():
    '''Interface for addWord().'''
    entry = {
        'english': input("Enter meaning in English: "),
        'word': input("Enter word in conlang: "),
    }
    # Offer the known forms plus an escape hatch for defining a new one.
    form_choices = Library.getFieldOptions("form")
    form_choices.append("other")
    chosen_form = IOHelper.chooseOption("Enter word form", form_choices)
    if chosen_form == "other":
        chosen_form = input("Enter new word form: ")
    entry['form'] = chosen_form
    entry = addCustomFields(entry)
    Library.addWord(entry)
    print("Word saved in database!")
def modify():
    '''Modify an existing word.

    Looks the word up by its conlang spelling, shows it, then repeatedly
    prompts for a field to change until the user says they are finished.
    Two pseudo-fields are offered alongside the word's real fields:
    "NEW" adds custom fields and "DELETE" removes one.  The edit is
    committed by popping the stored word and re-adding the modified copy;
    Ctrl-C aborts without saving.
    '''
    try:
        conword = input("Enter word in conlang: ")
        if Library.wordExists(conlang=conword):
            # pop=False: read without removing; removal happens at commit.
            word = Library.findConWord(conword, pop=False)
            outputWord(word)
        else:
            print("Word does not exist")
            return

        keys = list(word.keys())
        keys.remove("id")  # the database id is never user-editable
        keys.append("NEW")
        keys.append("DELETE")

        another = True
        while another:
            key = IOHelper.chooseOption("Enter field to modify", keys)
            if key == "NEW":
                # NOTE(review): fields added here are not appended to
                # `keys`, so they cannot be re-modified in this session -
                # confirm whether that is intended.
                word = addCustomFields(word, prompt=False)
            elif key == "DELETE":
                # Temporarily strip the pseudo-fields and the two
                # mandatory fields so only deletable fields are offered.
                keys.remove("NEW")
                keys.remove("DELETE")
                keys.remove("english")
                keys.remove("word")
                key = IOHelper.chooseOption("Enter field to delete", keys)
                keys.remove(key)
                del word[key]
                # Restore the mandatory and pseudo-fields for next round.
                keys.insert(0, "english")
                keys.insert(0, "word")
                keys.append("NEW")
                keys.append("DELETE")
            else:
                if key in ["word", "english"]:
                    # Free-text fields: accept any value directly.
                    word[key] = input("Enter new value: ")
                else:
                    # Constrained fields: choose from known options, with
                    # "other" as an escape hatch for a brand-new value.
                    values = Library.getFieldOptions(key)
                    values.append("other")
                    v = IOHelper.chooseOption("Enter word value",
                                              values)
                    if v == "other":
                        v = input("Enter new value: ")
                    word[key] = v
            another = not IOHelper.yesNo("Finished modifying")

        # Delete word if finished modifying and add new word
        Library.findConWord(conword, pop=True)
        Library.addWord(word)
    except KeyboardInterrupt:
        # User aborted mid-edit: commit nothing.
        pass
def listwords():
    '''Interface for listWords().'''
    list_type = IOHelper.chooseOption("Enter list type", ["all", "field"])
    if list_type == "field":
        # Narrow the listing to a single field value.
        field = IOHelper.chooseOption("Enter desired field",
                                      Library.getFields())
        option = IOHelper.chooseOption("Enter option to list",
                                       Library.getFieldOptions(field))
        words = Library.listWords(list_type, field, option)
    else:
        words = Library.listWords(list_type)
    if words:
        outputWordList(words)
    else:
        print("No words to display")
def quit():
    '''Exit the program with a success (0) status.'''
    # Equivalent to sys.exit(0): raise SystemExit so cleanup handlers run.
    raise SystemExit(0)
def decline():
    ''' Allows user to select word to decline and declension, then outputs the
    declined word.
    '''
    conword = input("Enter word (in conlang) to decline: ")
    try:
        entry = Library.findConWord(conword)
    except LookupError:
        print("Word not found in database")
        return 1
    # Let the user pick one of the declensions the library knows about.
    declension = IOHelper.createMenu("Select declension",
                                     Library.getAvailableDeclensions())
    declined = Library.declineWord(entry, declension)
    outputWord(declined, "conlang")
def outputWordList(wordList):
    '''Take a list of words. Output list of words in table.

    Fix: the original derived the extra-field headers only from
    wordList[0] and appended each word's extra values in that word's own
    key order, so words with differing custom fields produced ragged or
    misaligned columns.  Rows are now aligned on the ordered union of
    every word's extra fields, with blanks for fields a word lacks.
    '''
    # Ordered union of all custom fields (beyond english/word/id).
    extra_fields = []
    for word in wordList:
        for item in word:
            if item not in ("english", "word", "id") \
                    and item not in extra_fields:
                extra_fields.append(item)

    headers = ["English", "Conlang"]
    headers.extend(field.capitalize() for field in extra_fields)

    table = []
    for word in wordList:
        row = [word["english"], word["word"]]
        # Missing custom fields render as blank cells instead of shifting
        # later values into the wrong column.
        row.extend(word.get(field, "") for field in extra_fields)
        table.append(row)

    print("")
    print(tabulate(table, headers=headers))
    print("")
def clearScreen():
    '''Clear the terminal using the platform-appropriate shell command.'''
    command = "cls" if os.name == "nt" else "clear"
    os.system(command)
lexeme = """
_
| | _____ _____ _ __ ___ ___
| | / _ \ \/ / _ | '_ ` _ \ / _ |
| |__| __/> | __| | | | | | __/
|_____\___/_/\_\___|_| |_| |_|\___|
"""
print(lexeme)
def outputWord(word, first="english"):
    '''Take word dictionary and optional first column. Output
    word in a table.
    '''
    # Three stacked rows: spelling, phonemic transcription, allophonic
    # transcription.  Only the conlang column fills rows 2 and 3.
    table = [[], [], []]
    headers = []
    phonetic = Library.transcribePhonemes(word["word"])
    allophonetic = Library.transcribeAllophones(phonetic)

    def _english_column():
        table[0].append(word["english"])
        table[1].append("")
        table[2].append("")
        headers.append("English")

    def _conlang_column():
        table[0].append(word["word"])
        table[1].append(phonetic)
        table[2].append(allophonetic)
        headers.append("Conlang")

    # `first` selects which of the two mandatory columns leads.
    if first == "english":
        _english_column()
        _conlang_column()
    elif first == "conlang":
        _conlang_column()
        _english_column()

    # Append any custom fields after the two mandatory columns.
    for item in word:
        if item not in ["word", "english", "id"]:
            table[0].append(word[item])
            table[1].append("")
            table[2].append("")
            headers.append(item.capitalize())

    print("")
    print(tabulate(table, headers=headers))
    print("")
def statistics():
    '''Interface for getStatistics(): print the total word count.'''
    total = Library.getStatistics()
    print("Words: " + str(total))
def search():
    '''Interface for searchWords(): prompt for a term, print matches.

    English matches are printed first, then conlang matches.
    '''
    term = input("Enter search term: ")
    results = Library.searchWords(term)
    if not results[0] and not results[1]:
        print("Word not found")
        return
    for index, kind in ((0, "english"), (1, "conlang")):
        for word in results[index]:
            outputWord(word, kind)
            print("")
def batchgenerate():
    '''Run generate() on every word in a file, one word per line.

    Returns 1 when the file cannot be found.
    '''
    filename = input("Enter location of words file: ")
    try:
        with open(filename, "r") as handle:
            for line in handle:
                entry = line.strip()
                clearScreen()
                print("Generating word " + entry + "...")
                generate(entry)
                input("Press enter to continue...")
    except FileNotFoundError:
        print("File not found! Double-check the path you are using.")
        return 1
    print("Finished batch generation!")
def importWords():
    '''Add words from a csv file to the database.

    Each csv row is read as a dict (header row gives the keys) and
    passed straight to Library.addWord().
    '''
    filename = input("Enter location of word csv file: ")
    try:
        # newline="" is required by the csv module so it can handle
        # quoted embedded newlines and platform line endings itself;
        # without it rows can be mis-parsed on Windows.
        with open(filename, "r", newline="") as f:
            reader = csv.DictReader(f)
            for word in reader:
                Library.addWord(word)
            print("Words successfully imported!")
    except FileNotFoundError:
        print("File not found! Double-check the path you are using.")
def generate(english=None):
    '''Interface to generateWord(): create, vet and store a new word.

    english: English gloss to generate for; prompted for when None.
    Returns 1 (after displaying the existing entry) if the gloss is
    already in the database. Relies on the module-global
    wordgensettings set up by loadData().
    '''
    if english is None:
        english = input("Enter word in English: ")
    if Library.wordExists(english=english):
        print("Word already exists!")
        w = Library.findEnglishWord(english)
        outputWord(w)
        return 1
    forms = Library.getFieldOptions("form")
    forms.append("other")
    form = IOHelper.chooseOption("Enter word form",
                                 forms)
    if form == "other":
        form = input("Enter new word form: ")
    finalised = False
    while finalised is not True:
        # Regenerate until the candidate conlang word is unique.
        word = Library.generateWord(english, form, wordgensettings)
        while Library.wordExists(conlang=word['word']):
            word = Library.generateWord(english, form, wordgensettings)
        #clearScreen()
        outputWord(word, "conlang")
        # "y" accepts, "e" lets the user hand-edit the word; any other
        # answer ("n") loops and generates a fresh candidate.
        accepted = IOHelper.chooseOption("Accept word", ["y", "n", "e"])
        if accepted == "y":
            finalised = True
        elif accepted == "e":
            word['word'] = input("Enter modified word: ")
            finalised = True
    word = addCustomFields(word)
    Library.addWord(word)
    print("Word saved in database!")
def addCustomFields(word, prompt=True):
    '''Interactively attach custom field/value pairs to a word.

    When prompt is True the user is first asked whether to add a field;
    when False at least one field is collected. Returns the (mutated)
    word dict.
    '''
    keep_going = IOHelper.yesNo("Add custom field") if prompt else True
    while keep_going:
        options = Library.getFields()
        options.append("other")
        field = IOHelper.chooseOption("Enter desired field to add", options)
        if field == "other":
            field = input("Enter new field: ")
            word[field] = input("Enter word value: ")
        else:
            values = Library.getFieldOptions(field)
            values.append("other")
            choice = IOHelper.chooseOption("Enter word value",
                                           values)
            if choice == "other":
                choice = input("Enter new word value: ")
            word[field] = choice
        keep_going = IOHelper.yesNo("Add custom field")
    return word
def loadData(filename=None):
    '''Load configuration and hand each section to Library.

    Config layout: [0] phonemes, [1] allophones, [2] declensions,
    [3] word-generation settings, [4] export format string. The last
    two are stashed in module globals for generate()/exportText().
    Exits the program when the config cannot be parsed.
    '''
    global wordgensettings
    global formatString
    try:
        result = IOHelper.parseConfig(filename)
    except KeyError:
        print("Config file is malformed or does not exist")
        quit()
    wordgensettings = result[3]
    formatString = result[4]
    Library.setPhonemes(result[0])
    Library.setAllophones(result[1])
    Library.setDeclensions(result[2])
    return 0
def exportText():
    '''Interface for Library.exportText(): formatted-text export.'''
    target = input("Enter filename to export: ")
    Library.exportText(target, formatString)
def export():
    '''Interface for Library.exportWords(): raw word export.'''
    target = input("Enter filename to export: ")
    Library.exportWords(target)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2011, Nicolas Clairon
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the University of California, Berkeley nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import logging
logging.basicConfig(level=logging.DEBUG)
from mongokit import *
from pymongo.objectid import ObjectId
class AutoRefTestCase(unittest.TestCase):
    """Integration tests for mongokit's AutoRef (use_autorefs) feature.

    Embedded Document instances are stored as DBRefs and transparently
    dereferenced on retrieval. Python 2 era code (unicode, .next());
    requires a running MongoDB instance via Connection().
    """

    def setUp(self):
        # Fresh connection and working collection for every test.
        self.connection = Connection()
        self.col = self.connection['test']['mongokit']

    def tearDown(self):
        # 'test2' is also touched by test_dereference.
        self.connection.drop_database('test')
        self.connection.drop_database('test2')

    def test_simple_autoref(self):
        # Embedding a DocA into DocB stores a DBRef and keeps both
        # documents in sync through saves.
        class DocA(Document):
            structure = {
                "a":{'foo':int},
            }
        self.connection.register([DocA])
        doca = self.col.DocA()
        doca['_id'] = 'doca'
        doca['a']['foo'] = 3
        doca.save()

        class DocB(Document):
            structure = {
                "b":{"doc_a":DocA},
            }
            use_autorefs = True
        self.connection.register([DocB])
        docb = self.col.DocB()
        # the structure is automaticly filled by the corresponding structure
        assert docb == {'b': {'doc_a':None}}, docb
        #docb.validate()
        docb['_id'] = 'docb'
        # A non-Document value in an autoref slot must fail validation.
        docb['b']['doc_a'] = 4
        self.assertRaises(SchemaTypeError, docb.validate)
        docb['b']['doc_a'] = doca
        assert docb == {'b': {'doc_a': {'a': {'foo': 3}, '_id': 'doca'}}, '_id': 'docb'}
        docb.save()
        saved_docb = self.col.find_one({'_id':'docb'})
        _docb = self.col.DocB.get_from_id('docb')
        # The raw stored form is a DBRef, not the embedded dict.
        assert saved_docb['b']['doc_a'] == DBRef(database='test', collection='mongokit', id='doca'), saved_docb['b']['doc_a']

        docb_list = list(self.col.DocB.fetch())
        assert len(docb_list) == 1
        new_docb = docb_list[0]
        # fetch() dereferences back into a DocA instance.
        assert isinstance(new_docb['b']['doc_a'], DocA), new_docb['b']['doc_a'].__class__
        assert docb == {'b': {'doc_a': {'a': {'foo': 3}, '_id': 'doca'}}, '_id': 'docb'}, docb
        assert docb['b']['doc_a']['a']['foo'] == 3
        # Mutating through the reference and saving propagates to doca.
        docb['b']['doc_a']['a']['foo'] = 4
        docb.save()
        assert docb['b']['doc_a']['a']['foo'] == 4, docb
        assert self.col.DocA.fetch().next()['a']['foo'] == 4
        assert doca['a']['foo'] == 4, doca['a']['foo']
        saved_docb = self.col.DocB.collection.find_one({'_id':'docb'})
        assert saved_docb['b']['doc_a'] == DBRef(database='test', collection='mongokit', id='doca'), saved_docb['b']['doc_a']
        assert self.col.DocB.fetch_one() == docb
        assert self.col.DocB.find_one({'_id':'docb'}) == docb

    def test_simple_autoref2(self):
        # Autoref where the referenced document is the whole field value.
        class Embed(Document):
            structure = {
                'foo': dict,
                'bar': int,
            }
        class Doc(Document):
            structure = {
                'embed':Embed,
                'eggs': unicode,
            }
            use_autorefs = True
        self.connection.register([Embed, Doc])

        embed = self.col.Embed()
        embed['foo'] = {'hello':u'monde'}
        embed['bar'] = 3
        embed.save()

        doc = self.col.Doc()
        doc['embed'] = embed
        doc['eggs'] = u'arf'
        doc.save()
        assert doc == {'embed': {u'_id': embed['_id'], u'bar': 3, u'foo': {u'hello': u'monde'}}, '_id': doc['_id'], 'eggs': u'arf'}, doc

        doc = self.col.Doc.fetch_one()
        # Editing the embedded doc through the parent updates the
        # referenced document as well.
        doc['embed']['foo']['hello'] = u'World'
        doc.save()
        assert doc == {'embed': {u'_id': embed['_id'], u'bar': 3, u'foo': {u'hello': u'World'}}, '_id': doc['_id'], 'eggs': u'arf'}, doc
        assert self.col.Embed.fetch_one() == {u'_id': embed['_id'], u'bar': 3, u'foo': {u'hello': u'World'}}

    def test_autoref_with_default_values(self):
        # A saved document may serve as a default value for an autoref slot.
        class DocA(Document):
            structure = {
                "a":{'foo':int},
                "abis":{'bar':int},
            }
        self.connection.register([DocA])
        doca = self.col.DocA()
        doca['_id'] = 'doca'
        doca['a']['foo'] = 2
        doca.save()

        class DocB(Document):
            structure = {
                "b":{"doc_a":DocA},
            }
            use_autorefs = True
            default_values = {'b.doc_a':doca}
        self.connection.register([DocB])
        docb = self.col.DocB()
        assert docb == {'b': {'doc_a': {'a': {'foo': 2}, 'abis': {'bar': None}, '_id': 'doca'}}}, docb
        docb.save()

    def test_autoref_with_required_fields(self):
        # required_fields of the embedded class are enforced through
        # the reference; None is still allowed for the slot itself.
        class DocA(Document):
            structure = {
                "a":{'foo':int},
                "abis":{'bar':int},
            }
            required_fields = ['a.foo']
        self.connection.register([DocA])
        doca = self.col.DocA()
        doca['_id'] = 'doca'
        doca['a']['foo'] = 2
        doca.save()

        class DocB(Document):
            db_name = "test"
            collection_name = "mongokit"
            structure = {
                "b":{"doc_a":DocA},
            }
            use_autorefs = True
        self.connection.register([DocB])
        docb = self.col.DocB()
        docb['b']['doc_a'] = doca
        assert docb == {'b': {'doc_a': {'a': {'foo': 2}, 'abis': {'bar': None}, '_id': 'doca'}}}, docb
        docb['_id'] = 'docb'
        docb['b']['doc_a']['a']['foo'] = None
        self.assertRaises(RequireFieldError, docb.validate)
        docb['b']['doc_a']['a']['foo'] = 4
        docb.save()

        docb['b']['doc_a'] = None
        docb.save()

    def test_badautoref(self):
        """Test autoref enabled, but embed the wrong kind of document.
        Assert that it tells us it's a bad embed.
        """
        class EmbedDoc(Document):
            structure = {
                "spam": unicode
            }
        self.connection.register([EmbedDoc])
        embed = self.col.EmbedDoc()
        embed["spam"] = u"eggs"
        embed.save()
        assert embed

        class EmbedOtherDoc(Document):
            structure = {
                "ham": unicode
            }
        self.connection.register([EmbedOtherDoc])
        embedOther = self.connection.test.embed_other.EmbedOtherDoc()
        embedOther["ham"] = u"eggs"
        embedOther.save()
        assert embedOther

        class MyDoc(Document):
            # NOTE(review): use_autorefs is declared twice in this class
            # body (harmless duplicate).
            use_autorefs = True
            structure = {
                "bla":{
                    "foo":unicode,
                    "bar":int,
                },
                "spam": EmbedDoc,
            }
            use_autorefs = True
        self.connection.register([MyDoc])
        mydoc = self.connection.test.autoref.MyDoc()
        mydoc["bla"]["foo"] = u"bar"
        mydoc["bla"]["bar"] = 42
        # EmbedOtherDoc is not an EmbedDoc: saving must raise.
        mydoc["spam"] = embedOther

        self.assertRaises(SchemaTypeError, mydoc.save)

    def test_badautoref_not_enabled(self):
        # Test that, when autoref is disabled
        # we refuse to allow a MongoDocument
        # to be valid schema.
        class EmbedDoc(Document):
            structure = {
                "spam": unicode
            }
        self.connection.register([EmbedDoc])
        embed = self.connection.test['autoref.embed'].EmbedDoc()
        embed["spam"] = u"eggs"
        embed.save()
        assert embed

        class MyDoc(Document):
            structure = {
                "bla":{
                    "foo":unicode,
                    "bar":int,
                },
                "spam": EmbedDoc,
            }
        self.assertRaises(StructureError, self.connection.register, [MyDoc])

    def test_subclass(self):
        # Test autoref enabled, but embed a subclass.
        # e.g. if we say EmbedDoc, a subclass of EmbedDoc
        # is also valid.
        class EmbedDoc(Document):
            structure = {
                "spam": unicode
            }
        self.connection.register([EmbedDoc])
        embed = self.connection.test['autoref.embed'].EmbedDoc()
        embed["spam"] = u"eggs"
        embed.save()

        class EmbedOtherDoc(EmbedDoc):
            structure = {
                "ham": unicode
            }
        self.connection.register([EmbedOtherDoc])
        embedOther = self.connection.test['autoref.embed_other'].EmbedOtherDoc()
        embedOther["ham"] = u"eggs"
        embedOther.save()
        assert embedOther

        class MyDoc(Document):
            use_autorefs = True
            structure = {
                "bla":{
                    "foo":unicode,
                    "bar":int,
                },
                "spam": EmbedDoc,
            }
        self.connection.register([MyDoc])
        mydoc = self.connection.test.autoref.MyDoc()
        mydoc["bla"]["foo"] = u"bar"
        mydoc["bla"]["bar"] = 42
        mydoc["spam"] = embedOther
        mydoc.save()
        # The reference keeps pointing at the subclass's own collection.
        assert mydoc['spam'].collection.name == "autoref.embed_other"
        assert mydoc['spam'] == embedOther

    def test_autoref_in_list(self):
        # Autorefs inside a list field: each element is stored as a DBRef.
        class DocA(Document):
            structure = {
                "a":{'foo':int},
            }
        self.connection.register([DocA])
        doca = self.col.DocA()
        doca['_id'] = 'doca'
        doca['a']['foo'] = 3
        doca.save()
        doca2 = self.col.DocA()
        doca2['_id'] = 'doca2'
        doca2['a']['foo'] = 5
        doca2.save()

        class DocB(Document):
            structure = {
                "b":{"doc_a":[DocA]},
            }
            use_autorefs = True
        self.connection.register([DocB])
        docb = self.col.DocB()
        # the structure is automaticly filled by the corresponding structure
        assert docb == {'b': {'doc_a':[]}}, docb
        docb.validate()
        docb['_id'] = 'docb'
        # Appending a plain unicode where a DocA is expected must fail.
        docb['b']['doc_a'].append(u'bla')
        self.assertRaises(SchemaTypeError, docb.validate)
        docb['b']['doc_a'] = []
        docb['b']['doc_a'].append(doca)
        assert docb == {'b': {'doc_a': [{'a': {'foo': 3}, '_id': 'doca'}]}, '_id': 'docb'}
        docb.save()
        assert isinstance(docb.collection.find_one({'_id':'docb'})['b']['doc_a'][0], DBRef), type(docb.collection.find_one({'_id':'docb'})['b']['doc_a'][0])

        assert docb == {'b': {'doc_a': [{'a': {'foo': 3}, '_id': 'doca'}]}, '_id': 'docb'}
        assert docb['b']['doc_a'][0]['a']['foo'] == 3
        docb['b']['doc_a'][0]['a']['foo'] = 4
        docb.save()
        assert docb['b']['doc_a'][0]['a']['foo'] == 4, docb['b']['doc_a'][0]['a']['foo']
        assert doca['a']['foo'] == 4, doca['a']['foo']

        docb['b']['doc_a'].append(doca2)
        assert docb == {'b': {'doc_a': [{'a': {'foo': 4}, '_id': 'doca'}, {'a': {'foo': 5}, '_id': 'doca2'}]}, '_id': 'docb'}
        docb.validate()

    def test_autoref_retrieval(self):
        # References nested at several depths are all dereferenced
        # back to DocA instances on retrieval.
        class DocA(Document):
            structure = {
                "a":{'foo':int},
            }
        self.connection.register([DocA])
        doca = self.col.DocA()
        doca['_id'] = 'doca'
        doca['a']['foo'] = 3
        doca.save()

        class DocB(Document):
            structure = {
                "b":{
                    "doc_a":DocA,
                    "deep": {"doc_a_deep":DocA},
                    "deeper": {"doc_a_deeper":DocA,
                               "inner":{"doc_a_deepest":DocA}}
                },
            }
            use_autorefs = True
        self.connection.register([DocB])
        docb = self.col.DocB()
        # the structure is automaticly filled by the corresponding structure
        docb['_id'] = 'docb'
        docb['b']['doc_a'] = doca

        # create a few deeper docas
        deep = self.col.DocA()
        #deep['_id'] = 'deep'
        deep['a']['foo'] = 5
        deep.save()
        docb['b']['deep']['doc_a_deep'] = deep
        deeper = self.col.DocA()
        deeper['_id'] = 'deeper'
        deeper['a']['foo'] = 8
        deeper.save()
        docb['b']['deeper']['doc_a_deeper'] = deeper
        deepest = self.col.DocA()
        deepest['_id'] = 'deepest'
        #deepest['_id'] = 'deeper'
        deepest['a']['foo'] = 18
        deepest.save()
        docb['b']['deeper']['inner']['doc_a_deepest'] = deepest

        docb.save()

        # now, does retrieval function as expected?
        test_doc = self.col.DocB.get_from_id(docb['_id'])
        assert isinstance(test_doc['b']['doc_a'], DocA), type(test_doc['b']['doc_a'])
        assert test_doc['b']['doc_a']['a']['foo'] == 3
        assert isinstance(test_doc['b']['deep']['doc_a_deep'], DocA)
        assert test_doc['b']['deep']['doc_a_deep']['a']['foo'] == 5
        assert isinstance(test_doc['b']['deeper']['doc_a_deeper'], DocA)
        assert test_doc['b']['deeper']['doc_a_deeper']['a']['foo'] == 8, test_doc
        assert isinstance(test_doc['b']['deeper']['inner']['doc_a_deepest'], DocA)
        assert test_doc['b']['deeper']['inner']['doc_a_deepest']['a']['foo'] == 18

    def test_autoref_with_same_embed_id(self):
        # Two references sharing the same _id collapse to the same
        # document when the parent is saved.
        class DocA(Document):
            structure = {
                "a":{'foo':int},
            }
        self.connection.register([DocA])
        doca = self.col.DocA()
        doca['_id'] = 'doca'
        doca['a']['foo'] = 3
        doca.save()

        class DocB(Document):
            structure = {
                "b":{
                    "doc_a":DocA,
                    "deep": {"doc_a_deep":DocA},
                },
            }
            use_autorefs = True
        self.connection.register([DocB])
        docb = self.col.DocB()
        docb['_id'] = 'docb'
        docb['b']['doc_a'] = doca

        # create a few deeper docas
        deep = self.col.DocA()
        deep['_id'] = 'doca' # XXX same id of doca, this will be erased by doca when saving docb
        deep['a']['foo'] = 5
        deep.save()
        docb['b']['deep']['doc_a_deep'] = deep

        docb.save()

        test_doc = self.col.DocB.get_from_id(docb['_id'])
        assert test_doc['b']['doc_a']['a']['foo'] == 3, test_doc['b']['doc_a']['a']
        assert test_doc['b']['deep']['doc_a_deep']['a']['foo'] == 3, test_doc['b']['deep']['doc_a_deep']['a']['foo']

    def test_autorefs_embed_in_list_with_bad_reference(self):
        class User(Document):
            structure = {'name':unicode}
        self.connection.register([User])

        class Group(Document):
            use_autorefs = True
            structure = {
                'name':unicode,
                'members':[User], #users
            }
        self.connection.register([User, Group])

        user = self.col.User()
        user['_id'] = u'fixe'
        user['name'] = u'fixe'
        user.save()

        user2 = self.col.User()
        # NOTE(review): this assigns user['_id'] (already saved) rather
        # than user2['_id'] — presumably deliberate, to leave `user` in a
        # state whose reference no longer matches a stored document so
        # that group.save() raises AutoReferenceError; confirm intent.
        user['_id'] = u'namlook'
        user2['name'] = u'namlook'
        user2.save()

        group = self.col.Group()
        group['members'].append(user)
        self.assertRaises(AutoReferenceError, group.save)

    def test_autorefs_with_dynamic_collection(self):
        # The DBRef remembers which collection each referenced document
        # was saved into.
        class DocA(Document):
            structure = {'a':unicode}

        class DocB(Document):
            structure = {'b':DocA}
            use_autorefs = True

        self.connection.register([DocA, DocB])

        doca = self.connection.test.doca.DocA()
        doca['a'] = u'bla'
        doca.save()

        docb = self.connection.test.docb.DocB()
        docb['b'] = doca
        docb.save()

        assert docb['b']['a'] == 'bla'
        assert docb['b'].collection.name == "doca"

        doca2 = self.connection.test.doca2.DocA()
        doca2['a'] = u'foo'
        doca2.save()

        docb2 = self.connection.test.docb.DocB()
        docb2['b'] = doca2
        docb2.save()

        assert docb2['b']['a'] == 'foo'
        assert docb2['b'].collection.name == 'doca2'
        assert docb2.collection.name == 'docb'

        assert list(self.connection.test.docb.DocB.fetch()) == [docb, docb2]

    def test_autorefs_with_dynamic_db(self):
        # References may also cross databases (DBRef carries the db name).
        class DocA(Document):
            structure = {'a':unicode}

        class DocB(Document):
            structure = {'b':DocA}
            use_autorefs = True

        self.connection.register([DocA, DocB])

        doca = self.connection.dba.mongokit.DocA()
        doca['a'] = u'bla'
        doca.save()

        docb = self.connection.dbb.mongokit.DocB()
        docb['b'] = doca
        docb.save()

        assert docb['b']['a'] == 'bla'

        docb = self.connection.dbb.mongokit.DocB.get_from_id(docb['_id'])
        assert isinstance(docb['b'], DocA)

    def test_autoref_without_validation(self):
        # skip_validation must not break saving documents with autorefs.
        class DocA(Document):
            structure = {
                "a":{'foo':int},
            }
        self.connection.register([DocA])
        doca = self.col.DocA()
        doca['_id'] = 'doca'
        doca['a']['foo'] = 3
        doca.save()

        class DocB(Document):
            structure = {
                "b":{"doc_a":DocA},
            }
            use_autorefs = True
            skip_validation = True
        self.connection.register([DocB])
        docb = self.col.DocB()
        docb['_id'] = 'docb'
        docb['b']['doc_a'] = doca
        docb.save()

    def test_autoref_updated(self):
        # Adding/removing references in a list persists correctly
        # across saves and re-fetches.
        class DocA(Document):
            structure = {
                "a":{'foo':int},
            }
        self.connection.register([DocA])
        doca = self.col.DocA()
        doca['_id'] = 'doca'
        doca['a']['foo'] = 3
        doca.save()
        doca2 = self.col.DocA()
        doca2['_id'] = 'doca2'
        doca2['a']['foo'] = 6
        doca2.save()

        class DocB(Document):
            structure = {
                "b":{"doc_a":[DocA]},
            }
            use_autorefs = True
        self.connection.register([DocB])
        docb = self.col.DocB()
        docb['_id'] = 'docb'
        docb.save()
        assert docb == {'b': {'doc_a': []}, '_id': 'docb'}
        docb['b']['doc_a'] = [doca, doca2]
        docb.save()
        assert docb == {'b': {'doc_a': [{u'a': {u'foo': 3}, u'_id': u'doca'}, {u'a': {u'foo': 6}, u'_id': u'doca2'}]}, '_id': 'docb'}
        docb['b']['doc_a'].pop(0)
        docb.save()
        assert docb == {'b': {'doc_a': [{u'a': {u'foo': 6}, u'_id': u'doca2'}]}, '_id': 'docb'}
        fetched_docb = self.col.DocB.get_from_id('docb')
        assert fetched_docb == {u'_id': u'docb', u'b': {u'doc_a': [{u'a': {u'foo': 6}, u'_id': u'doca2'}]}}

        docb = self.col.DocB()
        docb['_id'] = 'docb'
        docb.save()
        assert docb == {'b': {'doc_a': []}, '_id': 'docb'}
        docb['b']['doc_a'] = [doca, doca2]
        docb.save()
        assert docb == {'b': {'doc_a': [{u'a': {u'foo': 3}, u'_id': u'doca'}, {u'a': {u'foo': 6}, u'_id': u'doca2'}]}, '_id': 'docb'}, docb
        docb['b']['doc_a'].pop(0)
        docb['b']['doc_a'].append(doca)
        docb.save()
        assert docb == {'b': {'doc_a': [{u'a': {u'foo': 6}, u'_id': u'doca2'}, {u'a': {u'foo': 3}, u'_id': u'doca'}]}, '_id': 'docb'}, docb
        fetched_docb = self.col.DocB.get_from_id('docb')
        assert fetched_docb == {u'_id': u'docb', u'b': {u'doc_a': [{u'a': {u'foo': 6}, u'_id': u'doca2'}, {u'a': {u'foo': 3}, u'_id': u'doca'}]}}

    def test_autoref_updated_with_default_values(self):
        # default_values and required_fields of the referenced class
        # still apply when edited through the parent.
        class DocA(Document):
            structure = {
                "a":{'foo':int},
                "abis":{'bar':int},
            }
            default_values = {'a.foo':2}
            required_fields = ['abis.bar']
        self.connection.register([DocA])
        doca = self.col.DocA()
        doca['_id'] = 'doca'
        doca['abis']['bar'] = 3
        doca.save()

        class DocB(Document):
            structure = {
                "b":{"doc_a":DocA},
            }
            use_autorefs = True
        self.connection.register([DocB])
        docb = self.col.DocB()
        docb['_id'] = 'docb'
        docb['b']['doc_a'] = doca
        assert docb == {'b': {'doc_a': {'a': {'foo': 2}, 'abis': {'bar': 3}, '_id': 'doca'}}, '_id': 'docb'}, docb
        docb['b']['doc_a']['a']['foo'] = 4
        docb.save()
        assert docb == {'b': {'doc_a': {'a': {'foo': 4}, 'abis': {'bar': 3}, '_id': 'doca'}}, '_id': 'docb'}, docb
        assert doca['a']['foo'] == 4

    def test_autoref_with_None(self):
        class RootDocument(Document):
            use_dot_notation=True
            use_autorefs = True
            structure = {}

        class User(RootDocument):
            collection_name = "users"
            structure = {
                "email": unicode,
                "password": unicode,
            }
            required_fields = [ "email", "password" ]
            indexes = [
                { "fields": "email",
                  "unique": True,
                },
            ]
        self.connection.register([User])
        User = self.col.User
        u = User()
        u['email'] = u'....'
        u['password'] = u'....'
        u.save()
        assert u['_id'] != None

        class ExampleSession(RootDocument):
            #collection_name = "sessions"
            use_autorefs = True
            structure = {
                "user": User,
                "token": unicode,
            }
        # raise an assertion because User is a CallableUser, not User
        self.connection.register([ExampleSession])
        ex = self.col.ExampleSession()
        self.assertRaises(SchemaTypeError, ex.validate)

    def test_autoref_without_database_specified(self):
        # force_autorefs_current_db lets a DBRef with no database field
        # be resolved against the current database.
        class EmbedDoc(Document):
            structure = {
                "foo": unicode,
            }

        class Doc(Document):
            use_dot_notation=True
            use_autorefs = True
            force_autorefs_current_db = True
            structure = {
                "embed": EmbedDoc,
            }
        self.connection.register([EmbedDoc, Doc])

        embed = self.col.EmbedDoc()
        embed['foo'] = u'bar'
        embed.save()

        # Insert a raw document holding a db-less DBRef.
        raw_doc = {'embed':DBRef(collection=self.col.name, id=embed['_id'])}
        self.col.insert(raw_doc)

        doc = self.col.Doc.find_one({'_id':raw_doc['_id']})

    def test_recreate_and_reregister_class_with_reference(self):
        # Re-declaring and re-registering the same document classes must
        # not break dereferencing of already-stored documents.
        class CompanyDocument(Document):
            collection_name = "test_companies"
            use_autorefs = True
            use_dot_notation = True
            structure = {
                "name": unicode,
            }

        class UserDocument(Document):
            collection_name = "test_users"
            use_autorefs = True
            use_dot_notation = True
            structure = {
                "email": unicode,
                "company": CompanyDocument,
            }

        class SessionDocument(Document):
            collection_name = "test_sessions"
            use_autorefs = True
            use_dot_notation = True
            structure = {
                "token": unicode,
                "owner": UserDocument,
            }
        self.connection.register([CompanyDocument, UserDocument, SessionDocument])

        company = self.col.database[CompanyDocument.collection_name].CompanyDocument()
        company.name = u"Company"
        company.save()

        company_owner = self.col.database[UserDocument.collection_name].UserDocument()
        company_owner.email = u"manager@test.com"
        company_owner.company = company
        company_owner.save()

        s = self.col.database[SessionDocument.collection_name].SessionDocument()
        s.token = u'asddadsad'
        s.owner = company_owner
        s.save()

        sbis = self.col.database[SessionDocument.collection_name].SessionDocument.find_one({"token": u"asddadsad" })
        assert sbis == s, sbis

        # Second round: same classes declared afresh (without
        # use_dot_notation) and re-registered.
        class CompanyDocument(Document):
            collection_name = "test_companies"
            use_autorefs = True
            structure = {
                "name": unicode,
            }

        class UserDocument(Document):
            collection_name = "test_users"
            use_autorefs = True
            structure = {
                "email": unicode,
                "company": CompanyDocument,
            }

        class SessionDocument(Document):
            collection_name = "test_sessions"
            use_autorefs = True
            structure = {
                "token": unicode,
                "owner": UserDocument,
            }
        self.connection.register([CompanyDocument, UserDocument, SessionDocument])
        sbis = self.col.database[SessionDocument.collection_name].SessionDocument.find_one({"token": u"asddadsad" })
        assert sbis == s, sbis

    def test_nested_autorefs(self):
        # A chain of four levels of references saves and re-fetches.
        class DocA(Document):
            structure = {
                'name':unicode,
            }
            use_autorefs = True

        class DocB(Document):
            structure = {
                'name': unicode,
                'doca' : DocA,
            }
            use_autorefs = True

        class DocC(Document):
            structure = {
                'name': unicode,
                'docb': DocB,
                'doca': DocA,
            }
            use_autorefs = True

        class DocD(Document):
            structure = {
                'name': unicode,
                'docc': DocC,
            }
            use_autorefs = True

        self.connection.register([DocA, DocB, DocC, DocD])

        doca = self.col.DocA()
        doca['name'] = u'Test A'
        doca.save()

        docb = self.col.DocB()
        docb['name'] = u'Test B'
        docb['doca'] = doca
        docb.save()

        docc = self.col.DocC()
        docc['name'] = u'Test C'
        docc['docb'] = docb
        docc['doca'] = doca
        docc.save()

        docd = self.col.DocD()
        docd['name'] = u'Test D'
        docd['docc'] = docc
        docd.save()

        doca = self.col.DocA.find_one({'name': 'Test A'})
        docb = self.col.DocB.find_one({'name': 'Test B'})
        docc = self.col.DocC.find_one({'name': 'Test C'})
        docd = self.col.DocD.find_one({'name': 'Test D'})

    def test_nested_autoref_in_list_and_dict(self):
        # References nested inside list-of-dict structures are stored
        # as DBRefs in the raw document.
        class DocA(Document):
            structure = {
                'name':unicode,
            }
            use_autorefs = True

        class DocB(Document):
            structure = {
                'name': unicode,
                'test': [{
                    'something' : unicode,
                    'doca' : DocA,
                }]
            }
            use_autorefs = True

        self.connection.register([DocA, DocB])

        doca = self.col.DocA()
        doca['name'] = u'Test A'
        doca.save()

        docc = self.col.DocA()
        docc['name'] = u'Test C'
        docc.save()

        docb = self.col.DocB()
        docb['name'] = u'Test B'
        docb['test'].append({u'something': u'foo', 'doca': doca})
        docb['test'].append({u'something': u'foo', 'doca': docc})
        docb.save()

        raw_docb = self.col.find_one({'name':'Test B'})
        assert isinstance(raw_docb['test'][0]['doca'], DBRef), raw_docb['test'][0]

    def test_dereference(self):
        # Connection.dereference: plain dict by default, a Document
        # instance when given a model; wrong db or bad input raises.
        class DocA(Document):
            structure = {
                'name':unicode,
            }
            use_autorefs = True
        self.connection.register([DocA])

        doca = self.col.DocA()
        doca['name'] = u'Test A'
        doca.save()

        docb = self.connection.test2.mongokit.DocA()
        docb['name'] = u'Test B'
        docb.save()

        dbref = doca.get_dbref()
        self.assertRaises(TypeError, self.connection.test.dereference, 1)
        self.assertRaises(ValueError, self.connection.test.dereference, docb.get_dbref(), DocA)
        assert self.connection.test.dereference(dbref) == {'_id':doca['_id'], 'name': 'Test A'}
        assert isinstance(self.connection.test.dereference(dbref), dict)
        assert self.connection.test.dereference(dbref, DocA) == {'_id':doca['_id'], 'name': 'Test A'}
        assert isinstance(self.connection.test.dereference(dbref, DocA), DocA)

    def test_autorefs_with_list(self):
        # Lists of plain types still validate/save normally when
        # autorefs is enabled on the class.
        class VDocument(Document):
            db_name = 'MyDB'
            use_dot_notation = True
            use_autorefs = True
            skip_validation = True

            def __init__(self, *args, **kwargs):
                super(VDocument, self).__init__(*args, **kwargs)

            def save(self, *args, **kwargs):
                # Force validation on save despite skip_validation.
                kwargs.update({'validate':True})
                return super(VDocument, self).save(*args, **kwargs)

        class H(VDocument):
            structure = {'name':[ObjectId], 'blah':[unicode], 'foo': [{'x':unicode}]}

        self.connection.register([H, VDocument])

        h = self.col.H()
        obj_id = ObjectId()
        h.name.append(obj_id)
        h.blah.append(u'some string')
        h.foo.append({'x':u'hey'})
        h.save()
        assert h == {'blah': [u'some string'], 'foo': [{'x': u'hey'}], 'name': [obj_id], '_id': h['_id']}

    def test_autorefs_with_list2(self):
        # Raw DBRefs inserted by hand inside nested lists are
        # dereferenced on find_one.
        class DocA(Document):
            structure = {'name':unicode}

        class DocB(Document):
            structure = {
                'docs':[{
                    'doca': [DocA],
                    'inc':int,
                }],
            }
            use_autorefs = True
        self.connection.register([DocA, DocB])

        doca = self.col.DocA()
        doca['_id'] = u'doca'
        doca['name'] = u"foo"
        doca.save()

        self.col.insert(
            {'_id': 'docb', 'docs':[
                {
                    'doca':[DBRef(database='test', collection='mongokit', id='doca')],
                    'inc':2,
                },
            ]
            })
        assert self.col.DocB.find_one({'_id':'docb'}) == {u'docs': [{u'doca': [{u'_id': u'doca', u'name': u'foo'}], u'inc': 2}], u'_id': u'docb'}

    def test_autorefs_with_required(self):
        # A referenced document satisfies a required field.
        import datetime
        import uuid
        @self.connection.register
        class User(Document):
            structure = {
                'email': unicode,
            }

        @self.connection.register
        class Event(Document):
            structure = {
                'user': User,
                'title': unicode,
            }
            required_fields = ['user', 'title']
            use_autorefs = True

        user = self.connection.test.users.User()
        user.save()

        event = self.connection.test.events.Event()
        event['user'] = user
        event['title'] = u"Test"
        event.validate()
        event.save()
|
|
"""
Created on Tue Jan 03 10:55:39 2017
@author: Ricky
"""
#####################################################################
"""
Importing program modules
"""
#####################################################################
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from scipy.signal import savgol_filter
from datetime import datetime
from datetime import timedelta
from datetime import time
from datetime import date
from basal import Basal
from bolusTime import bolusTime
from dailyBasal1 import Dbasal
from maxBasal import maxBasal
from carbCurve import Carbs
from carbCurveTime import CarbTIME
from dexTime import dexTime
from omniTime import omniTime
from carbTime import carbTime
from Flags import flags
from normalTime import normaltime
from oneday import oneday
from separateBGs import usableBasal
from gradientBG import gradientBG
#####################################################################
TDD = 49.0 # Total Daily Dosage. Pull from pump - Average should work for meow
upper = 160 # High glucose limit
lower = 70 # Low glucose limit
#####################################################################
"""
Importing Dexcom Data from TXT file
"""
#####################################################################
f = open("pyDexBH.txt") # pyData2 is manually modified from Dexcom export
lines = f.readlines() # Read data from .txt file
f.close()
f = open("omniBolusNew.txt") # omniBolus is manually modified from Dexcom export
lines2 = f.readlines() # Read data from .txt file
f.close()
f = open("carbsEaten.txt")
lines3 = f.readlines()
f.close()
Dates, timeofday, BG = dexTime(lines)
####################################################################
"""
fig, ax1 = plt.subplots()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter("%m/%d/%Y %H:%M"))
ax1.plot(Dates[:20], BG[:20])
ax1.set_xlabel('Date & Time')
ax1.set_ylabel('Blood Glucose Level [mg/dl]')
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter("%m/%d/%Y %H:%M"))
ax1.set_xlim(min(Dates),Dates[20])
plt.gcf().autofmt_xdate()
plt.grid(True)
plt.title('Dataset - Time Series of BG')
plt.show()
#"""
L = len(BG)
print "Lenght of BG =", L
sumBG = sum(BG)
DatesB, timeofdayB, BolusV = omniTime(lines2)
DatesC, timeofdayC, CarbV = carbTime(lines3)
# Create Smooth BG
SBG = savgol_filter(BG, 5, 2)
print "Smooth SGBG =", len(SBG)
#####################################################################
# Programmed basal schedule vs. actually-delivered basal insulin.
#####################################################################
basal, bolus = Basal(1,49)
#print "bolus sum =", sum(bolus)
# Bolus activity curve starting at sample 0, scaled by 1 unit, length L.
Bolus = bolusTime(bolus, 0, 1, L)
#print Bolus
# Pump program: segment start times (hours) and basal rates (units/hour).
programBasalTime = [ 0.0, 4.0, 5.0, 6.0, 6.5, 15.0, 18.5, 19.0, 22.5, 24.0]
programBasalValue = [1.05, 1.30, 1.45, 1.30, 1.35, 1.25, 1.40, 1.50, 1.05, 1.05]
TL = timeday = 24*60/5 # minutes in the day / 5 minute intervals
delta = (1./TL)*24. # hours covered by one 5-minute sample
# PBV24[i] = index of the basal-program segment active at sample i.
# NOTE(review): insert() on a list pre-filled with zeros grows it beyond TL;
# the trailing zeros are then discarded by the [:287] truncation below.
# The `x += 1` statements have no effect inside a `for` loop -- confirm intent.
PBV24 = [0]*TL
y = 1
for x in range(TL):
    if x*delta < programBasalTime[y]:
        PBV24.insert(x,y-1)
        x += 1
    else:
        PBV24.insert(x,y)
        x += 1
        y += 1
PBV24 = PBV24[:287]
PBV24.append(len(programBasalTime)-2)
# PBR[i] = units delivered in the i-th 5-minute slot (rate * 5/60 hour).
PBR = [0]*288
y = 0
for x in range(len(PBR)):
    if PBV24[x] == y:
        PBR.insert(x,programBasalValue[y]*(5/60.))
    elif PBV24[x] == y+1:
        PBR.insert(x,programBasalValue[y+1]*(5/60.))
    elif PBV24[x] == y+2:
        PBR.insert(x,programBasalValue[y+2]*(5/60.))
    elif PBV24[x] == y+3:
        PBR.insert(x,programBasalValue[y+3]*(5/60.))
    elif PBV24[x] == y+4:
        PBR.insert(x,programBasalValue[y+4]*(5/60.))
    elif PBV24[x] == y+5:
        PBR.insert(x,programBasalValue[y+5]*(5/60.))
    elif PBV24[x] == y+6:
        PBR.insert(x,programBasalValue[y+6]*(5/60.))
    elif PBV24[x] == y+7:
        PBR.insert(x,programBasalValue[y+7]*(5/60.))
    elif PBV24[x] == y+8:
        PBR.insert(x,programBasalValue[y+8]*(5/60.))
    else:
        PBR.insert(x,programBasalValue[y+9]*(5/60.))
PBR = PBR[:287]
PBR.append(PBR[-1])
#add one extra value for midnight
basalActual = Dbasal(PBR)
#####################################################################
from carbCurve import Carbs
carb = Carbs(0.4, 0.0, L) # carb is calculated for an entire time
#print "carb sum =", sum(carb), carb
CarbsT = CarbTIME(carb, 3, 1)
#print CarbsT
#####################################################################
"""
Analysis: Part 1 - Setting up low and high BG flags
"""
#####################################################################
# Classify every sample against the [lower, upper] BG band.
# NOTE(review): assumes `upper` and `lower` are defined earlier in this script.
addIns, addInsBG, hyperglycemia, lessIns, lessInsBG, hypoglycemia, normal, normalBG, NoActionReq, TimeNorm = flags(BG, Dates, upper, lower)
#####################################################################
"""
Analysis: Part 2 - Remove times from Normal when Carbs or Bolus are used.
"""
#####################################################################
BolusTime, CarbTime, carbsUsed, bolusUsed, greatJob, greatJobBG, greatJobTime = normaltime(Dates, DatesB, DatesC, normal, BG)
#####################################################################
"""
Analysis: Part 3 - Is Normal acceptable with current Basal Rate
"""
#####################################################################
# Data for Plotting
#####################################################################
t, d = oneday(1) # t = time for one day in minutes, no date
                 # d = time for one day with date added (today)
#####################################################################
# Need to set up time
y = range(len(basalActual))
y = np.array(y)
z = y*5/60. # sample index -> hours of day
# Get Carb to Day Indices
oneDayCarb = []
for x in range(len(d)):
    oneDayCarb.append(CarbsT[x])
####################################################################
# Calculate Liver Basal Rate
####################################################################
# NOTE(review): assumes TDD (total daily dose) is defined earlier -- confirm.
ISF = 1800/TDD # 1800 rule - Insulin Sensitivity Factor (ISF)
print "Insulin Sensitivity Factor (how much 1 unit[mg/dl] lowers BG = ",ISF
C2RBG = 2.0 # Amount 1 carb raising BG - Ref: "Think Like a Pancreas"
####################################################################
# 1 - BOLUS
# Create array for all the boluses that take place
ABTD = []
x = 0
for x in range(len(BolusV)):
    ABTD.append(bolusTime(bolus, BolusTime[x], BolusV[x], L))
    x += 1
# Superimpose the individual bolus activity curves into one series.
ABTD = np.array(ABTD)
ABTD = sum(ABTD)
####################################################################
# 2 - CARBS
# Create array for all the Carbs that take place
ACarbsT = []
for x in range(len(CarbV)):
    ACarbsT.append((CarbTIME(carb, CarbTime[x], CarbV[x])))
    x += 1
ACarbsT = np.array(ACarbsT)
ACarbsT = sum(ACarbsT)
####################################################################
# 3 - Basal Insulin
# Tile one day of delivered basal to cover three days of samples.
BI = basalActual + basalActual + basalActual
BI = np.array(BI)
####################################################################
# 4 - Dexcom Values
# First difference of the smoothed BG (change per 5-minute sample).
SBG = list(SBG)
SBG.insert(0,SBG[0])
SBG = np.array(SBG)
DBG = []
for x in range(1,len(SBG)):
    DBG.append(SBG[x]-SBG[x-1])
BGDex = np.array(DBG)
print "Differential Dexcom Values =", len(BGDex)
print "Carbs =", len(ACarbsT)
print "Differential Dexcom Values =", len(BGDex)
print "Actual Basal Insulin =",len(BI)
print "Boluses =", len(ABTD)
####################################################################
# 5 - Basal Carbs
# Solve the BG balance for the carbs the liver must be supplying:
# insulin (bolus + basal) lowers BG, eaten carbs raise it, plus the
# observed BG change; expressed in carb units via C2RBG.
BL = ((ABTD*ISF) - (ACarbsT*C2RBG) + (BI*ISF) + BGDex)/C2RBG
# 6 - Change in Basal Carbs
BL = list(BL)
BL.insert(0,BL[0])
DBL = []
for x in range(1,len(BL)):
    DBL.append(BL[x]-BL[x-1])
DBL = np.array(DBL)
#<----------
# Make array for when carbs are eaten and how much
#<----------
CarbsEaten = [0]*len(ACarbsT)
for x in range(len(CarbV)):
    CarbsEaten[CarbTime[x]] = CarbV[x]
#<----------
# Make array for when boluses are taken and how much
#<----------
BolusTaken = [0]*len(ACarbsT)
for x in range(len(BolusV)):
    BolusTaken[BolusTime[x]] = BolusV[x]
#<----------
# Export for Capstone
#<----------
# Drop the padding sample that was inserted before differencing.
SBG = list(SBG)
SBG.pop(0)
BL.pop(0)
DFCarbs = (ACarbsT*C2RBG)
DFBolus = (ABTD*ISF)
DFBI = (BI*ISF)
DFBGChange = BGDex
DFBL = BL
DFBG = SBG
DFDBL = DBL
import pandas as pd
columnTitle = ['Date&Time','DFBGChange','Bolus Taken', 'Bolus Ingested', 'Basal Carbs (Liver)', 'Basal Insulin', 'Carbs Eaten', 'Carbs Ingested', 'BG']
DF = {'Date&Time':Dates,'BG':DFBG, 'DFBGChange': DFBGChange, 'Bolus Taken': BolusTaken, 'Bolus Ingested':DFBolus, 'Basal Carbs (Liver)':DFBL, 'Basal Insulin':DFBI, 'Carbs Eaten':CarbsEaten, 'Carbs Ingested':DFCarbs, 'Delta BL':DFDBL}
df = pd.DataFrame(DF, columns=columnTitle)
#df.to_csv(r'C:\Users\Ricky\Documents\Udacity\MLND\machine-learning-master\projects\capstone\DiabeathisSet.txt', header=columnTitle, index=None, sep=' ', mode='a')
#print df
|
|
# Copyright (c) 2017 pandas-gbq Authors All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# -*- coding: utf-8 -*-
import copy
import datetime
from unittest import mock
import numpy
import pandas
from pandas import DataFrame
import pytest
from pandas_gbq import gbq
from pandas_gbq.features import FEATURES
# Silence the warning emitted when credentials are picked up from the
# Google Cloud SDK (gcloud); it is irrelevant to these unit tests.
pytestmark = pytest.mark.filterwarnings(
    "ignore:credentials from Google Cloud SDK"
)
def _make_connector(project_id="some-project", **kwargs):
    """Build a GbqConnector for tests, defaulting the project ID."""
    connector = gbq.GbqConnector(project_id, **kwargs)
    return connector
def mock_get_credentials_no_project(*args, **kwargs):
    """Return autospecced credentials paired with *no* default project."""
    import google.auth.credentials

    credentials = mock.create_autospec(google.auth.credentials.Credentials)
    return credentials, None
def mock_get_credentials(*args, **kwargs):
    """Return autospecced credentials paired with a canned default project."""
    import google.auth.credentials

    credentials = mock.create_autospec(google.auth.credentials.Credentials)
    return credentials, "default-project"
@pytest.fixture
def mock_service_account_credentials():
    """Autospecced service-account credentials object."""
    import google.oauth2.service_account

    return mock.create_autospec(google.oauth2.service_account.Credentials)
@pytest.fixture
def mock_compute_engine_credentials():
    """Autospecced Compute Engine credentials object (no project attached)."""
    import google.auth.compute_engine

    return mock.create_autospec(google.auth.compute_engine.Credentials)
@pytest.fixture(autouse=True)
def no_auth(monkeypatch):
    """Route every auth lookup through the canned default-project credentials."""
    import pydata_google_auth

    monkeypatch.setattr(pydata_google_auth, "default", mock_get_credentials)
@pytest.mark.parametrize(
    ("type_", "expected"),
    [
        ("INTEGER", None),  # Can't handle NULL
        ("BOOLEAN", None),  # Can't handle NULL
        ("FLOAT", numpy.dtype(float)),
        # TIMESTAMP will be localized after DataFrame construction.
        ("TIMESTAMP", "datetime64[ns]"),
        ("DATETIME", "datetime64[ns]"),
    ],
)
def test__bqschema_to_nullsafe_dtypes(type_, expected):
    """NULL-capable types map to a dtype; NULL-incapable ones are omitted."""
    schema = [{"name": "x", "type": type_, "mode": "NULLABLE"}]
    result = gbq._bqschema_to_nullsafe_dtypes(schema)
    expected_mapping = {"x": expected} if expected else {}
    assert result == expected_mapping
def test_GbqConnector_get_client_w_old_bq(monkeypatch, mock_bigquery_client):
    """With an old google-cloud-bigquery, no client_info is passed."""
    gbq._test_google_api_imports()
    connector = _make_connector()
    no_client_info = mock.PropertyMock(return_value=False)
    monkeypatch.setattr(
        type(FEATURES), "bigquery_has_client_info", no_client_info
    )
    connector.get_client()
    # The client must be constructed without a client_info argument.
    mock_bigquery_client.assert_called_with(
        credentials=mock.ANY, project=mock.ANY
    )
def test_GbqConnector_get_client_w_new_bq(mock_bigquery_client):
    """Newer google-cloud-bigquery receives a pandas user-agent client_info."""
    gbq._test_google_api_imports()
    if not FEATURES.bigquery_has_client_info:
        pytest.skip("google-cloud-bigquery missing client_info feature")
    pytest.importorskip("google.api_core.client_info")
    connector = _make_connector()
    connector.get_client()
    _, kwargs = mock_bigquery_client.call_args
    expected_agent = "pandas-{}".format(pandas.__version__)
    assert kwargs["client_info"].user_agent == expected_agent
def test_to_gbq_should_fail_if_invalid_table_name_passed():
    """A table ID without a dataset part raises NotFoundException."""
    frame = DataFrame([[1]])
    with pytest.raises(gbq.NotFoundException):
        gbq.to_gbq(frame, "invalid_table_name", project_id="1234")
def test_to_gbq_with_no_project_id_given_should_fail(monkeypatch):
    """Without an explicit or default project, to_gbq refuses to run."""
    import pydata_google_auth

    monkeypatch.setattr(
        pydata_google_auth, "default", mock_get_credentials_no_project
    )
    frame = DataFrame([[1]])
    with pytest.raises(ValueError, match="Could not determine project ID"):
        gbq.to_gbq(frame, "dataset.tablename")
@pytest.mark.parametrize(["verbose"], [(True,), (False,)])
def test_to_gbq_with_verbose_new_pandas_warns_deprecation(
    monkeypatch, verbose
):
    """Any explicit ``verbose`` warns on pandas that deprecated it."""
    deprecated = mock.PropertyMock(return_value=True)
    monkeypatch.setattr(
        type(FEATURES), "pandas_has_deprecated_verbose", deprecated
    )
    frame = DataFrame([[1]])
    with pytest.warns(FutureWarning, match="verbose is deprecated"):
        # Table creation fails against the mocked backend; that is
        # irrelevant to the deprecation warning under test.
        try:
            gbq.to_gbq(
                frame,
                "dataset.tablename",
                project_id="my-project",
                verbose=verbose,
            )
        except gbq.TableCreationError:
            pass
def test_to_gbq_wo_verbose_w_new_pandas_no_warnings(monkeypatch, recwarn):
    """Omitting ``verbose`` emits no warning even where it is deprecated."""
    deprecated = mock.PropertyMock(return_value=True)
    monkeypatch.setattr(
        type(FEATURES), "pandas_has_deprecated_verbose", deprecated
    )
    frame = DataFrame([[1]])
    try:
        gbq.to_gbq(frame, "dataset.tablename", project_id="my-project")
    except gbq.TableCreationError:
        # Expected against the mocked backend; not relevant to warnings.
        pass
    assert len(recwarn) == 0
def test_to_gbq_with_verbose_old_pandas_no_warnings(monkeypatch, recwarn):
    """On pandas that still supports ``verbose``, verbose=True stays silent."""
    not_deprecated = mock.PropertyMock(return_value=False)
    monkeypatch.setattr(
        type(FEATURES), "pandas_has_deprecated_verbose", not_deprecated
    )
    frame = DataFrame([[1]])
    try:
        gbq.to_gbq(
            frame,
            "dataset.tablename",
            project_id="my-project",
            verbose=True,
        )
    except gbq.TableCreationError:
        # Expected against the mocked backend; not relevant to warnings.
        pass
    assert len(recwarn) == 0
def test_to_gbq_with_private_key_raises_notimplementederror():
    """The removed ``private_key`` argument raises NotImplementedError."""
    frame = DataFrame([[1]])
    with pytest.raises(NotImplementedError, match="private_key"):
        gbq.to_gbq(
            frame,
            "dataset.tablename",
            project_id="my-project",
            private_key="path/to/key.json",
        )
def test_to_gbq_doesnt_run_query(mock_bigquery_client):
    """Uploading a frame must never issue a query job."""
    frame = DataFrame([[1]])
    try:
        gbq.to_gbq(frame, "dataset.tablename", project_id="my-project")
    except gbq.TableCreationError:
        # Expected against the mocked backend.
        pass
    mock_bigquery_client.query.assert_not_called()
def test_to_gbq_w_empty_df(mock_bigquery_client):
    """An empty frame creates the table but uploads no rows."""
    import google.api_core.exceptions

    not_found = google.api_core.exceptions.NotFound("my_table")
    mock_bigquery_client.get_table.side_effect = not_found
    gbq.to_gbq(DataFrame(), "my_dataset.my_table", project_id="1234")
    mock_bigquery_client.create_table.assert_called_with(mock.ANY)
    mock_bigquery_client.load_table_from_dataframe.assert_not_called()
    mock_bigquery_client.load_table_from_file.assert_not_called()
def test_to_gbq_w_default_project(mock_bigquery_client):
    """If no project is specified, we should be able to use project from
    default credentials.
    """
    import google.api_core.exceptions
    from google.cloud.bigquery.table import TableReference

    not_found = google.api_core.exceptions.NotFound("my_table")
    mock_bigquery_client.get_table.side_effect = not_found
    gbq.to_gbq(DataFrame(), "my_dataset.my_table")
    expected_ref = TableReference.from_string(
        "default-project.my_dataset.my_table"
    )
    mock_bigquery_client.get_table.assert_called_with(expected_ref)
    mock_bigquery_client.create_table.assert_called_with(mock.ANY)
    created = mock_bigquery_client.create_table.call_args[0][0]
    assert created.project == "default-project"
def test_to_gbq_w_project_table(mock_bigquery_client):
    """If a project is included in the table ID, use that instead of the client
    project. See: https://github.com/pydata/pandas-gbq/issues/321
    """
    import google.api_core.exceptions
    from google.cloud.bigquery.table import TableReference

    not_found = google.api_core.exceptions.NotFound("my_table")
    mock_bigquery_client.get_table.side_effect = not_found
    gbq.to_gbq(
        DataFrame(),
        "project_table.my_dataset.my_table",
        project_id="project_client",
    )
    expected_ref = TableReference.from_string(
        "project_table.my_dataset.my_table"
    )
    mock_bigquery_client.get_table.assert_called_with(expected_ref)
    mock_bigquery_client.create_table.assert_called_with(mock.ANY)
    created = mock_bigquery_client.create_table.call_args[0][0]
    assert created.project == "project_table"
def test_to_gbq_creates_dataset(mock_bigquery_client):
    """A missing dataset is created before the table upload."""
    import google.api_core.exceptions as gexc

    mock_bigquery_client.get_table.side_effect = gexc.NotFound("my_table")
    mock_bigquery_client.get_dataset.side_effect = gexc.NotFound("my_dataset")
    frame = DataFrame([[1]])
    gbq.to_gbq(frame, "my_dataset.my_table", project_id="1234")
    mock_bigquery_client.create_dataset.assert_called_with(mock.ANY)
def test_read_gbq_with_no_project_id_given_should_fail(monkeypatch):
    """read_gbq needs a project from somewhere; otherwise it raises."""
    import pydata_google_auth

    monkeypatch.setattr(
        pydata_google_auth, "default", mock_get_credentials_no_project
    )
    query = "SELECT 1"
    with pytest.raises(ValueError, match="Could not determine project ID"):
        gbq.read_gbq(query, dialect="standard")
def test_read_gbq_with_inferred_project_id(monkeypatch):
    """The project from default credentials is enough to run a query."""
    result = gbq.read_gbq("SELECT 1", dialect="standard")
    assert result is not None
def test_read_gbq_with_inferred_project_id_from_service_account_credentials(
    mock_bigquery_client, mock_service_account_credentials
):
    """The project baked into service-account credentials is used to query."""
    mock_service_account_credentials.project_id = "service_account_project_id"
    result = gbq.read_gbq(
        "SELECT 1",
        dialect="standard",
        credentials=mock_service_account_credentials,
    )
    assert result is not None
    # The query job must be scoped to the credentials' own project.
    mock_bigquery_client.query.assert_called_once_with(
        "SELECT 1",
        job_config=mock.ANY,
        location=None,
        project="service_account_project_id",
    )
def test_read_gbq_without_inferred_project_id_from_compute_engine_credentials(
    mock_compute_engine_credentials,
):
    """Compute Engine credentials carry no project, so read_gbq must raise."""
    query = "SELECT 1"
    with pytest.raises(ValueError, match="Could not determine project ID"):
        gbq.read_gbq(
            query, dialect="standard", credentials=mock_compute_engine_credentials
        )
def test_read_gbq_with_max_results_zero(monkeypatch):
    """max_results=0 short-circuits: no result frame is produced."""
    result = gbq.read_gbq("SELECT 1", dialect="standard", max_results=0)
    assert result is None
def test_read_gbq_with_max_results_ten(monkeypatch, mock_bigquery_client):
    """A positive max_results is forwarded to the row listing call."""
    result = gbq.read_gbq("SELECT 1", dialect="standard", max_results=10)
    assert result is not None
    mock_bigquery_client.list_rows.assert_called_with(mock.ANY, max_results=10)
@pytest.mark.parametrize(["verbose"], [(True,), (False,)])
def test_read_gbq_with_verbose_new_pandas_warns_deprecation(
    monkeypatch, verbose
):
    """Any explicit ``verbose`` triggers the deprecation warning."""
    deprecated = mock.PropertyMock(return_value=True)
    monkeypatch.setattr(
        type(FEATURES), "pandas_has_deprecated_verbose", deprecated
    )
    with pytest.warns(FutureWarning, match="verbose is deprecated"):
        gbq.read_gbq("SELECT 1", project_id="my-project", verbose=verbose)
def test_read_gbq_wo_verbose_w_new_pandas_no_warnings(monkeypatch, recwarn):
    """With no ``verbose`` argument there is nothing to warn about."""
    not_deprecated = mock.PropertyMock(return_value=False)
    monkeypatch.setattr(
        type(FEATURES), "pandas_has_deprecated_verbose", not_deprecated
    )
    gbq.read_gbq("SELECT 1", project_id="my-project", dialect="standard")
    assert len(recwarn) == 0
def test_read_gbq_with_old_bq_raises_importerror(monkeypatch):
    """A too-old google-cloud-bigquery is rejected with ImportError."""
    import google.cloud.bigquery

    monkeypatch.setattr(google.cloud.bigquery, "__version__", "0.27.0")
    # Reset the cached probe so the patched version string is re-read.
    monkeypatch.setattr(FEATURES, "_bigquery_installed_version", None)
    with pytest.raises(ImportError, match="google-cloud-bigquery"):
        gbq.read_gbq("SELECT 1", project_id="my-project")
def test_read_gbq_with_verbose_old_pandas_no_warnings(monkeypatch, recwarn):
    """verbose=True is silently accepted on pandas that still supports it."""
    not_deprecated = mock.PropertyMock(return_value=False)
    monkeypatch.setattr(
        type(FEATURES), "pandas_has_deprecated_verbose", not_deprecated
    )
    gbq.read_gbq(
        "SELECT 1", project_id="my-project", dialect="standard", verbose=True
    )
    assert len(recwarn) == 0
def test_read_gbq_with_private_raises_notimplmentederror():
    """The removed ``private_key`` argument raises NotImplementedError."""
    query = "SELECT 1"
    with pytest.raises(NotImplementedError, match="private_key"):
        gbq.read_gbq(
            query, project_id="my-project", private_key="path/to/key.json"
        )
def test_read_gbq_with_invalid_dialect():
    """Unknown SQL dialects are rejected up front."""
    with pytest.raises(ValueError, match="is not valid for dialect"):
        gbq.read_gbq("SELECT 1", dialect="invalid")
def test_read_gbq_with_configuration_query():
    """The query text may come from the configuration dict alone."""
    config = {"query": {"query": "SELECT 2"}}
    result = gbq.read_gbq(None, configuration=config)
    assert result is not None
def test_read_gbq_with_configuration_duplicate_query_raises_error():
    """Supplying the query both directly and via config is ambiguous."""
    config = {"query": {"query": "SELECT 2"}}
    with pytest.raises(
        ValueError, match="Query statement can't be specified inside config"
    ):
        gbq.read_gbq("SELECT 1", configuration=config)
def test_generate_bq_schema_deprecated():
    """generate_bq_schema emits a FutureWarning (deprecation, see 11121)."""
    frame = DataFrame([[1, "two"], [3, "four"]])
    with pytest.warns(FutureWarning):
        gbq.generate_bq_schema(frame)
def test_load_does_not_modify_schema_arg(mock_bigquery_client):
    """Test of Issue # 277: to_gbq must not mutate the caller's schema.

    Runs the upload twice -- once against a missing table (create path)
    and once against an existing one (append path) -- and checks the
    ``table_schema`` argument is unchanged in both branches.
    """
    from google.api_core.exceptions import NotFound

    def make_schema():
        # Fresh list each call so any in-place mutation by to_gbq would
        # be caught by the deep-copy comparison below.
        return [
            {"name": "field1", "type": "STRING", "mode": "REQUIRED"},
            {"name": "field2", "type": "INTEGER"},
            {"name": "field3", "type": "DATE"},
        ]

    # Create table with new schema.
    mock_bigquery_client.get_table.side_effect = NotFound("nope")
    df = DataFrame(
        {
            "field1": ["a", "b"],
            "field2": [1, 2],
            "field3": [datetime.date(2019, 1, 1), datetime.date(2019, 5, 1)],
        }
    )
    original_schema = make_schema()
    original_schema_cp = copy.deepcopy(original_schema)
    gbq.to_gbq(
        df,
        "dataset.schematest",
        project_id="my-project",
        table_schema=original_schema,
        if_exists="fail",
    )
    assert original_schema == original_schema_cp

    # Test again now that table exists - behavior will differ internally
    # branch at if table.exists(table_id)
    original_schema = make_schema()
    original_schema_cp = copy.deepcopy(original_schema)
    gbq.to_gbq(
        df,
        "dataset.schematest",
        project_id="my-project",
        table_schema=original_schema,
        if_exists="append",
    )
    assert original_schema == original_schema_cp
def test_read_gbq_passes_dtypes(
    mock_bigquery_client, mock_service_account_credentials
):
    """A user-supplied dtypes mapping is forwarded to to_dataframe()."""
    mock_service_account_credentials.project_id = "service_account_project_id"
    result = gbq.read_gbq(
        "SELECT 1 AS int_col",
        dialect="standard",
        credentials=mock_service_account_credentials,
        dtypes={"int_col": "my-custom-dtype"},
    )
    assert result is not None
    # read_gbq fetched rows via list_rows(); inspect that mock's call.
    row_iterator = mock_bigquery_client.list_rows("dest", max_results=100)
    _, kwargs = row_iterator.to_dataframe.call_args
    assert kwargs["dtypes"] == {"int_col": "my-custom-dtype"}
def test_read_gbq_use_bqstorage_api(
    mock_bigquery_client, mock_service_account_credentials
):
    """use_bqstorage_api=True is forwarded as create_bqstorage_client."""
    if not FEATURES.bigquery_has_bqstorage:
        pytest.skip("requires BigQuery Storage API")
    mock_service_account_credentials.project_id = "service_account_project_id"
    result = gbq.read_gbq(
        "SELECT 1 AS int_col",
        dialect="standard",
        credentials=mock_service_account_credentials,
        use_bqstorage_api=True,
    )
    assert result is not None
    row_iterator = mock_bigquery_client.list_rows("dest", max_results=100)
    row_iterator.to_dataframe.assert_called_once_with(
        create_bqstorage_client=True,
        dtypes=mock.ANY,
        progress_bar_type=mock.ANY,
    )
def test_read_gbq_calls_tqdm(
    mock_bigquery_client, mock_service_account_credentials
):
    """A custom progress_bar_type reaches to_dataframe() unchanged."""
    mock_service_account_credentials.project_id = "service_account_project_id"
    result = gbq.read_gbq(
        "SELECT 1",
        dialect="standard",
        credentials=mock_service_account_credentials,
        progress_bar_type="foobar",
    )
    assert result is not None
    row_iterator = mock_bigquery_client.list_rows("dest", max_results=100)
    _, kwargs = row_iterator.to_dataframe.call_args
    assert kwargs["progress_bar_type"] == "foobar"
|
|
# Copyright 2013 IBM Corp
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from nova import db
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova.objects import pci_device_pool
from nova import utils
# TODO(berrange): Remove NovaObjectDictCompat
class ComputeNode(base.NovaPersistentObject, base.NovaObject,
                  base.NovaObjectDictCompat):
    """Versioned object wrapping a row of the compute_nodes table.

    Tracks a hypervisor host's inventory (vCPUs, RAM, disk), usage
    counters and capability blobs, with compatibility shims for records
    written before the ``host`` column existed.
    """
    # Version 1.0: Initial version
    # Version 1.1: Added get_by_service_id()
    # Version 1.2: String attributes updated to support unicode
    # Version 1.3: Added stats field
    # Version 1.4: Added host ip field
    # Version 1.5: Added numa_topology field
    # Version 1.6: Added supported_hv_specs
    # Version 1.7: Added host field
    # Version 1.8: Added get_by_host_and_nodename()
    # Version 1.9: Added pci_device_pools
    # Version 1.10: Added get_first_node_by_host_for_old_compat()
    # Version 1.11: PciDevicePoolList version 1.1
    VERSION = '1.11'
    fields = {
        'id': fields.IntegerField(read_only=True),
        'service_id': fields.IntegerField(),
        'host': fields.StringField(nullable=True),
        'vcpus': fields.IntegerField(),
        'memory_mb': fields.IntegerField(),
        'local_gb': fields.IntegerField(),
        'vcpus_used': fields.IntegerField(),
        'memory_mb_used': fields.IntegerField(),
        'local_gb_used': fields.IntegerField(),
        'hypervisor_type': fields.StringField(),
        'hypervisor_version': fields.IntegerField(),
        'hypervisor_hostname': fields.StringField(nullable=True),
        'free_ram_mb': fields.IntegerField(nullable=True),
        'free_disk_gb': fields.IntegerField(nullable=True),
        'current_workload': fields.IntegerField(nullable=True),
        'running_vms': fields.IntegerField(nullable=True),
        'cpu_info': fields.StringField(nullable=True),
        'disk_available_least': fields.IntegerField(nullable=True),
        'metrics': fields.StringField(nullable=True),
        'stats': fields.DictOfNullableStringsField(nullable=True),
        'host_ip': fields.IPAddressField(nullable=True),
        'numa_topology': fields.StringField(nullable=True),
        # NOTE(pmurray): the supported_hv_specs field maps to the
        # supported_instances field in the database
        'supported_hv_specs': fields.ListOfObjectsField('HVSpec'),
        # NOTE(pmurray): the pci_device_pools field maps to the
        # pci_stats field in the database
        'pci_device_pools': fields.ObjectField('PciDevicePoolList',
                                               nullable=True),
        }
    obj_relationships = {
        'pci_device_pools': [('1.9', '1.0'), ('1.11', '1.1')],
        'supported_hv_specs': [('1.6', '1.0')],
    }
    def obj_make_compatible(self, primitive, target_version):
        """Drop fields that consumers older than *target_version* lack."""
        super(ComputeNode, self).obj_make_compatible(primitive, target_version)
        target_version = utils.convert_version_to_tuple(target_version)
        if target_version < (1, 7) and 'host' in primitive:
            del primitive['host']
        if target_version < (1, 5) and 'numa_topology' in primitive:
            del primitive['numa_topology']
        if target_version < (1, 4) and 'host_ip' in primitive:
            del primitive['host_ip']
        if target_version < (1, 3) and 'stats' in primitive:
            # pre 1.3 version does not have a stats field
            del primitive['stats']
    @staticmethod
    def _host_from_db_object(compute, db_compute):
        """Set compute['host'] from the DB row, or from the linked Service
        for old, unconverted records that only carry a service_id.
        """
        if (('host' not in db_compute or db_compute['host'] is None)
                and 'service_id' in db_compute
                and db_compute['service_id'] is not None):
            # FIXME(sbauza) : Unconverted compute record, provide compatibility
            # This has to stay until we can be sure that any/all compute nodes
            # in the database have been converted to use the host field
            # Service field of ComputeNode could be deprecated in a next patch,
            # so let's use directly the Service object
            try:
                service = objects.Service.get_by_id(
                    compute._context, db_compute['service_id'])
            except exception.ServiceNotFound:
                compute['host'] = None
                return
            try:
                compute['host'] = service.host
            except (AttributeError, exception.OrphanedObjectError):
                # Host can be nullable in Service
                compute['host'] = None
        elif 'host' in db_compute and db_compute['host'] is not None:
            # New-style DB having host as a field
            compute['host'] = db_compute['host']
        else:
            # We assume it should not happen but in case, let's set it to None
            compute['host'] = None
    @staticmethod
    def _from_db_object(context, compute, db_compute):
        """Hydrate a ComputeNode from a raw DB row, decoding JSON blobs
        (stats, supported_instances, pci_stats) into object fields.
        """
        special_cases = set([
            'stats',
            'supported_hv_specs',
            'host',
            'pci_device_pools',
            ])
        fields = set(compute.fields) - special_cases
        for key in fields:
            compute[key] = db_compute[key]
        stats = db_compute['stats']
        if stats:
            compute['stats'] = jsonutils.loads(stats)
        sup_insts = db_compute.get('supported_instances')
        if sup_insts:
            hv_specs = jsonutils.loads(sup_insts)
            hv_specs = [objects.HVSpec.from_list(hv_spec)
                        for hv_spec in hv_specs]
            compute['supported_hv_specs'] = hv_specs
        pci_stats = db_compute.get('pci_stats')
        compute.pci_device_pools = pci_device_pool.from_pci_stats(pci_stats)
        compute._context = context
        # Make sure that we correctly set the host field depending on either
        # host column is present in the table or not
        compute._host_from_db_object(compute, db_compute)
        compute.obj_reset_changes()
        return compute
    @base.remotable_classmethod
    def get_by_id(cls, context, compute_id):
        """Fetch a single compute node by primary key."""
        db_compute = db.compute_node_get(context, compute_id)
        return cls._from_db_object(context, cls(), db_compute)
    @base.remotable_classmethod
    def get_by_service_id(cls, context, service_id):
        """Return the first compute node attached to *service_id*."""
        db_computes = db.compute_nodes_get_by_service_id(context, service_id)
        # NOTE(sbauza): Old version was returning an item, we need to keep this
        # behaviour for backwards compatibility
        db_compute = db_computes[0]
        return cls._from_db_object(context, cls(), db_compute)
    @base.remotable_classmethod
    def get_by_host_and_nodename(cls, context, host, nodename):
        """Look up by (host, hypervisor nodename), falling back to the
        service_id linkage for old records without a host column.

        :raises: ComputeHostNotFound if no matching node exists.
        """
        try:
            db_compute = db.compute_node_get_by_host_and_nodename(
                context, host, nodename)
        except exception.ComputeHostNotFound:
            # FIXME(sbauza): Some old computes can still have no host record
            # We need to provide compatibility by using the old service_id
            # record.
            # We assume the compatibility as an extra penalty of one more DB
            # call but that's necessary until all nodes are upgraded.
            try:
                service = objects.Service.get_by_compute_host(context, host)
                db_computes = db.compute_nodes_get_by_service_id(
                    context, service.id)
            except exception.ServiceNotFound:
                # We need to provide the same exception upstream
                raise exception.ComputeHostNotFound(host=host)
            db_compute = None
            for compute in db_computes:
                if compute['hypervisor_hostname'] == nodename:
                    db_compute = compute
                    # We can avoid an extra call to Service object in
                    # _from_db_object
                    db_compute['host'] = service.host
                    break
            if not db_compute:
                raise exception.ComputeHostNotFound(host=host)
        return cls._from_db_object(context, cls(), db_compute)
    @base.remotable_classmethod
    def get_first_node_by_host_for_old_compat(cls, context, host,
                                              use_slave=False):
        """Return one node for *host*, for callers predating multi-node."""
        computes = ComputeNodeList.get_all_by_host(context, host, use_slave)
        # FIXME(sbauza): Some hypervisors (VMware, Ironic) can return multiple
        # nodes per host, we should return all the nodes and modify the callers
        # instead.
        # Arbitrarily returning the first node.
        return computes[0]
    @staticmethod
    def _convert_stats_to_db_format(updates):
        """Serialize the stats dict to JSON in-place in *updates*."""
        stats = updates.pop('stats', None)
        if stats is not None:
            updates['stats'] = jsonutils.dumps(stats)
    @staticmethod
    def _convert_host_ip_to_db_format(updates):
        """Stringify host_ip (an IPAddress field) in-place in *updates*."""
        host_ip = updates.pop('host_ip', None)
        if host_ip:
            updates['host_ip'] = str(host_ip)
    @staticmethod
    def _convert_supported_instances_to_db_format(updates):
        """Serialize supported_hv_specs to the supported_instances column."""
        hv_specs = updates.pop('supported_hv_specs', None)
        if hv_specs is not None:
            hv_specs = [hv_spec.to_list() for hv_spec in hv_specs]
            updates['supported_instances'] = jsonutils.dumps(hv_specs)
    @staticmethod
    def _convert_pci_stats_to_db_format(updates):
        """Serialize pci_device_pools to the pci_stats column."""
        pools = updates.pop('pci_device_pools', None)
        if pools:
            updates['pci_stats'] = jsonutils.dumps(pools.obj_to_primitive())
    @base.remotable
    def create(self):
        """Insert this node into the DB; fails if it was already created."""
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.obj_get_changes()
        self._convert_stats_to_db_format(updates)
        self._convert_host_ip_to_db_format(updates)
        self._convert_supported_instances_to_db_format(updates)
        self._convert_pci_stats_to_db_format(updates)
        db_compute = db.compute_node_create(self._context, updates)
        self._from_db_object(self._context, self, db_compute)
    @base.remotable
    def save(self, prune_stats=False):
        """Persist changed fields; refreshes the object from the DB row."""
        # NOTE(belliott) ignore prune_stats param, no longer relevant
        updates = self.obj_get_changes()
        updates.pop('id', None)
        self._convert_stats_to_db_format(updates)
        self._convert_host_ip_to_db_format(updates)
        self._convert_supported_instances_to_db_format(updates)
        self._convert_pci_stats_to_db_format(updates)
        db_compute = db.compute_node_update(self._context, self.id, updates)
        self._from_db_object(self._context, self, db_compute)
    @base.remotable
    def destroy(self):
        """Delete this node's DB record."""
        db.compute_node_delete(self._context, self.id)
    @property
    def service(self):
        # Lazily fetched and cached Service object for this node.
        if not hasattr(self, '_cached_service'):
            self._cached_service = objects.Service.get_by_id(self._context,
                                                             self.service_id)
        return self._cached_service
class ComputeNodeList(base.ObjectListBase, base.NovaObject):
    """Versioned list of ComputeNode objects with bulk query helpers."""
    # Version 1.0: Initial version
    #              ComputeNode <= version 1.2
    # Version 1.1 ComputeNode version 1.3
    # Version 1.2 Add get_by_service()
    # Version 1.3 ComputeNode version 1.4
    # Version 1.4 ComputeNode version 1.5
    # Version 1.5 Add use_slave to get_by_service
    # Version 1.6 ComputeNode version 1.6
    # Version 1.7 ComputeNode version 1.7
    # Version 1.8 ComputeNode version 1.8 + add get_all_by_host()
    # Version 1.9 ComputeNode version 1.9
    # Version 1.10 ComputeNode version 1.10
    # Version 1.11 ComputeNode version 1.11
    VERSION = '1.11'
    fields = {
        'objects': fields.ListOfObjectsField('ComputeNode'),
        }
    # Mapping of list version -> contained ComputeNode version.
    child_versions = {
        '1.0': '1.2',
        # NOTE(danms): ComputeNode was at 1.2 before we added this
        '1.1': '1.3',
        '1.2': '1.3',
        '1.3': '1.4',
        '1.4': '1.5',
        '1.5': '1.5',
        '1.6': '1.6',
        '1.7': '1.7',
        '1.8': '1.8',
        '1.9': '1.9',
        '1.10': '1.10',
        '1.11': '1.11',
        }
    @base.remotable_classmethod
    def get_all(cls, context):
        """Return every compute node in the database."""
        db_computes = db.compute_node_get_all(context)
        return base.obj_make_list(context, cls(context), objects.ComputeNode,
                                  db_computes)
    @base.remotable_classmethod
    def get_by_hypervisor(cls, context, hypervisor_match):
        """Return nodes whose hypervisor hostname matches the filter."""
        db_computes = db.compute_node_search_by_hypervisor(context,
                                                           hypervisor_match)
        return base.obj_make_list(context, cls(context), objects.ComputeNode,
                                  db_computes)
    @base.remotable_classmethod
    def _get_by_service(cls, context, service_id, use_slave=False):
        """Return nodes attached to *service_id*; empty list if none."""
        try:
            db_computes = db.compute_nodes_get_by_service_id(
                context, service_id)
        except exception.ServiceNotFound:
            # NOTE(sbauza): Previous behaviour was returning an empty list
            # if the service was created with no computes, we need to keep it.
            db_computes = []
        return base.obj_make_list(context, cls(context), objects.ComputeNode,
                                  db_computes)
    @classmethod
    def get_by_service(cls, context, service, use_slave=False):
        """Convenience wrapper taking a Service object rather than an id."""
        return cls._get_by_service(context, service.id, use_slave=use_slave)
    @base.remotable_classmethod
    def get_all_by_host(cls, context, host, use_slave=False):
        """Return all nodes for *host*, with service_id fallback for old
        records lacking a host column.

        :raises: ComputeHostNotFound if the host has no nodes or service.
        """
        try:
            db_computes = db.compute_node_get_all_by_host(context, host,
                                                          use_slave)
        except exception.ComputeHostNotFound:
            # FIXME(sbauza): Some old computes can still have no host record
            # We need to provide compatibility by using the old service_id
            # record.
            # We assume the compatibility as an extra penalty of one more DB
            # call but that's necessary until all nodes are upgraded.
            try:
                service = objects.Service.get_by_compute_host(context, host,
                                                              use_slave)
                db_computes = db.compute_nodes_get_by_service_id(
                    context, service.id)
            except exception.ServiceNotFound:
                # We need to provide the same exception upstream
                raise exception.ComputeHostNotFound(host=host)
            # We can avoid an extra call to Service object in _from_db_object
            for db_compute in db_computes:
                db_compute['host'] = service.host
        return base.obj_make_list(context, cls(context), objects.ComputeNode,
                                  db_computes)
|
|
"""
Entrainment of a single biophysical neuron by a periodic pulse train.
Cf. Chapter 10 of Izhikevich.
"""
from __future__ import division
from PyDSTool import *
from common_lib import *
gentype='dopri' # dopri, euler, etc.
def makeHHneuron(name, excit_type, par_args, ic_args,
                 Istim_dict=None, gentype='vode', specials=None):
    """Build a Hodgkin-Huxley neuron Generator with optional pulse input.

    name       -- name given to the resulting Generator.
    excit_type -- 'Type I' or 'Type II'; selects the rate-function set.
    par_args   -- dict of parameters (gna, gk, gl, vna, vk, vl, Iapp, C).
    ic_args    -- dict of initial conditions for v, m, h, n.
    Istim_dict -- optional pre-prepared external input dict of Variable
                  objects, with the key 'Istim'.
    gentype    -- one of 'vode', 'euler', 'radau', 'dopri'.
    specials   -- optional dict merged into DSargs entry-by-entry, permitting
                  arbitrary additions/overrides.

    Raises ValueError for an unknown excit_type and NotImplementedError for
    an unknown gentype.
    """
    # Interpreted integrators need Python RHS code; the rest compile to C.
    if gentype in ['vode', 'euler']:
        targetlang = 'python'
    else:
        targetlang = 'C'
    # The stimulus term only appears in the voltage RHS when an input exists.
    if Istim_dict is None:
        Istim_str = ''
    else:
        Istim_str = '+Istim'
    # Right-hand sides for the four HH state variables (string specs).
    vfn_str = '(Iapp-ionic(v,m,h,n)%s)/C' % Istim_str
    mfn_str = 'ma(v)*(1-m)-mb(v)*m'
    nfn_str = 'na(v)*(1-n)-nb(v)*n'
    hfn_str = 'ha(v)*(1-h)-hb(v)*h'
    # Channel opening/closing rate functions (alpha/beta pairs per gate).
    if excit_type == 'Type I':
        auxfndict = {
            'ma': (['v'], '0.32*(v+54)/(1-exp(-(v+54)/4))'),
            'mb': (['v'], '0.28*(v+27)/(exp((v+27)/5)-1)'),
            'ha': (['v'], '.128*exp(-(50+v)/18)'),
            'hb': (['v'], '4/(1+exp(-(v+27)/5))'),
            'na': (['v'], '.032*(v+52)/(1-exp(-(v+52)/5))'),
            'nb': (['v'], '.5*exp(-(57+v)/40)')
            }
    elif excit_type == 'Type II':
        auxfndict = {
            'ma': (['v'], ' 0.1*(v+40)/(1-exp(-(v+40)/10))'),
            'mb': (['v'], '4*exp(-(v+65)/18)'),
            'ha': (['v'], '.07*exp(-(v+65)/20)'),
            'hb': (['v'], '1/(1+exp(-(v+35)/10))'),
            'na': (['v'], '.01*(v+55)/(1-exp(-(v+55)/10))'),
            'nb': (['v'], '.125*exp(-(v+65)/80)')
            }
    else:
        raise ValueError("Invalid excitability type")
    # Total ionic current: sodium + potassium + leak.
    auxfndict['ionic'] = (['vv', 'mm', 'hh', 'nn'],
         'gna*mm*mm*mm*hh*(vv-vna) + gk*nn*nn*nn*nn*(vv-vk) + gl*(vv-vl)')
    DSargs = args()
    DSargs.varspecs = {'v': vfn_str,'m': mfn_str,
                       'h': hfn_str, 'n': nfn_str}
    DSargs.auxvars = []
    if Istim_dict is not None:
        inputnames = ['Istim']
        DSargs.inputs = Istim_dict
        # Expose the stimulus as an auxiliary variable 'I' for plotting.
        DSargs.varspecs['I'] = 'Istim'
        DSargs.auxvars.append('I')
    else:
        inputnames = []
    DSargs.pars = par_args
    DSargs.fnspecs = auxfndict
    DSargs.xdomain = {'v': [-130, 70], 'm': [0,1], 'h': [0,1], 'n': [0,1]}
    DSargs.algparams = {'init_step':0.05,
                        'max_pts': 10000,
                        'maxevtpts': 200}
    if gentype == 'vode':
        DSargs.algparams['stiff'] = True
    DSargs.checklevel = 0
    DSargs.ics = ic_args
    DSargs.name = name
    # Event definitions: upward crossing of v = 1 marks a spike threshold.
    thresh_ev = Events.makeZeroCrossEvent('v', 1,
                                          {'name': 'thresh_ev',
                                           'eventtol': 1e-5,
                                           'precise': True,
                                           'term': False},
                                          varnames=['v'],
                                          targetlang=targetlang)
    # Zero crossing of dv/dt detects local extrema of the voltage trace.
    min_ev = Events.makeZeroCrossEvent('(Iapp-ionic(v,m,h,n)%s)/C' % Istim_str,
                                       1,
                                       {'name': 'min_ev',
                                        'eventtol': 1e-5,
                                        'precise': True,
                                        'term': False},
                                       varnames=['v', 'm', 'n', 'h'],
                                       parnames=par_args.keys(),
                                       inputnames=inputnames,
                                       fnspecs=auxfndict,
                                       targetlang=targetlang)
    DSargs.events = [thresh_ev, min_ev]
    # Merge caller-supplied overrides into DSargs, extending dict/list
    # entries in place and adding any new keys verbatim.
    if specials is not None:
        for k, v in specials.items():
            if k in DSargs:
                current_v = DSargs[k]
                if isinstance(current_v, (args, dict)):
                    DSargs[k].update(v)
                elif isinstance(current_v, list):
                    if isinstance(v, list):
                        DSargs[k].extend(v)
                    else:
                        DSargs[k].append(v)
                else:
                    raise ValueError("Unrecognized item type")
            else:
                DSargs[k] = v
    if gentype == 'vode':
        return Generator.Vode_ODEsystem(DSargs)
    elif gentype == 'euler':
        return Generator.Euler_ODEsystem(DSargs)
    elif gentype == 'radau':
        return Generator.Radau_ODEsystem(DSargs)
    elif gentype == 'dopri':
        return Generator.Dopri_ODEsystem(DSargs)
    else:
        raise NotImplementedError("Unknown ODE system type: %s"%gentype)
def test_f(HH, Istim, freq, stim_t0, duration=1, tmax=1000):
    """Drive neuron HH with a periodic square-pulse train and plot results.

    Istim is amplitude of square pulse input to neuron, having
    given duration in ms and frequency in Hz. Starts at stim_t0.

    Returns (trajectory, sampled points, array of stimulus pulse times).

    NOTE(review): 'duration' is never used below -- the pulse width passed
    to make_spike_signal is hard-coded to 1. Also, the example calls at the
    bottom of this file appear to pass duration *before* stim_t0; confirm
    the intended argument order before relying on either value.
    """
    # NOTE(review): baseline_Iapp is computed but unused -- presumably
    # leftover from an earlier version of this test.
    baseline_Iapp = HH.pars['Iapp']
    stim_period = 1000./freq
    HH.set(tdata=[0, tmax])
    # Number of whole stimulus periods that fit in [0, tmax].
    n = int(floor(tmax/stim_period))
    print "Testing with stimulus frequency %.3f Hz" % freq
    print " (stimulus period is %.4f ms)" % stim_period
    print "Stimulus amplitude is %.3f" % Istim
    stim_ts = array([stim_t0+i*stim_period for i in range(0,n+1)])
    # Build the pulse-train input signal (width hard-coded to 1 ms).
    Istim_vardict = make_spike_signal(stim_ts, 1, tmax*1.1, loval=0, hival=Istim, dt=0.1)
    HH.inputs = Istim_vardict
    # Force the Generator to notice the replaced external input.
    HH._extInputsChanged = True
    traj = HH.compute('stim_test')
    pts = traj.sample()
    # Figure 1: voltage trace with the (scaled, offset) stimulus overlaid.
    plt.figure(1)
    plt.clf()
    plt.plot(pts['t'], pts['v'], 'b', lw=2)
    plt.plot(pts['t'], 3*pts['I']-75, 'k', lw=2)
    #plt.ylim([-100, 50])
    plt.xlabel('t')
    plt.ylabel('v')
    plt.title('Voltage trace and I(t) pulse stimulus')
    # Entrainment maps need enough spikes; show_maps raises IndexError
    # otherwise, which is treated as a soft failure here.
    try:
        show_maps(traj, stim_ts, 0.3*tmax)
    except IndexError:
        print "Not enough spikes to show a map"
    return traj, pts, stim_ts
def show_maps(traj, stim_ts_all, settle_time=2000):
    """Plot entrainment diagnostics for a computed trajectory.

    Figure 2: time from each stimulus pulse to the subsequent threshold
    crossing (stimulus-phase map), with the final five points highlighted
    in red. Figure 3: return map of successive inter-spike intervals.

    traj        -- trajectory carrying 'thresh_ev' events.
    stim_ts_all -- array of stimulus pulse times.
    settle_time -- events before this time are treated as transient and
                   shown in gray.

    Raises IndexError (caught by test_f) when there are too few spikes.
    """
    v_ts_all = array(traj.getEventTimes('thresh_ev'))
    # Index of the first post-transient spike (find() from common_lib).
    v_ix = find(v_ts_all, settle_time)
    v_ts0 = v_ts_all[:v_ix]   # transient spikes
    v_ts1 = v_ts_all[v_ix:]   # settled spikes
    stim_ix = find(stim_ts_all, settle_time)
    # (removed unused stim_ts0/stim_ts1 slices -- they were never read)
    min_len = min(len(v_ts_all), len(stim_ts_all))
    plt.figure(2)
    plt.clf()
    plt.plot(stim_ts_all[:min_len], v_ts_all[:min_len]-stim_ts_all[:min_len], '.', color='gray')
    plt.plot(stim_ts_all[stim_ix:min_len-5], v_ts_all[stim_ix:min_len-5]-stim_ts_all[stim_ix:min_len-5], 'ko')
    plt.plot(stim_ts_all[min_len-5:min_len], v_ts_all[min_len-5:min_len]-stim_ts_all[min_len-5:min_len], 'ro')
    # This is the period provided the system has settled to a period-1 cycle
    Tper = v_ts1[-1] - v_ts1[-2]
    plt.xlabel('pulse stim time')
    plt.ylabel('time diff')
    plt.title('Threshold time since stim (period = %.4f)' % Tper)
    plt.figure(3)
    plt.clf()
    # Diagonal reference line: fixed points of the return map lie on it.
    plt.plot([0,120],[0,120], 'r')
    dts0 = npy.diff(v_ts0)
    plt.plot(dts0[:-1], dts0[1:], '.', color='gray')
    dts1 = npy.diff(v_ts1)
    plt.plot(dts1[:-5], dts1[1:-4], 'ko')
    plt.plot(dts1[-5:-1], dts1[-4:], 'ro')
    x0 = min(min(dts1), min(dts0))*0.9
    x1 = max(max(dts1), max(dts0))*1.1
    plt.xlim([x0,x1])
    plt.ylim([x0,x1])
    plt.xlabel('thresh t diff n')
    plt.ylabel('thresh t diff n+1')
    plt.title('Threshold return map')
#--------------------
# Script body: build both neuron models, then run one entrainment test.
# A placeholder 1-pulse signal; test_f replaces it before each run.
default_Istim_vardict = make_spike_signal([1], 0.5, 2000, loval=0, hival=1, dt=0.1)
# Raise integrator buffer limits for the long entrainment runs.
specials = {'algparams': {'max_pts': 100000,
                          'maxevtpts': 1000}}
# Make Type I HH neuron
par_args_I = {'gna': 100, 'gk': 80, 'gl': 0.1,
              'vna': 50, 'vk': -100, 'vl': -67,
              'Iapp': 0.2, 'C': 1.0}
ic_args_I = {'v':-70.0, 'm': 0, 'h': 1, 'n': 0}
HH_I = makeHHneuron('HH_entrain_I', 'Type I', par_args_I, ic_args_I,
                    Istim_dict=default_Istim_vardict,
                    gentype=gentype, specials=specials)
# Make Type II HH neuron (bimodal PRC)
par_args_II = {'gna': 120, 'gk': 36, 'gl': 0.3,
               'vna': 50, 'vk': -78, 'vl': -54.4,
               'Iapp': 8, 'C': 1.0}
ic_args_II = {'v':-70.0, 'm': 0.02, 'h': 0.6, 'n': 0.4}
HH_II = makeHHneuron('HH_entrain_II', 'Type II', par_args_II, ic_args_II,
                     Istim_dict=default_Istim_vardict,
                     gentype=gentype, specials=specials)
print "For both neurons, vary stim amplitude between 0.5 and 2 (first numeric parameter)"
print "For Type I neuron, vary stim frequency between 5 and 30 Hz (second numeric parameter)"
print "For Type II neuron, vary stim frequency between 30 and 130 Hz (second numeric parameter)"
## Function signature (per the def above):
# test_f( model, amplitude, frequency, stimulus onset time, duration, max integration time )
# NOTE(review): the calls below seem to pass duration *before* onset time
# (e.g. onset 1.5 ms, duration 300?) -- test_f ignores 'duration', so the
# discrepancy is currently harmless, but confirm the intended order.
####### Type I neuron
### 1:1 entrainment - in-phase synch (with excitation)
#traj, pts, stim_ts = test_f(HH_I, 1, 15, 1, 300, 5000)
####### Type II neuron
### 1:1 entrainment -- anti-phase synch (with excitation)
# has a transient cycle-skipping around t = 200 (compare Ch. 10 Fig 10.16)
traj, pts, stim_ts = test_f(HH_II, 1.2, 60, 1.5, 300, 1000)
plt.show()
|
|
from future_builtins import zip
from django.core.exceptions import FieldError
from django.db import transaction
from django.db.backends.util import truncate_name
from django.db.models.query_utils import select_related_descend
from django.db.models.sql.constants import *
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.query import get_order_dir, Query
from django.db.utils import DatabaseError
class SQLCompiler(object):
def __init__(self, query, connection, using):
self.query = query
self.connection = connection
self.using = using
self.quote_cache = {}
    def pre_sql_setup(self):
        """
        Does any necessary class setup immediately prior to producing SQL. This
        is for things that can't necessarily be done in __init__ because we
        might not have all the pieces in place at that time.
        # TODO: after the query has been executed, the altered state should be
        # cleaned. We are not using a clone() of the query here.
        """
        # Ensure at least the base table is joined in.
        if not self.query.tables:
            self.query.join((None, self.query.model._meta.db_table, None, None))
        # Pull in parent-model tables when the default columns are needed.
        if (not self.query.select and self.query.default_cols and not
                self.query.included_inherited_models):
            self.query.setup_inherited_models()
        # Populate related_select_cols for select_related() queries once.
        if self.query.select_related and not self.query.related_select_cols:
            self.fill_related_selections()
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
    def as_sql(self, with_limits=True, with_col_aliases=False):
        """
        Creates the SQL for this query. Returns the SQL string and list of
        parameters.
        If 'with_limits' is False, any limit/offset information is not included
        in the query.
        """
        # An empty slice (low_mark == high_mark) can never match rows.
        if with_limits and self.query.low_mark == self.query.high_mark:
            return '', ()
        self.pre_sql_setup()
        # After executing the query, we must get rid of any joins the query
        # setup created. So, take note of alias counts before the query ran.
        # However we do not want to get rid of stuff done in pre_sql_setup(),
        # as the pre_sql_setup will modify query state in a way that forbids
        # another run of it.
        self.refcounts_before = self.query.alias_refcount.copy()
        out_cols = self.get_columns(with_col_aliases)
        ordering, ordering_group_by = self.get_ordering()
        distinct_fields = self.get_distinct()
        # This must come after 'select', 'ordering' and 'distinct' -- see
        # docstring of get_from_clause() for details.
        from_, f_params = self.get_from_clause()
        qn = self.quote_name_unless_alias
        where, w_params = self.query.where.as_sql(qn=qn, connection=self.connection)
        having, h_params = self.query.having.as_sql(qn=qn, connection=self.connection)
        params = []
        for val in self.query.extra_select.itervalues():
            params.extend(val[1])
        result = ['SELECT']
        if self.query.distinct:
            result.append(self.connection.ops.distinct_sql(distinct_fields))
        result.append(', '.join(out_cols + self.query.ordering_aliases))
        result.append('FROM')
        result.extend(from_)
        params.extend(f_params)
        if where:
            result.append('WHERE %s' % where)
            params.extend(w_params)
        grouping, gb_params = self.get_grouping()
        if grouping:
            if distinct_fields:
                raise NotImplementedError(
                    "annotate() + distinct(fields) not implemented.")
            if ordering:
                # If the backend can't group by PK (i.e., any database
                # other than MySQL), then any fields mentioned in the
                # ordering clause needs to be in the group by clause.
                if not self.connection.features.allows_group_by_pk:
                    for col, col_params in ordering_group_by:
                        if col not in grouping:
                            grouping.append(str(col))
                            gb_params.extend(col_params)
            else:
                ordering = self.connection.ops.force_no_ordering()
            result.append('GROUP BY %s' % ', '.join(grouping))
            params.extend(gb_params)
        if having:
            result.append('HAVING %s' % having)
            params.extend(h_params)
        if ordering:
            result.append('ORDER BY %s' % ', '.join(ordering))
        if with_limits:
            if self.query.high_mark is not None:
                result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark))
            if self.query.low_mark:
                # An offset with no upper bound still needs a LIMIT on some
                # backends; no_limit_value() supplies the backend sentinel.
                if self.query.high_mark is None:
                    val = self.connection.ops.no_limit_value()
                    if val:
                        result.append('LIMIT %d' % val)
                result.append('OFFSET %d' % self.query.low_mark)
        if self.query.select_for_update and self.connection.features.has_select_for_update:
            # If we've been asked for a NOWAIT query but the backend does not support it,
            # raise a DatabaseError otherwise we could get an unexpected deadlock.
            nowait = self.query.select_for_update_nowait
            if nowait and not self.connection.features.has_select_for_update_nowait:
                raise DatabaseError('NOWAIT is not supported on this database backend.')
            result.append(self.connection.ops.for_update_sql(nowait=nowait))
        # Finally do cleanup - get rid of the joins we created above.
        self.query.reset_refcounts(self.refcounts_before)
        return ' '.join(result), tuple(params)
def as_nested_sql(self):
"""
Perform the same functionality as the as_sql() method, returning an
SQL string and parameters. However, the alias prefixes are bumped
beforehand (in a copy -- the current query isn't changed), and any
ordering is removed if the query is unsliced.
Used when nesting this query inside another.
"""
obj = self.query.clone()
if obj.low_mark == 0 and obj.high_mark is None:
# If there is no slicing in use, then we can safely drop all ordering
obj.clear_ordering(True)
obj.bump_prefix()
return obj.get_compiler(connection=self.connection).as_sql()
    def get_columns(self, with_aliases=False):
        """
        Returns the list of columns to use in the select statement. If no
        columns have been specified, returns all columns relating to fields in
        the model.
        If 'with_aliases' is true, any column names that are duplicated
        (without the table names) are given unique aliases. This is needed in
        some cases to avoid ambiguity with nested queries.

        As a side effect, stores the set of aliases used on
        self._select_aliases (read later by get_ordering()).
        """
        qn = self.quote_name_unless_alias
        qn2 = self.connection.ops.quote_name
        # extra(select=...) entries come first, always aliased.
        result = ['(%s) AS %s' % (col[0], qn2(alias)) for alias, col in self.query.extra_select.iteritems()]
        aliases = set(self.query.extra_select.keys())
        if with_aliases:
            col_aliases = aliases.copy()
        else:
            col_aliases = set()
        if self.query.select:
            only_load = self.deferred_to_columns()
            for col in self.query.select:
                if isinstance(col, (list, tuple)):
                    alias, column = col
                    table = self.query.alias_map[alias].table_name
                    # Deferred columns are not selected.
                    if table in only_load and column not in only_load[table]:
                        continue
                    r = '%s.%s' % (qn(alias), qn(column))
                    if with_aliases:
                        if col[1] in col_aliases:
                            # Duplicate bare column name: invent a unique alias.
                            c_alias = 'Col%d' % len(col_aliases)
                            result.append('%s AS %s' % (r, c_alias))
                            aliases.add(c_alias)
                            col_aliases.add(c_alias)
                        else:
                            result.append('%s AS %s' % (r, qn2(col[1])))
                            aliases.add(r)
                            col_aliases.add(col[1])
                    else:
                        result.append(r)
                        aliases.add(r)
                        col_aliases.add(col[1])
                else:
                    # Non-tuple entries (e.g. aggregates) render themselves.
                    result.append(col.as_sql(qn, self.connection))
                    if hasattr(col, 'alias'):
                        aliases.add(col.alias)
                        col_aliases.add(col.alias)
        elif self.query.default_cols:
            cols, new_aliases = self.get_default_columns(with_aliases,
                    col_aliases)
            result.extend(cols)
            aliases.update(new_aliases)
        max_name_length = self.connection.ops.max_name_length()
        # Aggregate columns, truncating alias names to the backend limit.
        result.extend([
            '%s%s' % (
                aggregate.as_sql(qn, self.connection),
                alias is not None
                    and ' AS %s' % qn(truncate_name(alias, max_name_length))
                    or ''
            )
            for alias, aggregate in self.query.aggregate_select.items()
        ])
        # Columns pulled in by select_related().
        for table, col in self.query.related_select_cols:
            r = '%s.%s' % (qn(table), qn(col))
            if with_aliases and col in col_aliases:
                c_alias = 'Col%d' % len(col_aliases)
                result.append('%s AS %s' % (r, c_alias))
                aliases.add(c_alias)
                col_aliases.add(c_alias)
            else:
                result.append(r)
                aliases.add(r)
                col_aliases.add(col)
        self._select_aliases = aliases
        return result
    def get_default_columns(self, with_aliases=False, col_aliases=None,
            start_alias=None, opts=None, as_pairs=False, local_only=False):
        """
        Computes the default columns for selecting every field in the base
        model. Will sometimes be called to pull in related models (e.g. via
        select_related), in which case "opts" and "start_alias" will be given
        to provide a starting point for the traversal.
        Returns a list of strings, quoted appropriately for use in SQL
        directly, as well as a set of aliases used in the select statement (if
        'as_pairs' is True, returns a list of (alias, col_name) pairs instead
        of strings as the first component and None as the second component).
        """
        result = []
        if opts is None:
            opts = self.query.model._meta
        qn = self.quote_name_unless_alias
        qn2 = self.connection.ops.quote_name
        aliases = set()
        only_load = self.deferred_to_columns()
        # Skip all proxy to the root proxied model
        proxied_model = opts.concrete_model
        if start_alias:
            # Maps each ancestor model to the table alias that serves it;
            # None maps to the starting alias itself.
            seen = {None: start_alias}
        for field, model in opts.get_fields_with_model():
            # local_only skips fields inherited from parent models.
            if local_only and model is not None:
                continue
            if start_alias:
                try:
                    alias = seen[model]
                except KeyError:
                    if model is proxied_model:
                        alias = start_alias
                    else:
                        # Join to the parent table holding this field.
                        link_field = opts.get_ancestor_link(model)
                        alias = self.query.join((start_alias, model._meta.db_table,
                                link_field.column, model._meta.pk.column))
                    seen[model] = alias
            else:
                # If we're starting from the base model of the queryset, the
                # aliases will have already been set up in pre_sql_setup(), so
                # we can save time here.
                alias = self.query.included_inherited_models[model]
            table = self.query.alias_map[alias].table_name
            # Deferred fields are not loaded.
            if table in only_load and field.column not in only_load[table]:
                continue
            if as_pairs:
                result.append((alias, field.column))
                aliases.add(alias)
                continue
            if with_aliases and field.column in col_aliases:
                # Duplicate bare column name: invent a unique alias.
                c_alias = 'Col%d' % len(col_aliases)
                result.append('%s.%s AS %s' % (qn(alias),
                    qn2(field.column), c_alias))
                col_aliases.add(c_alias)
                aliases.add(c_alias)
            else:
                r = '%s.%s' % (qn(alias), qn2(field.column))
                result.append(r)
                aliases.add(r)
                if with_aliases:
                    col_aliases.add(field.column)
        return result, aliases
def get_distinct(self):
"""
Returns a quoted list of fields to use in DISTINCT ON part of the query.
Note that this method can alter the tables in the query, and thus it
must be called before get_from_clause().
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = []
opts = self.query.model._meta
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
field, col, alias, _, _ = self._setup_joins(parts, opts, None)
col, alias = self._final_join_removal(col, alias)
result.append("%s.%s" % (qn(alias), qn2(col)))
return result
    def get_ordering(self):
        """
        Returns a tuple containing a list representing the SQL elements in the
        "order by" clause, and the list of SQL elements that need to be added
        to the GROUP BY clause as a result of the ordering.
        Also sets the ordering_aliases attribute on this instance to a list of
        extra aliases needed in the select.
        Determining the ordering SQL can change the tables we need to include,
        so this should be run *before* get_from_clause().
        """
        # Precedence: extra(order_by=...) > explicit order_by > Meta.ordering.
        if self.query.extra_order_by:
            ordering = self.query.extra_order_by
        elif not self.query.default_ordering:
            ordering = self.query.order_by
        else:
            ordering = (self.query.order_by
                        or self.query.model._meta.ordering
                        or [])
        qn = self.quote_name_unless_alias
        qn2 = self.connection.ops.quote_name
        distinct = self.query.distinct
        select_aliases = self._select_aliases
        result = []
        group_by = []
        ordering_aliases = []
        # reverse() flips the meaning of ASC/DESC for every term.
        if self.query.standard_ordering:
            asc, desc = ORDER_DIR['ASC']
        else:
            asc, desc = ORDER_DIR['DESC']
        # It's possible, due to model inheritance, that normal usage might try
        # to include the same field more than once in the ordering. We track
        # the table/column pairs we use and discard any after the first use.
        processed_pairs = set()
        for field in ordering:
            if field == '?':
                # Random ordering.
                result.append(self.connection.ops.random_function_sql())
                continue
            if isinstance(field, int):
                # Ordering by select-column position; negative means DESC.
                if field < 0:
                    order = desc
                    field = -field
                else:
                    order = asc
                result.append('%s %s' % (field, order))
                group_by.append((field, []))
                continue
            col, order = get_order_dir(field, asc)
            if col in self.query.aggregate_select:
                result.append('%s %s' % (qn(col), order))
                continue
            if '.' in field:
                # This came in through an extra(order_by=...) addition. Pass it
                # on verbatim.
                table, col = col.split('.', 1)
                if (table, col) not in processed_pairs:
                    elt = '%s.%s' % (qn(table), col)
                    processed_pairs.add((table, col))
                    if not distinct or elt in select_aliases:
                        result.append('%s %s' % (elt, order))
                        group_by.append((elt, []))
            elif get_order_dir(field)[0] not in self.query.extra_select:
                # 'col' is of the form 'field' or 'field1__field2' or
                # '-field1__field2__field', etc.
                for table, col, order in self.find_ordering_name(field,
                        self.query.model._meta, default_order=asc):
                    if (table, col) not in processed_pairs:
                        elt = '%s.%s' % (qn(table), qn2(col))
                        processed_pairs.add((table, col))
                        # DISTINCT requires ordering columns in the select.
                        if distinct and elt not in select_aliases:
                            ordering_aliases.append(elt)
                        result.append('%s %s' % (elt, order))
                        group_by.append((elt, []))
            else:
                # Ordering by an extra(select=...) alias.
                elt = qn2(col)
                if distinct and col not in select_aliases:
                    ordering_aliases.append(elt)
                result.append('%s %s' % (elt, order))
                group_by.append(self.query.extra_select[col])
        self.query.ordering_aliases = ordering_aliases
        return result, group_by
    def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
            already_seen=None):
        """
        Returns the table alias (the name might be ambiguous, the alias will
        not be) and column name for ordering by the given 'name' parameter.
        The 'name' is of the form 'field1__field2__...__fieldN'.

        Returns a list of (alias, column, order) triples; recursion into a
        related model's Meta.ordering may produce more than one.
        """
        name, order = get_order_dir(name, default_order)
        pieces = name.split(LOOKUP_SEP)
        field, col, alias, joins, opts = self._setup_joins(pieces, opts, alias)
        # If we get to this point and the field is a relation to another model,
        # append the default ordering for that model.
        if field.rel and len(joins) > 1 and opts.ordering:
            # Firstly, avoid infinite loops.
            if not already_seen:
                already_seen = set()
            join_tuple = tuple([self.query.alias_map[j].table_name for j in joins])
            if join_tuple in already_seen:
                raise FieldError('Infinite loop caused by ordering.')
            already_seen.add(join_tuple)
            results = []
            for item in opts.ordering:
                results.extend(self.find_ordering_name(item, opts, alias,
                        order, already_seen))
            return results
        # Non-relation (or no model ordering): trim trailing joins and emit.
        col, alias = self._final_join_removal(col, alias)
        return [(alias, col, order)]
    def _setup_joins(self, pieces, opts, alias):
        """
        A helper method for get_ordering and get_distinct. This method will
        call query.setup_joins, handle refcounts and then promote the joins.
        Note that get_ordering and get_distinct must produce same target
        columns on same input, as the prefixes of get_ordering and get_distinct
        must match. Executing SQL where this is not true is an error.

        Returns (field, column_name, final_alias, joins, opts).
        """
        if not alias:
            alias = self.query.get_initial_alias()
        field, target, opts, joins, _, _ = self.query.setup_joins(pieces,
            opts, alias, False)
        # The last join in the chain holds the target column.
        alias = joins[-1]
        col = target.column
        if not field.rel:
            # To avoid inadvertent trimming of a necessary alias, use the
            # refcount to show that we are referencing a non-relation field on
            # the model.
            self.query.ref_alias(alias)
        # Must use left outer joins for nullable fields and their relations.
        # Ordering or distinct must not affect the returned set, and INNER
        # JOINS for nullable fields could do this.
        self.query.promote_alias_chain(joins,
            self.query.alias_map[joins[0]].join_type == self.query.LOUTER)
        return field, col, alias, joins, opts
def _final_join_removal(self, col, alias):
"""
A helper method for get_distinct and get_ordering. This method will
trim extra not-needed joins from the tail of the join chain.
This is very similar to what is done in trim_joins, but we will
trim LEFT JOINS here. It would be a good idea to consolidate this
method and query.trim_joins().
"""
if alias:
while 1:
join = self.query.alias_map[alias]
if col != join.rhs_join_col:
break
self.query.unref_alias(alias)
alias = join.lhs_alias
col = join.lhs_join_col
return col, alias
def get_from_clause(self):
"""
Returns a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Sub-classes, can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables we need. This means the select columns,
ordering and distinct must be done first.
"""
result = []
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
first = True
for alias in self.query.tables:
if not self.query.alias_refcount[alias]:
continue
try:
name, alias, join_type, lhs, lhs_col, col, nullable = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
alias_str = (alias != name and ' %s' % alias or '')
if join_type and not first:
result.append('%s %s%s ON (%s.%s = %s.%s)'
% (join_type, qn(name), alias_str, qn(lhs),
qn2(lhs_col), qn(alias), qn2(col)))
else:
connector = not first and ', ' or ''
result.append('%s%s%s' % (connector, qn(name), alias_str))
first = False
for t in self.query.extra_tables:
alias, unused = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# calls increments the refcount, so an alias refcount of one means
# this is the only reference.
if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
connector = not first and ', ' or ''
result.append('%s%s' % (connector, qn(alias)))
first = False
return result, []
    def get_grouping(self):
        """
        Returns a tuple representing the SQL elements in the "group by" clause.
        """
        qn = self.quote_name_unless_alias
        result, params = [], []
        if self.query.group_by is not None:
            # MySQL-style shortcut: grouping by the PK suffices when every
            # model field is selected and the backend allows it.
            if (len(self.query.model._meta.fields) == len(self.query.select) and
                self.connection.features.allows_group_by_pk):
                self.query.group_by = [
                    (self.query.model._meta.db_table, self.query.model._meta.pk.column)
                ]
            group_by = self.query.group_by or []
            extra_selects = []
            for extra_select, extra_params in self.query.extra_select.itervalues():
                extra_selects.append(extra_select)
                params.extend(extra_params)
            cols = (group_by + self.query.select +
                self.query.related_select_cols + extra_selects)
            # Dedupe while preserving column order.
            seen = set()
            for col in cols:
                if col in seen:
                    continue
                seen.add(col)
                if isinstance(col, (list, tuple)):
                    result.append('%s.%s' % (qn(col[0]), qn(col[1])))
                elif hasattr(col, 'as_sql'):
                    result.append(col.as_sql(qn, self.connection))
                else:
                    # Raw extra-select SQL: parenthesize verbatim.
                    result.append('(%s)' % str(col))
        return result, params
def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1,
used=None, requested=None, restricted=None, nullable=None,
dupe_set=None, avoid_set=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
if not restricted and self.query.max_depth and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
self.query.related_select_cols = []
self.query.related_select_fields = []
if not used:
used = set()
if dupe_set is None:
dupe_set = set()
if avoid_set is None:
avoid_set = set()
orig_dupe_set = dupe_set
# Setup for the case when only particular related fields should be
# included in the related selection.
if requested is None:
if isinstance(self.query.select_related, dict):
requested = self.query.select_related
restricted = True
else:
restricted = False
for f, model in opts.get_fields_with_model():
if not select_related_descend(f, restricted, requested):
continue
# The "avoid" set is aliases we want to avoid just for this
# particular branch of the recursion. They aren't permanently
# forbidden from reuse in the related selection tables (which is
# what "used" specifies).
avoid = avoid_set.copy()
dupe_set = orig_dupe_set.copy()
table = f.rel.to._meta.db_table
promote = nullable or f.null
if model:
int_opts = opts
alias = root_alias
alias_chain = []
for int_model in opts.get_base_chain(model):
# Proxy model have elements in base chain
# with no parents, assign the new options
# object and skip to the next base in that
# case
if not int_opts.parents[int_model]:
int_opts = int_model._meta
continue
lhs_col = int_opts.parents[int_model].column
dedupe = lhs_col in opts.duplicate_targets
if dedupe:
avoid.update(self.query.dupe_avoidance.get((id(opts), lhs_col),
()))
dupe_set.add((opts, lhs_col))
int_opts = int_model._meta
alias = self.query.join((alias, int_opts.db_table, lhs_col,
int_opts.pk.column), exclusions=used,
promote=promote)
alias_chain.append(alias)
for (dupe_opts, dupe_col) in dupe_set:
self.query.update_dupe_avoidance(dupe_opts, dupe_col, alias)
if self.query.alias_map[root_alias].join_type == self.query.LOUTER:
self.query.promote_alias_chain(alias_chain, True)
else:
alias = root_alias
dedupe = f.column in opts.duplicate_targets
if dupe_set or dedupe:
avoid.update(self.query.dupe_avoidance.get((id(opts), f.column), ()))
if dedupe:
dupe_set.add((opts, f.column))
alias = self.query.join((alias, table, f.column,
f.rel.get_related_field().column),
exclusions=used.union(avoid), promote=promote)
used.add(alias)
columns, aliases = self.get_default_columns(start_alias=alias,
opts=f.rel.to._meta, as_pairs=True)
self.query.related_select_cols.extend(columns)
if self.query.alias_map[alias].join_type == self.query.LOUTER:
self.query.promote_alias_chain(aliases, True)
self.query.related_select_fields.extend(f.rel.to._meta.fields)
if restricted:
next = requested.get(f.name, {})
else:
next = False
new_nullable = f.null or promote
for dupe_opts, dupe_col in dupe_set:
self.query.update_dupe_avoidance(dupe_opts, dupe_col, alias)
self.fill_related_selections(f.rel.to._meta, alias, cur_depth + 1,
used, next, restricted, new_nullable, dupe_set, avoid)
if restricted:
related_fields = [
(o.field, o.model)
for o in opts.get_all_related_objects()
if o.field.unique
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested, reverse=True):
continue
# The "avoid" set is aliases we want to avoid just for this
# particular branch of the recursion. They aren't permanently
# forbidden from reuse in the related selection tables (which is
# what "used" specifies).
avoid = avoid_set.copy()
dupe_set = orig_dupe_set.copy()
table = model._meta.db_table
int_opts = opts
alias = root_alias
alias_chain = []
chain = opts.get_base_chain(f.rel.to)
if chain is not None:
for int_model in chain:
# Proxy model have elements in base chain
# with no parents, assign the new options
# object and skip to the next base in that
# case
if not int_opts.parents[int_model]:
int_opts = int_model._meta
continue
lhs_col = int_opts.parents[int_model].column
dedupe = lhs_col in opts.duplicate_targets
if dedupe:
avoid.update((self.query.dupe_avoidance.get(id(opts), lhs_col),
()))
dupe_set.add((opts, lhs_col))
int_opts = int_model._meta
alias = self.query.join(
(alias, int_opts.db_table, lhs_col, int_opts.pk.column),
exclusions=used, promote=True, reuse=used
)
alias_chain.append(alias)
for dupe_opts, dupe_col in dupe_set:
self.query.update_dupe_avoidance(dupe_opts, dupe_col, alias)
dedupe = f.column in opts.duplicate_targets
if dupe_set or dedupe:
avoid.update(self.query.dupe_avoidance.get((id(opts), f.column), ()))
if dedupe:
dupe_set.add((opts, f.column))
alias = self.query.join(
(alias, table, f.rel.get_related_field().column, f.column),
exclusions=used.union(avoid),
promote=True
)
used.add(alias)
columns, aliases = self.get_default_columns(start_alias=alias,
opts=model._meta, as_pairs=True, local_only=True)
self.query.related_select_cols.extend(columns)
self.query.related_select_fields.extend(model._meta.fields)
next = requested.get(f.related_query_name(), {})
new_nullable = f.null or None
self.fill_related_selections(model._meta, table, cur_depth+1,
used, next, restricted, new_nullable)
def deferred_to_columns(self):
"""
Converts the self.deferred_loading data structure to mapping of table
names to sets of column names which are to be loaded. Returns the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.deferred_to_columns_cb)
return columns
    def results_iter(self):
        """
        Returns an iterator over the results from executing this query.

        Each yielded value is one result row; rows are post-processed for
        backend column resolution and aggregate value conversion as needed.
        """
        # Backends that need to coerce DB values define resolve_columns.
        resolve_columns = hasattr(self, 'resolve_columns')
        fields = None
        has_aggregate_select = bool(self.query.aggregate_select)
        # Set transaction dirty if we're using SELECT FOR UPDATE to ensure
        # a subsequent commit/rollback is executed, so any database locks
        # are released.
        if self.query.select_for_update and transaction.is_managed(self.using):
            transaction.set_dirty(self.using)
        for rows in self.execute_sql(MULTI):
            for row in rows:
                if resolve_columns:
                    if fields is None:
                        # We only set this up here because
                        # related_select_fields isn't populated until
                        # execute_sql() has been called.
                        if self.query.select_fields:
                            fields = self.query.select_fields + self.query.related_select_fields
                        else:
                            fields = self.query.model._meta.fields
                        # If the field was deferred, exclude it from being passed
                        # into `resolve_columns` because it wasn't selected.
                        only_load = self.deferred_to_columns()
                        if only_load:
                            db_table = self.query.model._meta.db_table
                            fields = [f for f in fields if db_table in only_load and
                                      f.column in only_load[db_table]]
                    row = self.resolve_columns(row, fields)
                if has_aggregate_select:
                    # Aggregate columns sit between the extra/select columns
                    # and any trailing ordering columns; convert just that
                    # slice through the backend's aggregate resolution.
                    aggregate_start = len(self.query.extra_select.keys()) + len(self.query.select)
                    aggregate_end = aggregate_start + len(self.query.aggregate_select)
                    row = tuple(row[:aggregate_start]) + tuple([
                        self.query.resolve_aggregate(value, aggregate, self.connection)
                        for (alias, aggregate), value
                        in zip(self.query.aggregate_select.items(), row[aggregate_start:aggregate_end])
                    ]) + tuple(row[aggregate_end:])
                yield row
    def execute_sql(self, result_type=MULTI):
        """
        Run the query against the database and returns the result(s). The
        return value is a single data item if result_type is SINGLE, or an
        iterator over the results if the result_type is MULTI.

        result_type is either MULTI (use fetchmany() to retrieve all rows),
        SINGLE (only retrieve a single row), or None. In this last case, the
        cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery. It's possible, however, that no query
        is needed, as the filters describe an empty set. In that case, None is
        returned, to avoid any unnecessary database interaction.
        """
        try:
            sql, params = self.as_sql()
            if not sql:
                raise EmptyResultSet
        except EmptyResultSet:
            # An empty result set needs no database round trip; return an
            # empty iterator (MULTI) or None (SINGLE / raw cursor).
            if result_type == MULTI:
                return empty_iter()
            else:
                return
        cursor = self.connection.cursor()
        cursor.execute(sql, params)
        if not result_type:
            return cursor
        if result_type == SINGLE:
            if self.query.ordering_aliases:
                # Trim the extra ordering-only columns off the row.
                return cursor.fetchone()[:-len(self.query.ordering_aliases)]
            return cursor.fetchone()
        # The MULTI case.
        if self.query.ordering_aliases:
            result = order_modified_iter(cursor, len(self.query.ordering_aliases),
                    self.connection.features.empty_fetchmany_value)
        else:
            result = iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
                    self.connection.features.empty_fetchmany_value)
        if not self.connection.features.can_use_chunked_reads:
            # If we are using non-chunked reads, we return the same data
            # structure as normally, but ensure it is all read into memory
            # before going any further.
            return list(result)
        return result
class SQLInsertCompiler(SQLCompiler):
    """Compiles and executes the SQL for INSERT statements."""
    def placeholder(self, field, val):
        """
        Return the SQL placeholder to use for ``val`` in column ``field``.
        """
        if field is None:
            # A field value of None means the value is raw.
            return val
        elif hasattr(field, 'get_placeholder'):
            # Some fields (e.g. geo fields) need special munging before
            # they can be inserted.
            return field.get_placeholder(val, self.connection)
        else:
            # Return the common case for the placeholder
            return '%s'
    def as_sql(self):
        """
        Build the INSERT statement(s). Returns a list of (sql, params)
        pairs -- a single pair when bulk insert or id-returning insert is
        used, otherwise one pair per object being inserted.
        """
        # We don't need quote_name_unless_alias() here, since these are all
        # going to be column names (so we can avoid the extra overhead).
        qn = self.connection.ops.quote_name
        opts = self.query.model._meta
        result = ['INSERT INTO %s' % qn(opts.db_table)]
        has_fields = bool(self.query.fields)
        # With no explicit fields, only the pk's default value is inserted.
        fields = self.query.fields if has_fields else [opts.pk]
        result.append('(%s)' % ', '.join([qn(f.column) for f in fields]))
        if has_fields:
            params = values = [
                [
                    f.get_db_prep_save(getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True), connection=self.connection)
                    for f in fields
                ]
                for obj in self.query.objs
            ]
        else:
            values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs]
            params = [[]]
            fields = [None]
        # Bulk insert requires plain '%s' placeholders for every field and
        # is incompatible with returning the inserted id.
        can_bulk = (not any(hasattr(field, "get_placeholder") for field in fields) and
            not self.return_id and self.connection.features.has_bulk_insert)
        if can_bulk:
            placeholders = [["%s"] * len(fields)]
        else:
            placeholders = [
                [self.placeholder(field, v) for field, v in zip(fields, val)]
                for val in values
            ]
        if self.return_id and self.connection.features.can_return_id_from_insert:
            params = params[0]
            col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
            result.append("VALUES (%s)" % ", ".join(placeholders[0]))
            r_fmt, r_params = self.connection.ops.return_insert_id()
            result.append(r_fmt % col)
            params += r_params
            return [(" ".join(result), tuple(params))]
        if can_bulk:
            result.append(self.connection.ops.bulk_insert_sql(fields, len(values)))
            return [(" ".join(result), tuple([v for val in values for v in val]))]
        else:
            return [
                (" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
                for p, vals in zip(placeholders, params)
            ]
    def execute_sql(self, return_id=False):
        """
        Execute the insert(s). If ``return_id`` is True (only valid when
        inserting a single object), return the database-assigned id.
        """
        assert not (return_id and len(self.query.objs) != 1)
        self.return_id = return_id
        cursor = self.connection.cursor()
        for sql, params in self.as_sql():
            cursor.execute(sql, params)
        if not (return_id and cursor):
            return
        if self.connection.features.can_return_id_from_insert:
            return self.connection.ops.fetch_returned_insert_id(cursor)
        # Fall back to asking the backend for the last inserted id.
        return self.connection.ops.last_insert_id(cursor,
                self.query.model._meta.db_table, self.query.model._meta.pk.column)
class SQLDeleteCompiler(SQLCompiler):
    """Compiles the SQL for DELETE statements."""
    def as_sql(self):
        """
        Build the DELETE statement for this query. Returns the SQL string
        and a tuple of parameters.
        """
        assert len(self.query.tables) == 1, \
            "Can only delete from one table at a time."
        qn = self.quote_name_unless_alias
        where_sql, where_params = self.query.where.as_sql(
            qn=qn, connection=self.connection)
        sql = 'DELETE FROM %s WHERE %s' % (qn(self.query.tables[0]), where_sql)
        return sql, tuple(where_params)
class SQLUpdateCompiler(SQLCompiler):
    """Compiles and executes the SQL for UPDATE statements."""
    def as_sql(self):
        """
        Creates the SQL for this query. Returns the SQL string and list of
        parameters.
        """
        self.pre_sql_setup()
        if not self.query.values:
            # Nothing to update.
            return '', ()
        table = self.query.tables[0]
        qn = self.quote_name_unless_alias
        result = ['UPDATE %s' % qn(table)]
        result.append('SET')
        values, update_params = [], []
        for field, model, val in self.query.values:
            if hasattr(val, 'prepare_database_save'):
                # Model instances know how to turn themselves into a
                # savable value for the given field.
                val = val.prepare_database_save(field)
            else:
                val = field.get_db_prep_save(val, connection=self.connection)
            # Getting the placeholder for the field.
            if hasattr(field, 'get_placeholder'):
                placeholder = field.get_placeholder(val, self.connection)
            else:
                placeholder = '%s'
            if hasattr(val, 'evaluate'):
                # Expression objects (e.g. F() arithmetic) are compiled to
                # SQL via an evaluator rather than passed as parameters.
                val = SQLEvaluator(val, self.query, allow_joins=False)
            name = field.column
            if hasattr(val, 'as_sql'):
                sql, params = val.as_sql(qn, self.connection)
                values.append('%s = %s' % (qn(name), sql))
                update_params.extend(params)
            elif val is not None:
                values.append('%s = %s' % (qn(name), placeholder))
                update_params.append(val)
            else:
                values.append('%s = NULL' % qn(name))
        if not values:
            return '', ()
        result.append(', '.join(values))
        where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
        if where:
            result.append('WHERE %s' % where)
        return ' '.join(result), tuple(update_params + params)
    def execute_sql(self, result_type):
        """
        Execute the specified update. Returns the number of rows affected by
        the primary update query. The "primary update query" is the first
        non-empty query that is executed. Row counts for any subsequent,
        related queries are not available.
        """
        cursor = super(SQLUpdateCompiler, self).execute_sql(result_type)
        rows = cursor and cursor.rowcount or 0
        is_empty = cursor is None
        del cursor
        for query in self.query.get_related_updates():
            aux_rows = query.get_compiler(self.using).execute_sql(result_type)
            if is_empty:
                # First non-empty query's row count becomes the result.
                rows = aux_rows
                is_empty = False
        return rows
    def pre_sql_setup(self):
        """
        If the update depends on results from other tables, we need to do some
        munging of the "where" conditions to match the format required for
        (portable) SQL updates. That is done here.

        Further, if we are going to be running multiple updates, we pull out
        the id values to update at this point so that they don't change as a
        result of the progressive updates.
        """
        self.query.select_related = False
        self.query.clear_ordering(True)
        super(SQLUpdateCompiler, self).pre_sql_setup()
        count = self.query.count_active_tables()
        if not self.query.related_updates and count == 1:
            # Simple single-table update; nothing more to do.
            return
        # We need to use a sub-select in the where clause to filter on things
        # from other tables.
        query = self.query.clone(klass=Query)
        query.bump_prefix()
        query.extra = {}
        query.select = []
        query.add_fields([query.model._meta.pk.name])
        must_pre_select = count > 1 and not self.connection.features.update_can_self_select
        # Now we adjust the current query: reset the where clause and get rid
        # of all the tables we don't need (since they're in the sub-select).
        self.query.where = self.query.where_class()
        if self.query.related_updates or must_pre_select:
            # Either we're using the idents in multiple update queries (so
            # don't want them to change), or the db backend doesn't support
            # selecting from the updating table (e.g. MySQL).
            idents = []
            for rows in query.get_compiler(self.using).execute_sql(MULTI):
                idents.extend([r[0] for r in rows])
            self.query.add_filter(('pk__in', idents))
            self.query.related_ids = idents
        else:
            # The fast path. Filters and updates in one query.
            self.query.add_filter(('pk__in', query))
        # Drop every table but the one being updated from the main query.
        for alias in self.query.tables[1:]:
            self.query.alias_refcount[alias] = 0
class SQLAggregateCompiler(SQLCompiler):
    """Compiles aggregate queries that wrap an inner query in a subselect."""
    def as_sql(self, qn=None):
        """
        Build the aggregate SELECT over the stored subquery. Returns the
        SQL string and the list of parameters.
        """
        if qn is None:
            qn = self.quote_name_unless_alias
        aggregate_sql = ', '.join(
            aggregate.as_sql(qn, self.connection)
            for aggregate in self.query.aggregate_select.values())
        sql = 'SELECT %s FROM (%s) subquery' % (aggregate_sql,
                                                self.query.subquery)
        return (sql, self.query.sub_params)
class SQLDateCompiler(SQLCompiler):
    """Compiles date-extraction queries; yields one date per result row."""
    def results_iter(self):
        """
        Returns an iterator over the date values produced by this query.
        """
        can_resolve = hasattr(self, 'resolve_columns')
        if can_resolve:
            from django.db.models.fields import DateTimeField
            date_fields = [DateTimeField()]
        else:
            from django.db.backends.util import typecast_timestamp
            needs_string_cast = self.connection.features.needs_datetime_string_cast
        # The date column follows any extra-select columns in the row.
        col_offset = len(self.query.extra_select)
        for result_block in self.execute_sql(MULTI):
            for record in result_block:
                value = record[col_offset]
                if can_resolve:
                    value = self.resolve_columns(record, date_fields)[col_offset]
                elif needs_string_cast:
                    value = typecast_timestamp(str(value))
                yield value
def empty_iter():
    """
    Returns an iterator (generator) containing no results.
    """
    # The original body was `yield iter([]).next()`, which relies on the
    # Python-2-only `.next()` method. An immediate `return` before the
    # (unreachable) `yield` produces the same empty generator portably.
    return
    yield  # pragma: no cover -- unreachable; marks this as a generator
def order_modified_iter(cursor, trim, sentinel):
    """
    Yields blocks of rows from a cursor, stripping the last ``trim`` columns
    from every row. We use this iterator in the special case when extra
    output columns have been added only to support ordering requirements;
    those columns must be removed before anything else consumes the results,
    since they exist solely to make the SQL valid.
    """
    def fetch_block():
        return cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)
    for block in iter(fetch_block, sentinel):
        yield [record[:-trim] for record in block]
|
|
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.db.models import Q
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.views.generic import TemplateView
from .models import *
@login_required(login_url="login/")
def home(request):
    """Redirect the landing page to the employees list in the request's language."""
    if request.LANGUAGE_CODE == 'pl':
        target = '/pl/employees'
    else:
        target = '/en/employees'
    return HttpResponseRedirect(target)
class PlayersView(TemplateView):
    """List all players, optionally sorted by the ``sort``/``desc`` GET params."""
    template_name = "players.html"
    page = 'players'

    def get_context_data(self, **kwargs):
        context = super(PlayersView, self).get_context_data(**kwargs)
        context['page'] = self.page
        sort_field = self.request.GET.get('sort')
        roster = Players.objects.all()
        if sort_field:
            if self.request.GET.get('desc') == 'yes':
                roster = roster.order_by('-' + sort_field)
            else:
                roster = roster.order_by(sort_field)
            context['sort_param'] = sort_field
        context['playersfields'] = roster
        return context

    @classmethod
    def filter_players(cls, request):
        """Render the players list filtered by a name/surname/position search."""
        phrase = request.GET.get('search_phrase', '')
        matches = Players.objects.filter(
            Q(name__icontains=phrase) |
            Q(surname__icontains=phrase) |
            Q(position__name__icontains=phrase))
        return render(request, "players.html",
                      {'playersfields': matches,
                       'search_phrase': phrase,
                       'page': 'players'})
class PlayerDetailsView(TemplateView):
    """Details page for a single player, looked up by URL slug."""
    template_name = "player_details.html"
    page = 'player'
    player_slug = None

    def get(self, request, *args, **kwargs):
        # Remember the slug from the URL before delegating to TemplateView.
        self.player_slug = kwargs.get('player_slug')
        return super(PlayerDetailsView, self).get(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super(PlayerDetailsView, self).get_context_data(**kwargs)
        context.update({
            'page': self.page,
            'player': Players.objects.get(slug=self.player_slug),
            'countries': COUNTRIES,
            'legs': Players.BETTER_LEG,
            'injuries': Players.INJURY,
        })
        return context
class EmployeesView(LoginRequiredMixin, TemplateView):
    """List employees that have a role assigned; supports GET-param sorting."""
    template_name = "employees.html"
    page = 'employees'
    login_url = "/login"

    def get_context_data(self, **kwargs):
        context = super(EmployeesView, self).get_context_data(**kwargs)
        context['page'] = self.page
        sort_field = self.request.GET.get('sort')
        staff = Employees.objects.exclude(role_id=None)
        if sort_field:
            if self.request.GET.get('desc') == 'yes':
                staff = staff.order_by('-' + sort_field)
            else:
                staff = staff.order_by(sort_field)
            context['sort_param'] = sort_field
        context['employeesfields'] = staff
        return context

    @classmethod
    def filter_employees(cls, request):
        """Render the employees list filtered by a name/role search phrase."""
        phrase = request.GET.get('search_phrase', '')
        matches = Employees.objects.filter(
            Q(first_name__icontains=phrase) |
            Q(last_name__icontains=phrase) |
            Q(role__name__icontains=phrase))
        return render(request, "employees.html",
                      {'employeesfields': matches,
                       'search_phrase': phrase,
                       'page': 'employees'})
class EmployeeDetailsView(LoginRequiredMixin, TemplateView):
    """Details page for a single employee (login required), looked up by slug."""
    template_name = "employee_details.html"
    page = 'employees'
    employee_slug = None
    login_url = "/login"

    def get(self, request, *args, **kwargs):
        # Remember the slug from the URL before delegating to TemplateView.
        self.employee_slug = kwargs.get('employee_slug')
        return super(EmployeeDetailsView, self).get(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super(EmployeeDetailsView, self).get_context_data(**kwargs)
        context.update({
            'page': self.page,
            'employee': Employees.objects.get(slug=self.employee_slug),
            'countries': COUNTRIES,
        })
        return context
class TrophiesView(TemplateView):
    """List all trophies, optionally sorted by the ``sort``/``desc`` GET params."""
    template_name = "trophies.html"
    page = 'trophies'

    def get_context_data(self, **kwargs):
        context = super(TrophiesView, self).get_context_data(**kwargs)
        context['page'] = self.page
        sort_field = self.request.GET.get('sort')
        trophies = Trophies.objects.all()
        if sort_field:
            if self.request.GET.get('desc') == 'yes':
                trophies = trophies.order_by('-' + sort_field)
            else:
                trophies = trophies.order_by(sort_field)
            context['sort_param'] = sort_field
        context['trophiesfields'] = trophies
        return context

    @classmethod
    def filter_trophies(cls, request):
        """Render the trophies list filtered by name (localized for Polish)."""
        phrase = request.GET.get('search_phrase', '')
        if request.LANGUAGE_CODE == 'pl':
            matches = Trophies.objects.filter(name_pl__icontains=phrase)
        else:
            matches = Trophies.objects.filter(name__icontains=phrase)
        return render(request, "trophies.html",
                      {'trophiesfields': matches,
                       'search_phrase': phrase,
                       'page': 'trophies'})
class LineupsView(TemplateView):
    """Overview page listing every lineup."""
    template_name = "lineups.html"
    page = 'lineups'

    def get_context_data(self, **kwargs):
        context = super(LineupsView, self).get_context_data(**kwargs)
        context.update({
            'page': self.page,
            'lineupsfields': Lineups.objects.all(),
        })
        return context
class LineupDetailsView(LoginRequiredMixin, TemplateView):
    """
    Details page for one lineup (login required), showing its players and
    employees. Each table can be sorted independently; the chosen ordering
    for each table is remembered in the session ('players_sort' /
    'employees_sort') so that sorting one table does not reset the other.
    """
    template_name = "lineup_details.html"
    page = "lineups"
    lineup_id = None
    login_url = "/login"
    def get(self, request, *args, **kwargs):
        # Remember the lineup pk from the URL before delegating to TemplateView.
        self.lineup_id = kwargs.get('pk')
        return super(LineupDetailsView, self).get(request, *args, **kwargs)
    def get_context_data(self, **kwargs):
        # 'table' selects which of the two tables the sort applies to.
        sort_by = self.request.GET.get('sort')
        sort_table = self.request.GET.get('table')
        desc = self.request.GET.get('desc') == 'yes'
        context = super(LineupDetailsView, self).get_context_data(**kwargs)
        context['page'] = self.page
        context['object'] = Lineups.objects.get(id=self.lineup_id)
        context['lineups'] = Lineups.objects.all()
        if sort_by:
            context['sort_param'] = sort_by
            if sort_table == 'players':
                # Re-apply the employees table's previously saved ordering
                # (if any), then sort players and save that ordering.
                if self.request.session.get('employees_sort'):
                    context['employees'] = Employees.objects.filter(lineup_id=self.lineup_id).order_by(
                        self.request.session.get('employees_sort'))
                else:
                    context['employees'] = Employees.objects.filter(lineup_id=self.lineup_id)
                if desc:
                    self.request.session['players_sort'] = '-' + sort_by
                    context['players'] = Players.objects.filter(lineup_id=self.lineup_id).order_by('-' + sort_by)
                else:
                    self.request.session['players_sort'] = sort_by
                    context['players'] = Players.objects.filter(lineup_id=self.lineup_id).order_by(sort_by)
            else:
                # Mirror image: keep the players table's saved ordering and
                # sort the employees table, saving the new ordering.
                if self.request.session.get('players_sort'):
                    context['players'] = Players.objects.filter(lineup_id=self.lineup_id).order_by(
                        self.request.session.get('players_sort'))
                else:
                    context['players'] = Players.objects.filter(lineup_id=self.lineup_id)
                if desc:
                    self.request.session['employees_sort'] = '-' + sort_by
                    context['employees'] = Employees.objects.filter(lineup_id=self.lineup_id).order_by('-' + sort_by)
                else:
                    self.request.session['employees_sort'] = sort_by
                    context['employees'] = Employees.objects.filter(lineup_id=self.lineup_id).order_by(sort_by)
        else:
            # No sort requested: default ordering for both tables.
            context['players'] = Players.objects.filter(lineup_id=self.lineup_id)
            context['employees'] = Employees.objects.filter(lineup_id=self.lineup_id)
        return context
class GamesView(TemplateView):
    """
    Games list for the season/fixture pair taken from the URL.

    Supports column sorting via the ``sort``/``desc`` GET parameters, and a
    ``filter_games`` endpoint that searches by home/away team name.
    """
    template_name = "games.html"
    page = 'games'
    season_id = None
    fixture_id = None

    def get(self, request, *args, **kwargs):
        # Capture URL kwargs on the instance before rendering.
        self.season_id = kwargs.get('season_id')
        self.fixture_id = kwargs.get('fixture_id')
        return super(GamesView, self).get(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        sort_by = self.request.GET.get('sort')
        desc = self.request.GET.get('desc') == 'yes'
        context = super(GamesView, self).get_context_data(**kwargs)
        context['page'] = self.page
        # Offer only seasons/fixtures that actually have games as filters.
        context['seasons'] = Seasons.objects.filter(
            pk__in=Games.objects.all().values_list('season_id'))
        context['fixtures'] = Fixtures.objects.filter(pk__in=Games.objects.filter(
            season_id=self.season_id).values_list('fixture_id'))
        context['season_id'] = self.season_id
        context['fixture_id'] = self.fixture_id
        games = Games.objects.filter(season_id=self.season_id,
                                     fixture_id=self.fixture_id)
        if sort_by:
            context['gamesfields'] = games.order_by(('-' + sort_by) if desc else sort_by)
            context['sort_param'] = sort_by
        else:
            # Default ordering: newest games first.
            context['gamesfields'] = games.order_by('-game_date', '-game_hour')
        return context

    @classmethod
    def filter_games(cls, request, **kwargs):
        """Render the games list filtered by a team-name search phrase."""
        # Use local variables instead of mutating class attributes: the
        # original assigned to ``cls.season_id``/``cls.fixture_id``, leaking
        # state between concurrent requests.
        season_id = kwargs.get('season_id')
        fixture_id = kwargs.get('fixture_id')
        search_phrase = request.GET.get('search_phrase', '')
        filtered_games = Games.objects.filter(
            Q(homeTeam__name__icontains=search_phrase,
              season_id=season_id, fixture_id=fixture_id) |
            Q(awayTeam__name__icontains=search_phrase,
              season_id=season_id, fixture_id=fixture_id)
        )
        context = {
            'page': 'games',
            'gamesfields': filtered_games,
            # Key fixed from the misspelled 'search_pharse': the other filter
            # views all expose 'search_phrase', so the template never saw the
            # phrase under the old key.
            'search_phrase': search_phrase,
            'seasons': Seasons.objects.all(),
            'fixtures': Fixtures.objects.filter(pk__in=Games.objects.filter(
                season_id=season_id).values_list('fixture_id')),
            'season_id': season_id,
            'fixture_id': fixture_id,
        }
        return render(request, "games.html", context)
class GameDetailsView(TemplateView):
    """Details page for a single game."""
    template_name = "game_details.html"
    page = 'game_details'
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration adding image fields 'pic_25'..'pic_33' to blogs.Post.

    forwards() adds one nullable sorl-thumbnail ImageField column per name
    in NEW_PIC_FIELDS; backwards() drops the same columns.
    """

    # Column names added by this migration; shared by forwards/backwards so
    # the two directions cannot drift apart.
    NEW_PIC_FIELDS = ['pic_%d' % i for i in range(25, 34)]

    def forwards(self, orm):
        """Apply: add the new nullable image columns to 'blogs_post'."""
        for name in self.NEW_PIC_FIELDS:
            db.add_column('blogs_post', name,
                          self.gf('sorl.thumbnail.fields.ImageField')(
                              max_length=100, null=True, blank=True),
                          keep_default=False)

    def backwards(self, orm):
        """Revert: drop every column added by forwards()."""
        for name in self.NEW_PIC_FIELDS:
            db.delete_column('blogs_post', name)

    # Frozen ORM state used by South at migration time (auto-generated).
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'blogs.blog': {
            'Meta': {'object_name': 'Blog'},
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'custom_domain': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
            'draft_notice': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'facebook_link': ('django.db.models.fields.URLField', [], {'max_length': '100', 'blank': 'True'}),
            'has_artists': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'header_image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_bootblog': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_online': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_open': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '7', 'blank': 'True'}),
            'main_color': ('django.db.models.fields.CharField', [], {'default': "'#ff7f00'", 'max_length': '10'}),
            'moderator_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'pinterest_link': ('django.db.models.fields.URLField', [], {'max_length': '100', 'blank': 'True'}),
            'short_description': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '30'}),
            'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Template']", 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '240'}),
            'translation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True', 'blank': 'True'}),
            'twitter_link': ('django.db.models.fields.URLField', [], {'max_length': '100', 'blank': 'True'}),
            'twitter_oauth_token': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'twitter_oauth_token_secret': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
        },
        'blogs.category': {
            'Meta': {'object_name': 'Category'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Blog_category'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
            'cat_image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'cat_image_caret': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'cat_image_email': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'cat_image_fb': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'cat_image_pint': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'cat_image_tw': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'color': ('django.db.models.fields.CharField', [], {'default': "'#000000'", 'max_length': '10'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
            'parent_category': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_category'", 'null': 'True', 'to': "orm['blogs.Category']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'blank': 'True'})
        },
        'blogs.comment': {
            'Meta': {'object_name': 'Comment'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'Comment_author'", 'null': 'True', 'to': "orm['auth.User']"}),
            'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
            'comment': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
            'comment_status': ('django.db.models.fields.CharField', [], {'default': "'pe'", 'max_length': '2'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
            'notify_me': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'occupation': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Post']", 'null': 'True'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'})
        },
        'blogs.info_email': {
            'Meta': {'object_name': 'Info_email'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'frequency': ('django.db.models.fields.CharField', [], {'default': "'We'", 'max_length': '2', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'subject': ('django.db.models.fields.TextField', [], {'max_length': '100', 'blank': 'True'}),
            'subscribers': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '2', 'null': 'True'})
        },
        'blogs.language': {
            'Meta': {'object_name': 'Language'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language_code': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
            'language_name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
        },
        'blogs.menu': {
            'Meta': {'object_name': 'Menu'},
            'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Blog_menu'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_main': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
        },
        'blogs.menuitem': {
            'Meta': {'object_name': 'MenuItem'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Category']", 'null': 'True', 'blank': 'True'}),
            'external_link': ('django.db.models.fields.URLField', [], {'max_length': '140', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'menu': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Menu']", 'null': 'True'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Page']", 'null': 'True', 'blank': 'True'}),
            'position': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'})
        },
        'blogs.page': {
            'Meta': {'object_name': 'Page'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Blog_page'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
            'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'})
        },
        'blogs.post': {
            'Meta': {'object_name': 'Post'},
            'artist': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'base62id': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
            'category': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['blogs.Category']", 'null': 'True', 'blank': 'True'}),
            'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_0': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_01': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_1': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_2': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_3': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_4': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_5': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_6': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_video': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_discarded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_ready': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_sticky': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_top': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'karma': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'layout_type': ('django.db.models.fields.CharField', [], {'default': "'s'", 'max_length': '1'}),
            'message': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'pic': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_0': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_04': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_1': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_10': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_11': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_12': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_13': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_14': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_15': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_16': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_17': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_18': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_19': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_2': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_20': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_21': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_22': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_23': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_24': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_25': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_26': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_27': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_28': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_29': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_3': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_30': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_31': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_32': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_33': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_4': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_5': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_6': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_7': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_8': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_9': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'publish_on_facebook': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'blank': 'True'}),
            'source': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '2', 'null': 'True'}),
            'tag': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['blogs.Tag']", 'null': 'True', 'blank': 'True'}),
            'text': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'translated_content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'translated_title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'video': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'video_ogg': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'video_url': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
            'views': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
            'vimeo_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'youtube_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
        },
        'blogs.rss': {
            'Meta': {'object_name': 'Rss'},
            'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Blog_rss'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
            'feed_url': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'blogs.subscription': {
            'Meta': {'object_name': 'Subscription'},
            'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_new': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
        },
        'blogs.tag': {
            'Meta': {'object_name': 'Tag'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Blog_tag'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '140'})
        },
        'blogs.template': {
            'Meta': {'object_name': 'Template'},
            'archives': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'base': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'category': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
            'single': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'})
        },
        'blogs.translation': {
            'Meta': {'object_name': 'Translation'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'origin_blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Translation_origin_blog'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
            'translated_blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Translation_translated_blog'", 'null': 'True', 'to': "orm['blogs.Blog']"})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['blogs']
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import shlex
import sys
from subprocess import Popen, PIPE, \
check_call, CalledProcessError
# Name of the environmental variable holding the branch against which
# unmerged commits are counted (read by main()).
ENV_MERGE_BRANCH = 'ZSH_VCS_PROMPT_MERGE_BRANCH'

# Git error messages, matched against exception text to classify failures
# (see main() and check_before_running()).
ERR_MSG_NO_BRANCH = 'fatal: ref HEAD is not a symbolic ref'
ERR_MSG_UNKNOWN_OPTION_SHORT = "error: unknown option `short'"
# %s placeholders are filled with (merge_branch, branch) before matching.
ERRO_MSG_AMBIGUOUS_ARGUMENT = \
    "fatal: ambiguous argument '%s..%s': " \
    + "unknown revision or path not in the working tree."
class Cmd(object):
    """Run an external command asynchronously and collect its output.

    The process is started in __init__ (so several commands can run in
    parallel); get_result() waits for completion and returns stdout.
    """

    def __init__(self, cmd, exargs=None, ignore_error=False):
        self.ignore_error = ignore_error
        argv = shlex.split(cmd)
        if exargs:
            argv.extend(exargs)
        self.cmd = argv
        self.process = Popen(self.cmd, stdout=PIPE, stderr=PIPE)

    def get_result(self):
        """Wait for the process; return decoded stdout, or '' if empty.

        Raises Exception on a non-zero exit unless ignore_error was set.
        """
        stdout_data, stderr_data = self.process.communicate()
        if not self.ignore_error:
            self._check_error(stderr_data)
        if not stdout_data:
            return ''
        try:
            return str(stdout_data, 'utf-8')
        except TypeError:
            # Python 2: stdout is already a (byte) str.
            return stdout_data

    def _check_error(self, stderr_data):
        """Raise Exception('Error(<code>): <stderr>') on non-zero exit."""
        code = self.process.returncode
        if code == 0:
            return
        if stderr_data:
            try:
                stderr_data = str(stderr_data, 'utf-8')
            except TypeError:
                pass
        else:
            stderr_data = 'Unknown error'
        raise Exception('Error(%d): %s' % (code, stderr_data))
def check_before_running():
    """Raise Exception unless git is installed and we are in a work tree."""
    try:
        # `type` is a shell builtin, hence shell=True.
        check_call('type git >/dev/null 2>&1', shell=True)
    except CalledProcessError as err:
        exc_type, exc_value = sys.exc_info()[:2]
        raise Exception('Git is not installed (%s)\n%s: %s'
                        % (err.returncode, exc_type.__name__, exc_value))
    if Cmd('git rev-parse --is-inside-work-tree').get_result() == 'false':
        raise Exception('Not inside git work tree')
def main():
    """Print git status metrics for the zsh VCS prompt, one per line.

    Output order: branch, ahead, behind, staged, conflicts, unstaged,
    untracked, stashed, clean flag ('1'/'0'), unmerged.

    Returns 1 (printing nothing) when HEAD is detached (not on a branch)
    or when there are merge conflicts.
    """
    # check_before_running()
    # Start a process to get the full name of the current branch
    # (like refs/heads/master).
    p_branch_ref = Cmd('git symbolic-ref HEAD')
    # Start a process to get the git top directory.
    p_top_dir = Cmd('git rev-parse --show-toplevel')
    # Start a process to get the current branch name.
    p_branch = Cmd('git symbolic-ref --short HEAD')
    # Get the full name of the current branch (like refs/heads/master).
    try:
        branch_ref = p_branch_ref.get_result().strip()
    except Exception as e:
        if ERR_MSG_NO_BRANCH in str(e):
            # Not on any branch (detached HEAD).
            return 1
        raise
    # Start a process to get the tracking branch.
    p_tracking_branch = Cmd('git for-each-ref --format="%%(upstream:short)" %s'
                            % branch_ref, ignore_error=True)
    # Get the merge branch from the environmental variable (falsy if unset).
    merge_branch = os.environ.get(ENV_MERGE_BRANCH)
    # Change directory to the git top directory so the file listings below
    # cover the whole repository.
    os.chdir(p_top_dir.get_result().strip())
    # Get the current branch name.
    branch = ''
    try:
        # Old versions of git do not support the option --short.
        branch = p_branch.get_result().strip()
    except Exception as e:
        if ERR_MSG_NO_BRANCH in str(e):
            # Not on any branch.
            return 1
        elif ERR_MSG_UNKNOWN_OPTION_SHORT in str(e):
            # Fall back: strip the leading 'refs/heads/' (11 chars).
            branch = branch_ref[11:]
        else:
            raise
    # Get the tracking branch.
    tracking_branch = p_tracking_branch.get_result().strip()
    # Start the remaining status processes so they run in parallel.
    p_staged_files = Cmd('git diff --staged --name-status')
    p_unstaged_files = Cmd('git diff --name-status')
    p_untracked_files = Cmd('git ls-files --others --exclude-standard')
    p_stash_list = Cmd('git stash list')
    p_unmerged_list = None
    if merge_branch and not branch == merge_branch:
        p_unmerged_list = Cmd('git rev-list %s..%s' % (merge_branch, branch))
    p_behind_ahead = None
    if tracking_branch:
        p_behind_ahead = Cmd('git rev-list --left-right --count %s...HEAD'
                             % tracking_branch)
    # Count staged files (first status letter of each --name-status line).
    try:
        staged_files = p_staged_files.get_result().splitlines()
        staged_files = [namestat[0] for namestat in staged_files]
    except Exception:
        # Old versions of git do not support --staged; fall back to
        # parsing `git status` output instead.
        staged_files = []
        git_status = Cmd('git status --short --porcelain').get_result().splitlines()
        for namestat in git_status:
            if namestat[0] in ['U', 'M', 'A', 'D', 'R', 'C']:
                staged_files.append(namestat[0])
    # Count conflicts ('U' = unmerged).
    conflicts = staged_files.count('U')
    if conflicts > 0:
        return 1
    staged = len(staged_files) - conflicts
    # Count unstaged files.
    unstaged_files = p_unstaged_files.get_result()
    unstaged_files = [namestat[0] for namestat in unstaged_files.splitlines()]
    unstaged = len(unstaged_files) - unstaged_files.count('U')
    # Count untracked files.
    untracked_files = p_untracked_files.get_result()
    untracked_files = untracked_files.splitlines()
    untracked = len(untracked_files)
    # Count stash entries.
    stash_list = p_stash_list.get_result()
    stashed = len(stash_list.splitlines())
    # Check if the work tree is clean.
    if not unstaged and not staged and not conflicts and not untracked:
        clean = '1'
    else:
        clean = '0'
    # Count commits behind/ahead of the remote tracking branch.
    ahead = '0'
    behind = '0'
    if p_behind_ahead:
        try:
            behind_ahead = p_behind_ahead.get_result().split()
            behind = behind_ahead[0]
            ahead = behind_ahead[1]
        except Exception:
            # If the option --count is unsupported, count the '<'/'>'
            # markers from a plain --left-right listing.
            behind_ahead = Cmd('git rev-list --left-right %s...HEAD'
                               % tracking_branch).get_result().splitlines()
            ahead = len([x for x in behind_ahead if x[0] == '>'])
            behind = len(behind_ahead) - ahead
    # Count commits not yet merged into the merge branch.
    unmerged = '0'
    if p_unmerged_list:
        try:
            unmerged_list = p_unmerged_list.get_result().splitlines()
        except Exception as e:
            # The merge branch may not exist in this repository; any other
            # failure is re-raised.
            err_msg = ERRO_MSG_AMBIGUOUS_ARGUMENT % (merge_branch, branch)
            if err_msg not in str(e):
                raise
        else:
            unmerged = len(unmerged_list)
    # Result: one metric per line.
    out = '\n'.join([
        branch,
        str(ahead),
        str(behind),
        str(staged),
        str(conflicts),
        str(unstaged),
        str(untracked),
        str(stashed),
        clean,
        str(unmerged)])
    print(out)
if __name__ == '__main__':
    # Exit with main()'s return code (1 when not on a branch or conflicted;
    # None/0 on success).
    sys.exit(main())
|
|
from unittest import TestCase
from os.path import dirname, join
import dark
from dark.genomes import GenomeProteinInfo
from dark.civ.proteins import SqliteIndex
# Directory two levels above the installed dark package; test data for the
# HBV genome lives under <TOP>/test/data/hbv.
TOP = dirname(dirname(dark.__file__))
# Protein database and the three query BAM files used by the tests below.
DB = SqliteIndex(join(TOP, 'test', 'data', 'hbv', 'hbv-proteins.db'))
BAM1 = join(TOP, 'test', 'data', 'hbv', 'query1.bam')
BAM2 = join(TOP, 'test', 'data', 'hbv', 'query2.bam')
BAM3 = join(TOP, 'test', 'data', 'hbv', 'query3.bam')
class TestGenomeProteinInfo(TestCase):
"""
Test the GenomeProteinInfo class.
"""
def testLoadReference(self):
    """
    Test that everything is as expected after loading the genome file
    (before any SAM/BAM files are added).
    """
    gpi = GenomeProteinInfo('KJ586809.1', DB, True)
    self.assertEqual(gpi.genome['proteinCount'], len(gpi.proteins))
    # Every offset of this genome should fall inside at least one protein.
    nonProteinOffsets = (
        set(range(gpi.genome['length'])) - set(gpi.offsets))
    self.assertEqual(set(), nonProteinOffsets)
    self.assertEqual('KJ586809.1', gpi.genome['accession'])
    self.assertEqual('Hepatitis B virus strain P18, complete genome',
                     gpi.genome['name'])
def testLoadBAM1(self):
    """
    Test that everything is as expected after loading the BAM1 file.
    """
    gpi = GenomeProteinInfo('KJ586809.1', DB, True)
    gpi.addSAM(BAM1)
    # Genome covered offsets: query1 covers offsets 0-199, once each.
    self.assertEqual(200, len(gpi.coveredOffsetCount))
    self.assertEqual(list(range(200)), list(gpi.coveredOffsetCount))
    self.assertEqual([1] * 200, list(gpi.coveredOffsetCount.values()))
    # SAM files.
    self.assertEqual([BAM1], gpi.samFiles)
    self.assertEqual({'query1'}, gpi.readIdsMatchingGenome)
    # Protein accession numbers.
    expected = set(['AJF208%02d.1' % i for i in range(4, 11)])
    self.assertEqual(expected, set(gpi.proteins))
    # Offset 200 is in 4 proteins but is not matched by the query.
    self.assertEqual(set(), gpi.offsets[200]['readIds'])
    expected = set(['AJF208%02d.1' % i for i in range(4, 8)])
    self.assertEqual(expected, gpi.offsets[200]['proteinAccessions'])
    # Offset 0 is in 3 proteins and is matched by the query.
    self.assertEqual({'query1'}, gpi.offsets[0]['readIds'])
    expected = set(['AJF208%02d.1' % i for i in range(4, 7)])
    self.assertEqual(expected, gpi.offsets[0]['proteinAccessions'])
    # Read ids for all proteins.
    self.assertEqual({'query1'}, gpi.readIdsForAllProteins())
    # AJF20804.1 coverage (its ranges are 2306-3221 and 0-1623).
    info = gpi.proteinCoverageInfo('AJF20804.1')
    self.assertEqual(200, info['coveredOffsets'])
    self.assertEqual(200, info['totalBases'])
    self.assertEqual((3221 - 2306) + (1623 - 0), info['ntLength'])
    self.assertEqual({'query1'}, info['readIds'])
def testLoadBAM12(self):
    """
    Test that everything is as expected after loading the BAM1 and BAM2
    files.
    """
    gpi = GenomeProteinInfo('KJ586809.1', DB, True)
    gpi.addSAM(BAM1)
    gpi.addSAM(BAM2)
    # Genome covered offsets: query1 covers 0-199, query2 covers
    # 1400-1499; no offset is covered twice.
    self.assertEqual(300, len(gpi.coveredOffsetCount))
    self.assertEqual(list(range(200)) + list(range(1400, 1500)),
                     list(gpi.coveredOffsetCount))
    self.assertEqual([1] * 300, list(gpi.coveredOffsetCount.values()))
    # SAM files.
    self.assertEqual([BAM1, BAM2], gpi.samFiles)
    self.assertEqual({'query1', 'query2'}, gpi.readIdsMatchingGenome)
    # Protein accession numbers.
    expected = set(['AJF208%02d.1' % i for i in range(4, 11)])
    self.assertEqual(expected, set(gpi.proteins))
    # Offset 200 is in 4 proteins but is not matched by any query.
    self.assertEqual(set(), gpi.offsets[200]['readIds'])
    expected = set(['AJF208%02d.1' % i for i in range(4, 8)])
    self.assertEqual(expected, gpi.offsets[200]['proteinAccessions'])
    # Offset 0 is in 3 proteins and is matched by query1.
    self.assertEqual({'query1'}, gpi.offsets[0]['readIds'])
    expected = set(['AJF208%02d.1' % i for i in range(4, 7)])
    self.assertEqual(expected, gpi.offsets[0]['proteinAccessions'])
    # Offset 1400 is in 2 proteins and is matched by query2.
    self.assertEqual({'query2'}, gpi.offsets[1400]['readIds'])
    self.assertEqual({'AJF20804.1', 'AJF20808.1'},
                     gpi.offsets[1400]['proteinAccessions'])
    # Read ids for all proteins.
    self.assertEqual({'query1', 'query2'}, gpi.readIdsForAllProteins())
    # AJF20804.1 coverage (its ranges are 2306-3221 and 0-1623).
    info = gpi.proteinCoverageInfo('AJF20804.1')
    self.assertEqual(300, info['coveredOffsets'])
    self.assertEqual(300, info['totalBases'])
    self.assertEqual((3221 - 2306) + (1623 - 0), info['ntLength'])
    self.assertEqual({'query1', 'query2'}, info['readIds'])
def testLoadBAM123(self):
"""
Test that everything is as expected after loading the BAM1, BAM2,
and BAM3 files.
"""
gpi = GenomeProteinInfo('KJ586809.1', DB, True)
gpi.addSAM(BAM1)
gpi.addSAM(BAM2)
gpi.addSAM(BAM3)
# Genome covered offsets.
# There are 50 offsets that are covered twice.
self.assertEqual(750 - 50, len(gpi.coveredOffsetCount))
self.assertEqual(set(range(200)) | set(range(1000, 1500)),
set(gpi.coveredOffsetCount))
self.assertEqual(set([1] * 700 + [2] * 50),
set(gpi.coveredOffsetCount.values()))
# SAM files.
self.assertEqual([BAM1, BAM2, BAM3], gpi.samFiles)
self.assertEqual({'query1', 'query2', 'query3'},
gpi.readIdsMatchingGenome)
# Protein accession numbers.
expected = set(['AJF208%02d.1' % i for i in range(4, 11)])
self.assertEqual(expected, set(gpi.proteins))
# Offset 200 is in 4 proteins but is not matched by any query.
self.assertEqual(set(), gpi.offsets[200]['readIds'])
expected = set(['AJF208%02d.1' % i for i in range(4, 8)])
self.assertEqual(expected, gpi.offsets[200]['proteinAccessions'])
# Offset 0 is in 3 proteins and is matched by query1.
self.assertEqual({'query1'}, gpi.offsets[0]['readIds'])
expected = set(['AJF208%02d.1' % i for i in range(4, 7)])
self.assertEqual(expected, gpi.offsets[0]['proteinAccessions'])
# Offset 1400 is in 2 proteins and is matched by query2 and query3.
self.assertEqual({'query2', 'query3'}, gpi.offsets[1400]['readIds'])
self.assertEqual({'AJF20804.1', 'AJF20808.1'},
gpi.offsets[1400]['proteinAccessions'])
# Read ids for all proteins.
self.assertEqual({'query1', 'query2', 'query3'},
gpi.readIdsForAllProteins())
# AJF20804.1 coverage (its ranges are 2306-3221 and 0-1623)
info = gpi.proteinCoverageInfo('AJF20804.1')
self.assertEqual(700, info['coveredOffsets'])
self.assertEqual(750, info['totalBases'])
self.assertEqual((3221 - 2306) + (1623 - 0), info['ntLength'])
self.assertEqual({'query1', 'query2', 'query3'}, info['readIds'])
def testTooFewReadOffsetsBAM1(self):
"""
Test that a read is not returned as overlapping a protein unless it
meets the minimum number of required overlapping offsets.
"""
gpi = GenomeProteinInfo('KJ586809.1', DB, True)
gpi.addSAM(BAM1)
# Look at protein AJF20804.1 coverage (its ranges are 2306-3221 and
# 0-1623). There should be no matching reads because the query
# (query1) is only 200 nt long and so cannot match with at least
# 500 nucleotides. The number of covered offsets and total bases
# should both also be zero for the same reason.
info = gpi.proteinCoverageInfo('AJF20804.1', 500)
self.assertEqual(set(), info['readIds'])
self.assertEqual(0, info['totalBases'])
self.assertEqual(0, info['coveredOffsets'])
def testSufficientReadOffsetsBAM1(self):
"""
Test that a read is returned as overlapping a protein when it meets
the minimum number of required overlapping offsets.
"""
gpi = GenomeProteinInfo('KJ586809.1', DB, True)
gpi.addSAM(BAM1)
# Look at protein AJF20804.1 coverage (its ranges are 2306-3221 and
# 0-1623). The query (query1) must be returned as it has 200
# matching nucleotides.
info = gpi.proteinCoverageInfo('AJF20804.1', 199)
self.assertEqual({'query1'}, info['readIds'])
|
|
# -*- coding: utf-8 -*-

"""
    Request Management

    Controller functions for the Requests (req) module.
"""

# web2py controller/function names for the current HTTP request
module = request.controller
resourcename = request.function

# Refuse to serve this controller if the module is disabled in the
# deployment settings
if not deployment_settings.has_module(module):
    raise HTTP(404, body="Module disabled: %s" % module)
# -----------------------------------------------------------------------------
def index():
    """
        Application Home page
        - renders a custom View with the module's nice name
    """

    name_nice = deployment_settings.modules[module].name_nice
    response.title = name_nice
    return dict(module_name=name_nice)
# -----------------------------------------------------------------------------
def is_affiliated():
    """
        Return True if the logged-in user is affiliated to an Organisation
        (admins always count as affiliated), else False.

        @ToDo: Move this elsewhere
    """

    if not auth.is_logged_in():
        return False
    if s3_has_role(ADMIN):
        return True
    # Look up the user's organisation link
    utable = auth.settings.table_user
    auth_user = db(utable.id == auth.user.id).select(utable.organisation_id,
                                                     limitby=(0, 1)).first()
    return bool(auth_user and auth_user.organisation_id)
# =============================================================================
def create():
    """ Redirect to req/create to start a new Request """
    redirect(URL(f="req", args="create"))
# -----------------------------------------------------------------------------
def req():
    """
        REST Controller for Requests (req_req)

        Handles:
        - setting the site a request item is requested from (when called
          from the action buttons on the req/req_item_inv_item/x page)
        - pre-setting & locking the request type via ?default_type=
        - type-specific field visibility, labels, CRUD strings & filters
        - the commit component (items or people)
    """

    req_table = s3db.req_req

    # Set the req_item site_id (Requested From), called from action buttons
    # on the req/req_item_inv_item/x page
    if "req_item_id" in request.vars and "inv_item_id" in request.vars:
        inv_item = s3db.inv_inv_item[request.vars.inv_item_id]
        site_id = inv_item.site_id
        item_id = inv_item.item_id
        s3db.req_req_item[request.vars.req_item_id] = dict(site_id = site_id)
        response.confirmation = T("%(item)s requested from %(site)s" % {"item":s3db.supply_item_represent(item_id, show_link = False),
                                                                        "site":s3db.org_site_represent(site_id, show_link=False)
                                                                        })

    # Pre-set (and lock) the request type if the caller asked for one
    default_type = request.vars.default_type
    if default_type:
        type_field = req_table.type
        type_field.default = int(default_type)
        type_field.writable = False

    def prep(r):
        """ Pre-processor: configure fields/labels per request type """

        s3db.req_prep(r)

        # Remove type from list_fields
        list_fields = s3db.get_config("req_req", "list_fields")
        try:
            list_fields.remove("type")
        except (AttributeError, ValueError):
            # It has already been removed (ValueError).
            # This can happen if the req controller is called
            # for a second time, such as when printing reports.
            pass
        s3db.configure("req_req", list_fields=list_fields)

        if r.interactive:
            # Set Fields and Labels depending on type
            type = ( r.record and r.record.type ) or \
                   ( request.vars and request.vars.type )
            if type:
                type = int(type)
                req_table.type.default = int(type)

                # This prevents the type from being edited AFTER it is set
                req_table.type.readable = False
                req_table.type.writable = False

                crud_strings = settings.get_req_req_crud_strings(type)
                if crud_strings:
                    s3.crud_strings["req_req"] = crud_strings

                # Filter the query based on type
                if s3.filter:
                    s3.filter = s3.filter & \
                                (s3db.req_req.type == type)
                else:
                    s3.filter = (s3db.req_req.type == type)

            # @ToDo: apply these changes via JS for the create form where type is edittable
            if type == 1: # Item
                req_table.date_recv.readable = True
                req_table.date_recv.writable = True

                req_table.purpose.label = T("What the Items will be used for")
                req_table.site_id.label = T("Deliver To")
                req_table.request_for_id.label = T("Deliver To")
                req_table.recv_by_id.label = T("Delivered To")

            if type == 3: # Person
                req_table.date_required_until.readable = True
                req_table.date_required_until.writable = True

                req_table.purpose.label = T("Task Details")
                req_table.site_id.label = T("Report To")
                req_table.request_for_id.label = T("Report To")
                req_table.recv_by_id.label = T("Reported To")

            if r.method != "update" and r.method != "read":
                if not r.component:
                    # Hide fields which don't make sense in a Create form
                    # - includes one embedded in list_create
                    # - list_fields over-rides, so still visible within list itself
                    s3db.req_create_form_mods()

                    s3db.configure(s3db.req_req,
                                   create_next = URL(c="req",
                                                     f="req",
                                                     args=["[id]",
                                                           "req_item"
                                                          ]
                                                     )
                                   )

                    # Get the default Facility for this user
                    # @ToDo: Use site_id in User Profile (like current organisation_id)
                    if deployment_settings.has_module("hrm"):
                        hrtable = s3db.hrm_human_resource
                        query = (hrtable.person_id == s3_logged_in_person())
                        site = db(query).select(hrtable.site_id,
                                                limitby=(0, 1)).first()
                        if site:
                            r.table.site_id.default = site.site_id
                elif r.component.name == "document":
                    s3.crud.submit_button = T("Add")
                    table = r.component.table
                    # @ToDo: Fix for Link Table
                    #table.date.default = r.record.date
                    #if r.record.site_id:
                    #    stable = db.org_site
                    #    query = (stable.id == r.record.site_id)
                    #    site = db(query).select(stable.location_id,
                    #                            stable.organisation_id,
                    #                            limitby=(0, 1)).first()
                    #    if site:
                    #        table.location_id.default = site.location_id
                    #        table.organisation_id.default = site.organisation_id
                elif r.component.name == "req_item":
                    table = r.component.table
                    table.site_id.writable = table.site_id.readable = False
                    s3db.req_hide_quantities(table)
                elif r.component.name == "req_skill":
                    s3db.req_hide_quantities(r.component.table)

        if r.component and r.component.name == "commit":
            table = r.component.table
            # Allow commitments to be added when doing so as a component
            s3db.configure(table,
                           listadd = True)

            type = r.record.type
            if type == 1: # Items
                # Limit site_id to facilities the user has permissions for
                auth.permitted_facilities(table=r.table,
                                          error_msg=T("You do not have permission for any facility to make a commitment."))
                if r.interactive:
                    # Redirect to the Items tab after creation
                    s3db.configure(table,
                                   create_next = URL(c="req", f="commit",
                                                     args=["[id]", "commit_item"]),
                                   update_next = URL(c="req", f="commit",
                                                     args=["[id]", "commit_item"]))
            else:
                # Non-Item commits can have an Organisation
                # Check if user is affiliated to an Organisation
                if is_affiliated():
                    # Limit organisation_id to organisations the user has permissions for
                    auth.permitted_organisations(table=r.table,
                                                 redirect_on_error=False)
                    table.organisation_id.readable = True
                    table.organisation_id.writable = True
                else:
                    # Unaffiliated people can't commit on behalf of others
                    r.component.table.committer_id.writable = False
                    r.component.table.committer_id.comment = None

                # Non-Item commits shouldn't have a From Inventory
                # @ToDo: Assets do?
                table.site_id.readable = False
                table.site_id.writable = False
                if r.interactive and r.record.type == 3: # People
                    # Redirect to the Persons tab after creation
                    s3db.configure(table,
                                   create_next = URL(c="req", f="commit",
                                                     args=["[id]", "commit_person"]),
                                   update_next = URL(c="req", f="commit",
                                                     args=["[id]", "commit_person"]))
        else:
            # Limit site_id to facilities the user has permissions for
            # @ToDo: Non-Item requests shouldn't be bound to a Facility?
            auth.permitted_facilities(table=r.table,
                                      error_msg=T("You do not have permission for any facility to make a request."))

        return True
    s3.prep = prep

    # Post-process: add context-appropriate action buttons
    def postp(r, output):
        if r.interactive:
            s3_action_buttons(r)
            if not r.component:
                if deployment_settings.get_req_use_commit():
                    # This is appropriate to all
                    s3.actions.append(
                        dict(url = URL(c = "req",
                                       f = "req",
                                       args = ["[id]", "commit", "create"]),
                             _class = "action-btn",
                             label = str(T("Commit"))
                             )
                        )
                # This is only appropriate for item requests
                query = (r.table.type == 1)
                rows = db(query).select(r.table.id)
                restrict = [str(row.id) for row in rows]
                s3.actions.append(
                    dict(url = URL(c = "req",
                                   f = "req",
                                   args = ["[id]", "req_item"]),
                         _class = "action-btn",
                         label = str(T("View Items")),
                         restrict = restrict
                         )
                    )
            elif r.component.name == "req_item":
                req_item_inv_item_btn = dict(url = URL(c = "req",
                                                       f = "req_item_inv_item",
                                                       args = ["[id]"]
                                                       ),
                                             _class = "action-btn",
                                             label = str(T("Request from Facility")),
                                             )
                s3.actions.append(req_item_inv_item_btn)
            elif r.component.name == "req_skill":
                pass
            else:
                # We don't yet have other components
                pass
        return output
    s3.postp = postp

    output = s3_rest_controller("req", "req",
                                rheader=eden.req.req_rheader)
    return output
# =============================================================================
def req_item():
    """ REST Controller for Request Items """

    # Request items are only created via their request, never directly
    s3db.configure("req_req_item",
                   insertable=False)

    def prep(r):
        if r.interactive:
            if r.method not in (None, "update", "read"):
                # Hide fields which don't make sense in a Create form
                # - includes one embedded in list_create
                # - list_fields over-rides, so still visible within list itself
                s3db.req_hide_quantities(r.table)
        return True
    s3.prep = prep

    output = s3_rest_controller()

    inv_item_btn = dict(url = URL(c = "req",
                                  f = "req_item_inv_item",
                                  args = ["[id]"]
                                  ),
                        _class = "action-btn",
                        label = str(T("Request from Facility")),
                        )
    # s3.actions may be unset at this point
    if s3.actions:
        s3.actions.append(inv_item_btn)
    else:
        s3.actions = [inv_item_btn]
    return output
# -----------------------------------------------------------------------------
def req_item_packs():
    """
        Provide the pack options for the item of a given request item
        (Ajax lookup made by S3FilterFieldChange)
    """

    ptable = s3db.supply_item_pack
    ritable = s3db.req_req_item
    req_item_id = request.args[0]
    query = (ritable.id == req_item_id) & \
            (ritable.item_id == ptable.item_id)
    response.headers["Content-Type"] = "application/json"
    packs = db(query).select(ptable.id,
                             ptable.name,
                             ptable.quantity)
    return packs.json()
# -----------------------------------------------------------------------------
def req_item_inv_item():
    """
        Shows the inventory items which match a requested item

        Renders a summary of the request item plus one (or two, if
        alternative items are configured) embedded inventory item lists.

        @ToDo: Make this page a component of req_item
    """

    req_item_id = request.args[0]
    # Clear the args so the embedded REST controllers below don't see them
    request.args = []
    ritable = s3db.req_req_item
    req_item = ritable[req_item_id]
    rtable = s3db.req_req
    req = rtable[req_item.req_id]

    output = {}

    output["title"] = T("Request Stock from Available Warehouse")
    output["req_btn"] = A( T("Return to Request"),
                           _href = URL( c = "req",
                                        f = "req",
                                        args = [req_item.req_id, "req_item"]
                                        ),
                           _class = "action-btn"
                           )

    # Summary table of the request item being sourced
    output["req_item"] = TABLE( TR(
                                   TH( "%s: " % T("Requested By") ),
                                   rtable.site_id.represent(req.site_id),
                                   TH( "%s: " % T("Item")),
                                   ritable.item_id.represent(req_item.item_id),
                                   ),
                                TR(
                                   TH( "%s: " % T("Requester") ),
                                   rtable.requester_id.represent(req.requester_id),
                                   TH( "%s: " % T("Quantity")),
                                   req_item.quantity,
                                   ),
                                TR(
                                   TH( "%s: " % T("Date Requested") ),
                                   rtable.date.represent(req.date),
                                   # Fix: add the "%s: " prefix used by every
                                   # other header cell in this table
                                   TH( "%s: " % T("Quantity Committed")),
                                   req_item.quantity_commit,
                                   ),
                                TR(
                                   TH( "%s: " % T("Date Required") ),
                                   rtable.date_required.represent(req.date_required),
                                   TH( "%s: " % T("Quantity in Transit")),
                                   req_item.quantity_transit,
                                   ),
                                TR(
                                   TH( "%s: " % T("Priority") ),
                                   rtable.priority.represent(req.priority),
                                   TH( "%s: " % T("Quantity Fulfilled")),
                                   req_item.quantity_fulfil,
                                   )
                                )

    s3.no_sspag = True # pagination won't work with 2 datatables on one page @todo: test

    itable = s3db.inv_inv_item
    # Get list of matching inventory items
    s3.filter = (itable.item_id == req_item.item_id)
    # Tweak CRUD String for this context
    s3.crud_strings["inv_inv_item"].msg_list_empty = T("No Inventories currently have this item in stock")

    inv_items = s3_rest_controller("inv", "inv_item")
    output["items"] = inv_items["items"]

    if current.deployment_settings.get_supply_use_alt_name():
        # Get list of alternative inventory items
        atable = s3db.supply_item_alt
        query = (atable.item_id == req_item.item_id ) & \
                (atable.deleted == False )
        alt_item_rows = db(query).select(atable.alt_item_id)
        alt_item_ids = [alt_item_row.alt_item_id for alt_item_row in alt_item_rows]

        if alt_item_ids:
            s3.filter = (itable.item_id.belongs(alt_item_ids))
            inv_items_alt = s3_rest_controller("inv", "inv_item")
            output["items_alt"] = inv_items_alt["items"]
        else:
            output["items_alt"] = T("No Inventories currently have suitable alternative items in stock")

    response.view = "req/req_item_inv_item.html"
    s3.actions = [dict(url = URL(c = request.controller,
                                 f = "req",
                                 args = [req_item.req_id, "req_item"],
                                 vars = dict(req_item_id = req_item_id,
                                             inv_item_id = "[id]")
                                 ),
                       _class = "action-btn",
                       label = str(T("Request From")),
                       )]

    return output
# =============================================================================
def req_skill():
    """ REST Controller for Request Skills """
    # Defined in the Model for use from Multiple Controllers for unified menus
    return req_skill_controller()
# =============================================================================
def commit():
    """
        REST Controller for Commitments

        Restricts what unaffiliated users can commit, and limits commit
        items to the items of the request being committed against.
    """

    # Check if user is affiliated to an Organisation
    if not is_affiliated():
        tablename = "req_commit_person"
        table = s3db[tablename]
        # Unaffiliated people can't commit on behalf of others
        table.person_id.writable = False
        # & can only make single-person commitments
        # (This should have happened in the main commitment)
        s3db.configure(tablename,
                       insertable=False)

    def prep(r):
        # Pre-processor: per-record and per-component field configuration
        if r.interactive:
            # Commitments created through UI should be done via components
            # @ToDo: Block Direct Create attempts
            table = r.table
            #table.req_id.default = request.vars["req_id"]
            #table.req_id.writable = False

            if r.record:
                if r.record.type == 1: # Items
                    # Limit site_id to facilities the user has permissions for
                    auth.permitted_facilities(table=table,
                                              error_msg=T("You do not have permission for any facility to make a commitment.") )
                else:
                    # Non-Item commits can have an Organisation
                    # Limit organisation_id to organisations the user has permissions for
                    auth.permitted_organisations(table=r.table, redirect_on_error=False)
                    table.organisation_id.readable = True
                    table.organisation_id.writable = True
                    # Non-Item commits shouldn't have a From Inventory
                    # @ToDo: Assets do?
                    table.site_id.readable = False
                    table.site_id.writable = False

        if r.component:
            req_id = r.record.req_id
            if r.component.name == "commit_item":
                # Limit commit items to items from the request
                s3db.req_commit_item.req_item_id.requires = \
                    IS_ONE_OF(db,
                              "req_req_item.id",
                              s3db.req_item_represent,
                              orderby = "req_req_item.id",
                              filterby = "req_id",
                              filter_opts = [req_id],
                              sort=True
                              )
            elif r.component.name == "person":
                pass
                # Limit commit skills to skills from the request
                #db.req_commit_skill.req_skill_id.requires = \
                #    IS_ONE_OF(db,
                #              "req_req_skill.id",
                #              s3db.req_skill_represent,
                #              orderby = "req_req_skill.id",
                #              filterby = "req_id",
                #              filter_opts = [req_id],
                #              sort=True
                #              )
        return True
    s3.prep = prep

    rheader = commit_rheader

    output = s3_rest_controller(module, resourcename, rheader=rheader)
    return output
# -----------------------------------------------------------------------------
def commit_rheader(r):
    """
        Resource Header for Commitments

        Builds a DIV with a summary table and (type-dependent) tabs:
        - type 1 (Items): Items tab + a "Send Commitment" footer button
        - type 3 (People): People tab
        - other types: person/date summary only

        Returns None for non-HTML representations or when there is no record.
    """

    if r.representation == "html":
        record = r.record
        if record and r.name == "commit":
            s3_date_represent = s3base.S3DateTime.date_represent

            tabs = [(T("Edit Details"), None)]
            # record.type may be stored as a string; normalise to int
            type = record.type and int(record.type)

            table = r.table
            if type == 1:
                tabs.append((T("Items"), "commit_item"))

                #req_record = db.req_req[record.req_id]
                #req_date = req_record.date
                rheader = DIV( TABLE( TR( TH( "%s: " % table.req_id.label),
                                          table.req_id.represent(record.req_id),
                                         ),
                                      TR( TH( "%s: " % T("Committing Warehouse")),
                                          s3db.org_site_represent(record.site_id),
                                          TH( "%s: " % T("Commit Date")),
                                          s3_date_represent(record.date),
                                         ),
                                      TR( TH( "%s: " % table.comments.label),
                                          TD(record.comments, _colspan=3)
                                         ),
                                     ),
                               )

                prepare_btn = A( T("Send Commitment"),
                                 _href = URL(c = "inv",
                                             f = "send_commit",
                                             args = [record.id]
                                             ),
                                 _id = "send_commit",
                                 _class = "action-btn"
                                 )

                s3.rfooter = TAG[""](prepare_btn)

#                send_btn = A( T("Send Commitment as Shipment"),
#                              _href = URL(c = "inv",
#                                          f = "send_commit",
#                                          args = [record.id]
#                                          ),
#                              _id = "send_commit",
#                              _class = "action-btn"
#                              )
#
#                send_btn_confirm = SCRIPT("S3ConfirmClick('#send_commit', '%s')" %
#                                          T("Do you want to send these Committed items?") )
#                s3.rfooter = TAG[""](send_btn,send_btn_confirm)
                #rheader.append(send_btn)
                #rheader.append(send_btn_confirm)

            elif type == 3:
                tabs.append((T("People"), "commit_person"))

                #req_record = db.req_req[record.req_id]
                #req_date = req_record.date
                organisation_represent = s3db.org_organisation_represent
                rheader = DIV( TABLE( TR( TH( "%s: " % table.req_id.label),
                                          table.req_id.represent(record.req_id),
                                         ),
                                      TR( TH( "%s: " % T("Committing Organization")),
                                          organisation_represent(record.organisation_id),
                                          TH( "%s: " % T("Commit Date")),
                                          s3_date_represent(record.date),
                                         ),
                                      TR( TH( "%s: " % table.comments.label),
                                          TD(record.comments, _colspan=3)
                                         ),
                                     ),
                               )
            else:
                # Other (& Assets/Shelter)
                rheader = DIV( TABLE( TR( TH( "%s: " % table.req_id.label),
                                          table.req_id.represent(record.req_id),
                                         ),
                                      TR( TH( "%s: " % T("Committing Person")),
                                          s3db.pr_person_represent(record.committer_id),
                                          TH( "%s: " % T("Commit Date")),
                                          s3_date_represent(record.date),
                                         ),
                                      TR( TH( "%s: " % table.comments.label),
                                          TD(record.comments or "", _colspan=3)
                                         ),
                                     ),
                               )

            rheader_tabs = s3_rheader_tabs(r,
                                           tabs)
            rheader.append(rheader_tabs)
            return rheader
    return None
# =============================================================================
def commit_item():
    """ REST Controller for Commitment Items """
    return s3_rest_controller()
# =============================================================================
def commit_req():
    """
        Commit items according to a request: copies data from a req into
        a new commitment, then redirects to that commitment's Items tab.

        arg: req_id
        vars: site_id
    """

    req_id = request.args[0]
    r_req = s3db.req_req[req_id]
    site_id = request.vars.get("site_id")

    # User must have permissions over facility which is sending
    (prefix, resourcename, id) = s3db.get_instance(s3db.org_site, site_id)
    if not site_id or not auth.s3_has_permission("update",
                                                 "%s_%s" % (prefix,
                                                            resourcename),
                                                 record_id=id):
        session.error = T("You do not have permission to make this commitment.")
        redirect(URL(c = "req",
                     f = "req",
                     args = [req_id],
                     ))

    # Create a new commit record
    commit_id = s3db.req_commit.insert(date = request.utcnow,
                                       req_id = req_id,
                                       site_id = site_id,
                                       type = r_req.type
                                       )

    # Only select items which are in the warehouse
    ritable = s3db.req_req_item
    iitable = s3db.inv_inv_item
    query = (ritable.req_id == req_id) & \
            (ritable.quantity_fulfil < ritable.quantity) & \
            (iitable.site_id == site_id) & \
            (ritable.item_id == iitable.item_id) & \
            (ritable.deleted == False)  & \
            (iitable.deleted == False)
    req_items = db(query).select(ritable.id,
                                 ritable.quantity,
                                 ritable.item_pack_id,
                                 iitable.item_id,
                                 iitable.quantity,
                                 iitable.item_pack_id)
    citable = s3db.req_commit_item
    for req_item in req_items:
        # NOTE(review): pack_quantity is not in the select above — it is
        # presumably a virtual field on these rows; verify against the model.
        req_item_quantity = req_item.req_req_item.quantity * \
                            req_item.req_req_item.pack_quantity

        inv_item_quantity = req_item.inv_inv_item.quantity * \
                            req_item.inv_inv_item.pack_quantity

        # Commit no more than is available in stock
        if inv_item_quantity > req_item_quantity:
            commit_item_quantity = req_item_quantity
        else:
            commit_item_quantity = inv_item_quantity
        # Convert back into the pack units of the request item
        commit_item_quantity = commit_item_quantity / req_item.req_req_item.pack_quantity

        if commit_item_quantity:
            commit_item_id = citable.insert(commit_id = commit_id,
                                            req_item_id = req_item.req_req_item.id,
                                            item_pack_id = req_item.req_req_item.item_pack_id,
                                            quantity = commit_item_quantity
                                            )

            # Update the req_item.commit_quantity & req.commit_status
            s3mgr.store_session("req", "commit_item", commit_item_id)
            s3db.req_commit_item_onaccept(None)

    # Redirect to commit
    redirect(URL(c = "req",
                 f = "commit",
                 args = [commit_id, "commit_item"]))
# =============================================================================
def send_req():
    """
        Send items according to a request: copies data from a req into a
        new inv_send shipment, then redirects to that shipment's items tab.

        arg: req_id
        vars: site_id
    """

    ritable = s3db.req_req_item
    iitable = s3db.inv_inv_item
    sendtable = s3db.inv_send
    tracktable = s3db.inv_track_item
    siptable = s3db.supply_item_pack

    req_id = request.args[0]
    r_req = s3db.req_req[req_id]
    site_id = request.vars.get("site_id")

    # User must have permissions over facility which is sending
    (prefix, resourcename, id) = s3db.get_instance(db.org_site, site_id)
    if not site_id or not auth.s3_has_permission("update",
                                                 "%s_%s" % (prefix,
                                                            resourcename),
                                                 record_id=id):
        session.error = T("You do not have permission to send this shipment.")
        redirect(URL(c = "req",
                     f = "req",
                     args = [req_id]))

    # Create a new send record
    code = s3db.inv_get_shipping_code("WB",
                                      site_id,
                                      s3db.inv_send.send_ref
                                      )
    send_id = sendtable.insert(send_ref = code,
                               req_ref = r_req.req_ref,
                               sender_id = auth.s3_logged_in_person(),
                               site_id = site_id,
                               date = request.utcnow,
                               recipient_id = r_req.requester_id,
                               to_site_id = r_req.site_id,
                               status = s3db.inv_ship_status["IN_PROCESS"],
                               )

    # Get the items for this request that have not been fulfilled (in transit)
    query = (ritable.req_id == req_id) & \
            (ritable.quantity_transit < ritable.quantity) & \
            (ritable.deleted == False)
    req_items = db(query).select(ritable.id,
                                 ritable.quantity,
                                 ritable.quantity_transit,
                                 ritable.item_id,
                                 ritable.item_pack_id,
                                 )

    # Loop through each request item and find matches in the site inventory
    for req_i in req_items:
        query = (iitable.item_id == req_i.item_id) & \
                (iitable.quantity > 0) & \
                (iitable.site_id == site_id) & \
                (iitable.deleted == False)
        inv_items = db(query).select(iitable.id,
                                     iitable.item_id,
                                     iitable.quantity,
                                     iitable.item_pack_id,
                                     iitable.pack_value,
                                     iitable.currency,
                                     iitable.expiry_date,
                                     iitable.bin,
                                     iitable.owner_org_id,
                                     iitable.supply_org_id,
                                     )

        # Get the request pack_quantity and hence the quantity still wanted
        req_p_qnty = siptable[req_i.item_pack_id].quantity
        req_qnty = req_i.quantity
        req_qnty_in_t = req_i.quantity_transit
        req_qnty_wanted = (req_qnty - req_qnty_in_t) * req_p_qnty

        # Insert the track item records:
        # if there is a single match then remove the stock and track it;
        # if there is more than one item match then set the quantity to 0
        # and record the quantity requested in the comments
        for inv_i in inv_items:
            if len(inv_items) == 1:
                # Remove this total from the warehouse stock
                send_item_quantity = s3db.inv_remove(inv_i, req_qnty_wanted)
                # Bugfix: comment was previously left unset on this branch,
                # causing a NameError (or a stale comment from a previous
                # request item) at the insert below
                comment = None
            else:
                send_item_quantity = 0
                comment = "%d items needed to match total request" % req_qnty_wanted
            tracktable.insert(send_id = send_id,
                              send_inv_item_id = inv_i.id,
                              item_id = inv_i.item_id,
                              req_item_id = req_i.id,
                              item_pack_id = inv_i.item_pack_id,
                              quantity = send_item_quantity,
                              status = s3db.inv_tracking_status["IN_PROCESS"],
                              pack_value = inv_i.pack_value,
                              currency = inv_i.currency,
                              bin = inv_i.bin,
                              expiry_date = inv_i.expiry_date,
                              owner_org_id = inv_i.owner_org_id,
                              supply_org_id = inv_i.supply_org_id,
                              comments = comment,
                              )

    # Redirect to the shipment's items
    redirect(URL(c = "inv",
                 f = "send",
                 args = [send_id, "track_item"]))
# =============================================================================
def commit_item_json():
    """
        Return a JSON array of the commitments made against a given
        req_item (request.args[0]), prefixed with a pseudo-record that
        acts as a column-header row for the client-side table
    """

    ctable = s3db.req_commit
    itable = s3db.req_commit_item
    stable = s3db.org_site
    #ctable.date.represent = lambda dt: dt[:10]

    query = (itable.req_item_id == request.args[0]) & \
            (ctable.id == itable.commit_id) & \
            (ctable.site_id == stable.id) & \
            (itable.deleted == False)
    records = db(query).select(ctable.id,
                               ctable.date,
                               stable.name,
                               itable.quantity,
                               orderby = db.req_commit.date)

    # Prepend the header pseudo-record by splicing it before the
    # records' own JSON (dropping the records' opening "[")
    json_str = "[%s,%s" % ( json.dumps(dict(id = str(T("Committed")),
                                            quantity = "#")),
                            records.json()[1:])

    response.headers["Content-Type"] = "application/json"
    return json_str
# END =========================================================================
|
|
"""Functions to make 3D plots with M/EEG data
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
# Mark Wronkiewicz <wronk.mark@gmail.com>
#
# License: Simplified BSD
from ..externals.six import string_types, advance_iterator
import os.path as op
import inspect
import warnings
from itertools import cycle
import base64
import numpy as np
from scipy import linalg
from ..io.pick import pick_types
from ..io.constants import FIFF
from ..surface import (get_head_surf, get_meg_helmet_surf, read_surface,
transform_surface_to)
from ..transforms import (read_trans, _find_trans, apply_trans,
combine_transforms, _get_mri_head_t, _ensure_trans,
invert_transform)
from ..utils import get_subjects_dir, logger, _check_subject, verbose
from ..defaults import _handle_default
from .utils import mne_analyze_colormap, _prepare_trellis, COLORS
from ..externals.six import BytesIO
def plot_evoked_field(evoked, surf_maps, time=None, time_label='t = %0.0f ms',
                      n_jobs=1):
    """Plot MEG/EEG fields on head surface and helmet in 3D

    Parameters
    ----------
    evoked : instance of mne.Evoked
        The evoked object.
    surf_maps : list
        The surface mapping information obtained with make_field_map.
    time : float | None
        The time point at which the field map shall be displayed. If None,
        the average peak latency (across sensor types) is used.
    time_label : str | None
        How to print info about the time instant visualized. If None, no
        time label is shown.
    n_jobs : int
        Number of jobs to run in parallel.

    Returns
    -------
    fig : instance of mlab.Figure
        The mayavi figure.
    """
    types = [t for t in ['eeg', 'grad', 'mag'] if t in evoked]

    if time is None:
        time = np.mean([evoked.get_peak(ch_type=t)[1] for t in types])

    if not evoked.times[0] <= time <= evoked.times[-1]:
        raise ValueError('`time` (%0.3f) must be inside `evoked.times`' % time)
    time_idx = np.argmin(np.abs(evoked.times - time))

    # Plot them
    from mayavi import mlab
    alphas = [1.0, 0.5]
    colors = [(0.6, 0.6, 0.6), (1.0, 1.0, 1.0)]
    colormap = mne_analyze_colormap(format='mayavi')
    colormap_lines = np.concatenate([np.tile([0., 0., 255., 255.], (127, 1)),
                                     np.tile([0., 0., 0., 255.], (2, 1)),
                                     np.tile([255., 0., 0., 255.], (127, 1))])

    fig = mlab.figure(bgcolor=(0.0, 0.0, 0.0), size=(600, 600))

    for ii, this_map in enumerate(surf_maps):
        surf = this_map['surf']
        map_data = this_map['data']
        map_type = this_map['kind']
        map_ch_names = this_map['ch_names']

        if map_type == 'eeg':
            pick = pick_types(evoked.info, meg=False, eeg=True)
        else:
            pick = pick_types(evoked.info, meg=True, eeg=False, ref_meg=False)

        ch_names = [evoked.ch_names[k] for k in pick]

        set_ch_names = set(ch_names)
        set_map_ch_names = set(map_ch_names)
        if set_ch_names != set_map_ch_names:
            message = ['Channels in map and data do not match.']
            diff = set_map_ch_names - set_ch_names
            if len(diff):
                message += ['%s not in data file. ' % list(diff)]
            diff = set_ch_names - set_map_ch_names
            if len(diff):
                message += ['%s not in map file.' % list(diff)]
            raise RuntimeError(' '.join(message))

        data = np.dot(map_data, evoked.data[pick, time_idx])

        x, y, z = surf['rr'].T
        nn = surf['nn']
        # make absolutely sure these are normalized for Mayavi
        # (bugfix: previously divided by the *squared* norm, which only
        # yields unit vectors if the normals were already unit length)
        nn = nn / np.sqrt(np.sum(nn * nn, axis=1))[:, np.newaxis]

        # Make a solid surface
        vlim = np.max(np.abs(data))
        alpha = alphas[ii]
        with warnings.catch_warnings(record=True):  # traits
            mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'])
        mesh.data.point_data.normals = nn
        mesh.data.cell_data.normals = None
        mlab.pipeline.surface(mesh, color=colors[ii], opacity=alpha)

        # Now show our field pattern
        with warnings.catch_warnings(record=True):  # traits
            mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'],
                                                        scalars=data)
        mesh.data.point_data.normals = nn
        mesh.data.cell_data.normals = None
        with warnings.catch_warnings(record=True):  # traits
            fsurf = mlab.pipeline.surface(mesh, vmin=-vlim, vmax=vlim)
        fsurf.module_manager.scalar_lut_manager.lut.table = colormap

        # And the field lines on top
        with warnings.catch_warnings(record=True):  # traits
            mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'],
                                                        scalars=data)
        mesh.data.point_data.normals = nn
        mesh.data.cell_data.normals = None
        with warnings.catch_warnings(record=True):  # traits
            cont = mlab.pipeline.contour_surface(mesh, contours=21,
                                                 line_width=1.0,
                                                 vmin=-vlim, vmax=vlim,
                                                 opacity=alpha)
        cont.module_manager.scalar_lut_manager.lut.table = colormap_lines

    if time_label is not None:
        if '%' in time_label:
            time_label %= (1e3 * evoked.times[time_idx])
        mlab.text(0.01, 0.01, time_label, width=0.4)
    mlab.view(10, 60)
    return fig
def _plot_mri_contours(mri_fname, surf_fnames, orientation='coronal',
                       slices=None, show=True, img_output=False):
    """Plot BEM contours on anatomical slices.

    Parameters
    ----------
    mri_fname : str
        The name of the file containing anatomical data (a nibabel-readable
        volume).
    surf_fnames : list of str
        The filenames for the BEM surfaces in the format
        ['inner_skull.surf', 'outer_skull.surf', 'outer_skin.surf'].
    orientation : str
        'coronal' or 'axial' or 'sagittal'.
    slices : list of int | None
        Slice indices. If None, 12 evenly spaced slices are used.
    show : bool
        Call pyplot.show() at the end.
    img_output : None | False | tuple
        If tuple (width and height), images will be produced instead of a
        single figure with many axes. This mode is designed to reduce the
        (substantial) overhead associated with making tens to hundreds
        of matplotlib axes, instead opting to re-use a single Axes instance.
        None and False both mean "one Axes per slice, return the figure".

    Returns
    -------
    fig : Instance of matplotlib.figure.Figure | list
        The figure. Will instead be a list of base64-encoded png images if
        img_output is a tuple.
    """
    import matplotlib.pyplot as plt
    import nibabel as nib

    if orientation not in ['coronal', 'axial', 'sagittal']:
        raise ValueError("Orientation must be 'coronal', 'axial' or "
                         "'sagittal'. Got %s." % orientation)
    if img_output is False:
        # The "no image output" sentinel tested below is None; accept the
        # historical False default too, otherwise calling with defaults
        # crashes on `img_output[0]` in the else branch.
        img_output = None

    # Load the T1 data
    nim = nib.load(mri_fname)
    data = nim.get_data()
    affine = nim.get_affine()

    n_sag, n_axi, n_cor = data.shape
    orientation_name2axis = dict(sagittal=0, axial=1, coronal=2)
    orientation_axis = orientation_name2axis[orientation]

    if slices is None:
        n_slices = data.shape[orientation_axis]
        # np.int was removed in NumPy 1.24; builtin int is equivalent here
        slices = np.linspace(0, n_slices, 12, endpoint=False).astype(int)

    # create of list of surfaces
    surfs = list()

    trans = linalg.inv(affine)
    # XXX : next line is a hack don't ask why
    trans[:3, -1] = [n_sag // 2, n_axi // 2, n_cor // 2]

    for surf_fname in surf_fnames:
        surf = dict()
        surf['rr'], surf['tris'] = read_surface(surf_fname)
        # move back surface to MRI coordinate system
        surf['rr'] = nib.affines.apply_affine(trans, surf['rr'])
        surfs.append(surf)

    if img_output is None:
        fig, axs = _prepare_trellis(len(slices), 4)
    else:
        # Re-use a single Axes for every slice; each rendering is saved to a
        # PNG buffer instead of building one Axes per slice.
        fig, ax = plt.subplots(1, 1, figsize=(7.0, 7.0))
        axs = [ax] * len(slices)

        fig_size = fig.get_size_inches()
        w, h = img_output[0], img_output[1]
        w2 = fig_size[0]
        fig.set_size_inches([(w2 / float(w)) * w, (w2 / float(w)) * h])
        plt.close(fig)

    inds = dict(coronal=[0, 1, 2], axial=[2, 0, 1],
                sagittal=[2, 1, 0])[orientation]
    outs = []
    for ax, sl in zip(axs, slices):
        # adjust the orientations for good view
        if orientation == 'coronal':
            dat = data[:, :, sl].transpose()
        elif orientation == 'axial':
            dat = data[:, sl, :]
        elif orientation == 'sagittal':
            dat = data[sl, :, :]

        # First plot the anatomical data
        if img_output is not None:
            ax.clear()
        ax.imshow(dat, cmap=plt.cm.gray)
        ax.axis('off')

        # and then plot the contours on top
        for surf in surfs:
            ax.tricontour(surf['rr'][:, inds[0]], surf['rr'][:, inds[1]],
                          surf['tris'], surf['rr'][:, inds[2]],
                          levels=[sl], colors='yellow', linewidths=2.0)
        if img_output is not None:
            ax.set_xticks([])
            ax.set_yticks([])
            ax.set_xlim(0, img_output[1])
            ax.set_ylim(img_output[0], 0)
            output = BytesIO()
            fig.savefig(output, bbox_inches='tight',
                        pad_inches=0, format='png')
            outs.append(base64.b64encode(output.getvalue()).decode('ascii'))

    if show:
        plt.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0.,
                            hspace=0.)
        plt.show()

    return fig if img_output is None else outs
@verbose
def plot_trans(info, trans='auto', subject=None, subjects_dir=None,
               ch_type=None, source=('bem', 'head'), coord_frame='head',
               meg_sensors=False, dig=False, verbose=None):
    """Plot MEG/EEG head surface and helmet in 3D.

    Parameters
    ----------
    info : dict
        The measurement info.
    trans : str | 'auto' | dict
        The full path to the head<->MRI transform ``*-trans.fif`` file
        produced during coregistration. If 'auto', the transform file is
        looked up from the subject's directory.
    subject : str | None
        The subject name corresponding to FreeSurfer environment
        variable SUBJECT.
    subjects_dir : str
        The path to the freesurfer subjects reconstructions.
        It corresponds to Freesurfer environment variable SUBJECTS_DIR.
    ch_type : None | 'eeg' | 'meg'
        If None, both the MEG helmet and EEG electrodes will be shown.
        If 'meg', only the MEG helmet will be shown. If 'eeg', only the
        EEG electrodes will be shown.
    source : str
        Type to load. Common choices would be `'bem'` or `'head'`. We first
        try loading `'$SUBJECTS_DIR/$SUBJECT/bem/$SUBJECT-$SOURCE.fif'`, and
        then look for `'$SUBJECT*$SOURCE.fif'` in the same directory. Defaults
        to 'bem'. Note. For single layer bems it is recommended to use 'head'.
    coord_frame : str
        Coordinate frame to use, 'head', 'meg', or 'mri'.
    meg_sensors : bool
        If True, plot MEG sensors as points in addition to showing the helmet.
    dig : bool
        If True, plot the digitization points.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    fig : instance of mlab.Figure
        The mayavi figure.
    """
    if coord_frame not in ['head', 'meg', 'mri']:
        # fix: the old message omitted 'mri' even though it is accepted
        raise ValueError('coord_frame must be "head", "meg" or "mri"')
    if ch_type not in [None, 'eeg', 'meg']:
        raise ValueError('Argument ch_type must be None | eeg | meg. Got %s.'
                         % ch_type)

    if isinstance(trans, string_types):
        if trans == 'auto':
            # let's try to do this in MRI coordinates so they're easy to plot
            trans = _find_trans(subject, subjects_dir)
        trans = read_trans(trans)
    elif not isinstance(trans, dict):
        raise TypeError('trans must be str or dict')
    head_mri_t = _ensure_trans(trans, 'head', 'mri')
    del trans

    # both the head and helmet will be in MRI coordinates after this
    surfs = [get_head_surf(subject, source=source, subjects_dir=subjects_dir)]
    if ch_type is None or ch_type == 'meg':
        surfs.append(get_meg_helmet_surf(info, head_mri_t))
    if coord_frame == 'meg':
        surf_trans = combine_transforms(info['dev_head_t'], head_mri_t,
                                        'meg', 'mri')
    elif coord_frame == 'head':
        surf_trans = head_mri_t
    else:  # coord_frame == 'mri'
        surf_trans = None
    surfs = [transform_surface_to(surf, coord_frame, surf_trans)
             for surf in surfs]
    del surf_trans

    # determine points
    # fix: eeg_loc must be bound even when ch_type == 'meg'; it was only
    # assigned inside the EEG branch below, giving a NameError at
    # `datas = (eeg_loc, ...)` further down.
    eeg_loc = list()
    meg_loc = list()
    ext_loc = list()
    car_loc = list()
    if ch_type is None or ch_type == 'eeg':
        eeg_loc = np.array([info['chs'][k]['loc'][:3]
                            for k in pick_types(info, meg=False, eeg=True)])
        if len(eeg_loc) > 0:
            # Transform EEG electrodes from head coordinates if necessary
            if coord_frame == 'meg':
                eeg_loc = apply_trans(invert_transform(info['dev_head_t']),
                                      eeg_loc)
            elif coord_frame == 'mri':
                eeg_loc = apply_trans(invert_transform(head_mri_t), eeg_loc)
        else:
            # only warn if EEG explicitly requested, or EEG channels exist but
            # no locations are provided
            if (ch_type is not None or
                    len(pick_types(info, meg=False, eeg=True)) > 0):
                warnings.warn('EEG electrode locations not found. '
                              'Cannot plot EEG electrodes.')
    if meg_sensors:
        meg_loc = np.array([info['chs'][k]['loc'][:3]
                            for k in pick_types(info)])
        if len(meg_loc) > 0:
            # Transform MEG coordinates from meg if necessary
            if coord_frame == 'head':
                meg_loc = apply_trans(info['dev_head_t'], meg_loc)
            elif coord_frame == 'mri':
                t = combine_transforms(info['dev_head_t'], head_mri_t,
                                       'meg', 'mri')
                meg_loc = apply_trans(t, meg_loc)
        else:
            warnings.warn('MEG electrodes not found. '
                          'Cannot plot MEG locations.')
    if dig:
        ext_loc = np.array([d['r'] for d in info['dig']
                            if d['kind'] == FIFF.FIFFV_POINT_EXTRA])
        car_loc = np.array([d['r'] for d in info['dig']
                            if d['kind'] == FIFF.FIFFV_POINT_CARDINAL])
        if coord_frame == 'meg':
            t = invert_transform(info['dev_head_t'])
            ext_loc = apply_trans(t, ext_loc)
            car_loc = apply_trans(t, car_loc)
        elif coord_frame == 'mri':
            ext_loc = apply_trans(head_mri_t, ext_loc)
            car_loc = apply_trans(head_mri_t, car_loc)
        if len(car_loc) == len(ext_loc) == 0:
            warnings.warn('Digitization points not found. '
                          'Cannot plot digitization.')

    # do the plotting, surfaces then points
    from mayavi import mlab
    fig = mlab.figure(bgcolor=(0.0, 0.0, 0.0), size=(600, 600))

    alphas = [1.0, 0.5]  # head, helmet
    colors = [(0.6, 0.6, 0.6), (0.0, 0.0, 0.6)]
    for surf, alpha, color in zip(surfs, alphas, colors):
        x, y, z = surf['rr'].T
        nn = surf['nn']
        # make absolutely sure these are normalized for Mayavi
        # NOTE(review): this divides by the *squared* norm, which is only a
        # no-op if the normals are already unit length -- confirm upstream
        nn = nn / np.sum(nn * nn, axis=1)[:, np.newaxis]

        # Make a solid surface
        with warnings.catch_warnings(record=True):  # traits
            mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'])
        mesh.data.point_data.normals = nn
        mesh.data.cell_data.normals = None
        mlab.pipeline.surface(mesh, color=color, opacity=alpha)

    datas = (eeg_loc, meg_loc, car_loc, ext_loc)
    colors = ((1., 0., 0.), (0., 0.25, 0.5), (1., 1., 0.), (1., 0.5, 0.))
    alphas = (1.0, 0.25, 0.5, 0.25)
    scales = (0.005, 0.0025, 0.015, 0.0075)
    for data, color, alpha, scale in zip(datas, colors, alphas, scales):
        if len(data) > 0:
            with warnings.catch_warnings(record=True):  # traits
                mlab.points3d(data[:, 0], data[:, 1], data[:, 2],
                              color=color, scale_factor=scale, opacity=alpha)
    mlab.view(90, 90)
    return fig
def _limits_to_control_points(clim, stc_data, colormap):
"""Private helper function to convert limits (values or percentiles)
to control points.
Note: If using 'mne', generate cmap control points for a directly
mirrored cmap for simplicity (i.e., no normalization is computed to account
for a 2-tailed mne cmap).
Parameters
----------
clim : str | dict
Desired limits use to set cmap control points.
Returns
-------
ctrl_pts : list (length 3)
Array of floats corresponding to values to use as cmap control points.
colormap : str
The colormap.
"""
# Based on type of limits specified, get cmap control points
if colormap == 'auto':
if clim == 'auto':
colormap = 'mne' if (stc_data < 0).any() else 'hot'
else:
if 'lims' in clim:
colormap = 'hot'
else: # 'pos_lims' in clim
colormap = 'mne'
if clim == 'auto':
# Set upper and lower bound based on percent, and get average between
ctrl_pts = np.percentile(np.abs(stc_data), [96, 97.5, 99.95])
elif isinstance(clim, dict):
# Get appropriate key for clim if it's a dict
limit_key = ['lims', 'pos_lims'][colormap in ('mne', 'mne_analyze')]
if colormap != 'auto' and limit_key not in clim.keys():
raise KeyError('"pos_lims" must be used with "mne" colormap')
clim['kind'] = clim.get('kind', 'percent')
if clim['kind'] == 'percent':
ctrl_pts = np.percentile(np.abs(stc_data),
list(np.abs(clim[limit_key])))
elif clim['kind'] == 'value':
ctrl_pts = np.array(clim[limit_key])
if (np.diff(ctrl_pts) < 0).any():
raise ValueError('value colormap limits must be strictly '
'nondecreasing')
else:
raise ValueError('If clim is a dict, clim[kind] must be '
' "value" or "percent"')
else:
raise ValueError('"clim" must be "auto" or dict')
if len(ctrl_pts) != 3:
raise ValueError('"lims" or "pos_lims" is length %i. It must be length'
' 3' % len(ctrl_pts))
ctrl_pts = np.array(ctrl_pts, float)
if len(set(ctrl_pts)) != 3:
if len(set(ctrl_pts)) == 1: # three points match
if ctrl_pts[0] == 0: # all are zero
warnings.warn('All data were zero')
ctrl_pts = np.arange(3, dtype=float)
else:
ctrl_pts *= [0., 0.5, 1] # all nonzero pts == max
else: # two points match
# if points one and two are identical, add a tiny bit to the
# control point two; if points two and three are identical,
# subtract a tiny bit from point two.
bump = 1e-5 if ctrl_pts[0] == ctrl_pts[1] else -1e-5
ctrl_pts[1] = ctrl_pts[0] + bump * (ctrl_pts[2] - ctrl_pts[0])
return ctrl_pts, colormap
def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh',
                          colormap='auto', time_label='time=%0.2f ms',
                          smoothing_steps=10, transparent=None, alpha=1.0,
                          time_viewer=False, config_opts=None,
                          subjects_dir=None, figure=None, views='lat',
                          colorbar=True, clim='auto'):
    """Plot SourceEstimates with PySurfer

    Note: PySurfer currently needs the SUBJECTS_DIR environment variable,
    which will automatically be set by this function. Plotting multiple
    SourceEstimates with different values for subjects_dir will cause
    PySurfer to use the wrong FreeSurfer surfaces when using methods of
    the returned Brain object. It is therefore recommended to set the
    SUBJECTS_DIR environment variable or always use the same value for
    subjects_dir (within the same Python session).

    Parameters
    ----------
    stc : SourceEstimates
        The source estimates to plot.
    subject : str | None
        The subject name corresponding to FreeSurfer environment
        variable SUBJECT. If None stc.subject will be used. If that
        is None, the environment will be used.
    surface : str
        The type of surface (inflated, white etc.).
    hemi : str, 'lh' | 'rh' | 'split' | 'both'
        The hemisphere to display.
    colormap : str | np.ndarray of float, shape(n_colors, 3 | 4)
        Name of colormap to use or a custom look up table. If array, must
        be (n x 3) or (n x 4) array for with RGB or RGBA values between
        0 and 255. If 'auto', either 'hot' or 'mne' will be chosen
        based on whether 'lims' or 'pos_lims' are specified in `clim`.
    time_label : str
        How to print info about the time instant visualized.
    smoothing_steps : int
        The amount of smoothing
    transparent : bool | None
        If True, use a linear transparency between fmin and fmid.
        None will choose automatically based on colormap type.
    alpha : float
        Alpha value to apply globally to the overlay.
    time_viewer : bool
        Display time viewer GUI.
    config_opts : dict
        Keyword arguments for Brain initialization.
        See pysurfer.viz.Brain.
    subjects_dir : str
        The path to the freesurfer subjects reconstructions.
        It corresponds to Freesurfer environment variable SUBJECTS_DIR.
    figure : instance of mayavi.core.scene.Scene | list | int | None
        If None, a new figure will be created. If multiple views or a
        split view is requested, this must be a list of the appropriate
        length. If int is provided it will be used to identify the Mayavi
        figure by it's id or create a new figure with the given id.
    views : str | list
        View to use. See surfer.Brain().
    colorbar : bool
        If True, display colorbar on scene.
    clim : str | dict
        Colorbar properties specification. If 'auto', set clim automatically
        based on data percentiles. If dict, should contain:

        ``kind`` : str
            Flag to specify type of limits. 'value' or 'percent'.
        ``lims`` : list | np.ndarray | tuple of float, 3 elements
            Note: Only use this if 'colormap' is not 'mne'.
            Left, middle, and right bound for colormap.
        ``pos_lims`` : list | np.ndarray | tuple of float, 3 elements
            Note: Only use this if 'colormap' is 'mne'.
            Left, middle, and right bound for colormap. Positive values
            will be mirrored directly across zero during colormap
            construction to obtain negative control points.

    Returns
    -------
    brain : Brain
        A instance of surfer.viz.Brain from PySurfer.
    """
    from surfer import Brain, TimeViewer
    config_opts = _handle_default('config_opts', config_opts)

    import mayavi
    from mayavi import mlab

    # import here to avoid circular import problem
    from ..source_estimate import SourceEstimate

    if not isinstance(stc, SourceEstimate):
        raise ValueError('stc has to be a surface source estimate')

    if hemi not in ['lh', 'rh', 'split', 'both']:
        raise ValueError('hemi has to be either "lh", "rh", "split", '
                         'or "both"')

    n_split = 2 if hemi == 'split' else 1
    n_views = 1 if isinstance(views, string_types) else len(views)
    if figure is not None:
        # use figure with specified id or create new figure
        if isinstance(figure, int):
            figure = mlab.figure(figure, size=(600, 600))
        # make sure it is of the correct type
        if not isinstance(figure, list):
            figure = [figure]
        if not all(isinstance(f, mayavi.core.scene.Scene) for f in figure):
            raise TypeError('figure must be a mayavi scene or list of scenes')
        # make sure we have the right number of figures
        n_fig = len(figure)
        if not n_fig == n_split * n_views:
            # fix: parenthesize the product. Previously the expression parsed
            # as ('... (%s)' % n_split) * n_views, i.e. the whole message
            # string was formatted with n_split and then *repeated* n_views
            # times.
            raise RuntimeError('`figure` must be a list with the same '
                               'number of elements as PySurfer plots that '
                               'will be created (%s)' % (n_split * n_views))

    # convert control points to locations in colormap
    ctrl_pts, colormap = _limits_to_control_points(clim, stc.data, colormap)

    # Construct cmap manually if 'mne' and get cmap bounds
    # and triage transparent argument
    if colormap in ('mne', 'mne_analyze'):
        colormap = mne_analyze_colormap(ctrl_pts)
        scale_pts = [-1 * ctrl_pts[-1], 0, ctrl_pts[-1]]
        transparent = False if transparent is None else transparent
    else:
        scale_pts = ctrl_pts
        transparent = True if transparent is None else transparent

    subjects_dir = get_subjects_dir(subjects_dir=subjects_dir,
                                    raise_error=True)
    subject = _check_subject(stc.subject, subject, True)
    if hemi in ['both', 'split']:
        hemis = ['lh', 'rh']
    else:
        hemis = [hemi]

    title = subject if len(hemis) > 1 else '%s - %s' % (subject, hemis[0])
    # only pass `views` when the installed PySurfer version supports it
    args = inspect.getargspec(Brain.__init__)[0]
    kwargs = dict(title=title, figure=figure, config_opts=config_opts,
                  subjects_dir=subjects_dir)
    if 'views' in args:
        kwargs['views'] = views
    with warnings.catch_warnings(record=True):  # traits warnings
        brain = Brain(subject, hemi, surface, **kwargs)
    for hemi in hemis:
        # stc.data stacks lh vertices first, then rh
        hemi_idx = 0 if hemi == 'lh' else 1
        if hemi_idx == 0:
            data = stc.data[:len(stc.vertices[0])]
        else:
            data = stc.data[len(stc.vertices[0]):]
        vertices = stc.vertices[hemi_idx]
        time = 1e3 * stc.times  # s -> ms for display
        with warnings.catch_warnings(record=True):  # traits warnings
            brain.add_data(data, colormap=colormap, vertices=vertices,
                           smoothing_steps=smoothing_steps, time=time,
                           time_label=time_label, alpha=alpha, hemi=hemi,
                           colorbar=colorbar)

        # scale colormap and set time (index) to display
        brain.scale_data_colormap(fmin=scale_pts[0], fmid=scale_pts[1],
                                  fmax=scale_pts[2], transparent=transparent)

    if time_viewer:
        TimeViewer(brain)
    return brain
def plot_sparse_source_estimates(src, stcs, colors=None, linewidth=2,
                                 fontsize=18, bgcolor=(.05, 0, .1),
                                 opacity=0.2, brain_color=(0.7,) * 3,
                                 show=True, high_resolution=False,
                                 fig_name=None, fig_number=None, labels=None,
                                 modes=('cone', 'sphere'),
                                 scale_factors=(1, 0.6),
                                 verbose=None, **kwargs):
    """Plot source estimates obtained with sparse solver

    Active dipoles are represented in a "Glass" brain.
    If the same source is active in multiple source estimates it is
    displayed with a sphere otherwise with a cone in 3D.

    Parameters
    ----------
    src : dict
        The source space.
    stcs : instance of SourceEstimate or list of instances of SourceEstimate
        The source estimates (up to 3).
    colors : list
        List of colors
    linewidth : int
        Line width in 2D plot.
    fontsize : int
        Font size.
    bgcolor : tuple of length 3
        Background color in 3D.
    opacity : float in [0, 1]
        Opacity of brain mesh.
    brain_color : tuple of length 3
        Brain color.
    show : bool
        Show figures if True.
    high_resolution : bool
        If True, plot on the original (non-downsampled) cortical mesh.
    fig_name :
        Mayavi figure name.
    fig_number :
        Matplotlib figure number.
    labels : ndarray or list of ndarrays
        Labels to show sources in clusters. Sources with the same
        label and the waveforms within each cluster are presented in
        the same color. labels should be a list of ndarrays when
        stcs is a list ie. one label for each stc.
    modes : list
        Should be a list, with each entry being ``'cone'`` or ``'sphere'``
        to specify how the dipoles should be shown.
    scale_factors : list
        List of floating point scale factors for the markers.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
    **kwargs : kwargs
        Keyword arguments to pass to mlab.triangular_mesh.

    Returns
    -------
    surface : mayavi surface
        The glass-brain mesh added to the Mayavi scene.
    """
    known_modes = ['cone', 'sphere']
    if not isinstance(modes, (list, tuple)) or \
            not all(mode in known_modes for mode in modes):
        raise ValueError('mode must be a list containing only '
                         '"cone" or "sphere"')
    # Normalize single stc / single label array to one-element lists
    if not isinstance(stcs, list):
        stcs = [stcs]
    if labels is not None and not isinstance(labels, list):
        labels = [labels]

    if colors is None:
        colors = COLORS

    linestyles = ['-', '--', ':']

    # Show 3D
    # Concatenate both hemispheres into single point/normal arrays;
    # right-hemisphere indices are offset by len(lh_points) throughout.
    lh_points = src[0]['rr']
    rh_points = src[1]['rr']
    points = np.r_[lh_points, rh_points]

    lh_normals = src[0]['nn']
    rh_normals = src[1]['nn']
    normals = np.r_[lh_normals, rh_normals]

    if high_resolution:
        use_lh_faces = src[0]['tris']
        use_rh_faces = src[1]['tris']
    else:
        use_lh_faces = src[0]['use_tris']
        use_rh_faces = src[1]['use_tris']

    use_faces = np.r_[use_lh_faces, lh_points.shape[0] + use_rh_faces]

    # scale positions up for display; presumably a units-to-scene scaling
    # factor -- TODO confirm where 170 comes from
    points *= 170

    vertnos = [np.r_[stc.lh_vertno, lh_points.shape[0] + stc.rh_vertno]
               for stc in stcs]
    unique_vertnos = np.unique(np.concatenate(vertnos).ravel())

    from mayavi import mlab
    from matplotlib.colors import ColorConverter
    color_converter = ColorConverter()

    f = mlab.figure(figure=fig_name, bgcolor=bgcolor, size=(600, 600))
    mlab.clf()
    if mlab.options.backend != 'test':
        # defer rendering while objects are added to the scene
        f.scene.disable_render = True
    with warnings.catch_warnings(record=True):  # traits warnings
        surface = mlab.triangular_mesh(points[:, 0], points[:, 1],
                                       points[:, 2], use_faces,
                                       color=brain_color,
                                       opacity=opacity, **kwargs)

    import matplotlib.pyplot as plt
    # Show time courses
    plt.figure(fig_number)
    plt.clf()

    colors = cycle(colors)

    logger.info("Total number of active sources: %d" % len(unique_vertnos))

    if labels is not None:
        # one fixed color per distinct cluster label
        colors = [advance_iterator(colors) for _ in
                  range(np.unique(np.concatenate(labels).ravel()).size)]

    for idx, v in enumerate(unique_vertnos):
        # get indices of stcs it belongs to
        ind = [k for k, vertno in enumerate(vertnos) if v in vertno]
        is_common = len(ind) > 1

        if labels is None:
            c = advance_iterator(colors)
        else:
            # if vertex is in different stcs than take label from first one
            c = colors[labels[ind[0]][vertnos[ind[0]] == v]]

        # shared sources get the second mode/scale ('sphere' by default)
        mode = modes[1] if is_common else modes[0]
        scale_factor = scale_factors[1] if is_common else scale_factors[0]

        # allow a per-source scale factor sequence
        if (isinstance(scale_factor, (np.ndarray, list, tuple)) and
                len(unique_vertnos) == len(scale_factor)):
            scale_factor = scale_factor[idx]

        x, y, z = points[v]
        nx, ny, nz = normals[v]
        with warnings.catch_warnings(record=True):  # traits
            mlab.quiver3d(x, y, z, nx, ny, nz, color=color_converter.to_rgb(c),
                          mode=mode, scale_factor=scale_factor)

        # 2D time course of this source in every stc that contains it
        for k in ind:
            vertno = vertnos[k]
            mask = (vertno == v)
            assert np.sum(mask) == 1
            linestyle = linestyles[k]
            plt.plot(1e3 * stcs[k].times, 1e9 * stcs[k].data[mask].ravel(),
                     c=c, linewidth=linewidth, linestyle=linestyle)

    plt.xlabel('Time (ms)', fontsize=18)
    plt.ylabel('Source amplitude (nAm)', fontsize=18)

    if fig_name is not None:
        plt.title(fig_name)

    if show:
        plt.show()

    surface.actor.property.backface_culling = True
    surface.actor.property.shading = True

    return surface
def plot_dipole_locations(dipoles, trans, subject, subjects_dir=None,
                          bgcolor=(1, 1, 1), opacity=0.3,
                          brain_color=(0.7, 0.7, 0.7), mesh_color=(1, 1, 0),
                          fig_name=None, fig_size=(600, 600), mode='cone',
                          scale_factor=0.1e-1, colors=None, verbose=None):
    """Plot dipole locations

    Only the location of the first time point of each dipole is shown.

    Parameters
    ----------
    dipoles : list of instances of Dipole | Dipole
        The dipoles to plot.
    trans : dict
        The mri to head trans.
    subject : str
        The subject name corresponding to FreeSurfer environment
        variable SUBJECT.
    subjects_dir : None | str
        The path to the freesurfer subjects reconstructions.
        It corresponds to Freesurfer environment variable SUBJECTS_DIR.
        The default is None.
    bgcolor : tuple of length 3
        Background color in 3D.
    opacity : float in [0, 1]
        Opacity of brain mesh.
    brain_color : tuple of length 3
        Brain color.
    mesh_color : tuple of length 3
        Mesh color.
    fig_name : str
        Mayavi figure name.
    fig_size : tuple of length 2
        Mayavi figure size.
    mode : str
        Should be ``'cone'`` or ``'sphere'`` to specify how the
        dipoles should be shown.
    scale_factor : float
        The scaling applied to amplitudes for the plot.
    colors: list of colors | None
        Color to plot with each dipole. If None default colors are used.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    fig : instance of mlab.Figure
        The mayavi figure.

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    from mayavi import mlab
    from matplotlib.colors import ColorConverter
    color_converter = ColorConverter()

    trans = _get_mri_head_t(trans)[0]
    subjects_dir = get_subjects_dir(subjects_dir=subjects_dir,
                                    raise_error=True)
    # The subject's inner-skull BEM surface is used as the reference mesh.
    fname = op.join(subjects_dir, subject, 'bem', 'inner_skull.surf')
    points, faces = read_surface(fname)
    # Surface vertices are scaled by 1e-3 (presumably mm -> m; confirm
    # against read_surface units) and moved into head coordinates.
    points = apply_trans(trans['trans'], points * 1e-3)

    from .. import Dipole
    if isinstance(dipoles, Dipole):
        dipoles = [dipoles]

    if mode not in ['cone', 'sphere']:
        raise ValueError('mode must be in "cone" or "sphere"')

    if colors is None:
        colors = cycle(COLORS)

    fig = mlab.figure(size=fig_size, bgcolor=bgcolor, fgcolor=(0, 0, 0))
    with warnings.catch_warnings(record=True):  # FutureWarning in traits
        mlab.triangular_mesh(points[:, 0], points[:, 1], points[:, 2],
                             faces, color=mesh_color, opacity=opacity)

    for dip, color in zip(dipoles, colors):
        rgb_color = color_converter.to_rgb(color)
        with warnings.catch_warnings(record=True):  # FutureWarning in traits
            # Only the first time point (row 0 of pos/ori) of each dipole is
            # drawn, as documented above.
            mlab.quiver3d(dip.pos[0, 0], dip.pos[0, 1], dip.pos[0, 2],
                          dip.ori[0, 0], dip.ori[0, 1], dip.ori[0, 2],
                          opacity=1., mode=mode, color=rgb_color,
                          scalars=dip.amplitude.max(),
                          scale_factor=scale_factor)
    if fig_name is not None:
        mlab.title(fig_name)
    if fig.scene is not None:  # safe for Travis
        fig.scene.x_plus_view()

    return fig
|
|
import os
import numpy as np
import tensorflow as tf
from SVHN_dataset import SVHNDataset
from SVHN_recognition.deep_localization_weighted_loss_variable_length import DeepLocalizationWeightedLossVariableLength
from SVHN_recognition.svhn_paper_convolution import SVHNPaperConvolution
from SVHN_recognition.svhn_paper_convolution_dropout_output import SVHNPaperConvolutionDropoutOutput
from SVHN_recognition.svhn_transfer_learning import SVHNTransferLearning
from SVHN_recognition.svhn_transfer_learning_no_maxpool import SVHNNoMaxpool
from visualize import Visualize
import matplotlib.pyplot as plt
import pickle
def get_batch(dataset, inputs_placeholder, labels_placeholder, positions_placeholder, keep_prob_placeholder,
              keep_prob_placeholder_conv,
              keep_prob_val, keep_prob_conv_val,
              is_training_placeholder, is_traininig):
    """Load one batch of 50 examples and package it as a TF feed dict.

    Returns a dict with two keys:
    * "batch": a feed dict mapping each placeholder to the loaded examples,
      labels, positions, the given dropout keep probabilities and the
      training flag;
    * "end_of_file": the dataset's end-of-file flag for this load.
    """
    loaded = dataset.load(50)
    feed_dict = {
        inputs_placeholder: loaded['examples'],
        labels_placeholder: loaded['labels'],
        positions_placeholder: loaded['positions'],
        keep_prob_placeholder: keep_prob_val,
        keep_prob_placeholder_conv: keep_prob_conv_val,
        is_training_placeholder: is_traininig,
    }
    return {"batch": feed_dict, "end_of_file": loaded["end_of_file"]}
def evaluate(dataset, session, operation, inputs_placeholder, labels_placeholder, positions_placeholder,
             keep_prob_placeholder, keep_prob_placeholder_conv,
             is_training_placeholder, model_name, name,
             summary_writer, learning_step, visualize_correct=0, visualize_incorrect=0):
    """Run the evaluation `operation` over the whole `dataset` and report metrics.

    Streams the dataset batch-by-batch until end-of-file with dropout
    disabled (keep probabilities 1) and is_training False, accumulating:
    sequence accuracy (fraction of examples whose whole label sequence is
    correct), character accuracy, and an averaged position error. Metrics
    are written as TensorBoard summaries tagged with `name` at step
    `learning_step`, and printed. Up to `visualize_correct` /
    `visualize_incorrect` example images are saved under
    `<model_name>/<name>_correct` and `<model_name>/<name>_incorrect`.

    `operation` is expected to produce, per batch: (number of fully correct
    sequences, per-example correctness vector, predicted labels, batch
    position error, predicted positions, number of correct characters,
    total number of characters) -- see the unpacking of session.run below.
    """
    visualize = Visualize()
    correct_visualized_counter = 0
    incorrect_visualized_counter = 0
    correct_num = 0
    total_position_error = 0
    number_of_examples = 0
    number_of_characters = 0
    correct_num_characters = 0
    while True:
        # keep probabilities 1 and is_training False -> pure inference
        batch_object = get_batch(dataset, inputs_placeholder, labels_placeholder, positions_placeholder,
                                 keep_prob_placeholder, keep_prob_placeholder_conv, 1, 1,
                                 is_training_placeholder, False)
        if batch_object["end_of_file"]:
            break
        batch = batch_object["batch"]
        number_of_examples += len(batch[inputs_placeholder])
        corrects_in_batch, corrects_vector, predictions, batch_position_error, predicted_positions, total_correct_characters, total_characters = session.run(
            operation, feed_dict=batch)
        correct_num += corrects_in_batch
        total_position_error += batch_position_error
        number_of_characters += total_characters
        correct_num_characters += total_correct_characters

        # visualize correct and incorrect recognitions
        if incorrect_visualized_counter < visualize_incorrect or correct_visualized_counter < visualize_correct:
            for i in range(len(batch[inputs_placeholder])):
                # labels are one-hot per character; argmax recovers the digits
                true_label = np.argmax(batch[labels_placeholder][i], axis=1)
                if correct_visualized_counter < visualize_correct and corrects_vector[i] == True:
                    visualize.visualize_with_correct_label_position(batch[inputs_placeholder][i], predictions[i],
                                                                    true_label,
                                                                    predicted_positions[i],
                                                                    batch[positions_placeholder][i],
                                                                    os.path.join(model_name, name) + "_correct")
                    correct_visualized_counter += 1
                elif incorrect_visualized_counter < visualize_incorrect and corrects_vector[i] == False:
                    visualize.visualize_with_correct_label_position(batch[inputs_placeholder][i], predictions[i],
                                                                    true_label,
                                                                    predicted_positions[i],
                                                                    batch[positions_placeholder][i],
                                                                    os.path.join(model_name, name) + "_incorrect")
                    incorrect_visualized_counter += 1

    sequence_accuracy = correct_num / number_of_examples
    character_accuracy = correct_num_characters / number_of_characters
    # position error is summed once per batch, so divide by the batch count;
    # number_of_examples / 50 assumes a batch size of 50 -- TODO confirm
    position_error = total_position_error / (number_of_examples / 50)

    summary = tf.Summary()
    summary.value.add(tag='Sequence_accuracy_' + name, simple_value=sequence_accuracy)
    summary.value.add(tag='Character_accuracy_' + name, simple_value=character_accuracy)
    summary.value.add(tag='Position_error_' + name, simple_value=position_error)
    summary_writer.add_summary(summary, learning_step)

    print("Number of correct examples: " + str(correct_num) + "/" + str(number_of_examples))
    print("Number of correct characters: " + str(correct_num_characters) + "/" + str(number_of_characters))
    print("Sequence accuracy %.3f" % sequence_accuracy)
    print("Character accuracy %.3f" % character_accuracy)
    print("Position error %.3f" % position_error)
    print()
def calculate_normalization_parameters():
    """Compute the per-pixel mean and standard deviation of the training set.

    Streams the "extratrain" dataset twice in batches of 500: the first
    pass accumulates the pixel-wise sum to obtain the mean, the second
    accumulates squared deviations from that mean to obtain the std. The
    dataset is loaded with an identity normalization (zero mean, unit std)
    so raw pixel values are measured.

    Returns
    -------
    (mean, std) : two np.ndarray of shape (128, 256, 3)
        Per-pixel mean and standard deviation.
    """
    image_shape = (128, 256, 3)
    # identity normalization so the loader returns raw pixel values
    identity_mean = np.zeros(image_shape)
    identity_std = np.ones(image_shape)

    # --- Pass 1: pixel-wise mean ---
    dataset = SVHNDataset("../SVHN_data/extratrain/", "extratrain.json",
                          identity_mean, identity_std)
    pixel_sum = np.zeros(image_shape)  # renamed from `sum` (shadowed builtin)
    number_of_examples = 0
    end_of_file = False
    while not end_of_file:
        loaded = dataset.load(500)
        end_of_file = loaded["end_of_file"]
        examples = loaded["examples"]
        if len(examples) > 0:  # the final load may be empty
            pixel_sum += np.sum(examples, axis=0)
            number_of_examples += len(examples)
        print("Calculated mean of " + str(number_of_examples) + " examples")
    mean = pixel_sum / number_of_examples

    # --- Pass 2: pixel-wise standard deviation around the mean ---
    dataset = SVHNDataset("../SVHN_data/extratrain/", "extratrain.json",
                          identity_mean, identity_std)
    squares = np.zeros(image_shape)
    number_of_calculated = 0
    end_of_file = False
    while not end_of_file:
        loaded = dataset.load(500)
        end_of_file = loaded["end_of_file"]
        examples = loaded["examples"]
        if len(examples) > 0:
            squares += np.sum(np.power(examples - mean, 2), axis=0)
            number_of_calculated += len(examples)
        print("Calculated std of " + str(number_of_calculated) + " examples")
    std = np.sqrt(squares / number_of_examples)

    # Visual sanity check of the statistics (only visible if a figure is
    # shown later); kept from the original implementation.
    plt.imshow(np.around(mean))
    plt.imshow(np.around(std))
    return mean, std
if __name__ == '__main__':
    # Use it at first to calculate mean and std of training dataset, use created pickle file later
    mean, std = calculate_normalization_parameters()
    pickle.dump({"mean": mean, "std": std}, open("mean_std.p", "wb"))
    # mean_std = pickle.load(open("mean_std.p", "rb"))
    # mean = mean_std["mean"]
    # std = mean_std["std"]

    # Load dataset
    train_localization = SVHNDataset("../SVHN_data/extratrain/", "extratrain.json", mean, std)

    with tf.Graph().as_default():
        # Wiring
        # model = DeepLocalizationWeightedLossVariableLength()
        # model = SVHNPaperConvolution()
        # model = SVHNTransferLearning()
        model = SVHNNoMaxpool()
        inputs_placeholder, labels_placeholder, positions_placeholder, keep_prob_placeholder, keep_prob_placeholder_conv, is_training_placeholder = model.input_placeholders()
        logits, predicted_positions, regularization = model.inference(inputs_placeholder, keep_prob_placeholder,
                                                                      keep_prob_placeholder_conv,
                                                                      is_training_placeholder)
        # `loss` is a dict with keys total_loss / logits_loss / positions_loss
        # (see the loss printout below); training minimizes total_loss.
        loss = model.loss(logits, labels_placeholder, predicted_positions, positions_placeholder, regularization)
        training = model.training(loss["total_loss"], 0.0001)
        evaluation = model.evaluation(logits, labels_placeholder, predicted_positions, positions_placeholder)

        # Initialization
        session = tf.InteractiveSession()
        init = tf.global_variables_initializer()
        session.run(init)

        # visualize graph
        writer = tf.summary.FileWriter("visualizations/" + model.get_name())
        writer.add_graph(session.graph)
        loader = tf.train.Saver()

        # Summaries
        merged_summary = tf.summary.merge_all()
        saver = tf.train.Saver(max_to_keep=4)

        # Set RESTORE to True (and continue_from_step accordingly) to resume
        # training from the checkpoint path in `ckpt`.
        RESTORE = False
        ckpt = "checkpoints/SVHN_no_maxpool_reweighting/SVHN_no_maxpool_reweighting-0"
        continue_from_step = 0
        if RESTORE:
            loader.restore(session, ckpt)
            start_step = continue_from_step
        else:
            start_step = 0

        # Training
        steps = 30000
        for step in range(start_step, steps + 1):
            # dropout keep probabilities: 0.5 (dense) and 0.9 (conv)
            batch = get_batch(train_localization, inputs_placeholder, labels_placeholder, positions_placeholder,
                              keep_prob_placeholder, keep_prob_placeholder_conv, 0.5, 0.9,
                              is_training_placeholder, True)["batch"]
            loss_value, summary, _ = session.run([loss, merged_summary, training], feed_dict=batch)
            writer.add_summary(summary, step)
            # Every 1000 steps: print losses, checkpoint, and run evaluation
            if step % 1000 == 0:
                print("Step %d, Total loss %.3f, Character loss %.3f, Position loss %.3f" % (
                    step, loss_value["total_loss"], loss_value["logits_loss"], loss_value["positions_loss"]))

                # Save checkpoint
                # TODO if folder exists
                print("Creating checkpoint")
                try:
                    os.makedirs(os.path.join("checkpoints", model.get_name()))
                except:
                    pass
                saver.save(session, os.path.join("checkpoints", model.get_name(), model.get_name()), global_step=step)

                # Visualize at the end of training
                if step == steps:
                    visualize_correct_count = 100
                    visualize_incorrect_count = 100
                    print("Saving visualizations")
                else:
                    visualize_correct_count = 0
                    visualize_incorrect_count = 0

                print("Train accuracy")
                # a fresh dataset object so evaluation streams from the start
                train_localization_evaluation = SVHNDataset("../SVHN_data/extratrain/", "extratrain.json", mean, std)
                evaluate(train_localization_evaluation, session, evaluation, inputs_placeholder, labels_placeholder,
                         positions_placeholder,
                         keep_prob_placeholder,
                         keep_prob_placeholder_conv,
                         is_training_placeholder, model.get_name(), "train", writer, step,
                         visualize_correct_count,
                         visualize_incorrect_count)
                print("Test accuracy")
                test_localization_evaluation = SVHNDataset("../SVHN_data/test/new/", "test.json", mean, std)
                evaluate(test_localization_evaluation, session, evaluation, inputs_placeholder, labels_placeholder,
                         positions_placeholder,
                         keep_prob_placeholder,
                         keep_prob_placeholder_conv,
                         is_training_placeholder, model.get_name(), "test", writer, step,
                         visualize_correct_count,
                         visualize_incorrect_count)
                print()
|
|
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
import re
import google.appengine.ext.ndb as ndb
import models
# Matches gubernator build-result links (either hostname) and captures the
# build path portion, e.g. "/some-bucket/logs/job-name/123".
XREF_RE = re.compile(r'(?:k8s-gubernator\.appspot\.com|gubernator\.k8s\.io)/build(/[^])\s]+/\d+)')
# Matches the merge robot's metadata comment and captures the approver list.
APPROVERS_RE = re.compile(r'<!-- META={"?approvers"?:\[([^]]*)\]} -->')
def classify_issue(repo, number):
    """
    Classify an issue in a repo based on events in Datastore.

    Args:
        repo: string
        number: int
    Returns:
        is_pr: bool
        is_open: bool
        involved: list of strings representing usernames involved
        payload: a dict, see full description for classify below.
        last_event_timestamp: the timestamp of the most recent event.
    """
    ancestor = models.GithubResource.make_key(repo, number)
    logging.info('finding webhooks for %s %s', repo, number)
    # Fetch keys only; the bodies are loaded in batches by events_iterator().
    event_keys = list(models.GithubWebhookRaw.query(ancestor=ancestor)
        .order(models.GithubWebhookRaw.timestamp)
        .fetch(keys_only=True))
    logging.info('classifying %s %s (%d events)', repo, number, len(event_keys))
    # Single-element list so the nested closures below can mutate it
    # (Python 2 has no `nonlocal`).
    last_event_timestamp = [datetime.datetime(2000, 1, 1)]
    def events_iterator():
        # Yield events in batches of 100 to bound memory usage, tracking the
        # most recent event timestamp seen as a side effect.
        for x in xrange(0, len(event_keys), 100):
            events = ndb.get_multi(event_keys[x:x+100])
            for event in events:
                last_event_timestamp[0] = max(last_event_timestamp[0], event.timestamp)
            yield [event.to_tuple() for event in events]
    def get_status_for(sha):
        # Map status context -> [state, target_url, description] for a SHA,
        # also folding status update times into last_event_timestamp.
        statuses = {}
        for status in models.GHStatus.query_for_sha(repo, sha):
            last_event_timestamp[0] = max(last_event_timestamp[0], status.updated_at)
            statuses[status.context] = [
                status.state, status.target_url, status.description]
        return statuses
    classified = classify_from_iterator(events_iterator(), status_fetcher=get_status_for)
    return list(classified) + last_event_timestamp
def get_merged(events, merged=None):
    """
    Fold a series of events into the most up-to-date view of the issue.

    Note that different events have different levels of detail -- comments
    don't include head SHA information, pull request events don't have label
    information, etc.

    Args:
        events: a list of (event_type str, event_body dict, timestamp).
        merged: the result of a previous invocation, or None.
    Returns:
        body: a dict representing the issue's latest state.
    """
    merged = merged or {}
    for _kind, body, _ts in events:
        # Later events overwrite earlier fields; both keys may be present.
        for key in ('issue', 'pull_request'):
            if key in body:
                merged.update(body[key])
    return merged
def get_labels(events, labels=None):
    """
    Determine the labels applied to an issue.

    Args:
        events: a list of (event_type str, event_body dict, timestamp).
        labels: the result of a previous invocation, or None.
    Returns:
        labels: the currently applied labels as {label_name: label_color}
    """
    current = labels or {}
    for kind, body, _ts in events:
        if 'issue' in body:
            # Issue payloads carry the complete label set, so rebuild it.
            current = {lab['name']: lab['color'] for lab in body['issue']['labels']}
        if kind != 'pull_request':
            continue
        # Pull request payloads do NOT include the full label set, so the
        # individual labeled/unlabeled actions are tracked explicitly here.
        action = body.get('action')
        try:
            if action in ('labeled', 'unlabeled') and 'label' not in body:
                logging.warning('label event with no labels (multiple changes?)')
            elif action == 'labeled':
                name = body['label']['name']
                if name not in current:
                    current[name] = body['label']['color']
            elif action == 'unlabeled':
                current.pop(body['label']['name'], None)
        except:
            logging.exception('??? %r', body)
            raise
    return current
def get_skip_comments(events, skip_users=None):
    """
    Determine comment ids that should be ignored, either because of
    deletion or because the user should be skipped.

    Args:
        events: a list of (event_type str, event_body dict, timestamp).
        skip_users: usernames whose comments should be ignored, or None.
    Returns:
        comment_ids: a set of comment ids that were deleted or made by
            users that should be skipped.
    """
    ignore = skip_users or []
    skipped = set()
    for kind, body, _ts in events:
        if kind not in ('issue_comment', 'pull_request_review_comment'):
            continue
        cid = body['comment']['id']
        if body.get('action') == 'deleted' or body['sender']['login'] in ignore:
            skipped.add(cid)
    return skipped
def classify(events, status_fetcher=None):
    """
    Given an event-stream for an issue and status-getter, process
    the events and determine what action should be taken, if any.

    Args: One of:
        events: a list of (event_type str, event_body dict, timestamp).
        events_iterator: an iterable yielding successive events lists
        status_fetcher: a function that returns statuses for the given SHA.
    Returns:
        is_pr: bool
        is_open: bool
        involved: list of strings representing usernames involved
        payload: a dictionary of additional information, including:
            {
                'author': str author_name,
                'title': str issue title,
                'labels': {label_name: label_color},
                'attn': {user_name: reason},
                'mergeable': bool,
                'comments': [{'user': str name, 'comment': comment, 'timestamp': str iso8601}],
                'xrefs': list of builds referenced (by GCS path),
            }
    """
    # Each accumulator below consumes the full event list independently.
    return _classify_internal(
        get_merged(events),
        get_labels(events),
        get_comments(events),
        get_reviewers(events),
        distill_events(events),
        status_fetcher)
def classify_from_iterator(events_iterator, status_fetcher=None):
    """Like classify(), but process batches of events from an iterator."""
    # Each accumulator threads its previous result into the next batch.
    merged, labels, comments, reviewers, distilled = None, None, None, None, None
    for batch in events_iterator:
        merged = get_merged(batch, merged)
        labels = get_labels(batch, labels)
        comments = get_comments(batch, comments)
        reviewers = get_reviewers(batch, reviewers)
        distilled = distill_events(batch, distilled)
    return _classify_internal(
        merged, labels, comments, reviewers, distilled, status_fetcher)
def _classify_internal(merged, labels, comments, reviewers, distilled_events, status_fetcher):
    """Combine the accumulated views of an issue into the final
    (is_pr, is_open, involved, payload) tuple; see classify() for details."""
    approvers = get_approvers(comments)
    is_pr = 'head' in merged or 'pull_request' in merged
    is_open = merged['state'] != 'closed'
    author = merged['user']['login']
    # Requested/completed reviewers count as assignees for attention purposes.
    assignees = sorted({assignee['login'] for assignee in merged['assignees']} | reviewers)
    involved = sorted(u.lower() for u in set([author] + assignees + approvers))
    payload = {
        'author': author,
        'assignees': assignees,
        'title': merged['title'],
        'labels': labels,
        'xrefs': get_xrefs(comments, merged),
    }
    if is_pr:
        if is_open:
            # NOTE(review): 'mergeable' is compared to the *string* 'false';
            # presumably upstream stores it serialized that way -- confirm.
            payload['needs_rebase'] = 'needs-rebase' in labels or merged.get('mergeable') == 'false'
        payload['additions'] = merged.get('additions', 0)
        payload['deletions'] = merged.get('deletions', 0)
        if 'head' in merged:
            payload['head'] = merged['head']['sha']
    if approvers:
        payload['approvers'] = approvers
    # Statuses are only fetchable once we know the head SHA.
    if status_fetcher and 'head' in payload:
        payload['status'] = status_fetcher(payload['head'])
    if merged.get('milestone'):
        payload['milestone'] = merged['milestone']['title']
    # attn depends on the payload fields set above (status, approvers, ...).
    payload['attn'] = calculate_attention(distilled_events, payload)
    return is_pr, is_open, involved, payload
def get_xrefs(comments, merged):
    """Collect gubernator build links found in the issue body and comments,
    returned as a sorted list of build paths."""
    found = set(XREF_RE.findall(merged.get('body') or ''))
    for comment in comments:
        found.update(XREF_RE.findall(comment['comment']))
    return sorted(found)
def get_comments(events, comments=None):
    """
    Pick comments and pull-request review comments out of a list of events.

    Args:
        events: a list of (event_type str, event_body dict, timestamp).
        comments: the previous output of this function, used to seed the
            result when events are processed in batches.
    Returns:
        comments: a list of dict(author=..., comment=..., timestamp=...),
            ordered with the earliest comment first.
    """
    if not comments:
        comments = {}
    else:
        # Re-key the previous output by comment id so edits/deletions in this
        # batch can find comments from earlier batches.
        comments = {c['id']: c for c in comments}
    # BUG FIX: the previous version reset `comments = {}` here, discarding the
    # re-keyed previous output and breaking classify_from_iterator() batching.
    for event, body, _timestamp in events:
        action = body.get('action')
        if event in ('issue_comment', 'pull_request_review_comment'):
            comment_id = body['comment']['id']
            if action == 'deleted':
                comments.pop(comment_id, None)
            else:
                # Created or edited: (re)store the latest version.
                c = body['comment']
                comments[comment_id] = {
                    'author': c['user']['login'],
                    'comment': c['body'],
                    'timestamp': c['created_at'],
                    'id': c['id'],
                }
    return sorted(comments.values(), key=lambda c: c['timestamp'])
def get_reviewers(events, reviewers=None):
    """
    Return the set of users that have a code review requested or completed.
    """
    result = reviewers or set()
    for kind, body, _ts in events:
        action = body.get('action')
        if kind == 'pull_request_review':
            # A submitted review marks its sender as a reviewer.
            if action == 'submitted':
                result.add(body['sender']['login'])
        elif kind == 'pull_request':
            if action == 'review_requested':
                if 'requested_reviewer' not in body:
                    logging.warning('no reviewer present -- self-review?')
                    continue
                result.add(body['requested_reviewer']['login'])
            elif action == 'review_request_removed':
                result -= {body['requested_reviewer']['login']}
    return result
def get_approvers(comments):
    """
    Return approvers requested in comments.

    This MUST be kept in sync with mungegithub's getGubernatorMetadata().
    """
    approvers = []
    # Only the merge robot's metadata comments carry an approver list;
    # the last matching comment wins.
    for comment in comments:
        if comment['author'] != 'k8s-merge-robot':
            continue
        match = APPROVERS_RE.search(comment['comment'])
        if match:
            approvers = match.group(1).replace('"', '').split(',')
    return approvers
def distill_events(events, distilled_events=None):
    """
    Given a sequence of events, return a series of user-action tuples
    relevant to determining user state.
    """
    # Comments from these accounts (and deleted comments) are ignored.
    bots = [
        'google-oss-robot',
        'istio-testing',
        'k8s-bot',
        'k8s-ci-robot',
        'k8s-merge-robot',
        'k8s-oncall',
        'k8s-reviewable',
    ]
    skip_comments = get_skip_comments(events, bots)
    output = distilled_events or []
    for kind, body, timestamp in events:
        action = body.get('action')
        user = body.get('sender', {}).get('login')
        if kind in ('issue_comment', 'pull_request_review_comment'):
            if body['comment']['id'] not in skip_comments and action == 'created':
                output.append(('comment', user, timestamp))
        elif kind == 'pull_request_review':
            # A submitted review is morally equivalent to a comment.
            if action == 'submitted':
                output.append(('comment', user, timestamp))
        elif kind == 'pull_request':
            if action in ('opened', 'reopened', 'synchronize'):
                output.append(('push', user, timestamp))
            if action == 'labeled' and 'label' in body:
                output.append(('label ' + body['label']['name'].lower(), user, timestamp))
    return output
def evaluate_fsm(events, start, transitions):
    """
    Run a finite-state machine over a series of event tuples.

    transitions is a list of tuples
        (state_before str or None, state_after str, condition str or callable)

    A transition fires when condition equals the action (as a str), or when
    condition(action, user) is True; a state_before of None matches any state.

    Returns the resulting state, the time it first entered that state, and the
    last time the state would be entered (self-transitions are allowed).
    """
    state = start
    entered = 0   # time that we entered this state
    last = 0      # time of last transition into this state
    for action, user, timestamp in events:
        for before, after, cond in transitions:
            if before is not None and before != state:
                continue
            if not (cond == action or (callable(cond) and cond(action, user))):
                continue
            if after != state:
                entered = timestamp
            state = after
            last = timestamp
            break  # only the first matching transition fires per event
    return state, entered, last
def get_author_state(author, distilled_events):
    """
    Determine the state of the author given a series of distilled events.
    """
    # Someone else's comment puts the author on the hook; the author's own
    # push or reply returns them to 'waiting'.
    transitions = [
        # before, after, condition
        (None, 'address comments', lambda a, u: a == 'comment' and u != author),
        ('address comments', 'waiting', 'push'),
        ('address comments', 'waiting', lambda a, u: a == 'comment' and u == author),
    ]
    return evaluate_fsm(distilled_events, start='waiting', transitions=transitions)
def get_assignee_state(assignee, author, distilled_events):
    """
    Determine the state of an assignee given a series of distilled events.
    """
    # The assignee's comment or lgtm clears them; a push or the author's
    # comment puts the review back in their court.
    transitions = [
        # before, after, condition
        ('needs review', 'waiting', lambda a, u: u == assignee and a in ('comment', 'label lgtm')),
        (None, 'needs review', 'push'),
        (None, 'needs review', lambda a, u: a == 'comment' and u == author),
    ]
    return evaluate_fsm(distilled_events, start='needs review', transitions=transitions)
def calculate_attention(distilled_events, payload):
    """
    Given information about an issue, determine who should look at it.

    It can include start and last update time for various states --
    "address comments#123#456" means that something has been in 'address comments' since
    123, and there was some other event that put it in 'address comments' at 456.
    """
    author = payload['author']
    assignees = payload['assignees']

    attn = {}
    def notify(to, reason):
        # Later notify() calls overwrite earlier reasons for the same user,
        # so the ordering below establishes reason precedence.
        attn[to] = reason

    if any(state == 'failure' for state, _url, _desc
           in payload.get('status', {}).values()):
        notify(author, 'fix tests')

    for approver in payload.get('approvers', []):
        notify(approver, 'needs approval')

    for assignee in assignees:
        assignee_state, first, last = get_assignee_state(assignee, author, distilled_events)
        if assignee_state != 'waiting':
            notify(assignee, '%s#%s#%s' % (assignee_state, first, last))

    author_state, first, last = get_author_state(author, distilled_events)
    if author_state != 'waiting':
        notify(author, '%s#%s#%s' % (author_state, first, last))

    if payload.get('needs_rebase'):
        notify(author, 'needs rebase')
    if 'do-not-merge/release-note-label-needed' in payload['labels']:
        notify(author, 'needs release-note label')

    return attn
|
|
# -*- encoding: utf-8 -*-
"""API/Interface to the SQLAlchemy backend.
"""
import copy
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import session as db_session
from oslo_log import log
from sqlalchemy.orm import exc as sqla_exc
from ironic_inventory.common import exceptions
from ironic_inventory.db import api
from ironic_inventory.db.sqlalchemy import models
CONF = cfg.CONF  # Global oslo.config configuration object.
LOG = log.getLogger(__name__)
# Lazily-created EngineFacade singleton; see _create_facade_lazily().
_FACADE = None
def _create_facade_lazily():
    """Create the module-wide EngineFacade on first use and reuse it after."""
    global _FACADE
    if _FACADE is not None:
        return _FACADE
    _FACADE = db_session.EngineFacade.from_config(CONF)
    return _FACADE
def get_engine():
    """Return the SQLAlchemy engine from the shared facade."""
    return _create_facade_lazily().get_engine()
def get_session(**kwargs):
    """Return a new SQLAlchemy session from the shared facade."""
    return _create_facade_lazily().get_session(**kwargs)
def get_backend():
    """Return the SQLAlchemy backend implementation.

    :return: a Connection instance.
    """
    return Connection()
class Connection(api.Connection):
    """SQLAlchemy implementation of the server-inventory DB API."""

    def __init__(self):
        pass

    def _get_servers_query(self, kwargs):
        """Build a query for free servers matching the given filters.

        Only servers with no reservation and not deployed can match.
        """
        session = get_session()
        filters = copy.copy(kwargs)
        filters['reservation_id'] = None
        filters['deployed'] = False
        query = session.query(models.Server).filter_by(**filters)
        return query

    def _delete_reservation(self, reservation_id, server_uuid):
        """Delete the reservation row with the given id.

        :param reservation_id: id of the reservation to delete.
        :param server_uuid: uuid of the affected server (logging only).
        """
        session = get_session()
        with session.begin():
            query = session.query(models.Reservation).filter_by(id=reservation_id)
            try:
                reservation = query.one()
            except sqla_exc.NoResultFound:
                # HACK(caustin): For now, swallow this exception.
                # in the very near future roll back the deployment of the
                # server raise and error .
                LOG.warn('Reservation for server being %(uuid)s deployed was not '
                         'found.', {'uuid': server_uuid})
                # BUG FIX: bail out here; the original fell through and
                # referenced the unbound `reservation` name (NameError).
                return
            session.delete(reservation)

    def add_server(self, **kwargs):
        """Adds a provisional server to the inventory.

        :param name: The Server's name or id.
        :param cpu_count: The number of CPUs in the server.
        :param chassis_drive_capacity: The drive capacity of the server's chassis.
        :param psu_capacity: The server's power supply capicity.
        :param chassis_size: The size of the server's chassis.
        :param memory: The server's memory in MB.
        :param local_drive_size: The size in GB of the local drive.
        :param driver_name: The name of the Ironic provisioning driver.
        :param deploy_kernel: The UUID of the deploy kernel.
        :param deploy_ramdisk: The UUID of the deploy ramdisk.
        :param ipmi_address: The IP Address of the IPMI interface.
        :param ipmi_password: The Password for the IPMI user.
        :param impi_username: The User ID / name of the IPMI user.
        :param impi_priv_level: The IPMI Privilege Level of the user.
        :param ipmi_mac_address: The MAC Address of the IPMI interface.
        :param cpu_arch: The CPU Architecture. Defaults to 'x86_64'
        """
        server = models.Server()
        server.update(kwargs)
        try:
            server.save()
        except db_exc.DBDuplicateEntry as exc:
            # Map the offending unique column to a specific exception.
            if 'ipmi_mac_address' in exc.columns:
                raise exceptions.ExistingMACAddress(
                    address=kwargs['ipmi_mac_address'])
            if 'name' in exc.columns:
                raise exceptions.ExistingServerName(name=kwargs['name'])
            else:
                raise exceptions.ExistingServer()
        return server

    def remove_server(self, uuid):
        """Remove a server from the inventory pool.

        :param uuid: The server's uuid.
        :raises ServerNotFound: if no server has the given uuid.
        :raises ServerReserved: if the server has an active reservation.
        """
        session = get_session()
        with session.begin():
            query = session.query(models.Server).filter_by(uuid=uuid)
            try:
                server = query.one()
            except sqla_exc.NoResultFound:
                raise exceptions.ServerNotFound(server_uuid=uuid)
            if server.reservation_id:
                # Don't delete servers with an existing reservation.
                raise exceptions.ServerReserved()
            query.delete()

    def get_all_servers(self):
        """Get all servers as a list.
        """
        session = get_session()
        return session.query(models.Server).all()

    def get_matching_servers(self, **kwargs):
        """Return a list of servers that match the search parameters.

        Each matching server is reserved before being returned.

        :param cpu_count: The number of CPUs in the server.
        :param chassis_drive_capacity: The drive capacity of the server's chassis.
        :param psu_capacity: The server's power supply capicity.
        :param chassis_size: The size of the server's chassis.
        :param memory: The server's memory in MB.
        :param local_drive_size: The size in GB of the local drive.
        :param cpu_arch: The CPU Architecture. Defaults to 'x86_64'
        :return: list of reserved servers (empty when nothing matches).
        """
        try:
            query = self._get_servers_query(kwargs)
            servers = query.all()
            for server in servers:
                self.reserve_server(server)
        except sqla_exc.NoResultFound:
            # Note(caustin): For now, I am considering the case where no match is
            # found to not be an exception. So, just return None.
            # (query.all() returns [] rather than raising, so this path is
            # effectively defensive.)
            return None
        return servers

    def get_single_server_match(self, **kwargs):
        """Return a single reserved server that matches the search parameters.

        :param cpu_count: The number of CPUs in the server.
        :param chassis_drive_capacity: The drive capacity of the server's chassis.
        :param psu_capacity: The server's power supply capicity.
        :param chassis_size: The size of the server's chassis.
        :param memory: The server's memory in MB.
        :param local_drive_size: The size in GB of the local drive.
        :param cpu_arch: The CPU Architecture. Defaults to 'x86_64'
        :return: a reserved server, or None when nothing matches.
        """
        try:
            query = self._get_servers_query(kwargs)
            server = query.first()
            if server is None:
                # BUG FIX: query.first() returns None instead of raising
                # NoResultFound; guard before reserving to avoid an
                # AttributeError on reserve_server(None).
                return None
            self.reserve_server(server)
        except sqla_exc.NoResultFound:
            # Note(caustin): For now, I consider the case where no server meeting
            # the critera is found to be non-exceptional. So, returning None in
            # this case.
            return None
        return server

    def get_server_by_uuid(self, server_id):
        """Get a server by it's uuid

        :param server_id: The server's uuid
        :raises ServerNotFound: if no server has the given uuid.
        """
        session = get_session()
        query = session.query(models.Server).filter_by(uuid=server_id)
        try:
            return query.one()
        except sqla_exc.NoResultFound:
            raise exceptions.ServerNotFound(uuid=server_id)

    def get_server_by_name(self, server_name):
        """Get a server by it's name.

        :param server_name: The server's unique name.
        :raises ServerNotFound: if no server has the given name.
        """
        session = get_session()
        query = session.query(models.Server).filter_by(name=server_name)
        try:
            return query.one()
        except sqla_exc.NoResultFound:
            raise exceptions.ServerNotFound(name=server_name)

    def update_server(self, server_uuid, **kwargs):
        """Update an unreserved server's attributes.

        :param server_uuid: The server's uuid.
        :param kwargs: Column values to update.
        :raises ServerNotFound: if no server has the given uuid.
        :raises ServerReserved: if the server has an active reservation.
        """
        session = get_session()
        with session.begin():
            query = session.query(models.Server).filter_by(uuid=server_uuid)
            try:
                # TODO (caustin): 'with_lockmode' has been superseded by
                # with_for_update in SQLAlchemy. Update and test when possible.
                server = query.with_lockmode('update').one()
            except sqla_exc.NoResultFound:
                raise exceptions.ServerNotFound(uuid=server_uuid)
            if server.reservation_id:
                # We probably shouldn't update a server that has an existing
                # reservation in place.
                raise exceptions.ServerReserved()
            server.update(kwargs)
        return server

    def reserve_server(self, server_instance):
        """Create a reservation for a server.

        :param server_instance: A server object.
        :raises ServerReserved: if the server is already reserved.
        """
        if server_instance.reservation_id:
            raise exceptions.ServerReserved(server_uuid=server_instance.uuid)
        reservation = models.Reservation()
        reservation.save()
        server_instance.update({'reservation_id': reservation.id})
        server_instance.save()
        return server_instance

    def cancel_reservation(self, server_uuid):
        """Cancel a reservation for a server.

        :param server_uuid: The server's uuid.
        :raises ServerNotReserved: if the server has no reservation.
        """
        # BUG FIX: the original passed `self` explicitly to its own bound
        # methods (TypeError) and routed the update through update_server(),
        # which refuses to touch reserved servers.
        server = self.get_server_by_uuid(server_uuid)
        reservation_id = server.reservation_id
        if not reservation_id:
            raise exceptions.ServerNotReserved(server_uuid=server_uuid)
        # Clear the reservation directly (same pattern as reserve_server).
        server.update({'reservation_id': None})
        server.save()
        self._delete_reservation(reservation_id, server_uuid)
        return server

    def deploy_server(self, server_uuid, *args, **kwargs):
        """Mark a reserved server as deployed by an ironic node.

        :param server_uuid: The server's uuid.
        :raises ServerNotReserved: if the server has no reservation.
        :return: the updated server.
        """
        server = self.get_server_by_uuid(server_uuid)
        reservation_id = server.reservation_id
        # BUG FIX: the original raised ServerNotReserved when the server *was*
        # reserved; deployment requires an existing reservation (servers are
        # handed out already reserved by get_matching_servers()).
        if not reservation_id:
            raise exceptions.ServerNotReserved(server_uuid=server_uuid)
        # Clear the reservation and flag as deployed directly; update_server()
        # refuses to modify reserved servers.
        server.update({'reservation_id': None, 'deployed': True})
        server.save()
        self._delete_reservation(reservation_id, server_uuid)
        return server

    def return_server_to_pool(self, server_uuid, *args, **kwargs):
        """Returns a previously deployed server to the pool of available servers.

        :param server_uuid: The server's uuid.
        :raises ServerNotFound: if no server has the given uuid.
        :raises ServerNotDeployed: if the server is not currently deployed.
        :return: the updated server.
        """
        session = get_session()
        with session.begin():
            query = session.query(models.Server).filter_by(uuid=server_uuid)
            try:
                server = query.with_lockmode('update').one()
            except sqla_exc.NoResultFound:
                raise exceptions.ServerNotFound(uuid=server_uuid)
            if not server.deployed:
                raise exceptions.ServerNotDeployed(uuid=server_uuid)
            server.update({'deployed': False})
        return server
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for reader Datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.compat import compat
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import convert
from tensorflow.python.data.util import structure
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.util.tf_export import tf_export
# TODO(b/64974358): Increase default buffer size to 256 MB.
# Default read-ahead used by the reader ops below: 256 * 1024 bytes.
_DEFAULT_READER_BUFFER_SIZE_BYTES = 256 * 1024 # 256 KB
@tf_export("data.TextLineDataset", v1=[])
class TextLineDatasetV2(dataset_ops.DatasetSource):
  """A `Dataset` comprising lines from one or more text files."""

  def __init__(self, filenames, compression_type=None, buffer_size=None):
    """Creates a `TextLineDataset`.

    Args:
      filenames: A `tf.string` tensor containing one or more filenames.
      compression_type: (Optional.) A `tf.string` scalar evaluating to one of
        `""` (no compression), `"ZLIB"`, or `"GZIP"`.
      buffer_size: (Optional.) A `tf.int64` scalar denoting the number of bytes
        to buffer. A value of 0 results in the default buffering values chosen
        based on the compression type.
    """
    self._filenames = ops.convert_to_tensor(
        filenames, dtype=dtypes.string, name="filenames")
    # Omitted optional params are converted to default-valued scalar tensors.
    self._compression_type = convert.optional_param_to_tensor(
        "compression_type",
        compression_type,
        argument_default="",
        argument_dtype=dtypes.string)
    self._buffer_size = convert.optional_param_to_tensor(
        "buffer_size", buffer_size, _DEFAULT_READER_BUFFER_SIZE_BYTES)
    variant_tensor = gen_dataset_ops.text_line_dataset(
        self._filenames, self._compression_type, self._buffer_size)
    super(TextLineDatasetV2, self).__init__(variant_tensor)

  @property
  def _element_structure(self):
    # Each element is a scalar string (one line of text).
    return structure.TensorStructure(dtypes.string, [])
@tf_export(v1=["data.TextLineDataset"])
class TextLineDatasetV1(dataset_ops.DatasetV1Adapter):
  """A `Dataset` comprising lines from one or more text files."""

  def __init__(self, filenames, compression_type=None, buffer_size=None):
    # Thin V1 adapter: all logic lives in the wrapped V2 dataset.
    wrapped = TextLineDatasetV2(filenames, compression_type, buffer_size)
    super(TextLineDatasetV1, self).__init__(wrapped)
  __init__.__doc__ = TextLineDatasetV2.__init__.__doc__

  @property
  def _filenames(self):
    # Forward attribute access to the wrapped V2 dataset.
    return self._dataset._filenames  # pylint: disable=protected-access

  @_filenames.setter
  def _filenames(self, value):
    self._dataset._filenames = value  # pylint: disable=protected-access
class _TFRecordDataset(dataset_ops.DatasetSource):
  """A `Dataset` comprising records from one or more TFRecord files."""

  def __init__(self, filenames, compression_type=None, buffer_size=None):
    """Creates a `TFRecordDataset`.

    Args:
      filenames: A `tf.string` tensor containing one or more filenames.
      compression_type: (Optional.) A `tf.string` scalar evaluating to one of
        `""` (no compression), `"ZLIB"`, or `"GZIP"`.
      buffer_size: (Optional.) A `tf.int64` scalar representing the number of
        bytes in the read buffer. 0 means no buffering.
    """
    # Force the type to string even if filenames is an empty list.
    self._filenames = ops.convert_to_tensor(
        filenames, dtypes.string, name="filenames")
    self._compression_type = convert.optional_param_to_tensor(
        "compression_type",
        compression_type,
        argument_default="",
        argument_dtype=dtypes.string)
    self._buffer_size = convert.optional_param_to_tensor(
        "buffer_size",
        buffer_size,
        argument_default=_DEFAULT_READER_BUFFER_SIZE_BYTES)
    variant_tensor = gen_dataset_ops.tf_record_dataset(
        self._filenames, self._compression_type, self._buffer_size)
    super(_TFRecordDataset, self).__init__(variant_tensor)

  @property
  def _element_structure(self):
    # Each element is a scalar string (one serialized record).
    return structure.TensorStructure(dtypes.string, [])
@tf_export("data.TFRecordDataset", v1=[])
class TFRecordDatasetV2(dataset_ops.DatasetV2):
  """A `Dataset` comprising records from one or more TFRecord files."""

  def __init__(self, filenames, compression_type=None, buffer_size=None,
               num_parallel_reads=None):
    """Creates a `TFRecordDataset` to read one or more TFRecord files.

    NOTE: The `num_parallel_reads` argument can be used to improve performance
    when reading from a remote filesystem.

    Args:
      filenames: A `tf.string` tensor or `tf.data.Dataset` containing one or
        more filenames.
      compression_type: (Optional.) A `tf.string` scalar evaluating to one of
        `""` (no compression), `"ZLIB"`, or `"GZIP"`.
      buffer_size: (Optional.) A `tf.int64` scalar representing the number of
        bytes in the read buffer. 0 means no buffering.
      num_parallel_reads: (Optional.) A `tf.int64` scalar representing the
        number of files to read in parallel. Defaults to reading files
        sequentially.

    Raises:
      TypeError: If any argument does not have the expected type.
      ValueError: If any argument does not have the expected shape.
    """
    if isinstance(filenames, dataset_ops.DatasetV2):
      # A dataset of filenames must yield scalar strings.
      if dataset_ops.get_legacy_output_types(filenames) != dtypes.string:
        raise TypeError(
            "`filenames` must be a `tf.data.Dataset` of `tf.string` elements.")
      if not dataset_ops.get_legacy_output_shapes(filenames).is_compatible_with(
          tensor_shape.scalar()):
        raise ValueError(
            "`filenames` must be a `tf.data.Dataset` of scalar `tf.string` "
            "elements.")
    else:
      # Normalize a plain tensor/list into a dataset of scalar filenames.
      filenames = ops.convert_to_tensor(filenames, dtype=dtypes.string)
      filenames = array_ops.reshape(filenames, [-1], name="flat_filenames")
      filenames = dataset_ops.DatasetV2.from_tensor_slices(filenames)
    self._filenames = filenames
    self._compression_type = compression_type
    self._buffer_size = buffer_size
    self._num_parallel_reads = num_parallel_reads
    def read_one_file(filename):
      # Each filename becomes its own single-file record dataset.
      return _TFRecordDataset(filename, compression_type, buffer_size)
    if num_parallel_reads is None:
      self._impl = filenames.flat_map(read_one_file)
    else:
      # Interleave per-file datasets so file reads overlap.
      self._impl = filenames.interleave(
          read_one_file,
          cycle_length=num_parallel_reads,
          num_parallel_calls=num_parallel_reads)
    variant_tensor = self._impl._variant_tensor  # pylint: disable=protected-access
    super(TFRecordDatasetV2, self).__init__(variant_tensor)

  def _clone(self,
             filenames=None,
             compression_type=None,
             buffer_size=None,
             num_parallel_reads=None):
    # Build a copy, overriding only the explicitly supplied arguments.
    return TFRecordDatasetV2(filenames or self._filenames,
                             compression_type or self._compression_type,
                             buffer_size or self._buffer_size,
                             num_parallel_reads or self._num_parallel_reads)

  def _inputs(self):
    return self._impl._inputs()  # pylint: disable=protected-access

  @property
  def _element_structure(self):
    # Each element is a scalar string (one serialized record).
    return structure.TensorStructure(dtypes.string, [])
@tf_export(v1=["data.TFRecordDataset"])
class TFRecordDatasetV1(dataset_ops.DatasetV1Adapter):
  """A `Dataset` comprising records from one or more TFRecord files."""

  def __init__(self, filenames, compression_type=None, buffer_size=None,
               num_parallel_reads=None):
    # Thin V1 adapter: all logic lives in the wrapped V2 dataset.
    wrapped = TFRecordDatasetV2(
        filenames, compression_type, buffer_size, num_parallel_reads)
    super(TFRecordDatasetV1, self).__init__(wrapped)
  __init__.__doc__ = TFRecordDatasetV2.__init__.__doc__

  def _clone(self,
             filenames=None,
             compression_type=None,
             buffer_size=None,
             num_parallel_reads=None):
    # Build a copy, overriding only the explicitly supplied arguments.
    # pylint: disable=protected-access
    return TFRecordDatasetV1(
        filenames or self._dataset._filenames,
        compression_type or self._dataset._compression_type,
        buffer_size or self._dataset._buffer_size,
        num_parallel_reads or self._dataset._num_parallel_reads)

  @property
  def _filenames(self):
    # Forward attribute access to the wrapped V2 dataset.
    return self._dataset._filenames  # pylint: disable=protected-access

  @_filenames.setter
  def _filenames(self, value):
    self._dataset._filenames = value  # pylint: disable=protected-access
@tf_export("data.FixedLengthRecordDataset", v1=[])
class FixedLengthRecordDatasetV2(dataset_ops.DatasetSource):
  """A `Dataset` of fixed-length records from one or more binary files."""

  def __init__(self,
               filenames,
               record_bytes,
               header_bytes=None,
               footer_bytes=None,
               buffer_size=None,
               compression_type=None):
    """Creates a `FixedLengthRecordDataset`.

    Args:
      filenames: A `tf.string` tensor containing one or more filenames.
      record_bytes: A `tf.int64` scalar representing the number of bytes in
        each record.
      header_bytes: (Optional.) A `tf.int64` scalar representing the number of
        bytes to skip at the start of a file.
      footer_bytes: (Optional.) A `tf.int64` scalar representing the number of
        bytes to ignore at the end of a file.
      buffer_size: (Optional.) A `tf.int64` scalar representing the number of
        bytes to buffer when reading.
      compression_type: (Optional.) A `tf.string` scalar evaluating to one of
        `""` (no compression), `"ZLIB"`, or `"GZIP"`.
    """
    self._filenames = ops.convert_to_tensor(
        filenames, dtype=dtypes.string, name="filenames")
    self._record_bytes = ops.convert_to_tensor(
        record_bytes, dtype=dtypes.int64, name="record_bytes")
    self._header_bytes = convert.optional_param_to_tensor(
        "header_bytes", header_bytes)
    self._footer_bytes = convert.optional_param_to_tensor(
        "footer_bytes", footer_bytes)
    self._buffer_size = convert.optional_param_to_tensor(
        "buffer_size", buffer_size, _DEFAULT_READER_BUFFER_SIZE_BYTES)
    self._compression_type = convert.optional_param_to_tensor(
        "compression_type",
        compression_type,
        argument_default="",
        argument_dtype=dtypes.string)
    # BUG FIX: select the v2 kernel based on the *argument*, not on
    # `self._compression_type`: optional_param_to_tensor() always returns a
    # tensor, so `self._compression_type is not None` was always True and the
    # forward-compatibility window was never honored.
    if (compression_type is not None or
        compat.forward_compatible(2018, 11, 30)):
      variant_tensor = gen_dataset_ops.fixed_length_record_dataset_v2(
          self._filenames, self._header_bytes, self._record_bytes,
          self._footer_bytes, self._buffer_size, self._compression_type)
    else:
      # Older kernel without compression support, for graph compatibility.
      variant_tensor = gen_dataset_ops.fixed_length_record_dataset(
          self._filenames, self._header_bytes, self._record_bytes,
          self._footer_bytes, self._buffer_size)
    super(FixedLengthRecordDatasetV2, self).__init__(variant_tensor)

  @property
  def _element_structure(self):
    # Each element is a scalar string (one fixed-length record).
    return structure.TensorStructure(dtypes.string, [])
@tf_export(v1=["data.FixedLengthRecordDataset"])
class FixedLengthRecordDatasetV1(dataset_ops.DatasetV1Adapter):
  """A `Dataset` of fixed-length records from one or more binary files."""

  def __init__(self,
               filenames,
               record_bytes,
               header_bytes=None,
               footer_bytes=None,
               buffer_size=None,
               compression_type=None):
    # Thin V1 adapter: all logic lives in the wrapped V2 dataset.
    wrapped = FixedLengthRecordDatasetV2(
        filenames, record_bytes, header_bytes, footer_bytes, buffer_size,
        compression_type)
    super(FixedLengthRecordDatasetV1, self).__init__(wrapped)
  __init__.__doc__ = FixedLengthRecordDatasetV2.__init__.__doc__

  @property
  def _filenames(self):
    # Forward attribute access to the wrapped V2 dataset.
    return self._dataset._filenames  # pylint: disable=protected-access

  @_filenames.setter
  def _filenames(self, value):
    self._dataset._filenames = value  # pylint: disable=protected-access
# TODO(b/119044825): Until all `tf.data` unit tests are converted to V2, keep
# these aliases in place.
# The unsuffixed public names currently resolve to the V1 adapter classes.
FixedLengthRecordDataset = FixedLengthRecordDatasetV1
TFRecordDataset = TFRecordDatasetV1
TextLineDataset = TextLineDatasetV1
|
|
#!/usr/bin/python
#
# Copyright 2002-2019 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This file contains all common functions that the backend scripts that provide
support for PyCOMPSs and Jupyter within Supercomputers.
"""
import os
import shlex
import subprocess
VERBOSE = False  # General Boolean to print detailed information through stdout - For debugging purposes.
DECODING_FORMAT = 'utf-8'  # Encoding used to decode subprocess stdout/stderr.
SUCCESS_KEYWORD = 'SUCCESS'  # Marker printed/returned on success.
# NOTE(review): the value is misspelled ('RUNING'), but clients may match
# this exact string -- confirm all consumers before correcting it.
NOT_RUNNING_KEYWORD = 'NOT_RUNING'
ERROR_KEYWORD = 'ERROR'  # Marker printed/raised on failure.
DISABLED_VALUE = 'undefined'  # Placeholder for disabled/unset settings.
JOB_NAME_KEYWORD = '-PyCOMPSsInteractive'  # Substring identifying notebook jobs.
def command_runner(cmd, exception=True, cwd=None):
    """
    Run the command defined in the cmd list and capture its output.
    Both stdout and stderr are decoded using DECODING_FORMAT.
    :param cmd: Command to execute as list.
    :param exception: Throw exception if failed. Otherwise, the caller will handle the error.
    :param cwd: Directory where to execute the command
    :return: return code, stdout, stderr
    """
    if VERBOSE:
        print("Executing command: " + ' '.join(cmd))
    process = subprocess.Popen(cmd,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               cwd=cwd)
    # communicate() waits for the process to finish and drains both pipes.
    raw_out, raw_err = process.communicate()
    exit_code = process.returncode
    out = raw_out.decode(DECODING_FORMAT)
    err = raw_err.decode(DECODING_FORMAT)
    if exception and exit_code != 0:
        _raise_command_exception(cmd, exit_code, out, err)
    return exit_code, out, err
def get_installation_path():
    """
    Retrieve the COMPSs installation root folder.
    This script is installed four directory levels below the installation
    root, so walk four levels up from the directory containing this file.
    :return: COMPSs installation root folder
    """
    path = os.path.dirname(os.path.realpath(__file__))
    # Walking up with dirname (instead of splitting and re-joining the path
    # components) also behaves sanely when the path has fewer than four
    # levels: it stops at the filesystem root instead of raising TypeError
    # from os.path.join(*()) on an empty component list.
    for _ in range(4):
        path = os.path.dirname(path)
    return path
def setup_supercomputer_configuration(include=None):
    """
    Setup the supercomputer configuration.
    Sources the supercomputer cfg and the queuing system cfg, exporting the
    variables they define into the current process environment. Those
    variables provide the submission/cancel/status commands for the
    specific supercomputer where COMPSs is installed. Commands may contain
    keywords (e.g. QUEUE_JOB_NAME_CMD) substituted later from Python.
    :param include: Dictionary with environment variables to consider in the
                    environment so that the cfgs can complete their contents
    :return: None
    """
    if VERBOSE:
        print("Setting up the supercomputer configuration...")
    install_root = get_installation_path()
    # Supercomputer configuration file.
    sc_cfg = os.path.join(install_root, 'Runtime', 'scripts', 'queues',
                          'supercomputers', 'default.cfg')
    if VERBOSE:
        print("* Loading SC cfg file")
    _export_environment_variables(sc_cfg, include)
    # Queue system configuration file (name taken from the environment
    # exported by the supercomputer cfg).
    queue_system = str(os.environ['QUEUE_SYSTEM'].strip())
    qs_cfg = os.path.join(install_root, 'Runtime', 'scripts', 'queues',
                          'queue_systems', queue_system + '.cfg')
    if VERBOSE:
        print("* Loading QS cfg file")
    _export_environment_variables(qs_cfg, include)
def is_notebook_job(job_id):
    """
    Checks if the given job id is running a PyCOMPSs notebook.
    Looks up the job name and tests it for the JOB_NAME_KEYWORD marker.
    :param job_id: Job id to check
    :return: True if is a notebook. False on the contrary
    """
    if verify_job_name(get_job_name(job_id)):
        if VERBOSE:
            print("Found notebook id: " + str(job_id))
        return True
    if VERBOSE:
        print("Job " + str(job_id) + " is not a PyCOMPSs notebook job.")
    return False
def update_command(command, job_id):
    """
    Updates the given command with the necessary job_id.
    Substitutes every %JOBID% occurrence with the job identifier and
    tokenizes the result.
    :param command: Command to update
    :param job_id: Job identifier
    :return: The updated command as list splitter by spaces
    """
    return command.replace('%JOBID%', str(job_id)).split()
def get_job_name(job_id):
    """
    Get the job name of a given job identifier.
    Runs the queue-system command from QUEUE_JOB_NAME_CMD with the job id
    substituted in.
    :param job_id: Job identifier
    :return: Job name
    """
    cmd_template = os.environ['QUEUE_JOB_NAME_CMD']
    cmd = update_command(cmd_template, str(job_id))
    _, stdout, _ = command_runner(cmd)
    return stdout.strip()
def verify_job_name(name):
    """
    Verifies if the name provided includes the keyword
    :param name: Name to check
    :return: Boolean - True when JOB_NAME_KEYWORD appears in name
    """
    # 'in' already evaluates to a bool; the explicit if/else returning
    # True/False literals was redundant.
    return JOB_NAME_KEYWORD in name
def get_job_status(job_id):
    """
    Retrieves the status for the given job identifier
    :param job_id: Job identifier
    :return: The status as string, the return code
    """
    # Build and run the queue-system status command for this job.
    status_cmd = update_command(os.environ['QUEUE_JOB_STATUS_CMD'], job_id)
    return_code, stdout, _ = command_runner(status_cmd, exception=False)
    # Tag the queue system prints when the job is running.
    running_tag = os.environ['QUEUE_JOB_RUNNING_TAG']
    if return_code != 0:
        return "CHECK FAILED", return_code
    if stdout.strip() == running_tag:
        return "RUNNING", return_code
    return str(stdout).strip(), return_code
def not_a_notebook(job_id):
    """
    Prints the not a notebook job message and exit(1).
    :param job_id: Job id
    :return: None
    """
    message = (" - Job Id: " + str(job_id)
               + " does not belong to a PyCOMPSs interactive job.")
    print(ERROR_KEYWORD)
    print(message)
    exit(1)
def _export_environment_variables(env_file, include=None):
    """
    Export the environment variables defined in "file".
    Uses bash to parse the file and gets the variables from a clean environment
    to read them and set them in the current environment.
    Includes the "include" variables in the environment if defined, so that
    the sourced configuration files can take them as inputs (e.g. job_id).
    :param env_file: File with the environment variables
    :param include: Dictionary with environment variables to consider in the
                    environment so that the cfgs can complete their contents.
    :return: None
    """
    if include:
        # Prepend explicit exports for the extra variables so the sourced
        # cfg can reference them while still starting from a clean env -i.
        exports = []
        for k, v in include.items():
            exports.append('export ' + k.strip() + '=' + v.strip())
        exports = ' && '.join(exports)
        command = shlex.split("env -i bash -c 'set -a && " + exports + " && source " + str(env_file) + " && env'")
    else:
        # set -a auto-exports everything the sourced file defines; the final
        # 'env' dumps the resulting environment for parsing below.
        command = shlex.split("env -i bash -c 'set -a && source " + str(env_file) + " && env'")
    if VERBOSE:
        print("Exporting environment variables from: " + env_file)
    return_code, stdout, stderr = command_runner(command)
    for line in stdout.splitlines():
        # Split only on the first '=' so values containing '=' survive.
        (key, _, value) = line.partition("=")
        if key != '_':  # '_' is bash's "last command" variable; skip it.
            os.environ[key] = value.strip()
def _raise_command_exception(command, return_code, stdout, stderr):
    """
    Generic exception raiser for command execution.
    :param command: Command that threw the exception
    :param return_code: Return code of the command that failed
    :param stdout: Standard output
    :param stderr: Standard error
    :return: None
    :raises: Exception
    """
    details = (ERROR_KEYWORD + ": Failed execution: " + str(command)
               + "\nRETURN CODE:" + str(return_code)
               + "\nSTDOUT:\n" + str(stdout)
               + "\nSTDERR:\n" + str(stderr))
    raise Exception(details)
|
|
from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import scipy.sparse as sp
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
# Fixture files shipped alongside this test module.
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
    """Load the classification fixture and verify shape, values and labels."""
    X, y = load_svmlight_file(datafile)
    # test X's shape
    assert_equal(X.indptr.shape[0], 7)
    assert_equal(X.shape[0], 6)
    assert_equal(X.shape[1], 21)
    assert_equal(y.shape[0], 6)
    # test X's non-zero values
    for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
                      (1, 5, 1.0), (1, 12, -3),
                      (2, 20, 27)):
        assert_equal(X[i, j], val)
    # tests X's zero values
    assert_equal(X[0, 3], 0)
    assert_equal(X[0, 5], 0)
    assert_equal(X[1, 8], 0)
    assert_equal(X[1, 16], 0)
    assert_equal(X[2, 18], 0)
    # test can change X's values
    X[0, 2] *= 2
    assert_equal(X[0, 2], 5)
    # test y
    assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
    """Loading from an open file descriptor must match loading by path."""
    # test loading from file descriptor
    X1, y1 = load_svmlight_file(datafile)
    fd = os.open(datafile, os.O_RDONLY)
    try:
        X2, y2 = load_svmlight_file(fd)
        assert_array_equal(X1.data, X2.data)
        assert_array_equal(y1, y2)
    finally:
        os.close(fd)
def test_load_svmlight_file_multilabel():
    """Multilabel mode returns one tuple of labels per sample."""
    X, y = load_svmlight_file(multifile, multilabel=True)
    assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
    """Loading the same file repeatedly yields identical data and dtype."""
    X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
                                                           dtype=np.float32)
    assert_array_equal(X_train.toarray(), X_test.toarray())
    assert_array_equal(y_train, y_test)
    assert_equal(X_train.dtype, np.float32)
    assert_equal(X_test.dtype, np.float32)
    X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
                                                 dtype=np.float64)
    assert_equal(X1.dtype, X2.dtype)
    assert_equal(X2.dtype, X3.dtype)
    assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
    """n_features may widen the matrix but must not be smaller than the file."""
    X, y = load_svmlight_file(datafile, n_features=22)
    # test X'shape
    assert_equal(X.indptr.shape[0], 7)
    assert_equal(X.shape[0], 6)
    assert_equal(X.shape[1], 22)
    # test X's non-zero values
    for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
                      (1, 5, 1.0), (1, 12, -3)):
        assert_equal(X[i, j], val)
    # 21 features in file
    assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
    """Round-trip loading through gzip- and bz2-compressed copies."""
    X, y = load_svmlight_file(datafile)

    with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
        tmp.close()  # necessary under windows
        with open(datafile, "rb") as f:
            # BUG FIX: close the compressed writer explicitly. The original
            # passed gzip.open(...) inline and relied on CPython refcounting
            # to flush and close it, which is implementation-specific and can
            # leave a truncated archive on other interpreters.
            with gzip.open(tmp.name, "wb") as fout:
                shutil.copyfileobj(f, fout)
        Xgz, ygz = load_svmlight_file(tmp.name)
        # because we "close" it manually and write to it,
        # we need to remove it manually.
        os.remove(tmp.name)
        assert_array_equal(X.toarray(), Xgz.toarray())
        assert_array_equal(y, ygz)

    with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
        tmp.close()  # necessary under windows
        with open(datafile, "rb") as f:
            with BZ2File(tmp.name, "wb") as fout:
                shutil.copyfileobj(f, fout)
        Xbz, ybz = load_svmlight_file(tmp.name)
        # because we "close" it manually and write to it,
        # we need to remove it manually.
        os.remove(tmp.name)
        assert_array_equal(X.toarray(), Xbz.toarray())
        assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
    """A malformed svmlight file must raise ValueError."""
    load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
    """Out-of-order feature indices must raise ValueError."""
    load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
    """zero_based=False with a 0 index present must raise ValueError."""
    f = BytesIO(b("-1 4:1.\n1 0:1\n"))
    load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
    """zero_based="auto" infers the indexing base from the data seen."""
    data1 = b("-1 1:1 2:2 3:3\n")
    data2 = b("-1 0:0 1:1\n")
    # data1 alone looks one-based, so auto shifts indices down to 3 columns.
    f1 = BytesIO(data1)
    X, y = load_svmlight_file(f1, zero_based="auto")
    assert_equal(X.shape, (1, 3))
    # data2 contains index 0, so both files are treated as zero-based.
    f1 = BytesIO(data1)
    f2 = BytesIO(data2)
    X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
    assert_equal(X1.shape, (1, 4))
    assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
    """qid attributes are dropped or returned depending on query_id flag."""
    # load svmfile with qid attribute
    data = b("""
    3 qid:1 1:0.53 2:0.12
    2 qid:1 1:0.13 2:0.1
    7 qid:2 1:0.87 2:0.12""")
    X, y = load_svmlight_file(BytesIO(data), query_id=False)
    assert_array_equal(y, [3, 2, 7])
    assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
    res1 = load_svmlight_files([BytesIO(data)], query_id=True)
    res2 = load_svmlight_file(BytesIO(data), query_id=True)
    for X, y, qid in (res1, res2):
        assert_array_equal(y, [3, 2, 7])
        assert_array_equal(qid, [1, 1, 2])
        assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
    """One invalid file in a multi-file load must raise ValueError."""
    load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
    """A float is neither a path nor a file descriptor: TypeError."""
    # in python 3 integers are valid file opening arguments (taken as unix
    # file descriptors)
    load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
    """A non-existent path must raise IOError."""
    load_svmlight_file("trou pic nic douille")
def test_dump():
    """Dump/reload round trips for dense, sparse and sliced inputs,
    covering both indexing bases and several dtypes."""
    X_sparse, y_dense = load_svmlight_file(datafile)
    X_dense = X_sparse.toarray()
    y_sparse = sp.csr_matrix(y_dense)
    # slicing a csr_matrix can unsort its .indices, so test that we sort
    # those correctly
    X_sliced = X_sparse[np.arange(X_sparse.shape[0])]
    y_sliced = y_sparse[np.arange(y_sparse.shape[0])]
    for X in (X_sparse, X_dense, X_sliced):
        for y in (y_sparse, y_dense, y_sliced):
            for zero_based in (True, False):
                for dtype in [np.float32, np.float64, np.int32]:
                    f = BytesIO()
                    # we need to pass a comment to get the version info in;
                    # LibSVM doesn't grok comments so they're not put in by
                    # default anymore.
                    if (sp.issparse(y) and y.shape[0] == 1):
                        # make sure y's shape is: (n_samples, n_labels)
                        # when it is sparse
                        y = y.T
                    dump_svmlight_file(X.astype(dtype), y, f, comment="test",
                                       zero_based=zero_based)
                    f.seek(0)
                    # First comment line carries the scikit-learn version.
                    comment = f.readline()
                    try:
                        comment = str(comment, "utf-8")
                    except TypeError:  # fails in Python 2.x
                        pass
                    assert_in("scikit-learn %s" % sklearn.__version__, comment)
                    # Second comment line records the indexing base used.
                    comment = f.readline()
                    try:
                        comment = str(comment, "utf-8")
                    except TypeError:  # fails in Python 2.x
                        pass
                    assert_in(["one", "zero"][zero_based] + "-based", comment)
                    X2, y2 = load_svmlight_file(f, dtype=dtype,
                                                zero_based=zero_based)
                    assert_equal(X2.dtype, dtype)
                    assert_array_equal(X2.sorted_indices().indices, X2.indices)
                    X2_dense = X2.toarray()
                    if dtype == np.float32:
                        # allow a rounding error at the last decimal place
                        assert_array_almost_equal(
                            X_dense.astype(dtype), X2_dense, 4)
                        assert_array_almost_equal(
                            y_dense.astype(dtype), y2, 4)
                    else:
                        # allow a rounding error at the last decimal place
                        assert_array_almost_equal(
                            X_dense.astype(dtype), X2_dense, 15)
                        assert_array_almost_equal(
                            y_dense.astype(dtype), y2, 15)
def test_dump_multilabel():
    """Multilabel targets (dense or sparse) serialize as comma-joined labels."""
    X = [[1, 0, 3, 0, 5],
         [0, 0, 0, 0, 0],
         [0, 5, 0, 1, 0]]
    y_dense = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
    y_sparse = sp.csr_matrix(y_dense)
    for y in [y_dense, y_sparse]:
        f = BytesIO()
        dump_svmlight_file(X, y, f, multilabel=True)
        f.seek(0)
        # make sure it dumps multilabel correctly
        assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
        assert_equal(f.readline(), b("0,2 \n"))
        assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
    """Values are written in the shortest exact decimal representation."""
    one = 1
    two = 2.1
    three = 3.01
    exact = 1.000000000000001
    # loses the last decimal place
    almost = 1.0000000000000001
    X = [[one, two, three, exact, almost],
         [1e9, 2e18, 3e27, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0]]
    y = [one, two, three, exact, almost]
    f = BytesIO()
    dump_svmlight_file(X, y, f)
    f.seek(0)
    # make sure it's using the most concise format possible
    assert_equal(f.readline(),
                 b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
    assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
    assert_equal(f.readline(), b("3.01 \n"))
    assert_equal(f.readline(), b("1.000000000000001 \n"))
    assert_equal(f.readline(), b("1 \n"))
    f.seek(0)
    # make sure it's correct too :)
    X2, y2 = load_svmlight_file(f)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_equal(y, y2)
def test_dump_comment():
    """ASCII and unicode comments round trip; raw utf-8 bytes and NUL fail."""
    X, y = load_svmlight_file(datafile)
    X = X.toarray()
    f = BytesIO()
    ascii_comment = "This is a comment\nspanning multiple lines."
    dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
    f.seek(0)
    X2, y2 = load_svmlight_file(f, zero_based=False)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_equal(y, y2)
    # XXX we have to update this to support Python 3.x
    utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
    f = BytesIO()
    # Raw utf-8 byte strings are rejected; callers must pass unicode.
    assert_raises(UnicodeDecodeError,
                  dump_svmlight_file, X, y, f, comment=utf8_comment)
    unicode_comment = utf8_comment.decode("utf-8")
    f = BytesIO()
    dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
    f.seek(0)
    X2, y2 = load_svmlight_file(f, zero_based=False)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_equal(y, y2)
    # NUL bytes in a comment are invalid.
    f = BytesIO()
    assert_raises(ValueError,
                  dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
    """Mis-shaped or mis-sized targets must raise ValueError on dump."""
    X, y = load_svmlight_file(datafile)
    f = BytesIO()
    y2d = [y]
    assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
    f = BytesIO()
    assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
    """query_id values survive a dump/reload round trip."""
    # test dumping a file with query_id
    X, y = load_svmlight_file(datafile)
    X = X.toarray()
    query_id = np.arange(X.shape[0]) // 2
    f = BytesIO()
    dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
    f.seek(0)
    X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
    assert_array_almost_equal(X, X1.toarray())
    assert_array_almost_equal(y, y1)
    assert_array_almost_equal(query_id, query_id1)
|
|
#!/usr/bin/python
#
# Copyright (C) 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for unittesting the RPC client module"""
import unittest
from ganeti import constants
from ganeti import errors
from ganeti import serializer
from ganeti.rpc import client
import testutils
class TextRPCParsing(testutils.GanetiTestCase):
  """Tests for serializing/deserializing RPC requests and responses."""

  def testParseRequest(self):
    """Valid requests parse; malformed or incomplete payloads raise."""
    msg = serializer.DumpJson({
      client.KEY_METHOD: "foo",
      client.KEY_ARGS: ("bar", "baz", 123),
      })
    # Version defaults to None when absent from the message.
    self.assertEqualValues(client.ParseRequest(msg),
                           ("foo", ["bar", "baz", 123], None))
    self.assertRaises(client.ProtocolError, client.ParseRequest,
                      "this\"is {invalid, ]json data")
    # No dict
    self.assertRaises(client.ProtocolError, client.ParseRequest,
                      serializer.DumpJson(123))
    # Empty dict
    self.assertRaises(client.ProtocolError, client.ParseRequest,
                      serializer.DumpJson({ }))
    # No arguments
    self.assertRaises(client.ProtocolError, client.ParseRequest,
                      serializer.DumpJson({ client.KEY_METHOD: "foo", }))
    # No method
    self.assertRaises(client.ProtocolError, client.ParseRequest,
                      serializer.DumpJson({ client.KEY_ARGS: [], }))
    # No method or arguments
    self.assertRaises(client.ProtocolError, client.ParseRequest,
                      serializer.DumpJson({ client.KEY_VERSION: 1, }))

  def testParseRequestWithVersion(self):
    """An explicit version field is passed through by ParseRequest."""
    msg = serializer.DumpJson({
      client.KEY_METHOD: "version",
      client.KEY_ARGS: (["some"], "args", 0, "here"),
      client.KEY_VERSION: 20100101,
      })
    self.assertEqualValues(client.ParseRequest(msg),
                           ("version", [["some"], "args", 0, "here"], 20100101))

  def testParseResponse(self):
    """Valid responses parse; malformed or incomplete payloads raise."""
    msg = serializer.DumpJson({
      client.KEY_SUCCESS: True,
      client.KEY_RESULT: None,
      })
    self.assertEqual(client.ParseResponse(msg), (True, None, None))
    self.assertRaises(client.ProtocolError, client.ParseResponse,
                      "this\"is {invalid, ]json data")
    # No dict
    self.assertRaises(client.ProtocolError, client.ParseResponse,
                      serializer.DumpJson(123))
    # Empty dict
    self.assertRaises(client.ProtocolError, client.ParseResponse,
                      serializer.DumpJson({ }))
    # No success
    self.assertRaises(client.ProtocolError, client.ParseResponse,
                      serializer.DumpJson({ client.KEY_RESULT: True, }))
    # No result
    self.assertRaises(client.ProtocolError, client.ParseResponse,
                      serializer.DumpJson({ client.KEY_SUCCESS: True, }))
    # No result or success
    self.assertRaises(client.ProtocolError, client.ParseResponse,
                      serializer.DumpJson({ client.KEY_VERSION: 123, }))

  def testParseResponseWithVersion(self):
    """An explicit version field is passed through by ParseResponse."""
    msg = serializer.DumpJson({
      client.KEY_SUCCESS: True,
      client.KEY_RESULT: "Hello World",
      client.KEY_VERSION: 19991234,
      })
    self.assertEqual(client.ParseResponse(msg), (True, "Hello World", 19991234))

  def testFormatResponse(self):
    """FormatResponse without version omits the version key entirely."""
    for success, result in [(False, "error"), (True, "abc"),
                            (True, { "a": 123, "b": None, })]:
      msg = client.FormatResponse(success, result)
      msgdata = serializer.LoadJson(msg)
      self.assert_(client.KEY_SUCCESS in msgdata)
      self.assert_(client.KEY_RESULT in msgdata)
      self.assert_(client.KEY_VERSION not in msgdata)
      self.assertEqualValues(msgdata,
                             { client.KEY_SUCCESS: success,
                               client.KEY_RESULT: result,
                             })

  def testFormatResponseWithVersion(self):
    """FormatResponse with version includes the version key."""
    for success, result, version in [(False, "error", 123), (True, "abc", 999),
                                     (True, { "a": 123, "b": None, }, 2010)]:
      msg = client.FormatResponse(success, result, version=version)
      msgdata = serializer.LoadJson(msg)
      self.assert_(client.KEY_SUCCESS in msgdata)
      self.assert_(client.KEY_RESULT in msgdata)
      self.assert_(client.KEY_VERSION in msgdata)
      self.assertEqualValues(msgdata,
                             { client.KEY_SUCCESS: success,
                               client.KEY_RESULT: result,
                               client.KEY_VERSION: version,
                             })

  def testFormatRequest(self):
    """FormatRequest without version omits the version key entirely."""
    for method, args in [("a", []), ("b", [1, 2, 3])]:
      msg = client.FormatRequest(method, args)
      msgdata = serializer.LoadJson(msg)
      self.assert_(client.KEY_METHOD in msgdata)
      self.assert_(client.KEY_ARGS in msgdata)
      self.assert_(client.KEY_VERSION not in msgdata)
      self.assertEqualValues(msgdata,
                             { client.KEY_METHOD: method,
                               client.KEY_ARGS: args,
                             })

  def testFormatRequestWithVersion(self):
    """FormatRequest with version includes the version key."""
    for method, args, version in [("fn1", [], 123), ("fn2", [1, 2, 3], 999)]:
      msg = client.FormatRequest(method, args, version=version)
      msgdata = serializer.LoadJson(msg)
      self.assert_(client.KEY_METHOD in msgdata)
      self.assert_(client.KEY_ARGS in msgdata)
      self.assert_(client.KEY_VERSION in msgdata)
      self.assertEqualValues(msgdata,
                             { client.KEY_METHOD: method,
                               client.KEY_ARGS: args,
                               client.KEY_VERSION: version,
                             })
class TestCallRPCMethod(unittest.TestCase):
  """Tests for client.CallRPCMethod using a callback as the transport."""

  # Deliberately different from the real LUXI version so mismatches can be
  # provoked; the assert guards against an accidental collision.
  MY_LUXI_VERSION = 1234
  assert constants.LUXI_VERSION != MY_LUXI_VERSION

  def testSuccessNoVersion(self):
    """A versionless round trip must return the server's result."""
    def _Cb(msg):
      (method, args, version) = client.ParseRequest(msg)
      self.assertEqual(method, "fn1")
      self.assertEqual(args, "Hello World")
      return client.FormatResponse(True, "x")

    result = client.CallRPCMethod(_Cb, "fn1", "Hello World")
    # BUG FIX: the original computed the result but never asserted it.
    self.assertEqual(result, "x")

  def testServerVersionOnly(self):
    """A server-side-only version must be rejected."""
    def _Cb(msg):
      (method, args, version) = client.ParseRequest(msg)
      self.assertEqual(method, "fn1")
      self.assertEqual(args, "Hello World")
      return client.FormatResponse(True, "x", version=self.MY_LUXI_VERSION)

    self.assertRaises(errors.LuxiError, client.CallRPCMethod,
                      _Cb, "fn1", "Hello World")

  def testWithVersion(self):
    """Matching client/server versions succeed."""
    def _Cb(msg):
      (method, args, version) = client.ParseRequest(msg)
      self.assertEqual(method, "fn99")
      self.assertEqual(args, "xyz")
      return client.FormatResponse(True, "y", version=self.MY_LUXI_VERSION)

    self.assertEqual("y", client.CallRPCMethod(_Cb, "fn99", "xyz",
                                               version=self.MY_LUXI_VERSION))

  def testVersionMismatch(self):
    """Differing client/server versions raise LuxiError."""
    def _Cb(msg):
      (method, args, version) = client.ParseRequest(msg)
      self.assertEqual(method, "fn5")
      self.assertEqual(args, "xyz")
      return client.FormatResponse(True, "F", version=self.MY_LUXI_VERSION * 2)

    self.assertRaises(errors.LuxiError, client.CallRPCMethod,
                      _Cb, "fn5", "xyz", version=self.MY_LUXI_VERSION)

  def testError(self):
    """An encoded server-side exception is re-raised on the client."""
    def _Cb(msg):
      (method, args, version) = client.ParseRequest(msg)
      self.assertEqual(method, "fnErr")
      self.assertEqual(args, [])
      err = errors.OpPrereqError("Test")
      return client.FormatResponse(False, errors.EncodeException(err))

    self.assertRaises(errors.OpPrereqError, client.CallRPCMethod,
                      _Cb, "fnErr", [])

  def testErrorWithVersionMismatch(self):
    """A version mismatch takes precedence over the encoded error."""
    def _Cb(msg):
      (method, args, version) = client.ParseRequest(msg)
      self.assertEqual(method, "fnErr")
      self.assertEqual(args, [])
      err = errors.OpPrereqError("TestVer")
      return client.FormatResponse(False, errors.EncodeException(err),
                                   version=self.MY_LUXI_VERSION * 2)

    self.assertRaises(errors.LuxiError, client.CallRPCMethod,
                      _Cb, "fnErr", [],
                      version=self.MY_LUXI_VERSION)

  def testErrorWithVersion(self):
    """With matching versions, the encoded error is re-raised as-is."""
    def _Cb(msg):
      (method, args, version) = client.ParseRequest(msg)
      self.assertEqual(method, "fn9")
      self.assertEqual(args, [])
      err = errors.OpPrereqError("TestVer")
      return client.FormatResponse(False, errors.EncodeException(err),
                                   version=self.MY_LUXI_VERSION)

    self.assertRaises(errors.OpPrereqError, client.CallRPCMethod,
                      _Cb, "fn9", [],
                      version=self.MY_LUXI_VERSION)
if __name__ == "__main__":
  # Run through Ganeti's test-program wrapper (sets up logging etc.).
  testutils.GanetiTestProgram()
|
|
# Copyright (c) 2014 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for HDS HNAS NFS storage.
"""
import os
import time
from xml.etree import ElementTree as ETree
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import excutils
from oslo_utils import units
from cinder import exception
from cinder.i18n import _, _LE, _LI
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder.volume.drivers.hds.hnas_backend import HnasBackend
from cinder.volume.drivers import nfs
HDS_HNAS_NFS_VERSION = '1.0.0'  # Driver version reported to the backend.

LOG = logging.getLogger(__name__)

# Driver-specific configuration options registered with oslo.config.
NFS_OPTS = [
    cfg.StrOpt('hds_hnas_nfs_config_file',
               default='/opt/hds/hnas/cinder_nfs_conf.xml',
               help='Configuration file for HDS NFS cinder plugin'), ]

CONF = cfg.CONF
CONF.register_opts(NFS_OPTS)

# Defaults applied when optional values are missing from the XML config.
HNAS_DEFAULT_CONFIG = {'hnas_cmd': 'ssc'}
def _xml_read(root, element, check=None):
    """Read an xml element.

    :param root: XML object
    :param element: string desired tag
    :param check: string if present, throw exception if element missing
    :returns: stripped element text, or None when absent/empty (and check
              is not set)
    :raises ParameterNotFound: when check is set and the element is missing
    """
    try:
        val = root.findtext(element)
        LOG.info(_LI("%(element)s: %(val)s"), {'element': element, 'val': val})
        if val:
            return val.strip()
        if check:
            raise exception.ParameterNotFound(param=element)
        return None
    except ETree.ParseError:
        if check:
            # Log while preserving the original traceback for the caller.
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("XML exception reading parameter: %s"), element)
        else:
            LOG.info(_LI("XML exception reading parameter: %s"), element)
            return None
def _read_config(xml_config_file):
    """Read hds driver specific xml config file.

    :param xml_config_file: string filename containing XML configuration
    :returns: dict with the parsed configuration
    :raises NotFound: when the file is not readable
    :raises ConfigNotFound: when the file cannot be parsed
    :raises ParameterNotFound: when a mandatory parameter or service is
                               missing
    """
    if not os.access(xml_config_file, os.R_OK):
        raise exception.NotFound(_LE("Can't open config file: %s"),
                                 xml_config_file)
    try:
        root = ETree.parse(xml_config_file).getroot()
    except Exception:
        raise exception.ConfigNotFound(_LE("Error parsing config file: %s"),
                                       xml_config_file)
    # mandatory parameters
    config = {}
    arg_prereqs = ['mgmt_ip0', 'username', 'password']
    for req in arg_prereqs:
        config[req] = _xml_read(root, req, 'check')
    # optional parameters
    config['hnas_cmd'] = _xml_read(root, 'hnas_cmd') or\
        HNAS_DEFAULT_CONFIG['hnas_cmd']
    config['hdp'] = {}
    config['services'] = {}
    # min one needed
    for svc in ['svc_0', 'svc_1', 'svc_2', 'svc_3']:
        if _xml_read(root, svc) is None:
            continue
        service = {'label': svc}
        # none optional
        for arg in ['volume_type', 'hdp']:
            service[arg] = _xml_read(root, svc + '/' + arg, 'check')
        config['services'][service['volume_type']] = service
        config['hdp'][service['hdp']] = service['hdp']
    # at least one service required!
    # BUG FIX: dict.keys() is never None, so the original check
    # `config['services'].keys() is None` could never fire. Test for
    # emptiness instead so a config with no services is rejected.
    if not config['services']:
        raise exception.ParameterNotFound(param="No service found")
    return config
def factory_bend():
    """Factory over-ride in self-tests."""
    # Indirection point so unit tests can substitute a fake backend.
    return HnasBackend()
class HDSNFSDriver(nfs.NfsDriver):
"""Base class for Hitachi NFS driver.
Executes commands relating to Volumes.
"""
    def __init__(self, *args, **kwargs):
        """Initialize the driver: load XML config and query the array.

        Reads the driver XML configuration file referenced by the
        hds_hnas_nfs_config_file option, builds the backend helper and
        caches array identification data.
        """
        # NOTE(vish): db is set by Manager
        self._execute = None
        self.context = None
        self.configuration = kwargs.get('configuration', None)
        if self.configuration:
            self.configuration.append_config_values(NFS_OPTS)
            self.config = _read_config(
                self.configuration.hds_hnas_nfs_config_file)
        super(HDSNFSDriver, self).__init__(*args, **kwargs)
        self.bend = factory_bend()
        # Array id, symbolic name and maximum LU count from the backend.
        (self.arid, self.nfs_name, self.lumax) = self._array_info_get()
    def _array_info_get(self):
        """Get array parameters.

        :returns: tuple (array id, 'nfs_<array id>', lu max) parsed from
                  the backend's get_version output.
        """
        out = self.bend.get_version(self.config['hnas_cmd'],
                                    HDS_HNAS_NFS_VERSION,
                                    self.config['mgmt_ip0'],
                                    self.config['username'],
                                    self.config['password'])
        # NOTE(review): fields 1 and 6 of the whitespace-split output are
        # assumed to be the array id and lu max -- confirm against the
        # HnasBackend.get_version output format.
        inf = out.split()
        return inf[1], 'nfs_' + inf[1], inf[6]
def _id_to_vol(self, volume_id):
"""Given the volume id, retrieve the volume object from database.
:param volume_id: string volume id
"""
vol = self.db.volume_get(self.context, volume_id)
return vol
    def _get_service(self, volume):
        """Get the available service parameters for a given volume using
        its type.

        Falls back to the 'default' service when the volume type has no
        matching entry in the configuration.

        :param volume: dictionary volume reference
        :returns: tuple (hdp, path, fslabel) for the matched service
        :raises ParameterNotFound: when neither the type nor 'default' is
                                   configured
        """
        label = None
        if volume['volume_type']:
            label = volume['volume_type']['name']
        label = label or 'default'
        if label not in self.config['services'].keys():
            # default works if no match is found
            label = 'default'
        if label in self.config['services'].keys():
            svc = self.config['services'][label]
            # NOTE(review): svc['fslabel'] and svc['path'] are not populated
            # by the _read_config visible in this file -- confirm where these
            # keys are filled in before relying on this path.
            LOG.info(_LI("Get service: %(lbl)s->%(svc)s"),
                     {'lbl': label, 'svc': svc['fslabel']})
            service = (svc['hdp'], svc['path'], svc['fslabel'])
        else:
            LOG.info(_LI("Available services: %s"),
                     self.config['services'].keys())
            LOG.error(_LE("No configuration found for service: %s"),
                      label)
            raise exception.ParameterNotFound(param=label)
        return service
    def set_execute(self, execute):
        # Install the command-execution callable used by this driver.
        self._execute = execute
    def extend_volume(self, volume, new_size):
        """Extend an existing volume.

        :param volume: dictionary volume reference
        :param new_size: int size in GB to extend
        :raises InvalidResults: when the resized file does not report the
                                requested size afterwards
        """
        nfs_mount = self._get_provider_location(volume['id'])
        path = self._get_volume_path(nfs_mount, volume['name'])
        # Resize the image file on share to new size.
        LOG.debug("Checking file for resize")
        if self._is_file_size_equal(path, new_size):
            # Already the requested size; nothing to do.
            return
        else:
            LOG.info(_LI("Resizing file to %sG"), new_size)
            image_utils.resize_image(path, new_size)
            # Re-check to confirm the resize actually took effect.
            if self._is_file_size_equal(path, new_size):
                LOG.info(_LI("LUN %(id)s extended to %(size)s GB."),
                         {'id': volume['id'], 'size': new_size})
                return
            else:
                raise exception.InvalidResults(
                    _("Resizing image file failed."))
def _is_file_size_equal(self, path, size):
    """Checks if file size at path is equal to size.

    :param path: string path to the image file
    :param size: size in GB to compare against
    :returns: True when the virtual size (in GB) equals *size*
    """
    info = image_utils.qemu_img_info(path)
    return (info.virtual_size / units.Gi) == size
def create_volume_from_snapshot(self, volume, snapshot):
    """Creates a volume from a snapshot.

    :param volume: dictionary volume reference
    :param snapshot: dictionary snapshot reference
    :returns: dict with the 'provider_location' of the new volume
    :raises CinderException: if the requested volume size differs from
        the snapshot size (cloning cannot resize)
    """
    LOG.debug("create_volume_from %s", volume)
    vol_size = volume['size']
    snap_size = snapshot['volume_size']

    if vol_size != snap_size:
        msg = _("Cannot create volume of size %(vol_size)s from "
                "snapshot of size %(snap_size)s")
        msg_fmt = {'vol_size': vol_size, 'snap_size': snap_size}
        raise exception.CinderException(msg % msg_fmt)
    # The new volume is a backend-side clone of the snapshot file.
    self._clone_volume(snapshot['name'],
                       volume['name'],
                       snapshot['volume_id'])
    share = self._get_volume_location(snapshot['volume_id'])

    return {'provider_location': share}
def create_snapshot(self, snapshot):
    """Create a snapshot.

    Snapshots are implemented as backend-side clones of the source
    volume file on the same share.

    :param snapshot: dictionary snapshot reference
    :returns: dict with the snapshot's 'provider_location'
    """
    self._clone_volume(snapshot['volume_name'],
                       snapshot['name'],
                       snapshot['volume_id'])
    share = self._get_volume_location(snapshot['volume_id'])
    LOG.debug('Share: %s', share)

    # returns the mount point (not path)
    return {'provider_location': share}
def delete_snapshot(self, snapshot):
    """Deletes a snapshot.

    :param snapshot: dictionary snapshot reference
    :returns: True when the snapshot file was already absent;
        otherwise None after removing it
    """
    nfs_mount = self._get_provider_location(snapshot['volume_id'])

    # Treat an already-missing snapshot file as success.
    if self._volume_not_present(nfs_mount, snapshot['name']):
        return True

    self._execute('rm', self._get_volume_path(nfs_mount, snapshot['name']),
                  run_as_root=True)
def _get_volume_location(self, volume_id):
    """Returns NFS mount address as <nfs_ip_address>:<nfs_mount_dir>.

    :param volume_id: string volume id
    :returns: 'ip:export' string for the volume's share
    """
    return '%s:%s' % (self._get_host_ip(volume_id),
                      self._get_export_path(volume_id))
def _get_provider_location(self, volume_id):
    """Returns provider location for given volume.

    :param volume_id: string volume id
    :returns: the stored provider_location string
    """
    volume = self.db.volume_get(self.context, volume_id)

    # same format as _get_volume_location
    return volume.provider_location
def _get_host_ip(self, volume_id):
    """Returns IP address for the given volume.

    :param volume_id: string volume id
    :returns: the host part of the '<ip>:<export>' provider location
    """
    location = self._get_provider_location(volume_id)
    host, _sep, _export = location.partition(':')
    return host
def _get_export_path(self, volume_id):
    """Returns NFS export path for the given volume.

    :param volume_id: string volume id
    :returns: the export part of the '<ip>:<export>' provider location
    """
    location = self._get_provider_location(volume_id)
    return location.split(':')[1]
def _volume_not_present(self, nfs_mount, volume_name):
    """Check if volume exists.

    :param nfs_mount: string NFS mount ('<ip>:<export>')
    :param volume_name: string volume name
    :returns: True when the volume file is absent, False otherwise
    """
    try:
        # Probe the file with 'ls'; a non-zero exit means not present.
        self._try_execute('ls', self._get_volume_path(nfs_mount,
                                                      volume_name))
    except processutils.ProcessExecutionError:
        # If the volume isn't present
        return True
    return False
def _try_execute(self, *command, **kwargs):
    """Run *command*, retrying on failure with quadratic back-off.

    :returns: True on success
    :raises processutils.ProcessExecutionError: after
        `configuration.num_shell_tries` consecutive failures
    """
    # NOTE(vish): Volume commands can partially fail due to timing, but
    #             running them a second time on failure will usually
    #             recover nicely.
    tries = 0
    while True:
        try:
            self._execute(*command, **kwargs)
            return True
        except processutils.ProcessExecutionError:
            tries += 1
            if tries >= self.configuration.num_shell_tries:
                raise
            LOG.exception(_LE("Recovering from a failed execute.  "
                              "Try number %s"), tries)
            # Back off 1, 4, 9, ... seconds between attempts.
            time.sleep(tries ** 2)
def _get_volume_path(self, nfs_share, volume_name):
    """Get volume path (local fs path) for given volume name on given nfs
    share.

    :param nfs_share: string, example 172.18.194.100:/var/nfs
    :param volume_name: string,
        example volume-91ee65ec-c473-4391-8c09-162b00c68a8c
    :returns: local filesystem path of the volume file
    """
    return os.path.join(self._get_mount_point_for_share(nfs_share),
                        volume_name)
def create_cloned_volume(self, volume, src_vref):
    """Creates a clone of the specified volume.

    :param volume: dictionary volume reference
    :param src_vref: dictionary src_vref reference
    :returns: dict with the 'provider_location' of the new volume
    :raises CinderException: if the requested size differs from the
        source volume size (cloning cannot resize)
    """
    vol_size = volume['size']
    src_vol_size = src_vref['size']

    if vol_size != src_vol_size:
        msg = _("Cannot create clone of size %(vol_size)s from "
                "volume of size %(src_vol_size)s")
        msg_fmt = {'vol_size': vol_size, 'src_vol_size': src_vol_size}
        raise exception.CinderException(msg % msg_fmt)

    # Backend-side clone of the source volume file.
    self._clone_volume(src_vref['name'], volume['name'], src_vref['id'])
    share = self._get_volume_location(src_vref['id'])

    return {'provider_location': share}
def get_volume_stats(self, refresh=False):
    """Get volume stats.

    :param refresh: when True, update the stats first.
    :returns: dict of backend statistics with driver identity fields
        filled in
    """
    stats = super(HDSNFSDriver, self).get_volume_stats(refresh)
    backend_name = self.configuration.safe_get('volume_backend_name')
    stats.update({
        "volume_backend_name": backend_name or 'HDSNFSDriver',
        "vendor_name": 'HDS',
        "driver_version": HDS_HNAS_NFS_VERSION,
        "storage_protocol": 'NFS',
    })
    return stats
def _get_nfs_info(self):
    """Build a dict of NFS export info keyed by '<ip>:<export>'.

    Each value maps 'path', 'hdp' and 'fslabel' parsed from the
    backend's 'Export' listing lines.
    """
    out = self.bend.get_nfs_info(self.config['hnas_cmd'],
                                 self.config['mgmt_ip0'],
                                 self.config['username'],
                                 self.config['password'])
    lines = out.split('\n')

    # dict based on NFS exports addresses
    conf = {}
    for line in lines:
        if 'Export' in line:
            # NOTE(review): relies on fixed word positions in the
            # export listing output — confirm against bend.get_nfs_info.
            inf = line.split()
            (export, path, fslabel, hdp, ip1) = \
                inf[1], inf[3], inf[5], inf[7], inf[11]
            # 9, 10, etc are IP addrs
            key = ip1 + ':' + export
            conf[key] = {}
            conf[key]['path'] = path
            conf[key]['hdp'] = hdp
            conf[key]['fslabel'] = fslabel
            msg = _("nfs_info: %(key)s: %(path)s, HDP: \
                %(fslabel)s FSID: %(hdp)s")
            LOG.info(msg, {'key': key, 'path': path, 'fslabel': fslabel,
                           'hdp': hdp})

    return conf
def do_setup(self, context):
    """Perform internal driver setup.

    Loads the shares config and cross-references each configured share
    with the backend's export listing, copying the export's path, fsid
    and fslabel into the matching service entry.

    :param context: request context
    :raises ParameterNotFound: when a listed share has no matching
        service entry
    """
    self.context = context
    self._load_shares_config(getattr(self.configuration,
                                     self.driver_prefix +
                                     '_shares_config'))
    LOG.info(_LI("Review shares: %s"), self.shares)

    nfs_info = self._get_nfs_info()

    for share in self.shares:
        #export = share.split(':')[1]
        if share in nfs_info.keys():
            LOG.info(_LI("share: %(share)s -> %(info)s"),
                     {'share': share, 'info': nfs_info[share]['path']})

            for svc in self.config['services'].keys():
                if share == self.config['services'][svc]['hdp']:
                    self.config['services'][svc]['path'] = \
                        nfs_info[share]['path']
                    # don't overwrite HDP value
                    self.config['services'][svc]['fsid'] = \
                        nfs_info[share]['hdp']
                    self.config['services'][svc]['fslabel'] = \
                        nfs_info[share]['fslabel']
                    LOG.info(_LI("Save service info for"
                                 " %(svc)s -> %(hdp)s, %(path)s"),
                             {'svc': svc, 'hdp': nfs_info[share]['hdp'],
                              'path': nfs_info[share]['path']})
                    break

            # NOTE(review): this re-check reuses the loop variable
            # 'svc' after the for loop — it only detects a missing
            # match via the *last* iterated service and would raise
            # NameError if 'services' were empty. Confirm intended.
            if share != self.config['services'][svc]['hdp']:
                LOG.error(_LE("NFS share %(share)s has no service entry:"
                              " %(svc)s -> %(hdp)s"),
                          {'share': share, 'svc': svc,
                           'hdp': self.config['services'][svc]['hdp']})
                raise exception.ParameterNotFound(param=svc)
        else:
            LOG.info(_LI("share: %s incorrect entry"), share)
def _clone_volume(self, volume_name, clone_name, volume_id):
    """Clones mounted volume using the HNAS file_clone.

    :param volume_name: string volume name
    :param clone_name: string clone name (or snapshot)
    :param volume_id: string volume id
    :returns: raw output of the backend file_clone command
    """
    export_path = self._get_export_path(volume_id)
    # volume-ID snapshot-ID, /cinder
    LOG.info(_LI("Cloning with volume_name %(vname)s clone_name %(cname)s"
                 " export_path %(epath)s"), {'vname': volume_name,
                                             'cname': clone_name,
                                             'epath': export_path})

    source_vol = self._id_to_vol(volume_id)
    # sps; added target
    (_hdp, _path, _fslabel) = self._get_service(source_vol)
    # Build source/target paths inside the service's filesystem.
    target_path = '%s/%s' % (_path, clone_name)
    source_path = '%s/%s' % (_path, volume_name)
    out = self.bend.file_clone(self.config['hnas_cmd'],
                               self.config['mgmt_ip0'],
                               self.config['username'],
                               self.config['password'],
                               _fslabel, source_path, target_path)

    return out
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.linalg_grad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl
from tensorflow.python.platform import test as test_lib
def _AddTest(test, op_name, testcase_name, fn):
test_name = '_'.join(['test', op_name, testcase_name])
if hasattr(test, test_name):
raise RuntimeError('Test %s defined more than once' % test_name)
setattr(test, test_name, fn)
class ShapeTest(test_lib.TestCase):
  """Gradient tests where matrix sizes are only known at graph run time."""

  @test_util.run_deprecated_v1
  def testBatchGradientUnknownSize(self):
    with self.cached_session():
      batch_size = constant_op.constant(3)
      matrix_size = constant_op.constant(4)
      # Batch of identity matrices whose sizes are tensors, so shapes
      # are not statically known.
      batch_identity = array_ops.tile(
          array_ops.expand_dims(
              array_ops.diag(array_ops.ones([matrix_size])), 0),
          [batch_size, 1, 1])
      determinants = linalg_ops.matrix_determinant(batch_identity)
      reduced = math_ops.reduce_sum(determinants)
      sum_grad = gradients_impl.gradients(reduced, batch_identity)[0]
      # At the identity, the determinant gradient equals the input.
      self.assertAllClose(batch_identity.eval(), self.evaluate(sum_grad))
class MatrixUnaryFunctorGradientTest(test_lib.TestCase):
  """Holder for unary matrix-op gradient tests attached via _AddTest."""
  pass  # Filled in below
def _GetMatrixUnaryFunctorGradientTest(functor_, dtype_, shape_, **kwargs_):
  """Build a test comparing analytic vs. numeric gradients of `functor_`."""

  @test_util.run_v1_only('b/120545219')
  def Test(self):
    with self.session(use_gpu=True):
      np.random.seed(1)
      a_np = np.random.uniform(
          low=-1.0, high=1.0,
          size=np.prod(shape_)).reshape(shape_).astype(dtype_)
      a = constant_op.constant(a_np)
      if functor_.__name__ == 'matrix_square_root':
        # Square the input matrix to ensure that its matrix square root exists
        a = math_ops.matmul(a, a)
        a_np = self.evaluate(a)
      b = functor_(a, **kwargs_)

      # Optimal stepsize for central difference is O(epsilon^{1/3}).
      epsilon = np.finfo(dtype_).eps
      delta = epsilon**(1.0 / 3.0)
      # tolerance obtained by looking at actual differences using
      # np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
      tol = 1e-6 if dtype_ == np.float64 else 0.05

      theoretical, numerical = gradient_checker.compute_gradient(
          a,
          a.get_shape().as_list(),
          b,
          b.get_shape().as_list(),
          x_init_value=a_np,
          delta=delta)
      self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)

  return Test
class MatrixBinaryFunctorGradientTest(test_lib.TestCase):
  """Holder for binary matrix-op gradient tests attached via _AddTest."""
  pass  # Filled in below
def _GetMatrixBinaryFunctorGradientTest(functor_,
                                        dtype_,
                                        shape_,
                                        float32_tol_fudge=1.0,
                                        **kwargs_):
  """Build a test checking gradients of the binary op `functor_(a, b)`."""

  @test_util.run_v1_only('b/120545219')
  def Test(self):
    # TODO(rmlarsen): Debug illegal address bug on CUDA and re-enable
    # GPU test for matrix_solve.
    use_gpu = False if functor_ == linalg_ops.matrix_solve else True

    with self.session(use_gpu=use_gpu):
      np.random.seed(1)
      a_np = np.random.uniform(
          low=-1.0, high=1.0,
          size=np.prod(shape_)).reshape(shape_).astype(dtype_)
      a = constant_op.constant(a_np)

      b_np = np.random.uniform(
          low=-1.0, high=1.0,
          size=np.prod(shape_)).reshape(shape_).astype(dtype_)
      b = constant_op.constant(b_np)
      c = functor_(a, b, **kwargs_)

      # Optimal stepsize for central difference is O(epsilon^{1/3}).
      epsilon = np.finfo(dtype_).eps
      delta = epsilon**(1.0 / 3.0)
      # tolerance obtained by looking at actual differences using
      # np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
      tol = 1e-6 if dtype_ == np.float64 else float32_tol_fudge * 0.05
      # The gradients for a and b may be of very different magnitudes,
      # so to not get spurious failures we test them separately.
      for factor, factor_init in [a, a_np], [b, b_np]:
        theoretical, numerical = gradient_checker.compute_gradient(
            factor,
            factor.get_shape().as_list(),
            c,
            c.get_shape().as_list(),
            x_init_value=factor_init,
            delta=delta)
        self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)

  return Test
if __name__ == '__main__':
  # Tests for gradients of binary matrix operations.
  for dtype in np.float32, np.float64:
    for size in 2, 5, 10:
      # We skip the rank 4, size 10 case: it is slow and conceptually covered
      # by the other cases.
      for extra in [(), (2,), (3,)] + [(3, 2)] * (size < 10):
        for adjoint in False, True:
          shape = extra + (size, size)
          name = '%s_%s_adj_%s' % (dtype.__name__, '_'.join(map(str, shape)),
                                   str(adjoint))
          _AddTest(MatrixBinaryFunctorGradientTest, 'MatrixSolveGradient', name,
                   _GetMatrixBinaryFunctorGradientTest(
                       linalg_ops.matrix_solve, dtype, shape, adjoint=adjoint))

          for lower in True, False:
            # BUG FIX: previously `name` itself was rebound here, so the
            # second iteration's test name accumulated the first suffix
            # (e.g. '..._low_True_low_False'). Derive each name from the
            # unchanged base instead.
            tri_name = '%s_low_%s' % (name, lower)
            if (tri_name == 'float32_10_10_adj_False_low_True') and \
               test_lib.is_built_with_rocm():
              # Skip this one particular subtest on the ROCm platform
              # It will fail because of 1 element in 10,000 mismatch,
              # and the mismatch is minor (tolerance is 0.20, mismatch is 0.22)
              # TODO(rocm) : investigate cause of mismatch and fix
              continue
            _AddTest(MatrixBinaryFunctorGradientTest,
                     'MatrixTriangularSolveGradient', tri_name,
                     _GetMatrixBinaryFunctorGradientTest(
                         linalg_ops.matrix_triangular_solve,
                         dtype,
                         shape,
                         float32_tol_fudge=4.0,
                         adjoint=adjoint,
                         lower=lower))

  # Tests for gradients of unary matrix operations.
  for dtype in np.float32, np.float64:
    for size in 2, 5, 10:
      # We skip the rank 4, size 10 case: it is slow and conceptually covered
      # by the other cases.
      for extra in [(), (2,), (3,)] + [(3, 2)] * (size < 10):
        shape = extra + (size, size)
        name = '%s_%s' % (dtype.__name__, '_'.join(map(str, shape)))
        _AddTest(MatrixUnaryFunctorGradientTest, 'MatrixInverseGradient', name,
                 _GetMatrixUnaryFunctorGradientTest(linalg_ops.matrix_inverse,
                                                    dtype, shape))
        _AddTest(MatrixUnaryFunctorGradientTest, 'MatrixExponentialGradient',
                 name,
                 _GetMatrixUnaryFunctorGradientTest(
                     linalg_impl.matrix_exponential, dtype, shape))
        _AddTest(
            MatrixUnaryFunctorGradientTest, 'MatrixDeterminantGradient', name,
            _GetMatrixUnaryFunctorGradientTest(linalg_ops.matrix_determinant,
                                               dtype, shape))
        _AddTest(
            MatrixUnaryFunctorGradientTest, 'LogMatrixDeterminantGradient',
            name,
            _GetMatrixUnaryFunctorGradientTest(
                lambda x: linalg_ops.log_matrix_determinant(x)[1],
                dtype, shape))

        # The numerical Jacobian is consistently invalid for these four shapes
        # because the matrix square root of the perturbed input doesn't exist
        if shape in {(2, 5, 5), (3, 5, 5), (3, 10, 10), (3, 2, 5, 5)}:
          # Alternative shape that consistently produces a valid numerical Jacobian
          shape = extra + (size + 1, size + 1)
          name = '%s_%s' % (dtype.__name__, '_'.join(map(str, shape)))
        _AddTest(
            MatrixUnaryFunctorGradientTest, 'MatrixSquareRootGradient', name,
            _GetMatrixUnaryFunctorGradientTest(linalg_ops.matrix_square_root,
                                               dtype, shape))

  # Tests for gradients of matrix_solve_ls
  for dtype in np.float32, np.float64:
    for rows in 2, 5, 10:
      for cols in 2, 5, 10:
        for l2_regularization in 1e-6, 0.001, 1.0:
          shape = (rows, cols)
          name = '%s_%s_%s' % (dtype.__name__, '_'.join(map(str, shape)),
                               l2_regularization)
          float32_tol_fudge = 5.1 if l2_regularization == 1e-6 else 4.0
          _AddTest(
              MatrixBinaryFunctorGradientTest,
              'MatrixSolveLsGradient',
              name,
              # pylint: disable=long-lambda,g-long-lambda
              _GetMatrixBinaryFunctorGradientTest(
                  (lambda a, b, l=l2_regularization:
                   linalg_ops.matrix_solve_ls(a, b, l)),
                  dtype,
                  shape,
                  float32_tol_fudge))

  test_lib.main()
|
|
from datetime import datetime, timedelta
from uuid import uuid4
from pytz import UTC
from twisted.python import log
from twisted.internet.defer import maybeDeferred
from twisted.web.resource import Resource
from twisted.web.server import NOT_DONE_YET
from txaws.ec2.client import Signature
from txaws.service import AWSServiceEndpoint
from txaws.credentials import AWSCredentials
from txaws.server.schema import (
Schema, Unicode, Integer, Enum, RawStr, Date)
from txaws.server.exception import APIError
from txaws.server.call import Call
class QueryAPI(Resource):
    """Base class for EC2-like query APIs.

    @param registry: The L{Registry} to use to look up L{Method}s for handling
        the API requests.
    @param path: Optionally, the actual resource path the clients are using
        when sending HTTP requests to this API, to take into account when
        validating the signature. This can differ from the one in the HTTP
        request we're processing in case the service sits behind a reverse
        proxy, like Apache. For this to work you have to make sure
        that 'path + path_of_the_rewritten_request' equals the resource
        path that clients are sending the request to.

    The following class variables must be defined by sub-classes:

    @ivar signature_versions: A list of allowed values for 'SignatureVersion'.
    @cvar content_type: The content type to set the 'Content-Type' header to.
    """

    isLeaf = True
    time_format = "%Y-%m-%dT%H:%M:%SZ"

    # Generic parameters common to every API call; any action-specific
    # parameters are returned by schema.extract() as the 'rest' dict.
    schema = Schema(
        Unicode("Action"),
        RawStr("AWSAccessKeyId"),
        Date("Timestamp", optional=True),
        Date("Expires", optional=True),
        Unicode("Version", optional=True),
        Enum("SignatureMethod", {"HmacSHA256": "sha256", "HmacSHA1": "sha1"},
             optional=True, default="HmacSHA256"),
        Unicode("Signature"),
        Integer("SignatureVersion", optional=True, default=2))

    def __init__(self, registry=None, path=None):
        Resource.__init__(self)
        self.path = path
        self.registry = registry

    def get_method(self, call, *args, **kwargs):
        """Return the L{Method} instance to invoke for the given L{Call}.

        @param args: Positional arguments to pass to the method constructor.
        @param kwargs: Keyword arguments to pass to the method constructor.
        @raises APIError: If the looked-up method reports itself as not
            available.
        """
        method_class = self.registry.get(call.action, call.version)
        method = method_class(*args, **kwargs)
        if not method.is_available():
            raise APIError(400, "InvalidAction", "The action %s is not "
                           "valid for this web service." % call.action)
        else:
            return method

    def get_principal(self, access_key):
        """Return a principal object by access key.

        The returned object must have C{access_key} and C{secret_key}
        attributes and if the authentication succeeds, it will be
        passed to the created L{Call}.
        """
        # BUG FIX: this used to raise the non-callable 'NotImplemented'
        # singleton, which produced a TypeError at runtime instead of
        # the intended exception.
        raise NotImplementedError("Must be implemented by subclasses")

    def handle(self, request):
        """Handle an HTTP request for executing an API call.

        This method authenticates the request checking its signature, and then
        calls the C{execute} method, passing it a L{Call} object set with the
        principal for the authenticated user and the generic parameters
        extracted from the request.

        @param request: The L{HTTPRequest} to handle.
        @return: A deferred firing once the response has been written.
        """
        request.id = str(uuid4())
        deferred = maybeDeferred(self._validate, request)
        deferred.addCallback(self.execute)

        def write_response(response):
            # Success path: serialize headers and body back to the client.
            request.setHeader("Content-Length", str(len(response)))
            request.setHeader("Content-Type", self.content_type)
            request.write(response)
            request.finish()
            return response

        def write_error(failure):
            log.err(failure)
            if failure.check(APIError):
                status = failure.value.status

                # 'body' (renamed from 'bytes', which shadowed the
                # builtin) is either the pre-rendered response or a
                # freshly serialized error document.
                body = failure.value.response
                if body is None:
                    body = self.dump_error(failure.value, request)
            else:
                # Unexpected failure: generic 500 with the bare message.
                body = str(failure.value)
                status = 500
            request.setResponseCode(status)
            request.write(body)
            request.finish()

        deferred.addCallback(write_response)
        deferred.addErrback(write_error)
        return deferred

    def dump_error(self, error, request):
        """Serialize an error generating the response to send to the client.

        @param error: The L{APIError} to format.
        @param request: The request that generated the error.
        """
        raise NotImplementedError("Must be implemented by subclass.")

    def dump_result(self, result):
        """Serialize the result of the method invokation.

        @param result: The L{Method} result to serialize.
        """
        return result

    def authorize(self, method, call):
        """Authorize to invoke the given L{Method} with the given L{Call}.

        The base implementation authorizes everything; sub-classes may
        override and raise to deny access.
        """

    def execute(self, call):
        """Execute an API L{Call}.

        At this point the request has been authenticated and C{call.principal}
        is set with the L{Principal} for the L{User} requesting the call.

        @return: The response to write in the request for the given L{Call}.
        @raises: An L{APIError} in case the execution fails, sporting an error
            message the HTTP status code to return.
        """
        method = self.get_method(call)
        deferred = maybeDeferred(self.authorize, method, call)
        deferred.addCallback(lambda _: method.invoke(call))
        return deferred.addCallback(self.dump_result)

    def get_utc_time(self):
        """Return a C{datetime} object with the current time in UTC."""
        return datetime.now(UTC)

    def _validate(self, request):
        """Validate an L{HTTPRequest} before executing it.

        The following conditions are checked:

        - The request contains all the generic parameters.
        - The action specified in the request is a supported one.
        - The signature mechanism is a supported one.
        - The provided signature matches the one calculated using the locally
          stored secret access key for the user.
        - The signature hasn't expired.

        @return: The validated L{Call}, set with its default arguments and the
            the principal of the accessing L{User}.
        """
        # Keep only the last occurrence of each repeated query argument.
        params = dict((k, v[-1]) for k, v in request.args.iteritems())
        args, rest = self.schema.extract(params)

        self._validate_generic_parameters(args)

        def create_call(principal):
            self._validate_principal(principal, args)
            self._validate_signature(request, principal, args, params)
            return Call(raw_params=rest,
                        principal=principal,
                        action=args.Action,
                        version=args.Version,
                        id=request.id)

        deferred = maybeDeferred(self.get_principal, args.AWSAccessKeyId)
        deferred.addCallback(create_call)
        return deferred

    def _validate_generic_parameters(self, args):
        """Validate the generic request parameters.

        @param args: Parsed schema arguments.
        @raises APIError: In the following cases:
            - Action is not included in C{self.actions}
            - SignatureVersion is not included in C{self.signature_versions}
            - Expires and Timestamp are present
            - Expires is before the current time
            - Timestamp is older than 15 minutes.
        """
        utc_now = self.get_utc_time()

        if getattr(self, "actions", None) is not None:
            # Check the deprecated 'actions' attribute
            if not args.Action in self.actions:
                raise APIError(400, "InvalidAction", "The action %s is not "
                               "valid for this web service." % args.Action)
        else:
            self.registry.check(args.Action, args.Version)

        if not args.SignatureVersion in self.signature_versions:
            raise APIError(403, "InvalidSignature", "SignatureVersion '%s' "
                           "not supported" % args.SignatureVersion)

        if args.Expires and args.Timestamp:
            raise APIError(400, "InvalidParameterCombination",
                           "The parameter Timestamp cannot be used with "
                           "the parameter Expires")
        if args.Expires and args.Expires < utc_now:
            raise APIError(400,
                           "RequestExpired",
                           "Request has expired. Expires date is %s" % (
                               args.Expires.strftime(self.time_format)))
        if args.Timestamp and args.Timestamp + timedelta(minutes=15) < utc_now:
            raise APIError(400,
                           "RequestExpired",
                           "Request has expired. Timestamp date is %s" % (
                               args.Timestamp.strftime(self.time_format)))

    def _validate_principal(self, principal, args):
        """Validate the principal.

        @raises APIError: when no principal matched the access key.
        """
        if principal is None:
            raise APIError(401, "AuthFailure",
                           "No user with access key '%s'" %
                           args.AWSAccessKeyId)

    def _validate_signature(self, request, principal, args, params):
        """Validate the signature.

        Recomputes the request signature with the principal's secret
        key and compares it against the one the client supplied.

        @raises APIError: when the signatures don't match.
        """
        creds = AWSCredentials(principal.access_key, principal.secret_key)
        endpoint = AWSServiceEndpoint()
        endpoint.set_method(request.method)
        endpoint.set_canonical_host(request.getHeader("Host"))
        path = request.path
        if self.path is not None:
            # Prepend the externally-visible path (e.g. behind a proxy).
            path = "%s/%s" % (self.path.rstrip("/"), path.lstrip("/"))
        endpoint.set_path(path)
        # The signature itself is never part of the signed payload.
        params.pop("Signature")
        signature = Signature(creds, endpoint, params)
        if signature.compute() != args.Signature:
            raise APIError(403, "SignatureDoesNotMatch",
                           "The request signature we calculated does not "
                           "match the signature you provided. Check your "
                           "key and signing method.")

    def get_status_text(self):
        """Get the text to return when a status check is made."""
        return "Query API Service"

    def render_GET(self, request):
        """Handle a GET request."""
        if not request.args:
            # No query arguments: treat as a plain status check.
            request.setHeader("Content-Type", "text/plain")
            return self.get_status_text()
        else:
            self.handle(request)
            return NOT_DONE_YET

    render_POST = render_GET
|
|
import datetime
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy import DateTime
from sqlalchemy import Float
from sqlalchemy import ForeignKey
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.horizontal_shard import ShardedSession
from sqlalchemy.orm import relationship
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import operators
from sqlalchemy.sql import visitors
# db1 is used for id generation. The "pool_threadlocal"
# causes the id_generator() to use the same connection as that
# of an ongoing transaction within db1.
# Four in-memory SQLite databases stand in for the four shards.
echo = True
db1 = create_engine("sqlite://", echo=echo, pool_threadlocal=True)
db2 = create_engine("sqlite://", echo=echo)
db3 = create_engine("sqlite://", echo=echo)
db4 = create_engine("sqlite://", echo=echo)


# create session function. this binds the shard ids
# to databases within a ShardedSession and returns it.
create_session = sessionmaker(class_=ShardedSession)

create_session.configure(
    shards={
        "north_america": db1,
        "asia": db2,
        "europe": db3,
        "south_america": db4,
    }
)

# mappings and tables
Base = declarative_base()

# we need a way to create identifiers which are unique across all databases.
# one easy way would be to just use a composite primary key, where one value
# is the shard id. but here, we'll show something more "generic", an id
# generation function. we'll use a simplistic "id table" stored in database
# #1. Any other method will do just as well; UUID, hilo, application-specific,
# etc.
ids = Table("ids", Base.metadata, Column("nextid", Integer, nullable=False))
def id_generator(ctx):
    """Column default generator yielding ids unique across all shards.

    Reads and increments the single-row counter stored in db1.

    :param ctx: SQLAlchemy execution context (unused here)
    """
    # in reality, might want to use a separate transaction for this.
    with db1.connect() as conn:
        nextid = conn.scalar(ids.select(for_update=True))
        conn.execute(ids.update(values={ids.c.nextid: ids.c.nextid + 1}))
    return nextid
# table setup. we'll store a lead table of continents/cities, and a secondary
# table storing locations. a particular row will be placed in the database
# whose shard id corresponds to the 'continent'. in this setup, secondary rows
# in 'weather_reports' will be placed in the same DB as that of the parent, but
# this can be changed if you're willing to write more complex sharding
# functions.
class WeatherLocation(Base):
    """Lead row: a city on a continent; the continent selects the shard."""

    __tablename__ = "weather_locations"

    id = Column(Integer, primary_key=True, default=id_generator)
    continent = Column(String(30), nullable=False)
    city = Column(String(50), nullable=False)

    # one-to-many; Report rows gain a 'location' backref
    reports = relationship("Report", backref="location")

    def __init__(self, continent, city):
        self.continent = continent
        self.city = city
class Report(Base):
    """Secondary row stored in the same shard as its parent location."""

    __tablename__ = "weather_reports"

    # plain autoincrement pk: values repeat across shards, so identity
    # is disambiguated by the session's identity_token
    id = Column(Integer, primary_key=True)
    location_id = Column(
        "location_id", Integer, ForeignKey("weather_locations.id")
    )
    temperature = Column("temperature", Float)
    report_time = Column(
        "report_time", DateTime, default=datetime.datetime.now
    )

    def __init__(self, temperature):
        self.temperature = temperature
# create tables
for db in (db1, db2, db3, db4):
    Base.metadata.drop_all(db)
    Base.metadata.create_all(db)

# establish initial "id" in db1; id_generator() starts handing out 1
db1.execute(ids.insert(), nextid=1)


# step 5. define sharding functions.

# we'll use a straight mapping of a particular set of "country"
# attributes to shard id.
shard_lookup = {
    "North America": "north_america",
    "Asia": "asia",
    "Europe": "europe",
    "South America": "south_america",
}
def shard_chooser(mapper, instance, clause=None):
    """shard chooser.

    Looks at the given instance and returns a shard id.  A
    WeatherLocation maps directly through its continent; any other
    instance (i.e. a Report) is resolved recursively through its
    parent 'location' attribute.
    """
    if isinstance(instance, WeatherLocation):
        return shard_lookup[instance.continent]
    # secondary row: defer to the parent WeatherLocation's shard
    return shard_chooser(mapper, instance.location)
def id_chooser(query, ident):
    """id chooser.

    Given a primary key identity, return the list of shard ids to
    search.  During a lazy load the parent object's identity token
    pins the search to a single shard; otherwise every shard is
    searched (a round-robin strategy could be used here instead).
    """
    parent = query.lazy_loaded_from
    if parent:
        # if we are in a lazy load, we can look at the parent object
        # and limit our search to that same shard, assuming that's how
        # we've set things up.
        return [parent.identity_token]
    return ["north_america", "asia", "europe", "south_america"]
def query_chooser(query):
    """query chooser.

    Return the list of shard ids a query should run against.  The
    query's criterion is inspected for comparisons against the
    'continent' column; matching literal values narrow the shard set,
    and with no match every shard is returned.
    """
    shard_ids = []

    # collect continent names found in the criterion and convert
    # them to shard ids
    for column, operator, value in _get_query_comparisons(query):
        # "shares_lineage()" returns True if both columns refer to the same
        # statement column, adjusting for any annotations present.
        # (an annotation is an internal clone of a Column object
        # and occur when using ORM-mapped attributes like
        # "WeatherLocation.continent"). A simpler comparison, though less
        # accurate, would be "column.key == 'continent'".
        if not column.shares_lineage(WeatherLocation.__table__.c.continent):
            continue
        if operator == operators.eq:
            shard_ids.append(shard_lookup[value])
        elif operator == operators.in_op:
            shard_ids.extend(shard_lookup[v] for v in value)

    # nothing matched: fan out to all shards
    return shard_ids or ["north_america", "asia", "europe", "south_america"]
def _get_query_comparisons(query):
    """Search an orm.Query object for binary expressions.

    Returns expressions which match a Column against one or more
    literal values as a list of tuples of the form
    (column, operator, values). "values" is a single value
    or tuple of values depending on the operator.
    """
    # NOTE(review): this walks SQLAlchemy-internal attributes
    # (query._params, query._criterion) — tied to the installed
    # SQLAlchemy version's Query internals.
    binds = {}
    clauses = set()
    comparisons = []

    def visit_bindparam(bind):
        # visit a bind parameter.

        # check in _params for it first
        if bind.key in query._params:
            value = query._params[bind.key]
        elif bind.callable:
            # some ORM functions (lazy loading)
            # place the bind's value as a
            # callable for deferred evaluation.
            value = bind.callable()
        else:
            # just use .value
            value = bind.value

        binds[bind] = value

    def visit_column(column):
        clauses.add(column)

    def visit_binary(binary):
        # special handling for "col IN (params)"
        if (
            binary.left in clauses
            and binary.operator == operators.in_op
            and hasattr(binary.right, "clauses")
        ):
            comparisons.append(
                (
                    binary.left,
                    binary.operator,
                    tuple(binds[bind] for bind in binary.right.clauses),
                )
            )
        elif binary.left in clauses and binary.right in binds:
            comparisons.append(
                (binary.left, binary.operator, binds[binary.right])
            )

        elif binary.left in binds and binary.right in clauses:
            # literal on the left, column on the right: normalize so
            # the column always comes first in the tuple
            comparisons.append(
                (binary.right, binary.operator, binds[binary.left])
            )

    # here we will traverse through the query's criterion, searching
    # for SQL constructs. We will place simple column comparisons
    # into a list.
    if query._criterion is not None:
        visitors.traverse_depthfirst(
            query._criterion,
            {},
            {
                "bindparam": visit_bindparam,
                "binary": visit_binary,
                "column": visit_column,
            },
        )
    return comparisons
# further configure create_session to use these functions
create_session.configure(
    shard_chooser=shard_chooser,
    id_chooser=id_chooser,
    query_chooser=query_chooser,
)

# save and load objects!

tokyo = WeatherLocation("Asia", "Tokyo")
newyork = WeatherLocation("North America", "New York")
toronto = WeatherLocation("North America", "Toronto")
london = WeatherLocation("Europe", "London")
dublin = WeatherLocation("Europe", "Dublin")
brasilia = WeatherLocation("South America", "Brasila")
quito = WeatherLocation("South America", "Quito")

tokyo.reports.append(Report(80.0))
newyork.reports.append(Report(75))
quito.reports.append(Report(85))

sess = create_session()

# each row lands in the shard picked by shard_chooser()
sess.add_all([tokyo, newyork, toronto, london, dublin, brasilia, quito])

sess.commit()

t = sess.query(WeatherLocation).get(tokyo.id)
assert t.city == tokyo.city
assert t.reports[0].temperature == 80.0

# query_chooser() narrows this to the 'north_america' shard only
north_american_cities = sess.query(WeatherLocation).filter(
    WeatherLocation.continent == "North America"
)

assert {c.city for c in north_american_cities} == {"New York", "Toronto"}

asia_and_europe = sess.query(WeatherLocation).filter(
    WeatherLocation.continent.in_(["Europe", "Asia"])
)

assert {c.city for c in asia_and_europe} == {"Tokyo", "London", "Dublin"}

# the Report class uses a simple integer primary key. So across two databases,
# a primary key will be repeated. The "identity_token" tracks in memory
# that these two identical primary keys are local to different databases.
newyork_report = newyork.reports[0]
tokyo_report = tokyo.reports[0]

assert inspect(newyork_report).identity_key == (Report, (1,), "north_america")
assert inspect(tokyo_report).identity_key == (Report, (1,), "asia")

# the token representing the originating shard is also available directly
assert inspect(newyork_report).identity_token == "north_america"
assert inspect(tokyo_report).identity_token == "asia"
|
|
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, isna
import pandas._testing as tm
def test_first_last_nth(df):
    # tests for first / last / nth
    # first()/last() should agree with nth(0)/nth(-1) on the conftest ``df``
    # fixture (columns A..D, groups "foo"/"bar" in column A).
    grouped = df.groupby("A")
    first = grouped.first()
    expected = df.loc[[1, 0], ["B", "C", "D"]]
    expected.index = Index(["bar", "foo"], name="A")
    expected = expected.sort_index()
    tm.assert_frame_equal(first, expected)
    nth = grouped.nth(0)
    tm.assert_frame_equal(nth, expected)
    last = grouped.last()
    expected = df.loc[[5, 7], ["B", "C", "D"]]
    expected.index = Index(["bar", "foo"], name="A")
    tm.assert_frame_equal(last, expected)
    nth = grouped.nth(-1)
    tm.assert_frame_equal(nth, expected)
    nth = grouped.nth(1)
    expected = df.loc[[2, 3], ["B", "C", "D"]].copy()
    expected.index = Index(["foo", "bar"], name="A")
    expected = expected.sort_index()
    tm.assert_frame_equal(nth, expected)
    # it works!
    grouped["B"].first()
    grouped["B"].last()
    grouped["B"].nth(0)
    # After NaN-ing all "foo" rows of B, first/last/nth yield NaN for "foo".
    df.loc[df["A"] == "foo", "B"] = np.nan
    assert isna(grouped["B"].first()["foo"])
    assert isna(grouped["B"].last()["foo"])
    assert isna(grouped["B"].nth(0)["foo"])
    # v0.14.0 whatsnew
    df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=["A", "B"])
    g = df.groupby("A")
    result = g.first()
    expected = df.iloc[[1, 2]].set_index("A")
    tm.assert_frame_equal(result, expected)
    expected = df.iloc[[1, 2]].set_index("A")
    result = g.nth(0, dropna="any")
    tm.assert_frame_equal(result, expected)
# first()/last() should skip null values of any null type (GH 32123).
@pytest.mark.parametrize("method", ["first", "last"])
def test_first_last_with_na_object(method, nulls_fixture):
    # https://github.com/pandas-dev/pandas/issues/32123
    groups = pd.DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 3, nulls_fixture]}).groupby(
        "a"
    )
    result = getattr(groups, method)()
    if method == "first":
        values = [1, 3]
    else:
        values = [2, 3]
    values = np.array(values, dtype=result["b"].dtype)
    idx = pd.Index([1, 2], name="a")
    expected = pd.DataFrame({"b": values}, index=idx)
    tm.assert_frame_equal(result, expected)
# nth(), unlike first/last, keeps nulls in position (GH 32123).
@pytest.mark.parametrize("index", [0, -1])
def test_nth_with_na_object(index, nulls_fixture):
    # https://github.com/pandas-dev/pandas/issues/32123
    groups = pd.DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 3, nulls_fixture]}).groupby(
        "a"
    )
    result = groups.nth(index)
    if index == 0:
        values = [1, 3]
    else:
        values = [2, nulls_fixture]
    values = np.array(values, dtype=result["b"].dtype)
    idx = pd.Index([1, 2], name="a")
    expected = pd.DataFrame({"b": values}, index=idx)
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("method", ["first", "last"])
def test_first_last_with_None(method):
    # https://github.com/pandas-dev/pandas/issues/32800
    # None should be preserved as object dtype
    df = pd.DataFrame.from_dict({"id": ["a"], "value": [None]})
    groups = df.groupby("id", as_index=False)
    result = getattr(groups, method)()
    tm.assert_frame_equal(result, df)
def test_first_last_nth_dtypes(df_mixed_floats):
    # first/last/nth must preserve bool/int dtypes (no upcasting).
    df = df_mixed_floats.copy()
    df["E"] = True
    df["F"] = 1
    # tests for first / last / nth
    grouped = df.groupby("A")
    first = grouped.first()
    expected = df.loc[[1, 0], ["B", "C", "D", "E", "F"]]
    expected.index = Index(["bar", "foo"], name="A")
    expected = expected.sort_index()
    tm.assert_frame_equal(first, expected)
    last = grouped.last()
    expected = df.loc[[5, 7], ["B", "C", "D", "E", "F"]]
    expected.index = Index(["bar", "foo"], name="A")
    expected = expected.sort_index()
    tm.assert_frame_equal(last, expected)
    nth = grouped.nth(1)
    expected = df.loc[[3, 2], ["B", "C", "D", "E", "F"]]
    expected.index = Index(["bar", "foo"], name="A")
    expected = expected.sort_index()
    tm.assert_frame_equal(nth, expected)
    # GH 2763, first/last shifting dtypes
    idx = list(range(10))
    idx.append(9)
    s = Series(data=range(11), index=idx, name="IntCol")
    assert s.dtype == "int64"
    f = s.groupby(level=0).first()
    assert f.dtype == "int64"
def test_first_last_nth_nan_dtype():
    # GH 33591
    # An all-NaN object column must stay object dtype through first/last/nth.
    df = pd.DataFrame({"data": ["A"], "nans": pd.Series([np.nan], dtype=object)})
    grouped = df.groupby("data")
    expected = df.set_index("data").nans
    tm.assert_series_equal(grouped.nans.first(), expected)
    tm.assert_series_equal(grouped.nans.last(), expected)
    tm.assert_series_equal(grouped.nans.nth(-1), expected)
    tm.assert_series_equal(grouped.nans.nth(0), expected)
def test_first_strings_timestamps():
    # GH 11244
    # first() must work when column labels mix Timestamps and strings.
    test = pd.DataFrame(
        {
            pd.Timestamp("2012-01-01 00:00:00"): ["a", "b"],
            pd.Timestamp("2012-01-02 00:00:00"): ["c", "d"],
            "name": ["e", "e"],
            "aaaa": ["f", "g"],
        }
    )
    result = test.groupby("name").first()
    expected = DataFrame(
        [["a", "c", "f"]],
        columns=Index([Timestamp("2012-01-01"), Timestamp("2012-01-02"), "aaaa"]),
        index=Index(["e"], name="name"),
    )
    tm.assert_frame_equal(result, expected)
def test_nth():
    # Broad nth() coverage: scalar/negative/out-of-range n, dropna modes,
    # Series and column selection, lists of n, and as_index=False.
    df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=["A", "B"])
    g = df.groupby("A")
    tm.assert_frame_equal(g.nth(0), df.iloc[[0, 2]].set_index("A"))
    tm.assert_frame_equal(g.nth(1), df.iloc[[1]].set_index("A"))
    tm.assert_frame_equal(g.nth(2), df.loc[[]].set_index("A"))
    tm.assert_frame_equal(g.nth(-1), df.iloc[[1, 2]].set_index("A"))
    tm.assert_frame_equal(g.nth(-2), df.iloc[[0]].set_index("A"))
    tm.assert_frame_equal(g.nth(-3), df.loc[[]].set_index("A"))
    tm.assert_series_equal(g.B.nth(0), df.set_index("A").B.iloc[[0, 2]])
    tm.assert_series_equal(g.B.nth(1), df.set_index("A").B.iloc[[1]])
    tm.assert_frame_equal(g[["B"]].nth(0), df.loc[[0, 2], ["A", "B"]].set_index("A"))
    # dropna="any": rows with NaN are skipped when counting the nth row.
    exp = df.set_index("A")
    tm.assert_frame_equal(g.nth(0, dropna="any"), exp.iloc[[1, 2]])
    tm.assert_frame_equal(g.nth(-1, dropna="any"), exp.iloc[[1, 2]])
    exp["B"] = np.nan
    tm.assert_frame_equal(g.nth(7, dropna="any"), exp.iloc[[1, 2]])
    tm.assert_frame_equal(g.nth(2, dropna="any"), exp.iloc[[1, 2]])
    # out of bounds, regression from 0.13.1
    # GH 6621
    df = DataFrame(
        {
            "color": {0: "green", 1: "green", 2: "red", 3: "red", 4: "red"},
            "food": {0: "ham", 1: "eggs", 2: "eggs", 3: "ham", 4: "pork"},
            "two": {
                0: 1.5456590000000001,
                1: -0.070345000000000005,
                2: -2.4004539999999999,
                3: 0.46206000000000003,
                4: 0.52350799999999997,
            },
            "one": {
                0: 0.56573799999999996,
                1: -0.9742360000000001,
                2: 1.033801,
                3: -0.78543499999999999,
                4: 0.70422799999999997,
            },
        }
    ).set_index(["color", "food"])
    result = df.groupby(level=0, as_index=False).nth(2)
    expected = df.iloc[[-1]]
    tm.assert_frame_equal(result, expected)
    result = df.groupby(level=0, as_index=False).nth(3)
    expected = df.loc[[]]
    tm.assert_frame_equal(result, expected)
    # GH 7559
    # from the vbench
    df = DataFrame(np.random.randint(1, 10, (100, 2)), dtype="int64")
    s = df[1]
    g = df[0]
    expected = s.groupby(g).first()
    expected2 = s.groupby(g).apply(lambda x: x.iloc[0])
    tm.assert_series_equal(expected2, expected, check_names=False)
    assert expected.name == 1
    assert expected2.name == 1
    # validate first
    v = s[g == 1].iloc[0]
    assert expected.iloc[0] == v
    assert expected2.iloc[0] == v
    # this is NOT the same as .first (as sorted is default!)
    # as it keeps the order in the series (and not the group order)
    # related GH 7287
    expected = s.groupby(g, sort=False).first()
    result = s.groupby(g, sort=False).nth(0, dropna="all")
    tm.assert_series_equal(result, expected)
    # dropna=True (a plain bool) is rejected for a Series groupby.
    with pytest.raises(ValueError, match="For a DataFrame groupby"):
        s.groupby(g, sort=False).nth(0, dropna=True)
    # doc example
    df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=["A", "B"])
    g = df.groupby("A")
    result = g.B.nth(0, dropna="all")
    expected = g.B.first()
    tm.assert_series_equal(result, expected)
    # test multiple nth values
    df = DataFrame([[1, np.nan], [1, 3], [1, 4], [5, 6], [5, 7]], columns=["A", "B"])
    g = df.groupby("A")
    tm.assert_frame_equal(g.nth(0), df.iloc[[0, 3]].set_index("A"))
    tm.assert_frame_equal(g.nth([0]), df.iloc[[0, 3]].set_index("A"))
    tm.assert_frame_equal(g.nth([0, 1]), df.iloc[[0, 1, 3, 4]].set_index("A"))
    tm.assert_frame_equal(g.nth([0, -1]), df.iloc[[0, 2, 3, 4]].set_index("A"))
    tm.assert_frame_equal(g.nth([0, 1, 2]), df.iloc[[0, 1, 2, 3, 4]].set_index("A"))
    tm.assert_frame_equal(g.nth([0, 1, -1]), df.iloc[[0, 1, 2, 3, 4]].set_index("A"))
    tm.assert_frame_equal(g.nth([2]), df.iloc[[2]].set_index("A"))
    tm.assert_frame_equal(g.nth([3, 4]), df.loc[[]].set_index("A"))
    business_dates = pd.date_range(start="4/1/2014", end="6/30/2014", freq="B")
    df = DataFrame(1, index=business_dates, columns=["a", "b"])
    # get the first, fourth and last two business days for each month
    key = [df.index.year, df.index.month]
    result = df.groupby(key, as_index=False).nth([0, 3, -2, -1])
    expected_dates = pd.to_datetime(
        [
            "2014/4/1",
            "2014/4/4",
            "2014/4/29",
            "2014/4/30",
            "2014/5/1",
            "2014/5/6",
            "2014/5/29",
            "2014/5/30",
            "2014/6/2",
            "2014/6/5",
            "2014/6/27",
            "2014/6/30",
        ]
    )
    expected = DataFrame(1, columns=["a", "b"], index=expected_dates)
    tm.assert_frame_equal(result, expected)
def test_nth_multi_index(three_group):
    # PR 9090, related to issue 8979
    # test nth on MultiIndex, should match .first()
    grouped = three_group.groupby(["A", "B"])
    result = grouped.nth(0)
    expected = grouped.first()
    tm.assert_frame_equal(result, expected)
# Timezone must survive first()/last() with as_index=False (GH 15884).
# Case 1: a single group; case 2: mixed timezones across groups.
@pytest.mark.parametrize(
    "data, expected_first, expected_last",
    [
        (
            {
                "id": ["A"],
                "time": Timestamp("2012-02-01 14:00:00", tz="US/Central"),
                "foo": [1],
            },
            {
                "id": ["A"],
                "time": Timestamp("2012-02-01 14:00:00", tz="US/Central"),
                "foo": [1],
            },
            {
                "id": ["A"],
                "time": Timestamp("2012-02-01 14:00:00", tz="US/Central"),
                "foo": [1],
            },
        ),
        (
            {
                "id": ["A", "B", "A"],
                "time": [
                    Timestamp("2012-01-01 13:00:00", tz="America/New_York"),
                    Timestamp("2012-02-01 14:00:00", tz="US/Central"),
                    Timestamp("2012-03-01 12:00:00", tz="Europe/London"),
                ],
                "foo": [1, 2, 3],
            },
            {
                "id": ["A", "B"],
                "time": [
                    Timestamp("2012-01-01 13:00:00", tz="America/New_York"),
                    Timestamp("2012-02-01 14:00:00", tz="US/Central"),
                ],
                "foo": [1, 2],
            },
            {
                "id": ["A", "B"],
                "time": [
                    Timestamp("2012-03-01 12:00:00", tz="Europe/London"),
                    Timestamp("2012-02-01 14:00:00", tz="US/Central"),
                ],
                "foo": [3, 2],
            },
        ),
    ],
)
def test_first_last_tz(data, expected_first, expected_last):
    # GH15884
    # Test that the timezone is retained when calling first
    # or last on groupby with as_index=False
    df = DataFrame(data)
    result = df.groupby("id", as_index=False).first()
    expected = DataFrame(expected_first)
    cols = ["id", "time", "foo"]
    tm.assert_frame_equal(result[cols], expected[cols])
    result = df.groupby("id", as_index=False)["time"].first()
    tm.assert_frame_equal(result, expected[["id", "time"]])
    result = df.groupby("id", as_index=False).last()
    expected = DataFrame(expected_last)
    cols = ["id", "time", "foo"]
    tm.assert_frame_equal(result[cols], expected[cols])
    result = df.groupby("id", as_index=False)["time"].last()
    tm.assert_frame_equal(result, expected[["id", "time"]])
# Categorical and tz-aware datetime columns handled together (GH 21603).
@pytest.mark.parametrize(
    "method, ts, alpha",
    [
        ["first", Timestamp("2013-01-01", tz="US/Eastern"), "a"],
        ["last", Timestamp("2013-01-02", tz="US/Eastern"), "b"],
    ],
)
def test_first_last_tz_multi_column(method, ts, alpha):
    # GH 21603
    category_string = pd.Series(list("abc")).astype("category")
    df = pd.DataFrame(
        {
            "group": [1, 1, 2],
            "category_string": category_string,
            "datetimetz": pd.date_range("20130101", periods=3, tz="US/Eastern"),
        }
    )
    result = getattr(df.groupby("group"), method)()
    expected = pd.DataFrame(
        {
            "category_string": pd.Categorical(
                [alpha, "c"], dtype=category_string.dtype
            ),
            "datetimetz": [ts, Timestamp("2013-01-03", tz="US/Eastern")],
        },
        index=pd.Index([1, 2], name="group"),
    )
    tm.assert_frame_equal(result, expected)
# Extension dtypes (boolean, Int64, datetime64, timedelta64) must be preserved.
@pytest.mark.parametrize(
    "values",
    [
        pd.array([True, False], dtype="boolean"),
        pd.array([1, 2], dtype="Int64"),
        pd.to_datetime(["2020-01-01", "2020-02-01"]),
        pd.to_timedelta([1, 2], unit="D"),
    ],
)
@pytest.mark.parametrize("function", ["first", "last", "min", "max"])
def test_first_last_extension_array_keeps_dtype(values, function):
    # https://github.com/pandas-dev/pandas/issues/33071
    # https://github.com/pandas-dev/pandas/issues/32194
    df = DataFrame({"a": [1, 2], "b": values})
    grouped = df.groupby("a")
    idx = Index([1, 2], name="a")
    expected_series = Series(values, name="b", index=idx)
    expected_frame = DataFrame({"b": values}, index=idx)
    result_series = getattr(grouped["b"], function)()
    tm.assert_series_equal(result_series, expected_series)
    result_frame = grouped.agg({"b": function})
    tm.assert_frame_equal(result_frame, expected_frame)
def test_nth_multi_index_as_expected():
    # PR 9090, related to issue 8979
    # test nth on MultiIndex
    three_group = DataFrame(
        {
            "A": [
                "foo",
                "foo",
                "foo",
                "foo",
                "bar",
                "bar",
                "bar",
                "bar",
                "foo",
                "foo",
                "foo",
            ],
            "B": [
                "one",
                "one",
                "one",
                "two",
                "one",
                "one",
                "one",
                "two",
                "two",
                "two",
                "one",
            ],
            "C": [
                "dull",
                "dull",
                "shiny",
                "dull",
                "dull",
                "shiny",
                "shiny",
                "dull",
                "shiny",
                "shiny",
                "shiny",
            ],
        }
    )
    grouped = three_group.groupby(["A", "B"])
    result = grouped.nth(0)
    expected = DataFrame(
        {"C": ["dull", "dull", "dull", "dull"]},
        index=MultiIndex.from_arrays(
            [["bar", "bar", "foo", "foo"], ["one", "two", "one", "two"]],
            names=["A", "B"],
        ),
    )
    tm.assert_frame_equal(result, expected)
def test_groupby_head_tail():
    # head/tail with as_index True/False, n=0, negative n, and selections.
    df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"])
    g_as = df.groupby("A", as_index=True)
    g_not_as = df.groupby("A", as_index=False)
    # as_index= False, much easier
    tm.assert_frame_equal(df.loc[[0, 2]], g_not_as.head(1))
    tm.assert_frame_equal(df.loc[[1, 2]], g_not_as.tail(1))
    empty_not_as = DataFrame(
        columns=df.columns, index=pd.Index([], dtype=df.index.dtype)
    )
    empty_not_as["A"] = empty_not_as["A"].astype(df.A.dtype)
    empty_not_as["B"] = empty_not_as["B"].astype(df.B.dtype)
    tm.assert_frame_equal(empty_not_as, g_not_as.head(0))
    tm.assert_frame_equal(empty_not_as, g_not_as.tail(0))
    tm.assert_frame_equal(empty_not_as, g_not_as.head(-1))
    tm.assert_frame_equal(empty_not_as, g_not_as.tail(-1))
    tm.assert_frame_equal(df, g_not_as.head(7)) # contains all
    tm.assert_frame_equal(df, g_not_as.tail(7))
    # as_index=True, (used to be different)
    df_as = df
    tm.assert_frame_equal(df_as.loc[[0, 2]], g_as.head(1))
    tm.assert_frame_equal(df_as.loc[[1, 2]], g_as.tail(1))
    empty_as = DataFrame(index=df_as.index[:0], columns=df.columns)
    empty_as["A"] = empty_not_as["A"].astype(df.A.dtype)
    empty_as["B"] = empty_not_as["B"].astype(df.B.dtype)
    tm.assert_frame_equal(empty_as, g_as.head(0))
    tm.assert_frame_equal(empty_as, g_as.tail(0))
    tm.assert_frame_equal(empty_as, g_as.head(-1))
    tm.assert_frame_equal(empty_as, g_as.tail(-1))
    tm.assert_frame_equal(df_as, g_as.head(7)) # contains all
    tm.assert_frame_equal(df_as, g_as.tail(7))
    # test with selection
    tm.assert_frame_equal(g_as[[]].head(1), df_as.loc[[0, 2], []])
    tm.assert_frame_equal(g_as[["A"]].head(1), df_as.loc[[0, 2], ["A"]])
    tm.assert_frame_equal(g_as[["B"]].head(1), df_as.loc[[0, 2], ["B"]])
    tm.assert_frame_equal(g_as[["A", "B"]].head(1), df_as.loc[[0, 2]])
    tm.assert_frame_equal(g_not_as[[]].head(1), df_as.loc[[0, 2], []])
    tm.assert_frame_equal(g_not_as[["A"]].head(1), df_as.loc[[0, 2], ["A"]])
    tm.assert_frame_equal(g_not_as[["B"]].head(1), df_as.loc[[0, 2], ["B"]])
    tm.assert_frame_equal(g_not_as[["A", "B"]].head(1), df_as.loc[[0, 2]])
def test_group_selection_cache():
    # GH 12839 nth, head, and tail should return same result consistently
    # regardless of which of them is called first on the same GroupBy object.
    df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"])
    expected = df.iloc[[0, 2]].set_index("A")
    g = df.groupby("A")
    result1 = g.head(n=2)
    result2 = g.nth(0)
    tm.assert_frame_equal(result1, df)
    tm.assert_frame_equal(result2, expected)
    g = df.groupby("A")
    result1 = g.tail(n=2)
    result2 = g.nth(0)
    tm.assert_frame_equal(result1, df)
    tm.assert_frame_equal(result2, expected)
    g = df.groupby("A")
    result1 = g.nth(0)
    result2 = g.head(n=2)
    tm.assert_frame_equal(result1, expected)
    tm.assert_frame_equal(result2, df)
    g = df.groupby("A")
    result1 = g.nth(0)
    result2 = g.tail(n=2)
    tm.assert_frame_equal(result1, expected)
    tm.assert_frame_equal(result2, df)
def test_nth_empty():
    # GH 16064
    # Out-of-range n on an effectively empty grouping must keep index/columns.
    df = DataFrame(index=[0], columns=["a", "b", "c"])
    result = df.groupby("a").nth(10)
    expected = DataFrame(index=Index([], name="a"), columns=["b", "c"])
    tm.assert_frame_equal(result, expected)
    result = df.groupby(["a", "b"]).nth(10)
    expected = DataFrame(
        index=MultiIndex([[], []], [[], []], names=["a", "b"]), columns=["c"]
    )
    tm.assert_frame_equal(result, expected)
def test_nth_column_order():
    # GH 20760
    # Check that nth preserves column order
    df = DataFrame(
        [[1, "b", 100], [1, "a", 50], [1, "a", np.nan], [2, "c", 200], [2, "d", 150]],
        columns=["A", "C", "B"],
    )
    result = df.groupby("A").nth(0)
    expected = DataFrame(
        [["b", 100.0], ["c", 200.0]], columns=["C", "B"], index=Index([1, 2], name="A")
    )
    tm.assert_frame_equal(result, expected)
    result = df.groupby("A").nth(-1, dropna="any")
    expected = DataFrame(
        [["a", 50.0], ["d", 150.0]], columns=["C", "B"], index=Index([1, 2], name="A")
    )
    tm.assert_frame_equal(result, expected)
# NaN keys are dropped by the grouper, so nth must ignore those rows for
# every dropna mode (GH 26011).
@pytest.mark.parametrize("dropna", [None, "any", "all"])
def test_nth_nan_in_grouper(dropna):
    # GH 26011
    df = DataFrame(
        [[np.nan, 0, 1], ["abc", 2, 3], [np.nan, 4, 5], ["def", 6, 7], [np.nan, 8, 9]],
        columns=list("abc"),
    )
    result = df.groupby("a").nth(0, dropna=dropna)
    expected = pd.DataFrame(
        [[2, 3], [6, 7]], columns=list("bc"), index=Index(["abc", "def"], name="a")
    )
    tm.assert_frame_equal(result, expected)
|
|
import os
import datetime

# No set literals because we support Python 2.6.
TRUE_VALUES = set((
    True,
    'True',
    'true',
))


class empty(object):
    """
    We use this sentinel object, instead of None, as None is a plausible value
    for a default in real Python code.
    """
    pass


def get_env_value(name, required=False, default=empty):
    """
    Core function for extracting the environment variable.

    Enforces mutual exclusivity between `required` and `default` keywords.
    The `empty` sentinel value is used as the default `default` value to allow
    other functions to handle default/empty logic in the appropriate way.

    :param name: The name of the environment variable to be pulled
    :type name: str
    :param required: If ``True`` and the variable is absent, raise ``KeyError``
    :type required: bool
    :param default: Value returned when the variable is absent
    """
    if required and default is not empty:
        raise ValueError("Using `default` with `required=True` is invalid")
    elif required:
        try:
            value = os.environ[name]
        except KeyError:
            raise KeyError(
                "Must set environment variable {0}".format(name)
            )
    else:
        value = os.environ.get(name, default)
    return value


def env_int(name, required=False, default=empty):
    """Pulls an environment variable out of the environment and casts it to an
    integer. If the name is not present in the environment and no default is
    specified then a ``ValueError`` will be raised. Similarly, if the
    environment value is not castable to an integer, a ``ValueError`` will be
    raised.

    :param name: The name of the environment variable be pulled
    :type name: str
    :param required: Whether the environment variable is required. If ``True``
        and the variable is not present, a ``KeyError`` is raised.
    :type required: bool
    :param default: The value to return if the environment variable is not
        present. (Providing a default alongside setting ``required=True`` will
        raise a ``ValueError``)
    """
    value = get_env_value(name, required=required, default=default)
    if value is empty:
        raise ValueError(
            "`env_int` requires either a default value to be specified, or for "
            "the variable to be present in the environment"
        )
    return int(value)


def env_float(name, required=False, default=empty):
    """Pulls an environment variable out of the environment and casts it to an
    float. If the name is not present in the environment and no default is
    specified then a ``ValueError`` will be raised. Similarly, if the
    environment value is not castable to an float, a ``ValueError`` will be
    raised.

    :param name: The name of the environment variable be pulled
    :type name: str
    :param required: Whether the environment variable is required. If ``True``
        and the variable is not present, a ``KeyError`` is raised.
    :type required: bool
    :param default: The value to return if the environment variable is not
        present. (Providing a default alongside setting ``required=True`` will
        raise a ``ValueError``)
    """
    value = get_env_value(name, required=required, default=default)
    if value is empty:
        raise ValueError(
            "`env_float` requires either a default value to be specified, or for "
            "the variable to be present in the environment"
        )
    return float(value)


def env_bool(name, truthy_values=TRUE_VALUES, required=False, default=empty):
    """Pulls an environment variable out of the environment returning it as a
    boolean. The strings ``'True'`` and ``'true'`` are the default *truthy*
    values. If not present in the environment and no default is specified,
    ``None`` is returned.

    :param name: The name of the environment variable be pulled
    :type name: str
    :param truthy_values: An iterable of values that should be considered
        truthy.
    :type truthy_values: iterable
    :param required: Whether the environment variable is required. If ``True``
        and the variable is not present, a ``KeyError`` is raised.
    :type required: bool
    :param default: The value to return if the environment variable is not
        present. (Providing a default alongside setting ``required=True`` will
        raise a ``ValueError``)
    """
    value = get_env_value(name, required=required, default=default)
    if value is empty:
        return None
    # BUG FIX: previously this compared against the module-level TRUE_VALUES,
    # silently ignoring a caller-supplied ``truthy_values``.
    return value in truthy_values


def env_string(name, required=False, default=empty):
    """Pulls an environment variable out of the environment returning it as a
    string. If not present in the environment and no default is specified, an
    empty string is returned.

    :param name: The name of the environment variable be pulled
    :type name: str
    :param required: Whether the environment variable is required. If ``True``
        and the variable is not present, a ``KeyError`` is raised.
    :type required: bool
    :param default: The value to return if the environment variable is not
        present. (Providing a default alongside setting ``required=True`` will
        raise a ``ValueError``)
    """
    value = get_env_value(name, default=default, required=required)
    if value is empty:
        value = ''
    return value


def env_list(name, separator=',', required=False, default=empty):
    """Pulls an environment variable out of the environment, splitting it on a
    separator, and returning it as a list. Extra whitespace on the list values
    is stripped. List values that evaluate as falsy are removed. If not present
    and no default specified, an empty list is returned.

    :param name: The name of the environment variable be pulled
    :type name: str
    :param separator: The separator that the string should be split on.
    :type separator: str
    :param required: Whether the environment variable is required. If ``True``
        and the variable is not present, a ``KeyError`` is raised.
    :type required: bool
    :param default: The value to return if the environment variable is not
        present. (Providing a default alongside setting ``required=True`` will
        raise a ``ValueError``)
    """
    value = get_env_value(name, required=required, default=default)
    if value is empty:
        return []
    # wrapped in list to force evaluation in python 3
    return list(filter(bool, [v.strip() for v in value.split(separator)]))


def env_timestamp(name, required=False, default=empty):
    """Pulls an environment variable out of the environment and parses it to a
    ``datetime.datetime`` object. The environment variable is expected to be a
    timestamp in the form of a float.

    If the name is not present in the environment and no default is specified
    then a ``ValueError`` will be raised.

    :param name: The name of the environment variable be pulled
    :type name: str
    :param required: Whether the environment variable is required. If ``True``
        and the variable is not present, a ``KeyError`` is raised.
    :type required: bool
    :param default: The value to return if the environment variable is not
        present. (Providing a default alongside setting ``required=True`` will
        raise a ``ValueError``)
    """
    if required and default is not empty:
        raise ValueError("Using `default` with `required=True` is invalid")
    # The default is handled locally (not by get_env_value) so that a caller's
    # default object is returned untouched rather than parsed as a timestamp.
    value = get_env_value(name, required=required, default=empty)
    if default is not empty and value is empty:
        return default
    if value is empty:
        raise ValueError(
            "`env_timestamp` requires either a default value to be specified, "
            "or for the variable to be present in the environment"
        )
    timestamp = float(value)
    return datetime.datetime.fromtimestamp(timestamp)


def env_iso8601(name, required=False, default=empty):
    """Pulls an environment variable out of the environment and parses it to a
    ``datetime.datetime`` object. The environment variable is expected to be an
    iso8601 formatted string.

    If the name is not present in the environment and no default is specified
    then a ``ValueError`` will be raised.

    :param name: The name of the environment variable be pulled
    :type name: str
    :param required: Whether the environment variable is required. If ``True``
        and the variable is not present, a ``KeyError`` is raised.
    :type required: bool
    :param default: The value to return if the environment variable is not
        present. (Providing a default alongside setting ``required=True`` will
        raise a ``ValueError``)
    """
    # Imported lazily so the iso8601 dependency is only needed when used.
    try:
        import iso8601
    except ImportError:
        raise ImportError(
            'Parsing iso8601 datetime strings requires the iso8601 library'
        )
    if required and default is not empty:
        raise ValueError("Using `default` with `required=True` is invalid")
    # Same local default handling as env_timestamp: return the caller's
    # default object as-is instead of trying to parse it.
    value = get_env_value(name, required=required, default=empty)
    if default is not empty and value is empty:
        return default
    if value is empty:
        raise ValueError(
            "`env_iso8601` requires either a default value to be specified, or "
            "for the variable to be present in the environment"
        )
    return iso8601.parse_date(value)


def get(name, required=False, default=empty, type=None):
    """Generic getter for environment variables. Handles defaults,
    required-ness, and what type to expect.

    :param name: The name of the environment variable be pulled
    :type name: str
    :param required: Whether the environment variable is required. If ``True``
        and the variable is not present, a ``KeyError`` is raised.
    :type required: bool
    :param default: The value to return if the environment variable is not
        present. (Providing a default alongside setting ``required=True`` will
        raise a ``ValueError``)
    :param type: The type of variable expected.
    :param type: str or type
    """
    # Unknown/None types fall back to env_string.
    fn = {
        'int': env_int,
        int: env_int,
        # BUG FIX: 'float'/float were missing, so env_float was unreachable
        # through get() and float values came back as strings.
        'float': env_float,
        float: env_float,
        'bool': env_bool,
        bool: env_bool,
        'string': env_string,
        str: env_string,
        'list': env_list,
        list: env_list,
        'timestamp': env_timestamp,
        datetime.time: env_timestamp,
        'datetime': env_iso8601,
        datetime.datetime: env_iso8601,
    }.get(type, env_string)
    return fn(name, default=default, required=required)
|
|
# ST2/ST3 compat
from __future__ import print_function
import sublime
if sublime.version() < '3000':
    # we are on ST2 and Python 2.X
    _ST3 = False
else:
    # ST3 or later: the plugin host runs Python 3.
    # NOTE(review): this is a lexicographic string comparison of build
    # numbers; it works for the 2xxx/3xxx/4xxx build scheme in use.
    _ST3 = True
import re
import sys
import os.path
# To accommodate both Python 2 and 3
def advance_iterator(it):
    """Return the next item from iterator *it* on both Python 2 and 3.

    The ``next()`` builtin exists since Python 2.6, so there is no need to
    branch on ``_ST3``: on Python 2, ``next(it)`` calls ``it.next()``, which
    is exactly what the old ST2 branch did. Raises ``StopIteration`` when
    the iterator is exhausted, like the original.
    """
    return next(it)
print_debug = False  # when True, debug() emits diagnostics to the console
interactive = False  # set when debugging from the command line (enables prompts)
extra_file_ext = []  # extra file extensions to treat as source files
def debug(s):
    """Print a diagnostic line, honoring the module-level print_debug flag."""
    if print_debug:
        if not _ST3:
            # The ST2 (Python 2) console wants byte strings.
            s = s.encode('UTF-8')
        # BUG FIX: the old code did "parseTeXlog: " + s.encode('UTF-8')
        # unconditionally, which raises TypeError (str + bytes) on ST3/Python 3.
        print("parseTeXlog: " + s)
# The following function is only used when debugging interactively.
#
# If file is not found, ask me if we are debugging
# Rationale: if we are debugging from the command line, perhaps we are parsing
# a log file from a user, so apply heuristics and / or ask if the file not
# found is actually legit
#
# Return value: the question is, "Should I skip this file?" Hence:
# True means YES, DO SKIP IT, IT IS NOT A FILE
# False means NO, DO NOT SKIP IT, IT IS A FILE
def debug_skip_file(f):
    """Decide whether token *f*, seen where a file name was expected in a TeX
    log, should be skipped. Only meaningful when debugging interactively.

    Return value: the question is, "Should I skip this file?" Hence:
    True means YES, DO SKIP IT, IT IS NOT A FILE
    False means NO, DO NOT SKIP IT, IT IS A FILE
    """
    # If we are not debugging, then it's not a file for sure, so skip it
    if not (print_debug and interactive):
        return True
    debug("debug_skip_file: " + f)
    f_ext = os.path.splitext(f)[1].lower()[1:]
    # Heuristic: TeXlive on Mac or Linux (well, Ubuntu at least) or Windows / MiKTeX
    # Known file extensions:
    known_file_exts = ['tex', 'sty', 'cls', 'cfg', 'def', 'mkii', 'fd', 'map',
                       'clo', 'dfu', 'ldf', 'bdf', 'bbx', 'cbx', 'lbx']
    # BUG FIX: the trailing '\\MiKTeX\\tex\\' test used to sit OUTSIDE the
    # parenthesized or-group; because `and` binds tighter than `or`, any token
    # containing that substring was accepted regardless of its extension.
    if (f_ext in known_file_exts) and \
            (("/usr/local/texlive/" in f) or
             ("/usr/share/texlive/" in f) or
             ("Program Files\\MiKTeX" in f) or
             re.search(r"\\MiKTeX\\\d\.\d+\\tex", f) or
             ("\\MiKTeX\\tex\\" in f)):
        print("TeXlive / MiKTeX FILE! Don't skip it!")
        return False
    # Heuristic: "version 2010.12.02"
    if re.match(r"version \d\d\d\d\.\d\d\.\d\d", f):
        print("Skip it!")
        return True
    # Heuristic: TeX Live line
    if re.match(r"TeX Live 20\d\d(/Debian)?\) \(format", f):
        print("Skip it!")
        return True
    # Heuristic: MiKTeX line
    if re.match("MiKTeX \d\.\d\d?", f):
        print("Skip it!")
        return True
    # Heuristic: no two consecutive spaces in file name
    # NOTE(review): the comment above mentions TWO consecutive spaces but the
    # literal tested below is kept exactly as in the original — confirm intent.
    if " " in f:
        print("Skip it!")
        return True
    # Heuristic: various diagnostic messages
    if f == 'e.g.,' or "ext4): destination with the same identifier" in f \
            or "Kristoffer H. Rose" in f:
        print("Skip it!")
        return True
    # Heuristic: file in local directory with .tex ending
    file_exts = extra_file_ext + ['tex', 'aux', 'bbl', 'cls', 'sty', 'out']
    if f[0:2] in ['./', '.\\', '..'] and f_ext in file_exts:
        print("File! Don't skip it")
        return False
    # Ask the user; an empty answer means "skip".
    # BUG FIX: raw_input() does not exist on Python 3 (ST3); use input() there.
    answer = input() if _ST3 else raw_input()
    if answer == "":
        print("Skip it")
        return True
    else:
        print("FILE! Don't skip it")
        return False
# More robust parsing code: October / November 2012
# Input: tex log file, read in **binary** form, unprocessed
# Output: content to be displayed in output panel, split into lines
def parse_tex_log(data):
debug("Parsing log file")
errors = []
warnings = []
parsing = []
guessed_encoding = 'UTF-8' # for now
# Split data into lines while in binary form
# Then decode using guessed encoding
# We need the # of bytes per line, not the # of chars (codepoints), to undo TeX's line breaking
# so we construct an array of tuples:
# (decoded line, length of original byte array)
try:
log = [(l.decode(guessed_encoding, 'ignore'), len(l)) for l in data.splitlines()]
except UnicodeError:
debug("log file not in UTF-8 encoding!")
errors.append("ERROR: your log file is not in UTF-8 encoding.")
errors.append("Sorry, I can't process this file")
return (errors, warnings)
# loop over all log lines; construct error message as needed
# This will be useful for multi-file documents
# some regexes
# file_rx = re.compile(r"\(([^)]+)$") # OLD
# Structure (+ means captured, - means not captured)
# + maybe " (for Windows)
# + maybe a drive letter and : (for Windows)
# + maybe . NEW: or ../ or ..\, with repetitions
# + then any char, matched NON-GREEDILY (avoids issues with multiple files on one line?)
# + then .
# + then any char except for whitespace or " or ); at least ONE such char
# + then maybe " (on Windows/MikTeX)
# - then whitespace or ), or end of line
# + then anything else, captured for recycling
# This should take care of e.g. "(./test.tex [12" or "(./test.tex (other.tex"
# NOTES:
# 1. we capture the initial and ending " if there is one; we'll need to remove it later
# 2. we define the basic filename parsing regex so we can recycle it
# 3. we allow for any character besides "(" before a file name starts. This gives a lot of
# false positives but we kill them with os.path.isfile
file_basic = r"\"?(?:[a-zA-Z]\:)?(?:\.|(?:\.\./)|(?:\.\.\\))*.+?\.[^\s\"\)\.]+\"?"
file_rx = re.compile(r"[^\(]*?\((" + file_basic + r")(\s|\"|\)|$)(.*)")
# Useless file #1: {filename.ext}; capture subsequent text
# Will avoid nested {'s as these can't really appear, except if file names have braces
# which is REALLY bad!!!
file_useless1_rx = re.compile(r"\{\"?(?:\.|\.\./)*[^\.]+\.[^\{\}]*\"?\}(.*)")
# Useless file #2: <filename.ext>; capture subsequent text
file_useless2_rx = re.compile(r"<\"?(?:\.|\.\./)*[^\.]+\.[^>]*\"?>(.*)")
pagenum_begin_rx = re.compile(r"\s*\[\d*(.*)")
line_rx = re.compile(r"^l\.(\d+)\s(.*)") # l.nn <text>
warning_rx = re.compile(r"^(.*?) Warning: (.+)") # Warnings, first line
line_rx_latex_warn = re.compile(r"input line (\d+)\.$") # Warnings, line number
matched_parens_rx = re.compile(r"\([^()]*\)") # matched parentheses, to be deleted (note: not if nested)
assignment_rx = re.compile(r"\\[^=]*=") # assignment, heuristics for line merging
# Special case: the xy package, which reports end of processing with "loaded)" or "not reloaded)"
xypic_begin_rx = re.compile(r"[^()]*?(?:not re)?loaded\)(.*)")
xypic_rx = re.compile(r".*?(?:not re)?loaded\)(.*)")
# Special case: the comment package, which prints ")" after some text
comment_rx = re.compile(r"Excluding comment '.*?'(.*)")
files = []
xypic_flag = False # If we have seen xypic, report a warning, not an error for incorrect parsing
# Support function to handle warnings
def handle_warning(l):
    """Record warning text `l` in the closure's `warnings` list, prefixed with a location.

    The location is the file on top of the enclosing `files` stack, or
    "[no file]" if the stack is empty (in which case a parsing issue is
    also appended to `parsing`). If the warning text ends with LaTeX's
    "input line NN." marker, NN is inserted as the line number.
    """
    if files==[]:
        location = "[no file]"
        parsing.append("PERR [handle_warning no files] " + l)
    else:
        location = files[-1]
    warn_match_line = line_rx_latex_warn.search(l)
    if warn_match_line:
        # group(1) of line_rx_latex_warn is the "input line NN" number.
        warn_line = warn_match_line.group(1)
        warnings.append(location + ":" + warn_line + ": " + l)
    else:
        warnings.append(location + ": " + l)
# State definitions
STATE_NORMAL = 0
STATE_SKIP = 1
STATE_REPORT_ERROR = 2
STATE_REPORT_WARNING = 3
state = STATE_NORMAL
# Use our own iterator instead of for loop
log_iterator = log.__iter__()
line_num=0
line = ""
linelen = 0
recycle_extra = False # Should we add extra to newly read line?
reprocess_extra = False # Should we reprocess extra, without reading a new line?
emergency_stop = False # If TeX stopped processing, we can't pop all files
incomplete_if = False # Ditto if some \if... statement is not complete
while True:
# first of all, see if we have a line to recycle (see heuristic for "l.<nn>" lines)
if recycle_extra:
line, linelen = extra, extralen
recycle_extra = False
line_num +=1
elif reprocess_extra:
line = extra # NOTE: we must remember that we are reprocessing. See long-line heuristics
else: # we read a new line
# save previous line for "! File ended while scanning use of..." message
prev_line = line
try:
line, linelen = advance_iterator(log_iterator) # will fail when no more lines
line_num += 1
except StopIteration:
break
# Now we deal with TeX's decision to truncate all log lines at 79 characters
# If we find a line of exactly 79 characters, we add the subsequent line to it, and continue
# until we find a line of less than 79 characters
# The problem is that there may be a line of EXACTLY 79 chars. We keep our fingers crossed but also
# use some heuristics to avoid disastrous consequences
# We are inspired by latexmk (which has no heuristics, though)
# HEURISTIC: the first line is always long, and we don't care about it
# also, the **<file name> line may be long, but we skip it, too (to avoid edge cases)
# We make sure we are NOT reprocessing a line!!!
# Also, we make sure we do not have a filename match, or it would be clobbered by extending!
if (not reprocess_extra) and line_num>1 and linelen>=79 and line[0:2] != "**":
debug ("Line %d is %d characters long; last char is %s" % (line_num, len(line), line[-1]))
# HEURISTICS HERE
extend_line = True
recycle_extra = False
# HEURISTIC: check first if we just have a long "(.../file.tex" (or similar) line
# A bit inefficient as we duplicate some of the code below for filename matching
file_match = file_rx.match(line)
if file_match:
debug("MATCHED (long line)")
file_name = file_match.group(1)
file_extra = file_match.group(2) + file_match.group(3) # don't call it "extra"
# remove quotes if necessary, but first save the count for a later check
quotecount = file_name.count("\"")
file_name = file_name.replace("\"", "")
# NOTE: on TL201X pdftex sometimes writes "pdfTeX warning" right after file name
# This may or may not be a stand-alone long line, but in any case if we
# extend, the file regex will fire regularly
if file_name[-6:]=="pdfTeX" and file_extra[:8]==" warning":
debug("pdfTeX appended to file name, extending")
# Else, if the extra stuff is NOT ")" or "", we have more than a single
# file name, so again the regular regex will fire
elif file_extra not in [")", ""]:
debug("additional text after file name, extending")
# If we have exactly ONE quote, we are on Windows but we are missing the final quote
# in which case we extend, because we may be missing parentheses otherwise
elif quotecount==1:
debug("only one quote, extending")
# Now we have a long line consisting of a potential file name alone
# Check if it really is a file name
elif (not os.path.isfile(file_name)) and debug_skip_file(file_name):
debug("Not a file name")
else:
debug("IT'S A (LONG) FILE NAME WITH NO EXTRA TEXT")
extend_line = False # so we exit right away and continue with parsing
while extend_line:
debug("extending: " + line)
try:
# different handling for Python 2 and 3
extra, extralen = advance_iterator(log_iterator)
debug("extension? " + extra)
line_num += 1 # for debugging purposes
# HEURISTIC: if extra line begins with "Package:" "File:" "Document Class:",
# or other "well-known markers",
# we just had a long file name, so do not add
if extralen>0 and \
(extra[0:5]=="File:" or extra[0:8]=="Package:" or extra[0:15]=="Document Class:") or \
(extra[0:9]=="LaTeX2e <") or assignment_rx.match(extra):
extend_line = False
# no need to recycle extra, as it's nothing we are interested in
# HEURISTIC: when TeX reports an error, it prints some surrounding text
# and may use the whole line. Then it prints "...", and "l.<nn> <text>" on a new line
# pdftex warnings also use "..." at the end of a line.
# If so, do not extend
elif line[-3:]=="...": # and line_rx.match(extra): # a bit inefficient as we match twice
debug("Found [...]")
extend_line = False
recycle_extra = True # make sure we process the "l.<nn>" line!
else:
line += extra
debug("Extended: " + line)
linelen += extralen
if extralen < 79:
extend_line = False
except StopIteration:
extend_line = False # end of file, so we must be done. This shouldn't happen, btw
# We may skip the above "if" because we are reprocessing a line, so reset flag:
reprocess_extra = False
# Check various states
if state==STATE_SKIP:
state = STATE_NORMAL
continue
if state==STATE_REPORT_ERROR:
# skip everything except "l.<nn> <text>"
debug("Reporting error in line: " + line)
# We check for emergency stops here, too, because it may occur before the l.nn text
if "! Emergency stop." in line:
emergency_stop = True
debug("Emergency stop found")
continue
err_match = line_rx.match(line)
if not err_match:
continue
# now we match!
state = STATE_NORMAL
err_line = err_match.group(1)
err_text = err_match.group(2)
# err_msg is set from last time
if files==[]:
location = "[no file]"
parsing.append("PERR [STATE_REPORT_ERROR no files] " + line)
else:
location = files[-1]
debug("Found error: " + err_msg)
errors.append(location + ":" + err_line + ": " + err_msg + " [" + err_text + "]")
continue
if state==STATE_REPORT_WARNING:
# add current line and check if we are done or not
current_warning += line
if line[-1]=='.':
handle_warning(current_warning)
current_warning = None
state = STATE_NORMAL # otherwise the state stays at REPORT_WARNING
continue
if line=="":
continue
# Sometimes an \if... is not completed; in this case some files may remain on the stack
# I think the same format may apply to different \ifXXX commands, so make it flexible
if len(line)>0 and line.strip()[:23]=="(\\end occurred when \\if" and \
line.strip()[-15:]=="was incomplete)":
incomplete_if = True
debug(line)
# Skip things that are clearly not file names, though they may trigger false positives
if len(line)>0 and \
(line[0:5]=="File:" or line[0:8]=="Package:" or line[0:15]=="Document Class:") or \
(line[0:9]=="LaTeX2e <"):
continue
# Are we done? Get rid of extra spaces, just in case (we may have extended a line, etc.)
if line.strip() == "Here is how much of TeX's memory you used:":
if len(files)>0:
if emergency_stop or incomplete_if:
debug("Done processing, files on stack due to known conditions (all is fine!)")
elif xypic_flag:
parsing.append("PERR [files on stack (xypic)] " + ";".join(files))
else:
parsing.append("PERR [files on stack] " + ";".join(files))
files=[]
# break
# We cannot stop here because pdftex may yet have errors to report.
# Special error reporting for e.g. \footnote{text NO MATCHING PARENS & co
if "! File ended while scanning use of" in line:
scanned_command = line[35:-2] # skip space and period at end
# we may be unable to report a file by popping it, so HACK HACK HACK
file_name, linelen = advance_iterator(log_iterator) # <inserted text>
file_name, linelen = advance_iterator(log_iterator) # \par
file_name, linelen = advance_iterator(log_iterator)
file_name = file_name[3:] # here is the file name with <*> in front
errors.append("TeX STOPPED: " + line[2:-2]+prev_line[:-5])
errors.append("TeX reports the error was in file:" + file_name)
continue
# Here, make sure there was no uncaught error, in which case we do more special processing
# This will match both tex and pdftex Fatal Error messages
if "==> Fatal error occurred," in line:
debug("Fatal error detected")
if errors == []:
errors.append("TeX STOPPED: fatal errors occurred. Check the TeX log file for details")
continue
# If tex just stops processing, we will be left with files on stack, so we keep track of it
if "! Emergency stop." in line:
state = STATE_SKIP
emergency_stop = True
debug("Emergency stop found")
continue
# Too many errors: will also have files on stack. For some reason
# we have to do differently from above (need to double-check: why not stop processing if
# emergency stop, too?)
if "(That makes 100 errors; please try again.)" in line:
errors.append("Too many errors. TeX stopped.")
debug("100 errors, stopping")
break
# catch over/underfull
# skip everything for now
# Over/underfull messages end with [] so look for that
if line[0:8] == "Overfull" or line[0:9] == "Underfull":
if line[-2:]=="[]": # one-line over/underfull message
continue
ou_processing = True
while ou_processing:
try:
line, linelen = advance_iterator(log_iterator) # will fail when no more lines
except StopIteration:
debug("Over/underfull: StopIteration (%d)" % line_num)
break
line_num += 1
debug("Over/underfull: skip " + line + " (%d) " % line_num)
# Sometimes it's " []" and sometimes it's "[]"...
if len(line)>0 and line in [" []", "[]"]:
ou_processing = False
if ou_processing:
warnings.append("Malformed LOG file: over/underfull")
warnings.append("Please let me know via GitHub")
break
else:
continue
# Special case: the bibgerm package, which has comments starting and ending with
# **, and then finishes with "**)"
if len(line)>0 and line[:2] == "**" and line[-3:] == "**)" \
and files and "bibgerm" in files[-1]:
debug("special case: bibgerm")
debug(" "*len(files) + files[-1] + " (%d)" % (line_num,))
files.pop()
continue
# Special case: the relsize package, which puts ")" at the end of a
# line beginning with "Examine \". Ah well!
if len(line)>0 and line[:9] == "Examine \\" and line[-3:] == ". )" \
and files and "relsize" in files[-1]:
debug("special case: relsize")
debug(" "*len(files) + files[-1] + " (%d)" % (line_num,))
files.pop()
continue
# Special case: the comment package, which puts ")" at the end of a
# line beginning with "Excluding comment 'something'"
# Since I'm not sure, we match "Excluding comment 'something'" and recycle the rest
comment_match = comment_rx.match(line)
if comment_match and files and "comment" in files[-1]:
debug("special case: comment")
extra = comment_match.group(1)
debug("Reprocessing " + extra)
reprocess_extra = True
continue
# Special case: the numprint package, which prints a line saying
# "No configuration file... found.)"
# if there is no config file (duh!), and that (!!!) signals the end of processing :-(
if len(line)>0 and line.strip() == "No configuration file `numprint.cfg' found.)" \
and files and "numprint" in files[-1]:
debug("special case: numprint")
debug(" "*len(files) + files[-1] + " (%d)" % (line_num,))
files.pop()
continue
# Special case: xypic's "loaded)" at the BEGINNING of a line. Will check later
# for matches AFTER other text.
xypic_match = xypic_begin_rx.match(line)
if xypic_match:
debug("xypic match before: " + line)
# Do an extra check to make sure we are not too eager: is the topmost file
# likely to be an xypic file? Look for xypic in the file name
if files and "xypic" in files[-1]:
debug(" "*len(files) + files[-1] + " (%d)" % (line_num,))
files.pop()
extra = xypic_match.group(1)
debug("Reprocessing " + extra)
reprocess_extra = True
continue
else:
debug("Found loaded) but top file name doesn't have xy")
line = line.strip() # get rid of initial spaces
# note: in the next line, and also when we check for "!", we use the fact that "and" short-circuits
if len(line)>0 and line[0]==')': # denotes end of processing of current file: pop it from stack
if files:
debug(" "*len(files) + files[-1] + " (%d)" % (line_num,))
files.pop()
extra = line[1:]
debug("Reprocessing " + extra)
reprocess_extra = True
continue
else:
parsing.append("PERR [')' no files]")
break
# Opening page indicators: skip and reprocess
# Note: here we look for matches at the BEGINNING of a line. We check again below
# for matches elsewhere, but AFTER matching for file names.
pagenum_begin_match = pagenum_begin_rx.match(line)
if pagenum_begin_match:
extra = pagenum_begin_match.group(1)
debug("Reprocessing " + extra)
reprocess_extra = True
continue
# Closing page indicators: skip and reprocess
# Also, sometimes we have a useless file <file.tex, then a warning happens and the
# last > appears later. Pick up such stray >'s as well.
if len(line)>0 and line[0] in [']', '>']:
extra = line[1:]
debug("Reprocessing " + extra)
reprocess_extra = True
continue
# Useless file matches: {filename.ext} or <filename.ext>. We just throw it out
file_useless_match = file_useless1_rx.match(line) or file_useless2_rx.match(line)
if file_useless_match:
extra = file_useless_match.group(1)
debug("Useless file: " + line)
debug("Reprocessing " + extra)
reprocess_extra = True
continue
# this seems to happen often: no need to push / pop it
if line[:12]=="(pdftex.def)":
continue
# Now we should have a candidate file. We still have an issue with lines that
# look like file names, e.g. "(Font) blah blah data 2012.10.3" but those will
# get killed by the isfile call. Not very efficient, but OK in practice
debug("FILE? Line:" + line)
file_match = file_rx.match(line)
if file_match:
debug("MATCHED")
file_name = file_match.group(1)
extra = file_match.group(2) + file_match.group(3)
# remove quotes if necessary
file_name = file_name.replace("\"", "")
# on TL2011 pdftex sometimes writes "pdfTeX warning" right after file name
# so fix it
# TODO: report pdftex warning
if file_name[-6:]=="pdfTeX" and extra[:8]==" warning":
debug("pdfTeX appended to file name; removed")
file_name = file_name[:-6]
extra = "pdfTeX" + extra
# This kills off stupid matches
if (not os.path.isfile(file_name)) and debug_skip_file(file_name):
#continue
# NOTE BIG CHANGE HERE: CONTINUE PROCESSING IF NO MATCH
pass
else:
debug("IT'S A FILE!")
files.append(file_name)
debug(" "*len(files) + files[-1] + " (%d)" % (line_num,))
# Check if it's a xypic file
if (not xypic_flag) and "xypic" in file_name:
xypic_flag = True
debug("xypic detected, demoting parsing error to warnings")
# now we recycle the remainder of this line
debug("Reprocessing " + extra)
reprocess_extra = True
continue
# Special case: match xypic's " loaded)" markers
# You may think we already checked for this. But, NO! We must check both BEFORE and
# AFTER looking for file matches. The problem is that we
# may have the " loaded)" marker either after non-file text, or after a loaded
# file name. Aaaarghh!!!
xypic_match = xypic_rx.match(line)
if xypic_match:
debug("xypic match after: " + line)
# Do an extra check to make sure we are not too eager: is the topmost file
# likely to be an xypic file? Look for xypic in the file name
if files and "xypic" in files[-1]:
debug(" "*len(files) + files[-1] + " (%d)" % (line_num,))
files.pop()
extra = xypic_match.group(1)
debug("Reprocessing " + extra)
reprocess_extra = True
continue
else:
debug("Found loaded) but top file name doesn't have xy")
if len(line)>0 and line[0]=='!': # Now it's surely an error
debug("Error found: " + line)
# If it's a pdftex error, it's on the current line, so report it
if "pdfTeX error" in line:
err_msg = line[1:].strip() # remove '!' and possibly spaces
# This may or may not have a file location associated with it.
# Be conservative and do not try to report one.
errors.append(err_msg)
errors.append("Check the TeX log file for more information")
continue
# Now it's a regular TeX error
err_msg = line[2:] # skip "! "
# next time around, err_msg will be set and we'll extract all info
state = STATE_REPORT_ERROR
continue
# Second match for opening page numbers. We now use "search" which matches
# everywhere, not just at the beginning. We do so AFTER matching file names so we
# don't miss any.
pagenum_begin_match = pagenum_begin_rx.search(line)
if pagenum_begin_match:
debug("Matching [xx after some text")
extra = pagenum_begin_match.group(1)
debug("Reprocessing " + extra)
reprocess_extra = True
continue
warning_match = warning_rx.match(line)
if warning_match:
# if last character is a dot, it's a single line
if line[-1] == '.':
handle_warning(line)
continue
# otherwise, accumulate it
current_warning = line
state = STATE_REPORT_WARNING
continue
# If there were parsing issues, output them to debug
if parsing:
warnings.append("(Log parsing issues. Disregard unless something else is wrong.)")
print_debug = True
for l in parsing:
debug(l)
return (errors, warnings)
# If invoked from the command line, parse the log file named in argv[1]
# and print the collected warnings and errors.
if __name__ == '__main__':
    print_debug = True
    interactive = True
    try:
        logfilename = sys.argv[1]
        # logfile = open(logfilename, 'r') \
        # .read().decode(enc, 'ignore') \
        # .encode(enc, 'ignore').splitlines()
        # Optional second argument: space-separated list of extra file
        # extensions to recognize when matching file names.
        if len(sys.argv) == 3:
            extra_file_ext = sys.argv[2].split(" ")
        data = open(logfilename,'r').read()
        (errors,warnings) = parse_tex_log(data)
        print ("")
        print ("Warnings:")
        for warn in warnings:
            # NOTE(review): under Python 3 this prints a bytes repr (b'...');
            # presumably kept for Python 2 compatibility — confirm.
            print (warn.encode('UTF-8'))
        print ("")
        print ("Errors:")
        for err in errors:
            print (err.encode('UTF-8'))
    except Exception as e:
        # Print the full traceback for any failure while parsing.
        import traceback
        traceback.print_exc()
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glob
import mock
import six
from senlin.common import exception
from senlin.engine import environment
from senlin.tests.unit.common import base
fake_env_str = """
parameters:
pa: va
pb: vb
custom_profiles:
prof_1: plugin_1
custom_policies:
policy_2: plugin_2
custom_triggers:
trigger_3: plugin_3
"""
class TestEnvironment(base.SenlinTestCase):
    """Unit tests for senlin.engine.environment.Environment."""

    def setUp(self):
        super(TestEnvironment, self).setUp()

    def test_create_global(self):
        # A global environment owns global registries for every plugin kind.
        e = environment.Environment(is_global=True)
        self.assertEqual({}, e.params)
        self.assertEqual('profiles', e.profile_registry.registry_name)
        self.assertEqual('policies', e.policy_registry.registry_name)
        self.assertEqual('triggers', e.trigger_registry.registry_name)
        self.assertEqual('drivers', e.driver_registry.registry_name)
        self.assertTrue(e.profile_registry.is_global)
        self.assertTrue(e.policy_registry.is_global)
        self.assertTrue(e.trigger_registry.is_global)
        self.assertTrue(e.driver_registry.is_global)

    def test_create_default(self):
        # A non-global environment chains its registries to the global ones.
        ge = environment.global_env()
        e = environment.Environment()
        reg_prof = e.profile_registry
        reg_plcy = e.policy_registry
        reg_trig = e.trigger_registry
        reg_driv = e.driver_registry
        self.assertEqual({}, e.params)
        self.assertEqual('profiles', reg_prof.registry_name)
        self.assertEqual('policies', reg_plcy.registry_name)
        self.assertEqual('triggers', reg_trig.registry_name)
        self.assertEqual('drivers', reg_driv.registry_name)
        self.assertFalse(reg_prof.is_global)
        self.assertFalse(reg_plcy.is_global)
        self.assertFalse(reg_trig.is_global)
        self.assertFalse(reg_driv.is_global)
        self.assertEqual('profiles', ge.profile_registry.registry_name)
        self.assertEqual('policies', ge.policy_registry.registry_name)
        self.assertEqual('triggers', ge.trigger_registry.registry_name)
        self.assertEqual('drivers', ge.driver_registry.registry_name)
        self.assertEqual(ge.profile_registry, reg_prof.global_registry)
        self.assertEqual(ge.policy_registry, reg_plcy.global_registry)
        self.assertEqual(ge.trigger_registry, reg_trig.global_registry)
        self.assertEqual(ge.driver_registry, reg_driv.global_registry)

    def test_create_with_env(self):
        # Passing an env dict seeds parameters and all plugin registries.
        env = {
            'parameters': {
                'p1': 'v1',
                'p2': True,
            },
            'custom_profiles': {
                'PROFILE_FOO': 'some.class',
                'PROFILE_BAR': 'other.class',
            },
            'custom_policies': {
                'POLICY_Alpha': 'package.alpha',
                'POLICY_Beta': 'package.beta',
            },
            'custom_triggers': {
                'TRIGGER_1': 'module.1',
                'TRIGGER_2': 'module.2',
            }
        }
        e = environment.Environment(env=env, is_global=True)
        self.assertEqual('v1', e.params['p1'])
        self.assertEqual(True, e.params['p2'])
        self.assertEqual('some.class', e.get_profile('PROFILE_FOO'))
        self.assertEqual('other.class', e.get_profile('PROFILE_BAR'))
        self.assertEqual('package.alpha', e.get_policy('POLICY_Alpha'))
        self.assertEqual('package.beta', e.get_policy('POLICY_Beta'))
        self.assertEqual('module.1', e.get_trigger('TRIGGER_1'))
        self.assertEqual('module.2', e.get_trigger('TRIGGER_2'))

    def test_parse(self):
        env = environment.Environment()
        result = env.parse(fake_env_str)
        self.assertEqual('va', result['parameters']['pa'])
        self.assertEqual('vb', result['parameters']['pb'])
        self.assertEqual('plugin_1', result['custom_profiles']['prof_1'])
        self.assertEqual('plugin_2', result['custom_policies']['policy_2'])
        self.assertEqual('plugin_3', result['custom_triggers']['trigger_3'])
        # unknown sections
        env_str = "variables:\n p1: v1"
        err = self.assertRaises(ValueError, env.parse, env_str)
        self.assertEqual('environment has unknown section "variables"',
                         six.text_type(err))
        # omitted sections
        env_str = "parameters:\n p1: v1"
        result = env.parse(env_str)
        self.assertEqual('v1', result['parameters']['p1'])
        self.assertEqual({}, result['custom_profiles'])
        self.assertEqual({}, result['custom_policies'])
        self.assertEqual({}, result['custom_triggers'])

    def test_parse_empty(self):
        # Parsing None yields an empty dict, not an error.
        env = environment.Environment()
        result = env.parse(None)
        self.assertEqual({}, result)

    def test_load(self):
        env = environment.Environment()
        env.load({})
        self.assertEqual({}, env.params)
        self.assertEqual({}, env.profile_registry._registry)
        self.assertEqual({}, env.policy_registry._registry)
        self.assertEqual({}, env.trigger_registry._registry)
        self.assertEqual({}, env.driver_registry._registry)
        env_dict = {
            'parameters': {
                'P': 'V'
            },
            'custom_profiles': {
                'C1': 'class1',
            },
            'custom_policies': {
                'C2': 'class2',
            },
            'custom_triggers': {
                'C3': 'class3',
            }
        }
        env.load(env_dict)
        self.assertEqual('V', env.params['P'])
        self.assertEqual('class1', env.get_profile('C1'))
        self.assertEqual('class2', env.get_policy('C2'))
        self.assertEqual('class3', env.get_trigger('C3'))

    def test_check_plugin_name(self):
        # Each plugin kind rejects empty/None names and non-string names.
        env = environment.Environment()
        for pt in ['Profile', 'Policy', 'Trigger', 'Driver']:
            res = env._check_plugin_name(pt, 'abc')
            self.assertIsNone(res)
            ex = self.assertRaises(exception.InvalidPlugin,
                                   env._check_plugin_name, pt, '')
            self.assertEqual('%s type name not specified' % pt,
                             six.text_type(ex))
            ex = self.assertRaises(exception.InvalidPlugin,
                                   env._check_plugin_name, pt, None)
            self.assertEqual('%s type name not specified' % pt,
                             six.text_type(ex))
            for v in [123, {}, ['a'], ('b', 'c'), True]:
                ex = self.assertRaises(exception.InvalidPlugin,
                                       env._check_plugin_name, pt, v)
                self.assertEqual('%s type name is not a string' % pt,
                                 six.text_type(ex))

    def test_register_and_get_profile(self):
        plugin = mock.Mock()
        env = environment.Environment()
        # Lookup before registration raises ProfileTypeNotFound.
        ex = self.assertRaises(exception.ProfileTypeNotFound,
                               env.get_profile, 'foo')
        self.assertEqual('Profile type (foo) is not found.',
                         six.text_type(ex))
        env.register_profile('foo', plugin)
        self.assertEqual(plugin, env.get_profile('foo'))

    def test_get_profile_types(self):
        env = environment.Environment()
        plugin1 = mock.Mock()
        env.register_profile('foo', plugin1)
        plugin2 = mock.Mock()
        env.register_profile('bar', plugin2)
        actual = env.get_profile_types()
        self.assertIn({'name': 'foo'}, actual)
        self.assertIn({'name': 'bar'}, actual)

    def test_register_and_get_policy(self):
        plugin = mock.Mock()
        env = environment.Environment()
        # Lookup before registration raises PolicyTypeNotFound.
        ex = self.assertRaises(exception.PolicyTypeNotFound,
                               env.get_policy, 'foo')
        self.assertEqual('Policy type (foo) is not found.',
                         six.text_type(ex))
        env.register_policy('foo', plugin)
        self.assertEqual(plugin, env.get_policy('foo'))

    def test_get_policy_types(self):
        env = environment.Environment()
        plugin1 = mock.Mock()
        env.register_policy('foo', plugin1)
        plugin2 = mock.Mock()
        env.register_policy('bar', plugin2)
        actual = env.get_policy_types()
        self.assertIn({'name': 'foo'}, actual)
        self.assertIn({'name': 'bar'}, actual)

    def test_register_and_get_trigger_types(self):
        plugin = mock.Mock()
        env = environment.Environment()
        # Lookup before registration raises TriggerTypeNotFound.
        ex = self.assertRaises(exception.TriggerTypeNotFound,
                               env.get_trigger, 'foo')
        self.assertEqual('Trigger type (foo) is not found.',
                         six.text_type(ex))
        env.register_trigger('foo', plugin)
        self.assertEqual(plugin, env.get_trigger('foo'))

    def test_get_trigger_types(self):
        env = environment.Environment()
        plugin1 = mock.Mock()
        env.register_trigger('foo', plugin1)
        plugin2 = mock.Mock()
        env.register_trigger('bar', plugin2)
        actual = env.get_trigger_types()
        self.assertIn({'name': 'foo'}, actual)
        self.assertIn({'name': 'bar'}, actual)

    def test_register_and_get_driver_types(self):
        plugin = mock.Mock()
        env = environment.Environment()
        # NOTE: drivers raise InvalidPlugin (not a *NotFound type) on miss.
        ex = self.assertRaises(exception.InvalidPlugin,
                               env.get_driver, 'foo')
        self.assertEqual('Driver plugin foo is not found.',
                         six.text_type(ex))
        env.register_driver('foo', plugin)
        self.assertEqual(plugin, env.get_driver('foo'))

    def test_get_driver_types(self):
        env = environment.Environment()
        plugin1 = mock.Mock()
        env.register_driver('foo', plugin1)
        plugin2 = mock.Mock()
        env.register_driver('bar', plugin2)
        actual = env.get_driver_types()
        self.assertIn({'name': 'foo'}, actual)
        self.assertIn({'name': 'bar'}, actual)

    def test_read_global_environment(self):
        # Every file in the environment directory is globbed and opened.
        mock_dir = self.patchobject(glob, 'glob')
        mock_dir.return_value = ['/etc/senlin/environments/e.yaml']
        env_dir = '/etc/senlin/environments'
        env_contents = 'parameters:\n p1: v1'
        env = environment.Environment(is_global=True)
        with mock.patch('senlin.engine.environment.open',
                        mock.mock_open(read_data=env_contents),
                        create=True) as mock_open:
            env.read_global_environment()
        mock_dir.assert_called_once_with(env_dir + '/*')
        mock_open.assert_called_once_with('%s/e.yaml' % env_dir)

    def test_empty_environment_dir(self):
        # An empty directory is a no-op.
        mock_dir = self.patchobject(glob, 'glob', return_value=[])
        env_dir = '/etc/senlin/environments'
        env = environment.Environment()
        env.read_global_environment()
        mock_dir.assert_called_once_with(env_dir + '/*')

    def test_read_global_environment_oserror(self):
        # An OSError from glob is swallowed, not propagated.
        mock_dir = self.patchobject(glob, 'glob')
        mock_dir.side_effect = OSError
        env = environment.Environment(is_global=True)
        env_dir = '/etc/senlin/environments'
        env.read_global_environment()
        mock_dir.assert_called_once_with(env_dir + '/*')

    def test_read_global_environment_ioerror(self):
        # An IOError while opening a file is swallowed, not propagated.
        mock_dir = self.patchobject(glob, 'glob')
        mock_dir.return_value = ['/etc/senlin/environments/e.yaml']
        env_dir = '/etc/senlin/environments'
        env = environment.Environment(is_global=True)
        env_contents = ''
        with mock.patch('senlin.engine.environment.open',
                        mock.mock_open(read_data=env_contents),
                        create=True) as mock_open:
            mock_open.side_effect = IOError
            env.read_global_environment()
        mock_dir.assert_called_once_with(env_dir + '/*')
        mock_open.assert_called_once_with('%s/e.yaml' % env_dir)

    def test_read_global_environment_parse_error(self):
        # Unparseable file contents are tolerated.
        mock_dir = self.patchobject(glob, 'glob')
        mock_dir.return_value = ['/etc/senlin/environments/e.yaml']
        env_dir = '/etc/senlin/environments'
        env_contents = 'aii$%@@$#7'
        env = environment.Environment(is_global=True)
        with mock.patch('senlin.engine.environment.open',
                        mock.mock_open(read_data=env_contents),
                        create=True) as mock_open:
            env.read_global_environment()
        mock_dir.assert_called_once_with(env_dir + '/*')
        mock_open.assert_called_once_with('%s/e.yaml' % env_dir)

    @mock.patch.object(environment, '_get_mapping')
    def test_global_initialize(self, mock_mapping):
        # initialize() builds the global env from all four entry-point groups.
        mock_mapping.return_value = [['aaa', mock.Mock()]]
        environment._environment = None
        environment.initialize()
        expected = [mock.call('senlin.profiles'),
                    mock.call('senlin.policies'),
                    mock.call('senlin.triggers'),
                    mock.call('senlin.drivers')]
        self.assertIsNotNone(environment._environment)
        self.assertEqual(expected, mock_mapping.call_args_list)
        self.assertIsNotNone(environment.global_env().get_profile('aaa'))
        self.assertIsNotNone(environment.global_env().get_policy('aaa'))
        self.assertIsNotNone(environment.global_env().get_trigger('aaa'))
        self.assertIsNotNone(environment.global_env().get_driver('aaa'))
        # Reset the module-level singleton so later tests start clean.
        environment._environment = None
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras Premade WideNDeep models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.feature_column import dense_features_v2
from tensorflow.python.feature_column import feature_column_v2 as fc
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import input_layer
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.keras.premade import linear
from tensorflow.python.keras.premade import wide_deep
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class WideDeepModelTest(keras_parameterized.TestCase):
def test_wide_deep_model(self):
  """Fitting a WideDeepModel on random two-branch data builds the model."""
  wide = linear.LinearModel(units=1)
  deep = sequential.Sequential([core.Dense(units=1, input_dim=3)])
  combined = wide_deep.WideDeepModel(wide, deep)
  # Draw linear input first, then DNN input (order preserved for RNG state).
  wide_x = np.random.uniform(low=-5, high=5, size=(64, 2))
  deep_x = np.random.uniform(low=-5, high=5, size=(64, 3))
  target = .3 * wide_x[:, 0] + .2 * deep_x[:, 1]
  combined.compile(
      optimizer=['sgd', 'adam'],
      loss='mse',
      metrics=[],
      run_eagerly=testing_utils.should_run_eagerly())
  combined.fit([wide_x, deep_x], target, epochs=5)
  self.assertTrue(combined.built)
def test_wide_deep_model_backprop(self):
  """One SGD epoch applies each optimizer to its own sub-model's kernel."""
  with self.cached_session():
    linear_model = linear.LinearModel(units=1, kernel_initializer='zeros')
    dnn_model = sequential.Sequential(
        [core.Dense(units=1, kernel_initializer='zeros')])
    wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
    linear_inp = np.array([1.])
    dnn_inp = np.array([1.])
    inputs = [linear_inp, dnn_inp]
    output = linear_inp + 2 * dnn_inp
    # Distinct learning rates per branch so the resulting kernels differ.
    linear_opt = gradient_descent.SGD(learning_rate=.1)
    dnn_opt = gradient_descent.SGD(learning_rate=.3)
    wide_deep_model.compile(
        optimizer=[linear_opt, dnn_opt],
        loss='mse',
        metrics=[],
        run_eagerly=testing_utils.should_run_eagerly())
    self.evaluate(variables.global_variables_initializer())
    wide_deep_model.fit(inputs, output, epochs=1)
    # With zero-initialized kernels and target 3, the MSE gradient is -6,
    # so one step moves the kernels to lr*6: 0.6 and 1.8 respectively.
    self.assertAllClose(
        [[0.6]],
        self.evaluate(wide_deep_model.linear_model.dense_layers[0].kernel))
    self.assertAllClose([[1.8]],
                        self.evaluate(
                            wide_deep_model.dnn_model.layers[0].kernel))
def test_wide_deep_model_with_single_input(self):
linear_model = linear.LinearModel(units=1)
dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)])
wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
inputs = np.random.uniform(low=-5, high=5, size=(64, 3))
output = .3 * inputs[:, 0]
wide_deep_model.compile(
optimizer=['sgd', 'adam'],
loss='mse',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly())
wide_deep_model.fit(inputs, output, epochs=5)
def test_wide_deep_model_with_multi_outputs(self):
with context.eager_mode():
inp = input_layer.Input(shape=(1,), name='linear')
l = linear.LinearModel(units=2, use_bias=False)(inp)
l1, l2 = array_ops.split(l, num_or_size_splits=2, axis=1)
linear_model = training.Model(inp, [l1, l2])
linear_model.set_weights([np.asarray([[0.5, 0.3]])])
h = core.Dense(units=2, use_bias=False)(inp)
h1, h2 = array_ops.split(h, num_or_size_splits=2, axis=1)
dnn_model = training.Model(inp, [h1, h2])
dnn_model.set_weights([np.asarray([[0.1, -0.5]])])
wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
inp_np = np.asarray([[1.]])
out1, out2 = wide_deep_model(inp_np)
# output should be (0.5 + 0.1), and (0.3 - 0.5)
self.assertAllClose([[0.6]], out1)
self.assertAllClose([[-0.2]], out2)
wide_deep_model = wide_deep.WideDeepModel(
linear_model, dnn_model, activation='relu')
out1, out2 = wide_deep_model(inp_np)
# output should be relu((0.5 + 0.1)), and relu((0.3 - 0.5))
self.assertAllClose([[0.6]], out1)
self.assertAllClose([[0.]], out2)
def test_wide_deep_model_with_single_optimizer(self):
linear_model = linear.LinearModel(units=1)
dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)])
wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
linear_inp = np.random.uniform(low=-5, high=5, size=(64, 2))
dnn_inp = np.random.uniform(low=-5, high=5, size=(64, 3))
inputs = [linear_inp, dnn_inp]
output = .3 * linear_inp[:, 0] + .2 * dnn_inp[:, 1]
wide_deep_model.compile(
optimizer='sgd',
loss='mse',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly())
wide_deep_model.fit(inputs, output, epochs=5)
self.assertTrue(wide_deep_model.built)
def test_wide_deep_model_as_layer(self):
linear_model = linear.LinearModel(units=1)
dnn_model = sequential.Sequential([core.Dense(units=1)])
linear_input = input_layer.Input(shape=(3,), name='linear')
dnn_input = input_layer.Input(shape=(5,), name='dnn')
wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
wide_deep_output = wide_deep_model((linear_input, dnn_input))
input_b = input_layer.Input(shape=(1,), name='b')
output_b = core.Dense(units=1)(input_b)
model = training.Model(
inputs=[linear_input, dnn_input, input_b],
outputs=[wide_deep_output + output_b])
linear_input_np = np.random.uniform(low=-5, high=5, size=(64, 3))
dnn_input_np = np.random.uniform(low=-5, high=5, size=(64, 5))
input_b_np = np.random.uniform(low=-5, high=5, size=(64,))
output_np = linear_input_np[:, 0] + .2 * dnn_input_np[:, 1] + input_b_np
model.compile(
optimizer='sgd',
loss='mse',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly())
model.fit([linear_input_np, dnn_input_np, input_b_np], output_np, epochs=5)
def test_wide_deep_model_with_sub_model_trained(self):
linear_model = linear.LinearModel(units=1)
dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)])
wide_deep_model = wide_deep.WideDeepModel(
linear.LinearModel(units=1),
sequential.Sequential([core.Dense(units=1, input_dim=3)]))
linear_inp = np.random.uniform(low=-5, high=5, size=(64, 2))
dnn_inp = np.random.uniform(low=-5, high=5, size=(64, 3))
inputs = [linear_inp, dnn_inp]
output = .3 * linear_inp[:, 0] + .2 * dnn_inp[:, 1]
linear_model.compile(
optimizer='sgd',
loss='mse',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly())
dnn_model.compile(
optimizer='adam',
loss='mse',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly())
linear_model.fit(linear_inp, output, epochs=50)
dnn_model.fit(dnn_inp, output, epochs=50)
wide_deep_model.compile(
optimizer=['sgd', 'adam'],
loss='mse',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly())
wide_deep_model.fit(inputs, output, epochs=50)
# This test is an example for cases where linear and dnn model accepts
# same raw input and same transformed inputs, i.e., the raw input is
# categorical, and both linear and dnn model accept one hot encoding.
def test_wide_deep_model_with_single_feature_column(self):
vocab_list = ['alpha', 'beta', 'gamma']
vocab_val = [0.4, 0.6, 0.9]
data = np.random.choice(vocab_list, size=256)
y = np.zeros_like(data, dtype=np.float32)
for vocab, val in zip(vocab_list, vocab_val):
indices = np.where(data == vocab)
y[indices] = val + np.random.uniform(
low=-0.01, high=0.01, size=indices[0].shape)
cat_column = fc.categorical_column_with_vocabulary_list(
key='symbol', vocabulary_list=vocab_list)
ind_column = fc.indicator_column(cat_column)
dense_feature_layer = dense_features_v2.DenseFeatures([ind_column])
linear_model = linear.LinearModel(
use_bias=False, kernel_initializer='zeros')
dnn_model = sequential.Sequential([core.Dense(units=1)])
wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
combined = sequential.Sequential([dense_feature_layer, wide_deep_model])
opt = gradient_descent.SGD(learning_rate=0.1)
combined.compile(
opt,
'mse', [],
run_eagerly=testing_utils.should_run_eagerly())
combined.fit(x={'symbol': data}, y=y, batch_size=32, epochs=10)
# This test is an example for cases where linear and dnn model accepts
# same raw input but different transformed inputs, i.e,. the raw input is
# categorical, and linear model accepts one hot encoding, while dnn model
# accepts embedding encoding.
def test_wide_deep_model_with_two_feature_columns(self):
vocab_list = ['alpha', 'beta', 'gamma']
vocab_val = [0.4, 0.6, 0.9]
data = np.random.choice(vocab_list, size=256)
y = np.zeros_like(data, dtype=np.float32)
for vocab, val in zip(vocab_list, vocab_val):
indices = np.where(data == vocab)
y[indices] = val + np.random.uniform(
low=-0.01, high=0.01, size=indices[0].shape)
cat_column = fc.categorical_column_with_vocabulary_list(
key='symbol', vocabulary_list=vocab_list)
ind_column = fc.indicator_column(cat_column)
emb_column = fc.embedding_column(cat_column, dimension=5)
linear_feature_layer = dense_features_v2.DenseFeatures([ind_column])
linear_model = linear.LinearModel(
use_bias=False, kernel_initializer='zeros')
combined_linear = sequential.Sequential(
[linear_feature_layer, linear_model])
dnn_model = sequential.Sequential([core.Dense(units=1)])
dnn_feature_layer = dense_features_v2.DenseFeatures([emb_column])
combined_dnn = sequential.Sequential([dnn_feature_layer, dnn_model])
wide_deep_model = wide_deep.WideDeepModel(combined_linear, combined_dnn)
opt = gradient_descent.SGD(learning_rate=0.1)
wide_deep_model.compile(
opt,
'mse', [],
run_eagerly=testing_utils.should_run_eagerly())
wide_deep_model.fit(x={'symbol': data}, y=y, batch_size=32, epochs=10)
def test_config(self):
linear_model = linear.LinearModel(units=1)
dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)])
wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
config = wide_deep_model.get_config()
cloned_wide_deep_model = wide_deep.WideDeepModel.from_config(config)
self.assertEqual(linear_model.units,
cloned_wide_deep_model.linear_model.units)
self.assertEqual(dnn_model.layers[0].units,
cloned_wide_deep_model.dnn_model.layers[0].units)
def test_config_with_custom_objects(self):
def my_activation(x):
return x
linear_model = linear.LinearModel(units=1)
dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)])
wide_deep_model = wide_deep.WideDeepModel(
linear_model, dnn_model, activation=my_activation)
config = wide_deep_model.get_config()
cloned_wide_deep_model = wide_deep.WideDeepModel.from_config(
config, custom_objects={'my_activation': my_activation})
self.assertEqual(cloned_wide_deep_model.activation, my_activation)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
|
|
import random
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn.utils import shuffle
from tensorflow.contrib.layers import flatten
from tensorflow.examples.tutorials.mnist import input_data
def LeNet(x):
    """Build the LeNet-5 graph and return the logits tensor.

    Input: a 32x32xC image batch tensor (C is 1 for grayscale MNIST).
    Architecture:
        conv 5x5 -> 28x28x6, ReLU, max-pool -> 14x14x6
        conv 5x5 -> 10x10x16, ReLU, max-pool -> 5x5x16
        flatten -> 400
        fully connected 400 -> 120 -> 84 -> 10 (logits)

    Parameters
    ----------
    x : input image placeholder/tensor

    Returns
    -------
    tensor
        Unnormalised class scores (logits) for 10 classes.
    """
    # Truncated-normal initialisation parameters shared by every layer.
    mu = 0
    sigma = 0.1

    def _weights(shape):
        # Fresh randomly-initialised weight variable for one layer.
        return tf.Variable(tf.truncated_normal(shape=shape, mean=mu, stddev=sigma))

    def _bias(n):
        # Zero-initialised bias variable of length n.
        return tf.Variable(tf.zeros(n))

    # Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6, then
    # ReLU and 2x2 max-pool down to 14x14x6.
    act1 = tf.nn.relu(
        tf.nn.conv2d(x, _weights((5, 5, 1, 6)), strides=[1, 1, 1, 1],
                     padding='VALID') + _bias(6))
    pool1 = tf.nn.max_pool(act1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                           padding='VALID')

    # Layer 2: Convolutional. Output = 10x10x16, then ReLU and 2x2
    # max-pool down to 5x5x16.
    act2 = tf.nn.relu(
        tf.nn.conv2d(pool1, _weights((5, 5, 6, 16)), strides=[1, 1, 1, 1],
                     padding='VALID') + _bias(16))
    pool2 = tf.nn.max_pool(act2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                           padding='VALID')

    # Flatten. Input = 5x5x16. Output = 400.
    flat = flatten(pool2)

    # Layers 3-5: fully connected 400 -> 120 -> 84 -> 10.
    fc1 = tf.nn.relu(tf.matmul(flat, _weights((400, 120))) + _bias(120))
    fc2 = tf.nn.relu(tf.matmul(fc1, _weights((120, 84))) + _bias(84))
    logits = tf.matmul(fc2, _weights((84, 10))) + _bias(10)
    return logits
# Graph inputs: batches of 32x32 grayscale images and integer labels.
x = tf.placeholder(tf.float32, (None, 32, 32, 1))
y = tf.placeholder(tf.int32, (None))
one_hot_y = tf.one_hot(y, 10, dtype=tf.float32)
# Training pipeline: softmax cross-entropy minimised with Adam.
rate = 0.001
logits = LeNet(x)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=one_hot_y)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate=rate)
training_operation = optimizer.minimize(loss_operation)
# Evaluation op: fraction of argmax predictions matching the labels.
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
def evaluate(X_data, y_data, BATCH_SIZE):
    """Return the mean accuracy of the current model over a dataset.

    Runs `accuracy_operation` batch by batch in the default session and
    weights each batch's accuracy by its size, so a short final batch is
    accounted for correctly.

    Parameters
    ----------
    X_data : images
    y_data : labels
    BATCH_SIZE : number of samples per evaluation batch

    Returns
    -------
    float
        Accuracy averaged over all examples.
    """
    total = len(X_data)
    weighted_correct = 0
    session = tf.get_default_session()
    for start in range(0, total, BATCH_SIZE):
        stop = start + BATCH_SIZE
        batch_images, batch_labels = X_data[start:stop], y_data[start:stop]
        batch_accuracy = session.run(
            accuracy_operation, feed_dict={x: batch_images, y: batch_labels})
        weighted_correct += batch_accuracy * len(batch_images)
    return weighted_correct / total
def run():
    """
    Runs the model and validates it.

    Loads MNIST, pads the 28x28 images to the 32x32 input LeNet expects,
    trains for a fixed number of epochs (reporting validation accuracy
    after each), saves a checkpoint, then restores it and reports test
    accuracy.
    """
    mnist = input_data.read_data_sets("./MNIST_data/", reshape=False)
    X_train, y_train = mnist.train.images, mnist.train.labels
    X_validation, y_validation = mnist.validation.images, mnist.validation.labels
    X_test, y_test = mnist.test.images, mnist.test.labels
    # Sanity-check that features and labels line up.
    assert (len(X_train) == len(y_train))
    assert (len(X_validation) == len(y_validation))
    assert (len(X_test) == len(y_test))
    print()
    print("Image Shape: {}".format(X_train[0].shape))
    print()
    print("Training Set: {} samples".format(len(X_train)))
    print("Validation Set: {} samples".format(len(X_validation)))
    print("Test Set: {} samples".format(len(X_test)))
    # Pad images with 0s: 28x28 -> 32x32 to match LeNet's input layer.
    X_train = np.pad(X_train, ((0, 0), (2, 2), (2, 2), (0, 0)), 'constant')
    X_validation = np.pad(X_validation, ((0, 0), (2, 2), (2, 2), (0, 0)), 'constant')
    X_test = np.pad(X_test, ((0, 0), (2, 2), (2, 2), (0, 0)), 'constant')
    print("Updated Image Shape: {}".format(X_train[0].shape))
    # Shuffle training data.
    X_train, y_train = shuffle(X_train, y_train)
    # visualize(X_train, y_train) # To visualize random sample.
    EPOCHS = 10
    BATCH_SIZE = 128
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        num_examples = len(X_train)
        print("Training...")
        print()
        for i in range(EPOCHS):
            # Re-shuffle each epoch so mini-batches differ across epochs.
            X_train, y_train = shuffle(X_train, y_train)
            for offset in range(0, num_examples, BATCH_SIZE):
                end = offset + BATCH_SIZE
                batch_x, batch_y = X_train[offset:end], y_train[offset:end]
                sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})
            validation_accuracy = evaluate(X_validation, y_validation, BATCH_SIZE)
            print("EPOCH {} ...".format(i + 1))
            print("Validation Accuracy = {:.3f}".format(validation_accuracy))
            print()
        saver.save(sess, './lenet')
        print("Model saved")
    with tf.Session() as sess:
        # Restore the freshly saved checkpoint and score the test set.
        saver.restore(sess, tf.train.latest_checkpoint('.'))
        test_accuracy = evaluate(X_test, y_test, BATCH_SIZE)
        print("Test Accuracy = {:.3f}".format(test_accuracy))
def visualize(X_train, y_train):
    """
    View a random sample from the MNIST dataset.

    Picks a random training example, prints its label and shows the
    image with matplotlib.
    """
    # random.randint is inclusive on BOTH ends, so the original
    # randint(0, len(X_train)) could draw len(X_train) itself and raise
    # IndexError; randrange excludes the upper bound.
    index = random.randrange(len(X_train))
    image = X_train[index].squeeze()
    plt.figure(figsize=(1, 1))
    plt.imshow(image, cmap="gray")
    print(y_train[index])
    plt.show()
if __name__ == '__main__':
    # visualize()  # Uncomment to preview a random training sample first.
    run()
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Zinc grammar specification.
# (C) 2016 VRT Systems
#
# vim: set ts=4 sts=4 et tw=78 sw=4 si:
import datetime
import logging
import re
import sys
import iso8601
import pyparsing as pp
import six
# Bring in special Project Haystack types and time zones
from .datatypes import Quantity, Coordinate, Uri, Bin, MARKER, NA, REMOVE, Ref, XStr
from .grid import Grid
# Bring in our sortable dict class to preserve order
from .sortabledict import SortableDict
# Bring in version handling
from .version import Version, VER_2_0, VER_3_0
from .zoneinfo import timezone
# Logging instance for reporting debug info
LOG = logging.getLogger(__name__)
# All grids start with the version string.
VERSION_RE = re.compile(r'^ver:"(([^"\\]|\\[\\"bfnrt$])+)"')
NEWLINE_RE = re.compile(r'\r?\n')
# Character number regex; for exceptions.  NOTE: this must be a raw
# string -- the previous non-raw literal contained the invalid escape
# sequences '\(' and '\d', which emit DeprecationWarning/SyntaxWarning
# on modern Python.
CHAR_NUM_RE = re.compile(r' *\(at char \d+\),')
def reformat_exception(ex_msg, line_num=None):
    """Normalise a pyparsing exception message for display.

    Strips the '(at char N),' position marker and, when *line_num* is
    given, rewrites pyparsing's 'line:1' reference to the real line
    number within the grid.

    Parameters
    ----------
    ex_msg:
        The exception (or message) to reformat.
    line_num:
        Optional 1-based line number of the failure in the full input.

    Returns
    -------
    The cleaned-up message string.
    """
    # NOTE: the stray debug print() calls that used to dump the raw and
    # cleaned messages to stdout have been removed.
    msg = CHAR_NUM_RE.sub(u'', six.text_type(ex_msg))
    if line_num is not None:
        return msg.replace(u'line:1', u'line:%d' % line_num)
    else:
        return msg
# Convenience function, we want whitespace left alone.
def _leave_ws(cls, *args, **kwargs):
return cls(*args, **kwargs).leaveWhitespace()
# Versions of the pyparsing types that leave our whitespace alone!
# Each wrapper builds the corresponding pyparsing element via _leave_ws,
# so significant spaces/newlines in the ZINC grammar are not skipped.
Empty = lambda *a, **kwa: _leave_ws(pp.Empty, *a, **kwa)
Regex = lambda *a, **kwa: _leave_ws(pp.Regex, *a, **kwa)
Literal = lambda *a, **kwa: _leave_ws(pp.Literal, *a, **kwa)
CaselessLiteral = lambda *a, **kwa: _leave_ws(pp.CaselessLiteral, *a, **kwa)
Word = lambda *a, **kwa: _leave_ws(pp.Word, *a, **kwa)
Optional = lambda *a, **kwa: _leave_ws(pp.Optional, *a, **kwa)
Suppress = lambda *a, **kwa: _leave_ws(pp.Suppress, *a, **kwa)
Combine = lambda *a, **kwa: _leave_ws(pp.Combine, *a, **kwa)
And = lambda *a, **kwa: _leave_ws(pp.And, *a, **kwa)
Or = lambda *a, **kwa: _leave_ws(pp.Or, *a, **kwa)
ZeroOrMore = lambda *a, **kwa: _leave_ws(pp.ZeroOrMore, *a, **kwa)
OneOrMore = lambda *a, **kwa: _leave_ws(pp.OneOrMore, *a, **kwa)
Group = lambda *a, **kwa: _leave_ws(pp.Group, *a, **kwa)
DelimitedList = lambda *a, **kwa: _leave_ws(pp.delimitedList, *a, **kwa)
Forward = lambda *a, **kwa: _leave_ws(pp.Forward, *a, **kwa)
class ZincParseException(ValueError):
    """
    Exception thrown when a grid cannot be parsed successfully. If known,
    the line and column for the grid are given.
    """

    def __init__(self, message, grid_str, line, col):
        """
        message: human-readable description of the parse failure.
        grid_str: the full ZINC text that failed to parse.
        line, col: 1-based position of the failure within grid_str.
        """
        self.grid_str = grid_str
        self.line = line
        self.col = col

        try:
            # If we know the line and column, point it out in the message.
            grid_str_lines = grid_str.split('\n')
            width = max([len(l) for l in grid_str_lines])
            linefmt = u'%%-%ds' % width
            rowfmt = u'%4d%s' + linefmt + u'%s'

            formatted_lines = [
                rowfmt % (
                    num,
                    ' >' if (line == num) else '| ',
                    line_str,
                    '< ' if (line == num) else ' |'
                )
                for (num, line_str)
                in enumerate(grid_str.split('\n'), 1)
            ]
            # Insert a caret marker row just below the offending line.
            formatted_lines.insert(line,
                                   (u' | ' + linefmt + u' |')
                                   % (((col - 2) * u' ') + '.^.')
                                   )
            # Border it for readability
            formatted_lines.insert(0, u' .' + (u'-' * (2 + width)) + u'.')
            formatted_lines.append(u' \'' + (u'-' * (2 + width)) + u'\'')

            # Append to message
            message += u'\n%s' % u'\n'.join(formatted_lines)
        except Exception:  # pragma: no cover
            # We should not get here; a formatting failure must never mask
            # the original parse error.  (Narrowed from a bare `except:`,
            # which also swallowed KeyboardInterrupt/SystemExit.)
            LOG.exception('Exception encountered formatting log message')

        super(ZincParseException, self).__init__(message)
class NearestMatch(object):
    """
    This class returns the nearest matching grammar for the given version.
    """

    def __init__(self, known_grammars):
        # Mapping of Version -> grammar; doubles as a cache for the
        # nearest-match look-ups performed in __getitem__.
        self._known_grammars = known_grammars

    def __getitem__(self, ver):
        """
        Retrieve the grammar that closest matches the version string given.
        """
        try:
            return self._known_grammars[ver]
        except KeyError:
            # No exact match: fall back to the nearest known version and
            # memoise the answer so the next look-up is O(1).
            grammar = self._known_grammars[Version.nearest(ver)]
            self._known_grammars[ver] = grammar
            return grammar
class GenerateMatch(object):
    """
    This class tries to generate a matching grammar based on the version input given.
    """

    def __init__(self, generator_fn):
        # Callable mapping a version to a freshly built grammar.
        self._generator_fn = generator_fn
        # Memoised grammars, keyed by version.
        self._known_grammars = {}

    def __getitem__(self, ver):
        """Return the grammar for *ver*, generating and caching it once."""
        if ver not in self._known_grammars:
            self._known_grammars[ver] = self._generator_fn(ver)
        return self._known_grammars[ver]
def _unescape(s, uri=False):
"""
Iterative parser for string escapes.
"""
out = ''
while len(s) > 0:
c = s[0]
if c == '\\':
# Backslash escape
esc_c = s[1]
if esc_c in ('u', 'U'):
# Unicode escape
out += six.unichr(int(s[2:6], base=16))
s = s[6:]
continue
else:
if esc_c == 'b':
out += '\b'
elif esc_c == 'f':
out += '\f'
elif esc_c == 'n':
out += '\n'
elif esc_c == 'r':
out += '\r'
elif esc_c == 't':
out += '\t'
else:
if uri and (esc_c == '#'):
# \# is passed through with backslash.
out += '\\'
# Pass through
out += esc_c
s = s[2:]
continue
else:
out += c
s = s[1:]
return out
# Grammar according to
# latest: http://project-haystack.org/doc/Zinc
# "2.0": https://web.archive.org/web/20141012013653/http://project-haystack.org:80/doc/Zinc
# "3.0": https://web.archive.org/web/20160805064015/http://project-haystack.org:80/doc/Zinc
# Rudimentary elements
hs_digit = Regex(r'\d')
# Digit runs may contain '_' separators, which are stripped on parse.
hs_digits = Regex(r'[0-9_]+').setParseAction(
    lambda toks: [''.join([t.replace('_', '') for t in toks[0]])])
hs_alphaLo = Regex(r'[a-z]')
hs_alphaHi = Regex(r'[A-Z]')
hs_alpha = Regex(r'[a-zA-Z]')
hs_valueSep = Regex(r' *, *').setName('valueSep')
hs_rowSep = Regex(r' *\n *').setName('rowSep')
hs_plusMinus = Or([Literal('+'), Literal('-')])
# Forward declaration of data types.  The concrete scalar/grid grammars
# are filled in with <<= near the bottom of this module.
hs_scalar_2_0 = Forward()
hs_scalar_3_0 = Forward()
hs_scalar = NearestMatch({
    VER_2_0: hs_scalar_2_0,
    VER_3_0: hs_scalar_3_0
})
hs_grid_2_0 = Forward()
hs_grid_3_0 = Forward()
hs_grid = NearestMatch({
    VER_2_0: hs_grid_2_0,
    VER_3_0: hs_grid_3_0
})
# Co-ordinates: C(lat,lng) with decimal-degree components.
hs_coordDeg = Combine(And([
    Optional(Literal('-')),
    Optional(hs_digits),
    Optional(And([Literal('.'), hs_digits]))
])).setParseAction(lambda toks: [float(toks[0] or '0')])
hs_coord = And([Suppress(Literal('C(')),
                hs_coordDeg,
                Suppress(hs_valueSep),
                hs_coordDeg,
                Suppress(Literal(')'))]).setParseAction(
    lambda toks: [Coordinate(toks[0], toks[1])])
# Dates and times
# Zone offset suffix: 'z' or +HH:MM / -HH:MM.
hs_tzHHMMOffset = Combine(Or([
    CaselessLiteral('z'),
    And([hs_plusMinus, Regex(r'\d\d:\d\d')])]
))
hs_tzName = Regex(r'[A-Z][a-zA-Z0-9_\-]*')
hs_tzUTCGMT = Or([Literal('UTC'), Literal('GMT')])
hs_tzUTCOffset = Combine(And([
    hs_tzUTCGMT, Optional(
        Or([Literal('0'),
            And([hs_plusMinus, OneOrMore(hs_digit)]
                )]
            ))]))
hs_timeZoneName = Or([hs_tzUTCOffset, hs_tzName])
hs_dateSep = CaselessLiteral('T')
# ISO date YYYY-MM-DD, first as raw text then parsed to datetime.date.
hs_date_str = Combine(And([
    hs_digit, hs_digit, hs_digit, hs_digit,
    Literal('-'),
    hs_digit, hs_digit,
    Literal('-'),
    hs_digit, hs_digit]))
hs_date = hs_date_str.copy().setParseAction(
    lambda toks: [datetime.datetime.strptime(toks[0], '%Y-%m-%d').date()])
# Time HH:MM:SS with optional fractional seconds.
hs_time_str = Combine(And([
    hs_digit, hs_digit,
    Literal(':'),
    hs_digit, hs_digit,
    Literal(':'),
    hs_digit, hs_digit,
    Optional(And([
        Literal('.'),
        OneOrMore(hs_digit)]))
]))
def _parse_time(toks):
time_str = toks[0]
time_fmt = '%H:%M:%S'
if '.' in time_str:
time_fmt += '.%f'
return [datetime.datetime.strptime(time_str, time_fmt).time()]
hs_time = hs_time_str.copy().setParseAction(_parse_time)
# ISO8601 date/time, e.g. 2016-01-13T12:00:00+10:00 (offset optional).
hs_isoDateTime = Combine(And([
    hs_date_str,
    hs_dateSep,
    hs_time_str,
    Optional(hs_tzHHMMOffset)
])).setParseAction(lambda toks: [iso8601.parse_date(toks[0].upper())])
def _parse_datetime(toks):
# Made up of parts: ISO8601 Date/Time, time zone label
isodt = toks[0]
if len(toks) > 1:
tzname = toks[1]
else:
tzname = None
if (isodt.tzinfo is None) and bool(tzname): # pragma: no cover
# This technically shouldn't happen according to Zinc specs
return [timezone(tzname).localise(isodt)]
elif bool(tzname):
try:
tz = timezone(tzname)
return [isodt.astimezone(tz)]
except: # pragma: no cover
# Unlikely to occur, might do though if Project Haystack changes
# its timezone list or if a system doesn't recognise a particular
# timezone.
return [isodt] # Failed, leave alone
else:
return [isodt]
# Full date/time: ISO8601 value plus optional ' TimeZoneName' suffix.
hs_dateTime = And([
    hs_isoDateTime,
    Optional(And([
        Suppress(Literal(' ')),
        hs_timeZoneName
    ]))
]).setParseAction(_parse_datetime)
# Quantities and raw numeric values
# Unit characters: letters plus %/_/$ and the non-ASCII BMP range.
hs_unitChar = Or([
    hs_alpha,
    Word(u'%_/$' + u''.join([
        six.unichr(c)
        for c in range(0x0080, 0xffff)
    ]), exact=1)
])
hs_unit = Combine(OneOrMore(hs_unitChar))
hs_exp = Combine(And([
    CaselessLiteral('e'),
    Optional(hs_plusMinus),
    hs_digits
]))
hs_decimal = Combine(And([
    Optional(Literal('-')),
    hs_digits,
    Optional(And([
        Literal('.'),
        hs_digits
    ])),
    Optional(hs_exp)
])).setParseAction(lambda toks: [float(toks[0])])
# A decimal immediately followed by a unit becomes a Quantity.
hs_quantity = And([hs_decimal, hs_unit]).setParseAction(
    lambda toks: [Quantity(toks[0], unit=toks[1])])
hs_number = Or([
    hs_quantity,
    hs_decimal,
    Or([
        Literal('INF'),
        Literal('-INF'),
        Literal('NaN')
    ]).setParseAction(lambda toks: [float(toks[0])])
])
# URIs: backtick-delimited, with backslash escapes resolved by _unescape.
hs_uriChar = Regex(r"([^\x00-\x1f\\`]|\\[bfnrt\\:/?" \
    + r"#\[\]@&=;`]|\\[uU][0-9a-fA-F]{4})")
hs_uri = Combine(And([
    Suppress(Literal('`')),
    ZeroOrMore(hs_uriChar),
    Suppress(Literal('`'))
])).setParseAction(lambda toks: [Uri(_unescape(toks[0], uri=True))])
# Strings: double-quote delimited with backslash escapes.
hs_strChar = Regex(r"([^\x00-\x1f\\\"]|\\[bfnrt\\\"$]|\\[uU][0-9a-fA-F]{4})")
hs_str = Combine(And([
    Suppress(Literal('"')),
    ZeroOrMore(hs_strChar),
    Suppress(Literal('"'))
])).setParseAction(lambda toks: [_unescape(toks[0], uri=False)])
# References: @id with an optional quoted display string.
hs_refChar = Or([hs_alpha, hs_digit, Word('_:-.~', exact=1)])
hs_ref = And([
    Suppress(Literal('@')),
    Combine(ZeroOrMore(hs_refChar)),
    Optional(And([
        Suppress(Literal(' ')),
        hs_str
    ]))
]).setParseAction(lambda toks: [
    Ref(toks[0], toks[1] if len(toks) > 1 else None)
])
# Bins: Bin(mime/type)
hs_binChar = Regex(r"[\x20-\x27\x2a-\x7f]")
hs_bin = Combine(And([
    Suppress(Literal('Bin(')),
    Combine(ZeroOrMore(hs_binChar)),
    Suppress(Literal(')'))
])).setParseAction(lambda toks: [Bin(toks[0])])
# Haystack 3.0 XStr(...): TypeName("payload")
hs_xstr = And([
    Regex(r"[a-zA-Z0-9_]+"),
    Suppress(Literal('(')),
    hs_str,
    Suppress(Literal(')'))
]).setParseAction(lambda toks: [XStr(toks[0], toks[1])])
# Booleans: single character T or F.
hs_bool = Word('TF', min=1, max=1, exact=1).setParseAction(
    lambda toks: [toks[0] == 'T'])
# Singleton values
hs_remove = Literal('R').setParseAction(
    lambda toks: [REMOVE]).setName('remove')
hs_marker = Literal('M').setParseAction(
    lambda toks: [MARKER]).setName('marker')
hs_null = Literal('N').setParseAction(
    lambda toks: [None]).setName('null')
hs_na = Literal('NA').setParseAction(
    lambda toks: [NA]).setName('na')
# Lists, these will probably be in Haystack 4.0, so let's not
# assume a version. There are three cases:
# - Empty list: [ {optional whitespace} ]
# - List *with* trailing comma: [ 1, 2, 3, ]
# - List without trailing comma: [ 1, 2, 3 ]
#
# We need to handle this trailing separator case. That for now means
# that a NULL within a list *MUST* be explicitly given using the 'N'
# literal: we cannot support implicit NULLs as they are ambiguous.
hs_list = GenerateMatch(
    lambda ver: Group(Or([
        Suppress(Regex(r'[ *]')),
        And([
            Suppress(Regex(r'\[ *')),
            Optional(DelimitedList(
                hs_scalar[ver],
                delim=hs_valueSep)),
            Suppress(Optional(hs_valueSep)),
            Suppress(Regex(r' *\]'))
        ])
    ])).setParseAction(lambda toks: toks.asList()))
# Tag IDs: lower-case first letter, then alphanumerics/underscore.
hs_id = Regex(r'[a-z][a-zA-Z0-9_]*').setName('id')
# Grid building blocks
# A cell is either empty (None) or any scalar for this version.
hs_cell = GenerateMatch(
    lambda ver: Or([Empty().copy().setParseAction(lambda toks: [None]),
                    hs_scalar[ver]]).setName('cell'))
# Dict
# There are three cases:
# - Empty dict: { {optional whitespace} }
# - map with marker: { m }
# - dict: { k: 1 }
#
hs_tagmarker = hs_id
hs_tagpair = GenerateMatch(
    lambda ver: And([hs_id,
                     Suppress(Regex(r': *')),
                     hs_scalar[ver]
                     ])
    .setParseAction(lambda toks: tuple(toks[:2]))
    .setName('tagPair'))
hs_tag = GenerateMatch(
    lambda ver: Or([hs_tagmarker, hs_tagpair[ver]])
    .setName('tag'))
hs_tags = GenerateMatch(
    lambda ver: ZeroOrMore(Or([hs_tag[ver],
                               Suppress(Regex(r'[ *]'))]))
    .setName('tags'))
def to_dict(tokenlist):
    """Convert a flat token list from the dict grammar into a dict.

    Tokens come in three shapes: a "name : value" triple (consumed as
    three tokens), a (name, value) tuple from the tagPair parse action,
    or a bare tag (anything else), which maps to MARKER.
    """
    result = {}
    pos = 0
    count = len(tokenlist)
    while pos < count:
        tok = tokenlist[pos]
        if pos < count - 2 and tokenlist[pos + 1] == ':':
            # Explicit "name : value" triple -- consume all three tokens.
            result[tok] = tokenlist[pos + 2]
            pos += 3
        elif isinstance(tok, tuple):
            # Pre-paired (name, value) from the tagPair parse action.
            result[tok[0]] = tok[1]
            pos += 1
        else:
            # Bare tag name (string or otherwise) acts as a marker.
            result[tok] = MARKER
            pos += 1
    return result
# Dict body: either bare whitespace (empty) or { tags }.
hs_dict = GenerateMatch(
    lambda ver: Or([
        Suppress(Regex(r'[ *]')),
        And([
            Suppress(Regex(r'{ *')),
            hs_tags[ver],
            Suppress(Regex(r' *}'))
        ])
    ])
    .setName("dict")
    .setParseAction(to_dict)
)
# Nested (inner) grids are delimited with << and >>.
hs_inner_grid = GenerateMatch(
    lambda ver: And([
        Suppress(Regex(r'<< *')),
        hs_grid[ver],
        Suppress(Regex(r' *>>')),
    ]))
# All possible scalar values, by Haystack version
hs_scalar_2_0 <<= Or([hs_ref, hs_bin, hs_str, hs_uri, hs_dateTime,
                      hs_date, hs_time, hs_coord, hs_number, hs_null, hs_marker,
                      hs_remove, hs_bool]).setName('scalar')
# 3.0 drops Bin in favour of XStr and adds NA, lists, dicts and inner grids.
hs_scalar_3_0 <<= Or([hs_ref, hs_xstr, hs_str, hs_uri, hs_dateTime,
                      hs_date, hs_time, hs_coord, hs_number, hs_na, hs_null, hs_marker,
                      hs_remove, hs_bool, hs_list[VER_3_0], hs_dict[VER_3_0], hs_inner_grid[VER_3_0]]).setName('scalar')
# Newline: optional carriage return followed by line feed.
hs_nl = Combine(And([Optional(Literal('\r')), Literal('\n')]))
# A row is a comma-separated list of cells terminated by a newline.
hs_row = GenerateMatch(
    lambda ver: Group(And([DelimitedList(hs_cell[ver], delim=hs_valueSep),
                           Suppress(Regex(r' *')),
                           Suppress(hs_nl)
                           ])).setName('row'))
hs_rows = GenerateMatch(
    lambda ver: Group(ZeroOrMore(hs_row[ver])).setName("rows"))
# Metadata: name:value pairs and bare marker names, space separated.
hs_metaPair = GenerateMatch(
    lambda ver: And([
        hs_id,
        Suppress(And([
            ZeroOrMore(Literal(' ')),
            Literal(':'),
            ZeroOrMore(Literal(' '))
        ])),
        hs_scalar[ver]
    ]).setParseAction(lambda toks: [tuple(toks[:2])]).setName('metaPair'))
hs_metaMarker = hs_id.copy().setParseAction(
    lambda toks: [(toks[0], MARKER)]).setName('metaMarker')
hs_metaItem = GenerateMatch(
    lambda ver: Or([
        hs_metaMarker,
        hs_metaPair[ver]
    ]).setName('metaItem'))
hs_meta = GenerateMatch(
    lambda ver: DelimitedList(hs_metaItem[ver],
                              delim=' ').setParseAction(
        lambda toks: [SortableDict(toks.asList())]
    ).setName('meta'))
# Column definition: id plus optional column-level metadata.
hs_col = GenerateMatch(
    lambda ver: And([
        hs_id,
        Optional(And([
            Suppress(Literal(' ')),
            hs_meta[ver]
        ])).setName('colMeta')
    ]).setParseAction(lambda toks: [
        (toks[0], toks[1] if len(toks) > 1 else {})]))
hs_cols = GenerateMatch(
    lambda ver: And([
        DelimitedList(
            hs_col[ver], delim=hs_valueSep).setParseAction(
            lambda toks: [SortableDict(toks.asList())]),
        Suppress(Regex(r' *')),
        Suppress(hs_nl)
    ])
)
# Grid version header: ver:"X.Y" (the quoted string value is kept).
hs_gridVer = Combine(And([Suppress(Literal('ver:')) + hs_str]))
def _assign_ver(toks):
ver = toks[0]
if len(toks) > 1:
grid_meta = toks[1]
else:
grid_meta = SortableDict()
# Put 'ver' at the start
grid_meta.add_item('ver', ver, index=0)
return grid_meta
# Grid metadata line: version header plus optional grid-level metadata,
# terminated by a newline.
hs_gridMeta = GenerateMatch(
    lambda ver: And([
        hs_gridVer,
        Optional(And([
            Suppress(Literal(' ')),
            hs_meta[ver]
        ])).setName('gridMeta'),
        Suppress(Regex(r' *')),
        Suppress(hs_nl)
    ]).setParseAction(_assign_ver))
def _gen_grid(toks):
    """Parse action assembling the final Grid.

    toks is the triple (grid metadata, column metadata, parsed rows).
    Returns a Grid populated with one dict per row, keyed by column.
    """
    (grid_meta, col_meta, rows) = toks
    # A single None row is what the grammar yields for "no rows".  Use an
    # identity test: `== None` would invoke __eq__ on arbitrary row
    # objects and is non-idiomatic.
    if len(rows) == 1 and rows[0] is None:
        rows = []
    g = Grid(version=grid_meta.pop('ver'),
             metadata=grid_meta,
             columns=list(col_meta.items()))
    # Zip each row's cells against the column names.
    g.extend([dict(zip(col_meta.keys(), row)) for row in rows])
    return g
# Complete grid grammars: metadata line, column line, then data rows.
hs_grid_2_0 <<= And([
    hs_gridMeta[VER_2_0],
    hs_cols[VER_2_0],
    hs_rows[VER_2_0],
]).setParseAction(_gen_grid)
hs_grid_3_0 <<= And([
    hs_gridMeta[VER_3_0],
    hs_cols[VER_3_0],
    hs_rows[VER_3_0],
]).setParseAction(_gen_grid)
def parse_grid(grid_data, parseAll=True):
    """
    Parse the incoming grid.

    Parameters
    ----------
    grid_data:
        ZINC-formatted text for one grid.
    parseAll:
        If True (default), require the whole input to be consumed.

    Returns
    -------
    Grid

    Raises
    ------
    ZincParseException
        If the version cannot be determined or parsing fails.
    """
    try:
        # First element is the grid metadata
        ver_match = VERSION_RE.match(grid_data)
        if ver_match is None:
            raise ZincParseException(
                'Could not determine version from %r' % NEWLINE_RE.split(grid_data)[0],
                grid_data, 1, 1)
        version = Version(ver_match.group(1))

        # Now parse the grid accordingly
        return hs_grid[version].parseString(grid_data, parseAll=parseAll)[0]
    except pp.ParseException as pe:
        LOG.debug('Failing grid: %r', grid_data)
        raise ZincParseException(
            'Failed to parse: %s' % reformat_exception(pe, pe.lineno),
            grid_data, pe.lineno, pe.col)
    except ZincParseException:
        # Already our exception type (e.g. version detection above):
        # re-raise unchanged.  The old bare `except:` re-wrapped it into
        # a generic "Failed to parse" error, losing the useful message.
        raise
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not converted into parse errors.
        LOG.debug('Failing grid: %r', grid_data)
        raise ZincParseException(
            'Failed to parse: %s' % sys.exc_info()[0], grid_data, 0, 0)
def parse_scalar(scalar_data, version):
    """
    Parse a Project Haystack scalar in ZINC format.

    scalar_data: text of a single scalar value.
    version: grid Version selecting which grammar to use.

    Raises ZincParseException on grammar errors; any other failure is
    logged and re-raised unchanged.
    """
    try:
        return hs_scalar[version].parseString(scalar_data, parseAll=True)[0]
    except pp.ParseException as pe:
        # Raise a new exception with the appropriate line number.
        raise ZincParseException(
            'Failed to parse scalar: %s' % reformat_exception(pe),
            scalar_data, 1, pe.col)
    except:
        # Log the offending input, then propagate the original error.
        LOG.debug('Failing scalar data: %r (version %r)',
                  scalar_data, version)
        raise
|
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import concurrent.futures
import inspect
import unittest
import uuid
import qiime2.plugin
from qiime2.core.type import MethodSignature, Int
from qiime2.sdk import Artifact, Method, Results
from qiime2.core.testing.method import (concatenate_ints, merge_mappings,
split_ints, params_only_method,
no_input_method)
from qiime2.core.testing.type import IntSequence1, IntSequence2, Mapping
from qiime2.core.testing.util import get_dummy_plugin
# TODO refactor these tests along with Visualizer tests to remove duplication.
class TestMethod(unittest.TestCase):
    """Tests for qiime2.sdk.Method instances registered by the dummy plugin.

    Exercises signature derivation, callable/async entry points, invocation
    with artifacts and/or parameters, and generated docstrings.
    """
    def setUp(self):
        # Expected signatures are built once here and compared against what
        # the plugin framework derives from the underlying functions.
        self.plugin = get_dummy_plugin()
        self.concatenate_ints_sig = MethodSignature(
            concatenate_ints,
            inputs={
                'ints1': IntSequence1 | IntSequence2,
                'ints2': IntSequence1,
                'ints3': IntSequence2
            },
            parameters={
                'int1': qiime2.plugin.Int,
                'int2': qiime2.plugin.Int
            },
            outputs=[
                ('concatenated_ints', IntSequence1)
            ]
        )
        self.split_ints_sig = MethodSignature(
            split_ints,
            inputs={
                'ints': IntSequence1
            },
            parameters={},
            outputs=[
                ('left', IntSequence1),
                ('right', IntSequence1)
            ]
        )
    def test_private_constructor(self):
        # Methods may only be created through the plugin registration API.
        with self.assertRaisesRegex(NotImplementedError,
                                    'Method constructor.*private'):
            Method()
    def test_from_function_with_artifacts_and_parameters(self):
        method = self.plugin.methods['concatenate_ints']
        self.assertEqual(method.id, 'concatenate_ints')
        self.assertEqual(method.signature, self.concatenate_ints_sig)
        self.assertEqual(method.name, 'Concatenate integers')
        self.assertTrue(
            method.description.startswith('This method concatenates integers'))
        self.assertTrue(
            method.source.startswith('\n```python\ndef concatenate_ints('))
    def test_from_function_with_multiple_outputs(self):
        method = self.plugin.methods['split_ints']
        self.assertEqual(method.id, 'split_ints')
        exp_sig = MethodSignature(
            split_ints,
            inputs={
                'ints': IntSequence1
            },
            parameters={},
            outputs=[
                ('left', IntSequence1),
                ('right', IntSequence1)
            ]
        )
        self.assertEqual(method.signature, exp_sig)
        self.assertEqual(method.name, 'Split sequence of integers in half')
        self.assertTrue(
            method.description.startswith('This method splits a sequence'))
        self.assertTrue(
            method.source.startswith('\n```python\ndef split_ints('))
    def test_from_function_without_parameters(self):
        method = self.plugin.methods['merge_mappings']
        self.assertEqual(method.id, 'merge_mappings')
        exp_sig = MethodSignature(
            merge_mappings,
            inputs={
                'mapping1': Mapping,
                'mapping2': Mapping
            },
            input_descriptions={
                'mapping1': 'Mapping object to be merged'
            },
            parameters={},
            outputs=[
                ('merged_mapping', Mapping)
            ],
            output_descriptions={
                'merged_mapping': 'Resulting merged Mapping object'
            }
        )
        self.assertEqual(method.signature, exp_sig)
        self.assertEqual(method.name, 'Merge mappings')
        self.assertTrue(
            method.description.startswith('This method merges two mappings'))
        self.assertTrue(
            method.source.startswith('\n```python\ndef merge_mappings('))
    def test_from_function_with_parameters_only(self):
        method = self.plugin.methods['params_only_method']
        self.assertEqual(method.id, 'params_only_method')
        exp_sig = MethodSignature(
            params_only_method,
            inputs={},
            parameters={
                'name': qiime2.plugin.Str,
                'age': qiime2.plugin.Int
            },
            outputs=[
                ('out', Mapping)
            ]
        )
        self.assertEqual(method.signature, exp_sig)
        self.assertEqual(method.name, 'Parameters only method')
        self.assertTrue(
            method.description.startswith('This method only accepts'))
        self.assertTrue(
            method.source.startswith('\n```python\ndef params_only_method('))
    def test_from_function_without_inputs_or_parameters(self):
        method = self.plugin.methods['no_input_method']
        self.assertEqual(method.id, 'no_input_method')
        exp_sig = MethodSignature(
            no_input_method,
            inputs={},
            parameters={},
            outputs=[
                ('out', Mapping)
            ]
        )
        self.assertEqual(method.signature, exp_sig)
        self.assertEqual(method.name, 'No input method')
        self.assertTrue(
            method.description.startswith('This method does not accept any'))
        self.assertTrue(
            method.source.startswith('\n```python\ndef no_input_method('))
    def test_is_callable(self):
        self.assertTrue(callable(self.plugin.methods['concatenate_ints']))
    def test_callable_properties(self):
        # The generated __call__ should carry real annotations and not look
        # like a functools-style wrapper.
        concatenate_ints = self.plugin.methods['concatenate_ints']
        merge_mappings = self.plugin.methods['merge_mappings']
        concatenate_exp = {
            'int2': Int, 'ints2': IntSequence1, 'return': (IntSequence1,),
            'int1': Int, 'ints3': IntSequence2,
            'ints1': IntSequence1 | IntSequence2}
        merge_exp = {
            'mapping2': Mapping, 'mapping1': Mapping, 'return': (Mapping,)}
        mapper = {
            concatenate_ints: concatenate_exp,
            merge_mappings: merge_exp}
        for method, exp in mapper.items():
            self.assertEqual(method.__call__.__name__, '__call__')
            self.assertEqual(method.__call__.__annotations__, exp)
            self.assertFalse(hasattr(method.__call__, '__wrapped__'))
    def test_async_properties(self):
        # NOTE(review): ``async`` became a reserved keyword in Python 3.7, so
        # ``method.async`` only parses on Python <= 3.6 (the attribute was
        # later renamed in QIIME 2) -- confirm the target interpreter.
        concatenate_ints = self.plugin.methods['concatenate_ints']
        merge_mappings = self.plugin.methods['merge_mappings']
        concatenate_exp = {
            'int2': Int, 'ints2': IntSequence1, 'return': (IntSequence1,),
            'int1': Int, 'ints3': IntSequence2,
            'ints1': IntSequence1 | IntSequence2}
        merge_exp = {
            'mapping2': Mapping, 'mapping1': Mapping, 'return': (Mapping,)}
        mapper = {
            concatenate_ints: concatenate_exp,
            merge_mappings: merge_exp}
        for method, exp in mapper.items():
            self.assertEqual(method.async.__name__, 'async')
            self.assertEqual(method.async.__annotations__, exp)
            self.assertFalse(hasattr(method.async, '__wrapped__'))
    def test_callable_and_async_signature_with_artifacts_and_parameters(self):
        # Signature with input artifacts and parameters (i.e. primitives).
        concatenate_ints = self.plugin.methods['concatenate_ints']
        for callable_attr in '__call__', 'async':
            signature = inspect.Signature.from_callable(
                getattr(concatenate_ints, callable_attr))
            parameters = list(signature.parameters.items())
            kind = inspect.Parameter.POSITIONAL_OR_KEYWORD
            exp_parameters = [
                ('ints1', inspect.Parameter(
                    'ints1', kind, annotation=IntSequence1 | IntSequence2)),
                ('ints2', inspect.Parameter(
                    'ints2', kind, annotation=IntSequence1)),
                ('ints3', inspect.Parameter(
                    'ints3', kind, annotation=IntSequence2)),
                ('int1', inspect.Parameter(
                    'int1', kind, annotation=Int)),
                ('int2', inspect.Parameter(
                    'int2', kind, annotation=Int))
            ]
            self.assertEqual(parameters, exp_parameters)
    def test_callable_and_async_signature_with_no_parameters(self):
        # Signature without parameters (i.e. primitives), only input artifacts.
        method = self.plugin.methods['merge_mappings']
        for callable_attr in '__call__', 'async':
            signature = inspect.Signature.from_callable(
                getattr(method, callable_attr))
            parameters = list(signature.parameters.items())
            kind = inspect.Parameter.POSITIONAL_OR_KEYWORD
            exp_parameters = [
                ('mapping1', inspect.Parameter(
                    'mapping1', kind, annotation=Mapping)),
                ('mapping2', inspect.Parameter(
                    'mapping2', kind, annotation=Mapping))
            ]
            self.assertEqual(parameters, exp_parameters)
    def test_call_with_artifacts_and_parameters(self):
        concatenate_ints = self.plugin.methods['concatenate_ints']
        artifact1 = Artifact.import_data(IntSequence1, [0, 42, 43])
        artifact2 = Artifact.import_data(IntSequence2, [99, -22])
        result = concatenate_ints(artifact1, artifact1, artifact2, 55, 1)
        # Test properties of the `Results` object.
        self.assertIsInstance(result, tuple)
        self.assertIsInstance(result, Results)
        self.assertEqual(len(result), 1)
        self.assertEqual(result.concatenated_ints.view(list),
                         [0, 42, 43, 0, 42, 43, 99, -22, 55, 1])
        result = result[0]
        self.assertIsInstance(result, Artifact)
        self.assertEqual(result.type, IntSequence1)
        self.assertIsInstance(result.uuid, uuid.UUID)
        # Can retrieve multiple views of different type.
        exp_list_view = [0, 42, 43, 0, 42, 43, 99, -22, 55, 1]
        self.assertEqual(result.view(list), exp_list_view)
        self.assertEqual(result.view(list), exp_list_view)
        exp_counter_view = collections.Counter(
            {0: 2, 42: 2, 43: 2, 99: 1, -22: 1, 55: 1, 1: 1})
        self.assertEqual(result.view(collections.Counter),
                         exp_counter_view)
        self.assertEqual(result.view(collections.Counter),
                         exp_counter_view)
        # Accepts IntSequence1 | IntSequence2
        artifact3 = Artifact.import_data(IntSequence2, [10, 20])
        result, = concatenate_ints(artifact3, artifact1, artifact2, 55, 1)
        self.assertEqual(result.type, IntSequence1)
        self.assertEqual(result.view(list),
                         [10, 20, 0, 42, 43, 99, -22, 55, 1])
    def test_call_with_multiple_outputs(self):
        split_ints = self.plugin.methods['split_ints']
        artifact = Artifact.import_data(IntSequence1, [0, 42, -2, 43, 6])
        result = split_ints(artifact)
        self.assertIsInstance(result, tuple)
        self.assertEqual(len(result), 2)
        for output_artifact in result:
            self.assertIsInstance(output_artifact, Artifact)
            self.assertEqual(output_artifact.type, IntSequence1)
            self.assertIsInstance(output_artifact.uuid, uuid.UUID)
        # Output artifacts have different UUIDs.
        self.assertNotEqual(result[0].uuid, result[1].uuid)
        # Index lookup.
        self.assertEqual(result[0].view(list), [0, 42])
        self.assertEqual(result[1].view(list), [-2, 43, 6])
        # Test properties of the `Results` object.
        self.assertIsInstance(result, Results)
        self.assertEqual(result.left.view(list), [0, 42])
        self.assertEqual(result.right.view(list), [-2, 43, 6])
    def test_call_with_no_parameters(self):
        merge_mappings = self.plugin.methods['merge_mappings']
        artifact1 = Artifact.import_data(Mapping, {'foo': 'abc', 'bar': 'def'})
        artifact2 = Artifact.import_data(Mapping, {'bazz': 'abc'})
        result = merge_mappings(artifact1, artifact2)
        # Test properties of the `Results` object.
        self.assertIsInstance(result, tuple)
        self.assertIsInstance(result, Results)
        self.assertEqual(len(result), 1)
        self.assertEqual(result.merged_mapping.view(dict),
                         {'foo': 'abc', 'bar': 'def', 'bazz': 'abc'})
        result = result[0]
        self.assertIsInstance(result, Artifact)
        self.assertEqual(result.type, Mapping)
        self.assertIsInstance(result.uuid, uuid.UUID)
        self.assertEqual(result.view(dict),
                         {'foo': 'abc', 'bar': 'def', 'bazz': 'abc'})
    def test_call_with_parameters_only(self):
        params_only_method = self.plugin.methods['params_only_method']
        result, = params_only_method("Someone's Name", 999)
        self.assertIsInstance(result, Artifact)
        self.assertEqual(result.type, Mapping)
        self.assertIsInstance(result.uuid, uuid.UUID)
        self.assertEqual(result.view(dict), {"Someone's Name": '999'})
    def test_call_without_inputs_or_parameters(self):
        no_input_method = self.plugin.methods['no_input_method']
        result, = no_input_method()
        self.assertIsInstance(result, Artifact)
        self.assertEqual(result.type, Mapping)
        self.assertIsInstance(result.uuid, uuid.UUID)
        self.assertEqual(result.view(dict), {'foo': '42'})
    def test_call_with_optional_artifacts(self):
        method = self.plugin.methods['optional_artifacts_method']
        ints1 = Artifact.import_data(IntSequence1, [0, 42, 43])
        ints2 = Artifact.import_data(IntSequence1, [99, -22])
        ints3 = Artifact.import_data(IntSequence2, [43, 43])
        # No optional artifacts provided.
        obs = method(ints1, 42).output
        self.assertEqual(obs.view(list), [0, 42, 43, 42])
        # One optional artifact provided.
        obs = method(ints1, 42, optional1=ints2).output
        self.assertEqual(obs.view(list), [0, 42, 43, 42, 99, -22])
        # All optional artifacts provided.
        obs = method(
            ints1, 42, optional1=ints2, optional2=ints3, num2=111).output
        self.assertEqual(obs.view(list), [0, 42, 43, 42, 99, -22, 43, 43, 111])
        # Invalid type provided as optional artifact.
        with self.assertRaisesRegex(TypeError,
                                    'not a subtype of IntSequence1'):
            method(ints1, 42, optional1=ints3)
    def test_async(self):
        # Same scenario as test_call_with_artifacts_and_parameters, but via
        # the asynchronous entry point (see keyword caveat on
        # test_async_properties).
        concatenate_ints = self.plugin.methods['concatenate_ints']
        artifact1 = Artifact.import_data(IntSequence1, [0, 42, 43])
        artifact2 = Artifact.import_data(IntSequence2, [99, -22])
        future = concatenate_ints.async(artifact1, artifact1, artifact2, 55, 1)
        self.assertIsInstance(future, concurrent.futures.Future)
        result = future.result()
        # Test properties of the `Results` object.
        self.assertIsInstance(result, tuple)
        self.assertIsInstance(result, Results)
        self.assertEqual(len(result), 1)
        self.assertEqual(result.concatenated_ints.view(list),
                         [0, 42, 43, 0, 42, 43, 99, -22, 55, 1])
        result = result[0]
        self.assertIsInstance(result, Artifact)
        self.assertEqual(result.type, IntSequence1)
        self.assertIsInstance(result.uuid, uuid.UUID)
        # Can retrieve multiple views of different type.
        exp_list_view = [0, 42, 43, 0, 42, 43, 99, -22, 55, 1]
        self.assertEqual(result.view(list), exp_list_view)
        self.assertEqual(result.view(list), exp_list_view)
        exp_counter_view = collections.Counter(
            {0: 2, 42: 2, 43: 2, 99: 1, -22: 1, 55: 1, 1: 1})
        self.assertEqual(result.view(collections.Counter),
                         exp_counter_view)
        self.assertEqual(result.view(collections.Counter),
                         exp_counter_view)
        # Accepts IntSequence1 | IntSequence2
        artifact3 = Artifact.import_data(IntSequence2, [10, 20])
        future = concatenate_ints.async(artifact3, artifact1, artifact2, 55, 1)
        result, = future.result()
        self.assertEqual(result.type, IntSequence1)
        self.assertEqual(result.view(list),
                         [10, 20, 0, 42, 43, 99, -22, 55, 1])
    def test_async_with_multiple_outputs(self):
        split_ints = self.plugin.methods['split_ints']
        artifact = Artifact.import_data(IntSequence1, [0, 42, -2, 43, 6])
        future = split_ints.async(artifact)
        self.assertIsInstance(future, concurrent.futures.Future)
        result = future.result()
        self.assertIsInstance(result, tuple)
        self.assertEqual(len(result), 2)
        for output_artifact in result:
            self.assertIsInstance(output_artifact, Artifact)
            self.assertEqual(output_artifact.type, IntSequence1)
            self.assertIsInstance(output_artifact.uuid, uuid.UUID)
        # Output artifacts have different UUIDs.
        self.assertNotEqual(result[0].uuid, result[1].uuid)
        # Index lookup.
        self.assertEqual(result[0].view(list), [0, 42])
        self.assertEqual(result[1].view(list), [-2, 43, 6])
        # Test properties of the `Results` object.
        self.assertIsInstance(result, Results)
        self.assertEqual(result.left.view(list), [0, 42])
        self.assertEqual(result.right.view(list), [-2, 43, 6])
    def test_docstring(self):
        # Compares generated __call__ docstrings against the exp_* module
        # constants defined below this class.
        merge_mappings = self.plugin.methods['merge_mappings']
        split_ints = self.plugin.methods['split_ints']
        identity_with_optional_metadata = (
            self.plugin.methods['identity_with_optional_metadata'])
        no_input_method = self.plugin.methods['no_input_method']
        params_only_method = self.plugin.methods['params_only_method']
        long_description_method = self.plugin.methods[
            'long_description_method']
        self.assertEqual(merge_mappings.__doc__, 'QIIME 2 Method')
        merge_calldoc = merge_mappings.__call__.__doc__
        self.assertEqual(exp_merge_calldoc, merge_calldoc)
        split_ints_return = split_ints.__call__.__doc__.split('\n\n')[3]
        self.assertEqual(exp_split_ints_return, split_ints_return)
        optional_params = (
            identity_with_optional_metadata.__call__.__doc__.split('\n\n')[2])
        self.assertEqual(exp_optional_params, optional_params)
        no_input_method = no_input_method.__call__.__doc__
        self.assertEqual(exp_no_input_method, no_input_method)
        params_only = params_only_method.__call__.__doc__
        self.assertEqual(exp_params_only, params_only)
        long_desc = long_description_method.__call__.__doc__
        self.assertEqual(exp_long_description, long_desc)
# Expected docstring fragments used by TestMethod.test_docstring.  These are
# compared verbatim against framework-generated __call__ docstrings, so their
# exact contents (including whitespace) are part of the tests' behavior.
# NOTE(review): blank lines appear to have been stripped from these literals
# in this copy of the file -- verify against the upstream source.
exp_merge_calldoc = """\
Merge mappings
This method merges two mappings into a single new mapping. If a key is
shared between mappings and the values differ, an error will be raised.
Parameters
----------
mapping1 : Mapping
    Mapping object to be merged
mapping2 : Mapping
Returns
-------
merged_mapping : Mapping
    Resulting merged Mapping object
"""
exp_split_ints_return = """\
Returns
-------
left : IntSequence1
right : IntSequence1
"""
exp_optional_params = """\
Parameters
----------
ints : IntSequence1 | IntSequence2
metadata : Metadata, optional\
"""
exp_no_input_method = """\
No input method
This method does not accept any type of input.
Returns
-------
out : Mapping
"""
exp_params_only = """\
Parameters only method
This method only accepts parameters.
Parameters
----------
name : Str
age : Int
Returns
-------
out : Mapping
"""
exp_long_description = """\
Long Description
This is a very long description. If asked about its length, I would have to
say it is greater than 79 characters.
Parameters
----------
mapping1 : Mapping
    This is a very long description. If asked about its length, I would
    have to say it is greater than 79 characters.
name : Str
    This is a very long description. If asked about its length, I would
    have to say it is greater than 79 characters.
age : Int
Returns
-------
out : Mapping
    This is a very long description. If asked about its length, I would
    have to say it is greater than 79 characters.
"""
if __name__ == '__main__':
    unittest.main()
|
|
#
# Channel.py -- Channel class for the Ginga reference viewer.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import time
from ginga.misc import Bunch, Datasrc, Callback, Future, Settings
from ginga.util import viewer as gviewer
class ChannelError(Exception):
    """Raised for errors encountered while operating on a channel."""
class Channel(Callback.Callbacks):
    """Class to manage a channel.
    Parameters
    ----------
    name : str
        Name of the channel.
    fv : `~ginga.rv.Control.GingaShell`
        The reference viewer shell.
    settings : `~ginga.misc.Settings.SettingGroup`
        Channel settings.
    datasrc : `~ginga.misc.Datasrc.Datasrc`
        Data cache.
    """
    def __init__(self, name, fv, settings, datasrc=None):
        super(Channel, self).__init__()
        self.logger = fv.logger
        self.fv = fv
        self.settings = settings
        # NOTE(review): duplicate of the self.logger assignment above;
        # harmless but redundant.
        self.logger = fv.logger
        # CHANNEL ATTRIBUTES
        self.name = name
        self.widget = None
        self.container = None
        self.workspace = None
        self.opmon = None
        # this is the image viewer we are connected to
        self.fitsimage = None
        # this is the currently active viewer
        self.viewer = None
        self.viewers = []
        self.viewer_dict = {}
        if datasrc is None:
            # default to a cache sized by the 'numImages' setting
            num_images = self.settings.get('numImages', 1)
            datasrc = Datasrc.Datasrc(num_images)
        self.datasrc = datasrc
        # cursor indexes into self.history; -1 means "no current image"
        self.cursor = -1
        self.history = []
        # maps image name -> its info record (Bunch) in self.history
        self.image_index = {}
        # external entities can attach stuff via this attribute
        self.extdata = Bunch.Bunch()
        self._configure_sort()
        self.settings.get_setting('sort_order').add_callback(
            'set', self._sort_changed_ext_cb)
    def connect_viewer(self, viewer):
        # Register a viewer with this channel (idempotent).
        if viewer not in self.viewers:
            self.viewers.append(viewer)
            self.viewer_dict[viewer.vname] = viewer
    def move_image_to(self, imname, channel):
        # Move an image to another channel: copy there, then remove here.
        if self == channel:
            return
        self.copy_image_to(imname, channel)
        self.remove_image(imname)
    def copy_image_to(self, imname, channel, silent=False):
        # Copy the named image (info record and, if loaded, data) to another
        # channel.  No-op when copying to ourselves or when the target
        # already has an image by that name.
        if self == channel:
            return
        if imname in channel:
            # image with that name is already there
            return
        # transfer image info
        info = self.image_index[imname]
        was_not_there_already = channel._add_info(info)
        try:
            image = self.datasrc[imname]
        except KeyError:
            # data not in memory; only the info record was transferred
            return
        if was_not_there_already:
            channel.datasrc[imname] = image
            if not silent:
                self.fv.gui_do(channel.add_image_update, image, info,
                               update_viewer=False)
    def remove_image(self, imname):
        # Remove the named image from history and (if loaded) from the data
        # cache.  Returns the image's info record.
        info = self.image_index[imname]
        self.remove_history(imname)
        if imname in self.datasrc:
            image = self.datasrc[imname]
            self.datasrc.remove(imname)
            # update viewer if we are removing the currently displayed image
            cur_image = self.viewer.get_dataobj()
            if cur_image == image:
                self.refresh_cursor_image()
        self.fv.make_async_gui_callback('remove-image', self.name,
                                        info.name, info.path)
        return info
    def get_image_names(self):
        # Names of all images in history, in current sort order.
        return [info.name for info in self.history]
    def get_loaded_image(self, imname):
        """Get an image from memory.
        Parameters
        ----------
        imname : str
            Key, usually image name and extension.
        Returns
        -------
        image
            Image object.
        Raises
        ------
        KeyError
            Image is not in memory.
        """
        image = self.datasrc[imname]
        return image
    def add_image(self, image, silent=False, bulk_add=False):
        # Add an image object to the channel, creating its history/info
        # record if this is the first time it has been loaded.
        imname = image.get('name', None)
        if imname is None:
            raise ValueError("image has no name")
        self.logger.debug("Adding image '%s' in channel %s" % (
            imname, self.name))
        self.datasrc[imname] = image
        # Has this image been loaded into a channel before?
        info = image.get('image_info', None)
        if info is None:
            # No
            idx = image.get('idx', None)
            path = image.get('path', None)
            image_loader = image.get('image_loader', None)
            image_future = image.get('image_future', None)
            info = self.add_history(imname, path,
                                    image_loader=image_loader,
                                    image_future=image_future,
                                    idx=idx)
            image.set(image_info=info)
        # add an image profile if one is missing
        profile = self.get_image_profile(image)
        info.profile = profile
        if not silent:
            self.add_image_update(image, info,
                                  update_viewer=not bulk_add)
    def add_image_info(self, info):
        # Register an info record (no image data) with the channel,
        # synthesizing an image_future from the path when possible.
        image_loader = info.get('image_loader', self.fv.load_image)
        # create an image_future if one does not exist
        image_future = info.get('image_future', None)
        if (image_future is None) and (info.path is not None):
            image_future = Future.Future()
            image_future.freeze(image_loader, info.path)
        # NOTE(review): the record returned by add_history is not returned
        # to the caller here (unlike add_history itself).
        info = self.add_history(info.name, info.path,
                                image_loader=image_loader,
                                image_future=image_future)
    def get_image_info(self, imname):
        return self.image_index[imname]
    def update_image_info(self, image, info):
        # Merge new fields into an image's info record; returns True if the
        # update was applied.
        imname = image.get('name', None)
        if (imname is None) or (imname not in self.image_index):
            return False
        # don't update based on image name alone--actual image must match
        try:
            my_img = self.get_loaded_image(imname)
            if my_img is not image:
                return False
        except KeyError:
            return False
        # update the info record
        iminfo = self.get_image_info(imname)
        iminfo.update(info)
        self.fv.make_async_gui_callback('add-image-info', self, iminfo)
        return True
    def add_image_update(self, image, info, update_viewer=False):
        # Announce a newly added image and, optionally, switch the viewer to
        # it according to the 'switchnew'/'raisenew' settings.
        self.fv.make_async_gui_callback('add-image', self.name, image, info)
        if not update_viewer:
            return
        current = self.datasrc.youngest()
        curname = current.get('name')
        self.logger.debug("image=%s youngest=%s" % (image.get('name'), curname))
        if current != image:
            return
        # switch to current image?
        if self.settings['switchnew']:
            self.logger.debug("switching to new image '%s'" % (curname))
            self.switch_image(image)
        if self.settings['raisenew']:
            channel = self.fv.get_current_channel()
            if channel != self:
                self.fv.change_channel(self.name)
    def refresh_cursor_image(self):
        # Re-display the image the cursor points at (or clear the viewer if
        # the cursor is unset).
        if self.cursor < 0:
            self.viewer.clear()
            self.fv.channel_image_updated(self, None)
            return
        info = self.history[self.cursor]
        if info.name in self.datasrc:
            # object still in memory
            data_obj = self.datasrc[info.name]
            self.switch_image(data_obj)
        else:
            self.switch_name(info.name)
    def prev_image(self, loop=True):
        # Move the cursor to the previous image, wrapping to the end when
        # `loop` is True.
        self.logger.debug("Previous image")
        if self.cursor <= 0:
            n = len(self.history) - 1
            if (not loop) or (n < 0):
                self.logger.error("No previous image!")
                return True
            self.cursor = n
        else:
            self.cursor -= 1
        self.refresh_cursor_image()
        return True
    def next_image(self, loop=True):
        # Move the cursor to the next image, wrapping to the start when
        # `loop` is True.
        self.logger.debug("Next image")
        n = len(self.history) - 1
        if self.cursor >= n:
            if (not loop) or (n < 0):
                self.logger.error("No next image!")
                return True
            self.cursor = 0
        else:
            self.cursor += 1
        self.refresh_cursor_image()
        return True
    def _add_info(self, info):
        # Insert an info record into history/index; returns True only if it
        # was newly added (False if already present).
        if info.name in self.image_index:
            # image info is already present
            return False
        self.history.append(info)
        self.image_index[info.name] = info
        if self.hist_sort is not None:
            self.history.sort(key=self.hist_sort)
        self.fv.make_async_gui_callback('add-image-info', self, info)
        # image was newly added
        return True
    def add_history(self, imname, path, idx=None,
                    image_loader=None, image_future=None):
        # Get-or-create the history info record for `imname`; a new record
        # gets an image_future built from `path` when one is not supplied.
        if not (imname in self.image_index):
            if image_loader is None:
                image_loader = self.fv.load_image
            # create an image_future if one does not exist
            if (image_future is None) and (path is not None):
                image_future = Future.Future()
                image_future.freeze(image_loader, path)
            info = Bunch.Bunch(name=imname, path=path,
                               idx=idx,
                               image_loader=image_loader,
                               image_future=image_future,
                               time_added=time.time(),
                               time_modified=None,
                               last_viewer_info=None,
                               profile=None)
            self._add_info(info)
        else:
            # already in history
            info = self.image_index[imname]
        return info
    def remove_history(self, imname):
        # Drop the named image's info record, keeping the cursor pointing at
        # a valid entry.
        if imname in self.image_index:
            info = self.image_index[imname]
            del self.image_index[imname]
            i = self.history.index(info)
            self.history.remove(info)
            # adjust cursor as necessary
            if i < self.cursor:
                self.cursor -= 1
            if self.cursor >= len(self.history):
                # loop
                self.cursor = min(0, len(self.history) - 1)
            self.fv.make_async_gui_callback('remove-image-info', self, info)
    def get_current_image(self):
        return self.viewer.get_dataobj()
    def view_object(self, dataobj):
        # Choose a viewer for `dataobj` (preferring the one last used for
        # it) and display it, asking the user when several are suitable.
        # see if a viewer has been used on this object before
        vinfo = None
        obj_name = dataobj.get('name')
        if obj_name in self.image_index:
            info = self.image_index[obj_name]
            vinfo = info.last_viewer_info
        if vinfo is not None:
            # use the viewer we used before
            viewers = [vinfo]
        else:
            # find available viewers that can view this kind of object
            viewers = gviewer.get_priority_viewers(dataobj)
            if len(viewers) == 0:
                raise ValueError("No viewers for this data object!")
            self.logger.debug("{} available viewers for this model".format(len(viewers)))
        # if there is only one viewer available, use it otherwise
        # pop-up a dialog and ask the user
        if len(viewers) == 1:
            self._open_with_viewer(viewers[0], dataobj)
            return
        msg = ("Multiple viewers are available for this data object. "
               "Please select one.")
        self.fv.gui_choose_viewer(msg, viewers, self._open_with_viewer,
                                  dataobj)
    def _open_with_viewer(self, vinfo, dataobj):
        # Display `dataobj` in the viewer described by `vinfo`, installing
        # the viewer in the channel if needed, then update history/cursor
        # and queue neighbors for preloading.
        # if we don't have this viewer type then install one in the channel
        if vinfo.name not in self.viewer_dict:
            self.fv.make_viewer(vinfo, self)
        self.viewer = self.viewer_dict[vinfo.name]
        # find this viewer and raise it
        idx = self.viewers.index(self.viewer)
        self.widget.set_index(idx)
        # and load the data
        self.viewer.set_dataobj(dataobj)
        obj_name = dataobj.get('name')
        if obj_name in self.image_index:
            info = self.image_index[obj_name]
            # record viewer last used to view this object
            info.last_viewer_info = vinfo
            if info in self.history:
                # update cursor to match dataobj
                self.cursor = self.history.index(info)
        self.fv.channel_image_updated(self, dataobj)
        # Check for preloading any dataobjs into memory
        preload = self.settings.get('preload_images', False)
        if not preload:
            return
        # queue next and previous files for preloading
        index = self.cursor
        if index < len(self.history) - 1:
            info = self.history[index + 1]
            if info.path is not None:
                self.fv.add_preload(self.name, info)
        if index > 0:
            info = self.history[index - 1]
            if info.path is not None:
                self.fv.add_preload(self.name, info)
    def switch_image(self, image):
        # Display `image` unless it is already the current one.
        curimage = self.get_current_image()
        if curimage == image:
            self.logger.debug("Apparently no need to set channel viewer.")
            return
        self.logger.debug("updating viewer...")
        self.view_object(image)
    def switch_name(self, imname):
        # Display the image named `imname`, reconstituting it from its
        # image_future or reloading it from disk if it was evicted from
        # memory.  Raises ChannelError if it cannot be recreated.
        if imname in self.datasrc:
            # Image is still in the heap
            image = self.datasrc[imname]
            self.switch_image(image)
            return
        if not (imname in self.image_index):
            errmsg = "No image by the name '%s' found" % (imname)
            self.logger.error("Can't switch to image '%s': %s" % (
                imname, errmsg))
            raise ChannelError(errmsg)
        # Do we have a way to reconstruct this image from a future?
        info = self.image_index[imname]
        if info.image_future is not None:
            self.logger.info("Image '%s' is no longer in memory; attempting "
                             "image future" % (imname))
            # TODO: recode this--it's a bit messy
            def _switch(image):
                # this will be executed in the gui thread
                self.add_image(image, silent=True)
                self.switch_image(image)
                # reset modified timestamp
                info.time_modified = None
                self.fv.make_async_gui_callback('add-image-info', self, info)
            def _load_n_switch(imname, path, image_future):
                # this will be executed in a non-gui thread
                # reconstitute the image
                image = self.fv.error_wrap(image_future.thaw)
                if isinstance(image, Exception):
                    errmsg = "Error reconstituting image: %s" % (str(image))
                    self.logger.error(errmsg)
                    raise image
                profile = info.get('profile', None)
                if profile is None:
                    profile = self.get_image_profile(image)
                    info.profile = profile
                # perpetuate some of the image metadata
                image.set(image_future=image_future, name=imname, path=path,
                          image_info=info, profile=profile)
                self.fv.gui_do(_switch, image)
            self.fv.nongui_do(_load_n_switch, imname, info.path,
                              info.image_future)
        elif info.path is not None:
            # Do we have a path? We can try to reload it
            self.logger.debug("Image '%s' is no longer in memory; attempting "
                              "to load from %s" % (imname, info.path))
            #self.fv.load_file(path, chname=chname)
            self.fv.nongui_do(self.load_file, info.path, chname=self.name)
        else:
            raise ChannelError("No way to recreate image '%s'" % (imname))
    def _configure_sort(self):
        # Pick the history sort key from the 'sort_order' setting
        # (default: load time; 'alpha': by name).
        self.hist_sort = lambda info: info.time_added
        # set sorting function
        sort_order = self.settings.get('sort_order', 'loadtime')
        if sort_order == 'alpha':
            # sort history alphabetically
            self.hist_sort = lambda info: info.name
    def _sort_changed_ext_cb(self, setting, value):
        # Callback: 'sort_order' setting changed; re-sort history.
        self._configure_sort()
        self.history.sort(key=self.hist_sort)
    def get_image_profile(self, image):
        # Return the image's profile, creating and attaching an empty one
        # if missing.
        profile = image.get('profile', None)
        if profile is None:
            profile = Settings.SettingGroup()
            image.set(profile=profile)
        return profile
    def __len__(self):
        return len(self.history)
    def __contains__(self, imname):
        return imname in self.image_index
    def __getitem__(self, imname):
        return self.image_index[imname]
# END
|
|
# adalink STLink V2 Programmer (using OpenOCD).
#
# Python interface to control the STLink V2 programmer using OpenOCD.
#
# Note you MUST have OpenOCD installed.
#
# Author: Tony DiCola
import logging
import os
import platform
import re
import sys
import subprocess
import threading
import time
from .base import Programmer
from ..errors import AdaLinkError
# A macOS GUI-based app does not have the same PATH as a terminal session,
# so append /usr/local/bin (where Homebrew installs openocd) explicitly.
if platform.system() == 'Darwin':
    os.environ["PATH"] = os.environ["PATH"] + ':/usr/local/bin'
# Module-level logger for this programmer backend.
logger = logging.getLogger(__name__)
class STLink(Programmer):
# Name used to identify this programmer on the command line.
name = 'stlink'
def __init__(self, openocd_exe=None, openocd_path='', params=None):
"""Create a new instance of the STLink communication class. By default
OpenOCD should be accessible in your system path and it will be used
to communicate with a connected STLink device.
You can override the OpenOCD executable name by specifying a value in
the openocd_exe parameter. You can also manually specify the path to the
OpenOCD executable in the openocd_path parameter.
Optional command line arguments to OpenOCD can be provided in the
params parameter as a string.
"""
# If not provided, pick the appropriate OpenOCD name based on the
# platform:
# - Linux = openocd
# - Mac = openocd
# - Windows = openocd.exe
if openocd_exe is None:
system = platform.system()
if system == 'Linux' or system == 'Darwin':
openocd_exe = 'openocd'
elif system == 'Windows':
openocd_exe = 'openocd.exe'
else:
raise AdaLinkError('Unsupported system: {0}'.format(system))
# Store the path to the OpenOCD tool so it can later be run.
self._openocd_path = os.path.join(openocd_path, openocd_exe)
logger.info('Using path to OpenOCD: {0}'.format(self._openocd_path))
# Apply command line parameters if specified.
self._openocd_params = []
if params is not None:
self._openocd_params.extend(params.split())
logger.info('Using parameters to OpenOCD: {0}'.format(params))
# Make sure we have OpenOCD in the system path
self._test_openocd()
def _test_openocd(self):
"""Checks if OpenOCD 0.9.0 is found in the system path or not."""
# Spawn OpenOCD process with --version and capture its output.
args = [self._openocd_path, '--version']
try:
process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output, err = process.communicate()
# Parse out version number from response.
match = re.search('^Open On-Chip Debugger (\S+)', output,
re.IGNORECASE | re.MULTILINE)
if not match:
return
# Simple semantic version check to see if OpenOCD version is greater
# or equal to 0.9.0.
version = match.group(1).split('.')
if int(version[0]) > 0:
# Version 1 or greater, assume it's good (higher than 0.9.0).
return
if int(version[0]) == 0 and int(version[1]) >= 9:
# Version 0.9 or greater, assume it's good.
return
# Otherwise assume version is too old because it's below 0.9.0.
raise RuntimError
except Exception as ex:
print('ERROR', ex)
raise AdaLinkError('Failed to find OpenOCD 0.9.0 or greater! Make '
'sure OpenOCD 0.9.0 is installed and in your '
'system path.')
def run_commands(self, commands, timeout_sec=60):
    """Run the provided list of commands with OpenOCD. Commands should be
    a list of strings with OpenOCD commands to run. Returns the
    output of OpenOCD. If execution takes longer than timeout_sec an
    exception will be thrown. Set timeout_sec to None to disable the timeout
    completely.
    """
    # Spawn OpenOCD process and capture its output.
    args = [self._openocd_path]
    args.extend(self._openocd_params)
    # Pass each command with -c, quoted so the shell hands the whole
    # command to OpenOCD as a single argument.
    for c in commands:
        args.append('-c')
        args.append('"{0}"'.format(c))
    # Join into one command line because Popen below uses shell=True.
    args = ' '.join(args)
    logger.debug('Running OpenOCD command: {0}'.format(args))
    process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
    if timeout_sec is not None:
        # Use a timer to stop the subprocess if the timeout is exceeded.
        # This helps prevent very subtle issues with deadlocks on reading
        # subprocess output. See: http://stackoverflow.com/a/10012262
        def timeout_exceeded(p):
            # Stop the subprocess and kill the whole program.
            # NOTE(review): this raise happens on the timer thread, so the
            # AdaLinkError cannot be caught by run_commands' caller; the
            # effective behavior is p.kill() unblocking communicate() below.
            p.kill()
            raise AdaLinkError('OpenOCD process exceeded timeout!')
        timeout = threading.Timer(timeout_sec, timeout_exceeded, [process])
        timeout.start()
    # Grab output of STLink.
    output, err = process.communicate()
    if timeout_sec is not None:
        # Stop timeout timer when communicate call returns.
        timeout.cancel()
    logger.debug('OpenOCD response: {0}'.format(output))
    return output
def _readmem(self, address, command):
    """Read memory at *address* with the given OpenOCD display command.

    ``command`` is one of OpenOCD's memory display commands ('mdw', 'mdh'
    or 'mdb', see the readmem* wrappers).  Returns the value as an int
    parsed from the "<address>: <hex value>" line of OpenOCD's output, or
    raises AdaLinkError when that line is missing.
    """
    # Build list of commands to read the memory location.
    address = '0x{0:08X}'.format(address)  # Convert address value to hex string.
    commands = [
        'init',
        '{0} {1}'.format(command, address),
        'exit'
    ]
    # Run command and parse output for the value.
    output = self.run_commands(commands)
    # FIX: raw string so '\S' is a regex class instead of an invalid
    # string escape (DeprecationWarning/SyntaxWarning on modern Python).
    match = re.search(r'^{0}: (\S+)'.format(address), output,
                      re.IGNORECASE | re.MULTILINE)
    if match:
        return int(match.group(1), 16)
    else:
        raise AdaLinkError('Could not find expected memory value, are the STLink and board connected?')
def is_connected(self):
    """Return True if the device is connected to the programmer."""
    # A bare init/exit session that emits any 'Error:' line means OpenOCD
    # could not reach the probe or target.
    session_output = self.run_commands(['init', 'exit'])
    return 'Error:' not in session_output
def wipe(self):
    """Wipe clean the flash memory of the device.

    Will happen before any programming if requested.  OpenOCD has no
    chip-agnostic mass-erase command — only chip-specific ones — so no
    default implementation is provided here; each core subclass must
    override this with its own erase sequence.
    """
    raise NotImplementedError
def program(self, hex_files=None, bin_files=None):
    """Program chip with provided list of hex and/or bin files.

    hex_files is a list of paths to .hex files, and bin_files is a list of
    (path, start_address) tuples where start_address is the integer load
    address for the raw .bin image.

    FIX: the defaults were the mutable literals [] (shared across calls);
    replaced with the None-sentinel idiom.  Caller-visible behavior is
    unchanged.
    """
    if hex_files is None:
        hex_files = []
    if bin_files is None:
        bin_files = []
    # Bring the target into a known halted state before flashing.
    commands = [
        'init',
        'reset init',
        'halt'
    ]
    # Program each hex file (load addresses come from the ihex records,
    # hence the 0 offset).
    for f in hex_files:
        f = self.escape_path(os.path.abspath(f))
        commands.append('flash write_image {0} 0 ihex'.format(f))
    # Program each bin file at its explicit start address.
    for f, addr in bin_files:
        f = self.escape_path(os.path.abspath(f))
        commands.append('flash write_image {0} 0x{1:08X} bin'.format(f, addr))
    commands.append('reset run')
    commands.append('exit')
    self.run_commands(commands)
def readmem32(self, address):
    """Return the 32-bit value stored at the provided memory address."""
    read_command = 'mdw'  # OpenOCD memory-display-word (32-bit) command.
    return self._readmem(address, read_command)
def readmem16(self, address):
    """Return the 16-bit value stored at the provided memory address."""
    read_command = 'mdh'  # OpenOCD memory-display-halfword (16-bit) command.
    return self._readmem(address, read_command)
def readmem8(self, address):
    """Return the 8-bit value stored at the provided memory address."""
    read_command = 'mdb'  # OpenOCD memory-display-byte (8-bit) command.
    return self._readmem(address, read_command)
def escape_path(self, path):
    """Wrap *path* in Tcl braces so spaces, backslashes, etc. are taken
    literally instead of being misinterpreted by the Tcl interpreter.
    """
    # Tcl treats everything between { and } as one verbatim word.
    return '{' + path + '}'
|
|
import csb.test as test
from csb.bio.io import ClansParser
from csb.bio.io.clans import Clans, ClansEntry, ClansParams, ClansSeqgroup,\
Color, ClansEntryCollection, ClansSeqgroupCollection, DuplicateEntryError,\
DuplicateEntryNameError
@test.unit
class TestClansColor(test.Case):
    """Unit tests for the CLANS Color value object."""

    def setUp(self):
        super(TestClansColor, self).setUp()

    def testColorInit(self):
        """A fresh Color defaults to black (r == g == b == 0)."""
        black = Color()
        for channel in ('r', 'g', 'b'):
            self.assertEqual(getattr(black, channel), 0)

    def testColorSetter(self):
        """Channel values outside the 0..255 byte range are rejected."""
        color = Color()
        for channel in ('r', 'g', 'b'):
            for out_of_range in (-1, 256):
                self.assertRaises(ValueError,
                                  color.__setattr__, channel, out_of_range)

    def testParseClansColorWithCorrectInput(self):
        """A well-formed 'r;g;b' string parses into the three channels."""
        parsed = Color.from_string('83;92;3')
        self.assertEqual(parsed.r, 83)
        self.assertEqual(parsed.g, 92)
        self.assertEqual(parsed.b, 3)

    def testParseClansColorWithWrongInput(self):
        """Malformed inputs raise TypeError/ValueError as appropriate."""
        color = Color()
        # a tuple instead of a string
        self.assertRaises(TypeError, color.from_string, (83, 92, 3))
        # trailing semicolon produces an empty fourth field
        self.assertRaises(ValueError, color.from_string, '83;92;3;')
        # too few fields
        self.assertRaises(ValueError, color.from_string, '83;92')

    def testToClansColor(self):
        """to_clans_color() renders 'r;g;b;a' including the alpha channel."""
        color = Color()
        self.assertEqual(color.to_clans_color(), '0;0;0;255')
        rgba = (83, 92, 3, 87)
        color.r, color.g, color.b, color.a = rgba
        self.assertEqual(color.to_clans_color(), ';'.join(map(str, rgba)))
@test.functional
class TestClansParams(test.Case):
    """Functional tests for ClansParams construction and validation."""

    def setUp(self):
        super(TestClansParams, self).setUp()

    def testInstatiation(self):
        """A default-constructed ClansParams carries the documented defaults."""
        cp = ClansParams()
        for attribute_name, default_value in cp._DEFAULTS.items():
            # 'colors' is deliberately excluded from the generic comparison.
            if attribute_name == 'colors':
                continue
            self.assertEqual(getattr(cp, attribute_name), default_value)

    def testUnknownParamFail(self):
        """Unknown constructor keywords are rejected with KeyError."""
        self.assertRaises(KeyError, ClansParams, unknownParam=True)

    def testForbiddenAssignments(self):
        """Every parameter rejects a value of the wrong type."""
        # Numeric/bool parameters reject the string 'a'; the two path
        # parameters expect a string and reject the int 3.
        wrong_values = {'blastpath': 3, 'formatdbpath': 3}
        all_params = ('attfactor', 'attvalpow', 'avgfoldchange', 'blastpath',
                      'cluster2d', 'colors', 'complexatt', 'cooling',
                      'currcool', 'dampening', 'dotsize', 'formatdbpath',
                      'groupsize', 'maxmove', 'minattract', 'ovalsize',
                      'pval', 'repfactor', 'repvalpow', 'showinfo',
                      'usefoldchange', 'usescval', 'zoom')
        for param in all_params:
            bad_value = wrong_values.get(param, 'a')
            self.assertRaises(ValueError, ClansParams, **{param: bad_value})
@test.functional
class TestClans(test.Case):
    """Functional tests for the Clans container."""

    def setUp(self):
        super(TestClans, self).setUp()

    def testClansInit(self):
        '''
        Test creating an empty L{Clans} instance.
        '''
        c = Clans()
        expected_params = ('attfactor', 'attvalpow', 'avgfoldchange',
                           'blastpath', 'cluster2d', 'colors', 'complexatt',
                           'cooling', 'currcool', 'dampening', 'dotsize',
                           'formatdbpath', 'groupsize', 'maxmove',
                           'minattract', 'ovalsize', 'pval', 'repfactor',
                           'repvalpow', 'showinfo', 'usefoldchange',
                           'usescval', 'zoom')
        for param_name in expected_params:
            self.assertTrue(hasattr(c.params, param_name))
        # A fresh instance: no source file, identity-shaped rotation matrix,
        # and empty (typed) entry/seqgroup collections.
        self.assertEqual(c.filename, None)
        self.assertEqual(c.rotmtx.shape, (3, 3))
        self.assertEqual(len(c.entries), 0)
        self.assertEqual(len(c.seqgroups), 0)
        self.assertTrue(isinstance(c.entries, ClansEntryCollection))
        self.assertTrue(isinstance(c.seqgroups, ClansSeqgroupCollection))

    def testClansEntryAddingAndSorting(self):
        """sort() orders entries alphabetically by name."""
        c = Clans()
        for name in ('g', 'f', 'b', 'd', 'e', 'c', 'a'):
            c.add_entry(ClansEntry(name=name))
        c.sort()
        sorted_names = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
        for position, entry in enumerate(c):
            self.assertEqual(entry.name, sorted_names[position])

    def testClansEntrySortingWithCustomKeyFunction(self):
        """sort(key=...) honors a user-supplied key function."""
        c = Clans()
        ordered_sequences = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
        shuffled_sequences = ['g', 'f', 'b', 'd', 'e', 'c', 'a']
        for i, seq in enumerate(shuffled_sequences):
            c.add_entry(ClansEntry(name=str(i), seq=seq))
        # sort by sequence instead of name
        c.sort(key=lambda entry: entry.seq)
        for position, entry in enumerate(c):
            self.assertEqual(entry.seq, ordered_sequences[position])

    def testGetEntry(self):
        """get_entry() fetches by name; duplicates honor the pedantic flag."""
        c = Clans()
        ## get non-existant entry from empty clans instance
        self.assertRaises(ValueError, c.get_entry, 'a')
        names = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
        entries = [ClansEntry(name=name) for name in names]
        for entry in entries:
            c.add_entry(entry)
        ## check whether entries fetched by name match those created
        for name, entry in zip(names, entries):
            self.assertEqual(c.get_entry(name), entry)
        ## check pedantic flag for duplicate name='a' entries
        c.add_entry(ClansEntry(name='a'))
        self.assertTrue(c.get_entry('a', False).name == 'a')
        self.assertRaises(DuplicateEntryNameError, c.get_entry, 'a', True)

    def testDuplicateEntryError(self):
        """Adding the same entry twice is caught when the index is rebuilt."""
        c = Clans()
        duplicate = ClansEntry(name='a', seq='A', coords=(1., 1., 1.))
        c.add_entry(duplicate)
        c.add_entry(duplicate)
        size_before = len(c)
        self.assertRaises(DuplicateEntryError, c._update_index)
        # The failed rebuild must not change the number of entries.
        self.assertEqual(size_before, len(c))
@test.functional
class TestClansSeqgroup(test.Case):
    """Functional tests for ClansSeqgroup membership handling."""

    def setUp(self):
        super(TestClansSeqgroup, self).setUp()

    def testInit(self):
        # A newly created seqgroup has no members.
        sg = ClansSeqgroup()
        self.assertTrue(sg.is_empty())

    def testAddingAndRemovingSeqgroups(self):
        c = Clans()
        names = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
        # Adding groups one by one grows the seqgroup collection in step.
        for i, name in enumerate(names):
            c.add_group(ClansSeqgroup(name=name))
            self.assertEqual(len(c.seqgroups), i + 1)
        # Removing from the tail empties the collection again.
        removed = 0
        while len(c.seqgroups) != 0:
            c.remove_group(c.seqgroups[-1])
            removed += 1
        self.assertEqual(removed, len(names))
        self.assertEqual(len(c.seqgroups), 0)
        # Only ClansEntry instances may be added to / removed from a group.
        testGroup = ClansSeqgroup()
        self.assertRaises(TypeError, testGroup.add, 23)
        self.assertRaises(TypeError, testGroup.remove, 23)

    def testAppendingSeqgroupsFromOtherInstance(self):
        source = Clans()
        source_entry1 = ClansEntry(name='X', seq='S')
        source_entry2 = ClansEntry(name='A', seq='S')
        source.add_entry(source_entry1)
        source.add_entry(source_entry2)
        # Groups whose single member ('X') also exists in the target ...
        seqgroup_names_to_transfer = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
        for i, name in enumerate(seqgroup_names_to_transfer):
            sg = ClansSeqgroup(name=name)
            sg.add(source_entry1)
            source.add_group(sg)
        # ... and groups whose member ('A') is absent from the target.
        seqgroup_names_to_omit = ['x', 'y', 'z']
        for i, name in enumerate(seqgroup_names_to_omit):
            sg = ClansSeqgroup(name=name)
            sg.add(source_entry2)
            source.add_group(sg)
        target = Clans()
        # different seq is tolerated, only name identity is checked
        target_entry = ClansEntry(name='X', seq='Q')
        target.add_entry(target_entry)
        self.assertEqual(source[0].name, target[0].name)
        target.append_groups_from(source)
        ## each group should have exactly one member
        self.assertEqual(len(set([len(group.members) for group in target.seqgroups])), 1)
        ## all groups of seqgroup_names should have been transferred
        self.assertEqual(len(target.seqgroups), len(seqgroup_names_to_transfer))
        self.assertEqual([group.name for group in target.seqgroups], seqgroup_names_to_transfer)
        ## the ones from seqgroup_names_to_omit should not be there
        self.assertEqual(len([group.name for group in target.seqgroups
                              if group.name in seqgroup_names_to_omit]), 0)

    def testAddingClansEntries(self):
        c = Clans()
        sg = ClansSeqgroup()
        c.add_group(sg)
        e = ClansEntry()
        c.add_entry(e)
        ## add entry to seqgroup
        sg.add(e)
        # Membership is recorded on both sides of the relation.
        self.assertEqual(len(sg), 1)
        self.assertEqual(len(e.groups), 1)
        ## adding the same entry is forbidden
        self.assertRaises(ValueError, sg.add, e)
        ## adding s.th. else than a ClansEntry
        self.assertRaises(TypeError, sg.add, 23)

    def testRemovingClansEntries(self):
        c = Clans()
        sg = ClansSeqgroup()
        c.add_group(sg)
        e = ClansEntry()
        c.add_entry(e)
        sg.add(e)
        # Removal detaches the entry from the group and vice versa.
        sg.remove(e)
        self.assertEqual(len(sg), 0)
        self.assertEqual(len(e.groups), 0)
        # Removing a non-entry fails with TypeError; a non-member with ValueError.
        self.assertRaises(TypeError, sg.remove, 23)
        self.assertRaises(ValueError, sg.remove, e)
@test.functional
class TestClansParser(test.Case):
    """Functional tests for parsing the bundled 'out.clans' fixture file."""

    def setUp(self):
        super(TestClansParser, self).setUp()
        self.filename = self.config.getTestFile('out.clans')

    def testPrematureGetter(self):
        '''
        Test whether the premature (before parsing) access to clans_instance is
        properly handled.
        '''
        cp = ClansParser()
        self.assertRaises(ValueError, cp.__getattribute__, 'clans_instance')

    def testParseFile(self):
        '''
        Test parsing of a small dummy file with known values
        '''
        from numpy import array
        cp = ClansParser()
        self.clans_instance = cp.parse_file(self.filename)
        # The fixture holds exactly 41 entries; index 41 is out of range.
        self.assertEqual(len(self.clans_instance), 41)
        self.assertRaises(IndexError, self.clans_instance.__getitem__, 41)
        # Rotation matrix values as stored in the fixture file.
        correct_rotmtx = array([[0.75614862, 0.65439992, 0.],
                                [-0.65439992, 0.75614862, 0.],
                                [0., 0., 1.]])
        self.assertEqual(self.clans_instance.rotmtx.shape, (3, 3))
        self.assertTrue(
            (self.clans_instance.rotmtx - correct_rotmtx < 1e-6).all())
        self.assertEqual(len(self.clans_instance.seqgroups), 4)
        # Names and member counts of the four seqgroups in the fixture.
        seqgroup_names = ('insect hypoth. protein (2 copies, C term)',
                          'allergens >= xyz',
                          'empty group WITH terminal semicolon in numbers line',
                          'empty group WITHOUT terminal semicolon in numbers line')
        seqgroup_sizes = (20, 17, 0, 0)
        for i, seqgroup in enumerate(self.clans_instance.seqgroups):
            self.assertEqual(len(seqgroup), seqgroup_sizes[i])
            self.assertEqual(seqgroup.name, seqgroup_names[i])
@test.functional
class TestClansFileWriter(test.Case):
    """Round-trip test: a parsed CLANS file written back must match the
    original, modulo known formatting differences (color rendering, float
    formatting of HSP values, and a normalized empty 'numbers=' line).
    """

    def setUp(self):
        super(TestClansFileWriter, self).setUp()
        self.filename = self.config.getTestFile('out.clans')
        self.temp = self.config.getTempStream()

    def testWrittenIsIdenticalToOriginal(self):
        # Parse the fixture and immediately write it back to a temp file.
        cp = ClansParser()
        clans_instance = cp.parse_file(self.filename)
        clans_instance.write(self.temp.name)
        self.temp.flush()
        with open(self.filename) as original_file:
            original_lines = original_file.readlines()
        with open(self.temp.name) as written_file:
            written_lines = written_file.readlines()
        self.assertEqual(len(original_lines), len(written_lines))
        # Section state: some sections are compared semantically rather
        # than byte-by-byte, so track where we are in the file.
        in_hsps = False
        start_tag_hsp = '<hsp>\n'
        end_tag_hsp = '</hsp>\n'
        in_seqgroups = False
        start_tag_seqgroups = '<seqgroups>\n'
        end_tag_seqgroups = '</seqgroups>\n'
        colorarr_tag = 'colorarr='
        color_tag = 'color='
        for i, original_line in enumerate(original_lines):
            # Section delimiters only flip state; they are not compared.
            if original_line == start_tag_hsp:
                in_hsps = True
                continue
            if original_line == end_tag_hsp:
                in_hsps = False
                continue
            if original_line == start_tag_seqgroups:
                in_seqgroups = True
                continue
            if original_line == end_tag_seqgroups:
                in_seqgroups = False
                continue
            if original_line.startswith(colorarr_tag):
                ## remove colorarr_tag from beginning of line
                original_line = original_line[len(colorarr_tag):].strip().strip(':')
                self.assertTrue(written_lines[i].startswith(colorarr_tag))
                written_line = written_lines[i][len(colorarr_tag):].strip().strip(':')
                # Compare each color component-wise, ignoring parentheses.
                original_colors = original_line.replace('(', ''). replace(')', '').split(':')
                written_colors = written_line.replace('(', ''). replace(')', '').split(':')
                self.assertEqual(len(original_colors), len(written_colors))
                for j, original_color_string in enumerate(original_colors):
                    original_color = Color.from_string(original_color_string)
                    written_color = Color.from_string(written_colors[j])
                    self.assertEqual(original_color.r, written_color.r)
                    self.assertEqual(original_color.g, written_color.g)
                    self.assertEqual(original_color.b, written_color.b)
                    self.assertEqual(original_color.a, written_color.a)
                continue
            if original_line.startswith(color_tag):
                # Single color line: compare channel values, not raw text.
                original_color_string = original_line[len(color_tag):].strip()
                self.assertTrue(written_lines[i].startswith(color_tag))
                written_color_string = written_lines[i][len(color_tag):].strip()
                original_color = Color.from_string(original_color_string)
                written_color = Color.from_string(written_color_string)
                self.assertEqual(original_color.r, written_color.r)
                self.assertEqual(original_color.g, written_color.g)
                self.assertEqual(original_color.b, written_color.b)
                self.assertEqual(original_color.a, written_color.a)
                continue
            if in_hsps:
                # HSP lines are '<start end>:<value>'; the value is a float
                # that may be formatted differently, so compare numerically.
                original_start_end, original_value \
                    = original_line.strip().split(':')
                written_start_end, written_value \
                    = written_lines[i].strip().split(':')
                self.assertEqual(original_start_end, written_start_end)
                self.assertTrue((float(original_value) - float(written_value)) < 1e-6)
            elif in_seqgroups and (original_line == 'numbers=\n'):
                ## a terminal semicolon is added by the ClansWriter
                self.assertEqual(original_line.strip() + ';', written_lines[i].strip())
            else:
                # Everything else must round-trip byte-identically.
                self.assertEqual(original_line, written_lines[i])

    def tearDown(self):
        self.temp.close()
# Run the csb test console when this module is executed as a script.
if __name__ == '__main__':
    test.Console()
|
|
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# project
from checks import AgentCheck
from checks.libs.wmi.sampler import WMISampler
from collections import namedtuple
# Lightweight record for one extracted WMI metric sample:
# raw WMI property name, numeric value, and the tags attached to it.
WMIMetric = namedtuple('WMIMetric', ['name', 'value', 'tags'])
class InvalidWMIQuery(Exception):
    """Raised when a WMI query is invalid."""
class MissingTagBy(Exception):
    """Raised when a WMI query returns multiple rows but no `tag_by` value
    was given to disambiguate them."""
class TagQueryUniquenessFailure(Exception):
    """Raised when a 'tagging query' returns zero results or more than one."""
class WinWMICheck(AgentCheck):
    """
    WMI check.
    Windows only.
    """
    def __init__(self, name, init_config, agentConfig, instances):
        AgentCheck.__init__(self, name, init_config, agentConfig, instances)
        # Caches keyed by instance key (see _get_instance_key):
        #   wmi_samplers - WMISampler objects reused across check runs
        #   wmi_props    - ((metric name, type) by property, property list)
        self.wmi_samplers = {}
        self.wmi_props = {}

    def _format_tag_query(self, sampler, wmi_obj, tag_query):
        """
        Format `tag_query` or raise on incorrect parameters.

        A tag query is a 4-element sequence:
        [link source property, target class, link target class property,
        target property].  Returns (target_class, target_property, filters)
        suitable for building a follow-up WMISampler.
        """
        try:
            link_source_property = int(wmi_obj[tag_query[0]])
            target_class = tag_query[1]
            link_target_class_property = tag_query[2]
            target_property = tag_query[3]
        except IndexError:
            # Fewer than 4 elements were supplied in the configuration.
            self.log.error(
                u"Wrong `tag_queries` parameter format. "
                "Please refer to the configuration file for more information.")
            raise
        except TypeError:
            # The link source property value could not be converted to int.
            self.log.error(
                u"Incorrect 'link source property' in `tag_queries` parameter:"
                " `{wmi_property}` is not a property of `{wmi_class}`".format(
                    wmi_property=tag_query[0],
                    wmi_class=sampler.class_name,
                )
            )
            raise
        return target_class, target_property, [{link_target_class_property: link_source_property}]

    def _raise_on_invalid_tag_query_result(self, sampler, wmi_obj, tag_query):
        """
        Validate a tag-query sampler result: exactly one row whose target
        property is non-empty.  Raises TagQueryUniquenessFailure when zero
        or multiple rows came back, TypeError when the property is empty.
        """
        target_property = sampler.property_names[0]
        target_class = sampler.class_name
        if len(sampler) != 1:
            # Zero rows, or more than one row: the tag would be ambiguous.
            message = "no result was returned"
            if len(sampler):
                message = "multiple results returned (one expected)"
            self.log.warning(
                u"Failed to extract a tag from `tag_queries` parameter: {reason}."
                " wmi_object={wmi_obj} - query={tag_query}".format(
                    reason=message,
                    wmi_obj=wmi_obj, tag_query=tag_query,
                )
            )
            raise TagQueryUniquenessFailure
        if sampler[0][target_property] is None:
            self.log.error(
                u"Incorrect 'target property' in `tag_queries` parameter:"
                " `{wmi_property}` is empty or is not a property"
                "of `{wmi_class}`".format(
                    wmi_property=target_property,
                    wmi_class=target_class,
                )
            )
            raise TypeError

    def _get_tag_query_tag(self, sampler, wmi_obj, tag_query):
        """
        Design a query based on the given WMIObject to extract a tag.
        Returns: tag or TagQueryUniquenessFailure exception.
        """
        self.log.debug(
            u"`tag_queries` parameter found."
            " wmi_object={wmi_obj} - query={tag_query}".format(
                wmi_obj=wmi_obj, tag_query=tag_query,
            )
        )
        # Extract query information
        target_class, target_property, filters = \
            self._format_tag_query(sampler, wmi_obj, tag_query)
        # Create a specific sampler (reusing the parent sampler's connection).
        tag_query_sampler = WMISampler(
            self.log,
            target_class, [target_property],
            filters=filters,
            **sampler.connection
        )
        tag_query_sampler.sample()
        # Extract tag
        self._raise_on_invalid_tag_query_result(tag_query_sampler, wmi_obj, tag_query)
        # Whitespace in the value is collapsed to '_' to keep the tag simple.
        link_value = str(tag_query_sampler[0][target_property]).lower()
        tag = "{tag_name}:{tag_value}".format(
            tag_name=target_property.lower(),
            tag_value="_".join(link_value.split())
        )
        self.log.debug(u"Extracted `tag_queries` tag: '{tag}'".format(tag=tag))
        return tag

    def _extract_metrics(self, wmi_sampler, tag_by, tag_queries, constant_tags):
        """
        Extract and tag metrics from the WMISampler.
        Raise when multiple WMIObject were returned by the sampler with no `tag_by` specified.
        Returns: List of WMIMetric
        ```
        [
            WMIMetric("freemegabytes", 19742, ["name:_total"]),
            WMIMetric("avgdiskbytesperwrite", 1536, ["name:c:"]),
        ]
        ```
        """
        if len(wmi_sampler) > 1 and not tag_by:
            raise MissingTagBy(
                u"WMI query returned multiple rows but no `tag_by` value was given."
                " class={wmi_class} - properties={wmi_properties} - filters={filters}".format(
                    wmi_class=wmi_sampler.class_name, wmi_properties=wmi_sampler.property_names,
                    filters=wmi_sampler.filters,
                )
            )
        metrics = []
        # NOTE(review): assumes tag_by is always a string ("" when unused,
        # see _get_wmi_sampler's default) — None would fail here.
        tag_by = tag_by.lower()
        for wmi_obj in wmi_sampler:
            # Start from a copy so per-object tags don't leak across objects.
            tags = list(constant_tags) if constant_tags else []
            # Tag with `tag_queries` parameter
            for query in tag_queries:
                try:
                    tags.append(self._get_tag_query_tag(wmi_sampler, wmi_obj, query))
                except TagQueryUniquenessFailure:
                    # Already logged in the helper; skip just this tag.
                    continue
            for wmi_property, wmi_value in wmi_obj.iteritems():
                # Tag with `tag_by` parameter
                if wmi_property == tag_by:
                    tag_value = str(wmi_value).lower()
                    # Strip any '#<instance index>' suffix from the value.
                    if tag_queries and tag_value.find("#") > 0:
                        tag_value = tag_value[:tag_value.find("#")]
                    tags.append(
                        "{name}:{value}".format(
                            name=tag_by, value=tag_value
                        )
                    )
                    continue
                # No metric extraction on 'Name' property
                if wmi_property == 'name':
                    continue
                try:
                    metrics.append(WMIMetric(wmi_property, float(wmi_value), tags))
                except ValueError:
                    self.log.warning(u"When extracting metrics with WMI, found a non digit value"
                                     " for property '{0}'.".format(wmi_property))
                    continue
                except TypeError:
                    self.log.warning(u"When extracting metrics with WMI, found a missing property"
                                     " '{0}'".format(wmi_property))
                    continue
        return metrics

    def _submit_metrics(self, metrics, metric_name_and_type_by_property):
        """
        Resolve metric names and types and submit it.
        """
        for metric in metrics:
            if metric.name not in metric_name_and_type_by_property:
                # Only report the metrics that were specified in the configration
                # Ignore added properties like 'Timestamp_Sys100NS', `Frequency_Sys100NS`, etc ...
                continue
            metric_name, metric_type = metric_name_and_type_by_property[metric.name]
            try:
                # Dispatch to the AgentCheck submission method named by the
                # configured metric type (e.g. 'gauge', 'rate').
                func = getattr(self, metric_type.lower())
            except AttributeError:
                raise Exception(u"Invalid metric type: {0}".format(metric_type))
            func(metric_name, metric.value, metric.tags)

    def _get_instance_key(self, host, namespace, wmi_class, other=None):
        """
        Return an index key for a given instance. Useful for caching.
        """
        if other:
            return "{host}:{namespace}:{wmi_class}-{other}".format(
                host=host, namespace=namespace, wmi_class=wmi_class, other=other
            )
        return "{host}:{namespace}:{wmi_class}".format(
            host=host, namespace=namespace, wmi_class=wmi_class,
        )

    def _get_wmi_sampler(self, instance_key, wmi_class, properties, tag_by="", **kwargs):
        """
        Create and cache a WMISampler for the given (class, properties)
        """
        # The tag_by property must be queried too so its value is available
        # for tagging in _extract_metrics.
        properties = properties + [tag_by] if tag_by else properties
        if instance_key not in self.wmi_samplers:
            wmi_sampler = WMISampler(self.log, wmi_class, properties, **kwargs)
            self.wmi_samplers[instance_key] = wmi_sampler
        return self.wmi_samplers[instance_key]

    def _get_wmi_properties(self, instance_key, metrics, tag_queries):
        """
        Create and cache a (metric name, metric type) by WMI property map and a property list.
        """
        if instance_key not in self.wmi_props:
            # Maps lowercased WMI property -> (metric name, metric type).
            metric_name_by_property = dict(
                (wmi_property.lower(), (metric_name, metric_type))
                for wmi_property, metric_name, metric_type in metrics
            )
            # First element of each metric triple / tag query is the property
            # to fetch.  NOTE(review): `map` (and `iteritems` above) show this
            # code targets Python 2, where map returns a list.
            properties = map(lambda x: x[0], metrics + tag_queries)
            self.wmi_props[instance_key] = (metric_name_by_property, properties)
        return self.wmi_props[instance_key]
def from_time(year=None, month=None, day=None, hours=None, minutes=None, seconds=None, microseconds=None, timezone=None):
    """Convenience wrapper to take a series of date/time elements and return a WMI time
    of the form `yyyymmddHHMMSS.mmmmmm+UUU`. All elements may be int, string or
    omitted altogether. If omitted, they will be replaced in the output string
    by a series of stars of the appropriate length.
    :param year: The year element of the date/time
    :param month: The month element of the date/time
    :param day: The day element of the date/time
    :param hours: The hours element of the date/time
    :param minutes: The minutes element of the date/time
    :param seconds: The seconds element of the date/time
    :param microseconds: The microseconds element of the date/time
    :param timezone: The timeezone element of the date/time
    :returns: A WMI datetime string of the form: `yyyymmddHHMMSS.mmmmmm+UUU`
    """
    def _field(value, width):
        # Omitted elements render as '*' placeholders of the field width;
        # present ones are zero-padded on the left to the field width.
        if value is None:
            return "*" * width
        return str(value).rjust(width, "0")

    parts = [
        _field(year, 4),
        _field(month, 2),
        _field(day, 2),
        _field(hours, 2),
        _field(minutes, 2),
        _field(seconds, 2),
        ".",
        _field(microseconds, 6),
    ]
    # The UTC-offset field carries an explicit sign.  A missing or
    # non-numeric offset defaults to '+'.
    if timezone is None:
        sign = "+"
    else:
        try:
            int(timezone)
        except ValueError:
            sign = "+"
        else:
            if timezone >= 0:
                sign = "+"
            else:
                sign = "-"
                timezone = abs(timezone)
    parts.append(sign)
    parts.append(_field(timezone, 3))
    return "".join(parts)
def to_time(wmi_time):
    """Convenience wrapper to take a WMI datetime string of the form
    yyyymmddHHMMSS.mmmmmm+UUU and return a tuple containing the
    individual elements, or None where string contains placeholder
    stars.

    FIX: the original docstring promised a 9-tuple; the function actually
    returns an 8-tuple.

    :param wmi_time: The WMI datetime string in `yyyymmddHHMMSS.mmmmmm+UUU` format
    :returns: An 8-tuple of (year, month, day, hours, minutes, seconds,
        microseconds, timezone)
    """
    def int_or_none(s, start, end):
        # Placeholder stars (or any non-digit field) yield None.
        try:
            return int(s[start:end])
        except ValueError:
            return None
    year = int_or_none(wmi_time, 0, 4)
    month = int_or_none(wmi_time, 4, 6)
    day = int_or_none(wmi_time, 6, 8)
    hours = int_or_none(wmi_time, 8, 10)
    minutes = int_or_none(wmi_time, 10, 12)
    seconds = int_or_none(wmi_time, 12, 14)
    microseconds = int_or_none(wmi_time, 15, 21)
    # NOTE(review): slicing from index 22 keeps only the digits of the UTC
    # offset and drops its sign character at index 21 — confirm callers
    # expect the unsigned offset string.
    timezone = wmi_time[22:]
    if timezone == "***":
        timezone = None
    return year, month, day, hours, minutes, seconds, microseconds, timezone
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.