repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
mastizada/pontoon | pontoon/projects/tests.py | 1 | 2869 | from django.http import HttpResponse
from django.shortcuts import render
from django_nose.tools import (
assert_equal,
assert_code,
)
from mock import patch
from pontoon.base.tests import (
ProjectFactory,
ResourceFactory,
TranslationFactory,
)
from pontoon.base.tests.test_views import ViewTestCase
from pontoon.projects import views
class ProjectTests(ViewTestCase):
    def test_project_doesnt_exist(self):
        """Requesting an unknown project slug must yield a 404 response."""
        response = self.client.get('/projects/project_doesnt_exist/')
        assert_code(response, 404)

    def test_project_view(self):
        """The project page is rendered with the requested project in context."""
        project = ProjectFactory.create()
        ResourceFactory.create(project=project)
        url = '/projects/{}/'.format(project.slug)
        # Spy on the real render() so the context it receives can be inspected.
        with patch('pontoon.projects.views.render', wraps=render) as render_spy:
            self.client.get(url)
            assert_equal(render_spy.call_args[0][2]['project'], project)
class ProjectContributorsTests(ViewTestCase):
    """Tests for the per-project contributors views."""

    def test_project_doesnt_exist(self):
        """
        Checks if view handles invalid project.
        """
        assert_code(self.client.get('/projects/project_doesnt_exist/contributors/'), 404)

    def test_project_top_contributors(self):
        """
        Tests if view returns top contributors specific for given project.
        """
        # Two independent projects, each with exactly one translation author,
        # so we can verify contributors are not leaked across projects.
        first_project = ProjectFactory.create()
        ResourceFactory.create(project=first_project)
        first_project_contributor = (
            TranslationFactory.create(entity__resource__project=first_project).user
        )
        second_project = ProjectFactory.create()
        ResourceFactory.create(project=second_project)
        second_project_contributor = (
            TranslationFactory.create(entity__resource__project=second_project).user
        )
        # Patch render_to_response so the context handed to it can be
        # inspected without rendering a real template.
        with patch.object(
            views.ProjectContributorsView, 'render_to_response', return_value=HttpResponse('')
        ) as mock_render:
            # The ajax endpoint expects an XMLHttpRequest header.
            self.client.get(
                '/projects/{}/ajax/contributors/'.format(first_project.slug),
                HTTP_X_REQUESTED_WITH='XMLHttpRequest'
            )
            assert_equal(mock_render.call_args[0][0]['project'], first_project)
            assert_equal(
                list(mock_render.call_args[0][0]['contributors']),
                [first_project_contributor]
            )
            self.client.get(
                '/projects/{}/ajax/contributors/'.format(second_project.slug),
                HTTP_X_REQUESTED_WITH='XMLHttpRequest'
            )
            assert_equal(mock_render.call_args[0][0]['project'], second_project)
            assert_equal(
                list(mock_render.call_args[0][0]['contributors']),
                [second_project_contributor]
            )
| bsd-3-clause |
mfeliu/djest | djest/__init__.py | 2 | 3815 | from django.test import TestCase
from django.core.urlresolvers import reverse_lazy
from django.conf import settings
from django.core import mail
from django.http.response import HttpResponseRedirect
from uuid import uuid4
from bs4 import BeautifulSoup
import json
class BaseCase(TestCase, dict):
    """
    Django ``TestCase`` augmented with small testing helpers.

    The class also inherits from ``dict`` so that model fixtures created
    via :meth:`new` can be stored on, and looked up from, the test case
    itself by name.  The last HTTP response obtained through :meth:`get`
    or :meth:`post` is kept in ``self.response``.
    """

    def __init__(self, *args, **kwargs):
        super(BaseCase, self).__init__(*args, **kwargs)

    def assert_redirect_to(self, part):
        """Assert the last response is a redirect whose URL contains *part*."""
        self.assertTrue(
            isinstance(self.response, HttpResponseRedirect)
        )
        self.assertTrue(
            part in self.response.url
        )

    def assert_mail_count(self, n):
        """Assert exactly *n* messages were sent via Django's test outbox."""
        self.assertEqual(len(mail.outbox), n)

    def reverse(self, *args, **kwargs):
        """Shortcut for ``reverse_lazy``."""
        return reverse_lazy(*args, **kwargs)

    def content(self):
        """
        Return the body of the last response, or ``None``.

        Template responses expose ``rendered_content``; plain responses
        expose ``content``.
        """
        content = None
        if hasattr(self.response, 'rendered_content'):
            content = self.response.rendered_content
        elif hasattr(self.response, 'content'):
            content = self.response.content
        return content

    def json(self):
        """Decode the last response body as JSON, or return ``None``."""
        try:
            return json.loads(self.content())
        # TypeError: content() returned None / bytes issues;
        # ValueError: body is not valid JSON.  (Was a bare ``except:``,
        # which also swallowed KeyboardInterrupt/SystemExit.)
        except (TypeError, ValueError):
            return None

    def wout(self):
        '''
        Write out the current response's rendered_content
        to a file in /tmp/out.html TODO: Use temporary
        builtin python module.
        '''
        content = self.content()
        self.debug(content)
        if content:
            with open('/tmp/out.html', 'w') as f:
                f.write(content.encode('utf8'))

    def debug(self, message):
        '''
        Utility method for debugging. Make sure
        settings.TEST_DEBUG is defined and set to
        True. When used, self.debug_buffer will contain
        concatinated debug messages.
        '''
        if (not hasattr(settings, 'TEST_DEBUG')) or (
            not settings.TEST_DEBUG
        ):
            return
        if not hasattr(self, 'debug_buffer'):
            self.debug_buffer = ''
        try:
            # Strip markup when the message looks like HTML; fall back to
            # the raw message for non-HTML/non-string input.
            message = BeautifulSoup(message).body.get_text()
        except Exception:
            pass
        # Collapse runs of blank lines for a compact debug log.
        while '\n\n' in message:
            message = message.replace('\n\n', '\n')
        self.debug_buffer += (
            message +
            '\n------------------------------\n'
        )

    def nop(*args, **kwargs):
        # use for mocking
        # don't do anything
        pass

    def new(self, name, klass, m_kwargs):
        """
        Create, save and register a model instance under *name*.

        Raises ValueError if *name* was already registered on this case.
        """
        if name in self:
            raise ValueError('Model already exists %s' % name)
        m = klass(**m_kwargs)
        m.save()
        self[name] = m
        return m

    def post(self, url, data):
        """
        POST *data* to *url* (following redirects) and fail loudly when the
        response context indicates a form validation error.
        """
        self.response = self.client.post(
            url,
            data,
            follow=True
        )
        if hasattr(
            self.response, 'context'
        ) and self.response.context:
            if 'errorlist' in self.response.context.keys():
                self.wout()
                raise Exception('Form did not validate?')
            if 'form' in self.response.context:
                if self.response.context['form']._errors:
                    self.wout()
                    raise Exception('Form did not validate?')
        return self.response

    def get(self, url):
        """GET *url*, remember and return the response."""
        self.response = self.client.get(url)
        return self.response

    def uuid4(self):
        """Return a fresh random 32-character hex string."""
        return uuid4().hex

    def assert_in_title(self, test):
        """Assert *test* occurs (case-insensitively) in the page <title>."""
        soup_title = BeautifulSoup(
            self.response.rendered_content
        ).title.get_text().lower()
        test = test.lower()
        if test not in soup_title:
            raise AssertionError('%s is not in %s' % (
                test,
                soup_title
            ))
        self.assertTrue(True)
| gpl-2.0 |
davidvon/pipa-pay-server | site-packages/whoosh/compat.py | 72 | 5322 | import array, sys
# Run time aliasing of Python2/3 differences
def htmlescape(s, quote=True):
    """
    Escape HTML special characters in *s*, like ``html.escape``.

    Replaces ``&``, ``<`` and ``>`` always; with ``quote=True`` (the
    default) also replaces ``"`` with ``&quot;`` and ``'`` with
    ``&#x27;``.  This is the portable fallback used when ``html.escape``
    is unavailable (Python 2.x, 3.0 and 3.1); later in this module it is
    overridden by ``html.escape`` when that import succeeds.

    Two fixes over the previous version: the single-quote replacement
    actually emits the ``&#x27;`` entity (it was a no-op ``replace("'",
    "'")``), and the deprecated/removed ``cgi.escape`` is no longer used
    (``cgi`` was removed in Python 3.13).
    """
    # Ampersand must be escaped first so it does not re-escape the
    # entities produced below (same order as cgi.escape/html.escape).
    s = s.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
    if quote:
        s = s.replace('"', "&quot;").replace("'", "&#x27;")
    return s
# One branch runs at import time depending on the interpreter major
# version; both define the same set of compatibility names.
if sys.version_info[0] < 3:
    PY3 = False

    def b(s):
        # Native strings are already bytes on Python 2.
        return s

    import cStringIO as StringIO

    StringIO = BytesIO = StringIO.StringIO
    callable = callable
    integer_types = (int, long)
    iteritems = lambda o: o.iteritems()
    itervalues = lambda o: o.itervalues()
    iterkeys = lambda o: o.iterkeys()
    from itertools import izip

    long_type = long
    next = lambda o: o.next()
    import cPickle as pickle
    from cPickle import dumps, loads, dump, load

    string_type = basestring
    text_type = unicode
    bytes_type = str
    unichr = unichr
    from urllib import urlretrieve

    def byte(num):
        # int -> single byte (a 1-char native str on Python 2).
        return chr(num)

    def u(s):
        # Interpret escape sequences, yielding a unicode string.
        return unicode(s, "unicode_escape")

    def with_metaclass(meta, base=object):
        # Python 2 spelling of "class X(base, metaclass=meta)".
        class _WhooshBase(base):
            __metaclass__ = meta
        return _WhooshBase

    xrange = xrange
    zip_ = zip

    def memoryview_(source, offset=None, length=None):
        # buffer() is the Python 2 zero-copy view type.
        if offset or length:
            return buffer(source, offset, length)
        else:
            return buffer(source)
    # NOTE(review): this branch does not define exec_, which the PY3
    # branch provides and with_metaclass below uses -- possibly lost in
    # extraction; confirm against upstream whoosh/compat.py.
else:
    PY3 = True
    import collections

    def b(s):
        # Native strings are text on Python 3; encode to bytes.
        return s.encode("latin-1")

    import io

    BytesIO = io.BytesIO
    callable = lambda o: isinstance(o, collections.Callable)
    # eval("exec") gives the exec *function* without a syntax error on
    # Python 2 parsers reading this file.
    exec_ = eval("exec")
    integer_types = (int,)
    iteritems = lambda o: o.items()
    itervalues = lambda o: o.values()
    iterkeys = lambda o: iter(o.keys())
    izip = zip
    long_type = int
    next = next
    import pickle
    from pickle import dumps, loads, dump, load

    StringIO = io.StringIO
    string_type = str
    text_type = str
    bytes_type = bytes
    unichr = chr
    from urllib.request import urlretrieve

    def byte(num):
        # int -> single byte (a length-1 bytes object on Python 3).
        return bytes((num,))

    def u(s):
        if isinstance(s, bytes):
            return s.decode("ascii")
        return s

    def with_metaclass(meta, base=object):
        # Python 3 metaclass syntax is a SyntaxError under Python 2, so
        # build the class through exec_ instead.
        ns = dict(base=base, meta=meta)
        exec_("""class _WhooshBase(base, metaclass=meta):
    pass""", ns)
        return ns["_WhooshBase"]

    xrange = range
    zip_ = lambda * args: list(zip(*args))

    def memoryview_(source, offset=None, length=None):
        mv = memoryview(source)
        if offset or length:
            return mv[offset:offset + length]
        else:
            return mv

    try:
        # for python >= 3.2, avoid DeprecationWarning for cgi.escape
        from html import escape as htmlescape
    except ImportError:
        pass
# array.array grew tobytes()/frombytes() in Python 3.2; older versions
# spell them tostring()/fromstring().  Pick the right pair at import time.
if hasattr(array.array, "tobytes"):
    def array_tobytes(arry):
        return arry.tobytes()

    def array_frombytes(arry, bs):
        return arry.frombytes(bs)
else:
    def array_tobytes(arry):
        return arry.tostring()

    def array_frombytes(arry, bs):
        return arry.fromstring(bs)
# Implementations missing from older versions of Python
try:
    from itertools import permutations  # @UnusedImport
except ImportError:
    # Python 2.5
    # Pure-Python fallback mirroring the itertools.permutations docs
    # recipe.  Only ever defined/run on Python 2.5, where the import
    # above fails (it relies on list-typed range() results, which would
    # not be assignable on Python 3).
    def permutations(iterable, r=None):
        pool = tuple(iterable)
        n = len(pool)
        r = n if r is None else r
        if r > n:
            return
        indices = range(n)
        cycles = range(n, n - r, -1)
        yield tuple(pool[i] for i in indices[:r])
        while n:
            for i in reversed(range(r)):
                cycles[i] -= 1
                if cycles[i] == 0:
                    # Rotate the tail and reset this cycle counter.
                    indices[i:] = indices[i + 1:] + indices[i:i + 1]
                    cycles[i] = n - i
                else:
                    j = cycles[i]
                    indices[i], indices[-j] = indices[-j], indices[i]
                    yield tuple(pool[i] for i in indices[:r])
                    break
            else:
                return
# Locate izip_longest under its various historical names; define a
# fallback for Python 2.5 where it does not exist at all.
try:
    # Python 2.6-2.7
    from itertools import izip_longest  # @UnusedImport
except ImportError:
    try:
        # Python 3.0
        from itertools import zip_longest as izip_longest  # @UnusedImport
    except ImportError:
        # Python 2.5
        from itertools import chain, izip, repeat

        def izip_longest(*args, **kwds):
            fillvalue = kwds.get('fillvalue')

            def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):
                # pop() raises IndexError once every other iterator is
                # exhausted, terminating the izip below.
                yield counter()

            fillers = repeat(fillvalue)
            iters = [chain(it, sentinel(), fillers) for it in args]
            try:
                for tup in izip(*iters):
                    yield tup
            except IndexError:
                pass
try:
    from operator import methodcaller  # @UnusedImport
except ImportError:
    # Python 2.5
    # Minimal fallback: methodcaller(name, *a, **kw)(obj) calls
    # obj.name(*a, **kw).
    def methodcaller(name, *args, **kwargs):
        def caller(obj):
            return getattr(obj, name)(*args, **kwargs)
        return caller
try:
    from abc import abstractmethod  # @UnusedImport
except ImportError:
    # Python 2.5
    # Fallback: just tag the function; there is no ABCMeta to enforce it.
    def abstractmethod(funcobj):
        """A decorator indicating abstract methods.
        """
        funcobj.__isabstractmethod__ = True
        return funcobj
| apache-2.0 |
tuxfux-hlp-notes/python-batches | archieves/batch-64/09-modules/myenv/lib/python2.7/site-packages/django/contrib/gis/db/backends/oracle/operations.py | 307 | 9866 | """
This module contains the spatial lookup types, and the `get_geo_where_clause`
routine for Oracle Spatial.
Please note that WKT support is broken on the XE version, and thus
this backend will not work on such platforms. Specifically, XE lacks
support for an internal JVM, and Java libraries are required to use
the WKT constructors.
"""
import re
from django.contrib.gis.db.backends.base.operations import \
BaseSpatialOperations
from django.contrib.gis.db.backends.oracle.adapter import OracleSpatialAdapter
from django.contrib.gis.db.backends.utils import SpatialOperator
from django.contrib.gis.db.models import aggregates
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.db.backends.oracle.base import Database
from django.db.backends.oracle.operations import DatabaseOperations
from django.utils import six
# Tolerance value interpolated into the SDO_GEOM.* calls below
# (kept as a string because it is spliced directly into SQL templates).
DEFAULT_TOLERANCE = '0.05'
class SDOOperator(SpatialOperator):
    """Generic two-argument SDO_* predicate compared against 'TRUE'."""
    sql_template = "%(func)s(%(lhs)s, %(rhs)s) = 'TRUE'"
class SDODistance(SpatialOperator):
    """Distance comparison via SDO_GEOM.SDO_DISTANCE with the default tolerance."""
    sql_template = "SDO_GEOM.SDO_DISTANCE(%%(lhs)s, %%(rhs)s, %s) %%(op)s %%%%s" % DEFAULT_TOLERANCE
class SDODWithin(SpatialOperator):
    """'dwithin' lookup; the bound parameter is a 'distance=...' string."""
    sql_template = "SDO_WITHIN_DISTANCE(%(lhs)s, %(rhs)s, %%s) = 'TRUE'"
class SDODisjoint(SpatialOperator):
    """Disjointness test through SDO_GEOM.RELATE with the DISJOINT mask."""
    sql_template = "SDO_GEOM.RELATE(%%(lhs)s, 'DISJOINT', %%(rhs)s, %s) = 'DISJOINT'" % DEFAULT_TOLERANCE
class SDORelate(SpatialOperator):
    """'relate' lookup using SDO_RELATE with a caller-supplied mask."""
    sql_template = "SDO_RELATE(%(lhs)s, %(rhs)s, 'mask=%(mask)s') = 'TRUE'"

    def check_relate_argument(self, arg):
        """Validate the relate mask (one or more keywords joined with '+')."""
        masks = 'TOUCH|OVERLAPBDYDISJOINT|OVERLAPBDYINTERSECT|EQUAL|INSIDE|COVEREDBY|CONTAINS|COVERS|ANYINTERACT|ON'
        mask_regex = re.compile(r'^(%s)(\+(%s))*$' % (masks, masks), re.I)
        if not isinstance(arg, six.string_types) or not mask_regex.match(arg):
            raise ValueError('Invalid SDO_RELATE mask: "%s"' % arg)

    def as_sql(self, connection, lookup, template_params, sql_params):
        # The mask is spliced into the SQL template rather than bound as a
        # parameter, so pop it off the parameter list first.
        template_params['mask'] = sql_params.pop()
        return super(SDORelate, self).as_sql(connection, lookup, template_params, sql_params)
class OracleOperations(BaseSpatialOperations, DatabaseOperations):
    """
    Spatial operations for the Oracle Spatial backend: maps GeoDjango
    lookups/functions onto SDO_* SQL, and converts values between Oracle
    and GEOS representations.
    """
    name = 'oracle'
    oracle = True
    disallowed_aggregates = (aggregates.Collect, aggregates.Extent3D, aggregates.MakeLine)

    Adapter = OracleSpatialAdapter
    Adaptor = Adapter  # Backwards-compatibility alias.

    # Names of the Oracle SQL functions implementing each spatial operation.
    area = 'SDO_GEOM.SDO_AREA'
    gml = 'SDO_UTIL.TO_GMLGEOMETRY'
    centroid = 'SDO_GEOM.SDO_CENTROID'
    difference = 'SDO_GEOM.SDO_DIFFERENCE'
    distance = 'SDO_GEOM.SDO_DISTANCE'
    extent = 'SDO_AGGR_MBR'
    intersection = 'SDO_GEOM.SDO_INTERSECTION'
    length = 'SDO_GEOM.SDO_LENGTH'
    num_geom = 'SDO_UTIL.GETNUMELEM'
    num_points = 'SDO_UTIL.GETNUMVERTICES'
    perimeter = length
    point_on_surface = 'SDO_GEOM.SDO_POINTONSURFACE'
    reverse = 'SDO_UTIL.REVERSE_LINESTRING'
    sym_difference = 'SDO_GEOM.SDO_XOR'
    transform = 'SDO_CS.TRANSFORM'
    union = 'SDO_GEOM.SDO_UNION'
    unionagg = 'SDO_AGGR_UNION'

    # We want to get SDO Geometries as WKT because it is much easier to
    # instantiate GEOS proxies from WKT than SDO_GEOMETRY(...) strings.
    # However, this adversely affects performance (i.e., Java is called
    # to convert to WKT on every query).  If someone wishes to write a
    # SDO_GEOMETRY(...) parser in Python, let me know =)
    select = 'SDO_UTIL.TO_WKTGEOMETRY(%s)'

    gis_operators = {
        'contains': SDOOperator(func='SDO_CONTAINS'),
        'coveredby': SDOOperator(func='SDO_COVEREDBY'),
        'covers': SDOOperator(func='SDO_COVERS'),
        'disjoint': SDODisjoint(),
        'intersects': SDOOperator(func='SDO_OVERLAPBDYINTERSECT'),  # TODO: Is this really the same as ST_Intersects()?
        'equals': SDOOperator(func='SDO_EQUAL'),
        'exact': SDOOperator(func='SDO_EQUAL'),
        'overlaps': SDOOperator(func='SDO_OVERLAPS'),
        'same_as': SDOOperator(func='SDO_EQUAL'),
        'relate': SDORelate(),  # Oracle uses a different syntax, e.g., 'mask=inside+touch'
        'touches': SDOOperator(func='SDO_TOUCH'),
        'within': SDOOperator(func='SDO_INSIDE'),
        'distance_gt': SDODistance(op='>'),
        'distance_gte': SDODistance(op='>='),
        'distance_lt': SDODistance(op='<'),
        'distance_lte': SDODistance(op='<='),
        'dwithin': SDODWithin(),
    }

    truncate_params = {'relate': None}

    def geo_quote_name(self, name):
        """Quote a geometry name, upper-cased per Oracle convention."""
        return super(OracleOperations, self).geo_quote_name(name).upper()

    def get_db_converters(self, expression):
        """
        Append converters that turn Oracle CLOB/WKT results into textual
        values and then into Geometry objects for geometry fields.
        """
        converters = super(OracleOperations, self).get_db_converters(expression)
        internal_type = expression.output_field.get_internal_type()
        geometry_fields = (
            'PointField', 'GeometryField', 'LineStringField',
            'PolygonField', 'MultiPointField', 'MultiLineStringField',
            'MultiPolygonField', 'GeometryCollectionField', 'GeomField',
            'GMLField',
        )
        if internal_type in geometry_fields:
            converters.append(self.convert_textfield_value)
        if hasattr(expression.output_field, 'geom_type'):
            converters.append(self.convert_geometry)
        return converters

    def convert_geometry(self, value, expression, connection, context):
        """Convert a WKT database value into a Geometry, honoring transforms."""
        if value:
            value = Geometry(value)
            if 'transformed_srid' in context:
                value.srid = context['transformed_srid']
        return value

    def convert_extent(self, clob, srid):
        """
        Convert the geometry CLOB returned by SDO_AGGR_MBR into an
        (xmin, ymin, xmax, ymax) 4-tuple, or None for an empty result.
        """
        if clob:
            # Generally, Oracle returns a polygon for the extent -- however,
            # it can return a single point if there's only one Point in the
            # table.
            ext_geom = Geometry(clob.read(), srid)
            gtype = str(ext_geom.geom_type)
            if gtype == 'Polygon':
                # Construct the 4-tuple from the coordinates in the polygon.
                shell = ext_geom.shell
                ll, ur = shell[0][:2], shell[2][:2]
            elif gtype == 'Point':
                ll = ext_geom.coords[:2]
                ur = ll
            else:
                raise Exception('Unexpected geometry type returned for extent: %s' % gtype)
            xmin, ymin = ll
            xmax, ymax = ur
            return (xmin, ymin, xmax, ymax)
        else:
            return None

    def convert_geom(self, value, geo_field):
        """Convert a raw database value (possibly a LOB) into a Geometry."""
        if value:
            if isinstance(value, Database.LOB):
                value = value.read()
            return Geometry(value, geo_field.srid)
        else:
            return None

    def geo_db_type(self, f):
        """
        Returns the geometry database type for Oracle. Unlike other spatial
        backends, no stored procedure is necessary and it's the same for all
        geometry types.
        """
        return 'MDSYS.SDO_GEOMETRY'

    def get_distance(self, f, value, lookup_type):
        """
        Returns the distance parameters given the value and the lookup type.
        On Oracle, geometry columns with a geodetic coordinate system behave
        implicitly like a geography column, and thus meters will be used as
        the distance parameter on them.
        """
        if not value:
            return []
        value = value[0]
        if isinstance(value, Distance):
            if f.geodetic(self.connection):
                dist_param = value.m
            else:
                dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
        else:
            dist_param = value
        # dwithin lookups on Oracle require a special string parameter
        # that starts with "distance=".
        if lookup_type == 'dwithin':
            dist_param = 'distance=%s' % dist_param
        return [dist_param]

    def get_geom_placeholder(self, f, value, compiler):
        """
        Provides a proper substitution value for Geometries that are not in the
        SRID of the field. Specifically, this routine will substitute in the
        SDO_CS.TRANSFORM() function call.
        """
        if value is None:
            return 'NULL'

        def transform_value(val, srid):
            # True when the value's SRID differs from the field's and a
            # coordinate transform must be applied.
            return val.srid != srid
        if hasattr(value, 'as_sql'):
            if transform_value(value, f.srid):
                placeholder = '%s(%%s, %s)' % (self.transform, f.srid)
            else:
                placeholder = '%s'
            # No geometry value used for F expression, substitute in
            # the column name instead.
            sql, _ = compiler.compile(value)
            return placeholder % sql
        else:
            if transform_value(value, f.srid):
                return '%s(SDO_GEOMETRY(%%s, %s), %s)' % (self.transform, value.srid, f.srid)
            else:
                return 'SDO_GEOMETRY(%%s, %s)' % f.srid

    def spatial_aggregate_name(self, agg_name):
        """
        Returns the spatial aggregate SQL name.
        """
        agg_name = 'unionagg' if agg_name.lower() == 'union' else agg_name.lower()
        return getattr(self, agg_name)

    # Routines for getting the OGC-compliant models.
    def geometry_columns(self):
        """Return the model describing Oracle's geometry columns metadata."""
        from django.contrib.gis.db.backends.oracle.models import OracleGeometryColumns
        return OracleGeometryColumns

    def spatial_ref_sys(self):
        """Return the model describing Oracle's spatial reference systems."""
        from django.contrib.gis.db.backends.oracle.models import OracleSpatialRefSys
        return OracleSpatialRefSys

    def modify_insert_params(self, placeholders, params):
        """Drop out insert parameters for NULL placeholder. Needed for Oracle Spatial
        backend due to #10888
        """
        # This code doesn't work for bulk insert cases.
        assert len(placeholders) == 1
        return [[param for pholder, param
                 in six.moves.zip(placeholders[0], params[0]) if pholder != 'NULL'], ]
| gpl-3.0 |
pidah/st2contrib | packs/orion/actions/lib/actions.py | 2 | 7567 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from st2actions.runners.pythonrunner import Action
from orionsdk import SwisClient
class OrionBaseAction(Action):
    """
    Base class for Orion pack actions: wraps a SwisClient connection and
    provides common SWQL query/lookup helpers.
    """

    def __init__(self, config):
        super(OrionBaseAction, self).__init__(config)
        # Populated by connect(); None until then.
        self.client = None
        if "orion" not in self.config:
            raise ValueError("Orion host details not in the config.yaml")

    def connect(self, platform):
        """
        Connect to an Orion platform from the packs config.yaml.
        """
        try:
            self.client = SwisClient(
                self.config['orion'][platform]['host'],
                self.config['orion'][platform]['user'],
                self.config['orion'][platform]['password'])
        except KeyError:
            # Missing platform entry or incomplete credentials.
            raise ValueError("Orion host details not in the config.yaml")

    def query(self, swql, **kargs):
        """
        Run SWQL against the Orion Platform.
        """
        return self.client.query(swql, **kargs)

    def invoke(self, entity, verb, *args):
        """
        Run an Invoke against the Orion Platform.
        """
        return self.client.invoke(entity, verb, *args)

    def create(self, entity, **kargs):
        """
        Run an Create against the Orion Platform.
        """
        return self.client.create(entity, **kargs)

    def node_exists(self, caption, ip_address):
        """
        Check if an Node exists (caption and or ip) on the Orion platform.
        Returns: True or False.
        """
        # A match on either the caption or the IP address counts as existing.
        swql = """SELECT NodeID, IPAddress FROM Orion.Nodes
        WHERE Caption=@caption"""
        kargs = {'caption': caption}
        caption_data = self.query(swql, **kargs)
        if len(caption_data['results']) >= 1:
            self.logger.debug(
                "One (or more) Nodes match '{}' Caption.".format(caption))
            return True
        swql = """SELECT NodeID, IPAddress FROM Orion.Nodes
        WHERE IPAddress=@ip_address"""
        kargs = {'ip_address': ip_address}
        ip_data = self.query(swql, **kargs)
        if len(ip_data['results']) >= 1:
            self.logger.debug(
                "One (or more) Nodes match '{}' IP.".format(ip_address))
            return True
        else:
            return False

    def get_node_id(self, caption):
        """
        Gets an NodeID from the Orion platform.
        Raises: ValueError on muliple or no matching caption.
        Returns: the NodeID (int)
        """
        swql = "SELECT NodeID FROM Orion.Nodes WHERE Caption=@caption"
        kargs = {'caption': caption}
        data = self.query(swql, **kargs)
        if len(data['results']) == 1:
            try:
                return data['results'][0]['NodeID']
            except IndexError:
                raise ValueError("Invalid Node")
        elif len(data['results']) >= 2:
            self.logger.debug(
                "Muliple Nodes match '{}' Caption: {}".format(
                    caption, data))
            raise ValueError("Muliple Nodes match '{}' Caption".format(
                caption))
        elif len(data['results']) == 0:
            self.logger.debug(
                "No Nodes match '{}' Caption: {}".format(
                    caption, data))
            raise ValueError("No matching Caption for '{}'".format(
                caption))

    def get_engine_id(self, poller):
        """
        Takes a poller name (or primary) and returns the EngineID for
        the poller.
        Raises: ValueError on an invaild poller.
        Returns: The EngineID (int)
        """
        if poller == "primary":
            # The primary polling engine is always EngineID 1.
            return 1
        else:
            swql = """SELECT EngineID, ServerName, IP, ServerType
            FROM Orion.Engines
            WHERE ServerName=@poller"""
            kargs = {'poller': poller}
            data = self.query(swql, **kargs)
            if len(data['results']) == 1:
                return data['results'][0]['EngineID']
            else:
                self.send_user_error("Invalid poller name")
                raise ValueError("Invalid poller name")

    def get_ncm_node_id(self, caption):
        """
        Queries the Network configuration Manager nodes table on the Orion
        platform for the NodeID of a given node name (aka NodeCaption).
        Raises: IndexError on Invalid number of nodes (e.g. 0 or 2+).
        Returns: A single node id.
        """
        swql = "SELECT NodeID FROM Cirrus.Nodes WHERE NodeCaption=@node"
        kargs = {'node': caption}
        data = self.query(swql, **kargs)
        if len(data['results']) == 1:
            try:
                return data['results'][0]['NodeID']
            except IndexError:
                raise IndexError("Invalid Node")
        elif len(data['results']) >= 2:
            raise IndexError("Muliple Nodes match '{}' NodeCaption".format(
                caption))
        elif len(data['results']) == 0:
            raise IndexError("No matching NodeCaption for '{}'".format(
                caption))

    def get_ncm_transfer_results(self, transfer_id, sleep_delay=10):
        """
        Gets the completed (waits until finished). NCM job transfer status
        from Orion.
        Retruns: The completed status.
        """
        ts = {}
        # Poll until the transfer leaves the in-progress state (status 1),
        # sleeping sleep_delay seconds between checks.
        while True:
            swql = """SELECT TransferID, Action, Status, ErrorMessage,
            DeviceOutput FROM NCM.TransferResults
            WHERE TransferID=@transfer_id"""
            kargs = {'transfer_id': transfer_id}
            transfer_data = self.query(swql, **kargs)
            status = transfer_data['results'][0]['Status']
            if status == 1:
                time.sleep(sleep_delay)
            elif status == 2:
                ts['status'] = "Complete"
                break
            elif status == 3:
                ts['status'] = "Error"
                ts['ErrorMessage'] = transfer_data['results'][0][
                    'ErrorMessage']
                break
            else:
                ts['status'] = "Unknown"
                ts['ErrorMessage'] = "Invalid stauts: {}".format(status)
                break
        return ts

    def status_code_to_text(self, status):
        """
        Takes an Solarwinds Orion status code and translates it to
        human text and also a colour that can be used in Slack.
        """
        # NOTE(review): codes outside the mapped set (e.g. 4-13) fall
        # through and implicitly return None -- confirm callers handle that.
        if status == 0:
            return ("Unknown", "grey")
        elif status == 1:
            return ("Up", "good")
        elif status == 2:
            return ("Down", "danger")
        elif status == 3:
            return ("Warning", "warning")
        elif status == 14:
            return ("Critical", "danger")

    def send_user_error(self, message):
        """
        Prints an user error message.
        """
        print(message)
| apache-2.0 |
brijeshkesariya/odoo | addons/crm/wizard/crm_phonecall_to_phonecall.py | 337 | 4535 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import time
class crm_phonecall2phonecall(osv.osv_memory):
    """Wizard to schedule or log a follow-up call from an existing phonecall."""
    _name = 'crm.phonecall2phonecall'
    _description = 'Phonecall To Phonecall'

    _columns = {
        'name': fields.char('Call summary', required=True, select=1),
        'user_id': fields.many2one('res.users', "Assign To"),
        'contact_name': fields.char('Contact'),
        'phone': fields.char('Phone'),
        # Restrict to phonecall categories of the selected team (or global ones).
        'categ_id': fields.many2one('crm.case.categ', 'Category', \
            domain="['|',('section_id','=',False),('section_id','=',section_id),\
            ('object_id.model', '=', 'crm.phonecall')]"),
        'date': fields.datetime('Date'),
        'section_id': fields.many2one('crm.case.section', 'Sales Team'),
        'action': fields.selection([('schedule', 'Schedule a call'), ('log', 'Log a call')], 'Action', required=True),
        'partner_id': fields.many2one('res.partner', "Partner"),
        'note': fields.text('Note')
    }

    def action_cancel(self, cr, uid, ids, context=None):
        """
        Closes Phonecall to Phonecall form
        """
        return {'type': 'ir.actions.act_window_close'}

    def action_schedule(self, cr, uid, ids, context=None):
        """Create the follow-up phonecall(s) and open the phonecall view."""
        value = {}
        if context is None:
            context = {}
        phonecall = self.pool.get('crm.phonecall')
        # Source phonecalls come from the wizard's launching context.
        phonecall_ids = context and context.get('active_ids') or []
        for this in self.browse(cr, uid, ids, context=context):
            phocall_ids = phonecall.schedule_another_phonecall(cr, uid, phonecall_ids, this.date, this.name, \
                    this.user_id and this.user_id.id or False, \
                    this.section_id and this.section_id.id or False, \
                    this.categ_id and this.categ_id.id or False, \
                    action=this.action, context=context)
        return phonecall.redirect_phonecall_view(cr, uid, phocall_ids[phonecall_ids[0]], context=context)

    def default_get(self, cr, uid, fields, context=None):
        """
        This function gets default values
        """
        res = super(crm_phonecall2phonecall, self).default_get(cr, uid, fields, context=context)
        record_id = context and context.get('active_id', False) or False
        res.update({'action': 'schedule', 'date': time.strftime('%Y-%m-%d %H:%M:%S')})
        if record_id:
            # Pre-fill the wizard from the phonecall it was launched on.
            phonecall = self.pool.get('crm.phonecall').browse(cr, uid, record_id, context=context)
            categ_id = False
            data_obj = self.pool.get('ir.model.data')
            try:
                # Default category: the stock "Outbound" phonecall category.
                res_id = data_obj._get_id(cr, uid, 'crm', 'categ_phone2')
                categ_id = data_obj.browse(cr, uid, res_id, context=context).res_id
            except ValueError:
                pass
            if 'name' in fields:
                res.update({'name': phonecall.name})
            if 'user_id' in fields:
                res.update({'user_id': phonecall.user_id and phonecall.user_id.id or False})
            if 'date' in fields:
                res.update({'date': False})
            if 'section_id' in fields:
                res.update({'section_id': phonecall.section_id and phonecall.section_id.id or False})
            if 'categ_id' in fields:
                res.update({'categ_id': categ_id})
            if 'partner_id' in fields:
                res.update({'partner_id': phonecall.partner_id and phonecall.partner_id.id or False})
        return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
nbeaver/numpy | numpy/lib/arraysetops.py | 109 | 13815 | """
Set operations for 1D numeric arrays based on sorting.
:Contains:
ediff1d,
unique,
intersect1d,
setxor1d,
in1d,
union1d,
setdiff1d
:Notes:
For floating point arrays, inaccurate results may appear due to usual round-off
and floating point comparison issues.
Speed could be gained in some operations by an implementation of
sort(), that can provide directly the permutation vectors, avoiding
thus calls to argsort().
To do: Optionally return indices analogously to unique for all functions.
:Author: Robert Cimrman
"""
from __future__ import division, absolute_import, print_function
import numpy as np
__all__ = [
'ediff1d', 'intersect1d', 'setxor1d', 'union1d', 'setdiff1d', 'unique',
'in1d'
]
def ediff1d(ary, to_end=None, to_begin=None):
    """
    The differences between consecutive elements of an array.

    Parameters
    ----------
    ary : array_like
        If necessary, will be flattened before the differences are taken.
    to_end : array_like, optional
        Number(s) to append at the end of the returned differences.
    to_begin : array_like, optional
        Number(s) to prepend at the beginning of the returned differences.

    Returns
    -------
    ediff1d : ndarray
        The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``.

    See Also
    --------
    diff, gradient

    Notes
    -----
    When applied to masked arrays, this function drops the mask information
    if the `to_begin` and/or `to_end` parameters are used.

    Examples
    --------
    >>> x = np.array([1, 2, 4, 7, 0])
    >>> np.ediff1d(x)
    array([ 1,  2,  3, -7])

    The returned array is always 1D.

    >>> np.ediff1d([[1, 2, 4], [1, 6, 24]])
    array([ 1,  2, -3,  5, 18])
    """
    flat = np.asanyarray(ary).flat
    diffs = flat[1:] - flat[:-1]
    if to_begin is None and to_end is None:
        # Common case: nothing to attach, so skip the hstack copy.
        return diffs
    # Assemble [to_begin, diffs, to_end], dropping the absent pieces.
    pieces = [part for part in (to_begin, diffs, to_end) if part is not None]
    return np.hstack(pieces)
def unique(ar, return_index=False, return_inverse=False, return_counts=False):
    """
    Find the unique elements of an array.

    Returns the sorted unique elements of an array. There are three optional
    outputs in addition to the unique elements: the indices of the input array
    that give the unique values, the indices of the unique array that
    reconstruct the input array, and the number of times each unique value
    comes up in the input array.

    Parameters
    ----------
    ar : array_like
        Input array. This will be flattened if it is not already 1-D.
    return_index : bool, optional
        If True, also return the indices of `ar` that result in the unique
        array.
    return_inverse : bool, optional
        If True, also return the indices of the unique array that can be used
        to reconstruct `ar`.
    return_counts : bool, optional
        If True, also return the number of times each unique value comes up
        in `ar`.

    Returns
    -------
    unique : ndarray
        The sorted unique values.
    unique_indices : ndarray, optional
        The indices of the first occurrences of the unique values in the
        (flattened) original array. Only provided if `return_index` is True.
    unique_inverse : ndarray, optional
        The indices to reconstruct the (flattened) original array from the
        unique array. Only provided if `return_inverse` is True.
    unique_counts : ndarray, optional
        The number of times each of the unique values comes up in the
        original array. Only provided if `return_counts` is True.

    Examples
    --------
    >>> np.unique([1, 1, 2, 2, 3, 3])
    array([1, 2, 3])
    >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
    >>> u, indices = np.unique(a, return_inverse=True)
    >>> u[indices]
    array([1, 2, 6, 4, 2, 3, 2])
    """
    ar = np.asanyarray(ar).flatten()

    optional_indices = return_index or return_inverse
    optional_returns = optional_indices or return_counts

    if ar.size == 0:
        if not optional_returns:
            ret = ar
        else:
            ret = (ar,)
            # BUGFIX: these empty index arrays previously used np.bool,
            # an alias removed in NumPy 1.24; index arrays should use
            # np.intp, consistent with the non-empty code path below.
            if return_index:
                ret += (np.empty(0, np.intp),)
            if return_inverse:
                ret += (np.empty(0, np.intp),)
            if return_counts:
                ret += (np.empty(0, np.intp),)
        return ret

    if optional_indices:
        # mergesort is stable, guaranteeing the *first* occurrence is the
        # one reported when return_index is requested.
        perm = ar.argsort(kind='mergesort' if return_index else 'quicksort')
        aux = ar[perm]
    else:
        ar.sort()
        aux = ar
    # flag[i] is True at the start of each run of equal values.
    flag = np.concatenate(([True], aux[1:] != aux[:-1]))

    if not optional_returns:
        ret = aux[flag]
    else:
        ret = (aux[flag],)
        if return_index:
            ret += (perm[flag],)
        if return_inverse:
            # Rank of each element's run, scattered back to original order.
            iflag = np.cumsum(flag) - 1
            inv_idx = np.empty(ar.shape, dtype=np.intp)
            inv_idx[perm] = iflag
            ret += (inv_idx,)
        if return_counts:
            # Run lengths = differences between run start positions.
            idx = np.concatenate(np.nonzero(flag) + ([ar.size],))
            ret += (np.diff(idx),)
    return ret
def intersect1d(ar1, ar2, assume_unique=False):
    """
    Find the intersection of two arrays.

    Return the sorted, unique values that appear in both input arrays.

    Parameters
    ----------
    ar1, ar2 : array_like
        Input arrays.
    assume_unique : bool
        If True, both inputs are assumed to already contain only unique
        values, which allows the deduplication pass to be skipped.
        Default is False.

    Returns
    -------
    intersect1d : ndarray
        Sorted 1D array of common and unique elements.

    See Also
    --------
    numpy.lib.arraysetops : Module with a number of other functions for
        performing set operations on arrays.

    Examples
    --------
    >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1])
    array([1, 3])

    To intersect more than two arrays, use functools.reduce:

    >>> from functools import reduce
    >>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
    array([3])
    """
    # Deduplicate each input first unless the caller guarantees uniqueness.
    if not assume_unique:
        # Might be faster than unique( intersect1d( ar1, ar2 ) )?
        ar1 = unique(ar1)
        ar2 = unique(ar2)
    # Once both inputs hold unique values, any element occurring twice in
    # the sorted concatenation must be present in both arrays.
    merged = np.concatenate((ar1, ar2))
    merged.sort()
    duplicate_mask = merged[1:] == merged[:-1]
    return merged[:-1][duplicate_mask]
def setxor1d(ar1, ar2, assume_unique=False):
    """
    Find the set exclusive-or of two arrays.

    Return the sorted, unique values that are in only one (not both) of the
    input arrays.

    Parameters
    ----------
    ar1, ar2 : array_like
        Input arrays.
    assume_unique : bool
        If True, the input arrays are both assumed to be unique, which
        can speed up the calculation.  Default is False.

    Returns
    -------
    setxor1d : ndarray
        Sorted 1D array of unique values that are in only one of the input
        arrays.

    Examples
    --------
    >>> a = np.array([1, 2, 3, 2, 4])
    >>> b = np.array([2, 3, 5, 7, 5])
    >>> np.setxor1d(a,b)
    array([1, 4, 5, 7])
    """
    if not assume_unique:
        ar1 = unique(ar1)
        ar2 = unique(ar2)
    merged = np.concatenate((ar1, ar2))
    if merged.size == 0:
        return merged
    merged.sort()
    # With unique inputs, a shared value appears exactly twice (adjacent
    # after sorting).  Mark where each run of equal values begins, with
    # sentinel True flags on both ends.
    starts = np.concatenate(([True], merged[1:] != merged[:-1], [True]))
    # A value belongs to exactly one input iff its run has length one,
    # i.e. two consecutive "start" flags match.
    solo = starts[1:] == starts[:-1]
    return merged[solo]
def in1d(ar1, ar2, assume_unique=False, invert=False):
    """
    Test whether each element of a 1-D array is also present in a second array.

    Returns a boolean array the same length as `ar1` that is True
    where an element of `ar1` is in `ar2` and False otherwise.

    Parameters
    ----------
    ar1 : (M,) array_like
        Input array.
    ar2 : array_like
        The values against which to test each value of `ar1`.
    assume_unique : bool, optional
        If True, the input arrays are both assumed to be unique, which
        can speed up the calculation.  Default is False.
    invert : bool, optional
        If True, the values in the returned array are inverted (that is,
        False where an element of `ar1` is in `ar2` and True otherwise).
        Default is False. ``np.in1d(a, b, invert=True)`` is equivalent
        to (but is faster than) ``np.invert(in1d(a, b))``.

        .. versionadded:: 1.8.0

    Returns
    -------
    in1d : (M,) ndarray, bool
        The values `ar1[in1d]` are in `ar2`.

    See Also
    --------
    numpy.lib.arraysetops : Module with a number of other functions for
        performing set operations on arrays.

    Notes
    -----
    `in1d` can be considered as an element-wise function version of the
    python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly
    equivalent to ``np.array([item in b for item in a])``.
    However, this idea fails if `ar2` is a set, or similar (non-sequence)
    container:  As ``ar2`` is converted to an array, in those cases
    ``asarray(ar2)`` is an object array rather than the expected array of
    contained values.

    .. versionadded:: 1.4.0

    Examples
    --------
    >>> test = np.array([0, 1, 2, 5, 0])
    >>> states = [0, 2]
    >>> mask = np.in1d(test, states)
    >>> mask
    array([ True, False,  True, False,  True], dtype=bool)
    >>> test[mask]
    array([0, 2, 0])
    >>> mask = np.in1d(test, states, invert=True)
    >>> mask
    array([False,  True, False,  True, False], dtype=bool)
    >>> test[mask]
    array([1, 5])
    """
    # Ravel both arrays, behavior for the first array could be different
    ar1 = np.asarray(ar1).ravel()
    ar2 = np.asarray(ar2).ravel()

    # This code is significantly faster when the condition is satisfied
    # (empirically tuned threshold): a simple per-element comparison loop.
    if len(ar2) < 10 * len(ar1) ** 0.145:
        # BUGFIX: use the builtin `bool` instead of the deprecated alias
        # `np.bool`, which was removed in NumPy 1.24 and now raises
        # AttributeError.  `dtype=bool` is equivalent on all versions.
        if invert:
            mask = np.ones(len(ar1), dtype=bool)
            for a in ar2:
                mask &= (ar1 != a)
        else:
            mask = np.zeros(len(ar1), dtype=bool)
            for a in ar2:
                mask |= (ar1 == a)
        return mask

    # Otherwise use sorting
    if not assume_unique:
        ar1, rev_idx = np.unique(ar1, return_inverse=True)
        ar2 = np.unique(ar2)

    ar = np.concatenate((ar1, ar2))
    # We need this to be a stable sort, so always use 'mergesort'
    # here. The values from the first array should always come before
    # the values from the second array.
    order = ar.argsort(kind='mergesort')
    sar = ar[order]
    # Equal neighbors in the stable-sorted concatenation mark values of
    # `ar1` that also occur in `ar2` (logic flipped when `invert`).
    if invert:
        bool_ar = (sar[1:] != sar[:-1])
    else:
        bool_ar = (sar[1:] == sar[:-1])
    flag = np.concatenate((bool_ar, [invert]))
    # Scatter the flags back to the pre-sort positions.
    ret = np.empty(ar.shape, dtype=bool)
    ret[order] = flag

    if assume_unique:
        return ret[:len(ar1)]
    else:
        return ret[rev_idx]
def union1d(ar1, ar2):
    """
    Find the union of two arrays.

    Return the unique, sorted array of values that are in either of the two
    input arrays.

    Parameters
    ----------
    ar1, ar2 : array_like
        Input arrays. They are flattened if they are not already 1D.

    Returns
    -------
    union1d : ndarray
        Unique, sorted union of the input arrays.

    See Also
    --------
    numpy.lib.arraysetops : Module with a number of other functions for
        performing set operations on arrays.

    Examples
    --------
    >>> np.union1d([-1, 0, 1], [-2, 0, 2])
    array([-2, -1,  0,  1,  2])

    To find the union of more than two arrays, use functools.reduce:

    >>> from functools import reduce
    >>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
    array([1, 2, 3, 4, 6])
    """
    # Deduplicating the concatenation yields the sorted union directly.
    combined = np.concatenate((ar1, ar2))
    return unique(combined)
def setdiff1d(ar1, ar2, assume_unique=False):
    """
    Find the set difference of two arrays.

    Return the sorted, unique values in `ar1` that are not in `ar2`.

    Parameters
    ----------
    ar1 : array_like
        Input array.
    ar2 : array_like
        Input comparison array.
    assume_unique : bool
        If True, the input arrays are both assumed to be unique, which
        can speed up the calculation.  Default is False.

    Returns
    -------
    setdiff1d : ndarray
        Sorted 1D array of values in `ar1` that are not in `ar2`.

    See Also
    --------
    numpy.lib.arraysetops : Module with a number of other functions for
        performing set operations on arrays.

    Examples
    --------
    >>> a = np.array([1, 2, 3, 2, 4, 1])
    >>> b = np.array([3, 4, 5, 6])
    >>> np.setdiff1d(a, b)
    array([1, 2])
    """
    # Normalize inputs: deduplicate unless the caller guarantees
    # uniqueness, in which case only flattening of `ar1` is needed.
    if not assume_unique:
        ar1 = unique(ar1)
        ar2 = unique(ar2)
    else:
        ar1 = np.asarray(ar1).ravel()
    # Keep the elements of `ar1` that do NOT appear in `ar2`.
    return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)]
| bsd-3-clause |
tchlux/fmodpy | fmodpy/parsing/argument.py | 1 | 36329 | from . import pop_group
# --------------------------------------------------------------------
# Base class for all "Argument" objects that contain information about
# the pieces of data that need to be passed between Python and Fortran.
class Argument:
    """Description of a single Fortran argument (or module attribute).

    Subclasses override `type`, `c_types`, `c_types_arrays`, and related
    class attributes; the methods of this class then generate both the
    Fortran BIND(C) wrapper code and the Python ctypes wrapper code used
    to marshal this argument across the language boundary.
    """
    parent = None # Container of this argument (Module, Subroutine, etc.)
    type = "" # argument type (REAL, INTEGER, etc.)
    name = "" # name of this argument in code
    size = "" # size in bytes (uses compiled code to evaluate)
    kind = "" # KIND setting
    intent = "INOUT" # INTENT setting
    show_intent = True # if True then include INTENT in __str__
    kind_prefix = "KIND=" # String that precedes kind declaration in Fortran
    allocatable = False # May be (re)allocated at runtime.
    optional = False # May be excluded from input.
    save = False # Save value between executions.
    value = False # Pass by value, not by reference.
    pointer = False # Pass by pointer, not by reference.
    parameter = False # Whether this argument is a parameter (set at compile time)
    dimension = None # If array, this list describes the shape and size.
    c_types = {} # The C-types used to declare this argument (key is self.size)
    c_types_arrays = {} # The C-types used to declare this argument as arrays.
    default_singleton = "1" # The default value assigned to a singleton.
# Properties are used to add wrapper logic around retrieving types.
@property
def c_type(self):
if (self.size not in self.c_types):
raise(NotImplementedError(f"\n\nUnrecognized size '{self.size}' for argument '{self.name}', no known corresponding C type."))
return self.c_types[self.size]
@property
def c_type_array(self):
return self.c_types_arrays.get(self.size, self.c_type)
@property
def py_type(self): return None
# ----------------------------------------------------------------
# Generating Fortran Wrapper
# Return the name of this variable that should be used locally,
# safe for use with RESULT values from FUNCTIONs that do not
# define a separate name for results.
#
# WARNING: This function returns a string not a list, because the
# size checking will have an exact 1-1 mapping of arguments.
def function_safe_name(self):
name = self.name
if (self.parent is not None) and (self.parent.name == self.name):
name += "_RESULT"
return name
# Return the string name of this variable for "PRESENT" checking.
# WARNING: This is only used by "OPTIONAL" arguments.
def fort_present_name(self): return self.name + "_PRESENT"
# WARNING: This function returns a string not a list, because the
# Fortran call will have an exact 1-1 mapping of arguments.
def fort_call_name(self):
name = self.name
if (self.parent is not None) and (self.parent.name == self.name):
name += "_RESULT"
elif (self.allocatable):
name += "_LOCAL"
return name
# Names of arguments that will be given to the Fortran wrapper.
def fort_input(self):
names = []
# Add a boolean "_PRESENT" for this variable if optional.
if (self.optional):
names.append(self.name+"_PRESENT")
# Add extra arguments for the dimension sizes.
if (self.dimension is not None):
for i in range(len(self.dimension)):
names.append(f"{self.name}_DIM_{i+1}")
# Append the actual name of this variable.
names.append(self.name)
# Add an identifier to this variable if it has the name of a function.
if (self.parent is not None) and (self.parent.name == self.name):
names[-1] += "_RESULT"
return names
    # Lines of Fortran code necessary to declare variables for this argument.
    def fort_declare(self):
        """Return the Fortran declaration lines for this argument.

        Emits, in order: the PRESENT flag for OPTIONAL arguments, one
        INTEGER per dimension (INTENT(IN) for fixed sizes, INTENT(OUT)
        when the wrapped code allocates), a SAVE'd "_LOCAL" variable for
        ALLOCATABLE arguments, and finally the argument itself (an
        ALLOCATABLE result is exposed as an INT64 memory address).

        NOTE(review): temporarily rewrites ``self.dimension`` in place and
        restores it at the end -- not re-entrant.
        """
        lines = []
        if (self.optional):
            lines.append(f"LOGICAL, INTENT(IN) :: {self.name}_PRESENT")
        # If this is an allocatable, the sizes have to be given as output.
        size_type = "IN" if not self.allocatable else "OUT"
        # Add extra arguments for the dimension sizes.
        if (self.dimension is not None):
            og_dimension = self.dimension[:]
            for i in range(len(self.dimension)):
                lines.append(f"INTEGER, INTENT({size_type}) :: {self.name}_DIM_{i+1}")
                # If this is allocatable, the dimension cannot be assumed on input.
                if self.allocatable: self.dimension[i] = ":"
                else: self.dimension[i] = f"{self.name}_DIM_{i+1}"
        # Add a local variable that will have the "ALLOCATABLE" metadata.
        if self.allocatable:
            temp_arg = self.copy()
            temp_arg.save = True
            temp_arg.name += "_LOCAL"
            temp_arg.show_intent = False
            temp_arg.optional= False
            lines.append(str(temp_arg))
        # Append actual name of this variable (with dimension
        # values filled to be the names of the integer inputs
        # and OPTIONAL / ALLOCATABLE turned off).
        temp_arg = self.copy()
        if (self.parent is not None) and (self.parent.name == self.name):
            temp_arg.name += "_RESULT"
        temp_arg.optional = False
        temp_arg.allocatable = False
        # If this is allocatable, it will be returned as an INT64 memory address.
        if self.allocatable:
            temp_arg.dimension = None
            temp_arg.intent = "OUT"
            temp_arg.show_intent = True
            temp_arg.type = "INTEGER"
            temp_arg.kind = "INT64"
        # Add the actual definition of this argument.
        lines.append(str(temp_arg))
        # Reset the dimension, if it had one beforehand.
        if (self.dimension is not None): self.dimension = og_dimension
        # Return the lines of declaration.
        return lines
# Lines of Fortran code that must be executed before the call.
def fort_prepare(self): return []
    # Lines of Fortran code that must be executed after the call.
    def fort_after(self, present=True):
        """Return Fortran lines executed after the wrapped call.

        For ALLOCATABLE outputs: copies the resulting dimension sizes into
        the "_DIM_i" outputs (0 when `present` is False) and, when
        present, stores the address of the local array's first element.

        Parameters:
          present (bool): whether the OPTIONAL argument was provided.
        """
        lines = []
        is_output = ("OUT" in self.intent) or (self.intent == "")
        # Copy over the size of the output allocatable array if it was not known.
        if (is_output and self.allocatable):
            if (self.dimension is None): raise NotImplementedError
            for i in range(1,len(self.dimension)+1):
                if present: size = f"SIZE({self.function_safe_name()}_LOCAL,{i})"
                else: size = "0"
                lines.append(f"{self.function_safe_name()}_DIM_{i} = {size}")
            # Copy the allocatable address only when it is present.
            if present:
                first_pos = ",".join(["1"]*len(self.dimension))
                lines.append(f"{self.function_safe_name()} = LOC({self.name}_LOCAL({first_pos}))")
        # Return the list of lines.
        return lines
# ----------------------------------------------------------------
# Generating Python Wrapper
# The names of arguments as seen in the Python function declaration.
def py_input(self):
# If this is not allowed as input, do not include it.
if (not self._allowed_input()): return []
# This object is now either mutable or partially an input.
elif self._is_optional(): return [self.name.lower()+"=None"]
else: return [self.name.lower()]
    # The lines of Python code to execute before the Fortran call.
    def py_declare(self):
        """Return Python lines declaring/converting this argument pre-call.

        Handles: the "_present" ctypes flag for OPTIONAL arguments,
        default values for omitted optional inputs, conversion of array
        inputs to f_contiguous NumPy arrays of the correct dtype (with a
        warning when a copy is made), conversion of scalar inputs to
        ctypes objects, and creation of the "_dim_i" ctypes size
        variables.
        """
        lines = []
        # This is where output arrays whose dimensions are known can
        # be initialized. Everything else behaves normally.
        py_name = self.name.lower()
        # If this is literally an optional, a boolean is required.
        if self.optional:
            lines.append(f"{py_name}_present = ctypes.c_bool(True)")
        # If inputs can be given, and they're optional, checks are needed.
        if (self._allowed_input() and self._is_optional()):
            lines.append(f"if ({py_name} is None):")
            # If this is an optional argument, make it "not present".
            if self.optional:
                lines.append(f"    {py_name}_present = ctypes.c_bool(False)")
            # If this is a singleton, set its default value.
            if (self.dimension is None):
                lines.append(f"    {py_name} = {self.c_type}()")
            # This has a dimension AND is allocatable.
            elif (self.allocatable):
                if (self.optional):
                    default_shape = ','.join('0'*len(self.dimension))
                    lines.append(f"    {py_name} = numpy.zeros(shape=({default_shape}), dtype={self.c_type_array}, order='F')")
                    lines.append(f"elif (type({py_name}) == bool) and ({py_name}):")
                else:
                    lines.append(f"    {py_name} = ctypes.c_void_p()")
                default_shape = ','.join('1'*len(self.dimension))
                # If the size is known, then we can initialize this optional array to pass it in.
                if (self._known_size()):
                    default_shape = ', '.join(self._default_size()).lower()
                # Create lines for initializing the default values.
                if (self._is_input() or self.optional):
                    lines.append(f"    {py_name} = numpy.zeros(shape=({default_shape}), dtype={self.c_type_array}, order='F')")
                else:
                    lines.append(f"    {py_name} = ctypes.c_void_p()")
            # This has a dimension, but is NOT allocatable.
            else:
                default_shape = ','.join('1'*len(self.dimension))
                # If this is optional (not present shape is always 1).
                if (self.optional):
                    lines.append(f"    {py_name} = numpy.zeros(shape=({default_shape}), dtype={self.c_type_array}, order='F')")
                    lines.append(f"elif (type({py_name}) == bool) and ({py_name}):")
                # If the size is known, then we can initialize this optional array to pass it in.
                if (self._known_size()):
                    default_shape = ', '.join(self._default_size()).lower()
                # Create lines for initializing the default values.
                lines.append(f"    {py_name} = numpy.zeros(shape=({default_shape}), dtype={self.c_type_array}, order='F')")
        # Convert appropriate array-inputs into Fortran compatible arrays.
        if ((self.dimension is not None) and (self._allowed_input())):
            # Check for Fortran-continuity and data type of array inputs.
            p, s = "", "" # <- prefix, spaces
            if self._is_optional(): p, s = f"el", "  "
            lines += [f"{p}if ((not issubclass(type({py_name}), numpy.ndarray)) or",
                      f"{s}    (not numpy.asarray({py_name}).flags.f_contiguous) or",
                      f"{s}    (not ({py_name}.dtype == numpy.dtype({self.c_type_array})))):",
                      "    import warnings",
                      f"    warnings.warn(\"The provided argument '{py_name}' was not an f_contiguous NumPy array of type '{self.c_type_array}' (or equivalent). Automatically converting (probably creating a full copy).\")",
                      f"    {py_name} = numpy.asarray({py_name}, dtype={self.c_type_array}, order='F')",]
        # If this is an output-only allocatable, declare a local pointer.
        elif ((not self.optional) and self._is_output() and self.allocatable):
            lines.append(f"{py_name} = ctypes.c_void_p()")
        # Otherwise this an output immutable type, declare it locally.
        elif (not self._allowed_input()):
            lines.append(f"{py_name} = {self.c_type}()")
        # This is a singleton input, convert to appropraite C type.
        elif (self._is_input() and (self.dimension is None)):
            lines.append(f"if (type({py_name}) is not {self.c_type}): {py_name} = {self.c_type}({py_name})")
        # Define the dimension size variables if appropriate.
        if (self.dimension is not None):
            # If this object is allowed as input..
            if (self._allowed_input()):
                # This is optional, so we need to check what to do with dimensions.
                if (self.optional):
                    lines.append(f"if ({py_name}_present):")
                    for i in range(len(self.dimension)):
                        lines.append(f"    {py_name}_dim_{i+1} = ctypes.c_int({py_name}.shape[{i}])")
                    lines.append("else:")
                    for i in range(len(self.dimension)):
                        lines.append(f"    {py_name}_dim_{i+1} = ctypes.c_int()")
                    if self.allocatable:
                        lines.append(f"{py_name} = ctypes.c_void_p({py_name}.ctypes.data)")
                # This is not optional, so it is declared at this point, get the dimensions.
                else:
                    for i in range(len(self.dimension)):
                        lines.append(f"{py_name}_dim_{i+1} = ctypes.c_int({py_name}.shape[{i}])")
            # This array is output, initialize dimension memory locations.
            else:
                for i in range(len(self.dimension)):
                    lines.append(f"{py_name}_dim_{i+1} = ctypes.c_int()")
        # Return lines of declaration code.
        return lines
# The names of arguments passed to the "BIND(C)" Fortran code.
def py_call(self):
names = []
py_name = self.name.lower()
# Add a boolean "_PRESENT" for this variable if optional.
if (self.optional): names.append("ctypes.byref("+py_name+"_present)")
# Add extra arguments for the dimension sizes.
if (self.dimension is not None):
for i in range(len(self.dimension)):
names.append(f"ctypes.byref({py_name}_dim_{i+1})")
# Append the actual name of this variable.
names.append(py_name)
# If this is a memory view, then access the first element
# (start of block of memory).
if ((self.dimension is not None) and (not self.allocatable)):
names[-1] = "ctypes.c_void_p(" + names[-1] + ".ctypes.data)"
else:
names[-1] = "ctypes.byref(" + names[-1] + ")"
# Return the list of names.
return names
    # The lines of Python code that must be executed after the Fortran call.
    def py_after(self):
        """Return Python lines run after the Fortran call returns.

        For ALLOCATABLE output arrays: computes the flattened size from
        the returned "_dim_i" values, wraps the returned pointer in a
        NumPy array without copying, reshapes Fortran-ordered data, and
        maps absent / empty results to None or an empty array.
        """
        lines = []
        if self.allocatable and self._is_output():
            py_name = self.name.lower()
            # This must be an array argument and it must be 'INTENT(OUT)'.
            if (self.dimension is None): raise(NotImplementedError)
            # Get the pointer to the first index.
            local_dims = [f"{py_name}_dim_{i+1}.value" for i in range(len(self.dimension))]
            # Compute the size of the (flattened) array.
            lines += [f"{py_name}_size = ({') * ('.join(local_dims)})"]
            shape = ','.join(local_dims[::-1])
            # Make a line that checks if the array should be None.
            if self.optional:
                check_line = [f"if ({py_name}_present) and ({py_name}_size > 0):"]
            else:
                check_line = [f"if ({py_name}_size > 0):"]
            lines += check_line
            # Get flat array pointer and reshape by the dimensions
            # reversed, then transpose (f_contiguous).
            lines += [f"    {py_name} = numpy.array(ctypes.cast({py_name}, ctypes.POINTER({self.c_type}*{py_name}_size)).contents, copy=False)"]
            # If the array type does not match the singleton type, use a view.
            if (self.c_type != self.c_type_array):
                lines[-1] += f".view({self.c_type_array})"
            # If this is a tensor, then reshape it (from C style, row major) to Fortran style.
            if (len(self.dimension) > 1):
                lines += [f"    {py_name} = {py_name}.reshape({shape}).T"]
            # Otherwise, (if not present) this output is None.
            lines += [f"elif ({py_name}_size == 0):",
                      f"    {py_name} = numpy.zeros({shape}, dtype={self.c_type_array}, order='F')",
                      f"else:",
                      f"    {py_name} = None"]
        # Return all lines.
        return lines
# The name of the argument in the final "return" line to Python.
def py_return(self):
names = []
if (self._is_output()):
# Transform array memory views back into NumPy arrays.
is_pointer = (self._is_output() and self.allocatable)
# Add this argument to the output.
py_name = self.name.lower()
value = py_name
# Retrieve the Python "value" of the C object if appropriate.
if (self.dimension is None): value += ".value"
# Return None for missing optional returns.
if self.optional: names.append(f"({value} if {py_name}_present else None)")
else: names.append(f"{value}")
# Return the processed name.
return names
# ----------------------------------------------------------------
# Module Attribute Access
    # Define a 'getter' function in Python for a public module attribute.
    def py_getter(self):
        """Return the lines of a Python ``get_<name>`` method for this
        module attribute.

        The generated function calls the BIND(C) getter via ctypes,
        returns None for unallocated ALLOCATABLE attributes, and rebuilds
        arrays from the returned pointer and dimension sizes.  Only valid
        when `self.parent` is a MODULE.
        """
        py_name = self.name.lower()
        call_args = []
        lines = [f'def get_{py_name}(self):']
        # If this is not in a MODULE, then that was unexpected.
        if ((self.parent == None) or (self.parent.type != "MODULE")):
            raise(NotImplementedError)
        # If this module attribute is allocatable, we might need to return None.
        if (self.allocatable):
            call_args.append(f'ctypes.byref({py_name}_allocated)')
            lines.append(f'    {py_name}_allocated = ctypes.c_bool(False)')
        # Define the dimension size variables if appropriate.
        if (self.dimension is not None):
            for i in range(len(self.dimension)):
                lines.append(f'    {py_name}_dim_{i+1} = ctypes.c_int()')
                call_args.append(f'ctypes.byref({py_name}_dim_{i+1})')
        # Define the actual attribute holder itself.
        if (self.allocatable or (self.dimension is not None)):
            lines += [f'    {py_name} = ctypes.c_void_p()']
        else:
            lines += [f'    {py_name} = {self.c_type}()']
        call_args.append(f'ctypes.byref({py_name})')
        # Make the call to the Fortran wrapped function for getting.
        module_name = self.parent.name.lower()
        lines += [f'    clib.{module_name}_get_{py_name}({", ".join(call_args)})']
        # If this allocatable is not allocated, return "None".
        if (self.allocatable):
            lines.append(f'    if (not {py_name}_allocated.value): return None')
        # If this is an array, retreive its value from a pointer.
        if (self.dimension is not None):
            # Get the pointer to the first index.
            local_dims = [f"{py_name}_dim_{i+1}.value" for i in range(len(self.dimension))]
            # Compute the size of the (flattened) array.
            lines.append(f"    {py_name}_size = ({') * ('.join(local_dims)})")
            # Get flat array pointer and reshape by the dimensions
            # reversed, then transpose (f_contiguous).
            lines.append(f"    if ({py_name}_size > 0):")
            lines.append(f"        {py_name} = numpy.array(ctypes.cast({py_name}, ctypes.POINTER({self.c_type}*{py_name}_size)).contents, copy=False)")
            # If the array type does not match the singleton type, use a view.
            if (self.c_type != self.c_type_array):
                lines[-1] += f".view({self.c_type_array})"
            lines.append(f"    else:")
            lines.append(f"        {py_name} = numpy.zeros((0,), dtype={self.c_type}, order='F')")
            # If this is a tensor, then reshape it to be a tensor again.
            if (len(self.dimension) > 1):
                shape = ','.join(local_dims[::-1])
                lines += [f"    {py_name} = {py_name}.reshape({shape}).T"]
            # Return the value.
            lines += [f'    return {py_name}']
        else:
            lines += [f'    return {py_name}.value']
        # Return the lines that compose this "getter".
        return lines
    # Define a 'setter' function in Python for a public module attribute.
    def py_setter(self):
        """Return the lines of a Python ``set_<name>`` method for this
        module attribute.

        PARAMETER attributes get a setter that raises.  Array values are
        coerced to f_contiguous NumPy arrays of the right dtype (with a
        warning on conversion) before calling the BIND(C) setter.  Only
        valid when `self.parent` is a MODULE.
        """
        py_name = self.name.lower()
        lines = [f'def set_{py_name}(self, {py_name}):']
        if self.parameter:
            lines += ["    raise(NotImplementedError('Module attributes with PARAMETER status cannot be set.'))"]
            return lines
        call_args = []
        # If this is not in a MODULE, then that was unexpected.
        if ((self.parent == None) or (self.parent.type != "MODULE")):
            raise(NotImplementedError)
        # Get the size if there is a dimension.
        if (self.dimension is not None):
            lines += [f"    if ((not issubclass(type({py_name}), numpy.ndarray)) or",
                      f"        (not numpy.asarray({py_name}).flags.f_contiguous) or",
                      f"        (not ({py_name}.dtype == numpy.dtype({self.c_type})))):",
                      "        import warnings",
                      f"        warnings.warn(\"The provided argument '{py_name}' was not an f_contiguous NumPy array of type '{self.c_type}' (or equivalent). Automatically converting (probably creating a full copy).\")",
                      f"        {py_name} = numpy.asarray({py_name}, dtype={self.c_type}, order='F')",]
            # Store all the sizes.
            for i in range(len(self.dimension)):
                dim_name = f'{py_name}_dim_{i+1}'
                lines.append(f'    {dim_name} = ctypes.c_int({py_name}.shape[{i}])')
                call_args.append(f'ctypes.byref({dim_name})')
            # Call passing in all arguments (might include sizes).
            call_args.append(f'ctypes.c_void_p({py_name}.ctypes.data)')
        else:
            # Initialize correct c_type version of value.
            lines.append(f'    {py_name} = {self.c_type}({py_name})')
            # Call passing in all arguments (might include sizes).
            call_args.append(f'ctypes.byref({py_name})')
        module_name = self.parent.name.lower()
        lines += [f'    clib.{module_name}_set_{py_name}({", ".join(call_args)})']
        return lines
# Define the 'property' referencing the correct getter and setter.
def py_property(self):
py_name = self.name.lower()
module_name = self.parent.name.lower()
getter_name = f"get_{py_name}"
setter_name = f"set_{py_name}"
return [f'{py_name} = property({getter_name}, {setter_name})']
    # Define the FORTRAN BIND(C) subroutine for retrieving the value
    # of this module attribute.
    def fort_getter(self):
        """Return the lines of a Fortran BIND(C) getter subroutine for
        this module attribute.

        The subroutine USEs the parent module, reports allocation status
        for ALLOCATABLE attributes, returns each dimension size, and
        returns either the value itself (scalars) or the LOC() address of
        the first element (arrays, as INT64).
        """
        # Add extra arguments for the dimension sizes.
        args = []
        decs = [f"USE {self.parent.name}, ONLY: {self.name}"] # declarations
        lines = []
        # Create a local copy.
        temp = self.copy()
        temp.name += "_LOCAL"
        # Get allocatable present input.
        if (self.allocatable):
            args.append(f'{self.name}_ALLOCATED')
            decs.append(f'LOGICAL, INTENT(OUT) :: {self.name}_ALLOCATED')
            lines += [f'{self.name}_ALLOCATED = ALLOCATED({self.name})',
                      f'IF (.NOT. {self.name}_ALLOCATED) RETURN']
        # Get all dimension size inputs.
        if (self.dimension is not None):
            # Arrays are returned as an INT64 memory address instead.
            temp.type = "INTEGER"
            temp.kind = "INT64"
            temp.dimension = None
            temp.allocatable = False
            decs.insert(0,"USE ISO_FORTRAN_ENV, ONLY: INT64")
            for i in range(len(self.dimension)):
                args.append(f"{self.name}_DIM_{i+1}")
                decs.append(f"INTEGER, INTENT(OUT) :: {self.name}_DIM_{i+1}")
                lines.append(f"{self.name}_DIM_{i+1} = SIZE({self.name}, {i+1})")
        # Disable "parameter" status for the local copy.
        if self.parameter: temp.parameter = False
        # Add argument for the actual variable.
        args.append(f"{temp.name}")
        decs += [str(temp)]
        # Insert the declarations before the lines.
        lines = decs + lines
        # Do the assignment.
        if (self.dimension is None):
            lines.append(f"{temp.name} = {self.name}")
        else:
            first_pos = ",".join(["1"]*len(self.dimension))
            lines.append(f"{temp.name} = LOC({self.name}({first_pos}))")
        # Add indentation to all lines.
        lines = ["  "+l for l in lines]
        # Add the subroutine line (with all arguments).
        lines.insert(0, f"SUBROUTINE {self.parent.name}_GET_{self.name}({', '.join(args)}) BIND(C)")
        # Add the end of the subroutine declaration line.
        lines.append(f"END SUBROUTINE {self.parent.name}_GET_{self.name}")
        return lines
    # Define the FORTRAN BIND(C) subroutine for setting the value
    # of this module attribute.
    def fort_setter(self):
        """Return the lines of a Fortran BIND(C) setter subroutine for
        this module attribute (empty list for PARAMETER attributes).

        The subroutine takes one size argument per dimension plus the new
        value, (re)allocates ALLOCATABLE attributes to the given shape,
        and assigns the value.
        """
        # If this is a parameter, it cannot be set.
        if (self.parameter): return []
        # Add extra arguments for the dimension sizes.
        args = []
        lines = []
        # Get all dimension size inputs.
        temp = self.copy()
        temp.name += "_LOCAL"
        temp.dimension = []
        temp.allocatable = False
        if (self.dimension is not None):
            for i in range(len(self.dimension)):
                args.append(f"{self.name}_DIM_{i+1}")
                lines.append(f"INTEGER, INTENT(IN) :: {self.name}_DIM_{i+1}")
                temp.dimension.append(args[-1])
        # Add argument for the actual variable.
        args.append(temp.name)
        # Insert the declaration for the local (input / set) value.
        lines += [str(temp)]
        # Get allocatable present input.
        if (self.allocatable):
            # Reallocate to the incoming shape before assignment.
            lines += [f'IF (ALLOCATED({self.name})) THEN',
                      f'  DEALLOCATE({self.name})',
                      'END IF']
            shape_str = ','.join([f'1:{name}' for name in temp.dimension])
            lines += [f'ALLOCATE({self.name}({shape_str}))']
        lines += [f"{self.name} = {temp.name}"]
        # Add indentation to all lines.
        lines = ["  "+l for l in lines]
        # Add the subroutine line (with all arguments).
        lines.insert(0, f"SUBROUTINE {self.parent.name}_SET_{self.name}({', '.join(args)}) BIND(C)")
        lines.insert(1, f"  USE {self.parent.name}, ONLY: {self.name}")
        # Add the end of the subroutine declaration line.
        lines.append(f"END SUBROUTINE {self.parent.name}_SET_{self.name}")
        return lines
# ----------------------------------------------------------------
# Wrapper Utilities
# Return True if this variable has INTENT(IN).
def _is_input(self): return ("IN" in self.intent)
# Return True if this variable has INTENT(OUT).
def _is_output(self): return ("OUT" in self.intent)
# Check to see if this argument has known size.
def _known_size(self): return (len(self._dimension_args()) == 0)
# Return a list of integers that are the indices of dimensions
# (1-indexed) that require user input definition.
def _dimension_args(self):
if (self.dimension is None): return []
return [i+1 for (i,d) in enumerate(self.dimension) if d in {":","*"}]
# Check to see if this argument is allowed as input.
def _allowed_input(self):
# This has "IN" in the intent.
if self._is_input(): return True
# This variable is strictly OUT.
elif (self.optional): return True
elif (self.dimension is None): return False
elif (self.allocatable): return False
# Could be given pre-allocated array.
return True
# Return True if this argument is optional (for Python users).
def _is_optional(self):
# This is explicitly declared optional.
if (self.optional): return True
# This has "IN" in the intent.
elif (self._is_input()): return False
# This is not allowed to be optional, because it has no input.
elif (not self._allowed_input()): return False
# This is an INTENT(OUT) argument.
# Size is known already.
elif (self._known_size()): return True
# Size will be known after.
elif (self.allocatable): return True
# Need to know the size.
else: return False
    # Compute the default size of this Argument (assuming that
    # len(self._dimension_args()) == 0), in terms of Python, converted
    # from the assumed size of the object in Fortran.
    def _default_size(self):
        """Return Python expressions for this argument's default shape.

        Translates each Fortran DIMENSION expression into equivalent
        NumPy syntax: ``SIZE(a)`` -> ``a.size`` and ``SIZE(a, d)`` ->
        ``a.shape[d-1]``.  Module attribute names referenced in the
        expressions are prefixed with ``self.`` when this argument
        belongs to a MODULE routine.  Raises NotImplementedError when any
        dimension still requires user input.
        """
        if (len(self._dimension_args()) != 0): raise(NotImplementedError)
        sizes = []
        for size in self.dimension:
            size = size.lower().replace(" ","")
            # Replace all occurrences of "SIZE(<arg>, <component>)"
            # with equivalent NumPy array syntax.
            while "size(" in size:
                start_index = size.index("size(") + len("size(")
                before_call = size[:start_index-len("size(")]
                # Scan forward for the matching closing parenthesis
                # (nested parentheses are allowed inside the call).
                parens = 1
                for index in range(start_index, len(size)):
                    if size[index] == "(": parens += 1
                    elif size[index] == ")": parens -= 1
                    if parens <= 0: break
                argument = size[start_index:index]
                after_call = size[index+len(")"):]
                if "," not in argument:
                    # SIZE(a) -> total number of elements.
                    func_replacement = f"{argument}.size"
                else:
                    # SIZE(a, d) -> a.shape[d-1] (Fortran dims are 1-based).
                    name = argument[:argument.index(",")]
                    dim = argument[argument.index(",")+1:]
                    dim = str(int(dim) - 1)
                    func_replacement = f"{name}.shape[{dim}]"
                # Splice the replacement back into the expression.
                size = before_call + func_replacement + after_call
            # Append this NumPy size.
            sizes.append(size)
        # fix any module attribute names to have "self." before them
        if (hasattr(self.parent, "parent") and
            hasattr(self.parent.parent, "type") and
            (self.parent.parent.type == "MODULE")):
            # create a temporary function that says whether or
            # not a character could belong to a python name
            is_not_py_char = lambda c: not (c.isalpha() or c.isdigit() or (c == '_'))
            # cycle the internal attribute of the parent module that
            # might be accessible here
            for arg in self.parent.parent.arguments:
                name = arg.name.lower()
                # cycle all size entries to check their contents
                for i in range(len(sizes)):
                    # if the module attribute is used and it is not just
                    # a substring match, then make the replacement
                    s = sizes[i]
                    if (name in s):
                        j = s.index(name)
                        if ((j == 0) or is_not_py_char(s[j-1])):
                            sizes[i] = s.replace(name, "self."+name)
        # Return the list of sizes.
        return sizes
# ----------------------------------------------------------------
# Generic Methods
    # Generate a deep copy of this argument and return it.
    def copy(self):
        """Return a deep copy of this Argument.

        Constructs a fresh instance of the same subclass (via a minimal
        declaration line) and then copies every public, non-callable
        attribute, using the attribute's own ``.copy()`` when available
        so mutable values (e.g. `dimension`) are not shared.
        """
        arg = type(self)([self.type])
        for attr in dir(self):
            # Skip hidden attributes.
            if (attr[:1] == "_"): continue
            # TODO: Come up with better general way to handle these special cases.
            if (attr in {"c_type", "c_type_array", "py_type"}): continue
            value = getattr(self, attr)
            # Skip executable attributes.
            if (hasattr(value, "__call__")): continue
            # If this attribute has a ".copy" function, then use it.
            if (hasattr(value, "copy") and hasattr(value.copy, "__call__")):
                value = value.copy()
            # Set the attribute in the new argument.
            setattr(arg, attr, value)
        return arg
# Default initialization, process standard Fortran specifications.
# Expects to be given list of strings that comes from a declaration
# line, but with the ":: name1, ..." stripped off the end.
def __init__(self, line, parent=None):
    """Parse a tokenized Fortran declaration into this argument.

    Parameters:
      line (list of str): Declaration tokens, starting with the type
        keyword and with the trailing ":: name1, ..." already removed.
        NOTE: this list is consumed (mutated in place) during parsing.
      parent: The enclosing syntax object (routine / module), if any.

    Raises:
      NotImplementedError: for an empty line, a type mismatch, a stray
        ":" token, an empty DIMENSION/INTENT group, or any
        unrecognized declaration token.
    """
    if (len(line) == 0): raise(NotImplementedError)
    # Make sure this line matches the expected type.
    if (line.pop(0) != self.type): raise(NotImplementedError)
    # Set the parent.
    self.parent = parent
    # If the line is empty now, then we're done (use defaults).
    if (len(line) == 0): return
    # Parse the remainder of the declaration line.
    #
    # Get the KIND declaration, if it was given.
    # NOTE(review): `pop_group` is defined elsewhere in this module;
    # from its use here it appears to split off one parenthesized token
    # group and return (group_tokens, remaining_tokens) -- confirm at
    # its definition.
    group, line = pop_group(line)
    if (len(group) > 0):
        # Accept both the "(KIND = 8)" and the bare "(8)" spellings.
        if (tuple(group[:2]) == ("KIND","=")):
            group = group[2:]
        self.kind = " ".join(group)
    # The rest can come in any order, so loop over possibilities.
    while (len(line) > 0):
        # Skip commas.
        if (line[0] == ","):
            line.pop(0)
            continue
        # The following happen if the ":: names" has not been stripped.
        if (line[0] == ":"): raise(NotImplementedError)
        # Read ALLOCATABLE
        if (line[0] == "ALLOCATABLE"):
            line.pop(0)
            self.allocatable = True
        # Read DIMENSION
        elif (line[0] == "DIMENSION"):
            group, line = pop_group(line[1:])
            if (len(group) == 0): raise(NotImplementedError)
            # Break up dimensions by commas, allow nested parentheses.
            # Only commas at nesting depth zero separate dimensions.
            self.dimension = [""]
            num_open = 0
            while (len(group) > 0):
                next_value = group.pop(0)
                if (next_value == ",") and (num_open == 0):
                    self.dimension.append("")
                else: self.dimension[-1] += next_value
                if (next_value == "("): num_open += 1
                if (next_value == ")"): num_open -= 1
            # Done processing dimension.
        # Read INTENT
        elif (line[0] == "INTENT"):
            group, line = pop_group(line[1:])
            if (len(group) == 0): raise(NotImplementedError)
            self.intent = "".join(group)
        # Read OPTIONAL
        elif (line[0] == "OPTIONAL"):
            line.pop(0)
            self.optional = True
        # Read SAVE
        elif (line[0] == "SAVE"):
            line.pop(0)
            self.save = True
        # Read PARAMETER (discard this)
        elif (line[0] == "PARAMETER"):
            line.pop(0)
            self.parameter = True
        # Read EXTERNAL (discard this)
        elif (line[0] == "EXTERNAL"):
            line.pop(0)
            import warnings
            warnings.warn("fmodpy.parsing.argument: Ignoring 'EXTERNAL' status of argument.")
        # Otherwise, this is an unrecognized argument.
        else:
            raise(NotImplementedError(f"\n\nUnrecognized part of Argument '{line[0]}'.\n"))
# Print the Fortran string declaration of this argument.
def __str__(self):
    """Reconstruct the Fortran declaration line for this argument."""
    declaration = self.type
    if len(self.kind) > 0:
        declaration += f"({self.kind_prefix}{self.kind})"
    # Collect the optional qualifiers in their fixed output order.
    qualifiers = []
    if self.show_intent and (len(self.intent) > 0):
        qualifiers.append(f"INTENT({self.intent})")
    if self.parameter:
        qualifiers.append("PARAMETER")
    if self.optional:
        qualifiers.append("OPTIONAL")
    if self.allocatable:
        qualifiers.append("ALLOCATABLE")
    if self.save:
        qualifiers.append("SAVE")
    if (self.dimension is not None) and (len(self.dimension) > 0):
        qualifiers.append(f"DIMENSION({','.join(self.dimension)})")
    for qualifier in qualifiers:
        declaration += ", " + qualifier
    # Append the declared name, when one is known.
    if len(self.name) > 0:
        declaration += f" :: {self.name}"
    # Return the final string.
    return declaration
| mit |
newyork167/volatility | volatility/plugins/overlays/windows/kpcr_vtypes.py | 58 | 2798 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
# Copyright (c) 2012 Michael Ligh <michael.ligh@mnin.org>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import volatility.obj as obj
class _KPCROnx86(obj.CType):
    """KPCR for 32bit windows"""

    def idt_entries(self):
        """Yield (index, entry) pairs from the interrupt descriptor table."""
        for pair in enumerate(self.IDT.dereference()):
            yield pair

    def gdt_entries(self):
        """Yield (index, entry) pairs from the global descriptor table."""
        for pair in enumerate(self.GDT.dereference()):
            yield pair

    def get_kdbg(self):
        """Find this CPUs KDBG.

        Please note the KdVersionBlock pointer is NULL on all KPCR
        structures except the one for the first CPU.  In some cases on
        x64, even the first CPU has a NULL KdVersionBlock, so this is
        really a hit-or-miss.
        """
        version64 = self.KdVersionBlock.dereference_as("_DBGKD_GET_VERSION64")
        # DebuggerDataList is a pointer to unsigned long on x86 and a
        # pointer to unsigned long long on x64.  The first dereference()
        # follows the pointer and the second reads the stored integer as
        # the actual KDBG address.
        data_list = version64.DebuggerDataList
        return data_list.dereference().dereference_as("_KDDEBUGGER_DATA64")

    @property
    def ProcessorBlock(self):
        # On x86 the per-processor control block member is PrcbData.
        return self.PrcbData
class _KPCROnx64(_KPCROnx86):
    """KPCR for x64 windows"""

    # The x64 KPCR uses different member names than x86; these
    # properties map them onto the names the base class expects.

    @property
    def IDT(self):
        return self.IdtBase

    @property
    def GDT(self):
        return self.GdtBase

    @property
    def ProcessorBlock(self):
        return self.Prcb
class KPCRProfileModification(obj.ProfileModification):
    # Apply before the generic Windows object classes are installed.
    before = ['WindowsObjectClasses']
    conditions = {'os': lambda x: x == 'windows'}

    def modification(self, profile):
        """Install the bitness-appropriate _KPCR class and KPRCB overlay."""
        is_32bit = profile.metadata.get('memory_model', '32bit') == '32bit'
        profile.object_classes.update(
            {'_KPCR': _KPCROnx86 if is_32bit else _KPCROnx64})
        profile.merge_overlay({
            '_KPRCB': [None, {
                'VendorString': [None, ['String', dict(length=13)]],
            }]})
| gpl-2.0 |
skyddv/neutron | neutron/notifiers/nova.py | 9 | 11662 | # Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import auth as ks_auth
from keystoneclient.auth.identity import v2 as v2_auth
from keystoneclient import session as ks_session
from novaclient import client as nova_client
from novaclient import exceptions as nova_exceptions
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from oslo_utils import uuidutils
from sqlalchemy.orm import attributes as sql_attr
from neutron.common import constants
from neutron import context
from neutron.i18n import _LE, _LI, _LW
from neutron import manager
from neutron.notifiers import batch_notifier
LOG = logging.getLogger(__name__)

# Names of the server external events sent to nova for VIF lifecycle
# changes (see Notifier.record_port_status_changed below).
VIF_UNPLUGGED = 'network-vif-unplugged'
VIF_PLUGGED = 'network-vif-plugged'
VIF_DELETED = 'network-vif-deleted'
# Maps a neutron port status onto the event status reported to nova.
NEUTRON_NOVA_EVENT_STATUS_MAP = {constants.PORT_STATUS_ACTIVE: 'completed',
                                 constants.PORT_STATUS_ERROR: 'failed',
                                 constants.PORT_STATUS_DOWN: 'completed'}
# Major version of the nova API the notifier client speaks.
NOVA_API_VERSION = "2"
class DefaultAuthPlugin(v2_auth.Password):
    """A wrapper around standard v2 user/pass to handle bypass url.

    This is only necessary because novaclient doesn't support
    endpoint_override yet - bug #1403329.  When this bug is fixed we can
    pass the endpoint_override to the client instead and remove this
    class.
    """

    def __init__(self, **kwargs):
        # Pull our extra keyword out before delegating to Password.
        self._endpoint_override = kwargs.pop('endpoint_override', None)
        super(DefaultAuthPlugin, self).__init__(**kwargs)

    def get_endpoint(self, session, **kwargs):
        # Prefer the explicit override; fall back to catalog lookup.
        return (self._endpoint_override or
                super(DefaultAuthPlugin, self).get_endpoint(session, **kwargs))
class Notifier(object):
    """Sends batched server external events to nova about neutron port
    and floating-ip changes so nova can track VIF plug/unplug state."""

    def __init__(self):
        # FIXME(jamielennox): A notifier is being created for each Controller
        # and each Notifier is handling it's own auth. That means that we are
        # authenticating the exact same thing len(controllers) times. This
        # should be an easy thing to optimize.
        auth = ks_auth.load_from_conf_options(cfg.CONF, 'nova')
        endpoint_override = None
        # Fall back to the deprecated nova_admin_* options when no auth
        # plugin is configured.
        if not auth:
            LOG.warning(_LW('Authenticating to nova using nova_admin_* options'
                            ' is deprecated. This should be done using'
                            ' an auth plugin, like password'))
            if cfg.CONF.nova_admin_tenant_id:
                endpoint_override = "%s/%s" % (cfg.CONF.nova_url,
                                               cfg.CONF.nova_admin_tenant_id)
            auth = DefaultAuthPlugin(
                auth_url=cfg.CONF.nova_admin_auth_url,
                username=cfg.CONF.nova_admin_username,
                password=cfg.CONF.nova_admin_password,
                tenant_id=cfg.CONF.nova_admin_tenant_id,
                tenant_name=cfg.CONF.nova_admin_tenant_name,
                endpoint_override=endpoint_override)
        session = ks_session.Session.load_from_conf_options(cfg.CONF,
                                                            'nova',
                                                            auth=auth)
        # NOTE(andreykurilin): novaclient.v1_1 was renamed to v2 and there is
        # no way to import the contrib module directly without referencing v2,
        # which would only work for novaclient >= 2.21.0.
        novaclient_cls = nova_client.get_client_class(NOVA_API_VERSION)
        server_external_events = importutils.import_module(
            novaclient_cls.__module__.replace(
                ".client", ".contrib.server_external_events"))
        self.nclient = novaclient_cls(
            session=session,
            region_name=cfg.CONF.nova.region_name,
            extensions=[server_external_events])
        # Events are queued and flushed every send_events_interval
        # seconds via self.send_events.
        self.batch_notifier = batch_notifier.BatchNotifier(
            cfg.CONF.send_events_interval, self.send_events)

    def _is_compute_port(self, port):
        """Return True when `port` looks like a nova instance port:
        a UUID device_id and a device_owner starting with 'compute:'."""
        try:
            if (port['device_id'] and uuidutils.is_uuid_like(port['device_id'])
                and port['device_owner'].startswith('compute:')):
                return True
        except (KeyError, AttributeError):
            # Missing keys / non-dict-like port: not a compute port.
            pass
        return False

    def _get_network_changed_event(self, device_id):
        """Event payload telling nova to refresh networking info for the
        instance identified by `device_id`."""
        return {'name': 'network-changed',
                'server_uuid': device_id}

    def _get_port_delete_event(self, port):
        """Event payload telling nova that the VIF for `port` was deleted."""
        return {'server_uuid': port['device_id'],
                'name': VIF_DELETED,
                'tag': port['id']}

    @property
    def _plugin(self):
        # NOTE(arosen): this cannot be set in __init__ currently since
        # this class is initialized at the same time as NeutronManager()
        # which is decorated with synchronized()
        if not hasattr(self, '_plugin_ref'):
            self._plugin_ref = manager.NeutronManager.get_plugin()
        return self._plugin_ref

    def send_network_change(self, action, original_obj,
                            returned_obj):
        """Called when a network change is made that nova cares about.

        :param action: the event that occurred.
        :param original_obj: the previous value of resource before action.
        :param returned_obj: the body returned to client as result of action.
        """
        if not cfg.CONF.notify_nova_on_port_data_changes:
            return
        # When neutron re-assigns floating ip from an original instance
        # port to a new instance port without disassociate it first, an
        # event should be sent for original instance, that will make nova
        # know original instance's info, and update database for it.
        if (action == 'update_floatingip'
                and returned_obj['floatingip'].get('port_id')
                and original_obj.get('port_id')):
            disassociate_returned_obj = {'floatingip': {'port_id': None}}
            event = self.create_port_changed_event(action, original_obj,
                                                   disassociate_returned_obj)
            self.batch_notifier.queue_event(event)
        event = self.create_port_changed_event(action, original_obj,
                                               returned_obj)
        self.batch_notifier.queue_event(event)

    def create_port_changed_event(self, action, original_obj, returned_obj):
        """Build the nova event for a port / floating-ip change, or
        return None when the affected port is not a compute port."""
        port = None
        if action in ['update_port', 'delete_port']:
            port = returned_obj['port']
        elif action in ['update_floatingip', 'create_floatingip',
                        'delete_floatingip']:
            # NOTE(arosen) if we are associating a floatingip the
            # port_id is in the returned_obj. Otherwise on disassociate
            # it's in the original_object
            port_id = (returned_obj['floatingip'].get('port_id') or
                       original_obj.get('port_id'))
            if port_id is None:
                return
            ctx = context.get_admin_context()
            port = self._plugin.get_port(ctx, port_id)
        if port and self._is_compute_port(port):
            if action == 'delete_port':
                return self._get_port_delete_event(port)
            else:
                return self._get_network_changed_event(port['device_id'])

    def record_port_status_changed(self, port, current_port_status,
                                   previous_port_status, initiator):
        """Determine if nova needs to be notified due to port status change.

        The computed event (if any) is stashed on ``port._notify_event``
        and queued later by send_port_status.  `initiator` is unused here.
        """
        # clear out previous _notify_event
        port._notify_event = None
        # If there is no device_id set there is nothing we can do here.
        if not port.device_id:
            LOG.debug("device_id is not set on port yet.")
            return
        if not port.id:
            LOG.warning(_LW("Port ID not set! Nova will not be notified of "
                            "port status change."))
            return
        # We only want to notify about nova ports.
        if not self._is_compute_port(port):
            return
        # We notify nova when a vif is unplugged which only occurs when
        # the status goes from ACTIVE to DOWN.
        if (previous_port_status == constants.PORT_STATUS_ACTIVE and
                current_port_status == constants.PORT_STATUS_DOWN):
            event_name = VIF_UNPLUGGED
        # We only notify nova when a vif is plugged which only occurs
        # when the status goes from:
        # NO_VALUE/DOWN/BUILD -> ACTIVE/ERROR.
        elif (previous_port_status in [sql_attr.NO_VALUE,
                                       constants.PORT_STATUS_DOWN,
                                       constants.PORT_STATUS_BUILD]
              and current_port_status in [constants.PORT_STATUS_ACTIVE,
                                          constants.PORT_STATUS_ERROR]):
            event_name = VIF_PLUGGED
        # All the remaining state transitions are of no interest to nova
        else:
            LOG.debug("Ignoring state change previous_port_status: "
                      "%(pre_status)s current_port_status: %(cur_status)s"
                      " port_id %(id)s",
                      {'pre_status': previous_port_status,
                       'cur_status': current_port_status,
                       'id': port.id})
            return
        port._notify_event = (
            {'server_uuid': port.device_id,
             'name': event_name,
             'status': NEUTRON_NOVA_EVENT_STATUS_MAP.get(current_port_status),
             'tag': port.id})

    def send_port_status(self, mapper, connection, port):
        # NOTE(review): the (mapper, connection, port) signature matches a
        # sqlalchemy mapper-event callback -- confirm at the caller.  The
        # stashed event may be None; presumably BatchNotifier ignores
        # falsy events -- confirm in batch_notifier.
        event = getattr(port, "_notify_event", None)
        self.batch_notifier.queue_event(event)
        port._notify_event = None

    def send_events(self, batched_events):
        """Deliver queued events to nova and log per-event results."""
        LOG.debug("Sending events: %s", batched_events)
        try:
            response = self.nclient.server_external_events.create(
                batched_events)
        except nova_exceptions.NotFound:
            LOG.warning(_LW("Nova returned NotFound for event: %s"),
                        batched_events)
        except Exception:
            LOG.exception(_LE("Failed to notify nova on events: %s"),
                          batched_events)
        else:
            # A non-list response indicates a top-level error from nova.
            if not isinstance(response, list):
                LOG.error(_LE("Error response returned from nova: %s"),
                          response)
                return
            response_error = False
            for event in response:
                try:
                    code = event['code']
                except KeyError:
                    response_error = True
                    continue
                if code != 200:
                    LOG.warning(_LW("Nova event: %s returned with failed "
                                    "status"), event)
                else:
                    LOG.info(_LI("Nova event response: %s"), event)
            if response_error:
                LOG.error(_LE("Error response returned from nova: %s"),
                          response)
| apache-2.0 |
agaffney/ansible | test/units/parsing/test_unquote.py | 298 | 1602 | # coding: utf-8
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.parsing.quoting import unquote
import pytest
# (quoted input, expected output) pairs covering single, double, nested
# and escaped quotes, plus unbalanced edge cases.
UNQUOTE_DATA = (
    (u'1', u'1'),
    (u'\'1\'', u'1'),
    (u'"1"', u'1'),
    (u'"1 \'2\'"', u'1 \'2\''),
    (u'\'1 "2"\'', u'1 "2"'),
    (u'\'1 \'2\'\'', u'1 \'2\''),
    (u'"1\\"', u'"1\\"'),
    (u'\'1\\\'', u'\'1\\\''),
    (u'"1 \\"2\\" 3"', u'1 \\"2\\" 3'),
    (u'\'1 \\\'2\\\' 3\'', u'1 \\\'2\\\' 3'),
    (u'"', u'"'),
    (u'\'', u'\''),
    # Not entirely sure these are good but they match the current
    # behaviour
    (u'"1""2"', u'1""2'),
    (u'\'1\'\'2\'', u'1\'\'2'),
    (u'"1" 2 "3"', u'1" 2 "3'),
    (u'"1"\'2\'"3"', u'1"\'2\'"3'),
)


@pytest.mark.parametrize("quoted, expected", UNQUOTE_DATA)
def test_unquote(quoted, expected):
    # unquote() strips one layer of matching outer quotes (see table).
    assert unquote(quoted) == expected
| gpl-3.0 |
sibaru/shadowsocks | shadowsocks/manager.py | 925 | 9692 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import errno
import traceback
import socket
import logging
import json
import collections
from shadowsocks import common, eventloop, tcprelay, udprelay, asyncdns, shell
# Receive-buffer size (bytes) for the manager control socket; appears
# sized to fit a single Ethernet-frame UDP datagram -- confirm.
BUF_SIZE = 1506
# Maximum number of per-port statistics entries packed into one
# 'stat: {...}' report before it is flushed (see handle_periodic).
STAT_SEND_LIMIT = 100
class Manager(object):
    """Multi-port shadowsocks server manager.

    Listens on a control socket (UDP or unix-domain datagram) for
    add/remove/ping commands, runs one TCP+UDP relay pair per configured
    port, and periodically reports per-port traffic statistics back to
    the most recent control client.
    """

    def __init__(self, config):
        self._config = config
        self._relays = {}  # server_port -> (tcprelay, udprelay)
        self._loop = eventloop.EventLoop()
        self._dns_resolver = asyncdns.DNSResolver()
        self._dns_resolver.add_to_loop(self._loop)

        # Bytes transferred per port since the last periodic report.
        self._statistics = collections.defaultdict(int)
        # Address of the last control client; replies and stats go here.
        self._control_client_addr = None
        try:
            manager_address = config['manager_address']
            if ':' in manager_address:
                # "host:port" -> UDP socket; resolve to pick the family.
                addr = manager_address.rsplit(':', 1)
                addr = addr[0], int(addr[1])
                addrs = socket.getaddrinfo(addr[0], addr[1])
                if addrs:
                    family = addrs[0][0]
                else:
                    logging.error('invalid address: %s', manager_address)
                    exit(1)
            else:
                # No port: treat the address as a unix-domain socket path.
                addr = manager_address
                family = socket.AF_UNIX
            self._control_socket = socket.socket(family,
                                                 socket.SOCK_DGRAM)
            self._control_socket.bind(addr)
            self._control_socket.setblocking(False)
        except (OSError, IOError) as e:
            logging.error(e)
            logging.error('can not bind to manager address')
            exit(1)
        self._loop.add(self._control_socket,
                       eventloop.POLL_IN, self)
        self._loop.add_periodic(self.handle_periodic)

        # Start one relay pair per (port, password) from the config.
        port_password = config['port_password']
        del config['port_password']
        for port, password in port_password.items():
            a_config = config.copy()
            a_config['server_port'] = int(port)
            a_config['password'] = password
            self.add_port(a_config)

    def add_port(self, config):
        """Start TCP and UDP relays for config['server_port'] (no-op
        with an error log when the port is already being served)."""
        port = int(config['server_port'])
        servers = self._relays.get(port, None)
        if servers:
            logging.error("server already exists at %s:%d" % (config['server'],
                                                              port))
            return
        logging.info("adding server at %s:%d" % (config['server'], port))
        t = tcprelay.TCPRelay(config, self._dns_resolver, False,
                              self.stat_callback)
        u = udprelay.UDPRelay(config, self._dns_resolver, False,
                              self.stat_callback)
        t.add_to_loop(self._loop)
        u.add_to_loop(self._loop)
        self._relays[port] = (t, u)

    def remove_port(self, config):
        """Stop and discard the relays for config['server_port']."""
        port = int(config['server_port'])
        servers = self._relays.get(port, None)
        if servers:
            logging.info("removing server at %s:%d" % (config['server'], port))
            t, u = servers
            t.close(next_tick=False)
            u.close(next_tick=False)
            del self._relays[port]
        else:
            logging.error("server not exist at %s:%d" % (config['server'],
                                                         port))

    def handle_event(self, sock, fd, event):
        """Event-loop callback: process one control-socket command."""
        if sock == self._control_socket and event == eventloop.POLL_IN:
            data, self._control_client_addr = sock.recvfrom(BUF_SIZE)
            parsed = self._parse_command(data)
            if parsed:
                command, config = parsed
                a_config = self._config.copy()
                if config:
                    # let the command override the configuration file
                    a_config.update(config)
                if 'server_port' not in a_config:
                    logging.error('can not find server_port in config')
                else:
                    if command == 'add':
                        self.add_port(a_config)
                        self._send_control_data(b'ok')
                    elif command == 'remove':
                        self.remove_port(a_config)
                        self._send_control_data(b'ok')
                    elif command == 'ping':
                        self._send_control_data(b'pong')
                    else:
                        logging.error('unknown command %s', command)

    def _parse_command(self, data):
        """Split a control datagram into (command, config-dict-or-None);
        return None when the JSON payload is malformed."""
        # commands:
        # add: {"server_port": 8000, "password": "foobar"}
        # remove: {"server_port": 8000"}
        data = common.to_str(data)
        parts = data.split(':', 1)
        if len(parts) < 2:
            # Bare command with no payload (e.g. "ping").
            return data, None
        command, config_json = parts
        try:
            config = shell.parse_json_in_str(config_json)
            return command, config
        except Exception as e:
            logging.error(e)
            return None

    def stat_callback(self, port, data_len):
        # Called by the relays for every chunk transferred on `port`.
        self._statistics[port] += data_len

    def handle_periodic(self):
        """Flush accumulated statistics to the control client as one or
        more compact 'stat: {...}' datagrams."""
        r = {}
        i = 0

        def send_data(data_dict):
            if data_dict:
                # use compact JSON format (without space)
                data = common.to_bytes(json.dumps(data_dict,
                                                  separators=(',', ':')))
                self._send_control_data(b'stat: ' + data)

        for k, v in self._statistics.items():
            r[k] = v
            i += 1
            # split the data into segments that fit in UDP packets
            if i >= STAT_SEND_LIMIT:
                send_data(r)
                r.clear()
                # BUGFIX: reset the segment counter; without this every
                # entry after the first flush was sent in its own
                # datagram, defeating the batching.
                i = 0
        send_data(r)
        self._statistics.clear()

    def _send_control_data(self, data):
        """Best-effort reply to the last control client; silently drops
        the datagram when the socket would block."""
        if self._control_client_addr:
            try:
                self._control_socket.sendto(data, self._control_client_addr)
            except (socket.error, OSError, IOError) as e:
                error_no = eventloop.errno_from_exception(e)
                if error_no in (errno.EAGAIN, errno.EINPROGRESS,
                                errno.EWOULDBLOCK):
                    return
                else:
                    shell.print_exception(e)
                    if self._config['verbose']:
                        traceback.print_exc()

    def run(self):
        """Block forever, servicing relays and the control socket."""
        self._loop.run()
def run(config):
    """Build a Manager from `config` and run its event loop (blocking)."""
    manager = Manager(config)
    manager.run()
def test():
    """End-to-end self-test: start a Manager in a background thread,
    then exercise add/remove commands and TCP/UDP statistics reporting
    through real local sockets."""
    import time
    import threading
    import struct
    from shadowsocks import encrypt

    logging.basicConfig(level=5,
                        format='%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    # `enc` hands the Manager instance out of the server thread.
    enc = []
    eventloop.TIMEOUT_PRECISION = 1

    def run_server():
        config = {
            'server': '127.0.0.1',
            'local_port': 1081,
            'port_password': {
                '8381': 'foobar1',
                '8382': 'foobar2'
            },
            'method': 'aes-256-cfb',
            'manager_address': '127.0.0.1:6001',
            'timeout': 60,
            'fast_open': False,
            'verbose': 2
        }
        manager = Manager(config)
        enc.append(manager)
        manager.run()

    t = threading.Thread(target=run_server)
    t.start()
    # Give the server thread time to bind its sockets.
    time.sleep(1)
    manager = enc[0]

    cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    cli.connect(('127.0.0.1', 6001))

    # test add and remove
    time.sleep(1)
    cli.send(b'add: {"server_port":7001, "password":"asdfadsfasdf"}')
    time.sleep(1)
    assert 7001 in manager._relays
    data, addr = cli.recvfrom(1506)
    assert b'ok' in data

    cli.send(b'remove: {"server_port":8381}')
    time.sleep(1)
    assert 8381 not in manager._relays
    data, addr = cli.recvfrom(1506)
    assert b'ok' in data
    logging.info('add and remove test passed')

    # test statistics for TCP
    header = common.pack_addr(b'google.com') + struct.pack('>H', 80)
    data = encrypt.encrypt_all(b'asdfadsfasdf', 'aes-256-cfb', 1,
                               header + b'GET /\r\n\r\n')
    tcp_cli = socket.socket()
    tcp_cli.connect(('127.0.0.1', 7001))
    tcp_cli.send(data)
    tcp_cli.recv(4096)
    tcp_cli.close()
    data, addr = cli.recvfrom(1506)
    data = common.to_str(data)
    assert data.startswith('stat: ')
    data = data.split('stat:')[1]
    stats = shell.parse_json_in_str(data)
    assert '7001' in stats
    logging.info('TCP statistics test passed')

    # test statistics for UDP
    header = common.pack_addr(b'127.0.0.1') + struct.pack('>H', 80)
    data = encrypt.encrypt_all(b'foobar2', 'aes-256-cfb', 1,
                               header + b'test')
    udp_cli = socket.socket(type=socket.SOCK_DGRAM)
    udp_cli.sendto(data, ('127.0.0.1', 8382))
    # NOTE(review): this closes tcp_cli a second time and udp_cli is
    # never closed -- likely meant udp_cli.close(); confirm.
    tcp_cli.close()
    data, addr = cli.recvfrom(1506)
    data = common.to_str(data)
    assert data.startswith('stat: ')
    data = data.split('stat:')[1]
    stats = json.loads(data)
    assert '8382' in stats
    logging.info('UDP statistics test passed')

    manager._loop.stop()
    t.join()
# When executed directly, run the self-test instead of a real manager.
if __name__ == '__main__':
    test()
| apache-2.0 |
adlius/osf.io | api_tests/metrics/test_composite_query.py | 6 | 2673 | import time
import pytest
from datetime import datetime
from osf_tests.factories import (
PreprintFactory,
AuthUserFactory
)
from osf.metrics import PreprintDownload
from api.base.settings import API_PRIVATE_BASE as API_BASE
@pytest.fixture()
def preprint():
    # A fresh preprint with default factory attributes.
    return PreprintFactory()
@pytest.fixture()
def user():
    """A staff user carrying the 'preprint_metrics' system tag, which
    grants access to the metrics endpoints under test."""
    staff_user = AuthUserFactory()
    staff_user.is_staff = True
    staff_user.add_system_tag('preprint_metrics')
    staff_user.save()
    return staff_user
@pytest.fixture
def base_url():
    # Root path of the preprint metrics endpoints.
    return f'/{API_BASE}metrics/preprints/'
@pytest.mark.es
@pytest.mark.django_db
class TestElasticSearch:

    def test_elasticsearch_agg_query(self, app, user, base_url, preprint):
        """Composite date_histogram aggregation over preprint downloads."""
        post_url = '{}downloads/'.format(base_url)

        # Yearly composite aggregation on the download timestamps.
        payload = {
            'data': {
                'type': 'preprint_metrics',
                'attributes': {
                    'query': {
                        'aggs': {
                            'preprints_by_year': {
                                'composite': {
                                    'sources': [{
                                        'date': {
                                            'date_histogram': {
                                                'field': 'timestamp',
                                                'interval': 'year'
                                            }
                                        }
                                    }]
                                }
                            }
                        }
                    }
                }
            }
        }

        resp = app.post_json_api(post_url, payload, auth=user.auth)
        assert resp.status_code == 200
        # No downloads have been recorded yet.
        assert resp.json['hits']['hits'] == []

        # Record two downloads in different months of the same year.
        PreprintDownload.record_for_preprint(
            preprint,
            path=preprint.primary_file.path,
            timestamp=datetime(year=2020, month=1, day=1),
        )

        PreprintDownload.record_for_preprint(
            preprint,
            path=preprint.primary_file.path,
            timestamp=datetime(year=2020, month=2, day=1)
        )
        time.sleep(1)  # gives ES some time to update

        resp = app.post_json_api(post_url, payload, auth=user.auth)
        assert resp.status_code == 200
        # Both downloads fall in 2020 -> a single yearly bucket.
        assert len(resp.json['aggregations']['preprints_by_year']['buckets']) == 1

        # Re-run bucketed by month -> two buckets (Jan and Feb 2020).
        payload['data']['attributes']['query']['aggs']['preprints_by_year']['composite']['sources'][0]['date']['date_histogram']['interval'] = 'month'
        resp = app.post_json_api(post_url, payload, auth=user.auth)
        assert len(resp.json['aggregations']['preprints_by_year']['buckets']) == 2
| apache-2.0 |
SkyTruth/scraper | nrc/nrc/extensions/failLogger.py | 1 | 2623 |
import StringIO
from datetime import datetime
import smtplib
import traceback
from scrapy import signals
from scrapy import log
from nrc import settings
class FailLogger(object):
    """Scrapy extension that emails an alert on the first uncaught
    spider exception and a summary when the spider closes."""

    @classmethod
    def from_crawler(cls, crawler):
        # Standard scrapy extension hook: wire up the spider signals.
        ext = cls()
        crawler.signals.connect(ext.spider_error, signal=signals.spider_error)
        crawler.signals.connect(ext.spider_close, signal=signals.spider_closed)
        return ext

    def spider_error(self, failure, response, spider):
        # Count every failure, but only email the first one to avoid
        # flooding the mailbox; the rest are summarized at close.
        spider.exception_count += 1
        if spider.exception_count == 1:
            # email on first exception
            temp = StringIO.StringIO()
            temp.write("Uncaught exception from {0}:\n\t{1}\n\n"
                       .format(spider.name, failure.getErrorMessage()))
            failure.printTraceback(file=temp)
            message = temp.getvalue()
            self.send_error_email (spider, message, failure)
            temp.close()

    def spider_close(self, spider, reason):
        # On shutdown, send a count summary when more than one
        # exception occurred during the run.
        if spider.exception_count > 1:
            message = ("Total of %s uncaught exceptions in %s execution."
                       % (spider.exception_count, spider.name))
            self.send_error_email (spider, message)

    @staticmethod
    def report_exception(spider, e, srcmsg=""):
        """Manually report exception `e` (emails only the first one)."""
        spider.exception_count += 1
        if spider.exception_count == 1:
            msg = ("{0}\nUncaught exception from {1}:\n\t{2}\n\n{3}"
                   .format(srcmsg, spider.name, e, traceback.format_exc()))
            FailLogger.send_error_email (spider, msg, e)

    @staticmethod
    def send_error_email (spider, message, failure=None):
        """Send `message` via SMTP with STARTTLS using settings.MAIL_*.

        NOTE(review): assumes settings.MAIL_TO is a single address
        string accepted by both the header and sendmail() -- confirm.
        """
        # `failure` may be a plain Exception, a twisted-style Failure
        # (has getErrorMessage), or None for the close-time summary.
        if isinstance(failure, Exception):
            subject = ('%s Exception: %s' % (spider.name, failure))
        elif failure:
            subject = ('%s Exception: %s'
                       % (spider.name, failure.getErrorMessage()))
        else:
            subject = '%s: %s Exceptions' % (spider.name,
                                             spider.exception_count)
        spider.log ('Sending alert:\n\t%s' % (subject,), log.ERROR)
        senddate = datetime.strftime(datetime.now(), '%Y-%m-%d')
        header = ("Date: %s\r\nFrom: %s\r\nTo: %s\r\nSubject: %s\r\n\r\n"
                  % (senddate, settings.MAIL_FROM, settings.MAIL_TO, subject))
        server = smtplib.SMTP('%s:%s'
                              % (settings.MAIL_HOST, settings.MAIL_PORT))
        server.starttls()
        server.login(settings.MAIL_USER, settings.MAIL_PASS)
        server.sendmail(settings.MAIL_FROM, settings.MAIL_TO, header+message)
        server.quit()
| mit |
yehnkay/VN-IME-for-Sublime-Text | vn_ime.py | 3 | 4648 | import sublime, sublime_plugin
# Shared IME state used by the listener and the commands below:
# STATUS  - whether the Vietnamese IME is currently enabled.
# MOD     - True while the buffer change was made by the IME itself,
#           so the modification listener does not re-trigger on it.
# LASTKEY - most recent trigger key processed; appears to drive the
#           type-the-key-twice literal restore in convertWordChar.
STATUS = False
MOD = False
LASTKEY = ''
class SaveOnModifiedListener(sublime_plugin.EventListener):
    """Runs the IME transform after every user-made buffer edit."""

    def on_modified(self, view):
        global STATUS
        global MOD
        if STATUS:
            # Only react to edits the IME did not make itself.
            if not MOD:
                view.run_command('startime')
            MOD = False
class StartimeCommand(sublime_plugin.TextCommand):
    """Core IME command: inspects the character just typed and, when it
    is a Vietnamese modifier key, rewrites the current word with the
    corresponding diacritic (Telex-style input)."""

    # Position and buffer size at the previous invocation, used to
    # detect whether the edit was an insertion or a deletion.
    curPost = 0
    curSize = 0
    stateIME = True
    # Trigger keys: tone marks (s f x j r), vowel modifiers (w a o e),
    # d-stroke (d) and z (appears unused by replace_word_key below).
    keyDefine = ['w','s','f','x','j','a','o','e','d','r','z']

    def run(self, edit):
        pos = self.view.sel()[0]
        global LASTKEY
        # Buffer grew: a character was inserted.
        if self.view.size() > self.curSize :
            # Region of the single character just typed.
            a = pos.begin() - 1
            b = pos.end()
            charRegion = sublime.Region(a, b)
            char = self.view.substr(charRegion)
            if self.find_key_unicode(char):
                if self.check_grammar(self.view.word(charRegion)):
                    final = self.replace_word_key(char,self.view.word(charRegion))
                    if final :
                        global MOD
                        # Delegate the buffer edit; MOD guards against
                        # re-triggering on our own modification.
                        self.view.run_command("runchange", {'a':a,'b':b,"final":final})
                        MOD = True
            self.curPost = pos
            self.curSize = self.view.size();
            LASTKEY = char
        # Buffer shrank (deletion): just resynchronize the size.
        elif self.view.size() < self.curSize:
            self.curSize = self.view.size();

    def find_key_unicode(self,key):
        """Return True when `key` is one of the IME trigger keys."""
        if key in self.keyDefine:
            return True
        return False

    def check_grammar(self,word):
        # Currently always accepts; the disabled code below looks like
        # an unfinished double-letter rejection check.
        word = self.view.substr(word)
        # _len = len(word)-2
        # for i in _len:
        #     if word[i] == word[i+1]:
        #         return False
        return True

    def replace_word_key(self,key,word):
        """Map trigger `key` to its source/destination character tables
        and return the transformed word, or False when nothing changed.

        Each table pairs a plain/accented source character with its
        replacement; the trailing entries reverse an already-applied
        mark (e.g. 'á' -> 'a' for key 's'), allowing toggling.
        """
        word = self.view.substr(word)
        finalWord = ''
        charSour = ''
        charDest = ''
        if key == 'w':
            # Horn/breve modifier: a->ă, o->ơ, u->ư (and undo).
            charSour = ['a','o','u','ă','ơ','ư','â']
            charDest = ['ă','ơ','ư','a','o','u','a']
        elif key == 's':
            # Acute tone (sắc).
            charSour = ['a','ă','â','e','ê','i','o','ơ','ô','y','u','ư','á','í','é','ó','ý','ú']
            charDest = ['á','ắ','ấ','é','ế','í','ó','ớ','ố','ý','ú','ứ','a','i','e','o','y','u']
        elif key == 'f':
            # Grave tone (huyền).
            charSour = ['a','ă','â','e','ê','i','o','ơ','ô','y','u','ư','ì','à','è','ì','ò','ỳ']
            charDest = ['à','ằ','ầ','è','ề','ì','ò','ờ','ồ','ỳ','ù','ừ','i','a','e','i','o','y']
        elif key == 'x':
            # Tilde tone (ngã).
            charSour = ['a','ă','â','e','ê','i','o','ơ','ô','y','u','ư','ẽ','ã','ẽ','ĩ','õ','ỹ','ũ']
            charDest = ['ã','ẵ','ẫ','ẽ','ễ','ĩ','õ','ỡ','ỗ','ỹ','ũ','ữ','e','a','e','i','o','y','u']
        elif key == 'j':
            # Dot-below tone (nặng).
            charSour = ['a','ă','â','e','ê','i','o','ơ','ô','u','ư','ạ','ẹ','ị','ọ','ụ']
            charDest = ['ạ','ặ','ậ','ẹ','ệ','ị','ọ','ợ','ộ','ụ','ự','a','e','i','o','u']
        elif key == 'a':
            # aa -> â circumflex (and undo).
            charSour = ['a','ă','â']
            charDest = ['â','â','a']
        elif key == 'o':
            # oo -> ô circumflex (and undo).
            charSour = ['o','ơ','ô']
            charDest = ['ô','ô','o']
        elif key == 'e':
            # ee -> ê circumflex (and undo).
            charSour = ['e','ê']
            charDest = ['ê','e']
        elif key == 'd':
            # dd -> đ stroke (and undo).
            charSour = ['d','đ']
            charDest = ['đ','d']
        elif key == 'r':
            # Hook-above tone (hỏi).
            charSour = ['a','ă','â','e','ê','i','o','ơ','ô','y','u','ư','ỏ','ả','ẻ','ỉ','ỏ','ỷ','ủ']
            charDest = ['ả','ẳ','ẩ','ẻ','ể','ỉ','ỏ','ở','ổ','ỷ','ủ','ử','o','a','e','i','o','y','u']
        finalWord = self.convertWordChar(key,word,charSour,charDest)
        # Only report a replacement when something actually changed.
        if finalWord != word:
            return finalWord
        return False

    def convertWordChar(self,key,word,charSour,charDest):
        """Apply the first applicable charSour->charDest substitution to
        `word` (whose last character is the trigger key itself).

        Long words are scanned right-to-left, short words left-to-right;
        presumably this targets the tone-carrying vowel -- confirm
        against Vietnamese orthography rules.
        """
        global LASTKEY
        w = list(word)
        hasChanged = False
        # Drop the trigger key that was just typed.
        del w[-1]
        # Reject over-long words and 3+ letter words starting o/e.
        if len(w) >6 or (len(w) >2 and w[0] in ['o','e'] ):
            return word
        if len(w) > 3 or (len(w)==3 and w[2] in ['o']) :
            # Scan right-to-left, replacing the first matching char.
            for i in reversed(range(len(w))):
                if hasChanged:
                    break
                for j in range(len(charSour)):
                    # Skip glide vowels i/u near the word end.
                    if (i in [3,4] and w[i] in ['i','u']):
                        continue
                    if (w[i] == charSour[j]) :
                        w[i] = charDest[j]
                        hasChanged = True
                        break
        elif len(w) <= 3:
            # Short words: scan left-to-right instead.
            for i in range(len(w)):
                if hasChanged:
                    break
                for j in range(len(charSour)):
                    if w[i] == charSour[j]:
                        w[i] = charDest[j]
                        hasChanged = True
                        break
        # Typing the same trigger key twice restores it as a literal.
        if LASTKEY == key:
            if w[-1] == key:
                w.append(LASTKEY)
        if hasChanged :
            word = "".join(w)
        return word
class ControlimeCommand(sublime_plugin.TextCommand):
    """Toggles the Vietnamese IME on and off."""
    # True means the next invocation will switch the IME on.
    stateIME = True

    def run(self, edit):
        global STATUS
        if self.stateIME:
            STATUS = True
            sublime.status_message("VN IME Started")
            self.view.set_status('VN IME'," VN IME : ON")
        else:
            STATUS = False
            sublime.status_message("VN IME Stoped")
            self.view.set_status('VN IME'," VN IME : OFF")
        # Flip for the next invocation.
        self.stateIME = not self.stateIME
class RunchangeCommand(sublime_plugin.TextCommand):
    """Replaces the word spanning [a, b) with the transformed text."""

    def run(self, edit, a, b, final):
        target = self.view.word(sublime.Region(a, b))
        self.view.replace(edit, target, final)
| apache-2.0 |
anand-c-goog/tensorflow | tensorflow/contrib/distributions/python/ops/poisson.py | 29 | 5151 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Poisson distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
# Shared docstring note appended to `_log_prob` and `_prob` below.
# (Typo fix: "Note thet" -> "Note that".)
_poisson_prob_note = """
Note that the input value must be a non-negative floating point tensor with
dtype `dtype` and whose shape can be broadcast with `self.lam`. `x` is only
legal if it is non-negative and its components are equal to integer values.
"""
class Poisson(distribution.Distribution):
  """Poisson distribution.
  The Poisson distribution is parameterized by `lam`, the rate parameter.
  The pmf of this distribution is:
  ```
  pmf(k) = e^(-lam) * lam^k / k!, k >= 0
  ```
  """
  def __init__(self,
               lam,
               validate_args=False,
               allow_nan_stats=True,
               name="Poisson"):
    """Construct Poisson distributions.
    Args:
      lam: Floating point tensor, the rate parameter of the
        distribution(s). `lam` must be positive.
      validate_args: `Boolean`, default `False`. Whether to assert that
        `lam > 0` as well as inputs to pmf computations are non-negative
        integers. If validate_args is `False`, then `pmf` computations might
        return `NaN`, but can be evaluated at any real value.
      allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: A name for this distribution.
    """
    parameters = locals()
    parameters.pop("self")
    with ops.name_scope(name, values=[lam]) as ns:
      # The positivity assertion is only attached when validation is
      # requested; otherwise the dependency list is empty.
      with ops.control_dependencies([check_ops.assert_positive(lam)] if
                                    validate_args else []):
        self._lam = array_ops.identity(lam, name="lam")
    super(Poisson, self).__init__(
        dtype=self._lam.dtype,
        is_continuous=False,
        is_reparameterized=False,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        parameters=parameters,
        graph_parents=[self._lam],
        name=ns)
  @property
  def lam(self):
    """Rate parameter."""
    return self._lam
  def _batch_shape(self):
    # Dynamic batch shape: follows the (possibly runtime-only) shape of lam.
    return array_ops.shape(self.lam)
  def _get_batch_shape(self):
    # Static batch shape, known at graph-construction time.
    return self.lam.get_shape()
  def _event_shape(self):
    # Scalar event: each sample is a single count.
    return constant_op.constant([], dtype=dtypes.int32)
  def _get_event_shape(self):
    return tensor_shape.scalar()
  @distribution_util.AppendDocstring(_poisson_prob_note)
  def _log_prob(self, x):
    x = self._assert_valid_sample(x, check_integer=True)
    # log pmf: x*log(lam) - lam - log(x!), where log(x!) = lgamma(x + 1).
    return x * math_ops.log(self.lam) - self.lam - math_ops.lgamma(x + 1)
  @distribution_util.AppendDocstring(_poisson_prob_note)
  def _prob(self, x):
    return math_ops.exp(self._log_prob(x))
  def _log_cdf(self, x):
    return math_ops.log(self.cdf(x))
  def _cdf(self, x):
    # CDF via the regularized upper incomplete gamma function:
    # P(X <= x) = igammac(floor(x) + 1, lam).
    x = self._assert_valid_sample(x, check_integer=False)
    return math_ops.igammac(math_ops.floor(x + 1), self.lam)
  def _mean(self):
    # Mean of a Poisson equals its rate.
    return array_ops.identity(self.lam)
  def _variance(self):
    # Variance of a Poisson also equals its rate.
    return array_ops.identity(self.lam)
  def _std(self):
    return math_ops.sqrt(self.variance())
  @distribution_util.AppendDocstring(
      """Note that when `lam` is an integer, there are actually two modes.
      Namely, `lam` and `lam - 1` are both modes. Here we return
      only the larger of the two modes.""")
  def _mode(self):
    return math_ops.floor(self.lam)
  def _assert_valid_sample(self, x, check_integer=True):
    # No-op unless validation was requested at construction time.
    if not self.validate_args: return x
    with ops.name_scope("check_x", values=[x]):
      dependencies = [check_ops.assert_non_negative(x)]
      if check_integer:
        dependencies += [distribution_util.assert_integer_form(
            x, message="x has non-integer components.")]
      return control_flow_ops.with_dependencies(dependencies, x)
| apache-2.0 |
aplanas/kmanga | tests/test_updatedb.py | 1 | 16206 | # -*- coding: utf-8 -*-
#
# (c) 2018 Alberto Planas <aplanas@gmail.com>
#
# This file is part of KManga.
#
# KManga is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# KManga is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with KManga. If not, see <http://www.gnu.org/licenses/>.
import datetime
import unittest
import django
from django.test.utils import setup_test_environment
from django.test.runner import DiscoverRunner
import scraper.items
from scraper.pipelines import UpdateDBPipeline
from core.models import Source, SourceLanguage, Genre, Manga
# Configure Django to run tests outside the manage.py tool:
# initialise the app registry, then install the test-environment hooks.
django.setup()
setup_test_environment()
class Spider(object):
    """Bare stand-in for a scrapy spider; tests only assign a `name`."""
    pass
class TestUpdateDBPipeline(unittest.TestCase):
    """Integration tests for UpdateDBPipeline against a scratch database.

    The heavily duplicated fixture literals of the original tests (the same
    Manga item built three times, the same per-issue assertion block repeated
    seven times) are factored into the `_load_genres` / `_issue` / `_manga` /
    `_assert_issue` helpers below.  Test names and assertions are unchanged.
    """

    # Genre fixture shared by most tests.
    GENRE_NAMES = ['g1', 'g2', 'g3']

    def setUp(self):
        # This module runs outside `manage.py test`, so the test database is
        # created by hand through DiscoverRunner.
        self.dr = DiscoverRunner()
        self.old_config = self.dr.setup_databases()
        self.updatedb = UpdateDBPipeline(
            images_store='tests/fixtures/images')
        source = Source.objects.create(
            name='source',
            spider='spider',
            url='http://example.com'
        )
        SourceLanguage.objects.create(
            language='EN',
            source=source
        )
        self.spider = Spider()
        self.spider.name = 'Spider'

    def tearDown(self):
        self.dr.teardown_databases(self.old_config)

    # -- helpers ---------------------------------------------------------

    def _load_genres(self, names):
        """Feed a Genres item with `names` through the pipeline."""
        self.updatedb.update_genres(
            scraper.items.Genres(names=names), self.spider)

    @staticmethod
    def _issue(number):
        """Build `issue<number>`, released on 2014-01-<number>."""
        return scraper.items.Issue(
            name='issue%d' % number,
            number=str(number),
            order=number,
            language='EN',
            release=datetime.date(year=2014, month=1, day=number),
            url='http://manga1.org/issue%d' % number)

    def _manga(self):
        """Build the canonical 'Manga1' item with issues 1 and 2."""
        return scraper.items.Manga(
            name='Manga1',
            alt_name=['Manga1', 'MangaA'],
            author='Author',
            artist='Artist',
            reading_direction='LR',
            status='O',
            genres=['g1', 'g2'],
            rank=1,
            rank_order='ASC',
            description='Description',
            image_urls=['http://manga1.org/images/height-large.jpg'],
            images=[{
                'url': 'http://manga1.org/images/height-large.jpg',
                'path': 'height-large.jpg',
                'checksum': None
            }],
            issues=[self._issue(1), self._issue(2)],
            url='http://manga1.org')

    def _assert_issue(self, manga, number):
        """Assert that `manga` contains `issue<number>` built by `_issue`."""
        issue = manga.issue_set.get(name='issue%d' % number)
        self.assertEqual(issue.name, 'issue%d' % number)
        self.assertEqual(issue.number, str(number))
        self.assertEqual(issue.order, number)
        self.assertEqual(issue.language, 'EN')
        self.assertEqual(issue.release,
                         datetime.date(year=2014, month=1, day=number))
        self.assertEqual(issue.url, 'http://manga1.org/issue%d' % number)

    # -- tests -----------------------------------------------------------

    def test_update_relation(self):
        source = Source.objects.get(spider='spider')
        names = ['g1', 'g2', 'g3']
        items = [{'name': i} for i in names]
        n, u, d = self.updatedb._update_relation(source, 'genre_set',
                                                 'name', items,
                                                 self.updatedb._update_name)
        self.assertEqual(n, set(names))
        self.assertEqual(u, set())
        self.assertEqual(d, set())
        names = ['g2', 'g3', 'g4']
        items = [{'name': i} for i in names]
        n, u, d = self.updatedb._update_relation(source, 'genre_set',
                                                 'name', items,
                                                 self.updatedb._update_name)
        self.assertEqual(n, set(['g4']))
        self.assertEqual(u, set(['g2', 'g3']))
        self.assertEqual(d, set(['g1']))

    def test_update_genres(self):
        self._load_genres(self.GENRE_NAMES)
        self.assertEqual({o.name for o in Genre.objects.all()},
                         set(self.GENRE_NAMES))
        # A second update replaces the old set: g1 is dropped, g4 added.
        names = ['g2', 'g3', 'g4']
        self._load_genres(names)
        self.assertEqual({o.name for o in Genre.objects.all()}, set(names))

    def test_update_collection(self):
        self._load_genres(self.GENRE_NAMES)
        manga = self._manga()
        self.updatedb.update_collection(manga, self.spider)
        self.assertEqual(len(Manga.objects.all()), 1)
        m = Manga.objects.all()[0]
        self.assertEqual(m.name, 'Manga1')
        self.assertEqual(len(m.altname_set.all()), 2)
        self.assertEqual({o.name for o in m.altname_set.all()},
                         set(('Manga1', 'MangaA')))
        self.assertEqual(m.author, 'Author')
        self.assertEqual(m.artist, 'Artist')
        self.assertEqual(m.reading_direction, 'LR')
        self.assertEqual(m.status, 'O')
        self.assertEqual(len(m.genres.all()), 2)
        self.assertEqual({o.name for o in m.genres.all()},
                         set(('g1', 'g2')))
        self.assertEqual(m.rank, 1.0)
        self.assertEqual(m.rank_order, 'ASC')
        self.assertEqual(m.description, 'Description')
        self.assertEqual(len(m.issue_set.all()), 2)
        self._assert_issue(m, 1)
        self._assert_issue(m, 2)
        # Remove the image
        m.cover.delete()

    def test_update2_collection(self):
        self._load_genres(self.GENRE_NAMES)
        manga = self._manga()
        self.updatedb.update_collection(manga, self.spider)
        # Add a new alt_name
        manga['alt_name'].append('MangaB')
        self.updatedb.update_collection(manga, self.spider)
        m = Manga.objects.all()[0]
        self.assertEqual(len(m.altname_set.all()), 3)
        self.assertEqual({o.name for o in m.altname_set.all()},
                         set(('Manga1', 'MangaA', 'MangaB')))
        # Remove an alt_name
        del manga['alt_name'][0]
        self.updatedb.update_collection(manga, self.spider)
        m = Manga.objects.all()[0]
        self.assertEqual(len(m.altname_set.all()), 2)
        self.assertEqual({o.name for o in m.altname_set.all()},
                         set(('MangaA', 'MangaB')))
        # Change author
        manga['author'] = 'Another Author'
        self.updatedb.update_collection(manga, self.spider)
        # Change status
        manga['status'] = 'C'
        self.updatedb.update_collection(manga, self.spider)
        # Add a new genre
        manga['genres'].append('g3')
        self.updatedb.update_collection(manga, self.spider)
        m = Manga.objects.all()[0]
        self.assertEqual(len(m.genres.all()), 3)
        self.assertEqual({o.name for o in m.genres.all()},
                         set(('g1', 'g2', 'g3')))
        # Remove a genre
        del manga['genres'][1]
        self.updatedb.update_collection(manga, self.spider)
        m = Manga.objects.all()[0]
        self.assertEqual(len(m.genres.all()), 2)
        self.assertEqual({o.name for o in m.genres.all()},
                         set(('g1', 'g3')))
        # Add a new issue
        manga['issues'].append(self._issue(3))
        self.updatedb.update_collection(manga, self.spider)
        m = Manga.objects.all()[0]
        self.assertEqual(len(m.issue_set.all()), 3)
        # Remove an issue
        del manga['issues'][0]
        self.updatedb.update_collection(manga, self.spider)
        m = Manga.objects.all()[0]
        self.assertEqual(len(m.issue_set.all()), 2)
        # Check the final result
        self.assertEqual(len(Manga.objects.all()), 1)
        m = Manga.objects.all()[0]
        self.assertEqual(m.name, 'Manga1')
        self.assertEqual(len(m.altname_set.all()), 2)
        self.assertEqual({o.name for o in m.altname_set.all()},
                         set(('MangaA', 'MangaB')))
        self.assertEqual(m.author, 'Another Author')
        self.assertEqual(m.artist, 'Artist')
        self.assertEqual(m.reading_direction, 'LR')
        self.assertEqual(m.status, 'C')
        self.assertEqual(len(m.genres.all()), 2)
        self.assertEqual({o.name for o in m.genres.all()},
                         set(('g1', 'g3')))
        self.assertEqual(m.rank, 1.0)
        self.assertEqual(m.rank_order, 'ASC')
        self.assertEqual(m.description, 'Description')
        self.assertEqual(len(m.issue_set.all()), 2)
        self._assert_issue(m, 2)
        self._assert_issue(m, 3)
        # Remove the image
        m.cover.delete()

    def test_update_latest(self):
        self._load_genres(self.GENRE_NAMES)
        self.updatedb.update_collection(self._manga(), self.spider)
        # A "latest" item carries only the new issues (3 and 4); the rest of
        # the collection metadata must survive untouched.
        manga = scraper.items.Manga(
            name='Manga1',
            issues=[self._issue(3), self._issue(4)],
            url='http://manga1.org')
        self.updatedb.update_latest(manga, self.spider)
        self.assertEqual(len(Manga.objects.all()), 1)
        m = Manga.objects.all()[0]
        self.assertEqual(m.name, 'Manga1')
        self.assertEqual(len(m.altname_set.all()), 2)
        self.assertEqual({o.name for o in m.altname_set.all()},
                         set(('Manga1', 'MangaA')))
        self.assertEqual(m.author, 'Author')
        self.assertEqual(m.artist, 'Artist')
        self.assertEqual(m.reading_direction, 'LR')
        self.assertEqual(m.status, 'O')
        self.assertEqual(len(m.genres.all()), 2)
        self.assertEqual({o.name for o in m.genres.all()},
                         set(('g1', 'g2')))
        self.assertEqual(m.rank, 1.0)
        self.assertEqual(m.rank_order, 'ASC')
        self.assertEqual(m.description, 'Description')
        self.assertEqual(len(m.issue_set.all()), 4)
        for number in (1, 2, 3, 4):
            self._assert_issue(m, number)
        # Remove the image
        m.cover.delete()
| gpl-3.0 |
beni55/edx-platform | lms/djangoapps/instructor_task/api.py | 16 | 16164 | """
API for submitting background tasks by an instructor for a course.
Also includes methods for getting information about tasks that have
already been submitted, filtered either by running state or input
arguments.
"""
import hashlib
from celery.states import READY_STATES
from xmodule.modulestore.django import modulestore
from instructor_task.models import InstructorTask
from instructor_task.tasks import (
rescore_problem,
reset_problem_attempts,
delete_problem_state,
send_bulk_course_email,
calculate_grades_csv,
calculate_students_features_csv,
cohort_students,
)
from instructor_task.api_helper import (
check_arguments_for_rescoring,
encode_problem_and_student_input,
encode_entrance_exam_and_student_input,
check_entrance_exam_problems_for_rescoring,
submit_task,
)
from bulk_email.models import CourseEmail
def get_running_instructor_tasks(course_id):
    """Return a queryset of still-running InstructorTask objects, newest first.

    Feeds the task list shown on the instructor dashboard.
    """
    tasks = InstructorTask.objects.filter(course_id=course_id)
    # Drop every "ready" state (success, failure, revoked, ...) so that only
    # live tasks remain.
    for ready_state in READY_STATES:
        tasks = tasks.exclude(task_state=ready_state)
    return tasks.order_by('-id')
def get_instructor_task_history(course_id, usage_key=None, student=None, task_type=None):
    """Return historical InstructorTask objects for a course, newest first.

    Optionally narrowed to a particular problem/student pair (via the encoded
    task key) and/or to a specific task type.
    """
    tasks = InstructorTask.objects.filter(course_id=course_id)
    if usage_key is not None or student is not None:
        _, task_key = encode_problem_and_student_input(usage_key, student)
        tasks = tasks.filter(task_key=task_key)
    if task_type is not None:
        tasks = tasks.filter(task_type=task_type)
    return tasks.order_by('-id')
def get_entrance_exam_instructor_task_history(course_id, usage_key=None, student=None):  # pylint: disable=invalid-name
    """Return historical InstructorTask objects for a course, newest first.

    Optionally narrowed to a particular entrance exam / student pair via the
    encoded task key.
    """
    tasks = InstructorTask.objects.filter(course_id=course_id)
    if usage_key is not None or student is not None:
        _, task_key = encode_entrance_exam_and_student_input(usage_key, student)
        tasks = tasks.filter(task_key=task_key)
    return tasks.order_by('-id')
# Disabling invalid-name because this fn name is longer than 30 chars.
def submit_rescore_problem_for_student(request, usage_key, student):  # pylint: disable=invalid-name
    """Submit a background task that rescores one problem for one student.

    `usage_key` locates the problem (i4x-type notation); `student` is a User.
    Raises ItemNotFoundException if the problem doesn't exist,
    AlreadyRunningError if this student's rescore is already in flight, and
    NotImplementedError if the problem doesn't support rescoring.

    Saving the InstructorTask entry commits it; under a "commit-on-success"
    transaction (TransactionMiddleware) any pending transaction is committed
    here and later database work runs in a new one.
    """
    # Validate that the problem can be rescored; exceptions bubble up.
    check_arguments_for_rescoring(usage_key)
    task_input, task_key = encode_problem_and_student_input(usage_key, student)
    return submit_task(request, 'rescore_problem', rescore_problem,
                       usage_key.course_key, task_input, task_key)
def submit_rescore_problem_for_all_students(request, usage_key):  # pylint: disable=invalid-name
    """Submit a background task that rescores a problem for every student
    who has accessed it and provided and checked an answer.

    `usage_key` locates the problem (i4x-type notation).  Raises
    ItemNotFoundException if the problem doesn't exist, AlreadyRunningError if
    a rescore is already in flight, and NotImplementedError if the problem
    doesn't support rescoring.

    Saving the InstructorTask entry commits it; under a "commit-on-success"
    transaction (TransactionMiddleware) any pending transaction is committed
    here and later database work runs in a new one.
    """
    # Validate that the problem can be rescored; exceptions bubble up.
    check_arguments_for_rescoring(usage_key)
    # Reserve the task (no student -> all students share one task key).
    task_input, task_key = encode_problem_and_student_input(usage_key)
    return submit_task(request, 'rescore_problem', rescore_problem,
                       usage_key.course_key, task_input, task_key)
def submit_rescore_entrance_exam_for_student(request, usage_key, student=None):  # pylint: disable=invalid-name
    """Submit a background task that rescores entrance-exam problems.

    Rescoring covers the given `student` (a User), or every student who has
    accessed the exam when `student` is None.  `usage_key` is the Location of
    the entrance exam section.  Raises ItemNotFoundError if no entrance exam
    exists at `usage_key`, AlreadyRunningError if a rescore is already in
    flight, and NotImplementedError if a problem doesn't support rescoring.

    Saving the InstructorTask entry commits it; under a "commit-on-success"
    transaction (TransactionMiddleware) any pending transaction is committed
    here and later database work runs in a new one.
    """
    # Validate that the exam's problems can be rescored; exceptions bubble up.
    check_entrance_exam_problems_for_rescoring(usage_key)
    task_input, task_key = encode_entrance_exam_and_student_input(usage_key, student)
    return submit_task(request, 'rescore_problem', rescore_problem,
                       usage_key.course_key, task_input, task_key)
def submit_reset_problem_attempts_for_all_students(request, usage_key):  # pylint: disable=invalid-name
    """Submit a background task that resets attempts on a problem for every
    student who has accessed it.

    `usage_key` must be a :class:`Location`.  Raises ItemNotFoundException if
    the problem doesn't exist and AlreadyRunningError if a reset is already
    in flight.

    Saving the InstructorTask entry commits it; under a "commit-on-success"
    transaction (TransactionMiddleware) any pending transaction is committed
    here and later database work runs in a new one.
    """
    # get_item raises if no module descriptor exists; let that propagate.
    modulestore().get_item(usage_key)
    task_input, task_key = encode_problem_and_student_input(usage_key)
    return submit_task(request, 'reset_problem_attempts', reset_problem_attempts,
                       usage_key.course_key, task_input, task_key)
def submit_reset_problem_attempts_in_entrance_exam(request, usage_key, student):  # pylint: disable=invalid-name
    """Submit a background task that resets attempts on all entrance-exam
    problems.

    The reset applies to the given `student` (a User), or to every student
    when `student` is None.  `usage_key` is the Location of the entrance exam
    section.  Raises ItemNotFoundError if no entrance exam exists at
    `usage_key` and AlreadyRunningError if a reset is already in flight.

    Saving the InstructorTask entry commits it; under a "commit-on-success"
    transaction (TransactionMiddleware) any pending transaction is committed
    here and later database work runs in a new one.
    """
    # get_item raises if no entrance exam (section) exists; let it propagate.
    modulestore().get_item(usage_key)
    task_input, task_key = encode_entrance_exam_and_student_input(usage_key, student)
    return submit_task(request, 'reset_problem_attempts', reset_problem_attempts,
                       usage_key.course_key, task_input, task_key)
def submit_delete_problem_state_for_all_students(request, usage_key):  # pylint: disable=invalid-name
    """Submit a background task that deletes state on a problem for every
    student who has accessed it.

    `usage_key` must be a :class:`Location`.  Raises ItemNotFoundException if
    the problem doesn't exist and AlreadyRunningError if a delete is already
    in flight.

    Saving the InstructorTask entry commits it; under a "commit-on-success"
    transaction (TransactionMiddleware) any pending transaction is committed
    here and later database work runs in a new one.
    """
    # get_item raises if no module descriptor exists; let that propagate.
    modulestore().get_item(usage_key)
    task_input, task_key = encode_problem_and_student_input(usage_key)
    return submit_task(request, 'delete_problem_state', delete_problem_state,
                       usage_key.course_key, task_input, task_key)
def submit_delete_entrance_exam_state_for_student(request, usage_key, student):  # pylint: disable=invalid-name
    """Submit a background task that deletes module state for all problems in
    an entrance exam, for the specified student.

    `usage_key` is the Location of the entrance exam section and `student` is
    a User.  Raises ItemNotFoundError if no entrance exam exists at
    `usage_key` and AlreadyRunningError if a reset is already in flight.

    Saving the InstructorTask entry commits it; under a "commit-on-success"
    transaction (TransactionMiddleware) any pending transaction is committed
    here and later database work runs in a new one.
    """
    # get_item raises if no entrance exam (section) exists; let it propagate.
    modulestore().get_item(usage_key)
    task_input, task_key = encode_entrance_exam_and_student_input(usage_key, student)
    return submit_task(request, 'delete_problem_state', delete_problem_state,
                       usage_key.course_key, task_input, task_key)
def submit_bulk_course_email(request, course_key, email_id):
    """Submit a background task that sends a saved CourseEmail to enrollees.

    `email_id` is the id of the CourseEmail object.  Raises
    AlreadyRunningError if the same recipients are already being emailed with
    the same CourseEmail.

    Saving the InstructorTask entry commits it; under a "commit-on-success"
    transaction (TransactionMiddleware) any pending transaction is committed
    here and later database work runs in a new one.
    """
    # The caller has already verified course access; just confirm the email
    # exists.  Its to_option is pulled into the task input so the recipient
    # group is visible in the InstructorTask progress/status display.
    email_obj = CourseEmail.objects.get(id=email_id)
    to_option = email_obj.to_option
    task_input = {'email_id': email_id, 'to_option': to_option}
    # The task key is the MD5 digest of the (email, recipients) pair, so the
    # same email to the same audience can't be queued twice.
    task_key = hashlib.md5(
        "{email_id}_{to_option}".format(email_id=email_id, to_option=to_option)
    ).hexdigest()
    return submit_task(request, 'bulk_course_email', send_bulk_course_email,
                       course_key, task_input, task_key)
def submit_calculate_grades_csv(request, course_key):
    """Submit a background task that generates the course grade CSV.

    Raises AlreadyRunningError if the course's grades are already being
    updated.
    """
    return submit_task(request, 'grade_course', calculate_grades_csv,
                       course_key, {}, "")
def submit_calculate_students_features_csv(request, course_key, features):
    """Submit a background task that generates a CSV of student profile info.

    `features` selects the profile columns to export.  Raises
    AlreadyRunningError if that CSV is already being generated.
    """
    return submit_task(request, 'profile_info_csv', calculate_students_features_csv,
                       course_key, {'features': features}, "")
def submit_cohort_students(request, course_key, file_name):
    """Submit a background task that cohorts students in bulk from a file.

    Raises AlreadyRunningError if students are currently being cohorted.
    """
    return submit_task(request, 'cohort_students', cohort_students,
                       course_key, {'file_name': file_name}, "")
| agpl-3.0 |
gencer/sentry | src/south/migration/utils.py | 3 | 2417 | import sys
from collections import deque
from django.utils.datastructures import SortedDict
from django.db import models
from south import exceptions
class SortedSet(SortedDict):
    """A set that preserves insertion order, backed by SortedDict keys.

    Values are always ``True``; only the keys (and their order) matter.
    """

    def __init__(self, data=tuple()):
        self.extend(data)

    def __str__(self):
        return "SortedSet(%s)" % list(self)

    def add(self, value):
        # Membership is represented by the key; the value is a dummy.
        self[value] = True

    def remove(self, value):
        del self[value]

    def extend(self, iterable):
        # Use a plain loop instead of the previous
        # ``[self.add(k) for k in iterable]`` -- a list comprehension built
        # a throwaway list purely for its side effects.
        for value in iterable:
            self.add(value)
def get_app_label(app):
    """
    Returns the _internal_ app label for the given app module.
    i.e. for <module django.contrib.auth.models> will return 'auth'
    """
    # The models module is always named "<app>.models", so the app label is
    # the second-to-last dotted component.
    parts = app.__name__.split('.')
    return parts[-2]
def app_label_to_app_module(app_label):
    """
    Given the app label, returns the module of the app itself (unlike
    models.get_app, which returns the models module).
    """
    # models.get_app hands back the "<app>.models" module; strip the last
    # dotted component to obtain the app package's own name.
    models_module = models.get_app(app_label)
    package_name = ".".join(models_module.__name__.split(".")[:-1])
    if package_name not in sys.modules:
        # Not imported yet -- import it so sys.modules holds the package.
        __import__(package_name, {}, {}, [''])
    return sys.modules[package_name]
def flatten(*stack):
    """Yield the leaf items of arbitrarily nested iterables, left to right.

    Strings encountered *inside* a nested iterable are treated as leaves
    (they are not expanded character by character); iterables passed as
    top-level arguments are always iterated.

    Bug fix: previously an empty nested iterable raised StopIteration from
    inside the ``except TypeError`` handler, where it was not caught -- under
    PEP 479 (Python 3.7+) that becomes a RuntimeError, and on older versions
    it silently truncated the output. We now retry through the normal path
    instead so StopIteration is always handled by its own clause.
    """
    stack = deque(stack)
    while stack:
        try:
            x = next(stack[0])
        except TypeError:
            # First visit: the head entry is a plain iterable, not yet an
            # iterator. Convert it and retry via the loop so an immediately
            # exhausted (empty) iterator is handled by the clause below.
            stack[0] = iter(stack[0])
            continue
        except StopIteration:
            stack.popleft()
            continue
        if hasattr(x, '__iter__') and not isinstance(x, str):
            stack.appendleft(x)
        else:
            yield x
# Memoises the fully-expanded dependency list per (node, relation) pair for
# the lifetime of the process; shared by every dfs() call.
dependency_cache = {}

def _dfs(start, get_children, path):
    # Depth-first expansion of `start` and its transitive children, returning
    # dependencies-first order with duplicates removed.
    # NOTE(review): the cache key ignores ``path``; this assumes cached
    # results are path-independent and that cycles always raise before the
    # first cache write for a node -- confirm against callers.
    if (start, get_children) in dependency_cache:
        return dependency_cache[(start, get_children)]
    results = []
    # Seeing a node already on the current walk means a circular dependency.
    if start in path:
        raise exceptions.CircularDependency(path[path.index(start):] + [start])
    path.append(start)
    results.append(start)
    # Sort children by string form so the resulting order is deterministic.
    children = sorted(get_children(start), key=lambda x: str(x))
    # We need to apply all the migrations this one depends on
    for n in children:
        # Prepending puts dependencies ahead of the node that needs them.
        results = _dfs(n, get_children, path) + results
    path.pop()
    # SortedSet de-duplicates while keeping the first occurrence's position.
    results = list(SortedSet(results))
    dependency_cache[(start, get_children)] = results
    return results
def dfs(start, get_children):
    """Return *start* plus all of its transitive dependencies, ordered."""
    fresh_path = []
    return _dfs(start, get_children, fresh_path)


def depends(start, get_children):
    """Backwards-compatible alias for :func:`dfs`."""
    return dfs(start, get_children)
| bsd-3-clause |
uruz/django-rest-framework | tests/test_filters.py | 38 | 30783 | from __future__ import unicode_literals
import datetime
from decimal import Decimal
from django.conf.urls import url
from django.core.urlresolvers import reverse
from django.db import models
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import unittest
from django.utils.dateparse import parse_date
from django.utils.six.moves import reload_module
from rest_framework import filters, generics, serializers, status
from rest_framework.compat import django_filters
from rest_framework.test import APIRequestFactory
from .models import BaseFilterableItem, BasicModel, FilterableItem
# Shared request factory for every test in this module.
factory = APIRequestFactory()

# All fixtures below need django-filter; skip their definition when the
# optional dependency is missing (the tests guard themselves with skipUnless).
if django_filters:
    class FilterableItemSerializer(serializers.ModelSerializer):
        class Meta:
            model = FilterableItem

    # Basic filter on a list view.
    class FilterFieldsRootView(generics.ListCreateAPIView):
        queryset = FilterableItem.objects.all()
        serializer_class = FilterableItemSerializer
        filter_fields = ['decimal', 'date']
        filter_backends = (filters.DjangoFilterBackend,)

    # These class are used to test a filter class.
    class SeveralFieldsFilter(django_filters.FilterSet):
        text = django_filters.CharFilter(lookup_type='icontains')
        decimal = django_filters.NumberFilter(lookup_type='lt')
        date = django_filters.DateFilter(lookup_type='gt')

        class Meta:
            model = FilterableItem
            fields = ['text', 'decimal', 'date']

    class FilterClassRootView(generics.ListCreateAPIView):
        queryset = FilterableItem.objects.all()
        serializer_class = FilterableItemSerializer
        filter_class = SeveralFieldsFilter
        filter_backends = (filters.DjangoFilterBackend,)

    # These classes are used to test a misconfigured filter class.
    # (The filter targets BasicModel while the view serves FilterableItem.)
    class MisconfiguredFilter(django_filters.FilterSet):
        text = django_filters.CharFilter(lookup_type='icontains')

        class Meta:
            model = BasicModel
            fields = ['text']

    class IncorrectlyConfiguredRootView(generics.ListCreateAPIView):
        queryset = FilterableItem.objects.all()
        serializer_class = FilterableItemSerializer
        filter_class = MisconfiguredFilter
        filter_backends = (filters.DjangoFilterBackend,)

    class FilterClassDetailView(generics.RetrieveAPIView):
        queryset = FilterableItem.objects.all()
        serializer_class = FilterableItemSerializer
        filter_class = SeveralFieldsFilter
        filter_backends = (filters.DjangoFilterBackend,)

    # These classes are used to test base model filter support
    class BaseFilterableItemFilter(django_filters.FilterSet):
        text = django_filters.CharFilter()

        class Meta:
            model = BaseFilterableItem

    class BaseFilterableItemFilterRootView(generics.ListCreateAPIView):
        queryset = FilterableItem.objects.all()
        serializer_class = FilterableItemSerializer
        filter_class = BaseFilterableItemFilter
        filter_backends = (filters.DjangoFilterBackend,)

    # Regression test for #814
    class FilterFieldsQuerysetView(generics.ListCreateAPIView):
        queryset = FilterableItem.objects.all()
        serializer_class = FilterableItemSerializer
        filter_fields = ['decimal', 'date']
        filter_backends = (filters.DjangoFilterBackend,)

    class GetQuerysetView(generics.ListCreateAPIView):
        serializer_class = FilterableItemSerializer
        filter_class = SeveralFieldsFilter
        filter_backends = (filters.DjangoFilterBackend,)

        def get_queryset(self):
            return FilterableItem.objects.all()

    # URLConf used by IntegrationTestDetailFiltering (urls = 'tests.test_filters').
    urlpatterns = [
        url(r'^(?P<pk>\d+)/$', FilterClassDetailView.as_view(), name='detail-view'),
        url(r'^$', FilterClassRootView.as_view(), name='root-view'),
        url(r'^get-queryset/$', GetQuerysetView.as_view(),
            name='get-queryset-view'),
    ]
class CommonFilteringTestCase(TestCase):
    """Shared fixture: ten FilterableItem rows plus their dict form."""

    def _serialize_object(self, obj):
        """Plain-dict representation mirroring the serializer output."""
        return {
            'id': obj.id,
            'text': obj.text,
            'decimal': str(obj.decimal),
            'date': obj.date.isoformat(),
        }

    def setUp(self):
        """
        Create 10 FilterableItem instances.
        """
        first_char, first_decimal, first_date = 'a', Decimal('0.25'), datetime.date(2012, 10, 8)
        for offset in range(10):
            FilterableItem(
                text=chr(ord(first_char) + offset) * 3,  # 'aaa', 'bbb', ...
                decimal=first_decimal + offset,
                date=first_date - datetime.timedelta(days=offset * 2),
            ).save()
        self.objects = FilterableItem.objects
        self.data = [self._serialize_object(obj) for obj in self.objects.all()]
class IntegrationTestFiltering(CommonFilteringTestCase):
    """
    Integration tests for filtered list views.
    """

    @unittest.skipUnless(django_filters, 'django-filter not installed')
    def test_get_filtered_fields_root_view(self):
        """
        GET requests to paginated ListCreateAPIView should return paginated results.
        """
        view = FilterFieldsRootView.as_view()

        # Basic test with no filter.
        request = factory.get('/')
        response = view(request).render()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, self.data)

        # Tests that the decimal filter works.
        search_decimal = Decimal('2.25')
        request = factory.get('/', {'decimal': '%s' % search_decimal})
        response = view(request).render()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # Expected rows computed independently from the serialized fixture.
        expected_data = [f for f in self.data if Decimal(f['decimal']) == search_decimal]
        self.assertEqual(response.data, expected_data)

        # Tests that the date filter works.
        search_date = datetime.date(2012, 9, 22)
        request = factory.get('/', {'date': '%s' % search_date})  # search_date str: '2012-09-22'
        response = view(request).render()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        expected_data = [f for f in self.data if parse_date(f['date']) == search_date]
        self.assertEqual(response.data, expected_data)

    @unittest.skipUnless(django_filters, 'django-filter not installed')
    def test_filter_with_queryset(self):
        """
        Regression test for #814.
        """
        view = FilterFieldsQuerysetView.as_view()

        # Tests that the decimal filter works.
        search_decimal = Decimal('2.25')
        request = factory.get('/', {'decimal': '%s' % search_decimal})
        response = view(request).render()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        expected_data = [f for f in self.data if Decimal(f['decimal']) == search_decimal]
        self.assertEqual(response.data, expected_data)

    @unittest.skipUnless(django_filters, 'django-filter not installed')
    def test_filter_with_get_queryset_only(self):
        """
        Regression test for #834.
        """
        view = GetQuerysetView.as_view()
        request = factory.get('/get-queryset/')
        view(request).render()
        # Used to raise "issubclass() arg 2 must be a class or tuple of classes"
        # here when neither `model' nor `queryset' was specified.

    @unittest.skipUnless(django_filters, 'django-filter not installed')
    def test_get_filtered_class_root_view(self):
        """
        GET requests to filtered ListCreateAPIView that have a filter_class set
        should return filtered results.
        """
        view = FilterClassRootView.as_view()

        # Basic test with no filter.
        request = factory.get('/')
        response = view(request).render()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, self.data)

        # Tests that the decimal filter set with 'lt' in the filter class works.
        search_decimal = Decimal('4.25')
        request = factory.get('/', {'decimal': '%s' % search_decimal})
        response = view(request).render()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        expected_data = [f for f in self.data if Decimal(f['decimal']) < search_decimal]
        self.assertEqual(response.data, expected_data)

        # Tests that the date filter set with 'gt' in the filter class works.
        search_date = datetime.date(2012, 10, 2)
        request = factory.get('/', {'date': '%s' % search_date})  # search_date str: '2012-10-02'
        response = view(request).render()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        expected_data = [f for f in self.data if parse_date(f['date']) > search_date]
        self.assertEqual(response.data, expected_data)

        # Tests that the text filter set with 'icontains' in the filter class works.
        search_text = 'ff'
        request = factory.get('/', {'text': '%s' % search_text})
        response = view(request).render()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        expected_data = [f for f in self.data if search_text in f['text'].lower()]
        self.assertEqual(response.data, expected_data)

        # Tests that multiple filters works.
        search_decimal = Decimal('5.25')
        search_date = datetime.date(2012, 10, 2)
        request = factory.get('/', {
            'decimal': '%s' % (search_decimal,),
            'date': '%s' % (search_date,)
        })
        response = view(request).render()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        expected_data = [f for f in self.data if parse_date(f['date']) > search_date and
                         Decimal(f['decimal']) < search_decimal]
        self.assertEqual(response.data, expected_data)

    @unittest.skipUnless(django_filters, 'django-filter not installed')
    def test_incorrectly_configured_filter(self):
        """
        An error should be displayed when the filter class is misconfigured.
        """
        view = IncorrectlyConfiguredRootView.as_view()

        request = factory.get('/')
        self.assertRaises(AssertionError, view, request)

    @unittest.skipUnless(django_filters, 'django-filter not installed')
    def test_base_model_filter(self):
        """
        The `get_filter_class` model checks should allow base model filters.
        """
        view = BaseFilterableItemFilterRootView.as_view()

        request = factory.get('/?text=aaa')
        response = view(request).render()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 1)

    @unittest.skipUnless(django_filters, 'django-filter not installed')
    def test_unknown_filter(self):
        """
        GET requests with filters that aren't configured should return 200.
        """
        view = FilterFieldsRootView.as_view()

        search_integer = 10
        request = factory.get('/', {'integer': '%s' % search_integer})
        response = view(request).render()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
class IntegrationTestDetailFiltering(CommonFilteringTestCase):
    """
    Integration tests for filtered detail views.
    """
    # Routes these tests through the module-level urlpatterns above.
    urls = 'tests.test_filters'

    def _get_url(self, item):
        # Detail URL for a single item, resolved by primary key.
        return reverse('detail-view', kwargs=dict(pk=item.pk))

    @unittest.skipUnless(django_filters, 'django-filter not installed')
    def test_get_filtered_detail_view(self):
        """
        GET requests to filtered RetrieveAPIView that have a filter_class set
        should return filtered results.
        """
        item = self.objects.all()[0]
        data = self._serialize_object(item)

        # Basic test with no filter.
        response = self.client.get(self._get_url(item))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, data)

        # Tests that the decimal filter set that should fail.
        # (Item's decimal is above the 'lt' cutoff, so the filtered queryset
        # excludes it and the detail lookup 404s.)
        search_decimal = Decimal('4.25')
        high_item = self.objects.filter(decimal__gt=search_decimal)[0]
        response = self.client.get(
            '{url}'.format(url=self._get_url(high_item)),
            {'decimal': '{param}'.format(param=search_decimal)})
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

        # Tests that the decimal filter set that should succeed.
        search_decimal = Decimal('4.25')
        low_item = self.objects.filter(decimal__lt=search_decimal)[0]
        low_item_data = self._serialize_object(low_item)
        response = self.client.get(
            '{url}'.format(url=self._get_url(low_item)),
            {'decimal': '{param}'.format(param=search_decimal)})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, low_item_data)

        # Tests that multiple filters works.
        search_decimal = Decimal('5.25')
        search_date = datetime.date(2012, 10, 2)
        valid_item = self.objects.filter(decimal__lt=search_decimal, date__gt=search_date)[0]
        valid_item_data = self._serialize_object(valid_item)
        response = self.client.get(
            '{url}'.format(url=self._get_url(valid_item)), {
                'decimal': '{decimal}'.format(decimal=search_decimal),
                'date': '{date}'.format(date=search_date)
            })
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, valid_item_data)
# Minimal model + serializer used by the SearchFilter tests below.
class SearchFilterModel(models.Model):
    title = models.CharField(max_length=20)
    text = models.CharField(max_length=100)


class SearchFilterSerializer(serializers.ModelSerializer):
    class Meta:
        model = SearchFilterModel
class SearchFilterTests(TestCase):
    """Exercises each SearchFilter lookup prefix: plain, '=', '^', '$'."""

    def setUp(self):
        # Sequence of title/text is:
        #
        # z   abc
        # zz  bcd
        # zzz cde
        # ...
        for idx in range(10):
            title = 'z' * (idx + 1)
            text = (
                chr(idx + ord('a')) +
                chr(idx + ord('b')) +
                chr(idx + ord('c'))
            )
            SearchFilterModel(title=title, text=text).save()

    def test_search(self):
        """Default (no prefix) search matches via icontains."""
        class SearchListView(generics.ListAPIView):
            queryset = SearchFilterModel.objects.all()
            serializer_class = SearchFilterSerializer
            filter_backends = (filters.SearchFilter,)
            search_fields = ('title', 'text')

        view = SearchListView.as_view()
        request = factory.get('/', {'search': 'b'})
        response = view(request)
        self.assertEqual(
            response.data,
            [
                {'id': 1, 'title': 'z', 'text': 'abc'},
                {'id': 2, 'title': 'zz', 'text': 'bcd'}
            ]
        )

    def test_exact_search(self):
        """'=' prefix restricts a field to exact matches."""
        class SearchListView(generics.ListAPIView):
            queryset = SearchFilterModel.objects.all()
            serializer_class = SearchFilterSerializer
            filter_backends = (filters.SearchFilter,)
            search_fields = ('=title', 'text')

        view = SearchListView.as_view()
        request = factory.get('/', {'search': 'zzz'})
        response = view(request)
        self.assertEqual(
            response.data,
            [
                {'id': 3, 'title': 'zzz', 'text': 'cde'}
            ]
        )

    def test_startswith_search(self):
        """'^' prefix restricts a field to istartswith matches."""
        class SearchListView(generics.ListAPIView):
            queryset = SearchFilterModel.objects.all()
            serializer_class = SearchFilterSerializer
            filter_backends = (filters.SearchFilter,)
            search_fields = ('title', '^text')

        view = SearchListView.as_view()
        request = factory.get('/', {'search': 'b'})
        response = view(request)
        self.assertEqual(
            response.data,
            [
                {'id': 2, 'title': 'zz', 'text': 'bcd'}
            ]
        )

    def test_regexp_search(self):
        """'$' prefix treats each search term as an iregex pattern."""
        class SearchListView(generics.ListAPIView):
            queryset = SearchFilterModel.objects.all()
            serializer_class = SearchFilterSerializer
            filter_backends = (filters.SearchFilter,)
            search_fields = ('$title', '$text')

        view = SearchListView.as_view()
        request = factory.get('/', {'search': 'z{2} ^b'})
        response = view(request)
        self.assertEqual(
            response.data,
            [
                {'id': 2, 'title': 'zz', 'text': 'bcd'}
            ]
        )

    def test_search_with_nonstandard_search_param(self):
        """SEARCH_PARAM setting renames the query parameter; needs a module
        reload because the setting is read at import time."""
        with override_settings(REST_FRAMEWORK={'SEARCH_PARAM': 'query'}):
            reload_module(filters)

            class SearchListView(generics.ListAPIView):
                queryset = SearchFilterModel.objects.all()
                serializer_class = SearchFilterSerializer
                filter_backends = (filters.SearchFilter,)
                search_fields = ('title', 'text')

            view = SearchListView.as_view()
            request = factory.get('/', {'query': 'b'})
            response = view(request)
            self.assertEqual(
                response.data,
                [
                    {'id': 1, 'title': 'z', 'text': 'abc'},
                    {'id': 2, 'title': 'zz', 'text': 'bcd'}
                ]
            )

        # Restore the default-configured filters module for later tests.
        reload_module(filters)
# Models + serializer for searching across a many-to-many relation.
class AttributeModel(models.Model):
    label = models.CharField(max_length=32)


class SearchFilterModelM2M(models.Model):
    title = models.CharField(max_length=20)
    text = models.CharField(max_length=100)
    attributes = models.ManyToManyField(AttributeModel)


class SearchFilterM2MSerializer(serializers.ModelSerializer):
    class Meta:
        model = SearchFilterModelM2M
class SearchFilterM2MTests(TestCase):
    """Search must work across many-to-many relations without duplicating rows."""

    def setUp(self):
        # Attribute labels are 'w', 'ww', 'www'; only the 'zz' row below is
        # linked to them.
        for idx in range(3):
            label = 'w' * (idx + 1)
            # Bug fix: these instances were created but never persisted, so
            # the pks 1-3 added below referenced rows that did not exist.
            AttributeModel(label=label).save()
        # Sequence of title/text is 'z'/'abc', 'zz'/'bcd', 'zzz'/'cde', ...
        for idx in range(10):
            title = 'z' * (idx + 1)
            text = (
                chr(idx + ord('a')) +
                chr(idx + ord('b')) +
                chr(idx + ord('c'))
            )
            SearchFilterModelM2M(title=title, text=text).save()
        SearchFilterModelM2M.objects.get(title='zz').attributes.add(1, 2, 3)

    def test_m2m_search(self):
        """A search spanning an M2M field must not return duplicate results."""
        class SearchListView(generics.ListAPIView):
            queryset = SearchFilterModelM2M.objects.all()
            serializer_class = SearchFilterM2MSerializer
            filter_backends = (filters.SearchFilter,)
            search_fields = ('=title', 'text', 'attributes__label')

        view = SearchListView.as_view()
        request = factory.get('/', {'search': 'zz'})
        response = view(request)
        self.assertEqual(len(response.data), 1)
# Models + serializers for the OrderingFilter / DjangoFilterBackend ordering
# tests below.
class OrderingFilterModel(models.Model):
    title = models.CharField(max_length=20)
    text = models.CharField(max_length=100)


class OrderingFilterRelatedModel(models.Model):
    # Reverse accessor "relateds" is used for aggregate ordering tests.
    related_object = models.ForeignKey(OrderingFilterModel,
                                       related_name="relateds")


class OrderingFilterSerializer(serializers.ModelSerializer):
    class Meta:
        model = OrderingFilterModel


class DjangoFilterOrderingModel(models.Model):
    date = models.DateField()
    text = models.CharField(max_length=10)

    class Meta:
        # Model-level default ordering, newest first.
        ordering = ['-date']


class DjangoFilterOrderingSerializer(serializers.ModelSerializer):
    class Meta:
        model = DjangoFilterOrderingModel
class DjangoFilterOrderingTests(TestCase):
    """DjangoFilterBackend must preserve the model/view default ordering."""

    def setUp(self):
        data = [{
            'date': datetime.date(2012, 10, 8),
            'text': 'abc'
        }, {
            'date': datetime.date(2013, 10, 8),
            'text': 'bcd'
        }, {
            'date': datetime.date(2014, 10, 8),
            'text': 'cde'
        }]
        for d in data:
            DjangoFilterOrderingModel.objects.create(**d)

    @unittest.skipUnless(django_filters, 'django-filter not installed')
    def test_default_ordering(self):
        class DjangoFilterOrderingView(generics.ListAPIView):
            serializer_class = DjangoFilterOrderingSerializer
            queryset = DjangoFilterOrderingModel.objects.all()
            filter_backends = (filters.DjangoFilterBackend,)
            filter_fields = ['text']
            ordering = ('-date',)

        view = DjangoFilterOrderingView.as_view()
        request = factory.get('/')
        response = view(request)
        # Newest first, per the '-date' default ordering.
        self.assertEqual(
            response.data,
            [
                {'id': 3, 'date': '2014-10-08', 'text': 'cde'},
                {'id': 2, 'date': '2013-10-08', 'text': 'bcd'},
                {'id': 1, 'date': '2012-10-08', 'text': 'abc'}
            ]
        )
class OrderingFilterTests(TestCase):
    """Exercises OrderingFilter: explicit, reverse, invalid, and default
    ordering, plus aggregate fields and a renamed query parameter."""

    def setUp(self):
        # Sequence of title/text is:
        #
        # zyx abc
        # yxw bcd
        # xwv cde
        for idx in range(3):
            title = (
                chr(ord('z') - idx) +
                chr(ord('y') - idx) +
                chr(ord('x') - idx)
            )
            text = (
                chr(idx + ord('a')) +
                chr(idx + ord('b')) +
                chr(idx + ord('c'))
            )
            OrderingFilterModel(title=title, text=text).save()

    def test_ordering(self):
        """?ordering=<allowed field> sorts ascending by that field."""
        class OrderingListView(generics.ListAPIView):
            queryset = OrderingFilterModel.objects.all()
            serializer_class = OrderingFilterSerializer
            filter_backends = (filters.OrderingFilter,)
            ordering = ('title',)
            ordering_fields = ('text',)

        view = OrderingListView.as_view()
        request = factory.get('/', {'ordering': 'text'})
        response = view(request)
        self.assertEqual(
            response.data,
            [
                {'id': 1, 'title': 'zyx', 'text': 'abc'},
                {'id': 2, 'title': 'yxw', 'text': 'bcd'},
                {'id': 3, 'title': 'xwv', 'text': 'cde'},
            ]
        )

    def test_reverse_ordering(self):
        """A '-' prefix reverses the sort direction."""
        class OrderingListView(generics.ListAPIView):
            queryset = OrderingFilterModel.objects.all()
            serializer_class = OrderingFilterSerializer
            filter_backends = (filters.OrderingFilter,)
            ordering = ('title',)
            ordering_fields = ('text',)

        view = OrderingListView.as_view()
        request = factory.get('/', {'ordering': '-text'})
        response = view(request)
        self.assertEqual(
            response.data,
            [
                {'id': 3, 'title': 'xwv', 'text': 'cde'},
                {'id': 2, 'title': 'yxw', 'text': 'bcd'},
                {'id': 1, 'title': 'zyx', 'text': 'abc'},
            ]
        )

    def test_incorrectfield_ordering(self):
        """An unknown ordering field is ignored; the default applies."""
        class OrderingListView(generics.ListAPIView):
            queryset = OrderingFilterModel.objects.all()
            serializer_class = OrderingFilterSerializer
            filter_backends = (filters.OrderingFilter,)
            ordering = ('title',)
            ordering_fields = ('text',)

        view = OrderingListView.as_view()
        request = factory.get('/', {'ordering': 'foobar'})
        response = view(request)
        self.assertEqual(
            response.data,
            [
                {'id': 3, 'title': 'xwv', 'text': 'cde'},
                {'id': 2, 'title': 'yxw', 'text': 'bcd'},
                {'id': 1, 'title': 'zyx', 'text': 'abc'},
            ]
        )

    def test_default_ordering(self):
        """Without an ordering parameter, the view's `ordering` applies."""
        class OrderingListView(generics.ListAPIView):
            queryset = OrderingFilterModel.objects.all()
            serializer_class = OrderingFilterSerializer
            filter_backends = (filters.OrderingFilter,)
            ordering = ('title',)
            # Bug fix: this attribute was misspelled "oredering_fields",
            # leaving it silently unused; corrected to match the sibling tests.
            ordering_fields = ('text',)

        view = OrderingListView.as_view()
        request = factory.get('')
        response = view(request)
        self.assertEqual(
            response.data,
            [
                {'id': 3, 'title': 'xwv', 'text': 'cde'},
                {'id': 2, 'title': 'yxw', 'text': 'bcd'},
                {'id': 1, 'title': 'zyx', 'text': 'abc'},
            ]
        )

    def test_default_ordering_using_string(self):
        """`ordering` may be a plain string rather than a tuple."""
        class OrderingListView(generics.ListAPIView):
            queryset = OrderingFilterModel.objects.all()
            serializer_class = OrderingFilterSerializer
            filter_backends = (filters.OrderingFilter,)
            ordering = 'title'
            ordering_fields = ('text',)

        view = OrderingListView.as_view()
        request = factory.get('')
        response = view(request)
        self.assertEqual(
            response.data,
            [
                {'id': 3, 'title': 'xwv', 'text': 'cde'},
                {'id': 2, 'title': 'yxw', 'text': 'bcd'},
                {'id': 1, 'title': 'zyx', 'text': 'abc'},
            ]
        )

    def test_ordering_by_aggregate_field(self):
        """Annotated aggregate fields are orderable with '__all__'."""
        # create some related models to aggregate order by
        num_objs = [2, 5, 3]
        for obj, num_relateds in zip(OrderingFilterModel.objects.all(),
                                     num_objs):
            for _ in range(num_relateds):
                new_related = OrderingFilterRelatedModel(
                    related_object=obj
                )
                new_related.save()

        class OrderingListView(generics.ListAPIView):
            serializer_class = OrderingFilterSerializer
            filter_backends = (filters.OrderingFilter,)
            ordering = 'title'
            ordering_fields = '__all__'
            queryset = OrderingFilterModel.objects.all().annotate(
                models.Count("relateds"))

        view = OrderingListView.as_view()
        request = factory.get('/', {'ordering': 'relateds__count'})
        response = view(request)
        self.assertEqual(
            response.data,
            [
                {'id': 1, 'title': 'zyx', 'text': 'abc'},
                {'id': 3, 'title': 'xwv', 'text': 'cde'},
                {'id': 2, 'title': 'yxw', 'text': 'bcd'},
            ]
        )

    def test_ordering_with_nonstandard_ordering_param(self):
        """ORDERING_PARAM setting renames the query parameter; needs a
        module reload because the setting is read at import time."""
        with override_settings(REST_FRAMEWORK={'ORDERING_PARAM': 'order'}):
            reload_module(filters)

            class OrderingListView(generics.ListAPIView):
                queryset = OrderingFilterModel.objects.all()
                serializer_class = OrderingFilterSerializer
                filter_backends = (filters.OrderingFilter,)
                ordering = ('title',)
                ordering_fields = ('text',)

            view = OrderingListView.as_view()
            request = factory.get('/', {'order': 'text'})
            response = view(request)
            self.assertEqual(
                response.data,
                [
                    {'id': 1, 'title': 'zyx', 'text': 'abc'},
                    {'id': 2, 'title': 'yxw', 'text': 'bcd'},
                    {'id': 3, 'title': 'xwv', 'text': 'cde'},
                ]
            )

        # Restore the default-configured filters module for later tests.
        reload_module(filters)
class SensitiveOrderingFilterModel(models.Model):
    username = models.CharField(max_length=20)
    password = models.CharField(max_length=100)


# Three different styles of serializer.
# All should allow ordering by username, but not by password.
class SensitiveDataSerializer1(serializers.ModelSerializer):
    # Password omitted entirely from the serializer fields.
    username = serializers.CharField()

    class Meta:
        model = SensitiveOrderingFilterModel
        fields = ('id', 'username')


class SensitiveDataSerializer2(serializers.ModelSerializer):
    # Password present but write-only, so it must not be orderable.
    username = serializers.CharField()
    password = serializers.CharField(write_only=True)

    class Meta:
        model = SensitiveOrderingFilterModel
        fields = ('id', 'username', 'password')


class SensitiveDataSerializer3(serializers.ModelSerializer):
    # Username exposed under a different name via `source`.
    user = serializers.CharField(source='username')

    class Meta:
        model = SensitiveOrderingFilterModel
        fields = ('id', 'user')
class SensitiveOrderingFilterTests(TestCase):
    """OrderingFilter must only allow ordering by serializer-exposed fields,
    never by hidden/write-only ones like passwords."""

    def setUp(self):
        for idx in range(3):
            username = {0: 'userA', 1: 'userB', 2: 'userC'}[idx]
            password = {0: 'passA', 1: 'passC', 2: 'passB'}[idx]
            SensitiveOrderingFilterModel(username=username, password=password).save()

    def test_order_by_serializer_fields(self):
        """Ordering by an exposed field works for all serializer styles."""
        for serializer_cls in [
            SensitiveDataSerializer1,
            SensitiveDataSerializer2,
            SensitiveDataSerializer3
        ]:
            class OrderingListView(generics.ListAPIView):
                queryset = SensitiveOrderingFilterModel.objects.all().order_by('username')
                filter_backends = (filters.OrderingFilter,)
                serializer_class = serializer_cls

            view = OrderingListView.as_view()
            request = factory.get('/', {'ordering': '-username'})
            response = view(request)

            # Serializer3 renames the field via source='username'.
            if serializer_cls == SensitiveDataSerializer3:
                username_field = 'user'
            else:
                username_field = 'username'

            # Note: Inverse username ordering correctly applied.
            self.assertEqual(
                response.data,
                [
                    {'id': 3, username_field: 'userC'},
                    {'id': 2, username_field: 'userB'},
                    {'id': 1, username_field: 'userA'},
                ]
            )

    def test_cannot_order_by_non_serializer_fields(self):
        """Ordering by the hidden password field must be silently ignored."""
        for serializer_cls in [
            SensitiveDataSerializer1,
            SensitiveDataSerializer2,
            SensitiveDataSerializer3
        ]:
            class OrderingListView(generics.ListAPIView):
                queryset = SensitiveOrderingFilterModel.objects.all().order_by('username')
                filter_backends = (filters.OrderingFilter,)
                serializer_class = serializer_cls

            view = OrderingListView.as_view()
            request = factory.get('/', {'ordering': 'password'})
            response = view(request)

            if serializer_cls == SensitiveDataSerializer3:
                username_field = 'user'
            else:
                username_field = 'username'

            # Note: The passwords are not in order. Default ordering is used.
            self.assertEqual(
                response.data,
                [
                    {'id': 1, username_field: 'userA'},  # PassB
                    {'id': 2, username_field: 'userB'},  # PassC
                    {'id': 3, username_field: 'userC'},  # PassA
                ]
            )
| bsd-2-clause |
bmya/odoo-support | web_support_client_doc/__openerp__.py | 1 | 1875 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo addon manifest for "Web Support Website Doc - Client".
{
    'name': 'Web Support Website Doc - Client',
    'version': '8.0.1.0.0',
    'category': 'Support',
    'sequence': 14,
    'summary': '',
    'description': """
Web Support Client with Website Documentation Integration
=========================================================
TODO: check if we need this module or we remove it
""",
    'author': 'ADHOC SA',
    'website': 'www.adhoc.com.ar',
    'license': 'AGPL-3',
    'images': [
    ],
    'depends': [
        'web_support_client',
        'website_doc',
    ],
    'data': [
        'views/support_view.xml',
        'data/cron.xml',
    ],
    'demo': [
    ],
    'test': [
    ],
    # We have deprecated this module because we added a link to the ADHOC
    # documentation on web_support_client directly.
    # NOTE(review): 'installable': False together with 'auto_install': True
    # looks contradictory -- installable=False prevents installation anyway;
    # confirm the auto_install flag is intentional.
    'installable': False,
    'auto_install': True,
    'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| lgpl-3.0 |
haudren/scipy | scipy/linalg/tests/test_solvers.py | 95 | 9591 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.linalg import inv
from numpy.testing import TestCase, rand, run_module_suite, assert_raises, \
assert_equal, assert_almost_equal, assert_array_almost_equal, assert_, \
assert_allclose
from scipy.linalg import solve_sylvester, solve_lyapunov, \
solve_discrete_lyapunov, solve_continuous_are, solve_discrete_are
class TestSolveLyapunov(TestCase):
    """Shared (a, q) pairs checked against both the continuous and the
    discrete Lyapunov solvers."""

    cases = [
        (np.array([[1, 2], [3, 4]]),
         np.array([[9, 10], [11, 12]])),
        # a, q all complex.
        (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
         np.array([[2.0-2j, 2.0+2j],[-1.0-1j, 2.0]])),
        # a real; q complex.
        (np.array([[1.0, 2.0], [3.0, 5.0]]),
         np.array([[2.0-2j, 2.0+2j],[-1.0-1j, 2.0]])),
        # a complex; q real.
        (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
         np.array([[2.0, 2.0],[-1.0, 2.0]])),
        # An example from Kitagawa, 1977
        (np.array([[3, 9, 5, 1, 4], [1, 2, 3, 8, 4], [4, 6, 6, 6, 3],
                   [1, 5, 2, 0, 7], [5, 3, 3, 1, 5]]),
         np.array([[2, 4, 1, 0, 1], [4, 1, 0, 2, 0], [1, 0, 3, 0, 3],
                   [0, 2, 0, 1, 0], [1, 0, 3, 0, 4]])),
        # Companion matrix example. a complex; q real; a.shape[0] = 11
        (np.array([[0.100+0.j, 0.091+0.j, 0.082+0.j, 0.073+0.j, 0.064+0.j,
                    0.055+0.j, 0.046+0.j, 0.037+0.j, 0.028+0.j, 0.019+0.j,
                    0.010+0.j],
                   [1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j,
                    0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j,
                    0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j,
                    0.000+0.j]]),
         np.eye(11)),
        # https://github.com/scipy/scipy/issues/4176
        (np.matrix([[0, 1], [-1/2, -1]]),
         (np.matrix([0, 3]).T * np.matrix([0, 3]).T.T)),
        # https://github.com/scipy/scipy/issues/4176
        (np.matrix([[0, 1], [-1/2, -1]]),
         (np.array(np.matrix([0, 3]).T * np.matrix([0, 3]).T.T))),
    ]

    def check_continuous_case(self, a, q):
        # Verifies the continuous-time residual: a x + x a^H == q.
        x = solve_lyapunov(a, q)
        assert_array_almost_equal(np.dot(a, x) + np.dot(x, a.conj().transpose()), q)

    def check_discrete_case(self, a, q, method=None):
        # Verifies the discrete-time residual: a x a^H - x == -q.
        x = solve_discrete_lyapunov(a, q, method=method)
        assert_array_almost_equal(np.dot(np.dot(a, x),a.conj().transpose()) - x, -1.0*q)

    def test_cases(self):
        # Every (a, q) pair must satisfy both solvers, including both
        # discrete solution methods.
        for case in self.cases:
            self.check_continuous_case(case[0], case[1])
            self.check_discrete_case(case[0], case[1])
            self.check_discrete_case(case[0], case[1], method='direct')
            self.check_discrete_case(case[0], case[1], method='bilinear')
class TestSolveContinuousARE(TestCase):
    """(a, b, q, r) cases for the continuous algebraic Riccati equation."""

    cases = [
        # An example from Laub, A. J.
        # (http://dspace.mit.edu/bitstream/handle/1721.1/1301/R-0859-05666488.pdf)
        (np.matrix([[0, 1], [0, 0]]),
         np.matrix([[0,], [1,]]),
         np.matrix([[1, 0], [0, 2]]),
         np.matrix([[1,],])),
        # Difficult from a numerical standpoint, again from Laub, A. J.
        (np.matrix([[4, 3], [-9.0/2.0, -7.0/2.0]]),
         np.matrix([[1,], [-1,]]),
         np.matrix([[9, 6], [6, 4]]),
         np.matrix([[1,],])),
        # Complex a; real b, q, r
        (np.matrix([[0, 1-2j], [0, -3j]]),
         np.matrix([[0,], [1,]]),
         np.matrix([[1, 0], [0, 2]]),
         np.matrix([[1,],])),
        # Real a, q, r; complex b
        (np.matrix([[0, 1], [0, -1]]),
         np.matrix([[-2j,], [1j,]]),
         np.matrix([[1, 0], [0, 2]]),
         np.matrix([[1,],])),
        # Real a, b; complex q, r
        (np.matrix([[0, 1], [0, -1]]),
         np.matrix([[1, 2], [1, 3]]),
         np.matrix([[1, -3j], [1-1j, 2]]),
         np.matrix([[-2j, 2], [1j, 3]])),
    ]

    def check_case(self, a, b, q, r):
        """Checks if (A'X + XA - XBR^-1B'X+Q=0) is true"""
        x = solve_continuous_are(a, b, q, r)
        # Residual of the CARE must vanish (to almost-equal precision).
        assert_array_almost_equal(
            a.getH()*x + x*a - x*b*inv(r)*b.getH()*x + q, 0.0)

    def test_cases(self):
        for case in self.cases:
            self.check_case(case[0], case[1], case[2], case[3])
class TestSolveDiscreteARE(TestCase):
cases = [
# Difficult from a numerical standpoint, again from Laub, A. J.
# (http://dspace.mit.edu/bitstream/handle/1721.1/1301/R-0859-05666488.pdf)
(np.matrix([[4, 3], [-9.0/2.0, -7.0/2.0]]),
np.matrix([[1,], [-1,]]),
np.matrix([[9, 6], [6, 4]]),
np.matrix([[1,],])),
# Another example from Laub
(np.matrix([[0.9512, 0], [0, 0.9048]]),
np.matrix([[4.877, 4.877], [-1.1895, 3.569]]),
np.matrix([[0.005, 0],[0, 0.02]]),
np.matrix([[1.0/3.0, 0],[0, 3]])),
# Complex a; real b, q, r
(np.matrix([[2, 1-2j], [0, -3j]]),
np.matrix([[0,], [1,]]),
np.matrix([[1, 0], [0, 2]]),
np.matrix([[1,],])),
# Real a, q, r; complex b
(np.matrix([[2, 1], [0, -1]]),
np.matrix([[-2j,], [1j,]]),
np.matrix([[1, 0], [0, 2]]),
np.matrix([[1,],])),
# Real a, b; complex q, r
(np.matrix([[3, 1], [0, -1]]),
np.matrix([[1, 2], [1, 3]]),
np.matrix([[1, -3j], [1-1j, 2]]),
np.matrix([[-2j, 2], [1j, 3]])),
]
def check_case(self, a, b, q, r):
"""Checks if X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q) is true"""
x = solve_discrete_are(a, b, q, r)
assert_array_almost_equal(
a.getH()*x*a-(a.getH()*x*b)*inv(r+b.getH()*x*b)*(b.getH()*x*a)+q-x, 0.0)
def test_cases(self):
for case in self.cases:
self.check_case(case[0], case[1], case[2], case[3])
class TestSolveSylvester(TestCase):
    """solve_sylvester solutions must satisfy a*x + x*b = c."""
    cases = [
        # a, b, c all real.
        (np.array([[1, 2], [0, 4]]),
         np.array([[5, 6], [0, 8]]),
         np.array([[9, 10], [11, 12]])),
        # a, b, c all real, 4x4. a and b have non-trival 2x2 blocks in their
        # quasi-triangular form.
        (np.array([[1.0, 0, 0, 0], [0, 1.0, 2.0, 0.0], [0, 0, 3.0, -4], [0, 0, 2, 5]]),
         np.array([[2.0, 0, 0, 1.0], [0, 1.0, 0.0, 0.0], [0, 0, 1.0, -1], [0, 0, 1, 1]]),
         np.array([[1.0, 0, 0, 0], [0, 1.0, 0, 0], [0, 0, 1.0, 0], [0, 0, 0, 1.0]])),
        # a, b, c all complex.
        (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
         np.array([[-1.0, 2j], [3.0, 4.0]]),
         np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
        # a and b real; c complex.
        (np.array([[1.0, 2.0], [3.0, 5.0]]),
         np.array([[-1.0, 0], [3.0, 4.0]]),
         np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
        # a and c complex; b real.
        (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
         np.array([[-1.0, 0], [3.0, 4.0]]),
         np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
        # a complex; b and c real.
        (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
         np.array([[-1.0, 0], [3.0, 4.0]]),
         np.array([[2.0, 2.0], [-1.0, 2.0]])),
        # not square matrices, real
        (np.array([[8, 1, 6], [3, 5, 7], [4, 9, 2]]),
         np.array([[2, 3], [4, 5]]),
         np.array([[1, 2], [3, 4], [5, 6]])),
        # not square matrices, complex
        (np.array([[8, 1j, 6+2j], [3, 5, 7], [4, 9, 2]]),
         np.array([[2, 3], [4, 5-1j]]),
         np.array([[1, 2j], [3, 4j], [5j, 6+7j]])),
    ]

    def check_case(self, a, b, c):
        """The computed x must reconstruct c via a*x + x*b."""
        x = solve_sylvester(a, b, c)
        reconstructed = np.dot(a, x) + np.dot(x, b)
        assert_array_almost_equal(reconstructed, c)

    def test_cases(self):
        for a, b, c in self.cases:
            self.check_case(a, b, c)

    def test_trivial(self):
        """2x2 identity `a` and scalar `b`: solution is c / 2."""
        eye2 = np.array([[1.0, 0.0], [0.0, 1.0]])
        scalar_b = np.array([[1.0]])
        rhs = np.array([2.0, 2.0]).reshape(-1, 1)
        x = solve_sylvester(eye2, scalar_b, rhs)
        assert_array_almost_equal(x, np.array([1.0, 1.0]).reshape(-1, 1))
# Entry point: when executed directly, run this module's tests with the
# (nose-era) numpy test runner; `run_module_suite` comes from numpy.testing.
if __name__ == "__main__":
    run_module_suite()
import sys
from decimal import Decimal
import numpy as np
from numpy.core import *
from numpy.random import rand, randint, randn
from numpy.testing import *
from numpy.testing.utils import WarningManager
from numpy.core.multiarray import dot as dot_
import warnings
class Vec(object):
    """Tiny vector wrapper used to exercise dot() over object arrays."""
    def __init__(self, sequence=None):
        # An omitted sequence produces an empty vector.
        self.array = array([] if sequence is None else sequence)

    def __add__(self, other):
        result = Vec()
        result.array = self.array + other.array
        return result

    def __sub__(self, other):
        result = Vec()
        result.array = self.array - other.array
        return result

    def __mul__(self, other):  # with scalar
        result = Vec(self.array.copy())
        result.array *= other
        return result

    def __rmul__(self, other):
        return self * other
class TestDot(TestCase):
def setUp(self):
self.A = rand(10,8)
self.b1 = rand(8,1)
self.b2 = rand(8)
self.b3 = rand(1,8)
self.b4 = rand(10)
self.N = 14
def test_matmat(self):
A = self.A
c1 = dot(A.transpose(), A)
c2 = dot_(A.transpose(), A)
assert_almost_equal(c1, c2, decimal=self.N)
def test_matvec(self):
A, b1 = self.A, self.b1
c1 = dot(A, b1)
c2 = dot_(A, b1)
assert_almost_equal(c1, c2, decimal=self.N)
def test_matvec2(self):
A, b2 = self.A, self.b2
c1 = dot(A, b2)
c2 = dot_(A, b2)
assert_almost_equal(c1, c2, decimal=self.N)
def test_vecmat(self):
A, b4 = self.A, self.b4
c1 = dot(b4, A)
c2 = dot_(b4, A)
assert_almost_equal(c1, c2, decimal=self.N)
def test_vecmat2(self):
b3, A = self.b3, self.A
c1 = dot(b3, A.transpose())
c2 = dot_(b3, A.transpose())
assert_almost_equal(c1, c2, decimal=self.N)
def test_vecmat3(self):
A, b4 = self.A, self.b4
c1 = dot(A.transpose(),b4)
c2 = dot_(A.transpose(),b4)
assert_almost_equal(c1, c2, decimal=self.N)
def test_vecvecouter(self):
b1, b3 = self.b1, self.b3
c1 = dot(b1, b3)
c2 = dot_(b1, b3)
assert_almost_equal(c1, c2, decimal=self.N)
def test_vecvecinner(self):
b1, b3 = self.b1, self.b3
c1 = dot(b3, b1)
c2 = dot_(b3, b1)
assert_almost_equal(c1, c2, decimal=self.N)
def test_columnvect1(self):
b1 = ones((3,1))
b2 = [5.3]
c1 = dot(b1,b2)
c2 = dot_(b1,b2)
assert_almost_equal(c1, c2, decimal=self.N)
def test_columnvect2(self):
b1 = ones((3,1)).transpose()
b2 = [6.2]
c1 = dot(b2,b1)
c2 = dot_(b2,b1)
assert_almost_equal(c1, c2, decimal=self.N)
def test_vecscalar(self):
b1 = rand(1,1)
b2 = rand(1,8)
c1 = dot(b1,b2)
c2 = dot_(b1,b2)
assert_almost_equal(c1, c2, decimal=self.N)
def test_vecscalar2(self):
b1 = rand(8,1)
b2 = rand(1,1)
c1 = dot(b1,b2)
c2 = dot_(b1,b2)
assert_almost_equal(c1, c2, decimal=self.N)
def test_all(self):
dims = [(),(1,),(1,1)]
for dim1 in dims:
for dim2 in dims:
arg1 = rand(*dim1)
arg2 = rand(*dim2)
c1 = dot(arg1, arg2)
c2 = dot_(arg1, arg2)
assert_(c1.shape == c2.shape)
assert_almost_equal(c1, c2, decimal=self.N)
def test_vecobject(self):
U_non_cont = transpose([[1.,1.],[1.,2.]])
U_cont = ascontiguousarray(U_non_cont)
x = array([Vec([1.,0.]),Vec([0.,1.])])
zeros = array([Vec([0.,0.]),Vec([0.,0.])])
zeros_test = dot(U_cont,x) - dot(U_non_cont,x)
assert_equal(zeros[0].array, zeros_test[0].array)
assert_equal(zeros[1].array, zeros_test[1].array)
class TestResize(TestCase):
    """resize() should cycle the data to fill the requested shape."""
    def test_copies(self):
        base = array([[1,2],[3,4]])
        expectations = [
            ((2,4), array([[1,2,3,4],[1,2,3,4]])),
            ((4,2), array([[1,2],[3,4],[1,2],[3,4]])),
            ((4,3), array([[1,2,3],[4,1,2],[3,4,1],[2,3,4]])),
        ]
        for shape, want in expectations:
            assert_equal(resize(base, shape), want)

    def test_zeroresize(self):
        base = array([[1,2],[3,4]])
        assert_equal(resize(base, (0,)), array([]))
class TestNonarrayArgs(TestCase):
# check that non-array arguments to functions wrap them in arrays
def test_squeeze(self):
A = [[[1,1,1],[2,2,2],[3,3,3]]]
assert_(squeeze(A).shape == (3,3))
def test_cumproduct(self):
A = [[1,2,3],[4,5,6]]
assert_(all(cumproduct(A) == array([1,2,6,24,120,720])))
def test_size(self):
A = [[1,2,3],[4,5,6]]
assert_(size(A) == 6)
assert_(size(A,0) == 2)
assert_(size(A,1) == 3)
def test_mean(self):
A = [[1,2,3],[4,5,6]]
assert_(mean(A) == 3.5)
assert_(all(mean(A,0) == array([2.5,3.5,4.5])))
assert_(all(mean(A,1) == array([2.,5.])))
def test_std(self):
A = [[1,2,3],[4,5,6]]
assert_almost_equal(std(A), 1.707825127659933)
assert_almost_equal(std(A,0), array([1.5, 1.5, 1.5]))
assert_almost_equal(std(A,1), array([0.81649658, 0.81649658]))
def test_var(self):
A = [[1,2,3],[4,5,6]]
assert_almost_equal(var(A), 2.9166666666666665)
assert_almost_equal(var(A,0), array([2.25, 2.25, 2.25]))
assert_almost_equal(var(A,1), array([0.66666667, 0.66666667]))
class TestBoolScalar(TestCase):
    """numpy's bool singletons must behave like Python bools and return
    the singletons themselves from bitwise operations."""
    def test_logical(self):
        f = False_
        t = True_
        s = "xyz"
        # `and` short-circuits per Python rules: truthy lhs yields rhs.
        self.assertTrue((t and s) is s)
        self.assertTrue((f and s) is f)

    def test_bitwise_or(self):
        f = False_
        t = True_
        for lhs, rhs, expected in [(t,t,t), (f,t,t), (t,f,t), (f,f,f)]:
            self.assertTrue((lhs | rhs) is expected)

    def test_bitwise_and(self):
        f = False_
        t = True_
        for lhs, rhs, expected in [(t,t,t), (f,t,f), (t,f,f), (f,f,f)]:
            self.assertTrue((lhs & rhs) is expected)

    def test_bitwise_xor(self):
        f = False_
        t = True_
        for lhs, rhs, expected in [(t,t,f), (f,t,t), (t,f,t), (f,f,f)]:
            self.assertTrue((lhs ^ rhs) is expected)
class TestSeterr(TestCase):
    """geterr/seterr state handling."""
    def test_default(self):
        self.assertEqual(geterr(), dict(divide='warn',
                                        invalid='warn',
                                        over='warn',
                                        under='ignore'))

    def test_set(self):
        saved = seterr()
        try:
            previous = seterr(divide='print')
            # seterr returns the settings that were in effect before the call.
            self.assertTrue(saved == previous)
            current = seterr()
            self.assertTrue(current['divide'] == 'print')
            seterr(over='raise')
            self.assertTrue(geterr()['over'] == 'raise')
            self.assertTrue(current['divide'] == 'print')
            seterr(**previous)
            self.assertTrue(geterr() == previous)
        finally:
            seterr(**saved)

    def test_divide_err(self):
        saved = seterr(divide='raise')
        try:
            self.assertRaises(FloatingPointError,
                              lambda: array([1.]) / array([0.]))
            seterr(divide='ignore')
            array([1.]) / array([0.])  # must not raise now
        finally:
            seterr(**saved)
class TestFloatExceptions(TestCase):
    """Floating-point errors must raise FloatingPointError with the right
    message when np.seterr(all='raise') is in effect."""
    def assert_raises_fpe(self, fpeerr, flop, x, y):
        # Run flop(x, y) and require a FloatingPointError whose message
        # contains the substring `fpeerr`.
        ftype = type(x)
        try:
            flop(x, y)
            assert_(False,
                    "Type %s did not raise fpe error '%s'." % (ftype, fpeerr))
        # Fixed: the original used the Python-2-only `except E, exc:` form,
        # a SyntaxError on Python 3; `as` is also valid on Python 2.6+.
        except FloatingPointError as exc:
            assert_(str(exc).find(fpeerr) >= 0,
                    "Type %s raised wrong fpe error '%s'." % (ftype, exc))
    def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2):
        """Check that fpe exception is raised.

        Given a floating operation `flop` and two scalar values, check that
        the operation raises the floating point exception specified by
        `fpeerr`. Tests all variants with 0-d array scalars as well.
        """
        self.assert_raises_fpe(fpeerr, flop, sc1, sc2)
        self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2)
        self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()])
        self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()])
    @dec.knownfailureif(True, "See ticket 1755")
    def test_floating_exceptions(self):
        """Test basic arithmetic function errors"""
        oldsettings = np.seterr(all='raise')
        try:
            # Test for all real and complex float types
            for typecode in np.typecodes['AllFloat']:
                ftype = np.obj2sctype(typecode)
                if np.dtype(ftype).kind == 'f':
                    # Get some extreme values for the type
                    fi = np.finfo(ftype)
                    ft_tiny = fi.tiny
                    ft_max = fi.max
                    ft_eps = fi.eps
                    underflow = 'underflow'
                    divbyzero = 'divide by zero'
                else:
                    # 'c', complex, corresponding real dtype
                    rtype = type(ftype(0).real)
                    fi = np.finfo(rtype)
                    ft_tiny = ftype(fi.tiny)
                    ft_max = ftype(fi.max)
                    ft_eps = ftype(fi.eps)
                    # The complex types raise different exceptions
                    underflow = ''
                    divbyzero = ''
                overflow = 'overflow'
                invalid = 'invalid'
                self.assert_raises_fpe(underflow,
                                       lambda a, b: a/b, ft_tiny, ft_max)
                self.assert_raises_fpe(underflow,
                                       lambda a, b: a*b, ft_tiny, ft_tiny)
                self.assert_raises_fpe(overflow,
                                       lambda a, b: a*b, ft_max, ftype(2))
                self.assert_raises_fpe(overflow,
                                       lambda a, b: a/b, ft_max, ftype(0.5))
                self.assert_raises_fpe(overflow,
                                       lambda a, b: a+b, ft_max, ft_max*ft_eps)
                self.assert_raises_fpe(overflow,
                                       lambda a, b: a-b, -ft_max, ft_max*ft_eps)
                self.assert_raises_fpe(overflow,
                                       np.power, ftype(2), ftype(2**fi.nexp))
                self.assert_raises_fpe(divbyzero,
                                       lambda a, b: a/b, ftype(1), ftype(0))
                self.assert_raises_fpe(invalid,
                                       lambda a, b: a/b, ftype(np.inf), ftype(np.inf))
                self.assert_raises_fpe(invalid,
                                       lambda a, b: a/b, ftype(0), ftype(0))
                self.assert_raises_fpe(invalid,
                                       lambda a, b: a-b, ftype(np.inf), ftype(np.inf))
                self.assert_raises_fpe(invalid,
                                       lambda a, b: a+b, ftype(np.inf), ftype(-np.inf))
                self.assert_raises_fpe(invalid,
                                       lambda a, b: a*b, ftype(0), ftype(np.inf))
        finally:
            np.seterr(**oldsettings)
class TestTypes(TestCase):
    """Dtype promotion rules for scalar/scalar and scalar/array mixes."""
    def check_promotion_cases(self, promote_func):
        """Tests that the scalars get coerced correctly.

        `promote_func(a, b)` returns the dtype produced by combining `a`
        and `b`; it is exercised both with np.result_type and with the
        dtype of an actual np.add result (see test_coercion below).
        NOTE(review): these expectations encode value-based casting; under
        NEP 50 (numpy >= 2.0) several scalar/array cases promote
        differently — confirm against the numpy version in use.
        """
        b = np.bool_(0)
        i8, i16, i32, i64 = int8(0), int16(0), int32(0), int64(0)
        u8, u16, u32, u64 = uint8(0), uint16(0), uint32(0), uint64(0)
        f32, f64, fld = float32(0), float64(0), longdouble(0)
        c64, c128, cld = complex64(0), complex128(0), clongdouble(0)
        # coercion within the same kind
        assert_equal(promote_func(i8,i16), np.dtype(int16))
        assert_equal(promote_func(i32,i8), np.dtype(int32))
        assert_equal(promote_func(i16,i64), np.dtype(int64))
        assert_equal(promote_func(u8,u32), np.dtype(uint32))
        assert_equal(promote_func(f32,f64), np.dtype(float64))
        assert_equal(promote_func(fld,f32), np.dtype(longdouble))
        assert_equal(promote_func(f64,fld), np.dtype(longdouble))
        assert_equal(promote_func(c128,c64), np.dtype(complex128))
        assert_equal(promote_func(cld,c128), np.dtype(clongdouble))
        assert_equal(promote_func(c64,fld), np.dtype(clongdouble))
        # coercion between kinds
        assert_equal(promote_func(b,i32), np.dtype(int32))
        assert_equal(promote_func(b,u8), np.dtype(uint8))
        assert_equal(promote_func(i8,u8), np.dtype(int16))
        assert_equal(promote_func(u8,i32), np.dtype(int32))
        assert_equal(promote_func(i64,u32), np.dtype(int64))
        assert_equal(promote_func(u64,i32), np.dtype(float64))
        assert_equal(promote_func(i32,f32), np.dtype(float64))
        assert_equal(promote_func(i64,f32), np.dtype(float64))
        assert_equal(promote_func(f32,i16), np.dtype(float32))
        assert_equal(promote_func(f32,u32), np.dtype(float64))
        assert_equal(promote_func(f32,c64), np.dtype(complex64))
        assert_equal(promote_func(c128,f32), np.dtype(complex128))
        assert_equal(promote_func(cld,f64), np.dtype(clongdouble))
        # coercion between scalars and 1-D arrays
        assert_equal(promote_func(array([b]),i8), np.dtype(int8))
        assert_equal(promote_func(array([b]),u8), np.dtype(uint8))
        assert_equal(promote_func(array([b]),i32), np.dtype(int32))
        assert_equal(promote_func(array([b]),u32), np.dtype(uint32))
        assert_equal(promote_func(array([i8]),i64), np.dtype(int8))
        assert_equal(promote_func(u64,array([i32])), np.dtype(int32))
        assert_equal(promote_func(i64,array([u32])), np.dtype(uint32))
        assert_equal(promote_func(int32(-1),array([u64])), np.dtype(float64))
        assert_equal(promote_func(f64,array([f32])), np.dtype(float32))
        assert_equal(promote_func(fld,array([f32])), np.dtype(float32))
        assert_equal(promote_func(array([f64]),fld), np.dtype(float64))
        assert_equal(promote_func(fld,array([c64])), np.dtype(complex64))
        assert_equal(promote_func(c64,array([f64])), np.dtype(complex128))
        assert_equal(promote_func(complex64(3j),array([f64])),
                     np.dtype(complex128))
        # coercion between scalars and 1-D arrays, where
        # the scalar has greater kind than the array
        assert_equal(promote_func(array([b]),f64), np.dtype(float64))
        assert_equal(promote_func(array([b]),i64), np.dtype(int64))
        assert_equal(promote_func(array([b]),u64), np.dtype(uint64))
        assert_equal(promote_func(array([i8]),f64), np.dtype(float64))
        assert_equal(promote_func(array([u16]),f64), np.dtype(float64))
        # uint and int are treated as the same "kind" for
        # the purposes of array-scalar promotion.
        assert_equal(promote_func(array([u16]), i32), np.dtype(uint16))
        # float and complex are treated as the same "kind" for
        # the purposes of array-scalar promotion, so that you can do
        # (0j + float32array) to get a complex64 array instead of
        # a complex128 array.
        assert_equal(promote_func(array([f32]),c128), np.dtype(complex64))
    def test_coercion(self):
        """The ufunc result dtype must follow the promotion rules above."""
        def res_type(a, b):
            # Promotion as observed through an actual add operation.
            return np.add(a, b).dtype
        self.check_promotion_cases(res_type)
        # Use-case: float/complex scalar * bool/int8 array
        # shouldn't narrow the float/complex type
        for a in [np.array([True,False]), np.array([-3,12], dtype=np.int8)]:
            b = 1.234 * a
            assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
            b = np.longdouble(1.234) * a
            assert_equal(b.dtype, np.dtype(np.longdouble),
                         "array type %s" % a.dtype)
            b = np.float64(1.234) * a
            assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
            b = np.float32(1.234) * a
            assert_equal(b.dtype, np.dtype('f4'), "array type %s" % a.dtype)
            b = np.float16(1.234) * a
            assert_equal(b.dtype, np.dtype('f2'), "array type %s" % a.dtype)
            b = 1.234j * a
            assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
            b = np.clongdouble(1.234j) * a
            assert_equal(b.dtype, np.dtype(np.clongdouble),
                         "array type %s" % a.dtype)
            b = np.complex128(1.234j) * a
            assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
            b = np.complex64(1.234j) * a
            assert_equal(b.dtype, np.dtype('c8'), "array type %s" % a.dtype)
        # The following use-case is problematic, and to resolve its
        # tricky side-effects requires more changes.
        #
        ## Use-case: (1-t)*a, where 't' is a boolean array and 'a' is
        ## a float32, shouldn't promote to float64
        #a = np.array([1.0, 1.5], dtype=np.float32)
        #t = np.array([True, False])
        #b = t*a
        #assert_equal(b, [1.0, 0.0])
        #assert_equal(b.dtype, np.dtype('f4'))
        #b = (1-t)*a
        #assert_equal(b, [0.0, 1.5])
        #assert_equal(b.dtype, np.dtype('f4'))
        ## Probably ~t (bitwise negation) is more proper to use here,
        ## but this is arguably less intuitive to understand at a glance, and
        ## would fail if 't' is actually an integer array instead of boolean:
        #b = (~t)*a
        #assert_equal(b, [0.0, 1.5])
        #assert_equal(b.dtype, np.dtype('f4'))
    def test_result_type(self):
        """np.result_type must follow the same promotion rules."""
        self.check_promotion_cases(np.result_type)
    def test_promote_types_endian(self):
        # promote_types should always return native-endian types
        assert_equal(np.promote_types('<i8', '<i8'), np.dtype('i8'))
        assert_equal(np.promote_types('>i8', '>i8'), np.dtype('i8'))
        assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U16'))
        assert_equal(np.promote_types('<i8', '<U16'), np.dtype('U16'))
        assert_equal(np.promote_types('>U16', '>i8'), np.dtype('U16'))
        assert_equal(np.promote_types('<U16', '<i8'), np.dtype('U16'))
        assert_equal(np.promote_types('<S5', '<U8'), np.dtype('U8'))
        assert_equal(np.promote_types('>S5', '>U8'), np.dtype('U8'))
        assert_equal(np.promote_types('<U8', '<S5'), np.dtype('U8'))
        assert_equal(np.promote_types('>U8', '>S5'), np.dtype('U8'))
        assert_equal(np.promote_types('<U5', '<U8'), np.dtype('U8'))
        assert_equal(np.promote_types('>U8', '>U5'), np.dtype('U8'))
        assert_equal(np.promote_types('<M8', '<M8'), np.dtype('M8'))
        assert_equal(np.promote_types('>M8', '>M8'), np.dtype('M8'))
        assert_equal(np.promote_types('<m8', '<m8'), np.dtype('m8'))
        assert_equal(np.promote_types('>m8', '>m8'), np.dtype('m8'))
    def test_can_cast(self):
        """Spot-check np.can_cast over types, strings, and casting modes."""
        assert_(np.can_cast(np.int32, np.int64))
        assert_(np.can_cast(np.float64, np.complex))
        assert_(not np.can_cast(np.complex, np.float))
        assert_(np.can_cast('i8', 'f8'))
        assert_(not np.can_cast('i8', 'f4'))
        assert_(np.can_cast('i4', 'S4'))
        assert_(np.can_cast('i8', 'i8', 'no'))
        assert_(not np.can_cast('<i8', '>i8', 'no'))
        assert_(np.can_cast('<i8', '>i8', 'equiv'))
        assert_(not np.can_cast('<i4', '>i8', 'equiv'))
        assert_(np.can_cast('<i4', '>i8', 'safe'))
        assert_(not np.can_cast('<i8', '>i4', 'safe'))
        assert_(np.can_cast('<i8', '>i4', 'same_kind'))
        assert_(not np.can_cast('<i8', '>u4', 'same_kind'))
        assert_(np.can_cast('<i8', '>u4', 'unsafe'))
        assert_raises(TypeError, np.can_cast, 'i4', None)
        assert_raises(TypeError, np.can_cast, None, 'i4')
class TestFromiter(TestCase):
    """np.fromiter: dtype handling, count handling, and element values."""
    def makegen(self):
        # Generator of the first 24 squares. Fixed: the original used the
        # Python-2-only `xrange`; `range` behaves identically here on 2 and 3.
        for x in range(24):
            yield x**2
    def test_types(self):
        """fromiter honours the requested dtype."""
        ai32 = fromiter(self.makegen(), int32)
        ai64 = fromiter(self.makegen(), int64)
        af = fromiter(self.makegen(), float)
        self.assertTrue(ai32.dtype == dtype(int32))
        self.assertTrue(ai64.dtype == dtype(int64))
        self.assertTrue(af.dtype == dtype(float))
    def test_lengths(self):
        """`count` truncates; asking for more items than exist raises."""
        expected = array(list(self.makegen()))
        a = fromiter(self.makegen(), int)
        a20 = fromiter(self.makegen(), int, 20)
        self.assertTrue(len(a) == len(expected))
        self.assertTrue(len(a20) == 20)
        try:
            fromiter(self.makegen(), int, len(expected) + 10)
        except ValueError:
            pass
        else:
            self.fail()
    def test_values(self):
        """Element values match the generator output."""
        expected = array(list(self.makegen()))
        a = fromiter(self.makegen(), int)
        a20 = fromiter(self.makegen(), int, 20)
        # np.all replaces `alltrue`, which was removed in numpy 2.0.
        self.assertTrue(np.all(a == expected, axis=0))
        self.assertTrue(np.all(a20 == expected[:20], axis=0))
class TestNonzero(TestCase):
    """count_nonzero / nonzero over 0-d, 1-d, 2-d and structured arrays."""
    def test_nonzero_trivial(self):
        for empty in (array([]), array([], dtype='?')):
            assert_equal(np.count_nonzero(empty), 0)
        assert_equal(np.nonzero(array([])), ([],))
        for scalar, expected in ((array(0), 0), (array(0, dtype='?'), 0),
                                 (array(1), 1), (array(1, dtype='?'), 1)):
            assert_equal(np.count_nonzero(scalar), expected)
        assert_equal(np.nonzero(array(0)), ([],))
        assert_equal(np.nonzero(array(1)), ([0],))

    def test_nonzero_onedim(self):
        vec = array([1,0,2,-1,0,0,8])
        assert_equal(np.count_nonzero(vec), 4)
        assert_equal(np.count_nonzero(vec), 4)
        assert_equal(np.nonzero(vec), ([0, 2, 3, 6],))
        rec = array([(1,2),(0,0),(1,1),(-1,3),(0,7)],
                    dtype=[('a','i4'),('b','i2')])
        assert_equal(np.count_nonzero(rec['a']), 3)
        assert_equal(np.count_nonzero(rec['b']), 4)
        assert_equal(np.nonzero(rec['a']), ([0,2,3],))
        assert_equal(np.nonzero(rec['b']), ([0,2,3,4],))

    def test_nonzero_twodim(self):
        mat = array([[0,1,0],[2,0,3]])
        assert_equal(np.count_nonzero(mat), 3)
        assert_equal(np.nonzero(mat), ([0,1,1],[1,0,2]))
        identity = np.eye(3)
        assert_equal(np.count_nonzero(identity), 3)
        assert_equal(np.nonzero(identity), ([0,1,2],[0,1,2]))
        rec = array([[(0,1),(0,0),(1,11)],
                     [(1,1),(1,0),(0,0)],
                     [(0,0),(1,5),(0,1)]], dtype=[('a','f4'),('b','u1')])
        # Per-field counts and indices, on the array and its transpose.
        assert_equal(np.count_nonzero(rec['a']), 4)
        assert_equal(np.count_nonzero(rec['b']), 5)
        assert_equal(np.nonzero(rec['a']), ([0,1,1,2],[2,0,1,1]))
        assert_equal(np.nonzero(rec['b']), ([0,0,1,2,2],[0,2,0,1,2]))
        assert_equal(np.count_nonzero(rec['a'].T), 4)
        assert_equal(np.count_nonzero(rec['b'].T), 5)
        assert_equal(np.nonzero(rec['a'].T), ([0,1,1,2],[1,1,2,0]))
        assert_equal(np.nonzero(rec['b'].T), ([0,0,1,2,2],[0,1,2,0,2]))
class TestIndex(TestCase):
    def test_boolean(self):
        # Boolean-mask indexing on the trailing axes must agree with
        # indexing each leading slice independently.
        vol = rand(3,5,8)
        sel = rand(5,8)
        rows = randint(0,5,size=15)
        cols = randint(0,8,size=15)
        sel[rows,cols] = -sel[rows,cols]
        mask = sel > 0
        stacked = array([vol[0][mask], vol[1][mask], vol[2][mask]])
        assert_((stacked == vol[:,mask]).all())

    def test_boolean_edgecase(self):
        # Empty mask on an empty array keeps the dtype.
        data = np.array([], dtype='int32')
        mask = np.array([], dtype='bool')
        picked = data[mask]
        assert_equal(picked, [])
        assert_equal(picked.dtype, np.dtype('int32'))
class TestBinaryRepr(TestCase):
    def test_zero(self):
        assert_equal(binary_repr(0), '0')

    def test_large(self):
        assert_equal(binary_repr(10736848), '101000111101010011010000')

    def test_negative(self):
        # Without a width: sign-prefixed; with one: two's complement.
        for value, kwargs, expected in [(-1, {}, '-1'),
                                        (-1, {'width': 8}, '11111111')]:
            assert_equal(binary_repr(value, **kwargs), expected)
class TestBaseRepr(TestCase):
    def test_base3(self):
        assert_equal(base_repr(3**5, 3), '100000')

    def test_positive(self):
        for args, expected in [((12, 10), '12'),
                               ((12, 10, 4), '000012'),
                               ((12, 4), '30'),
                               ((3731624803700888, 36), '10QR0ROFCEW')]:
            assert_equal(base_repr(*args), expected)

    def test_negative(self):
        for args, expected in [((-12, 10), '-12'),
                               ((-12, 10, 4), '-000012'),
                               ((-12, 4), '-30')]:
            assert_equal(base_repr(*args), expected)
class TestArrayComparisons(TestCase):
    """array_equal / array_equiv must return plain Python bools."""
    def _check(self, res, truth):
        # Result carries the expected truth value and is a builtin bool.
        assert_(res if truth else not res)
        assert_(type(res) is bool)

    def test_array_equal(self):
        for lhs, rhs, truth in [
                (array([1,2]), array([1,2]), True),
                (array([1,2]), array([1,2,3]), False),
                (array([1,2]), array([3,4]), False),
                (array([1,2]), array([1,3]), False)]:
            self._check(array_equal(lhs, rhs), truth)

    def test_array_equiv(self):
        for lhs, rhs, truth in [
                (array([1,2]), array([1,2]), True),
                (array([1,2]), array([1,2,3]), False),
                (array([1,2]), array([3,4]), False),
                (array([1,2]), array([1,3]), False),
                # Broadcasting-compatible shapes count as equivalent.
                (array([1,1]), array([1]), True),
                (array([1,1]), array([[1],[1]]), True),
                (array([1,2]), array([2]), False),
                (array([1,2]), array([[1],[2]]), False),
                (array([1,2]), array([[1,2,3],[4,5,6],[7,8,9]]), False)]:
            self._check(array_equiv(lhs, rhs), truth)
def assert_array_strict_equal(x, y):
    """Assert x and y match in values, in memory-layout flags, and in
    dtype nativeness (same-endianness class)."""
    assert_array_equal(x, y)
    flags_match = x.flags == y.flags
    assert_(flags_match)
    assert_(x.dtype.isnative == y.dtype.isnative)
class TestClip(TestCase):
def setUp(self):
self.nr = 5
self.nc = 3
def fastclip(self, a, m, M, out=None):
if out is None:
return a.clip(m,M)
else:
return a.clip(m,M,out)
def clip(self, a, m, M, out=None):
# use slow-clip
selector = less(a, m)+2*greater(a, M)
return selector.choose((a, m, M), out=out)
# Handy functions
def _generate_data(self, n, m):
return randn(n, m)
def _generate_data_complex(self, n, m):
return randn(n, m) + 1.j *rand(n, m)
def _generate_flt_data(self, n, m):
return (randn(n, m)).astype(float32)
def _neg_byteorder(self, a):
a = asarray(a)
if sys.byteorder == 'little':
a = a.astype(a.dtype.newbyteorder('>'))
else:
a = a.astype(a.dtype.newbyteorder('<'))
return a
def _generate_non_native_data(self, n, m):
data = randn(n, m)
data = self._neg_byteorder(data)
assert_(not data.dtype.isnative)
return data
def _generate_int_data(self, n, m):
return (10 * rand(n, m)).astype(int64)
def _generate_int32_data(self, n, m):
return (10 * rand(n, m)).astype(int32)
# Now the real test cases
def test_simple_double(self):
    """Native double input, scalar min/max."""
    data = self._generate_data(self.nr, self.nc)
    lo, hi = 0.1, 0.6
    assert_array_strict_equal(self.fastclip(data, lo, hi),
                              self.clip(data, lo, hi))
def test_simple_int(self):
    """Native int input, scalar min/max."""
    data = self._generate_int_data(self.nr, self.nc).astype(int)
    lo, hi = -2, 4
    assert_array_strict_equal(self.fastclip(data, lo, hi),
                              self.clip(data, lo, hi))
def test_array_double(self):
    """Native double input, array min/max."""
    data = self._generate_data(self.nr, self.nc)
    lo = zeros(data.shape)
    hi = lo + 0.5
    assert_array_strict_equal(self.fastclip(data, lo, hi),
                              self.clip(data, lo, hi))
def test_simple_nonnative(self):
"""Test non native double input with scalar min/max.
Test native double input with non native double scalar min/max."""
a = self._generate_non_native_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_equal(ac, act)
"Test native double input with non native double scalar min/max."
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = self._neg_byteorder(0.6)
assert_(not M.dtype.isnative)
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_equal(ac, act)
def test_simple_complex(self):
    """Complex data with real scalar bounds, then real data with complex
    scalar bounds."""
    data = 3 * self._generate_data_complex(self.nr, self.nc)
    assert_array_strict_equal(self.fastclip(data, -0.5, 1.),
                              self.clip(data, -0.5, 1.))
    data = 3 * self._generate_data(self.nr, self.nc)
    lo = -0.5 + 1.j
    hi = 1. + 2.j
    assert_array_strict_equal(self.fastclip(data, lo, hi),
                              self.clip(data, lo, hi))
def test_clip_non_contig(self):
    """Strided (non-contiguous) input with native scalar bounds."""
    data = self._generate_data(self.nr * 2, self.nc * 3)[::2, ::3]
    assert_(not data.flags['F_CONTIGUOUS'])
    assert_(not data.flags['C_CONTIGUOUS'])
    assert_array_strict_equal(self.fastclip(data, -1.6, 1.7),
                              self.clip(data, -1.6, 1.7))
def test_simple_out(self):
    """Scalar bounds with preallocated output arrays."""
    data = self._generate_data(self.nr, self.nc)
    lo, hi = -0.5, 0.6
    fast_out = zeros(data.shape)
    slow_out = zeros(data.shape)
    self.fastclip(data, lo, hi, fast_out)
    self.clip(data, lo, hi, slow_out)
    assert_array_strict_equal(fast_out, slow_out)
def test_simple_int32_inout(self):
    """int32 data, float64 scalar bounds, int32 output."""
    data = self._generate_int32_data(self.nr, self.nc)
    lo, hi = float64(0), float64(2)
    fast_out = zeros(data.shape, dtype=int32)
    slow_out = fast_out.copy()
    self.fastclip(data, lo, hi, fast_out)
    self.clip(data, lo, hi, slow_out)
    assert_array_strict_equal(fast_out, slow_out)
def test_simple_int64_out(self):
    """int32 data, int32 scalar bounds, int64 output."""
    data = self._generate_int32_data(self.nr, self.nc)
    lo, hi = int32(-1), int32(1)
    fast_out = zeros(data.shape, dtype=int64)
    slow_out = fast_out.copy()
    self.fastclip(data, lo, hi, fast_out)
    self.clip(data, lo, hi, slow_out)
    assert_array_strict_equal(fast_out, slow_out)
def test_simple_int64_inout(self):
    """int32 data, float64 array min, float64 scalar max, int32 output."""
    data = self._generate_int32_data(self.nr, self.nc)
    lo = zeros(data.shape, float64)
    hi = float64(1)
    fast_out = zeros(data.shape, dtype=int32)
    slow_out = fast_out.copy()
    self.fastclip(data, lo, hi, fast_out)
    self.clip(data, lo, hi, slow_out)
    assert_array_strict_equal(fast_out, slow_out)
def test_simple_int32_out(self):
    """Double data, scalar bounds, int32 output."""
    data = self._generate_data(self.nr, self.nc)
    lo, hi = -1.0, 2.0
    fast_out = zeros(data.shape, dtype=int32)
    slow_out = fast_out.copy()
    self.fastclip(data, lo, hi, fast_out)
    self.clip(data, lo, hi, slow_out)
    assert_array_strict_equal(fast_out, slow_out)
def test_simple_inplace_01(self):
    """In-place clip with an array min and scalar max."""
    data = self._generate_data(self.nr, self.nc)
    check = data.copy()
    lo = zeros(data.shape)
    hi = 1.0
    self.fastclip(data, lo, hi, data)   # clip `data` into itself
    self.clip(data, lo, hi, check)      # re-clipping is idempotent
    assert_array_strict_equal(data, check)
def test_simple_inplace_02(self):
    """In-place clip with scalar bounds."""
    data = self._generate_data(self.nr, self.nc)
    check = data.copy()
    lo, hi = -0.5, 0.6
    self.fastclip(data, lo, hi, data)   # clip `data` into itself
    self.clip(data, lo, hi, check)      # re-clipping is idempotent
    assert_array_strict_equal(data, check)
def test_noncontig_inplace(self):
"""Test non contiguous double input with double scalar min/max in-place."""
a = self._generate_data(self.nr * 2, self.nc * 3)
a = a[::2, ::3]
assert_(not a.flags['F_CONTIGUOUS'])
assert_(not a.flags['C_CONTIGUOUS'])
ac = a.copy()
m = -0.5
M = 0.6
self.fastclip(a, m, M, a)
self.clip(a, m, M, ac)
assert_array_equal(a, ac)
def test_type_cast_01(self):
    """Native double data with scalar bounds."""
    data = self._generate_data(self.nr, self.nc)
    lo, hi = -0.5, 0.6
    assert_array_strict_equal(self.fastclip(data, lo, hi),
                              self.clip(data, lo, hi))
def test_type_cast_02(self):
    """int32 data with int scalar bounds."""
    data = self._generate_int_data(self.nr, self.nc).astype(int32)
    lo, hi = -2, 4
    assert_array_strict_equal(self.fastclip(data, lo, hi),
                              self.clip(data, lo, hi))
def test_type_cast_03(self):
    """int32 data with float64 scalar bounds."""
    data = self._generate_int32_data(self.nr, self.nc)
    lo, hi = float64(-2), float64(4)
    assert_array_strict_equal(self.fastclip(data, lo, hi),
                              self.clip(data, lo, hi))
def test_type_cast_04(self):
    """int32 data with float32 scalar bounds."""
    data = self._generate_int32_data(self.nr, self.nc)
    lo, hi = float32(-2), float32(4)
    assert_array_strict_equal(self.clip(data, lo, hi),
                              self.fastclip(data, lo, hi))
def test_type_cast_05(self):
    """int data with a double-array min and scalar max."""
    data = self._generate_int_data(self.nr, self.nc)
    lo = -0.5 * zeros(data.shape)   # array of -0.0
    hi = 1.
    assert_array_strict_equal(self.fastclip(data, lo, hi),
                              self.clip(data, lo, hi))
def test_type_cast_06(self):
    """Native data with a byte-swapped scalar min."""
    data = self._generate_data(self.nr, self.nc)
    lo = self._neg_byteorder(0.5)
    hi = 1.
    slow = self.clip(data, lo, hi)
    fast = self.fastclip(data, lo, hi)
    assert_array_strict_equal(fast, slow)
def test_type_cast_07(self):
    """Byte-swapped data with a native array min."""
    data = self._generate_data(self.nr, self.nc)
    lo = -0.5 * ones(data.shape)
    hi = 1.
    swapped = self._neg_byteorder(data)
    assert_(not swapped.dtype.isnative)
    slow = swapped.clip(lo, hi)
    fast = self.fastclip(swapped, lo, hi)
    assert_array_strict_equal(fast, slow)
def test_type_cast_08(self):
    """Byte-swapped data with native scalar bounds."""
    swapped = self._neg_byteorder(self._generate_data(self.nr, self.nc))
    assert_(not swapped.dtype.isnative)
    lo, hi = -0.5, 1.
    fast = self.fastclip(swapped, lo, hi)
    slow = swapped.clip(lo, hi)
    assert_array_strict_equal(fast, slow)
def test_type_cast_09(self):
    """Native data with a byte-swapped array min."""
    data = self._generate_data(self.nr, self.nc)
    lo = self._neg_byteorder(-0.5 * ones(data.shape))
    hi = 1.
    assert_(not lo.dtype.isnative)
    fast = self.fastclip(data, lo, hi)
    slow = self.clip(data, lo, hi)
    assert_array_strict_equal(fast, slow)
def test_type_cast_10(self):
    """Test native int32 with float min/max and float out for output argument."""
    a = self._generate_int_data(self.nr, self.nc)
    b = zeros(a.shape, dtype = float32)
    m = float32(-0.5)
    M = float32(1)
    # NOTE(review): both calls write into the same `out` buffer `b`. If
    # clip()/fastclip() return their `out` argument (not shown here), `act`
    # and `ac` alias one array and this assert can never fail -- compare
    # with test_type_cast_11, which uses two separate buffers. Confirm intent.
    act = self.clip(a, m, M, out = b)
    ac = self.fastclip(a, m , M, out = b)
    assert_array_strict_equal(ac, act)
def test_type_cast_11(self):
    """Non-native input, native scalar bounds, non-native output buffers."""
    data = self._generate_non_native_data(self.nr, self.nc)
    # Build a big-endian output buffer plus an identical reference copy.
    buf = data.copy()
    buf = buf.astype(buf.dtype.newbyteorder('>'))
    buf_ref = buf.copy()
    lo, hi = -0.5, 1.
    self.fastclip(data, lo, hi, out=buf)
    self.clip(data, lo, hi, out=buf_ref)
    assert_array_strict_equal(buf, buf_ref)
def test_type_cast_12(self):
    "Test native int32 input and min/max and float out"
    a = self._generate_int_data(self.nr, self.nc)
    b = zeros(a.shape, dtype = float32)
    m = int32(0)
    M = int32(1)
    # NOTE(review): as in test_type_cast_10, both calls share the same `out`
    # buffer `b`, so if the return value is that buffer the comparison below
    # is an array compared with itself -- confirm intent.
    act = self.clip(a, m, M, out = b)
    ac = self.fastclip(a, m , M, out = b)
    assert_array_strict_equal(ac, act)
def test_clip_with_out_simple(self):
    """Native double input, scalar bounds, preallocated output buffers."""
    data = self._generate_data(self.nr, self.nc)
    lo, hi = -0.5, 0.6
    out_fast = zeros(data.shape)
    out_ref = zeros(data.shape)
    self.fastclip(data, lo, hi, out_fast)
    self.clip(data, lo, hi, out_ref)
    assert_array_strict_equal(out_fast, out_ref)
def test_clip_with_out_simple2(self):
    """Native int32 input, float64 scalar bounds, int32 output buffers."""
    data = self._generate_int32_data(self.nr, self.nc)
    lo, hi = float64(0), float64(2)
    out_fast = zeros(data.shape, dtype=int32)
    out_ref = out_fast.copy()
    self.fastclip(data, lo, hi, out_fast)
    self.clip(data, lo, hi, out_ref)
    assert_array_strict_equal(out_fast, out_ref)
def test_clip_with_out_simple_int32(self):
    """Native int32 input, int32 scalar bounds, int64 output buffers."""
    data = self._generate_int32_data(self.nr, self.nc)
    lo, hi = int32(-1), int32(1)
    out_fast = zeros(data.shape, dtype=int64)
    out_ref = out_fast.copy()
    self.fastclip(data, lo, hi, out_fast)
    self.clip(data, lo, hi, out_ref)
    assert_array_strict_equal(out_fast, out_ref)
def test_clip_with_out_array_int32(self):
    """Native int32 input, double-array lower bound, int32 output buffers."""
    data = self._generate_int32_data(self.nr, self.nc)
    lo = zeros(data.shape, float64)
    hi = float64(1)
    out_fast = zeros(data.shape, dtype=int32)
    out_ref = out_fast.copy()
    self.fastclip(data, lo, hi, out_fast)
    self.clip(data, lo, hi, out_ref)
    assert_array_strict_equal(out_fast, out_ref)
def test_clip_with_out_array_outint32(self):
    """Native double input, scalar bounds, int32 output buffers."""
    data = self._generate_data(self.nr, self.nc)
    lo, hi = -1.0, 2.0
    out_fast = zeros(data.shape, dtype=int32)
    out_ref = out_fast.copy()
    self.fastclip(data, lo, hi, out_fast)
    self.clip(data, lo, hi, out_ref)
    assert_array_strict_equal(out_fast, out_ref)
def test_clip_inplace_array(self):
    """Native double input clipped in place with an array lower bound."""
    data = self._generate_data(self.nr, self.nc)
    reference = data.copy()
    lo = zeros(data.shape)
    hi = 1.0
    # fastclip writes back into `data` itself; the reference clip then runs
    # on the already-clipped data (idempotent), matching the original flow.
    self.fastclip(data, lo, hi, data)
    self.clip(data, lo, hi, reference)
    assert_array_strict_equal(data, reference)
def test_clip_inplace_simple(self):
    """Native double input clipped in place with scalar bounds."""
    data = self._generate_data(self.nr, self.nc)
    reference = data.copy()
    lo, hi = -0.5, 0.6
    # In-place fast path first, then the reference on its own copy.
    self.fastclip(data, lo, hi, data)
    self.clip(data, lo, hi, reference)
    assert_array_strict_equal(data, reference)
def test_clip_func_takes_out(self):
    """The module-level clip() must honour and return its out= argument."""
    data = self._generate_data(self.nr, self.nc)
    reference = data.copy()
    lo, hi = -0.5, 0.6
    returned = clip(data, lo, hi, out=data)
    self.clip(data, lo, hi, reference)
    assert_array_strict_equal(returned, reference)
    # With out= given, clip() must return the very same array object.
    self.assertTrue(returned is data)
class TestAllclose(object):
    # Tolerances used to build the parametric test data below.
    rtol = 1e-5
    atol = 1e-8

    def setUp(self):
        # allclose() on inf/nan data triggers invalid-value FP warnings;
        # silence them for the duration of each test and restore afterwards.
        self.olderr = np.seterr(invalid='ignore')

    def tearDown(self):
        np.seterr(**self.olderr)

    def tst_allclose(self,x,y):
        """Check that x and y compare as close."""
        assert_(allclose(x,y), "%s and %s not close" % (x,y))

    def tst_not_allclose(self,x,y):
        """Check that x and y do NOT compare as close."""
        assert_(not allclose(x,y), "%s and %s shouldn't be close" % (x,y))

    def test_ip_allclose(self):
        """Parametric test factory."""
        # nose-style generator test: each yielded (check, x, y) tuple runs as
        # a separate test case.
        arr = array([100,1000])
        aran = arange(125).reshape((5,5,5))
        atol = self.atol
        rtol = self.rtol
        data = [([1,0], [1,0]),
                ([atol], [0]),
                ([1], [1+rtol+atol]),
                (arr, arr + arr*rtol),
                (arr, arr + arr*rtol + atol*2),
                (aran, aran + aran*rtol),
                (inf, inf),
                (inf, [inf])]
        for (x,y) in data:
            yield (self.tst_allclose,x,y)

    def test_ip_not_allclose(self):
        """Parametric test factory."""
        # Pairs that must fail allclose(): mismatched infs, nan, and values
        # just outside the combined atol/rtol tolerance.
        aran = arange(125).reshape((5,5,5))
        atol = self.atol
        rtol = self.rtol
        data = [([inf,0], [1,inf]),
                ([inf,0], [1,0]),
                ([inf,inf], [1,inf]),
                ([inf,inf], [1,0]),
                ([-inf, 0], [inf, 0]),
                ([nan,0], [nan,0]),
                ([atol*2], [0]),
                ([1], [1+rtol+atol*2]),
                (aran, aran + aran*atol + atol*2),
                (array([inf,1]), array([0,inf]))]
        for (x,y) in data:
            yield (self.tst_not_allclose,x,y)

    def test_no_parameter_modification(self):
        # allclose() must not mutate its inputs (e.g. via in-place masking).
        x = array([inf,1])
        y = array([0,inf])
        allclose(x,y)
        assert_array_equal(x,array([inf,1]))
        assert_array_equal(y,array([0,inf]))
class TestIsclose(object):
    # Default tolerances used when building the test tables in setup().
    rtol = 1e-5
    atol = 1e-8

    def setup(self):
        """Build the shared test tables: all-close, none-close and some-close
        pairs, plus the expected elementwise results for the latter."""
        atol = self.atol
        rtol = self.rtol
        arr = array([100,1000])
        aran = arange(125).reshape((5,5,5))
        # Pairs where every element must compare as close.
        self.all_close_tests = [
                ([1, 0], [1, 0]),
                ([atol], [0]),
                ([1], [1 + rtol + atol]),
                (arr, arr + arr*rtol),
                (arr, arr + arr*rtol + atol),
                (aran, aran + aran*rtol),
                (inf, inf),
                (inf, [inf]),
                ([inf, -inf], [inf, -inf]),
                ]
        # Pairs where no element may compare as close.
        self.none_close_tests = [
                ([inf, 0], [1, inf]),
                ([inf, -inf], [1, 0]),
                ([inf, inf], [1, -inf]),
                ([inf, inf], [1, 0]),
                ([nan, 0], [nan, -inf]),
                ([atol*2], [0]),
                ([1], [1 + rtol + atol*2]),
                (aran, aran + rtol*1.1*aran + atol*1.1),
                (array([inf, 1]), array([0, inf])),
                ]
        # Pairs with a mixture of close and non-close elements; the expected
        # boolean output for each lives in some_close_results (same order).
        self.some_close_tests = [
                ([inf, 0], [inf, atol*2]),
                ([atol, 1, 1e6*(1 + 2*rtol) + atol], [0, nan, 1e6]),
                (arange(3), [0, 1, 2.1]),
                (nan, [nan, nan, nan]),
                ([0], [atol, inf, -inf, nan]),
                (0, [atol, inf, -inf, nan]),
                ]
        self.some_close_results = [
                [True, False],
                [True, False, False],
                [True, True, False],
                [False, False, False],
                [True, False, False, False],
                [True, False, False, False],
                ]

    def test_ip_isclose(self):
        # nose-style generator test over the mixed (some-close) pairs.
        self.setup()
        tests = self.some_close_tests
        results = self.some_close_results
        for (x, y), result in zip(tests, results):
            yield (assert_array_equal, isclose(x, y), result)

    def tst_all_isclose(self, x, y):
        """Check every element of x and y compares as close."""
        assert_(all(isclose(x, y)), "%s and %s not close" % (x, y))

    def tst_none_isclose(self, x, y):
        """Check no element of x and y compares as close."""
        msg = "%s and %s shouldn't be close"
        assert_(not any(isclose(x, y)), msg % (x, y))

    def tst_isclose_allclose(self, x, y):
        """isclose(...).all() must agree with allclose(...)."""
        msg = "isclose.all() and allclose aren't same for %s and %s"
        assert_array_equal(isclose(x, y).all(), allclose(x, y), msg % (x, y))

    def test_ip_all_isclose(self):
        self.setup()
        for (x,y) in self.all_close_tests:
            yield (self.tst_all_isclose, x, y)

    def test_ip_none_isclose(self):
        self.setup()
        for (x,y) in self.none_close_tests:
            yield (self.tst_none_isclose, x, y)

    def test_ip_isclose_allclose(self):
        # Consistency between isclose() and allclose() over every table.
        self.setup()
        tests = (self.all_close_tests + self.none_close_tests +
                 self.some_close_tests)
        for (x, y) in tests:
            yield (self.tst_isclose_allclose, x, y)

    def test_equal_nan(self):
        # equal_nan=True makes nan compare equal to nan.
        assert_array_equal(isclose(nan, nan, equal_nan=True), [True])
        arr = array([1.0, nan])
        assert_array_equal(isclose(arr, arr, equal_nan=True), [True, True])

    def test_masked_arrays(self):
        # isclose() must preserve the masked-array type and its mask.
        x = np.ma.masked_where([True, True, False], np.arange(3))
        assert_(type(x) == type(isclose(2, x)))
        x = np.ma.masked_where([True, True, False], [nan, inf, nan])
        assert_(type(x) == type(isclose(inf, x)))
        x = np.ma.masked_where([True, True, False], [nan, nan, nan])
        y = isclose(nan, x, equal_nan=True)
        assert_(type(x) == type(y))
        # Ensure that the mask isn't modified...
        assert_array_equal([True, True, False], y.mask)

    def test_scalar_return(self):
        # Scalar inputs must give a scalar, not a 0-d array.
        assert_(isscalar(isclose(1, 1)))

    def test_no_parameter_modification(self):
        # isclose() must not mutate its inputs.
        x = array([inf, 1])
        y = array([0, inf])
        isclose(x, y)
        assert_array_equal(x, array([inf, 1]))
        assert_array_equal(y, array([0, inf]))
class TestStdVar(TestCase):
    """Sanity checks for var() and std() on a simple +/-1 sequence."""

    def setUp(self):
        # Variance of [1, -1, 1, -1] about its zero mean is exactly 1.
        self.A = array([1, -1, 1, -1])
        self.real_var = 1

    def test_basic(self):
        # std() is the square root of var(), so std**2 must match too.
        expected = self.real_var
        assert_almost_equal(var(self.A), expected)
        assert_almost_equal(std(self.A) ** 2, expected)

    def test_ddof1(self):
        # With ddof=1 the denominator shrinks from n to n - 1.
        n = len(self.A)
        expected = self.real_var * n / float(n - 1)
        assert_almost_equal(var(self.A, ddof=1), expected)
        assert_almost_equal(std(self.A, ddof=1) ** 2, expected)

    def test_ddof2(self):
        # With ddof=2 the denominator shrinks from n to n - 2.
        n = len(self.A)
        expected = self.real_var * n / float(n - 2)
        assert_almost_equal(var(self.A, ddof=2), expected)
        assert_almost_equal(std(self.A, ddof=2) ** 2, expected)
class TestStdVarComplex(TestCase):
    """var()/std() must handle complex data via squared magnitudes."""

    def test_basic(self):
        # Four unit-magnitude points with zero mean -> variance exactly 1.
        samples = array([1, 1.j, -1, -1.j])
        expected = 1
        assert_almost_equal(var(samples), expected)
        assert_almost_equal(std(samples) ** 2, expected)
class TestLikeFuncs(TestCase):
    '''Test ones_like, zeros_like, and empty_like'''

    def setUp(self):
        # (input array, requested dtype or None) pairs covering array
        # scalars and 1-3D arrays in C, Fortran and non-contiguous layouts.
        self.data = [
                # Array scalars
                (array(3.), None),
                (array(3), 'f8'),
                # 1D arrays
                (arange(6, dtype='f4'), None),
                (arange(6), 'c16'),
                # 2D C-layout arrays
                (arange(6).reshape(2,3), None),
                (arange(6).reshape(3,2), 'i1'),
                # 2D F-layout arrays
                (arange(6).reshape((2,3), order='F'), None),
                (arange(6).reshape((3,2), order='F'), 'i1'),
                # 3D C-layout arrays
                (arange(24).reshape(2,3,4), None),
                (arange(24).reshape(4,3,2), 'f4'),
                # 3D F-layout arrays
                (arange(24).reshape((2,3,4), order='F'), None),
                (arange(24).reshape((4,3,2), order='F'), 'f4'),
                # 3D non-C/F-layout arrays
                (arange(24).reshape(2,3,4).swapaxes(0,1), None),
                (arange(24).reshape(4,3,2).swapaxes(0,1), '?'),
                ]

    def check_like_function(self, like_function, value):
        """Run `like_function` over self.data, checking shape, strides,
        contiguity flags and dtype for the default 'K' order plus explicit
        'C', 'F' and 'A' orders; if `value` is not None, also check that the
        result is filled with it (ones_like/zeros_like vs empty_like)."""
        for d, dtype in self.data:
            # default (K) order, dtype
            dz = like_function(d, dtype=dtype)
            assert_equal(dz.shape, d.shape)
            # Compare element-size-normalized strides so differing dtypes
            # with the same layout still match.
            assert_equal(array(dz.strides)*d.dtype.itemsize,
                         array(d.strides)*dz.dtype.itemsize)
            assert_equal(d.flags.c_contiguous, dz.flags.c_contiguous)
            assert_equal(d.flags.f_contiguous, dz.flags.f_contiguous)
            if dtype is None:
                assert_equal(dz.dtype, d.dtype)
            else:
                assert_equal(dz.dtype, np.dtype(dtype))
            if not value is None:
                assert_(all(dz == value))
            # C order, default dtype
            dz = like_function(d, order='C', dtype=dtype)
            assert_equal(dz.shape, d.shape)
            assert_(dz.flags.c_contiguous)
            if dtype is None:
                assert_equal(dz.dtype, d.dtype)
            else:
                assert_equal(dz.dtype, np.dtype(dtype))
            if not value is None:
                assert_(all(dz == value))
            # F order, default dtype
            dz = like_function(d, order='F', dtype=dtype)
            assert_equal(dz.shape, d.shape)
            assert_(dz.flags.f_contiguous)
            if dtype is None:
                assert_equal(dz.dtype, d.dtype)
            else:
                assert_equal(dz.dtype, np.dtype(dtype))
            if not value is None:
                assert_(all(dz == value))
            # A order
            dz = like_function(d, order='A', dtype=dtype)
            assert_equal(dz.shape, d.shape)
            # 'A' follows the input: Fortran layout if the input is Fortran
            # contiguous, C layout otherwise.
            if d.flags.f_contiguous:
                assert_(dz.flags.f_contiguous)
            else:
                assert_(dz.flags.c_contiguous)
            if dtype is None:
                assert_equal(dz.dtype, d.dtype)
            else:
                assert_equal(dz.dtype, np.dtype(dtype))
            if not value is None:
                assert_(all(dz == value))
        # Test the 'subok' parameter
        a = np.matrix([[1,2],[3,4]])
        b = like_function(a)
        assert_(type(b) is np.matrix)
        b = like_function(a, subok=False)
        assert_(not (type(b) is np.matrix))

    def test_ones_like(self):
        self.check_like_function(np.ones_like, 1)

    def test_zeros_like(self):
        self.check_like_function(np.zeros_like, 0)

    def test_empty_like(self):
        # empty_like gives uninitialized memory, so no value check.
        self.check_like_function(np.empty_like, None)
class _TestCorrelate(TestCase):
    """Shared checks for np.correlate; subclasses set `old_behavior`."""

    def _setup(self, dt):
        # z1 is x (*) y in 'full' mode; z2 is the reversed pairing y (*) x.
        self.x = np.array([1, 2, 3, 4, 5], dtype=dt)
        self.y = np.array([-1, -2, -3], dtype=dt)
        self.z1 = np.array([ -3., -8., -14., -20., -26., -14., -5.], dtype=dt)
        self.z2 = np.array([ -5., -14., -26., -20., -14., -8., -3.], dtype=dt)

    def test_float(self):
        # np.float was merely a deprecated alias for the builtin float and
        # was removed in NumPy 1.24; use the builtin directly (identical
        # semantics on all NumPy versions).
        self._setup(float)
        z = np.correlate(self.x, self.y, 'full', old_behavior=self.old_behavior)
        assert_array_almost_equal(z, self.z1)
        z = np.correlate(self.y, self.x, 'full', old_behavior=self.old_behavior)
        assert_array_almost_equal(z, self.z2)

    def test_object(self):
        # Object-dtype path exercised with Decimal values.
        self._setup(Decimal)
        z = np.correlate(self.x, self.y, 'full', old_behavior=self.old_behavior)
        assert_array_almost_equal(z, self.z1)
        z = np.correlate(self.y, self.x, 'full', old_behavior=self.old_behavior)
        assert_array_almost_equal(z, self.z2)
class TestCorrelate(_TestCorrelate):
    # Old (deprecated) semantics: correlate(a, b) == correlate(b, a).
    old_behavior = True

    def _setup(self, dt):
        # correlate uses an unconventional definition so that correlate(a, b)
        # == correlate(b, a), so force the corresponding outputs to be the same
        # as well
        _TestCorrelate._setup(self, dt)
        self.z2 = self.z1

    @dec.deprecated()
    def test_complex(self):
        # np.complex was merely a deprecated alias for the builtin complex
        # and was removed in NumPy 1.24; use the builtin directly.
        x = np.array([1, 2, 3, 4+1j], dtype=complex)
        y = np.array([-1, -2j, 3+1j], dtype=complex)
        r_z = np.array([3+1j, 6, 8-1j, 9+1j, -1-8j, -4-1j], dtype=complex)
        z = np.correlate(x, y, 'full', old_behavior=self.old_behavior)
        assert_array_almost_equal(z, r_z)

    @dec.deprecated()
    def test_float(self):
        _TestCorrelate.test_float(self)

    @dec.deprecated()
    def test_object(self):
        _TestCorrelate.test_object(self)
class TestCorrelateNew(_TestCorrelate):
    # New semantics: conventional (conjugated, non-commutative) correlation.
    old_behavior = False

    def test_complex(self):
        # np.complex was merely a deprecated alias for the builtin complex
        # and was removed in NumPy 1.24; use the builtin directly.
        x = np.array([1, 2, 3, 4+1j], dtype=complex)
        y = np.array([-1, -2j, 3+1j], dtype=complex)
        r_z = np.array([3-1j, 6, 8+1j, 11+5j, -5+8j, -4-1j], dtype=complex)
        #z = np.acorrelate(x, y, 'full')
        #assert_array_almost_equal(z, r_z)
        # Reversing the arguments conjugate-reverses the expected output.
        r_z = r_z[::-1].conjugate()
        z = np.correlate(y, x, 'full', old_behavior=self.old_behavior)
        assert_array_almost_equal(z, r_z)
class TestArgwhere(object):
    """Tests for np.argwhere index extraction."""

    def test_2D(self):
        grid = np.arange(6).reshape((2, 3))
        # Entries 2..5 exceed 1; their (row, col) coordinates come back in
        # row-major order.
        expected = [[0, 2],
                    [1, 0],
                    [1, 1],
                    [1, 2]]
        assert_array_equal(np.argwhere(grid > 1), expected)

    def test_list(self):
        # Plain sequences are converted first; the zero is the only
        # non-match, so its index is absent.
        assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]])
class TestStringFunction(object):
    """Tests for np.set_string_function (repr/str override hooks)."""

    def test_set_string_function(self):
        # NOTE(review): np.set_string_function was removed in NumPy 2.0;
        # this test only runs against older NumPy releases.
        a = np.array([1])
        # Override repr(), then restore the default by passing None.
        np.set_string_function(lambda x: "FOO", repr=True)
        assert_equal(repr(a), "FOO")
        np.set_string_function(None, repr=True)
        assert_equal(repr(a), "array([1])")
        # Same round-trip for str().
        np.set_string_function(lambda x: "FOO", repr=False)
        assert_equal(str(a), "FOO")
        np.set_string_function(None, repr=False)
        assert_equal(str(a), "[1]")
# Allow running this test module directly via the nose runner.
if __name__ == "__main__":
    run_module_suite()
| bsd-3-clause |
remibergsma/cloudstack-cloudmonkey | cloudmonkey/printer.py | 2 | 3937 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
try:
from pygments import highlight
from pygments.console import ansiformat
from pygments.formatter import Formatter
from pygments.lexer import bygroups, RegexLexer
from pygments.token import *
import sys
except ImportError, e:
print e
# Maps pygments token types to ansiformat() color names used when
# pretty-printing cloudmonkey output. Subtypes with no entry fall back to
# their parent token's entry (see MonkeyFormatter.format_unencoded); Token
# itself maps to '' (no color), so the fallback walk always terminates.
MONKEY_COLORS = {
    Token: '',
    Whitespace: 'reset',
    Text: 'reset',
    Name: 'green',
    Operator: 'teal',
    Operator.Word: 'lightgray',
    String: 'purple',
    Keyword: '_red_',
    Error: 'red',
    Literal: 'yellow',
    Number: 'blue',
}


def get_colorscheme():
    """Return the token-type -> ANSI color mapping for monkey output."""
    return MONKEY_COLORS
class MonkeyLexer(RegexLexer):
    """Pygments lexer for cloudmonkey's 'key = value' style output."""

    # Regex alternatives combined by makelistre() below.
    keywords = ['[a-z]*id', '"[a-z]*id"', '^[a-z A-Z]*:']
    attributes = ['[Tt]rue', '[Ff]alse']
    params = ['[a-z]*[Nn]ame', 'type', '[Ss]tate']
    # Canonical 8-4-4-4-12 hex UUID.
    uuid_rgx = r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}'
    # ISO-8601-like timestamp with numeric offset.
    date_rgx = r'[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9:]{8}[0-9+]{5}'

    def makelistre(lis):
        # Class-body helper (no self; runs at class-definition time): join
        # the alternatives into a single regex alternation group.
        return r'(' + r'|'.join(lis) + r')'

    tokens = {
        'root': [
            (r' ', Whitespace),
            # Dates and UUIDs, bare or quoted.
            (date_rgx, Number),
            (r'"' + date_rgx + r'"', Number),
            (uuid_rgx, Literal),
            (r'"' + uuid_rgx + r'"', Literal),
            # Plain integers, ranges and percentages.
            (r'(?:\b\d+\b(?:-\b\d+|%)?)', Number),
            # Horizontal rule lines made of '-' or '='.
            (r'^[-=]*\n', Operator.Word),
            (r'Error', Error),
            (makelistre(attributes), Literal),
            # 'name = value' lines: key, equals sign, then the value.
            (makelistre(params) + r'( = )(.*)', bygroups(Name, Operator,
                                                         String)),
            (makelistre(keywords), Keyword),
            (makelistre(params), Name),
            (r'(^[a-zA-Z]* )(=)', bygroups(Name, Operator)),
            (r'\S+', Text),
        ]
    }

    def analyse_text(text):
        # pygments convention: analyse_text takes the raw text (no self)
        # and returns a truthy confidence score.
        npos = text.find('\n')
        if npos < 3:
            return False
        # Heuristic: first line looks like a bracketed '[...]' header.
        return text[0] == '[' and text[npos - 1] == ']'
class MonkeyFormatter(Formatter):
    """Pygments formatter that colors tokens via the MONKEY_COLORS scheme."""

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        self.colorscheme = get_colorscheme()

    def format(self, tokensource, outfile):
        return Formatter.format(self, tokensource, outfile)

    def format_unencoded(self, tokensource, outfile):
        for ttype, value in tokensource:
            color = self.colorscheme.get(ttype)
            # Walk up the token hierarchy until a mapped parent is found;
            # Token maps to '', so the walk always terminates.
            while color is None:
                ttype = ttype[:-1]
                color = self.colorscheme.get(ttype)
            if color:
                # Emit line by line so coloring doesn't bleed across
                # newlines; empty segments are written uncolored.
                spl = value.split('\n')
                for line in spl[:-1]:
                    if line:
                        outfile.write(ansiformat(color, line))
                    outfile.write('\n')
                if spl[-1]:
                    outfile.write(ansiformat(color, spl[-1]))
            else:
                outfile.write(value)
def monkeyprint(text):
    """Highlight cloudmonkey output: errors go to stderr, the rest to stdout."""
    formatter = MonkeyFormatter()
    lexer = MonkeyLexer()
    lexer.encoding = 'utf-8'
    formatter.encoding = 'utf-8'
    stream = sys.stderr if text.startswith("Error") else sys.stdout
    highlight(text, lexer, formatter, stream)
| apache-2.0 |
lnawrot/traffic-simulator | site_scons/site_tools/qt5/test/qrc/subdir/sconstest-subdir.py | 2 | 1676 | #!/usr/bin/env python
#
# Copyright (c) 2001-2010,2011,2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
"""
In this test the QRC file is placed in a subfolder (qrc/icons.qrc). The
Qrc5() builder should correctly strip the leading path and set the "-name"
option for the RCC executable to "icons" only.
"""
import TestSCons

# Build the SCons test harness and pull in the shared fixtures: the example
# image project, the qtenv helper, and the qt5 tool package itself.
test = TestSCons.TestSCons()
test.dir_fixture("image")
test.file_fixture('../../qtenv.py')
test.file_fixture('../../../__init__.py','site_scons/site_tools/qt5/__init__.py')
# Run the build; pass_test() reports success if no failure was recorded.
test.run()
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| bsd-3-clause |
ovilab/atomify-lammps | libs/lammps/tools/i-pi/ipi/utils/inputvalue.py | 33 | 36887 | """Contains the classes that are used to write to and read from restart files.
Copyright (C) 2013, Joshua More and Michele Ceriotti
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http.//www.gnu.org/licenses/>.
The classes defined in this module define the base functions which parse the
data in the restart files. Each restart object defined has a fields and an
attributes dictionary, which are filled with the tags and attributes that
are allowed to be present, along with their default values and data type.
These are then filled with the data from the xml file when the program
is initialised, and are filled by the values calculated in the program which
are then output to the checkpoint file when a restart file is required.
Also deals with checking for user input errors, of the form of misspelt tags,
bad data types, and failure to input required fields.
Classes:
Input: Base input class.
InputAttribute: Input class for attribute data.
InputValue: Input class for scalar objects.
InputArray: Input class for arrays.
input_default: Class used to create mutable objects dynamically.
"""
__all__ = ['Input', 'InputValue', 'InputAttribute', 'InputArray', 'input_default']
import numpy as np
from copy import copy
from ipi.utils.io.io_xml import *
from ipi.utils.units import unit_to_internal, unit_to_user
class input_default(object):
    """Contains information required to dynamically create objects

    Holds a factory together with the positional and keyword arguments
    needed to build a fresh default value for each Input instance. This
    sidesteps the shared-mutable-default pitfall: the actual object is only
    created later, via factory(*args, **kwargs), so every Input gets its
    own independent copy of the default.

    Attributes:
        factory: Class or function called to create the default object.
        args: Tuple of positional arguments passed to the factory.
        kwargs: Dictionary of keyword arguments passed to the factory.
    """

    def __init__(self, factory, args=None, kwargs=None):
        """Initialises input_default.

        Args:
            factory: The class or function used to create the default object.
            args: Optional tuple of positional arguments for the factory.
            kwargs: Optional dictionary of keyword arguments for the factory.
        """
        # The default will later be generated by factory(*args, **kwargs);
        # normalise missing arguments to empty containers here.
        self.factory = factory
        self.args = () if args is None else args
        self.kwargs = {} if kwargs is None else kwargs
class Input(object):
"""Base class for input handling.
Has the generic methods for dealing with the xml input file. Parses the input
data, outputs the output data, and deals with storing and returning the
data obtained during the simulation for the restart files.
Attributes:
fields: A dictionary holding the possible tags contained within the
tags for this restart object, which are then turned into the objects
held by the object given by this restart object. The dictionary is
of the form:
{"tag name": ( Input_object,
{"default": default value,
"dtype": data type,
"options": list of available options,
"help": help string,
"dimension": dimensionality of data}), ... }.
dynamic: A dictionary holding the possible tags contained within the
tags for this restart object, which are then turned into the objects
held by the object given by this restart object. These are used for
tags that can be specified more than once.
The dictionary is of the form:
{"tag name": ( Input_object,
{"default": default value,
"dtype": data type,
"options": list of available options,
"help": help string,
"dimension": dimensionality of data}), ... }.
attribs: A dictionary holding the attribute data for the tag for this
restart object. The dictionary is of the form:
{"attribute name": ( Input_object,
{"default": default value,
"dtype": data type,
"options": list of available options,
"help": help string,
"dimension": dimensionality of data}), ... }.
extra: A list of tuples ( "name", Input_object ) that may be used to
extend the capabilities of the class, i.e. to hold several instances of
a field with the same name, or to hold variable numbers of elements.
default_help: The default help string.
_help: The help string of the object. Defaults to default_help.
_default: Optional default value.
_optional: A bool giving whether the field is a required field.
_explicit: A bool giving whether the field has been specified by the user.
_text: All text written between the tags of the object.
_label: A label to be used to identify the class in the latex user manual.
_defwrite: The string which would be output if the class has its default
value.
"""
fields = {}
attribs = {}
dynamic = {}
default_help = "Generic input value"
default_label = "" #used as a way to reference a particular class using
#hyperlinks
def __init__(self, help=None, default=None):
    """Initialises Input.

    Automatically adds all the fields and attribs names to the input object's
    dictionary, then initialises all the appropriate input objects
    as the corresponding values.

    Args:
        help: A help string.
        default: A default value.
    """
    # list of extended (dynamic) fields
    self.extra = []
    if help is None:
        self._help = self.default_help
    else:
        self._help = help
    if isinstance(default,input_default):
        #creates default dynamically if a suitable template is defined.
        self._default = default.factory(*default.args, **default.kwargs)
    else:
        self._default = default
    # Fields with a default are optional; those without must be set by the
    # user (enforced in check()).
    self._optional = not (self._default is None)
    self._label = self.default_label
    #For each tag name in the fields and attribs dictionaries,
    #creates and object of the type given, expanding the dictionary to give
    #the arguments of the __init__() function, then adds it to the input
    #object's dictionary.
    # NOTE: dict.iteritems() makes this module Python 2 only.
    for f, v in self.fields.iteritems():
        self.__dict__[f] = v[0](**v[1])
    for a, v in self.attribs.iteritems():
        self.__dict__[a] = v[0](**v[1])
    self.set_default()
    self._text = ""
    # stores what we would write out if the default was set
    self._defwrite = ""
    if not self._default is None:
        self._defwrite = self.write(name="%%NAME%%")
def set_default(self):
    """Resets this input to its default value, marking it as not user-set."""
    if self._default is not None:
        self.store(self._default)
    elif not hasattr(self, 'value'):
        # Guarantee that self.value always exists, even without a default,
        # so later lookups never raise AttributeError.
        self.value = None
    # The value came from the default, not from the user.
    self._explicit = False
def store(self, value=None):
    """Dummy function for storing data; marks this input as user-set."""
    self._explicit = True
def fetch(self):
    """Dummy function to retrieve data; validates the input first."""
    self.check()
def check(self):
    """Base function to check for input errors.

    Raises:
        ValueError: Raised if the user does not specify a required field.
    """
    # A value must either have been supplied explicitly or be optional
    # (i.e. carry a default).
    if not self._explicit and not self._optional:
        raise ValueError("Uninitialized Input value of type " + type(self).__name__)
def extend(self, name, xml):
    """ Dynamically add elements to the 'extra' list.

    Picks from one of the templates in the self.dynamic dictionary, then
    parses.

    Args:
        name: The tag name of the dynamically stored tag.
        xml: The xml_node object used to parse the data stored in the tags.
    """
    # Instantiate a fresh input object from the (class, kwargs) template,
    # fill it from the xml node, and record it under its tag name.
    newfield = self.dynamic[name][0](**self.dynamic[name][1])
    newfield.parse(xml)
    self.extra.append((name,newfield))
def write(self, name="", indent="", text="\n"):
    """Writes data in xml file format.

    Writes the tag, attributes, data and closing tag appropriate to the
    particular fields and attribs data. Writes in a recursive manner, so
    that objects contained in the fields dictionary have their write function
    called, so that their tags are written between the start and end tags
    of this object, as is required for the xml format.

    This also adds an indent to the lower levels of the xml heirarchy,
    so that it is easy to see which tags contain other tags.

    Args:
        name: An optional string giving the tag name. Defaults to "".
        indent: An optional string giving the string to be added to the start
            of the line, so usually a number of tabs. Defaults to "".
        text: Additional text to be output between the tags.

    Returns:
        A string giving all the data contained in the fields and attribs
        dictionaries, in the appropriate xml format.
    """
    rstr = indent + "<" + name;
    for a in self.attribs:
        # only write out attributes that are not defaults
        # have a very simple way to check whether they actually add something:
        # we compare with the string that would be output if the argument was set
        # to its default
        defstr = self.__dict__[a]._defwrite.replace("%%NAME%%",a)
        outstr = self.__dict__[a].write(name=a)
        if outstr != defstr:
            rstr += " " + outstr
    rstr += ">"
    rstr += text
    for f in self.fields:
        #only write out fields that are not defaults
        defstr = self.__dict__[f]._defwrite.replace("%%NAME%%",f)
        if defstr != self.__dict__[f].write(f): # here we must compute the write string twice not to be confused by indents.
            rstr += self.__dict__[f].write(f, " " + indent)
    for (f,v) in self.extra:
        # also write out extended (dynamic) fields if present
        rstr += v.write(f, " " + indent)
    # Only indent the closing tag when the body is multi-line.
    if text.find('\n') >= 0:
        rstr += indent + "</" + name + ">\n"
    else:
        rstr += "</" + name + ">\n"
    return rstr
def parse(self, xml=None, text=""):
    """Parses an xml file.

    Uses the xml_node class defined in io_xml to read all the information
    contained within the root tags, and uses it to give values for the attribs
    and fields data recursively. It does this by giving all the data between
    the appropriate field tag to the appropriate field restart object as a
    string, and the appropriate attribute data to the appropriate attribs
    restart object as a string. These data are then parsed by these objects
    until all the information is read, or an input error is found.

    Args:
        xml: An xml_node object containing all the data for the parent
            tag.
        text: The data held between the start and end tags.

    Raises:
        NameError: Raised if one of the tags in the xml input file is
            incorrect.
        ValueError: Raised if the user does not specify a required field.
    """
    # before starting, sets everything to its default -- if a default is set!
    for a in self.attribs:
        self.__dict__[a].set_default()
    for f in self.fields:
        self.__dict__[f].set_default()
    self.extra = []
    self._explicit = True
    if xml is None:
        self._text = text
    else:
        for a, v in xml.attribs.iteritems():
            if a in self.attribs:
                self.__dict__[a].parse(text=v)
            elif a == "_text":
                pass
            else:
                raise NameError("Attribute name '" + a + "' is not a recognized property of '" + xml.name + "' objects")
        for (f, v) in xml.fields: #reads all field and dynamic data.
            if f in self.fields:
                self.__dict__[f].parse(xml=v)
            elif f == "_text":
                self._text = v
            elif f in self.dynamic:
                self.extend(f, v)
            else:
                raise NameError("Tag name '" + f + "' is not a recognized property of '" + xml.name + "' objects")
        #checks for missing arguments.
        for a in self.attribs:
            va = self.__dict__[a]
            if not (va._explicit or va._optional):
                raise ValueError("Attribute name '" + a + "' is mandatory and was not found in the input for the property " + xml.name)
        for f in self.fields:
            vf = self.__dict__[f]
            if not (vf._explicit or vf._optional):
                raise ValueError("Field name '" + f + "' is mandatory and was not found in the input for the property " + xml.name)
def detail_str(self):
    """Prints out the supplementary information about a particular input class.

    Used to print out the dimensions, default value, possible options and data
    type of an input value to the LaTeX help file.
    """
    xstr = ""
    if hasattr(self, '_dimension') and self._dimension != "undefined": #gives dimension
        xstr += "dimension: " + self._dimension + "; "
    # Identity comparison with None (rather than `!=`) avoids invoking
    # __ne__ on array-like defaults, which could return a non-boolean.
    if self._default is not None and issubclass(self.__class__, InputAttribute):
        #We only print out the default if it has a well defined value.
        #For classes such as InputCell, self._default is not the value,
        #instead it is an object that is stored to give the default value in
        #self.value. For this reason we print out self.value at this stage,
        #and not self._default
        xstr += "default: " + self.pprint(self.value) + "; "
    if issubclass(self.__class__, InputAttribute):
        #if possible, prints out the type of data that is being used
        xstr += "data type: " + self.type_print(self.type) + "; "
    if hasattr(self, "_valid"):
        if self._valid is not None:
            xstr += "options: " #prints out valid options, if
            for option in self._valid: #required.
                xstr += "`" + str(option) + "', "
            xstr = xstr.rstrip(", ")
            xstr += "; "
    return xstr
def help_latex(self, name="", level=0, stop_level=None, standalone=True):
    """Function to generate a LaTeX formatted help file.

    Args:
        name: Name of the tag that has to be written out.
        level: Current level of the hierarchy being considered.
        stop_level: The depth to which information will be given. If not given,
            will give all information.
        standalone: A boolean giving whether the latex file produced will be a
            stand-alone document, or will be intended as a section of a larger
            document with cross-references between the different sections.

    Returns:
        A LaTeX formatted string.
    """

    #stops when we've printed out the prerequisite number of levels
    if (not stop_level is None and level > stop_level):
        return ""

    rstr = ""
    if level == 0:
        if standalone:
            #assumes that it is a stand-alone document, so must have
            #document options.
            rstr += r"\documentclass[12pt,fleqn]{report}"
            #custom LaTeX macros: \ipiitem renders one name/help/detail
            #triple, the ipifield environment draws one bordered field box
            rstr += r"""
\usepackage{etoolbox}
\usepackage{suffix}
\newcommand{\ipiitem}[3]{%
\setul{1pt}{.4pt}\ifblank{#1}{}{\ifstrequal{#1}{\underline{\smash{}}}{}{
{\noindent\textbf{#1}:\rule{0.0pt}{1.05\baselineskip}\quad}}}% uses a strut to add a bit of vertical space
{#2}\parskip=0pt\par
\ifblank{#3}{}%
{ {\hfill\raggedleft\textit{\small #3}\par} }
}
\makeatletter
\newenvironment{ipifield}[4]{%
\ifblank{#1}{}{\vspace{0.5em}}
\noindent\parskip=0pt\begin{tabular}[t]{|p{1.0\linewidth}}
%cell without border
\multicolumn{1}{@{}p{1.0\linewidth}}{
\ipiitem{\underline{\smash{#1}}}{#2}{}
\ifblank{#4}{ %
\ifblank{#3}{}{{\hfill\raggedleft\textit{\small #3}}\par}}{} } \vspace{-1em}\\ %
% cell with border
\ifblank{#4}{} %
{ \ifblank{#3}{}{\vspace{-1em}{\hfill\raggedleft\textit{\small #3}}\par} %
{#4}\vspace{-1em}\\\hline } % negative vspace to undo the line break
\end{tabular}
\parskip=0pt\list{}{\listparindent 1.5em%
\leftmargin \listparindent
\rightmargin 0pt
\parsep 0pt
\itemsep 0pt
\topsep 0pt
}%
\item\relax
}
{\endlist}
\makeatother
"""
            rstr += "\n\\begin{document}\n"
        if self._label != "" and not standalone:
            #assumes that it is part of a cross-referenced document, so only
            #starts a new section.
            rstr += "\\section{" + self._label + "}\n"
            rstr += "\\label{" + self._label + "}\n"
        rstr += "\\begin{ipifield}{}%\n"
    else:
        if self._label != "" and not standalone:
            #links the field name back to its own section when cross-referenced
            rstr += "\\begin{ipifield}{\hyperref["+self._label+"]{"+name+"}}%\n"
        else:
            rstr += "\\begin{ipifield}{"+name+"}%\n"

    rstr += "{"+self._help+"}%\n"
    rstr += "{"+self.detail_str()+"}%\n"

    rstr += "{"
    # Prints out the attributes
    if len(self.attribs) != 0:
        #don't print out units if not necessary
        if len(self.attribs) == 1 and (("units" in self.attribs) and self._dimension == "undefined"):
            pass
        else:
            for a in self.attribs:
                #don't print out units if not necessary
                if not (a == "units" and self._dimension == "undefined"):
                    rstr += "\\ipiitem{" + a + "}%\n{" + self.__dict__[a]._help + "}%\n{"+self.__dict__[a].detail_str()+"}%\n" #!!MUST ADD OTHER STUFF
    rstr+="}\n"

    #As above, for the fields. Only prints out if we have not reached the
    #user-specified limit.
    if len(self.fields) != 0 and level != stop_level:
        for f in self.fields:
            rstr += self.__dict__[f].help_latex(name=f, level=level+1, stop_level=stop_level, standalone=standalone)

    if len(self.dynamic) != 0 and level != stop_level:
        #dynamic fields are not instantiated on the object, so a dummy
        #instance is created just to recurse into its help text
        for f, v in self.dynamic.iteritems():
            dummy_obj = v[0](**v[1])
            rstr += dummy_obj.help_latex(name=f, level=level+1, stop_level=stop_level, standalone=standalone)

    rstr += "\\end{ipifield}\n"
    if level == 0 and standalone:
        #ends the created document if it is not part of a larger document
        rstr += "\\end{document}"

    #Some escape characters are necessary for the proper latex formatting
    #NOTE: the second replace undoes double-escaping of underscores that were
    #already escaped in the generated macros above
    rstr = rstr.replace('_', '\\_')
    rstr = rstr.replace('\\\\_', '\\_')
    rstr = rstr.replace('...', '\\ldots ')
    rstr = rstr.replace('<', '$<$')
    rstr = rstr.replace('>', '$>$')

    return rstr
def pprint(self, default, indent="", latex=True):
    """Convert arrays and other objects to human-readable strings.

    Args:
        default: The object that needs to be converted to a string.
        indent: The indent at the beginning of a line.
        latex: A boolean giving whether the string will be latex-format.

    Returns:
        A formatted string.
    """

    # isinstance() instead of type(x) is/== T: behaves identically for the
    # exact types and also handles subclasses correctly.
    if isinstance(default, np.ndarray):
        if default.shape == (0,):
            return " [ ] " #proper treatment of empty arrays.
        #indents new lines for multi-D arrays properly
        rstr = "\n" + indent + " "
        rstr += str(default).replace("\n", "\n" + indent + " ")
        if not latex:
            rstr += "\n" + indent + " "
        return rstr
    elif isinstance(default, str):
        if latex:
            return "`" + default + "'" #indicates that it is a string
        return " " + default + " "
    elif default == []:
        return " [ ] "
    elif default == {}:
        if latex:
            return " \\{ \\} " #again, escape characters needed for latex
        return " { } " #formatting
    #in most cases standard formatting will do
    return " " + str(default) + " "
def type_print(self, dtype):
    """Convert a data type to a human-readable string.

    Args:
        dtype: A data type.

    Returns:
        A descriptive name for the data type.

    Raises:
        TypeError: If the data type is not one of the recognized kinds.
    """

    # table-driven lookup; type objects compare by identity, so a dict key
    # lookup is equivalent to the chain of == comparisons
    type_names = {
        bool: "boolean",
        float: "float",
        np.float64: "float",
        int: "integer",
        np.uint64: "integer",
        np.int64: "integer",
        dict: "dictionary",
        str: "string",
        tuple: "tuple",
    }
    try:
        return type_names[dtype]
    except KeyError:
        raise TypeError("Unrecognized data type " + str(dtype))
def help_xml(self, name="", indent="", level=0, stop_level=None):
    """Function to generate an xml formatted help file.

    Args:
        name: A string giving the name of the root node.
        indent: The indent at the beginning of a line.
        level: Current level of the hierarchy being considered.
        stop_level: The depth to which information will be given. If not given,
            all information will be given.

    Returns:
        An xml formatted string.
    """

    #stops when we've printed out the prerequisite number of levels
    if (not stop_level is None and level > stop_level):
        return ""

    #these are booleans which tell us whether there are any attributes
    #and fields to print out
    show_attribs = (len(self.attribs) != 0)
    show_fields = (not (len(self.fields) == 0 and len(self.dynamic) == 0)) and level != stop_level

    rstr = ""
    rstr = indent + "<" + name; #prints tag name
    for a in self.attribs:
        if not (a == "units" and self._dimension == "undefined"):
            #don't print out units if not necessary
            rstr += " " + a + "=''" #prints attribute names
    rstr += ">\n"

    #prints help string
    rstr += indent + " <help> " + self._help + " </help>\n"
    if show_attribs:
        for a in self.attribs:
            if not (a == "units" and self._dimension == "undefined"):
                #information about tags is found in tags beginning with the name
                #of the attribute
                rstr += indent + " <" + a + "_help> " + self.__dict__[a]._help + " </" + a + "_help>\n"

    #prints dimensionality of the object
    if hasattr(self, '_dimension') and self._dimension != "undefined":
        rstr += indent + " <dimension> " + self._dimension + " </dimension>\n"

    if self._default != None and issubclass(self.__class__, InputAttribute):
        #We only print out the default if it has a well defined value.
        #For classes such as InputCell, self._default is not the value,
        #instead it is an object that is stored, putting the default value in
        #self.value. For this reason we print out self.value at this stage,
        #and not self._default
        rstr += indent + " <default>" + self.pprint(self.value, indent=indent, latex=False) + "</default>\n"
    if show_attribs:
        for a in self.attribs:
            if not (a == "units" and self._dimension == "undefined"):
                if self.__dict__[a]._default is not None:
                    rstr += indent + " <" + a + "_default>" + self.pprint(self.__dict__[a]._default, indent=indent, latex=False) + "</" + a + "_default>\n"

    #prints out valid options, if required.
    if hasattr(self, "_valid"):
        if self._valid is not None:
            rstr += indent + " <options> " + str(self._valid) + " </options>\n"
    if show_attribs:
        for a in self.attribs:
            if not (a == "units" and self._dimension == "undefined"):
                if hasattr(self.__dict__[a], "_valid"):
                    if self.__dict__[a]._valid is not None:
                        rstr += indent + " <" + a + "_options> " + str(self.__dict__[a]._valid) + " </" + a + "_options>\n"

    #if possible, prints out the type of data that is being used
    if issubclass(self.__class__, InputAttribute):
        rstr += indent + " <dtype> " + self.type_print(self.type) + " </dtype>\n"
    if show_attribs:
        for a in self.attribs:
            if not (a == "units" and self._dimension == "undefined"):
                rstr += indent + " <" + a + "_dtype> " + self.type_print(self.__dict__[a].type) + " </" + a + "_dtype>\n"

    #repeats the above instructions for any fields or dynamic tags.
    #these will only be printed if their level in the hierarchy is not above
    #the user specified limit.
    if show_fields:
        for f in self.fields:
            rstr += self.__dict__[f].help_xml(f, " " + indent, level+1, stop_level)
        for f, v in self.dynamic.iteritems():
            #we must create the object manually, as dynamic objects are
            #not automatically added to the input object's dictionary
            dummy_obj = v[0](**v[1])
            rstr += dummy_obj.help_xml(f, " " + indent, level+1, stop_level)

    rstr += indent + "</" + name + ">\n"
    return rstr
class InputAttribute(Input):
    """Class for handling attribute data.

    Has the methods for dealing with attribute data of the form:
    <tag_name attrib='data'> ..., where data is just a value. Takes the data and
    converts it to the required data_type, so that it can be used in the
    simulation.

    Attributes:
        type: Data type of the data.
        value: Value of data. Also specifies data type if type is None.
        _valid: An optional list of valid options.
    """

    def __init__(self, help=None, default=None, dtype=None, options=None):
        """Initialises InputAttribute.

        Args:
            help: A help string.
            default: A default value.
            dtype: An optional data type. Defaults to None.
            options: An optional list of valid options.

        Raises:
            TypeError: If dtype is not supplied.
            ValueError: If the default value is not in the options list.
        """

        if not dtype is None:
            self.type = dtype
        else:
            raise TypeError("You must provide dtype")

        super(InputAttribute,self).__init__(help, default)

        if options is not None:
            self._valid = options
            if not default is None and not self._default in self._valid:
                #This makes sure that the programmer has set the default value
                #so that it is a valid value.
                raise ValueError("Default value '" + str(self._default) + "' not in option list " + str(self._valid)+ "\n" + self._help)
        else:
            self._valid = None

    def parse(self, text=""):
        """Reads the data for a single attribute value from an xml file.

        Args:
            text: The data held between the start and end tags.
        """

        super(InputAttribute, self).parse(text=text)
        #converts the raw text into a value of the declared data type
        self.value = read_type(self.type, self._text)

    def store(self, value):
        """Stores the input data.

        Args:
            value: The raw data to be stored.
        """

        super(InputAttribute,self).store(value)
        self.value = value

    def fetch(self):
        """Returns the stored data."""

        super(InputAttribute,self).fetch()
        return self.value

    def check(self):
        """Function to check for input errors.

        Raises:
            ValueError: Raised if the value chosen is not one of the valid options.
        """

        super(InputAttribute,self).check()
        if not (self._valid is None or self.value in self._valid):
            #This checks that the user has set the value to a valid value.
            raise ValueError(str(self.value) + " is not a valid option (" + str(self._valid) + ")")

    def write(self, name=""):
        """Writes data in xml file format.

        Writes the attribute data in the appropriate format.

        Args:
            name: An optional string giving the attribute name. Defaults to "".

        Returns:
            A string giving the stored value in the appropriate format.
        """

        return name + "='" + write_type(self.type, self.value) + "'"
class InputValue(InputAttribute):
    """Scalar class for input handling.

    Has the methods for dealing with simple data tags of the form:
    <tag_name> data </tag_name>, where data is just a value. Takes the data and
    converts it to the required data_type, so that it can be used in the
    simulation.

    Attributes:
        units: The units that the input data is given in.
        _dimension: The dimensionality of the data.
    """

    default_dimension = "undefined"
    default_units = ""

    #every InputValue implicitly carries a "units" attribute
    attribs= { "units" : ( InputAttribute, { "dtype" : str, "help" : "The units the input data is given in.", "default" : default_units } ) }

    def __init__(self, help=None, default=None, dtype=None, options=None, dimension=None):
        """Initialises InputValue.

        Args:
            help: A help string.
            dimension: The dimensionality of the value.
            default: A default value.
            dtype: An optional data type. Defaults to None.
            options: An optional list of valid options.
        """

        # a note on units handling:
        # 1) units are only processed at parse/fetch time:
        #    internally EVERYTHING is in internal units
        # 2) if one adds an explicit "units" attribute to a derived class,
        #    the internal units handling will be just ignored
        if dimension is None:
            self._dimension = self.default_dimension
        else:
            self._dimension = dimension

        super(InputValue,self).__init__(help, default, dtype, options)

    def store(self, value, units=""):
        """Converts the data to the appropriate data type and units and stores it.

        Args:
            value: The raw data to be stored.
            units: Optional string giving the units that the data should be stored
                in.
        """

        super(InputValue,self).store(value)
        if units != "":
            self.units.store(units) #User can define in the code the units to be
                                    #printed

        self.value = value
        #NOTE(review): store scales by unit_to_user while fetch scales by
        #unit_to_internal — verify the conversion direction against the
        #units module before relying on these names.
        if self._dimension != "undefined":
            self.value *= unit_to_user(self._dimension, units, 1.0)

    def fetch(self):
        """Returns the stored data in the user defined units."""

        super(InputValue,self).fetch()

        rval = self.value
        if self._dimension != "undefined":
            rval *= unit_to_internal(self._dimension, self.units.fetch(), 1.0)
        return rval

    def write(self, name="", indent=""):
        """Writes data in xml file format.

        Writes the data in the appropriate format between appropriate tags.

        Args:
            name: An optional string giving the tag name. Defaults to "".
            indent: An optional string giving the string to be added to the start
                of the line, so usually a number of tabs. Defaults to "".

        Returns:
            A string giving the stored value in the appropriate xml format.
        """

        return Input.write(self, name=name, indent=indent, text=write_type(self.type, self.value))

    def parse(self, xml=None, text=""):
        """Reads the data for a single value from an xml file.

        Args:
            xml: An xml_node object containing the all the data for the parent
                tag.
            text: The data held between the start and end tags.
        """

        Input.parse(self, xml=xml, text=text)
        self.value = read_type(self.type, self._text)
ELPERLINE = 5  #number of array elements printed per line in xml output

class InputArray(InputValue):
    """Array class for input handling.

    Has the methods for dealing with simple data tags of the form:
    <tag_name shape="(shape)"> data </tag_name>, where data is an array
    of the form [data[0], data[1], ... , data[length]].

    Takes the data and converts it to the required data type,
    so that it can be used in the simulation. Also holds the shape of the array,
    so that we can use a simple 1D list of data to specify a multi-dimensional
    array.

    Attributes:
        shape: The shape of the array.
    """

    #inherits the "units" attribute from InputValue and adds "shape"
    attribs = copy(InputValue.attribs)
    attribs["shape"] = (InputAttribute, {"dtype": tuple, "help": "The shape of the array.", "default": (0,)})

    def __init__(self, help=None, default=None, dtype=None, dimension=None):
        """Initialises InputArray.

        Args:
            help: A help string.
            dimension: The dimensionality of the value.
            default: A default value.
            dtype: An optional data type. Defaults to None.
        """

        super(InputArray,self).__init__(help, default, dtype, dimension=dimension)

    def store(self, value, units=""):
        """Converts the data to the appropriate data type, shape and units and
        stores it.

        Args:
            value: The raw data to be stored.
            units: Optional string giving the units that the data should be stored
                in.
        """

        #the stored value is always a flat 1D array; shape is kept separately
        super(InputArray,self).store(value=np.array(value, dtype=self.type).flatten().copy(), units=units)
        #NOTE(review): value.shape requires that value is already an ndarray;
        #a plain list argument would raise AttributeError here — confirm all
        #callers pass arrays.
        self.shape.store(value.shape)

        #if the shape is not specified, assume the array is linear.
        if self.shape.fetch() == (0,):
            self.shape.store((len(self.value),))

    def fetch(self):
        """Returns the stored data in the user defined units."""

        #NOTE(review): the unit-converted value returned by super().fetch()
        #is discarded; reshaping is done on self.value directly — confirm
        #this is the intended behaviour.
        value = super(InputArray,self).fetch()

        #if the shape is not specified, assume the array is linear.
        if self.shape.fetch() == (0,):
            value = np.resize(self.value,0).copy()
        else:
            value = self.value.reshape(self.shape.fetch()).copy()

        return value

    def write(self, name="", indent=""):
        """Writes data in xml file format.

        Writes the data in the appropriate format between appropriate tags. Note
        that only ELPERLINE values are printed on each line if there are more
        than this in the array. If the values are floats, or another data type
        with a fixed width of data output, then they are aligned in columns.

        Args:
            name: An optional string giving the tag name. Defaults to "".
            indent: An optional string giving the string to be added to the start
                of the line, so usually a number of tabs. Defaults to "".

        Returns:
            A string giving the stored value in the appropriate xml format.
        """

        rstr = ""
        if (len(self.value) > ELPERLINE):
            rstr += "\n" + indent + " [ "
        else:
            rstr += " [ " #inlines the array if it is small enough

        for i, v in enumerate(self.value):
            if (len(self.value) > ELPERLINE and i > 0 and i%ELPERLINE == 0):
                #wrap to a new line after every ELPERLINE elements
                rstr += "\n" + indent + " "
            rstr += write_type(self.type, v) + ", "

        rstr = rstr.rstrip(", ") #get rid of trailing commas

        if (len(self.value) > ELPERLINE):
            rstr += " ]\n"
        else:
            rstr += " ] "

        return Input.write(self, name=name, indent=indent, text=rstr)

    def parse(self, xml=None, text=""):
        """Reads the data for an array from an xml file.

        Args:
            xml: An xml_node object containing the all the data for the parent
                tag.
            text: The data held between the start and end tags.
        """

        Input.parse(self, xml=xml, text=text)
        self.value = read_array(self.type, self._text)

        #if the shape is not specified, assume the array is linear.
        if self.shape.fetch() == (0,):
            self.shape.store((len(self.value),))
| gpl-3.0 |
kamanashisroy/cloc_pie | pie.py | 1 | 1141 | """
This code generates a pie chart from a cloc summary file.
"""
from pylab import *
import re
import csv
import sys  # sys.argv is used below; it was never imported (pylab's * does not export it)

# Read the cloc-generated summary file named on the command line.
# The slices will be ordered and plotted counter-clockwise.
labels = []  # language names, one per pie slice
fracs = []   # lines of code per language, used as slice sizes

if sys.argv[1].endswith(".csv"):
    # cloc --csv output: a header row followed by one row per language.
    with open(sys.argv[1], 'r') as f:
        langs = csv.reader(f, delimiter=',')
        # next(langs) works on both Python 2 and 3; langs.next() was 2-only
        header = next(langs)
        for row in langs:
            labels.append(row[header.index('language')])
            # pie() needs numeric sizes, not the raw csv strings
            fracs.append(float(row[header.index('code')]))
else:
    # Plain-text cloc summary: skip the 4-line banner, the '----' separator
    # rules and the trailing SUM row, then parse each language row.
    lineno = 0
    with open(sys.argv[1], 'r') as f:
        for line in f:
            lineno += 1
            if lineno < 5:
                continue
            if line.startswith('--'):
                continue
            if line.startswith('SUM'):
                continue
            # columns: language, files, blank, comment, code
            m = re.search(r'^([a-zA-Z/+\s4]+)+\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)$', line)
            if m is None:
                continue  # not a language row (blank line, footer, ...)
            labels.append(m.group(1))
            fracs.append(float(m.group(5)))  # group 5 is the "code" column

# make a square figure and axes
figure(1, figsize=(6, 6))
ax = axes([0.1, 0.1, 0.8, 0.8])

pie(fracs, labels=labels, autopct='%1.1f%%', shadow=True)
#title('Language pie', bbox={'facecolor':'0.8', 'pad':5})

#show()
savefig("pie.svg")
| gpl-3.0 |
shssoichiro/servo | components/script/dom/bindings/codegen/parser/tests/test_callback_interface.py | 142 | 2793 | import WebIDL
def WebIDLTest(parser, harness):
    """Exercise WebIDL callback-interface parsing rules.

    Checks that:
      * a callback interface parses and reports isCallback(),
      * a callback interface may not inherit from a non-callback interface,
      * a non-callback interface may not inherit from a callback interface,
      * isSingleOperationInterface() is true exactly for the callback
        interfaces declaring a single operation and nothing else.
    """
    parser.parse("""
        callback interface TestCallbackInterface {
          attribute boolean bool;
        };
    """)
    results = parser.finish()

    iface = results[0]

    harness.ok(iface.isCallback(), "Interface should be a callback")

    parser = parser.reset()
    threw = False
    # except Exception (not a bare except:) so KeyboardInterrupt/SystemExit
    # are not swallowed while still catching the expected parse error
    try:
        parser.parse("""
            interface TestInterface {
            };
            callback interface TestCallbackInterface : TestInterface {
              attribute boolean bool;
            };
        """)
        results = parser.finish()
    except Exception:
        threw = True

    harness.ok(threw, "Should not allow non-callback parent of callback interface")

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
            interface TestInterface : TestCallbackInterface {
            };
            callback interface TestCallbackInterface {
              attribute boolean bool;
            };
        """)
        results = parser.finish()
    except Exception:
        threw = True

    harness.ok(threw, "Should not allow callback parent of non-callback interface")

    parser = parser.reset()
    parser.parse("""
        callback interface TestCallbackInterface1 {
          void foo();
        };
        callback interface TestCallbackInterface2 {
          void foo(DOMString arg);
          void foo(TestCallbackInterface1 arg);
        };
        callback interface TestCallbackInterface3 {
          void foo(DOMString arg);
          void foo(TestCallbackInterface1 arg);
          static void bar();
        };
        callback interface TestCallbackInterface4 {
          void foo(DOMString arg);
          void foo(TestCallbackInterface1 arg);
          static void bar();
          const long baz = 5;
        };
        callback interface TestCallbackInterface5 {
          static attribute boolean bool;
          void foo();
        };
        callback interface TestCallbackInterface6 {
          void foo(DOMString arg);
          void foo(TestCallbackInterface1 arg);
          void bar();
        };
        callback interface TestCallbackInterface7 {
          static attribute boolean bool;
        };
        callback interface TestCallbackInterface8 {
          attribute boolean bool;
        };
        callback interface TestCallbackInterface9 : TestCallbackInterface1 {
          void foo();
        };
        callback interface TestCallbackInterface10 : TestCallbackInterface1 {
          void bar();
        };
    """)
    results = parser.finish()
    # only the first four declarations qualify as single-operation interfaces
    for (i, iface) in enumerate(results):
        harness.check(iface.isSingleOperationInterface(), i < 4,
                      "Interface %s should be a single operation interface" %
                      iface.identifier.name)
mheap/ansible | lib/ansible/modules/storage/netapp/na_ontap_user.py | 8 | 14649 | #!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: na_ontap_user
short_description: useradmin configuration and management
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.6'
author: Sumit Kumar (sumit4@netapp.com)
description:
- Create or destroy users.
options:
state:
description:
- Whether the specified user should exist or not.
choices: ['present', 'absent']
default: 'present'
name:
description:
- The name of the user to manage.
required: true
application:
description:
- Application to grant access to.
required: true
choices: ['console', 'http','ontapi','rsh','snmp','sp','ssh','telnet']
authentication_method:
description:
- Authentication method for the application.
- Not all authentication methods are valid for an application.
- Valid authentication methods for each application are as denoted in I(authentication_choices_description).
- Password for console application
- Password, domain, nsswitch, cert for http application.
- Password, domain, nsswitch, cert for ontapi application.
- Community for snmp application (when creating SNMPv1 and SNMPv2 users).
- The usm and community for snmp application (when creating SNMPv3 users).
- Password for sp application.
- Password for rsh application.
- Password for telnet application.
- Password, publickey, domain, nsswitch for ssh application.
required: true
choices: ['community', 'password', 'publickey', 'domain', 'nsswitch', 'usm']
set_password:
description:
- Password for the user account.
- It is ignored for creating snmp users, but is required for creating non-snmp users.
- For an existing user, this value will be used as the new password.
role_name:
description:
- The name of the role. Required when C(state=present)
lock_user:
description:
- Whether the specified user account is locked.
type: bool
vserver:
description:
- The name of the vserver to use.
required: true
'''
EXAMPLES = """
- name: Create User
na_ontap_user:
state: present
name: SampleUser
application: ssh
authentication_method: password
set_password: apn1242183u1298u41
lock_user: True
role_name: vsadmin
vserver: ansibleVServer
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapUser(object):
    """
    Common operations to manage users and roles.

    Wraps the ONTAP ZAPI 'security-login-*' calls: query, create, delete,
    lock/unlock and password change, and implements the Ansible
    present/absent state machine in apply().
    """

    def __init__(self):
        # base NetApp connection options (hostname, username, password, ...)
        # plus the module-specific user-management parameters
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            name=dict(required=True, type='str'),
            application=dict(required=True, type='str', choices=[
                'console', 'http', 'ontapi', 'rsh',
                'snmp', 'sp', 'ssh', 'telnet']),
            authentication_method=dict(required=True, type='str',
                                       choices=['community', 'password',
                                                'publickey', 'domain',
                                                'nsswitch', 'usm']),
            set_password=dict(required=False, type='str'),
            role_name=dict(required=False, type='str'),
            lock_user=dict(required=False, type='bool'),
            vserver=dict(required=True, type='str'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[
                ('state', 'present', ['role_name'])
            ],
            supports_check_mode=True
        )

        parameters = self.module.params

        # set up state variables
        self.state = parameters['state']
        self.name = parameters['name']
        self.application = parameters['application']
        self.authentication_method = parameters['authentication_method']
        self.set_password = parameters['set_password']
        self.role_name = parameters['role_name']
        self.lock_user = parameters['lock_user']
        self.vserver = parameters['vserver']

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.vserver)

    def get_user(self):
        """
        Checks if the user exists.

        :return:
            True if user found
            False if user is not found
        :rtype: bool

        (In practice this returns a dict with 'is_locked' when the user is
        found, None when the query returns no records, and False on ZAPI
        error 16034 — callers rely only on truthiness plus the dict lookup.)
        """
        security_login_get_iter = netapp_utils.zapi.NaElement('security-login-get-iter')
        query_details = netapp_utils.zapi.NaElement.create_node_with_children(
            'security-login-account-info', **{'vserver': self.vserver,
                                              'user-name': self.name,
                                              'application': self.application,
                                              'authentication-method':
                                                  self.authentication_method})
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(query_details)
        security_login_get_iter.add_child_elem(query)
        return_value = None
        try:
            result = self.server.invoke_successfully(security_login_get_iter,
                                                     enable_tunneling=False)
            if result.get_child_by_name('num-records') and \
                    int(result.get_child_content('num-records')) >= 1:
                interface_attributes = result.get_child_by_name('attributes-list').\
                    get_child_by_name('security-login-account-info')
                return_value = {
                    'is_locked': interface_attributes.get_child_content('is-locked')
                }
            return return_value
        except netapp_utils.zapi.NaApiError as error:
            # Error 16034 denotes a user not being found.
            if to_native(error.code) == "16034":
                return False
            else:
                self.module.fail_json(msg='Error getting user %s: %s' % (self.name, to_native(error)),
                                      exception=traceback.format_exc())

    def get_user_lock_info(self):
        """
        gets details of the user.

        Unlike get_user(), errors are not caught here; invoke_successfully
        raises on failure.
        """
        security_login_get_iter = netapp_utils.zapi.NaElement('security-login-get-iter')
        query_details = netapp_utils.zapi.NaElement.create_node_with_children(
            'security-login-account-info', **{'vserver': self.vserver,
                                              'user-name': self.name,
                                              'application': self.application,
                                              # 'role-name': self.role_name,
                                              'authentication-method':
                                                  self.authentication_method})
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(query_details)
        security_login_get_iter.add_child_elem(query)
        result = self.server.invoke_successfully(security_login_get_iter, True)
        return_value = None

        if result.get_child_by_name('num-records') and \
                int(result.get_child_content('num-records')) == 1:
            interface_attributes = result.get_child_by_name('attributes-list').\
                get_child_by_name('security-login-account-info')
            return_value = {
                'is_locked': interface_attributes.get_child_content('is-locked')
            }
        return return_value

    def create_user(self):
        """Creates the user via security-login-create; fails the module on error."""
        user_create = netapp_utils.zapi.NaElement.create_node_with_children(
            'security-login-create', **{'vserver': self.vserver,
                                        'user-name': self.name,
                                        'application': self.application,
                                        'authentication-method':
                                            self.authentication_method,
                                        'role-name': self.role_name})
        # password is optional at creation time (e.g. for snmp users)
        if self.set_password is not None:
            user_create.add_new_child('password', self.set_password)

        try:
            self.server.invoke_successfully(user_create,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating user %s: %s' % (self.name, to_native(error)),
                                  exception=traceback.format_exc())

    def lock_given_user(self):
        """
        locks the user

        :return:
            True if user locked
            False if lock user is not performed
        :rtype: bool
        """
        user_lock = netapp_utils.zapi.NaElement.create_node_with_children(
            'security-login-lock', **{'vserver': self.vserver,
                                      'user-name': self.name})

        try:
            self.server.invoke_successfully(user_lock,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error locking user %s: %s' % (self.name, to_native(error)),
                                  exception=traceback.format_exc())

    def unlock_given_user(self):
        """
        unlocks the user

        :return:
            True if user unlocked
            False if unlock user is not performed
        :rtype: bool
        """
        user_unlock = netapp_utils.zapi.NaElement.create_node_with_children(
            'security-login-unlock', **{'vserver': self.vserver,
                                        'user-name': self.name})

        try:
            self.server.invoke_successfully(user_unlock,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            # NOTE(review): 13114 is treated as "nothing to do" (already
            # unlocked) — confirm against the ONTAP ZAPI error code list.
            if to_native(error.code) == '13114':
                return False
            else:
                self.module.fail_json(msg='Error unlocking user %s: %s' % (self.name, to_native(error)),
                                      exception=traceback.format_exc())

    def delete_user(self):
        """Deletes the user login entry via security-login-delete."""
        user_delete = netapp_utils.zapi.NaElement.create_node_with_children(
            'security-login-delete', **{'vserver': self.vserver,
                                        'user-name': self.name,
                                        'application': self.application,
                                        'authentication-method':
                                            self.authentication_method})

        try:
            self.server.invoke_successfully(user_delete,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error removing user %s: %s' % (self.name, to_native(error)),
                                  exception=traceback.format_exc())

    def change_password(self):
        """
        Changes the password

        :return:
            True if password updated
            False if password is not updated
        :rtype: bool
        """
        # password change must be issued in the vserver context
        self.server.set_vserver(self.vserver)
        modify_password = netapp_utils.zapi.NaElement.create_node_with_children(
            'security-login-modify-password', **{
                'new-password': str(self.set_password),
                'user-name': self.name})
        try:
            self.server.invoke_successfully(modify_password,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            # NOTE(review): on 13114 (no-op change) this returns without
            # resetting the server vserver context below — confirm intended.
            if to_native(error.code) == '13114':
                return False
            else:
                self.module.fail_json(msg='Error setting password for user %s: %s' % (self.name, to_native(error)),
                                      exception=traceback.format_exc())

        self.server.set_vserver(None)
        return True

    def apply(self):
        """Drive the present/absent state machine and exit the module."""
        property_changed = False
        password_changed = False
        lock_user_changed = False
        netapp_utils.ems_log_event("na_ontap_user", self.server)
        user_exists = self.get_user()

        if user_exists:
            if self.state == 'absent':
                property_changed = True

            elif self.state == 'present':
                # a supplied password is always (re)applied; idempotence of
                # the password itself is decided server-side (error 13114)
                if self.set_password is not None:
                    password_changed = True

                if self.lock_user is not None:
                    # 'is_locked' comes back as the strings 'true'/'false'
                    if self.lock_user is True and user_exists['is_locked'] != 'true':
                        lock_user_changed = True
                    elif self.lock_user is False and user_exists['is_locked'] != 'false':
                        lock_user_changed = True
        else:
            if self.state == 'present':
                # Check if anything needs to be updated
                property_changed = True

        changed = property_changed or password_changed or lock_user_changed
        if changed:
            if self.module.check_mode:
                # check mode: report 'changed' without touching the system
                pass
            else:
                if self.state == 'present':
                    if not user_exists:
                        self.create_user()
                    else:
                        if password_changed:
                            self.change_password()
                        if lock_user_changed:
                            if self.lock_user:
                                self.lock_given_user()
                            else:
                                self.unlock_given_user()
                elif self.state == 'absent':
                    self.delete_user()
        self.module.exit_json(changed=changed)
def main():
    """Module entry point: build the module object and apply the state."""
    NetAppOntapUser().apply()


if __name__ == '__main__':
    main()
| gpl-3.0 |
ryanpitts/source | source/tags/migrations/0001_initial.py | 2 | 5411 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration 0001: create the tagging tables.

    Adds ``TechnologyTag``/``ConceptTag`` and their generic-relation
    through tables ``TechnologyTaggedItem``/``ConceptTaggedItem``.
    Auto-generated by South; do not hand-edit the frozen ORM below.
    """

    def forwards(self, orm):
        # Adding model 'TechnologyTag'
        db.create_table('tags_technologytag', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=100)),
        ))
        db.send_create_signal('tags', ['TechnologyTag'])
        # Adding model 'TechnologyTaggedItem'
        db.create_table('tags_technologytaggeditem', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('object_id', self.gf('django.db.models.fields.IntegerField')(db_index=True)),
            ('content_type', self.gf('django.db.models.fields.related.ForeignKey')(related_name='tags_technologytaggeditem_tagged_items', to=orm['contenttypes.ContentType'])),
            ('tag', self.gf('django.db.models.fields.related.ForeignKey')(related_name='tags_technologytaggeditem_techtag_items', to=orm['tags.TechnologyTag'])),
        ))
        db.send_create_signal('tags', ['TechnologyTaggedItem'])
        # Adding model 'ConceptTag'
        db.create_table('tags_concepttag', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=100)),
        ))
        db.send_create_signal('tags', ['ConceptTag'])
        # Adding model 'ConceptTaggedItem'
        db.create_table('tags_concepttaggeditem', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('object_id', self.gf('django.db.models.fields.IntegerField')(db_index=True)),
            ('content_type', self.gf('django.db.models.fields.related.ForeignKey')(related_name='tags_concepttaggeditem_tagged_items', to=orm['contenttypes.ContentType'])),
            ('tag', self.gf('django.db.models.fields.related.ForeignKey')(related_name='tags_concepttaggeditem_concepttag_items', to=orm['tags.ConceptTag'])),
        ))
        db.send_create_signal('tags', ['ConceptTaggedItem'])

    def backwards(self, orm):
        # Deleting model 'TechnologyTag'
        db.delete_table('tags_technologytag')
        # Deleting model 'TechnologyTaggedItem'
        db.delete_table('tags_technologytaggeditem')
        # Deleting model 'ConceptTag'
        db.delete_table('tags_concepttag')
        # Deleting model 'ConceptTaggedItem'
        db.delete_table('tags_concepttaggeditem')

    # Frozen snapshot of the model state at this point in history; South
    # uses it to build the 'orm' object passed to forwards()/backwards().
    models = {
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'tags.concepttag': {
            'Meta': {'object_name': 'ConceptTag'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
        },
        'tags.concepttaggeditem': {
            'Meta': {'object_name': 'ConceptTaggedItem'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tags_concepttaggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tags_concepttaggeditem_concepttag_items'", 'to': "orm['tags.ConceptTag']"})
        },
        'tags.technologytag': {
            'Meta': {'object_name': 'TechnologyTag'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
        },
        'tags.technologytaggeditem': {
            'Meta': {'object_name': 'TechnologyTaggedItem'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tags_technologytaggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tags_technologytaggeditem_techtag_items'", 'to': "orm['tags.TechnologyTag']"})
        }
    }

    complete_apps = ['tags']
gauribhoite/personfinder | env/site-packages/webob/etag.py | 21 | 6261 | """
Does parsing of ETag-related headers: If-None-Matches, If-Matches
Also If-Range parsing
"""
from webob.datetime_utils import *
from webob.util import header_docstring, warn_deprecation
__all__ = ['AnyETag', 'NoETag', 'ETagMatcher', 'IfRange', 'NoIfRange', 'etag_property']
def etag_property(key, default, rfc_section):
    """Build a request property exposing an ETag-style environ entry.

    ``key`` is the WSGI environ key, ``default`` is the matcher returned
    when the header is absent/empty, and ``rfc_section`` is interpolated
    into the generated property docstring.
    """
    doc = header_docstring(key, rfc_section)
    doc += "  Converts it as a Etag."
    def fget(req):
        # Missing or empty header -> caller-supplied default matcher;
        # a literal '*' matches anything.
        value = req.environ.get(key)
        if not value:
            return default
        elif value == '*':
            return AnyETag
        else:
            return ETagMatcher.parse(value)
    def fset(req, val):
        # None clears the raw value; anything else is serialized back
        # into the environ as a string.
        if val is None:
            req.environ[key] = None
        else:
            req.environ[key] = str(val)
    def fdel(req):
        del req.environ[key]
    return property(fget, fset, fdel, doc=doc)
def _warn_weak_match_deprecated():
    # Shared helper so every deprecated weak_match() implementation emits
    # an identical warning ('1.2' is the version argument passed through
    # to warn_deprecation; 3 is the stacklevel).
    warn_deprecation("weak_match is deprecated", '1.2', 3)
class _AnyETag(object):
"""
Represents an ETag of *, or a missing ETag when matching is 'safe'
"""
def __repr__(self):
return '<ETag *>'
def __nonzero__(self):
return False
def __contains__(self, other):
return True
def weak_match(self, other):
_warn_weak_match_deprecated()
return True
def __str__(self):
return '*'
AnyETag = _AnyETag()
class _NoETag(object):
"""
Represents a missing ETag when matching is unsafe
"""
def __repr__(self):
return '<No ETag>'
def __nonzero__(self):
return False
def __contains__(self, other):
return False
def weak_match(self, other):
_warn_weak_match_deprecated()
return False
def __str__(self):
return ''
NoETag = _NoETag()
class ETagMatcher(object):
    """
    Represents an ETag request.  Supports containment to see if an
    ETag matches.  ``weak_match(etag)`` (deprecated) additionally allows
    weak ETags to match (allowable for conditional GET requests, but not
    ranges or other methods).
    """
    def __init__(self, etags, weak_etags=()):
        # 'etags' / 'weak_etags' hold the unquoted tag strings.
        self.etags = etags
        self.weak_etags = weak_etags

    def __contains__(self, other):
        return other in self.etags or other in self.weak_etags

    def weak_match(self, other):
        # Deprecated; strips an optional 'W/' prefix before comparing.
        _warn_weak_match_deprecated()
        if other.lower().startswith('w/'):
            other = other[2:]
        return other in self.etags or other in self.weak_etags

    def __repr__(self):
        return '<ETag %s>' % (
            ' or '.join(self.etags))

    @classmethod
    def parse(cls, value):
        """
        Parse this from a header value
        """
        results = []
        weak_results = []
        while value:
            if value.lower().startswith('w/'):
                # Next item is weak
                weak = True
                value = value[2:]
            else:
                weak = False
            if value.startswith('"'):
                try:
                    etag, rest = value[1:].split('"', 1)
                except ValueError:
                    # Unterminated quote: salvage what we can.
                    etag = value.strip(' ",')
                    rest = ''
                else:
                    rest = rest.strip(', ')
            else:
                # Unquoted tag: take up to the next comma.
                if ',' in value:
                    etag, rest = value.split(',', 1)
                    rest = rest.strip()
                else:
                    etag = value
                    rest = ''
            if etag == '*':
                return AnyETag
            if etag:
                if weak:
                    weak_results.append(etag)
                else:
                    results.append(etag)
            value = rest
        return cls(results, weak_results)

    def __str__(self):
        # BUGFIX: the previous code did items = map(...) then
        # items.append(...), which fails on Python 3 where map() returns
        # an iterator without .append; build a real list instead.
        items = ['"%s"' % etag for etag in self.etags]
        for weak in self.weak_etags:
            items.append('W/"%s"' % weak)
        return ', '.join(items)
class IfRange(object):
    """
    Parses and represents the If-Range header, which can be
    an ETag *or* a date
    """
    def __init__(self, etag=None, date=None):
        # Exactly one of 'etag' / 'date' is normally set; both None means
        # an unconditional (always-matching) If-Range.
        self.etag = etag
        self.date = date

    def __repr__(self):
        if self.etag is None:
            etag = '*'
        else:
            etag = str(self.etag)
        if self.date is None:
            date = '*'
        else:
            date = serialize_date(self.date)
        return '<%s etag=%s, date=%s>' % (
            self.__class__.__name__,
            etag, date)

    def __str__(self):
        if self.etag is not None:
            return str(self.etag)
        elif self.date:
            return serialize_date(self.date)
        else:
            return ''

    def match(self, etag=None, last_modified=None):
        """
        Return True if the If-Range header matches the given etag or last_modified
        """
        if self.date is not None:
            if last_modified is None:
                # Conditional with nothing to base the condition won't work
                return False
            return last_modified <= self.date
        elif self.etag is not None:
            if not etag:
                return False
            return etag in self.etag
        return True

    def match_response(self, response):
        """
        Return True if this matches the given ``webob.Response`` instance.
        """
        return self.match(etag=response.etag, last_modified=response.last_modified)

    @classmethod
    def parse(cls, value):
        """
        Parse this from a header value.
        """
        date = etag = None
        if not value:
            # BUGFIX: NoETag is already a singleton *instance*; calling it
            # (``NoETag()``) raised TypeError for an empty header value.
            etag = NoETag
        elif value and value.endswith(' GMT'):
            # Must be a date
            date = parse_date(value)
        else:
            etag = ETagMatcher.parse(value)
        return cls(etag=etag, date=date)
class _NoIfRange(object):
"""
Represents a missing If-Range header
"""
def __repr__(self):
return '<Empty If-Range>'
def __str__(self):
return ''
def __nonzero__(self):
return False
def match(self, etag=None, last_modified=None):
return True
def match_response(self, response):
return True
NoIfRange = _NoIfRange()
| apache-2.0 |
tuttinator/nz-mps-python | models/mp.py | 1 | 1686 | from sqlalchemy import Column, Integer, Boolean, String
from database import Base
""" Member of Parliament """
class MP(Base):
    # SQLAlchemy declarative mapping for the 'mps' table.
    __tablename__ = 'mps'
    id = Column(Integer, primary_key=True)
    first_name = Column(String(150))
    last_name = Column(String(150))
    party = Column(String(150))
    # True for list MPs (no electorate seat); set by _parse_electorate().
    list_mp = Column(Boolean)
    details_url = Column(String(150))
    image_url = Column(String(150))
    # Prefix applied to the relative URLs passed into this class.
    _base_url = 'http://www.parliament.nz'
    """ Init a new MP """
    def __init__(self, name=None, details_url=None, electoral_details=None):
        # 'name' arrives as "Last, First"; 'electoral_details' as
        # "Party, Electorate" (both comma-separated, whitespace-padded).
        self.last_name, self.first_name = [n.strip() for n in name.split(',')]
        self.details_url = self._base_url + details_url
        self.party, self.electorate = [e.strip() for e in electoral_details.split(',')]
        self.list_mp = False
        self._parse_electorate()
    def __repr__(self):
        return('<MP %s>' % self._formatted_title())
    def _formatted_title(self):
        # NOTE(review): '%i' raises TypeError while self.id is still None
        # (i.e. before the row is flushed) -- confirm repr() is only used
        # on persisted instances.
        return("id=%i first_name='%s' last_name='%s' party='%s'" % (self.id, self.first_name, self.last_name, self.party))
    def image_from_src(self, src):
        # 'src' is a site-relative image path; store it as an absolute URL.
        self.image_url = self._base_url + src
    def as_json(self):
        # Plain-dict view for JSON serialization.  'electorate' is omitted
        # here (it is not a mapped column).
        return {
            'id': self.id,
            'first_name': self.first_name,
            'last_name': self.last_name,
            'party': self.party,
            'list_mp': self.list_mp,
            'details_url': self.details_url,
            'image_url': self.image_url
        }
    """ Parses 'List' as a List MP """
    def _parse_electorate(self):
        # An electorate of 'List' means no seat: clear it and flag list_mp.
        if self.electorate == 'List':
            self.electorate = None
            self.list_mp = True
| mit |
shingonoide/odoo | addons/l10n_gr/__init__.py | 438 | 1102 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2008 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#import sandwich_wizard
#import order_create
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ict-felix/stack | ofam/src/package.py | 3 | 5266 | #!/usr/bin/env python
# Copyright (c) 2011-2012 The Board of Trustees of The Leland Stanford Junior University
import os
import os.path
import shutil
import subprocess
import sys
from optparse import OptionParser
VALID_DEB_DIALECTS = set([
"lucid",
"squeeze"
])
def package_rpm (opts):
  # Stub: RPM generation is not implemented.  Returns None, so callers
  # must not feed its result to publish().
  print "This package script does not currently support generating RPMs."
def setup_deb_dialect(dialect):
  """Validate *dialect* and install its debian control files.

  Copies debian/<name>.<dialect> over debian/<name> so that the dpkg
  tooling picks up the dialect-specific deps / post-install scripts.
  Exits the process when the dialect is unknown.
  """
  if dialect not in VALID_DEB_DIALECTS:
    print("Unknown debian dialect: %s" % (dialect))
    # BUGFIX: 'sys' was used here without ever being imported, turning a
    # bad dialect into a NameError instead of a clean exit (the import is
    # added at the top of the file).
    sys.exit(1)
  # Right now the only thing we do with dialect setup is copy the proper
  # dialect control files (deps, post-install instructions, etc.)
  control_dir = "%s/debian/" % (os.getcwd())
  for f in os.listdir(control_dir):
    if os.path.isfile("%s/%s" % (control_dir, f)):
      if f.endswith(dialect):
        shutil.copyfile("%s/%s" % (control_dir, f),
                        "%s/%s" % (control_dir, f[:-(len(dialect)+1)]))
def getDialectRepoPath (dialect):
  """Map a debian dialect name onto its repository sub-directory.

  Currently the identity mapping.
  """
  repo_subdir = dialect
  return repo_subdir
def package_deb (opts):
  """Build the .deb for opts.version/opts.tag and return a dict mapping
  release type -> [package path, Packages.gz path] for publication."""
  setup_deb_dialect(opts.dialect)
  repo = getDialectRepoPath(opts.dialect)
  # A non-integer tag marks a release candidate build.
  try:
    i = int(opts.tag)
    rc = False
  except ValueError:
    rc = True
  # Odd minor versions are development (unstable) releases; even minors
  # are release builds (stable+staging, or staging-only for RCs).
  minor = opts.version.split(".")[1]
  if (int(minor) % 2):
    rtypes = ["unstable"]
  else:
    if not rc:
      rtypes = ["stable", "staging"]
    else:
      rtypes = ["staging"]
  debchange(opts.version, opts.tag)
  if rc:
    fixup_setup(opts.version, opts.tag)
  else:
    fixup_setup(opts.version)
  # First packaging of a version gets an hg tag.
  if opts.tag == "1":
    repo_tag(opts.version)
  build_deb(opts.version, opts.tag, rtypes, repo)
  print "----------------------------------------"
  if not rc:
    print "Make sure to HG COMMIT after this build!"
  else:
    # RC builds revert the working copy changes made above.
    call("hg revert --all --no-backup")
  drops = {}
  for rtype in rtypes:
    pkg = "/tmp/%s/%s/all/foam_%s-%s_all.deb" % (repo, rtype, opts.version, opts.tag)
    meta = "/tmp/%s/%s/Packages.gz" % (repo, rtype)
    print "%s %s" % (pkg, meta)
    drops[rtype] = [pkg, meta]
  return drops
def call (cmd):
  """Echo *cmd*, run it through the shell, and block until it exits."""
  print(cmd)
  p = subprocess.Popen(cmd, shell=True)
  # Popen.wait() reaps the child itself; the previous direct
  # os.waitpid(p.pid, 0) races Popen's own bookkeeping and is not
  # portable to Windows.
  p.wait()
def build_deb (version, tag, rtypes, repo):
  """Build the sdist, debianize it under /tmp, run debuild, and drop the
  resulting .deb plus a regenerated Packages.gz into /tmp/<repo>/<rtype>."""
  call("/usr/bin/python setup.py sdist")
  shutil.copyfile("dist/foam-%s.tar.gz" % (version), "/tmp/foam-%s.tar.gz" % version)
  foamdir = os.getcwd()
  os.chdir("/tmp")
  # Clear any leftover tree from a previous build of the same version.
  try:
    shutil.rmtree("/tmp/foam-%s" % version)
  except OSError:
    pass
  call("tar -zxf foam-%s.tar.gz" % version)
  shutil.copytree("%s/debian" % foamdir, "/tmp/foam-%s/debian" % version)
  shutil.move("foam-%s.tar.gz" % version, "foam_%s.orig.tar.gz" % version)
  os.chdir("/tmp/foam-%s" % version)
  # Unsigned build (-uc -us).
  call("/usr/bin/debuild -uc -us")
  os.chdir("/tmp")
  # Ensure the per-release-type repo directories exist.
  for rtype in rtypes:
    try:
      os.makedirs("/tmp/%s/%s/all" % (repo, rtype))
    except os.error, e:
      continue
  # Remove any previous copy of this exact package version.
  for rtype in rtypes:
    try:
      os.remove("/tmp/%s/%s/all/foam_%s-%s_all.deb" % (repo, rtype, version, tag))
    except os.error, e:
      continue
  for rtype in rtypes:
    shutil.copy("/tmp/foam_%s-%s_all.deb" % (version, tag),
                "/tmp/%s/%s/all/" % (repo, rtype))
    call("/usr/bin/dpkg-scanpackages -m %s/%s | gzip -9c > /tmp/%s/%s/Packages.gz" % (
      repo, rtype, repo, rtype))
  os.chdir(foamdir)
def debchange (version, tag):
  # Append a "version-tag" entry to debian/changelog via dch.
  call("/usr/bin/dch -v %s-%s \"New upstream\"" % (version, tag))
def repo_tag (version):
  # Tag the mercurial repository for this release.
  call("/usr/local/bin/hg tag FOAM-%s" % version)
def fixup_setup (version, tag = None):
  """Stamp *version* into setup.py and rewrite src/foam/version.py.

  When *tag* is given, the version file records 'version-tag'.
  """
  # Read/modify/write using context managers so every handle is flushed
  # and closed; the previous code rebound 'f' without closing the
  # setup.py write handle, so that write could be left unflushed.
  with open("setup.py", "r") as f:
    new_setup = []
    for line in f.readlines():
      if line.count("version"):
        new_setup.append("      version='%s',\n" % version)
      else:
        new_setup.append(line)
  with open("setup.py", "w+") as f:
    f.write("".join(new_setup))
  with open("src/foam/version.py", "w+") as f:
    if tag is None:
      f.write("VERSION = '%s'\n" % (version))
    else:
      f.write("VERSION = '%s-%s'\n" % (version, tag))
def publish (val, drops, dialect):
  # 'val' is "<method>,<method-options>"; only the "s3" method exists.
  method,options = val.split(",", 1)
  if method == "s3":
    publish_s3(options, drops, dialect)
  else:
    print "Unknown method for publication: %s" % (method)
def publish_s3 (options, drops, dialect):
  """Upload every package/metadata file in *drops* to the S3 bucket named
  by *options*, under foam-pkg/<dialect>/<rtype>/all/, marked public."""
  from boto.s3.connection import S3Connection
  from boto.s3.key import Key
  bucket = options.strip()
  # You must set AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
  # in your environment
  conn = S3Connection()
  b = conn.get_bucket(bucket)
  for repo,paths in drops.iteritems():
    for path in paths:
      k = Key(b)
      k.key = 'foam-pkg/%s/%s/all/%s' % (dialect, repo, os.path.basename(path))
      k.set_contents_from_filename(path)
      k.make_public()
      print "Uploaded %s to %s" % (path, k)
if __name__ == '__main__':
  parser = OptionParser()
  parser.add_option("--version", dest="version")
  parser.add_option("--tag", dest="tag", default="1")
  parser.add_option("--rpm", dest="rpm", action="store_true", default=False)
  parser.add_option("--deb", dest="deb", action="store_true", default=False)
  parser.add_option("--dialect", dest="dialect", default="lucid")
  parser.add_option("--publish", dest="publish", default=None)
  (opts, args) = parser.parse_args()
  # BUGFIX: 'drops' was previously unbound when --publish was given
  # without --deb/--rpm, crashing with a NameError (and package_rpm
  # returns None, which publish() cannot handle either).
  drops = None
  if opts.deb:
    drops = package_deb(opts)
  if opts.rpm:
    drops = package_rpm(opts)
  if opts.publish:
    if not drops:
      parser.error("--publish requires a successful --deb build")
    publish(opts.publish, drops, opts.dialect)
| apache-2.0 |
rlmv/pyphi | test/test_validate.py | 1 | 5805 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from pyphi import exceptions, macro, Network, Subsystem, validate
def test_validate_direction():
    # An unrecognized direction string must be rejected with ValueError.
    with pytest.raises(ValueError):
        assert validate.direction("dogeeeee")
def test_validate_tpm_wrong_shape():
tpm = np.arange(3**3).reshape(3, 3, 3)
with pytest.raises(ValueError):
assert validate.tpm(tpm)
def test_validate_tpm_nonbinary_nodes():
tpm = np.arange(3*3*2).reshape(3, 3, 2)
with pytest.raises(ValueError):
assert validate.tpm(tpm)
def test_validate_tpm_conditional_independence():
tpm = np.array([
[1, 0, 0, 0],
[0, .5, .5, 0],
[0, .5, .5, 0],
[0, 0, 0, 1],
])
with pytest.raises(ValueError):
validate.conditionally_independent(tpm)
with pytest.raises(ValueError):
validate.tpm(tpm)
def test_validate_cm_valid(s):
assert validate.connectivity_matrix(s.network.connectivity_matrix)
def test_validate_cm_not_square():
cm = np.random.binomial(1, 0.5, (4, 5))
with pytest.raises(ValueError):
assert validate.connectivity_matrix(cm)
def test_validate_cm_not_2D():
cm = np.arange(8).reshape(2, 2, 2)
with pytest.raises(ValueError):
assert validate.connectivity_matrix(cm)
def test_validate_cm_not_binary():
cm = np.arange(16).reshape(4, 4)
with pytest.raises(ValueError):
assert validate.connectivity_matrix(cm)
def test_validate_network_wrong_cm_size(s):
with pytest.raises(ValueError):
Network(s.network.tpm, np.ones(16).reshape(4, 4))
def test_validate_is_network(s):
with pytest.raises(ValueError):
validate.is_network(s)
validate.is_network(s.network)
def test_validate_state_no_error_1(s):
validate.state_reachable(s)
def test_validate_state_error(s):
    # Constructing a Subsystem in a state its TPM cannot reach must raise.
    with pytest.raises(exceptions.StateUnreachableError):
        state = (0, 1, 0)
        Subsystem(s.network, state, s.node_indices)
def test_validate_state_no_error_2():
tpm = np.array([
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
])
net = Network(tpm)
# Globally impossible state.
state = (1, 1, 0, 0)
# But locally possible for first two nodes.
subsystem = Subsystem(net, state, (0, 1))
validate.state_reachable(subsystem)
def test_validate_node_labels():
    # Matching label/index counts (or no labels at all) are accepted.
    validate.node_labels(['A', 'B'], (0, 1))
    validate.node_labels(None, (0, 1))
    # Too few labels, too few indices, and duplicate labels all raise.
    with pytest.raises(ValueError):
        validate.node_labels(['A'], (0, 1))
    with pytest.raises(ValueError):
        validate.node_labels(['A', 'B'], (0,))
    with pytest.raises(ValueError):
        validate.node_labels(['A', 'A'], (0, 1))
def test_validate_time_scale():
    # Non-integers and values below 1 are rejected...
    with pytest.raises(ValueError):
        validate.time_scale(1.3)
    with pytest.raises(ValueError):
        validate.time_scale(-1)
    with pytest.raises(ValueError):
        validate.time_scale(0)
    # ...while positive integers are accepted.
    validate.time_scale(1)
    validate.time_scale(2)
    # ... etc
def test_validate_coarse_grain():
# Good:
cg = macro.CoarseGrain(((2,), (3,)), (((0,), (1,)), (((0,), (1,)))))
validate.coarse_grain(cg)
# Mismatched output and state lengths
cg = macro.CoarseGrain(((2,),), (((0,), (1,)), (((0,), (1,)))))
with pytest.raises(ValueError):
validate.coarse_grain(cg)
# Missing 1-node-on specification in second state grouping
cg = macro.CoarseGrain(((2,), (3,)), (((0,), (1,)), (((0,), ()))))
with pytest.raises(ValueError):
validate.coarse_grain(cg)
# Two partitions contain same element
cg = macro.CoarseGrain(((5,), (5,)), (((0,), (1,)), (((0,), (1,)))))
with pytest.raises(ValueError):
validate.coarse_grain(cg)
def test_validate_blackbox():
validate.blackbox(macro.Blackbox(((0, 1),), (1,)))
# Unsorted output indices
with pytest.raises(ValueError):
validate.blackbox(macro.Blackbox(((0, 1),), (1, 0)))
# Two boxes may not contain the same elements
with pytest.raises(ValueError):
validate.blackbox(macro.Blackbox(((0,), (0, 1)), (0, 1)))
# Every box must have an output
with pytest.raises(ValueError):
validate.blackbox(macro.Blackbox(((0,), (1,)), (0,)))
def test_validate_partition():
# Micro-element appears in two macro-elements
with pytest.raises(ValueError):
validate.partition(((0,), (0, 1)))
def test_validate_blackbox_and_coarsegrain():
blackbox = None
coarse_grain = macro.CoarseGrain(((0, 1), (2,)), ((0, 1), (2,)))
validate.blackbox_and_coarse_grain(blackbox, coarse_grain)
blackbox = macro.Blackbox(((0, 1), (2,)), (0, 2))
coarse_grain = None
validate.blackbox_and_coarse_grain(blackbox, coarse_grain)
blackbox = macro.Blackbox(((0, 1), (2,)), (0, 1, 2))
coarse_grain = macro.CoarseGrain(((0, 1), (2,)), ((0, 1), (2,)))
validate.blackbox_and_coarse_grain(blackbox, coarse_grain)
# Blackboxing with multiple outputs must be coarse-grained
blackbox = macro.Blackbox(((0, 1), (2,)), (0, 1, 2))
coarse_grain = None
with pytest.raises(ValueError):
validate.blackbox_and_coarse_grain(blackbox, coarse_grain)
# Coarse-graining does not group multiple outputs of a box into the same
# macro element
blackbox = macro.Blackbox(((0, 1), (2,)), (0, 1, 2))
coarse_grain = macro.CoarseGrain(((0,), (1, 2)), ((0, 1), (2,)))
with pytest.raises(ValueError):
validate.blackbox_and_coarse_grain(blackbox, coarse_grain)
| gpl-3.0 |
ehazlett/graphite | carbon/lib/carbon/service.py | 2 | 7179 | #!/usr/bin/env python
"""Copyright 2009 Chris Davis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
from os.path import exists
from twisted.application.service import MultiService
from twisted.application.internet import TCPServer, TCPClient, UDPServer
from twisted.internet.protocol import ServerFactory
from twisted.python.components import Componentized
from twisted.python.log import ILogObserver
# Attaching modules to the global state module simplifies import order hassles
from carbon import util, state, events, instrumentation
from carbon.log import carbonLogObserver
state.events = events
state.instrumentation = instrumentation
class CarbonRootService(MultiService):
  """Root Service that properly configures twistd logging"""
  def setServiceParent(self, parent):
    # Attach normally, then, when the parent is the twistd Application
    # (a Componentized), install carbon's log observer as its
    # ILogObserver component.
    MultiService.setServiceParent(self, parent)
    if isinstance(parent, Componentized):
      parent.setComponent(ILogObserver, carbonLogObserver)
def createBaseService(config):
  """Assemble the service tree shared by every carbon daemon.

  Installs the configured TCP/UDP/AMQP metric listeners, the optional
  manhole, and the self-instrumentation service on a CarbonRootService.
  """
  from carbon.conf import settings
  from carbon.protocols import (MetricLineReceiver, MetricPickleReceiver,
                                MetricDatagramReceiver)
  root = CarbonRootService()
  root.setName(settings.program)
  use_amqp = settings.get("ENABLE_AMQP", False)
  if use_amqp:
    from carbon import amqp_listener
    amqp_host = settings.get("AMQP_HOST", "localhost")
    amqp_port = settings.get("AMQP_PORT", 5672)
    amqp_user = settings.get("AMQP_USER", "guest")
    amqp_password = settings.get("AMQP_PASSWORD", "guest")
    amqp_verbose = settings.get("AMQP_VERBOSE", False)
    amqp_vhost = settings.get("AMQP_VHOST", "/")
    amqp_spec = settings.get("AMQP_SPEC", None)
    amqp_exchange_name = settings.get("AMQP_EXCHANGE", "graphite")
  # Line- and pickle-protocol TCP listeners (skipped when the port is
  # unset/zero).
  tcp_receivers = (
    (settings.LINE_RECEIVER_INTERFACE, settings.LINE_RECEIVER_PORT,
     MetricLineReceiver),
    (settings.PICKLE_RECEIVER_INTERFACE, settings.PICKLE_RECEIVER_PORT,
     MetricPickleReceiver),
  )
  for interface, port, protocol in tcp_receivers:
    if not port:
      continue
    listener_factory = ServerFactory()
    listener_factory.protocol = protocol
    TCPServer(int(port), listener_factory, interface=interface).setServiceParent(root)
  if settings.ENABLE_UDP_LISTENER:
    udp_listener = UDPServer(int(settings.UDP_RECEIVER_PORT),
                             MetricDatagramReceiver(),
                             interface=settings.UDP_RECEIVER_INTERFACE)
    udp_listener.setServiceParent(root)
  if use_amqp:
    amqp_factory = amqp_listener.createAMQPListener(
      amqp_user, amqp_password, vhost=amqp_vhost, spec=amqp_spec,
      exchange_name=amqp_exchange_name, verbose=amqp_verbose)
    TCPClient(amqp_host, int(amqp_port), amqp_factory).setServiceParent(root)
  if settings.ENABLE_MANHOLE:
    from carbon import manhole
    TCPServer(int(settings.MANHOLE_PORT), manhole.createManholeListener(),
              interface=settings.MANHOLE_INTERFACE).setServiceParent(root)
  # Instantiate an instrumentation service that will record metrics about
  # this service.
  from carbon.instrumentation import InstrumentationService
  InstrumentationService().setServiceParent(root)
  return root
def createCacheService(config):
  """Build the carbon-cache service tree: metric cache, disk writer, and
  the cache-query TCP listener."""
  from carbon.cache import MetricCache
  from carbon.conf import settings
  from carbon.protocols import CacheManagementHandler
  # Every received metric is stored straight into the in-memory cache.
  events.metricReceived.addHandler(MetricCache.store)
  root = createBaseService(config)
  query_factory = ServerFactory()
  query_factory.protocol = CacheManagementHandler
  query_listener = TCPServer(int(settings.CACHE_QUERY_PORT), query_factory,
                             interface=settings.CACHE_QUERY_INTERFACE)
  query_listener.setServiceParent(root)
  # have to import this *after* settings are defined
  from carbon.writer import WriterService
  WriterService().setServiceParent(root)
  if settings.USE_FLOW_CONTROL:
    events.cacheFull.addHandler(events.pauseReceivingMetrics)
    events.cacheSpaceAvailable.addHandler(events.resumeReceivingMetrics)
  return root
def createAggregatorService(config):
  """Build the carbon-aggregator service tree."""
  from carbon.aggregator import receiver
  from carbon.aggregator.rules import RuleManager
  from carbon.routers import ConsistentHashingRouter
  from carbon.client import CarbonClientManager
  from carbon.rewrite import RewriteRuleManager
  from carbon.conf import settings
  from carbon import events
  root = createBaseService(config)
  # Incoming metrics feed the aggregator; generated metrics are fanned out
  # to downstream carbon daemons via a consistent-hashing client manager.
  clients = CarbonClientManager(ConsistentHashingRouter())
  clients.setServiceParent(root)
  events.metricReceived.addHandler(receiver.process)
  events.metricGenerated.addHandler(clients.sendDatapoint)
  RuleManager.read_from(settings["aggregation-rules"])
  if exists(settings["rewrite-rules"]):
    RewriteRuleManager.read_from(settings["rewrite-rules"])
  if not settings.DESTINATIONS:
    raise Exception("Required setting DESTINATIONS is missing from carbon.conf")
  for destination in util.parseDestinations(settings.DESTINATIONS):
    clients.startClient(destination)
  return root
def createRelayService(config):
  """Build the carbon-relay service tree.

  Routes every received/generated metric to the configured DESTINATIONS
  using the router selected by settings.RELAY_METHOD.
  """
  from carbon.routers import RelayRulesRouter, ConsistentHashingRouter
  from carbon.client import CarbonClientManager
  from carbon.conf import settings
  from carbon import events
  root_service = createBaseService(config)
  # Configure application components
  if settings.RELAY_METHOD == 'rules':
    router = RelayRulesRouter(settings["relay-rules"])
  elif settings.RELAY_METHOD == 'consistent-hashing':
    router = ConsistentHashingRouter(settings.REPLICATION_FACTOR)
  else:
    # BUGFIX: an unrecognized method previously fell through and crashed
    # later with an unhelpful NameError on 'router'.
    raise ValueError("Unknown RELAY_METHOD: %r" % (settings.RELAY_METHOD,))
  client_manager = CarbonClientManager(router)
  client_manager.setServiceParent(root_service)
  events.metricReceived.addHandler(client_manager.sendDatapoint)
  events.metricGenerated.addHandler(client_manager.sendDatapoint)
  if not settings.DESTINATIONS:
    raise Exception("Required setting DESTINATIONS is missing from carbon.conf")
  for destination in util.parseDestinations(settings.DESTINATIONS):
    client_manager.startClient(destination)
  return root_service
| apache-2.0 |
daniorerio/trackpy | benchmarks/suite.py | 3 | 2664 | import getpass
import sys
import os
from vbench.api import Benchmark, BenchmarkRunner
from datetime import datetime
USERNAME = getpass.getuser()
if sys.platform == 'darwin':
HOME = '/Users/%s' % USERNAME
else:
HOME = '/home/%s' % USERNAME
try:
import ConfigParser
config = ConfigParser.ConfigParser()
config.readfp(open(os.path.expanduser('~/.vbenchcfg')))
REPO_PATH = config.get('setup', 'repo_path')
REPO_URL = config.get('setup', 'repo_url')
DB_PATH = config.get('setup', 'db_path')
TMP_DIR = config.get('setup', 'tmp_dir')
except:
REPO_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "../"))
REPO_URL = 'git@github.com:danielballan/mr.git'
DB_PATH = os.path.join(REPO_PATH, 'vb_suite/benchmarks.db')
TMP_DIR = os.path.join(HOME, 'tmp/vb_mr')
PREPARE = """
python setup.py clean
"""
BUILD = """
python setup.py build_ext --inplace
"""
dependencies = []
START_DATE = datetime(2012, 9, 19) # first full day when setup.py existed
# repo = GitRepo(REPO_PATH)
RST_BASE = 'source'
def generate_rst_files(benchmarks):
    """For each vbench Benchmark, plot its history from DB_PATH into
    source/vbench/figures/<name>.png and write the matching .txt rst."""
    import matplotlib as mpl
    # Use the non-interactive Agg backend (no display required).
    mpl.use('Agg')
    import matplotlib.pyplot as plt
    vb_path = os.path.join(RST_BASE, 'vbench')
    fig_base_path = os.path.join(vb_path, 'figures')
    if not os.path.exists(vb_path):
        print 'creating %s' % vb_path
        os.makedirs(vb_path)
    if not os.path.exists(fig_base_path):
        print 'creating %s' % fig_base_path
        os.makedirs(fig_base_path)
    for bmk in benchmarks:
        print 'Generating rst file for %s' % bmk.name
        rst_path = os.path.join(RST_BASE, 'vbench/%s.txt' % bmk.name)
        fig_full_path = os.path.join(fig_base_path, '%s.png' % bmk.name)
        # make the figure
        plt.figure(figsize=(10, 6))
        ax = plt.gca()
        bmk.plot(DB_PATH, ax=ax)
        # Pad the x-axis by 30 days on either side for readability.
        start, end = ax.get_xlim()
        plt.xlim([start - 30, end + 30])
        plt.savefig(fig_full_path, bbox_inches='tight')
        plt.close('all')
        # The rst embeds the figure via a path relative to RST_BASE.
        fig_rel_path = 'vbench/figures/%s.png' % bmk.name
        rst_text = bmk.to_rst(image_path=fig_rel_path)
        with open(rst_path, 'w') as f:
            f.write(rst_text)
# Collect every Benchmark instance defined in the local benchmarks module
# and hand them to the vbench runner.
ref = __import__('benchmarks')
benchmarks = [v for v in ref.__dict__.values() if isinstance(v, Benchmark)]
runner = BenchmarkRunner(benchmarks, REPO_PATH, REPO_URL,
                         BUILD, DB_PATH, TMP_DIR, PREPARE,
                         always_clean=True,
                         run_option='eod', start_date=START_DATE,
                         module_dependencies=dependencies)
if __name__ == '__main__':
    # Run the suite, then regenerate the rst pages from the results DB.
    runner.run()
    generate_rst_files(benchmarks)
| bsd-3-clause |
ericmjl/bokeh | bokeh/protocol/exceptions.py | 1 | 2449 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide named exceptions having to do with handling Bokeh Protocol
messages.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'MessageError',
'ProtocolError',
'ValidationError',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class MessageError(Exception):
    ''' Raised when a Bokeh Message object cannot be constructed.

    Usually this means the JSON fragments of a message could not be
    decoded at all.

    '''
class ProtocolError(Exception):
    ''' Raised on errors while processing wire protocol fragments.

    Indicates that decoded message fragments could not be properly
    assembled.

    '''
class ValidationError(Exception):
    ''' Raised when wire protocol fragments fail validation.

    Typically signals that a binary fragment arrived where a text fragment
    was expected, or the reverse.

    '''
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause |
JohnKendrick/PDielec | Sphinx/conf.py | 1 | 6518 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'PDielec'
copyright = '2018, John Kendrick and Andrew Burnett'
author = 'John Kendrick and Andrew Burnett'
# Number figures/tables/code-blocks so :numref: references work.
numfig = True

# The short X.Y version
version = '4.0'
# The full version, including alpha/beta/rc tags
release = '4.0'

# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
#

# LaTeX preamble shared by the PDF build ('preamble' key of latex_elements
# below) and the legacy pngmath option; it defines the \water, \tensor*,
# and \fieldbf helper macros used throughout the docs.
preamble = r'''
\usepackage{svg}
\usepackage{hyperref}
\usepackage{bm}
\usepackage{float}
\usepackage{parskip}
\usepackage{amsmath}
\newcommand{\water}{H_{2}O}
\newcommand{\tensor}[1]{\bar{\bar{#1}}}
\newcommand{\tensorbs}[1]{\bar{\bar{\bm{#1}}}}
\newcommand{\tensorbf}[1]{\bar{\bar{\bm{#1}}}}
\newcommand{\fieldbf}[1]{\bar{\bm{#1}}}
'''

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
#mathjax_path = 'https://cdn.rawgit.com/mathjax/MathJax/2.7.1/MathJax.js'
extensions = [
    'sphinx.ext.mathjax',
    'sphinx.ext.githubpages',
    'sphinxcontrib.bibtex',
    'cloud_sptheme.ext.table_styling',
]

# BibTeX database used by sphinxcontrib.bibtex.
bibtex_bibfiles = ['pdielec.bib']
# MathJax (v2) runtime configuration mirroring the LaTeX macros defined in
# ``preamble`` above, so the same markup renders in HTML and PDF output.
mathjax_config = {
    "extensions": ["AMSmath.js"],
    "TeX": {
        "Macros": {
            "bm": ["{\\boldsymbol{#1}}", 1],
            "tensor": ["{\\bar{\\bar{#1}}}", 1],
            "tensorbs": ["{\\bar{\\bar{\\bm{#1}}}}", 1],
            "tensorbf": ["{\\bar{\\bar{\\bm{#1}}}}", 1],
            "fieldbf": ["{\\bar{\\bm{#1}}}", 1],
            # Bug fix: this previously expanded to "\H_{2}O", invoking the
            # undefined-in-math-mode \H control sequence.  The LaTeX
            # preamble defines \water as plain "H_{2}O", so the MathJax
            # macro must match.
            "water": "{H_{2}O}",
        },
    },
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = [".rst", ".md"]
source_suffix = ".rst"

# The master toctree document.
master_doc = "index"

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
# 'index-latex.rst' is excluded here because it is only the LaTeX entry
# point (see latex_documents below).
exclude_patterns = ["_build", "index-latex.rst", "Thumbs.db", ".DS_Store"]

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# Bug fix: a dead assignment of 'alabaster' that was immediately
# overwritten has been removed; the effective theme is 'haiku'.
html_theme = 'haiku'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = project + ': John Kendrick & Andrew Burnett'

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}

# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'PDielecdoc'

# -- Options for LaTeX output ------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    'papersize': 'a4paper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    'pointsize': '11pt',

    # Additional stuff for the LaTeX preamble (shared macro definitions).
    #
    'preamble': preamble,

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index-latex', 'PDielec.tex', 'PDielec Documentation',
     'John Kendrick and Andrew Burnett', 'manual'),
]

# NOTE(review): sphinx.ext.pngmath is not listed in `extensions`; this
# setting is presumably kept for legacy builds -- confirm it is still used.
pngmath_latex_preamble = preamble

# -- Options for manual page output ------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'pdielec', 'PDielec Documentation',
     [author], 1)
]

# -- Options for Texinfo output ----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'PDielec', 'PDielec Documentation',
     author, 'PDielec', 'One line description of project.',
     'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
| mit |
osvalr/odoo | addons/email_template/ir_actions.py | 281 | 3520 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013 OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class actions_server(osv.Model):
    """ Add email option in server actions. """
    _name = 'ir.actions.server'
    _inherit = ['ir.actions.server']

    def _get_states(self, cr, uid, context=None):
        # Prepend the 'email' action type to the selection list inherited
        # from ir.actions.server.
        res = super(actions_server, self)._get_states(cr, uid, context=context)
        res.insert(0, ('email', 'Send Email'))
        return res

    _columns = {
        # Read-only mirrors of the chosen template's fields, displayed in
        # the server-action form for reference only.
        'email_from': fields.related(
            'template_id', 'email_from', type='char',
            readonly=True, string='From'
        ),
        'email_to': fields.related(
            'template_id', 'email_to', type='char',
            readonly=True, string='To (Emails)'
        ),
        'partner_to': fields.related(
            'template_id', 'partner_to', type='char',
            readonly=True, string='To (Partners)'
        ),
        'subject': fields.related(
            'template_id', 'subject', type='char',
            readonly=True, string='Subject'
        ),
        'body_html': fields.related(
            'template_id', 'body_html', type='text',
            readonly=True, string='Body'
        ),
        # Template restricted to the model targeted by the server action.
        'template_id': fields.many2one(
            'email.template', 'Email Template', ondelete='set null',
            domain="[('model_id', '=', model_id)]",
        ),
    }

    def on_change_template_id(self, cr, uid, ids, template_id, context=None):
        """ Render the raw template in the server action fields. """
        # Copy the raw (unrendered) template fields into the form; warn if
        # the template has no email_from, which is required to send.
        fields = ['subject', 'body_html', 'email_from', 'email_to', 'partner_to']
        if template_id:
            template_values = self.pool.get('email.template').read(cr, uid, [template_id], fields, context)[0]
            values = dict((field, template_values[field]) for field in fields if template_values.get(field))
            if not values.get('email_from'):
                return {'warning': {'title': 'Incomplete template', 'message': 'Your template should define email_from'}, 'value': values}
        else:
            values = dict.fromkeys(fields, False)
        return {'value': values}

    def run_action_email(self, cr, uid, action, eval_context=None, context=None):
        """ Queue the action's email template on the active record.

        The mail is queued (force_send=False) and failures are swallowed
        (raise_exception=False), so this is strictly best-effort.
        """
        # NOTE(review): assumes a non-None context dict; context=None would
        # raise AttributeError on .get -- confirm callers always pass one.
        if not action.template_id or not context.get('active_id'):
            return False
        self.pool['email.template'].send_mail(cr, uid, action.template_id.id, context.get('active_id'),
                                              force_send=False, raise_exception=False, context=context)
        return False
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/scipy/optimize/_lsq/dogbox.py | 40 | 11699 | """
dogleg algorithm with rectangular trust regions for least-squares minimization.
The description of the algorithm can be found in [Voglis]_. The algorithm does
trust-region iterations, but the shape of trust regions is rectangular as
opposed to conventional elliptical. The intersection of a trust region and
an initial feasible region is again some rectangle. Thus on each iteration a
bound-constrained quadratic optimization problem is solved.
A quadratic problem is solved by well-known dogleg approach, where the
function is minimized along piecewise-linear "dogleg" path [NumOpt]_,
Chapter 4. If Jacobian is not rank-deficient then the function is decreasing
along this path, and optimization amounts to simply following along this
path as long as a point stays within the bounds. A constrained Cauchy step
(along the anti-gradient) is considered for safety in rank deficient cases,
in this situations the convergence might be slow.
If during iterations some variable hit the initial bound and the component
of anti-gradient points outside the feasible region, then a next dogleg step
won't make any progress. At this state such variables satisfy first-order
optimality conditions and they are excluded before computing a next dogleg
step.
Gauss-Newton step can be computed exactly by `numpy.linalg.lstsq` (for dense
Jacobian matrices) or by iterative procedure `scipy.sparse.linalg.lsmr` (for
dense and sparse matrices, or Jacobian being LinearOperator). The second
option allows to solve very large problems (up to couple of millions of
residuals on a regular PC), provided the Jacobian matrix is sufficiently
sparse. But note that dogbox is not very good for solving problems with
large number of constraints, because of variables exclusion-inclusion on each
iteration (a required number of function evaluations might be high or accuracy
of a solution will be poor), thus its large-scale usage is probably limited
to unconstrained problems.
References
----------
.. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region Dogleg
Approach for Unconstrained and Bound Constrained Nonlinear
Optimization", WSEAS International Conference on Applied
Mathematics, Corfu, Greece, 2004.
.. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization, 2nd edition".
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.linalg import lstsq, norm
from scipy.sparse.linalg import LinearOperator, aslinearoperator, lsmr
from scipy.optimize import OptimizeResult
from scipy._lib.six import string_types
from .common import (
step_size_to_bound, in_bounds, update_tr_radius, evaluate_quadratic,
build_quadratic_1d, minimize_quadratic_1d, compute_grad,
compute_jac_scale, check_termination, scale_for_robust_loss_function,
print_header_nonlinear, print_iteration_nonlinear)
def lsmr_operator(Jop, d, active_set):
    """Compute LinearOperator to use in LSMR by dogbox algorithm.

    `active_set` mask is used to excluded active variables from computations
    of matrix-vector products.
    """
    m, n = Jop.shape

    def matvec(x):
        # Zero the active components before applying J, so the operator
        # behaves as if the corresponding columns of J were removed.
        x_free = x.ravel().copy()
        x_free[active_set] = 0
        # Bug fix: previously multiplied the *unmasked* `x` by d, which made
        # the active_set masking above a no-op in matvec.
        return Jop.matvec(x_free * d)

    def rmatvec(x):
        # Transpose product, with active components of the result zeroed.
        r = d * Jop.rmatvec(x)
        r[active_set] = 0
        return r

    return LinearOperator((m, n), matvec=matvec, rmatvec=rmatvec, dtype=float)
def find_intersection(x, tr_bounds, lb, ub):
    """Find intersection of trust-region bounds and initial bounds.

    Returns
    -------
    lb_total, ub_total : ndarray with shape of x
        Lower and upper bounds of the intersection region.
    orig_l, orig_u : ndarray of bool with shape of x
        True means that an original bound is taken as a corresponding bound
        in the intersection region.
    tr_l, tr_u : ndarray of bool with shape of x
        True means that a trust-region bound is taken as a corresponding bound
        in the intersection region.
    """
    # Shift the original bounds so the current point sits at the origin.
    lower_shifted = lb - x
    upper_shifted = ub - x

    # Intersect the shifted box with the trust region [-tr, tr].
    lower = np.maximum(lower_shifted, -tr_bounds)
    upper = np.minimum(upper_shifted, tr_bounds)

    # Record, per coordinate, which side supplied the binding bound.
    from_orig_lower = lower == lower_shifted
    from_orig_upper = upper == upper_shifted
    from_tr_lower = lower == -tr_bounds
    from_tr_upper = upper == tr_bounds

    return (lower, upper, from_orig_lower, from_orig_upper,
            from_tr_lower, from_tr_upper)
def dogleg_step(x, newton_step, g, a, b, tr_bounds, lb, ub):
    """Find dogleg step in a rectangular region.

    Returns
    -------
    step : ndarray, shape (n,)
        Computed dogleg step.
    bound_hits : ndarray of int, shape (n,)
        Each component shows whether a corresponding variable hits the
        initial bound after the step is taken:
            *  0 - a variable doesn't hit the bound.
            * -1 - lower bound is hit.
            *  1 - upper bound is hit.
    tr_hit : bool
        Whether the step hit the boundary of the trust-region.
    """
    lb_total, ub_total, orig_l, orig_u, tr_l, tr_u = find_intersection(
        x, tr_bounds, lb, ub
    )
    bound_hits = np.zeros_like(x, dtype=int)

    # Take the full (Gauss-)Newton step whenever it fits the feasible box.
    if in_bounds(newton_step, lb_total, ub_total):
        return newton_step, bound_hits, False

    # Maximum feasible length along the steepest-descent direction.
    to_bounds, _ = step_size_to_bound(np.zeros_like(x), -g, lb_total, ub_total)

    # The classical dogleg algorithm would check if Cauchy step fits into
    # the bounds, and just return it constrained version if not. But in a
    # rectangular trust region it makes sense to try to improve constrained
    # Cauchy step too. Thus we don't distinguish these two cases.
    cauchy_step = -minimize_quadratic_1d(a, b, 0, to_bounds)[0] * g

    # Walk from the Cauchy point toward the Newton point as far as the
    # intersection region allows, recording which bounds get hit.
    step_diff = newton_step - cauchy_step
    step_size, hits = step_size_to_bound(cauchy_step, step_diff,
                                         lb_total, ub_total)
    bound_hits[(hits < 0) & orig_l] = -1
    bound_hits[(hits > 0) & orig_u] = 1
    tr_hit = np.any((hits < 0) & tr_l | (hits > 0) & tr_u)

    return cauchy_step + step_size * step_diff, bound_hits, tr_hit
def dogbox(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale,
           loss_function, tr_solver, tr_options, verbose):
    """Run the dogbox trust-region iterations (see module docstring).

    Returns a `scipy.optimize.OptimizeResult` with the final point, cost,
    residuals, Jacobian, gradient, active-bound mask and evaluation counts.
    """
    f = f0
    f_true = f.copy()
    nfev = 1

    J = J0
    njev = 1

    # Optionally re-weight residuals and Jacobian by a robust loss.
    if loss_function is not None:
        rho = loss_function(f)
        cost = 0.5 * np.sum(rho[0])
        J, f = scale_for_robust_loss_function(J, f, rho)
    else:
        cost = 0.5 * np.dot(f, f)

    g = compute_grad(J, f)

    # x_scale == 'jac' means the scaling is re-derived from J each iteration.
    jac_scale = isinstance(x_scale, string_types) and x_scale == 'jac'
    if jac_scale:
        scale, scale_inv = compute_jac_scale(J)
    else:
        scale, scale_inv = x_scale, 1 / x_scale

    # Initial trust-region radius in scaled variables.
    Delta = norm(x0 * scale_inv, ord=np.inf)
    if Delta == 0:
        Delta = 1.0

    # -1 / +1 mark variables sitting exactly on the lower / upper bound.
    on_bound = np.zeros_like(x0, dtype=int)
    on_bound[np.equal(x0, lb)] = -1
    on_bound[np.equal(x0, ub)] = 1

    x = x0
    step = np.empty_like(x0)

    if max_nfev is None:
        max_nfev = x0.size * 100

    termination_status = None
    iteration = 0
    step_norm = None
    actual_reduction = None

    if verbose == 2:
        print_header_nonlinear()

    while True:
        # Variables on a bound with the anti-gradient pointing outward are
        # first-order optimal; exclude them from this iteration's step.
        active_set = on_bound * g < 0
        free_set = ~active_set

        g_free = g[free_set]
        g_full = g.copy()
        g[active_set] = 0
        g_norm = norm(g, ord=np.inf)
        if g_norm < gtol:
            termination_status = 1

        if verbose == 2:
            print_iteration_nonlinear(iteration, nfev, cost, actual_reduction,
                                      step_norm, g_norm)

        if termination_status is not None or nfev == max_nfev:
            break

        x_free = x[free_set]
        lb_free = lb[free_set]
        ub_free = ub[free_set]
        scale_free = scale[free_set]

        # Compute (Gauss-)Newton and build quadratic model for Cauchy step.
        if tr_solver == 'exact':
            J_free = J[:, free_set]
            newton_step = lstsq(J_free, -f)[0]

            # Coefficients for the quadratic model along the anti-gradient.
            a, b = build_quadratic_1d(J_free, g_free, -g_free)
        elif tr_solver == 'lsmr':
            Jop = aslinearoperator(J)

            # We compute lsmr step in scaled variables and then
            # transform back to normal variables, if lsmr would give exact lsq
            # solution this would be equivalent to not doing any
            # transformations, but from experience it's better this way.

            # We pass active_set to make computations as if we selected
            # the free subset of J columns, but without actually doing any
            # slicing, which is expensive for sparse matrices and impossible
            # for LinearOperator.
            lsmr_op = lsmr_operator(Jop, scale, active_set)
            newton_step = -lsmr(lsmr_op, f, **tr_options)[0][free_set]
            newton_step *= scale_free

            # Components of g for active variables were zeroed, so this call
            # is correct and equivalent to using J_free and g_free.
            a, b = build_quadratic_1d(Jop, g, -g)

        # Inner loop: shrink Delta until a step actually reduces the cost.
        actual_reduction = -1.0
        while actual_reduction <= 0 and nfev < max_nfev:
            tr_bounds = Delta * scale_free

            step_free, on_bound_free, tr_hit = dogleg_step(
                x_free, newton_step, g_free, a, b, tr_bounds, lb_free, ub_free)

            step.fill(0.0)
            step[free_set] = step_free

            if tr_solver == 'exact':
                predicted_reduction = -evaluate_quadratic(J_free, g_free,
                                                          step_free)
            elif tr_solver == 'lsmr':
                predicted_reduction = -evaluate_quadratic(Jop, g, step)

            x_new = x + step
            f_new = fun(x_new)
            nfev += 1

            step_h_norm = norm(step * scale_inv, ord=np.inf)

            # Non-finite residuals: shrink the region and retry.
            if not np.all(np.isfinite(f_new)):
                Delta = 0.25 * step_h_norm
                continue

            # Usual trust-region step quality estimation.
            if loss_function is not None:
                cost_new = loss_function(f_new, cost_only=True)
            else:
                cost_new = 0.5 * np.dot(f_new, f_new)
            actual_reduction = cost - cost_new

            Delta, ratio = update_tr_radius(
                Delta, actual_reduction, predicted_reduction,
                step_h_norm, tr_hit
            )

            step_norm = norm(step)
            termination_status = check_termination(
                actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol)

            if termination_status is not None:
                break

        if actual_reduction > 0:
            on_bound[free_set] = on_bound_free

            x = x_new
            # Set variables exactly at the boundary.
            mask = on_bound == -1
            x[mask] = lb[mask]
            mask = on_bound == 1
            x[mask] = ub[mask]

            f = f_new
            f_true = f.copy()

            cost = cost_new

            J = jac(x, f)
            njev += 1

            if loss_function is not None:
                rho = loss_function(f)
                J, f = scale_for_robust_loss_function(J, f, rho)

            g = compute_grad(J, f)

            if jac_scale:
                scale, scale_inv = compute_jac_scale(J, scale_inv)
        else:
            step_norm = 0
            actual_reduction = 0

        iteration += 1

    if termination_status is None:
        termination_status = 0

    return OptimizeResult(
        x=x, cost=cost, fun=f_true, jac=J, grad=g_full, optimality=g_norm,
        active_mask=on_bound, nfev=nfev, njev=njev, status=termination_status)
| mit |
hurricup/intellij-community | plugins/hg4idea/testData/bin/mercurial/hgweb/hgweb_mod.py | 91 | 15218 | # hgweb/hgweb_mod.py - Web interface for a repository.
#
# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import os
from mercurial import ui, hg, hook, error, encoding, templater, util, repoview
from mercurial.templatefilters import websub
from mercurial.i18n import _
from common import get_stat, ErrorResponse, permhooks, caching
from common import HTTP_OK, HTTP_NOT_MODIFIED, HTTP_BAD_REQUEST
from common import HTTP_NOT_FOUND, HTTP_SERVER_ERROR
from request import wsgirequest
import webcommands, protocol, webutil, re
# Wire-protocol commands mapped to the permission ('pull' or 'push')
# required to execute them; looked up in hgweb.run_wsgi before
# dispatching a protocol call.
perms = {
    'changegroup': 'pull',
    'changegroupsubset': 'pull',
    'getbundle': 'pull',
    'stream_out': 'pull',
    'listkeys': 'pull',
    'unbundle': 'push',
    'pushkey': 'push',
}
def makebreadcrumb(url, prefix=''):
    '''Return a 'URL breadcrumb' list

    A 'URL breadcrumb' is a list of URL-name pairs,
    corresponding to each of the path items on a URL.
    This can be used to create path navigation entries.
    '''
    # Normalize: drop one trailing slash, then prepend the optional prefix.
    if url.endswith('/'):
        url = url[:-1]
    if prefix:
        url = '/' + prefix + url

    relpath = url[1:] if url.startswith('/') else url

    # Walk the path from the deepest component upward, pairing each name
    # with the URL that leads to it; stop at the (empty) root sentinel.
    crumbs = []
    remaining = url
    for name in reversed([''] + relpath.split('/')):
        if not name or not remaining:
            break
        crumbs.append({'url': remaining, 'name': name})
        remaining = os.path.dirname(remaining)
    return reversed(crumbs)
class hgweb(object):
    """WSGI application serving the web interface and the wire protocol
    for a single repository (Python 2 code)."""

    def __init__(self, repo, name=None, baseui=None):
        # `repo` may be a path (opened here) or an already-open repository.
        if isinstance(repo, str):
            if baseui:
                u = baseui.copy()
            else:
                u = ui.ui()
            self.repo = hg.repository(u, repo)
        else:
            self.repo = repo

        self.repo = self._getview(self.repo)
        self.repo.ui.setconfig('ui', 'report_untrusted', 'off')
        self.repo.baseui.setconfig('ui', 'report_untrusted', 'off')
        self.repo.ui.setconfig('ui', 'nontty', 'true')
        self.repo.baseui.setconfig('ui', 'nontty', 'true')
        hook.redirect(True)
        # mtime/size of the changelog; used by refresh() to detect changes.
        self.mtime = -1
        self.size = -1
        self.reponame = name
        self.archives = 'zip', 'gz', 'bz2'
        self.stripecount = 1
        # a repo owner may set web.templates in .hg/hgrc to get any file
        # readable by the user running the CGI script
        self.templatepath = self.config('web', 'templates')
        self.websubtable = self.loadwebsub()

    # The CGI scripts are often run by a user different from the repo owner.
    # Trust the settings from the .hg/hgrc files by default.
    def config(self, section, name, default=None, untrusted=True):
        return self.repo.ui.config(section, name, default,
                                   untrusted=untrusted)

    def configbool(self, section, name, default=False, untrusted=True):
        return self.repo.ui.configbool(section, name, default,
                                       untrusted=untrusted)

    def configlist(self, section, name, default=None, untrusted=True):
        return self.repo.ui.configlist(section, name, default,
                                       untrusted=untrusted)

    def _getview(self, repo):
        # Map the web.view config value to a repoview filter; 'all' exposes
        # the unfiltered repository, anything unknown falls back to 'served'.
        viewconfig = self.config('web', 'view', 'served')
        if viewconfig == 'all':
            return repo.unfiltered()
        elif viewconfig in repoview.filtertable:
            return repo.filtered(viewconfig)
        else:
            return repo.filtered('served')

    def refresh(self, request=None):
        """Re-open the repository and re-read web settings if it changed."""
        st = get_stat(self.repo.spath)
        # compare changelog size in addition to mtime to catch
        # rollbacks made less than a second ago
        if st.st_mtime != self.mtime or st.st_size != self.size:
            self.mtime = st.st_mtime
            self.size = st.st_size
            r = hg.repository(self.repo.baseui, self.repo.root)
            self.repo = self._getview(r)
            self.maxchanges = int(self.config("web", "maxchanges", 10))
            self.stripecount = int(self.config("web", "stripes", 1))
            self.maxshortchanges = int(self.config("web", "maxshortchanges",
                                                   60))
            self.maxfiles = int(self.config("web", "maxfiles", 10))
            self.allowpull = self.configbool("web", "allowpull", True)
            encoding.encoding = self.config("web", "encoding",
                                            encoding.encoding)
        if request:
            self.repo.ui.environ = request.env

    def run(self):
        """Serve this application as a CGI script (guarded by env check)."""
        if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."):
            raise RuntimeError("This function is only intended to be "
                               "called while running as a CGI script.")
        import mercurial.hgweb.wsgicgi as wsgicgi
        wsgicgi.launch(self)

    def __call__(self, env, respond):
        # Standard WSGI entry point.
        req = wsgirequest(env, respond)
        return self.run_wsgi(req)

    def run_wsgi(self, req):
        """Dispatch one request: protocol commands first, then web UI."""
        self.refresh(req)

        # work with CGI variables to create coherent structure
        # use SCRIPT_NAME, PATH_INFO and QUERY_STRING as well as our REPO_NAME

        req.url = req.env['SCRIPT_NAME']
        if not req.url.endswith('/'):
            req.url += '/'
        if 'REPO_NAME' in req.env:
            req.url += req.env['REPO_NAME'] + '/'

        if 'PATH_INFO' in req.env:
            parts = req.env['PATH_INFO'].strip('/').split('/')
            repo_parts = req.env.get('REPO_NAME', '').split('/')
            if parts[:len(repo_parts)] == repo_parts:
                parts = parts[len(repo_parts):]
            query = '/'.join(parts)
        else:
            query = req.env['QUERY_STRING'].split('&', 1)[0]
            query = query.split(';', 1)[0]

        # process this if it's a protocol request
        # protocol bits don't need to create any URLs
        # and the clients always use the old URL structure

        cmd = req.form.get('cmd', [''])[0]
        if protocol.iscmd(cmd):
            try:
                if query:
                    raise ErrorResponse(HTTP_NOT_FOUND)
                if cmd in perms:
                    # enforce pull/push permission for protocol commands
                    self.check_perm(req, perms[cmd])
                return protocol.call(self.repo, req, cmd)
            except ErrorResponse, inst:
                # A client that sends unbundle without 100-continue will
                # break if we respond early.
                if (cmd == 'unbundle' and
                    (req.env.get('HTTP_EXPECT',
                                 '').lower() != '100-continue') or
                    req.env.get('X-HgHttp2', '')):
                    req.drain()
                req.respond(inst, protocol.HGTYPE,
                            body='0\n%s\n' % inst.message)
                return ''

        # translate user-visible url structure to internal structure

        args = query.split('/', 2)
        if 'cmd' not in req.form and args and args[0]:

            cmd = args.pop(0)
            # a leading "<style>-" prefix on the command selects a style
            style = cmd.rfind('-')
            if style != -1:
                req.form['style'] = [cmd[:style]]
                cmd = cmd[style + 1:]

            # avoid accepting e.g. style parameter as command
            if util.safehasattr(webcommands, cmd):
                req.form['cmd'] = [cmd]
            else:
                cmd = ''

            if cmd == 'static':
                req.form['file'] = ['/'.join(args)]
            else:
                if args and args[0]:
                    node = args.pop(0)
                    req.form['node'] = [node]
                if args:
                    req.form['file'] = args

            ua = req.env.get('HTTP_USER_AGENT', '')
            if cmd == 'rev' and 'mercurial' in ua:
                req.form['style'] = ['raw']

            if cmd == 'archive':
                # strip the archive extension off the node and record the type
                fn = req.form['node'][0]
                for type_, spec in self.archive_specs.iteritems():
                    ext = spec[2]
                    if fn.endswith(ext):
                        req.form['node'] = [fn[:-len(ext)]]
                        req.form['type'] = [type_]

        # process the web interface request

        try:
            tmpl = self.templater(req)
            ctype = tmpl('mimetype', encoding=encoding.encoding)
            ctype = templater.stringify(ctype)

            # check read permissions non-static content
            if cmd != 'static':
                self.check_perm(req, None)

            if cmd == '':
                req.form['cmd'] = [tmpl.cache['default']]
                cmd = req.form['cmd'][0]

            if self.configbool('web', 'cache', True):
                caching(self, req) # sets ETag header or raises NOT_MODIFIED
            if cmd not in webcommands.__all__:
                msg = 'no such method: %s' % cmd
                raise ErrorResponse(HTTP_BAD_REQUEST, msg)
            elif cmd == 'file' and 'raw' in req.form.get('style', []):
                self.ctype = ctype
                content = webcommands.rawfile(self, req, tmpl)
            else:
                content = getattr(webcommands, cmd)(self, req, tmpl)
                req.respond(HTTP_OK, ctype)

            return content

        except (error.LookupError, error.RepoLookupError), err:
            req.respond(HTTP_NOT_FOUND, ctype)
            msg = str(err)
            if (util.safehasattr(err, 'name') and
                not isinstance(err, error.ManifestLookupError)):
                msg = 'revision not found: %s' % err.name
            return tmpl('error', error=msg)
        except (error.RepoError, error.RevlogError), inst:
            req.respond(HTTP_SERVER_ERROR, ctype)
            return tmpl('error', error=str(inst))
        except ErrorResponse, inst:
            req.respond(inst, ctype)
            if inst.code == HTTP_NOT_MODIFIED:
                # Not allowed to return a body on a 304
                return ['']
            return tmpl('error', error=inst.message)

    def loadwebsub(self):
        """Parse [websub]/[interhg] config into (regexp, format) pairs."""
        websubtable = []
        websubdefs = self.repo.ui.configitems('websub')
        # we must maintain interhg backwards compatibility
        websubdefs += self.repo.ui.configitems('interhg')
        for key, pattern in websubdefs:
            # grab the delimiter from the character after the "s"
            unesc = pattern[1]
            delim = re.escape(unesc)

            # identify portions of the pattern, taking care to avoid escaped
            # delimiters. the replace format and flags are optional, but
            # delimiters are required.
            match = re.match(
                r'^s%s(.+)(?:(?<=\\\\)|(?<!\\))%s(.*)%s([ilmsux])*$'
                % (delim, delim, delim), pattern)
            if not match:
                self.repo.ui.warn(_("websub: invalid pattern for %s: %s\n")
                                  % (key, pattern))
                continue

            # we need to unescape the delimiter for regexp and format
            delim_re = re.compile(r'(?<!\\)\\%s' % delim)
            regexp = delim_re.sub(unesc, match.group(1))
            format = delim_re.sub(unesc, match.group(2))

            # the pattern allows for 6 regexp flags, so set them if necessary
            flagin = match.group(3)
            flags = 0
            if flagin:
                for flag in flagin.upper():
                    flags |= re.__dict__[flag]

            try:
                regexp = re.compile(regexp, flags)
                websubtable.append((regexp, format))
            except re.error:
                self.repo.ui.warn(_("websub: invalid regexp for %s: %s\n")
                                  % (key, regexp))
        return websubtable

    def templater(self, req):
        """Build the templater used to render web pages for this request."""

        # determine scheme, port and server name
        # this is needed to create absolute urls

        proto = req.env.get('wsgi.url_scheme')
        if proto == 'https':
            proto = 'https'
            default_port = "443"
        else:
            proto = 'http'
            default_port = "80"

        port = req.env["SERVER_PORT"]
        port = port != default_port and (":" + port) or ""
        urlbase = '%s://%s%s' % (proto, req.env['SERVER_NAME'], port)
        logourl = self.config("web", "logourl", "http://mercurial.selenic.com/")
        logoimg = self.config("web", "logoimg", "hglogo.png")
        staticurl = self.config("web", "staticurl") or req.url + 'static/'
        if not staticurl.endswith('/'):
            staticurl += '/'

        # some functions for the templater

        def header(**map):
            yield tmpl('header', encoding=encoding.encoding, **map)

        def footer(**map):
            yield tmpl("footer", **map)

        def motd(**map):
            yield self.config("web", "motd", "")

        # figure out which style to use

        vars = {}
        styles = (
            req.form.get('style', [None])[0],
            self.config('web', 'style'),
            'paper',
        )
        style, mapfile = templater.stylemap(styles, self.templatepath)
        if style == styles[0]:
            vars['style'] = style

        start = req.url[-1] == '?' and '&' or '?'
        sessionvars = webutil.sessionvars(vars, start)

        if not self.reponame:
            self.reponame = (self.config("web", "name")
                             or req.env.get('REPO_NAME')
                             or req.url.strip('/') or self.repo.root)

        def websubfilter(text):
            return websub(text, self.websubtable)

        # create the templater

        tmpl = templater.templater(mapfile,
                                   filters={"websub": websubfilter},
                                   defaults={"url": req.url,
                                             "logourl": logourl,
                                             "logoimg": logoimg,
                                             "staticurl": staticurl,
                                             "urlbase": urlbase,
                                             "repo": self.reponame,
                                             "header": header,
                                             "footer": footer,
                                             "motd": motd,
                                             "sessionvars": sessionvars,
                                             "pathdef": makebreadcrumb(req.url),
                                             })
        return tmpl

    def archivelist(self, nodeid):
        # yield one entry per archive type enabled in the configuration
        allowed = self.configlist("web", "allow_archive")
        for i, spec in self.archive_specs.iteritems():
            if i in allowed or self.configbool("web", "allow" + i):
                yield {"type" : i, "extension" : spec[2], "node" : nodeid}

    # archive type -> (mime type, command, extension, unused)
    archive_specs = {
        'bz2': ('application/x-bzip2', 'tbz2', '.tar.bz2', None),
        'gz': ('application/x-gzip', 'tgz', '.tar.gz', None),
        'zip': ('application/zip', 'zip', '.zip', None),
    }

    def check_perm(self, req, op):
        # run all registered permission hooks; each may raise ErrorResponse
        for hook in permhooks:
            hook(self, req, op)
| apache-2.0 |
infogulch/pyspades | pyspades/weapon.py | 7 | 3816 | import math
from twisted.internet import reactor
from pyspades.constants import *
from pyspades.collision import distance_3d_vector
class BaseWeapon(object):
    """Server-side weapon state machine.

    Ammo consumption is not counted per shot; instead it is derived from
    elapsed time (reactor.seconds) and the per-weapon ``delay`` while the
    trigger is held.  Subclasses supply name/delay/ammo/stock/reload_time/
    slow_reload/damage.
    """
    shoot = False
    reloading = False
    id = None

    # timestamps (reactor.seconds) driving the time-based ammo accounting
    shoot_time = None
    next_shot = None
    start = None

    def __init__(self, reload_callback):
        # reload_callback is invoked whenever a reload completes a step
        self.reload_callback = reload_callback
        self.reset()

    def restock(self):
        self.current_stock = self.stock

    def reset(self):
        # Cancel any pending reload and refill clip and reserve.
        self.shoot = False
        if self.reloading:
            self.reload_call.cancel()
            self.reloading = False
        self.current_ammo = self.ammo
        self.current_stock = self.stock

    def set_shoot(self, value):
        """Press (True) or release (False) the trigger."""
        if value == self.shoot:
            return
        current_time = reactor.seconds()
        if value:
            self.start = current_time
            if self.current_ammo <= 0:
                return
            elif self.reloading and not self.slow_reload:
                return
            # Can't fire sooner than the shot already scheduled.
            self.shoot_time = max(current_time, self.next_shot)
            if self.reloading:
                self.reloading = False
                self.reload_call.cancel()
        else:
            # On release, settle the ammo spent while firing and schedule
            # the earliest time the next shot may occur.
            ammo = self.current_ammo
            self.current_ammo = self.get_ammo(True)
            self.next_shot = self.shoot_time + self.delay * (
                ammo - self.current_ammo)
        self.shoot = value

    def reload(self):
        if self.reloading:
            return
        ammo = self.get_ammo()
        if not self.current_stock or ammo >= self.ammo:
            return
        elif self.slow_reload and self.shoot and ammo:
            # shell-by-shell weapons can't reload while firing with ammo left
            return
        self.reloading = True
        self.set_shoot(False)
        self.current_ammo = ammo
        self.reload_call = reactor.callLater(self.reload_time, self.on_reload)

    def on_reload(self):
        self.reloading = False
        if self.slow_reload:
            # one shell at a time; keep reloading until interrupted or full
            self.current_ammo += 1
            self.current_stock -= 1
            self.reload_callback()
            self.reload()
        else:
            # whole-clip reload, capped by remaining reserve stock
            new_stock = max(0, self.current_stock - (
                self.ammo - self.current_ammo))
            self.current_ammo += self.current_stock - new_stock
            self.current_stock = new_stock
            self.reload_callback()

    def get_ammo(self, no_max = False):
        """Return ammo left; time-based while firing.

        With ``no_max`` the raw (possibly negative) value is returned.
        """
        if self.shoot:
            dt = reactor.seconds() - self.shoot_time
            ammo = self.current_ammo - max(0, int(
                math.ceil(dt / self.delay)))
        else:
            ammo = self.current_ammo
        if no_max:
            return ammo
        return max(0, ammo)

    def is_empty(self, tolerance = CLIP_TOLERANCE):
        # tolerance forgives small client/server timing drift
        return self.get_ammo(True) < -tolerance or not self.shoot

    def get_damage(self, value, position1, position2):
        # positions unused here; subclasses may implement falloff
        return self.damage[value]
class Rifle(BaseWeapon):
    """Rifle tuning constants; see BaseWeapon for their meaning."""
    name = 'Rifle'
    delay = 0.5          # seconds between shots
    ammo = 10            # clip size
    stock = 50           # reserve ammunition
    reload_time = 2.5    # seconds
    slow_reload = False  # whole clip reloads at once
    # Damage per hit location (constants from pyspades.constants).
    damage = {
        TORSO : 49,
        HEAD : 100,
        ARMS : 33,
        LEGS : 33
    }
class SMG(BaseWeapon):
    """SMG tuning constants; see BaseWeapon for their meaning."""
    name = 'SMG'
    delay = 0.11 # actually 0.1, but due to AoS scheduling, it's usually 0.11
    ammo = 30            # clip size
    stock = 120          # reserve ammunition
    reload_time = 2.5    # seconds
    slow_reload = False  # whole clip reloads at once
    # Damage per hit location (constants from pyspades.constants).
    damage = {
        TORSO : 29,
        HEAD : 75,
        ARMS : 18,
        LEGS : 18
    }
class Shotgun(BaseWeapon):
    """Shotgun tuning constants; see BaseWeapon for their meaning."""
    name = 'Shotgun'
    delay = 1.0          # seconds between shots
    ammo = 6             # clip size
    stock = 48           # reserve ammunition
    reload_time = 0.5    # seconds per shell
    slow_reload = True   # shells are loaded one at a time
    # Damage per hit location (constants from pyspades.constants).
    damage = {
        TORSO : 27,
        HEAD : 37,
        ARMS : 16,
        LEGS : 16
    }
# Protocol weapon id (from pyspades.constants) -> weapon class.
WEAPONS = {
    RIFLE_WEAPON : Rifle,
    SMG_WEAPON : SMG,
    SHOTGUN_WEAPON : Shotgun,
}

# Give each weapon class its protocol id. Uses items() instead of the
# Python 2-only iteritems(), and avoids shadowing the id() builtin.
for weapon_id, weapon in WEAPONS.items():
    weapon.id = weapon_id
haxoza/django | tests/model_options/models/tablespaces.py | 342 | 1853 | from django.db import models
# Since the test database doesn't have tablespaces, it's impossible for Django
# to create the tables for models where db_tablespace is set. To avoid this
# problem, we mark the models as unmanaged, and temporarily revert them to
# managed during each test. We also set them to use the same tables as the
# "reference" models to avoid errors when other tests run 'migrate'
# (proxy_models_inheritance does).
class ScientistRef(models.Model):
    # "Reference" model: owns the real table that the unmanaged,
    # tablespace-using Scientist model below points at.
    name = models.CharField(max_length=50)


class ArticleRef(models.Model):
    # "Reference" model backing the unmanaged Article model below.
    title = models.CharField(max_length=50, unique=True)
    code = models.CharField(max_length=50, unique=True)
    authors = models.ManyToManyField(ScientistRef, related_name='articles_written_set')
    reviewers = models.ManyToManyField(ScientistRef, related_name='articles_reviewed_set')
class Scientist(models.Model):
    name = models.CharField(max_length=50)

    class Meta:
        # Reuse the ScientistRef table; unmanaged because the test DB has
        # no real tablespaces (see module comment above).
        db_table = 'model_options_scientistref'
        db_tablespace = 'tbl_tbsp'
        managed = False


class Article(models.Model):
    title = models.CharField(max_length=50, unique=True)
    # Per-field tablespace overrides for the index-bearing columns.
    code = models.CharField(max_length=50, unique=True, db_tablespace='idx_tbsp')
    authors = models.ManyToManyField(Scientist, related_name='articles_written_set')
    reviewers = models.ManyToManyField(Scientist, related_name='articles_reviewed_set', db_tablespace='idx_tbsp')

    class Meta:
        # Reuse the ArticleRef table; unmanaged for the same reason.
        db_table = 'model_options_articleref'
        db_tablespace = 'tbl_tbsp'
        managed = False
# Also set the tables for automatically created models
# (the implicit M2M "through" models must share ArticleRef's tables too,
# otherwise 'migrate' would try to create duplicate tables).
Authors = Article._meta.get_field('authors').remote_field.through
Authors._meta.db_table = 'model_options_articleref_authors'
Reviewers = Article._meta.get_field('reviewers').remote_field.through
Reviewers._meta.db_table = 'model_options_articleref_reviewers'
| bsd-3-clause |
Monithon/Monithon-2.0 | customforms/migrations/0003_auto__chg_field_formfieldoption_label.py | 1 | 2236 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: widen FormFieldOption.label from 100 to 5000 chars."""

    def forwards(self, orm):

        # Changing field 'FormFieldOption.label'
        db.alter_column(u'customforms_formfieldoption', 'label', self.gf('django.db.models.fields.CharField')(max_length=5000))

    def backwards(self, orm):

        # Changing field 'FormFieldOption.label'
        db.alter_column(u'customforms_formfieldoption', 'label', self.gf('django.db.models.fields.CharField')(max_length=100))

    # Frozen ORM snapshot used by South at migration time; auto-generated,
    # do not edit by hand.
    models = {
        u'customforms.form': {
            'Meta': {'object_name': 'Form'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'customforms.formfield': {
            'Meta': {'object_name': 'FormField'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'customforms.formfieldoption': {
            'Meta': {'ordering': "['weight']", 'object_name': 'FormFieldOption'},
            'form': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fields'", 'to': u"orm['customforms.Form']"}),
            'form_field': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'config'", 'to': u"orm['customforms.FormField']"}),
            'hint': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '5000'}),
            'list_field': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'obligatory': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'options': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'weight': ('django.db.models.fields.IntegerField', [], {'default': '10'})
        }
    }

    complete_apps = ['customforms']
eayunstack/ceilometer | ceilometer/i18n.py | 13 | 1366 | # Copyright 2014 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""oslo.i18n integration module.
See http://docs.openstack.org/developer/oslo.i18n/usage.html
"""
import oslo_i18n
# Translation domain for all ceilometer message catalogs.
DOMAIN = 'ceilometer'

_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)

# The primary translation function using the well-known name "_"
_ = _translators.primary

# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical


def translate(value, user_locale):
    """Translate a (possibly translatable) value into the given locale."""
    return oslo_i18n.translate(value, user_locale)


def get_available_languages():
    """Return the languages with catalogs for the ceilometer domain."""
    return oslo_i18n.get_available_languages(DOMAIN)
| apache-2.0 |
erdincay/pyload | module/lib/SafeEval.py | 42 | 1268 | ## {{{ http://code.activestate.com/recipes/286134/ (r3) (modified)
import dis
# Opcodes a "constant" expression may use: stack shuffling, container
# construction, constant loads and the final return. Anything else is
# rejected by test_expr below.
_const_codes = map(dis.opmap.__getitem__, [
    'POP_TOP','ROT_TWO','ROT_THREE','ROT_FOUR','DUP_TOP',
    'BUILD_LIST','BUILD_MAP','BUILD_TUPLE',
    'LOAD_CONST','RETURN_VALUE','STORE_SUBSCR'
    ])

# Bare names permitted in the expression (JSON-style literals).
_load_names = ['False', 'True', 'null', 'true', 'false']

# locals mapping used when the vetted expression is finally eval'd.
_locals = {'null': None, 'true': True, 'false': False}
def _get_opcodes(codeobj):
    """Return (opcodes, co_names) for a compiled code object.

    NOTE: assumes the pre-3.6 CPython bytecode layout where co_code is a
    str and opcodes >= HAVE_ARGUMENT carry two argument bytes (Python 2
    only; Python 3.6+ uses 2-byte wordcode throughout).
    """
    i = 0
    opcodes = []
    s = codeobj.co_code
    names = codeobj.co_names
    while i < len(s):
        code = ord(s[i])
        opcodes.append(code)
        if code >= dis.HAVE_ARGUMENT:
            i += 3  # opcode + 2 argument bytes
        else:
            i += 1
    return opcodes, names
def test_expr(expr, allowed_codes):
    """Compile expr and verify it only uses whitelisted opcodes.

    Args:
        expr: the expression string to compile (mode 'eval').
        allowed_codes: iterable of permitted opcode numbers.

    Returns:
        The compiled code object, safe to eval.

    Raises:
        ValueError: if expr doesn't compile or uses a disallowed opcode.
    """
    try:
        c = compile(expr, "", "eval")
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are no longer swallowed; also uses the call form of raise
        # instead of the Python 2-only 'raise ValueError, msg' statement.
        raise ValueError("%s is not a valid expression" % expr)
    codes, names = _get_opcodes(c)
    for code in codes:
        if code not in allowed_codes:
            # A disallowed opcode is tolerated only when every referenced
            # name is in the _load_names whitelist (preserves the original
            # behaviour for LOAD_NAME of true/false/null).
            for n in names:
                if n not in _load_names:
                    raise ValueError("opcode %s not allowed" % dis.opname[code])
    return c
def const_eval(expr):
    """Safely evaluate a constant expression (literals only).

    The expression is first vetted by test_expr against _const_codes, then
    eval'd with _locals providing the null/true/false literals.
    """
    c = test_expr(expr, _const_codes)
    return eval(c, None, _locals)
## end of http://code.activestate.com/recipes/286134/ }}}
| gpl-3.0 |
prescottprue/PiOpenLighting | python/ola/PidStore.py | 2 | 33135 | # This program is free software; you can redistribute it and/or modify
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# PidStore.py
# Copyright (C) 2010 Simon Newton
# Holds all the information about RDM PIDs
"""The PID Store."""
__author__ = 'nomis52@gmail.com (Simon Newton)'
import binascii
import math
import ola.RDMConstants
import os
import struct
import sys
from google.protobuf import text_format
from ola import PidStoreLocation
from ola import Pids_pb2
from ola.UID import UID
# Various sub device enums (per the E1.20 RDM specification).
ROOT_DEVICE = 0
MAX_VALID_SUB_DEVICE = 0x0200  # stray trailing ';' removed
ALL_SUB_DEVICES = 0xffff

# The three RDM command classes.
RDM_GET, RDM_SET, RDM_DISCOVERY = range(3)
class Error(Exception):
  """Base error class."""


class InvalidPidFormat(Error):
  """Indicates the PID data file was invalid."""
  # Fixed the original docstring quoting typo ("..."""), which relied on
  # implicit string concatenation instead of being a triple-quoted string.


class PidStructureException(Error):
  """Raised if the PID structure isn't valid."""


class ArgsValidationError(Error):
  """Raised if the arguments don't match the expected frame format."""


class UnpackException(Error):
  """Raised if we can't unpack the data correctly."""
class Pid(object):
  """A class that describes everything about a PID."""

  def __init__(self, name, value,
               discovery_request = None,
               discovery_response = None,
               get_request = None,
               get_response = None,
               set_request = None,
               set_response = None,
               discovery_validators = [],
               get_validators = [],
               set_validators = []):
    """Create a new PID.

    Args:
      name: the human readable name
      value: the 2 byte PID value
      discovery_request: A Group object, or None if DISCOVERY isn't supported
      discovery_response: A Group object, or None if DISCOVERY isn't supported
      get_request: A Group object, or None if GET isn't supported
      get_response: A Group object describing the GET response
      set_request: A Group object, or None if SET isn't supported
      set_response: A Group object describing the SET response
      discovery_validators: callables run before a DISCOVERY request
      get_validators: callables run before a GET request
      set_validators: callables run before a SET request

    Note: the mutable default validator lists are shared between instances;
    they are only iterated here, never mutated, so don't mutate them.
    """
    self._name = name
    self._value = value
    self._requests = {
      RDM_GET: get_request,
      RDM_SET: set_request,
      RDM_DISCOVERY: discovery_request,
    }
    self._responses = {
      RDM_GET: get_response,
      RDM_SET: set_response,
      RDM_DISCOVERY: discovery_response,
    }
    self._validators = {
      RDM_GET: get_validators,
      RDM_SET: set_validators,
      RDM_DISCOVERY: discovery_validators,
    }

  @property
  def name(self):
    return self._name

  @property
  def value(self):
    return self._value

  def RequestSupported(self, command_class):
    """Check if this PID allows a command class."""
    return self._requests.get(command_class) is not None

  def ValidateAddressing(self, args, command_class):
    """Run the validators. Returns False for unknown command classes."""
    validators = self._validators.get(command_class)
    if validators is None:
      # BUG FIX: this previously was 'return false' (a NameError).
      return False

    args['pid'] = self
    for validator in validators:
      if not validator(args):
        return False
    return True

  def __cmp__(self, other):
    # Python 2-style ordering; PIDs sort by their 2-byte value.
    return cmp(self._value, other._value)

  def __str__(self):
    return '%s (0x%04hx)' % (self.name, self.value)

  def __hash__(self):
    return self._value

  def Pack(self, args, command_class):
    """Pack args

    Args:
      args: A list of arguments of the right types.
      command_class: RDM_GET or RDM_SET or RDM_DISCOVERY

    Returns:
      Binary data which can be used as the Param Data.

    Callers should check RequestSupported() first; an unsupported command
    class leaves group as None here.
    """
    group = self._requests.get(command_class)
    blob, args_used = group.Pack(args)
    return blob

  def Unpack(self, data, command_class):
    """Unpack a message.

    Args:
      data: The raw data
      command_class: RDM_GET or RDM_SET or RDM_DISCOVERY

    Raises:
      UnpackException: if data is non-empty but no response is defined.
    """
    group = self._responses.get(command_class)
    if group is None:
      raise UnpackException('Response contained data (hex): %s' %
                            binascii.b2a_hex(data))
    output = group.Unpack(data)[0]
    return output

  def GetRequestDescription(self, command_class):
    """Get a help string that describes the format of the request.

    Args:
      command_class: RDM_GET or RDM_SET or RDM_DISCOVERY

    Returns:
      A help string.
    """
    group = self._requests.get(command_class)
    return group.GetDescription()
# The following classes are used to describe RDM messages
class Atom(object):
  """The basic field in an RDM message."""

  def __init__(self, name):
    self._name = name

  @property
  def name(self):
    """The human readable field name."""
    return self._name

  def CheckForSingleArg(self, args):
    """Raise ArgsValidationError unless at least one argument is present."""
    if not args:
      raise ArgsValidationError('Missing argument for %s' % self.name)

  def __str__(self):
    return '%s, %s' % (self.__class__, self._name)

  def __repr__(self):
    # Same representation either way.
    return self.__str__()

  def GetDescription(self, indent=0):
    # Subclasses override this with a usage string; default to str().
    return str(self)
class FixedSizeAtom(Atom):
  """An atom that always occupies a fixed number of bytes on the wire."""

  def __init__(self, name, format_char):
    super(FixedSizeAtom, self).__init__(name)
    # A struct format character, e.g. 'B', 'h', 'I'.
    self._char = format_char

  @property
  def size(self):
    return struct.calcsize(self._FormatString())

  def FixedSize(self):
    """Returns true if the size of this atom doesn't vary."""
    return True

  def Unpack(self, data):
    format_string = self._FormatString()
    try:
      values = struct.unpack(format_string, data)
    except struct.error as e:
      # BUG FIX: 'e' was previously unbound here ('except struct.error:'),
      # turning any unpack failure into a NameError instead of the
      # intended UnpackException.
      raise UnpackException(e)
    return values[0]

  def Pack(self, args):
    format_string = self._FormatString()
    try:
      data = struct.pack(format_string, args[0])
    except struct.error as e:
      raise ArgsValidationError("Can't pack data: %s" % e)
    return data, 1

  def _FormatString(self):
    # '!' forces big-endian (network byte order), as the RDM wire format
    # requires.
    return '!%s' % self._char
class Bool(FixedSizeAtom):
  """A boolean field, packed as one unsigned byte (0 or 1)."""

  # Accepted string spellings and their wire values.
  BOOL_MAP = {
      'true': 1,
      'false': 0,
  }

  def __init__(self, name):
    # struct's '?' would be a better fit but needs Python >= 2.6.
    super(Bool, self).__init__(name, 'B')

  def Pack(self, args):
    self.CheckForSingleArg(args)
    value = args[0]
    if isinstance(value, str):
      lowered = value.lower()
      if lowered not in self.BOOL_MAP:
        raise ArgsValidationError('Argument should be true or false')
      value = self.BOOL_MAP[lowered]
    return super(Bool, self).Pack([value])

  def Unpack(self, value):
    return bool(super(Bool, self).Unpack(value))

  def GetDescription(self, indent=0):
    return '%s%s: <true|false>' % (' ' * indent, self.name)
class Range(object):
  """An inclusive range of allowed int values."""

  def __init__(self, min, max):
    self.min = min
    self.max = max

  def Matches(self, value):
    """True if value lies within [min, max]."""
    return self.min <= value <= self.max

  def __str__(self):
    # A degenerate range prints as a single value.
    if self.min == self.max:
      return '%d' % self.min
    return '[%d, %d]' % (self.min, self.max)
class IntAtom(FixedSizeAtom):
  """An integer field with optional labels, ranges and a power-of-10
  multiplier used to scale between display and wire values.

  NOTE: the Pack/Unpack arithmetic relies on Python 2 integer division
  ('/' truncates for ints); do not port to Python 3 without replacing
  '/' with '//'.
  """
  def __init__(self, name, char, max_value, **kwargs):
    super(IntAtom, self).__init__(name, char)

    # About Labels & Ranges:
    # If neither labels nor ranges are specified, the valid values is the range of
    # the data type.
    # If labels are specified, and ranges aren't, the valid values are the labels
    # If ranges are specified, the valid values are those which fall into the range
    # (inclusive).
    # If both are specified, the enum values must fall into the specified ranges.

    # ranges limit the allowed values for a field
    self._ranges = kwargs.get('ranges', [])[:]
    self._multiplier = kwargs.get('multiplier', 0)

    # labels provide a user friendly way of referring to data values
    self._labels = {}
    for value, label in kwargs.get('labels', []):
      self._labels[label.lower()] = value
      if not kwargs.get('ranges', []):
        # Add the labels to the list of allowed values
        self._ranges.append(Range(value, value))

    if not self._ranges:
      # No labels or ranges: allow the type's full (unsigned) span.
      self._ranges.append(Range(0, max_value))

  def Pack(self, args):
    self.CheckForSingleArg(args)
    arg = args[0]
    if isinstance(arg, str):
      arg = arg.lower()
    value = self._labels.get(arg)

    # not a labeled value
    if value is None and self._multiplier >= 0:
      try:
        value = int(args[0])
      except ValueError, e:
        raise ArgsValidationError(e)

      multiplier = 10 ** self._multiplier
      if value % multiplier:
        raise ArgsValidationError('Conversion will lose data: %d -> %d' %
                                  (value, (value / multiplier * multiplier)))
      # Py2 integer division: scale down to the wire value.
      value = value / multiplier
    elif value is None:
      # Negative multiplier: accept fractional display values.
      try:
        value = float(args[0])
      except ValueError, e:
        raise ArgsValidationError(e)

      scaled_value = value * 10 ** abs(self._multiplier)
      fraction, int_value = math.modf(scaled_value)
      if fraction:
        raise ArgsValidationError(
            'Conversion will lose data: %s -> %s' %
            (value, int_value * (10.0 ** self._multiplier)))
      value = int(int_value)

    # for/else: raise only if no range matched.
    for range in self._ranges:
      if range.Matches(value):
        break
    else:
      raise ArgsValidationError('Param %d out of range, must be one of %s' %
                                (value, self._GetAllowedRanges()))
    return super(IntAtom, self).Pack([value])

  def Unpack(self, data):
    return self._AccountForMultiplier(super(IntAtom, self).Unpack(data))

  def GetDescription(self, indent=0):
    indent = ' ' * indent
    increment = ''
    if self._multiplier:
      increment = ', increment %s' % (10 ** self._multiplier)
    return ('%s%s: <%s> %s' % (indent, self.name, self._GetAllowedRanges(),
                               increment))

  def _GetAllowedRanges(self):
    # Human readable union of labels and (display-scaled) ranges.
    values = self._labels.keys()

    for range in self._ranges:
      if range.min == range.max:
        values.append(str(self._AccountForMultiplier(range.min)))
      else:
        values.append('[%s, %s]' %
                      (self._AccountForMultiplier(range.min),
                       self._AccountForMultiplier(range.max)))

    return ('%s' % ', '.join(values))

  def _AccountForMultiplier(self, value):
    # Convert a wire value back to a display value; round to the number
    # of decimal places implied by a negative multiplier.
    new_value = value * (10 ** self._multiplier)
    if self._multiplier < 0:
      new_value = round(new_value, abs(self._multiplier))
    return new_value
class Int8(IntAtom):
  """A single signed byte field."""
  def __init__(self, name, **kwargs):
    # NOTE(review): max_value 0xff exceeds the signed 'b' range (127);
    # out-of-range values are only rejected later by struct.pack -- confirm
    # this is intentional.
    super(Int8, self).__init__(name, 'b', 0xff, **kwargs)


class UInt8(IntAtom):
  """A single unsigned byte field."""
  def __init__(self, name, **kwargs):
    super(UInt8, self).__init__(name, 'B', 0xff, **kwargs)


class Int16(IntAtom):
  """A two-byte signed field."""
  def __init__(self, name, **kwargs):
    # NOTE(review): max_value 0xffff exceeds the signed 'h' range -- see Int8.
    super(Int16, self).__init__(name, 'h', 0xffff, **kwargs)


class UInt16(IntAtom):
  """A two-byte unsigned field."""
  def __init__(self, name, **kwargs):
    super(UInt16, self).__init__(name, 'H', 0xffff, **kwargs)


class Int32(IntAtom):
  """A four-byte signed field."""
  def __init__(self, name, **kwargs):
    # NOTE(review): max_value 0xffffffff exceeds the signed 'i' range.
    super(Int32, self).__init__(name, 'i', 0xffffffff, **kwargs)


class UInt32(IntAtom):
  """A four-byte unsigned field."""
  def __init__(self, name, **kwargs):
    super(UInt32, self).__init__(name, 'I', 0xffffffff, **kwargs)


class IPV4(IntAtom):
  """A four-byte IPV4 address, stored as a raw unsigned 32-bit int."""
  def __init__(self, name, **kwargs):
    super(IPV4, self).__init__(name, 'I', 0xffffffff, **kwargs)
class UIDAtom(FixedSizeAtom):
  """A 6-byte RDM UID: 16-bit manufacturer id + 32-bit device id.

  (The previous docstring, 'A four-byte IPV4 address', was a copy/paste
  error from the IPV4 atom above.)
  """
  def __init__(self, name, **kwargs):
    # 'HI' packs to 6 bytes in network order via FixedSizeAtom.
    super(UIDAtom, self).__init__(name, 'HI')

  def Unpack(self, data):
    format_string = self._FormatString()
    try:
      values = struct.unpack(format_string, data)
    except struct.error as e:
      # BUG FIX: 'e' was previously unbound ('except struct.error:'),
      # so unpack failures raised NameError instead of UnpackException.
      raise UnpackException(e)
    return UID(values[0], values[1])

  def Pack(self, args):
    uid = None
    if isinstance(args[0], UID):
      uid = args[0]
    else:
      uid = UID.FromString(args[0])
    if uid is None:
      # BUG FIX: the message previously referenced an undefined 'e';
      # report the offending input instead.
      raise ArgsValidationError("Invalid UID: %s" % args[0])

    format_string = self._FormatString()
    try:
      data = struct.pack(format_string, uid.manufacturer_id, uid.device_id)
    except struct.error as e:
      raise ArgsValidationError("Can't pack data: %s" % e)
    return data, 1
class String(Atom):
  """A string field with byte-length bounds (defaults: 0..32)."""
  def __init__(self, name, **kwargs):
    super(String, self).__init__(name)
    self._min = kwargs.get('min_size', 0)
    self._max = kwargs.get('max_size', 32)

  @property
  def min(self):
    return self._min

  @property
  def max(self):
    return self._max

  @property
  def size(self):
    # only valid if FixedSize() == True
    return self.min

  def FixedSize(self):
    return self.min == self.max

  def Pack(self, args):
    self.CheckForSingleArg(args)
    arg = args[0]
    arg_size = len(arg)
    if self.max is not None and arg_size > self.max:
      raise ArgsValidationError('%s can be at most %d,' %
                                (self.name, self.max))
    if self.min is not None and arg_size < self.min:
      raise ArgsValidationError('%s must be more than %d,' %
                                (self.name, self.min))

    try:
      # NOTE(review): unpack (not pack) is used here; for an exact-size
      # 's' format it returns the input unchanged (Python 2 str == bytes),
      # so this is effectively a validating no-op -- confirm intent.
      data = struct.unpack('%ds' % arg_size, arg)
    except struct.error, e:
      raise ArgsValidationError("Can't pack data: %s" % e)
    return data[0], 1

  def Unpack(self, data):
    data_size = len(data)
    if self.min and data_size < self.min:
      raise UnpackException('%s too short, required %d, got %d' %
                            (self.name, self.min, data_size))
    if self.max and data_size > self.max:
      raise UnpackException('%s too long, required %d, got %d' %
                            (self.name, self.max, data_size))

    try:
      value = struct.unpack('%ds' % data_size, data)
    except struct.error, e:
      raise UnpackException(e)
    # Strip trailing NUL padding from the wire value.
    return value[0].rstrip('\x00')

  def GetDescription(self, indent=0):
    indent = ' ' * indent
    return ('%s%s: <string, [%d, %d] bytes>' %
            (indent, self.name, self.min, self.max))

  def __str__(self):
    return 'String(%s, min=%s, max=%s)' % (self.name, self.min, self.max)
class Group(Atom):
  """A repeated group of atoms."""
  def __init__(self, name, atoms, **kwargs):
    """Create a group of atoms.

    Args:
      name: The name of the group
      atoms: The list of atoms the group contains

    Raises:
      PidStructureException: if the structure of this group is invalid.
    """
    super(Group, self).__init__(name)
    self._atoms = atoms
    self._min = kwargs.get('min_size')
    self._max = kwargs.get('max_size')
    # None for variable sized groups
    self._group_size = self._VerifyStructure()

  @property
  def min(self):
    return self._min

  @property
  def max(self):
    return self._max

  def _VerifyStructure(self):
    """Verify that we can pack & unpack this group.

    We need to make sure we have enough known information to pack & unpack a
    group. We don't support repeated groups of variable length data, nor
    nested, repeated groups.

    For now we support the following cases:
      - Fixed size group. This is easy to unpack
      - Groups of variable size. We enforce two conditions for these, i) the
        variable sized field MUST be the last one ii) Only a single occurance
        is allowed. This means you can't do things like:
          [(string, int)] # variable sized types must be last
          [(int, string)] # assuming string is variable sized
          [(int, [(bool,)]] # no way to tell where the group barriers are

    Returns:
      The number of bytes this group uses, or None if it's variable sized
    """
    variable_sized_atoms = []
    group_size = 0
    for atom in self._atoms:
      if atom.FixedSize():
        group_size += atom.size
      else:
        variable_sized_atoms.append(atom)

    if len(variable_sized_atoms) > 1:
      # BUG FIX: this previously did 'raise PidStore(...)', raising the
      # PidStore *class* (a TypeError at runtime) instead of the intended
      # structure exception.
      raise PidStructureException('More than one variable size field in %s: %s'
                                  % (self.name, variable_sized_atoms))

    if not variable_sized_atoms:
      # The group is of a fixed size, this means we don't care how many times
      # it's repeated.
      return group_size

    # for now we only support the case where the variable sized field is the
    # last one
    if variable_sized_atoms[0] != self._atoms[-1]:
      raise PidStructureException(
          'The variable sized field %s must be the last one' %
          variable_sized_atoms[0].name)

    # It's impossible to unpack groups of variable length data without more
    # information.
    if self.min != 1 and self.max != 1:
      raise PidStructureException("Repeated groups can't contain variable length data")
    return None

  def FixedSize(self):
    """This is true if we know the exact size of the group and min == max.

    Obviously this is unlikely.
    """
    can_determine_size = True
    for atom in self._atoms:
      if not atom.FixedSize():
        can_determine_size = False
        break
    return (can_determine_size and self._min is not None and
            self._min == self._max)

  @property
  def size(self):
    # only valid if FixedSize() == True
    return self.min

  def Pack(self, args):
    """Pack the args into binary data.

    Args:
      args: A list of string.

    Returns:
      binary data
    """
    if self._group_size is None:
      # variable length data, work out the fixed length portion first
      data = []
      arg_offset = 0
      for atom in self._atoms[0:-1]:
        chunk, args_consumed = atom.Pack(args[arg_offset:])
        data.append(chunk)
        arg_offset += args_consumed

      # what remains is for the variable length section
      chunk, args_used = self._atoms[-1].Pack(args[arg_offset:])
      arg_offset += args_used
      data.append(chunk)

      if arg_offset < len(args):
        raise ArgsValidationError('Too many arguments, expected %d, got %d' %
                                  (arg_offset, len(args)))
      return ''.join(data), arg_offset
    elif self._group_size == 0:
      return '', 0
    else:
      # this could be groups of fields, but we don't support that yet
      data = []
      arg_offset = 0
      for atom in self._atoms:
        chunk, args_consumed = atom.Pack(args[arg_offset:])
        data.append(chunk)
        arg_offset += args_consumed

      if arg_offset < len(args):
        raise ArgsValidationError('Too many arguments, expected %d, got %d' %
                                  (arg_offset, len(args)))
      return ''.join(data), arg_offset

  def Unpack(self, data):
    """Unpack binary data.

    Args:
      data: The binary data

    Returns:
      A list of dicts.
    """
    # we've already performed checks in _VerifyStructure so we can rely on
    # self._group_size
    data_size = len(data)
    if self._group_size is None:
      total_size = 0
      for atom in self._atoms[0:-1]:
        total_size += atom.size
      if data_size < total_size:
        raise UnpackException('Response too small, required %d, only got %d' %
                              (total_size, data_size))
      output, used = self._UnpackFixedLength(self._atoms[0:-1], data)

      # what remains is for the variable length section
      variable_sized_atom = self._atoms[-1]
      data = data[used:]
      output[variable_sized_atom.name] = variable_sized_atom.Unpack(data)
      return [output]
    elif self._group_size == 0:
      if data_size > 0:
        raise UnpackException('Expected 0 bytes but got %d' % data_size)
      return [{}]
    else:
      # groups of fixed length data
      if data_size % self._group_size:
        raise UnpackException(
            'Data size issue for %s, data size %d, group size %d' %
            (self.name, data_size, self._group_size))

      # '//' keeps integer division semantics on both Python 2 and 3.
      group_count = data_size // self._group_size
      if self.max is not None and group_count > self.max:
        raise UnpackException(
            'Too many repeated group_count for %s, limit is %d, found %d' %
            (self.name, self.max, group_count))
      # BUG FIX: the lower-bound check previously tested self.max, so a
      # too-small group count was never rejected when max was None.
      if self.min is not None and group_count < self.min:
        raise UnpackException(
            'Too few repeated group_count for %s, limit is %d, found %d' %
            (self.name, self.min, group_count))

      offset = 0
      groups = []
      while offset + self._group_size <= data_size:
        group = self._UnpackFixedLength(
            self._atoms,
            data[offset:offset + self._group_size])[0]
        groups.append(group)
        offset += self._group_size
      return groups

  def GetDescription(self, indent=0):
    # Note: unlike the other atoms this returns a (names, details) tuple.
    names = []
    output = []
    for atom in self._atoms:
      names.append('<%s>' % atom.name)
      output.append(atom.GetDescription(indent=2))
    return ' '.join(names), '\n'.join(output)

  def _UnpackFixedLength(self, atoms, data):
    """Unpack a list of atoms of a known, fixed size.

    Args:
      atoms: A list of atoms, must all have FixedSize() == True.
      data: The binary data.

    Returns:
      A tuple in the form (output_dict, data_consumed)
    """
    output = {}
    offset = 0
    for atom in atoms:
      size = atom.size
      output[atom.name] = atom.Unpack(data[offset:offset + size])
      offset += size
    return output, offset

  def __str__(self):
    return ('Group: atoms: %s, [%s, %s]' %
            (str(self._atoms), self.min, self.max))
# These are validators which can be applied before a request is sent
def RootDeviceValidator(args):
  """Ensure the sub device is the root device.

  Returns True on success; on failure prints to stderr (Python 2 chevron
  print) and returns False. args['pid'] is set by Pid.ValidateAddressing.
  """
  if args.get('sub_device') != ROOT_DEVICE:
    print >> sys.stderr, (
        "Can't send GET %s to non root sub devices" % args['pid'].name)
    return False
  return True
def SubDeviceValidator(args):
  """Ensure the sub device is in the range 0 - 512 or 0xffff (broadcast)."""
  sub_device = args.get('sub_device')
  if (sub_device is None or
      (sub_device > MAX_VALID_SUB_DEVICE and sub_device != ALL_SUB_DEVICES)):
    print >> sys.stderr, (
        "%s isn't a valid sub device" % sub_device)
    return False
  return True
def NonBroadcastSubDeviceValiator(args):
  """Ensure the sub device is in the range 0 - 512 (no broadcast).

  NOTE(review): the name misspells 'Validator'; it's public and may be
  referenced by callers, so it can't be renamed safely here.
  """
  sub_device = args.get('sub_device')
  if (sub_device is None or sub_device > MAX_VALID_SUB_DEVICE):
    print >> sys.stderr, (
        "Sub device %s needs to be between 0 and 512" % sub_device)
    return False
  return True
def SpecificSubDeviceValidator(args):
  """Ensure the sub device is in the range 1 - 512 (a real sub device,
  not the root device and not broadcast)."""
  sub_device = args.get('sub_device')
  if (sub_device is None or sub_device == ROOT_DEVICE or
      sub_device > MAX_VALID_SUB_DEVICE):
    print >> sys.stderr, (
        "Sub device %s needs to be between 1 and 512" % sub_device)
    return False
  return True
class PidStore(object):
  """The class which holds information about all the PIDs."""
  def __init__(self):
    # Protobuf scratch space; cleared again once LoadFile finishes parsing.
    self._pid_store = Pids_pb2.PidStore()
    # PID value -> Pid, for standard (non-manufacturer) PIDs.
    self._pids = {}
    # PID name -> Pid, for standard PIDs.
    self._name_to_pid = {}
    # manufacturer id -> {pid value -> Pid}
    self._manufacturer_pids = {}
    # manufacturer id -> {pid name -> Pid}
    self._manufacturer_names_to_pids = {}
    # manufacturer id -> manufacturer name
    self._manufacturer_id_to_name = {}
def Load(self, pid_files, validate = True):
  """Load a PidStore from a file.

  Args:
    pid_files: A list of PID files on disk to load
    validate: When True, enable strict checking.
  """
  # Clear any previous protobuf state before merging the new files.
  self._pid_store.Clear()
  for pid_file in pid_files:
    self.LoadFile(pid_file, validate)
def LoadFile(self, pid_file_name, validate):
  """Load a single pid file (protobuf text format) into this store.

  Args:
    pid_file_name: path of the file to load.
    validate: when True, reject out-of-range and duplicate PIDs.

  Raises:
    InvalidPidFormat: if the file can't be parsed or fails validation.
  """
  # 'with' guarantees the handle is closed even if readlines() raises
  # (the previous open/readlines/close leaked on error).
  with open(pid_file_name, 'r') as pid_file:
    lines = pid_file.readlines()

  try:
    text_format.Merge('\n'.join(lines), self._pid_store)
  except text_format.ParseError as e:
    raise InvalidPidFormat(str(e))

  for pid_pb in self._pid_store.pid:
    if validate:
      # Standard PIDs must not fall inside the manufacturer-specific range.
      # BUG FIX: the error messages below previously interpolated the
      # builtin 'file' (a type object) instead of pid_file_name, and the
      # first format string was the malformed '%0x04hx'.
      if ((pid_pb.value >= ola.RDMConstants.RDM_MANUFACTURER_PID_MIN) and
          (pid_pb.value <= ola.RDMConstants.RDM_MANUFACTURER_PID_MAX)):
        raise InvalidPidFormat('0x%04hx between 0x%04hx and 0x%04hx in %s' %
                               (pid_pb.value,
                                ola.RDMConstants.RDM_MANUFACTURER_PID_MIN,
                                ola.RDMConstants.RDM_MANUFACTURER_PID_MAX,
                                pid_file_name))
      if pid_pb.value in self._pids:
        raise InvalidPidFormat('0x%04hx listed more than once in %s' %
                               (pid_pb.value, pid_file_name))
      if pid_pb.name in self._name_to_pid:
        raise InvalidPidFormat('%s listed more than once in %s' %
                               (pid_pb.name, pid_file_name))
    pid = self._PidProtoToObject(pid_pb)
    self._pids[pid.value] = pid
    self._name_to_pid[pid.name] = pid

  for manufacturer in self._pid_store.manufacturer:
    pid_dict = self._manufacturer_pids.setdefault(
        manufacturer.manufacturer_id,
        {})
    name_dict = self._manufacturer_names_to_pids.setdefault(
        manufacturer.manufacturer_id,
        {})
    self._manufacturer_id_to_name[manufacturer.manufacturer_id] = (
        manufacturer.manufacturer_name)

    for pid_pb in manufacturer.pid:
      if validate:
        # Manufacturer PIDs must fall inside the manufacturer range.
        if ((pid_pb.value < ola.RDMConstants.RDM_MANUFACTURER_PID_MIN) or
            (pid_pb.value > ola.RDMConstants.RDM_MANUFACTURER_PID_MAX)):
          raise InvalidPidFormat(
              'Manufacturer pid 0x%04hx not between 0x%04hx and 0x%04hx' %
              (pid_pb.value,
               ola.RDMConstants.RDM_MANUFACTURER_PID_MIN,
               ola.RDMConstants.RDM_MANUFACTURER_PID_MAX))
        if pid_pb.value in pid_dict:
          raise InvalidPidFormat(
              '0x%04hx listed more than once for 0x%04hx in %s' % (
                pid_pb.value, manufacturer.manufacturer_id, pid_file_name))
        if pid_pb.name in name_dict:
          raise InvalidPidFormat(
              '%s listed more than once for %s in %s' % (
                pid_pb.name, manufacturer, pid_file_name))
      pid = self._PidProtoToObject(pid_pb)
      pid_dict[pid.value] = pid
      name_dict[pid.name] = pid

  # we no longer need the protobuf representation
  self._pid_store.Clear()
def Pids(self):
  """Returns a list of all PIDs. Manufacturer PIDs aren't included.

  Returns:
    A list of Pid objects.
  """
  return self._pids.values()
def ManufacturerPids(self, esta_id):
  """Return a list of all Manufacturer PIDs for a given esta_id.

  Args:
    esta_id: The 2-byte esta / manufacturer ID.

  Returns:
    A list of Pid objects (empty if the manufacturer is unknown).
  """
  return self._manufacturer_pids.get(esta_id, {}).values()
def GetPid(self, pid_value, esta_id=None):
  """Look up a PIDs by the 2-byte PID value.

  Standard PIDs are checked first, then the manufacturer's PIDs.

  Args:
    pid_value: The 2-byte PID value, e.g. 0x8000
    esta_id: The 2-byte esta / manufacturer ID.

  Returns:
    A Pid object, or None if no PID was found.
  """
  pid = self._pids.get(pid_value, None)
  if not pid:
    pid = self._manufacturer_pids.get(esta_id, {}).get(
        pid_value, None)
  return pid
def GetName(self, pid_name, esta_id=None):
  """Look up a PID by name.

  Args:
    pid_name: The name of the PID, e.g. 'DEVICE_INFO'
    esta_id: The 2-byte esta / manufacturer ID.

  Returns:
    A Pid object, or None if no PID was found.
  """
  pid = self._name_to_pid.get(pid_name)
  if pid:
    return pid
  # Not a standard name: fall back to the manufacturer-specific table.
  manufacturer_names = self._manufacturer_names_to_pids.get(esta_id, {})
  return manufacturer_names.get(pid_name)
def NameToValue(self, pid_name, esta_id=None):
  """A helper method to convert a PID name to a PID value.

  Args:
    pid_name: The name of the PID, e.g. 'DEVICE_INFO'
    esta_id: The 2-byte esta / manufacturer ID, used to resolve
      manufacturer-specific PID names.

  Returns:
    The value for this PID, or None if it wasn't found.
  """
  # Bug fix: forward esta_id so manufacturer-specific names resolve too.
  # Previously it was dropped and only standard names ever matched.
  pid = self.GetName(pid_name, esta_id)
  if pid:
    return pid.value
  return pid
def _PidProtoToObject(self, pid_pb):
  """Convert the protobuf representation of a PID to a PID object.

  Args:
    pid_pb: The protobuf version of the pid

  Returns:
    A PIDStore.PID object.
  """
  def BuildList(field_name):
    # Return the Group for the named frame format, or None if the
    # message doesn't define that field.
    if not pid_pb.HasField(field_name):
      return None
    try:
      group = self._FrameFormatToGroup(getattr(pid_pb, field_name))
    except PidStructureException as e:
      # Fixed Python-2-only 'except X, e' syntax; 'as' works on 2.6+ / 3.x.
      raise PidStructureException(
          "The structure for the %s in %s isn't valid: %s" %
          (field_name, pid_pb.name, e))
    return group

  discovery_request = BuildList('discovery_request')
  discovery_response = BuildList('discovery_response')
  get_request = BuildList('get_request')
  get_response = BuildList('get_response')
  set_request = BuildList('set_request')
  set_response = BuildList('set_response')

  # Optional sub-device range constraints become validator callables.
  discovery_validators = []
  if pid_pb.HasField('discovery_sub_device_range'):
    discovery_validators.append(self._SubDeviceRangeToValidator(
        pid_pb.discovery_sub_device_range))
  get_validators = []
  if pid_pb.HasField('get_sub_device_range'):
    get_validators.append(self._SubDeviceRangeToValidator(
        pid_pb.get_sub_device_range))
  set_validators = []
  if pid_pb.HasField('set_sub_device_range'):
    set_validators.append(self._SubDeviceRangeToValidator(
        pid_pb.set_sub_device_range))

  return Pid(pid_pb.name,
             pid_pb.value,
             discovery_request,
             discovery_response,
             get_request,
             get_response,
             set_request,
             set_response,
             discovery_validators,
             get_validators,
             set_validators)
def _FrameFormatToGroup(self, frame_format):
  """Convert a frame format message to the root Group (one occurrence)."""
  atoms = [self._FieldToAtom(field) for field in frame_format.field]
  return Group('', atoms, min_size=1, max_size=1)
def _FieldToAtom(self, field):
  """Convert a PID proto field message into an atom.

  Args:
    field: the field message from the Pids proto.

  Returns:
    An atom instance, or None if the field type isn't recognized.
  """
  field_name = str(field.name)
  # Keyword arguments shared by all sized atom constructors.
  args = {'labels': [],
          'ranges': [],
          }
  if field.HasField('max_size'):
    args['max_size'] = field.max_size
  if field.HasField('min_size'):
    args['min_size'] = field.min_size
  if field.HasField('multiplier'):
    args['multiplier'] = field.multiplier
  for label in field.label:
    args['labels'].append((label.value, label.label))
  for allowed_value in field.range:
    args['ranges'].append(Range(allowed_value.min, allowed_value.max))

  if field.type == Pids_pb2.BOOL:
    # Bool atoms don't take the size / label / range arguments.
    return Bool(field_name)

  # Table-driven dispatch for the atoms sharing the common constructor
  # signature (replaces the long elif chain with stray semicolons).
  simple_types = {
      Pids_pb2.INT8: Int8,
      Pids_pb2.UINT8: UInt8,
      Pids_pb2.INT16: Int16,
      Pids_pb2.UINT16: UInt16,
      Pids_pb2.INT32: Int32,
      Pids_pb2.UINT32: UInt32,
      Pids_pb2.IPV4: IPV4,
      Pids_pb2.UID: UIDAtom,
      Pids_pb2.STRING: String,
  }
  atom_class = simple_types.get(field.type)
  if atom_class is not None:
    return atom_class(field_name, **args)

  if field.type == Pids_pb2.GROUP:
    if not field.field:
      raise InvalidPidFormat('Missing child fields for %s' % field_name)
    atoms = [self._FieldToAtom(child_field) for child_field in field.field]
    return Group(field_name, atoms, **args)

  # Unknown field type: keep the original behavior of returning None,
  # but make it explicit.
  return None
def _SubDeviceRangeToValidator(self, range):
  """Convert a sub device range enum to the matching validator.

  Returns None for unrecognized values, like the elif chain it replaces.
  (The parameter name shadows the builtin 'range'; kept for interface
  compatibility.)
  """
  validators = {
      Pids_pb2.ROOT_DEVICE: RootDeviceValidator,
      Pids_pb2.ROOT_OR_ALL_SUBDEVICE: SubDeviceValidator,
      Pids_pb2.ROOT_OR_SUBDEVICE: NonBroadcastSubDeviceValiator,
      Pids_pb2.ONLY_SUBDEVICES: SpecificSubDeviceValidator,
  }
  return validators.get(range)
# Module-level singleton, created lazily by GetStore().
_pid_store = None


def GetStore(location=None, only_files=()):
  """Get the singleton instance of the PidStore.

  Args:
    location: The directory to load the store from. If not specified it
      uses the location defined in PidStoreLocation.py
    only_files: If non-empty, only .proto files whose basename appears in
      this collection are loaded.

  Returns:
    An instance of PidStore.
  """
  global _pid_store
  if _pid_store:
    return _pid_store
  _pid_store = PidStore()
  if not location:
    location = PidStoreLocation.location
  pid_files = []
  for file_name in os.listdir(location):
    if not file_name.endswith('.proto'):
      continue
    if only_files and file_name not in only_files:
      continue
    pid_files.append(os.path.join(location, file_name))
  _pid_store.Load(pid_files)
  return _pid_store
| lgpl-2.1 |
ZacariasBendeck/flask | examples/flaskr/flaskr.py | 157 | 2893 | # -*- coding: utf-8 -*-
"""
Flaskr
~~~~~~
A microblog example application written as Flask tutorial with
Flask and sqlite3.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
from sqlite3 import dbapi2 as sqlite3
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash
# create our little application :)
app = Flask(__name__)

# Load default config and override config from an environment variable
app.config.update(dict(
    DATABASE=os.path.join(app.root_path, 'flaskr.db'),
    DEBUG=True,
    # NOTE: demo-only defaults; a real deployment overrides SECRET_KEY,
    # USERNAME and PASSWORD via the FLASKR_SETTINGS file below.
    SECRET_KEY='development key',
    USERNAME='admin',
    PASSWORD='default'
))
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
def connect_db():
    """Open a new connection to the configured sqlite database."""
    connection = sqlite3.connect(app.config['DATABASE'])
    # Row factory lets templates access columns by name, dict-style.
    connection.row_factory = sqlite3.Row
    return connection
def init_db():
    """(Re)create the database schema from the bundled schema.sql."""
    db = get_db()
    with app.open_resource('schema.sql', mode='r') as schema:
        db.cursor().executescript(schema.read())
    db.commit()
@app.cli.command('initdb')
def initdb_command():
    """Creates the database tables."""
    # Exposed on the command line as `flask initdb`.
    init_db()
    print('Initialized the database.')
def get_db():
    """Opens a new database connection if there is none yet for the
    current application context.
    """
    if not hasattr(g, 'sqlite_db'):
        # Cache the connection on flask.g so one handle serves the request.
        g.sqlite_db = connect_db()
    return g.sqlite_db
@app.teardown_appcontext
def close_db(error):
    """Closes the database again at the end of the request."""
    # 'error' is the exception that ended the request, if any (unused here).
    if hasattr(g, 'sqlite_db'):
        g.sqlite_db.close()
@app.route('/')
def show_entries():
    """Render the index page listing all entries, newest first."""
    cursor = get_db().execute(
        'select title, text from entries order by id desc')
    return render_template('show_entries.html', entries=cursor.fetchall())
@app.route('/add', methods=['POST'])
def add_entry():
    """Insert a new blog entry; only allowed for logged-in users."""
    if not session.get('logged_in'):
        abort(401)
    db = get_db()
    form = request.form
    # Parameterized query keeps user input out of the SQL text.
    db.execute('insert into entries (title, text) values (?, ?)',
               [form['title'], form['text']])
    db.commit()
    flash('New entry was successfully posted')
    return redirect(url_for('show_entries'))
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Log the (single) demo user in by checking the configured credentials."""
    error = None
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        # NOTE: demo-only plaintext comparison against config values; a real
        # application would hash passwords and use a constant-time compare.
        if username != app.config['USERNAME']:
            error = 'Invalid username'
        elif password != app.config['PASSWORD']:
            error = 'Invalid password'
        else:
            session['logged_in'] = True
            flash('You were logged in')
            return redirect(url_for('show_entries'))
    return render_template('login.html', error=error)
@app.route('/logout')
def logout():
    """Clear the login flag and return to the index page."""
    # pop with a default: no KeyError if the user wasn't logged in.
    session.pop('logged_in', None)
    flash('You were logged out')
    return redirect(url_for('show_entries'))
| bsd-3-clause |
borysiasty/inasafe | safe/gis/test/test_numerics.py | 10 | 2184 | # coding=utf-8
"""
InaSAFE Disaster risk assessment tool developed by AusAid -
**Numeric module test cases.**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import unittest
import numpy
from safe.gis.numerics import axes_to_points
from safe.gis.numerics import grid_to_points
class TestNumerics(unittest.TestCase):
    """Tests for the axes/grid to point-coordinate conversions."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_axes2points(self):
        """Grid axes can be converted to point coordinates for all pixels"""
        # Test 1
        x = numpy.linspace(1, 3, 3)
        y = numpy.linspace(10, 20, 2)
        P = axes_to_points(x, y)
        # Expected layout: row-major starting from the top row (max y),
        # hence the exact comparison (rtol=atol=0) is intentional.
        assert numpy.allclose(
            P,
            [[1., 20.], [2., 20.], [3., 20.], [1., 10.], [2., 10.], [3., 10.]],
            rtol=0.0,
            atol=0.0)

        # Test 2
        x = numpy.linspace(1, 5, 11)
        y = numpy.linspace(10, 20, 5)
        P = axes_to_points(x, y)
        # Spot-check one interior point of the 11x5 grid.
        assert numpy.allclose(P[12, :], [1.4, 17.5])

    def test_grid2points(self):
        """Raster grids can be converted to point data."""
        # Pixel values
        A = [[1, 2, 3, 4],
             [5, 6, 7, 8],
             [9, 10, 11, 12]]
        A = numpy.array(A, dtype='f')
        M, N = A.shape
        L = M * N

        # Axis
        longitudes = numpy.linspace(100, 110, N, endpoint=False)
        latitudes = numpy.linspace(-4, 0, M, endpoint=True)

        # Call function to be tested
        P, V = grid_to_points(A, longitudes, latitudes)

        # Assert correctness
        assert P.shape[0] == L
        assert P.shape[1] == 2
        assert len(V) == L
        assert numpy.allclose(P[:N, 0], longitudes)
        # Latitudes come out reversed: grid row 0 is the northern edge.
        assert numpy.allclose(P[:L:N, 1], latitudes[::-1])
        assert numpy.allclose(V, A.flat[:])
if __name__ == '__main__':
    # Collect all test_* methods and run them verbosely when executed directly.
    suite = unittest.makeSuite(TestNumerics, 'test')
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite)
| gpl-3.0 |
andreimaximov/algorithms | leetcode/algorithms/skyline/skyline.py | 1 | 3174 | TEST_BUILDINGS = [
[2, 9, 10],
[3, 7, 15],
[5, 12, 12],
[15, 20, 10],
[19, 24, 8]
]
TEST_SKYLINE = [
(2, 10),
(3, 15),
(7, 12),
(12, 0),
(15, 10),
(20, 8),
(24, 0)
]
class Solution(object):
    def getSkyline(self, buildings):
        """Compute the skyline of a list of buildings.

        :type buildings: List[List[int]] of [left, right, height] triplets
        :rtype: List[(int, int)] of (x, height) change points
        """
        if not buildings:
            return []
        return self.getSkyline_(buildings, 0, len(buildings) - 1)

    def getSkyline_(self, buildings, lo, hi):
        """Solve buildings[lo..hi] by divide and conquer.

        :rtype: List[(int, int)]
        """
        assert 0 <= lo <= hi < len(buildings)
        if lo == hi:
            # Contour of a single building: rise at its left edge, drop to
            # ground at its right edge.
            left_x, right_x, height = buildings[lo]
            return [(left_x, height), (right_x, 0)]
        mid = lo + (hi - lo) // 2
        return self.mergeSkylines_(
            self.getSkyline_(buildings, lo, mid),
            self.getSkyline_(buildings, mid + 1, hi))

    def mergeSkylines_(self, left, right):
        """Merge two skylines by sweeping both point lists left to right.

        :type left: List[(int, int)]
        :type right: List[(int, int)]
        :rtype: List[(int, int)]
        """
        if not left:
            return right
        if not right:
            return left

        merged = []
        l = r = 0    # cursors into left / right
        lh = rh = 0  # last seen height of each skyline

        def emit(x, height):
            # Drop points that don't actually change the outline height.
            if merged and merged[-1][1] == height:
                return
            merged.append((x, height))

        while l < len(left) or r < len(right):
            left_done = l >= len(left)
            right_done = r >= len(right)
            if right_done or (not left_done and left[l][0] < right[r][0]):
                # Next change point comes from the left skyline.
                x, lh = left[l]
                l += 1
            elif left_done or (not right_done and left[l][0] > right[r][0]):
                # Next change point comes from the right skyline.
                x, rh = right[r]
                r += 1
            else:
                # Both skylines change at the same x.
                x, lh = left[l]
                rh = right[r][1]
                l += 1
                r += 1
            emit(x, max(lh, rh))
        return merged
def main():
    # Sanity-check the solution against the known fixture before pointing
    # the user at the online judge.
    solution = Solution()
    assert solution.getSkyline(TEST_BUILDINGS) == TEST_SKYLINE
    print('Tests pass!')
    print('Please run this solution on LeetCode.')
    print('https://leetcode.com/problems/the-skyline-problem/')


if __name__ == "__main__":
    main()
| mit |
youfoh/webkit-efl | Tools/Scripts/webkitpy/layout_tests/port/__init__.py | 3 | 1860 | #!/usr/bin/env python
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Port-specific entrypoints for the layout tests test infrastructure."""
import builders # Why is this in port?
from base import Port # It's possible we don't need to export this virtual baseclass outside the module.
from driver import Driver, DriverInput, DriverOutput
from factory import port_options
| lgpl-2.1 |
alfredoavanzosc/odoomrp-wip-1 | mrp_production_real_costs/models/mrp_production.py | 2 | 6118 | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, fields, api, exceptions, _
class MrpProduction(models.Model):
    """Extend mrp.production with real-cost figures derived from the
    analytic lines booked against the production order."""
    _inherit = 'mrp.production'

    @api.multi
    def calc_mrp_real_cost(self):
        """Return the total real cost of this MO: the sum of the absolute
        values of its negative (expense) analytic lines."""
        self.ensure_one()
        return sum([-line.amount for line in
                    self.analytic_line_ids.filtered(lambda l: l.amount < 0)])

    @api.one
    @api.depends('analytic_line_ids', 'analytic_line_ids.amount',
                 'product_qty')
    def get_real_cost(self):
        # Compute method for real_cost / unit_real_cost.
        # NOTE(review): divides by product_qty — presumably never zero for a
        # confirmed MO, but worth confirming.
        self.real_cost = self.calc_mrp_real_cost()
        self.unit_real_cost = self.real_cost / self.product_qty

    @api.one
    @api.depends('avg_cost', 'real_cost')
    def get_percentage_difference(self):
        # Despite the label, this stores real_cost as a percentage of
        # avg_cost (100 means "equal"), not the delta between the two.
        self.percentage_difference = 0
        if self.avg_cost and self.real_cost:
            self.percentage_difference = (self.real_cost * 100 / self.avg_cost)

    # Stored computed fields so the costs are searchable/reportable.
    real_cost = fields.Float("Total Real Cost", compute="get_real_cost",
                             store=True)
    unit_real_cost = fields.Float("Unit Real Cost", compute="get_real_cost",
                                  store=True)
    percentage_difference = fields.Float(
        "% difference", compute="get_percentage_difference", store=True)

    @api.multi
    def action_production_end(self):
        """On MO completion: optionally book the accumulated real cost as an
        analytic line on the final product, update the product cost from the
        done moves, and reload the produced quants' cost."""
        task_obj = self.env['project.task']
        analytic_line_obj = self.env['account.analytic.line']
        res = super(MrpProduction, self).action_production_end()
        for record in self:
            mrp_cost = record.calc_mrp_real_cost()
            done_lines = record.move_created_ids2.filtered(lambda l:
                                                           l.state == 'done')
            # Config switch: should the final product carry its real cost?
            create_cost = self.env['mrp.config.settings']._get_parameter(
                'final.product.cost')
            if create_cost and create_cost.value and mrp_cost > 0.0:
                journal_id = self.env.ref('mrp.analytic_journal_materials',
                                          False)
                qty = sum([l.product_qty for l in done_lines])
                name = ('Final product - ' + (record.name or '') +
                        '-' + (record.product_id.default_code or ''))
                vals = record._prepare_real_cost_analytic_line(
                    journal_id, name, record, record.product_id, qty=qty,
                    amount=mrp_cost)
                # Attach the line to the MO-level task (wk_order False),
                # if one exists.
                task = task_obj.search([('mrp_production_id', '=', record.id),
                                        ('wk_order', '=', False)])
                vals['task_id'] = task and task[0].id or False
                analytic_line_obj.create(vals)
            record.real_cost = mrp_cost
            done_lines.product_price_update_production_done()
        # Reload produced quants cost to consider all production costs.
        # Material, machine and manual costs.
        self.load_final_quant_cost()
        return res

    @api.multi
    def load_final_quant_cost(self):
        """Spread each MO's real cost evenly over its produced quants."""
        for production in self:
            mrp_cost = production.calc_mrp_real_cost()
            done_lines = production.move_created_ids2.filtered(
                lambda l: l.state == 'done')
            total_qty = sum([l.product_qty for l in done_lines])
            quants = done_lines.mapped('quant_ids')
            # NOTE(review): raises ZeroDivisionError when there are no done
            # move lines — presumably guarded by the workflow; confirm.
            quants.write({'cost': mrp_cost / total_qty})

    @api.model
    def _prepare_real_cost_analytic_line(
            self, journal, name, production, product, general_account=None,
            workorder=None, qty=1, amount=0):
        """
        Prepare the vals for creating an analytic entry for real cost
        :param journal: Journal of the entry
        :param name: Name of the entry
        :param production: Origin production order
        :param product: Product for the entry
        :param general_account: General account for the entry
        :param workorder: Origin workorder
        :param qty: Quantity for the entry. This quantity will multiply both
        standard and average costs for the entry costs.
        :param amount: Cost for calculating real cost.
        :return: Dictionary with the analytic entry vals.
        """
        analytic_line_obj = self.env['account.analytic.line']
        property_obj = self.env['ir.property']
        if not general_account:
            general_account = (
                product.property_account_income or
                product.categ_id.property_account_income_categ or
                property_obj.get('property_account_expense_categ',
                                 'product.category'))
        if not production.analytic_account_id:
            raise exceptions.Warning(
                _('You must define one Analytic Account for this MO: %s') %
                (production.name))
        # NOTE(review): the account below comes from self.analytic_account_id
        # while the guard above checks production.analytic_account_id —
        # confirm these are always the same record.
        return {
            'name': name,
            'mrp_production_id': production.id,
            'workorder': workorder and workorder.id or False,
            'account_id': self.analytic_account_id.id,
            'journal_id': journal.id,
            'user_id': self.env.uid,
            'date': analytic_line_obj._get_default_date(),
            'product_id': product and product.id or False,
            'unit_amount': qty,
            'amount': amount,
            'product_uom_id': product.uom_id.id,
            'general_account_id': general_account.id,
        }
| agpl-3.0 |
frootloops/swift | utils/sil-opt-verify-all-modules.py | 65 | 5971 | #!/usr/bin/env python
# utils/sil-opt-verify-all-modules.py - Verifies Swift modules -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
from __future__ import print_function
import argparse
import glob
import multiprocessing
import os
import pipes
import shlex
import subprocess
import sys
import tempfile
def get_verify_toolchain_modules_commands(toolchain_dir, sil_opt):
    """Return sil-opt verification commands for one Xcode toolchain.

    Args:
        toolchain_dir: path to a *.xctoolchain directory.
        sil_opt: path to a sil-opt binary, or None to use the toolchain's
            own copy.

    Returns:
        A list of command argument lists; empty for Legacy toolchains.

    Raises:
        ValueError: if the toolchain name is not recognized.
    """
    if sil_opt is None:
        sil_opt = os.path.join(toolchain_dir, 'usr', 'bin', 'sil-opt')

    toolchain_basename = os.path.basename(toolchain_dir)
    if toolchain_basename.startswith('Legacy'):
        return []

    # Map the directory prefix to the '--toolchain' name xcrun expects.
    toolchain_name = None
    for prefix in ('XcodeDefault', 'tvOS', 'OSX', 'watchOS', 'iOS'):
        if toolchain_basename.startswith(prefix):
            toolchain_name = prefix
            break
    if toolchain_name is None:
        # Previously an unrecognized toolchain fell through to a NameError.
        raise ValueError('unrecognized toolchain: ' + toolchain_basename)

    # Bug fix: honor the caller-provided sil_opt; the original always
    # rebuilt the toolchain-local path here, ignoring the argument.
    return get_verify_resource_dir_modules_commands(
        os.path.join(toolchain_dir, 'usr', 'lib', 'swift'),
        sil_opt,
        toolchain_name)
def get_verify_build_dir_commands(build_dir, toolchain_name='XcodeDefault'):
    """Return sil-opt commands verifying the resource dir of a Swift build
    tree, using the freshly-built sil-opt from that same tree."""
    return get_verify_resource_dir_modules_commands(
        os.path.join(build_dir, 'lib', 'swift'),
        os.path.join(build_dir, 'bin', 'sil-opt'),
        toolchain_name)
def get_verify_resource_dir_modules_commands(
        resource_dir, sil_opt, toolchain_name):
    """Build one sil-opt invocation per .swiftmodule found under
    resource_dir, covering every known (sdk, arch) pair."""
    print("================================================================")
    print("Resource dir: " + resource_dir)
    print("sil-opt path: " + sil_opt)

    known_platforms = [
        ('appletvos', 'arm64', 'arm64-apple-tvos9.0'),
        ('appletvsimulator', 'x86_64', 'x86_64-apple-tvos9.0'),
        ('iphoneos', 'armv7', 'armv7-apple-ios7.0'),
        ('iphoneos', 'armv7s', 'armv7s-apple-ios7.0'),
        ('iphoneos', 'arm64', 'arm64-apple-ios7.0'),
        ('iphonesimulator', 'i386', 'i386-apple-ios7.0'),
        ('iphonesimulator', 'x86_64', 'x86_64-apple-ios7.0'),
        ('macosx', 'x86_64', 'x86_64-apple-macosx10.9'),
        ('watchos', 'armv7k', 'armv7k-apple-watchos2.0'),
        ('watchsimulator', 'i386', 'i386-apple-watchos2.0'),
    ]

    # One shared clang module cache keeps repeated runs from recompiling.
    module_cache_dir = tempfile.mkdtemp(
        prefix="swift-testsuite-clang-module-cache")
    commands = []
    for sdk_name, arch, triple in known_platforms:
        modules_dir = os.path.join(resource_dir, sdk_name, arch)
        print(modules_dir)
        for module_file_name in glob.glob(
                os.path.join(modules_dir, '*.swiftmodule')):
            if module_file_name.endswith('XCTest.swiftmodule'):
                # FIXME: sil-opt does not have the '-F' option.
                continue
            commands.append([
                'xcrun', '--toolchain', toolchain_name, '--sdk', sdk_name,
                sil_opt,
                '-target', triple,
                '-resource-dir', resource_dir,
                '-module-cache-path', module_cache_dir,
                '-verify',
                module_file_name,
            ])
    return commands
def quote_shell_command(args):
    """Return the elements of *args* joined into one shell-safe string."""
    # shlex.quote replaces pipes.quote: the pipes module is deprecated
    # since Python 3.11 and removed in 3.13.
    return " ".join([shlex.quote(a) for a in args])
def run_commands_in_parallel(commands):
    """Run all commands concurrently by generating a Makefile with one
    target per command and letting 'make -j' do the scheduling."""
    makefile = ".DEFAULT_GOAL := all\n"
    targets = []
    for c in commands:
        target_name = "target" + str(len(targets))
        targets.append(target_name)
        makefile += target_name + ":\n"
        # Each command's stdout is captured in <target>.stdout inside the
        # temporary directory created below.
        makefile += \
            "\t" + quote_shell_command(c) + \
            " > {target}.stdout\n".format(target=target_name)
    makefile += "all: " + " ".join(targets) + "\n"

    temp_dir = tempfile.mkdtemp(prefix="swift-testsuite-main")
    with open(os.path.join(temp_dir, 'Makefile'), 'w') as makefile_file:
        makefile_file.write(makefile)

    max_processes = multiprocessing.cpu_count()
    # --keep-going: verify every module even if some invocations fail.
    subprocess.check_call([
        'make',
        '-C', temp_dir,
        '-j', str(max_processes),
        '--keep-going'
    ])
def main():
    """Parse arguments and run the requested module verification.

    Returns the process exit status (0 on success, 1 on usage errors).
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="""Verifies Swift modules.""")
    parser.add_argument(
        "--sil-opt",
        help="use the specified 'sil-opt' binary",
        metavar="PATH")
    parser.add_argument(
        "--verify-build-dir",
        help="verify the Swift resource directory under the given build dir.",
        metavar="PATH")
    parser.add_argument(
        "--verify-xcode",
        help="verify the Xcode.app that is currently xcode-select'ed",
        action="store_true")
    args = parser.parse_args()

    if args.verify_build_dir is not None and args.verify_xcode:
        print("--verify-build-dir and --verify-xcode can't be used together")
        return 1
    if args.verify_build_dir is None and not args.verify_xcode:
        # Bug fix: previously this fell through to a NameError on 'commands'.
        print("one of --verify-build-dir or --verify-xcode is required")
        return 1

    if args.verify_build_dir is not None:
        commands = get_verify_build_dir_commands(args.verify_build_dir)
    else:
        # Find Xcode: walk 7 levels up from
        # .../Toolchains/X.xctoolchain/usr/bin/swift to Xcode.app.
        # NOTE(review): check_output returns bytes on Python 3 — presumably
        # this script still targets Python 2; confirm before porting.
        swift_path = subprocess.check_output(['xcrun', '--find', 'swift'])
        xcode_path = swift_path
        for _ in range(0, 7):
            xcode_path = os.path.dirname(xcode_path)
        toolchains_dir = os.path.join(
            xcode_path, 'Contents', 'Developer', 'Toolchains')
        commands = []
        for toolchain_dir in glob.glob(
                os.path.join(toolchains_dir, '*.xctoolchain')):
            commands += get_verify_toolchain_modules_commands(
                toolchain_dir, args.sil_opt)

    run_commands_in_parallel(commands)
    return 0


if __name__ == "__main__":
    sys.exit(main())
| apache-2.0 |
seblin/shcol | shcol/__init__.py | 1 | 3629 | # -*- coding: utf-8 -*-
# Copyright (c) 2013-2016, Sebastian Linke
# Released under the Simplified BSD license
# (see LICENSE file for details).
"""
`shcol` is a shell columnizer that works in a similar way as the Unix-tool `ls`
does when rendering directory contents.
Some examples:
>>> import shcol
>>> shcol.print_filenames() # print contents of current directory
bin LICENSE MANIFEST.in setup.py testsuite
build Makefile README.md shcol
>>> shcol.print_filenames('testsuite') # print contents of a subdirectory
test_cli.py test_core.py test_highlevel.py test_script.py
test_cli.pyc test_core.pyc test_highlevel.pyc test_script.pyc
>>> shcol.print_filenames('testsuite/*.py') # only print `*.py`-files
test_cli.py test_core.py test_highlevel.py test_script.py
>>> shcol.print_filenames('~/shcol', hide_dotted=False) # like `ls -A ~/shcol`
bin .git Makefile README.md shcol
build LICENSE MANIFEST.in setup.py testsuite
`shcol` can also columnize the attribute names of a Python-object:
>>> shcol.print_sorted(shcol)
__author__ config helpers __package__ print_columnized_mapping
__builtins__ core highlevel __path__ print_filenames
cli __doc__ __license__ print_attrs __version__
columnize __file__ __name__ print_columnized
>>> shcol.print_sorted(shcol, spacing=5)
__author__ core __license__ print_columnized
__builtins__ __doc__ __name__ print_columnized_mapping
cli __file__ __package__ print_filenames
columnize helpers __path__ __version__
config highlevel print_attrs
Note that the `spacing`-parameter as shown above works with all kinds of
`print_*`-functions in `shcol`.
You can also tell a `print_*`-function to use a specific line width for
its output:
>>> shcol.print_sorted(shcol, spacing=5, line_width=60)
>>> shcol.print_sorted(dir(shcol), spacing=5, line_width=60)
__author__ __path__ helpers
__builtins__ __version__ highlevel
__doc__ cli print_columnized
__file__ columnize print_filenames
__license__ config print_sorted
__name__ core
__package__ formatters
Note that by default the terminal's width is used as the line width.
And of course, you can columnize arbitrary names with `shcol`:
>>> shcol.print_columnized(['foo', 'bar', 'baz'], spacing=7)
foo bar baz
>>> shcol.print_columnized(['foo', 'bar', 'baz'], spacing=7, sort_items=True)
bar baz foo
The following example demonstrates that sorting is locale-aware. Note the
German umlaut in it. Hint: You need German as your default locale setting
to reproduce that in your Python interpreter:
>>> shcol.print_columnized(['foo', 'bär', 'baz'], sort_items=True)
bär baz foo
You can see that `shcol` handles Unicode-characters as you would expect it.
In case you need the raw columnized string you can get that directly:
>>> shcol.columnize(['foo', 'bär', 'baz'], sort_items=True) # on Python 2.7
u'b\\xe4r baz foo'
>>> shcol.columnize(['foo', 'bär', 'baz'], sort_items=True) # on Python 3.x
'bär baz foo'
`shcol` has its focus on usability and speed. Even large lists will be
rendered relatively fast (like ``shcol.print_filenames('/usr/bin')``).
Just give it a try if you like it and feel free to give some feedback. :-)
"""
# Package metadata.
__author__ = 'Sebastian Linke'
__version__ = '0.4a'
__license__ = 'Simplified BSD'

# Re-export the public API at package level.
from . import cli
from .core import *
from .highlevel import *

if __name__ == '__main__':
    # Allow running the package directly to reach the command-line interface.
    cli.main()
| bsd-2-clause |
nabsboss/CouchPotatoServer | couchpotato/core/providers/metadata/xbmc/__init__.py | 18 | 2218 | from .main import XBMC
def start():
    """Plugin entry point: return a fresh XBMC metadata provider instance."""
    return XBMC()
# Settings rendered on the Renamer > Metadata tab; each option becomes a
# meta_* key in the 'xbmc' config section read by the metadata generator.
config = [{
    'name': 'xbmc',
    'groups': [
        {
            'tab': 'renamer',
            'subtab': 'metadata',
            'name': 'xbmc_metadata',
            'label': 'XBMC',
            'description': 'Enable metadata XBMC can understand',
            'options': [
                {
                    'name': 'meta_enabled',
                    'default': False,
                    'type': 'enabler',
                },
                {
                    'name': 'meta_nfo',
                    'label': 'NFO',
                    'default': True,
                    'type': 'bool',
                },
                {
                    'name': 'meta_nfo_name',
                    'label': 'NFO filename',
                    'default': '%s.nfo',
                    'advanced': True,
                    'description': '<strong>%s</strong> is the rootname of the movie. For example "/path/to/movie cd1.mkv" will be "/path/to/movie"'
                },
                {
                    'name': 'meta_url_only',
                    'label': 'Only IMDB URL',
                    'default': False,
                    'advanced': True,
                    'description': 'Create a nfo with only the IMDB url inside',
                    'type': 'bool',
                },
                {
                    'name': 'meta_fanart',
                    'label': 'Fanart',
                    'default': True,
                    'type': 'bool',
                },
                {
                    'name': 'meta_fanart_name',
                    'label': 'Fanart filename',
                    'default': '%s-fanart.jpg',
                    'advanced': True,
                },
                {
                    'name': 'meta_thumbnail',
                    'label': 'Thumbnail',
                    'default': True,
                    'type': 'bool',
                },
                {
                    'name': 'meta_thumbnail_name',
                    'label': 'Thumbnail filename',
                    'default': '%s.tbn',
                    'advanced': True,
                },
            ],
        },
    ],
}]
| gpl-3.0 |
RTS2/rts2 | scripts/rts2saf/unittest/test_analyze.py | 4 | 6064 | # (C) 2013, Markus Wildi, markus.wildi@bluewin.ch
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Or visit http://www.gnu.org/licenses/gpl.html.
#
import unittest
import glob
import os
from rts2saf.config import Configuration
from rts2saf.analyze import SimpleAnalysis, CatalogAnalysis
from rts2saf.sextract import Sextract
from rts2saf.environ import Environment
import logging
# Ensure the log directory exists before basicConfig opens the log file.
if not os.path.isdir('/tmp/rts2saf_log'):
    os.mkdir('/tmp/rts2saf_log')

logging.basicConfig(filename='/tmp/rts2saf_log/unittest.log', level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')
logger = logging.getLogger()  # shared root logger for all test cases below
# sequence matters
def suite_simple():
    """Build the SimpleAnalysis suite; test order is significant."""
    names = ('test_readConfiguration', 'test_fitsInBasepath', 'test_analyze')
    suite = unittest.TestSuite()
    for name in names:
        suite.addTest(TestSimpleAnalysis(name))
    return suite
def suite_catalog():
    """Build the CatalogAnalysis suite; test order is significant."""
    names = ('test_readConfiguration', 'test_fitsInBasepath',
             'test_selectAndAnalyze')
    suite = unittest.TestSuite()
    for name in names:
        suite.addTest(TestCatalogAnalysis(name))
    return suite
#@unittest.skip('class not yet implemented')
class TestSimpleAnalysis(unittest.TestCase):
    """SimpleAnalysis over the sample FITS focus run (fixture-based)."""

    def tearDown(self):
        pass

    def setUp(self):
        # Fresh configuration and environment for each test.
        self.rt = Configuration(logger=logger)
        self.fileName='./rts2saf-flux.cfg'
        self.success=self.rt.readConfiguration(fileName=self.fileName)
        self.ev=Environment(debug=False, rt=self.rt,logger=logger)

    #@unittest.skip('feature not yet implemented')
    def test_readConfiguration(self):
        logger.info('== {} =='.format(self._testMethodName))
        self.assertTrue(self.success, 'config file: {} faulty or not found, return value: {}'.format(self.fileName, self.success))

    def test_fitsInBasepath(self):
        # The sample directory ships exactly 14 FITS files.
        logger.info('== {} =='.format(self._testMethodName))
        fitsFns=glob.glob('{0}/{1}'.format('../samples', self.rt.cfg['FILE_GLOB']))
        self.assertEqual(len(fitsFns), 14, 'return value: {}'.format(len(fitsFns)))

    def test_analyze(self):
        logger.info('== {} =='.format(self._testMethodName))
        fitsFns=glob.glob('{0}/{1}'.format('../samples', self.rt.cfg['FILE_GLOB']))
        # SExtract every sample file; each should yield a data set.
        dataSxtr=list()
        for k, fitsFn in enumerate(fitsFns):
            logger.info('analyze: processing fits file: {0}'.format(fitsFn))
            sxtr= Sextract(debug=False, rt=self.rt, logger=logger)
            dSx=sxtr.sextract(fitsFn=fitsFn)
            if dSx:
                dataSxtr.append(dSx)
        self.assertEqual(len(dataSxtr), 14, 'return value: {}'.format(len(dataSxtr)))
        an=SimpleAnalysis(debug=False, dataSxtr=dataSxtr, Ds9Display=False, FitDisplay=False, focRes=float(self.rt.cfg['FOCUSER_RESOLUTION']), ev=self.ev, rt=self.rt,logger=logger)
        resultFitFwhm, resultMeansFwhm, resultFitFlux, resultMeansFlux=an.analyze()
        # Expected minimum of the FWHM fit for this sample data set.
        self.assertAlmostEqual(resultFitFwhm.extrFitVal, 2.2175214358, places=2, msg='return value: {}'.format(resultFitFwhm.extrFitVal))
        an.display()
#@unittest.skip('class not yet implemented')
class TestCatalogAnalysis(unittest.TestCase):
    """CatalogAnalysis over the sample FITS files, no-filter-wheel config."""

    def tearDown(self):
        pass

    def setUp(self):
        # Fresh configuration and environment for each test.
        self.rt = Configuration(logger=logger)
        self.fileName='./rts2saf-no-filter-wheel.cfg'
        self.success=self.rt.readConfiguration(fileName=self.fileName)
        self.ev=Environment(debug=False, rt=self.rt,logger=logger)

    def test_readConfiguration(self):
        logger.info('== {} =='.format(self._testMethodName))
        self.assertTrue(self.success, 'config file: {} faulty or not found, return value: {}'.format(self.fileName, self.success))

    def test_fitsInBasepath(self):
        # The sample directory ships exactly 14 FITS files.
        logger.info('== {} =='.format(self._testMethodName))
        fitsFns=glob.glob('{0}/{1}'.format('../samples', self.rt.cfg['FILE_GLOB']))
        self.assertEqual(len(fitsFns), 14, 'return value: {}'.format(len(fitsFns)))

    #@unittest.skip('feature not yet implemented')
    def test_selectAndAnalyze(self):
        logger.info('== {} =='.format(self._testMethodName))
        fitsFns=glob.glob('{0}/{1}'.format('../samples', self.rt.cfg['FILE_GLOB']))
        dataSxtr=list()
        for k, fitsFn in enumerate(fitsFns):
            logger.info('analyze: processing fits file: {0}'.format(fitsFn))
            sxtr= Sextract(debug=False, rt=self.rt, logger=logger)
            dSx=sxtr.sextract(fitsFn=fitsFn)
            if dSx:
                dataSxtr.append(dSx)
        self.assertEqual(len(dataSxtr), 14, 'return value: {}'.format(len(dataSxtr)))
        an=CatalogAnalysis(debug=False, dataSxtr=dataSxtr, Ds9Display=False, FitDisplay=False, focRes=float(self.rt.cfg['FOCUSER_RESOLUTION']), moduleName='rts2saf.criteria_radius', ev=self.ev, rt=self.rt, logger=logger)
        accRFt, rejRFt, allrFt, accRMns, recRMns, allRMns=an.selectAndAnalyze()
        # Fit minima for the full set and the radius-accepted subset.
        self.assertAlmostEqual(allrFt.extrFitVal, 2.2175214358, delta=0.1, msg='return value: {}'.format(allrFt.extrFitVal))
        # NOTE(review): the msg below formats allrFt, not accRFt — looks like
        # a copy-paste slip; the asserted value itself is accRFt.
        self.assertAlmostEqual(accRFt.extrFitVal, 2.24000979001, delta=0.1, msg='return value: {}'.format(allrFt.extrFitVal))
if __name__ == '__main__':
    # Combine both suites and run them with a quiet text runner.
    combined = unittest.TestSuite([suite_simple(), suite_catalog()])
    runner = unittest.TextTestRunner(verbosity=0)
    runner.run(combined)
| lgpl-3.0 |
KennyCandy/HAR | _module123/C_64_32.py | 1 | 17396 | # Note that the dataset must be already downloaded for this script to work, do:
# $ cd data/
# $ python download_dataset.py
# quoc_trinh
import tensorflow as tf
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn import metrics
import os
import sys
import datetime
# get current file_name (script name without extension) as [0] of array;
# it is used below as the output/checkpoint directory name.
file_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
print(" File Name:")
print(file_name)
print("")

# FLAG selects whether this run trains a new model ('train') or restores
# and evaluates a previously saved checkpoint (any other value).
FLAG = 'train'
# Number of hidden LSTM units per layer.
N_HIDDEN_CONFIG = 32

save_path_name = file_name + "/model.ckpt"

print(datetime.datetime.now())
# Write to file: time to start, type, time to end.
# NOTE(review): this handle stays open for the whole script and is only
# closed at the very end; a crash mid-run leaves the log unflushed.
f = open(file_name + '/time.txt', 'a+')
f.write("------------- \n")
f.write("This is time \n")
f.write("Started at \n")
f.write(str(datetime.datetime.now())+'\n')
if __name__ == "__main__":
# -----------------------------
# step1: load and prepare data
# -----------------------------
# Those are separate normalised input features for the neural network
INPUT_SIGNAL_TYPES = [
"body_acc_x_",
"body_acc_y_",
"body_acc_z_",
"body_gyro_x_",
"body_gyro_y_",
"body_gyro_z_",
"total_acc_x_",
"total_acc_y_",
"total_acc_z_"
]
# Output classes to learn how to classify
LABELS = [
"WALKING",
"WALKING_UPSTAIRS",
"WALKING_DOWNSTAIRS",
"SITTING",
"STANDING",
"LAYING"
]
DATA_PATH = "../data/"
DATASET_PATH = DATA_PATH + "UCI HAR Dataset/"
print("\n" + "Dataset is now located at: " + DATASET_PATH)
# Preparing data set:
TRAIN = "train/"
TEST = "test/"
# Load "X" (the neural network's training and testing inputs)
def load_X(X_signals_paths):
    """Load and stack the per-signal-type input files.

    argument:
        X_signals_paths: list of file paths, one per signal type; each
            file holds whitespace-separated floats, one sample per row.
    return:
        ndarray of shape [sample_num, time_steps, n_signals]
        (np.transpose moves the signal axis to the end).

    Examples
    --------
    >> > x = np.ones((1, 2, 3))
    >> > np.transpose(x, (1, 0, 2)).shape
    (2, 1, 3)
    """
    X_signals = []
    for signal_type_path in X_signals_paths:
        # BUG FIX: the file was opened in binary mode, shadowed the `file`
        # builtin, and was never closed on error; under Python 3 calling
        # str.replace on bytes raised TypeError.  A `with` block in text
        # mode closes the handle deterministically, and split() collapses
        # any run of whitespace (the old code normalised repeated spaces
        # by hand before splitting on a single space).
        with open(signal_type_path, 'r') as signal_file:
            X_signals.append(
                [np.array(serie, dtype=np.float32) for serie in [
                    row.strip().split() for row in signal_file
                ]]
            )
    # Reorder axes: [n_signals, sample_num, time_steps]
    # -> [sample_num, time_steps, n_signals]
    return np.transpose(np.array(X_signals), (1, 2, 0))
X_train_signals_paths = [
DATASET_PATH + TRAIN + "Inertial Signals/" + signal + "train.txt" for signal in INPUT_SIGNAL_TYPES
]
X_test_signals_paths = [
DATASET_PATH + TEST + "Inertial Signals/" + signal + "test.txt" for signal in INPUT_SIGNAL_TYPES
]
X_train = load_X(X_train_signals_paths) # [7352, 128, 9]
X_test = load_X(X_test_signals_paths) # [7352, 128, 9]
# print(X_train)
print(len(X_train)) # 7352
print(len(X_train[0])) # 128
print(len(X_train[0][0])) # 9
print(type(X_train))
X_train = np.reshape(X_train, [-1, 32, 36])
X_test = np.reshape(X_test, [-1, 32, 36])
print("-----------------X_train---------------")
# print(X_train)
print(len(X_train)) # 7352
print(len(X_train[0])) # 32
print(len(X_train[0][0])) # 36
print(type(X_train))
# exit()
y_train_path = DATASET_PATH + TRAIN + "y_train.txt"
y_test_path = DATASET_PATH + TEST + "y_test.txt"
def one_hot(label):
    """Convert a dense label vector to a one-hot matrix.

    argument:
        label: ndarray of dense labels, shape [sample_num, 1]
    return:
        one_hot_label: ndarray one hot, shape [sample_num, n_class]
    """
    flat = label.reshape(len(label))      # shape: [sample_num]
    # Labels are 0-based, so max label 5 yields 6 one-hot columns.
    n_classes = np.max(flat) + 1
    identity = np.eye(n_classes)
    return identity[np.array(flat, dtype=np.int32)]
# Load "y" (the neural network's training and testing outputs)
def load_y(y_path):
    """Load dense class labels and shift them to 0-based indexing.

    argument:
        y_path: path to a text file with one integer class label per row.
    return:
        ndarray of shape [sample_num, 1], each label decremented by 1.
    """
    # BUG FIX: same defect as load_X — the file was opened 'rb' (bytes,
    # breaking str.replace under Python 3) and leaked on error.  Also
    # removes the redundant identity list comprehension around the rows.
    with open(y_path, 'r') as y_file:
        y_ = np.array(
            [row.strip().split() for row in y_file],
            dtype=np.int32
        )
    # Subtract 1 from each output class for friendly 0-based indexing
    return y_ - 1
y_train = one_hot(load_y(y_train_path))
y_test = one_hot(load_y(y_test_path))
print("---------y_train----------")
# print(y_train)
print(len(y_train)) # 7352
print(len(y_train[0])) # 6
# -----------------------------------
# step2: define parameters for model
# -----------------------------------
class Config(object):
    """
    define a class to store parameters,
    the input should be feature mat of training and testing
    """

    def __init__(self, X_train, X_test):
        # Input data
        self.train_count = len(X_train)  # 7352 training series
        self.test_data_count = len(X_test)  # 2947 testing series
        # NOTE(review): the arrays were reshaped to [-1, 32, 36] earlier in
        # the script, so n_steps is 32 here, not the 128 the comment below
        # suggests — confirm which layout is intended.
        self.n_steps = len(X_train[0])  # 128 time_steps per series

        # Training hyper-parameters
        self.learning_rate = 0.0025
        self.lambda_loss_amount = 0.0015  # L2 regularisation strength
        self.training_epochs = 300
        self.batch_size = 1000

        # LSTM structure
        self.n_inputs = len(X_train[0][0])  # Features count is of 9: three 3D sensors features over time
        self.n_hidden = N_HIDDEN_CONFIG  # nb of neurons inside the neural network
        self.n_classes = 6  # Final output classes
        # Input-to-hidden and hidden-to-output weight matrices.
        self.W = {
            'hidden': tf.Variable(tf.random_normal([self.n_inputs, self.n_hidden])),  # [9, 32]
            'output': tf.Variable(tf.random_normal([self.n_hidden, self.n_classes]))  # [32, 6]
        }
        # Hidden bias is centred at 1.0 (common trick to keep early ReLU
        # activations alive); output bias is zero-centred.
        self.biases = {
            'hidden': tf.Variable(tf.random_normal([self.n_hidden], mean=1.0)),  # [32]
            'output': tf.Variable(tf.random_normal([self.n_classes]))  # [6]
        }
config = Config(X_train, X_test)
# print("Some useful info to get an insight on dataset's shape and normalisation:")
# print("features shape, labels shape, each features mean, each features standard deviation")
# print(X_test.shape, y_test.shape,
# np.mean(X_test), np.std(X_test))
# print("the dataset is therefore properly normalised, as expected.")
#
#
# ------------------------------------------------------
# step3: Let's get serious and build the neural network
# ------------------------------------------------------
# [none, 128, 9]
X = tf.placeholder(tf.float32, [None, config.n_steps, config.n_inputs])
# [none, 6]
Y = tf.placeholder(tf.float32, [None, config.n_classes])
print("-------X Y----------")
print(X)
X = tf.reshape(X, shape=[-1, 32, 36])
print(X)
print(Y)
Y = tf.reshape(Y, shape=[-1, 6])
print(Y)
# Weight Initialization
def weight_variable(shape):
    # Return a Variable initialised from a truncated normal distribution
    # (mean 0.0, stddev 0.1).  (Comment translated from the original
    # Vietnamese note.)
    return tf.Variable(
        tf.truncated_normal(shape, mean=0.0, stddev=0.1, dtype=tf.float32))
def bias_varibale(shape):
    # Return a bias Variable filled with the constant 0.1.
    # NOTE(review): the misspelled name ("varibale") is kept because
    # callers elsewhere in this file reference it.
    return tf.Variable(tf.constant(0.1, shape=shape, name='Bias'))
# Convolution and Pooling
def conv2d(x, W):
    """2-D convolution with unit strides and SAME padding (shape preserving)."""
    # Must have `strides[0] = strides[3] = 1 `.
    # For the most common case of the same horizontal and vertices strides, `strides = [1, stride, stride, 1] `.
    return tf.nn.conv2d(input=x, filter=W, strides=[1, 1, 1, 1], padding='SAME', name='conv_2d')
def max_pool_2x2(x):
    # 2x2 max pooling window.  NOTE(review): strides are [1, 1, 1, 1], so
    # despite the name this does NOT downsample — the later reshape back to
    # [-1, 32, 36] depends on the size being preserved; confirm intent.
    return tf.nn.max_pool(value=x, ksize=[1, 2, 2, 1],
                          strides=[1, 1, 1, 1], padding='SAME', name='max_pool')
def LSTM_Network(feature_mat, config):
    """model a LSTM Network,

    it stacks 2 LSTM layers, each layer has n_hidden=32 cells
    and 1 output layer, it is a fully connected layer.  Two small
    convolutional layers preprocess the [32, 36] feature map first.

    argument:
        feature_mat: ndarray feature matrix, shape=[batch_size,time_steps,n_inputs]
        config: class containing config of network
    return:
        : matrix output shape [batch_size,n_classes]

    NOTE(review): tf.split(0, n, x) and tf.nn.rnn use the pre-1.0
    TensorFlow API; they were renamed/reordered in TF >= 1.0.
    """
    # First conv layer: 3x3 kernel, 1 input channel -> 64 feature maps.
    W_conv1 = weight_variable([3, 3, 1, 64])
    b_conv1 = bias_varibale([64])
    # x_image = tf.reshape(x, shape=[-1, 28, 28, 1])
    # Treat each series as a single-channel 32x36 "image".
    feature_mat_image = tf.reshape(feature_mat, shape=[-1, 32, 36, 1])
    print("----feature_mat_image-----")
    print(feature_mat_image.get_shape())
    h_conv1 = tf.nn.relu(conv2d(feature_mat_image, W_conv1) + b_conv1)
    # max_pool_2x2 uses stride 1, so the 32x36 spatial size is preserved.
    h_pool1 = max_pool_2x2(h_conv1)

    # Second Convolutional Layer: collapse the 64 channels back to 1.
    W_conv2 = weight_variable([3, 3, 64, 1])
    # NOTE(review): this bias is created with weight_variable (random
    # init) rather than bias_varibale (constant 0.1) — confirm intent.
    b_conv2 = weight_variable([1])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = h_conv2
    # Back to [batch_size, 32, 36] for the recurrent part.
    h_pool2 = tf.reshape(h_pool2, shape=[-1, 32, 36])
    feature_mat = h_pool2
    print("----feature_mat-----")
    print(feature_mat)
    # exit()

    # W_fc1 = weight_variable([8 * 9 * 1, 1024])
    # b_fc1 = bias_varibale([1024])
    # h_pool2_flat = tf.reshape(h_pool2, [-1, 8 * 9 * 1])
    # h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
    # print("----h_fc1_drop-----")
    # print(h_fc1)
    # exit()
    #
    # # keep_prob = tf.placeholder(tf.float32)
    # keep_prob = tf.placeholder(1.0)
    # h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob=keep_prob)
    # print("----h_fc1_drop-----")
    # print(h_fc1_drop)
    # exit()
    #
    # W_fc2 = weight_variable([1024, 10])
    # b_fc2 = bias_varibale([10])
    #
    # y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
    # print("----y_conv-----")
    # print(y_conv)
    # exit()

    # Exchange dim 1 and dim 0
    # Start at: [0,1,2] = [batch_size, 128, 9] => [batch_size, 32, 36]
    feature_mat = tf.transpose(feature_mat, [1, 0, 2])
    # New feature_mat's shape: [time_steps, batch_size, n_inputs] [128, batch_size, 9]
    print("----feature_mat-----")
    print(feature_mat)
    # exit()

    # Temporarily crush the feature_mat's dimensions
    feature_mat = tf.reshape(feature_mat, [-1, config.n_inputs])  # 9
    # New feature_mat's shape: [time_steps*batch_size, n_inputs] # 128 * batch_size, 9

    # Linear activation, reshaping inputs to the LSTM's number of hidden:
    hidden = tf.nn.relu(tf.matmul(
        feature_mat, config.W['hidden']
    ) + config.biases['hidden'])
    # New feature_mat (hidden) shape: [time_steps*batch_size, n_hidden] [128*batch_size, 32]

    print("--n_steps--")
    print(config.n_steps)
    print("--hidden--")
    print(hidden)

    # Split the series because the rnn cell needs time_steps features, each of shape:
    hidden = tf.split(0, config.n_steps, hidden)  # (0, 128, [128*batch_size, 32])
    # New hidden's shape: a list of length "time_step" containing tensors of shape [batch_size, n_hidden]

    # Define LSTM cell of first hidden layer:
    lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(config.n_hidden, forget_bias=1.0)

    # Stack two LSTM layers, both layers has the same shape
    lsmt_layers = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * 2)

    # Get LSTM outputs, the states are internal to the LSTM cells, they are not our attention here
    outputs, _ = tf.nn.rnn(lsmt_layers, hidden, dtype=tf.float32)
    # outputs' shape: a list of length "time_step" containing tensors of shape [batch_size, n_hidden]

    print("------------------list-------------------")
    print(outputs)

    # Get last time step's output feature for a "many to one" style classifier,
    # as in the image describing RNNs at the top of this page
    lstm_last_output = outputs[-1]  # Get the last element of the array: [?, 32]

    print("------------------last outputs-------------------")
    print (lstm_last_output)

    # Linear activation
    return tf.matmul(lstm_last_output, config.W['output']) + config.biases['output']
pred_Y = LSTM_Network(X, config) # shape[?,6]
print("------------------pred_Y-------------------")
print(pred_Y)
# Loss,train_step,evaluation
l2 = config.lambda_loss_amount * \
sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables())
# Softmax loss and L2
cost = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(pred_Y, Y)) + l2
train_step = tf.train.AdamOptimizer(
learning_rate=config.learning_rate).minimize(cost)
correct_prediction = tf.equal(tf.argmax(pred_Y, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, dtype=tf.float32))
# --------------------------------------------
# step4: Hooray, now train the neural network
# --------------------------------------------
# Note that log_device_placement can be turned ON but will cause console spam.
# Initializing the variables
init = tf.initialize_all_variables()
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
best_accuracy = 0.0
# sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=False))
if (FLAG == 'train') : # If it is the training mode
with tf.Session() as sess:
# tf.initialize_all_variables().run()
sess.run(init) # .run()
f.write("---Save model \n")
# Start training for each batch and loop epochs
for i in range(config.training_epochs):
for start, end in zip(range(0, config.train_count, config.batch_size), # (0, 7352, 1500)
range(config.batch_size, config.train_count + 1,
config.batch_size)): # (1500, 7353, 1500)
print(start)
print(end)
sess.run(train_step, feed_dict={X: X_train[start:end],
Y: y_train[start:end]})
# Test completely at every epoch: calculate accuracy
pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost], feed_dict={
X: X_test, Y: y_test})
print("traing iter: {},".format(i) + \
" test accuracy : {},".format(accuracy_out) + \
" loss : {}".format(loss_out))
best_accuracy = max(best_accuracy, accuracy_out)
# Save the model in this session
save_path = saver.save(sess, file_name + "/model.ckpt")
print("Model saved in file: %s" % save_path)
print("")
print("final loss: {}").format(loss_out)
print("final test accuracy: {}".format(accuracy_out))
print("best epoch's test accuracy: {}".format(best_accuracy))
print("")
# Write all output to file
f.write("final loss:" + str(format(loss_out)) +" \n")
f.write("final test accuracy:" + str(format(accuracy_out)) +" \n")
f.write("best epoch's test accuracy:" + str(format(best_accuracy)) + " \n")
else :
# Running a new session
print("Starting 2nd session...")
with tf.Session() as sess:
# Initialize variables
sess.run(init)
f.write("---Restore model \n")
# Restore model weights from previously saved model
saver.restore(sess, file_name+ "/model.ckpt")
print("Model restored from file: %s" % save_path_name)
# Test completely at every epoch: calculate accuracy
pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost], feed_dict={
X: X_test, Y: y_test})
# print("traing iter: {}," + \
# " test accuracy : {},".format(accuracy_out) + \
# " loss : {}".format(loss_out))
best_accuracy = max(best_accuracy, accuracy_out)
print("")
print("final loss: {}").format(loss_out)
print("final test accuracy: {}".format(accuracy_out))
print("best epoch's test accuracy: {}".format(best_accuracy))
print("")
# Write all output to file
f.write("final loss:" + str(format(loss_out)) +" \n")
f.write("final test accuracy:" + str(format(accuracy_out)) +" \n")
f.write("best epoch's test accuracy:" + str(format(best_accuracy)) + " \n")
#
# #------------------------------------------------------------------
# # step5: Training is good, but having visual insight is even better
# #------------------------------------------------------------------
# # The code is in the .ipynb
#
# #------------------------------------------------------------------
# # step6: And finally, the multi-class confusion matrix and metrics!
# #------------------------------------------------------------------
# # The code is in the .ipynb
f.write("Ended at \n")
f.write(str(datetime.datetime.now())+'\n')
f.write("------------- \n")
f.close() | mit |
torgartor21/solar | solar/solar/test/test_resource.py | 1 | 3365 | # Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base
from solar.core import resource
from solar.core import signals
class TestResource(base.BaseResourceTest):
    """Unit tests for resource creation, loading, connections and removal.

    NOTE(review): the YAML meta strings below lost their original
    indentation in this copy; the nesting (input -> value -> schema/value)
    has been reconstructed — verify against the upstream file.
    """

    def test_resource_args(self):
        # A resource created with an explicit arg must expose it via .args;
        # omitting the arg must fall back to the schema default.
        sample_meta_dir = self.make_resource_meta("""
id: sample
handler: ansible
version: 1.0.0
input:
  value:
    schema: int
    value: 0
""")

        sample1 = self.create_resource(
            'sample1', sample_meta_dir, {'value': 1}
        )
        self.assertEqual(sample1.args['value'], 1)

        # test default value
        sample2 = self.create_resource('sample2', sample_meta_dir, {})
        self.assertEqual(sample2.args['value'], 0)

    def test_connections_recreated_after_load(self):
        """
        Create resource in some process. Then in other process load it.
        All connections should remain the same.
        """
        sample_meta_dir = self.make_resource_meta("""
id: sample
handler: ansible
version: 1.0.0
input:
  value:
    schema: int
    value: 0
""")

        def creating_process():
            # Simulates the first process: create two resources and wire
            # sample1 -> sample2 so values propagate.
            sample1 = self.create_resource(
                'sample1', sample_meta_dir, {'value': 1}
            )
            sample2 = self.create_resource(
                'sample2', sample_meta_dir, {}
            )
            signals.connect(sample1, sample2)
            self.assertEqual(sample1.args['value'], sample2.args['value'])

        creating_process()
        # Drop the in-memory client cache to simulate a fresh process.
        signals.CLIENTS = {}

        sample1 = resource.load('sample1')
        sample2 = resource.load('sample2')
        # Updating the source must still propagate to the connected target.
        sample1.update({'value': 2})
        self.assertEqual(sample1.args['value'], sample2.args['value'])

    def test_load(self):
        # A loaded resource must round-trip args and tags unchanged.
        sample_meta_dir = self.make_resource_meta("""
id: sample
handler: ansible
version: 1.0.0
input:
  value:
    schema: int
    value: 0
""")

        sample = self.create_resource(
            'sample', sample_meta_dir, {'value': 1}
        )
        sample_l = resource.load('sample')

        self.assertDictEqual(sample.args, sample_l.args)
        self.assertListEqual(sample.tags, sample_l.tags)

    def test_removal(self):
        """Test that connection removed with resource."""
        sample_meta_dir = self.make_resource_meta("""
id: sample
handler: ansible
version: 1.0.0
input:
  value:
    schema: int
    value: 0
""")

        sample1 = self.create_resource(
            'sample1', sample_meta_dir, {'value': 1}
        )
        sample2 = self.create_resource(
            'sample2', sample_meta_dir, {}
        )
        signals.connect(sample1, sample2)
        self.assertEqual(sample1.args['value'], sample2.args['value'])

        sample1 = resource.load('sample1')
        sample2 = resource.load('sample2')
        # Deleting the source must sever the connection: the target falls
        # back to its own (default) value.
        sample1.delete()
        self.assertEqual(sample2.args['value'], 0)
| apache-2.0 |
erjohnso/ansible | lib/ansible/modules/commands/shell.py | 44 | 5468 | # Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# # There is no actual shell module source, when you use 'shell' in ansible,
# it runs the 'command' module with special arguments and it behaves differently.
# See the command source and the comment "#USE_SHELL".
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Metadata consumed by Ansible's documentation tooling: declares the
# metadata schema version, the interface stability, and the support tier
# for this core module.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'core'}
DOCUMENTATION = '''
---
module: shell
short_description: Execute commands in nodes.
description:
- The C(shell) module takes the command name followed by a list of space-delimited arguments.
It is almost exactly like the M(command) module but runs
the command through a shell (C(/bin/sh)) on the remote node.
- For Windows targets, use the M(win_shell) module instead.
version_added: "0.2"
options:
free_form:
description:
- The shell module takes a free form command to run, as a string. There's not an actual
option named "free form". See the examples!
required: true
default: null
creates:
description:
- a filename, when it already exists, this step will B(not) be run.
required: no
default: null
removes:
description:
- a filename, when it does not exist, this step will B(not) be run.
version_added: "0.8"
required: no
default: null
chdir:
description:
- cd into this directory before running the command
required: false
default: null
version_added: "0.6"
executable:
description:
- change the shell used to execute the command. Should be an absolute path to the executable.
required: false
default: null
version_added: "0.9"
warn:
description:
- if command warnings are on in ansible.cfg, do not warn about this particular line if set to no/false.
required: false
default: True
version_added: "1.8"
stdin:
version_added: "2.4"
description:
- Set the stdin of the command directly to the specified value.
required: false
default: null
notes:
- If you want to execute a command securely and predictably, it may be
better to use the M(command) module instead. Best practices when writing
playbooks will follow the trend of using M(command) unless the C(shell)
module is explicitly required. When running ad-hoc commands, use your best
judgement.
- To sanitize any variables passed to the shell module, you should use
"{{ var | quote }}" instead of just "{{ var }}" to make sure they don't include evil things like semicolons.
- For Windows targets, use the M(win_shell) module instead.
requirements: [ ]
author:
- Ansible Core Team
- Michael DeHaan
'''
EXAMPLES = '''
- name: Execute the command in remote shell; stdout goes to the specified file on the remote.
shell: somescript.sh >> somelog.txt
- name: Change the working directory to somedir/ before executing the command.
shell: somescript.sh >> somelog.txt
args:
chdir: somedir/
# You can also use the 'args' form to provide the options.
- name: This command will change the working directory to somedir/ and will only run when somedir/somelog.txt doesn't exist.
shell: somescript.sh >> somelog.txt
args:
chdir: somedir/
creates: somelog.txt
- name: Run a command that uses non-posix shell-isms (in this example /bin/sh doesn't handle redirection and wildcards together but bash does)
shell: cat < /tmp/*txt
args:
executable: /bin/bash
- name: Run a command using a templated variable (always use quote filter to avoid injection)
shell: cat {{ myfile|quote }}
# You can use shell to run other executables to perform actions inline
- name: Run expect to wait for a successful PXE boot via out-of-band CIMC
shell: |
set timeout 300
spawn ssh admin@{{ cimc_host }}
expect "password:"
send "{{ cimc_password }}\\n"
expect "\\n{{ cimc_name }}"
send "connect host\\n"
expect "pxeboot.n12"
send "\\n"
exit 0
args:
executable: /usr/bin/expect
delegate_to: localhost
'''
RETURN = '''
msg:
description: changed
returned: always
type: boolean
sample: True
start:
description: The command execution start time
returned: always
type: string
sample: '2016-02-25 09:18:26.429568'
end:
description: The command execution end time
returned: always
type: string
sample: '2016-02-25 09:18:26.755339'
delta:
description: The command execution delta time
returned: always
type: string
sample: '0:00:00.325771'
stdout:
description: The command standard output
returned: always
type: string
sample: 'Clustering node rabbit@slave1 with rabbit@master ...'
stderr:
description: The command standard error
returned: always
type: string
sample: 'ls: cannot access foo: No such file or directory'
cmd:
description: The command executed by the task
returned: always
type: string
sample: 'rabbitmqctl join_cluster rabbit@master'
rc:
description: The command return code (0 means success)
returned: always
type: int
sample: 0
stdout_lines:
description: The command standard output split in lines
returned: always
type: list
sample: [u'Clustering node rabbit@slave1 with rabbit@master ...']
'''
| gpl-3.0 |
BerserkerTroll/root | interpreter/llvm/src/utils/lit/lit/Test.py | 26 | 11948 | import os
from xml.sax.saxutils import escape
from json import JSONEncoder
from lit.BooleanExpression import BooleanExpression
# Test result codes.
class ResultCode(object):
    """Test result codes."""

    # We override __new__ and __getnewargs__ to ensure that pickling still
    # provides unique ResultCode objects in any particular instance.
    _instances = {}

    def __new__(cls, name, isFailure):
        # Hand back the cached instance for this name, creating it on
        # first use so equal names compare identical (`is`).
        cached = cls._instances.get(name)
        if cached is None:
            cached = super(ResultCode, cls).__new__(cls)
            cls._instances[name] = cached
        return cached

    def __getnewargs__(self):
        # Reconstruct through __new__ on unpickle, preserving uniqueness.
        return (self.name, self.isFailure)

    def __init__(self, name, isFailure):
        self.name = name
        self.isFailure = isFailure

    def __repr__(self):
        return '%s%r' % (type(self).__name__, (self.name, self.isFailure))
# Canonical result-code singletons; the second argument marks whether the
# code counts as a failure for reporting purposes.
PASS = ResultCode('PASS', False)
FLAKYPASS = ResultCode('FLAKYPASS', False)
XFAIL = ResultCode('XFAIL', False)
FAIL = ResultCode('FAIL', True)
XPASS = ResultCode('XPASS', True)
UNRESOLVED = ResultCode('UNRESOLVED', True)
UNSUPPORTED = ResultCode('UNSUPPORTED', False)
TIMEOUT = ResultCode('TIMEOUT', True)
# Test metric values.
class MetricValue(object):
    """Abstract base class for test metric values."""

    def format(self):
        """
        format() -> str

        Convert this metric to a string suitable for displaying as part of
        the console output.
        """
        raise RuntimeError("abstract method")

    def todata(self):
        """
        todata() -> json-serializable data

        Convert this metric to content suitable for serializing in the
        JSON test output.
        """
        raise RuntimeError("abstract method")


class IntMetricValue(MetricValue):
    """An integer-valued metric."""

    def __init__(self, value):
        self.value = value

    def format(self):
        return str(self.value)

    def todata(self):
        return self.value


class RealMetricValue(MetricValue):
    """A floating-point metric, displayed with four decimal places."""

    def __init__(self, value):
        self.value = value

    def format(self):
        return '%.4f' % self.value

    def todata(self):
        return self.value


class JSONMetricValue(MetricValue):
    """
    JSONMetricValue is used for types that are representable in the output
    but that are otherwise uninterpreted.
    """

    def __init__(self, value):
        # Ensure the value is serializable by trying to encode it now.
        # WARNING: The value may change before it is encoded again, and may
        # not be encodable after the change.
        JSONEncoder().encode(value)  # raises TypeError for bad input
        self.value = value

    def format(self):
        encoder = JSONEncoder(indent=2, sort_keys=True)
        return encoder.encode(self.value)

    def todata(self):
        return self.value


def toMetricValue(value):
    """Coerce *value* to an appropriate MetricValue subclass instance."""
    if isinstance(value, MetricValue):
        return value
    if isinstance(value, int):
        return IntMetricValue(value)
    if isinstance(value, float):
        return RealMetricValue(value)
    # 'long' is only present in python2.
    try:
        if isinstance(value, long):
            return IntMetricValue(value)
    except NameError:
        pass
    # Fall back to JSON; the constructor raises TypeError if the value is
    # not encodable.
    return JSONMetricValue(value)
# Test results.
class Result(object):
    """Wrapper for the results of executing an individual test."""

    def __init__(self, code, output='', elapsed=None):
        self.code = code          # The result code.
        self.output = output      # The test output.
        self.elapsed = elapsed    # Wall time to execute the test, if timed.
        self.metrics = {}         # Metrics reported by this test, by name.

    def addMetric(self, name, value):
        """
        addMetric(name, value)

        Attach a test metric to the test result, with the given name and
        value. It is an error to attach a metric with the same name more
        than once.

        The value must be an instance of a MetricValue subclass.
        """
        if name in self.metrics:
            raise ValueError("result already includes metrics for %r" % (
                    name,))
        if not isinstance(value, MetricValue):
            raise TypeError("unexpected metric value: %r" % (value,))
        self.metrics[name] = value
# Test classes.
class TestSuite:
    """TestSuite - Information on a group of tests.

    A test suite groups together a set of logically related tests.
    """

    def __init__(self, name, source_root, exec_root, config):
        self.name = name
        self.source_root = source_root
        self.exec_root = exec_root
        # The test suite configuration.
        self.config = config

    def getSourcePath(self, components):
        # Resolve a path relative to the suite's source tree.
        return os.path.join(self.source_root, *components)

    def getExecPath(self, components):
        # Resolve a path relative to the suite's build/exec tree.
        return os.path.join(self.exec_root, *components)
class Test:
    """Test - Information on a single test instance."""

    def __init__(self, suite, path_in_suite, config, file_path = None):
        self.suite = suite              # Owning TestSuite.
        self.path_in_suite = path_in_suite  # Path components within the suite.
        self.config = config            # Effective test configuration.
        self.file_path = file_path      # Optional explicit source file override.

        # A list of conditions under which this test is expected to fail.
        # Each condition is a boolean expression of features and target
        # triple parts. These can optionally be provided by test format
        # handlers, and will be honored when the test result is supplied.
        self.xfails = []

        # A list of conditions that must be satisfied before running the test.
        # Each condition is a boolean expression of features. All of them
        # must be True for the test to run.
        # FIXME should target triple parts count here too?
        self.requires = []

        # A list of conditions that prevent execution of the test.
        # Each condition is a boolean expression of features and target
        # triple parts. All of them must be False for the test to run.
        self.unsupported = []

        # The test result, once complete.
        self.result = None

    def setResult(self, result):
        """Record the test's result exactly once, resolving XFAIL/XPASS."""
        if self.result is not None:
            raise ValueError("test result already set")
        if not isinstance(result, Result):
            raise ValueError("unexpected result type")

        self.result = result

        # Apply the XFAIL handling to resolve the result exit code.
        try:
            if self.isExpectedToFail():
                if self.result.code == PASS:
                    self.result.code = XPASS
                elif self.result.code == FAIL:
                    self.result.code = XFAIL
        except ValueError as e:
            # Syntax error in an XFAIL line.
            self.result.code = UNRESOLVED
            self.result.output = str(e)

    def getFullName(self):
        # Fully qualified name: "<suite config name> :: <path/in/suite>".
        return self.suite.config.name + ' :: ' + '/'.join(self.path_in_suite)

    def getFilePath(self):
        # Prefer the explicit override when one was supplied.
        if self.file_path:
            return self.file_path
        return self.getSourcePath()

    def getSourcePath(self):
        return self.suite.getSourcePath(self.path_in_suite)

    def getExecPath(self):
        return self.suite.getExecPath(self.path_in_suite)

    def isExpectedToFail(self):
        """
        isExpectedToFail() -> bool

        Check whether this test is expected to fail in the current
        configuration. This check relies on the test xfails property which by
        some test formats may not be computed until the test has first been
        executed.

        Throws ValueError if an XFAIL line has a syntax error.
        """
        features = self.config.available_features
        triple = getattr(self.suite.config, 'target_triple', "")

        # Check if any of the xfails match an available feature or the target.
        for item in self.xfails:
            # If this is the wildcard, it always fails.
            if item == '*':
                return True

            # If this is a True expression of features and target triple parts,
            # it fails.
            try:
                if BooleanExpression.evaluate(item, features, triple):
                    return True
            except ValueError as e:
                raise ValueError('Error in XFAIL list:\n%s' % str(e))

        return False

    def isWithinFeatureLimits(self):
        """
        isWithinFeatureLimits() -> bool

        A test is within the feature limits set by run_only_tests if
        1. the test's requirements ARE satisfied by the available features
        2. the test's requirements ARE NOT satisfied after the limiting
           features are removed from the available features

        Throws ValueError if a REQUIRES line has a syntax error.
        """
        if not self.config.limit_to_features:
            return True  # No limits. Run it.

        # Check the requirements as-is (#1)
        if self.getMissingRequiredFeatures():
            return False

        # Check the requirements after removing the limiting features (#2)
        featuresMinusLimits = [f for f in self.config.available_features
                               if not f in self.config.limit_to_features]
        if not self.getMissingRequiredFeaturesFromList(featuresMinusLimits):
            return False

        return True

    def getMissingRequiredFeaturesFromList(self, features):
        # Evaluate every REQUIRES expression against *features* and return
        # the ones that are not satisfied.
        try:
            return [item for item in self.requires
                    if not BooleanExpression.evaluate(item, features)]
        except ValueError as e:
            raise ValueError('Error in REQUIRES list:\n%s' % str(e))

    def getMissingRequiredFeatures(self):
        """
        getMissingRequiredFeatures() -> list of strings

        Returns a list of features from REQUIRES that are not satisfied.

        Throws ValueError if a REQUIRES line has a syntax error.
        """
        features = self.config.available_features
        return self.getMissingRequiredFeaturesFromList(features)

    def getUnsupportedFeatures(self):
        """
        getUnsupportedFeatures() -> list of strings

        Returns a list of features from UNSUPPORTED that are present
        in the test configuration's features or target triple.

        Throws ValueError if an UNSUPPORTED line has a syntax error.
        """
        features = self.config.available_features
        triple = getattr(self.suite.config, 'target_triple', "")

        try:
            return [item for item in self.unsupported
                    if BooleanExpression.evaluate(item, features, triple)]
        except ValueError as e:
            raise ValueError('Error in UNSUPPORTED list:\n%s' % str(e))

    def isEarlyTest(self):
        """
        isEarlyTest() -> bool

        Check whether this test should be executed early in a particular run.
        This can be used for test suites with long running tests to maximize
        parallelism or where it is desirable to surface their failures early.
        """
        return self.suite.config.is_early

    def getJUnitXML(self):
        # Build a JUnit-style <testcase> element for this test.
        # NOTE(review): assumes self.result is set and result.elapsed is a
        # number — a None elapsed would make the %.2f format raise.
        test_name = self.path_in_suite[-1]
        test_path = self.path_in_suite[:-1]
        safe_test_path = [x.replace(".","_") for x in test_path]
        safe_name = self.suite.name.replace(".","-")

        if safe_test_path:
            class_name = safe_name + "." + "/".join(safe_test_path)
        else:
            class_name = safe_name + "." + safe_name

        xml = "<testcase classname='" + class_name + "' name='" + \
            test_name + "'"
        xml += " time='%.2f'" % (self.result.elapsed,)
        if self.result.code.isFailure:
            # Escape the captured output so it is valid XML character data.
            xml += ">\n\t<failure >\n" + escape(self.result.output)
            xml += "\n\t</failure>\n</testcase>"
        else:
            xml += "/>"
        return xml
| lgpl-2.1 |
nicholasserra/sentry | tests/sentry/utils/test_cursors.py | 37 | 1321 | from __future__ import absolute_import
import math
from mock import Mock
from sentry.utils.cursors import build_cursor, Cursor
def build_mock(**attrs):
    """Build a Mock whose attributes are set from *attrs*, with a repr
    showing those attributes for readable test failure messages."""
    mocked = Mock()
    for name in attrs:
        setattr(mocked, name, attrs[name])
    mocked.__repr__ = lambda self: repr(attrs)
    return mocked
def test_build_cursor():
    """Page forward through three events whose cursor key is the floored
    id, checking next/prev flags and the visible slice at each page."""
    first = build_mock(id=1.1, message='one')
    second = build_mock(id=1.1, message='two')
    third = build_mock(id=2.1, message='three')

    results = [first, second, third]

    cursor_kwargs = {
        'key': lambda x: math.floor(x.id),
        'limit': 1,
    }

    # First page: a usable next cursor, an empty prev cursor.
    cursor = build_cursor(results, **cursor_kwargs)
    assert isinstance(cursor.next, Cursor)
    assert cursor.next
    assert isinstance(cursor.prev, Cursor)
    assert not cursor.prev
    assert list(cursor) == [first]

    # Middle page: both next and prev cursors are usable.
    cursor = build_cursor(results[1:], cursor=cursor.next, **cursor_kwargs)
    assert isinstance(cursor.next, Cursor)
    assert cursor.next
    assert isinstance(cursor.prev, Cursor)
    assert cursor.prev
    assert list(cursor) == [second]

    # Last page: prev but no next.
    cursor = build_cursor(results[2:], cursor=cursor.next, **cursor_kwargs)
    assert isinstance(cursor.next, Cursor)
    assert not cursor.next
    assert isinstance(cursor.prev, Cursor)
    assert cursor.prev
    assert list(cursor) == [third]
| bsd-3-clause |
endlessm/chromium-browser | testing/buildbot/generate_buildbot_json.py | 1 | 84414 | #!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to generate the majority of the JSON files in the src/testing/buildbot
directory. Maintaining these files by hand is too unwieldy.
"""
import argparse
import ast
import collections
import copy
import difflib
import glob
import itertools
import json
import os
import re
import string
import sys
import traceback
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class BBGenErr(Exception):
  """Error raised for any failure while generating the buildbot JSON."""
  def __init__(self, message):
    super(BBGenErr, self).__init__(message)
# This class is only present to accommodate certain machines on
# chromium.android.fyi which run certain tests as instrumentation
# tests, but not as gtests. If this discrepancy were fixed then the
# notion could be removed.
class TestSuiteTypes(object):
  """Namespace of test-suite type constants."""
  GTEST = 'gtest'
class BaseGenerator(object):
  """Abstract base for the per-test-type generators below.

  A subclass turns the input test descriptions for one test type on one
  tester into the list of dicts emitted into the waterfall JSON.
  """
  def __init__(self, bb_gen):
    # bb_gen is the owning BBJSONGenerator; subclasses call back into it
    # to build the individual test dictionaries.
    self.bb_gen = bb_gen
  def generate(self, waterfall, tester_name, tester_config, input_tests):
    # Abstract: return the list of generated test dicts for this tester.
    raise NotImplementedError()
  def sort(self, tests):
    # Abstract: return tests in this test type's canonical output order.
    raise NotImplementedError()
def cmp_tests(a, b):
  """Three-way comparator used to order generated test dicts.

  Sorts primarily on the 'test' key; entries that also carry a 'name'
  (command-line variants of the same test) sort after the plain entry.
  Implemented without the Python 2-only cmp() builtin so the same code
  also runs on Python 3 (via functools.cmp_to_key).
  """
  def _three_way(x, y):
    # Portable replacement for the removed cmp() builtin; yields -1/0/1
    # exactly as cmp() did for comparable values.
    return (x > y) - (x < y)

  # Prefer to compare based on the "test" key.
  val = _three_way(a['test'], b['test'])
  if val != 0:
    return val
  if 'name' in a and 'name' in b:
    return _three_way(a['name'], b['name']) # pragma: no cover
  if 'name' not in a and 'name' not in b:
    return 0 # pragma: no cover
  # Prefer to put variants of the same test after the first one.
  if 'name' in a:
    return 1
  # 'name' is in b.
  return -1 # pragma: no cover
class GPUTelemetryTestGenerator(BaseGenerator):
  """Generates GPU Telemetry tests (isolated scripts with GPU-specific
  boilerplate), optionally targeting the Android WebView browser."""
  def __init__(self, bb_gen, is_android_webview=False):
    super(GPUTelemetryTestGenerator, self).__init__(bb_gen)
    # When True, generated tests are pointed at the
    # android-webview-instrumentation browser instead of the tester's
    # configured browser (see generate_gpu_telemetry_test).
    self._is_android_webview = is_android_webview
  def generate(self, waterfall, tester_name, tester_config, input_tests):
    isolated_scripts = []
    for test_name, test_config in sorted(input_tests.iteritems()):
      test = self.bb_gen.generate_gpu_telemetry_test(
          waterfall, tester_name, tester_config, test_name, test_config,
          self._is_android_webview)
      # A falsy result means the test was vetoed for this tester.
      if test:
        isolated_scripts.append(test)
    return isolated_scripts
  def sort(self, tests):
    # Output is ordered by generated step name.
    return sorted(tests, key=lambda x: x['name'])
class GTestGenerator(BaseGenerator):
  """Generates gtest entries for a tester."""
  def __init__(self, bb_gen):
    super(GTestGenerator, self).__init__(bb_gen)
  def generate(self, waterfall, tester_name, tester_config, input_tests):
    # The relative ordering of some of the tests is important to
    # minimize differences compared to the handwritten JSON files, since
    # Python's sorts are stable and there are some tests with the same
    # key (see gles2_conform_d3d9_test and similar variants). Avoid
    # losing the order by avoiding coalescing the dictionaries into one.
    gtests = []
    for test_name, test_config in sorted(input_tests.iteritems()):
      # Variants allow more than one definition for a given test, and is defined
      # in array format from resolve_variants().
      if not isinstance(test_config, list):
        test_config = [test_config]
      for config in test_config:
        test = self.bb_gen.generate_gtest(
            waterfall, tester_name, tester_config, test_name, config)
        if test:
          # generate_gtest may veto the test generation on this tester.
          gtests.append(test)
    return gtests
  def sort(self, tests):
    # cmp_tests orders by 'test' key, with named variants after the
    # plain entry (Python 2 cmp= comparator).
    return sorted(tests, cmp=cmp_tests)
class IsolatedScriptTestGenerator(BaseGenerator):
  """Generates isolated-script test entries for a tester."""
  def __init__(self, bb_gen):
    super(IsolatedScriptTestGenerator, self).__init__(bb_gen)
  def generate(self, waterfall, tester_name, tester_config, input_tests):
    isolated_scripts = []
    for test_name, test_config in sorted(input_tests.iteritems()):
      # Variants allow more than one definition for a given test, and is defined
      # in array format from resolve_variants().
      if not isinstance(test_config, list):
        test_config = [test_config]
      for config in test_config:
        test = self.bb_gen.generate_isolated_script_test(
            waterfall, tester_name, tester_config, test_name, config)
        # A falsy result means the test was vetoed for this tester.
        if test:
          isolated_scripts.append(test)
    return isolated_scripts
  def sort(self, tests):
    # Output is ordered by generated step name.
    return sorted(tests, key=lambda x: x['name'])
class ScriptGenerator(BaseGenerator):
  """Generates entries for script tests."""
  def __init__(self, bb_gen):
    super(ScriptGenerator, self).__init__(bb_gen)
  def generate(self, waterfall, tester_name, tester_config, input_tests):
    generated = [
        self.bb_gen.generate_script_test(
            waterfall, tester_name, tester_config, test_name, test_config)
        for test_name, test_config in sorted(input_tests.iteritems())
    ]
    # generate_script_test may veto a test on this tester; drop the Nones.
    return [test for test in generated if test]
  def sort(self, tests):
    return sorted(tests, key=lambda x: x['name'])
class JUnitGenerator(BaseGenerator):
  """Generates entries for JUnit tests."""
  def __init__(self, bb_gen):
    super(JUnitGenerator, self).__init__(bb_gen)
  def generate(self, waterfall, tester_name, tester_config, input_tests):
    generated = [
        self.bb_gen.generate_junit_test(
            waterfall, tester_name, tester_config, test_name, test_config)
        for test_name, test_config in sorted(input_tests.iteritems())
    ]
    # generate_junit_test may veto a test on this tester; drop the Nones.
    return [test for test in generated if test]
  def sort(self, tests):
    return sorted(tests, key=lambda x: x['test'])
class CTSGenerator(BaseGenerator):
  """Generates the CTS test entry, which is passed through verbatim."""
  def __init__(self, bb_gen):
    super(CTSGenerator, self).__init__(bb_gen)
  def generate(self, waterfall, tester_name, tester_config, input_tests):
    # CTS suites only contain one entry, and it's the contents of the
    # input tests' dictionary, verbatim.
    return [input_tests]
  def sort(self, tests):
    # CTS entries keep their input order.
    return tests
class InstrumentationTestGenerator(BaseGenerator):
  """Generates Android instrumentation test entries for a tester."""
  def __init__(self, bb_gen):
    super(InstrumentationTestGenerator, self).__init__(bb_gen)
  def generate(self, waterfall, tester_name, tester_config, input_tests):
    scripts = []
    for test_name, test_config in sorted(input_tests.iteritems()):
      test = self.bb_gen.generate_instrumentation_test(
          waterfall, tester_name, tester_config, test_name, test_config)
      # A falsy result means the test was vetoed for this tester.
      if test:
        scripts.append(test)
    return scripts
  def sort(self, tests):
    # Same ordering as gtests: by 'test' key, variants after plain entry.
    return sorted(tests, cmp=cmp_tests)
def check_compound_references(other_test_suites=None,
                              sub_suite=None,
                              suite=None,
                              target_test_suites=None,
                              test_type=None,
                              **kwargs):
  """Validator: a composition suite may only reference basic suites.

  Raises BBGenErr when sub_suite names another composition-type suite
  (either from the suite's own group or the other composition group).
  """
  del kwargs
  refers_to_composition = (sub_suite in other_test_suites
                           or sub_suite in target_test_suites)
  if refers_to_composition:
    raise BBGenErr('%s may not refer to other composition type test '
                   'suites (error found while processing %s)'
                   % (test_type, suite))
def check_basic_references(basic_suites=None,
                           sub_suite=None,
                           suite=None,
                           **kwargs):
  """Validator: every referenced sub-suite must exist as a basic suite.

  Raises BBGenErr when sub_suite is not among basic_suites.
  """
  del kwargs
  if sub_suite in basic_suites:
    return
  raise BBGenErr('Unable to find reference to %s while processing %s'
                 % (sub_suite, suite))
def check_conflicting_definitions(basic_suites=None,
                                  seen_tests=None,
                                  sub_suite=None,
                                  suite=None,
                                  test_type=None,
                                  **kwargs):
  """Validator: a test reachable through multiple basic suites must have
  an identical definition in each of them.

  seen_tests maps test name -> basic suite it was last seen in, and is
  updated in place as a side effect.

  Raises BBGenErr on the first conflicting definition found.
  """
  del kwargs
  current_suite = basic_suites[sub_suite]
  for test_name in current_suite:
    previous = seen_tests.get(test_name)
    if (previous is not None and
        current_suite[test_name] != basic_suites[previous][test_name]):
      raise BBGenErr('Conflicting test definitions for %s from %s '
                     'and %s in %s (error found while processing %s)'
                     % (test_name, previous, sub_suite,
                        test_type, suite))
    seen_tests[test_name] = sub_suite
def check_matrix_identifier(sub_suite=None,
                            suite=None,
                            suite_def=None,
                            all_variants=None,
                            **kwargs):
  """Validator: every variant of a matrix compound suite must carry an
  'identifier' field; string variants must resolve via variants.pyl.

  Raises BBGenErr on a missing variant definition or identifier.
  """
  del kwargs
  for variant in suite_def[sub_suite].get('variants', []):
    if isinstance(variant, str):
      # A bare string is a reference into variants.pyl; resolve it first.
      if variant not in all_variants:
        raise BBGenErr('Missing variant definition for %s in variants.pyl'
                       % variant)
      variant = all_variants[variant]
    if 'identifier' not in variant:
      raise BBGenErr('Missing required identifier field in matrix '
                     'compound suite %s, %s' % (suite, sub_suite))
class BBJSONGenerator(object):
  """Reads the .pyl configuration files and generates the per-waterfall
  JSON files in this directory."""
  def __init__(self):
    # Directory containing this script and the .pyl inputs.
    self.this_dir = THIS_DIR
    # The fields below are populated from the command line and the parsed
    # .pyl files before generation runs; None until then.
    self.args = None
    self.waterfalls = None
    self.test_suites = None
    self.exceptions = None
    self.mixins = None
    self.gn_isolate_map = None
    self.variants = None
  def generate_abs_file_path(self, relative_path):
    # Resolve a path relative to this script's directory.
    return os.path.join(self.this_dir, relative_path) # pragma: no cover
  def print_line(self, line):
    # Exists so that tests can mock out stdout writes.
    # NOTE: Python 2 print statement; this file is not Python 3 compatible.
    print line # pragma: no cover
  def read_file(self, relative_path):
    # Read and return the contents of a file relative to this directory.
    with open(self.generate_abs_file_path(
        relative_path)) as fp: # pragma: no cover
      return fp.read() # pragma: no cover
  def write_file(self, relative_path, contents):
    # Overwrite a file relative to this directory (binary mode, so output
    # is byte-exact across platforms).
    with open(self.generate_abs_file_path(
        relative_path), 'wb') as fp: # pragma: no cover
      fp.write(contents) # pragma: no cover
def pyl_file_path(self, filename):
if self.args and self.args.pyl_files_dir:
return os.path.join(self.args.pyl_files_dir, filename)
return filename
  def load_pyl_file(self, filename):
    """Parse a .pyl file (a Python literal) and return the resulting
    object, raising BBGenErr on malformed input."""
    try:
      return ast.literal_eval(self.read_file(
          self.pyl_file_path(filename)))
    except (SyntaxError, ValueError) as e: # pragma: no cover
      raise BBGenErr('Failed to parse pyl file "%s": %s' %
                     (filename, e)) # pragma: no cover
  # TODO(kbr): require that os_type be specified for all bots in waterfalls.pyl.
  # Currently it is only mandatory for bots which run GPU tests. Change these to
  # use [] instead of .get().
  #
  # Predicates over a tester's configured os_type / browser_config.
  def is_android(self, tester_config):
    return tester_config.get('os_type') == 'android'
  def is_chromeos(self, tester_config):
    return tester_config.get('os_type') == 'chromeos'
  def is_linux(self, tester_config):
    return tester_config.get('os_type') == 'linux'
  def is_mac(self, tester_config):
    return tester_config.get('os_type') == 'mac'
  def is_win(self, tester_config):
    return tester_config.get('os_type') == 'win'
  def is_win64(self, tester_config):
    return (tester_config.get('os_type') == 'win' and
        tester_config.get('browser_config') == 'release_x64')
  def get_exception_for_test(self, test_name, test_config):
    """Return this test's entry from test_suite_exceptions.pyl, or None."""
    # gtests may have both "test" and "name" fields, and usually, if the "name"
    # field is specified, it means that the same test is being repurposed
    # multiple times with different command line arguments. To handle this case,
    # prefer to lookup per the "name" field of the test itself, as opposed to
    # the "test_name", which is actually the "test" field.
    if 'name' in test_config:
      return self.exceptions.get(test_config['name'])
    else:
      return self.exceptions.get(test_name)
  def should_run_on_tester(self, waterfall, tester_name,test_name, test_config):
    """Return False iff the test's exceptions entry removes it from this
    tester (via 'remove_from')."""
    # Currently, the only reason a test should not run on a given tester is that
    # it's in the exceptions. (Once the GPU waterfall generation script is
    # incorporated here, the rules will become more complex.)
    exception = self.get_exception_for_test(test_name, test_config)
    if not exception:
      return True
    remove_from = None
    remove_from = exception.get('remove_from')
    if remove_from:
      if tester_name in remove_from:
        return False
      # TODO(kbr): this code path was added for some tests (including
      # android_webview_unittests) on one machine (Nougat Phone
      # Tester) which exists with the same name on two waterfalls,
      # chromium.android and chromium.fyi; the tests are run on one
      # but not the other. Once the bots are all uniquely named (a
      # different ongoing project) this code should be removed.
      # TODO(kbr): add coverage.
      return (tester_name + ' ' + waterfall['name']
              not in remove_from) # pragma: no cover
    return True
def get_test_modifications(self, test, test_name, tester_name):
exception = self.get_exception_for_test(test_name, test)
if not exception:
return None
return exception.get('modifications', {}).get(tester_name)
def get_test_replacements(self, test, test_name, tester_name):
exception = self.get_exception_for_test(test_name, test)
if not exception:
return None
return exception.get('replacements', {}).get(tester_name)
def merge_command_line_args(self, arr, prefix, splitter):
prefix_len = len(prefix)
idx = 0
first_idx = -1
accumulated_args = []
while idx < len(arr):
flag = arr[idx]
delete_current_entry = False
if flag.startswith(prefix):
arg = flag[prefix_len:]
accumulated_args.extend(arg.split(splitter))
if first_idx < 0:
first_idx = idx
else:
delete_current_entry = True
if delete_current_entry:
del arr[idx]
else:
idx += 1
if first_idx >= 0:
arr[first_idx] = prefix + splitter.join(accumulated_args)
return arr
def maybe_fixup_args_array(self, arr):
# The incoming array of strings may be an array of command line
# arguments. To make it easier to turn on certain features per-bot or
# per-test-suite, look specifically for certain flags and merge them
# appropriately.
# --enable-features=Feature1 --enable-features=Feature2
# are merged to:
# --enable-features=Feature1,Feature2
# and:
# --extra-browser-args=arg1 --extra-browser-args=arg2
# are merged to:
# --extra-browser-args=arg1 arg2
arr = self.merge_command_line_args(arr, '--enable-features=', ',')
arr = self.merge_command_line_args(arr, '--extra-browser-args=', ' ')
return arr
  def dictionary_merge(self, a, b, path=None, update=True):
    """Recursively merge dictionary b into dictionary a, in place.

    Based on http://stackoverflow.com/questions/7204805/
    python-dictionaries-of-dictionaries-merge

    Nested dicts are merged recursively; lists of strings are
    concatenated and run through maybe_fixup_args_array; other lists are
    merged element-wise.  A value of None in b deletes the key from a.
    *path* accumulates the key path for error messages.  Returns a.
    """
    if path is None:
      path = []
    for key in b:
      if key in a:
        if isinstance(a[key], dict) and isinstance(b[key], dict):
          self.dictionary_merge(a[key], b[key], path + [str(key)])
        elif a[key] == b[key]:
          pass # same leaf value
        elif isinstance(a[key], list) and isinstance(b[key], list):
          # Args arrays are lists of strings. Just concatenate them,
          # and don't sort them, in order to keep some needed
          # arguments adjacent (like --time-out-ms [arg], etc.)
          if all(isinstance(x, str)
                 for x in itertools.chain(a[key], b[key])):
            a[key] = self.maybe_fixup_args_array(a[key] + b[key])
          else:
            # TODO(kbr): this only works properly if the two arrays are
            # the same length, which is currently always the case in the
            # swarming dimension_sets that we have to merge. It will fail
            # to merge / override 'args' arrays which are different
            # length.
            for idx in xrange(len(b[key])):
              try:
                a[key][idx] = self.dictionary_merge(a[key][idx], b[key][idx],
                                                    path + [str(key), str(idx)],
                                                    update=update)
              except (IndexError, TypeError):
                raise BBGenErr('Error merging lists by key "%s" from source %s '
                               'into target %s at index %s. Verify target list '
                               'length is equal or greater than source'
                               % (str(key), str(b), str(a), str(idx)))
        elif update:
          if b[key] is None:
            # None is the sentinel for "remove this key from the target".
            del a[key]
          else:
            a[key] = b[key]
        else:
          raise BBGenErr('Conflict at %s' % '.'.join(
              path + [str(key)])) # pragma: no cover
      elif b[key] is not None:
        a[key] = b[key]
    return a
  def initialize_args_for_test(
      self, generated_test, tester_config, additional_arg_keys=None):
    """Assemble the test's 'args' list from the test definition, the
    tester config, OS-conditional arg keys, and any extra keys; the
    merged array is normalized via maybe_fixup_args_array."""
    args = []
    args.extend(generated_test.get('args', []))
    args.extend(tester_config.get('args', []))
    # OS-conditional args (e.g. 'android_args') are consumed (popped) from
    # the test definition and appended only when the tester matches.
    def add_conditional_args(key, fn):
      val = generated_test.pop(key, [])
      if fn(tester_config):
        args.extend(val)
    add_conditional_args('desktop_args', lambda cfg: not self.is_android(cfg))
    add_conditional_args('linux_args', self.is_linux)
    add_conditional_args('android_args', self.is_android)
    add_conditional_args('chromeos_args', self.is_chromeos)
    add_conditional_args('mac_args', self.is_mac)
    add_conditional_args('win_args', self.is_win)
    add_conditional_args('win64_args', self.is_win64)
    for key in additional_arg_keys or []:
      args.extend(generated_test.pop(key, []))
      args.extend(tester_config.get(key, []))
    if args:
      generated_test['args'] = self.maybe_fixup_args_array(args)
  def initialize_swarming_dictionary_for_test(self, generated_test,
                                              tester_config):
    """Populate the test's 'swarming' dict with defaults and merge in the
    tester's swarming config (including dimension_sets)."""
    if 'swarming' not in generated_test:
      generated_test['swarming'] = {}
    if not 'can_use_on_swarming_builders' in generated_test['swarming']:
      generated_test['swarming'].update({
        'can_use_on_swarming_builders': tester_config.get('use_swarming',
                                                          True)
      })
    if 'swarming' in tester_config:
      # Copy the tester's dimension_sets first so the merge below operates
      # on the test's own copy rather than mutating the shared config.
      if ('dimension_sets' not in generated_test['swarming'] and
          'dimension_sets' in tester_config['swarming']):
        generated_test['swarming']['dimension_sets'] = copy.deepcopy(
          tester_config['swarming']['dimension_sets'])
      self.dictionary_merge(generated_test['swarming'],
                            tester_config['swarming'])
    # Apply any Android-specific Swarming dimensions after the generic ones.
    if 'android_swarming' in generated_test:
      if self.is_android(tester_config): # pragma: no cover
        self.dictionary_merge(
          generated_test['swarming'],
          generated_test['android_swarming']) # pragma: no cover
      del generated_test['android_swarming'] # pragma: no cover
  def clean_swarming_dictionary(self, swarming_dict):
    """Remove redundant entries from a test's 'swarming' dict, in place."""
    # Clean out redundant entries from a test's "swarming" dictionary.
    # This is really only needed to retain 100% parity with the
    # handwritten JSON files, and can be removed once all the files are
    # autogenerated.
    if 'shards' in swarming_dict:
      if swarming_dict['shards'] == 1: # pragma: no cover
        del swarming_dict['shards'] # pragma: no cover
    if 'hard_timeout' in swarming_dict:
      if swarming_dict['hard_timeout'] == 0: # pragma: no cover
        del swarming_dict['hard_timeout'] # pragma: no cover
    if not swarming_dict.get('can_use_on_swarming_builders', False):
      # Remove all other keys.
      # NOTE(review): deleting while iterating .keys() relies on Python 2
      # returning a list copy; this would need list(...) under Python 3.
      for k in swarming_dict.keys(): # pragma: no cover
        if k != 'can_use_on_swarming_builders': # pragma: no cover
          del swarming_dict[k] # pragma: no cover
  def update_and_cleanup_test(self, test, test_name, tester_name, tester_config,
                              waterfall):
    """Apply mixins, per-tester exception modifications, swarming cleanup
    and arg replacements to a generated test; returns the updated test."""
    # Apply swarming mixins.
    test = self.apply_all_mixins(
        test, waterfall, tester_name, tester_config)
    # See if there are any exceptions that need to be merged into this
    # test's specification.
    modifications = self.get_test_modifications(test, test_name, tester_name)
    if modifications:
      test = self.dictionary_merge(test, modifications)
    if 'swarming' in test:
      self.clean_swarming_dictionary(test['swarming'])
    # Ensure all Android Swarming tests run only on userdebug builds if another
    # build type was not specified.
    if 'swarming' in test and self.is_android(tester_config):
      for d in test['swarming'].get('dimension_sets', []):
        if d.get('os') == 'Android' and not d.get('device_os_type'):
          d['device_os_type'] = 'userdebug'
    self.replace_test_args(test, test_name, tester_name)
    return test
  def replace_test_args(self, test, test_name, tester_name):
    """Apply per-tester 'replacements' from the exceptions file to the
    test's arg lists, in place.

    Supports both '--flag value' (two entries) and '--flag=value' forms;
    a replacement value of None deletes the flag.  Raises BBGenErr for an
    invalid replacement key or a flag that isn't present.
    """
    replacements = self.get_test_replacements(
        test, test_name, tester_name) or {}
    valid_replacement_keys = ['args', 'non_precommit_args', 'precommit_args']
    for key, replacement_dict in replacements.iteritems():
      if key not in valid_replacement_keys:
        raise BBGenErr(
            'Given replacement key %s for %s on %s is not in the list of valid '
            'keys %s' % (key, test_name, tester_name, valid_replacement_keys))
      for replacement_key, replacement_val in replacement_dict.iteritems():
        found_key = False
        for i, test_key in enumerate(test.get(key, [])):
          # Handle both the key/value being replaced being defined as two
          # separate items or as key=value.
          if test_key == replacement_key:
            found_key = True
            # Handle flags without values.
            if replacement_val == None:
              del test[key][i]
            else:
              test[key][i+1] = replacement_val
            break
          elif test_key.startswith(replacement_key + '='):
            found_key = True
            if replacement_val == None:
              del test[key][i]
            else:
              test[key][i] = '%s=%s' % (replacement_key, replacement_val)
            break
        if not found_key:
          raise BBGenErr('Could not find %s in existing list of values for key '
                         '%s in %s on %s' % (replacement_key, key, test_name,
                                             tester_name))
  def add_common_test_properties(self, test, tester_config):
    """Attach trigger_script configuration for multi-dimension triggering
    and for CrOS hardware testers."""
    if tester_config.get('use_multi_dimension_trigger_script'):
      # Assumes update_and_cleanup_test has already been called, so the
      # builder's mixins have been flattened into the test.
      test['trigger_script'] = {
        'script': '//testing/trigger_scripts/trigger_multiple_dimensions.py',
        'args': [
          '--multiple-trigger-configs',
          json.dumps(test['swarming']['dimension_sets'] +
                     tester_config.get('alternate_swarming_dimensions', [])),
          '--multiple-dimension-script-verbose',
          'True'
        ],
      }
    elif self.is_chromeos(tester_config) and tester_config.get('use_swarming',
                                                               True):
      # The presence of the "device_type" dimension indicates that the tests
      # are targeting CrOS hardware and so need the special trigger script.
      dimension_sets = test['swarming']['dimension_sets']
      if all('device_type' in ds for ds in dimension_sets):
        test['trigger_script'] = {
          'script': '//testing/trigger_scripts/chromeos_device_trigger.py',
        }
  def add_android_presentation_args(self, tester_config, test_name, result):
    """Add Android result-presentation plumbing to a generated test:
    results bucket arg, merge script, logdog CIPD package and logcat
    output links (each individually skippable via tester_config)."""
    args = result.get('args', [])
    bucket = tester_config.get('results_bucket', 'chromium-result-details')
    args.append('--gs-results-bucket=%s' % bucket)
    if (result['swarming']['can_use_on_swarming_builders'] and not
        tester_config.get('skip_merge_script', False)):
      result['merge'] = {
        'args': [
          '--bucket',
          bucket,
          '--test-name',
          test_name
        ],
        'script': '//build/android/pylib/results/presentation/'
          'test_results_presentation.py',
      }
    if not tester_config.get('skip_cipd_packages', False):
      cipd_packages = result['swarming'].get('cipd_packages', [])
      # The logdog butler binary is needed to stream logcats.
      cipd_packages.append(
        {
          'cipd_package': 'infra/tools/luci/logdog/butler/${platform}',
          'location': 'bin',
          'revision': 'git_revision:ff387eadf445b24c935f1cf7d6ddd279f8a6b04c',
        }
      )
      result['swarming']['cipd_packages'] = cipd_packages
    if not tester_config.get('skip_output_links', False):
      result['swarming']['output_links'] = [
        {
          'link': [
            'https://luci-logdog.appspot.com/v/?s',
            '=android%2Fswarming%2Flogcats%2F',
            '${TASK_ID}%2F%2B%2Funified_logcats',
          ],
          'name': 'shard #${SHARD_INDEX} logcats',
        },
      ]
    if args:
      result['args'] = args
  def generate_gtest(self, waterfall, tester_name, tester_config, test_name,
                     test_config):
    """Build the JSON entry for one gtest on one tester, or None if the
    test is excluded there via exceptions."""
    if not self.should_run_on_tester(
        waterfall, tester_name, test_name, test_config):
      return None
    result = copy.deepcopy(test_config)
    # If the definition already names a binary via 'test', keep it and
    # record the step name under 'name'; otherwise 'test' is the step name.
    if 'test' in result:
      result['name'] = test_name
    else:
      result['test'] = test_name
    self.initialize_swarming_dictionary_for_test(result, tester_config)
    self.initialize_args_for_test(
        result, tester_config, additional_arg_keys=['gtest_args'])
    if self.is_android(tester_config) and tester_config.get('use_swarming',
                                                            True):
      self.add_android_presentation_args(tester_config, test_name, result)
      result['args'] = result.get('args', []) + ['--recover-devices']
    result = self.update_and_cleanup_test(
        result, test_name, tester_name, tester_config, waterfall)
    self.add_common_test_properties(result, tester_config)
    if not result.get('merge'):
      # TODO(https://crbug.com/958376): Consider adding the ability to not have
      # this default.
      result['merge'] = {
        'script': '//testing/merge_scripts/standard_gtest_merge.py',
        'args': [],
      }
    return result
  def generate_isolated_script_test(self, waterfall, tester_name, tester_config,
                                    test_name, test_config):
    """Build the JSON entry for one isolated-script test on one tester,
    or None if the test is excluded there via exceptions."""
    if not self.should_run_on_tester(waterfall, tester_name, test_name,
                                     test_config):
      return None
    result = copy.deepcopy(test_config)
    result['isolate_name'] = result.get('isolate_name', test_name)
    result['name'] = result.get('name', test_name)
    self.initialize_swarming_dictionary_for_test(result, tester_config)
    self.initialize_args_for_test(result, tester_config)
    if tester_config.get('use_android_presentation', False):
      self.add_android_presentation_args(tester_config, test_name, result)
    result = self.update_and_cleanup_test(
        result, test_name, tester_name, tester_config, waterfall)
    self.add_common_test_properties(result, tester_config)
    if not result.get('merge'):
      # TODO(https://crbug.com/958376): Consider adding the ability to not have
      # this default.
      result['merge'] = {
        'script': '//testing/merge_scripts/standard_isolated_script_merge.py',
        'args': [],
      }
    return result
  def generate_script_test(self, waterfall, tester_name, tester_config,
                           test_name, test_config):
    """Build the JSON entry for one script test on one tester, or None if
    excluded.  Raises BBGenErr on waterfalls/testers that forbid script
    tests."""
    # TODO(https://crbug.com/953072): Remove this check whenever a better
    # long-term solution is implemented.
    if (waterfall.get('forbid_script_tests', False) or
        waterfall['machines'][tester_name].get('forbid_script_tests', False)):
      raise BBGenErr('Attempted to generate a script test on tester ' +
                     tester_name + ', which explicitly forbids script tests')
    if not self.should_run_on_tester(waterfall, tester_name, test_name,
                                     test_config):
      return None
    result = {
      'name': test_name,
      'script': test_config['script']
    }
    result = self.update_and_cleanup_test(
        result, test_name, tester_name, tester_config, waterfall)
    return result
  def generate_junit_test(self, waterfall, tester_name, tester_config,
                          test_name, test_config):
    """Build the JSON entry for one JUnit test on one tester, or None if
    excluded."""
    if not self.should_run_on_tester(waterfall, tester_name, test_name,
                                     test_config):
      return None
    result = copy.deepcopy(test_config)
    result.update({
      'name': test_name,
      'test': test_config.get('test', test_name),
    })
    self.initialize_args_for_test(result, tester_config)
    result = self.update_and_cleanup_test(
        result, test_name, tester_name, tester_config, waterfall)
    return result
  def generate_instrumentation_test(self, waterfall, tester_name, tester_config,
                                    test_name, test_config):
    """Build the JSON entry for one instrumentation test on one tester,
    or None if excluded."""
    if not self.should_run_on_tester(waterfall, tester_name, test_name,
                                     test_config):
      return None
    result = copy.deepcopy(test_config)
    # Record the step name under 'name' only when it differs from the
    # 'test' target; otherwise 'test' is the step name.
    if 'test' in result and result['test'] != test_name:
      result['name'] = test_name
    else:
      result['test'] = test_name
    result = self.update_and_cleanup_test(
        result, test_name, tester_name, tester_config, waterfall)
    return result
def substitute_gpu_args(self, tester_config, swarming_config, args):
substitutions = {
# Any machine in waterfalls.pyl which desires to run GPU tests
# must provide the os_type key.
'os_type': tester_config['os_type'],
'gpu_vendor_id': '0',
'gpu_device_id': '0',
}
dimension_set = swarming_config['dimension_sets'][0]
if 'gpu' in dimension_set:
# First remove the driver version, then split into vendor and device.
gpu = dimension_set['gpu']
gpu = gpu.split('-')[0].split(':')
substitutions['gpu_vendor_id'] = gpu[0]
substitutions['gpu_device_id'] = gpu[1]
return [string.Template(arg).safe_substitute(substitutions) for arg in args]
  def generate_gpu_telemetry_test(self, waterfall, tester_name, tester_config,
                                  test_name, test_config, is_android_webview):
    """Build the JSON entry for one GPU Telemetry test: an isolated
    script with GPU boilerplate args, non-idempotent swarming and
    retries-with-patch disabled; returns None if excluded."""
    # These are all just specializations of isolated script tests with
    # a bunch of boilerplate command line arguments added.
    # The step name must end in 'test' or 'tests' in order for the
    # results to automatically show up on the flakiness dashboard.
    # (At least, this was true some time ago.) Continue to use this
    # naming convention for the time being to minimize changes.
    step_name = test_config.get('name', test_name)
    if not (step_name.endswith('test') or step_name.endswith('tests')):
      step_name = '%s_tests' % step_name
    result = self.generate_isolated_script_test(
        waterfall, tester_name, tester_config, step_name, test_config)
    if not result:
      return None
    result['isolate_name'] = test_config.get(
        'isolate_name', 'telemetry_gpu_integration_test')
    # Populate test_id_prefix.
    gn_entry = (
        self.gn_isolate_map.get(result['isolate_name']) or
        self.gn_isolate_map.get('telemetry_gpu_integration_test'))
    result['test_id_prefix'] = 'ninja:%s/%s/' % (gn_entry['label'], step_name)
    args = result.get('args', [])
    test_to_run = result.pop('telemetry_test_name', test_name)
    # These tests upload and download results from cloud storage and therefore
    # aren't idempotent yet. https://crbug.com/549140.
    result['swarming']['idempotent'] = False
    # The GPU tests act much like integration tests for the entire browser, and
    # tend to uncover flakiness bugs more readily than other test suites. In
    # order to surface any flakiness more readily to the developer of the CL
    # which is introducing it, we disable retries with patch on the commit
    # queue.
    result['should_retry_with_patch'] = False
    browser = ('android-webview-instrumentation'
               if is_android_webview else tester_config['browser_config'])
    args = [
        test_to_run,
        '--show-stdout',
        '--browser=%s' % browser,
        # --passthrough displays more of the logging in Telemetry when
        # run via typ, in particular some of the warnings about tests
        # being expected to fail, but passing.
        '--passthrough',
        '-v',
        '--extra-browser-args=--enable-logging=stderr --js-flags=--expose-gc',
    ] + args
    result['args'] = self.maybe_fixup_args_array(self.substitute_gpu_args(
        tester_config, result['swarming'], args))
    return result
  def get_test_generator_map(self):
    """Map each test-type key in the .pyl files to the generator instance
    that produces its JSON entries."""
    return {
      'android_webview_gpu_telemetry_tests':
          GPUTelemetryTestGenerator(self, is_android_webview=True),
      'cts_tests':
          CTSGenerator(self),
      'gpu_telemetry_tests':
          GPUTelemetryTestGenerator(self),
      'gtest_tests':
          GTestGenerator(self),
      'instrumentation_tests':
          InstrumentationTestGenerator(self),
      'isolated_scripts':
          IsolatedScriptTestGenerator(self),
      'junit_tests':
          JUnitGenerator(self),
      'scripts':
          ScriptGenerator(self),
    }
def get_test_type_remapper(self):
return {
# These are a specialization of isolated_scripts with a bunch of
# boilerplate command line arguments added to each one.
'android_webview_gpu_telemetry_tests': 'isolated_scripts',
'gpu_telemetry_tests': 'isolated_scripts',
}
def check_composition_type_test_suites(self, test_type,
additional_validators=None):
"""Pre-pass to catch errors reliabily for compound/matrix suites"""
validators = [check_compound_references,
check_basic_references,
check_conflicting_definitions]
if additional_validators:
validators += additional_validators
target_suites = self.test_suites.get(test_type, {})
other_test_type = ('compound_suites'
if test_type == 'matrix_compound_suites'
else 'matrix_compound_suites')
other_suites = self.test_suites.get(other_test_type, {})
basic_suites = self.test_suites.get('basic_suites', {})
for suite, suite_def in target_suites.iteritems():
if suite in basic_suites:
raise BBGenErr('%s names may not duplicate basic test suite names '
'(error found while processsing %s)'
% (test_type, suite))
seen_tests = {}
for sub_suite in suite_def:
for validator in validators:
validator(
basic_suites=basic_suites,
other_test_suites=other_suites,
seen_tests=seen_tests,
sub_suite=sub_suite,
suite=suite,
suite_def=suite_def,
target_test_suites=target_suites,
test_type=test_type,
all_variants=self.variants
)
def flatten_test_suites(self):
new_test_suites = {}
test_types = ['basic_suites', 'compound_suites', 'matrix_compound_suites']
for category in test_types:
for name, value in self.test_suites.get(category, {}).iteritems():
new_test_suites[name] = value
self.test_suites = new_test_suites
def resolve_test_id_prefixes(self):
for suite in self.test_suites['basic_suites'].itervalues():
for key, test in suite.iteritems():
if not isinstance(test, dict):
# Some test definitions are just strings, such as CTS.
# Skip them.
continue
# This assumes the recipe logic which prefers 'test' to 'isolate_name'
# https://source.chromium.org/chromium/chromium/tools/build/+/master:scripts/slave/recipe_modules/chromium_tests/generators.py;l=89;drc=14c062ba0eb418d3c4623dde41a753241b9df06b
# TODO(crbug.com/1035124): clean this up.
isolate_name = test.get('test') or test.get('isolate_name') or key
gn_entry = self.gn_isolate_map.get(isolate_name)
if gn_entry:
label = gn_entry['label']
if label.count(':') != 1:
raise BBGenErr(
'Malformed GN label "%s" in gn_isolate_map for key "%s",'
' implicit names (like //f/b meaning //f/b:b) are disallowed.' %
(label, isolate_name))
if label.split(':')[1] != isolate_name:
raise BBGenErr(
'gn_isolate_map key name "%s" doesn\'t match GN target name in'
' label "%s" see http://crbug.com/1071091 for details.' %
(isolate_name, label))
test['test_id_prefix'] = 'ninja:%s/' % label
else: # pragma: no cover
# Some tests do not have an entry gn_isolate_map.pyl, such as
# telemetry tests.
# TODO(crbug.com/1035304): require an entry in gn_isolate_map.
pass
def resolve_composition_test_suites(self):
self.check_composition_type_test_suites('compound_suites')
compound_suites = self.test_suites.get('compound_suites', {})
# check_composition_type_test_suites() checks that all basic suites
# referenced by compound suites exist.
basic_suites = self.test_suites.get('basic_suites')
for name, value in compound_suites.iteritems():
# Resolve this to a dictionary.
full_suite = {}
for entry in value:
suite = basic_suites[entry]
full_suite.update(suite)
compound_suites[name] = full_suite
def resolve_variants(self, basic_test_definition, variants):
""" Merge variant-defined configurations to each test case definition in a
test suite.
The output maps a unique test name to an array of configurations because
there may exist more than one definition for a test name using variants. The
test name is referenced while mapping machines to test suites, so unpacking
the array is done by the generators.
Args:
basic_test_definition: a {} defined test suite in the format
test_name:test_config
variants: an [] of {} defining configurations to be applied to each test
case in the basic test_definition
Return:
a {} of test_name:[{}], where each {} is a merged configuration
"""
# Each test in a basic test suite will have a definition per variant.
test_suite = {}
for test_name, test_config in basic_test_definition.iteritems():
definitions = []
for variant in variants:
# Unpack the variant from variants.pyl if it's string based.
if isinstance(variant, str):
variant = self.variants[variant]
# Clone a copy of test_config so that we can have a uniquely updated
# version of it per variant
cloned_config = copy.deepcopy(test_config)
# The variant definition needs to be re-used for each test, so we'll
# create a clone and work with it as well.
cloned_variant = copy.deepcopy(variant)
cloned_config['args'] = (cloned_config.get('args', []) +
cloned_variant.get('args', []))
cloned_config['mixins'] = (cloned_config.get('mixins', []) +
cloned_variant.get('mixins', []))
basic_swarming_def = cloned_config.get('swarming', {})
variant_swarming_def = cloned_variant.get('swarming', {})
if basic_swarming_def and variant_swarming_def:
if ('dimension_sets' in basic_swarming_def and
'dimension_sets' in variant_swarming_def):
# Retain swarming dimension set merge behavior when both variant and
# the basic test configuration both define it
self.dictionary_merge(basic_swarming_def, variant_swarming_def)
# Remove dimension_sets from the variant definition, so that it does
# not replace what's been done by dictionary_merge in the update
# call below.
del variant_swarming_def['dimension_sets']
# Update the swarming definition with whatever is defined for swarming
# by the variant.
basic_swarming_def.update(variant_swarming_def)
cloned_config['swarming'] = basic_swarming_def
# The identifier is used to make the name of the test unique.
# Generators in the recipe uniquely identify a test by it's name, so we
# don't want to have the same name for each variant.
cloned_config['name'] = '{}_{}'.format(test_name,
cloned_variant['identifier'])
definitions.append(cloned_config)
test_suite[test_name] = definitions
return test_suite
def resolve_matrix_compound_test_suites(self):
self.check_composition_type_test_suites('matrix_compound_suites',
[check_matrix_identifier])
matrix_compound_suites = self.test_suites.get('matrix_compound_suites', {})
# check_composition_type_test_suites() checks that all basic suites are
# referenced by matrix suites exist.
basic_suites = self.test_suites.get('basic_suites')
for test_name, matrix_config in matrix_compound_suites.iteritems():
full_suite = {}
for test_suite, mtx_test_suite_config in matrix_config.iteritems():
basic_test_def = copy.deepcopy(basic_suites[test_suite])
if 'variants' in mtx_test_suite_config:
result = self.resolve_variants(basic_test_def,
mtx_test_suite_config['variants'])
full_suite.update(result)
matrix_compound_suites[test_name] = full_suite
def link_waterfalls_to_test_suites(self):
for waterfall in self.waterfalls:
for tester_name, tester in waterfall['machines'].iteritems():
for suite, value in tester.get('test_suites', {}).iteritems():
if not value in self.test_suites:
# Hard / impossible to cover this in the unit test.
raise self.unknown_test_suite(
value, tester_name, waterfall['name']) # pragma: no cover
tester['test_suites'][suite] = self.test_suites[value]
def load_configuration_files(self):
self.waterfalls = self.load_pyl_file('waterfalls.pyl')
self.test_suites = self.load_pyl_file('test_suites.pyl')
self.exceptions = self.load_pyl_file('test_suite_exceptions.pyl')
self.mixins = self.load_pyl_file('mixins.pyl')
self.gn_isolate_map = self.load_pyl_file('gn_isolate_map.pyl')
self.variants = self.load_pyl_file('variants.pyl')
def resolve_configuration_files(self):
self.resolve_test_id_prefixes()
self.resolve_composition_test_suites()
self.resolve_matrix_compound_test_suites()
self.flatten_test_suites()
self.link_waterfalls_to_test_suites()
def unknown_bot(self, bot_name, waterfall_name):
return BBGenErr(
'Unknown bot name "%s" on waterfall "%s"' % (bot_name, waterfall_name))
def unknown_test_suite(self, suite_name, bot_name, waterfall_name):
return BBGenErr(
'Test suite %s from machine %s on waterfall %s not present in '
'test_suites.pyl' % (suite_name, bot_name, waterfall_name))
def unknown_test_suite_type(self, suite_type, bot_name, waterfall_name):
return BBGenErr(
'Unknown test suite type ' + suite_type + ' in bot ' + bot_name +
' on waterfall ' + waterfall_name)
def apply_all_mixins(self, test, waterfall, builder_name, builder):
"""Applies all present swarming mixins to the test for a given builder.
Checks in the waterfall, builder, and test objects for mixins.
"""
def valid_mixin(mixin_name):
"""Asserts that the mixin is valid."""
if mixin_name not in self.mixins:
raise BBGenErr("bad mixin %s" % mixin_name)
def must_be_list(mixins, typ, name):
"""Asserts that given mixins are a list."""
if not isinstance(mixins, list):
raise BBGenErr("'%s' in %s '%s' must be a list" % (mixins, typ, name))
test_name = test.get('name')
remove_mixins = set()
if 'remove_mixins' in builder:
must_be_list(builder['remove_mixins'], 'builder', builder_name)
for rm in builder['remove_mixins']:
valid_mixin(rm)
remove_mixins.add(rm)
if 'remove_mixins' in test:
must_be_list(test['remove_mixins'], 'test', test_name)
for rm in test['remove_mixins']:
valid_mixin(rm)
remove_mixins.add(rm)
del test['remove_mixins']
if 'mixins' in waterfall:
must_be_list(waterfall['mixins'], 'waterfall', waterfall['name'])
for mixin in waterfall['mixins']:
if mixin in remove_mixins:
continue
valid_mixin(mixin)
test = self.apply_mixin(self.mixins[mixin], test)
if 'mixins' in builder:
must_be_list(builder['mixins'], 'builder', builder_name)
for mixin in builder['mixins']:
if mixin in remove_mixins:
continue
valid_mixin(mixin)
test = self.apply_mixin(self.mixins[mixin], test)
if not 'mixins' in test:
return test
if not test_name:
test_name = test.get('test')
if not test_name: # pragma: no cover
# Not the best name, but we should say something.
test_name = str(test)
must_be_list(test['mixins'], 'test', test_name)
for mixin in test['mixins']:
# We don't bother checking if the given mixin is in remove_mixins here
# since this is already the lowest level, so if a mixin is added here that
# we don't want, we can just delete its entry.
valid_mixin(mixin)
test = self.apply_mixin(self.mixins[mixin], test)
del test['mixins']
return test
def apply_mixin(self, mixin, test):
"""Applies a mixin to a test.
Mixins will not override an existing key. This is to ensure exceptions can
override a setting a mixin applies.
Swarming dimensions are handled in a special way. Instead of specifying
'dimension_sets', which is how normal test suites specify their dimensions,
you specify a 'dimensions' key, which maps to a dictionary. This dictionary
is then applied to every dimension set in the test.
"""
new_test = copy.deepcopy(test)
mixin = copy.deepcopy(mixin)
if 'swarming' in mixin:
swarming_mixin = mixin['swarming']
new_test.setdefault('swarming', {})
if 'dimensions' in swarming_mixin:
new_test['swarming'].setdefault('dimension_sets', [{}])
for dimension_set in new_test['swarming']['dimension_sets']:
dimension_set.update(swarming_mixin['dimensions'])
del swarming_mixin['dimensions']
# python dict update doesn't do recursion at all. Just hard code the
# nested update we need (mixin['swarming'] shouldn't clobber
# test['swarming'], but should update it).
new_test['swarming'].update(swarming_mixin)
del mixin['swarming']
if '$mixin_append' in mixin:
# Values specified under $mixin_append should be appended to existing
# lists, rather than replacing them.
mixin_append = mixin['$mixin_append']
for key in mixin_append:
new_test.setdefault(key, [])
if not isinstance(mixin_append[key], list):
raise BBGenErr(
'Key "' + key + '" in $mixin_append must be a list.')
if not isinstance(new_test[key], list):
raise BBGenErr(
'Cannot apply $mixin_append to non-list "' + key + '".')
new_test[key].extend(mixin_append[key])
if 'args' in mixin_append:
new_test['args'] = self.maybe_fixup_args_array(new_test['args'])
del mixin['$mixin_append']
new_test.update(mixin)
return new_test
def generate_output_tests(self, waterfall):
"""Generates the tests for a waterfall.
Args:
waterfall: a dictionary parsed from a master pyl file
Returns:
A dictionary mapping builders to test specs
"""
return {
name: self.get_tests_for_config(waterfall, name, config)
for name, config
in waterfall['machines'].iteritems()
}
def get_tests_for_config(self, waterfall, name, config):
generator_map = self.get_test_generator_map()
test_type_remapper = self.get_test_type_remapper()
tests = {}
# Copy only well-understood entries in the machine's configuration
# verbatim into the generated JSON.
if 'additional_compile_targets' in config:
tests['additional_compile_targets'] = config[
'additional_compile_targets']
for test_type, input_tests in config.get('test_suites', {}).iteritems():
if test_type not in generator_map:
raise self.unknown_test_suite_type(
test_type, name, waterfall['name']) # pragma: no cover
test_generator = generator_map[test_type]
# Let multiple kinds of generators generate the same kinds
# of tests. For example, gpu_telemetry_tests are a
# specialization of isolated_scripts.
new_tests = test_generator.generate(
waterfall, name, config, input_tests)
remapped_test_type = test_type_remapper.get(test_type, test_type)
tests[remapped_test_type] = test_generator.sort(
tests.get(remapped_test_type, []) + new_tests)
return tests
def jsonify(self, all_tests):
return json.dumps(
all_tests, indent=2, separators=(',', ': '),
sort_keys=True) + '\n'
def generate_outputs(self): # pragma: no cover
self.load_configuration_files()
self.resolve_configuration_files()
filters = self.args.waterfall_filters
result = collections.defaultdict(dict)
required_fields = ('project', 'bucket', 'name')
for waterfall in self.waterfalls:
for field in required_fields:
# Verify required fields
if field not in waterfall:
raise BBGenErr("Waterfall %s has no %s" % (waterfall['name'], field))
# Handle filter flag, if specified
if filters and waterfall['name'] not in filters:
continue
# Join config files and hardcoded values together
all_tests = self.generate_output_tests(waterfall)
result[waterfall['name']] = all_tests
# Deduce per-bucket mappings
# This will be the standard after masternames are gone
bucket_filename = waterfall['project'] + '.' + waterfall['bucket']
for buildername in waterfall['machines'].keys():
result[bucket_filename][buildername] = all_tests[buildername]
# Add do not edit warning
for tests in result.values():
tests['AAAAA1 AUTOGENERATED FILE DO NOT EDIT'] = {}
tests['AAAAA2 See generate_buildbot_json.py to make changes'] = {}
return result
def write_json_result(self, result): # pragma: no cover
suffix = '.json'
if self.args.new_files:
suffix = '.new' + suffix
for filename, contents in result.items():
jsonstr = self.jsonify(contents)
self.write_file(self.pyl_file_path(filename + suffix), jsonstr)
def get_valid_bot_names(self):
# Extract bot names from infra/config/luci-milo.cfg.
# NOTE: This reference can cause issues; if a file changes there, the
# presubmit here won't be run by default. A manually maintained list there
# tries to run presubmit here when luci-milo.cfg is changed. If any other
# references to configs outside of this directory are added, please change
# their presubmit to run `generate_buildbot_json.py -c`, so that the tree
# never ends up in an invalid state.
project_star = glob.glob(
os.path.join(self.args.infra_config_dir, 'project.star'))
if project_star:
is_master_pattern = re.compile('is_master\s*=\s*(True|False)')
for l in self.read_file(project_star[0]).splitlines():
match = is_master_pattern.search(l)
if match:
if match.group(1) == 'False':
return None
break
bot_names = set()
milo_configs = glob.glob(
os.path.join(self.args.infra_config_dir, 'generated', 'luci-milo*.cfg'))
for c in milo_configs:
for l in self.read_file(c).splitlines():
if (not 'name: "buildbucket/luci.chromium.' in l and
not 'name: "buildbucket/luci.chrome.' in l):
continue
# l looks like
# `name: "buildbucket/luci.chromium.try/win_chromium_dbg_ng"`
# Extract win_chromium_dbg_ng part.
bot_names.add(l[l.rindex('/') + 1:l.rindex('"')])
return bot_names
def get_builders_that_do_not_actually_exist(self):
# Some of the bots on the chromium.gpu.fyi waterfall in particular
# are defined only to be mirrored into trybots, and don't actually
# exist on any of the waterfalls or consoles.
return [
'GPU FYI Fuchsia Builder',
'ANGLE GPU Android Release (Nexus 5X)',
'ANGLE GPU Linux Release (Intel HD 630)',
'ANGLE GPU Linux Release (NVIDIA)',
'ANGLE GPU Mac Release (Intel)',
'ANGLE GPU Mac Retina Release (AMD)',
'ANGLE GPU Mac Retina Release (NVIDIA)',
'ANGLE GPU Win10 x64 Release (Intel HD 630)',
'ANGLE GPU Win10 x64 Release (NVIDIA)',
'Optional Android Release (Nexus 5X)',
'Optional Linux Release (Intel HD 630)',
'Optional Linux Release (NVIDIA)',
'Optional Mac Release (Intel)',
'Optional Mac Retina Release (AMD)',
'Optional Mac Retina Release (NVIDIA)',
'Optional Win10 x64 Release (Intel HD 630)',
'Optional Win10 x64 Release (NVIDIA)',
'Win7 ANGLE Tryserver (AMD)',
# chromium.fyi
'linux-blink-rel-dummy',
'mac10.10-blink-rel-dummy',
'mac10.11-blink-rel-dummy',
'mac10.12-blink-rel-dummy',
'mac10.13_retina-blink-rel-dummy',
'mac10.13-blink-rel-dummy',
'mac10.14-blink-rel-dummy',
'mac10.15-blink-rel-dummy',
'win7-blink-rel-dummy',
'win10-blink-rel-dummy',
'Dummy WebKit Mac10.13',
'WebKit Linux composite_after_paint Dummy Builder',
'WebKit Linux layout_ng_disabled Builder',
# chromium, due to https://crbug.com/878915
'win-dbg',
'win32-dbg',
'win-archive-dbg',
'win32-archive-dbg',
# TODO(crbug.com/1033753) Delete these when coverage is enabled by default
# on Windows tryjobs.
'GPU Win x64 Builder Code Coverage',
'Win x64 Builder Code Coverage',
'Win10 Tests x64 Code Coverage',
'Win10 x64 Release (NVIDIA) Code Coverage',
# TODO(crbug.com/1024915) Delete these when coverage is enabled by default
# on Mac OS tryjobs.
'Mac Builder Code Coverage',
'Mac10.13 Tests Code Coverage',
'GPU Mac Builder Code Coverage',
'Mac Release (Intel) Code Coverage',
'Mac Retina Release (AMD) Code Coverage',
]
def get_internal_waterfalls(self):
# Similar to get_builders_that_do_not_actually_exist above, but for
# waterfalls defined in internal configs.
return ['chrome', 'chrome.pgo']
def check_input_file_consistency(self, verbose=False):
self.check_input_files_sorting(verbose)
self.load_configuration_files()
self.check_composition_type_test_suites('compound_suites')
self.check_composition_type_test_suites('matrix_compound_suites',
[check_matrix_identifier])
self.resolve_test_id_prefixes()
self.flatten_test_suites()
# All bots should exist.
bot_names = self.get_valid_bot_names()
builders_that_dont_exist = self.get_builders_that_do_not_actually_exist()
if bot_names is not None:
internal_waterfalls = self.get_internal_waterfalls()
for waterfall in self.waterfalls:
# TODO(crbug.com/991417): Remove the need for this exception.
if waterfall['name'] in internal_waterfalls:
continue # pragma: no cover
for bot_name in waterfall['machines']:
if bot_name in builders_that_dont_exist:
continue # pragma: no cover
if bot_name not in bot_names:
if waterfall['name'] in ['client.v8.chromium', 'client.v8.fyi']:
# TODO(thakis): Remove this once these bots move to luci.
continue # pragma: no cover
if waterfall['name'] in ['tryserver.webrtc',
'webrtc.chromium.fyi.experimental']:
# These waterfalls have their bot configs in a different repo.
# so we don't know about their bot names.
continue # pragma: no cover
if waterfall['name'] in ['client.devtools-frontend.integration',
'tryserver.devtools-frontend',
'chromium.devtools-frontend']:
continue # pragma: no cover
raise self.unknown_bot(bot_name, waterfall['name'])
# All test suites must be referenced.
suites_seen = set()
generator_map = self.get_test_generator_map()
for waterfall in self.waterfalls:
for bot_name, tester in waterfall['machines'].iteritems():
for suite_type, suite in tester.get('test_suites', {}).iteritems():
if suite_type not in generator_map:
raise self.unknown_test_suite_type(suite_type, bot_name,
waterfall['name'])
if suite not in self.test_suites:
raise self.unknown_test_suite(suite, bot_name, waterfall['name'])
suites_seen.add(suite)
# Since we didn't resolve the configuration files, this set
# includes both composition test suites and regular ones.
resolved_suites = set()
for suite_name in suites_seen:
suite = self.test_suites[suite_name]
for sub_suite in suite:
resolved_suites.add(sub_suite)
resolved_suites.add(suite_name)
# At this point, every key in test_suites.pyl should be referenced.
missing_suites = set(self.test_suites.keys()) - resolved_suites
if missing_suites:
raise BBGenErr('The following test suites were unreferenced by bots on '
'the waterfalls: ' + str(missing_suites))
# All test suite exceptions must refer to bots on the waterfall.
all_bots = set()
missing_bots = set()
for waterfall in self.waterfalls:
for bot_name, tester in waterfall['machines'].iteritems():
all_bots.add(bot_name)
# In order to disambiguate between bots with the same name on
# different waterfalls, support has been added to various
# exceptions for concatenating the waterfall name after the bot
# name.
all_bots.add(bot_name + ' ' + waterfall['name'])
for exception in self.exceptions.itervalues():
removals = (exception.get('remove_from', []) +
exception.get('remove_gtest_from', []) +
exception.get('modifications', {}).keys())
for removal in removals:
if removal not in all_bots:
missing_bots.add(removal)
missing_bots = missing_bots - set(builders_that_dont_exist)
if missing_bots:
raise BBGenErr('The following nonexistent machines were referenced in '
'the test suite exceptions: ' + str(missing_bots))
# All mixins must be referenced
seen_mixins = set()
for waterfall in self.waterfalls:
seen_mixins = seen_mixins.union(waterfall.get('mixins', set()))
for bot_name, tester in waterfall['machines'].iteritems():
seen_mixins = seen_mixins.union(tester.get('mixins', set()))
for suite in self.test_suites.values():
if isinstance(suite, list):
# Don't care about this, it's a composition, which shouldn't include a
# swarming mixin.
continue
for test in suite.values():
if not isinstance(test, dict):
# Some test suites have top level keys, which currently can't be
# swarming mixin entries. Ignore them
continue
seen_mixins = seen_mixins.union(test.get('mixins', set()))
missing_mixins = set(self.mixins.keys()) - seen_mixins
if missing_mixins:
raise BBGenErr('The following mixins are unreferenced: %s. They must be'
' referenced in a waterfall, machine, or test suite.' % (
str(missing_mixins)))
# All variant references must be referenced
seen_variants = set()
for suite in self.test_suites.values():
if isinstance(suite, list):
continue
for test in suite.values():
if isinstance(test, dict):
for variant in test.get('variants', []):
if isinstance(variant, str):
seen_variants.add(variant)
missing_variants = set(self.variants.keys()) - seen_variants
if missing_variants:
raise BBGenErr('The following variants were unreferenced: %s. They must '
'be referenced in a matrix test suite under the variants '
'key.' % str(missing_variants))
  def type_assert(self, node, typ, filename, verbose=False):
    """Asserts that the Python AST node |node| is of type |typ|.

    If verbose is set, it prints out some helpful context lines, showing where
    exactly the error occurred in the file.

    Raises:
      BBGenErr: always, when |node| is not an instance of |typ|.
    """
    if not isinstance(node, typ):
      if verbose:
        # Prepend one empty entry so list indices line up with the AST's
        # 1-based line numbers.
        lines = [""] + self.read_file(filename).splitlines()

        # Number of context lines to show on each side of the error line.
        context = 2
        lines_start = max(node.lineno - context, 0)
        # Add one to include the last line
        lines_end = min(node.lineno + context, len(lines)) + 1
        # Build an annotated excerpt: header, snip marker, the context
        # lines before the node, a rule, the offending line, a caret
        # marker under the error column, the context lines after, snip.
        lines = (
            ['== %s ==\n' % filename] +
            ["<snip>\n"] +
            ['%d %s' % (lines_start + i, line) for i, line in enumerate(
                lines[lines_start:lines_start + context])] +
            ['-' * 80 + '\n'] +
            ['%d %s' % (node.lineno, lines[node.lineno])] +
            ['-' * (node.col_offset + 3) + '^' + '-' * (
                80 - node.col_offset - 4) + '\n'] +
            ['%d %s' % (node.lineno + 1 + i, line) for i, line in enumerate(
                lines[node.lineno + 1:lines_end])] +
            ["<snip>\n"]
        )
        # Print out a useful message when a type assertion fails.
        for l in lines:
          self.print_line(l.strip())

      node_dumped = ast.dump(node, annotate_fields=False)
      # If the node is huge, truncate it so everything fits in a terminal
      # window.
      if len(node_dumped) > 60: # pragma: no cover
        node_dumped = node_dumped[:30] + ' <SNIP> ' + node_dumped[-30:]
      raise BBGenErr(
          'Invalid .pyl file %r. Python AST node %r on line %s expected to'
          ' be %s, is %s' % (
              filename, node_dumped,
              node.lineno, typ, type(node)))
  def check_ast_list_formatted(self, keys, filename, verbose,
                               check_sorting=True):
    """Checks if a list of ast keys are correctly formatted.

    Currently only checks to ensure they're correctly sorted, and that there
    are no duplicates.

    Args:
      keys: A python list of AST nodes.

            It's a list of AST nodes instead of a list of strings because
            when verbose is set, it tries to print out context of where the
            diffs are in the file.
      filename: The name of the file this node is from.
      verbose: If set, print out diff information about how the keys are
               incorrectly formatted.
      check_sorting: If true, checks if the list is sorted.
    Returns:
      If the keys are correctly formatted.
    """
    if not keys:
      return True

    # NOTE(review): ast.Str is deprecated in favor of ast.Constant on
    # modern Python 3; fine on the py2 interpreter this script targets.
    assert isinstance(keys[0], ast.Str)

    # Plain string value of each key node.
    keys_strs = [k.s for k in keys]
    # Keys to diff against. Used below.
    keys_to_diff_against = None
    # If the list is properly formatted.
    list_formatted = True

    # Duplicates are always bad.
    if len(set(keys_strs)) != len(keys_strs):
      list_formatted = False
      # Dedup while preserving first-seen order, so the diff below shows
      # the list as it would look with duplicates removed.
      keys_to_diff_against = list(collections.OrderedDict.fromkeys(keys_strs))

    if check_sorting and sorted(keys_strs) != keys_strs:
      list_formatted = False

    if list_formatted:
      return True

    if verbose:
      line_num = keys[0].lineno
      keys = [k.s for k in keys]
      if check_sorting:
        # If we have duplicates, sorting this will take care of it anyways.
        keys_to_diff_against = sorted(set(keys))
      # else, keys_to_diff_against is set above already

      self.print_line('=' * 80)
      self.print_line('(First line of keys is %s)' % line_num)
      # Show a context diff between the current ordering and the desired
      # one so the file can be fixed by hand.
      for line in difflib.context_diff(
          keys, keys_to_diff_against,
          fromfile='current (%r)' % filename, tofile='sorted', lineterm=''):
        self.print_line(line)
      self.print_line('=' * 80)

    return False
def check_ast_dict_formatted(self, node, filename, verbose):
"""Checks if an ast dictionary's keys are correctly formatted.
Just a simple wrapper around check_ast_list_formatted.
Args:
node: An AST node. Assumed to be a dictionary.
filename: The name of the file this node is from.
verbose: If set, print out diff information about how the keys are
incorrectly formatted.
check_sorting: If true, checks if the list is sorted.
Returns:
If the dictionary is correctly formatted.
"""
keys = []
# The keys of this dict are ordered as ordered in the file; normal python
# dictionary keys are given an arbitrary order, but since we parsed the
# file itself, the order as given in the file is preserved.
for key in node.keys:
self.type_assert(key, ast.Str, filename, verbose)
keys.append(key)
return self.check_ast_list_formatted(keys, filename, verbose)
  def check_input_files_sorting(self, verbose=False):
    """Checks that keys in the input .pyl files are sorted and duplicate-free.

    Args:
      verbose: If set, print diff details for each formatting failure.

    Raises:
      BBGenErr: if any input file has unsorted or duplicated keys.
    """
    # TODO(https://crbug.com/886993): Add the ability for this script to
    # actually format the files, rather than just complain if they're
    # incorrectly formatted.
    bad_files = set()

    def parse_file(filename):
      """Parses and validates a .pyl file.

      Returns an AST node representing the value in the pyl file."""
      parsed = ast.parse(self.read_file(self.pyl_file_path(filename)))

      # Must be a module.
      self.type_assert(parsed, ast.Module, filename, verbose)
      module = parsed.body

      # Only one expression in the module.
      self.type_assert(module, list, filename, verbose)
      if len(module) != 1: # pragma: no cover
        raise BBGenErr('Invalid .pyl file %s' % filename)
      expr = module[0]
      self.type_assert(expr, ast.Expr, filename, verbose)

      return expr.value

    # Handle this separately: waterfalls.pyl is a list of dicts, unlike the
    # other (dict-shaped) files.
    filename = 'waterfalls.pyl'
    value = parse_file(filename)
    # Value should be a list.
    self.type_assert(value, ast.List, filename, verbose)

    keys = []
    for val in value.elts:
      self.type_assert(val, ast.Dict, filename, verbose)
      waterfall_name = None
      for key, val in zip(val.keys, val.values):
        self.type_assert(key, ast.Str, filename, verbose)
        # The machines within each waterfall must themselves be sorted.
        if key.s == 'machines':
          if not self.check_ast_dict_formatted(val, filename, verbose):
            bad_files.add(filename)

        if key.s == "name":
          self.type_assert(val, ast.Str, filename, verbose)
          waterfall_name = val
      assert waterfall_name
      keys.append(waterfall_name)

    # Waterfalls themselves must be sorted by name.
    if not self.check_ast_list_formatted(keys, filename, verbose):
      bad_files.add(filename)

    for filename in (
        'mixins.pyl',
        'test_suites.pyl',
        'test_suite_exceptions.pyl',
    ):
      value = parse_file(filename)
      # Value should be a dictionary.
      self.type_assert(value, ast.Dict, filename, verbose)

      if not self.check_ast_dict_formatted(
          value, filename, verbose):
        bad_files.add(filename)

      if filename == 'test_suites.pyl':
        expected_keys = ['basic_suites',
                         'compound_suites',
                         'matrix_compound_suites']
        actual_keys = [node.s for node in value.keys]
        assert all(key in expected_keys for key in actual_keys), (
                   'Invalid %r file; expected keys %r, got %r' % (
                       filename, expected_keys, actual_keys))
        suite_dicts = [node for node in value.values]
        # Three allowed top-level keys should mean at most three suite
        # groups.
        assert len(suite_dicts) <= 3
        for suite_group in suite_dicts:
          if not self.check_ast_dict_formatted(
              suite_group, filename, verbose):
            bad_files.add(filename)

        for key, suite in zip(value.keys, value.values):
          # The compound suites are checked in
          # 'check_composition_type_test_suites()'
          if key.s == 'basic_suites':
            for group in suite.values:
              if not self.check_ast_dict_formatted(group, filename, verbose):
                bad_files.add(filename)
            break

      elif filename == 'test_suite_exceptions.pyl':
        # Check the values for each test.
        for test in value.values:
          for kind, node in zip(test.keys, test.values):
            if isinstance(node, ast.Dict):
              if not self.check_ast_dict_formatted(node, filename, verbose):
                bad_files.add(filename)
            elif kind.s == 'remove_from':
              # Don't care about sorting; these are usually grouped, since the
              # same bug can affect multiple builders. Do want to make sure
              # there aren't duplicates.
              if not self.check_ast_list_formatted(node.elts, filename, verbose,
                                                   check_sorting=False):
                bad_files.add(filename)

    if bad_files:
      raise BBGenErr(
          'The following files have invalid keys: %s\n. They are either '
          'unsorted, or have duplicates. Re-run this with --verbose to see '
          'more details.' % ', '.join(bad_files))
def check_output_file_consistency(self, verbose=False):
self.load_configuration_files()
# All waterfalls/bucket .json files must have been written
# by this script already.
self.resolve_configuration_files()
ungenerated_files = set()
for filename, expected_contents in self.generate_outputs().items():
expected = self.jsonify(expected_contents)
file_path = filename + '.json'
current = self.read_file(self.pyl_file_path(file_path))
if expected != current:
ungenerated_files.add(filename)
if verbose: # pragma: no cover
self.print_line('File ' + filename +
'.json did not have the following expected '
'contents:')
for line in difflib.unified_diff(
expected.splitlines(),
current.splitlines(),
fromfile='expected', tofile='current'):
self.print_line(line)
if ungenerated_files:
raise BBGenErr(
'The following files have not been properly '
'autogenerated by generate_buildbot_json.py: ' +
', '.join([filename + '.json' for filename in ungenerated_files]))
  def check_consistency(self, verbose=False):
    """Runs both the input- and output-file consistency checks.

    Args:
      verbose: passed through to the underlying checks for extra detail.

    Raises:
      BBGenErr: if any configuration or generated file is inconsistent.
    """
    self.check_input_file_consistency(verbose) # pragma: no cover
    self.check_output_file_consistency(verbose) # pragma: no cover
  def parse_args(self, argv): # pragma: no cover
    """Parses command-line arguments into self.args.

    The two main modes (--check and --query) are mutually exclusive;
    with neither, the tool generates the .json output files.
    """
    # RawTextHelpFormatter allows for styling of help statement
    parser = argparse.ArgumentParser(formatter_class=
                                     argparse.RawTextHelpFormatter)
    group = parser.add_mutually_exclusive_group()
    group.add_argument(
      '-c', '--check', action='store_true', help=
      'Do consistency checks of configuration and generated files and then '
      'exit. Used during presubmit. Causes the tool to not generate any files.')
    group.add_argument(
      '--query', type=str, help=
      ("Returns raw JSON information of buildbots and tests.\n" +
       "Examples:\n" +
       " List all bots (all info):\n" +
       " --query bots\n\n" +
       " List all bots and only their associated tests:\n" +
       " --query bots/tests\n\n" +
       " List all information about 'bot1' " +
       "(make sure you have quotes):\n" +
       " --query bot/'bot1'\n\n" +
       " List tests running for 'bot1' (make sure you have quotes):\n" +
       " --query bot/'bot1'/tests\n\n" +
       " List all tests:\n" +
       " --query tests\n\n" +
       " List all tests and the bots running them:\n" +
       " --query tests/bots\n\n"+
       " List all tests that satisfy multiple parameters\n" +
       " (separation of parameters by '&' symbol):\n" +
       " --query tests/'device_os:Android&device_type:hammerhead'\n\n" +
       " List all tests that run with a specific flag:\n" +
       " --query bots/'--test-launcher-print-test-studio=always'\n\n" +
       " List specific test (make sure you have quotes):\n"
       " --query test/'test1'\n\n"
       " List all bots running 'test1' " +
       "(make sure you have quotes):\n" +
       " --query test/'test1'/bots" ))
    parser.add_argument(
      '-n', '--new-files', action='store_true', help=
      'Write output files as .new.json. Useful during development so old and '
      'new files can be looked at side-by-side.')
    parser.add_argument(
      '-v', '--verbose', action='store_true', help=
      'Increases verbosity. Affects consistency checks.')
    parser.add_argument(
      'waterfall_filters', metavar='waterfalls', type=str, nargs='*',
      help='Optional list of waterfalls to generate.')
    parser.add_argument(
      '--pyl-files-dir', type=os.path.realpath,
      help='Path to the directory containing the input .pyl files.')
    parser.add_argument(
      '--json', help=
      ("Outputs results into a json file. Only works with query function.\n" +
       "Examples:\n" +
       " Outputs file into specified json file: \n" +
       " --json <file-name-here.json>"))
    parser.add_argument(
      '--infra-config-dir',
      help='Path to the LUCI services configuration directory',
      default=os.path.abspath(
          os.path.join(os.path.dirname(__file__),
                       '..', '..', 'infra', 'config')))
    self.args = parser.parse_args(argv)
    # --json only makes sense when there is query output to write to it.
    if self.args.json and not self.args.query:
      parser.error("The --json flag can only be used with --query.")
    # Normalize so later path operations do not depend on the caller's CWD.
    self.args.infra_config_dir = os.path.abspath(self.args.infra_config_dir)
def does_test_match(self, test_info, params_dict):
"""Checks to see if the test matches the parameters given.
Compares the provided test_info with the params_dict to see
if the bot matches the parameters given. If so, returns True.
Else, returns false.
Args:
test_info (dict): Information about a specific bot provided
in the format shown in waterfalls.pyl
params_dict (dict): Dictionary of parameters and their values
to look for in the bot
Ex: {
'device_os':'android',
'--flag':True,
'mixins': ['mixin1', 'mixin2'],
'ex_key':'ex_value'
}
"""
DIMENSION_PARAMS = ['device_os', 'device_type', 'os',
'kvm', 'pool', 'integrity'] # dimension parameters
SWARMING_PARAMS = ['shards', 'hard_timeout', 'idempotent',
'can_use_on_swarming_builders']
for param in params_dict:
# if dimension parameter
if param in DIMENSION_PARAMS or param in SWARMING_PARAMS:
if not 'swarming' in test_info:
return False
swarming = test_info['swarming']
if param in SWARMING_PARAMS:
if not param in swarming:
return False
if not str(swarming[param]) == params_dict[param]:
return False
else:
if not 'dimension_sets' in swarming:
return False
d_set = swarming['dimension_sets']
# only looking at the first dimension set
if not param in d_set[0]:
return False
if not d_set[0][param] == params_dict[param]:
return False
# if flag
elif param.startswith('--'):
if not 'args' in test_info:
return False
if not param in test_info['args']:
return False
# not dimension parameter/flag/mixin
else:
if not param in test_info:
return False
if not test_info[param] == params_dict[param]:
return False
return True
def error_msg(self, msg):
"""Prints an error message.
In addition to a catered error message, also prints
out where the user can find more help. Then, program exits.
"""
self.print_line(msg + (' If you need more information, ' +
'please run with -h or --help to see valid commands.'))
sys.exit(1)
def find_bots_that_run_test(self, test, bots):
matching_bots = []
for bot in bots:
bot_info = bots[bot]
tests = self.flatten_tests_for_bot(bot_info)
for test_info in tests:
test_name = ""
if 'name' in test_info:
test_name = test_info['name']
elif 'test' in test_info:
test_name = test_info['test']
if not test_name == test:
continue
matching_bots.append(bot)
return matching_bots
def find_tests_with_params(self, tests, params_dict):
matching_tests = []
for test_name in tests:
test_info = tests[test_name]
if not self.does_test_match(test_info, params_dict):
continue
if not test_name in matching_tests:
matching_tests.append(test_name)
return matching_tests
def flatten_waterfalls_for_query(self, waterfalls):
bots = {}
for waterfall in waterfalls:
waterfall_tests = self.generate_output_tests(waterfall)
for bot in waterfall_tests:
bot_info = waterfall_tests[bot]
bots[bot] = bot_info
return bots
def flatten_tests_for_bot(self, bot_info):
"""Returns a list of flattened tests.
Returns a list of tests not grouped by test category
for a specific bot.
"""
TEST_CATS = self.get_test_generator_map().keys()
tests = []
for test_cat in TEST_CATS:
if not test_cat in bot_info:
continue
test_cat_tests = bot_info[test_cat]
tests = tests + test_cat_tests
return tests
def flatten_tests_for_query(self, test_suites):
"""Returns a flattened dictionary of tests.
Returns a dictionary of tests associate with their
configuration, not grouped by their test suite.
"""
tests = {}
for test_suite in test_suites.itervalues():
for test in test_suite:
test_info = test_suite[test]
test_name = test
if 'name' in test_info:
test_name = test_info['name']
tests[test_name] = test_info
return tests
def parse_query_filter_params(self, params):
"""Parses the filter parameters.
Creates a dictionary from the parameters provided
to filter the bot array.
"""
params_dict = {}
for p in params:
# flag
if p.startswith("--"):
params_dict[p] = True
else:
pair = p.split(":")
if len(pair) != 2:
self.error_msg('Invalid command.')
# regular parameters
if pair[1].lower() == "true":
params_dict[pair[0]] = True
elif pair[1].lower() == "false":
params_dict[pair[0]] = False
else:
params_dict[pair[0]] = pair[1]
return params_dict
def get_test_suites_dict(self, bots):
"""Returns a dictionary of bots and their tests.
Returns a dictionary of bots and a list of their associated tests.
"""
test_suite_dict = dict()
for bot in bots:
bot_info = bots[bot]
tests = self.flatten_tests_for_bot(bot_info)
test_suite_dict[bot] = tests
return test_suite_dict
def output_query_result(self, result, json_file=None):
"""Outputs the result of the query.
If a json file parameter name is provided, then
the result is output into the json file. If not,
then the result is printed to the console.
"""
output = json.dumps(result, indent=2)
if json_file:
self.write_file(json_file, output)
else:
self.print_line(output)
return
def query(self, args):
"""Queries tests or bots.
Depending on the arguments provided, outputs a json of
tests or bots matching the appropriate optional parameters provided.
"""
# split up query statement
query = args.query.split('/')
self.load_configuration_files()
self.resolve_configuration_files()
# flatten bots json
tests = self.test_suites
bots = self.flatten_waterfalls_for_query(self.waterfalls)
cmd_class = query[0]
# For queries starting with 'bots'
if cmd_class == "bots":
if len(query) == 1:
return self.output_query_result(bots, args.json)
# query with specific parameters
elif len(query) == 2:
if query[1] == 'tests':
test_suites_dict = self.get_test_suites_dict(bots)
return self.output_query_result(test_suites_dict, args.json)
else:
self.error_msg("This query should be in the format: bots/tests.")
else:
self.error_msg("This query should have 0 or 1 '/', found %s instead."
% str(len(query)-1))
# For queries starting with 'bot'
elif cmd_class == "bot":
if not len(query) == 2 and not len(query) == 3:
self.error_msg("Command should have 1 or 2 '/', found %s instead."
% str(len(query)-1))
bot_id = query[1]
if not bot_id in bots:
self.error_msg("No bot named '" + bot_id + "' found.")
bot_info = bots[bot_id]
if len(query) == 2:
return self.output_query_result(bot_info, args.json)
if not query[2] == 'tests':
self.error_msg("The query should be in the format:" +
"bot/<bot-name>/tests.")
bot_tests = self.flatten_tests_for_bot(bot_info)
return self.output_query_result(bot_tests, args.json)
# For queries starting with 'tests'
elif cmd_class == "tests":
if not len(query) == 1 and not len(query) == 2:
self.error_msg("The query should have 0 or 1 '/', found %s instead."
% str(len(query)-1))
flattened_tests = self.flatten_tests_for_query(tests)
if len(query) == 1:
return self.output_query_result(flattened_tests, args.json)
# create params dict
params = query[1].split('&')
params_dict = self.parse_query_filter_params(params)
matching_bots = self.find_tests_with_params(flattened_tests, params_dict)
return self.output_query_result(matching_bots)
# For queries starting with 'test'
elif cmd_class == "test":
if not len(query) == 2 and not len(query) == 3:
self.error_msg("The query should have 1 or 2 '/', found %s instead."
% str(len(query)-1))
test_id = query[1]
if len(query) == 2:
flattened_tests = self.flatten_tests_for_query(tests)
for test in flattened_tests:
if test == test_id:
return self.output_query_result(flattened_tests[test], args.json)
self.error_msg("There is no test named %s." % test_id)
if not query[2] == 'bots':
self.error_msg("The query should be in the format: " +
"test/<test-name>/bots")
bots_for_test = self.find_bots_that_run_test(test_id, bots)
return self.output_query_result(bots_for_test)
else:
self.error_msg("Your command did not match any valid commands." +
"Try starting with 'bots', 'bot', 'tests', or 'test'.")
  def main(self, argv): # pragma: no cover
    """Entry point: parse argv, then run exactly one of the three modes
    (consistency check, query, or output generation). Returns 0.
    """
    self.parse_args(argv)
    if self.args.check:
      self.check_consistency(verbose=self.args.verbose)
    elif self.args.query:
      self.query(self.args)
    else:
      # Default mode: write the generated .json files.
      self.write_json_result(self.generate_outputs())
    return 0
# Script entry point; exit status is whatever main() returns.
if __name__ == "__main__": # pragma: no cover
  generator = BBJSONGenerator()
  sys.exit(generator.main(sys.argv[1:]))
| bsd-3-clause |
mkurg/study | corpus/crawler.py | 1 | 1878 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib.request
import re
import csv
# Seed article for the crawl.
initial_url = 'http://www.mr-info.ru/12017-ministr-postavil-pyaterku-nash-gorod-poluchil-pochetnuyu-gramotu-za-surdlimpiadu.html'
# Maximum number of pages to download in this run.
pages_number = 1800

# Frontier of URLs to visit, seeded with the initial article.
all_urls = []
all_urls.append(initial_url)
# URLs fetched in previous runs, persisted in downloaded.csv.
downloaded_urls = []
downloaded_pages = {}

try:
    with open('downloaded.csv', encoding='utf-8') as downloaded_files:
        dw_files = csv.reader(downloaded_files, delimiter='\t')
        for row in dw_files:
            downloaded_urls.append(row[0])
except FileNotFoundError:
    # First run: no download log yet.
    pass

try:
    # Restore the frontier saved by a previous run.
    with open('urls.txt') as urls_file:
        for line in urls_file:
            all_urls.append(line.strip())
except FileNotFoundError:
    pass
def download_page(url):
    """Downloads the page at *url* and saves it into the 'raw_html' folder.

    Returns [success, page] where success is a bool and page is the decoded
    HTML text ('' when the download failed before any content was read).
    On success the URL is also appended to downloaded_urls and logged in
    downloaded.csv.
    """
    # HTTPError lives in urllib.error, not urllib.request.
    import urllib.error

    try:
        response = urllib.request.urlopen(url)
    except urllib.error.HTTPError:
        # BUG FIX: the original caught a bare, undefined 'HTTPError' name
        # (NameError at raise time) and then returned the unbound variable
        # 'page' (a second NameError). Return an empty page instead.
        return [False, '']
    page = response.read().decode("cp1251")
    try:
        # Article URLs end in a dash-separated slug plus '.html'.
        filename = re.search(r'([a-z0-9]*-)*[a-z0-9]*\.html', url).group(0)
    except AttributeError:
        # URL does not look like an article page; nothing to save.
        return [False, page]
    with open('raw_html/' + filename, 'w', encoding='utf-8') as local_file:
        local_file.write(page)
    downloaded_urls.append(url)
    # Append to the persistent download log so re-runs skip this URL.
    with open('downloaded.csv', 'a') as downloaded_files:
        dw_files = csv.writer(downloaded_files, delimiter='\t')
        dw_files.writerow([url, 'raw_html/' + filename])
    return [True, page]
def extract_urls(page):
    """Appends every mr-info.ru article URL found in *page* to the global
    all_urls frontier."""
    found = re.findall('(http://www\.mr-info\.ru/([a-z0-9]*-)*[a-z0-9]*\.html)', page)
    for match in found:
        # findall returns (full_url, last_group) tuples; keep the full URL.
        all_urls.append(match[0])
# Crawl loop: extract_urls() appends to all_urls while this loop iterates
# it, which is how newly discovered pages get visited in later iterations.
for i in all_urls:
    if pages_number > 0:
        if i not in downloaded_urls:
            a = download_page(i)
            # a is [success, page]; only expand pages that downloaded OK.
            if a[0] == True:
                extract_urls(a[1])
                pages_number -= 1
                print(str(pages_number) + '\n' + i)
with open('urls.txt', 'w') as urls_file:
for i in set(all_urls):
urls_file.write(i + '\n') | gpl-2.0 |
cdparks/hugo | src/parse.py | 1 | 2142 | # encoding: utf-8
from __future__ import unicode_literals, print_function
__all__ = ['parse']
from collections import namedtuple
from src.vm import symbols, Push
# A lexical token: 'tag' is one of the constants below, 'value' the payload.
Token = namedtuple('Token', 'tag value')
# Token tags: end-of-line, VM operator, integer literal, anything else.
EOL, OPERATOR, INTEGER, OTHER = range(4)
def tokenize(line):
    '''Yield a Token for each whitespace-separated word in *line*, then a
    final EOL token.

    (This was much more interesting when the language used S-Expressions
    instead of RPN.)
    '''
    for word in line.split():
        if word in symbols:
            token = Token(OPERATOR, symbols[word])
        elif word.isdigit():
            token = Token(INTEGER, int(word))
        else:
            token = Token(OTHER, word)
        yield token
    yield Token(EOL, '<EOL>')
def parseExpr(line, maxstack, label, stream):
    '''Verify that current line is a valid RPN expression. Return maximum
    stack size and expression as a list of symbols.

    line     -- line number, used only in error messages
    maxstack -- largest stack depth seen so far across all expressions
    label    -- integer label of this expression; pushed first
    stream   -- token iterator positioned just past the label
    '''
    expression = [Push(label)]
    # The label push above means the stack already holds one value.
    stack = 1
    for token in stream:
        if token.tag == INTEGER:
            stack += 1
            maxstack = max(stack, maxstack)
            expression.append(Push(token.value))
        elif token.tag == OPERATOR:
            instruction = token.value
            # Simulate the operator's stack effect to validate its arity.
            if stack < instruction.pop:
                raise SyntaxError("{}: Too few arguments to {} at label {}".format(line, instruction.sym, label))
            stack -= instruction.pop
            stack += instruction.push
            maxstack = max(stack, maxstack)
            expression.append(instruction)
        elif token.tag == EOL:
            break
        else:
            raise SyntaxError("{}: Unexpected token '{}' in expression at label {}".format(line, token.value, label))
    # A well-formed RPN expression leaves exactly one value on the stack.
    if stack != 1:
        raise SyntaxError("{}: Malformed expression at label {}".format(line, label))
    return maxstack, expression
def parse(filename):
    '''Parse file and return a dictionary mapping labels to expressions.

    Returns (maxstack, source) where maxstack is the largest stack depth
    any expression needs and source maps integer labels to expressions.
    Lines that do not start with an integer label are ignored.
    '''
    source = {}
    maxstack = 1
    # BUG FIX: open the file in a context manager so the handle is closed
    # deterministically (the original leaked it via enumerate(open(...))).
    with open(filename) as infile:
        for i, line in enumerate(infile):
            stream = tokenize(line.strip())
            token = next(stream)
            if token.tag == INTEGER:
                maxstack, source[token.value] = parseExpr(i, maxstack,
                                                          token.value, stream)
    return maxstack, source
| mit |
aarpon/obit_microscopy_core_technology | core-plugins/microscopy/3/dss/drop-boxes/MicroscopyDropbox/GenericTIFFSeriesMaximumIntensityProjectionGenerationAlgorithm.py | 2 | 1343 | # -*- coding: utf-8 -*-
'''
Created on Apr 27, 2016
@author: Aaron Ponti
'''
from ch.systemsx.cisd.openbis.dss.etl.dto.api.impl import MaximumIntensityProjectionGenerationAlgorithm
class GenericTIFFSeriesMaximumIntensityProjectionGenerationAlgorithm(MaximumIntensityProjectionGenerationAlgorithm):
    '''
    Custom MaximumIntensityProjectionGenerationAlgorithm for Generic TIFF Series
    that makes sure that the first timepoint in a series is registered for
    creation of the representative thumbnail.

    Runs under Jython: the parent class is the openBIS Java implementation.
    '''

    def __init__(self, datasetTypeCode, width, height, filename):
        """
        Constructor.

        datasetTypeCode -- openBIS dataset type code for the thumbnail
        width, height   -- thumbnail dimensions in pixels
        filename        -- output file name for the generated projection
        """
        # Call the parent base constructor
        MaximumIntensityProjectionGenerationAlgorithm.__init__(self,
            datasetTypeCode, width, height, filename)

    def imageToBeIgnored(self, image):
        """
        Overrides the parent imageToBeIgnored method. The selection of which
        series should be used to create the representative thumbnail is done
        in GenericTIFFSeriesCompositeDatasetConfig. Here we prevent the base
        MaximumIntensityProjectionGenerationAlgorithm.imageToBeIgnored() method
        to make a decision based on the timepoint (== 0), since we cannot know
        which is the first time point in a Generic TIFF Series.
        """
        return False
crmccreary/openerp_server | openerp/addons/project_timesheet/__openerp__.py | 9 | 2262 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Bill Time on Tasks',
'version': '1.0',
'category': 'Project Management',
'description': """
Synchronization of project task work entries with timesheet entries.
====================================================================
This module lets you transfer the entries under tasks defined for Project Management to
the Timesheet line entries for particular date and particular user with the effect of creating, editing and deleting either ways.
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'images': ['images/invoice_task_work.jpeg', 'images/my_timesheet.jpeg', 'images/working_hour.jpeg'],
'depends': ['project', 'hr_timesheet_sheet', 'hr_timesheet_invoice', 'account_analytic_analysis'],
'init_xml': ['project_timesheet_data.xml'],
'update_xml': ["security/ir.model.access.csv","process/project_timesheet_process.xml", "report/task_report_view.xml", "project_timesheet_view.xml"],
'demo_xml': ["project_timesheet_demo.xml"],
'test': [
'test/worktask_entry_to_timesheetline_entry.yml',
'test/work_timesheet.yml',
],
'installable': True,
'auto_install': False,
'certificate': '0075123647453',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ddurando/pox.carp | pox/proto/arp_responder.py | 39 | 8783 | # Copyright 2011,2012 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
An ARP utility that can learn and proxy ARPs, and can also answer queries
from a list of static entries.
This adds the "arp" object to the console, which you can use to look at
or modify the ARP table.
Add ARP entries on commandline like:
arp_responder --<IP>=<MAC> --<IP>=<MAC>
Leave MAC unspecified if you want to use the switch MAC.
"""
from pox.core import core
import pox
log = core.getLogger()
from pox.lib.packet.ethernet import ethernet, ETHER_BROADCAST
from pox.lib.packet.arp import arp
from pox.lib.packet.vlan import vlan
from pox.lib.addresses import IPAddr, EthAddr
from pox.lib.util import dpid_to_str, str_to_bool
from pox.lib.recoco import Timer
from pox.lib.revent import EventHalt
import pox.openflow.libopenflow_01 as of
import time
# Timeout for ARP entries
# Dynamic entries older than this are expired and requests get flooded
# instead of answered (see Entry.is_expired and _handle_expiration).
ARP_TIMEOUT = 60 * 4
class Entry (object):
  """
  A single ARP table record.

  We use the MAC to answer ARP replies.
  We use the timeout so that if an entry is older than ARP_TIMEOUT, we
  flood the ARP request rather than try to answer it ourselves.
  """
  def __init__ (self, mac, static = None, flood = None):
    self.timeout = time.time() + ARP_TIMEOUT
    if mac is True:
      # Means use switch's MAC, implies static/noflood
      self.mac = True
      self.static = True
      self.flood = False
    else:
      self.mac = EthAddr(mac)
      self.static = False
      self.flood = True
    # Explicit keyword arguments override the defaults chosen above.
    if static is not None:
      self.static = static
    if flood is not None:
      self.flood = flood

  def __eq__ (self, other):
    if isinstance(other, Entry):
      return (self.static, self.mac) == (other.static, other.mac)
    # Comparing against a raw MAC address compares just the MAC.
    return self.mac == other

  def __ne__ (self, other):
    return not self.__eq__(other)

  @property
  def is_expired (self):
    # Static entries never expire.
    return (not self.static) and time.time() > self.timeout
class ARPTable (dict):
  """
  Maps IPAddr -> Entry.  repr() renders a human-readable table, including
  recent unanswered queries from the module-level _failed_queries dict.
  (Python 2 code: uses dict.iteritems().)
  """
  def __repr__ (self):
    o = []
    for k,e in self.iteritems():
      # Remaining lifetime; "X" if expired, "-" for static entries.
      t = int(e.timeout - time.time())
      if t < 0:
        t = "X"
      else:
        t = str(t) + "s left"
      if e.static: t = "-"
      mac = e.mac
      if mac is True: mac = "<Switch MAC>"
      o.append((k,"%-17s %-20s %3s" % (k, mac, t)))
    # Also show recent queries we could not answer (and still can't).
    for k,t in _failed_queries.iteritems():
      if k not in self:
        t = int(time.time() - t)
        o.append((k,"%-17s %-20s %3ss ago" % (k, '?', t)))
    o.sort()
    o = [e[1] for e in o]
    o.insert(0,"-- ARP Table -----")
    if len(o) == 1:
      o.append("<< Empty >>")
    return "\n".join(o)

  def __setitem__ (self, key, val):
    # Normalize keys to IPAddr and values to Entry.
    key = IPAddr(key)
    if not isinstance(val, Entry):
      val = Entry(val)
    dict.__setitem__(self, key, val)

  def __delitem__ (self, key):
    key = IPAddr(key)
    dict.__delitem__(self, key)

  def set (self, key, value=True, static=True):
    """Convenience setter; value=True means 'use the switch MAC'."""
    if not isinstance(value, Entry):
      value = Entry(value, static=static)
    self[key] = value
def _dpid_to_mac (dpid):
  """Derives an EthAddr from the low 48 bits of a switch DPID."""
  # Should maybe look at internal port MAC instead?
  low48 = dpid & 0xffFFffFFffFF
  return EthAddr("%012x" % (low48,))
def _handle_expiration ():
  # Drop expired dynamic ARP entries and stale failed-query records.
  # Note: .items() (a list copy under Python 2) is required here because
  # we delete from the dicts while iterating.
  for k,e in _arp_table.items():
    if e.is_expired:
      del _arp_table[k]
  for k,t in _failed_queries.items():
    if time.time() - t > ARP_TIMEOUT:
      del _failed_queries[k]
class ARPResponder (object):
  """
  Learns, answers, and/or floods ARP traffic on OpenFlow switches.

  Behavior is controlled by the module-level _install_flow, _eat_packets
  and _learn settings (configured by launch()).
  """
  def __init__ (self):
    # This timer handles expiring stuff
    self._expire_timer = Timer(5, _handle_expiration, recurring=True)

    core.addListeners(self)

  def _handle_GoingUpEvent (self, event):
    # Start listening for OpenFlow events once POX core is up.
    core.openflow.addListeners(self)
    log.debug("Up...")

  def _handle_ConnectionUp (self, event):
    # Optionally install a high-priority flow sending all ARP packets
    # on this switch to the controller.
    if _install_flow:
      fm = of.ofp_flow_mod()
      fm.priority = 0x7000 # Pretty high
      fm.match.dl_type = ethernet.ARP_TYPE
      fm.actions.append(of.ofp_action_output(port=of.OFPP_CONTROLLER))
      event.connection.send(fm)

  def _handle_PacketIn (self, event):
    """Learn from / answer / flood an incoming ARP packet."""
    # Note: arp.hwsrc is not necessarily equal to ethernet.src
    # (one such example are arp replies generated by this module itself
    # as ethernet mac is set to switch dpid) so we should be careful
    # to use only arp addresses in the learning code!
    squelch = False

    dpid = event.connection.dpid
    inport = event.port
    packet = event.parsed
    if not packet.parsed:
      log.warning("%s: ignoring unparsed packet", dpid_to_str(dpid))
      return

    a = packet.find('arp')
    if not a: return

    log.debug("%s ARP %s %s => %s", dpid_to_str(dpid),
        {arp.REQUEST:"request",arp.REPLY:"reply"}.get(a.opcode,
        'op:%i' % (a.opcode,)), str(a.protosrc), str(a.protodst))

    if a.prototype == arp.PROTO_TYPE_IP:
      if a.hwtype == arp.HW_TYPE_ETHERNET:
        if a.protosrc != 0:

          if _learn:
            # Learn or update port/MAC info
            if a.protosrc in _arp_table:
              if _arp_table[a.protosrc] != a.hwsrc:
                log.warn("%s RE-learned %s: %s->%s", dpid_to_str(dpid),
                    a.protosrc, _arp_table[a.protosrc].mac, a.hwsrc)
            else:
              log.info("%s learned %s", dpid_to_str(dpid), a.protosrc)
            _arp_table[a.protosrc] = Entry(a.hwsrc)

          if a.opcode == arp.REQUEST:
            # Maybe we can answer

            if a.protodst in _arp_table:
              # We have an answer...

              r = arp()
              r.hwtype = a.hwtype
              r.prototype = a.prototype
              r.hwlen = a.hwlen
              r.protolen = a.protolen
              r.opcode = arp.REPLY
              r.hwdst = a.hwsrc
              r.protodst = a.protosrc
              r.protosrc = a.protodst
              mac = _arp_table[a.protodst].mac
              if mac is True:
                # Special case -- use ourself
                mac = _dpid_to_mac(dpid)
              r.hwsrc = mac
              e = ethernet(type=packet.type, src=_dpid_to_mac(dpid),
                           dst=a.hwsrc)
              e.payload = r
              # Preserve the VLAN tag of the request, if any.
              if packet.type == ethernet.VLAN_TYPE:
                v_rcv = packet.find('vlan')
                e.payload = vlan(eth_type = e.type,
                                 payload = e.payload,
                                 id = v_rcv.id,
                                 pcp = v_rcv.pcp)
                e.type = ethernet.VLAN_TYPE
              log.info("%s answering ARP for %s" % (dpid_to_str(dpid),
                  str(r.protosrc)))
              msg = of.ofp_packet_out()
              msg.data = e.pack()
              msg.actions.append(of.ofp_action_output(port =
                                                      of.OFPP_IN_PORT))
              msg.in_port = inport
              event.connection.send(msg)
              return EventHalt if _eat_packets else None
            else:
              # Keep track of failed queries
              squelch = a.protodst in _failed_queries
              _failed_queries[a.protodst] = time.time()

    # Fallback: flood anything we did not answer above.
    if self._check_for_flood(dpid, a):
      # Didn't know how to handle this ARP, so just flood it
      msg = "%s flooding ARP %s %s => %s" % (dpid_to_str(dpid),
          {arp.REQUEST:"request",arp.REPLY:"reply"}.get(a.opcode,
          'op:%i' % (a.opcode,)), a.protosrc, a.protodst)
      # Repeated failures are logged at debug level only.
      if squelch:
        log.debug(msg)
      else:
        log.info(msg)

      msg = of.ofp_packet_out()
      msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD))
      msg.data = event.ofp
      event.connection.send(msg.pack())
      return EventHalt if _eat_packets else None

  def _check_for_flood (self, dpid, a):
    """
    Return True if you want to flood this
    """
    if a.protodst in _arp_table:
      return _arp_table[a.protodst].flood
    return True
_arp_table = ARPTable() # IPAddr -> Entry
_install_flow = None    # Install an ARP-to-controller flow on each switch?
_eat_packets = None     # Halt PacketIn events for ARPs we handled?
_failed_queries = {} # IP -> time : queries we couldn't answer
_learn = None           # Learn MAC/IP bindings from observed ARP traffic?
def launch (timeout=ARP_TIMEOUT, no_flow=False, eat_packets=True,
            no_learn=False, **kw):
  """
  POX component entry point.

  Extra keyword arguments are static ARP entries: --<IP>=<MAC>
  (a value of True means "use the switch's own MAC").
  """
  global ARP_TIMEOUT, _install_flow, _eat_packets, _learn
  ARP_TIMEOUT = timeout
  _install_flow = not no_flow
  _eat_packets = str_to_bool(eat_packets)
  _learn = not no_learn

  # Expose the ARP table on the interactive console as 'arp'.
  core.Interactive.variables['arp'] = _arp_table

  for k,v in kw.iteritems():
    _arp_table[IPAddr(k)] = Entry(v, static=True)

  core.registerNew(ARPResponder)
| apache-2.0 |
DavidNorman/tensorflow | tensorflow/tools/compatibility/tf_upgrade_v2_safety.py | 2 | 2681 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts from 1.* to 2.0 TensorFlow using SAFETY mode."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.tools.compatibility import all_renames_v2
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import module_deprecations_v2
class TFAPIChangeSpec(ast_edits.APIChangeSpec):
  """List of maps that describe what changed in the API."""

  def __init__(self):
    # SAFETY mode performs no automatic API rewrites, so all of the
    # rename/reorder/transform tables stay empty.
    self.function_keyword_renames = {}
    self.symbol_renames = {}
    self.change_to_function = {}
    self.function_reorders = {}
    self.function_warnings = {}
    self.function_transformers = {}
    self.module_deprecations = module_deprecations_v2.MODULE_DEPRECATIONS

    ## Inform about the addons mappings
    for symbol, replacement in all_renames_v2.addons_symbol_mappings.items():
      warning = (
          ast_edits.WARNING, (
              "(Manual edit required) `{}` has been migrated to `{}` in "
              "TensorFlow Addons. The API spec may have changed during the "
              "migration. Please see https://github.com/tensorflow/addons "
              "for more info.").format(symbol, replacement))
      self.function_warnings[symbol] = warning

    # List module renames. Right now, we just support renames from a module
    # names that don't contain '.'.
    self.import_renames = {
        "tensorflow": ast_edits.ImportRename(
            "tensorflow.compat.v1",
            excluded_prefixes=["tensorflow.contrib",
                               "tensorflow.flags",
                               "tensorflow.compat.v1",
                               "tensorflow.compat.v2"])
    }
    # Lines inserted right after the (possibly renamed) tensorflow import.
    self.inserts_after_imports = {
        ("tensorflow", None): ["tensorflow.disable_v2_behavior()"],
        ("tensorflow", "tf"): ["tf.disable_v2_behavior()"],
    }

    # TODO(kaftan,annarev): specify replacement from TensorFlow import to
    # compat.v1 import.
| apache-2.0 |
xkcd1253/Mimi | flask/lib/python2.7/site-packages/whoosh/query/qcore.py | 3 | 22480 | # Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from __future__ import division
import copy
from array import array
from whoosh import matching
from whoosh.compat import u
from whoosh.reading import TermNotFound
from whoosh.compat import methodcaller
# Exceptions
class QueryError(Exception):
    """Error encountered while running a query."""
# Functions
def error_query(msg, q=None):
    """Attaches *msg* as the ``error`` attribute of *q* (or of a fresh
    :class:`_NullQuery` when *q* is not given) and returns it.
    """
    target = _NullQuery() if q is None else q
    target.error = msg
    return target
def token_lists(q, phrases=True):
    """Returns the terms in the query tree, with the query hierarchy
    represented as nested lists.

    :param phrases: if False, terms from Phrase queries are excluded.
    """

    if q.is_leaf():
        if phrases:
            # Fast path: no need to distinguish Phrase leaves.
            return list(q.tokens())
        # Deferred import (as in the original) to avoid a circular import
        # at module load time.
        from whoosh.query import Phrase
        if not isinstance(q, Phrase):
            return list(q.tokens())
        # BUG FIX: the original fell off the end here and returned None,
        # which crashed the len(t) call in the recursive branch below.
        return []
    else:
        ls = []
        for qq in q.children():
            t = token_lists(qq, phrases=phrases)
            # Unwrap single-element sublists to keep the nesting minimal.
            if len(t) == 1:
                t = t[0]
            if t:
                ls.append(t)
        return ls
# Utility classes
class Lowest(object):
    """A value that always compares lower than any other object except
    itself.
    """

    def __cmp__(self, other):
        # Python 2 fallback comparison.
        return 0 if other.__class__ is Lowest else -1

    def __eq__(self, other):
        return self.__class__ is type(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        # Lower than everything except another Lowest.
        return type(other) is not self.__class__

    def __le__(self, other):
        return self.__lt__(other) or self.__eq__(other)

    def __gt__(self, other):
        return not self.__le__(other)

    def __ge__(self, other):
        return self.__gt__(other) or self.__eq__(other)
class Highest(object):
    """A value that always compares higher than any other object except
    itself.
    """

    def __cmp__(self, other):
        # Python 2 fallback comparison.
        if other.__class__ is Highest:
            return 0
        return 1

    def __eq__(self, other):
        return self.__class__ is type(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        # BUG FIX: this used to return True when *other* was also a Highest,
        # contradicting __eq__ (two equal objects must not compare
        # less-than). Highest is never lower than anything.
        return False

    def __gt__(self, other):
        return not (self.__lt__(other) or self.__eq__(other))

    def __le__(self, other):
        return self.__eq__(other) or self.__lt__(other)

    def __ge__(self, other):
        return self.__eq__(other) or self.__gt__(other)
# Replace the classes with singleton instances; the rest of the code
# compares against these module-level sentinels.
Lowest = Lowest()
Highest = Highest()
# Base classes
class Query(object):
    """Abstract base class for all queries.

    Note that this base class implements __or__, __and__, and __sub__ to allow
    slightly more convenient composition of query objects::

        >>> Term("content", u"a") | Term("content", u"b")
        Or([Term("content", u"a"), Term("content", u"b")])

        >>> Term("content", u"a") & Term("content", u"b")
        And([Term("content", u"a"), Term("content", u"b")])

        >>> Term("content", u"a") - Term("content", u"b")
        And([Term("content", u"a"), Not(Term("content", u"b"))])
    """

    # For queries produced by the query parser, record where in the user
    # query this object originated
    startchar = endchar = None

    # For queries produced by the query parser, records an error that resulted
    # in this query
    error = None

    def __unicode__(self):
        raise NotImplementedError(self.__class__.__name__)

    def __getitem__(self, item):
        raise NotImplementedError

    def __or__(self, query):
        """Allows you to use | between query objects to wrap them in an Or
        query.
        """
        from whoosh.query import Or
        return Or([self, query]).normalize()

    def __and__(self, query):
        """Allows you to use & between query objects to wrap them in an And
        query.
        """
        from whoosh.query import And
        return And([self, query]).normalize()

    def __sub__(self, query):
        """Allows you to use - between query objects to add the right-hand
        query as a "NOT" query.
        """
        from whoosh.query import And, Not
        return And([self, Not(query)]).normalize()

    def __hash__(self):
        raise NotImplementedError

    def __ne__(self, other):
        return not self.__eq__(other)

    def is_leaf(self):
        """Returns True if this is a leaf node in the query tree, or False if
        this query has sub-queries.
        """
        return True

    def children(self):
        """Returns an iterator of the subqueries of this object.
        """
        return iter([])

    def is_range(self):
        """Returns True if this object searches for values within a range.
        """
        return False

    def has_terms(self):
        """Returns True if this specific object represents a search for a
        specific term (as opposed to a pattern, as in Wildcard and Prefix) or
        terms (i.e., whether the ``replace()`` method does something
        meaningful on this instance).
        """
        return False

    def apply(self, fn):
        """If this query has children, calls the given function on each child
        and returns a new copy of this node with the new children returned by
        the function. If this is a leaf node, simply returns this object.

        This is useful for writing functions that transform a query tree. For
        example, this function changes all Term objects in a query tree into
        Variations objects::

            def term2var(q):
                if isinstance(q, Term):
                    return Variations(q.fieldname, q.text)
                else:
                    return q.apply(term2var)

            q = And([Term("f", "alfa"),
                     Or([Term("f", "bravo"),
                         Not(Term("f", "charlie"))])])
            q = term2var(q)

        Note that this method does not automatically create copies of nodes.
        To avoid modifying the original tree, your function should call the
        :meth:`Query.copy` method on nodes before changing their attributes.
        """
        return self

    def accept(self, fn):
        """Applies the given function to this query's subqueries (if any) and
        then to this query itself::

            def boost_phrases(q):
                if isinstance(q, Phrase):
                    q.boost *= 2.0
                return q

            myquery = myquery.accept(boost_phrases)

        This method automatically creates copies of the nodes in the original
        tree before passing them to your function, so your function can change
        attributes on nodes without altering the original tree.

        This method is less flexible than using :meth:`Query.apply` (in fact
        it's implemented using that method) but is often more straightforward.
        """
        def fn_wrapper(q):
            # Post-order: transform the children first, then this node.
            q = q.apply(fn_wrapper)
            return fn(q)

        return fn_wrapper(self)

    def replace(self, fieldname, oldtext, newtext):
        """Returns a copy of this query with oldtext replaced by newtext (if
        oldtext was anywhere in this query).

        Note that this returns a *new* query with the given text replaced. It
        *does not* modify the original query "in place".
        """
        # The default implementation uses the apply method to "pass down" the
        # replace() method call
        if self.is_leaf():
            return copy.copy(self)
        else:
            return self.apply(methodcaller("replace", fieldname, oldtext,
                                           newtext))

    def copy(self):
        """Deprecated, just use ``copy.deepcopy``.
        """
        return copy.deepcopy(self)

    def all_terms(self, phrases=True):
        """Returns a set of all terms in this query tree.

        This method exists for backwards-compatibility. Use iter_all_terms()
        instead.

        :param phrases: Whether to add words found in Phrase queries.
        :rtype: set
        """
        return set(self.iter_all_terms(phrases=phrases))

    def terms(self, phrases=False):
        """Yields zero or more (fieldname, text) pairs queried by this object.
        You can check whether a query object targets specific terms before you
        call this method using :meth:`Query.has_terms`.

        To get all terms in a query tree, use :meth:`Query.iter_all_terms`.
        """
        return iter(())

    def expanded_terms(self, ixreader):
        # By default, queries do not expand to extra terms.
        return self.terms()

    def existing_terms(self, ixreader, phrases=True, expand=False,
                       fieldname=None):
        """Returns a set of all byteterms in this query tree that exist in
        the given ixreader.

        :param ixreader: A :class:`whoosh.reading.IndexReader` object.
        :param phrases: Whether to add words found in Phrase queries.
        :param expand: If True, queries that match multiple terms
            will return all matching expansions.
        :param fieldname: If given, only terms in this field are considered.
        :rtype: set
        """
        schema = ixreader.schema
        termset = set()
        for q in self.leaves():
            if fieldname and fieldname != q.field():
                continue

            if expand:
                terms = q.expanded_terms(ixreader)
            else:
                terms = q.terms(phrases)

            # BUG FIX: the inner loop used to rebind the ``fieldname``
            # parameter, so after the first leaf with terms the field filter
            # above compared against the last term's field rather than the
            # caller's argument. Use a distinct local name instead.
            for fname, text in terms:
                if (fname, text) in termset:
                    continue

                if fname in schema:
                    field = schema[fname]
                    btext = field.to_bytes(text)
                    if (fname, btext) in ixreader:
                        termset.add((fname, btext))
        return termset

    def leaves(self):
        """Returns an iterator of all the leaf queries in this query tree as a
        flat series.
        """
        if self.is_leaf():
            yield self
        else:
            for q in self.children():
                for qq in q.leaves():
                    yield qq

    def iter_all_terms(self, phrases=True):
        """Returns an iterator of (fieldname, text) pairs for all terms in
        this query tree.

        >>> qp = qparser.QueryParser("text", myindex.schema)
        >>> q = myparser.parse("alfa bravo title:charlie")
        >>> # List the terms in a query
        >>> list(q.iter_all_terms())
        [("text", "alfa"), ("text", "bravo"), ("title", "charlie")]
        >>> # Get a set of all terms in the query that don't exist in the index
        >>> r = myindex.reader()
        >>> missing = set(t for t in q.iter_all_terms() if t not in r)
        set([("text", "alfa"), ("title", "charlie")])
        >>> # All terms in the query that occur in fewer than 5 documents in
        >>> # the index
        >>> [t for t in q.iter_all_terms() if r.doc_frequency(t[0], t[1]) < 5]
        [("title", "charlie")]

        :param phrases: Whether to add words found in Phrase queries.
        """
        for q in self.leaves():
            if q.has_terms():
                for t in q.terms(phrases=phrases):
                    yield t

    def all_tokens(self, boost=1.0):
        """Returns an iterator of :class:`analysis.Token` objects corresponding
        to all terms in this query tree. The Token objects will have the
        ``fieldname``, ``text``, and ``boost`` attributes set. If the query
        was built by the query parser, the Token objects will also have
        ``startchar`` and ``endchar`` attributes indexing into the original
        user query.
        """
        if self.is_leaf():
            for token in self.tokens(boost):
                yield token
        else:
            # Interior nodes multiply their boost into their children's.
            boost *= self.boost if hasattr(self, "boost") else 1.0
            for child in self.children():
                for token in child.all_tokens(boost):
                    yield token

    def tokens(self, boost=1.0, exreader=None):
        """Yields zero or more :class:`analysis.Token` objects corresponding to
        the terms searched for by this query object. You can check whether a
        query object targets specific terms before you call this method using
        :meth:`Query.has_terms`.

        The Token objects will have the ``fieldname``, ``text``, and ``boost``
        attributes set. If the query was built by the query parser, the Token
        objects will also have ``startchar`` and ``endchar`` attributes
        indexing into the original user query.

        To get all tokens for a query tree, use :meth:`Query.all_tokens`.

        :param exreader: a reader to use to expand multiterm queries such as
            prefixes and wildcards. The default is None meaning do not expand.
        """
        return iter(())

    def requires(self):
        """Returns a set of queries that are *known* to be required to match
        for the entire query to match. Note that other queries might also turn
        out to be required but not be determinable by examining the static
        query.

        >>> a = Term("f", u"a")
        >>> b = Term("f", u"b")
        >>> And([a, b]).requires()
        set([Term("f", u"a"), Term("f", u"b")])
        >>> Or([a, b]).requires()
        set([])
        >>> AndMaybe(a, b).requires()
        set([Term("f", u"a")])
        >>> a.requires()
        set([Term("f", u"a")])
        """
        # Subclasses should implement the _add_required_to(qset) method
        return set([self])

    def field(self):
        """Returns the field this query matches in, or None if this query does
        not match in a single field.
        """
        return self.fieldname

    def with_boost(self, boost):
        """Returns a COPY of this query with the boost set to the given value.

        If a query type does not accept a boost itself, it will try to pass the
        boost on to its children, if any.
        """
        q = self.copy()
        q.boost = boost
        return q

    def estimate_size(self, ixreader):
        """Returns an estimate of how many documents this query could
        potentially match (for example, the estimated size of a simple term
        query is the document frequency of the term). It is permissible to
        overestimate, but not to underestimate.
        """
        raise NotImplementedError

    def estimate_min_size(self, ixreader):
        """Returns an estimate of the minimum number of documents this query
        could potentially match.
        """
        return self.estimate_size(ixreader)

    def matcher(self, searcher, context=None):
        """Returns a :class:`~whoosh.matching.Matcher` object you can use to
        retrieve documents and scores matching this query.

        :rtype: :class:`whoosh.matching.Matcher`
        """
        raise NotImplementedError

    def docs(self, searcher):
        """Returns an iterator of docnums matching this query.

        >>> with my_index.searcher() as searcher:
        ...     list(my_query.docs(searcher))
        [10, 34, 78, 103]

        :param searcher: A :class:`whoosh.searching.Searcher` object.
        """
        try:
            context = searcher.boolean_context()
            return self.matcher(searcher, context).all_ids()
        except TermNotFound:
            return iter([])

    def deletion_docs(self, searcher):
        """Returns an iterator of docnums matching this query for the purpose
        of deletion. The :meth:`~whoosh.writing.IndexWriter.delete_by_query`
        method will use this method when deciding what documents to delete,
        allowing special queries (e.g. nested queries) to override what
        documents are deleted. The default implementation just forwards to
        :meth:`Query.docs`.
        """
        return self.docs(searcher)

    def normalize(self):
        """Returns a recursively "normalized" form of this query. The
        normalized form removes redundancy and empty queries. This is called
        automatically on query trees created by the query parser, but you may
        want to call it yourself if you're writing your own parser or building
        your own queries.

        >>> q = And([And([Term("f", u"a"),
        ...               Term("f", u"b")]),
        ...               Term("f", u"c"), Or([])])
        >>> q.normalize()
        And([Term("f", u"a"), Term("f", u"b"), Term("f", u"c")])

        Note that this returns a *new, normalized* query. It *does not* modify
        the original query "in place".
        """
        return self

    def simplify(self, ixreader):
        """Returns a recursively simplified form of this query, where
        "second-order" queries (such as Prefix and Variations) are re-written
        into lower-level queries (such as Term and Or).
        """
        return self
# Null query
class _NullQuery(Query):
    """Represents a query that won't match anything."""

    boost = 1.0

    def __init__(self):
        self.error = None

    def __unicode__(self):
        return u("<_NullQuery>")

    def __call__(self):
        # Calling the null query simply yields the same (stateless) object.
        return self

    def __repr__(self):
        return "<%s>" % type(self).__name__

    def __eq__(self, other):
        return isinstance(other, _NullQuery)

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return id(self)

    # The null query carries no useful state, so copies return self.
    def __copy__(self):
        return self

    def __deepcopy__(self, memo):
        return self

    def field(self):
        return None

    def estimate_size(self, ixreader):
        return 0

    def normalize(self):
        return self

    def simplify(self, ixreader):
        return self

    def docs(self, searcher):
        return []

    def matcher(self, searcher, context=None):
        return matching.NullMatcher()


# Shared singleton instance used wherever a "matches nothing" query is needed.
NullQuery = _NullQuery()
# Every
class Every(Query):
    """A query that matches every document containing any term in a given
    field. If you don't specify a field, the query matches every document.

    >>> # Match any documents with something in the "path" field
    >>> q = Every("path")
    >>> # Matcher every document
    >>> q = Every()

    The unfielded form (matching every document) is efficient.

    The fielded is more efficient than a prefix query with an empty prefix or a
    '*' wildcard, but it can still be very slow on large indexes. It requires
    the searcher to read the full posting list of every term in the given
    field.

    Instead of using this query it is much more efficient when you create the
    index to include a single term that appears in all documents that have the
    field you want to match.

    For example, instead of this::

        # Match all documents that have something in the "path" field
        q = Every("path")

    Do this when indexing::

        # Add an extra field that indicates whether a document has a path
        schema = fields.Schema(path=fields.ID, has_path=fields.ID)

        # When indexing, set the "has_path" field based on whether the document
        # has anything in the "path" field
        writer.add_document(text=text_value1)
        writer.add_document(text=text_value2, path=path_value2, has_path="t")

    Then to find all documents with a path::

        q = Term("has_path", "t")
    """

    def __init__(self, fieldname=None, boost=1.0):
        """
        :param fieldname: the name of the field to match, or ``None`` or ``*``
            to match all documents.
        :param boost: score boost applied to every matched document.
        """
        if not fieldname or fieldname == "*":
            fieldname = None
        self.fieldname = fieldname
        self.boost = boost

    def __repr__(self):
        return "%s(%r, boost=%s)" % (self.__class__.__name__, self.fieldname,
                                     self.boost)

    def __eq__(self, other):
        # BUG FIX: return a strict bool. The original expression returned
        # ``other`` itself when it was falsy (e.g. None), so __eq__ could
        # leak a non-boolean value to callers.
        return bool(other and self.__class__ is other.__class__
                    and self.fieldname == other.fieldname
                    and self.boost == other.boost)

    def __unicode__(self):
        return u("%s:*") % self.fieldname

    __str__ = __unicode__

    def __hash__(self):
        # Note: hashes on the fieldname only; instances differing only in
        # boost hash equal, which is valid (they just collide).
        return hash(self.fieldname)

    def estimate_size(self, ixreader):
        return ixreader.doc_count()

    def matcher(self, searcher, context=None):
        fieldname = self.fieldname
        reader = searcher.reader()

        if fieldname in (None, "", "*"):
            # This takes into account deletions
            doclist = array("I", reader.all_doc_ids())
        else:
            # This is a hacky hack, but just create an in-memory set of all the
            # document numbers of every term in the field. This is SLOOOW for
            # large indexes
            doclist = set()
            for text in searcher.lexicon(fieldname):
                pr = searcher.postings(fieldname, text)
                doclist.update(pr.all_ids())
            doclist = sorted(doclist)

        return matching.ListMatcher(doclist, all_weights=self.boost)
| gpl-2.0 |
michalliu/chromium-depot_tools | third_party/oauth2client/xsrfutil.py | 230 | 3368 | #!/usr/bin/python2.5
#
# Copyright 2010 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper methods for creating & verifying XSRF tokens."""
__authors__ = [
'"Doug Coker" <dcoker@google.com>',
'"Joe Gregorio" <jcgregorio@google.com>',
]
import base64
import hmac
import os # for urandom
import time
from oauth2client import util
# Delimiter character
DELIMITER = ':'
# 1 hour in seconds
DEFAULT_TIMEOUT_SECS = 1*60*60
@util.positional(2)
def generate_token(key, user_id, action_id="", when=None):
  """Generates a URL-safe token for the given user, action, time tuple.

  Args:
    key: secret key to use.
    user_id: the user ID of the authenticated user.
    action_id: a string identifier of the action they requested
      authorization for.
    when: the time in seconds since the epoch at which the user was
      authorized for this action. If not set the current time is used.

  Returns:
    A string XSRF protection token.
  """
  # NOTE(review): ``when or ...`` treats when=0 the same as "not given" —
  # presumably intentional, since 0 is not a meaningful issue time.
  when = when or int(time.time())
  # NOTE(review): Python 2 only — hmac.new() without a digestmod defaults to
  # MD5 here, and the update() calls pass str objects. On Python 3 an explicit
  # digestmod is required and update() needs bytes; confirm before porting.
  digester = hmac.new(key)
  digester.update(str(user_id))
  digester.update(DELIMITER)
  digester.update(action_id)
  digester.update(DELIMITER)
  digester.update(str(when))
  digest = digester.digest()
  # Token layout: base64url(raw_digest + ':' + issue_time), the format
  # validate_token() parses (it splits on the delimiter and reads the tail).
  token = base64.urlsafe_b64encode('%s%s%d' % (digest,
                                               DELIMITER,
                                               when))
  return token
@util.positional(3)
def validate_token(key, token, user_id, action_id="", current_time=None):
  """Validates that the given token authorizes the user for the action.

  Tokens are invalid if the time of issue is too old or if the token
  does not match what generateToken outputs (i.e. the token was forged).

  Args:
    key: secret key to use.
    token: a string of the token generated by generateToken.
    user_id: the user ID of the authenticated user.
    action_id: a string identifier of the action they requested
      authorization for.
    current_time: time in seconds since the epoch to validate against;
      defaults to the actual current time (overridable for testing).

  Returns:
    A boolean - True if the user is authorized for the action, False
    otherwise.
  """
  if not token:
    return False
  try:
    decoded = base64.urlsafe_b64decode(str(token))
    # BUG FIX: was long(...), which only exists on Python 2. int() behaves
    # identically there (it auto-promotes to long when needed) and is
    # portable to Python 3.
    token_time = int(decoded.split(DELIMITER)[-1])
  except (TypeError, ValueError):
    return False
  if current_time is None:
    current_time = time.time()
  # If the token is too old it's not valid.
  if current_time - token_time > DEFAULT_TIMEOUT_SECS:
    return False
  # The given token should match the generated one with the same time.
  expected_token = generate_token(key, user_id, action_id=action_id,
                                  when=token_time)
  if len(token) != len(expected_token):
    return False
  # Perform constant time comparison to avoid timing attacks.
  different = 0
  for x, y in zip(token, expected_token):
    different |= ord(x) ^ ord(y)
  if different:
    return False
  return True
| bsd-3-clause |
Plain-Andy-legacy/android_external_chromium_org | chrome/test/ispy/common/ispy_utils.py | 88 | 10891 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Internal utilities for managing I-Spy test results in Google Cloud Storage.
See the ispy.ispy_api module for the external API.
"""
import collections
import itertools
import json
import os
import sys
import image_tools
_INVALID_EXPECTATION_CHARS = ['/', '\\', ' ', '"', '\'']
def IsValidExpectationName(expectation_name):
  """Returns True if the name contains none of the illegal characters."""
  for char in _INVALID_EXPECTATION_CHARS:
    if char in expectation_name:
      return False
  return True
def GetExpectationPath(expectation, file_name=''):
  """Get the path to a test file in the given test run and expectation.

  Args:
    expectation: name of the expectation.
    file_name: name of the file.

  Returns:
    the path as a string relative to the bucket.
  """
  return 'expectations/' + expectation + '/' + file_name
def GetFailurePath(test_run, expectation, file_name=''):
  """Get the path to a failure file in the given test run and test.

  Args:
    test_run: name of the test run.
    expectation: name of the expectation.
    file_name: name of the file.

  Returns:
    the path as a string relative to the bucket.
  """
  # Equivalent to GetTestRunPath(test_run, '%s/%s' % (expectation, file_name)).
  return 'failures/%s/%s/%s' % (test_run, expectation, file_name)
def GetTestRunPath(test_run, file_name=''):
  """Get the path to the given test run.

  Args:
    test_run: name of the test run.
    file_name: name of the file.

  Returns:
    the path as a string relative to the bucket.
  """
  return 'failures/' + test_run + '/' + file_name
class ISpyUtils(object):
  """Utility functions for working with an I-Spy google storage bucket."""

  def __init__(self, cloud_bucket):
    """Initialize with a cloud bucket instance to supply GS functionality.

    Args:
      cloud_bucket: An object implementing the cloud_bucket.BaseCloudBucket
        interface.
    """
    self.cloud_bucket = cloud_bucket

  def UploadImage(self, full_path, image):
    """Uploads an image to a location in GS.

    Args:
      full_path: the path to the file in GS including the file extension.
      image: a RGB PIL.Image to be uploaded.
    """
    self.cloud_bucket.UploadFile(
        full_path, image_tools.EncodePNG(image), 'image/png')

  def DownloadImage(self, full_path):
    """Downloads an image from a location in GS.

    Args:
      full_path: the path to the file in GS including the file extension.

    Returns:
      The downloaded RGB PIL.Image.

    Raises:
      cloud_bucket.NotFoundError: if the path to the image is not valid.
    """
    return image_tools.DecodePNG(self.cloud_bucket.DownloadFile(full_path))

  def UpdateImage(self, full_path, image):
    """Updates an existing image in GS, preserving permissions and metadata.

    Args:
      full_path: the path to the file in GS including the file extension.
      image: a RGB PIL.Image.
    """
    self.cloud_bucket.UpdateFile(full_path, image_tools.EncodePNG(image))

  def GenerateExpectation(self, expectation, images):
    """Creates and uploads an expectation to GS from a set of images and name.

    This method generates a mask from the uploaded images, then
    uploads the mask and first of the images to GS as a expectation.

    Args:
      expectation: name for this expectation, any existing expectation with the
        name will be replaced.
      images: a list of RGB encoded PIL.Images

    Raises:
      ValueError: if the expectation name is invalid.
    """
    if not IsValidExpectationName(expectation):
      raise ValueError("Expectation name contains an illegal character: %s." %
                       str(_INVALID_EXPECTATION_CHARS))

    mask = image_tools.InflateMask(image_tools.CreateMask(images), 7)
    self.UploadImage(
        GetExpectationPath(expectation, 'expected.png'), images[0])
    self.UploadImage(GetExpectationPath(expectation, 'mask.png'), mask)

  def PerformComparison(self, test_run, expectation, actual):
    """Runs an image comparison, and uploads discrepancies to GS.

    Args:
      test_run: the name of the test_run.
      expectation: the name of the expectation to use for comparison.
      actual: an RGB-encoded PIL.Image that is the actual result.

    Raises:
      cloud_bucket.NotFoundError: if the given expectation is not found.
      ValueError: if the expectation name is invalid.
    """
    if not IsValidExpectationName(expectation):
      raise ValueError("Expectation name contains an illegal character: %s." %
                       str(_INVALID_EXPECTATION_CHARS))

    expectation_tuple = self.GetExpectation(expectation)
    if not image_tools.SameImage(
        actual, expectation_tuple.expected, mask=expectation_tuple.mask):
      # Record the actual image, a visual diff, and pixel-difference stats
      # under the failure path for this run/expectation.
      self.UploadImage(
          GetFailurePath(test_run, expectation, 'actual.png'), actual)
      diff, diff_pxls = image_tools.VisualizeImageDifferences(
          expectation_tuple.expected, actual, mask=expectation_tuple.mask)
      self.UploadImage(GetFailurePath(test_run, expectation, 'diff.png'), diff)
      self.cloud_bucket.UploadFile(
          GetFailurePath(test_run, expectation, 'info.txt'),
          json.dumps({
              'different_pixels': diff_pxls,
              'fraction_different':
                  diff_pxls / float(actual.size[0] * actual.size[1])}),
          'application/json')

  def GetExpectation(self, expectation):
    """Returns the given expectation from GS.

    Args:
      expectation: the name of the expectation to get.

    Returns:
      A named tuple: 'Expectation', containing two images: expected and mask.

    Raises:
      cloud_bucket.NotFoundError: if the test is not found in GS.
    """
    Expectation = collections.namedtuple('Expectation', ['expected', 'mask'])
    return Expectation(self.DownloadImage(GetExpectationPath(expectation,
                                                             'expected.png')),
                       self.DownloadImage(GetExpectationPath(expectation,
                                                             'mask.png')))

  def ExpectationExists(self, expectation):
    """Returns whether the given expectation exists in GS.

    Args:
      expectation: the name of the expectation to check.

    Returns:
      A boolean indicating whether the test exists.
    """
    expected_image_exists = self.cloud_bucket.FileExists(
        GetExpectationPath(expectation, 'expected.png'))
    mask_image_exists = self.cloud_bucket.FileExists(
        GetExpectationPath(expectation, 'mask.png'))
    return expected_image_exists and mask_image_exists

  def FailureExists(self, test_run, expectation):
    """Returns whether a failure for the expectation exists for the given run.

    Args:
      test_run: the name of the test_run.
      expectation: the name of the expectation that failed.

    Returns:
      A boolean indicating whether the failure exists.
    """
    actual_image_exists = self.cloud_bucket.FileExists(
        GetFailurePath(test_run, expectation, 'actual.png'))
    test_exists = self.ExpectationExists(expectation)
    info_exists = self.cloud_bucket.FileExists(
        GetFailurePath(test_run, expectation, 'info.txt'))
    return test_exists and actual_image_exists and info_exists

  def RemoveExpectation(self, expectation):
    """Removes an expectation and all associated failures with that test.

    Args:
      expectation: the name of the expectation to remove.
    """
    test_paths = self.cloud_bucket.GetAllPaths(
        GetExpectationPath(expectation))
    for path in test_paths:
      self.cloud_bucket.RemoveFile(path)

  def GenerateExpectationPinkOut(self, expectation, images, pink_out, rgb):
    """Uploads an ispy-test to GS with the pink_out workaround.

    Args:
      expectation: the name of the expectation to be uploaded.
      images: a list of RGB encoded PIL.Images.
      pink_out: a PIL.Image; pixels of the given color are masked out.
      rgb: a list of three ints giving the RGB color to mask out.

    Raises:
      ValueError: if expectation name is invalid.
    """
    if not IsValidExpectationName(expectation):
      raise ValueError("Expectation name contains an illegal character: %s." %
                       str(_INVALID_EXPECTATION_CHARS))

    # Convert the pink_out image into a black/white mask: pixels matching the
    # given color become black (masked out), everything else white.
    black = (0, 0, 0, 255)
    white = (255, 255, 255, 255)
    pink_out.putdata(
        [black if px == (rgb[0], rgb[1], rgb[2], 255) else white
         for px in pink_out.getdata()])
    # BUG FIX: the parameter was previously misspelled 'pint_out' while the
    # body referenced 'pink_out', so every call raised NameError. Also
    # dropped a redundant CreateMask call whose result was immediately
    # overwritten by the inflated mask below.
    mask = image_tools.InflateMask(image_tools.CreateMask(images), 7)
    combined_mask = image_tools.AddMasks([mask, pink_out])
    self.UploadImage(GetExpectationPath(expectation, 'expected.png'), images[0])
    self.UploadImage(GetExpectationPath(expectation, 'mask.png'), combined_mask)

  def RemoveFailure(self, test_run, expectation):
    """Removes a failure from GS.

    Args:
      test_run: the name of the test_run.
      expectation: the expectation on which the failure to be removed occured.
    """
    failure_paths = self.cloud_bucket.GetAllPaths(
        GetFailurePath(test_run, expectation))
    for path in failure_paths:
      self.cloud_bucket.RemoveFile(path)

  def GetFailure(self, test_run, expectation):
    """Returns a given test failure's expected, diff, and actual images.

    Args:
      test_run: the name of the test_run.
      expectation: the name of the expectation the result corresponds to.

    Returns:
      A named tuple: Failure containing three images: expected, diff, and
      actual.

    Raises:
      cloud_bucket.NotFoundError: if the result is not found in GS.
    """
    expected = self.DownloadImage(
        GetExpectationPath(expectation, 'expected.png'))
    actual = self.DownloadImage(
        GetFailurePath(test_run, expectation, 'actual.png'))
    diff = self.DownloadImage(
        GetFailurePath(test_run, expectation, 'diff.png'))
    info = json.loads(self.cloud_bucket.DownloadFile(
        GetFailurePath(test_run, expectation, 'info.txt')))
    Failure = collections.namedtuple(
        'Failure', ['expected', 'diff', 'actual', 'info'])
    return Failure(expected, diff, actual, info)

  def GetAllPaths(self, prefix, max_keys=None, marker=None, delimiter=None):
    """Gets urls to all files in GS whose path starts with a given prefix.

    Args:
      prefix: the prefix to filter files in GS by.
      max_keys: Integer. Specifies the maximum number of objects returned
      marker: String. Only objects whose fullpath starts lexicographically
        after marker (exclusively) will be returned
      delimiter: String. Turns on directory mode, specifies characters
        to be used as directory separators

    Returns:
      a list containing urls to all objects that started with
        the prefix.
    """
    return self.cloud_bucket.GetAllPaths(
        prefix, max_keys=max_keys, marker=marker, delimiter=delimiter)
| bsd-3-clause |
m-a-d-n-e-s-s/madness | src/madness/mra/tools/mathutil.py | 6 | 31181 |
# The stuff in this file is now obsolete and is
# here to keep the old twoscalecoeff code working.
# Don't write new code using this stuff.
from array import *
from math import sqrt
def vector(n):
    """Return a new length-n vector of zeros as a double-precision array."""
    zeros = array('d', [0.0])
    return zeros * n
def zerovector(n):
    """Return a length-n vector of zeros (same result as vector(n))."""
    return array('d', [0.0] * n)
def zeromatrix(n, m):
    """Return an n-by-m matrix of zeros as a list of 'd'-typed array rows.

    BUG FIX: the original built the row list with ``a = range(n)`` and then
    overwrote each slot, which only works on Python 2 where range() returns
    a list; a list comprehension is behaviorally identical and portable.
    """
    return [array('d', [0.0] * m) for _ in range(n)]
def copyvector(x):
    """Return a fresh 'd'-typed array holding the elements of x."""
    return array('d', x)
def copymatrix(x):
    """Return an element-wise copy of matrix x as a list of 'd'-typed rows.

    The column count is taken from the first row, matching the original
    behavior (extra elements in later rows are ignored; shorter rows raise
    IndexError).
    """
    cols = len(x[0])
    return [array('d', (row[j] for j in range(cols))) for row in x]
def transpose(x):
    """Return the transpose of matrix x as a list of 'd'-typed array rows."""
    nrows = len(x)
    ncols = len(x[0])
    return [array('d', (x[i][j] for i in range(nrows)))
            for j in range(ncols)]
def dot(a, b):
    """Return the inner product of a and b, indexed over len(a)."""
    return sum(a[i] * b[i] for i in range(len(a)))
def vector_norm(x):
    '''
    A translation of the BLAS routine dnrm2: the Euclidean (2-)norm of x.

    The scale/ssq formulation accumulates sum((x[i]/scale)**2) where scale
    tracks the largest magnitude seen so far. This avoids the overflow and
    underflow that a naive sqrt(sum(x[i]**2)) suffers for extreme values,
    so do not "simplify" it into a plain sum of squares.
    '''
    n = len(x)
    if n < 1:
        return 0.0
    elif n == 1:
        return abs(x[0])
    else:
        scale = 0.0
        ssq = 1.0
        for i in range(n):
            if x[i] != 0.0:
                absxi = abs(x[i])
                if scale < absxi:
                    # New largest magnitude: rescale the running sum to the
                    # new scale before folding in this element.
                    ssq = 1.0 + ssq*(scale/absxi)**2
                    scale = absxi
                else:
                    ssq = ssq + (absxi/scale)**2
        return scale*sqrt(ssq)
def mxm(a, b):
    """Matrix-matrix product c = a * b.

    a is n-by-k and b is k-by-m; the result is an n-by-m list of
    'd'-typed array rows.

    Raises:
        ValueError: if the inner dimensions of a and b differ. (The
        original raised a string exception, which has been invalid since
        Python 2.6; the message is preserved.)
    """
    n = len(a)
    k = len(a[0])
    kk = len(b)
    m = len(b[0])
    if kk != k:
        raise ValueError("matrices do not conform for multiplication")
    c = [array('d', [0.0] * m) for _ in range(n)]
    for i in range(n):
        for j in range(m):
            total = 0.0
            for l in range(k):
                total = total + a[i][l] * b[l][j]
            c[i][j] = total
    return c
# Running estimate of the floating-point operations spent in sparse_mtxm.
sparse_mtxm_nflop = 0.0

def sparse_mtxm(a, b):
    """C = AT * B using sparsity in both. Currently assumes full storage.

    a is k-by-n and b is k-by-m; the result C is n-by-m. Also accumulates
    an estimated flop count in the module-level ``sparse_mtxm_nflop``.
    (The original placed this description after the ``global`` statement,
    where it was not a real docstring.)

    Raises:
        ValueError: if a and b do not have the same number of rows. (The
        original raised a string exception, invalid since Python 2.6.)
    """
    global sparse_mtxm_nflop
    k = len(a)
    kk = len(b)
    n = len(a[0])
    m = len(b[0])
    if kk != k:
        raise ValueError("matrices do not conform for multiplication")
    # ct accumulates the transpose of the result, so the inner update walks
    # contiguous rows of ct.
    ct = [array('d', [0.0] * n) for _ in range(m)]
    nflop = 0.0
    for l in range(k):
        al = a[l]
        bl = b[l]
        # Indices of the non-zero entries of row l of a.
        nonzero = [i for i in range(n) if al[i]]
        numl = float(len(nonzero))
        for j in range(m):
            blj = bl[j]
            if blj:
                ctj = ct[j]
                nflop = nflop + numl
                for i in nonzero:
                    ctj[i] = ctj[i] + al[i] * blj
    sparse_mtxm_nflop = sparse_mtxm_nflop + nflop
    # Return C = transpose(ct), in the same list-of-'d'-arrays form the
    # original produced via transpose().
    return [array('d', (ct[j][i] for j in range(m))) for i in range(n)]
def mxv(a, b):
    """Matrix-vector product c = a * b, returned as a 'd'-typed array.

    Raises:
        ValueError: if the column count of a differs from len(b). (The
        original raised a string exception, which has been invalid since
        Python 2.6; the message is preserved.)
    """
    n = len(a)
    k = len(a[0])
    kk = len(b)
    if k != kk:
        raise ValueError("matrix and vector do not conform for multiplication")
    return array('d', (sum(a[i][j] * b[j] for j in range(k))
                       for i in range(n)))
def printvector(a):
    """Print vector a in %+15.8e format, five entries per line.

    Python 2 print statements; side effects only, no return value.
    """
    n = len(a)
    for i in range(n):
        print ("%+15.8e "%float(a[i])),
        # Break the line after every fifth entry (but not after the last).
        if ((i+1)%5) == 0 and i != (n-1):
            print " "
    print " "
def printmatrix(a):
    """Print matrix a in blocks of (up to) five labeled columns.

    Python 2 print statements; side effects only, no return value.
    """
    n = len(a)
    m = len(a[0])
    for jlo in range(0,m,5):
        # Column-index header for this block of columns.
        print " ",
        for j in range(jlo,min(jlo+5,m)):
            print (" %5i " % j),
        print " "
        # One output row per matrix row, prefixed with the row index.
        for i in range(n):
            print ("%4i " % i),
            for j in range(jlo,min(jlo+5,m)):
                print ("%+15.8e " % float(a[i][j])),
            print " "
def numderiv(func,x,step,eps):
    '''
    Use central differences to compute the gradient and diagonal
    elements of the Hessian.

    func(x) = function to be differentiated
    x[] = point at which to differentiate (mutated during the computation
    .     but restored element-by-element before returning)
    step[] = remembers finite difference step between
    . successive calls. Set to zero on first call
    . or set close to appropriate value (mutated in place)
    eps = expected precision in func

    Returns (f0, g, h): the function value, gradient vector, and Hessian
    diagonal.

    Some care is taken to adjust the step so that the gradient and
    Hessian diagonal are estimated with about 4 digits of precision
    but some noise is unavoidable due either to the noise in the
    function or cubic/higher terms in the Taylor expansion.
    '''
    n = len(x)
    g = zerovector(n)
    h = zerovector(n)
    f0 = func(x)
    for i in range(n):
        if step[i] == 0.0:
            # Initial guess: 1% of the coordinate, with a small floor.
            step[i] = max(abs(x[i])*0.01,0.0001)
        xi = x[i]
        # Adaptively grow/shrink the step until the forward difference is
        # comfortably above the noise floor but not dominated by curvature.
        while 1:
            x[i] = xi + step[i]
            f1 = func(x)
            measure = 1e4*eps*max(abs(f0),eps)
            if abs(f1-f0) < measure:
                #print ' Increasing step ',i,step[i],abs(f1-f0)
                step[i] = step[i]*2.0
            elif abs(f1-f0) > 10.0*measure:
                #print ' Decreasing step ',i,step[i],abs(f1-f0)
                step[i] = step[i]/3.0
            else:
                break
        x[i] = xi - step[i]
        fm1 = func(x)
        x[i] = xi
        # Central-difference gradient and second-difference Hessian diagonal,
        # reusing f1 from the last accepted forward step above.
        g[i] = (f1 - fm1)/(2*step[i])
        h[i] = (f1 + fm1 - 2.0*f0)/(step[i]*step[i])
    return (f0,g,h)
def quadfit(alpha0, f0, alpha1, f1, alpha2, f2):
    '''
    Given 3 points (alpha, f) compute the value, gradient, and Hessian at
    point 0 using an exact quadratic fit.

    Returns (f0, g0, h0): the function value, fitted first derivative, and
    fitted second derivative at alpha0.
    '''
    delta1 = alpha1 - alpha0
    delta2 = alpha2 - alpha0
    # Divided differences relative to point 0.
    d1 = (f1 - f0)/delta1
    d2 = (f2 - f0)/delta2
    h0 = 2.0*(d1 - d2)/(delta1-delta2)
    g0 = d1 - 0.5*h0*delta1
    # The original also computed two verification values (test1/test2) that
    # were never used; that dead code has been removed.
    return (f0, g0, h0)
def takestep(x0, s, alpha):
    """Return x0 + alpha*s as a new 'd'-typed vector (x0 is not modified)."""
    return array('d', (x0[j] + s[j]*alpha for j in range(len(x0))))
def quadratic_step(trust, g0, h0, prnt=1):
    """Return the Newton step -g0/h0, clipped to the trust radius.

    trust = maximum allowed |step|
    g0    = gradient at the current point
    h0    = second derivative (curvature) at the current point
    prnt  = if true, print diagnostics (Python 2 print statements)

    When the curvature is not positive the quadratic model has no interior
    minimum, so a full trust-radius step is taken downhill instead.
    """
    if h0 > 0:
        delta2 = -g0/h0
        if abs(delta2) > trust:
            if prnt:
                print " Step restriction: %f %f " % (delta2, trust)
            # Rescale to the trust radius while preserving the step's sign.
            delta2 = abs(trust*delta2)/delta2
    else:
        if prnt:
            print " Negative curvature "
        # Full trust-radius step opposite to the sign of the gradient.
        delta2 = -abs(trust*g0)/g0
    return delta2
def linesearch(func, x0, s, lsgrad, eps, prnt=1):
    # Minimize func along direction s starting from x0.
    # lsgrad = directional gradient dot(s, grad f) at x0.
    # eps = expected precision in func.
    # Returns (alpha0, f0): the best step length found and its value.
    #
    # Assume here that some halfway OK preconditioning
    # is being used so we expect a step around unity.
    # Nevertheless, must exercise some caution.
    # First step in small increments until we've either
    # bracketed the minimum or gone downhil with enough
    # energy difference to start fitting
    if prnt:
        print " Line search: step alpha grad hess value"
        print " ---- --------- -------- -------- ----------------"
    trust = 0.2
    alpha0 = 0.0
    f0 = func(x0)
    if prnt:
        print " %9.2e %8.1e %16.8e" % \
            (alpha0, lsgrad, f0)
    # Initial probe in the downhill direction along s.
    if lsgrad < 0:
        alpha1 = alpha0 + trust
    else:
        alpha1 = alpha0 - trust
    f1 = func(takestep(x0,s,alpha1))
    if prnt:
        print " %9.2e %16.8e" % \
            (alpha1, f1)
    snorm = vector_norm(s)
    # Shrink the probe until we actually go downhill.
    while f1 > f0:
        if trust*snorm < sqrt(eps):
            if prnt:
                print " system is too badly conditioned for initial step"
            return (alpha0,f0) # Cannot seem to find my way
        trust = trust * 0.5
        if lsgrad < 0:
            alpha1 = alpha0 + trust
        else:
            alpha1 = alpha0 - trust
        f1 = func(takestep(x0,s,alpha1))
        if prnt:
            print " %9.2e %16.8e" % \
                (alpha1, f1)
    # Fit a quadratic from the two points plus the gradient, and move the
    # reference to the lower of the two points.
    g0 = lsgrad
    h0 = (f1-f0-alpha1*g0)/alpha1**2
    if f1 < f0:
        g0 = g0 + h0*(alpha1 - alpha0)
        alpha0, alpha1, f0, f1 = alpha1, alpha0, f1, f0
    alpha2 = alpha0 + quadratic_step(trust,g0,h0,prnt)
    nbackstep =0
    # Refine with successive quadratic fits through the three best points.
    for iter in range(1,10):
        f2 = func(takestep(x0,s,alpha2))
        #print ' alphas ', alpha0, alpha1, alpha2
        #print ' fs ', f0, f1, f2
        if iter == 1:
            f2prev = f2
        # Check for convergence or insufficient precision to proceed further
        if (abs(f0-f1)<(10*eps)) and (abs(f1-f2)<(10*eps)):
            if prnt:
                print " ",
                print " Insufficient precision ... terminating LS"
            break
        if (f2-f2prev) > 0:
            # New point is higher than previous worst
            if nbackstep < 3:
                nbackstep = nbackstep + 1
                if prnt:
                    print " ",
                    print " Back stepping due to uphill step"
                trust = max(0.01,0.2*abs(alpha2 - alpha0)) # Reduce trust radius
                alpha2 = alpha0 + 0.2*(alpha2 - alpha0)
                continue
        elif (f2-f0) < 0:
            trust = min(4.0,trust*2.0) # Seem to have narrowed the search
            nbackstep = 0
        f2prev = f2
        # Order in increasing energy
        if f1 < f0:
            alpha0, alpha1, f0, f1 = alpha1, alpha0, f1, f0
        if f2 < f0:
            alpha0, alpha2, f0, f2 = alpha2, alpha0, f2, f0
        if f2 < f1:
            alpha1, alpha2, f1, f2 = alpha2, alpha1, f2, f1
        (f0, g0, h0) = quadfit(alpha0, f0, alpha1, f1, alpha2, f2)
        if prnt:
            print " %4i %9.2e %8.1e %8.1e %16.8e" % \
                (iter, alpha0, g0, h0, f0)
        if (h0>0.0) and (abs(g0) < 0.03*abs(lsgrad)):
            if prnt:
                print " ",
                print " gradient reduced 30-fold ... terminating LS"
            break
        # Determine the next step
        delta = quadratic_step(trust,g0,h0,prnt)
        alpha2 = alpha0 + delta
        # Projected reduction from the quadratic model; stop when it is
        # below the noise level of func.
        df = g0*delta + 0.5*h0*delta*delta
        if abs(df) < 10.0*eps:
            if prnt:
                print " ",
                print " projected energy reduction < 10*eps ... terminating LS"
            break
    return (alpha0, f0)
def jacobi(a):
    '''
    Diagonalize a real symmetric matrix using the variable threshold
    cyclic Jacobi method.  The input matrix is unmodified.
    (v,e) = jacobi(a)
    Input: a[n][m] is a real symmetric matrix
    Returns: (v,e) where v is the list of eigenvectors and e is an
    vector of the corresponding eigenvalues in ascending order.
    v[k] is a vector containing the kth eigenvector.  These satisfy
    A*Vt = Vt*e   or   V*A = e*V   or
    sum(j)(a[i][j]v[k][j]) = e[k]*v[k][i]
    Raises ValueError for a non-square or non-symmetric input and
    RuntimeError if the iteration fails to converge.
    '''
    # Fix: the original raised bare strings ("raise 'Matrix must be
    # square'"), which is a TypeError on Python >= 2.6 and masks the
    # intended diagnostic.  Real exception objects are raised instead.
    a = copymatrix(a)   # work on a copy so the caller's matrix survives
    n = len(a)
    m = len(a[0])
    if n != m:
        raise ValueError('Matrix must be square')
    for i in range(n):
        for j in range(m):
            if a[i][j] != a[j][i]:
                raise ValueError('Matrix must be symmetric')
    tolmin = 5.0e-16
    tol = 1e-2          # threshold is tightened as off-diagonals shrink
    # v accumulates the product of all rotations (starts as identity).
    v = zeromatrix(n,n)
    for i in range(n):
        v[i][i] = 1.0
    # maxd tracks the largest diagonal magnitude for relative screening.
    maxd = 0.0
    for i in range(n):
        maxd = max(abs(a[i][i]),maxd)
    nrotsum = 0
    for iter in range(50):
        maxdaij = 0.0
        nrot = 0
        for i in range(n):
            for j in range(i+1,n): # j>i
                aii = a[i][i]
                ajj = a[j][j]
                aij = a[i][j]
                daij = abs(aij)
                maxdaij = max(maxdaij,daij/maxd)
                if daij > tol*maxd: # Screen small elements
                    s = ajj - aii
                    ds = abs(s)
                    if daij > (tolmin*ds): # Check for sufficient precision
                        nrot = nrot + 1
                        if (tolmin*daij) > ds:
                            # Degenerate diagonal: rotate by 45 degrees.
                            c = s = 1/sqrt(2.)
                        else:
                            t = aij/s
                            u = 0.25/sqrt(0.25+t*t)
                            c = sqrt(0.5+u)
                            s = 2.*t*u/c
                        # Apply the rotation to rows/columns i and j,
                        # touching only the stored (upper) triangle.
                        for k in range(i+1):
                            t = a[k][i]
                            u = a[k][j]
                            a[k][i] = c*t - s*u
                            a[k][j] = s*t + c*u
                        ai = a[i]
                        aj = a[j]
                        for k in range(i+1,j):
                            t = ai[k]
                            u = a[k][j]
                            ai[k] = c*t - s*u
                            a[k][j] = s*t + c*u
                        a[j][j] = s*aij + c*ajj
                        a[i][i] = c*a[i][i] - s*(c*aij - s*ajj)
                        for k in range(j,n):
                            t = ai[k]
                            u = aj[k]
                            ai[k] = c*t - s*u
                            aj[k] = s*t + c*u
                        # Accumulate the rotation into the eigenvectors.
                        vi = v[i]
                        vj = v[j]
                        for k in range(n):
                            t = vi[k]
                            u = vj[k]
                            vi[k] = c*t - s*u
                            vj[k] = s*t + c*u
                        a[j][i] = a[i][j] = 0.0
                        maxd = max(maxd,abs(a[i][i]),abs(a[j][j]))
        nrotsum = nrotsum + nrot
        if nrot == 0 and tol <= tolmin:
            break
        # Tighten the threshold based on the largest remaining element.
        tol = min(tol,maxdaij*1e-1,maxdaij**2)
        tol = max(tol,tolmin)
    if nrot != 0:
        raise RuntimeError("Jacobi iteration did not converge in 50 passes")
    # Sort eigenvectors and values into increasing order (insertion style).
    e = zerovector(n)
    for i in range(n):
        e[i] = a[i][i]
        for j in range(i):
            if e[j] > e[i]:
                (e[i],e[j]) = (e[j],e[i])
                (v[i],v[j]) = (v[j],v[i])
    return (v,e)
def hessian_update_bfgs(hp, dx, g, gp):
    '''
    Apply the BFGS update to the approximate Hessian h[][].
    hp[][] = Hessian matrix from previous iteration
    dx[] = Step from previous iteration
    .      (dx[] = x[] - xp[] where xp[] is the previous point)
    g[] = gradient at current point
    gp[] = gradient at previous point
    Returns the updated hessian (hp is not modified).
    The update is skipped (and a message printed) when the step or
    gradient change is degenerate, which would make the rank-two
    correction numerically meaningless.
    '''
    n = len(hp)
    hdx = mxv(hp,dx)          # H.dx
    dg = zerovector(n)        # gradient change g - gp
    for i in range(n):
        dg[i] = g[i] - gp[i]
    dxhdx = dot(dx,hdx)
    dxdx = dot(dx,dx)
    dxdg = dot(dx,dg)
    dgdg = dot(dg,dg)
    h = copymatrix(hp)
    # Only update when dx and dg are non-trivial and not nearly orthogonal
    # (cosine > 1e-8), otherwise the denominators blow up.
    if (dxdx > 0.0) and (dgdg > 0.0) and (abs(dxdg/sqrt(dxdx*dgdg)) > 1.e-8):
        for i in range(n):
            for j in range(n):
                h[i][j] = h[i][j] + dg[i]*dg[j]/dxdg - hdx[i]*hdx[j]/dxhdx
    else:
        print ' BFGS not updating dxdg (%e), dgdg (%e), dxhdx (%f), dxdx(%e)' % (dxdg, dgdg, dxhdx, dxdx)
    return h
def quasinr(func, guess, tol, eps, printvar=None, prnt=1):
    '''
    Unconstrained minimization of a function of n variables
    without analytic derivatives using quasi-Newtwon with BFGS update
    and numerical gradients.
    func(x) is a function that takes an array of n values and
    returns the function value
    guess[] is a vector of n values for the initial guess
    tol is the convergence criterion for the maximum value
    of the gradient
    eps is the expected precision in the function value
    printvar(x) is an optional user function to print the values of
    parameters each macro iteration
    Returns (gnrm, x): the final gradient norm and solution vector.
    '''
    n = len(guess)
    x = copyvector(guess)
    s = zerovector(n)
    g = zerovector(n)
    gp = zerovector(n)
    step = zerovector(n)     # finite-difference steps remembered by numderiv
    hessian = zeromatrix(n,n)
    alpha = 0.0
    for iter in range(50*n):
        (value,g,h) = numderiv(func, x, step, eps)
        gnrm = vector_norm(g)
        if prnt:
            print ' '
            print ' iter gnrm value '
            print ' ---- --------- ----------------'
            print "%4i %9.2e %16.8e" % (iter,gnrm,value)
        if (prnt and printvar):
            printvar(x)
        if gnrm < tol:
            if prnt: print ' Converged!'
            break
        if iter == 0:
            # First pass: seed the Hessian with the numerical diagonal.
            for i in range(n):
                hessian[i][i] = max(abs(h[i]),1e-4)
        else:
            hessian = hessian_update_bfgs(hessian, s, g, gp)
        (v,e) = jacobi(hessian)
        emax = max(map(abs,e))
        emin = emax*eps*100.0 # Control noise in small eigenvalues
        if prnt:
            print '\n Eigenvalues of the Hessian:'
            printvector(e)
        # Transform to spectral form, take step, transform back
        gs = mxv(v,g)
        for i in range(n):
            if e[i] < emin:
                # Small/negative curvature modes are regularized to emin.
                if prnt:
                    print ' Mode %d: small/neg eigenvalue (%f).' % (i, e[i])
                s[i] = -gs[i]/emin
            else:
                s[i] = -gs[i]/e[i]
        s = mxv(transpose(v),s)
        # Apply overall step restriction ... better LS obviates this
        scale = 1.0
        for i in range(n):
            trust = abs(x[i])
            if trust == 0.0: trust = 0.1 # ????
            if abs(s[i]) > trust:
                if prnt:
                    print ' restricting ', i, trust, abs(x[i]), \
                        abs(x[i]/sqrt(abs(hessian[i][i]))), s[i]
                scale = min(scale,trust/abs(s[i]))
        if scale != 1.0:
            for i in range(n):
                s[i] = s[i]*scale
        (alpha,value) = linesearch(func, x, s, dot(s,g), eps, prnt)
        if alpha == 0.0:
            if prnt:
                print ' Insufficient precision to proceed further'
            break
        # Accept the step and remember the gradient for the BFGS update.
        for i in range(n):
            s[i] = s[i]*alpha
            x[i] = x[i] + s[i]
            gp[i]= g[i]
    return (gnrm,x)
def cgminold(func, dfunc, guess, tol):
    '''
    Simple conjugate gradient assuming analytic derivatives.
    func(x) returns the function value; dfunc(x) its gradient.
    guess[] is the starting point; tol is the convergence threshold
    on the maximum gradient component.
    Returns (value, x).
    '''
    n = len(guess)
    x = copyvector(guess)
    s = zerovector(n)
    g = zerovector(n)
    gp= zerovector(n)
    value = func(x)
    for iter in range(10*n):
        g = dfunc(x)
        gmax = max(map(abs,g))
        print ' '
        print ' iter gmax value '
        print ' ---- --------- ----------------'
        print "%4i %9.2e %16.8f" % (iter,gmax,value)
        if gmax < tol:
            print ' Converged!'
            break
        # Reset conjugacy every 20 iterations (steepest descent step).
        if (iter == 0) or ((iter%20) == 0):
            beta = 0.0
        else:
            # Polak-Ribiere-like update built from successive gradients.
            beta = (dot(g,g) - dot(g,gp))/(dot(s,g)-dot(s,gp))
        for i in range(n):
            s[i] = -g[i] + beta*s[i]
        (alpha,value) = linesearch(func, x, s, dot(s,g), 1e-12)
        for i in range(n):
            s[i] = s[i]*alpha
            x[i] = x[i] + s[i]
            gp[i]= g[i]
    return (value,x)
def cgmin(func, dfunc, guess, tol, precond=None, reset=None):
    '''
    Conjugate gradient with optional preconditioning and
    use of analytic gradients.
    func(x) returns the function value; dfunc(x) its gradient.
    precond, if given, is called as precond(x, g) and must return the
    preconditioned gradient.
    reset is the number of iterations between conjugacy resets
    (defaults to n, and is capped at n).
    Returns (value, x).
    '''
    n = len(guess)
    x = copyvector(guess)
    s = zerovector(n)
    g = zerovector(n)
    gp= zerovector(n)
    value = func(x)
    if not reset:
        reset = n
    reset = min(reset,n)
    for iter in range(10*n):
        g = dfunc(x)
        gmax = max(map(abs,g))
        print ' '
        print ' iter gmax value '
        print ' ---- --------- ----------------'
        print "%4i %9.2e %16.8f" % (iter,gmax,value)
        if gmax < tol:
            print ' Converged!'
            break
        if precond:
            # NOTE(review): called with two args (x, g) here, but the demo
            # preconditioner in __main__ accepts only (g) — verify the
            # intended preconditioner signature.
            precondg = precond(x,g)
        else:
            precondg = g
        if (iter % reset) == 0:
            beta = 0.0
        else:
            beta = (dot(precondg,g) - dot(precondg,gp))/(dot(s,g)-dot(s,gp))
        for i in range(n):
            s[i] = -precondg[i] + beta*s[i]
        # Line-search precision scaled to the current function magnitude.
        (alpha,value) = linesearch(func, x, s, dot(s,g),
                                   max(1e-16,abs(value)*1e-12))
        for i in range(n):
            s[i] = s[i]*alpha
            x[i] = x[i] + s[i]
            gp[i]= g[i]
    return (value,x)
def cgmin2(func, guess, tol, eps, printvar=None,reset=None):
    '''
    Unconstrained minimization of a function of n variables
    without analytic derivatives using conjugate gradient with
    diagonal preconditioning.
    func(x) is a function that takes a vector of n values and
    returns the function value
    guess[] is a vector of n values for the initial guess
    tol is the convergence criterion for the maximum value
    of the gradient
    eps is the expected precision in the function value
    printvar(x) is an optional user function to print the values of
    parameters each iteration
    reset is the number of iterations between forced resets of the
    conjugacy.  In principle this could be n but noise in the
    numerical gradients makes a smaller number a better choice.
    Returns (value, x).
    '''
    n = len(guess)
    x = copyvector(guess)
    s = zerovector(n)
    g = zerovector(n)
    gp = zerovector(n)
    step = zerovector(n)       # finite-difference steps kept by numderiv
    precondg = zerovector(n)
    alpha = 0.0
    if not reset:
        reset = n
    reset = min(reset,n)
    for iter in range(50*n):
        # Numerical gradient and Hessian diagonal in one shot.
        (value,g,hh) = numderiv(func, x, step, eps)
        gmax = max(map(abs,g))
        print ' '
        print ' iter gmax value '
        print ' ---- --------- ----------------'
        print "%4i %9.2e %16.8f" % (iter,gmax,value)
        if (printvar):
            printvar(x)
        if gmax < tol:
            print ' Converged!'
            break
        if (iter % reset) == 0:
            # On the first iteration or if not applying conjugacy
            # we can recompute the diagonal preconditioner
            h = copyvector(hh)
            for i in range(n):
                h[i] = max(abs(h[i]),1e-6)
        # Preconditioning with the diagonal of the Hessian
        for i in range(n):
            precondg[i] = g[i] / h[i]
        # Should be able to reset every n steps but noisy gradients
        # means that we don't have enough info.
        if (iter % reset) == 0:
            if iter != 0:
                print" Resetting conjugacy"
            beta = 0.0
        else:
            beta = (dot(precondg,g) - dot(precondg,gp))/(dot(s,g)-dot(s,gp))
        for i in range(n):
            s[i] = -precondg[i] + beta*s[i]
        (alpha,value) = linesearch(func, x, s, dot(s,g), eps)
        if alpha == 0.0:
            # LS failed, probably due to lack of precision.
            if beta != 0.0:
                # Retry once with a plain descent direction.
                print "LS failed - trying preconditioned steepest descent direction"
                for i in range(n):
                    s[i] = -g[i]
                (alpha,value) = linesearch(func, x, s, dot(s,g), eps)
            if alpha == 0.0:
                print " Insufficient precision to proceed further"
                break
        for i in range(n):
            s[i] = s[i]*alpha
            x[i] = x[i] + s[i]
            gp[i]= g[i]
    return (value,x)
def choleski(A):
    '''
    Returns the Choleski factorization of a square positive definite
    symmetric matrix.  Lij i<=j and A=L.Lt
    Note: both triangles of the returned matrix are filled
    (L[k][i] = L[i][k]), which backward_substitution relies on.
    Raises ValueError if A is not square/symmetric or is not
    positive definite.
    '''
    # Fixes: use exception-object raise syntax (the old
    # "raise ValueError,..." form is Python-2-only); rename the inner
    # loop variable which shadowed the column count m, and the
    # accumulator which shadowed the builtin sum.
    n = len(A)
    m = len(A[0])
    if n != m:
        raise ValueError("choleski factorization requires square matrix")
    for i in range(n):
        for j in range(n):
            aij = A[i][j]
            aji = A[j][i]
            # Symmetry check with a relative tolerance.
            if abs(aij - aji) > (1e-15*max(abs(aij),abs(aji))):
                raise ValueError("choleski factorization requires symmetric matrix")
    L = zeromatrix(n,n)
    for k in range(n):
        acc = 0.0
        for p in range(k):
            acc = acc + L[k][p]*L[k][p]
        # A positive-definite matrix must leave a positive pivot.
        if A[k][k] <= acc:
            raise ValueError("choleski factorization requires positive definite matrix")
        L[k][k] = sqrt(A[k][k]-acc)
        for i in range(k+1,n):
            acc = 0.0
            for p in range(k):
                acc = acc + L[i][p]*L[k][p]
            # Store both triangles so L can also serve as U = L^T.
            L[k][i] = L[i][k] = (A[i][k] - acc)/L[k][k]
    return L
def forward_elimination(L,b):
    '''
    In solving LUx=b, the first step is the forward elimination, y=L^-1.b
    L must be lower triangular (in its [i][j], j<=i entries).
    Returns the vector y.
    '''
    # Fix: removed a dead store that pre-assigned y[n-1] before the loop;
    # the loop's i = n-1 pass overwrote it unconditionally.  Also renamed
    # the accumulator that shadowed the builtin sum.
    n = len(L)
    y = zerovector(n)
    for i in range(n):
        acc = 0.0
        for j in range(i):
            acc = acc + L[i][j]*y[j]
        y[i] = (b[i] - acc)/L[i][i]
    return y
def backward_substitution(U,y):
    '''
    In solving LUx=b, the second step is the backward substitution,
    x = U^-1.y where U is upper triangular.  Returns the vector x.
    '''
    n = len(U)
    x = zerovector(n)
    # Seed the last unknown, then sweep upwards through the rows.
    x[n-1] = y[n-1]/U[n-1][n-1]
    for row in reversed(range(n-1)):
        acc = 0.0
        for col in range(row+1,n):
            acc = acc + U[row][col]*x[col]
        x[row] = (y[row] - acc)/U[row][row]
    return x
def choleski_solve(A,b):
    '''
    Solve the linear system A.x = b via Choleski factorization
    (A must be symmetric positive definite).  Returns x.
    '''
    factor = choleski(A)
    intermediate = forward_elimination(factor, b)
    return backward_substitution(factor, intermediate)
def davidson(A,thresh=1e-6,maxsub=10,guess=None):
'''
Return the lowest eigenvalue and corresponding vector of A
'''
n = len(A)
if guess:
x = [copyvector(guess)]
scale = 1.0/sqrt(vector_norm(x[0]))
for i in range(n):
x[0][i] = x[0][i]*scale
else:
x = [zerovector(n)]
imin = 0
for i in range(1,n):
if A[i] < A[imin]:
imin = i
x[0][imin] = 1.0
maxsub = min(maxsub,n)
Ax = []
nsub = 1
for iter in range(10*n):
Ax.append(mxv(A, x[nsub-1]))
xAx = mxm(x,transpose(Ax))
for j in range(nsub-1):
for k in range(j+1,nsub):
xAx[k][j] = xAx[j][k]
#print 'Reduced matrix'
#printmatrix(xAx)
(v,e) = jacobi(xAx)
#print 'Reduced evecs'
#printmatrix(v)
#print 'Reduced evals'
#printvector(e)
z = zerovector(n)
bestx = zerovector(n)
err = 0.0
for i in range(n):
Axi = 0.0
xi = 0.0
for j in range(nsub):
xi = xi + v[0][j]* x[j][i]
Axi=Axi + v[0][j]*Ax[j][i]
denom = (A[i][i] - e[0])
if (denom < 0.01):
denom = 0.01
step = -(Axi - e[0]*xi)/denom
#print i, step, Axi, e[0], xi, denom
err = err + step*step
z[i] = xi + step
bestx[i] = xi
scale = 1.0/sqrt(vector_norm(bestx))
for i in range(n):
bestx[i] = bestx[i]*scale
print "%5d %5d %20.10f %9.1e" % (iter, nsub, e[0], sqrt(err))
if sqrt(err) < thresh:
return (bestx,e[0])
for loop in range(2):
for j in range(nsub):
zj = dot(z,x[j])
for i in range(n):
z[i] = z[i] - zj*x[j][i]
scale = 1.0/vector_norm(z)
for i in range(n):
z[i] = z[i]*scale
if nsub < maxsub:
x.append(z)
nsub = nsub + 1
else:
nsub = 1
x = [bestx]
Ax = []
raise "davidson did not converge"
if __name__ == '__main__':
    # Ad-hoc self tests.  NOTE(review): sys.exit() is called below before
    # "import sys" appears later in this block — this only works if sys is
    # imported earlier in the file; confirm.  The early exits also make the
    # later tests unreachable (debug scaffolding left in place).
    def precond(g):
        # Used to test optional preconditioner for cgmin().
        # NOTE(review): cgmin invokes precond(x, g) with two arguments,
        # which does not match this one-argument signature — verify.
        precondg = copyvector(g)
        for i in range(len(g)):
            precondg[i] = precondg[i]/(i+2.0)
        return precondg
    def df(x):
        # Analytic gradient of f below: df_i = x_i*(i+1) + sum_j x_j.
        d = zerovector(len(x))
        for i in range(len(x)):
            d[i] = x[i]*(i+1)
            for j in range(len(x)):
                d[i] = d[i] + x[j]
        return d
    def f(x):
        # Convex quadratic test function:
        # 0.5*sum_ij x_i*x_j + 0.5*sum_i (i+1)*x_i^2.
        sum = 0.0
        for i in range(len(x)):
            for j in range(len(x)):
                sum = sum + 0.5*x[i]*x[j]
        for i in range(len(x)):
            sum = sum + 0.5*x[i]*x[i]*(i+1)
        return sum
    import random
    print '\n\n TESTING DAVIDSON DIAGONALIZATION'
    # Random symmetric matrix with an amplified diagonal (diagonally
    # dominant) so the Davidson preconditioner is effective.
    n = 8
    a = zeromatrix(n,n)
    for i in range(n):
        for j in range(i,n):
            a[j][i] = a[i][j] = random.random()-0.5
        a[i][i] = a[i][i]*n
    (v,e) = jacobi(a)
    print ' Eval from jacobi', e[0]
    davidson(a)
    sys.exit()
    print '\n\n TESTING SPARSE MATRIX PRODUCT'
    n = 13
    m = 17
    k = 11
    a = zeromatrix(k,n)
    b = zeromatrix(k,m)
    # ~25%-dense random matrices for the sparse product check.
    for i in range(k):
        for j in range(n):
            value = random.random()
            if (value < 0.25): a[i][j] = value
        for j in range(m):
            value = random.random()
            if (value < 0.25): b[i][j] = value
    cc= mxm(transpose(a),b)
    c = sparse_mtxm(a,b)
    err = 0.0
    for i in range(n):
        for j in range(m):
            err = err + abs(c[i][j]-cc[i][j])
    print "sparse flops:", sparse_mtxm_nflop, "dense flops:", n*m*k
    if (err < 1e-14):
        print " OK"
    else:
        print " FAILED"
    import sys
    sys.exit()
    print '\n\n TESTING JACOBI EIGENSOLVER\n\n'
    n = 5
    a = zeromatrix(n,n)
    for i in range(n):
        for j in range(i,n):
            a[j][i] = a[i][j] = (i*j+1.)/(i+j+1.)
    (v,e)= jacobi(a)
    print ' eigenvalues'
    printvector(e)
    #print ' v '
    #printmatrix(v)
    # Verify normalization, Rayleigh quotients and residuals A.v - e.v.
    ev = mxm(v,a)
    for i in range(n):
        norm = dot(v[i],v[i])
        if abs(norm-1.0) > 1e-14:
            print ' Error in eigenvector norm', i, norm
        etest = dot(v[i],ev[i])
        if abs(etest-e[i]) > 1e-14*max(abs(e[0]),abs(e[-1])):
            print ' Error in eigenvalue ', i, e[i], etest
        err = 0.0
        for j in range(n):
            err = max(err,abs(ev[i][j] - e[i]*v[i][j]))
        err = err/(n*max(1.0,abs(e[i])))
        if err > 1e-12:
            print ' Error in eigenvector ', i, err
    sys.exit()
    print '\n\n TESTING QUASI-NR SOLVER \n\n'
    quasinr(f, [1.,0.5,0.3,-0.4], 1e-4, 1e-10)
    print '\n\n TESTING GC WITH NUM. GRAD. AND DIAG. PRECOND.\n\n'
    cgmin2(f, [1.,0.5,0.3,-0.4], 1e-4, 1e-10, reset=20)
    print '\n\n TESTING GC WITH ANAL. GRAD. AND WITHOUT OPTIONAL PRECOND.\n\n'
    cgmin(f, df, [1.,0.5,0.3,-0.4], 1e-4)
    print '\n\n TESTING GC WITH ANAL. GRAD. AND WITH OPTIONAL PRECOND.\n\n'
    cgmin(f, df, [1.,0.5,0.3,-0.4], 1e-4, precond=precond)
    print '\n\n TESTING GC WITH ANAL. GRAD. AND NO PRECOND.\n\n'
    cgminold(f, df, [1.,0.5,0.3,-0.4], 1e-4)
    print '\n\n TESTING THE CHOLESKI FACTORIZATION\n\n'
    n = 25
    A = zeromatrix(n,n)
    b = zerovector(n)
    for i in range(n):
        A[i][i] = 10.*(i+1) # Construct A to be positive definite
        b[i] = 1.0/(i+1.0)
        for j in range(i+1,n):
            A[j][i] = A[i][j] = (i+j)/(2.0+n)
    x = choleski_solve(A,b)
    # Residual check |A.x - b| in the 1-norm.
    Ax = mxv(A,x)
    err = 0.0
    for i in range(n):
        err = err + abs(Ax[i] - b[i])
    print ' Cholesky linear equation solution error is ', err
| gpl-2.0 |
brettdh/rbtools | rbtools/commands/patch.py | 1 | 6111 | import re
from rbtools.api.errors import APIError
from rbtools.commands import Command, CommandError, Option
from rbtools.utils.filesystem import make_tempfile
# MARKDOWN_ESCAPED_CHARS comes from markdown.Markdown.ESCAPED_CHARS. We don't
# want to have a dependency on markdown for rbtools, so we just copy it into
# here.
MARKDOWN_ESCAPED_CHARS = ['\\', '`', '*', '_', '{', '}', '[', ']',
                          '(', ')', '>', '#', '+', '-', '.', '!']
# All escapable characters collapsed into one regex-safe character class.
MARKDOWN_SPECIAL_CHARS = re.escape(r''.join(MARKDOWN_ESCAPED_CHARS))
# Matches one backslash-escaped special character; group 1 is the
# character itself (used to unescape Markdown text).
UNESCAPE_CHARS_RE = re.compile(r'\\([%s])' % MARKDOWN_SPECIAL_CHARS)
class Patch(Command):
    """Applies a specific patch from a RB server.
    The patch file indicated by the request id is downloaded from the
    server and then applied locally."""
    name = "patch"
    author = "The Review Board Project"
    args = "<review-request-id>"
    option_list = [
        Option("-c", "--commit",
               dest="commit",
               action="store_true",
               default=False,
               help="Commit using information fetched "
                    "from the review request (Git only)."),
        Option("--diff-revision",
               dest="diff_revision",
               default=None,
               help="revision id of diff to be used as patch"),
        Option("--px",
               dest="px",
               default=None,
               help="numerical pX argument for patch"),
        Option("--print",
               dest="patch_stdout",
               action="store_true",
               default=False,
               help="print patch to stdout instead of applying"),
        Command.server_options,
        Command.repository_options,
    ]
    def get_patch(self, request_id, api_root, diff_revision=None):
        """Return the diff as a string, the used diff revision and its basedir.
        If a diff revision is not specified, then this will look at the most
        recent diff.
        Raises CommandError if the diffs cannot be fetched or the
        revision does not exist.
        """
        try:
            diffs = api_root.get_diffs(review_request_id=request_id)
        except APIError, e:
            raise CommandError("Error getting diffs: %s" % e)
        # Use the latest diff if a diff revision was not given.
        # Since diff revisions start a 1, increment by one, and
        # never skip a number, the latest diff revisions number
        # should be equal to the number of diffs.
        if diff_revision is None:
            diff_revision = diffs.total_results
        try:
            diff = diffs.get_item(diff_revision)
            diff_body = diff.get_patch().data
            # Older servers may not report a basedir; default to ''.
            base_dir = getattr(diff, 'basedir', None) or ''
        except APIError:
            raise CommandError('The specified diff revision does not exist.')
        return diff_body, diff_revision, base_dir
    def apply_patch(self, repository_info, tool, request_id, diff_revision,
                    diff_file_path, base_dir):
        """Apply patch patch_file and display results to user."""
        print ("Patch is being applied from request %s with diff revision "
               " %s." % (request_id, diff_revision))
        tool.apply_patch(diff_file_path, repository_info.base_path,
                         base_dir, self.options.px)
    def _extract_commit_message(self, review_request):
        """Returns a commit message based on the review request.
        The commit message returned contains the Summary, Description, Bugs,
        and Testing Done fields from the review request, if available.
        """
        info = []
        summary = review_request.summary
        description = review_request.description
        testing_done = review_request.testing_done
        # Skip the summary if the description already leads with it.
        if not description.startswith(summary):
            info.append(summary)
        info.append(description)
        if testing_done:
            info.append('Testing Done:\n%s' % testing_done)
        if review_request.bugs_closed:
            info.append('Bugs closed: %s'
                        % ', '.join(review_request.bugs_closed))
        info.append('Reviewed at %s' % review_request.absolute_url)
        return '\n\n'.join(info)
    def main(self, request_id):
        """Run the command."""
        repository_info, tool = self.initialize_scm_tool(
            client_name=self.options.repository_type)
        server_url = self.get_server_url(repository_info, tool)
        api_client, api_root = self.get_api(server_url)
        # Get the patch, the used patch ID and base dir for the diff
        diff_body, diff_revision, base_dir = self.get_patch(
            request_id,
            api_root,
            self.options.diff_revision)
        if self.options.patch_stdout:
            print diff_body
        else:
            # A dirty working directory is only fatal when we intend to
            # commit; otherwise just warn.  Tools that cannot report
            # pending changes raise NotImplementedError, which we ignore.
            try:
                if tool.has_pending_changes():
                    message = 'Working directory is not clean.'
                    if not self.options.commit:
                        print 'Warning: %s' % message
                    else:
                        raise CommandError(message)
            except NotImplementedError:
                pass
            tmp_patch_file = make_tempfile(diff_body)
            self.apply_patch(repository_info, tool, request_id, diff_revision,
                             tmp_patch_file, base_dir)
            if self.options.commit:
                try:
                    review_request = api_root.get_review_request(
                        review_request_id=request_id,
                        force_text_type='plain')
                except APIError, e:
                    raise CommandError('Error getting review request %s: %s'
                                       % (request_id, e))
                message = self._extract_commit_message(review_request)
                author = review_request.get_submitter()
                try:
                    tool.create_commit(message, author)
                    print('Changes committed to current branch.')
                except NotImplementedError:
                    raise CommandError('--commit is not supported with %s'
                                       % tool.name)
| mit |
agconti/njode | env/lib/python2.7/site-packages/django/contrib/gis/gdal/tests/test_driver.py | 106 | 1143 | import unittest
from unittest import skipUnless
from django.contrib.gis.gdal import HAS_GDAL
if HAS_GDAL:
from django.contrib.gis.gdal import Driver, OGRException
valid_drivers = ('ESRI Shapefile', 'MapInfo File', 'TIGER', 'S57', 'DGN',
'Memory', 'CSV', 'GML', 'KML')
invalid_drivers = ('Foo baz', 'clucka', 'ESRI Shp')
aliases = {'eSrI': 'ESRI Shapefile',
'TigER/linE': 'TIGER',
'SHAPE': 'ESRI Shapefile',
'sHp': 'ESRI Shapefile',
}
@skipUnless(HAS_GDAL, "GDAL is required")
class DriverTest(unittest.TestCase):
    # Exercises the OGR Driver wrapper: construction from canonical
    # names, rejection of unknown names, and alias resolution.
    def test01_valid_driver(self):
        "Testing valid OGR Data Source Drivers."
        for d in valid_drivers:
            dr = Driver(d)
            # str(Driver) round-trips the canonical driver name.
            self.assertEqual(d, str(dr))
    def test02_invalid_driver(self):
        "Testing invalid OGR Data Source Drivers."
        for i in invalid_drivers:
            self.assertRaises(OGRException, Driver, i)
    def test03_aliases(self):
        "Testing driver aliases."
        # Aliases are case-insensitive shortcuts for canonical names.
        for alias, full_name in aliases.items():
            dr = Driver(alias)
            self.assertEqual(full_name, str(dr))
| bsd-3-clause |
ondras/TeaJS | deps/v8/build/gyp/test/variables/commands/gyptest-commands-ignore-env.py | 330 | 1466 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test that environment variables are ignored when --ignore-environment is
specified.
"""
import os
import TestGyp
test = TestGyp.TestGyp(format='gypd')
# Populate every GYP_* environment variable with junk; with
# --ignore-environment none of these may influence the run.
os.environ['GYP_DEFINES'] = 'FOO=BAR'
os.environ['GYP_GENERATORS'] = 'foo'
os.environ['GYP_GENERATOR_FLAGS'] = 'genflag=foo'
os.environ['GYP_GENERATOR_OUTPUT'] = 'somedir'
expect = test.read('commands.gyp.ignore-env.stdout').replace('\r\n', '\n')
test.run_gyp('commands.gyp',
             '--debug', 'variables',
             '--ignore-environment',
             stdout=expect, ignore_line_numbers=True)
# Verify the commands.gypd against the checked-in expected contents.
#
# Normally, we should canonicalize line endings in the expected
# contents file setting the Subversion svn:eol-style to native,
# but that would still fail if multiple systems are sharing a single
# workspace on a network-mounted file system. Consequently, we
# massage the Windows line endings ('\r\n') in the output to the
# checked-in UNIX endings ('\n').
contents = test.read('commands.gypd').replace('\r', '')
expect = test.read('commands.gypd.golden').replace('\r', '')
if not test.match(contents, expect):
    print "Unexpected contents of `commands.gypd'"
    test.diff(expect, contents, 'commands.gypd ')
    test.fail_test()
test.pass_test()
| bsd-3-clause |
litchfield/django | django/contrib/gis/geos/collections.py | 292 | 4986 | """
This module houses the Geometry Collection objects:
GeometryCollection, MultiPoint, MultiLineString, and MultiPolygon
"""
import json
from ctypes import byref, c_int, c_uint
from django.contrib.gis.geos import prototypes as capi
from django.contrib.gis.geos.geometry import (
GEOSGeometry, ProjectInterpolateMixin,
)
from django.contrib.gis.geos.libgeos import get_pointer_arr
from django.contrib.gis.geos.linestring import LinearRing, LineString
from django.contrib.gis.geos.point import Point
from django.contrib.gis.geos.polygon import Polygon
from django.utils.six.moves import range
class GeometryCollection(GEOSGeometry):
    _typeid = 7   # GEOS type id for GEOMETRYCOLLECTION
    def __init__(self, *args, **kwargs):
        "Initializes a Geometry Collection from a sequence of Geometry objects."
        # Checking the arguments
        if not args:
            raise TypeError('Must provide at least one Geometry to initialize %s.' % self.__class__.__name__)
        if len(args) == 1:
            # If only one geometry provided or a list of geometries is provided
            # in the first argument.
            if isinstance(args[0], (tuple, list)):
                init_geoms = args[0]
            else:
                init_geoms = args
        else:
            init_geoms = args
        # Ensuring that only the permitted geometries are allowed in this collection
        # this is moved to list mixin super class
        self._check_allowed(init_geoms)
        # Creating the geometry pointer array.
        collection = self._create_collection(len(init_geoms), iter(init_geoms))
        super(GeometryCollection, self).__init__(collection, **kwargs)
    def __iter__(self):
        "Iterates over each Geometry in the Collection."
        for i in range(len(self)):
            yield self[i]
    def __len__(self):
        "Returns the number of geometries in this Collection."
        return self.num_geom
    # ### Methods for compatibility with ListMixin ###
    def _create_collection(self, length, items):
        # Creating the geometry pointer array.
        geoms = get_pointer_arr(length)
        for i, g in enumerate(items):
            # this is a little sloppy, but makes life easier
            # allow GEOSGeometry types (python wrappers) or pointer types
            # (clone so the collection owns its own copies).
            geoms[i] = capi.geom_clone(getattr(g, 'ptr', g))
        return capi.create_collection(c_int(self._typeid), byref(geoms), c_uint(length))
    def _get_single_internal(self, index):
        # Raw GEOS pointer to the index-th member (not cloned).
        return capi.get_geomn(self.ptr, index)
    def _get_single_external(self, index):
        "Returns the Geometry from this Collection at the given index (0-based)."
        # Checking the index and returning the corresponding GEOS geometry.
        return GEOSGeometry(capi.geom_clone(self._get_single_internal(index)), srid=self.srid)
    def _set_list(self, length, items):
        "Create a new collection, and destroy the contents of the previous pointer."
        prev_ptr = self.ptr
        srid = self.srid
        self.ptr = self._create_collection(length, items)
        if srid:
            self.srid = srid
        capi.destroy_geom(prev_ptr)
    _set_single = GEOSGeometry._set_single_rebuild
    _assign_extended_slice = GEOSGeometry._assign_extended_slice_rebuild
    @property
    def json(self):
        # Heterogeneous collections need the GeoJSON "geometries" form;
        # typed subclasses defer to the base implementation.
        if self.__class__.__name__ == 'GeometryCollection':
            return json.dumps({
                'type': self.__class__.__name__,
                'geometries': [
                    {'type': geom.__class__.__name__, 'coordinates': geom.coords}
                    for geom in self
                ],
            })
        return super(GeometryCollection, self).json
    geojson = json
    @property
    def kml(self):
        "Returns the KML for this Geometry Collection."
        return '<MultiGeometry>%s</MultiGeometry>' % ''.join(g.kml for g in self)
    @property
    def tuple(self):
        "Returns a tuple of all the coordinates in this Geometry Collection"
        return tuple(g.tuple for g in self)
    coords = tuple
# MultiPoint, MultiLineString, and MultiPolygon class definitions.
class MultiPoint(GeometryCollection):
    "A collection that may contain only Point geometries."
    _allowed = Point
    _typeid = 4   # GEOS type id for MULTIPOINT
class MultiLineString(ProjectInterpolateMixin, GeometryCollection):
    "A collection that may contain only LineString/LinearRing geometries."
    _allowed = (LineString, LinearRing)
    _typeid = 5   # GEOS type id for MULTILINESTRING
    @property
    def merged(self):
        """
        Returns a LineString representing the line merge of this
        MultiLineString.
        """
        return self._topology(capi.geos_linemerge(self.ptr))
class MultiPolygon(GeometryCollection):
    "A collection that may contain only Polygon geometries."
    _allowed = Polygon
    _typeid = 6   # GEOS type id for MULTIPOLYGON
    @property
    def cascaded_union(self):
        "Returns a cascaded union of this MultiPolygon."
        return GEOSGeometry(capi.geos_cascaded_union(self.ptr), self.srid)
# Setting the allowed types here since GeometryCollection is defined before
# its subclasses.
GeometryCollection._allowed = (Point, LineString, LinearRing, Polygon, MultiPoint, MultiLineString, MultiPolygon)
| bsd-3-clause |
openstack/trove | trove/common/swift.py | 1 | 1414 | # Copyright 2021 Catalyst Cloud Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def parse_location(location):
    """Split a swift object URL into (storage_url, container, object).

    The object name is the last path segment, the container the one
    before it, and everything preceding those is the storage URL.
    """
    segments = location.split('/')
    return "/".join(segments[:-2]), segments[-2], segments[-1]
def _get_attr(original):
"""Get a friendly name from an object header key."""
key = original.replace('-', '_')
key = key.replace('x_object_meta_', '')
return key
def get_metadata(client, location, extra_attrs=()):
    """Return metadata for the swift object at ``location``.

    :param client: swift client exposing head_object(container, object).
    :param location: full object URL (<storage_url>/<container>/<object>).
    :param extra_attrs: iterable of raw header names to copy as-is.
    :returns: dict mapping friendly names (``x-object-meta-`` headers with
        the prefix stripped and dashes converted) plus any requested extra
        headers (missing ones map to None).
    """
    # Fix: the default for extra_attrs was a mutable list ([]), a classic
    # Python pitfall; an immutable empty tuple is equivalent and safe.
    _, container_name, object_name = parse_location(location)
    headers = client.head_object(container_name, object_name)
    meta = {}
    for key, value in headers.items():
        if key.startswith('x-object-meta'):
            meta[_get_attr(key)] = value
    for key in extra_attrs:
        meta[key] = headers.get(key)
    return meta
| apache-2.0 |
he7d3r/revscoring | tests/extractors/api/tests/test_datasources.py | 2 | 1784 | import pickle
import mwapi
from revscoring.datasources import revision_oriented as ro
from revscoring.extractors.api.datasources import (LastUserRevDoc,
PageCreationRevDoc,
PropertySuggestionDoc,
RevDocById, UserInfoDoc)
from revscoring.extractors.api.extractor import Extractor
def test_rev_doc_by_id():
    # RevDocById must be hashable and survive a pickle round-trip equal.
    extractor = Extractor(mwapi.Session("foobar"))
    rev_doc_by_id = RevDocById(ro.revision, extractor)
    hash(rev_doc_by_id)
    assert pickle.loads(pickle.dumps(rev_doc_by_id)) == rev_doc_by_id
def test_page_creation_rev_doc():
    # PageCreationRevDoc must be hashable and pickle round-trip equal.
    extractor = Extractor(mwapi.Session("foobar"))
    page_creation_rev_doc = PageCreationRevDoc(ro.revision.page, extractor)
    hash(page_creation_rev_doc)
    assert (pickle.loads(pickle.dumps(page_creation_rev_doc)) ==
            page_creation_rev_doc)
def test_property_suggestion_doc():
    # PropertySuggestionDoc must be hashable and pickle round-trip equal.
    extractor = Extractor(mwapi.Session("foobar"))
    property_suggestion_doc = PropertySuggestionDoc(ro.revision.page, extractor)
    hash(property_suggestion_doc)
    assert (pickle.loads(pickle.dumps(property_suggestion_doc)) ==
            property_suggestion_doc)
def test_user_info_doc():
    # UserInfoDoc must be hashable and pickle round-trip equal.
    extractor = Extractor(mwapi.Session("foobar"))
    user_info_doc = UserInfoDoc(ro.revision.user, extractor)
    hash(user_info_doc)
    assert (pickle.loads(pickle.dumps(user_info_doc)) ==
            user_info_doc)
def test_last_user_rev_doc():
    # Same hash + pickle round-trip contract for LastUserRevDoc.
    extractor = Extractor(mwapi.Session("foobar"))
    last_user_rev_doc = LastUserRevDoc(ro.revision, extractor)
    hash(last_user_rev_doc)
    assert (pickle.loads(pickle.dumps(last_user_rev_doc)) ==
            last_user_rev_doc)
| mit |
xtacocorex/CHIP_IO | CHIP_IO/LRADC.py | 1 | 6284 | # Copyright (c) 2016 Robert Wolterman
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import time
# Global Variables
DEBUG = False        # verbose per-call tracing; flipped by toggle_debug()
DEVICE_EXIST = True  # set False by setup() when the iio sysfs node is missing

# Default Sample Rate Variables
SAMPLE_RATE_32P25 = 32.25
# NOTE(review): the 'O' in '62O5' is the letter O, not a zero; the name is
# kept as-is because it is part of the module's public interface.
SAMPLE_RATE_62O5 = 62.5
SAMPLE_RATE_125 = 125
SAMPLE_RATE_250 = 250
SAMPLE_RATES = []  # populated lazily by get_allowable_sample_rates()

# Scale Factor (default; refreshed from sysfs by get_scale_factor())
SCALE_FACTOR = 31.25

# File Locations (sysfs nodes exposed by the LRADC iio driver)
LRADC_BASE_DEVICE_FILE = "/sys/bus/iio/devices/iio:device0"
AVAILABLE_SAMPLE_RATE_FILE = "/sampling_frequency_available"
SCALE_FACTOR_FILE = "/in_voltage_scale"
CURRENT_SAMPLE_RATE_FILE = "/in_voltage_sampling_frequency"
RAW_VOLTAGE_CHAN0_FILE = "/in_voltage0_raw"
RAW_VOLTAGE_CHAN1_FILE = "/in_voltage1_raw"
def toggle_debug():
    """Flip the module-level DEBUG flag and report the new state."""
    global DEBUG
    DEBUG = not DEBUG
    print("debug enabled" if DEBUG else "debug disabled")
def setup(rate=250):
    """Verify the LRADC sysfs device exists and program the sample rate.

    rate: requested sampling frequency; must be one of the rates the
    driver advertises (see get_allowable_sample_rates()).
    Raises Exception when the device node is missing.
    """
    # First we determine if the device exists
    if not os.path.exists(LRADC_BASE_DEVICE_FILE):
        # Remember the failure so every other API call can fail fast.
        global DEVICE_EXIST
        DEVICE_EXIST = False
        raise Exception("LRADC Device does not exist")
    else:
        # Set the Sample Rate
        set_sample_rate(rate)
def get_device_exist():
    """Return True if the LRADC sysfs device was present at setup() time."""
    return DEVICE_EXIST
def get_scale_factor():
    """Read the ADC scale factor from sysfs, cache it, and return it.

    Updates the module-level SCALE_FACTOR.
    Raises Exception if setup() found no LRADC device.
    """
    # If we do not have a device, lets throw an exception
    if not DEVICE_EXIST:
        raise Exception("LRADC Device does not exist")
    # 'with' guarantees the sysfs handle is closed even if the read fails
    # (the previous open/readline/close sequence leaked the handle on error).
    with open(LRADC_BASE_DEVICE_FILE + SCALE_FACTOR_FILE, "r") as f:
        dat = f.readline()
    # Set the Scale Factor
    global SCALE_FACTOR
    SCALE_FACTOR = float(dat.strip())
    if DEBUG:
        print("lradc.get_scale_factor: {0}".format(SCALE_FACTOR))
    return SCALE_FACTOR
def get_allowable_sample_rates():
    """Read the sampling frequencies the driver supports.

    Caches them in the module-level SAMPLE_RATES list (floats for
    fractional rates, ints otherwise) and returns them as a tuple.
    Raises Exception if setup() found no LRADC device.
    """
    # If we do not have a device, lets throw an exception
    if not DEVICE_EXIST:
        raise Exception("LRADC Device does not exist")
    # 'with' closes the sysfs handle even if the read fails.
    with open(LRADC_BASE_DEVICE_FILE + AVAILABLE_SAMPLE_RATE_FILE, "r") as f:
        dat = f.readline()
    global SAMPLE_RATES
    # The sysfs file is a single space-separated line of numbers.
    SAMPLE_RATES = [float(tok) if "." in tok else int(tok)
                    for tok in dat.strip().split(" ")]
    # Debug
    if DEBUG:
        print("lradc.get_allowable_sample_rates:")
        for rate in SAMPLE_RATES:
            print("{0}".format(rate))
    return tuple(SAMPLE_RATES)
def set_sample_rate(rate):
    """Program the LRADC sampling frequency.

    rate must be one of the driver-advertised rates (see
    get_allowable_sample_rates()).  Raises ValueError for an unsupported
    rate, and Exception if the device is missing or the read-back after
    writing does not match.
    """
    # If we do not have a device, lets throw an exception
    if not DEVICE_EXIST:
        raise Exception("LRADC Device does not exist")
    # Lazily populate the cache of allowed rates on first use.
    global SAMPLE_RATES
    if SAMPLE_RATES == []:
        get_allowable_sample_rates()
    # Range check the input rate.  (Message fixed: it previously read
    # "Input Rate an Acceptable Value", stating the opposite of the error.)
    if rate not in SAMPLE_RATES:
        raise ValueError("Input Rate not an Acceptable Value")
    # Debug
    if DEBUG:
        print("lradc.set_sample_rate: {0}".format(rate))
    # Write the rate; 'with' closes the handle even if the write fails.
    with open(LRADC_BASE_DEVICE_FILE + CURRENT_SAMPLE_RATE_FILE, "w") as f:
        f.write("%.2f" % rate)
    # Verify write went well
    if get_sample_rate() != rate:
        raise Exception("Unable to write new Sampling Rate")
def get_sample_rate():
    """Read back the currently configured sampling frequency.

    Returns a float when the driver reports a fractional rate, an int
    otherwise.  Raises Exception if setup() found no LRADC device.
    """
    # If we do not have a device, lets throw an exception
    if not DEVICE_EXIST:
        raise Exception("LRADC Device does not exist")
    # 'with' guarantees the sysfs handle is closed even if the read fails.
    with open(LRADC_BASE_DEVICE_FILE + CURRENT_SAMPLE_RATE_FILE, "r") as f:
        dat = f.read().strip()
    dat = float(dat) if "." in dat else int(dat)
    # Debug
    if DEBUG:
        print("lradc.get_sample_rate: {0}".format(dat))
    return dat
def get_chan0_raw():
    """Return the raw (unscaled) reading of LRADC channel 0 as a float."""
    # If we do not have a device, lets throw an exception
    if not DEVICE_EXIST:
        raise Exception("LRADC Device does not exist")
    # 'with' guarantees the sysfs handle is closed even if the read fails.
    with open(LRADC_BASE_DEVICE_FILE + RAW_VOLTAGE_CHAN0_FILE, "r") as f:
        dat = float(f.readline().strip())
    # Debug
    if DEBUG:
        print("lradc.get_chan0_raw: {0}".format(dat))
    return dat
def get_chan1_raw():
    """Return the raw (unscaled) reading of LRADC channel 1 as a float."""
    # If we do not have a device, lets throw an exception
    if not DEVICE_EXIST:
        raise Exception("LRADC Device does not exist")
    # 'with' guarantees the sysfs handle is closed even if the read fails.
    with open(LRADC_BASE_DEVICE_FILE + RAW_VOLTAGE_CHAN1_FILE, "r") as f:
        dat = float(f.readline().strip())
    # Debug
    if DEBUG:
        print("lradc.get_chan1_raw: {0}".format(dat))
    return dat
def get_chan0():
    """Return the channel 0 reading multiplied by the cached SCALE_FACTOR."""
    # Fail fast when setup() determined there is no device.
    if not DEVICE_EXIST:
        raise Exception("LRADC Device does not exist")
    scaled = get_chan0_raw() * SCALE_FACTOR
    if DEBUG:
        print("lradc.get_chan0: {0}".format(scaled))
    return scaled
def get_chan1():
    """Return the channel 1 reading multiplied by the cached SCALE_FACTOR."""
    # Fail fast when setup() determined there is no device.
    if not DEVICE_EXIST:
        raise Exception("LRADC Device does not exist")
    scaled = get_chan1_raw() * SCALE_FACTOR
    if DEBUG:
        print("lradc.get_chan1: {0}".format(scaled))
    return scaled
| mit |
PriceChild/ansible | test/units/modules/cloud/amazon/test_s3.py | 49 | 1262 | import pytest
boto = pytest.importorskip("boto")
import unittest
import ansible.modules.cloud.amazon.s3 as s3
from ansible.module_utils.six.moves.urllib.parse import urlparse
class TestUrlparse(unittest.TestCase):
    """Unit tests for URL helpers used by the s3 module."""

    def test_urlparse(self):
        # Sanity-check the six.moves urlparse re-export the module relies on.
        actual = urlparse("http://test.com/here")
        self.assertEqual("http", actual.scheme)
        self.assertEqual("test.com", actual.netloc)
        self.assertEqual("/here", actual.path)

    def test_is_fakes3(self):
        # 'fakes3://' URLs identify a FakeS3 test endpoint.
        actual = s3.is_fakes3("fakes3://bla.blubb")
        self.assertEqual(True, actual)

    def test_is_walrus(self):
        # Non-URL strings are treated as Walrus endpoints.
        actual = s3.is_walrus("trulywalrus_but_invalid_url")
        #I don't know if this makes sense, but this is the current behaviour...
        self.assertEqual(True, actual)
        # Anything under amazonaws.com is not Walrus.
        actual = s3.is_walrus("http://notwalrus.amazonaws.com")
        self.assertEqual(False, actual)

    def test_get_s3_connection(self):
        # With rgw=True and an explicit s3_url the connection host must be
        # taken from the URL, not from AWS defaults.
        aws_connect_kwargs = dict(aws_access_key_id="access_key",
                                  aws_secret_access_key="secret_key")
        location=None
        rgw=True
        s3_url="http://bla.blubb"
        actual = s3.get_s3_connection(aws_connect_kwargs, location, rgw, s3_url)
        self.assertEqual("bla.blubb", actual.host)
| gpl-3.0 |
deyton/swift | utils/GYBUnicodeDataUtils.py | 65 | 24816 | # ===--- GYBUnicodeDataUtils.py ----------------------*- coding: utf-8 -*-===//
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
import codecs
import re
class UnicodeProperty(object):
    """Abstract base class for Unicode properties."""

    def __init__(self):
        raise NotImplementedError(
            "UnicodeProperty.__init__ is not implemented.")

    def get_default_value(self):
        """Return the value for code points the data file does not list."""
        raise NotImplementedError(
            "UnicodeProperty.get_default_value is not implemented.")

    def get_value(self, cp):
        """Return the symbolic property value for code point *cp*."""
        raise NotImplementedError(
            "UnicodeProperty.get_value is not implemented.")

    def to_numeric_value(self, value):
        """Map a symbolic property value to its numeric encoding."""
        raise NotImplementedError(
            "UnicodeProperty.to_numeric_value is not implemented.")

    def get_numeric_value(self, cp):
        """Return the numeric property value for code point *cp*."""
        raise NotImplementedError(
            "UnicodeProperty.get_numeric_value is not implemented.")
class GraphemeClusterBreakPropertyTable(UnicodeProperty):
    """Grapheme_Cluster_Break property, parsed from a UCD data file."""

    # An array of tuples (start_code_point, end_code_point, value).
    property_value_ranges = []

    property_values = [None for i in range(0, 0x110000)]

    # Note: Numeric values (including the names) should be consistent with
    # '_GraphemeClusterBreakPropertyValue' enum on the Swift side, and with
    # 'GraphemeClusterBreakProperty' in the compiler C++ code. If there is a
    # reason for either of those to differ, then this mapping can be overridden
    # after an instance of this class is created.
    numeric_value_table = {
        'Other': 0,
        'CR': 1,
        'LF': 2,
        'Control': 3,
        'Extend': 4,
        'Regional_Indicator': 5,
        'Prepend': 6,
        'SpacingMark': 7,
        'L': 8,
        'V': 9,
        'T': 10,
        'LV': 11,
        'LVT': 12,
    }

    def __init__(self, grapheme_break_property_file_name):
        """Load GraphemeBreakProperty.txt from the given path."""
        # Give each instance its own tables.  The class attributes above are
        # mutable lists; the previous implementation augmented them through
        # 'self' ('self.property_value_ranges += ...'), which mutated the
        # shared class-level lists, so constructing a second table
        # accumulated the first table's data.
        self.property_value_ranges = []
        self.property_values = [None for i in range(0, 0x110000)]

        # Build 'self.symbolic_values' -- an array that maps numeric property
        # values to symbolic values.
        self.symbolic_values = \
            [None] * (max(self.numeric_value_table.values()) + 1)
        for k, v in self.numeric_value_table.items():
            self.symbolic_values[v] = k

        # Load the data file.
        with codecs.open(
                grapheme_break_property_file_name,
                encoding='utf-8',
                errors='strict') as f:
            for line in f:
                # Strip comments.
                line = re.sub('#.*', '', line)

                # Single code point?  Allow '_' in the value name (the range
                # pattern below already did), so that multi-word values such
                # as 'Regional_Indicator' are recognized on single-code-point
                # lines, too.
                m = re.match('([0-9A-F]+) +; +([a-zA-Z_]+) ', line)
                if m:
                    code_point = int(m.group(1), 16)
                    value = m.group(2)
                    self.property_value_ranges += \
                        [(code_point, code_point, value)]
                    continue

                # Range of code points?  ('..' is escaped so the separator
                # matches literal dots rather than any two characters.)
                m = re.match(
                    '([0-9A-F]+)\\.\\.([0-9A-F]+) +; +([a-zA-Z_]+) ', line)
                if m:
                    start_code_point = int(m.group(1), 16)
                    end_code_point = int(m.group(2), 16)
                    value = m.group(3)
                    self.property_value_ranges += \
                        [(start_code_point, end_code_point, value)]

        # Prepare a flat lookup table for fast access.
        for cp in range(0, 0x110000):
            self.property_values[cp] = self.get_default_value()
        for start_code_pt, end_code_pt, val in self.property_value_ranges:
            for cp in range(start_code_pt, end_code_pt + 1):
                self.property_values[cp] = val

    def get_default_value(self):
        """Code points not mentioned in the data file are 'Other'."""
        return 'Other'

    def get_value(self, cp):
        return self.property_values[cp]

    def to_numeric_value(self, value):
        return self.numeric_value_table[value]

    def get_numeric_value(self, cp):
        return self.to_numeric_value(self.get_value(cp))
# BMP code points are 16-bit values. The code point value is split as
# follows:
#
# 8 bits 8 bits
# +-------------------------+-------------------------+
# | 15 14 13 12 11 10 9 8 | 7 6 5 4 3 2 1 0 |
# +-------------------------+-------------------------+
# first-level index data offset
#
# Supplementary code points (U+XXXX where XXXX > 0xffff) are 21-bit values.
# The code point value is split as follows:
#
# 5 bits 8 bits 8 bits
# +----------------+-------------------------+-------------------------+
# | 20 19 18 17 16 | 15 14 13 12 11 10 9 8 | 7 6 5 4 3 2 1 0 |
# +----------------+-------------------------+-------------------------+
# first-level second-level index data offset
# index
#
# The actual number of bits are just trie parameters. They affect the size of
# the lookup tables (and thus, lookup time), but do not change the overall
# structure of the trie.
#
# Here and below 'supp' stands for 'supplementary characters'.
#
# Property data for BMP code points is stored as a one-stage trie.
# A trie with one lookup table consists of two memory blocks:
#
# First-level lookup table
# +-----+-----+-----+-----+--...--+
# | * | * | * | * | |
# +--|--+--|--+--|--+--|--+--...--+
# | | | \ The references don't form
# | \____| \___, a systematic pattern
# | | |
# | | | Data storage
# +-V--------++-V--------++-V--------++---...---+
# | data || data || data || |
# +----------++----------++----------++---...---+
#
# In order to fetch data for a given code point, you need to:
# * load from the first-level lookup table using first-level index; this will
# give you the number of the data block that you should use.
# * load from the data block applying the data offset.
#
# Property data for supplementary code points is stored as a two-stage trie.
# A trie with two-stage lookup tables consists of three memory blocks. The
# following drawing explains how it is implemented:
#
# First-level lookup table
# +-----+-----+-----+-----+-----+--...--+
# | * | * | * | * | * | |
# +--|--+--|--+--|--+--|--+--|--+--...--+
# | | | | \ The references don't form
# ,__/ | \____| \___, a systematic pattern
# / | | |
# | | | | Second-level lookup table
# +-V--------++-V--------++-V--------++-V--------++---...---+
# | ******** || ******** || ******** || || |
# +-||||||||-++-||||||||-++-||||||||-++----------++---...---+
# \\\|//// ||||||VV |VVV|V|V
# \\|/// |||||| / | |
# \|// |||||| / | |
# |/ ||||| \__|___. \ \ The references don't form
# | |||| \___|__. \ | \ a systematic pattern
# | ||| \____| \ \__| \
# | || \_____|__. \___|___\ ...___.
# | | \______| \____| \___, | Data storage
# +-V-----++-V-----++-V-----++-V-----++-V-----++-V-----++---...---+
# | data || data || data || data || || || |
# +-------++-------++-------++-------++-------++-------++---...---+
#
# In order to fetch data for a given code point, you need to:
# * load from the first-level lookup table using first-level index; this will
# give you the number of the second-level lookup table that you should use.
# * load from the chosen second-level lookup table using the second-level
# index, which will give you the number of the data block that you should
# use.
# * load from the data block applying the data offset.
#
# First- and second-level lookup tables in the general case contain 16-bit
# words; that will be sufficient to store a trie that does not compress at all.
# But in many cases, after trie compression there will be fewer than 256
# unique second-level lookup tables and/or data storage blocks, which allows
# one to use 8-bit words in lookup tables.
#
# The bitwidth of data depends on the application of the trie.
#
# The supp tables contain entries for BMP code units to simplify trie
# implementation, but those BMP entries are filled with the default value, so
# they compress well.
class UnicodeTrieGenerator(object):
    """Builds a compressed one-/two-stage trie over Unicode property values.

    See the large block comment above for the table layout.  BMP code points
    use one lookup level; supplementary code points use two.
    """

    # Note: if you change any of these parameters, don't forget to update the
    # ASCII art above.
    bmp_first_level_index_bits = 8

    supp_first_level_index_bits = 5
    supp_second_level_index_bits = 8

    def get_bmp_first_level_index(self, cp):
        return cp >> self.bmp_data_offset_bits

    def get_bmp_data_offset(self, cp):
        return cp & ((1 << self.bmp_data_offset_bits) - 1)

    def get_supp_first_level_index(self, cp):
        return cp >> \
            (self.supp_second_level_index_bits + self.supp_data_offset_bits)

    def get_supp_second_level_index(self, cp):
        return (cp >> self.supp_data_offset_bits) & \
            ((1 << self.supp_second_level_index_bits) - 1)

    def get_supp_data_offset(self, cp):
        return cp & ((1 << self.supp_data_offset_bits) - 1)

    def __init__(self):
        """Create a trie generator with default parameters."""
        pass

    def create_tables(self):
        """Compute derived parameter values and create internal data
        structures.

        Don't change parameter values after calling this method.
        """
        self.bmp_data_offset_bits = 16 - self.bmp_first_level_index_bits
        self.supp_data_offset_bits = \
            21 - self.supp_first_level_index_bits - \
            self.supp_second_level_index_bits

        # The maximum value of the first level index for supp tables. It is
        # not equal to ((1 << supp_first_level_index_bits) - 1), because
        # maximum Unicode code point value is not 2^21-1 (0x1fffff), it is
        # 0x10ffff.
        self.supp_first_level_index_max = \
            0x10ffff >> \
            (self.supp_second_level_index_bits + self.supp_data_offset_bits)

        # A mapping from BMP first-level index to BMP data block index.
        self.bmp_lookup = \
            [i for i in range(0, 1 << self.bmp_first_level_index_bits)]

        # An array of BMP data blocks.
        self.bmp_data = [
            [-1 for i in range(0, 1 << self.bmp_data_offset_bits)]
            for i in range(0, 1 << self.bmp_first_level_index_bits)
        ]

        # A mapping from supp first-level index to an index of the second-level
        # lookup table.
        self.supp_lookup1 = \
            [i for i in range(0, self.supp_first_level_index_max + 1)]

        # An array of second-level lookup tables. Each second-level lookup
        # table is a mapping from a supp second-level index to supp data block
        # index.
        self.supp_lookup2 = [
            [j for j in range(i << self.supp_second_level_index_bits,
                              (i + 1) << self.supp_second_level_index_bits)]
            for i in range(0, self.supp_first_level_index_max + 1)
        ]

        # An array of supp data blocks.
        self.supp_data = [
            [-1 for i in range(0, 1 << self.supp_data_offset_bits)]
            for i in range(0, (self.supp_first_level_index_max + 1) *
                           (1 << self.supp_second_level_index_bits))
        ]

    def splat(self, value):
        """Fill every data slot (BMP and supp) with *value*."""
        for i in range(0, len(self.bmp_data)):
            for j in range(0, len(self.bmp_data[i])):
                self.bmp_data[i][j] = value

        for i in range(0, len(self.supp_data)):
            for j in range(0, len(self.supp_data[i])):
                self.supp_data[i][j] = value

    def set_value(self, cp, value):
        """Store *value* for code point *cp*."""
        if cp <= 0xffff:
            data_block_index = self.bmp_lookup[
                self.get_bmp_first_level_index(cp)]
            self.bmp_data[data_block_index][
                self.get_bmp_data_offset(cp)] = value
        else:
            second_lookup_index = self.supp_lookup1[
                self.get_supp_first_level_index(cp)]
            data_block_index = self.supp_lookup2[second_lookup_index][
                self.get_supp_second_level_index(cp)]
            self.supp_data[data_block_index][
                self.get_supp_data_offset(cp)] = value

    def get_value(self, cp):
        """Return the stored value for code point *cp*."""
        if cp <= 0xffff:
            data_block_index = self.bmp_lookup[
                self.get_bmp_first_level_index(cp)]
            return self.bmp_data[data_block_index][
                self.get_bmp_data_offset(cp)]
        else:
            second_lookup_index = self.supp_lookup1[
                self.get_supp_first_level_index(cp)]
            data_block_index = self.supp_lookup2[second_lookup_index][
                self.get_supp_second_level_index(cp)]
            return self.supp_data[data_block_index][
                self.get_supp_data_offset(cp)]

    def fill_from_unicode_property(self, unicode_property):
        """Populate the trie with every code point's property value."""
        self.splat(unicode_property.get_default_value())
        for cp in range(0, 0x110000):
            self.set_value(cp, unicode_property.get_value(cp))

    def verify(self, unicode_property):
        """Assert that the trie reproduces *unicode_property* exactly."""
        for cp in range(0, 0x110000):
            expected_value = unicode_property.get_value(cp)
            actual_value = self.get_value(cp)
            assert(expected_value == actual_value)

    def freeze(self):
        """Compress internal trie representation.

        Don't mutate the trie after calling this method.
        """

        def remap_indexes(indexes, old_idx, new_idx):
            def map_index(idx):
                if idx == old_idx:
                    return new_idx
                elif idx > old_idx:
                    return idx - 1
                else:
                    return idx
            # NOTE: Python 2's `map` function returns a list. Where Python 3's
            # `map` function returns an iterator. To work around this the
            # result of the `map` is explicitly converted to a `list`.
            return list(map(map_index, indexes))

        # If self.bmp_data contains identical data blocks, keep the first one,
        # remove duplicates and change the indexes in self.bmp_lookup to point
        # to the first one.
        i = 0
        while i < len(self.bmp_data):
            j = i + 1
            while j < len(self.bmp_data):
                if self.bmp_data[i] == self.bmp_data[j]:
                    self.bmp_data.pop(j)
                    self.bmp_lookup = \
                        remap_indexes(self.bmp_lookup, old_idx=j, new_idx=i)
                else:
                    j += 1
            i += 1

        # For supp tables, perform bottom-up deduplication: first, deduplicate
        # data blocks. The algorithm is the same as above, but operates on
        # self.supp_data/supp_lookup2.
        i = 0
        while i < len(self.supp_data):
            j = i + 1
            while j < len(self.supp_data):
                if self.supp_data[i] == self.supp_data[j]:
                    self.supp_data.pop(j)
                    for k in range(0, len(self.supp_lookup2)):
                        self.supp_lookup2[k] = \
                            remap_indexes(self.supp_lookup2[k],
                                          old_idx=j, new_idx=i)
                else:
                    j += 1
            i += 1

        # Next, deduplicate second-level lookup tables.
        # Same as above, but for supp_lookup1/supp_lookup2.
        i = 0
        while i < len(self.supp_lookup2):
            j = i + 1
            while j < len(self.supp_lookup2):
                if self.supp_lookup2[i] == self.supp_lookup2[j]:
                    self.supp_lookup2.pop(j)
                    self.supp_lookup1 = \
                        remap_indexes(self.supp_lookup1, old_idx=j, new_idx=i)
                else:
                    j += 1
            i += 1

    def _int_to_le_bytes(self, data, width):
        """Split *data* into *width* little-endian bytes (each 0..255)."""
        if width == 1:
            assert(data & ~0xff == 0)
            return [data]
        if width == 2:
            assert(data & ~0xffff == 0)
            # Low byte first, then the high byte shifted down into byte
            # range.  (Bug fix: this previously returned 'data & 0xff00'
            # for the second element, which leaves the high byte in place
            # -- e.g. 0x1234 produced [0x34, 0x1200] instead of
            # [0x34, 0x12] -- corrupting every 2-byte serialized entry.)
            return [data & 0xff, (data >> 8) & 0xff]
        assert(False)

    def _int_list_to_le_bytes(self, ints, width):
        """Flatten *ints* into a little-endian byte list, *width* bytes each."""
        return [
            byte
            for elt in ints
            for byte in self._int_to_le_bytes(elt, width)]

    def serialize(self, unicode_property):
        """Flatten all lookup tables and data blocks into self.trie_bytes.

        Lookup tables use 1-byte entries when the referenced table has
        fewer than 256 blocks, otherwise 2-byte little-endian entries.
        Offsets of each section within trie_bytes are recorded on self.
        """
        self.bmp_lookup_bytes_per_entry = 1 if len(self.bmp_data) < 256 else 2
        self.bmp_data_bytes_per_entry = 1
        self.supp_lookup1_bytes_per_entry = 1 if len(self.supp_lookup2) < 256 \
            else 2
        self.supp_lookup2_bytes_per_entry = 1 if len(self.supp_data) < 256 \
            else 2
        self.supp_data_bytes_per_entry = 1

        bmp_lookup_words = list(self.bmp_lookup)
        bmp_data_words = [
            unicode_property.to_numeric_value(elt)
            for block in self.bmp_data
            for elt in block]
        supp_lookup1_words = list(self.supp_lookup1)
        supp_lookup2_words = [
            elt for block in self.supp_lookup2 for elt in block]
        supp_data_words = [
            unicode_property.to_numeric_value(elt)
            for block in self.supp_data
            for elt in block]

        bmp_lookup_bytes = self._int_list_to_le_bytes(
            bmp_lookup_words, self.bmp_lookup_bytes_per_entry)
        bmp_data_bytes = self._int_list_to_le_bytes(
            bmp_data_words, self.bmp_data_bytes_per_entry)
        supp_lookup1_bytes = self._int_list_to_le_bytes(
            supp_lookup1_words, self.supp_lookup1_bytes_per_entry)
        supp_lookup2_bytes = self._int_list_to_le_bytes(
            supp_lookup2_words, self.supp_lookup2_bytes_per_entry)
        supp_data_bytes = self._int_list_to_le_bytes(
            supp_data_words, self.supp_data_bytes_per_entry)

        self.trie_bytes = []
        self.bmp_lookup_bytes_offset = 0
        self.trie_bytes += bmp_lookup_bytes
        self.bmp_data_bytes_offset = len(self.trie_bytes)
        self.trie_bytes += bmp_data_bytes
        self.supp_lookup1_bytes_offset = len(self.trie_bytes)
        self.trie_bytes += supp_lookup1_bytes
        self.supp_lookup2_bytes_offset = len(self.trie_bytes)
        self.trie_bytes += supp_lookup2_bytes
        self.supp_data_bytes_offset = len(self.trie_bytes)
        self.trie_bytes += supp_data_bytes
def get_extended_grapheme_cluster_rules_matrix(grapheme_cluster_break_table):
    """Pack the extended grapheme cluster boundary rules into bitmasks.

    Returns one integer per Grapheme_Cluster_Break value (in the table's
    symbolic order); bit i of row for value F is set when no boundary
    occurs between a code point of value F and one of value i.
    """
    values = grapheme_cluster_break_table.symbolic_values

    # Rules to determine extended grapheme cluster boundaries, as defined in
    # 'Grapheme Break Chart',
    # http://www.unicode.org/Public/6.3.0/ucd/auxiliary/GraphemeBreakTest.html,
    # Unicode 6.3.0.
    #
    # The Unicode 7.0.0 draft does not change these rules.
    #
    # As in the referenced document, the rules are specified in order of
    # decreasing priority.
    rules = [
        (['CR'], 'no_boundary', ['LF']),
        (['Control', 'CR', 'LF'], 'boundary', values),
        (values, 'boundary', ['Control', 'CR', 'LF']),
        (['L'], 'no_boundary', ['L', 'V', 'LV', 'LVT']),
        (['LV', 'V'], 'no_boundary', ['V', 'T']),
        (['LVT', 'T'], 'no_boundary', ['T']),
        (['Regional_Indicator'], 'no_boundary', ['Regional_Indicator']),
        (values, 'no_boundary', ['Extend']),
        (values, 'no_boundary', ['SpacingMark']),
        (['Prepend'], 'no_boundary', values),
        (values, 'boundary', values),
    ]

    # Expand the rules into a first-value -> (second-value -> action) map,
    # applying them from lowest to highest priority so later writes win.
    action_for = {first: dict.fromkeys(values, None) for first in values}
    for first_list, action, second_list in reversed(rules):
        for first in first_list:
            row = action_for[first]
            for second in second_list:
                row[second] = action

    # Make sure one row of the matrix fits into a 'uint16_t'.
    assert(len(values) <= 16)

    packed_rows = []
    for first in values:
        row = action_for[first]
        packed = 0
        for bit, second in enumerate(values):
            if row[second] == 'no_boundary':
                packed |= 1 << bit
        packed_rows.append(packed)
    return packed_rows
def get_grapheme_cluster_break_tests_as_utf8(grapheme_break_test_file_name):
    """Parse GraphemeBreakTest.txt into (utf8_escapes, boundaries) pairs.

    Each test is a string of '\\xNN' escapes spelling the UTF-8 bytes of
    the test text, plus the byte offsets where extended grapheme cluster
    boundaries occur.
    """
    def _convert_line(line):
        # Strip comments.
        line = re.sub('#.*', '', line).strip()
        if line == "":
            return None

        test = ""
        curr_bytes = 0
        boundaries = []

        # Match a list of code points.
        for token in line.split(" "):
            if token == u"÷":
                # '÷' marks a boundary at the current byte offset.
                boundaries += [curr_bytes]
            elif token == u"×":
                # '×' marks "no boundary": nothing to record.
                pass
            else:
                code_point = int(token, 16)

                # Tests from Unicode spec have isolated surrogates in them.
                # Our segmentation algorithm works on UTF-8 sequences, so
                # encoding a surrogate would produce an invalid code unit
                # sequence. Instead of trying to emulate the maximal subpart
                # algorithm for inserting U+FFFD in Python, we just replace
                # every isolated surrogate with U+200B, which also has
                # Grapheme_Cluster_Break equal to 'Control' and test
                # separately that we handle ill-formed UTF-8 sequences.
                if code_point >= 0xd800 and code_point <= 0xdfff:
                    code_point = 0x200b
                # Build the actual character via a '\Uxxxxxxxx' escape so the
                # same source works under both Python 2 and Python 3.
                code_point = (b'\U%(cp)08x' % {b'cp': code_point}).decode(
                    'unicode_escape', 'strict')
                as_utf8_bytes = bytearray(code_point.encode('utf8', 'strict'))
                as_utf8_escaped = ''.join(
                    ['\\x%(byte)02x' % {'byte': byte}
                     for byte in as_utf8_bytes])
                test += as_utf8_escaped
                curr_bytes += len(as_utf8_bytes)

        return (test, boundaries)

    # Self-test.
    assert(_convert_line(u'÷ 0903 × 0308 ÷ AC01 ÷ # abc') == (
        '\\xe0\\xa4\\x83\\xcc\\x88\\xea\\xb0\\x81', [0, 5, 8]))
    assert(_convert_line(u'÷ D800 ÷ # abc') == ('\\xe2\\x80\\x8b', [0, 3]))

    result = []
    with codecs.open(
            grapheme_break_test_file_name,
            encoding='utf-8',
            errors='strict') as f:
        for line in f:
            test = _convert_line(line)
            if test:
                result += [test]
    return result
def get_grapheme_cluster_break_tests_as_unicode_scalars(
        grapheme_break_test_file_name):
    """Parse GraphemeBreakTest.txt into (code_points, boundaries) pairs.

    Each test is the list of Unicode scalar values of the test text, plus
    the code-point offsets where extended grapheme cluster boundaries
    occur.
    """
    def _convert_line(line):
        # Strip comments.
        line = re.sub('#.*', '', line).strip()
        if line == "":
            return None

        test = []
        curr_code_points = 0
        boundaries = []

        # Match a list of code points.
        for token in line.split(" "):
            if token == "÷":
                boundaries += [curr_code_points]
            elif token == "×":
                pass
            else:
                code_point = int(token, 16)

                # Tests from Unicode spec have isolated surrogates in them. Our
                # segmentation algorithm works on UTF-16 sequences, so encoding
                # a surrogate would produce an invalid code unit sequence.
                # Instead of trying to emulate the maximal subpart algorithm
                # for inserting U+FFFD in Python, we just replace every
                # isolated surrogate with U+200B, which also has
                # Grapheme_Cluster_Break equal to 'Control' and test separately
                # that we handle ill-formed UTF-8 sequences.
                if code_point >= 0xd800 and code_point <= 0xdfff:
                    code_point = 0x200b
                test += [code_point]
                curr_code_points += 1

        return (test, boundaries)

    # Self-test.
    assert(_convert_line('÷ 0903 × 0308 ÷ AC01 ÷ # abc') == ([
        0x0903, 0x0308, 0xac01], [0, 2, 3]))
    assert(_convert_line('÷ D800 ÷ # abc') == ([0x200b], [0, 1]))

    result = []
    # Read the file as UTF-8 text (like the UTF-8 variant above).  The
    # previous open(..., 'rb') yielded bytes lines, and _convert_line
    # applies a str regex and str token comparisons to them, which raises
    # TypeError under Python 3.
    with codecs.open(
            grapheme_break_test_file_name,
            encoding='utf-8',
            errors='strict') as f:
        for line in f:
            test = _convert_line(line)
            if test:
                result += [test]
    return result
| apache-2.0 |
berth64/modded_modded_1257ad | source/process_scripts.py | 1 | 1737 | import sys
sys.dont_write_bytecode = True
import string
from module_info import *
from module_scripts import *
from process_common import *
from process_operations import *
# Lav's export_dir tweak: normalize the configured export path to forward
# slashes with exactly one trailing '/', so the string concatenations below
# build valid paths on both Windows and Unix.
export_dir = '%s/' % export_dir.replace('\\', '/').rstrip('/')
def save_scripts(variable_list,variable_uses,scripts,tag_uses,quick_strings):
  # Write all module scripts to <export_dir>/scripts.txt in the engine's
  # text format: a header, the script count, then each script as
  # "<identifier> <flags>" followed by its compiled statement block.
  # The variable/tag/quick-string accumulators are updated as a side effect
  # by save_statement_block.
  file = open(export_dir + "scripts.txt","w")
  file.write("scriptsfile version 1\n")
  file.write("%d\n"%len(scripts))
  temp_list = []
  list_type = type(temp_list)
  for i_script in xrange(len(scripts)):
    func = scripts[i_script]
    # Old-style entries have the statement list at index 1 (flags written
    # as -1); new-style entries carry a numeric flags field at index 1 and
    # the statement list at index 2.
    if (type(func[1]) == list_type):
      file.write("%s -1\n"%(convert_to_identifier(func[0])))
      save_statement_block(file,convert_to_identifier(func[0]), 0,func[1], variable_list,variable_uses,tag_uses,quick_strings)
    else:
      file.write("%s %f\n"%(convert_to_identifier(func[0]), func[1]))
      save_statement_block(file,convert_to_identifier(func[0]), 0,func[2], variable_list,variable_uses,tag_uses,quick_strings)
    file.write("\n")
  file.close()
def save_python_header():
  # Generate ./ID_scripts.py mapping each script identifier to its index,
  # so other module files can reference scripts by name.
  file = open("./ID_scripts.py","w")
  for i_script in xrange(len(scripts)):
    file.write("script_%s = %d\n"%(convert_to_identifier(scripts[i_script][0]),i_script))
  file.write("\n\n")
  file.close()
# Module entry point (Python 2): regenerate the ID header, export all
# scripts, and persist the symbol caches that the export touched.
print "Exporting scripts..."
save_python_header()
variable_uses = []
variables = load_variables(export_dir, variable_uses)
tag_uses = load_tag_uses(export_dir)
quick_strings = load_quick_strings(export_dir)
save_scripts(variables,variable_uses,scripts,tag_uses,quick_strings)
# Write back the (possibly grown) variable/tag/quick-string tables.
save_variables(export_dir,variables,variable_uses)
save_tag_uses(export_dir, tag_uses)
save_quick_strings(export_dir,quick_strings)
| agpl-3.0 |
tarikkdiry/Flock | flask/lib/python2.7/site-packages/click/exceptions.py | 199 | 6788 | from ._compat import PY2, filename_to_ui, get_text_stderr
from .utils import echo
class ClickException(Exception):
    """An exception that Click can handle and show to the user."""

    #: The exit code for this exception
    exit_code = 1

    def __init__(self, message):
        # Under Python 2, store the message as UTF-8 bytes so it can be
        # safely interpolated into native (byte) strings.
        if PY2:
            if message is not None:
                message = message.encode('utf-8')
        Exception.__init__(self, message)
        self.message = message

    def format_message(self):
        # Subclasses override this to build richer messages lazily.
        return self.message

    def show(self, file=None):
        """Print 'Error: <message>' to *file* (stderr by default)."""
        if file is None:
            file = get_text_stderr()
        echo('Error: %s' % self.format_message(), file=file)
class UsageError(ClickException):
    """An internal exception that signals a usage error.  This typically
    aborts any further handling.

    :param message: the error message to display.
    :param ctx: optionally the context that caused this error.  Click will
                fill in the context automatically in some situations.
    """
    #: Usage errors exit with 2 to distinguish them from other failures.
    exit_code = 2

    def __init__(self, message, ctx=None):
        ClickException.__init__(self, message)
        self.ctx = ctx

    def show(self, file=None):
        if file is None:
            file = get_text_stderr()
        color = None
        if self.ctx is not None:
            # Reuse the context's color preference and prefix the error with
            # the command's usage line for orientation.
            color = self.ctx.color
            echo(self.ctx.get_usage() + '\n', file=file, color=color)
        echo('Error: %s' % self.format_message(), file=file, color=color)
class BadParameter(UsageError):
    """An exception that formats out a standardized error message for a
    bad parameter.  This is useful when thrown from a callback or type as
    Click will attach contextual information to it (for instance, which
    parameter it is).

    .. versionadded:: 2.0

    :param param: the parameter object that caused this error.  This can
                  be left out, and Click will attach this info itself
                  if possible.
    :param param_hint: a string that shows up as parameter name.  This
                       can be used as alternative to `param` in cases
                       where custom validation should happen.  If it is
                       a string it's used as such, if it's a list then
                       each item is quoted and separated.
    """

    def __init__(self, message, ctx=None, param=None,
                 param_hint=None):
        UsageError.__init__(self, message, ctx)
        self.param = param
        self.param_hint = param_hint

    def format_message(self):
        # Prefer an explicit hint, then the parameter's own option names.
        if self.param_hint is not None:
            param_hint = self.param_hint
        elif self.param is not None:
            param_hint = self.param.opts or [self.param.human_readable_name]
        else:
            # Without any parameter information we cannot name the offender.
            return 'Invalid value: %s' % self.message
        if isinstance(param_hint, (tuple, list)):
            param_hint = ' / '.join('"%s"' % x for x in param_hint)
        return 'Invalid value for %s: %s' % (param_hint, self.message)
class MissingParameter(BadParameter):
    """Raised if click required an option or argument but it was not
    provided when invoking the script.

    .. versionadded:: 4.0

    :param param_type: a string that indicates the type of the parameter.
                       The default is to inherit the parameter type from
                       the given `param`.  Valid values are ``'parameter'``,
                       ``'option'`` or ``'argument'``.
    """

    def __init__(self, message=None, ctx=None, param=None,
                 param_hint=None, param_type=None):
        BadParameter.__init__(self, message, ctx, param, param_hint)
        self.param_type = param_type

    def format_message(self):
        # Work out how to refer to the missing parameter, mirroring
        # BadParameter.format_message.
        if self.param_hint is not None:
            param_hint = self.param_hint
        elif self.param is not None:
            param_hint = self.param.opts or [self.param.human_readable_name]
        else:
            param_hint = None
        if isinstance(param_hint, (tuple, list)):
            param_hint = ' / '.join('"%s"' % x for x in param_hint)

        # Fall back to the parameter's own kind when none was given.
        param_type = self.param_type
        if param_type is None and self.param is not None:
            param_type = self.param.param_type_name

        # Let the parameter's type contribute extra detail (e.g. choices).
        msg = self.message
        if self.param is not None:
            msg_extra = self.param.type.get_missing_message(self.param)
            if msg_extra:
                if msg:
                    msg += '. ' + msg_extra
                else:
                    msg = msg_extra

        return 'Missing %s%s%s%s' % (
            param_type,
            param_hint and ' %s' % param_hint or '',
            msg and '. ' or '.',
            msg or '',
        )
class NoSuchOption(UsageError):
    """Raised if click attempted to handle an option that does not
    exist.

    .. versionadded:: 4.0
    """

    def __init__(self, option_name, message=None, possibilities=None,
                 ctx=None):
        if message is None:
            message = 'no such option: %s' % option_name
        UsageError.__init__(self, message, ctx)
        self.option_name = option_name
        self.possibilities = possibilities

    def format_message(self):
        """Append a did-you-mean / possible-options suffix when known."""
        parts = [self.message]
        candidates = self.possibilities
        if candidates:
            if len(candidates) == 1:
                parts.append('Did you mean %s?' % candidates[0])
            else:
                parts.append('(Possible options: %s)'
                             % ', '.join(sorted(candidates)))
        return ' '.join(parts)
class BadOptionUsage(UsageError):
    """Raised if an option is generally supplied but the use of the option
    was incorrect. This is for instance raised if the number of arguments
    for an option is not correct.

    .. versionadded:: 4.0
    """
    def __init__(self, message, ctx=None):
        # No extra state; only the message and optional context are stored.
        UsageError.__init__(self, message, ctx)
class BadArgumentUsage(UsageError):
    """Raised if an argument is generally supplied but the use of the argument
    was incorrect. This is for instance raised if the number of values
    for an argument is not correct.

    .. versionadded:: 6.0
    """
    def __init__(self, message, ctx=None):
        # No extra state; only the message and optional context are stored.
        UsageError.__init__(self, message, ctx)
class FileError(ClickException):
    """Raised if a file cannot be opened."""
    def __init__(self, filename, hint=None):
        # ``filename_to_ui`` converts the raw (possibly bytes) filename into
        # a printable form; keep the original value alongside it.
        ui_filename = filename_to_ui(filename)
        if hint is None:
            hint = 'unknown error'
        ClickException.__init__(self, hint)
        self.ui_filename = ui_filename
        self.filename = filename
    def format_message(self):
        # ``self.message`` holds the hint passed to the constructor.
        return 'Could not open file %s: %s' % (self.ui_filename, self.message)
class Abort(RuntimeError):
    """An internal signalling exception that signals Click to abort."""
    # Derives from RuntimeError (not ClickException), so it carries no
    # user-facing formatting of its own.
| bsd-3-clause |
shsingh/ansible | lib/ansible/modules/network/f5/bigiq_application_https_waf.py | 38 | 33685 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigiq_application_https_waf
short_description: Manages BIG-IQ HTTPS WAF applications
description:
- Manages BIG-IQ applications used for load balancing an HTTPS application on port 443
with a Web Application Firewall (WAF) using an ASM Rapid Deployment policy.
version_added: 2.6
options:
name:
description:
- Name of the new application.
type: str
required: True
description:
description:
- Description of the application.
type: str
servers:
description:
- A list of servers that the application is hosted on.
- If you are familiar with other BIG-IP setting, you might also refer to this
list as the list of pool members.
- When creating a new application, at least one server is required.
suboptions:
address:
description:
- The IP address of the server.
type: str
port:
description:
- The port of the server.
type: str
default: 80
type: list
inbound_virtual:
description:
- Settings to configure the virtual which will receive the inbound connection.
- This virtual will be used to host the HTTPS endpoint of the application.
- Traffic destined to the C(redirect_virtual) will be offloaded to this
parameter to ensure that proper redirection from insecure, to secure, occurs.
suboptions:
address:
description:
- Specifies destination IP address information to which the virtual server
sends traffic.
- This parameter is required when creating a new application.
type: str
netmask:
description:
- Specifies the netmask to associate with the given C(destination).
- This parameter is required when creating a new application.
type: str
port:
description:
- The port that the virtual listens for connections on.
- When creating a new application, if this parameter is not specified, the
default value of C(443) will be used.
type: str
default: 443
type: dict
redirect_virtual:
description:
- Settings to configure the virtual which will receive the connection to be
redirected.
- This virtual will be used to host the HTTP endpoint of the application.
- Traffic destined to this parameter will be offloaded to the
C(inbound_virtual) parameter to ensure that proper redirection from insecure,
to secure, occurs.
suboptions:
address:
description:
- Specifies destination IP address information to which the virtual server
sends traffic.
- This parameter is required when creating a new application.
type: str
netmask:
description:
- Specifies the netmask to associate with the given C(destination).
- This parameter is required when creating a new application.
type: str
port:
description:
- The port that the virtual listens for connections on.
- When creating a new application, if this parameter is not specified, the
default value of C(80) will be used.
type: str
default: 80
type: dict
client_ssl_profile:
description:
- Specifies the SSL profile for managing client-side SSL traffic.
suboptions:
name:
description:
- The name of the client SSL profile to created and used.
- When creating a new application, if this value is not specified, the
default value of C(clientssl) will be used.
type: str
cert_key_chain:
description:
- One or more certificates and keys to associate with the SSL profile.
- This option is always a list. The keys in the list dictate the details
of the client/key/chain/passphrase combination.
- Note that BIG-IPs can only have one of each type of each certificate/key
type. This means that you can only have one RSA, one DSA, and one ECDSA
per profile.
- If you attempt to assign two RSA, DSA, or ECDSA certificate/key combo,
the device will reject this.
- This list is a complex list that specifies a number of keys.
- When creating a new profile, if this parameter is not specified, the
default value of C(inherit) will be used.
suboptions:
cert:
description:
- Specifies a cert name for use.
type: str
required: True
key:
description:
- Specifies a key name.
type: str
required: True
chain:
description:
- Specifies a certificate chain that is relevant to the certificate and
key mentioned earlier.
- This key is optional.
type: str
passphrase:
description:
- Contains the passphrase of the key file, should it require one.
- Passphrases are encrypted on the remote BIG-IP device.
type: str
type: raw
type: dict
service_environment:
description:
- Specifies the name of service environment that the application will be
deployed to.
- When creating a new application, this parameter is required.
type: str
add_analytics:
description:
- Collects statistics of the BIG-IP that the application is deployed to.
- This parameter is only relevant when specifying a C(service_environment) which
is a BIG-IP; not an SSG.
type: bool
default: no
domain_names:
description:
- Specifies host names that are used to access the web application that this
security policy protects.
- When creating a new application, this parameter is required.
type: list
state:
description:
- The state of the resource on the system.
- When C(present), guarantees that the resource exists with the provided attributes.
- When C(absent), removes the resource from the system.
type: str
choices:
- absent
- present
default: present
wait:
description:
- If the module should wait for the application to be created, deleted or updated.
type: bool
default: yes
extends_documentation_fragment: f5
notes:
- This module will not work on BIGIQ version 6.1.x or greater.
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Load balance an HTTPS application on port 443 with a WAF using ASM
bigiq_application_https_waf:
name: my-app
description: Redirect HTTP to HTTPS via WAF
service_environment: my-ssg
servers:
- address: 1.2.3.4
port: 8080
- address: 5.6.7.8
port: 8080
inbound_virtual:
address: 2.2.2.2
netmask: 255.255.255.255
port: 443
redirect_virtual:
address: 2.2.2.2
netmask: 255.255.255.255
port: 80
provider:
password: secret
server: lb.mydomain.com
user: admin
state: present
delegate_to: localhost
'''
RETURN = r'''
description:
description: The new description of the application of the resource.
returned: changed
type: str
sample: My application
service_environment:
description: The environment which the service was deployed to.
returned: changed
type: str
sample: my-ssg1
inbound_virtual_destination:
description: The destination of the virtual that was created.
returned: changed
type: str
sample: 6.7.8.9
inbound_virtual_netmask:
description: The network mask of the provided inbound destination.
returned: changed
type: str
sample: 255.255.255.0
inbound_virtual_port:
description: The port the inbound virtual address listens on.
returned: changed
type: int
sample: 80
servers:
description: List of servers, and their ports, that make up the application.
type: complex
returned: changed
contains:
address:
description: The IP address of the server.
returned: changed
type: str
sample: 2.3.4.5
port:
description: The port that the server listens on.
returned: changed
type: int
sample: 8080
sample: hash/dictionary of values
'''
import time
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import string_types
try:
from library.module_utils.network.f5.bigiq import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.ipaddress import is_valid_ip
from library.module_utils.network.f5.icontrol import bigiq_version
except ImportError:
from ansible.module_utils.network.f5.bigiq import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.ipaddress import is_valid_ip
from ansible.module_utils.network.f5.icontrol import bigiq_version
class Parameters(AnsibleF5Parameters):
    """Shared parameter plumbing for this module.

    The class-level lists below drive the generic machinery in the base
    class and in ``ModuleManager``.
    """
    # Maps BIG-IQ REST (camelCase) attribute names to this module's
    # snake_case parameter names.
    api_map = {
        'templateReference': 'template_reference',
        'subPath': 'sub_path',
        'ssgReference': 'ssg_reference',
        'configSetName': 'config_set_name',
        'defaultDeviceReference': 'default_device_reference',
        'addAnalytics': 'add_analytics',
        'domains': 'domain_names'
    }
    # Attribute names sent to the API verbatim in request payloads.
    api_attributes = [
        'resources', 'description', 'configSetName', 'subPath', 'templateReference',
        'ssgReference', 'defaultDeviceReference', 'addAnalytics', 'domains'
    ]
    # Values reported back to the user in the module result.
    returnables = [
        'resources', 'description', 'config_set_name', 'sub_path', 'template_reference',
        'ssg_reference', 'default_device_reference', 'servers', 'inbound_virtual',
        'redirect_virtual', 'client_ssl_profile', 'add_analytics', 'domain_names'
    ]
    # Values diffed between want/have to decide whether an update is needed.
    updatables = [
        'resources', 'description', 'config_set_name', 'sub_path', 'template_reference',
        'ssg_reference', 'default_device_reference', 'servers', 'add_analytics', 'domain_names'
    ]
class ApiParameters(Parameters):
    """Parameters as read back from the BIG-IQ API; no extra translation
    is needed beyond what the base class provides."""
    pass
class ModuleParameters(Parameters):
    """Parameters derived from user input.

    Several properties resolve values against the BIG-IQ REST API via
    ``self.client`` (assigned by ``ModuleManager`` after construction).
    """
    @property
    def http_profile(self):
        # Fixed name of the HTTP profile used by the deployed application.
        return "profile_http"
    @property
    def config_set_name(self):
        # The config set is named after the application itself.
        return self.name
    @property
    def sub_path(self):
        # Likewise, the sub path mirrors the application name.
        return self.name
    @property
    def template_reference(self):
        """Resolve the selfLink of the stock HTTPS-WAF LB template.

        Raises ``F5ModuleError`` if the template is missing or the API
        returns an error payload.
        """
        # NOTE: ``filter`` shadows the builtin; kept as-is here.
        filter = "name+eq+'Default-f5-HTTPS-WAF-lb-template'"
        uri = "https://{0}:{1}/mgmt/cm/global/templates/?$filter={2}&$top=1&$select=selfLink".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            filter
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            # Response body was not JSON.
            raise F5ModuleError(str(ex))
        if resp.status == 200 and response['totalItems'] == 0:
            raise F5ModuleError(
                "No default HTTP LB template was found."
            )
        elif 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp._content)
        result = dict(
            link=response['items'][0]['selfLink']
        )
        return result
    @property
    def default_device_reference(self):
        """Resolve ``service_environment`` to a managed BIG-IP device link.

        Returns ``None`` when no matching device exists, allowing callers
        to fall back to an SSG lookup.
        """
        if is_valid_ip(self.service_environment):
            # An IP address was specified
            filter = "address+eq+'{0}'".format(self.service_environment)
        else:
            # Assume a hostname was specified
            filter = "hostname+eq+'{0}'".format(self.service_environment)
        uri = "https://{0}:{1}/mgmt/shared/resolver/device-groups/cm-adccore-allbigipDevices/devices/?$filter={2}&$top=1&$select=selfLink".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            filter
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if resp.status == 200 and response['totalItems'] == 0:
            # Not a known device.
            return None
        elif 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp._content)
        result = dict(
            link=response['items'][0]['selfLink']
        )
        return result
    @property
    def ssg_reference(self):
        """Resolve ``service_environment`` to a service scaling group link,
        or ``None`` when no SSG has that name."""
        filter = "name+eq+'{0}'".format(self.service_environment)
        uri = "https://{0}:{1}/mgmt/cm/cloud/service-scaling-groups/?$filter={2}&$top=1&$select=selfLink".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            filter
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if resp.status == 200 and response['totalItems'] == 0:
            return None
        elif 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp._content)
        result = dict(
            link=response['items'][0]['selfLink']
        )
        return result
    @property
    def domain_names(self):
        # Wrap each plain domain string in the dict shape the API expects,
        # e.g. 'example.com' -> {'domainName': 'example.com'}.
        if self._values['domain_names'] is None:
            return None
        result = []
        for domain in self._values['domain_names']:
            result.append(
                dict(
                    domainName=domain
                )
            )
        return result
class Changes(Parameters):
    def to_return(self):
        """Build the dict of reportable values for the module result.

        Any exception during attribute collection is deliberately swallowed
        and whatever was gathered so far is returned.
        """
        reported = {}
        try:
            for key in self.returnables:
                reported[key] = getattr(self, key)
            reported = self._filter_params(reported)
        except Exception:
            pass
        return reported
class UsableChanges(Changes):
    """Changes shaped into the ``resources`` payload of the apply-template API.

    The ``ltm:...`` / ``profiles:...`` / ``members:...`` keys below are
    opaque hard-coded identifiers; presumably they match the resource ids
    of the 'Default-f5-HTTPS-WAF-lb-template' template -- TODO confirm.
    """

    @property
    def resources(self):
        """Assemble every sub-resource into the single ``resources`` dict."""
        result = dict()
        result.update(self.http_profile)
        result.update(self.http_monitor)
        result.update(self.inbound_virtual_server)
        result.update(self.redirect_virtual_server)
        result.update(self.pool)
        result.update(self.nodes)
        result.update(self.ssl_profile)
        return result

    @property
    def inbound_virtual_server(self):
        """HTTPS-facing virtual server built from ``inbound_virtual``."""
        result = dict()
        result['ltm:virtual:90735960bf4b'] = [
            dict(
                parameters=dict(
                    name='default_vs',
                    destinationAddress=self.inbound_virtual['address'],
                    mask=self.inbound_virtual['netmask'],
                    destinationPort=self.inbound_virtual['port']
                ),
                subcollectionResources=self.inbound_profiles
            )
        ]
        return result

    @property
    def inbound_profiles(self):
        """Profiles (with default parameters) attached to the HTTPS virtual."""
        result = {
            'profiles:78b1bcfdafad': [
                dict(
                    parameters=dict()
                )
            ],
            'profiles:2f52acac9fde': [
                dict(
                    parameters=dict()
                )
            ],
            'profiles:9448fe71611e': [
                dict(
                    parameters=dict()
                )
            ]
        }
        return result

    @property
    def redirect_virtual_server(self):
        """HTTP virtual that redirects clients to the HTTPS virtual."""
        result = dict()
        result['ltm:virtual:3341f412b980'] = [
            dict(
                parameters=dict(
                    name='default_redirect_vs',
                    destinationAddress=self.redirect_virtual['address'],
                    mask=self.redirect_virtual['netmask'],
                    destinationPort=self.redirect_virtual['port']
                ),
                subcollectionResources=self.redirect_profiles
            )
        ]
        return result

    @property
    def redirect_profiles(self):
        """Profiles (with default parameters) attached to the HTTP virtual."""
        result = {
            'profiles:2f52acac9fde': [
                dict(
                    parameters=dict()
                )
            ],
            'profiles:9448fe71611e': [
                dict(
                    parameters=dict()
                )
            ]
        }
        return result

    @property
    def pool(self):
        """Single pool ('pool_0') whose members are the supplied servers."""
        result = dict()
        result['ltm:pool:8bc5b256f9d1'] = [
            dict(
                parameters=dict(
                    name='pool_0'
                ),
                subcollectionResources=self.pool_members
            )
        ]
        return result

    @property
    def pool_members(self):
        """One member per server, each referencing its node by address."""
        result = dict()
        result['members:dec6d24dc625'] = []
        for x in self.servers:
            member = dict(
                parameters=dict(
                    port=x['port'],
                    nodeReference=dict(
                        link='#/resources/ltm:node:c072248f8e6a/{0}'.format(x['address']),
                        fullPath='# {0}'.format(x['address'])
                    )
                )
            )
            result['members:dec6d24dc625'].append(member)
        return result

    @property
    def http_profile(self):
        """HTTP profile resource named ``profile_http``."""
        result = dict()
        result['ltm:profile:http:2f52acac9fde'] = [
            dict(
                parameters=dict(
                    name='profile_http'
                )
            )
        ]
        return result

    @property
    def http_monitor(self):
        """HTTP health monitor resource named ``monitor-http``."""
        result = dict()
        result['ltm:monitor:http:18765a198150'] = [
            dict(
                parameters=dict(
                    name='monitor-http'
                )
            )
        ]
        return result

    @property
    def nodes(self):
        """One LTM node per server, named by its address."""
        result = dict()
        result['ltm:node:c072248f8e6a'] = []
        for x in self.servers:
            tmp = dict(
                parameters=dict(
                    name=x['address'],
                    address=x['address']
                )
            )
            result['ltm:node:c072248f8e6a'].append(tmp)
        return result

    @property
    def node_addresses(self):
        """Plain list of server addresses."""
        result = [x['address'] for x in self.servers]
        return result

    @property
    def ssl_profile(self):
        """Client-SSL profile carrying the configured cert/key chains."""
        result = dict()
        result['ltm:profile:client-ssl:78b1bcfdafad'] = [
            dict(
                parameters=dict(
                    name='clientssl',
                    certKeyChain=self.cert_key_chains
                )
            )
        ]
        return result

    def _get_cert_references(self):
        """Map fully-qualified cert names to their selfLinks on the BIG-IQ."""
        result = dict()
        uri = "https://{0}:{1}/mgmt/cm/adc-core/working-config/sys/file/ssl-cert/".format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        for cert in response['items']:
            key = fq_name(cert['partition'], cert['name'])
            result[key] = cert['selfLink']
        return result

    def _get_key_references(self):
        """Map fully-qualified key names to their selfLinks on the BIG-IQ."""
        result = dict()
        uri = "https://{0}:{1}/mgmt/cm/adc-core/working-config/sys/file/ssl-key/".format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        for cert in response['items']:
            key = fq_name(cert['partition'], cert['name'])
            result[key] = cert['selfLink']
        return result

    @property
    def cert_key_chains(self):
        """Validate and translate ``cert_key_chain`` entries into API shape.

        Returns ``None`` when no chain configuration was supplied; raises
        ``F5ModuleError`` on malformed or unknown cert/key/chain names.
        """
        result = []
        if self.client_ssl_profile is None:
            return None
        if 'cert_key_chain' not in self.client_ssl_profile:
            return None
        kc = self.client_ssl_profile['cert_key_chain']
        if isinstance(kc, string_types) and kc != 'inherit':
            raise F5ModuleError(
                "Only the 'inherit' setting is available when 'cert_key_chain' is a string."
            )
        # NOTE(review): a literal 'inherit' string passes the check above but
        # is then rejected by the list check below; presumably it should fall
        # back to the profile default instead -- confirm intended behavior.
        if not isinstance(kc, list):
            raise F5ModuleError(
                "The value of 'cert_key_chain' is not one of the supported types."
            )
        cert_references = self._get_cert_references()
        key_references = self._get_key_references()
        for idx, x in enumerate(kc):
            tmp = dict(
                name='clientssl{0}'.format(idx)
            )
            if 'cert' not in x:
                raise F5ModuleError(
                    "A 'cert' option is required when specifying the 'cert_key_chain' parameter.."
                )
            elif x['cert'] not in cert_references:
                raise F5ModuleError(
                    "The specified 'cert' was not found. Did you specify its full path?"
                )
            else:
                key = x['cert']
                tmp['certReference'] = dict(
                    link=cert_references[key],
                    fullPath=key
                )
            if 'key' not in x:
                raise F5ModuleError(
                    "A 'key' option is required when specifying the 'cert_key_chain' parameter.."
                )
            elif x['key'] not in key_references:
                raise F5ModuleError(
                    "The specified 'key' was not found. Did you specify its full path?"
                )
            else:
                key = x['key']
                tmp['keyReference'] = dict(
                    link=key_references[key],
                    fullPath=key
                )
            # BUG FIX: the previous 'if chain in x and ... else' structure
            # fell into the else branch when 'chain' was absent and raised a
            # KeyError on x['chain']; 'chain' is optional, so only attach a
            # chainReference when it was actually supplied. The error message
            # also wrongly referred to 'key' instead of 'chain'.
            if 'chain' in x:
                if x['chain'] not in cert_references:
                    raise F5ModuleError(
                        "The specified 'chain' was not found. Did you specify its full path?"
                    )
                key = x['chain']
                tmp['chainReference'] = dict(
                    link=cert_references[key],
                    fullPath=key
                )
            if 'passphrase' in x:
                tmp['passphrase'] = x['passphrase']
            result.append(tmp)
        return result
class ReportableChanges(Changes):
    """Changes translated back into user-facing form; currently identical
    to the base ``Changes`` behavior."""
    pass
class Difference(object):
    """Determine which parameters differ between desired and current state.

    ``compare`` first looks for an attribute/property on this class named
    after the parameter (a custom comparison hook); otherwise it falls back
    to a plain inequality check between ``want`` and ``have``.
    """

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        """Return the desired value when it differs, else ``None``."""
        try:
            return getattr(self, param)
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        desired = getattr(self.want, param)
        try:
            current = getattr(self.have, param)
            if desired != current:
                return desired
        except AttributeError:
            # ``have`` does not know this parameter; report it as changed.
            return desired
class ModuleManager(object):
    """Orchestrates creation/deletion of the HTTPS WAF application on BIG-IQ."""
    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        self.want = ModuleParameters(params=self.module.params)
        # Parameter objects perform REST lookups themselves, so they need
        # access to the client.
        self.want.client = self.client
        self.have = ApiParameters()
        self.changes = UsableChanges()
    def _set_changed_options(self):
        """Seed ``self.changes`` with every user-supplied returnable value."""
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)
            self.changes.client = self.client
    def _update_changed_options(self):
        """Diff want vs. have; return True when an update is required."""
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                # Dict-valued changes are merged; scalars recorded by name.
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            self.changes.client = self.client
            return True
        return False
    def should_update(self):
        """Thin wrapper around ``_update_changed_options``."""
        result = self._update_changed_options()
        if result:
            return True
        return False
    def check_bigiq_version(self):
        """Refuse to run on BIG-IQ 6.1.0 or newer (unsupported API)."""
        version = bigiq_version(self.client)
        if LooseVersion(version) >= LooseVersion('6.1.0'):
            raise F5ModuleError(
                'Module supports only BIGIQ version 6.0.x or lower.'
            )
    def exec_module(self):
        """Main entry point: dispatch on ``state`` and report changes."""
        self.check_bigiq_version()
        changed = False
        result = dict()
        state = self.want.state
        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()
        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result
    def _announce_deprecations(self, result):
        """Forward any collected deprecation warnings to Ansible."""
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            # NOTE(review): ``deprecate`` is reached via self.client.module;
            # confirm the REST client actually exposes the module object.
            self.client.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )
    def present(self):
        """Ensure the application exists; creation only, no in-place update."""
        if self.exists():
            return False
        else:
            return self.create()
    def exists(self):
        """Check whether an application with this name is already deployed."""
        uri = "https://{0}:{1}/mgmt/ap/query/v1/tenants/default/reports/AllApplicationsList?$filter=name+eq+'{2}'".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.name
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        # NOTE(review): anything other than an explicit empty result set
        # (including error responses) is treated as "exists" -- confirm.
        if resp.status == 200 and 'result' in response and 'totalItems' in response['result'] and response['result']['totalItems'] == 0:
            return False
        return True
    def remove(self):
        """Delete the application, optionally waiting for task completion."""
        if self.module.check_mode:
            return True
        self_link = self.remove_from_device()
        if self.want.wait:
            self.wait_for_apply_template_task(self_link)
            if self.exists():
                raise F5ModuleError("Failed to delete the resource.")
        return True
    def has_no_service_environment(self):
        """True when neither a BIG-IP device nor an SSG matched the name."""
        if self.want.default_device_reference is None and self.want.ssg_reference is None:
            return True
        return False
    def create(self):
        """Validate required inputs and deploy the application."""
        if self.want.service_environment is None:
            raise F5ModuleError(
                "A 'service_environment' must be specified when creating a new application."
            )
        if self.want.servers is None:
            raise F5ModuleError(
                "At least one 'servers' item is needed when creating a new application."
            )
        if self.want.inbound_virtual is None:
            raise F5ModuleError(
                "An 'inbound_virtual' must be specified when creating a new application."
            )
        if self.want.domain_names is None:
            raise F5ModuleError(
                "You must provide at least one value in the 'domain_names' parameter."
            )
        self._set_changed_options()
        if self.has_no_service_environment():
            raise F5ModuleError(
                "The specified 'service_environment' ({0}) was not found.".format(self.want.service_environment)
            )
        if self.module.check_mode:
            return True
        self_link = self.create_on_device()
        if self.want.wait:
            self.wait_for_apply_template_task(self_link)
            if not self.exists():
                raise F5ModuleError(
                    "Failed to deploy application."
                )
        return True
    def create_on_device(self):
        """POST the apply-template task in CREATE mode; return its selfLink."""
        params = self.changes.api_params()
        params['mode'] = 'CREATE'
        uri = 'https://{0}:{1}/mgmt/cm/global/tasks/apply-template'.format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp._content)
        return response['selfLink']
    def absent(self):
        """Ensure the application does not exist."""
        if self.exists():
            return self.remove()
        return False
    def remove_from_device(self):
        """POST the apply-template task in DELETE mode; return its selfLink."""
        params = dict(
            configSetName=self.want.name,
            mode='DELETE'
        )
        uri = 'https://{0}:{1}/mgmt/cm/global/tasks/apply-template'.format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp._content)
        return response['selfLink']
    def wait_for_apply_template_task(self, self_link):
        """Poll the task every 5 seconds until it finishes or reports an error.

        The task's selfLink points at ``localhost``; rewrite it to the real
        provider host before polling.
        """
        host = 'https://{0}:{1}'.format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        uri = self_link.replace('https://localhost', host)
        while True:
            resp = self.client.api.get(uri)
            try:
                response = resp.json()
            except ValueError as ex:
                raise F5ModuleError(str(ex))
            if response['status'] == 'FINISHED' and response.get('currentStep', None) == 'DONE':
                return True
            elif 'errorMessage' in response:
                raise F5ModuleError(response['errorMessage'])
            time.sleep(5)
class ArgumentSpec(object):
    """Declares the module's argument specification for AnsibleModule."""
    def __init__(self):
        self.supports_check_mode = True
        argument_spec = dict(
            name=dict(required=True),
            description=dict(),
            servers=dict(
                type='list',
                options=dict(
                    address=dict(required=True),
                    port=dict(default=80)
                )
            ),
            inbound_virtual=dict(
                type='dict',
                options=dict(
                    address=dict(required=True),
                    netmask=dict(required=True),
                    port=dict(default=443)
                )
            ),
            redirect_virtual=dict(
                type='dict',
                options=dict(
                    address=dict(required=True),
                    netmask=dict(required=True),
                    port=dict(default=80)
                )
            ),
            service_environment=dict(),
            state=dict(
                default='present',
                choices=['present', 'absent']
            ),
            # NOTE(review): ``name`` and ``cert_key_chain`` appear directly in
            # this sub-spec instead of being nested under ``options=``, so
            # they are presumably never validated by AnsibleModule -- confirm
            # against other F5 modules before changing.
            client_ssl_profile=dict(
                type='dict',
                name=dict(default='clientssl'),
                cert_key_chain=dict(
                    type='raw',
                    options=dict(
                        cert=dict(),
                        key=dict(),
                        chain=dict(),
                        passphrase=dict()
                    )
                )
            ),
            add_analytics=dict(type='bool', default='no'),
            domain_names=dict(type='list'),
            wait=dict(type='bool', default='yes')
        )
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(argument_spec)
        # NOTE(review): 'inherit_cert_key_chain' is not declared in
        # argument_spec above, so this exclusion can never trigger -- confirm.
        self.mutually_exclusive = [
            ['inherit_cert_key_chain', 'cert_key_chain']
        ]
def main():
    """Module entry point: build the spec, run the manager, report results."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
        mutually_exclusive=spec.mutually_exclusive
    )
    try:
        results = ModuleManager(module=module).exec_module()
        module.exit_json(**results)
    except F5ModuleError as ex:
        module.fail_json(msg=str(ex))


if __name__ == '__main__':
    main()
| gpl-3.0 |
bihealth/vcfpy | tests/test_reader_parse_subset.py | 1 | 1479 | # -*- coding: utf-8 -*-
"""Tests for reading with parsing only a subset of samples"""
from vcfpy import Reader, Writer, Call, UnparsedCall
def test_reading_parse_subset(tmpdir, multisample_vcf_file):
    """Reading with ``parsed_samples`` parses only the named sample; the
    other samples stay as ``UnparsedCall`` and round-trip unchanged."""
    # Perform record-wise copying, saving results in records
    records = []
    out_path = str(tmpdir.mkdir("output").join("output.vcf"))
    with Reader.from_path(multisample_vcf_file, parsed_samples=["NA00001"]) as reader:
        with Writer.from_path(out_path, reader.header) as writer:
            for record in reader:
                records.append(record)
                writer.write_record(record)
    # Check resulting records, checking the first and last records is enough
    assert len(records) == 5
    # All three samples are still present on each record...
    assert set(records[0].call_for_sample.keys()) == {"NA00001", "NA00002", "NA00003"}
    assert set(records[-1].call_for_sample.keys()) == {"NA00001", "NA00002", "NA00003"}
    # ...but only NA00001 was parsed into a full Call object.
    assert isinstance(records[0].call_for_sample["NA00001"], Call)
    assert isinstance(records[0].call_for_sample["NA00002"], UnparsedCall)
    assert isinstance(records[0].call_for_sample["NA00003"], UnparsedCall)
    assert isinstance(records[-1].call_for_sample["NA00001"], Call)
    assert isinstance(records[-1].call_for_sample["NA00002"], UnparsedCall)
    assert isinstance(records[-1].call_for_sample["NA00003"], UnparsedCall)
    # Check resulting file
    # The written copy must be byte-identical to the input file.
    with open(multisample_vcf_file, "rt") as inf, open(out_path, "rt") as outf:
        assert inf.read() == outf.read()
| mit |
mne-tools/mne-tools.github.io | 0.12/_downloads/plot_run_ica.py | 3 | 1512 | # doc:slow-example
"""
================================
Compute ICA components on epochs
================================
ICA is fit to MEG raw data.
We assume that the non-stationary EOG artifacts have already been removed.
The sources matching the ECG are automatically found and displayed.
Subsequently, artefact detection and rejection quality are assessed.
Finally, the impact on the evoked ERF is visualized.
Note that this example does quite a bit of processing, so even on a
fast machine it can take about a minute to complete.
"""
# Authors: Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import mne
from mne.preprocessing import ICA, create_ecg_epochs
from mne.datasets import sample
print(__doc__)
###############################################################################
# Fit ICA model using the FastICA algorithm, detect and inspect components
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True)
# Band-pass 1-30 Hz with an IIR filter before ICA.
raw.filter(1, 30, method='iir')
# Keep only MEG and stim channels (EEG excluded, bad channels dropped).
raw.pick_types(meg=True, eeg=False, exclude='bads', stim=True)
# longer + more epochs for more artifact exposure
events = mne.find_events(raw, stim_channel='STI 014')
epochs = mne.Epochs(raw, events, event_id=None, tmin=-0.2, tmax=0.5)
# Fit ICA keeping enough components to explain 95% of the variance.
ica = ICA(n_components=0.95, method='fastica').fit(epochs)
# Build ECG-locked epochs (+/- 0.5 s) and score components against them.
ecg_epochs = create_ecg_epochs(raw, tmin=-.5, tmax=.5)
ecg_inds, scores = ica.find_bads_ecg(ecg_epochs)
ica.plot_components(ecg_inds)
rpdillon/wikid | wikid/docutils/parsers/rst/languages/af.py | 57 | 3502 | # $Id: af.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Jannie Hofmeyr <jhsh@sun.ac.za>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Afrikaans-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
# Afrikaans directive names mapped to the canonical English directive names.
# Entries still marked "(translation required)" have no translation yet.
directives = {
    'aandag': 'attention',
    'versigtig': 'caution',
    'gevaar': 'danger',
    'fout': 'error',
    'wenk': 'hint',
    'belangrik': 'important',
    'nota': 'note',
    'tip': 'tip', # hint and tip both have the same translation: wenk
    'waarskuwing': 'warning',
    'vermaning': 'admonition',
    'kantstreep': 'sidebar',
    'onderwerp': 'topic',
    'lynblok': 'line-block',
    'parsed-literal (translation required)': 'parsed-literal',
    'rubriek': 'rubric',
    'epigraaf': 'epigraph',
    'hoogtepunte': 'highlights',
    'pull-quote (translation required)': 'pull-quote',
    u'compound (translation required)': 'compound',
    u'container (translation required)': 'container',
    #'vrae': 'questions',
    #'qa': 'questions',
    #'faq': 'questions',
    'table (translation required)': 'table',
    'csv-table (translation required)': 'csv-table',
    'list-table (translation required)': 'list-table',
    'meta': 'meta',
    #'beeldkaart': 'imagemap',
    'beeld': 'image',
    'figuur': 'figure',
    'insluiting': 'include',
    'rou': 'raw',
    'vervang': 'replace',
    'unicode': 'unicode', # should this be translated? unikode
    'datum': 'date',
    'klas': 'class',
    'role (translation required)': 'role',
    'default-role (translation required)': 'default-role',
    'title (translation required)': 'title',
    'inhoud': 'contents',
    'sectnum': 'sectnum',
    'section-numbering': 'sectnum',
    u'header (translation required)': 'header',
    u'footer (translation required)': 'footer',
    #'voetnote': 'footnotes',
    #'aanhalings': 'citations',
    'teikennotas': 'target-notes',
    'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""Afrikaans name to registered (in directives/__init__.py) directive name
mapping."""
# Afrikaans role names (and short aliases) mapped to canonical role names.
roles = {
    'afkorting': 'abbreviation',
    'ab': 'abbreviation',
    'akroniem': 'acronym',
    'ac': 'acronym',
    'indeks': 'index',
    'i': 'index',
    'voetskrif': 'subscript',
    'sub': 'subscript',
    'boskrif': 'superscript',
    'sup': 'superscript',
    'titelverwysing': 'title-reference',
    'titel': 'title-reference',
    't': 'title-reference',
    'pep-verwysing': 'pep-reference',
    'pep': 'pep-reference',
    'rfc-verwysing': 'rfc-reference',
    'rfc': 'rfc-reference',
    'nadruk': 'emphasis',
    'sterk': 'strong',
    'literal (translation required)': 'literal',
    'benoemde verwysing': 'named-reference',
    'anonieme verwysing': 'anonymous-reference',
    'voetnootverwysing': 'footnote-reference',
    'aanhalingverwysing': 'citation-reference',
    'vervangingsverwysing': 'substitution-reference',
    'teiken': 'target',
    'uri-verwysing': 'uri-reference',
    'uri': 'uri-reference',
    'url': 'uri-reference',
    'rou': 'raw',}
"""Mapping of Afrikaans role names to canonical role names for interpreted text.
"""
| gpl-3.0 |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Static_Shear_Behaviour/Yield_Surface_Friction_Limit/Load_At_Angle_180_degrees/compare_HDF5_ALL.py | 424 | 3382 | #!/usr/bin/python
# Compare two Real-ESSI HDF5 result files (new run vs. reference) and report
# whether displacement, Gauss (stress/strain) and element outputs match.
# Usage: compare_HDF5_ALL.py <new_result.h5> <reference_result.h5>
# NOTE: Python 2 script (uses print statements).
import h5py
import sys
import numpy as np
import os
import re
import random
# find the path to my own python function:
# the comparison helpers live in <...>/compare_function, a sibling of the
# 'test_cases' tree this script is run from.
cur_dir=os.getcwd()
sep='test_cases'
test_DIR=cur_dir.split(sep,1)[0]
scriptDIR=test_DIR+'compare_function'
sys.path.append(scriptDIR)
# import my own function for color and comparator
from mycomparator import *
from mycolor_fun import *
# the real essi hdf5 results
h5_result_new = sys.argv[1]
h5_result_ori = sys.argv[2]
disp_pass_or_fail=h5diff_disp(h5_result_ori,h5_result_new)
# Gauss output may be absent from a model's results: a KeyError from the
# HDF5 lookup is treated as "nothing to compare", i.e. a pass.
Gauss_pass_or_fail = 1
try:
    Gauss_pass_or_fail=h5diff_Gauss_output(h5_result_ori,h5_result_new)
except KeyError:
    pass
# Element output may likewise be absent; missing data counts as a pass.
Element_Output_pass_or_fail = 1
try:
    Element_Output_pass_or_fail=h5diff_Element_output(h5_result_ori,h5_result_new)
except KeyError:
    pass
if disp_pass_or_fail and Gauss_pass_or_fail and Element_Output_pass_or_fail:
    print headOK(), "All hdf5 results are the same."
    print headOKCASE(),"-----------Done this case!-----------------"
else:
    if disp_pass_or_fail==0:
        print headFailed(),"-----------Displacement has mismatches!-----------------"
    if Gauss_pass_or_fail==0:
        print headFailed(),"-----------StressStrain has mismatches!-----------------"
    if Element_Output_pass_or_fail==0:
        print headFailed(),"-----------Element output has mismatches!-----------------"
# # The allowable tolerance between the ori_vals and new_vals values.
# tolerance=1e-5
# machine_epsilon=1e-16
# ori_vals=[]
# new_vals=[]
# ori_vals.append(find_max_disp(h5_result_ori,0))
# new_vals.append(find_max_disp(h5_result_new,0))
# # if multiple steps, compare the max_disp of random steps
# Nstep = find_disp_Nstep(h5_result_ori)
# if Nstep>5 :
# for i in xrange(1,4):
# test_step=random.randint(1,Nstep-1)
# ori_vals.append(find_max_disp(h5_result_ori,test_step))
# new_vals.append(find_max_disp(h5_result_new,test_step))
# # calculate the errors
# errors=[]
# for index, x in enumerate(ori_vals):
# if(abs(x))>machine_epsilon:
# errors.append(abs((new_vals[index]-x)/x))
# else:
# errors.append(machine_epsilon)
# # compare and form the flags
# flags=[]
# for item in errors:
# if abs(item)<tolerance:
# flags.append('pass')
# else:
# flags.append('failed')
# # print the results
# case_flag=1
# print headrun() , "-----------Testing results-----------------"
# print headstep() ,'{0} {1} {2} {3}'.format('back_value ','new_value ','error ','flag')
# for index, x in enumerate(errors):
# if(abs(x)<tolerance):
# print headOK() ,'{0:e} {1:e} {2:0.2f} {3}'.format(ori_vals[index],new_vals[index], x, flags[index] )
# else:
# case_flag=0
# print headFailed() ,'{0:e} {1:e} {2:0.2f} {3}'.format(ori_vals[index],new_vals[index], x, flags[index] )
# if(case_flag==1):
# print headOKCASE(),"-----------Done this case!-----------------"
# legacy backup
# automatically find the script directory.
# sys.path.append("/home/yuan/Dropbox/3essi_self_verification/test_suite/scripts" )
# script_dir=sys.argv[1]
# print headstart() , "Running test cases..."
# print headlocation(), os.path.dirname(os.path.abspath(__file__))
# file_in=open("ori_vals_values.txt","r")
# Input the 1st line, which is the ori_vals value.
# ori_vals= float(file_in.readline())
# Input the 2nd line, which is the HDF5 output filename.
# new_vals=find_max_disp(file_in.readline());
# file_in.close() | cc0-1.0 |
asm0dey/Flexget | tests/test_rottentomatoes.py | 1 | 2026 | from __future__ import unicode_literals, division, absolute_import
from tests import FlexGetBase
from nose.plugins.attrib import attr
class TestRottenTomatoesLookup(FlexGetBase):
    """Exercise the rottentomatoes_lookup plugin against mocked entries.

    NOTE(review): marked ``online=True`` — the lookup hits the Rotten
    Tomatoes API, so these tests depend on network access and on the remote
    catalogue data (ids/years) staying stable.
    """

    __yaml__ = """
        tasks:
          test:
            mock:
              # tests search
              - {title: 'Toy Story'}
              - {title: 'The Matrix'}
              - {title: 'Star Wars: Episode I - The Phantom Menace (in 3D)'}
              # tests direct id
              - {title: '[Group] Taken 720p', rt_id: 770680780}
              # tests title + year
              - {title: 'Rush.Hour[1998]1080p[Eng]-FOO'}
              # test short title, with repack and without year
              - {title: 'Up.REPACK.720p.Bluray.x264-FlexGet'}
            rottentomatoes_lookup: yes
    """

    @attr(online=True)
    def test_rottentomatoes_lookup(self):
        # Each assertion checks that the lookup populated rt_* fields for
        # one of the mocked entries above (searched by title, by id, by
        # title+year, or by short/repacked title).
        self.execute_task('test')
        # check that these were created
        assert self.task.find_entry(rt_name='Toy Story', rt_year=1995, rt_id=9559, imdb_id='tt0114709'), \
            'Didn\'t populate RT info for Toy Story'
        assert self.task.find_entry(imdb_id='tt0114709'), \
            'Didn\'t populate imdb_id info for Toy Story'
        assert self.task.find_entry(rt_name='The Matrix', rt_year=1999, rt_id=12897, imdb_id='tt0133093'), \
            'Didn\'t populate RT info for The Matrix'
        assert self.task.find_entry(rt_name='Star Wars: Episode I - The Phantom Menace',
                                    rt_year=1999, rt_id=10008), \
            'Didn\'t populate RT info for Star Wars: Episode I - The Phantom Menace (in 3D)'
        assert self.task.find_entry(rt_name='Taken', rt_year=2008, rt_id=770680780), \
            'Didn\'t populate RT info for Taken'
        assert self.task.find_entry(rt_name='Rush Hour', rt_year=1998, rt_id=10201), \
            'Didn\'t populate RT info for Rush Hour'
        assert self.task.find_entry(rt_name='Up', rt_year=2009, rt_id=770671912), \
            'Didn\'t populate RT info for Up'
| mit |
varchild/android_kernel_htc_msm8660 | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
# Python 2 perf-script: set up imports and parse the optional
# [comm] and [interval] command-line arguments.
import os, sys, thread, time

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
	'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from Util import *

usage = "perf script -s sctop.py [comm] [interval]\n";

for_comm = None
default_interval = 3
interval = default_interval

# Argument handling: with two args the first is the command filter and the
# second the refresh interval; with one arg it is the interval if numeric,
# otherwise the command filter.
if len(sys.argv) > 3:
	sys.exit(usage)

if len(sys.argv) > 2:
	for_comm = sys.argv[1]
	interval = int(sys.argv[2])
elif len(sys.argv) > 1:
	try:
		interval = int(sys.argv[1])
	except ValueError:
		for_comm = sys.argv[1]
		interval = default_interval

# Per-syscall-id event counters (autodict auto-creates missing keys).
syscalls = autodict()
def trace_begin():
	# Called by perf before event processing starts: kick off the periodic
	# display loop in a background thread so the main thread can keep
	# consuming events.
	thread.start_new_thread(print_syscall_totals, (interval,))
	pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, args):
	# perf callback for each raw_syscalls:sys_enter event: count one
	# occurrence of syscall `id`, optionally filtered to a single command.
	# (The parameter name `id` shadows the builtin, but the signature is
	# dictated by the perf tracepoint interface.)
	if for_comm is not None:
		if common_comm != for_comm:
			return
	try:
		syscalls[id] += 1
	except TypeError:
		# first hit for this id: the autodict leaf is not yet an int,
		# so initialise the counter instead of incrementing it
		syscalls[id] = 1
def print_syscall_totals(interval):
	# Runs forever in a background thread: every `interval` seconds clear
	# the terminal, print the accumulated per-syscall counts sorted by
	# count (descending), then reset the counters. (Python 2: uses print
	# statements and dict.iteritems/tuple-unpacking lambdas.)
	while 1:
		clear_term()
		if for_comm is not None:
			print "\nsyscall events for %s:\n\n" % (for_comm),
		else:
			print "\nsyscall events:\n\n",
		print "%-40s %10s\n" % ("event", "count"),
		print "%-40s %10s\n" % ("----------------------------------------", \
			"----------"),
		for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
			reverse = True):
			try:
				print "%-40s %10d\n" % (syscall_name(id), val),
			except TypeError:
				# syscall_name can fail for unknown ids; skip the row
				pass
		syscalls.clear()
		time.sleep(interval)
| gpl-2.0 |
mszewczy/odoo | addons/mrp_operations/report/mrp_workorder_analysis.py | 312 | 3336 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp import tools
import openerp.addons.decimal_precision as dp
class mrp_workorder(osv.osv):
    """Read-only work-order reporting model.

    Backed by a SQL view (created in :meth:`init`) that aggregates
    ``mrp_production_workcenter_line`` rows, grouped by planned date,
    product, state, production and work center.
    """
    _name = "mrp.workorder"
    _description = "Work Order Report"
    _auto = False  # no table is created by the ORM; init() builds the view
    _columns = {
        'nbr': fields.integer('# of Lines', readonly=True),  # TDE FIXME master: rename into nbr_lines
        'date': fields.date('Date', readonly=True),
        'product_id': fields.many2one('product.product', 'Product', readonly=True),
        'product_qty': fields.float('Product Qty', digits_compute=dp.get_precision('Product Unit of Measure'), readonly=True),
        'state': fields.selection([('draft','Draft'),('startworking', 'In Progress'),('pause','Pause'),('cancel','Cancelled'),('done','Finished')], 'Status', readonly=True),
        'total_hours': fields.float('Total Hours', readonly=True),
        'total_cycles': fields.float('Total Cycles', readonly=True),
        'delay': fields.float('Delay', readonly=True),
        'production_id': fields.many2one('mrp.production', 'Production', readonly=True),
        'workcenter_id': fields.many2one('mrp.workcenter', 'Work Center', readonly=True)
    }

    def init(self, cr):
        """(Re)create the ``mrp_workorder`` SQL view backing this model.

        NOTE(review): the view also selects ``total_cost``, which has no
        matching entry in ``_columns`` — confirm whether it is still used.
        """
        tools.drop_view_if_exists(cr, 'mrp_workorder')
        cr.execute("""
            create or replace view mrp_workorder as (
                select
                    date(wl.date_planned) as date,
                    min(wl.id) as id,
                    mp.product_id as product_id,
                    sum(wl.hour) as total_hours,
                    avg(wl.delay) as delay,
                    (w.costs_hour*sum(wl.hour)) as total_cost,
                    wl.production_id as production_id,
                    wl.workcenter_id as workcenter_id,
                    sum(wl.cycle) as total_cycles,
                    count(*) as nbr,
                    sum(mp.product_qty) as product_qty,
                    wl.state as state
                from mrp_production_workcenter_line wl
                    left join mrp_workcenter w on (w.id = wl.workcenter_id)
                    left join mrp_production mp on (mp.id = wl.production_id)
                group by
                    w.costs_hour, mp.product_id, mp.name, wl.state, wl.date_planned, wl.production_id, wl.workcenter_id
            )""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kisna72/django | django/template/loader.py | 196 | 6232 | import warnings
from django.utils.deprecation import RemovedInDjango110Warning
from . import engines
from .backends.django import DjangoTemplates
from .engine import (
_context_instance_undefined, _dictionary_undefined, _dirs_undefined,
)
from .exceptions import TemplateDoesNotExist
from .loaders import base
def get_template(template_name, dirs=_dirs_undefined, using=None):
    """
    Load and return the template for the given name.

    Each configured backend is tried in order; raise TemplateDoesNotExist
    (with the per-backend failures chained) if none of them can load it.
    """
    errors = []
    for backend in _engine_list(using):
        try:
            # Deprecation shim for the ``dirs`` argument: only the Django
            # backend accepts it. Simply return
            # backend.get_template(template_name) in Django 1.10.
            if isinstance(backend, DjangoTemplates):
                return backend.get_template(template_name, dirs)
            if dirs is not _dirs_undefined:
                warnings.warn(
                    "Skipping template backend %s because its get_template "
                    "method doesn't support the dirs argument." % backend.name,
                    stacklevel=2)
            else:
                return backend.get_template(template_name)
        except TemplateDoesNotExist as e:
            errors.append(e)

    raise TemplateDoesNotExist(template_name, chain=errors)
def select_template(template_name_list, dirs=_dirs_undefined, using=None):
    """
    Load and return the first template that can be found among the names.

    Names are tried in order against every backend; raise
    TemplateDoesNotExist if no combination succeeds.
    """
    errors = []
    backends = _engine_list(using)
    for candidate in template_name_list:
        for backend in backends:
            try:
                # Deprecation shim for the ``dirs`` argument. Simply use
                # backend.get_template(candidate) in Django 1.10.
                if isinstance(backend, DjangoTemplates):
                    return backend.get_template(candidate, dirs)
                if dirs is not _dirs_undefined:
                    warnings.warn(
                        "Skipping template backend %s because its get_template "
                        "method doesn't support the dirs argument." % backend.name,
                        stacklevel=2)
                else:
                    return backend.get_template(candidate)
            except TemplateDoesNotExist as e:
                errors.append(e)

    if not template_name_list:
        raise TemplateDoesNotExist("No template names provided")
    raise TemplateDoesNotExist(', '.join(template_name_list), chain=errors)
def render_to_string(template_name, context=None,
                     context_instance=_context_instance_undefined,
                     dirs=_dirs_undefined,
                     dictionary=_dictionary_undefined,
                     request=None, using=None):
    """
    Loads a template and renders it with a context. Returns a string.

    template_name may be a string or a list of strings.

    ``context_instance``, ``dirs`` and ``dictionary`` are deprecated
    arguments; passing any of them switches to a legacy code path that only
    the Django template backend fully supports.
    """
    if (context_instance is _context_instance_undefined
            and dirs is _dirs_undefined
            and dictionary is _dictionary_undefined):
        # No deprecated arguments were passed - use the new code path
        if isinstance(template_name, (list, tuple)):
            template = select_template(template_name, using=using)
        else:
            template = get_template(template_name, using=using)
        return template.render(context, request)

    else:
        chain = []  # collects TemplateDoesNotExist from each backend
        # Some deprecated arguments were passed - use the legacy code path
        for engine in _engine_list(using):
            try:
                # This is required for deprecating properly arguments specific
                # to Django templates. Remove Engine.render_to_string() at the
                # same time as this code path in Django 1.10.
                if isinstance(engine, DjangoTemplates):
                    # ``request`` is only honoured by the modern path above.
                    if request is not None:
                        raise ValueError(
                            "render_to_string doesn't support the request argument "
                            "when some deprecated arguments are passed.")
                    # Hack -- use the internal Engine instance of DjangoTemplates.
                    return engine.engine.render_to_string(
                        template_name, context, context_instance, dirs, dictionary)
                elif context_instance is not _context_instance_undefined:
                    warnings.warn(
                        "Skipping template backend %s because its render_to_string "
                        "method doesn't support the context_instance argument." %
                        engine.name, stacklevel=2)
                elif dirs is not _dirs_undefined:
                    warnings.warn(
                        "Skipping template backend %s because its render_to_string "
                        "method doesn't support the dirs argument." % engine.name,
                        stacklevel=2)
                elif dictionary is not _dictionary_undefined:
                    warnings.warn(
                        "Skipping template backend %s because its render_to_string "
                        "method doesn't support the dictionary argument." %
                        engine.name, stacklevel=2)
            except TemplateDoesNotExist as e:
                chain.append(e)
                continue

        if template_name:
            if isinstance(template_name, (list, tuple)):
                template_name = ', '.join(template_name)
            raise TemplateDoesNotExist(template_name, chain=chain)
        else:
            raise TemplateDoesNotExist("No template names provided")
def _engine_list(using=None):
    """Return every configured engine, or just the one named by ``using``."""
    if using is None:
        return engines.all()
    return [engines[using]]
class BaseLoader(base.Loader):
    """Deprecated alias for django.template.loaders.base.Loader.

    Kept only for backwards compatibility; instantiating it emits a
    RemovedInDjango110Warning.
    """
    # Old-style loaders did not take an engine in __init__.
    _accepts_engine_in_init = False

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "django.template.loader.BaseLoader was superseded by "
            "django.template.loaders.base.Loader.",
            RemovedInDjango110Warning, stacklevel=2)
        super(BaseLoader, self).__init__(*args, **kwargs)
| bsd-3-clause |
eark-project/earkweb | taskbackend/ip_state.py | 1 | 8531 | from xml.dom import minidom
from xml.etree.ElementTree import Element, SubElement
from xml.etree import ElementTree as Etree
from eatb.utils.datetime import current_timestamp
from eatb.xml.xmlutils import prettify
from taskbackend.taskconfig import TaskConfig
class IpState(object):
    """
    Persistable processing state of an information package (IP).

    The state is held as an XML document with root element ``ip_state``
    (child elements used here: ``state``, ``locked``, ``last_task``,
    ``identifier``, ``version``, ``lastchange``). The class can be initiated
    by parameters (classmethod ``from_parameters``), by an XML content
    string (classmethod ``from_content``), or by an XML file path
    (classmethod ``from_path``). Furthermore, it provides methods to
    manipulate and/or read element values of the XML document.
    """

    # Raw XML string the instance was created from. Not kept in sync by the
    # setters; use get_updated_doc_content() for the current serialisation.
    doc_content = None
    # Parsed XML root element (the working representation).
    ted = None
    # Optional filesystem path associated with the document.
    doc_path = None

    def __init__(self, doc_content, ted):
        """
        @type doc_content: str
        @param doc_content: XML document content
        @type ted: xml.etree.ElementTree.Element
        @param ted: parsed XML root element
        """
        self.doc_content = doc_content
        self.ted = ted

    @classmethod
    def from_content(cls, doc_content):
        """
        Alternative constructor (initialise from content string)
        @type doc_content: str
        @param doc_content: XML document content
        @rtype: IpState
        @return: IpState object
        """
        ted = Etree.fromstring(doc_content)
        return cls(doc_content, ted)

    @classmethod
    def from_path(cls, xml_file_path):
        """
        Alternative constructor (initialise from xml file)
        @type xml_file_path: str
        @param xml_file_path: path to an XML file
        @rtype: IpState
        @return: IpState object
        """
        with open(xml_file_path, 'r') as xml_file:
            doc_content = xml_file.read()
        ted = Etree.fromstring(doc_content)
        return cls(doc_content, ted)

    @classmethod
    def from_parameters(cls, state=-1, locked_val=False, last_task_value='None'):
        """
        Alternative constructor (initialise from parameters)
        @type state: int
        @param state: state value
        @type locked_val: bool
        @param locked_val: locked flag
        @type last_task_value: str
        @param last_task_value: name of the last executed task
        @rtype: IpState
        @return: IpState object
        """
        doc_content = prettify(cls.create_task_execution_doc(state, locked_val, last_task_value))
        ted = Etree.fromstring(doc_content)
        return cls(doc_content, ted)

    @classmethod
    def create_task_execution_doc(cls, state_val=-1, locked_val=False, last_task_value='None'):
        """
        Create a minimal ``ip_state`` document from parameters.
        @type state_val: int
        @param state_val: state value
        @type locked_val: bool
        @param locked_val: locked flag
        @type last_task_value: str
        @param last_task_value: name of the last executed task
        @rtype: xml.etree.ElementTree.Element
        @return: task execution document
        """
        ip_state = Element('ip_state')
        state_elm = SubElement(ip_state, 'state')
        state_elm.text = str(state_val)
        locked_elm = SubElement(ip_state, 'locked')
        locked_elm.text = str(locked_val)
        last_task_elm = SubElement(ip_state, 'last_task')
        last_task_elm.text = last_task_value
        return ip_state

    # -- internal helpers shared by the element getters/setters ------------

    def _get_text(self, tag, default):
        """Return the text of the first ``tag`` element, or ``default`` if
        the element does not exist."""
        elm = self.ted.find('.//%s' % tag)
        return default if elm is None else elm.text

    def _find_or_create(self, tag):
        """Return the first ``tag`` element, creating it under the root
        element if it does not exist yet."""
        elm = self.ted.find('.//%s' % tag)
        if elm is None:
            elm = SubElement(self.ted, tag)
        return elm

    # -- element accessors --------------------------------------------------

    def get_last_task(self):
        """
        Get last task ('None' if the element is missing)
        @rtype: str
        @return: last task
        """
        return self._get_text('last_task', 'None')

    def set_last_task(self, last_task_value):
        """
        Set last task
        @type last_task_value: str
        @param last_task_value: last task
        """
        self._find_or_create('last_task').text = last_task_value

    def get_identifier(self):
        """
        Get identifier ('None' if the element is missing)
        @rtype: str
        @return: identifier
        """
        return self._get_text('identifier', 'None')

    def set_identifier(self, identifier_value):
        """
        Set identifier
        @type identifier_value: str
        @param identifier_value: identifier
        """
        self._find_or_create('identifier').text = identifier_value

    def get_version(self):
        """
        Get version ('00000' if the element is missing)
        @rtype: str
        @return: version
        """
        return self._get_text('version', '00000')

    def set_version(self, version_value):
        """
        Set version
        @type version_value: str
        @param version_value: version
        """
        self._find_or_create('version').text = version_value

    def get_doc_path(self):
        """
        Get document path
        @rtype: str
        @return: document path
        """
        return self.doc_path

    def set_doc_path(self, doc_path):
        """
        Set document path
        @type doc_path: str
        @param doc_path: document path
        """
        self.doc_path = doc_path

    def get_state(self):
        """
        Get state value. Raises AttributeError if the element is missing
        (a valid document always contains it).
        @rtype: int
        @return: state value
        """
        return int(self.ted.find('.//state').text)

    def set_state(self, state_value):
        """
        Set state value
        @type state_value: int
        @param state_value: state value
        """
        self._find_or_create('state').text = str(state_value)

    def get_locked(self):
        """
        Get locked value. Raises AttributeError if the element is missing
        (a valid document always contains it).
        @rtype: bool
        @return: locked value
        """
        return self.ted.find('.//locked').text == "True"

    def set_locked(self, locked_value):
        """
        Set locked value
        @type locked_value: bool
        @param locked_value: locked (True/False)
        """
        self._find_or_create('locked').text = str(locked_value)

    def get_lastchange(self):
        """
        Get lastchange value ("" if the element is missing).
        @rtype: str
        @return: lastchange value (timestamp)
        """
        return self._get_text('lastchange', "")

    def set_lastchange(self, lastchange_value):
        """
        Set lastchange value
        @type lastchange_value: str
        @param lastchange_value: lastchange (timestamp)
        """
        self._find_or_create('lastchange').text = str(lastchange_value)

    # -- serialisation ------------------------------------------------------

    def get_updated_doc_content(self):
        """
        Get updated document content (serialised from the current tree)
        @rtype: bytes
        @return: Updated XML document content (UTF-8 encoded, with
            XML declaration)
        """
        return Etree.tostring(self.ted, encoding='UTF-8')

    def write_doc(self, xml_file_path):
        """
        Write the (pretty-printed) document to a file, updating the
        ``lastchange`` timestamp first.
        @type xml_file_path: str
        @param xml_file_path: XML file path
        """
        # update timestamp before persisting
        self.set_lastchange(current_timestamp())
        xmlstr = minidom.parseString(Etree.tostring(self.ted)).toprettyxml(indent="\t", newl="\n", encoding="UTF-8")
        with open(xml_file_path, 'w') as output_file:
            output_file.write(xmlstr.decode("utf-8"))
if __name__ == "__main__":
    # Ad-hoc manual smoke test exercising the three constructors and the
    # setters; writes a scratch file to /tmp/test.xml.
    # from parameters
    print("from parameters")
    ted_fp = IpState.from_parameters(200, True)
    print(ted_fp.get_state())
    print("\n")
    # from example document
    print("from example document")
    example_doc_content = """<?xml version="1.0" ?>
<ip_state>
    <state>700</state>
</ip_state>"""
    ted_fc = IpState.from_content(example_doc_content)
    print(ted_fc.get_state())
    ted_fc.write_doc("/tmp/test.xml")
    print("\n")
    # from file path (reads back the file written above)
    print("from file path")
    ted_fc = IpState.from_path("/tmp/test.xml")
    print(ted_fc.get_state())
    ip_state = IpState.from_parameters(50, True)
    print(ip_state.get_updated_doc_content())
    print("state: %d" % ip_state.get_state())
    print("\n")
    ip_state.set_state(100)
    print(ip_state.get_updated_doc_content())
    ip_state.set_locked(True)
    print(ip_state.get_updated_doc_content())
| gpl-3.0 |
pranner/CMPUT410-Lab6-Django | v1/lib/python2.7/site-packages/django/core/management/commands/startproject.py | 78 | 1264 | from importlib import import_module
from django.core.management.base import CommandError
from django.core.management.templates import TemplateCommand
from django.utils.crypto import get_random_string
class Command(TemplateCommand):
    """``startproject`` management command: renders the project template."""

    help = ("Creates a Django project directory structure for the given "
            "project name in the current directory or optionally in the "
            "given directory.")

    def handle(self, project_name=None, target=None, *args, **options):
        self.validate_name(project_name, "project")

        # The project name must not shadow a module that is already
        # importable on this Python path.
        name_taken = True
        try:
            import_module(project_name)
        except ImportError:
            name_taken = False
        if name_taken:
            raise CommandError("%r conflicts with the name of an existing "
                               "Python module and cannot be used as a "
                               "project name. Please try another name." %
                               project_name)

        # Seed the generated settings.py with a random SECRET_KEY.
        secret_chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
        options['secret_key'] = get_random_string(50, secret_chars)

        super(Command, self).handle('project', project_name, target, **options)
| apache-2.0 |
nrwahl2/ansible | lib/ansible/modules/system/sysctl.py | 17 | 13769 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, David "DaviXX" CHANIAL <david.chanial@gmail.com>
# (c) 2014, James Tanner <tanner.jc@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: sysctl
short_description: Manage entries in sysctl.conf.
description:
- This module manipulates sysctl entries and optionally performs a C(/sbin/sysctl -p) after changing them.
version_added: "1.0"
options:
name:
description:
- The dot-separated path (aka I(key)) specifying the sysctl variable.
required: true
default: null
aliases: [ 'key' ]
value:
description:
- Desired value of the sysctl key.
required: false
default: null
aliases: [ 'val' ]
state:
description:
- Whether the entry should be present or absent in the sysctl file.
choices: [ "present", "absent" ]
default: present
ignoreerrors:
description:
- Use this option to ignore errors about unknown keys.
choices: [ "yes", "no" ]
default: no
reload:
description:
- If C(yes), performs a I(/sbin/sysctl -p) if the C(sysctl_file) is
updated. If C(no), does not reload I(sysctl) even if the
C(sysctl_file) is updated.
choices: [ "yes", "no" ]
default: "yes"
sysctl_file:
description:
- Specifies the absolute path to C(sysctl.conf), if not C(/etc/sysctl.conf).
required: false
default: /etc/sysctl.conf
sysctl_set:
description:
- Verify token value with the sysctl command and set with -w if necessary
choices: [ "yes", "no" ]
required: false
version_added: 1.5
default: False
notes: []
requirements: []
author: "David CHANIAL (@davixx) <david.chanial@gmail.com>"
'''
EXAMPLES = '''
# Set vm.swappiness to 5 in /etc/sysctl.conf
- sysctl:
name: vm.swappiness
value: 5
state: present
# Remove kernel.panic entry from /etc/sysctl.conf
- sysctl:
name: kernel.panic
state: absent
sysctl_file: /etc/sysctl.conf
# Set kernel.panic to 3 in /tmp/test_sysctl.conf
- sysctl:
name: kernel.panic
value: 3
sysctl_file: /tmp/test_sysctl.conf
reload: no
# Set ip forwarding on in /proc and do not reload the sysctl file
- sysctl:
name: net.ipv4.ip_forward
value: 1
sysctl_set: yes
# Set ip forwarding on in /proc and in the sysctl file and reload if necessary
- sysctl:
name: net.ipv4.ip_forward
value: 1
sysctl_set: yes
state: present
reload: yes
'''
# ==============================================================
import os
import tempfile
from ansible.module_utils.basic import get_platform, AnsibleModule
from ansible.module_utils.six import string_types
from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE
from ansible.module_utils._text import to_native
class SysctlModule(object):
def __init__(self, module):
self.module = module
self.args = self.module.params
self.sysctl_cmd = self.module.get_bin_path('sysctl', required=True)
self.sysctl_file = self.args['sysctl_file']
self.proc_value = None # current token value in proc fs
self.file_value = None # current token value in file
self.file_lines = [] # all lines in the file
self.file_values = {} # dict of token values
self.changed = False # will change occur
self.set_proc = False # does sysctl need to set value
self.write_file = False # does the sysctl file need to be reloaded
self.process()
# ==============================================================
# LOGIC
# ==============================================================
def process(self):
self.platform = get_platform().lower()
# Whitespace is bad
self.args['name'] = self.args['name'].strip()
self.args['value'] = self._parse_value(self.args['value'])
thisname = self.args['name']
# get the current proc fs value
self.proc_value = self.get_token_curr_value(thisname)
# get the currect sysctl file value
self.read_sysctl_file()
if thisname not in self.file_values:
self.file_values[thisname] = None
# update file contents with desired token/value
self.fix_lines()
# what do we need to do now?
if self.file_values[thisname] is None and self.args['state'] == "present":
self.changed = True
self.write_file = True
elif self.file_values[thisname] is None and self.args['state'] == "absent":
self.changed = False
elif self.file_values[thisname] != self.args['value']:
self.changed = True
self.write_file = True
# use the sysctl command or not?
if self.args['sysctl_set']:
if self.proc_value is None:
self.changed = True
elif not self._values_is_equal(self.proc_value, self.args['value']):
self.changed = True
self.set_proc = True
# Do the work
if not self.module.check_mode:
if self.write_file:
self.write_sysctl()
if self.write_file and self.args['reload']:
self.reload_sysctl()
if self.set_proc:
self.set_token_value(self.args['name'], self.args['value'])
def _values_is_equal(self, a, b):
"""Expects two string values. It will split the string by whitespace
and compare each value. It will return True if both lists are the same,
contain the same elements and the same order."""
if a is None or b is None:
return False
a = a.split()
b = b.split()
if len(a) != len(b):
return False
return len([i for i, j in zip(a, b) if i == j]) == len(a)
def _parse_value(self, value):
if value is None:
return ''
elif isinstance(value, bool):
if value:
return '1'
else:
return '0'
elif isinstance(value, string_types):
if value.lower() in BOOLEANS_TRUE:
return '1'
elif value.lower() in BOOLEANS_FALSE:
return '0'
else:
return value.strip()
else:
return value
# ==============================================================
# SYSCTL COMMAND MANAGEMENT
# ==============================================================
# Use the sysctl command to find the current value
def get_token_curr_value(self, token):
if self.platform == 'openbsd':
# openbsd doesn't support -e, just drop it
thiscmd = "%s -n %s" % (self.sysctl_cmd, token)
else:
thiscmd = "%s -e -n %s" % (self.sysctl_cmd, token)
rc, out, err = self.module.run_command(thiscmd)
if rc != 0:
return None
else:
return out
# Use the sysctl command to set the current value
def set_token_value(self, token, value):
if len(value.split()) > 0:
value = '"' + value + '"'
if self.platform == 'openbsd':
# openbsd doesn't accept -w, but since it's not needed, just drop it
thiscmd = "%s %s=%s" % (self.sysctl_cmd, token, value)
elif self.platform == 'freebsd':
ignore_missing = ''
if self.args['ignoreerrors']:
ignore_missing = '-i'
# freebsd doesn't accept -w, but since it's not needed, just drop it
thiscmd = "%s %s %s=%s" % (self.sysctl_cmd, ignore_missing, token, value)
else:
ignore_missing = ''
if self.args['ignoreerrors']:
ignore_missing = '-e'
thiscmd = "%s %s -w %s=%s" % (self.sysctl_cmd, ignore_missing, token, value)
rc, out, err = self.module.run_command(thiscmd)
if rc != 0:
self.module.fail_json(msg='setting %s failed: %s' % (token, out + err))
else:
return rc
# Run sysctl -p
def reload_sysctl(self):
# do it
if self.platform == 'freebsd':
# freebsd doesn't support -p, so reload the sysctl service
rc, out, err = self.module.run_command('/etc/rc.d/sysctl reload')
elif self.platform == 'openbsd':
# openbsd doesn't support -p and doesn't have a sysctl service,
# so we have to set every value with its own sysctl call
for k, v in self.file_values.items():
rc = 0
if k != self.args['name']:
rc = self.set_token_value(k, v)
if rc != 0:
break
if rc == 0 and self.args['state'] == "present":
rc = self.set_token_value(self.args['name'], self.args['value'])
else:
# system supports reloading via the -p flag to sysctl, so we'll use that
sysctl_args = [self.sysctl_cmd, '-p', self.sysctl_file]
if self.args['ignoreerrors']:
sysctl_args.insert(1, '-e')
rc, out, err = self.module.run_command(sysctl_args)
if rc != 0:
self.module.fail_json(msg="Failed to reload sysctl: %s" % str(out) + str(err))
# ==============================================================
# SYSCTL FILE MANAGEMENT
# ==============================================================
# Get the token value from the sysctl file
def read_sysctl_file(self):
lines = []
if os.path.isfile(self.sysctl_file):
try:
with open(self.sysctl_file, "r") as read_file:
lines = read_file.readlines()
except IOError as e:
self.module.fail_json(msg="Failed to open %s: %s" % (self.sysctl_file, to_native(e)))
for line in lines:
line = line.strip()
self.file_lines.append(line)
# don't split empty lines or comments or line without equal sign
if not line or line.startswith(("#", ";")) or "=" not in line:
continue
k, v = line.split('=', 1)
k = k.strip()
v = v.strip()
self.file_values[k] = v.strip()
# Fix the value in the sysctl file content
def fix_lines(self):
checked = []
self.fixed_lines = []
for line in self.file_lines:
if not line.strip() or line.strip().startswith(("#", ";")) or "=" not in line:
self.fixed_lines.append(line)
continue
tmpline = line.strip()
k, v = tmpline.split('=', 1)
k = k.strip()
v = v.strip()
if k not in checked:
checked.append(k)
if k == self.args['name']:
if self.args['state'] == "present":
new_line = "%s=%s\n" % (k, self.args['value'])
self.fixed_lines.append(new_line)
else:
new_line = "%s=%s\n" % (k, v)
self.fixed_lines.append(new_line)
if self.args['name'] not in checked and self.args['state'] == "present":
new_line = "%s=%s\n" % (self.args['name'], self.args['value'])
self.fixed_lines.append(new_line)
# Completely rewrite the sysctl file
def write_sysctl(self):
    """Atomically rewrite the sysctl file from self.fixed_lines.

    Content is written to a temporary file in the same directory and then
    moved over the real file via module.atomic_move, so readers never see
    a partially written file.
    """
    fd, tmp_path = tempfile.mkstemp('.conf', '.ansible_m_sysctl_', os.path.dirname(self.sysctl_file))
    try:
        # Reuse the descriptor returned by mkstemp instead of reopening the
        # path by name: the original code leaked this fd.  The with-block
        # also guarantees flush/close on every path.
        with os.fdopen(fd, "w") as f:
            for line in self.fixed_lines:
                f.write(line.strip() + "\n")
    except IOError as e:
        self.module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, to_native(e)))
    # replace the real one
    self.module.atomic_move(tmp_path, self.sysctl_file)
# ==============================================================
# main
def main():
    """Module entry point: declare the argument spec, validate the
    parameters and hand control to SysctlModule."""
    # defining module
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(aliases=['key'], required=True),
            value=dict(aliases=['val'], required=False, type='str'),
            state=dict(default='present', choices=['present', 'absent']),
            reload=dict(default=True, type='bool'),
            sysctl_set=dict(default=False, type='bool'),
            ignoreerrors=dict(default=False, type='bool'),
            sysctl_file=dict(default='/etc/sysctl.conf', type='path')
        ),
        supports_check_mode=True,
        required_if=[('state', 'present', ['value'])],
    )

    # Belt-and-braces validation: 'required'/'required_if' above should
    # already reject these, but the explicit checks keep the error messages.
    if module.params['name'] is None:
        module.fail_json(msg="name can not be None")
    if module.params['state'] == 'present' and module.params['value'] is None:
        module.fail_json(msg="value can not be None")

    # In case of in-line params
    if module.params['name'] == '':
        module.fail_json(msg="name can not be blank")
    if module.params['state'] == 'present' and module.params['value'] == '':
        module.fail_json(msg="value can not be blank")

    # SysctlModule performs all the work in its constructor and records
    # whether anything changed.
    result = SysctlModule(module)

    module.exit_json(changed=result.changed)
# Run the module only when executed directly (not when imported).
if __name__ == '__main__':
    main()
| gpl-3.0 |
fontenele/scrapy | scrapy/spiderloader.py | 117 | 1622 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from zope.interface import implementer
from scrapy.interfaces import ISpiderLoader
from scrapy.utils.misc import walk_modules
from scrapy.utils.spider import iter_spider_classes
@implementer(ISpiderLoader)
class SpiderLoader(object):
    """
    Locate and load all spiders declared in a Scrapy project.

    Spiders are discovered once, at construction time, by walking every
    module listed in the SPIDER_MODULES setting.
    """

    def __init__(self, settings):
        self.spider_modules = settings.getlist('SPIDER_MODULES')
        self._spiders = {}
        for module_path in self.spider_modules:
            for mod in walk_modules(module_path):
                self._load_spiders(mod)

    def _load_spiders(self, module):
        # Register every spider class found in the module, keyed by its name.
        for spider_cls in iter_spider_classes(module):
            self._spiders[spider_cls.name] = spider_cls

    @classmethod
    def from_settings(cls, settings):
        return cls(settings)

    def load(self, spider_name):
        """
        Return the Spider class registered under *spider_name*.
        Raise a KeyError when no such spider exists.
        """
        if spider_name not in self._spiders:
            raise KeyError("Spider not found: {}".format(spider_name))
        return self._spiders[spider_name]

    def find_by_request(self, request):
        """
        Return the list of spider names that can handle the given request.
        """
        matching = []
        for name, cls in self._spiders.items():
            if cls.handles_request(request):
                matching.append(name)
        return matching

    def list(self):
        """
        Return a list with the names of all spiders available in the project.
        """
        return [name for name in self._spiders]
| bsd-3-clause |
LaurenceBeard/workrave | common/bin/dbusgen.py | 8 | 16555 | #!/usr/bin/python
#
# Copyright (C) 2007, 2008, 2009, 2011 Rob Caelers <robc@krandor.nl>
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
"""
DBUS C++ binding generator
"""
import re
import string
import sys
import os
import xml
from Cheetah.Template import Template
from optparse import OptionParser
from xml.dom.minidom import parse
class NodeBase(object):
    """Common base class for all nodes of the parsed DBus introspection tree."""
    pass
class ArgNode(NodeBase):
    """A method/signal argument or struct field."""

    def __init__(self, interface_node):
        NodeBase.__init__(self)
        # owning interface, used to resolve type names to DBus signatures
        self.interface_node = interface_node
        self.name = ''
        self.type = ''
        self.ext_type = ''
        self.direction = ''
        self.hint = []

    def sig(self):
        """Return the DBus signature of this argument's external type."""
        return self.interface_node.type2sig(self.ext_type)
class DefaultTypeNode(NodeBase):
    """A builtin scalar type with a fixed C symbol and DBus signature."""

    def __init__(self, csymbol, type_sig):
        NodeBase.__init__(self)
        self.csymbol = csymbol
        self.type_sig = type_sig

    def sig(self):
        """Return the fixed DBus signature of this type."""
        return self.type_sig
class TopNode(NodeBase):
    """Root of the parsed introspection file; collects all interfaces."""

    def __init__(self, name):
        NodeBase.__init__(self)
        # path of the XML file to parse
        self.file_name = name
        self.name = None
        self.interfaces = []

    def parse(self):
        """Parse the XML file and process every <unit> element."""
        dom = parse(self.file_name)
        nodelist = dom.getElementsByTagName('unit')
        for node in nodelist:
            self.handle_node(node)

    def handle_node(self, node):
        """Read the unit name and build an InterfaceNode per <interface> child."""
        self.name = node.getAttribute('name')
        nodelist = node.getElementsByTagName('interface')
        for child in nodelist:
            p = InterfaceNode(self)
            p.handle(child)
            self.interfaces.append(p)
class InterfaceNode(NodeBase):
    """A single DBus interface: its methods, signals and named types.

    Maintains a registry (self.types) mapping type names to the node that
    can produce their C symbol and DBus signature.
    """

    def __init__(self, parent):
        NodeBase.__init__(self)
        self.parent = parent
        self.types = {}
        self.name = None
        self.csymbol = None
        self.qname = None
        self.methods = []
        self.signals = []
        self.structs = []
        self.sequences = []
        self.dictionaries = []
        self.enums = []
        self.imports = []
        # seed the registry with the builtin scalar types
        self.add_default_types()

    def handle(self, node):
        """Populate the interface from an <interface> DOM element."""
        self.name = node.getAttribute('name')
        self.csymbol = node.getAttribute('csymbol')
        self.qname = self.name.replace('.', '_')
        self.condition = node.getAttribute('condition')
        # dispatch each child element to the node class that parses it
        for child in node.childNodes:
            if child.nodeType == node.ELEMENT_NODE:
                if child.nodeName == 'method':
                    p = MethodNode(self)
                    p.handle(child)
                    self.methods.append(p)
                elif child.nodeName == 'signal':
                    p = SignalNode(self)
                    p.handle(child)
                    self.signals.append(p)
                elif child.nodeName == 'struct':
                    p = StructNode(self)
                    p.handle(child)
                    self.structs.append(p)
                elif child.nodeName == 'sequence':
                    p = SequenceNode(self)
                    p.handle(child)
                    self.sequences.append(p)
                elif child.nodeName == 'dictionary':
                    p = DictionaryNode(self)
                    p.handle(child)
                    self.dictionaries.append(p)
                elif child.nodeName == 'enum':
                    p = EnumNode(self)
                    p.handle(child)
                    self.enums.append(p)
                elif child.nodeName == 'import':
                    p = ImportNode(self)
                    p.handle(child)
                    self.imports.append(p)
                elif child.nodeName == 'type':
                    p = TypeNode(self)
                    p.handle(child)

    def add_default_types(self):
        """Register the builtin scalar types with their C symbols and signatures."""
        self.types['void'] = DefaultTypeNode('void', 'i')
        self.types['int'] = DefaultTypeNode('int', 'i')
        self.types['uint8'] = DefaultTypeNode('guint8', 'y')
        self.types['int16'] = DefaultTypeNode('gint16', 'n')
        self.types['uint16'] = DefaultTypeNode('guint16', 'q')
        self.types['int32'] = DefaultTypeNode('gint32', 'i')
        self.types['uint32'] = DefaultTypeNode('guint32', 'u')
        self.types['int64'] = DefaultTypeNode('gint64', 'x')
        self.types['uint64'] = DefaultTypeNode('guint64', 't')
        self.types['string'] = DefaultTypeNode('std::string', 's')
        self.types['bool'] = DefaultTypeNode('bool', 'b')
        self.types['double'] = DefaultTypeNode('double', 'd')

    def type2csymbol(self, type):
        """Return the C/C++ symbol of a named type; abort on unknown types."""
        if type in self.types:
            return self.types[type].csymbol
        # print() call form is valid in both Python 2 and 3; the original
        # used a Python-2-only print statement
        print('C type of type ' + type + ' unknown')
        sys.exit(1)

    def type2sig(self, type):
        """Return the DBus signature of a named type; abort on unknown types."""
        if type in self.types:
            return self.types[type].sig()
        print('Signature of type ' + type + ' unknown')
        sys.exit(1)
class MethodNode(NodeBase):
    """A DBus method: name, C symbol and its typed in/out arguments."""

    def __init__(self, parent):
        NodeBase.__init__(self)
        self.parent = parent
        self.name = None
        self.csymbol = None
        self.qname = None
        self.condition = ""
        self.params = []
        self.num_in_args = 0
        self.num_out_args = 0

    def handle(self, node):
        """Populate the method from a <method> DOM element."""
        self.name = node.getAttribute('name')
        self.csymbol = node.getAttribute('csymbol')
        self.qname = self.name.replace('.', '_')
        self.condition = node.getAttribute('condition')
        for child in node.childNodes:
            if child.nodeType == node.ELEMENT_NODE:
                if child.nodeName == 'arg':
                    self.handle_arg(child)

    def handle_arg(self, node):
        """Parse one <arg> element and append it to self.params."""
        p = ArgNode(self.parent)
        p.name = node.getAttribute('name')
        p.type = node.getAttribute('type')
        p.ext_type = node.getAttribute('ext_type')
        p.direction = node.getAttribute('direction')
        p.bind = node.getAttribute('bind')
        if p.ext_type == '':
            # fall back to the plain type when no external type is given
            p.ext_type = p.type
        if p.direction == 'in':
            self.num_in_args = self.num_in_args + 1
        if p.direction == 'out':
            self.num_out_args = self.num_out_args + 1
        hint = node.getAttribute('hint')
        if hint is not None and hint != '':
            p.hint = hint.split(',')
        self.params.append(p)

    def introspect_sig(self):
        """Return the introspection signature: direction\\0sig\\0name\\0 per argument."""
        method_sig = ''
        for p in self.params:
            if p.direction != 'bind':
                param_sig = self.parent.type2sig(p.ext_type)
                method_sig = method_sig + '%s\\0%s\\0%s\\0' % (p.direction, param_sig, p.name)
        return method_sig

    def sig(self):
        # Was a verbatim duplicate of introspect_sig(); kept as a separate
        # entry point because the templates reference both names.
        return self.introspect_sig()

    def sig_of_type(self, type):
        """Return the parenthesised signature of all arguments with the given direction."""
        method_sig = ''
        for p in self.params:
            if p.direction == type:
                method_sig = method_sig + self.parent.type2sig(p.ext_type)
        return '(' + method_sig + ')'

    def return_type(self):
        """Return the type of the argument hinted as the return value, or 'void'."""
        ret = 'void'
        for p in self.params:
            if 'return' in p.hint:
                ret = p.type
        return ret

    def return_name(self):
        """Return the name of the argument hinted as the return value, or 'ret'."""
        ret = 'ret'
        for p in self.params:
            if 'return' in p.hint:
                ret = p.name
        return ret
class SignalNode(NodeBase):
    """A DBus signal: name, C symbol and its typed arguments."""

    def __init__(self, parent):
        NodeBase.__init__(self)
        self.parent = parent

    def handle(self, node):
        """Populate the signal from a <signal> DOM element."""
        self.name = node.getAttribute('name')
        self.csymbol = node.getAttribute('csymbol')
        self.qname = self.name.replace('.','_')
        self.params = []
        for child in node.childNodes:
            if child.nodeType == node.ELEMENT_NODE:
                if child.nodeName == 'arg':
                    self.handle_arg(child)

    def handle_arg(self, node):
        """Parse one <arg> element and append it to self.params."""
        p = ArgNode(self.parent)
        p.name = node.getAttribute('name')
        p.type = node.getAttribute('type')
        p.ext_type = node.getAttribute('ext_type')
        # fall back to the plain type when no external type is given
        if p.ext_type == '':
            p.ext_type = p.type;
        hint = node.getAttribute('hint')
        if hint != None and hint != '':
            p.hint = hint.split(',')
        self.params.append(p)

    def introspect_sig(self):
        """Return the introspection signature: sig\\0name\\0 per argument."""
        method_sig = ''
        for p in self.params:
            param_sig = self.parent.type2sig(p.ext_type)
            method_sig = method_sig + '%s\\0%s\\0' % (param_sig, p.name)
        return method_sig

    def sig(self):
        """Return the parenthesised DBus signature of all arguments."""
        method_sig = ''
        for p in self.params:
            param_sig = self.parent.type2sig(p.ext_type)
            method_sig = method_sig + param_sig
        return '(' + method_sig + ')'

    def return_type(self):
        """Return the type of the argument hinted as the return value, or 'void'."""
        ret = 'void'
        for p in self.params:
            if 'return' in p.hint:
                ret = p.type
        return ret

    def return_name(self):
        """Return the name of the argument hinted as the return value, or 'ret'."""
        ret = 'ret'
        for p in self.params:
            if 'return' in p.hint:
                ret = p.name
        return ret
class StructNode(NodeBase):
    """A <struct>: a fixed sequence of typed fields."""

    def __init__(self, parent):
        NodeBase.__init__(self)
        self.parent = parent

    def handle(self, node):
        """Populate the struct from a <struct> DOM element and register it."""
        self.name = node.getAttribute('name')
        self.csymbol = node.getAttribute('csymbol')
        self.qname = self.name.replace('.','_')
        self.fields = []
        for child in node.childNodes:
            if child.nodeType == node.ELEMENT_NODE:
                if child.nodeName == 'field':
                    self.handle_field(child)
        self.parent.types[self.name] = self

    def handle_field(self, node):
        """Parse one <field> element into an ArgNode."""
        arg = ArgNode(self.parent)
        arg.name = node.getAttribute('name')
        arg.type = node.getAttribute('type')
        arg.ext_type = node.getAttribute('ext_type')
        # fall back to the plain type when no external type is given
        if arg.ext_type == '':
            arg.ext_type = arg.type;
        self.fields.append(arg)

    def sig(self):
        """Return the parenthesised DBus struct signature of all fields."""
        struct_sig = ''
        for f in self.fields:
            field_sig = self.parent.type2sig(f.ext_type)
            struct_sig = struct_sig + field_sig
        return '(' + struct_sig + ')'
class SequenceNode(NodeBase):
    """A <sequence>: a homogeneous container type (DBus array)."""

    def __init__(self, parent):
        NodeBase.__init__(self)
        self.parent = parent

    def handle(self, node):
        """Populate the sequence from a <sequence> DOM element and register it."""
        self.name = node.getAttribute('name')
        self.csymbol = node.getAttribute('csymbol')
        self.qname = self.name.replace('.','_')
        self.container_type = node.getAttribute('container')
        self.data_type = node.getAttribute('type')
        self.parent.types[self.name] = self

    def sig(self):
        """Return the DBus array signature: 'a' followed by the element signature."""
        return 'a' + self.parent.type2sig(self.data_type)
class DictionaryNode(NodeBase):
    """A <dictionary>: a key/value map type."""

    def __init__(self, parent):
        NodeBase.__init__(self)
        self.parent = parent

    def handle(self, node):
        """Populate the dictionary from a <dictionary> DOM element and register it."""
        self.name = node.getAttribute('name')
        self.csymbol = node.getAttribute('csymbol')
        self.qname = self.name.replace('.','_')
        self.key_type = node.getAttribute('key_type')
        self.value_type = node.getAttribute('value_type')
        if self.csymbol == '':
            # default the C symbol to a std::map of the key/value C types
            self.csymbol = 'std::map<%s,%s>' % ( self.parent.type2csymbol(self.key_type),
                                                 self.parent.type2csymbol(self.value_type))
        self.parent.types[self.name] = self

    def sig(self):
        # NOTE(review): standard DBus dictionary signatures use 'a{kv}';
        # presumably the 'e{kv}' form produced here is rewritten by the
        # templates that consume it -- confirm.
        return 'e{' + \
            self.parent.type2sig(self.key_type) + \
            self.parent.type2sig(self.value_type) + '}'
class EnumNode(NodeBase):
    """An <enum>: a named set of constants, transported over DBus as a string."""

    def __init__(self, parent):
        NodeBase.__init__(self)
        self.parent = parent
        # last explicit numeric value seen in a <value> element
        self.count = 0

    def handle(self, node):
        """Populate the enum from an <enum> DOM element and register it."""
        self.name = node.getAttribute('name')
        self.csymbol = node.getAttribute('csymbol')
        self.qname = self.name.replace('.','_')
        self.values = []
        for child in node.childNodes:
            if child.nodeType == node.ELEMENT_NODE:
                if child.nodeName == 'value':
                    self.handle_value(child)
        self.parent.types[self.name] = self

    def handle_value(self, node):
        """Parse one <value> element into an ArgNode with a numeric value.

        NOTE(review): self.count is only updated when an explicit 'value'
        attribute is present and is never incremented, so consecutive
        values without an explicit attribute all get the same number --
        confirm whether auto-increment was intended.
        """
        arg = ArgNode(self.parent)
        val = node.getAttribute('value')
        if val != '':
            self.count = int(val)
        arg.name = node.getAttribute('name')
        arg.csymbol = node.getAttribute('csymbol')
        arg.value = self.count
        self.values.append(arg)

    def sig(self):
        """Enums travel over DBus as strings."""
        return 's'
class TypeNode(NodeBase):
    """An opaque named type declared with <type>; it has no derivable signature."""

    def __init__(self, parent):
        NodeBase.__init__(self)
        self.parent = parent
        self.count = 0

    def handle(self, node):
        """Register the type under its name in the owning interface."""
        self.name = node.getAttribute('name')
        self.csymbol = node.getAttribute('csymbol')
        self.qname = self.name.replace('.', '_')
        self.parent.types[self.name] = self

    def sig(self):
        # Opaque types cannot appear in a signature: report and abort.
        # print() call form is valid in both Python 2 and 3; the original
        # used a Python-2-only print statement.
        print('Signature of type ' + self.name + ' unknown')
        sys.exit(1)
class ImportNode(NodeBase):
    """An <import>: extra includes and namespaces for the generated code."""

    def __init__(self, parent):
        NodeBase.__init__(self)
        self.parent = parent
        self.includes = []
        self.namespaces = []

    def handle(self, node):
        """Collect <include> and <namespace> children of an <import> element."""
        for child in node.childNodes:
            if child.nodeType == node.ELEMENT_NODE:
                if child.nodeName == 'include':
                    self.handle_include(child)
                elif child.nodeName == 'namespace':
                    self.handle_namespace(child)

    def handle_include(self, node):
        self.includes.append(node.getAttribute('name'))

    def handle_namespace(self, node):
        self.namespaces.append(node.getAttribute('name'))
# Main program
if __name__ == '__main__':
    # Parse command-line options: target language, GIO flavour, and whether
    # to emit client and/or server stubs.
    usage = "usage: %prog [options] <introspect.xml>"
    parser = OptionParser(usage=usage)
    parser.add_option("-l", "--language",
                      dest="language",
                      help="Generate stubs for this language")
    parser.add_option("-g", "--gio",
                      action="store_true", dest="gio",
                      help="Generate GIO based stubs")
    parser.add_option("-c", "--client",
                      action="store_true", dest="client",
                      help="Generate client stubs")
    parser.add_option("-s", "--server",
                      action="store_true", dest="server",
                      help="Generate server stubs"
                      )
    (options, args) = parser.parse_args()

    # Templates live next to this script; the brand selects the DBus flavour.
    templates = []
    directory = os.path.dirname(sys.argv[0])
    brand = "freedesktop"
    if options.gio:
        brand = "gio"

    # Choose the Cheetah templates and header extension for the language.
    if options.language:
        if options.language == 'C':
            header_ext=".h"
        elif options.language == 'C++':
            if options.client:
                templates.append(directory+"/DBus-client-template-" + brand + ".cc")
                templates.append(directory+"/DBus-client-template-" + brand + ".hh")
            if options.server:
                templates.append(directory+"/DBus-template-" + brand + ".cc")
                templates.append(directory+"/DBus-template-" + brand + ".hh")
            header_ext=".hh"
        elif options.language == 'xml':
            templates.append(directory+"/DBus-xml.xml")
            header_ext=".xml"
        else:
            parser.error("Unsupported language: " + options.language)
            sys.exit(1)

    if len(templates) == 0:
        parser.error("Specify language")
        sys.exit(1)

    # Parse the introspection XML into the node tree and render each
    # template against it; output files are named after the unit, with
    # the template's extension.
    binding = TopNode(args[0])
    binding.parse()
    binding.include_filename = binding.name + header_ext

    for template_name in templates:
        t = Template(file=template_name)
        t.model = binding
        s = str(t)
        ext = os.path.splitext(template_name)[1]
        f = open(binding.name + ext, 'w+')
        try:
            f.write(s)
        finally:
            f.close()
| gpl-3.0 |
tolstoyevsky/pieman | pieman/bin/image_attrs.py | 1 | 1777 | #!/usr/bin/env python3
# Copyright (C) 2017-2021 Evgeny Golyshev <eugulixes@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""A utility that reads pieman.yml from stdin and fetches the specified attribute from it. """
import sys
from argparse import ArgumentParser
from pieman import attrs
def fail(message):
    """Print *message* to stderr and terminate the process with exit code 1."""
    print(message, file=sys.stderr)
    sys.exit(1)
def main():
    """The main entry point. """
    parser = ArgumentParser()
    # path of the attribute to fetch, given as whitespace-separated parts
    parser.add_argument('root', nargs='*')
    args = parser.parse_args()

    # pieman.yml is expected on stdin
    attributes_list = attrs.AttributesList(sys.stdin)

    try:
        attr = attributes_list.get_attribute(args.root)
    except attrs.RootDoesNotExist:
        # fail() never returns -- it exits the process with code 1
        fail('There is no root named {}'.format(args.root))
    except attrs.AttributeDoesNotExist as exc:
        fail(str(exc))
    except attrs.UnknownAttribute:
        fail('{} attribute is unknown'.format(args.root[-1]))

    try:
        attr.echo()
    except attrs.UnprintableType:
        fail('{} attribute is not supposed to be printed'.format(args.root[-1]))


if __name__ == "__main__":
    main()
| gpl-3.0 |
terrycojones/dark-matter | bin/dna-to-aa.py | 3 | 1728 | #!/usr/bin/env python
"""
Read DNA FASTA from stdin and print AA FASTA to stdout. If a minimum
ORF length is given, only print AA sequences that have an ORF of at least
that length.
Note that start and stop codons will be present in the output. If you actually
want to just output all ORFs, use extract-ORFs.py directly instead (or pipe
the output of this program into extract-ORFs.py --type aa).
"""
from __future__ import print_function
import sys
import argparse
from Bio.Data.CodonTable import TranslationError
from dark.reads import addFASTACommandLineOptions, parseFASTACommandLineOptions
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Convert DNA to AA',
        epilog='Given DNA FASTA on stdin, output AA FASTA to stdout. '
        'Optionally, filter by minimum required ORF length.'
    )

    parser.add_argument(
        '--minORFLength', metavar='LEN', type=int, default=None,
        help='Translations to AA that do not contain an ORF of at least '
        'this length will not be produced.')

    addFASTACommandLineOptions(parser)
    args = parser.parse_args()
    reads = parseFASTACommandLineOptions(args)

    write = sys.stdout.write
    minORFLength = args.minORFLength

    for read in reads:
        try:
            for translation in read.translations():
                # Emit only translations whose longest ORF meets the
                # threshold (or every translation when no threshold given).
                if (minORFLength is None or
                        translation.maximumORFLength() >= minORFLength):
                    write(translation.toString('fasta'))
        except TranslationError as error:
            # An untranslatable read aborts the whole run with a
            # diagnostic on stderr.
            print('Could not translate read %r sequence '
                  '%r (%s).' % (read.id, read.sequence, error),
                  file=sys.stderr)
            sys.exit(1)
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.