repo_name
stringlengths 5
100
| path
stringlengths 4
375
| copies
stringclasses 991
values | size
stringlengths 4
7
| content
stringlengths 666
1M
| license
stringclasses 15
values |
|---|---|---|---|---|---|
druuu/django
|
django/contrib/gis/gdal/geometries.py
|
337
|
24056
|
"""
The OGRGeometry is a wrapper for using the OGR Geometry class
(see http://www.gdal.org/ogr/classOGRGeometry.html). OGRGeometry
may be instantiated when reading geometries from OGR Data Sources
(e.g. SHP files), or when given OGC WKT (a string).
While the 'full' API is not present yet, the API is "pythonic" unlike
the traditional and "next-generation" OGR Python bindings. One major
advantage OGR Geometries have over their GEOS counterparts is support
for spatial reference systems and their transformation.
Example:
>>> from django.contrib.gis.gdal import OGRGeometry, OGRGeomType, SpatialReference
>>> wkt1, wkt2 = 'POINT(-90 30)', 'POLYGON((0 0, 5 0, 5 5, 0 5, 0 0))'
>>> pnt = OGRGeometry(wkt1)
>>> print(pnt)
POINT (-90 30)
>>> mpnt = OGRGeometry(OGRGeomType('MultiPoint'), SpatialReference('WGS84'))
>>> mpnt.add(wkt1)
>>> mpnt.add(wkt1)
>>> print(mpnt)
MULTIPOINT (-90 30,-90 30)
>>> print(mpnt.srs.name)
WGS 84
>>> print(mpnt.srs.proj)
+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
>>> mpnt.transform_to(SpatialReference('NAD27'))
>>> print(mpnt.proj)
+proj=longlat +ellps=clrk66 +datum=NAD27 +no_defs
>>> print(mpnt)
MULTIPOINT (-89.999930378602485 29.999797886557641,-89.999930378602485 29.999797886557641)
The OGRGeomType class is to make it easy to specify an OGR geometry type:
>>> from django.contrib.gis.gdal import OGRGeomType
>>> gt1 = OGRGeomType(3) # Using an integer for the type
>>> gt2 = OGRGeomType('Polygon') # Using a string
>>> gt3 = OGRGeomType('POLYGON') # It's case-insensitive
>>> print(gt1 == 3, gt1 == 'Polygon') # Equivalence works w/non-OGRGeomType objects
True True
"""
import sys
from binascii import a2b_hex, b2a_hex
from ctypes import byref, c_char_p, c_double, c_ubyte, c_void_p, string_at
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.envelope import Envelope, OGREnvelope
from django.contrib.gis.gdal.error import (
GDALException, OGRIndexError, SRSException,
)
from django.contrib.gis.gdal.geomtype import OGRGeomType
from django.contrib.gis.gdal.prototypes import geom as capi, srs as srs_api
from django.contrib.gis.gdal.srs import CoordTransform, SpatialReference
from django.contrib.gis.geometry.regex import hex_regex, json_regex, wkt_regex
from django.utils import six
from django.utils.six.moves import range
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_G_* routines are relevant here.
class OGRGeometry(GDALBase):
    "Generally encapsulates an OGR geometry."

    def __init__(self, geom_input, srs=None):
        """
        Initializes Geometry on either WKT or an OGR pointer as input.

        `geom_input` may be a WKT/EWKT string, a HEX string, a GeoJSON
        string, a geometry-type shorthand string (e.g. 'Point'), a WKB
        memoryview, an OGRGeomType, or a raw OGR geometry pointer.
        `srs` optionally sets the spatial reference (a SpatialReference,
        SRID integer, or SRS-definition string).
        """
        str_instance = isinstance(geom_input, six.string_types)

        # If HEX, unpack input to a binary buffer.
        if str_instance and hex_regex.match(geom_input):
            geom_input = six.memoryview(a2b_hex(geom_input.upper().encode()))
            str_instance = False

        # Constructing the geometry,
        if str_instance:
            wkt_m = wkt_regex.match(geom_input)
            json_m = json_regex.match(geom_input)
            if wkt_m:
                if wkt_m.group('srid'):
                    # If there's EWKT, set the SRS w/value of the SRID.
                    srs = int(wkt_m.group('srid'))
                if wkt_m.group('type').upper() == 'LINEARRING':
                    # OGR_G_CreateFromWkt doesn't work with LINEARRING WKT.
                    # See http://trac.osgeo.org/gdal/ticket/1992.
                    g = capi.create_geom(OGRGeomType(wkt_m.group('type')).num)
                    capi.import_wkt(g, byref(c_char_p(wkt_m.group('wkt').encode())))
                else:
                    g = capi.from_wkt(byref(c_char_p(wkt_m.group('wkt').encode())), None, byref(c_void_p()))
            elif json_m:
                g = capi.from_json(geom_input.encode())
            else:
                # Seeing if the input is a valid short-hand string
                # (e.g., 'Point', 'POLYGON').
                OGRGeomType(geom_input)
                g = capi.create_geom(OGRGeomType(geom_input).num)
        elif isinstance(geom_input, six.memoryview):
            # WKB was passed in
            g = capi.from_wkb(bytes(geom_input), None, byref(c_void_p()), len(geom_input))
        elif isinstance(geom_input, OGRGeomType):
            # OGRGeomType was passed in, an empty geometry will be created.
            g = capi.create_geom(geom_input.num)
        elif isinstance(geom_input, self.ptr_type):
            # OGR pointer (c_void_p) was the input.
            g = geom_input
        else:
            raise GDALException('Invalid input type for OGR Geometry construction: %s' % type(geom_input))

        # Now checking the Geometry pointer before finishing initialization
        # by setting the pointer for the object.
        if not g:
            raise GDALException('Cannot create OGR Geometry from input: %s' % str(geom_input))
        self.ptr = g

        # Assigning the SpatialReference object to the geometry, if valid.
        if srs:
            self.srs = srs

        # Setting the class depending upon the OGR Geometry Type
        self.__class__ = GEO_CLASSES[self.geom_type.num]

    def __del__(self):
        "Deletes this Geometry."
        # `capi` may already be torn down at interpreter shutdown; only
        # release the OGR geometry if both the pointer and module survive.
        if self._ptr and capi:
            capi.destroy_geom(self._ptr)

    # Pickle routines
    def __getstate__(self):
        # Serialize as (WKB bytes, SRS WKT or None) -- raw pointers
        # cannot be pickled.
        srs = self.srs
        if srs:
            srs = srs.wkt
        else:
            srs = None
        return bytes(self.wkb), srs

    def __setstate__(self, state):
        wkb, srs = state
        ptr = capi.from_wkb(wkb, None, byref(c_void_p()), len(wkb))
        if not ptr:
            raise GDALException('Invalid OGRGeometry loaded from pickled state.')
        self.ptr = ptr
        self.srs = srs

    @classmethod
    def from_bbox(cls, bbox):
        "Constructs a Polygon from a bounding box (4-tuple)."
        x0, y0, x1, y1 = bbox
        # Ring is closed by repeating the first corner at the end.
        return OGRGeometry('POLYGON((%s %s, %s %s, %s %s, %s %s, %s %s))' % (
            x0, y0, x0, y1, x1, y1, x1, y0, x0, y0))

    # ### Geometry set-like operations ###
    # g = g1 | g2
    def __or__(self, other):
        "Returns the union of the two geometries."
        return self.union(other)

    # g = g1 & g2
    def __and__(self, other):
        "Returns the intersection of this Geometry and the other."
        return self.intersection(other)

    # g = g1 - g2
    def __sub__(self, other):
        "Return the difference of this Geometry and the other."
        return self.difference(other)

    # g = g1 ^ g2
    def __xor__(self, other):
        "Return the symmetric difference of this Geometry and the other."
        return self.sym_difference(other)

    def __eq__(self, other):
        "Is this Geometry equal to the other?"
        if isinstance(other, OGRGeometry):
            return self.equals(other)
        else:
            return False

    def __ne__(self, other):
        "Tests for inequality."
        return not (self == other)

    def __str__(self):
        "WKT is used for the string representation."
        return self.wkt

    # #### Geometry Properties ####
    @property
    def dimension(self):
        "Returns 0 for points, 1 for lines, and 2 for surfaces."
        return capi.get_dims(self.ptr)

    def _get_coord_dim(self):
        "Returns the coordinate dimension of the Geometry."
        return capi.get_coord_dim(self.ptr)

    def _set_coord_dim(self, dim):
        "Sets the coordinate dimension of this Geometry."
        if dim not in (2, 3):
            raise ValueError('Geometry dimension must be either 2 or 3')
        capi.set_coord_dim(self.ptr, dim)

    coord_dim = property(_get_coord_dim, _set_coord_dim)

    @property
    def geom_count(self):
        "The number of elements in this Geometry."
        return capi.get_geom_count(self.ptr)

    @property
    def point_count(self):
        "Returns the number of Points in this Geometry."
        return capi.get_point_count(self.ptr)

    @property
    def num_points(self):
        "Alias for `point_count` (same name method in GEOS API.)"
        return self.point_count

    @property
    def num_coords(self):
        "Alias for `point_count`."
        return self.point_count

    @property
    def geom_type(self):
        "Returns the Type for this Geometry."
        return OGRGeomType(capi.get_geom_type(self.ptr))

    @property
    def geom_name(self):
        "Returns the Name of this Geometry."
        return capi.get_geom_name(self.ptr)

    @property
    def area(self):
        "Returns the area for a LinearRing, Polygon, or MultiPolygon; 0 otherwise."
        return capi.get_area(self.ptr)

    @property
    def envelope(self):
        "Returns the envelope for this Geometry."
        # TODO: Fix Envelope() for Point geometries.
        return Envelope(capi.get_envelope(self.ptr, byref(OGREnvelope())))

    @property
    def extent(self):
        "Returns the envelope as a 4-tuple, instead of as an Envelope object."
        return self.envelope.tuple

    # #### SpatialReference-related Properties ####
    # The SRS property
    def _get_srs(self):
        "Returns the Spatial Reference for this Geometry, or None."
        try:
            srs_ptr = capi.get_geom_srs(self.ptr)
            return SpatialReference(srs_api.clone_srs(srs_ptr))
        except SRSException:
            return None

    def _set_srs(self, srs):
        "Sets the SpatialReference for this geometry."
        # Do not have to clone the `SpatialReference` object pointer because
        # when it is assigned to this `OGRGeometry` it's internal OGR
        # reference count is incremented, and will likewise be released
        # (decremented) when this geometry's destructor is called.
        if isinstance(srs, SpatialReference):
            srs_ptr = srs.ptr
        elif isinstance(srs, six.integer_types + six.string_types):
            sr = SpatialReference(srs)
            srs_ptr = sr.ptr
        else:
            raise TypeError('Cannot assign spatial reference with object of type: %s' % type(srs))
        capi.assign_srs(self.ptr, srs_ptr)

    srs = property(_get_srs, _set_srs)

    # The SRID property
    def _get_srid(self):
        srs = self.srs
        if srs:
            return srs.srid
        return None

    def _set_srid(self, srid):
        if isinstance(srid, six.integer_types):
            self.srs = srid
        else:
            raise TypeError('SRID must be set with an integer.')

    srid = property(_get_srid, _set_srid)

    # #### Output Methods ####
    @property
    def geos(self):
        "Returns a GEOSGeometry object from this OGRGeometry."
        # Imported here to avoid a circular import at module load time.
        from django.contrib.gis.geos import GEOSGeometry
        return GEOSGeometry(self.wkb, self.srid)

    @property
    def gml(self):
        "Returns the GML representation of the Geometry."
        return capi.to_gml(self.ptr)

    @property
    def hex(self):
        "Returns the hexadecimal representation of the WKB (bytes on Python 3)."
        return b2a_hex(self.wkb).upper()

    @property
    def json(self):
        """
        Returns the GeoJSON representation of this Geometry.
        """
        return capi.to_json(self.ptr)
    geojson = json

    @property
    def kml(self):
        "Returns the KML representation of the Geometry."
        return capi.to_kml(self.ptr, None)

    @property
    def wkb_size(self):
        "Returns the size of the WKB buffer."
        return capi.get_wkbsize(self.ptr)

    @property
    def wkb(self):
        "Returns the WKB representation of the Geometry (a memoryview)."
        # Export using the machine's native byte order.
        if sys.byteorder == 'little':
            byteorder = 1  # wkbNDR (from ogr_core.h)
        else:
            byteorder = 0  # wkbXDR
        sz = self.wkb_size
        # Creating the unsigned character buffer, and passing it in by reference.
        buf = (c_ubyte * sz)()
        capi.to_wkb(self.ptr, byteorder, byref(buf))
        # Returning a buffer of the string at the pointer.
        return six.memoryview(string_at(buf, sz))

    @property
    def wkt(self):
        "Returns the WKT representation of the Geometry."
        return capi.to_wkt(self.ptr, byref(c_char_p()))

    @property
    def ewkt(self):
        "Returns the EWKT representation of the Geometry."
        srs = self.srs
        if srs and srs.srid:
            return 'SRID=%s;%s' % (srs.srid, self.wkt)
        else:
            return self.wkt

    # #### Geometry Methods ####
    def clone(self):
        "Clones this OGR Geometry."
        return OGRGeometry(capi.clone_geom(self.ptr), self.srs)

    def close_rings(self):
        """
        If there are any rings within this geometry that have not been
        closed, this routine will do so by adding the starting point at the
        end.
        """
        # Closing the open rings.
        capi.geom_close_rings(self.ptr)

    def transform(self, coord_trans, clone=False):
        """
        Transforms this geometry to a different spatial reference system.
        May take a CoordTransform object, a SpatialReference object, string
        WKT or PROJ.4, and/or an integer SRID. By default nothing is returned
        and the geometry is transformed in-place. However, if the `clone`
        keyword is set, then a transformed clone of this geometry will be
        returned.
        """
        if clone:
            klone = self.clone()
            klone.transform(coord_trans)
            return klone

        # Depending on the input type, use the appropriate OGR routine
        # to perform the transformation.
        if isinstance(coord_trans, CoordTransform):
            capi.geom_transform(self.ptr, coord_trans.ptr)
        elif isinstance(coord_trans, SpatialReference):
            capi.geom_transform_to(self.ptr, coord_trans.ptr)
        elif isinstance(coord_trans, six.integer_types + six.string_types):
            sr = SpatialReference(coord_trans)
            capi.geom_transform_to(self.ptr, sr.ptr)
        else:
            raise TypeError('Transform only accepts CoordTransform, '
                            'SpatialReference, string, and integer objects.')

    def transform_to(self, srs):
        "For backwards-compatibility."
        self.transform(srs)

    # #### Topology Methods ####
    def _topology(self, func, other):
        """A generalized function for topology operations, takes a GDAL function and
        the other geometry to perform the operation on."""
        if not isinstance(other, OGRGeometry):
            raise TypeError('Must use another OGRGeometry object for topology operations!')

        # Returning the output of the given function with the other geometry's
        # pointer.
        return func(self.ptr, other.ptr)

    def intersects(self, other):
        "Returns True if this geometry intersects with the other."
        return self._topology(capi.ogr_intersects, other)

    def equals(self, other):
        "Returns True if this geometry is equivalent to the other."
        return self._topology(capi.ogr_equals, other)

    def disjoint(self, other):
        "Returns True if this geometry and the other are spatially disjoint."
        return self._topology(capi.ogr_disjoint, other)

    def touches(self, other):
        "Returns True if this geometry touches the other."
        return self._topology(capi.ogr_touches, other)

    def crosses(self, other):
        "Returns True if this geometry crosses the other."
        return self._topology(capi.ogr_crosses, other)

    def within(self, other):
        "Returns True if this geometry is within the other."
        return self._topology(capi.ogr_within, other)

    def contains(self, other):
        "Returns True if this geometry contains the other."
        return self._topology(capi.ogr_contains, other)

    def overlaps(self, other):
        "Returns True if this geometry overlaps the other."
        return self._topology(capi.ogr_overlaps, other)

    # #### Geometry-generation Methods ####
    def _geomgen(self, gen_func, other=None):
        "A helper routine for the OGR routines that generate geometries."
        if isinstance(other, OGRGeometry):
            return OGRGeometry(gen_func(self.ptr, other.ptr), self.srs)
        else:
            return OGRGeometry(gen_func(self.ptr), self.srs)

    @property
    def boundary(self):
        "Returns the boundary of this geometry."
        return self._geomgen(capi.get_boundary)

    @property
    def convex_hull(self):
        """
        Returns the smallest convex Polygon that contains all the points in
        this Geometry.
        """
        return self._geomgen(capi.geom_convex_hull)

    def difference(self, other):
        """
        Returns a new geometry consisting of the region which is the difference
        of this geometry and the other.
        """
        return self._geomgen(capi.geom_diff, other)

    def intersection(self, other):
        """
        Returns a new geometry consisting of the region of intersection of this
        geometry and the other.
        """
        return self._geomgen(capi.geom_intersection, other)

    def sym_difference(self, other):
        """
        Returns a new geometry which is the symmetric difference of this
        geometry and the other.
        """
        return self._geomgen(capi.geom_sym_diff, other)

    def union(self, other):
        """
        Returns a new geometry consisting of the region which is the union of
        this geometry and the other.
        """
        return self._geomgen(capi.geom_union, other)
# The subclasses for OGR Geometry.
class Point(OGRGeometry):
    "An OGR Point geometry."

    @property
    def x(self):
        "Returns the X coordinate for this Point."
        return capi.getx(self.ptr, 0)

    @property
    def y(self):
        "Returns the Y coordinate for this Point."
        return capi.gety(self.ptr, 0)

    @property
    def z(self):
        "Returns the Z coordinate for this Point (None for 2D points)."
        if self.coord_dim == 3:
            return capi.getz(self.ptr, 0)

    @property
    def tuple(self):
        "Returns the tuple of this point."
        dim = self.coord_dim
        if dim == 2:
            return (self.x, self.y)
        if dim == 3:
            return (self.x, self.y, self.z)
    coords = tuple
class LineString(OGRGeometry):
    "An OGR LineString geometry, exposing its points through a sequence API."

    def __getitem__(self, index):
        "Returns the Point at the given index."
        if 0 <= index < self.point_count:
            x, y, z = c_double(), c_double(), c_double()
            capi.get_point(self.ptr, index, byref(x), byref(y), byref(z))
            dim = self.coord_dim
            if dim == 1:
                return (x.value,)
            if dim == 2:
                return (x.value, y.value)
            if dim == 3:
                return (x.value, y.value, z.value)
        else:
            raise OGRIndexError('index out of range: %s' % str(index))

    def __iter__(self):
        "Iterates over each point in the LineString."
        for idx in range(self.point_count):
            yield self[idx]

    def __len__(self):
        "The length returns the number of points in the LineString."
        return self.point_count

    @property
    def tuple(self):
        "Returns the tuple representation of this LineString."
        return tuple(self[idx] for idx in range(len(self)))
    coords = tuple

    def _listarr(self, func):
        """
        Internal routine that returns a sequence (list) corresponding with
        the given function.
        """
        return [func(self.ptr, idx) for idx in range(len(self))]

    @property
    def x(self):
        "Returns the X coordinates in a list."
        return self._listarr(capi.getx)

    @property
    def y(self):
        "Returns the Y coordinates in a list."
        return self._listarr(capi.gety)

    @property
    def z(self):
        "Returns the Z coordinates in a list (None for 2D lines)."
        if self.coord_dim == 3:
            return self._listarr(capi.getz)
# LinearRings are used in Polygons.
class LinearRing(LineString):
    # Distinct subclass so the OGR LinearRing type code (101 in
    # GEO_CLASSES) maps to its own class; behavior is all inherited.
    pass
class Polygon(OGRGeometry):
    "An OGR Polygon geometry whose rings are accessible by index."

    def __len__(self):
        "The number of interior rings in this Polygon."
        return self.geom_count

    def __iter__(self):
        "Iterates through each ring in the Polygon."
        for idx in range(self.geom_count):
            yield self[idx]

    def __getitem__(self, index):
        "Gets the ring at the specified index."
        if not (0 <= index < self.geom_count):
            raise OGRIndexError('index out of range: %s' % index)
        return OGRGeometry(capi.clone_geom(capi.get_geom_ref(self.ptr, index)), self.srs)

    # Polygon Properties
    @property
    def shell(self):
        "Returns the shell of this Polygon."
        # The first ring is the exterior shell.
        return self[0]
    exterior_ring = shell

    @property
    def tuple(self):
        "Returns a tuple of LinearRing coordinate tuples."
        return tuple(self[idx].tuple for idx in range(self.geom_count))
    coords = tuple

    @property
    def point_count(self):
        "The number of Points in this Polygon."
        # Summing up the number of points in each ring of the Polygon.
        return sum(self[idx].point_count for idx in range(self.geom_count))

    @property
    def centroid(self):
        "Returns the centroid (a Point) of this Polygon."
        # The centroid is a Point; create a geometry for OGR to fill in.
        pnt = OGRGeometry(OGRGeomType('Point'))
        capi.get_centroid(self.ptr, pnt.ptr)
        return pnt
# Geometry Collection base class.
class GeometryCollection(OGRGeometry):
    "The Geometry Collection class."

    def __getitem__(self, index):
        "Gets the Geometry at the specified index."
        if not (0 <= index < self.geom_count):
            raise OGRIndexError('index out of range: %s' % index)
        return OGRGeometry(capi.clone_geom(capi.get_geom_ref(self.ptr, index)), self.srs)

    def __iter__(self):
        "Iterates over each Geometry."
        for idx in range(self.geom_count):
            yield self[idx]

    def __len__(self):
        "The number of geometries in this Geometry Collection."
        return self.geom_count

    def add(self, geom):
        "Add the geometry to this Geometry Collection."
        if isinstance(geom, OGRGeometry):
            if isinstance(geom, self.__class__):
                # Adding a collection of the same type merges its members.
                for member in geom:
                    capi.add_geom(self.ptr, member.ptr)
            else:
                capi.add_geom(self.ptr, geom.ptr)
        elif isinstance(geom, six.string_types):
            # Strings are parsed into a temporary geometry first.
            tmp = OGRGeometry(geom)
            capi.add_geom(self.ptr, tmp.ptr)
        else:
            raise GDALException('Must add an OGRGeometry.')

    @property
    def point_count(self):
        "The number of Points in this Geometry Collection."
        # Summing up the number of points in each geometry in this collection
        return sum(self[idx].point_count for idx in range(self.geom_count))

    @property
    def tuple(self):
        "Returns a tuple representation of this Geometry Collection."
        return tuple(self[idx].tuple for idx in range(self.geom_count))
    coords = tuple
# Multiple Geometry types.
class MultiPoint(GeometryCollection):
    # Distinct subclass for the OGR MultiPoint type code (4 in GEO_CLASSES).
    pass
class MultiLineString(GeometryCollection):
    # Distinct subclass for the OGR MultiLineString type code (5 in GEO_CLASSES).
    pass
class MultiPolygon(GeometryCollection):
    # Distinct subclass for the OGR MultiPolygon type code (6 in GEO_CLASSES).
    pass
# Class mapping dictionary (using the OGRwkbGeometryType as the key)
# used by OGRGeometry.__init__ to downcast instances to the proper subclass.
GEO_CLASSES = {1: Point,
               2: LineString,
               3: Polygon,
               4: MultiPoint,
               5: MultiLineString,
               6: MultiPolygon,
               7: GeometryCollection,
               101: LinearRing,
               # The 2.5D (Z-coordinate) OGR type codes map to the same
               # Python classes as their 2D counterparts.
               1 + OGRGeomType.wkb25bit: Point,
               2 + OGRGeomType.wkb25bit: LineString,
               3 + OGRGeomType.wkb25bit: Polygon,
               4 + OGRGeomType.wkb25bit: MultiPoint,
               5 + OGRGeomType.wkb25bit: MultiLineString,
               6 + OGRGeomType.wkb25bit: MultiPolygon,
               7 + OGRGeomType.wkb25bit: GeometryCollection,
               }
|
bsd-3-clause
|
insertnamehere1/maraschino
|
lib/flask/templating.py
|
19
|
4417
|
# -*- coding: utf-8 -*-
"""
flask.templating
~~~~~~~~~~~~~~~~
Implements the bridge to Jinja2.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import posixpath
from jinja2 import BaseLoader, Environment as BaseEnvironment, \
TemplateNotFound
from .globals import _request_ctx_stack
from .signals import template_rendered
from .module import blueprint_is_module
def _default_template_ctx_processor():
    """Default template context processor.  Injects `request`,
    `session` and `g`.
    """
    reqctx = _request_ctx_stack.top
    return {
        'config': reqctx.app.config,
        'request': reqctx.request,
        'session': reqctx.session,
        'g': reqctx.g,
    }
class Environment(BaseEnvironment):
    """Works like a regular Jinja2 environment but has some additional
    knowledge of how Flask's blueprint works so that it can prepend the
    name of the blueprint to referenced templates if necessary.
    """

    def __init__(self, app, **options):
        # Fall back to the application's global loader unless the caller
        # supplied an explicit one.
        if 'loader' not in options:
            options['loader'] = app.create_global_jinja_loader()
        super(Environment, self).__init__(**options)
        self.app = app
class DispatchingJinjaLoader(BaseLoader):
    """A loader that looks for templates in the application and all
    the blueprint folders.
    """

    def __init__(self, app):
        self.app = app

    def get_source(self, environment, template):
        # Try each candidate loader in order; the first hit wins.
        for loader, name in self._iter_loaders(template):
            try:
                return loader.get_source(environment, name)
            except TemplateNotFound:
                continue
        raise TemplateNotFound(template)

    def _iter_loaders(self, template):
        # The application's own loader is always consulted first.
        app_loader = self.app.jinja_loader
        if app_loader is not None:
            yield app_loader, template

        # old style module based loaders in case we are dealing with a
        # blueprint that is an old style module
        try:
            module, local_name = posixpath.normpath(template).split('/', 1)
            blueprint = self.app.blueprints[module]
            if blueprint_is_module(blueprint):
                module_loader = blueprint.jinja_loader
                if module_loader is not None:
                    yield module_loader, local_name
        except (ValueError, KeyError):
            pass

        # New-style blueprints get the full template name.
        for blueprint in self.app.blueprints.itervalues():
            if blueprint_is_module(blueprint):
                continue
            bp_loader = blueprint.jinja_loader
            if bp_loader is not None:
                yield bp_loader, template

    def list_templates(self):
        found = set()
        app_loader = self.app.jinja_loader
        if app_loader is not None:
            found.update(app_loader.list_templates())

        for name, blueprint in self.app.blueprints.iteritems():
            bp_loader = blueprint.jinja_loader
            if bp_loader is None:
                continue
            for template in bp_loader.list_templates():
                # Old-style module blueprints namespace their templates.
                prefix = name + '/' if blueprint_is_module(blueprint) else ''
                found.add(prefix + template)
        return list(found)
def _render(template, context, app):
    """Renders the template and fires the signal"""
    rendered = template.render(context)
    # Notify any subscribers (e.g. test helpers) that a render happened.
    template_rendered.send(app, template=template, context=context)
    return rendered
def render_template(template_name, **context):
    """Renders a template from the template folder with the given
    context.

    :param template_name: the name of the template to be rendered
    :param context: the variables that should be available in the
                    context of the template.
    """
    ctx = _request_ctx_stack.top
    ctx.app.update_template_context(context)
    template = ctx.app.jinja_env.get_template(template_name)
    return _render(template, context, ctx.app)
def render_template_string(source, **context):
    """Renders a template from the given template source string
    with the given context.

    :param source: the source code of the template to be rendered
    :param context: the variables that should be available in the
                    context of the template.
    """
    ctx = _request_ctx_stack.top
    ctx.app.update_template_context(context)
    template = ctx.app.jinja_env.from_string(source)
    return _render(template, context, ctx.app)
|
mit
|
hwu25/AppPkg
|
Applications/Python/Python-2.7.2/Tools/scripts/ifdef.py
|
10
|
3829
|
#! /usr/bin/env python
# Selectively preprocess #ifdef / #ifndef statements.
# Usage:
# ifdef [-Dname] ... [-Uname] ... [file] ...
#
# This scans the file(s), looking for #ifdef and #ifndef preprocessor
# commands that test for one of the names mentioned in the -D and -U
# options. On standard output it writes a copy of the input file(s)
# minus those code sections that are suppressed by the selected
# combination of defined/undefined symbols. The #if(n)def/#else/#endif
# lines themselfs (if the #if(n)def tests for one of the mentioned
# names) are removed as well.
# Features: Arbitrary nesting of recognized and unrecognized
# preprocessor statements works correctly. Unrecognized #if* commands
# are left in place, so it will never remove too much, only too
# little. It does accept whitespace around the '#' character.
# Restrictions: There should be no comments or other symbols on the
# #if(n)def lines. The effect of #define/#undef commands in the input
# file or in included files is not taken into account. Tests using
# #if and the defined() pseudo function are not recognized. The #elif
# command is not recognized. Improperly nesting is not detected.
# Lines that look like preprocessor commands but which are actually
# part of comments or string literals will be mistaken for
# preprocessor commands.
import sys
import getopt
# Symbol lists populated from the -D and -U command line options by main()
# and consulted by process().
defs = []
undefs = []
def main():
    """Parse -D/-U options into the module-level `defs`/`undefs` lists,
    then filter each named file (or stdin for '-') to stdout.
    """
    opts, args = getopt.getopt(sys.argv[1:], 'D:U:')
    for o, a in opts:
        if o == '-D':
            defs.append(a)
        if o == '-U':
            undefs.append(a)
    if not args:
        # No file arguments: act as a plain stdin-to-stdout filter.
        args = ['-']
    for filename in args:
        if filename == '-':
            process(sys.stdin, sys.stdout)
        else:
            # Use a context manager so the file is closed even if
            # process() raises (the original leaked the handle on error).
            with open(filename, 'r') as f:
                process(f, sys.stdout)
def process(fpi, fpo):
    """Copy input stream `fpi` to output stream `fpo`, dropping the code
    sections disabled by the symbols in the module-level `defs` and
    `undefs` lists, along with the recognized #ifdef/#ifndef/#else/#endif
    lines themselves.  Unrecognized conditionals are echoed untouched.
    """
    keywords = ('if', 'ifdef', 'ifndef', 'else', 'endif')
    ok = 1  # true while lines should be copied to the output
    # Stack of (saved ok, branch flag, symbol) tuples, one per open
    # conditional.  The branch flag is 1/0 for a recognized symbol
    # (1 = current branch kept), or -1 for a pass-through conditional.
    stack = []
    while 1:
        line = fpi.readline()
        if not line: break
        # Join backslash-continued lines before inspecting them.
        while line[-2:] == '\\\n':
            nextline = fpi.readline()
            if not nextline: break
            line = line + nextline
        tmp = line.strip()
        if tmp[:1] != '#':
            # Not a preprocessor line: emit it when the current region is on.
            if ok: fpo.write(line)
            continue
        tmp = tmp[1:].strip()
        words = tmp.split()
        keyword = words[0]
        if keyword not in keywords:
            if ok: fpo.write(line)
            continue
        if keyword in ('ifdef', 'ifndef') and len(words) == 2:
            # ko is the flag for the "symbol is defined" case:
            # 1 for #ifdef (keep when defined), 0 for #ifndef.
            if keyword == 'ifdef':
                ko = 1
            else:
                ko = 0
            word = words[1]
            if word in defs:
                stack.append((ok, ko, word))
                if not ko: ok = 0
            elif word in undefs:
                stack.append((ok, not ko, word))
                if ko: ok = 0
            else:
                # Unknown symbol: leave the whole conditional in the output.
                stack.append((ok, -1, word))
                if ok: fpo.write(line)
        elif keyword == 'if':
            # Plain #if tests are never interpreted; pass them through.
            stack.append((ok, -1, ''))
            if ok: fpo.write(line)
        elif keyword == 'else' and stack:
            s_ok, s_ko, s_word = stack[-1]
            if s_ko < 0:
                # Pass-through conditional: echo the #else too.
                if ok: fpo.write(line)
            else:
                # Recognized conditional: switch to the other branch.
                s_ko = not s_ko
                ok = s_ok
                if not s_ko: ok = 0
                stack[-1] = s_ok, s_ko, s_word
        elif keyword == 'endif' and stack:
            s_ok, s_ko, s_word = stack[-1]
            if s_ko < 0:
                if ok: fpo.write(line)
            # Close the conditional and restore the enclosing region state.
            del stack[-1]
            ok = s_ok
        else:
            sys.stderr.write('Unknown keyword %s\n' % keyword)
    if stack:
        # Leftover entries mean unbalanced conditionals in the input.
        sys.stderr.write('stack: %s\n' % stack)
# Run as a script: filter the files named on the command line (or stdin).
if __name__ == '__main__':
    main()
|
bsd-2-clause
|
kimshinelove/naver-npm
|
node_modules/node-gyp/gyp/pylib/gyp/msvs_emulation.py
|
1407
|
47697
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module helps emulate Visual Studio 2008 behavior on top of other
build systems, primarily ninja.
"""
import os
import re
import subprocess
import sys
from gyp.common import OrderedSet
import gyp.MSVSUtil
import gyp.MSVSVersion
# Matches a double quote preceded by any run of backslashes.
windows_quoter_regex = re.compile(r'(\\*)"')


def QuoteForRspFile(arg):
  """Quote a command line argument so that it appears as one argument when
  processed via cmd.exe and parsed by CommandLineToArgvW (as is typical for
  Windows programs)."""
  # See http://goo.gl/cuFbX and http://goo.gl/dhPnp including the comment
  # threads. This is actually the quoting rules for CommandLineToArgvW, not
  # for the shell, because the shell doesn't do anything in Windows. This
  # works more or less because most programs (including the compiler, etc.)
  # use that function to handle command line arguments.

  def _quote_repl(match):
    # A literal quote needs 2n+1 preceding backslashes, where n is the
    # number of backslashes already in front of it.
    return 2 * match.group(1) + '\\"'

  quoted = windows_quoter_regex.sub(_quote_repl, arg)

  # %'s also need to be doubled otherwise they're interpreted as batch
  # positional arguments. Also make sure to escape the % so that they're
  # passed literally through escaping so they can be singled to just the
  # original %. Otherwise, trying to pass the literal representation that
  # looks like an environment variable to the shell (e.g. %PATH%) would fail.
  quoted = quoted.replace('%', '%%')

  # These commands are used in rsp files, so no escaping for the shell (via ^)
  # is necessary.  Finally, wrap the whole thing in quotes so that the above
  # quote rule applies and whitespace isn't a word break.
  return '"%s"' % quoted
def EncodeRspFileList(args):
  """Process a list of arguments, quoting each via QuoteForRspFile."""
  # Note that the first argument is assumed to be the command. Don't add
  # quotes around it because then built-ins like 'echo', etc. won't work.
  # Take care to normpath only the path in the case of 'call ../x.bat' because
  # otherwise the whole thing is incorrectly interpreted as a path and not
  # normalized correctly.
  if not args:
    return ''
  first = args[0]
  if first.startswith('call '):
    call, prog = first.split(' ', 1)
    program = '%s %s' % (call, os.path.normpath(prog))
  else:
    program = os.path.normpath(first)
  quoted = [QuoteForRspFile(arg) for arg in args[1:]]
  return program + ' ' + ' '.join(quoted)
def _GenericRetrieve(root, default, path):
"""Given a list of dictionary keys |path| and a tree of dicts |root|, find
value at path, or return |default| if any of the path doesn't exist."""
if not root:
return default
if not path:
return root
return _GenericRetrieve(root.get(path[0]), default, path[1:])
def _AddPrefix(element, prefix):
"""Add |prefix| to |element| or each subelement if element is iterable."""
if element is None:
return element
# Note, not Iterable because we don't want to handle strings like that.
if isinstance(element, list) or isinstance(element, tuple):
return [prefix + e for e in element]
else:
return prefix + element
def _DoRemapping(element, map):
"""If |element| then remap it through |map|. If |element| is iterable then
each item will be remapped. Any elements not found will be removed."""
if map is not None and element is not None:
if not callable(map):
map = map.get # Assume it's a dict, otherwise a callable to do the remap.
if isinstance(element, list) or isinstance(element, tuple):
element = filter(None, [map(elem) for elem in element])
else:
element = map(element)
return element
def _AppendOrReturn(append, element):
"""If |append| is None, simply return |element|. If |append| is not None,
then add |element| to it, adding each item in |element| if it's a list or
tuple."""
if append is not None and element is not None:
if isinstance(element, list) or isinstance(element, tuple):
append.extend(element)
else:
append.append(element)
else:
return element
def _FindDirectXInstallation():
  """Try to find an installation location for the DirectX SDK. Check for the
  standard environment variable, and if that doesn't exist, try to find
  via the registry. May return None if not found in either location."""
  # Return previously calculated value, if there is one
  if hasattr(_FindDirectXInstallation, 'dxsdk_dir'):
    return _FindDirectXInstallation.dxsdk_dir

  dxsdk_dir = os.environ.get('DXSDK_DIR')
  if not dxsdk_dir:
    # Setup params to pass to and attempt to launch reg.exe.
    cmd = ['reg.exe', 'query', r'HKLM\Software\Microsoft\DirectX', '/s']
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # NOTE(review): communicate() returns bytes on Python 3; the substring
    # test and split below assume str output (Python 2) — confirm before
    # running under Python 3.
    for line in p.communicate()[0].splitlines():
      if 'InstallPath' in line:
        dxsdk_dir = line.split(' ')[3] + "\\"

  # Cache return value as a function attribute (None is cached too, so the
  # registry query runs at most once per process).
  _FindDirectXInstallation.dxsdk_dir = dxsdk_dir
  return dxsdk_dir
def GetGlobalVSMacroEnv(vs_version):
  """Get a dict of variables mapping internal VS macro names to their gyp
  equivalents. Returns all variables that are independent of the target."""
  env = {}
  # '$(VSInstallDir)' and '$(VCInstallDir)' are available when and only when
  # Visual Studio is actually installed.
  install_dir = vs_version.Path()
  if install_dir:
    env['$(VSInstallDir)'] = install_dir
    env['$(VCInstallDir)'] = os.path.join(install_dir, 'VC') + '\\'
  # Chromium uses DXSDK_DIR in include/lib paths, but it may or may not be
  # set. This happens when the SDK is sync'd via src-internal, rather than
  # by typical end-user installation of the SDK. If it's not set, we don't
  # want to leave the unexpanded variable in the path, so simply strip it.
  dxsdk_dir = _FindDirectXInstallation()
  env['$(DXSDK_DIR)'] = dxsdk_dir or ''
  # Try to find an installation location for the Windows DDK by checking
  # the WDK_DIR environment variable, may be None.
  env['$(WDK_DIR)'] = os.environ.get('WDK_DIR', '')
  return env
def ExtractSharedMSVSSystemIncludes(configs, generator_flags):
  """Finds msvs_system_include_dirs that are common to all targets, removes
  them from all targets, and returns an OrderedSet containing them."""
  # Start from the first config's includes and intersect with each remaining
  # config; only includes present in every config survive.
  all_system_includes = OrderedSet(
      configs[0].get('msvs_system_include_dirs', []))
  for config in configs[1:]:
    system_includes = config.get('msvs_system_include_dirs', [])
    all_system_includes = all_system_includes & OrderedSet(system_includes)
  if not all_system_includes:
    return None
  # Expand macros in all_system_includes.
  env = GetGlobalVSMacroEnv(GetVSVersion(generator_flags))
  expanded_system_includes = OrderedSet([ExpandMacros(include, env)
                                         for include in all_system_includes])
  if any(['$' in include for include in expanded_system_includes]):
    # Some path relies on target-specific variables, bail.
    return None
  # Remove system includes shared by all targets from the targets.
  for config in configs:
    includes = config.get('msvs_system_include_dirs', [])
    if includes:  # Don't insert a msvs_system_include_dirs key if not needed.
      # This must check the unexpanded includes list:
      new_includes = [i for i in includes if i not in all_system_includes]
      config['msvs_system_include_dirs'] = new_includes
  return expanded_system_includes
class MsvsSettings(object):
  """A class that understands the gyp 'msvs_...' values (especially the
  msvs_settings field). They largely correpond to the VS2008 IDE DOM. This
  class helps map those settings to command line options."""
  def __init__(self, spec, generator_flags):
    self.spec = spec
    self.vs_version = GetVSVersion(generator_flags)
    # Build one per-configuration dict attribute for each supported msvs_*
    # field; a missing field defaults to an empty instance of its type.
    supported_fields = [
        ('msvs_configuration_attributes', dict),
        ('msvs_settings', dict),
        ('msvs_system_include_dirs', list),
        ('msvs_disabled_warnings', list),
        ('msvs_precompiled_header', str),
        ('msvs_precompiled_source', str),
        ('msvs_configuration_platform', str),
        ('msvs_target_platform', str),
        ]
    configs = spec['configurations']
    for field, default in supported_fields:
      setattr(self, field, {})
      # NOTE(review): iteritems() is Python-2-only; switch to items() if this
      # file is ever ported to Python 3.
      for configname, config in configs.iteritems():
        getattr(self, field)[configname] = config.get(field, default())
    self.msvs_cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])
    # Fail loudly if a target uses fields this generator does not emulate.
    unsupported_fields = [
        'msvs_prebuild',
        'msvs_postbuild',
    ]
    unsupported = []
    for field in unsupported_fields:
      for config in configs.values():
        if field in config:
          unsupported += ["%s not supported (target %s)." %
                          (field, spec['target_name'])]
    if unsupported:
      raise Exception('\n'.join(unsupported))
  def GetExtension(self):
    """Returns the extension for the target, with no leading dot.
    Uses 'product_extension' if specified, otherwise uses MSVS defaults based on
    the target type.
    """
    ext = self.spec.get('product_extension', None)
    if ext:
      return ext
    return gyp.MSVSUtil.TARGET_TYPE_EXT.get(self.spec['type'], '')
  def GetVSMacroEnv(self, base_to_build=None, config=None):
    """Get a dict of variables mapping internal VS macro names to their gyp
    equivalents."""
    target_platform = 'Win32' if self.GetArch(config) == 'x86' else 'x64'
    target_name = self.spec.get('product_prefix', '') + \
        self.spec.get('product_name', self.spec['target_name'])
    target_dir = base_to_build + '\\' if base_to_build else ''
    target_ext = '.' + self.GetExtension()
    target_file_name = target_name + target_ext
    replacements = {
        '$(InputName)': '${root}',
        '$(InputPath)': '${source}',
        '$(IntDir)': '$!INTERMEDIATE_DIR',
        '$(OutDir)\\': target_dir,
        '$(PlatformName)': target_platform,
        '$(ProjectDir)\\': '',
        '$(ProjectName)': self.spec['target_name'],
        '$(TargetDir)\\': target_dir,
        '$(TargetExt)': target_ext,
        '$(TargetFileName)': target_file_name,
        '$(TargetName)': target_name,
        '$(TargetPath)': os.path.join(target_dir, target_file_name),
    }
    replacements.update(GetGlobalVSMacroEnv(self.vs_version))
    return replacements
  def ConvertVSMacros(self, s, base_to_build=None, config=None):
    """Convert from VS macro names to something equivalent."""
    env = self.GetVSMacroEnv(base_to_build, config=config)
    return ExpandMacros(s, env)
  def AdjustLibraries(self, libraries):
    """Strip -l from library if it's specified with that."""
    libs = [lib[2:] if lib.startswith('-l') else lib for lib in libraries]
    return [lib + '.lib' if not lib.endswith('.lib') else lib for lib in libs]
  def _GetAndMunge(self, field, path, default, prefix, append, map):
    """Retrieve a value from |field| at |path| or return |default|. If
    |append| is specified, and the item is found, it will be appended to that
    object instead of returned. If |map| is specified, results will be
    remapped through |map| before being returned or appended."""
    result = _GenericRetrieve(field, default, path)
    result = _DoRemapping(result, map)
    result = _AddPrefix(result, prefix)
    return _AppendOrReturn(append, result)
  class _GetWrapper(object):
    # Small helper bound to one (settings dict, tool name) pair so call sites
    # can write wrapper('SettingName', ...) instead of repeating the path.
    def __init__(self, parent, field, base_path, append=None):
      self.parent = parent
      self.field = field
      self.base_path = [base_path]
      self.append = append
    def __call__(self, name, map=None, prefix='', default=None):
      return self.parent._GetAndMunge(self.field, self.base_path + [name],
          default=default, prefix=prefix, append=self.append, map=map)
  def GetArch(self, config):
    """Get architecture based on msvs_configuration_platform and
    msvs_target_platform. Returns either 'x86' or 'x64'."""
    configuration_platform = self.msvs_configuration_platform.get(config, '')
    platform = self.msvs_target_platform.get(config, '')
    if not platform: # If no specific override, use the configuration's.
      platform = configuration_platform
    # Map from platform to architecture.
    return {'Win32': 'x86', 'x64': 'x64'}.get(platform, 'x86')
  def _TargetConfig(self, config):
    """Returns the target-specific configuration."""
    # There's two levels of architecture/platform specification in VS. The
    # first level is globally for the configuration (this is what we consider
    # "the" config at the gyp level, which will be something like 'Debug' or
    # 'Release_x64'), and a second target-specific configuration, which is an
    # override for the global one. |config| is remapped here to take into
    # account the local target-specific overrides to the global configuration.
    arch = self.GetArch(config)
    if arch == 'x64' and not config.endswith('_x64'):
      config += '_x64'
    if arch == 'x86' and config.endswith('_x64'):
      config = config.rsplit('_', 1)[0]
    return config
  def _Setting(self, path, config,
              default=None, prefix='', append=None, map=None):
    """_GetAndMunge for msvs_settings."""
    return self._GetAndMunge(
        self.msvs_settings[config], path, default, prefix, append, map)
  def _ConfigAttrib(self, path, config,
                   default=None, prefix='', append=None, map=None):
    """_GetAndMunge for msvs_configuration_attributes."""
    return self._GetAndMunge(
        self.msvs_configuration_attributes[config],
        path, default, prefix, append, map)
  def AdjustIncludeDirs(self, include_dirs, config):
    """Updates include_dirs to expand VS specific paths, and adds the system
    include dirs used for platform SDK and similar."""
    config = self._TargetConfig(config)
    includes = include_dirs + self.msvs_system_include_dirs[config]
    includes.extend(self._Setting(
        ('VCCLCompilerTool', 'AdditionalIncludeDirectories'), config, default=[]))
    return [self.ConvertVSMacros(p, config=config) for p in includes]
  def AdjustMidlIncludeDirs(self, midl_include_dirs, config):
    """Updates midl_include_dirs to expand VS specific paths, and adds the
    system include dirs used for platform SDK and similar."""
    config = self._TargetConfig(config)
    includes = midl_include_dirs + self.msvs_system_include_dirs[config]
    includes.extend(self._Setting(
        ('VCMIDLTool', 'AdditionalIncludeDirectories'), config, default=[]))
    return [self.ConvertVSMacros(p, config=config) for p in includes]
  def GetComputedDefines(self, config):
    """Returns the set of defines that are injected to the defines list based
    on other VS settings."""
    config = self._TargetConfig(config)
    defines = []
    # CharacterSet '1' is Unicode, '2' is MBCS in the VS2008 DOM.
    if self._ConfigAttrib(['CharacterSet'], config) == '1':
      defines.extend(('_UNICODE', 'UNICODE'))
    if self._ConfigAttrib(['CharacterSet'], config) == '2':
      defines.append('_MBCS')
    defines.extend(self._Setting(
        ('VCCLCompilerTool', 'PreprocessorDefinitions'), config, default=[]))
    return defines
  def GetCompilerPdbName(self, config, expand_special):
    """Get the pdb file name that should be used for compiler invocations, or
    None if there's no explicit name specified."""
    config = self._TargetConfig(config)
    pdbname = self._Setting(
        ('VCCLCompilerTool', 'ProgramDataBaseFileName'), config)
    if pdbname:
      pdbname = expand_special(self.ConvertVSMacros(pdbname))
    return pdbname
  def GetMapFileName(self, config, expand_special):
    """Gets the explicitly overriden map file name for a target or returns None
    if it's not set."""
    config = self._TargetConfig(config)
    map_file = self._Setting(('VCLinkerTool', 'MapFileName'), config)
    if map_file:
      map_file = expand_special(self.ConvertVSMacros(map_file, config=config))
    return map_file
  def GetOutputName(self, config, expand_special):
    """Gets the explicitly overridden output name for a target or returns None
    if it's not overridden."""
    config = self._TargetConfig(config)
    type = self.spec['type']
    root = 'VCLibrarianTool' if type == 'static_library' else 'VCLinkerTool'
    # TODO(scottmg): Handle OutputDirectory without OutputFile.
    output_file = self._Setting((root, 'OutputFile'), config)
    if output_file:
      output_file = expand_special(self.ConvertVSMacros(
          output_file, config=config))
    return output_file
  def GetPDBName(self, config, expand_special, default):
    """Gets the explicitly overridden pdb name for a target or returns
    default if it's not overridden, or if no pdb will be generated."""
    config = self._TargetConfig(config)
    output_file = self._Setting(('VCLinkerTool', 'ProgramDatabaseFile'), config)
    generate_debug_info = self._Setting(
        ('VCLinkerTool', 'GenerateDebugInformation'), config)
    if generate_debug_info == 'true':
      if output_file:
        return expand_special(self.ConvertVSMacros(output_file, config=config))
      else:
        return default
    else:
      return None
  def GetNoImportLibrary(self, config):
    """If NoImportLibrary: true, ninja will not expect the output to include
    an import library."""
    config = self._TargetConfig(config)
    noimplib = self._Setting(('NoImportLibrary',), config)
    return noimplib == 'true'
  def GetAsmflags(self, config):
    """Returns the flags that need to be added to ml invocations."""
    config = self._TargetConfig(config)
    asmflags = []
    safeseh = self._Setting(('MASM', 'UseSafeExceptionHandlers'), config)
    if safeseh == 'true':
      asmflags.append('/safeseh')
    return asmflags
  def GetCflags(self, config):
    """Returns the flags that need to be added to .c and .cc compilations."""
    config = self._TargetConfig(config)
    cflags = []
    cflags.extend(['/wd' + w for w in self.msvs_disabled_warnings[config]])
    cl = self._GetWrapper(self, self.msvs_settings[config],
                          'VCCLCompilerTool', append=cflags)
    # Each cl(...) call maps one VS DOM setting to its cl.exe flag, appending
    # the result into |cflags| via the _GetWrapper above.
    cl('Optimization',
       map={'0': 'd', '1': '1', '2': '2', '3': 'x'}, prefix='/O', default='2')
    cl('InlineFunctionExpansion', prefix='/Ob')
    cl('DisableSpecificWarnings', prefix='/wd')
    cl('StringPooling', map={'true': '/GF'})
    cl('EnableFiberSafeOptimizations', map={'true': '/GT'})
    cl('OmitFramePointers', map={'false': '-', 'true': ''}, prefix='/Oy')
    cl('EnableIntrinsicFunctions', map={'false': '-', 'true': ''}, prefix='/Oi')
    cl('FavorSizeOrSpeed', map={'1': 't', '2': 's'}, prefix='/O')
    cl('FloatingPointModel',
       map={'0': 'precise', '1': 'strict', '2': 'fast'}, prefix='/fp:',
       default='0')
    cl('CompileAsManaged', map={'false': '', 'true': '/clr'})
    cl('WholeProgramOptimization', map={'true': '/GL'})
    cl('WarningLevel', prefix='/W')
    cl('WarnAsError', map={'true': '/WX'})
    cl('CallingConvention',
       map={'0': 'd', '1': 'r', '2': 'z', '3': 'v'}, prefix='/G')
    cl('DebugInformationFormat',
       map={'1': '7', '3': 'i', '4': 'I'}, prefix='/Z')
    cl('RuntimeTypeInfo', map={'true': '/GR', 'false': '/GR-'})
    cl('EnableFunctionLevelLinking', map={'true': '/Gy', 'false': '/Gy-'})
    cl('MinimalRebuild', map={'true': '/Gm'})
    cl('BufferSecurityCheck', map={'true': '/GS', 'false': '/GS-'})
    cl('BasicRuntimeChecks', map={'1': 's', '2': 'u', '3': '1'}, prefix='/RTC')
    cl('RuntimeLibrary',
       map={'0': 'T', '1': 'Td', '2': 'D', '3': 'Dd'}, prefix='/M')
    cl('ExceptionHandling', map={'1': 'sc','2': 'a'}, prefix='/EH')
    cl('DefaultCharIsUnsigned', map={'true': '/J'})
    cl('TreatWChar_tAsBuiltInType',
       map={'false': '-', 'true': ''}, prefix='/Zc:wchar_t')
    cl('EnablePREfast', map={'true': '/analyze'})
    cl('AdditionalOptions', prefix='')
    cl('EnableEnhancedInstructionSet',
       map={'1': 'SSE', '2': 'SSE2', '3': 'AVX', '4': 'IA32', '5': 'AVX2'},
       prefix='/arch:')
    cflags.extend(['/FI' + f for f in self._Setting(
        ('VCCLCompilerTool', 'ForcedIncludeFiles'), config, default=[])])
    if self.vs_version.short_name in ('2013', '2013e', '2015'):
      # New flag required in 2013 to maintain previous PDB behavior.
      cflags.append('/FS')
    # ninja handles parallelism by itself, don't have the compiler do it too.
    # NOTE(review): filter() returns a list on Python 2 but a lazy iterator on
    # Python 3; callers that expect a list should be checked when porting.
    cflags = filter(lambda x: not x.startswith('/MP'), cflags)
    return cflags
  def _GetPchFlags(self, config, extension):
    """Get the flags to be added to the cflags for precompiled header support.
    """
    config = self._TargetConfig(config)
    # The PCH is only built once by a particular source file. Usage of PCH must
    # only be for the same language (i.e. C vs. C++), so only include the pch
    # flags when the language matches.
    if self.msvs_precompiled_header[config]:
      source_ext = os.path.splitext(self.msvs_precompiled_source[config])[1]
      if _LanguageMatchesForPch(source_ext, extension):
        pch = os.path.split(self.msvs_precompiled_header[config])[1]
        return ['/Yu' + pch, '/FI' + pch, '/Fp${pchprefix}.' + pch + '.pch']
    return  []
  def GetCflagsC(self, config):
    """Returns the flags that need to be added to .c compilations."""
    config = self._TargetConfig(config)
    return self._GetPchFlags(config, '.c')
  def GetCflagsCC(self, config):
    """Returns the flags that need to be added to .cc compilations."""
    config = self._TargetConfig(config)
    return ['/TP'] + self._GetPchFlags(config, '.cc')
  def _GetAdditionalLibraryDirectories(self, root, config, gyp_to_build_path):
    """Get and normalize the list of paths in AdditionalLibraryDirectories
    setting."""
    config = self._TargetConfig(config)
    libpaths = self._Setting((root, 'AdditionalLibraryDirectories'),
                             config, default=[])
    libpaths = [os.path.normpath(
                    gyp_to_build_path(self.ConvertVSMacros(p, config=config)))
                for p in libpaths]
    return ['/LIBPATH:"' + p + '"' for p in libpaths]
  def GetLibFlags(self, config, gyp_to_build_path):
    """Returns the flags that need to be added to lib commands."""
    config = self._TargetConfig(config)
    libflags = []
    lib = self._GetWrapper(self, self.msvs_settings[config],
                          'VCLibrarianTool', append=libflags)
    libflags.extend(self._GetAdditionalLibraryDirectories(
        'VCLibrarianTool', config, gyp_to_build_path))
    lib('LinkTimeCodeGeneration', map={'true': '/LTCG'})
    lib('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
        prefix='/MACHINE:')
    lib('AdditionalOptions')
    return libflags
  def GetDefFile(self, gyp_to_build_path):
    """Returns the .def file from sources, if any.  Otherwise returns None."""
    spec = self.spec
    if spec['type'] in ('shared_library', 'loadable_module', 'executable'):
      def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
      if len(def_files) == 1:
        return gyp_to_build_path(def_files[0])
      elif len(def_files) > 1:
        raise Exception("Multiple .def files")
    return None
  def _GetDefFileAsLdflags(self, ldflags, gyp_to_build_path):
    """.def files get implicitly converted to a ModuleDefinitionFile for the
    linker in the VS generator. Emulate that behaviour here."""
    def_file = self.GetDefFile(gyp_to_build_path)
    if def_file:
      ldflags.append('/DEF:"%s"' % def_file)
  def GetPGDName(self, config, expand_special):
    """Gets the explicitly overridden pgd name for a target or returns None
    if it's not overridden."""
    config = self._TargetConfig(config)
    output_file = self._Setting(
        ('VCLinkerTool', 'ProfileGuidedDatabase'), config)
    if output_file:
      output_file = expand_special(self.ConvertVSMacros(
          output_file, config=config))
    return output_file
  def GetLdflags(self, config, gyp_to_build_path, expand_special,
                 manifest_base_name, output_name, is_executable, build_dir):
    """Returns the flags that need to be added to link commands, and the
    manifest files."""
    config = self._TargetConfig(config)
    ldflags = []
    ld = self._GetWrapper(self, self.msvs_settings[config],
                          'VCLinkerTool', append=ldflags)
    self._GetDefFileAsLdflags(ldflags, gyp_to_build_path)
    ld('GenerateDebugInformation', map={'true': '/DEBUG'})
    ld('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
       prefix='/MACHINE:')
    ldflags.extend(self._GetAdditionalLibraryDirectories(
        'VCLinkerTool', config, gyp_to_build_path))
    ld('DelayLoadDLLs', prefix='/DELAYLOAD:')
    ld('TreatLinkerWarningAsErrors', prefix='/WX',
       map={'true': '', 'false': ':NO'})
    out = self.GetOutputName(config, expand_special)
    if out:
      ldflags.append('/OUT:' + out)
    pdb = self.GetPDBName(config, expand_special, output_name + '.pdb')
    if pdb:
      ldflags.append('/PDB:' + pdb)
    pgd = self.GetPGDName(config, expand_special)
    if pgd:
      ldflags.append('/PGD:' + pgd)
    map_file = self.GetMapFileName(config, expand_special)
    ld('GenerateMapFile', map={'true': '/MAP:' + map_file if map_file
        else '/MAP'})
    ld('MapExports', map={'true': '/MAPINFO:EXPORTS'})
    ld('AdditionalOptions', prefix='')
    minimum_required_version = self._Setting(
        ('VCLinkerTool', 'MinimumRequiredVersion'), config, default='')
    if minimum_required_version:
      minimum_required_version = ',' + minimum_required_version
    ld('SubSystem',
       map={'1': 'CONSOLE%s' % minimum_required_version,
            '2': 'WINDOWS%s' % minimum_required_version},
       prefix='/SUBSYSTEM:')
    stack_reserve_size = self._Setting(
        ('VCLinkerTool', 'StackReserveSize'), config, default='')
    if stack_reserve_size:
      stack_commit_size = self._Setting(
          ('VCLinkerTool', 'StackCommitSize'), config, default='')
      if stack_commit_size:
        stack_commit_size = ',' + stack_commit_size
      ldflags.append('/STACK:%s%s' % (stack_reserve_size, stack_commit_size))
    ld('TerminalServerAware', map={'1': ':NO', '2': ''}, prefix='/TSAWARE')
    ld('LinkIncremental', map={'1': ':NO', '2': ''}, prefix='/INCREMENTAL')
    ld('BaseAddress', prefix='/BASE:')
    ld('FixedBaseAddress', map={'1': ':NO', '2': ''}, prefix='/FIXED')
    ld('RandomizedBaseAddress',
       map={'1': ':NO', '2': ''}, prefix='/DYNAMICBASE')
    ld('DataExecutionPrevention',
       map={'1': ':NO', '2': ''}, prefix='/NXCOMPAT')
    ld('OptimizeReferences', map={'1': 'NOREF', '2': 'REF'}, prefix='/OPT:')
    ld('ForceSymbolReferences', prefix='/INCLUDE:')
    ld('EnableCOMDATFolding', map={'1': 'NOICF', '2': 'ICF'}, prefix='/OPT:')
    ld('LinkTimeCodeGeneration',
       map={'1': '', '2': ':PGINSTRUMENT', '3': ':PGOPTIMIZE',
            '4': ':PGUPDATE'},
       prefix='/LTCG')
    ld('IgnoreDefaultLibraryNames', prefix='/NODEFAULTLIB:')
    ld('ResourceOnlyDLL', map={'true': '/NOENTRY'})
    ld('EntryPointSymbol', prefix='/ENTRY:')
    ld('Profile', map={'true': '/PROFILE'})
    ld('LargeAddressAware',
       map={'1': ':NO', '2': ''}, prefix='/LARGEADDRESSAWARE')
    # TODO(scottmg): This should sort of be somewhere else (not really a flag).
    ld('AdditionalDependencies', prefix='')
    # SAFESEH is only meaningful (and defaulted on) for x86 links.
    if self.GetArch(config) == 'x86':
      safeseh_default = 'true'
    else:
      safeseh_default = None
    ld('ImageHasSafeExceptionHandlers',
       map={'false': ':NO', 'true': ''}, prefix='/SAFESEH',
       default=safeseh_default)
    # If the base address is not specifically controlled, DYNAMICBASE should
    # be on by default.
    # NOTE(review): these filter()-based checks rely on Python 2 returning a
    # list; on Python 3 a filter object is always truthy, so the emptiness
    # tests below would misbehave — revisit when porting.
    base_flags = filter(lambda x: 'DYNAMICBASE' in x or x == '/FIXED',
                        ldflags)
    if not base_flags:
      ldflags.append('/DYNAMICBASE')
    # If the NXCOMPAT flag has not been specified, default to on. Despite the
    # documentation that says this only defaults to on when the subsystem is
    # Vista or greater (which applies to the linker), the IDE defaults it on
    # unless it's explicitly off.
    if not filter(lambda x: 'NXCOMPAT' in x, ldflags):
      ldflags.append('/NXCOMPAT')
    have_def_file = filter(lambda x: x.startswith('/DEF:'), ldflags)
    manifest_flags, intermediate_manifest, manifest_files = \
        self._GetLdManifestFlags(config, manifest_base_name, gyp_to_build_path,
                                 is_executable and not have_def_file, build_dir)
    ldflags.extend(manifest_flags)
    return ldflags, intermediate_manifest, manifest_files
  def _GetLdManifestFlags(self, config, name, gyp_to_build_path,
                          allow_isolation, build_dir):
    """Returns a 3-tuple:
    - the set of flags that need to be added to the link to generate
      a default manifest
    - the intermediate manifest that the linker will generate that should be
      used to assert it doesn't add anything to the merged one.
    - the list of all the manifest files to be merged by the manifest tool and
      included into the link."""
    generate_manifest = self._Setting(('VCLinkerTool', 'GenerateManifest'),
                                      config,
                                      default='true')
    if generate_manifest != 'true':
      # This means not only that the linker should not generate the intermediate
      # manifest but also that the manifest tool should do nothing even when
      # additional manifests are specified.
      return ['/MANIFEST:NO'], [], []
    output_name = name + '.intermediate.manifest'
    flags = [
      '/MANIFEST',
      '/ManifestFile:' + output_name,
    ]
    # Instead of using the MANIFESTUAC flags, we generate a .manifest to
    # include into the list of manifests. This allows us to avoid the need to
    # do two passes during linking. The /MANIFEST flag and /ManifestFile are
    # still used, and the intermediate manifest is used to assert that the
    # final manifest we get from merging all the additional manifest files
    # (plus the one we generate here) isn't modified by merging the
    # intermediate into it.
    # Always NO, because we generate a manifest file that has what we want.
    flags.append('/MANIFESTUAC:NO')
    # NOTE(review): the GenerateManifest read above used the un-remapped
    # |config|; callers already pass a remapped config and _TargetConfig is
    # idempotent, so this re-remap is harmless — but confirm if new callers
    # are added.
    config = self._TargetConfig(config)
    enable_uac = self._Setting(('VCLinkerTool', 'EnableUAC'), config,
                               default='true')
    manifest_files = []
    generated_manifest_outer = \
"<?xml version='1.0' encoding='UTF-8' standalone='yes'?>" \
"<assembly xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>%s" \
"</assembly>"
    if enable_uac == 'true':
      execution_level = self._Setting(('VCLinkerTool', 'UACExecutionLevel'),
                                      config, default='0')
      execution_level_map = {
        '0': 'asInvoker',
        '1': 'highestAvailable',
        '2': 'requireAdministrator'
      }
      ui_access = self._Setting(('VCLinkerTool', 'UACUIAccess'), config,
                                default='false')
      inner = '''
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
  <security>
    <requestedPrivileges>
      <requestedExecutionLevel level='%s' uiAccess='%s' />
    </requestedPrivileges>
  </security>
</trustInfo>''' % (execution_level_map[execution_level], ui_access)
    else:
      inner = ''
    generated_manifest_contents = generated_manifest_outer % inner
    generated_name = name + '.generated.manifest'
    # Need to join with the build_dir here as we're writing it during
    # generation time, but we return the un-joined version because the build
    # will occur in that directory. We only write the file if the contents
    # have changed so that simply regenerating the project files doesn't
    # cause a relink.
    build_dir_generated_name = os.path.join(build_dir, generated_name)
    gyp.common.EnsureDirExists(build_dir_generated_name)
    f = gyp.common.WriteOnDiff(build_dir_generated_name)
    f.write(generated_manifest_contents)
    f.close()
    manifest_files = [generated_name]
    if allow_isolation:
      flags.append('/ALLOWISOLATION')
    manifest_files += self._GetAdditionalManifestFiles(config,
                                                       gyp_to_build_path)
    return flags, output_name, manifest_files
  def _GetAdditionalManifestFiles(self, config, gyp_to_build_path):
    """Gets additional manifest files that are added to the default one
    generated by the linker."""
    files = self._Setting(('VCManifestTool', 'AdditionalManifestFiles'), config,
                          default=[])
    if isinstance(files, str):
      files = files.split(';')
    return [os.path.normpath(
                gyp_to_build_path(self.ConvertVSMacros(f, config=config)))
            for f in files]
  def IsUseLibraryDependencyInputs(self, config):
    """Returns whether the target should be linked via Use Library Dependency
    Inputs (using component .objs of a given .lib)."""
    config = self._TargetConfig(config)
    uldi = self._Setting(('VCLinkerTool', 'UseLibraryDependencyInputs'), config)
    return uldi == 'true'
  def IsEmbedManifest(self, config):
    """Returns whether manifest should be linked into binary."""
    config = self._TargetConfig(config)
    embed = self._Setting(('VCManifestTool', 'EmbedManifest'), config,
                          default='true')
    return embed == 'true'
  def IsLinkIncremental(self, config):
    """Returns whether the target should be linked incrementally."""
    config = self._TargetConfig(config)
    link_inc = self._Setting(('VCLinkerTool', 'LinkIncremental'), config)
    return link_inc != '1'
  def GetRcflags(self, config, gyp_to_ninja_path):
    """Returns the flags that need to be added to invocations of the resource
    compiler."""
    config = self._TargetConfig(config)
    rcflags = []
    rc = self._GetWrapper(self, self.msvs_settings[config],
        'VCResourceCompilerTool', append=rcflags)
    rc('AdditionalIncludeDirectories', map=gyp_to_ninja_path, prefix='/I')
    rcflags.append('/I' + gyp_to_ninja_path('.'))
    rc('PreprocessorDefinitions', prefix='/d')
    # /l arg must be in hex without leading '0x'
    rc('Culture', prefix='/l', map=lambda x: hex(int(x))[2:])
    return rcflags
  def BuildCygwinBashCommandLine(self, args, path_to_base):
    """Build a command line that runs args via cygwin bash. We assume that all
    incoming paths are in Windows normpath'd form, so they need to be
    converted to posix style for the part of the command line that's passed to
    bash. We also have to do some Visual Studio macro emulation here because
    various rules use magic VS names for things. Also note that rules that
    contain ninja variables cannot be fixed here (for example ${source}), so
    the outer generator needs to make sure that the paths that are written out
    are in posix style, if the command line will be used here."""
    cygwin_dir = os.path.normpath(
        os.path.join(path_to_base, self.msvs_cygwin_dirs[0]))
    cd = ('cd %s' % path_to_base).replace('\\', '/')
    args = [a.replace('\\', '/').replace('"', '\\"') for a in args]
    args = ["'%s'" % a.replace("'", "'\\''") for a in args]
    bash_cmd = ' '.join(args)
    cmd = (
        'call "%s\\setup_env.bat" && set CYGWIN=nontsec && ' % cygwin_dir +
        'bash -c "%s ; %s"' % (cd, bash_cmd))
    return cmd
  def IsRuleRunUnderCygwin(self, rule):
    """Determine if an action should be run under cygwin. If the variable is
    unset, or set to 1 we use cygwin."""
    return int(rule.get('msvs_cygwin_shell',
                        self.spec.get('msvs_cygwin_shell', 1))) != 0
  def _HasExplicitRuleForExtension(self, spec, extension):
    """Determine if there's an explicit rule for a particular extension."""
    for rule in spec.get('rules', []):
      if rule['extension'] == extension:
        return True
    return False
  def _HasExplicitIdlActions(self, spec):
    """Determine if an action should not run midl for .idl files."""
    return any([action.get('explicit_idl_action', 0)
                for action in spec.get('actions', [])])
  def HasExplicitIdlRulesOrActions(self, spec):
    """Determine if there's an explicit rule or action for idl files. When
    there isn't we need to generate implicit rules to build MIDL .idl files."""
    return (self._HasExplicitRuleForExtension(spec, 'idl') or
            self._HasExplicitIdlActions(spec))
  def HasExplicitAsmRules(self, spec):
    """Determine if there's an explicit rule for asm files. When there isn't we
    need to generate implicit rules to assemble .asm files."""
    return self._HasExplicitRuleForExtension(spec, 'asm')
  def GetIdlBuildData(self, source, config):
    """Determine the implicit outputs for an idl file. Returns output
    directory, outputs, and variables and flags that are required."""
    config = self._TargetConfig(config)
    midl_get = self._GetWrapper(self, self.msvs_settings[config], 'VCMIDLTool')
    # Each MIDL setting is read and then has VS macros expanded, with the
    # MSVS default file-naming patterns used as fallbacks.
    def midl(name, default=None):
      return self.ConvertVSMacros(midl_get(name, default=default),
                                  config=config)
    tlb = midl('TypeLibraryName', default='${root}.tlb')
    header = midl('HeaderFileName', default='${root}.h')
    dlldata = midl('DLLDataFileName', default='dlldata.c')
    iid = midl('InterfaceIdentifierFileName', default='${root}_i.c')
    proxy = midl('ProxyFileName', default='${root}_p.c')
    # Note that .tlb is not included in the outputs as it is not always
    # generated depending on the content of the input idl file.
    outdir = midl('OutputDirectory', default='')
    output = [header, dlldata, iid, proxy]
    variables = [('tlb', tlb),
                 ('h', header),
                 ('dlldata', dlldata),
                 ('iid', iid),
                 ('proxy', proxy)]
    # TODO(scottmg): Are there configuration settings to set these flags?
    target_platform = 'win32' if self.GetArch(config) == 'x86' else 'x64'
    flags = ['/char', 'signed', '/env', target_platform, '/Oicf']
    return outdir, output, variables, flags
def _LanguageMatchesForPch(source_ext, pch_source_ext):
c_exts = ('.c',)
cc_exts = ('.cc', '.cxx', '.cpp')
return ((source_ext in c_exts and pch_source_ext in c_exts) or
(source_ext in cc_exts and pch_source_ext in cc_exts))
class PrecompiledHeader(object):
  """Helper to generate dependencies and build rules to handle generation of
  precompiled headers. Interface matches the GCH handler in xcode_emulation.py.
  """
  def __init__(
      self, settings, config, gyp_to_build_path, gyp_to_unique_output, obj_ext):
    self.settings = settings
    self.config = config
    # Resolve the configured pch source both as a build path and as the
    # (lowercased) unique object file it will compile to.
    source = settings.msvs_precompiled_source[config]
    self.pch_source = gyp_to_build_path(source)
    base, _ = os.path.splitext(source)
    self.output_obj = gyp_to_unique_output(base + obj_ext).lower()
  def _PchHeader(self):
    """Get the header that will appear in an #include line for all source
    files."""
    header_path = self.settings.msvs_precompiled_header[self.config]
    return os.path.split(header_path)[1]
  def GetObjDependencies(self, sources, objs, arch):
    """Given a list of sources files and the corresponding object files,
    returns a list of the pch files that should be depended upon. The
    additional wrapping in the return value is for interface compatibility
    with make.py on Mac, and xcode_emulation.py."""
    assert arch is None
    if not self._PchHeader():
      return []
    pch_ext = os.path.splitext(self.pch_source)[1]
    # Depend on the pch object only if some source shares its language.
    if any(_LanguageMatchesForPch(os.path.splitext(s)[1], pch_ext)
           for s in sources):
      return [(None, None, self.output_obj)]
    return []
  def GetPchBuildCommands(self, arch):
    """Not used on Windows as there are no additional build steps required
    (instead, existing steps are modified in GetFlagsModifications below)."""
    return []
  def GetFlagsModifications(self, input, output, implicit, command,
                            cflags_c, cflags_cc, expand_special):
    """Get the modified cflags and implicit dependencies that should be used
    for the pch compilation step."""
    if input != self.pch_source:
      return [], output, implicit
    pch_output = ['/Yc' + self._PchHeader()]
    if command == 'cxx':
      return ([('cflags_cc', map(expand_special, cflags_cc + pch_output))],
              self.output_obj, [])
    if command == 'cc':
      return ([('cflags_c', map(expand_special, cflags_c + pch_output))],
              self.output_obj, [])
    return [], output, implicit
# Module-level cache for the selected Visual Studio version (see GetVSVersion).
vs_version = None
def GetVSVersion(generator_flags):
  """Return the (cached) Visual Studio version selected per the
  'msvs_version' generator flag (default 'auto'); selection runs once per
  process and the result is memoized in |vs_version|."""
  global vs_version
  if not vs_version:
    vs_version = gyp.MSVSVersion.SelectVisualStudioVersion(
        generator_flags.get('msvs_version', 'auto'),
        allow_fallback=False)
  return vs_version
def _GetVsvarsSetupArgs(generator_flags, arch):
  """Return the environment-setup script invocation for the selected VS
  version. |arch| is currently unused."""
  return GetVSVersion(generator_flags).SetupScript()
def ExpandMacros(string, expansions):
    """Expand $(Variable) per expansions dict. See MsvsSettings.GetVSMacroEnv
    for the canonical way to retrieve a suitable dict.

    Uses dict.items() rather than the Python-2-only iteritems() so the
    helper runs under both Python 2 and 3.  Expansion is a single pass,
    so replacement values must not themselves contain macros (asserted).
    """
    if '$' in string:
        for old, new in expansions.items():
            assert '$(' not in new, new
            string = string.replace(old, new)
    return string
def _ExtractImportantEnvironment(output_of_set):
"""Extracts environment variables required for the toolchain to run from
a textual dump output by the cmd.exe 'set' command."""
envvars_to_save = (
'goma_.*', # TODO(scottmg): This is ugly, but needed for goma.
'include',
'lib',
'libpath',
'path',
'pathext',
'systemroot',
'temp',
'tmp',
)
env = {}
for line in output_of_set.splitlines():
for envvar in envvars_to_save:
if re.match(envvar + '=', line.lower()):
var, setting = line.split('=', 1)
if envvar == 'path':
# Our own rules (for running gyp-win-tool) and other actions in
# Chromium rely on python being in the path. Add the path to this
# python here so that if it's not in the path when ninja is run
# later, python will still be found.
setting = os.path.dirname(sys.executable) + os.pathsep + setting
env[var.upper()] = setting
break
for required in ('SYSTEMROOT', 'TEMP', 'TMP'):
if required not in env:
raise Exception('Environment variable "%s" '
'required to be set to valid path' % required)
return env
def _FormatAsEnvironmentBlock(envvar_dict):
"""Format as an 'environment block' directly suitable for CreateProcess.
Briefly this is a list of key=value\0, terminated by an additional \0. See
CreateProcess documentation for more details."""
block = ''
nul = '\0'
for key, value in envvar_dict.iteritems():
block += key + '=' + value + nul
block += nul
return block
def _ExtractCLPath(output_of_where):
"""Gets the path to cl.exe based on the output of calling the environment
setup batch file, followed by the equivalent of `where`."""
# Take the first line, as that's the first found in the PATH.
for line in output_of_where.strip().splitlines():
if line.startswith('LOC:'):
return line[len('LOC:'):].strip()
def GenerateEnvironmentFiles(toplevel_build_dir, generator_flags,
                             system_includes, open_out):
    """It's not sufficient to have the absolute path to the compiler, linker,
    etc. on Windows, as those tools rely on .dlls being in the PATH. We also
    need to support both x86 and x64 compilers within the same build (to support
    msvs_target_platform hackery). Different architectures require a different
    compiler binary, and different supporting environment variables (INCLUDE,
    LIB, LIBPATH). So, we extract the environment here, wrap all invocations
    of compiler tools (cl, link, lib, rc, midl, etc.) via win_tool.py which
    sets up the environment, and then we do not prefix the compiler with
    an absolute path, instead preferring something like "cl.exe" in the rule
    which will then run whichever the environment setup has put in the path.
    When the following procedure to generate environment files does not
    meet your requirement (e.g. for custom toolchains), you can pass
    "-G ninja_use_custom_environment_files" to the gyp to suppress file
    generation and use custom environment files prepared by yourself.

    Returns a dict mapping arch ('x86'/'x64') to the detected cl.exe path
    (or the bare name 'cl.exe' when custom environment files are in use).
    Side effect: writes environment.<arch> files into toplevel_build_dir."""
    archs = ('x86', 'x64')
    # With custom environment files the user supplies environment.<arch>;
    # just report the unqualified compiler name for each architecture.
    if generator_flags.get('ninja_use_custom_environment_files', 0):
        cl_paths = {}
        for arch in archs:
            cl_paths[arch] = 'cl.exe'
        return cl_paths
    vs = GetVSVersion(generator_flags)
    cl_paths = {}
    for arch in archs:
        # Extract environment variables for subprocesses.
        # Run the VS setup script, then 'set', and capture the env dump.
        args = vs.SetupScript(arch)
        args.extend(('&&', 'set'))
        popen = subprocess.Popen(
            args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        variables, _ = popen.communicate()
        env = _ExtractImportantEnvironment(variables)
        # Inject system includes from gyp files into INCLUDE.
        if system_includes:
            system_includes = system_includes | OrderedSet(
                env.get('INCLUDE', '').split(';'))
            env['INCLUDE'] = ';'.join(system_includes)
        env_block = _FormatAsEnvironmentBlock(env)
        f = open_out(os.path.join(toplevel_build_dir, 'environment.' + arch), 'wb')
        f.write(env_block)
        f.close()
        # Find cl.exe location for this architecture.
        # '%~$PATH:i' makes cmd.exe expand cl.exe against the (now set) PATH.
        args = vs.SetupScript(arch)
        args.extend(('&&',
            'for', '%i', 'in', '(cl.exe)', 'do', '@echo', 'LOC:%~$PATH:i'))
        popen = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE)
        output, _ = popen.communicate()
        cl_paths[arch] = _ExtractCLPath(output)
    return cl_paths
def VerifyMissingSources(sources, build_dir, generator_flags, gyp_to_ninja):
    """Emulate behavior of msvs_error_on_missing_sources present in the msvs
    generator: Check that all regular source files, i.e. not created at run time,
    exist on disk. Missing files cause needless recompilation when building via
    VS, and we want this check to match for people/bots that build using ninja,
    so they're not surprised when the VS build fails.

    Raises an Exception listing the missing files, if any.
    """
    if int(generator_flags.get('msvs_error_on_missing_sources', 0)):
        # List comprehensions instead of filter(): on Python 3 filter()
        # returns a lazy iterator, which is always truthy, so the original
        # 'if missing:' check would fire even when nothing was missing.
        # Sources containing '$' are generated at build time and skipped.
        no_specials = [x for x in sources if '$' not in x]
        relative = [os.path.join(build_dir, gyp_to_ninja(s)) for s in no_specials]
        missing = [x for x in relative if not os.path.exists(x)]
        if missing:
            # They'll look like out\Release\..\..\stuff\things.cc, so normalize the
            # path for a slightly less crazy looking output.
            cleaned_up = [os.path.normpath(x) for x in missing]
            raise Exception('Missing input files:\n%s' % '\n'.join(cleaned_up))
# Sets some values in default_variables, which are required for many
# generators, run on Windows.
def CalculateCommonVariables(default_variables, params):
    """Populate MSVS_VERSION and MSVS_OS_BITS in default_variables."""
    generator_flags = params.get('generator_flags', {})
    # Set a variable so conditions can be based on msvs_version.
    msvs_version = gyp.msvs_emulation.GetVSVersion(generator_flags)
    default_variables['MSVS_VERSION'] = msvs_version.ShortName()
    # To determine processor word size on Windows, in addition to checking
    # PROCESSOR_ARCHITECTURE (which reflects the word size of the current
    # process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
    # contains the actual word size of the system when running thru WOW64).
    if ('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
        '64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')):
        default_variables['MSVS_OS_BITS'] = 64
    else:
        default_variables['MSVS_OS_BITS'] = 32
|
artistic-2.0
|
geoadmin/mapproxy
|
mapproxy/test/system/test_watermark.py
|
5
|
3475
|
# This file is part of the MapProxy project.
# Copyright (C) 2011 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement, division
from io import BytesIO
from mapproxy.compat.image import Image
from mapproxy.request.wms import WMS111MapRequest
from mapproxy.test.http import mock_httpd
from mapproxy.test.image import tmp_image
from mapproxy.test.system import module_setup, module_teardown, SystemTest, make_base_config
from nose.tools import eq_
# Mutable dict filled in by module_setup() with the loaded test system
# configuration; base_config closes over it.
test_config = {}
base_config = make_base_config(test_config)
def setup_module():
    # Load watermark.yaml and seed the tile cache before any test runs.
    module_setup(test_config, 'watermark.yaml', with_cache_data=True)
def teardown_module():
    # Clean up whatever module_setup created for this module.
    module_teardown(test_config)
class WatermarkTest(SystemTest):
    """System tests: tiles served through the watermark layers carry extra
    drawn colors (the watermark) on top of the source image."""
    config = test_config
    def setup(self):
        SystemTest.setup(self)
        # Baseline WMS 1.1.1 GetMap request (not used by the tile tests
        # below, but available for WMS-path tests).
        self.common_map_req = WMS111MapRequest(url='/service?', param=dict(service='WMS',
            version='1.1.1', bbox='-180,0,0,80', width='200', height='200',
            layers='watermark', srs='EPSG:4326', format='image/png',
            styles='', request='GetMap'))
    def test_watermark_tile(self):
        # Serve an all-black source image; the watermark should add at
        # least one more color while black stays dominant.
        # NOTE(review): the mixed-case 'LAYERs' in the expected path
        # presumably exercises case-insensitive parameter matching -- confirm.
        with tmp_image((256, 256), format='png', color=(0, 0, 0)) as img:
            expected_req = ({'path': r'/service?LAYERs=blank&SERVICE=WMS&FORMAT=image%2Fpng'
                                      '&REQUEST=GetMap&HEIGHT=256&SRS=EPSG%3A4326&styles='
                                      '&VERSION=1.1.1&BBOX=-180.0,-90.0,0.0,90.0'
                                      '&WIDTH=256'},
                            {'body': img.read(), 'headers': {'content-type': 'image/jpeg'}})
            with mock_httpd(('localhost', 42423), [expected_req]):
                resp = self.app.get('/tms/1.0.0/watermark/EPSG4326/0/0/0.png')
                eq_(resp.content_type, 'image/png')
                img = Image.open(BytesIO(resp.body))
                colors = img.getcolors()
                # More than one color means something was drawn on the tile.
                assert len(colors) >= 2
                # The most frequent color is still the black background.
                eq_(sorted(colors)[-1][1], (0, 0, 0))
    def test_transparent_watermark_tile(self):
        # Same check for the transparent watermark layer, using an RGBA
        # fully-transparent source image.
        with tmp_image((256, 256), format='png', color=(0, 0, 0, 0), mode='RGBA') as img:
            expected_req = ({'path': r'/service?LAYERs=blank&SERVICE=WMS&FORMAT=image%2Fpng'
                                      '&REQUEST=GetMap&HEIGHT=256&SRS=EPSG%3A4326&styles='
                                      '&VERSION=1.1.1&BBOX=-180.0,-90.0,0.0,90.0'
                                      '&WIDTH=256'},
                            {'body': img.read(), 'headers': {'content-type': 'image/jpeg'}})
            with mock_httpd(('localhost', 42423), [expected_req]):
                resp = self.app.get('/tms/1.0.0/watermark_transp/EPSG4326/0/0/0.png')
                eq_(resp.content_type, 'image/png')
                img = Image.open(BytesIO(resp.body))
                colors = img.getcolors()
                assert len(colors) >= 2
                eq_(sorted(colors)[-1][1], (0, 0, 0, 0))
|
apache-2.0
|
kr41/ggrc-core
|
src/ggrc_workflows/migrations/versions/20150714220605_3605dca868e4_add_slug_to_task_group_task.py
|
7
|
1213
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""add slug to task group task
Revision ID: 3605dca868e4
Revises: 1431e7094e26
Create Date: 2015-07-14 22:06:05.063945
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "3605dca868e4"
down_revision = "1431e7094e26"
# Migration parameters: table/column being extended and the prefix used
# when backfilling slug values ("TASK-<id>").
_table_name = "task_group_tasks"
_column_name = "slug"
_slug_prefix = "TASK-"
# Name of the UNIQUE constraint created over the slug column.
_constraint_name = "unique_{}".format(_column_name)
def upgrade():
    """ Add and fill a unique slug column """
    # 1) Add the column as nullable so existing rows remain valid.
    op.add_column(
        _table_name,
        sa.Column(_column_name, sa.String(length=250), nullable=True)
    )
    # 2) Backfill each row with a generated slug, e.g. "TASK-<id>".
    op.execute("UPDATE {table_name} SET slug = CONCAT('{prefix}', id)".format(
        table_name=_table_name,
        prefix=_slug_prefix
    ))
    # 3) With every row populated, tighten the column to NOT NULL.
    op.alter_column(
        _table_name,
        _column_name,
        existing_type=sa.String(length=250),
        nullable=False
    )
    # 4) Enforce uniqueness (ids are unique, so the backfilled slugs are too).
    op.create_unique_constraint(_constraint_name, _table_name, [_column_name])
def downgrade():
    """ Remove slug column from task group tasks """
    # Drop the unique constraint before the column it covers.
    op.drop_constraint(_constraint_name, _table_name, type_="unique")
    op.drop_column(_table_name, _column_name)
|
apache-2.0
|
ubuntustudio-kernel/ubuntu-raring-lowlatency
|
scripts/tracing/draw_functrace.py
|
14676
|
3560
|
#!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulted trace is processed into a tree to produce a more human
view of the call stack by drawing textual but hierarchical tree of
calls. Only the functions's names and the the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some times but not too much, the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
    """ This class provides a tree representation of the functions
    call stack. If a function has no parent in the kernel (interrupt,
    syscall, kernel thread...) then it is attached to a virtual parent
    called ROOT.
    """
    ROOT = None
    def __init__(self, func, time = None, parent = None):
        self._func = func
        self._time = time
        # A node created without an explicit parent hangs off ROOT.
        if parent is None:
            self._parent = CallTree.ROOT
        else:
            self._parent = parent
        self._children = []
    def calls(self, func, calltime):
        """ If a function calls another one, call this method to insert it
        into the tree at the appropriate place.
        @return: A reference to the newly created child node.
        """
        child = CallTree(func, calltime, self)
        self._children.append(child)
        return child
    def getParent(self, func):
        """ Retrieve the last parent of the current node that
        has the name given by func. If this function is not
        on a parent, then create it as new child of root
        @return: A reference to the parent.
        """
        tree = self
        # Walk up the ancestry until the caller is found or ROOT is reached.
        while tree != CallTree.ROOT and tree._func != func:
            tree = tree._parent
        if tree == CallTree.ROOT:
            child = CallTree.ROOT.calls(func, None)
            return child
        return tree
    def __repr__(self):
        return self.__toString("", True)
    def __toString(self, branch, lastChild):
        # Render this node with its ASCII branch prefix, then recurse into
        # children; the last child blanks the trailing '|' of the prefix.
        if self._time is not None:
            s = "%s----%s (%s)\n" % (branch, self._func, self._time)
        else:
            s = "%s----%s\n" % (branch, self._func)
        i = 0
        if lastChild:
            branch = branch[:-1] + " "
        while i < len(self._children):
            if i != len(self._children) - 1:
                s += "%s" % self._children[i].__toString(branch +\
                " |", False)
            else:
                s += "%s" % self._children[i].__toString(branch +\
                " |", True)
            i += 1
        return s
class BrokenLineException(Exception):
    """If the last line is not complete because of the pipe breakage,
    we want to stop the processing and ignore this line.
    """
    pass
class CommentLineException(Exception):
    """ If the line is a comment (as in the beginning of the trace file),
    just ignore it.
    """
    pass
def parseLine(line):
    """Split one ftrace line into a (calltime, callee, caller) tuple.

    Raises CommentLineException for comment lines and BrokenLineException
    when the line does not match the expected format (e.g. a truncated
    last line after the pipe broke)."""
    stripped = line.strip()
    if stripped.startswith("#"):
        raise CommentLineException
    match = re.match(r"[^]]+?\] +([0-9.]+): (\w+) <-(\w+)", stripped)
    if match is None:
        raise BrokenLineException
    return match.group(1), match.group(2), match.group(3)
def main():
    """Read a raw function trace from stdin and print the drawn call tree."""
    CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
    tree = CallTree.ROOT
    for line in sys.stdin:
        try:
            calltime, callee, caller = parseLine(line)
        except BrokenLineException:
            # Truncated final line (broken pipe): stop processing.
            break
        except CommentLineException:
            continue
        tree = tree.getParent(caller)
        tree = tree.calls(callee, calltime)
    # print() works under both Python 2 and 3; the original
    # 'print CallTree.ROOT' statement is a syntax error on Python 3.
    print(CallTree.ROOT)
if __name__ == "__main__":
    main()
|
gpl-2.0
|
missionpinball/mpf-examples
|
tests/test_TutorialStep16.py
|
1
|
6127
|
from mpfmc.tests.MpfIntegrationTestCase import MpfIntegrationTestCase
class TestTutorialStep16(MpfIntegrationTestCase):
    """Integration test for tutorial step 16: plays a full three-ball game
    on the smart_virtual platform, checking ball accounting, scoring, and
    game-mode state at every stage."""
    def getConfigFile(self):
        return 'config.yaml'
    def getMachinePath(self):
        return self.get_abs_path('tutorial_step_16')
    def get_platform(self):
        # smart_virtual auto-completes device ejects, so no hardware needed.
        return 'smart_virtual'
    def test_game(self):
        # start active switches should start with 5 balls in the trough
        self.machine.ball_controller.num_balls_known = 5
        self.assertEqual(5, self.machine.ball_devices.bd_trough.balls)
        # player hits start
        self.machine.switch_controller.process_switch('s_start', 1)
        self.machine.switch_controller.process_switch('s_start', 0)
        self.advance_time_and_run(2)
        # Ball 1 is staged in the plunger lane.
        self.assertEqual(1, self.machine.ball_devices.bd_plunger.balls)
        self.assertEqual(4, self.machine.ball_devices.bd_trough.balls)
        self.assertEqual(0, self.machine.ball_devices.playfield.balls)
        # make sure the base game mode is active
        self.assertTrue(self.machine.mode_controller.is_active('base'))
        # player hits the launch button
        self.machine.switch_controller.process_switch('s_launch', 1)
        self.machine.switch_controller.process_switch('s_launch', 0)
        self.advance_time_and_run(2)
        # ball is on the pf
        # 100 points
        self.machine.switch_controller.process_switch('s_right_inlane',
                                                      1)
        self.machine.switch_controller.process_switch('s_right_inlane',
                                                      0)
        self.advance_time_and_run(1)
        self.assertEqual(100, self.machine.game.player.score)
        # player should get 1000 points for hitting the flipper button
        self.machine.switch_controller.process_switch('s_left_flipper',
                                                      1)
        self.machine.switch_controller.process_switch('s_left_flipper',
                                                      0)
        self.advance_time_and_run(1)
        self.assertEqual(1100, self.machine.game.player.score)
        self.advance_time_and_run(2)
        self.assertEqual(1, self.machine.game.player.ball)
        self.assertEqual(0, self.machine.ball_devices.bd_plunger.balls)
        self.assertEqual(4, self.machine.ball_devices.bd_trough.balls)
        self.assertEqual(1, self.machine.ball_devices.playfield.balls)
        # lets drop the ball in some devices and make sure it kicks it out
        self.machine.switch_controller.process_switch('s_eject', 1)
        self.advance_time_and_run(2)
        self.assertEqual(1, self.machine.ball_devices.playfield.balls)
        self.machine.switch_controller.process_switch('s_bottom_popper', 1)
        self.advance_time_and_run(3)
        self.assertEqual(1, self.machine.ball_devices.playfield.balls)
        self.machine.switch_controller.process_switch('s_top_popper', 1)
        self.advance_time_and_run(3)
        self.assertEqual(1, self.machine.ball_devices.playfield.balls)
        # ball drains, game goes to ball 2
        self.machine.default_platform.add_ball_to_device(
            self.machine.ball_devices.bd_trough)
        self.advance_time_and_run(1)
        self.assertEqual(2, self.machine.game.player.ball)
        self.assertEqual(1, self.machine.ball_devices.bd_plunger.balls)
        self.assertEqual(4, self.machine.ball_devices.bd_trough.balls)
        self.assertEqual(0, self.machine.ball_devices.playfield.balls)
        # repeat above cycle for ball 2
        # player hits the launch button
        self.machine.switch_controller.process_switch('s_launch', 1)
        self.machine.switch_controller.process_switch('s_launch', 0)
        self.advance_time_and_run(2)
        # ball is on the pf
        self.machine.switch_controller.process_switch('s_right_inlane', 1)
        self.machine.switch_controller.process_switch('s_right_inlane', 0)
        self.advance_time_and_run(2)
        self.assertEqual(2, self.machine.game.player.ball)
        self.assertEqual(0, self.machine.ball_devices.bd_plunger.balls)
        self.assertEqual(4, self.machine.ball_devices.bd_trough.balls)
        self.assertEqual(1, self.machine.ball_devices.playfield.balls)
        # ball drains, game goes to ball 3
        self.machine.default_platform.add_ball_to_device(
            self.machine.ball_devices.bd_trough)
        self.advance_time_and_run(1)
        self.assertEqual(3, self.machine.game.player.ball)
        self.assertEqual(1, self.machine.ball_devices.bd_plunger.balls)
        self.assertEqual(4, self.machine.ball_devices.bd_trough.balls)
        self.assertEqual(0, self.machine.ball_devices.playfield.balls)
        # repeat above cycle for ball 3
        # player hits the launch button
        self.machine.switch_controller.process_switch('s_launch', 1)
        self.machine.switch_controller.process_switch('s_launch', 0)
        self.advance_time_and_run(2)
        # ball is on the pf
        # this time let's test the timeout with no pf switch hit
        # self.machine.switch_controller.process_switch('s_right_inlane', 1)
        # self.machine.switch_controller.process_switch('s_right_inlane', 0)
        self.advance_time_and_run(2)
        self.assertEqual(3, self.machine.game.player.ball)
        self.assertEqual(0, self.machine.ball_devices.bd_plunger.balls)
        self.assertEqual(4, self.machine.ball_devices.bd_trough.balls)
        self.assertEqual(1, self.machine.ball_devices.playfield.balls)
        # ball drains, game ends
        self.machine.default_platform.add_ball_to_device(
            self.machine.ball_devices.bd_trough)
        self.advance_time_and_run(1)
        # No game object means the game is over; all balls back home.
        self.assertIsNone(self.machine.game)
        self.assertEqual(0, self.machine.ball_devices.bd_plunger.balls)
        self.assertEqual(5, self.machine.ball_devices.bd_trough.balls)
        self.assertEqual(0, self.machine.ball_devices.playfield.balls)
        # make sure the base game mode is not active
        self.assertFalse(self.machine.mode_controller.is_active('base'))
|
mit
|
ahmedaljazzar/edx-platform
|
lms/djangoapps/survey/tests/test_signals.py
|
11
|
1876
|
"""
Test signal handlers for the survey app
"""
from openedx.core.djangoapps.user_api.accounts.tests.retirement_helpers import fake_completed_retirement
from student.tests.factories import UserFactory
from survey.models import SurveyAnswer
from survey.tests.factories import SurveyAnswerFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from lms.djangoapps.survey.signals import _listen_for_lms_retire
class SurveyRetireSignalTests(ModuleStoreTestCase):
    """
    Test the _listen_for_lms_retire signal

    Retirement should blank out (not delete) all SurveyAnswer field_values
    for the retired user, and be safe to run repeatedly.
    """
    # Test-sharding bucket for CI.
    shard = 4
    def test_success_answers_exist(self):
        """
        Basic success path for users that have answers in the table
        """
        answer = SurveyAnswerFactory(field_value="test value")
        _listen_for_lms_retire(sender=self.__class__, user=answer.user)
        # All values for this user should now be empty string
        self.assertFalse(SurveyAnswer.objects.filter(user=answer.user).exclude(field_value='').exists())
    def test_success_no_answers(self):
        """
        Basic success path for users who have no answers, should simply not error
        """
        user = UserFactory()
        _listen_for_lms_retire(sender=self.__class__, user=user)
    def test_idempotent(self):
        """
        Tests that re-running a retirement multiple times does not throw an error
        """
        answer = SurveyAnswerFactory(field_value="test value")
        # Run twice to make sure no errors are raised
        _listen_for_lms_retire(sender=self.__class__, user=answer.user)
        fake_completed_retirement(answer.user)
        _listen_for_lms_retire(sender=self.__class__, user=answer.user)
        # All values for this user should still be here and just be an empty string
        self.assertFalse(SurveyAnswer.objects.filter(user=answer.user).exclude(field_value='').exists())
|
agpl-3.0
|
goldeneye-source/ges-python
|
lib/test/test_urllib2net.py
|
7
|
12602
|
import unittest
from test import support
from test.test_urllib2 import sanepathname2url
import os
import socket
import urllib.error
import urllib.request
import sys
try:
import ssl
except ImportError:
ssl = None
# Skip this whole module unless the 'network' test resource is enabled.
support.requires("network")
# Per-request timeout passed to urlopen in _test_urls.
TIMEOUT = 60  # seconds
def _retry_thrice(func, exc, *args, **kwargs):
for i in range(3):
try:
return func(*args, **kwargs)
except exc as e:
last_exc = e
continue
except:
raise
raise last_exc
def _wrap_with_retry_thrice(func, exc):
def wrapped(*args, **kwargs):
return _retry_thrice(func, exc, *args, **kwargs)
return wrapped
# Connecting to remote hosts is flaky. Make it more robust by retrying
# the connection several times.
_urlopen_with_retry = _wrap_with_retry_thrice(urllib.request.urlopen,
urllib.error.URLError)
class AuthTests(unittest.TestCase):
    """Tests urllib2 authentication features.

    Currently empty: there is no page under python.org exercising HTTP
    authentication, so the original test body is kept commented out below
    for reference."""
    ## Disabled at the moment since there is no page under python.org which
    ## could be used to HTTP authentication.
    #
    # def test_basic_auth(self):
    #     import http.client
    #
    #     test_url = "http://www.python.org/test/test_urllib2/basic_auth"
    #     test_hostport = "www.python.org"
    #     test_realm = 'Test Realm'
    #     test_user = 'test.test_urllib2net'
    #     test_password = 'blah'
    #
    #     # failure
    #     try:
    #         _urlopen_with_retry(test_url)
    #     except urllib2.HTTPError, exc:
    #         self.assertEqual(exc.code, 401)
    #     else:
    #         self.fail("urlopen() should have failed with 401")
    #
    #     # success
    #     auth_handler = urllib2.HTTPBasicAuthHandler()
    #     auth_handler.add_password(test_realm, test_hostport,
    #                               test_user, test_password)
    #     opener = urllib2.build_opener(auth_handler)
    #     f = opener.open('http://localhost/')
    #     response = _urlopen_with_retry("http://www.python.org/")
    #
    #     # The 'userinfo' URL component is deprecated by RFC 3986 for security
    #     # reasons, let's not implement it!  (it's already implemented for proxy
    #     # specification strings (that is, URLs or authorities specifying a
    #     # proxy), so we must keep that)
    #     self.assertRaises(http.client.InvalidURL,
    #                       urllib2.urlopen, "http://evil:thing@example.com")
class CloseSocketTest(unittest.TestCase):
    """Check that closing a response object closes its underlying socket."""
    def test_close(self):
        # calling .close() on urllib2's response objects should close the
        # underlying socket
        url = "http://www.example.com/"
        with support.transient_internet(url):
            response = _urlopen_with_retry(url)
            sock = response.fp
            self.assertFalse(sock.closed)
            response.close()
            self.assertTrue(sock.closed)
class OtherNetworkTests(unittest.TestCase):
    """Assorted smoke tests that fetch real URLs (ftp, file, http) and
    check the opener machinery end to end."""
    def setUp(self):
        if 0:  # for debugging
            import logging
            logger = logging.getLogger("test_urllib2net")
            logger.addHandler(logging.StreamHandler())
    # XXX The rest of these tests aren't very good -- they don't check much.
    # They do sometimes catch some major disasters, though.
    def test_ftp(self):
        urls = [
            'ftp://ftp.kernel.org/pub/linux/kernel/README',
            'ftp://ftp.kernel.org/pub/linux/kernel/non-existent-file',
            #'ftp://ftp.kernel.org/pub/leenox/kernel/test',
            'ftp://gatekeeper.research.compaq.com/pub/DEC/SRC'
            '/research-reports/00README-Legal-Rules-Regs',
            ]
        self._test_urls(urls, self._extra_handlers())
    def test_file(self):
        TESTFN = support.TESTFN
        f = open(TESTFN, 'w')
        try:
            f.write('hi there\n')
            f.close()
            urls = [
                'file:' + sanepathname2url(os.path.abspath(TESTFN)),
                ('file:///nonsensename/etc/passwd', None,
                 urllib.error.URLError),
                ]
            self._test_urls(urls, self._extra_handlers(), retry=True)
        finally:
            os.remove(TESTFN)
        # A relative path is not a valid file: URL.
        self.assertRaises(ValueError, urllib.request.urlopen,'./relative_path/to/file')
    # XXX Following test depends on machine configurations that are internal
    # to CNRI. Need to set up a public server with the right authentication
    # configuration for test purposes.
    ## def test_cnri(self):
    ##     if socket.gethostname() == 'bitdiddle':
    ##         localhost = 'bitdiddle.cnri.reston.va.us'
    ##     elif socket.gethostname() == 'bitdiddle.concentric.net':
    ##         localhost = 'localhost'
    ##     else:
    ##         localhost = None
    ##     if localhost is not None:
    ##         urls = [
    ##             'file://%s/etc/passwd' % localhost,
    ##             'http://%s/simple/' % localhost,
    ##             'http://%s/digest/' % localhost,
    ##             'http://%s/not/found.h' % localhost,
    ##             ]
    ##         bauth = HTTPBasicAuthHandler()
    ##         bauth.add_password('basic_test_realm', localhost, 'jhylton',
    ##                            'password')
    ##         dauth = HTTPDigestAuthHandler()
    ##         dauth.add_password('digest_test_realm', localhost, 'jhylton',
    ##                            'password')
    ##         self._test_urls(urls, self._extra_handlers()+[bauth, dauth])
    def test_urlwithfrag(self):
        urlwith_frag = "https://docs.python.org/2/glossary.html#glossary"
        with support.transient_internet(urlwith_frag):
            req = urllib.request.Request(urlwith_frag)
            res = urllib.request.urlopen(req)
            self.assertEqual(res.geturl(),
                    "https://docs.python.org/2/glossary.html#glossary")
    def test_redirect_url_withfrag(self):
        redirect_url_with_frag = "http://bit.ly/1iSHToT"
        with support.transient_internet(redirect_url_with_frag):
            req = urllib.request.Request(redirect_url_with_frag)
            res = urllib.request.urlopen(req)
            self.assertEqual(res.geturl(),
                    "https://docs.python.org/3.4/glossary.html#term-global-interpreter-lock")
    def test_custom_headers(self):
        url = "http://www.example.com"
        with support.transient_internet(url):
            opener = urllib.request.build_opener()
            request = urllib.request.Request(url)
            self.assertFalse(request.header_items())
            opener.open(request)
            self.assertTrue(request.header_items())
            self.assertTrue(request.has_header('User-agent'))
            request.add_header('User-Agent','Test-Agent')
            opener.open(request)
            self.assertEqual(request.get_header('User-agent'),'Test-Agent')
    def test_sites_no_connection_close(self):
        # Some sites do not send Connection: close header.
        # Verify that those work properly. (#issue12576)
        URL = 'http://www.imdb.com' # mangles Connection:close
        with support.transient_internet(URL):
            try:
                with urllib.request.urlopen(URL) as res:
                    pass
            except ValueError as e:
                self.fail("urlopen failed for site not sending \
                           Connection:close")
            else:
                self.assertTrue(res)
            req = urllib.request.urlopen(URL)
            res = req.read()
            self.assertTrue(res)
    def _test_urls(self, urls, handlers, retry=True):
        """Open each url; tuple entries are (url, request, expected_err)."""
        import time
        import logging
        debug = logging.getLogger("test_urllib2").debug
        urlopen = urllib.request.build_opener(*handlers).open
        if retry:
            urlopen = _wrap_with_retry_thrice(urlopen, urllib.error.URLError)
        for url in urls:
            if isinstance(url, tuple):
                url, req, expected_err = url
            else:
                req = expected_err = None
            with support.transient_internet(url):
                debug(url)
                try:
                    f = urlopen(url, req, TIMEOUT)
                except OSError as err:
                    debug(err)
                    if expected_err:
                        msg = ("Didn't get expected error(s) %s for %s %s, got %s: %s" %
                               (expected_err, url, req, type(err), err))
                        self.assertIsInstance(err, expected_err, msg)
                except urllib.error.URLError as err:
                    # NOTE(review): unreachable on Python 3, where URLError
                    # subclasses OSError and is caught by the clause above;
                    # kept for structural parity with the original test.
                    # Fixed: URLError is not indexable on Python 3 -- the
                    # wrapped exception lives in err.reason (was err[0]).
                    if isinstance(err.reason, socket.timeout):
                        print("<timeout: %s>" % url, file=sys.stderr)
                        continue
                    else:
                        raise
                else:
                    try:
                        with support.time_out, \
                             support.socket_peer_reset, \
                             support.ioerror_peer_reset:
                            buf = f.read()
                            debug("read %d bytes" % len(buf))
                    except socket.timeout:
                        print("<timeout: %s>" % url, file=sys.stderr)
                    f.close()
            debug("******** next url coming up...")
            time.sleep(0.1)
    def _extra_handlers(self):
        handlers = []
        # Share one FTP connection cache across URLs; cleared after the test.
        cfh = urllib.request.CacheFTPHandler()
        self.addCleanup(cfh.clear_cache)
        cfh.setTimeout(1)
        handlers.append(cfh)
        return handlers
class TimeoutTest(unittest.TestCase):
    """Check how urlopen's timeout argument interacts with the global
    socket default timeout, for both HTTP and FTP responses.  The
    assertions reach into u.fp to inspect the actual socket timeout."""
    def test_http_basic(self):
        # No default timeout and no explicit one: socket has no timeout.
        self.assertIsNone(socket.getdefaulttimeout())
        url = "http://www.example.com"
        with support.transient_internet(url, timeout=None):
            u = _urlopen_with_retry(url)
            self.addCleanup(u.close)
            self.assertIsNone(u.fp.raw._sock.gettimeout())
    def test_http_default_timeout(self):
        # A global default timeout is picked up when urlopen gets none.
        self.assertIsNone(socket.getdefaulttimeout())
        url = "http://www.example.com"
        with support.transient_internet(url):
            socket.setdefaulttimeout(60)
            try:
                u = _urlopen_with_retry(url)
                self.addCleanup(u.close)
            finally:
                socket.setdefaulttimeout(None)
            self.assertEqual(u.fp.raw._sock.gettimeout(), 60)
    def test_http_no_timeout(self):
        # An explicit timeout=None overrides the global default.
        self.assertIsNone(socket.getdefaulttimeout())
        url = "http://www.example.com"
        with support.transient_internet(url):
            socket.setdefaulttimeout(60)
            try:
                u = _urlopen_with_retry(url, timeout=None)
                self.addCleanup(u.close)
            finally:
                socket.setdefaulttimeout(None)
            self.assertIsNone(u.fp.raw._sock.gettimeout())
    def test_http_timeout(self):
        # An explicit numeric timeout is applied to the socket.
        url = "http://www.example.com"
        with support.transient_internet(url):
            u = _urlopen_with_retry(url, timeout=120)
            self.addCleanup(u.close)
            self.assertEqual(u.fp.raw._sock.gettimeout(), 120)
    FTP_HOST = "ftp://ftp.mirror.nl/pub/gnu/"
    def test_ftp_basic(self):
        self.assertIsNone(socket.getdefaulttimeout())
        with support.transient_internet(self.FTP_HOST, timeout=None):
            u = _urlopen_with_retry(self.FTP_HOST)
            self.addCleanup(u.close)
            self.assertIsNone(u.fp.fp.raw._sock.gettimeout())
    def test_ftp_default_timeout(self):
        self.assertIsNone(socket.getdefaulttimeout())
        with support.transient_internet(self.FTP_HOST):
            socket.setdefaulttimeout(60)
            try:
                u = _urlopen_with_retry(self.FTP_HOST)
                self.addCleanup(u.close)
            finally:
                socket.setdefaulttimeout(None)
            self.assertEqual(u.fp.fp.raw._sock.gettimeout(), 60)
    def test_ftp_no_timeout(self):
        self.assertIsNone(socket.getdefaulttimeout())
        with support.transient_internet(self.FTP_HOST):
            socket.setdefaulttimeout(60)
            try:
                u = _urlopen_with_retry(self.FTP_HOST, timeout=None)
                self.addCleanup(u.close)
            finally:
                socket.setdefaulttimeout(None)
            self.assertIsNone(u.fp.fp.raw._sock.gettimeout())
    def test_ftp_timeout(self):
        with support.transient_internet(self.FTP_HOST):
            u = _urlopen_with_retry(self.FTP_HOST, timeout=60)
            self.addCleanup(u.close)
            self.assertEqual(u.fp.fp.raw._sock.gettimeout(), 60)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
gpl-3.0
|
dimagol/trex-core
|
scripts/external_libs/scapy-2.3.1/python3/scapy/layers/dns.py
|
3
|
24849
|
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
DNS: Domain Name System.
"""
import socket,struct
from scapy.packet import *
from scapy.fields import *
from scapy.ansmachine import *
from scapy.layers.inet import IP, UDP
from scapy.utils import str2bytes
class DNSStrField(StrField):
    """StrField variant for DNS names: converts between the dotted
    presentation form and the length-prefixed wire format."""
    def h2i(self, pkt, x):
        # Human -> internal: normalize to bytes; empty name shows as ".".
        if type(x) == str:
            x = x.encode('ascii')
        if x == b"":
            return b"."
        return x
    def i2m(self, pkt, x):
        # Internal -> machine: each label becomes <len><label>, terminated
        # by a zero byte; the root name "." encodes as a single zero byte.
        if type(x) == str:
            x = x.encode('ascii')
        if x == b".":
            return b"\x00"
        x = [k[:63] for k in x.split(b".")] # Truncate chunks that cannot be encoded (more than 63 bytes..)
        x = map(lambda y: bytes([len(y)]) + y, x)
        x = b"".join(x)
        # Ensure the name is zero-terminated (a trailing "." already is).
        if x[-1] != 0:
            x += b"\x00"
        return x
    def getfield(self, pkt, s):
        # Consume one uncompressed name from s; returns (remaining, name).
        n = b""
        #if ord(s[0]) == 0:
        if (s[0]) == 0:
            return s[1:], b"."
        while 1:
            #l = ord(s[0])
            l = (s[0])
            s = s[1:]
            if not l:
                break
            if l & 0xc0:
                # Top bits set => compression pointer, invalid in this context.
                raise Scapy_Exception("DNS message can't be compressed at this point!")
            else:
                n += s[:l]+b"."
                s = s[l:]
        return s, n
class DNSRRCountField(ShortField):
    """ShortField whose value, when left as None, is auto-computed by
    counting the records in the companion packet-list field *rr*."""
    holds_packets=1
    def __init__(self, name, default, rr):
        ShortField.__init__(self, name, default)
        # Name of the field holding the RR chain to count.
        self.rr = rr
    def _countRR(self, pkt):
        # Walk the payload chain, counting DNSRR / DNSQR / DNSSEC records.
        x = getattr(pkt,self.rr)
        i = 0
        while isinstance(x, DNSRR) or isinstance(x, DNSQR) or isdnssecRR(x):
            x = x.payload
            i += 1
        return i
    def i2m(self, pkt, x):
        # None means "auto": compute the count at build time.
        if x is None:
            x = self._countRR(pkt)
        return x
    def i2h(self, pkt, x):
        if x is None:
            x = self._countRR(pkt)
        return x
def DNSgetstr(s, p):
    """Decode a (possibly compressed) DNS name from buffer *s* starting at
    offset *p*.  Returns a (name, next_offset) tuple; on malformed input a
    warning is emitted and the name decoded so far is returned."""
    labels = []
    resume_at = 0    # offset to return to after following the first jump
    seen = [p]       # offsets already visited, for decompression-loop detection
    while True:
        if p >= len(s):
            warning("DNS RR prematured end (ofs=%i, len=%i)"%(p,len(s)))
            break
        length = s[p]
        p += 1
        if length & 0xc0:
            # Compression pointer: remember where to resume, then jump.
            if not resume_at:
                resume_at = p + 1
            if p >= len(s):
                warning("DNS incomplete jump token at (ofs=%i)" % p)
                break
            p = ((length & 0x3f) << 8) + s[p] - 12
            if p in seen:
                warning("DNS decompression loop detected")
                break
            seen.append(p)
        elif length > 0:
            labels.append(s[p:p + length])
            p += length
        else:
            # Zero-length label terminates the name.
            break
    name = b"".join(lbl + b"." for lbl in labels)
    if resume_at:
        p = resume_at
    return name, p
class DNSRRField(StrField):
    # Field holding a chain of DNS resource records; decodes the wire format
    # into DNSRR packets (or a DNSSEC-specific class via dnsRRdispatcher).
    holds_packets=1
    def __init__(self, name, countfld, passon=1):
        StrField.__init__(self, name, None)
        # Name of the header field holding the number of records to decode.
        self.countfld = countfld
        # When true, getfield() returns (buffer, offset) so the following RR
        # field can keep decoding from the same buffer (compression pointers
        # may refer back into it).
        self.passon = passon
    def i2m(self, pkt, x):
        if x is None:
            return b""
        return bytes(x)
    def decodeRR(self, name, s, p):
        # Decode one resource record starting at offset p; returns (rr, new_p).
        # Fixed RR header after the name: type (2) + class (2) + ttl (4) +
        # rdlen (2) = 10 bytes.
        ret = s[p:p+10]
        type,cls,ttl,rdlen = struct.unpack("!HHIH", ret)
        p += 10
        # The leading b"\x00" stands in for an empty (root) rrname so the
        # record class can parse the remaining header and rdata.
        rr = DNSRR(b"\x00"+ret+s[p:p+rdlen])
        if type in [2, 3, 4, 5]:
            # NS, MD, MF, CNAME: rdata is a (possibly compressed) domain name.
            rr.rdata = DNSgetstr(s,p)[0]
            del(rr.rdlen)
        elif type in dnsRRdispatcher.keys():
            # Types with a dedicated record class (OPT, DNSSEC, ...).
            rr = dnsRRdispatcher[type](b"\x00"+ret+s[p:p+rdlen])
        else:
            del(rr.rdlen)
        p += rdlen
        rr.rrname = name
        return rr,p
    def getfield(self, pkt, s):
        # s may be a (buffer, offset) tuple passed on by a previous RR field.
        if type(s) is tuple :
            s,p = s
        else:
            p = 0
        ret = None
        c = getattr(pkt, self.countfld)
        if c > len(s):
            # Record count larger than the remaining buffer: bogus packet.
            warning("wrong value: DNS.%s=%i" % (self.countfld,c))
            return s,b""
        while c:
            c -= 1
            name,p = DNSgetstr(s,p)
            rr,p = self.decodeRR(name, s, p)
            if ret is None:
                ret = rr
            else:
                ret.add_payload(rr)
        if self.passon:
            return (s,p),ret
        else:
            return s[p:],ret
class DNSQRField(DNSRRField):
    """Field holding DNS question records (name + fixed 4-byte qtype/qclass tail)."""
    holds_packets = 1

    def decodeRR(self, name, s, p):
        # A question record carries only qtype (2 bytes) and qclass (2 bytes)
        # after the name; rebuild a DNSQR from that fixed-size tail.
        tail = s[p:p + 4]
        rr = DNSQR(b"\x00" + tail)
        rr.qname = name
        return rr, p + 4
class RDataField(StrLenField):
    # Field whose encoding/decoding depends on the record's 'type' field:
    # A/AAAA addresses, PTR/NS/... domain names, TXT character-strings.
    def m2i(self, pkt, s):
        family = None
        if pkt.type == 1: # A
            family = socket.AF_INET
        elif pkt.type == 12: # PTR
            s = DNSgetstr(s, 0)[0]
        elif pkt.type == 16: # TXT
            ret_s = b""
            tmp_s = s
            # RDATA contains a list of strings, each prepended with
            # a byte containing the size of the following string.
            while tmp_s:
                tmp_len = struct.unpack("!B", bytes([tmp_s[0]]))[0] + 1
                if tmp_len > len(tmp_s):
                    warning("DNS RR TXT prematured end of character-string (size=%i, remaining bytes=%i)" % (tmp_len, len(tmp_s)))
                ret_s += tmp_s[1:tmp_len]
                tmp_s = tmp_s[tmp_len:]
            s = ret_s
        elif pkt.type == 28: # AAAA
            family = socket.AF_INET6
        if family is not None:
            # A/AAAA: render the packed address as a printable string.
            s = inet_ntop(family, s)
        return s
    def i2m(self, pkt, s):
        if pkt.type == 1: # A
            if s:
                if type(s) is bytes:
                    s = s.decode('ascii')
                s = inet_aton(s)
        elif pkt.type in [2,3,4,5]: # NS, MD, MF, CNAME
            # Encode the name as length-prefixed labels, zero-terminated.
            s = b"".join(map(lambda x: bytes([len(x)]) + x, s.split(b".")))
            if s[-1]:
                s += b"\x00"
        elif pkt.type == 16: # TXT
            if s:
                ret_s = b""
                # The initial string must be split into a list of strings
                # prepended with their sizes (each at most 255 bytes).
                while len(s) >= 255:
                    ret_s += b"\xff" + s[:255]
                    s = s[255:]
                # The remaining string is less than 255 bytes long
                if len(s):
                    ret_s += struct.pack("!B", len(s)) + s
                s = ret_s
        elif pkt.type == 28: # AAAA
            if s:
                s = inet_pton(socket.AF_INET6, s)
        return s
class RDLenField(Field):
    """rdlen field computed from the encoded length of the packet's rdata."""

    def __init__(self, name):
        Field.__init__(self, name, None, "H")

    def _rdata_len(self, pkt):
        # Length of rdata once converted to its wire representation.
        rdataf = pkt.get_field("rdata")
        return len(rdataf.i2m(pkt, pkt.rdata))

    def i2m(self, pkt, x):
        return self._rdata_len(pkt) if x is None else x

    def i2h(self, pkt, x):
        return self._rdata_len(pkt) if x is None else x
class DNS(Packet):
    """DNS message: header flags plus question/answer/authority/additional sections."""
    name = "DNS"
    fields_desc = [ ShortField("id", 0),
                    BitField("qr", 0, 1),
                    BitEnumField("opcode", 0, 4, {0:"QUERY",1:"IQUERY",2:"STATUS"}),
                    BitField("aa", 0, 1),
                    BitField("tc", 0, 1),
                    BitField("rd", 0, 1),
                    BitField("ra", 0, 1),
                    BitField("z", 0, 1),
                    # AD and CD bits are defined in RFC 2535
                    BitField("ad", 0, 1), # Authentic Data
                    BitField("cd", 0, 1), # Checking Disabled
                    BitEnumField("rcode", 0, 4, {0:"ok", 1:"format-error", 2:"server-failure", 3:"name-error", 4:"not-implemented", 5:"refused"}),
                    DNSRRCountField("qdcount", None, "qd"),
                    DNSRRCountField("ancount", None, "an"),
                    DNSRRCountField("nscount", None, "ns"),
                    DNSRRCountField("arcount", None, "ar"),
                    DNSQRField("qd", "qdcount"),
                    DNSRRField("an", "ancount"),
                    DNSRRField("ns", "nscount"),
                    DNSRRField("ar", "arcount",0) ]
    def answers(self, other):
        # A response answers a query iff the transaction ids match and the
        # qr bits are response (1) / query (0) respectively.
        return (isinstance(other, DNS)
                and self.id == other.id
                and self.qr == 1
                and other.qr == 0)
    def mysummary(self):
        # Fix: the original pre-computed type = ["Qry","Ans"][self.qr] and then
        # unconditionally overwrote it in both branches below — dead code.
        name = ""
        if self.qr:
            type = "Ans"
            if self.ancount > 0 and isinstance(self.an, DNSRR):
                name = ' "%s"' % self.an.getstrval("rdata")
        else:
            type = "Qry"
            if self.qdcount > 0 and isinstance(self.qd, DNSQR):
                name = ' "%s"' % self.qd.getstrval("qname")
        return 'DNS %s%s ' % (type, name)
# DNS RR type numbers (RFC 1035 plus later extensions) mapped to mnemonics.
dnstypes = { 0:"ANY", 255:"ALL",
    1:"A", 2:"NS", 3:"MD", 4:"MF", 5:"CNAME", 6:"SOA", 7: "MB", 8:"MG",
    9:"MR",10:"NULL",11:"WKS",12:"PTR",13:"HINFO",14:"MINFO",15:"MX",16:"TXT",
    17:"RP",18:"AFSDB",28:"AAAA", 33:"SRV",38:"A6",39:"DNAME",
    41:"OPT", 43:"DS", 46:"RRSIG", 47:"NSEC", 48:"DNSKEY",
    50: "NSEC3", 51: "NSEC3PARAM", 32769:"DLV" }
# Query-only QTYPEs, merged with the regular RR types below.
dnsqtypes = {251:"IXFR",252:"AXFR",253:"MAILB",254:"MAILA",255:"ALL"}
dnsqtypes.update(dnstypes)
# DNS class numbers; IN (Internet) is the one seen in practice.
dnsclasses = {1: 'IN', 2: 'CS', 3: 'CH', 4: 'HS', 255: 'ANY'}
class DNSQR(Packet):
    # One entry of the question section: name, query type and query class.
    name = "DNS Question Record"
    show_indent=0
    fields_desc = [ DNSStrField("qname",b""),
                    ShortEnumField("qtype", 1, dnsqtypes),
                    ShortEnumField("qclass", 1, dnsclasses) ]
# RFC 2671 - Extension Mechanisms for DNS (EDNS0)
class EDNS0TLV(Packet):
    # One option TLV carried inside the rdata of an OPT pseudo-RR (RFC 2671).
    name = "DNS EDNS0 TLV"
    fields_desc = [ ShortEnumField("optcode", 0, { 0: "Reserved", 1: "LLQ", 2: "UL", 3: "NSID", 4: "Reserved", 5: "PING" }),
                    FieldLenField("optlen", None, "optdata", fmt="H"),
                    StrLenField("optdata", b"", length_from=lambda pkt: pkt.optlen) ]
    def extract_padding(self, p):
        # Consume nothing: the remaining bytes belong to the next TLV in the
        # list, not to this packet's payload.
        return b"", p
class DNSRROPT(Packet):
    # OPT pseudo-RR (EDNS0, RFC 2671); rdata holds a list of option TLVs.
    name = "DNS OPT Resource Record"
    fields_desc = [ DNSStrField("rrname",b""),
                    ShortEnumField("type", 41, dnstypes),
                    ShortField("rclass", 4096),
                    ByteField("extrcode", 0),
                    ByteField("version", 0),
                    # version 0 means EDNS0
                    BitEnumField("z", 32768, 16, { 32768: "D0" }),
                    # D0 means DNSSEC OK from RFC 3225
                    FieldLenField("rdlen", None, length_of="rdata", fmt="H"),
                    PacketListField("rdata", [], EDNS0TLV, length_from=lambda pkt: pkt.rdlen) ]
# RFC 4034 - Resource Records for the DNS Security Extensions
# 09/2013 from http://www.iana.org/assignments/dns-sec-alg-numbers/dns-sec-alg-numbers.xhtml
# DNSSEC signing algorithm numbers -> names (IANA dns-sec-alg-numbers registry).
dnssecalgotypes = { 0:"Reserved", 1:"RSA/MD5", 2:"Diffie-Hellman", 3:"DSA/SHA-1",
                    4:"Reserved", 5:"RSA/SHA-1", 6:"DSA-NSEC3-SHA1",
                    7:"RSASHA1-NSEC3-SHA1", 8:"RSA/SHA-256", 9:"Reserved",
                    10:"RSA/SHA-512", 11:"Reserved", 12:"GOST R 34.10-2001",
                    13:"ECDSA Curve P-256 with SHA-256", 14: "ECDSA Curve P-384 with SHA-384",
                    252:"Reserved for Indirect Keys", 253:"Private algorithms - domain name",
                    254:"Private algorithms - OID", 255:"Reserved" }
# 09/2013 from http://www.iana.org/assignments/ds-rr-types/ds-rr-types.xhtml
# DS record digest type numbers -> names.
dnssecdigesttypes = { 0:"Reserved", 1:"SHA-1", 2:"SHA-256", 3:"GOST R 34.11-94", 4:"SHA-384" }
class TimeField(IntField):
    """Integer field accepting/rendering UTC timestamps as YYYYMMDDHHMMSS strings."""

    def any2i(self, pkt, x):
        # Accept a human-readable UTC time string and convert to epoch seconds.
        if type(x) == str:
            import time, calendar
            parsed = time.strptime(x, "%Y%m%d%H%M%S")
            return int(calendar.timegm(parsed))
        return x

    def i2repr(self, pkt, x):
        import time
        val = self.i2h(pkt, x)
        stamp = time.strftime("%Y%m%d%H%M%S", time.gmtime(val))
        return "%s (%d)" % (stamp, val)
def bitmap2RRlist(bitmap):
    """
    Decode the 'Type Bit Maps' field of the NSEC Resource Record into an
    integer list of RR type numbers.

    Returns None (after a warning) on a malformed bitmap.
    """
    # RFC 4034, 4.1.2. The Type Bit Maps Field
    RRlist = []
    while bitmap:
        if len(bitmap) < 2:
            warning("bitmap too short (%i)" % len(bitmap))
            return
        window_block = bitmap[0]     # window number
        offset = 256 * window_block  # RR-number base for this window
        # Fix: the original comment claimed bitmap[0] here; the length is
        # the second byte of the window.
        bitmap_len = bitmap[1]       # length of the bitmap in bytes
        if bitmap_len <= 0 or bitmap_len > 32:
            warning("bitmap length is not valid (%i)" % bitmap_len)
            return
        tmp_bitmap = bitmap[2:2 + bitmap_len]
        # Each set bit encodes one RR type: byte b, bit i -> offset + b*8 + i.
        for b in range(len(tmp_bitmap)):
            v = 128
            for i in range(8):
                if tmp_bitmap[b] & v:
                    RRlist += [offset + b * 8 + i]
                v = v >> 1
        # Next window block, if any
        bitmap = bitmap[2 + bitmap_len:]
    return RRlist
def RRlist2bitmap(lst):
    """
    Encode a list of integers representing Resource Records to a bitmap field
    used in the NSEC Resource Record.

    Negative values are folded to their absolute value; values above 65535
    are dropped.
    """
    # RFC 4034, 4.1.2. The Type Bit Maps Field
    import math
    bitmap = b""
    # Fix: the original filtered x <= 65535 *before* abs(), letting e.g.
    # -70000 through as 70000, and applied abs() after sorting, which could
    # unsort the list and break the window-range computation below.
    lst = sorted(set(abs(x) for x in lst) - set(x for x in map(abs, lst) if x > 65535))
    if not lst:
        # Robustness fix: empty input used to raise IndexError on lst[-1].
        return bitmap
    # Window blocks group RR numbers 256 at a time.
    max_window_blocks = int(math.ceil(lst[-1] / 256.))
    min_window_blocks = int(math.floor(lst[0] / 256.))
    if min_window_blocks == max_window_blocks:
        max_window_blocks += 1
    for wb in range(min_window_blocks, max_window_blocks + 1):
        # Keep only the RRs encoded in this window block,
        # i.e. everything in [256*wb, 256*(wb+1)).
        rrlist = sorted(x for x in lst if 256 * wb <= x < 256 * (wb + 1))
        if not rrlist:
            continue
        # Number of bytes needed to store the bitmap for this window.
        if rrlist[-1] == 0:  # only one element in the list
            bytes_count = 1
        else:
            # (Renamed from 'max', which shadowed the builtin.)
            max_offset = rrlist[-1] - 256 * wb
            bytes_count = int(max_offset // 8) + 1  # use at least 1 byte
        if bytes_count > 32:  # Don't encode more than 256 bits / values
            bytes_count = 32
        bitmap += struct.pack("B", wb)
        bitmap += struct.pack("B", bytes_count)
        # Generate the bitmap byte by byte.
        for tmp in range(bytes_count):
            # RRs falling within this byte of the bitmap.
            tmp_rrlist = [x for x in rrlist
                          if 256 * wb + 8 * tmp <= x < 256 * wb + 8 * tmp + 8]
            v = 0
            if tmp_rrlist:
                # Each RR maps to one bit: rescale to [0, 7], then sum bit values.
                v = sum(2 ** (7 - ((x - 256 * wb) - tmp * 8)) for x in tmp_rrlist)
            bitmap += struct.pack("B", v)
    return bitmap
class RRlistField(StrField):
    """Type-bitmap field whose human form is a list of RR type numbers."""

    def h2i(self, pkt, x):
        # A list of RR numbers is encoded into the NSEC bitmap form.
        if type(x) == list:
            return RRlist2bitmap(x)
        return x

    def i2repr(self, pkt, x):
        decoded = bitmap2RRlist(self.i2h(pkt, x))
        # Show mnemonics where known, raw numbers otherwise.
        return [dnstypes.get(rr, rr) for rr in decoded]
class _DNSRRdummy(Packet):
    """Base class patching rdlen into the built packet for Resource Records."""
    name = "Dummy class that implements post_build() for Ressource Records"
    def post_build(self, pkt, pay):
        # If rdlen was set explicitly, trust it and leave the packet alone.
        # (Fix: 'not self.rdlen == None' replaced by the 'is not None' idiom.)
        if self.rdlen is not None:
            return pkt
        # Offset of rdlen: the encoded rrname, then type (2) + class (2) +
        # ttl (4) = 8 bytes; rdlen is the 2 bytes after that.
        lrrname = len(self.fields_desc[0].i2m(b"", self.getfieldval("rrname")))
        l = len(pkt) - lrrname - 10
        pkt = pkt[:lrrname+8] + struct.pack("!H", l) + pkt[lrrname+8+2:]
        return pkt
class DNSRRSOA(_DNSRRdummy):
    # Start Of Authority record (type 6); rdlen is patched in post_build().
    name = "DNS SOA Resource Record"
    fields_desc = [ DNSStrField("rrname",b""),
                    ShortEnumField("type", 6, dnstypes),
                    ShortEnumField("rclass", 1, dnsclasses),
                    IntField("ttl", 0),
                    ShortField("rdlen", None),
                    DNSStrField("mname", b""),
                    DNSStrField("rname", b""),
                    IntField("serial", 0),
                    IntField("refresh", 0),
                    IntField("retry", 0),
                    IntField("expire", 0),
                    IntField("minimum", 0)
                  ]
class DNSRRRSIG(_DNSRRdummy):
    # DNSSEC signature record (type 46, RFC 4034).
    name = "DNS RRSIG Resource Record"
    fields_desc = [ DNSStrField("rrname",b""),
                    ShortEnumField("type", 46, dnstypes),
                    ShortEnumField("rclass", 1, dnsclasses),
                    IntField("ttl", 0),
                    ShortField("rdlen", None),
                    ShortEnumField("typecovered", 1, dnstypes),
                    ByteEnumField("algorithm", 5, dnssecalgotypes),
                    ByteField("labels", 0),
                    IntField("originalttl", 0),
                    TimeField("expiration", 0),
                    TimeField("inception", 0),
                    ShortField("keytag", 0),
                    DNSStrField("signersname", b""),
                    StrField("signature", b"")
                  ]
class DNSRRNSEC(_DNSRRdummy):
    # Authenticated denial of existence record (type 47, RFC 4034).
    name = "DNS NSEC Resource Record"
    fields_desc = [ DNSStrField("rrname",b""),
                    ShortEnumField("type", 47, dnstypes),
                    ShortEnumField("rclass", 1, dnsclasses),
                    IntField("ttl", 0),
                    ShortField("rdlen", None),
                    DNSStrField("nextname", b""),
                    RRlistField("typebitmaps", b"")
                  ]
class DNSRRDNSKEY(_DNSRRdummy):
    # DNSSEC public key record (type 48, RFC 4034).
    name = "DNS DNSKEY Resource Record"
    fields_desc = [ DNSStrField("rrname",b""),
                    ShortEnumField("type", 48, dnstypes),
                    ShortEnumField("rclass", 1, dnsclasses),
                    IntField("ttl", 0),
                    ShortField("rdlen", None),
                    FlagsField("flags", 256, 16, "S???????Z???????"),
                    # S: Secure Entry Point
                    # Z: Zone Key
                    ByteField("protocol", 3),
                    ByteEnumField("algorithm", 5, dnssecalgotypes),
                    StrField("publickey", b"")
                  ]
class DNSRRDS(_DNSRRdummy):
    # Delegation Signer record (type 43, RFC 4034).
    name = "DNS DS Resource Record"
    fields_desc = [ DNSStrField("rrname",b""),
                    ShortEnumField("type", 43, dnstypes),
                    ShortEnumField("rclass", 1, dnsclasses),
                    IntField("ttl", 0),
                    ShortField("rdlen", None),
                    ShortField("keytag", 0),
                    ByteEnumField("algorithm", 5, dnssecalgotypes),
                    ByteEnumField("digesttype", 5, dnssecdigesttypes),
                    StrField("digest", b"")
                  ]
# RFC 5074 - DNSSEC Lookaside Validation (DLV)
class DNSRRDLV(DNSRRDS):
    # DNSSEC Lookaside Validation record (type 32769, RFC 4431): identical
    # layout to DS, only the type number differs.
    name = "DNS DLV Resource Record"
    def __init__(self, *args, **kargs):
        DNSRRDS.__init__(self, *args, **kargs)
        # Override the DS default type unless the caller gave one explicitly.
        if not kargs.get('type', 0):
            self.type = 32769
# RFC 5155 - DNS Security (DNSSEC) Hashed Authenticated Denial of Existence
class DNSRRNSEC3(_DNSRRdummy):
    # Hashed authenticated denial of existence record (type 50, RFC 5155).
    name = "DNS NSEC3 Resource Record"
    fields_desc = [ DNSStrField("rrname",b""),
                    ShortEnumField("type", 50, dnstypes),
                    ShortEnumField("rclass", 1, dnsclasses),
                    IntField("ttl", 0),
                    ShortField("rdlen", None),
                    ByteField("hashalg", 0),
                    BitEnumField("flags", 0, 8, {1:"Opt-Out"}),
                    ShortField("iterations", 0),
                    FieldLenField("saltlength", 0, fmt="!B", length_of="salt"),
                    StrLenField("salt", b"", length_from=lambda x: x.saltlength),
                    FieldLenField("hashlength", 0, fmt="!B", length_of="nexthashedownername"),
                    StrLenField("nexthashedownername", b"", length_from=lambda x: x.hashlength),
                    RRlistField("typebitmaps", b"")
                  ]
class DNSRRNSEC3PARAM(_DNSRRdummy):
    # NSEC3 parameters record (type 51, RFC 5155).
    name = "DNS NSEC3PARAM Resource Record"
    fields_desc = [ DNSStrField("rrname",b""),
                    ShortEnumField("type", 51, dnstypes),
                    ShortEnumField("rclass", 1, dnsclasses),
                    IntField("ttl", 0),
                    ShortField("rdlen", None),
                    ByteField("hashalg", 0),
                    ByteField("flags", 0),
                    ShortField("iterations", 0),
                    FieldLenField("saltlength", 0, fmt="!B", length_of="salt"),
                    StrLenField("salt", b"", length_from=lambda pkt: pkt.saltlength)
                  ]
# Record classes that DNSRRCountField must also count when walking a chain.
dnssecclasses = [ DNSRROPT, DNSRRRSIG, DNSRRDLV, DNSRRDNSKEY, DNSRRNSEC, DNSRRDS, DNSRRNSEC3, DNSRRNSEC3PARAM ]
def isdnssecRR(obj):
    """Return True if obj is an instance of any DNSSEC record class."""
    # Fix: the original built a local named 'list' (shadowing the builtin)
    # and or-accumulated it in a loop; isinstance accepts a tuple directly.
    return isinstance(obj, tuple(dnssecclasses))
# RR type number -> dedicated record class; types absent from this map fall
# back to the generic DNSRR parser in DNSRRField.decodeRR().
dnsRRdispatcher = { #6: DNSRRSOA,
                    41: DNSRROPT,         # RFC 1671
                    43: DNSRRDS,          # RFC 4034
                    46: DNSRRRSIG,        # RFC 4034
                    47: DNSRRNSEC,        # RFC 4034
                    48: DNSRRDNSKEY,      # RFC 4034
                    50: DNSRRNSEC3,       # RFC 5155
                    51: DNSRRNSEC3PARAM,  # RFC 5155
                    32769: DNSRRDLV       # RFC 4431
                    }
class DNSRR(Packet):
    # Generic resource record; rdata is interpreted according to 'type'.
    name = "DNS Resource Record"
    show_indent=0
    # NOTE(review): rrname/rdata default to str "" while the other record
    # classes use b"" — DNSStrField.h2i encodes str to bytes, but verify the
    # RDataField default is intentional.
    fields_desc = [ DNSStrField("rrname",""),
                    ShortEnumField("type", 1, dnstypes),
                    ShortEnumField("rclass", 1, dnsclasses),
                    IntField("ttl", 0),
                    RDLenField("rdlen"),
                    RDataField("rdata", "", length_from=lambda pkt:pkt.rdlen) ]
# DNS rides on UDP port 53 in either direction (query or response).
bind_layers( UDP, DNS, dport=53)
bind_layers( UDP, DNS, sport=53)
@conf.commands.register
def dyndns_add(nameserver, name, rdata, type="A", ttl=10):
    """Send a DNS add message to a nameserver for "name" to have a new "rdata"
    dyndns_add(nameserver, name, rdata, type="A", ttl=10) -> result code (0=ok)
    example: dyndns_add("ns1.toto.com", "dyn.toto.com", "127.0.0.1")
    RFC2136
    """
    # The zone is everything after the first label of the record name.
    zone = name[name.find(".")+1:]
    # RFC 2136 dynamic update: opcode 5, zone SOA in the question section,
    # the record to add in the authority section.
    # Fix: the original hardcoded type="A" in the DNSRR, silently ignoring
    # the 'type' argument (compare dyndns_del, which passes it through).
    r=sr1(IP(dst=nameserver)/UDP()/DNS(opcode=5,
                                       qd=[DNSQR(qname=zone, qtype="SOA")],
                                       ns=[DNSRR(rrname=name, type=type,
                                                 ttl=ttl, rdata=rdata)]),
          verbose=0, timeout=5)
    if r and r.haslayer(DNS):
        return r.getlayer(DNS).rcode
    else:
        return -1
@conf.commands.register
def dyndns_del(nameserver, name, type="ALL", ttl=10):
    """Send a DNS delete message to a nameserver for "name"
    dyndns_del(nameserver, name, type="ALL", ttl=10) -> result code (0=ok)
    example: dyndns_del("ns1.toto.com", "dyn.toto.com")
    RFC2136
    """
    # The zone is everything after the first label of the record name.
    zone = name[name.find(".")+1:]
    # RFC 2136 dynamic delete: class ANY / ttl 0 requests removal of the
    # matching record(s) at 'name'.
    r=sr1(IP(dst=nameserver)/UDP()/DNS(opcode=5,
                                       qd=[DNSQR(qname=zone, qtype="SOA")],
                                       ns=[DNSRR(rrname=name, type=type,
                                                 rclass="ANY", ttl=0, rdata=b"")]),
          verbose=0, timeout=5)
    if r and r.haslayer(DNS):
        return r.getlayer(DNS).rcode
    else:
        return -1
class DNS_am(AnsweringMachine):
    """Answering machine that spoofs DNS replies to sniffed queries."""
    function_name = "dns_spoof"
    filter = "udp port 53"

    def parse_options(self, joker="192.168.1.1", match=None):
        # 'match' maps query names to spoofed answers; 'joker' is the fallback.
        self.match = {} if match is None else match
        self.joker = joker

    def is_request(self, req):
        # Only react to DNS queries (qr == 0), never to responses.
        return req.haslayer(DNS) and req.getlayer(DNS).qr == 0

    def make_reply(self, req):
        ip = req.getlayer(IP)
        dns = req.getlayer(DNS)
        # Mirror addresses and ports so the reply reaches the querier.
        resp = IP(dst=ip.src, src=ip.dst) / UDP(dport=ip.sport, sport=ip.dport)
        rdata = self.match.get(dns.qd.qname, self.joker)
        resp /= DNS(id=dns.id, qr=1, qd=dns.qd,
                    an=DNSRR(rrname=dns.qd.qname, ttl=10, rdata=rdata))
        return resp
|
apache-2.0
|
NeCTAR-RC/ceilometer
|
ceilometer/tests/hardware/inspector/test_snmp.py
|
4
|
9033
|
# -*- encoding: utf-8 -*-
#
# Copyright © 2013 Intel Corp
#
# Authors: Lianhao Lu <lianhao.lu@intel.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/hardware/inspector/snmp/inspector.py
"""
from ceilometer.hardware.inspector import snmp
from ceilometer.openstack.common.fixture import mockpatch
from ceilometer.openstack.common import network_utils
from ceilometer.tests import base as test_base
from ceilometer.tests.hardware.inspector import base
Base = base.InspectorBaseTest
class FakeMac(object):
def __init__(self):
self.val = "0x%s" % Base.network[0][0].mac
def prettyPrint(self):
return str(self.val)
ins = snmp.SNMPInspector


def _get_result(value):
    # Shape of a pysnmp getCmd() return value:
    # (errorIndication, errorStatus, errorIndex, varBinds).
    return (None, None, 0, [('', value)])


# Canned getCmd() results, keyed by the OID the inspector will query.
GETCMD_MAP = {
    ins._cpu_1_min_load_oid: _get_result(Base.cpu[0].cpu_1_min),
    ins._cpu_5_min_load_oid: _get_result(Base.cpu[0].cpu_5_min),
    ins._cpu_15_min_load_oid: _get_result(Base.cpu[0].cpu_15_min),
    ins._memory_total_oid: _get_result(Base.memory[0].total),
    ins._memory_used_oid: _get_result(Base.memory[0].used),
    ins._disk_path_oid + '.1': _get_result(Base.diskspace[0][0].path),
    ins._disk_device_oid + '.1': _get_result(Base.diskspace[0][0].device),
    ins._disk_size_oid + '.1': _get_result(Base.diskspace[0][1].size),
    ins._disk_used_oid + '.1': _get_result(Base.diskspace[0][1].used),
    ins._disk_path_oid + '.2': _get_result(Base.diskspace[1][0].path),
    ins._disk_device_oid + '.2': _get_result(Base.diskspace[1][0].device),
    ins._disk_size_oid + '.2': _get_result(Base.diskspace[1][1].size),
    ins._disk_used_oid + '.2': _get_result(Base.diskspace[1][1].used),
    ins._interface_name_oid + '.1': _get_result(Base.network[0][0].name),
    ins._interface_mac_oid + '.1': _get_result(FakeMac()),
    ins._interface_bandwidth_oid + '.1': _get_result(
        Base.network[0][1].bandwidth * 8),
    ins._interface_received_oid + '.1': _get_result(
        Base.network[0][1].rx_bytes),
    ins._interface_transmitted_oid + '.1': _get_result(
        Base.network[0][1].tx_bytes),
    ins._interface_error_oid + '.1': _get_result(Base.network[0][1].error),
}
# Canned nextCmd() results, keyed by OID. Shape of a pysnmp nextCmd() return:
# (errorIndication, errorStatus, errorIndex, varBindTable) where the table is
# a list of rows, each a list of (oid, value) pairs.
NEXTCMD_MAP = {
    ins._disk_index_oid: (None, None, 0,
                          [[('1.3.6.1.4.1.2021.9.1.1.1', 1)],
                           [('1.3.6.1.4.1.2021.9.1.1.2', 2)]]),
    ins._interface_index_oid: (None, None, 0,
                               [[('1.3.6.1.2.1.2.2.1.1.1', 1)]]),
    ins._interface_ip_oid: (None, None, 0,
                            [[('1.3.6.1.2.1.4.20.1.2.10.0.0.1', 1)]]),
}
def faux_getCmd(authData, transportTarget, oid):
    """Fake pysnmp getCmd: return the canned result for a known OID."""
    miss = ("faux_getCmd Error", None, 0, [])
    return GETCMD_MAP.get(oid, miss)
def faux_nextCmd(authData, transportTarget, oid):
    """Fake pysnmp nextCmd: return the canned result for a known OID."""
    miss = ("faux_nextCmd Error", None, 0, [])
    return NEXTCMD_MAP.get(oid, miss)
class TestSNMPInspector(Base, test_base.BaseTestCase):
    # Exercises SNMPInspector against canned pysnmp responses (no real agent).
    def setUp(self):
        super(TestSNMPInspector, self).setUp()
        self.inspector = snmp.SNMPInspector()
        self.host = network_utils.urlsplit("snmp://localhost")
        # Replace the real pysnmp command-generator methods with the fakes
        # defined above, so inspections hit GETCMD_MAP/NEXTCMD_MAP only.
        self.useFixture(mockpatch.PatchObject(
            self.inspector._cmdGen, 'getCmd', new=faux_getCmd))
        self.useFixture(mockpatch.PatchObject(
            self.inspector._cmdGen, 'nextCmd', new=faux_nextCmd))
    def test_get_security_name(self):
        # Default security name when the URL carries no override ...
        self.assertEqual(self.inspector._get_security_name(self.host),
                         self.inspector._security_name)
        # ... and the query-string override when it does.
        host2 = network_utils.urlsplit("snmp://foo:80?security_name=fake")
        self.assertEqual(self.inspector._get_security_name(host2),
                         'fake')
    def test_get_cmd_error(self):
        # Point the inspector at an OID with no canned answer so the fake
        # getCmd reports an error indication.
        self.useFixture(mockpatch.PatchObject(
            self.inspector, '_memory_total_oid', new='failure'))
        def get_list(func, *args, **kwargs):
            # inspect_* returns a generator; force evaluation to trigger the error.
            return list(func(*args, **kwargs))
        self.assertRaises(snmp.SNMPException,
                          get_list,
                          self.inspector.inspect_memory,
                          self.host)
|
apache-2.0
|
mozilla/kitsune
|
kitsune/messages/tasks.py
|
1
|
2038
|
import logging
from django.conf import settings
from django.contrib.sites.models import Site
from django.urls import reverse
from django.utils.translation import ugettext as _
from celery import task
from kitsune.messages.models import InboxMessage
from kitsune.sumo.email_utils import make_mail, safe_translation, send_messages
log = logging.getLogger("k.task")
@task()
def email_private_message(inbox_message_id):
    """Send notification of a new private message."""
    inbox_message = InboxMessage.objects.get(id=inbox_message_id)
    log.debug("Sending email for user (%s)" % (inbox_message.to,))
    user = inbox_message.to
    # safe_translation runs the closure under the recipient's locale so the
    # subject and templates are translated appropriately.
    @safe_translation
    def _send_mail(locale):
        # Avoid circular import issues
        from kitsune.users.templatetags.jinja_helpers import display_name
        subject = _("[SUMO] You have a new private message from [{sender}]")
        subject = subject.format(sender=display_name(inbox_message.sender))
        msg_url = reverse("messages.read", kwargs={"msgid": inbox_message.id})
        settings_url = reverse("users.edit_settings")
        from kitsune.sumo.templatetags.jinja_helpers import add_utm
        context = {
            "sender": inbox_message.sender,
            "message": inbox_message.message,
            "message_html": inbox_message.content_parsed,
            # Tag links so the visit is attributed to this notification.
            "message_url": add_utm(msg_url, "messages-new"),
            "unsubscribe_url": add_utm(settings_url, "messages-new"),
            "host": Site.objects.get_current().domain,
        }
        mail = make_mail(
            subject=subject,
            text_template="messages/email/private_message.ltxt",
            html_template="messages/email/private_message.html",
            context_vars=context,
            from_email=settings.TIDINGS_FROM_ADDRESS,
            to_email=inbox_message.to.email,
        )
        send_messages([mail])
    # Fall back to the wiki default language when the user has no profile.
    if hasattr(user, "profile"):
        locale = user.profile.locale
    else:
        locale = settings.WIKI_DEFAULT_LANGUAGE
    _send_mail(locale)
|
bsd-3-clause
|
wikilinks/nel
|
nel/process/tag.py
|
1
|
6517
|
#!/usr/bin/env python
import os
import sys
import re
import string
from itertools import izip
from time import time
from bisect import bisect_left
from subprocess import Popen, PIPE
from cStringIO import StringIO
import spacy
from .process import Process
from ..model import recognition
from ..doc import Mention, Chain, Candidate
from ..util import group, spanset_insert, tcp_socket, byte_to_char_map
from nel import logging
log = logging.getLogger()
class Tagger(Process):
    """ Tags and performs naive coref over mentions in tokenised text. """
    def __call__(self, doc):
        # Naive coref: each detected mention becomes its own singleton chain.
        doc.chains = [Chain(mentions=[m]) for m in self.tag(doc)]
        return doc
    def tag(self, doc):
        # Subclasses yield Mention objects for the document.
        raise NotImplementedError
    def mention_over_tokens(self, doc, i, j, tag=None):
        # Build a Mention spanning tokens [i, j) from their character offsets.
        # NOTE(review): 'end' is only used to slice the text; presumably
        # Mention derives its own end from begin+len(text) — confirm.
        toks = doc.tokens[i:j]
        begin = toks[0].begin
        end = toks[-1].end
        text = doc.text[begin:end]
        return Mention(begin, text, tag)
    @classmethod
    def iter_options(cls):
        # Enumerate concrete tagger subclasses defined in this module
        # (Python 2: dict.itervalues).
        for c in globals().itervalues():
            if c != cls and isinstance(c, type) and issubclass(c, cls):
                yield c
class SpacyTagger(Tagger):
    """Entity tagger backed by a spacy pipeline."""
    def __init__(self, spacy_model = None):
        self.spacy_model = spacy_model or 'en_default'
        # Fix: log the resolved model name, not the raw argument (which may
        # be None when the default is used).
        log.debug('Using spacy entity tagger (%s)...', self.spacy_model)
        self.nlp = spacy.load(self.spacy_model)
    def tag(self, doc):
        spacy_doc = self.nlp(doc.text)
        doc.tokens = [Mention(t.idx, t.text) for t in spacy_doc]
        for ent in spacy_doc.ents:
            # Trim whitespace-only tokens at the entity boundaries.
            tok_idxs = [i for i in xrange(len(ent)) if not ent[i].is_space]
            if tok_idxs:
                yield self.mention_over_tokens(doc, ent.start + min(tok_idxs), ent.start + max(tok_idxs) + 1, ent.label_)
class CRFTagger(Tagger):
    """ Conditional random field sequence tagger """
    def __init__(self, model_tag):
        log.info('Loading CRF sequence classifier: %s', model_tag)
        self.tagger = recognition.SequenceClassifier(model_tag)
    def tag(self, doc):
        # 'offset' converts per-sentence token indexes into document-wide ones.
        offset = 0
        doc.tokens = []
        state = self.tagger.mapper.get_doc_state(doc)
        for sentence in self.tagger.mapper.iter_sequences(doc, state):
            for t in sentence:
                # Skip leading non-alphanumeric characters when anchoring the
                # token's character offset.
                i = 0
                for i, c in enumerate(doc.text[t.idx:t.idx+len(t.text)]):
                    if c.isalnum():
                        break
                doc.tokens.append(Mention(t.idx+i, t.text))
            tags = self.tagger.tag(doc, sentence, state)
            # Decode BIO tags: a run B-X, I-X, ... becomes one mention of type X.
            start, tag_type = None, None
            for i, tag in enumerate(tags):
                if start != None and tag[0] != 'I':
                    # Current run ends before any tag that is not a continuation.
                    yield self.mention_over_tokens(doc, start, i + offset, tag_type)
                    start, tag_type = None, None
                if tag[0] == 'B':
                    parts = tag.split('-')
                    if len(parts) == 2:
                        tag_type = parts[1]
                    start = i + offset
            if start != None:
                # Flush a run that extends to the end of the sentence.
                yield self.mention_over_tokens(doc, start, i + offset + 1, tag_type)
            offset += len(sentence)
class StanfordTagger(Tagger):
    """ Tag documents via a hosted Stanford NER service """
    def __init__(self, host, port):
        self.host = host
        self.port = port
    def to_mention(self, doc, start, end):
        # NOTE(review): BUG — 'mention' is never defined here, so calling this
        # method raises NameError. It appears unused (tag() calls
        # mention_over_tokens directly); confirm intent before removing/fixing.
        return mention
    @staticmethod
    def get_span_end(indexes, start, max_sz=1024):
        # Largest sentence-boundary index whose offset stays under max_sz.
        end = bisect_left(indexes, max_sz, lo=start)-1
        # if we can't find a step less than max_sz, we try to
        # take the smallest possible step and hope for the best
        if end <= start:
            end = start + 1
        return end
    def tag(self, doc):
        start_time = time()
        # Newlines inside tokens would break the line-oriented protocol below.
        tokens = [t.text.replace('\n', ' ').replace('\r',' ') for t in doc.tokens]
        # the insanity below is motivated by the following:
        # - stanford doesn't tag everything we send it if we send it too much
        # - therefore we need to slice up the text into chunks of at most max_size
        # - however, we can't slice between sentences or tokens without hurting the tagger accuracy
        # - stanford doesn't return offsets, so we must keep tokens we send and tags returned aligned
        if tokens:
            acc = 0
            token_offsets = []
            # Maps a cumulative character offset back to its token index.
            character_to_token_offset = {0:-1}
            for i, t in enumerate(tokens):
                acc += len(t) + 1
                token_offsets.append(acc)
                character_to_token_offset[acc] = i
            # calculate character indexes of sentence delimiting tokens
            indexes = [0] + [token_offsets[i] for i,t in enumerate(tokens) if t == '.']
            if token_offsets and indexes[-1] != (token_offsets[-1]):
                indexes.append(token_offsets[-1])
            tags = []
            si, ei = 0, self.get_span_end(indexes, 0)
            while True:
                chunk_start_tok_idx = character_to_token_offset[indexes[si]]+1
                chunk_end_tok_idx = character_to_token_offset[indexes[ei]]+1
                text = ' '.join(tokens[chunk_start_tok_idx:chunk_end_tok_idx]).encode('utf-8')
                # todo: investigate why svc blows up if we don't RC after each chunk
                with tcp_socket(self.host, self.port) as s:
                    s.sendall(text+'\n')
                    buf = ''
                    while True:
                        buf += s.recv(4096)
                        # Stop on end-of-reply, or bail out if the service
                        # sends back far more than it plausibly should.
                        if buf[-1] == '\n' or len(buf) > 10*len(text):
                            break
                # Replies are 'token/TAG' pairs, space-separated per sentence.
                sentences = buf.split('\n')
                tags += [t.split('/')[-1] for s in sentences for t in s.split(' ')[:-1]]
                if ei+1 == len(indexes):
                    break
                else:
                    si, ei = ei, self.get_span_end(indexes, ei)
            if len(tags) != len(tokens):
                raise Exception('Tokenisation error: #tags != #tokens')
            # Collapse runs of identical non-'O' tags into mentions.
            start = None
            last = 'O'
            for i, (txt, tag) in enumerate(izip(tokens,tags)):
                if tag != last:
                    if last != 'O':
                        yield self.mention_over_tokens(doc, start, i)
                    last = tag
                    start = i
            i += 1
            if last != 'O':
                # Flush a run that extends to the last token.
                yield self.mention_over_tokens(doc, start, i)
            log.debug('Tagged doc (%s) with %i tokens in %.2fs', doc.id, len(doc.tokens), time() - start_time)
|
mit
|
pthaike/keras
|
keras/datasets/imdb.py
|
37
|
1855
|
from __future__ import absolute_import
import six.moves.cPickle
import gzip
from .data_utils import get_file
import random
from six.moves import zip
import numpy as np
def load_data(path="imdb.pkl", nb_words=None, skip_top=0, maxlen=None, test_split=0.2, seed=113,
              start_char=1, oov_char=2, index_from=3):
    """Load the IMDB dataset as (X_train, y_train), (X_test, y_test).

    path: where to cache the downloaded pickle.
    nb_words: keep only the top nb_words most frequent word indexes.
    skip_top: skip the top skip_top most frequent words.
    maxlen: drop sequences of length >= maxlen.
    test_split: fraction of the data reserved for the test set.
    seed: shuffling seed (data and labels are shuffled in lockstep).
    start_char / oov_char / index_from: sequence-encoding conventions.
    """
    path = get_file(path, origin="https://s3.amazonaws.com/text-datasets/imdb.pkl")

    if path.endswith(".gz"):
        f = gzip.open(path, 'rb')
    else:
        f = open(path, 'rb')

    # Fix: close the file even if unpickling raises.
    try:
        X, labels = six.moves.cPickle.load(f)
    finally:
        f.close()

    # Shuffle data and labels with the same seed so pairs stay aligned.
    np.random.seed(seed)
    np.random.shuffle(X)
    np.random.seed(seed)
    np.random.shuffle(labels)

    if start_char is not None:
        X = [[start_char] + [w + index_from for w in x] for x in X]
    elif index_from:
        X = [[w + index_from for w in x] for x in X]

    if maxlen:
        new_X = []
        new_labels = []
        for x, y in zip(X, labels):
            if len(x) < maxlen:
                new_X.append(x)
                new_labels.append(y)
        X = new_X
        labels = new_labels
        if not X:
            # Fix: without this guard, max() below fails with an opaque
            # ValueError when maxlen filters out every sequence.
            raise Exception('After filtering for sequences shorter than maxlen=%s, '
                            'no sequence was kept. Increase maxlen.' % str(maxlen))

    if not nb_words:
        nb_words = max([max(x) for x in X])

    # by convention, use 2 as OOV word
    # reserve 'index_from' (=3 by default) characters: 0 (padding), 1 (start), 2 (OOV)
    if oov_char is not None:
        X = [[oov_char if (w >= nb_words or w < skip_top) else w for w in x] for x in X]
    else:
        # Fix: the original branch kept ONLY the out-of-vocabulary words
        # (condition inverted); with oov_char=None the OOV words should be
        # dropped and the in-vocabulary words kept.
        X = [[w for w in x if skip_top <= w < nb_words] for x in X]

    split_idx = int(len(X) * (1 - test_split))
    X_train, y_train = X[:split_idx], labels[:split_idx]
    X_test, y_test = X[split_idx:], labels[split_idx:]

    return (X_train, y_train), (X_test, y_test)
|
mit
|
MrNuggles/HeyBoet-Telegram-Bot
|
temboo/Library/Google/Plus/Domains/Media/Insert.py
|
5
|
6021
|
# -*- coding: utf-8 -*-
###############################################################################
#
# Insert
# Adds a new media item to an album.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class Insert(Choreography):
    # Choreo wrapper for the Google+ Domains "Media: insert" endpoint.
    def __init__(self, temboo_session):
        """
        Create a new instance of the Insert Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(Insert, self).__init__(temboo_session, '/Library/Google/Plus/Domains/Media/Insert')
    def new_input_set(self):
        # Factory for the typed input container used to parameterise this Choreo.
        return InsertInputSet()
    def _make_result_set(self, result, path):
        # Wrap raw execution output in the typed result set.
        return InsertResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        # Handle for polling/retrieving an asynchronous execution.
        return InsertChoreographyExecution(session, exec_id, path)
class InsertInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the Insert
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Each setter simply forwards its value to InputSet._set_input under the
    # corresponding Temboo input name.
    def set_AccessToken(self, value):
        """
        Set the value of the AccessToken input for this Choreo. ((optional, string) A valid access token retrieved during the OAuth2 process. This is required unless you provide the ClientID, ClientSecret, and RefreshToken to generate a new access token.)
        """
        super(InsertInputSet, self)._set_input('AccessToken', value)
    def set_ClientID(self, value):
        """
        Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Google. Required unless providing a valid AccessToken.)
        """
        super(InsertInputSet, self)._set_input('ClientID', value)
    def set_ClientSecret(self, value):
        """
        Set the value of the ClientSecret input for this Choreo. ((conditional, string) The Client Secret provided by Google. Required unless providing a valid AccessToken.)
        """
        super(InsertInputSet, self)._set_input('ClientSecret', value)
    def set_Collection(self, value):
        """
        Set the value of the Collection input for this Choreo. ((optional, string) Currently the acceptable values are "cloud". (Upload the media to share on Google+).)
        """
        super(InsertInputSet, self)._set_input('Collection', value)
    def set_ContentType(self, value):
        """
        Set the value of the ContentType input for this Choreo. ((conditional, string) The Content-Type of the file that is being uploaded (i.e. image/jpg). Required when specifying the FileContent input.)
        """
        super(InsertInputSet, self)._set_input('ContentType', value)
    def set_DisplayName(self, value):
        """
        Set the value of the DisplayName input for this Choreo. ((optional, string) The display name for the media. If this parameter is not provided, Google assigns a GUID to the media resource.)
        """
        super(InsertInputSet, self)._set_input('DisplayName', value)
    def set_Fields(self, value):
        """
        Set the value of the Fields input for this Choreo. ((optional, string) Selector specifying a subset of fields to include in the response.)
        """
        super(InsertInputSet, self)._set_input('Fields', value)
    def set_FileContent(self, value):
        """
        Set the value of the FileContent input for this Choreo. ((conditional, string) The Base64 encoded contents of the file to upload.)
        """
        super(InsertInputSet, self)._set_input('FileContent', value)
    def set_RefreshToken(self, value):
        """
        Set the value of the RefreshToken input for this Choreo. ((conditional, string) An OAuth refresh token used to generate a new access token when the original token is expired. Required unless providing a valid AccessToken.)
        """
        super(InsertInputSet, self)._set_input('RefreshToken', value)
    def set_UserID(self, value):
        """
        Set the value of the UserID input for this Choreo. ((optional, string) The ID of the user to create the activity on behalf of. The value "me" is set as the default to indicate the authenticated user.)
        """
        super(InsertInputSet, self)._set_input('UserID', value)
class InsertResultSet(ResultSet):
    """
    ResultSet for the Insert Choreo; exposes the named outputs of a
    completed execution.
    """

    def getJSONFromString(self, str):
        """Deserialize *str* (a JSON document) into Python objects."""
        return json.loads(str)

    def get_NewAccessToken(self):
        """((string) Contains a new AccessToken when the RefreshToken is provided.)"""
        return self._output.get('NewAccessToken', None)

    def get_Response(self):
        """((json) The response from Google.)"""
        return self._output.get('Response', None)
class InsertChoreographyExecution(ChoreographyExecution):

    def _make_result_set(self, response, path):
        # Package the raw response in this Choreo's result type.
        result = InsertResultSet(response, path)
        return result
|
gpl-3.0
|
beblount/Steer-Clear-Backend-Web
|
env/Lib/site-packages/sqlalchemy/util/topological.py
|
60
|
2794
|
# util/topological.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Topological sorting algorithms."""
from ..exc import CircularDependencyError
from .. import util
__all__ = ['sort', 'sort_as_subsets', 'find_cycles']
def sort_as_subsets(tuples, allitems, deterministic_order=False):
    """Yield successive dependency "tiers" of *allitems*.

    *tuples* is a sequence of (parent, child) pairs: parent must be
    emitted before child.  Each yielded set contains the items whose
    parents have all appeared in earlier tiers.  With
    ``deterministic_order=True`` ordered sets are used, so iteration
    within a tier follows insertion order.

    Raises CircularDependencyError when no further progress is possible.
    """
    # Map each child to the set of parents that must precede it.
    edges = util.defaultdict(set)
    for parent, child in tuples:
        edges[child].add(parent)
    Set = util.OrderedSet if deterministic_order else set
    todo = Set(allitems)
    while todo:
        # Collect every remaining node with no un-emitted parent.
        output = Set()
        for node in todo:
            if todo.isdisjoint(edges[node]):
                output.add(node)
        if not output:
            # No node could be emitted this pass -> there must be a cycle.
            raise CircularDependencyError(
                "Circular dependency detected.",
                find_cycles(tuples, allitems),
                _gen_edges(edges)
            )
        todo.difference_update(output)
        yield output
def sort(tuples, allitems, deterministic_order=False):
    """Yield the members of *allitems* one at a time in dependency order.

    'tuples' is a list of (parent, child) pairs giving a partial ordering.
    'deterministic_order' keeps items within a dependency tier in list order.
    """
    for tier in sort_as_subsets(tuples, allitems, deterministic_order):
        for item in tier:
            yield item
def find_cycles(tuples, allitems):
    """Return the set of all nodes that participate in at least one cycle.

    *tuples* is the same (parent, child) edge list accepted by sort().
    *allitems* is part of the interface but the traversal only needs to
    start from nodes that appear as parents (see comment below).
    """
    # adapted from:
    # http://neopythonic.blogspot.com/2009/01/detecting-cycles-in-directed-graph.html
    edges = util.defaultdict(set)
    for parent, child in tuples:
        edges[parent].add(child)
    nodes_to_test = set(edges)
    output = set()
    # we'd like to find all nodes that are
    # involved in cycles, so we do the full
    # pass through the whole thing for each
    # node in the original list.
    # we can go just through parent edge nodes.
    # if a node is only a child and never a parent,
    # by definition it can't be part of a cycle. same
    # if it's not in the edges at all.
    for node in nodes_to_test:
        # Iterative depth-first walk; `stack` is the current path.
        stack = [node]
        todo = nodes_to_test.difference(stack)
        while stack:
            top = stack[-1]
            # NOTE: the inner loop deliberately rebinds `node`.
            for node in edges[top]:
                if node in stack:
                    # Found a cycle: everything from the first occurrence
                    # of `node` on the path up to the top is cyclic.
                    cyc = stack[stack.index(node):]
                    todo.difference_update(cyc)
                    output.update(cyc)
                if node in todo:
                    # Descend into an unvisited child.
                    stack.append(node)
                    todo.remove(node)
                    break
            else:
                # No unvisited children left; backtrack.
                node = stack.pop()
    return output
def _gen_edges(edges):
return set([
(right, left)
for left in edges
for right in edges[left]
])
|
mit
|
rubbish/oh-my-zsh
|
plugins/git-prompt/gitstatus.py
|
343
|
2372
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Emit three lines -- branch, remote ahead/behind marker, and working-tree
# status -- for embedding in a zsh git prompt.  Parses `git status` porcelain
# text (old-style "# ..." headers, i.e. pre-1.8 git output format).
from subprocess import Popen, PIPE
import re
# change those symbols to whatever you prefer
symbols = {
    'ahead of': '↑',
    'behind': '↓',
    'staged': '♦',
    'changed': '‣',
    'untracked': '…',
    'clean': '⚡',
    'unmerged': '≠',
    'sha1': ':'
}
# universal_newlines=True -> text output on Python 3 as well.
output, error = Popen(
    ['git', 'status'], stdout=PIPE, stderr=PIPE, universal_newlines=True).communicate()
if error:
    # Not a git repository (or git failed): print nothing, exit quietly.
    import sys
    sys.exit(0)
lines = output.splitlines()
# Patterns matching the old "# Your branch is ..." header lines.
behead_re = re.compile(
    r"^# Your branch is (ahead of|behind) '(.*)' by (\d+) commit")
diverge_re = re.compile(r"^# and have (\d+) and (\d+) different")
status = ''
staged = re.compile(r'^# Changes to be committed:$', re.MULTILINE)
changed = re.compile(r'^# Changed but not updated:$', re.MULTILINE)
untracked = re.compile(r'^# Untracked files:$', re.MULTILINE)
unmerged = re.compile(r'^# Unmerged paths:$', re.MULTILINE)
def execute(*command):
    # Run *command (a single argv list) and return its output line count,
    # or '?' if the command wrote to stderr.
    out, err = Popen(stdout=PIPE, stderr=PIPE, *command).communicate()
    if not err:
        nb = len(out.splitlines())
    else:
        nb = '?'
    return nb
if staged.search(output):
    nb = execute(
        ['git', 'diff', '--staged', '--name-only', '--diff-filter=ACDMRT'])
    status += '%s%s' % (symbols['staged'], nb)
if unmerged.search(output):
    nb = execute(['git', 'diff', '--staged', '--name-only', '--diff-filter=U'])
    status += '%s%s' % (symbols['unmerged'], nb)
if changed.search(output):
    nb = execute(['git', 'diff', '--name-only', '--diff-filter=ACDMRT'])
    status += '%s%s' % (symbols['changed'], nb)
if untracked.search(output):
    status += symbols['untracked']
if status == '':
    status = symbols['clean']
remote = ''
bline = lines[0]
if bline.find('Not currently on any branch') != -1:
    # Detached HEAD: show an abbreviated SHA1 instead of a branch name.
    # NOTE(review): this Popen lacks universal_newlines, so on Python 3 the
    # result is bytes and the concatenation below would raise -- confirm the
    # intended interpreter is Python 2.
    branch = symbols['sha1'] + Popen([
        'git',
        'rev-parse',
        '--short',
        'HEAD'], stdout=PIPE).communicate()[0][:-1]
else:
    branch = bline.split(' ')[-1]
bstatusline = lines[1]
match = behead_re.match(bstatusline)
if match:
    # Simple ahead-of/behind relative to the tracked remote branch.
    remote = symbols[match.groups()[0]]
    remote += match.groups()[2]
elif lines[2:]:
    # Diverged: show both behind and ahead counts.
    div_match = diverge_re.match(lines[2])
    if div_match:
        remote = "{behind}{1}{ahead of}{0}".format(
            *div_match.groups(), **symbols)
print('\n'.join([branch, remote, status]))
|
mit
|
ProvidencePlan/Profiles
|
communityprofiles/maps/forms.py
|
2
|
3018
|
from django import forms
from maps.models import ShapeFile
from django.core.exceptions import ValidationError
from django.db.models.fields.files import FieldFile
from maps.utils import *
from django.contrib.gis.gdal import DataSource
from django.contrib.gis.geos import GEOSGeometry, MultiPolygon
class ShapeFileForm(forms.ModelForm):
    """ModelForm for ShapeFile uploads: validates that the required label
    and geo-key columns are set and that the uploaded .zip contains a
    shapefile whose attribute table actually has those columns.

    NOTE(review): Python 2 code (print statement below); `os` and
    `unzip_file` presumably come from the `from maps.utils import *`
    wildcard import -- verify.
    """
    class Meta:
        model = ShapeFile
    def clean_label_column(self):
        # Required field; reject None explicitly.
        if self.cleaned_data['label_column'] == None:
            raise ValidationError('Label Column is required')
        return self.cleaned_data['label_column']
    def clean_geo_key_column (self):
        # Required field; reject None explicitly.
        if self.cleaned_data['geo_key_column'] == None:
            raise ValidationError('Geo Key is required')
        return self.cleaned_data['geo_key_column']
    def clean_shape_file(self):
        # Both column fields must have survived their own clean_* methods
        # before the shapefile contents can be validated against them.
        if 'label_column' not in self.cleaned_data or 'geo_key_column' not in self.cleaned_data:
            raise ValidationError('Label Column and Geo Key column is required')
        file = self.cleaned_data['shape_file']
        if file is None:
            raise ValidationError('Please Choose a .zip file')
        if file != False:
            # Unpack the archive into a scratch directory for inspection.
            targ_dir = "/tmp/profiles_sf_tmp"
            if isinstance(file, FieldFile):
                # Re-save case: resolve the already-stored file's path.
                file = os.path.abspath(os.path.join(file.path))
            # make targ_dir if it doesnt exist
            if not os.path.exists(targ_dir):
                os.makedirs(targ_dir)
            shp_file_name = unzip_file(file, targ_dir)
            if shp_file_name is not None:
                shp_file = os.path.abspath(os.path.join(targ_dir, shp_file_name))
                ds = DataSource(shp_file)
                layer = ds[0]
                # all we have to do here is validate the contents of the shapefile
                projection = layer.srs.name.lower() # example GCS_North_American_1983, GCS_WGS_1984 TODO: there should be a better way to check this
                #IDEALLY EPSG:4326
                # NOTE(review): the condition below is a tautology -- the
                # projection check is effectively disabled (see TODO).
                if 'mercator' in projection or 'mercator' not in projection: # ALWAYS TRUE #TODO: need a way to validate shapefile projections
                    # Now check that Label Column is in the layer
                    if self.cleaned_data['label_column'] not in layer.fields:
                        raise ValidationError('Invalid Label Column. Fields are %s' % ', '.join(layer.fields))
                    # check if Geo Key Column is valid
                    # NOTE(review): leftover debug output (Python 2 print).
                    print self.cleaned_data
                    if self.cleaned_data['geo_key_column'] not in layer.fields:
                        raise ValidationError('Invalid Geo Key Column. Fields are %s' % ', '.join(layer.fields))
                    return file
                else:
                    # Unreachable while the tautological check above stands.
                    raise ValidationError('Invalid Shapefile. Please Use Mercator Projection, preferably EPSG:4326')
            else:
                raise ValidationError('Invalid Shapefile')
        #else:
        #    # it is a FieldFile which means they are just resaving
        #    return file
|
mit
|
ml-lab/pylearn2
|
pylearn2/datasets/tests/test_cifar10.py
|
3
|
3549
|
import unittest
import numpy as np
from pylearn2.datasets.cifar10 import CIFAR10
from pylearn2.space import Conv2DSpace
from pylearn2.testing.skip import skip_if_no_data
class TestCIFAR10(unittest.TestCase):
    """Tests for the CIFAR10 dataset wrapper: topological views, axis
    permutations, and iterator/data_specs consistency.

    NOTE(review): Python 2 code -- iterators are advanced with ``.next()``,
    which does not exist on Python 3.
    """
    def setUp(self):
        # Skips the whole test case when the CIFAR-10 data is not installed.
        skip_if_no_data()
        self.test = CIFAR10(which_set='test')
    def test_topo(self):
        """Tests that a topological batch has 4 dimensions"""
        train = CIFAR10(which_set='train')
        topo = train.get_batch_topo(1)
        assert topo.ndim == 4
    def test_topo_c01b(self):
        """
        Tests that a topological batch with axes ('c',0,1,'b')
        can be dimshuffled back to match the standard ('b',0,1,'c')
        format.
        """
        batch_size = 100
        c01b_test = CIFAR10(which_set='test', axes=('c', 0, 1, 'b'))
        c01b_X = c01b_test.X[0:batch_size, :]
        c01b = c01b_test.get_topological_view(c01b_X)
        assert c01b.shape == (3, 32, 32, batch_size)
        # transpose (c,0,1,b) -> (b,0,1,c)
        b01c = c01b.transpose(3, 1, 2, 0)
        b01c_X = self.test.X[0:batch_size, :]
        assert c01b_X.shape == b01c_X.shape
        assert np.all(c01b_X == b01c_X)
        b01c_direct = self.test.get_topological_view(b01c_X)
        assert b01c_direct.shape == b01c.shape
        assert np.all(b01c_direct == b01c)
    def test_iterator(self):
        # Tests that batches returned by an iterator with topological
        # data_specs are the same as the ones returned by calling
        # get_topological_view on the dataset with the corresponding order
        batch_size = 100
        b01c_X = self.test.X[0:batch_size, :]
        b01c_topo = self.test.get_topological_view(b01c_X)
        b01c_b01c_it = self.test.iterator(
            mode='sequential',
            batch_size=batch_size,
            data_specs=(Conv2DSpace(shape=(32, 32),
                                    num_channels=3,
                                    axes=('b', 0, 1, 'c')),
                        'features'))
        b01c_b01c = b01c_b01c_it.next()
        assert np.all(b01c_topo == b01c_b01c)
        c01b_test = CIFAR10(which_set='test', axes=('c', 0, 1, 'b'))
        c01b_X = c01b_test.X[0:batch_size, :]
        c01b_topo = c01b_test.get_topological_view(c01b_X)
        c01b_c01b_it = c01b_test.iterator(
            mode='sequential',
            batch_size=batch_size,
            data_specs=(Conv2DSpace(shape=(32, 32),
                                    num_channels=3,
                                    axes=('c', 0, 1, 'b')),
                        'features'))
        c01b_c01b = c01b_c01b_it.next()
        assert np.all(c01b_topo == c01b_c01b)
        # Also check that samples from iterators with the same data_specs
        # with Conv2DSpace do not depend on the axes of the dataset
        b01c_c01b_it = self.test.iterator(
            mode='sequential',
            batch_size=batch_size,
            data_specs=(Conv2DSpace(shape=(32, 32),
                                    num_channels=3,
                                    axes=('c', 0, 1, 'b')),
                        'features'))
        b01c_c01b = b01c_c01b_it.next()
        assert np.all(b01c_c01b == c01b_c01b)
        c01b_b01c_it = c01b_test.iterator(
            mode='sequential',
            batch_size=batch_size,
            data_specs=(Conv2DSpace(shape=(32, 32),
                                    num_channels=3,
                                    axes=('b', 0, 1, 'c')),
                        'features'))
        c01b_b01c = c01b_b01c_it.next()
        assert np.all(c01b_b01c == b01c_b01c)
|
bsd-3-clause
|
CloudServer/nova
|
nova/api/openstack/compute/contrib/flavor_disabled.py
|
79
|
2223
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Flavor Disabled API extension."""
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
# Soft-extension policy check gating whether the disabled flag is exposed.
authorize = extensions.soft_extension_authorizer('compute', 'flavor_disabled')
class FlavorDisabledController(wsgi.Controller):
    """Injects the "<alias>:disabled" field into flavor API responses."""

    def _extend_flavors(self, req, flavors):
        # The attribute key is invariant across flavors; compute it once.
        key = "%s:disabled" % Flavor_disabled.alias
        for flavor in flavors:
            db_flavor = req.get_db_flavor(flavor['id'])
            flavor[key] = db_flavor['disabled']

    def _show(self, req, resp_obj):
        # Only annotate when policy allows it and a flavor is present.
        if not authorize(req.environ['nova.context']):
            return
        if 'flavor' in resp_obj.obj:
            self._extend_flavors(req, [resp_obj.obj['flavor']])

    @wsgi.extends
    def show(self, req, resp_obj, id):
        return self._show(req, resp_obj)

    @wsgi.extends(action='create')
    def create(self, req, resp_obj, body):
        return self._show(req, resp_obj)

    @wsgi.extends
    def detail(self, req, resp_obj):
        if not authorize(req.environ['nova.context']):
            return
        self._extend_flavors(req, list(resp_obj.obj['flavors']))
class Flavor_disabled(extensions.ExtensionDescriptor):
    """Support to show the disabled status of a flavor."""
    # NOTE: the docstring above doubles as the extension's user-visible
    # description, so it is kept verbatim.

    name = "FlavorDisabled"
    alias = "OS-FLV-DISABLED"
    namespace = ("http://docs.openstack.org/compute/ext/"
                 "flavor_disabled/api/v1.1")
    updated = "2012-08-29T00:00:00Z"

    def get_controller_extensions(self):
        # Attach the disabled-flag controller to the 'flavors' resource.
        return [extensions.ControllerExtension(
            self, 'flavors', FlavorDisabledController())]
|
apache-2.0
|
Jeff-Tian/mybnb
|
Python27/Tools/Scripts/ndiff.py
|
12
|
3942
|
#! /usr/bin/env python
# Module ndiff version 1.7.0
# Released to the public domain 08-Dec-2000,
# by Tim Peters (tim.one@home.com).
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
# ndiff.py is now simply a front-end to the difflib.ndiff() function.
# Originally, it contained the difflib.SequenceMatcher class as well.
# This completes the raiding of reusable code from this formerly
# self-contained script.
"""ndiff [-q] file1 file2
or
ndiff (-r1 | -r2) < ndiff_output > file1_or_file2
Print a human-friendly file difference report to stdout. Both inter-
and intra-line differences are noted. In the second form, recreate file1
(-r1) or file2 (-r2) on stdout, from an ndiff report on stdin.
In the first form, if -q ("quiet") is not specified, the first two lines
of output are
-: file1
+: file2
Each remaining line begins with a two-letter code:
"- " line unique to file1
"+ " line unique to file2
" " line common to both files
"? " line not present in either input file
Lines beginning with "? " attempt to guide the eye to intraline
differences, and were not present in either input file. These lines can be
confusing if the source files contain tab characters.
The first file can be recovered by retaining only lines that begin with
" " or "- ", and deleting those 2-character prefixes; use ndiff with -r1.
The second file can be recovered similarly, but by retaining only " " and
"+ " lines; use ndiff with -r2; or, on Unix, the second file can be
recovered by piping the output through
sed -n '/^[+ ] /s/^..//p'
"""
__version__ = 1, 7, 0
import difflib, sys
def fail(msg):
    """Write *msg* plus the usage text to stderr; return 0 (= failure)."""
    sys.stderr.write(msg + "\n\n")
    sys.stderr.write(__doc__)
    return 0
# open a file & return the file object; gripe and return 0 if it
# couldn't be opened
def fopen(fname):
    # Python 2 code: 'U' is universal-newline read mode, and the
    # `except IOError, detail` form is py2-only syntax.
    try:
        return open(fname, 'U')
    except IOError, detail:
        return fail("couldn't open " + fname + ": " + str(detail))
# open two files & spray the diff to stdout; return false iff a problem
def fcompare(f1name, f2name):
    f1 = fopen(f1name)
    f2 = fopen(f2name)
    if not f1 or not f2:
        # fopen already reported the error; propagate failure.
        return 0
    a = f1.readlines(); f1.close()
    b = f2.readlines(); f2.close()
    # Python 2 print statement; trailing comma suppresses the newline
    # because ndiff lines already end with one.
    for line in difflib.ndiff(a, b):
        print line,
    return 1
# crack args (sys.argv[1:] is normal) & compare;
# return false iff a problem
def main(args):
    import getopt
    try:
        opts, args = getopt.getopt(args, "qr:")
    except getopt.error, detail:  # py2-only except syntax
        return fail(str(detail))
    noisy = 1
    qseen = rseen = 0
    for opt, val in opts:
        if opt == "-q":
            # quiet: suppress the two "-:"/"+:" filename header lines
            qseen = 1
            noisy = 0
        elif opt == "-r":
            # restore mode: val selects which file ("1" or "2") to rebuild
            rseen = 1
            whichfile = val
    if qseen and rseen:
        return fail("can't specify both -q and -r")
    if rseen:
        if args:
            return fail("no args allowed with -r option")
        if whichfile in ("1", "2"):
            restore(whichfile)
            return 1
        return fail("-r value must be 1 or 2")
    if len(args) != 2:
        return fail("need 2 filename args")
    f1name, f2name = args
    if noisy:
        print '-:', f1name
        print '+:', f2name
    return fcompare(f1name, f2name)
# read ndiff output from stdin, and print file1 (which=='1') or
# file2 (which=='2') to stdout
def restore(which):
    """Reconstruct one side of an ndiff report read from stdin."""
    sys.stdout.writelines(difflib.restore(sys.stdin.readlines(), which))
if __name__ == '__main__':
    args = sys.argv[1:]
    if "-profile" in args:
        # Hidden option: run under the profiler and dump timing stats.
        import profile, pstats
        args.remove("-profile")
        statf = "ndiff.pro"
        profile.run("main(args)", statf)
        stats = pstats.Stats(statf)
        stats.strip_dirs().sort_stats('time').print_stats()
    else:
        main(args)
|
apache-2.0
|
Sarah-Alsinan/muypicky
|
lib/python3.6/site-packages/django/contrib/contenttypes/management/__init__.py
|
60
|
4919
|
from django.apps import apps as global_apps
from django.db import DEFAULT_DB_ALIAS, migrations, router, transaction
from django.db.utils import IntegrityError
from django.utils import six
class RenameContentType(migrations.RunPython):
    """RunPython operation that renames the ContentType row for a model,
    forward (old -> new) and backward (new -> old)."""
    def __init__(self, app_label, old_model, new_model):
        self.app_label = app_label
        self.old_model = old_model
        self.new_model = new_model
        super(RenameContentType, self).__init__(self.rename_forward, self.rename_backward)
    def _rename(self, apps, schema_editor, old_model, new_model):
        # Core rename shared by both directions.
        ContentType = apps.get_model('contenttypes', 'ContentType')
        db = schema_editor.connection.alias
        if not router.allow_migrate_model(db, ContentType):
            return
        try:
            content_type = ContentType.objects.db_manager(db).get_by_natural_key(self.app_label, old_model)
        except ContentType.DoesNotExist:
            # No row to rename; nothing to do.
            pass
        else:
            content_type.model = new_model
            try:
                with transaction.atomic(using=db):
                    content_type.save(update_fields={'model'})
            except IntegrityError:
                # Gracefully fallback if a stale content type causes a
                # conflict as remove_stale_contenttypes will take care of
                # asking the user what should be done next.
                content_type.model = old_model
            else:
                # Clear the cache as the `get_by_natural_key()` call will cache
                # the renamed ContentType instance by its old model name.
                ContentType.objects.clear_cache()
    def rename_forward(self, apps, schema_editor):
        self._rename(apps, schema_editor, self.old_model, self.new_model)
    def rename_backward(self, apps, schema_editor):
        self._rename(apps, schema_editor, self.new_model, self.old_model)
def inject_rename_contenttypes_operations(plan=None, apps=global_apps, using=DEFAULT_DB_ALIAS, **kwargs):
    """
    Insert a `RenameContentType` operation after every planned `RenameModel`
    operation.
    """
    if plan is None:
        return
    # Determine whether or not the ContentType model is available.
    try:
        ContentType = apps.get_model('contenttypes', 'ContentType')
    except LookupError:
        available = False
    else:
        if not router.allow_migrate_model(using, ContentType):
            return
        available = True
    for migration, backward in plan:
        if ((migration.app_label, migration.name) == ('contenttypes', '0001_initial')):
            # There's no point in going forward if the initial contenttypes
            # migration is unapplied as the ContentType model will be
            # unavailable from this point.
            if backward:
                break
            else:
                # Applying the initial migration makes ContentType available
                # for the rest of the plan.
                available = True
                continue
        # The ContentType model is not available yet.
        if not available:
            continue
        inserts = []
        for index, operation in enumerate(migration.operations):
            if isinstance(operation, migrations.RenameModel):
                operation = RenameContentType(
                    migration.app_label, operation.old_name_lower, operation.new_name_lower
                )
                # +1: the rename-contenttype op goes right AFTER RenameModel.
                inserts.append((index + 1, operation))
        for inserted, (index, operation) in enumerate(inserts):
            # Each earlier insertion shifts later indexes by one, hence
            # `inserted + index`.
            migration.operations.insert(inserted + index, operation)
def get_contenttypes_and_models(app_config, using, ContentType):
    """Return (existing ContentTypes by model name, models by name) for
    *app_config* on the *using* database, or (None, None) when migration
    of ContentType to that database is not allowed."""
    if not router.allow_migrate_model(using, ContentType):
        return None, None
    ContentType.objects.clear_cache()
    existing = ContentType.objects.using(using).filter(app_label=app_config.label)
    content_types = dict((ct.model, ct) for ct in existing)
    app_models = dict(
        (model._meta.model_name, model) for model in app_config.get_models()
    )
    return content_types, app_models
def create_contenttypes(app_config, verbosity=2, interactive=True, using=DEFAULT_DB_ALIAS, apps=global_apps, **kwargs):
    """
    Creates content types for models in the given app.
    """
    if not app_config.models_module:
        return
    app_label = app_config.label
    try:
        # Re-resolve against the supplied app registry (may be historical).
        app_config = apps.get_app_config(app_label)
        ContentType = apps.get_model('contenttypes', 'ContentType')
    except LookupError:
        return
    content_types, app_models = get_contenttypes_and_models(app_config, using, ContentType)
    if not app_models:
        return
    # Build rows only for models that do not already have a ContentType.
    to_create = []
    for model_name, model in six.iteritems(app_models):
        if model_name in content_types:
            continue
        to_create.append(ContentType(app_label=app_label, model=model_name))
    ContentType.objects.using(using).bulk_create(to_create)
    if verbosity >= 2:
        for ct in to_create:
            print("Adding content type '%s | %s'" % (ct.app_label, ct.model))
|
mit
|
anestv/pa
|
test/external/requests/packages/urllib3/request.py
|
83
|
5557
|
try:
    from urllib.parse import urlencode  # Python 3
except ImportError:
    from urllib import urlencode  # Python 2 fallback
from .filepost import encode_multipart_formdata
__all__ = ['RequestMethods']
class RequestMethods(object):
    """
    Convenience mixin for classes who implement a :meth:`urlopen` method, such
    as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
    :class:`~urllib3.poolmanager.PoolManager`.
    Provides behavior for making common types of HTTP request methods and
    decides which type of request field encoding to use.
    Specifically,
    :meth:`.request_encode_url` is for sending requests whose fields are
    encoded in the URL (such as GET, HEAD, DELETE).
    :meth:`.request_encode_body` is for sending requests whose fields are
    encoded in the *body* of the request using multipart or www-form-urlencoded
    (such as for POST, PUT, PATCH).
    :meth:`.request` is for making any kind of request, it will look up the
    appropriate encoding format and use one of the above two methods to make
    the request.
    Initializer parameters:
    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.
    """
    # Methods whose fields belong in the URL query string, not the body.
    _encode_url_methods = {'DELETE', 'GET', 'HEAD', 'OPTIONS'}

    def __init__(self, headers=None):
        self.headers = headers or {}

    def urlopen(self, method, url, body=None, headers=None,
                encode_multipart=True, multipart_boundary=None,
                **kw):  # Abstract
        # Bug fix: the original raised ``NotImplemented``, which is the
        # non-callable binary-operator singleton, so calling this method
        # produced a confusing ``TypeError: 'NotImplementedType' object is
        # not callable`` instead of the intended exception.
        raise NotImplementedError("Classes extending RequestMethods must implement "
                                  "their own ``urlopen`` method.")

    def request(self, method, url, fields=None, headers=None, **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the appropriate encoding of
        ``fields`` based on the ``method`` used.
        This is a convenience method that requires the least amount of manual
        effort. It can be used in most situations, while still having the
        option to drop down to more specific methods when necessary, such as
        :meth:`request_encode_url`, :meth:`request_encode_body`,
        or even the lowest level :meth:`urlopen`.
        """
        method = method.upper()
        if method in self._encode_url_methods:
            return self.request_encode_url(method, url, fields=fields,
                                           headers=headers,
                                           **urlopen_kw)
        else:
            return self.request_encode_body(method, url, fields=fields,
                                            headers=headers,
                                            **urlopen_kw)

    def request_encode_url(self, method, url, fields=None, **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the url. This is useful for request methods like GET, HEAD, DELETE, etc.
        """
        if fields:
            url += '?' + urlencode(fields)
        return self.urlopen(method, url, **urlopen_kw)

    def request_encode_body(self, method, url, fields=None, headers=None,
                            encode_multipart=True, multipart_boundary=None,
                            **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the body. This is useful for request methods like POST, PUT, PATCH, etc.
        When ``encode_multipart=True`` (default), then
        :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode
        the payload with the appropriate content type. Otherwise
        :meth:`urllib.urlencode` is used with the
        'application/x-www-form-urlencoded' content type.
        Multipart encoding must be used when posting files, and it's reasonably
        safe to use it in other times too. However, it may break request
        signing, such as with OAuth.
        Supports an optional ``fields`` parameter of key/value strings AND
        key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
        the MIME type is optional. For example::
            fields = {
                'foo': 'bar',
                'fakefile': ('foofile.txt', 'contents of foofile'),
                'realfile': ('barfile.txt', open('realfile').read()),
                'typedfile': ('bazfile.bin', open('bazfile').read(),
                              'image/jpeg'),
                'nonamefile': 'contents of nonamefile field',
            }
        When uploading a file, providing a filename (the first parameter of the
        tuple) is optional but recommended to best mimick behavior of browsers.
        Note that if ``headers`` are supplied, the 'Content-Type' header will
        be overwritten because it depends on the dynamic random boundary string
        which is used to compose the body of the request. The random boundary
        string can be explicitly set with the ``multipart_boundary`` parameter.
        """
        if encode_multipart:
            body, content_type = encode_multipart_formdata(
                fields or {}, boundary=multipart_boundary)
        else:
            body, content_type = (urlencode(fields or {}),
                                  'application/x-www-form-urlencoded')
        if headers is None:
            headers = self.headers
        # Content-Type is derived from the chosen encoding and must win over
        # any caller-supplied value.
        headers_ = {'Content-Type': content_type}
        headers_.update(headers)
        return self.urlopen(method, url, body=body, headers=headers_,
                            **urlopen_kw)
|
artistic-2.0
|
michallula/marathon-proxy-manager
|
src/marathon_proxy_manager/nginx.py
|
1
|
6399
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Lingaro
import os
import subprocess
from marathon import MarathonClient
from jinja2 import Environment, ChoiceLoader, FileSystemLoader, PackageLoader
class MarathonProxyManagerCommand(object):
    """Generates nginx vhost configs (sites-available + sites-enabled
    symlink) for apps running on a Marathon cluster, and optionally prunes
    unused configs and reloads nginx.

    NOTE(review): Python 2 code -- `apps_generator` uses dict.iteritems().
    """
    # Defaults; each can be overridden through the __init__ kwargs below.
    DEFAULT_TEMPLATE_LOADER = PackageLoader(u'marathon_proxy_manager', u'templates')
    DEFAULT_MARATHON_URL = u'http://localhost:8080'
    DEFAULT_DOMAIN = u'localhost'
    DEFAULT_TEMPLATE_PATH = None
    DEFAULT_TEMPLATE_NAME = u'nginx.tmpl'
    DEFAULT_CONF_DIR = u'/etc/nginx'
    DEFAULT_OUT_DIR = None
    DEFAULT_DELETE_UNUSED = False
    DEFAULT_OVERRIDE = False
    DEFAULT_RELOAD = False
    DEFAULT_GENERATE_FOR_SUSPENDED = False
    DEFAULT_APPS = ()
    DEFAULT_EXCLUDE = ()
    # Lazily-built singletons (see the properties below).
    _template = None
    _template_env = None
    _marathon_cli = None
    @classmethod
    def create_marathon_client(cls, marathon_url):
        # Factory hook; override to customize client construction.
        return MarathonClient(marathon_url)
    @classmethod
    def create_template_loader(cls, template_path=None):
        # A user-supplied template dir is searched first, falling back to
        # the package's bundled templates.
        if template_path is not None:
            return ChoiceLoader([
                FileSystemLoader(template_path),
                cls.DEFAULT_TEMPLATE_LOADER
            ])
        else:
            return cls.DEFAULT_TEMPLATE_LOADER
    @classmethod
    def create_template_env(cls, loader):
        return Environment(loader=loader)
    @classmethod
    def reload_nginx_conf(cls):
        # Shell out to the init system to reload nginx.
        subprocess.call([u"service", u"nginx", u"reload"])
    @property
    def template_env(self):
        # Lazily build (and cache) the Jinja2 environment.
        if self._template_env is None:
            self._template_env = self.create_template_env(
                loader=self.create_template_loader(
                    template_path=self._template_path
                )
            )
        return self._template_env
    @property
    def marathon_cli(self):
        # Lazily build (and cache) the Marathon API client.
        if self._marathon_cli is None:
            self._marathon_cli = self.create_marathon_client(self._marathon_url)
        return self._marathon_cli
    @property
    def template(self):
        # Lazily load (and cache) the vhost template.
        if self._template is None:
            self._template = self.template_env.get_template(self._template_name)
        return self._template
    def __init__(self, *args, **kwargs):
        # All configuration is keyword-based; unknown kwargs are ignored.
        self._marathon_url = kwargs.get(u'marathon_url', self.DEFAULT_MARATHON_URL)
        self._conf_dir = kwargs.get(u'conf_dir', self.DEFAULT_CONF_DIR) or self.DEFAULT_CONF_DIR
        self._out_dir = kwargs.get(u'output_dir', self.DEFAULT_OUT_DIR) or self._conf_dir
        self._template_path = kwargs.get(u'template_dir', self.DEFAULT_TEMPLATE_PATH)
        self._template_name = kwargs.get(u'template_name', self.DEFAULT_TEMPLATE_NAME)
        self._delete_unused = kwargs.get(u'delete_unused', self.DEFAULT_DELETE_UNUSED)
        self._reload = kwargs.get(u'reload', self.DEFAULT_RELOAD)
        self._override = kwargs.get(u'override', self.DEFAULT_OVERRIDE)
        self._apps = tuple(kwargs.get(u'apps', self.DEFAULT_APPS))
        self._exclude = tuple(kwargs.get(u'exclude', self.DEFAULT_EXCLUDE))
        self._generate_for_suspended = kwargs.get(u'generate_for_suspended', self.DEFAULT_GENERATE_FOR_SUSPENDED)
    def get_tasks(self):
        return self.marathon_cli.list_tasks()
    def get_apps(self):
        return self.marathon_cli.list_apps()
    def group_tasks(self, apps, tasks):
        # Map each app object to the list of its running tasks.
        return dict((app, [task for task in tasks if task.app_id == app.id]) for app in apps)
    def should_process(self, app, tasks):
        # app.id starts with '/'; strip it to get the config file name.
        app_name = app.id[1:]
        if not self._generate_for_suspended and not bool(tasks):
            return False
        if self._apps:
            # Explicit whitelist wins; exclusions still apply.
            return app_name in (set(self._apps) - set(self._exclude))
        elif self._exclude:
            return app_name not in self._exclude
        return True
    def apps_generator(self):
        # Yields (app, tasks) for every app that passes should_process.
        all_apps = self.get_apps()
        all_tasks = self.get_tasks()
        grouped_tasks = self.group_tasks(all_apps, all_tasks)
        for app, tasks in grouped_tasks.iteritems():
            if self.should_process(app, tasks):
                yield (app, tasks)
    def render_conf(self, app, tasks, *args, **kwargs):
        # Extra positional/keyword args become additional template context.
        return self.template.render(app=app, tasks=tasks, **dict(*args, **kwargs))
    def read_conf(self, app_name):
        # Returns the currently-enabled config text, or None if absent.
        file_path = os.path.join(self._conf_dir, u'sites-enabled', app_name)
        if os.path.isfile(file_path):
            with open(file_path, u'r') as file:
                return file.read()
    def write_conf(self, app_name, conf):
        # Write to sites-available, then (re)link it into sites-enabled.
        avail_file_path = os.path.join(self._out_dir, u'sites-available', app_name)
        with open(avail_file_path, u'w+') as file:
            file.write(conf)
        enabled_file_path = os.path.join(self._out_dir, u'sites-enabled', app_name)
        if self._override and os.path.isfile(enabled_file_path):
            os.remove(enabled_file_path)
        if not os.path.isfile(enabled_file_path):
            os.symlink(avail_file_path, enabled_file_path)
    def delete_unused_conf(self, apps=None, excluded=None):
        # Remove config files not in `excluded` (optionally limited to
        # `apps`); returns the tuple of deleted file names.
        apps = apps or ()
        excluded = excluded or ()
        modified = ()
        for dir_name in (u'sites-available', u'sites-enabled'):
            dir_path = os.path.join(self._out_dir, dir_name)
            for file_name in os.listdir(os.path.join(dir_path)):
                if (not apps or file_name in apps) and (file_name not in excluded):
                    path = os.path.join(dir_path, file_name)
                    if os.path.isfile(path):
                        modified += (file_name,)
                        os.remove(path)
        return modified
    def should_override(self, app_name, conf, old_conf=None):
        # Write when no config exists yet, or when overriding and changed.
        return old_conf is None or (self._override and conf != old_conf)
    def __call__(self, *args, **kwargs):
        # Main entry point: regenerate configs, optionally prune and reload.
        apps = ()
        modified = False
        for app, tasks in self.apps_generator():
            app_name = app.id[1:]
            conf = self.render_conf(app, tasks, *args, **kwargs)
            old_conf = self.read_conf(app_name)
            apps += (app_name,)
            if self.should_override(app_name, conf, old_conf):
                self.write_conf(app_name, conf)
                modified = True
        if self._delete_unused:
            # Configs just written (or explicitly excluded) are kept.
            if self.delete_unused_conf(apps=self._apps, excluded=self._exclude + apps):
                modified = True
        if self._reload and modified:
            self.reload_nginx_conf()
|
mit
|
barachka/odoo
|
addons/portal/portal.py
|
386
|
1361
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2011 OpenERP S.A (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class portal(osv.osv):
    """ A portal is simply a group of users with the flag 'is_portal' set to True.
        The flag 'is_portal' makes a user group usable as a portal.
    """
    _inherit = 'res.groups'
    _columns = {
        # Boolean marker that turns a plain res.groups record into a portal.
        'is_portal': fields.boolean('Portal', help="If checked, this group is usable as a portal."),
    }
|
agpl-3.0
|
bsipocz/astroML
|
doc/conf.py
|
2
|
7960
|
# -*- coding: utf-8 -*-
#
# astroML documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 6 15:37:12 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os

import matplotlib.sphinxext.plot_directive

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
#sys.path.insert(0, os.path.abspath('../'))

# override default gen_rst
sys.path.insert(0, os.path.abspath('sphinxext'))

# The gen_* helpers live in 'sphinxext' and are optional, so only a missing
# module is tolerated.  Fix: the former bare 'except:' also swallowed real
# errors (syntax errors, bugs) inside the helpers, hiding build breakage.
try:
    import gen_rst
except ImportError:
    pass

try:
    import gen_figure_rst
except ImportError:
    pass

try:
    import gen_paper_rst
except ImportError:
    pass

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst', 'gen_figure_rst', 'gen_paper_rst',
              'sphinx.ext.autodoc', 'sphinx.ext.doctest',
              'sphinx.ext.imgmath', 'sphinx.ext.viewcode',
              'sphinx.ext.autosummary', 'sphinx.ext.mathjax',
              matplotlib.sphinxext.plot_directive.__name__]

import numpy_ext.numpydoc
extensions.append('numpy_ext.numpydoc')
# Fix: a duplicate 'autosummary_generate=True' used to live here; the single
# authoritative assignment remains with the other autosummary settings below.
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# generate autosummary even if no references
autosummary_generate = True

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'astroML'
copyright = '2012-2020, Jake Vanderplas & AstroML Developers'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# Generate the plots for the gallery
plot_gallery = True

# Generate example gallery
figure_gallery = True

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'includes', '_templates', '_static']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'astroML'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/Logo.gif'

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'astroMLdoc'

# -- Options for LaTeX output --------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'astroML.tex', 'astroML Documentation',
     'Jake Vanderplas & astroML Developers', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'astroML', 'astroML Documentation',
     ['Jake Vanderplas'], 1)
]
|
bsd-2-clause
|
opencloudinfra/orchestrator
|
venv/Lib/site-packages/django/conf/locale/ka/formats.py
|
504
|
2180
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals

# Display formats: the *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'l, j F, Y'
TIME_FORMAT = 'h:i a'
DATETIME_FORMAT = 'j F, Y h:i a'
YEAR_MONTH_FORMAT = 'F, Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j.M.Y'
SHORT_DATETIME_FORMAT = 'j.M.Y H:i'
FIRST_DAY_OF_WEEK = 1  # (Monday)

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
    '%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y',  # '2006-10-25', '10/25/2006', '10/25/06'
    # '%d %b %Y', '%d %b, %Y', '%d %b. %Y',  # '25 Oct 2006', '25 Oct, 2006', '25 Oct. 2006'
    # '%d %B %Y', '%d %B, %Y',  # '25 October 2006', '25 October, 2006'
    # '%d.%m.%Y', '%d.%m.%y',  # '25.10.2006', '25.10.06'
]
DATETIME_INPUT_FORMATS = [
    '%Y-%m-%d %H:%M:%S',     # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M:%S.%f',  # '2006-10-25 14:30:59.000200'
    '%Y-%m-%d %H:%M',        # '2006-10-25 14:30'
    '%Y-%m-%d',              # '2006-10-25'
    '%d.%m.%Y %H:%M:%S',     # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f',  # '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M',        # '25.10.2006 14:30'
    '%d.%m.%Y',              # '25.10.2006'
    '%d.%m.%y %H:%M:%S',     # '25.10.06 14:30:59'
    '%d.%m.%y %H:%M:%S.%f',  # '25.10.06 14:30:59.000200'
    '%d.%m.%y %H:%M',        # '25.10.06 14:30'
    '%d.%m.%y',              # '25.10.06'
    '%m/%d/%Y %H:%M:%S',     # '10/25/2006 14:30:59'
    '%m/%d/%Y %H:%M:%S.%f',  # '10/25/2006 14:30:59.000200'
    '%m/%d/%Y %H:%M',        # '10/25/2006 14:30'
    '%m/%d/%Y',              # '10/25/2006'
    '%m/%d/%y %H:%M:%S',     # '10/25/06 14:30:59'
    '%m/%d/%y %H:%M:%S.%f',  # '10/25/06 14:30:59.000200'
    '%m/%d/%y %H:%M',        # '10/25/06 14:30'
    '%m/%d/%y',              # '10/25/06'
]

# Number formatting: non-breaking-space-like separator for thousands.
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = " "
NUMBER_GROUPING = 3
|
gpl-3.0
|
arskom/spyne
|
spyne/test/protocol/test_yaml.py
|
2
|
1914
|
#!/usr/bin/env python
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
import unittest
from spyne.test.protocol._test_dictdoc import TDictDocumentTest
from spyne.protocol.yaml import YamlDocument
from spyne import MethodContext
from spyne.application import Application
from spyne.decorator import srpc
from spyne.service import Service
from spyne.server import ServerBase
from spyne.protocol.yaml import yaml
# Give PyYAML a json-like dumps/loads surface so the shared dict-document
# test factory (TDictDocumentTest) can drive it like the other serializers.
yaml.dumps = yaml.dump
# NOTE(review): yaml.load without an explicit Loader is unsafe on untrusted
# input and deprecated in PyYAML >= 5.1 -- acceptable here for test data only.
yaml.loads = yaml.load

# Instantiate the generic dict-document test suite against YamlDocument.
TestYamlDocument = TDictDocumentTest(yaml, YamlDocument, YamlDocument().out_kwargs)
class Test(unittest.TestCase):
    def test_invalid_input(self):
        """A truncated YAML document must produce a Client.YamlDecodeError fault."""
        class SomeService(Service):
            @srpc()
            def yay():
                pass

        app = Application([SomeService], 'tns',
                          in_protocol=YamlDocument(),
                          out_protocol=YamlDocument())

        server = ServerBase(app)
        initial_ctx = MethodContext(server, MethodContext.SERVER)
        # Malformed payload: an opening brace that is never closed.
        initial_ctx.in_string = [b'{']

        ctx, = server.generate_contexts(initial_ctx)
        assert ctx.in_error.faultcode == 'Client.YamlDecodeError'


if __name__ == '__main__':
    unittest.main()
|
lgpl-2.1
|
cs-shadow/phabricator-tools
|
py/abd/abdt_classicnaming.py
|
4
|
6547
|
"""Branch naming conventions for 'arcyd-review/description/base' style."""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# abdt_classicnaming
#
# Public Classes:
# Naming
# .make_tracker_branch_from_name
# .make_tracker_branch_name
# .make_review_branch_from_name
#
# Public Assignments:
# EXAMPLE_REVIEW_BRANCH_NAME
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import phlsys_string
import abdt_naming
# Canonical example of a classic-scheme review branch name, built from the
# shared example description/base constants in abdt_naming.
EXAMPLE_REVIEW_BRANCH_NAME = "arcyd-review/{description}/{base}".format(
    description=abdt_naming.EXAMPLE_REVIEW_BRANCH_DESCRIPTION,
    base=abdt_naming.EXAMPLE_REVIEW_BRANCH_BASE)
class Naming(object):
    """Codec between branch names and branch objects for the 'classic'
    'arcyd-review/description/base' naming scheme."""

    def __init__(self):
        super(Naming, self).__init__()
        # unfortunately the 'classic' scheme is too high in the namespace
        # hierarchy, it creates its branches alongside other 'top-level' arcyd
        # branch namespaces
        self._tracking_branch_prefix = abdt_naming.ARCYD_BRANCH_NAMESPACE
        self._review_branch_prefix = 'arcyd-review/'
        self._remote = 'origin'

    def make_tracker_branch_from_name(self, branch_name):
        """Return the TrackerBranch for 'branch_name'.

        Raises abdt_naming.Error if 'branch_name' is not a valid classic
        tracker branch name (note: it raises, it does not return None).

        Usage example:
            >>> naming = Naming()
            >>> make_branch = naming.make_tracker_branch_from_name
            >>> make_branch('dev/arcyd/ok/mywork/master/99')
            ... # doctest: +NORMALIZE_WHITESPACE
            abdt_naming.TrackerBranch("dev/arcyd/ok/mywork/master/99")

            >>> make_branch('dev/arcyd/trackers/x/ok/r/master/do/99')
            Traceback (most recent call last):
                ...
            Error

            >>> make_branch('invalid/mywork/master')
            Traceback (most recent call last):
                ...
            Error

        :branch_name: string name of the working branch
        :returns: TrackerBranch

        """
        if branch_name == abdt_naming.RESERVED_BRANCH_NAME:
            raise abdt_naming.Error()  # ignore the reserved branch

        if branch_name.startswith(abdt_naming.TRACKING_BRANCH_PREFIX):
            raise abdt_naming.Error()  # ignore all new tracker branches

        suffix = phlsys_string.after_prefix(
            branch_name, self._tracking_branch_prefix)
        if not suffix:
            # review branches must start with the prefix
            raise abdt_naming.Error()

        # suffix should be status/description/base(/...)/id
        #                  0     /1          /2    (/...)/-1
        parts = suffix.split("/")
        if len(parts) < 4:
            raise abdt_naming.Error()

        description = parts[1]
        # the base branch name may itself contain '/'
        base = '/'.join(parts[2:-1])

        review_branch = self._review_branch_prefix
        review_branch += description
        review_branch += "/" + base

        return abdt_naming.TrackerBranch(
            naming=self,
            branch=branch_name,
            review_branch=review_branch,
            status=parts[0],
            description=description,
            base=base,
            rev_id=parts[-1],
            remote=self._remote)

    def make_tracker_branch_name(self, status, description, base, review_id):
        """Return the unique string name of the tracker branch for params.

        Tracker branches are of the form:
            <tracking branch prefix>status/description/base/id

        Usage example:
            >>> naming = Naming()
            >>> make_name = naming.make_tracker_branch_name
            >>> make_name('ok', 'mywork', 'master', 99)
            'dev/arcyd/ok/mywork/master/99'

        :status: string status of the review
        :description: string descriptive name of the branch
        :base: string name of the branch to diff against and land on
        :review_id: identifier for the review, converted to str() for convenience
        :returns: string name of the working branch

        """
        tracker_branch = ""
        tracker_branch += self._tracking_branch_prefix
        tracker_branch += status
        tracker_branch += "/" + description
        tracker_branch += "/" + base
        tracker_branch += "/" + str(review_id)
        return tracker_branch

    def make_review_branch_from_name(self, branch_name):
        """Return the ReviewBranch for 'branch_name'.

        Raises abdt_naming.Error if 'branch_name' is not a valid classic
        review branch name (note: it raises, it does not return None).

        Usage example:
            >>> naming = Naming()
            >>> make_branch = naming.make_review_branch_from_name
            >>> make_branch('arcyd-review/mywork/master')
            ... # doctest: +NORMALIZE_WHITESPACE
            abdt_naming.ReviewBranch("arcyd-review/mywork/master")

            >>> make_branch('invalid/mywork/master')
            Traceback (most recent call last):
                ...
            Error

        :branch_name: string name of the review branch
        :returns: ReviewBranch

        """
        suffix = phlsys_string.after_prefix(
            branch_name, self._review_branch_prefix)
        if not suffix:
            # review branches must start with the prefix
            raise abdt_naming.Error()

        parts = suffix.split("/")
        if len(parts) < 2:
            # suffix should be description/base(/...)
            raise abdt_naming.Error()

        # the base branch name may itself contain '/'
        base = '/'.join(parts[1:])

        return abdt_naming.ReviewBranch(
            naming=self,
            branch=branch_name,
            description=parts[0],
            base=base,
            remote=self._remote)
# -----------------------------------------------------------------------------
# Copyright (C) 2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
|
apache-2.0
|
connorimes/servo
|
tests/wpt/web-platform-tests/tools/pywebsocket/src/test/testdata/handlers/sub/plain_wsh.py
|
499
|
1789
|
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def web_socket_do_extra_handshake(request):
    # This plain test handler accepts every handshake unconditionally.
    pass
def web_socket_transfer_data(request):
    # Echo which handler ran plus the negotiated resource/protocol so the
    # test suite can verify dispatch reached sub/plain_wsh.py.
    request.connection.write('sub/plain_wsh.py is called for %s, %s' %
                             (request.ws_resource, request.ws_protocol))
# vi:sts=4 sw=4 et
|
mpl-2.0
|
GarySparrow/mFlaskWeb
|
venv/Lib/site-packages/coverage/backunittest.py
|
95
|
1515
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Implementations of unittest features from the future."""
# Use unittest2 if it's available, otherwise unittest.  This gives us
# back-ported features for 2.6.
try:
    import unittest2 as unittest
except ImportError:
    import unittest
def unittest_has(method):
    """Does `unittest.TestCase` have `method` defined?"""
    return getattr(unittest.TestCase, method, None) is not None
class TestCase(unittest.TestCase):
    """Just like unittest.TestCase, but with assert methods added.

    Designed to be compatible with 3.1 unittest.  Methods are only defined if
    `unittest` doesn't have them.

    """
    # pylint: disable=missing-docstring

    # Many Pythons have this method defined.  But PyPy3 has a bug with it
    # somehow (https://bitbucket.org/pypy/pypy/issues/2092), so always use our
    # own implementation that works everywhere, at least for the ways we're
    # calling it.
    def assertCountEqual(self, s1, s2):
        """Assert these have the same elements, regardless of order."""
        self.assertEqual(sorted(s1), sorted(s2))

    # Bridge the old camel-case regex assertions to the modern names when
    # the running unittest lacks them (pre-3.2 / non-unittest2).
    if not unittest_has('assertRaisesRegex'):
        def assertRaisesRegex(self, *args, **kwargs):
            return self.assertRaisesRegexp(*args, **kwargs)

    if not unittest_has('assertRegex'):
        def assertRegex(self, *args, **kwargs):
            return self.assertRegexpMatches(*args, **kwargs)
|
mit
|
poiesisconsulting/openerp-restaurant
|
document/test_cindex.py
|
444
|
1553
|
#!/usr/bin/python
import sys
import os
import glob
import time
import logging
from optparse import OptionParser
logging.basicConfig(level=logging.DEBUG)
parser = OptionParser()
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose", default=True,
help="don't print status messages to stdout")
parser.add_option("-C", "--content",
action="store_true", dest="docontent", default=False,
help="Disect content, rather than the file.")
parser.add_option("--delay",
action="store_true", dest="delay", default=False,
help="delay after the operation, to inspect child processes")
(options, args) = parser.parse_args()
import content_index, std_index
from content_index import cntIndex
for fname in args:
try:
if options.docontent:
fp = open(fname,'rb')
content = fp.read()
fp.close()
res = cntIndex.doIndex(content, fname, None, None, True)
else:
res = cntIndex.doIndex(None, fname, None, fname,True)
if options.verbose:
for line in res[:5]:
print line
if options.delay:
time.sleep(30)
except Exception,e:
import traceback
tb_s = reduce(lambda x, y: x+y, traceback.format_exception( sys.exc_type, sys.exc_value, sys.exc_traceback))
except KeyboardInterrupt:
print "Keyboard interrupt"
#eof
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
z3ntu/razer-drivers
|
examples/custom_starlight.py
|
1
|
2320
|
from collections import defaultdict
import colorsys
import random
import time
import threading
from razer.client import DeviceManager
from razer.client import constants as razer_constants
# Create a DeviceManager. This is used to get specific devices
device_manager = DeviceManager()

print("Found {} Razer devices".format(len(device_manager.devices)))
print()

# Disable daemon effect syncing.
# Without this, the daemon will try to set the lighting effect to every device.
device_manager.sync_effects = False
# Helper function to generate interesting colors
def random_color():
    """Return a random, fully-bright RGB color as a tuple of ints in 0..255.

    Hue is uniform; saturation is kept >= 0.5 so the colors stay vivid.
    """
    rgb = colorsys.hsv_to_rgb(random.uniform(0, 1), random.uniform(0.5, 1), 1)
    # Fix: scale by 255, not 256.  hsv_to_rgb returns components in [0, 1]
    # and value=1 guarantees one component is exactly 1.0, which the old
    # int(256 * x) scaling turned into an out-of-range 256.
    return tuple(int(255 * x) for x in rgb)
# Handle the starlight effect for a single key
def starlight_key(device, row, col, active):
    """Fade one key from a random bright hue down to black over ~2 seconds,
    then clear its (row, col) flag in `active`.

    Runs on its own thread and writes the device's advanced matrix at ~60 FPS.
    """
    hue = random.uniform(0, 1)
    start_time = time.time()
    fade_time = 2

    elapsed = 0
    while elapsed < fade_time:
        elapsed = time.time() - start_time
        # Brightness decays linearly with elapsed time.  Fix: clamp at 0 --
        # elapsed can overshoot fade_time on the last pass, and a negative
        # value would produce negative RGB components.
        value = max(0.0, 1 - elapsed / fade_time)
        rgb = colorsys.hsv_to_rgb(hue, 1, value)
        color = tuple(int(256 * x) for x in rgb)
        device.fx.advanced.matrix[row, col] = color
        # Dead code removed: an initial unused random_color() call and a
        # 'value -= 0.01' whose result was overwritten every iteration.
        time.sleep(1 / 60)

    device.fx.advanced.matrix[row, col] = (0, 0, 0)
    active[(row, col)] = False
# Handle the startlight effect for an entire device
def starlight_effect(device):
    """Continuously light random inactive keys of `device`, one fade thread
    per key.  Never returns; intended to run on a daemon thread."""
    rows, cols = device.fx.advanced.rows, device.fx.advanced.cols
    # Tracks which (row, col) cells currently have a fade thread running so
    # the same key is never animated twice at once.
    active = defaultdict(bool)

    device.fx.advanced.matrix.reset()
    device.fx.advanced.draw()

    while True:
        row, col = random.randrange(rows), random.randrange(cols)
        if not active[(row, col)]:
            active[(row, col)] = True
            threading.Thread(target=starlight_key, args=(device, row, col, active)).start()
        time.sleep(0.1)
# Spawn a manager thread for each device and wait on all of them.
threads = []
for device in device_manager.devices:
    t = threading.Thread(target=starlight_effect, args=(device,), daemon=True)
    t.start()
    threads.append(t)

# While any effect thread runs, push the frame buffer to each device.
# Fix: Thread.isAlive() was removed in Python 3.9; is_alive() is the
# long-standing spelling and works on every supported version.
while any(t.is_alive() for t in threads):
    for device in device_manager.devices:
        device.fx.advanced.draw()
    time.sleep(1 / 60)
|
gpl-2.0
|
nazeehshoura/crawler
|
env/lib/python2.7/site-packages/django/contrib/gis/db/models/manager.py
|
83
|
3548
|
from django.db.models.manager import Manager
from django.contrib.gis.db.models.query import GeoQuerySet
class GeoManager(Manager):
    "Overrides Manager to return Geographic QuerySets."

    # This manager should be used for queries on related fields
    # so that geometry columns on Oracle and MySQL are selected
    # properly.
    use_for_related_fields = True

    def get_queryset(self):
        return GeoQuerySet(self.model, using=self._db)

    # Every method below delegates to the GeoQuerySet method of the same
    # name, so the geographic operations are usable directly on the manager.
    def area(self, *args, **kwargs):
        return self.get_queryset().area(*args, **kwargs)

    def centroid(self, *args, **kwargs):
        return self.get_queryset().centroid(*args, **kwargs)

    def collect(self, *args, **kwargs):
        return self.get_queryset().collect(*args, **kwargs)

    def difference(self, *args, **kwargs):
        return self.get_queryset().difference(*args, **kwargs)

    def distance(self, *args, **kwargs):
        return self.get_queryset().distance(*args, **kwargs)

    def envelope(self, *args, **kwargs):
        return self.get_queryset().envelope(*args, **kwargs)

    def extent(self, *args, **kwargs):
        return self.get_queryset().extent(*args, **kwargs)

    def extent3d(self, *args, **kwargs):
        return self.get_queryset().extent3d(*args, **kwargs)

    def force_rhr(self, *args, **kwargs):
        return self.get_queryset().force_rhr(*args, **kwargs)

    def geohash(self, *args, **kwargs):
        return self.get_queryset().geohash(*args, **kwargs)

    def geojson(self, *args, **kwargs):
        return self.get_queryset().geojson(*args, **kwargs)

    def gml(self, *args, **kwargs):
        return self.get_queryset().gml(*args, **kwargs)

    def intersection(self, *args, **kwargs):
        return self.get_queryset().intersection(*args, **kwargs)

    def kml(self, *args, **kwargs):
        return self.get_queryset().kml(*args, **kwargs)

    def length(self, *args, **kwargs):
        return self.get_queryset().length(*args, **kwargs)

    def make_line(self, *args, **kwargs):
        return self.get_queryset().make_line(*args, **kwargs)

    def mem_size(self, *args, **kwargs):
        return self.get_queryset().mem_size(*args, **kwargs)

    def num_geom(self, *args, **kwargs):
        return self.get_queryset().num_geom(*args, **kwargs)

    def num_points(self, *args, **kwargs):
        return self.get_queryset().num_points(*args, **kwargs)

    def perimeter(self, *args, **kwargs):
        return self.get_queryset().perimeter(*args, **kwargs)

    def point_on_surface(self, *args, **kwargs):
        return self.get_queryset().point_on_surface(*args, **kwargs)

    def reverse_geom(self, *args, **kwargs):
        return self.get_queryset().reverse_geom(*args, **kwargs)

    def scale(self, *args, **kwargs):
        return self.get_queryset().scale(*args, **kwargs)

    def snap_to_grid(self, *args, **kwargs):
        return self.get_queryset().snap_to_grid(*args, **kwargs)

    def svg(self, *args, **kwargs):
        return self.get_queryset().svg(*args, **kwargs)

    def sym_difference(self, *args, **kwargs):
        return self.get_queryset().sym_difference(*args, **kwargs)

    def transform(self, *args, **kwargs):
        return self.get_queryset().transform(*args, **kwargs)

    def translate(self, *args, **kwargs):
        return self.get_queryset().translate(*args, **kwargs)

    def union(self, *args, **kwargs):
        return self.get_queryset().union(*args, **kwargs)

    def unionagg(self, *args, **kwargs):
        return self.get_queryset().unionagg(*args, **kwargs)
|
mit
|
xuxiao19910803/edx-platform
|
common/test/acceptance/pages/studio/course_page.py
|
172
|
1522
|
"""
Base class for pages specific to a course in Studio.
"""
import os
from opaque_keys.edx.locator import CourseLocator
from bok_choy.page_object import PageObject
from . import BASE_URL
class CoursePage(PageObject):
    """
    Abstract base class for page objects specific to a course in Studio.
    """

    # Overridden by subclasses to provide the relative path within the course
    # Does not need to include the leading forward or trailing slash
    url_path = ""

    def __init__(self, browser, course_org, course_num, course_run):
        """
        Initialize the page object for the course located at
        `{course_org}.{course_num}.{course_run}`

        These identifiers will likely change in the future.
        """
        super(CoursePage, self).__init__(browser)
        self.course_info = {
            'course_org': course_org,
            'course_num': course_num,
            'course_run': course_run
        }

    @property
    def url(self):
        """
        Construct a URL to the page within the course.
        """
        # TODO - is there a better way to make this agnostic to the underlying default module store?
        default_store = os.environ.get('DEFAULT_STORE', 'draft')
        # 'draft' (old-mongo) stores use the deprecated course-key form.
        course_key = CourseLocator(
            self.course_info['course_org'],
            self.course_info['course_num'],
            self.course_info['course_run'],
            deprecated=(default_store == 'draft')
        )
        return "/".join([BASE_URL, self.url_path, unicode(course_key)])
|
agpl-3.0
|
infoelliex/odoo-saas-tools
|
saas_client/models/res_user.py
|
6
|
3354
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010, 2014 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
from openerp import models, fields, api, SUPERUSER_ID as SI, exceptions
from openerp.tools import config
from openerp.tools.translate import _
from openerp.addons.saas_utils import connector
class ResUsers(models.Model):
    """res.users extension for SaaS client databases.

    Enforces the per-database internal-user limit configured by the SaaS
    master and exposes the add-ons made available by the user's plan
    (looked up in the master database).
    """
    _name = 'res.users'
    _inherit = 'res.users'

    _defaults = {
        # Default OAuth provider is the SaaS server's own provider record.
        'oauth_provider_id': lambda self, cr, uid, ctx=None: self.pool['ir.model.data'].xmlid_to_res_id(cr, SI, 'saas_server.saas_oauth_provider')
    }

    @api.model
    def create(self, vals):
        """Create a user, refusing when the internal-user quota is reached."""
        max_users = self.env["ir.config_parameter"].get_param("saas_client.max_users")
        if max_users:
            max_users = int(max_users)
            # Only internal (non-share) users count against the quota.
            cur_users = self.env['res.users'].search_count([('share', '=', False)])
            if cur_users >= max_users:
                # Fix: corrected the 'Maximimum' typo in the user-facing message.
                raise exceptions.Warning(_('Maximum allowed users is %(max_users)s, while you already have %(cur_users)s') % {'max_users': max_users, 'cur_users': cur_users})
        return super(ResUsers, self).create(vals)

    # Add-ons the current user's plan allows, resolved from the master db.
    available_addons_ids = fields.Many2many(compute='_compute_addons',
                                            comodel_name='ir.module.module',
                                            string='Available Addons')

    @api.one
    def _compute_addons(self):
        """Compute the plan's optional add-ons (plus their dependencies)."""
        addon_ids = []
        add_names = []
        login = self.login
        # Look the user up in the SaaS master database to find their plan.
        db = config.get('db_master')
        registry = openerp.modules.registry.RegistryManager.get(db)
        with registry.cursor() as cr:
            ru = registry['res.users']
            user_ids = ru.search(cr, SI, [('login', '=', login)])
            if user_ids:
                user = ru.browse(cr, SI, user_ids[0])
                add_names = [x.name for x in user.plan_id.optional_addons_ids]
        if add_names:
            imm = self.env['ir.module.module']
            addons = imm.search([('name', 'in', add_names)])
            dependencies = []
            for addon in addons:
                dependencies += self._get_dependencies(addon)
            addon_ids = list(set([x.id for x in addons] + dependencies))
        self.available_addons_ids = addon_ids

    def _get_dependencies(self, addon):
        """Return the ids of `addon`'s (transitive) module dependencies."""
        dependencies = []
        for dep in addon.dependencies_id:
            dependencies.append(dep.depend_id.id)
            dependencies += self._get_dependencies(dep.depend_id)
        return dependencies
|
lgpl-3.0
|
MarkuNauma2/Markus
|
py/openage/convert/stringresource.py
|
46
|
1211
|
from collections import defaultdict
from . import dataformat
from .util import dbg
class StringResource(dataformat.Exportable):
    """String id/language to text mapping, extracted from language.dll."""

    name_struct = "string_resource"
    name_struct_file = "string_resource"
    struct_description = "string id/language to text mapping, extracted from language.dll file."

    data_format = (
        (True, "id", "int32_t"),
        (True, "lang", "char[16]"),
        (True, "text", "std::string"),
    )

    def __init__(self):
        # per-language table: lang -> {string_id: text}
        self.strings = defaultdict(lambda: {})

    def fill_from(self, pefile):
        """Merge every language string table of the given PE file."""
        for language, table in pefile.strings.items():
            self.strings[language].update(table)

    def dump(self, filename):
        """Flatten the stored strings into a single id-sorted data definition."""
        entries = [
            {"id": string_id, "lang": language, "text": content}
            for language, table in self.strings.items()
            for string_id, content in table.items()
        ]
        entries.sort(key=lambda entry: entry["id"])
        return [dataformat.DataDefinition(self, entries, filename)]

    @classmethod
    def structs(cls):
        return [dataformat.StructDefinition(cls)]
|
gpl-3.0
|
jejimenez/django
|
tests/backends/models.py
|
223
|
3397
|
from __future__ import unicode_literals
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Square(models.Model):
    # A (root, root**2) pair used by the backend tests.
    root = models.IntegerField()
    square = models.PositiveIntegerField()
    def __str__(self):
        return "%s ** 2 == %s" % (self.root, self.square)
@python_2_unicode_compatible
class Person(models.Model):
    # Minimal two-field model for cursor/queryset backend tests.
    first_name = models.CharField(max_length=20)
    last_name = models.CharField(max_length=20)
    def __str__(self):
        return '%s %s' % (self.first_name, self.last_name)
class SchoolClass(models.Model):
    # Model with a DateTimeField for datetime round-trip backend tests.
    year = models.PositiveIntegerField()
    day = models.CharField(max_length=9, blank=True)
    last_updated = models.DateTimeField()
# Deliberately oversized identifiers to exercise backend name-length limits.
class VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ(models.Model):
    class Meta:
        # We need to use a short actual table name or
        # we hit issue #8548 which we're not testing!
        verbose_name = 'model_with_long_table_name'
    primary_key_is_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz = models.AutoField(primary_key=True)
    charfield_is_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz = models.CharField(max_length=100)
    m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz = models.ManyToManyField(Person, blank=True)
class Tag(models.Model):
    # Generic-relation target used to test content-type based queries.
    name = models.CharField(max_length=30)
    content_type = models.ForeignKey(ContentType, models.CASCADE, related_name='backend_tags')
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey('content_type', 'object_id')
class Post(models.Model):
    # Uses a mixed-case table name to test case-sensitive identifiers.
    name = models.CharField(max_length=30)
    text = models.TextField()
    tags = GenericRelation('Tag')
    class Meta:
        db_table = 'CaseSensitive_Post'
@python_2_unicode_compatible
class Reporter(models.Model):
    # FK target for Article; also base of the proxy model below.
    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=30)
    def __str__(self):
        return "%s %s" % (self.first_name, self.last_name)
class ReporterProxy(Reporter):
    # Proxy over Reporter: same table, used to test FKs to proxy models.
    class Meta:
        proxy = True
@python_2_unicode_compatible
class Article(models.Model):
    headline = models.CharField(max_length=100)
    pub_date = models.DateField()
    # One FK to the concrete model, one (nullable) to its proxy.
    reporter = models.ForeignKey(Reporter, models.CASCADE)
    reporter_proxy = models.ForeignKey(
        ReporterProxy,
        models.SET_NULL,
        null=True,
        related_name='reporter_proxy',
    )
    def __str__(self):
        return self.headline
@python_2_unicode_compatible
class Item(models.Model):
    # Covers Date/Time/DateTime fields for backend conversion tests.
    name = models.CharField(max_length=30)
    date = models.DateField()
    time = models.TimeField()
    last_modified = models.DateTimeField()
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Object(models.Model):
    # Self-referential M2M without a DB-level constraint.
    related_objects = models.ManyToManyField("self", db_constraint=False, symmetrical=False)
    def __str__(self):
        return str(self.id)
@python_2_unicode_compatible
class ObjectReference(models.Model):
    # FK without a DB-level constraint, for constraint-handling tests.
    obj = models.ForeignKey(Object, models.CASCADE, db_constraint=False)
    def __str__(self):
        return str(self.obj_id)
class RawData(models.Model):
    # Binary blob storage, for BinaryField round-trip tests.
    raw_data = models.BinaryField()
|
bsd-3-clause
|
blbarker/spark-tk
|
regression-tests/sparktkregtests/testcases/models/kmeans_test.py
|
10
|
11430
|
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Calculate kmeans against known dataset with known centroids """
import unittest
from sparktkregtests.lib import sparktk_test
class KMeansClustering(sparktk_test.SparkTKTestCase):
    """Exercise kmeans train/predict against a known dataset with known centroids."""

    def setUp(self):
        """Import the files to test against."""
        super(KMeansClustering, self).setUp()
        schema = [("Vec1", float),
                  ("Vec2", float),
                  ("Vec3", float),
                  ("Vec4", float),
                  ("Vec5", float),
                  ("term", str)]
        self.vectors = ["Vec1", "Vec2", "Vec3", "Vec4", "Vec5"]
        self.frame_train = self.context.frame.import_csv(
            self.get_file("kmeans_train.csv"), schema=schema)
        self.frame_test = self.context.frame.import_csv(
            self.get_file("kmeans_test.csv"), schema=schema)

    def test_different_columns(self):
        """Predict must work when the test frame uses different column names."""
        kmodel = self.context.models.clustering.kmeans.train(
            self.frame_train,
            self.vectors,
            scalings=[1.0, 1.0, 1.0, 1.0, 1.0],
            k=5,
            max_iterations=300)
        # change the column names
        self.frame_test.rename_columns(
            {"Vec1": 'Dim1', "Vec2": 'Dim2', "Vec3": "Dim3",
             "Vec4": "Dim4", "Vec5": 'Dim5'})
        predicted_frame = kmodel.predict(
            self.frame_test, ['Dim1', 'Dim2', 'Dim3', 'Dim4', 'Dim5'])
        self._validate(kmodel, predicted_frame)

    def test_add_distance_columns_twice(self):
        """tests kmeans model add distances cols twice"""
        model = self.context.models.clustering.kmeans.train(
            self.frame_train,
            self.vectors,
            k=5)
        model.add_distance_columns(self.frame_train)
        with self.assertRaisesRegexp(Exception, "conflicting column names"):
            model.add_distance_columns(self.frame_train)

    def test_kmeans_standard(self):
        """Tests standard usage of the kmeans cluster algorithm."""
        kmodel = self.context.models.clustering.kmeans.train(
            self.frame_train,
            self.vectors,
            scalings=[1.0, 1.0, 1.0, 1.0, 1.0],
            k=5)
        predicted_frame = kmodel.predict(self.frame_test)
        self._validate(kmodel, predicted_frame)

    def test_column_weights(self):
        """Tests kmeans cluster algorithm with weighted values."""
        kmodel = self.context.models.clustering.kmeans.train(
            self.frame_train,
            self.vectors,
            scalings=[0.1, 0.1, 0.1, 0.1, 0.1],
            k=5)
        predicted_frame = kmodel.predict(self.frame_test)
        self._validate(kmodel, predicted_frame)

    def test_max_iterations(self):
        """Tests kmeans cluster algorithm with more iterations."""
        kmodel = self.context.models.clustering.kmeans.train(
            self.frame_train,
            self.vectors,
            scalings=[1.0, 1.0, 1.0, 1.0, 1.0],
            k=5,
            max_iterations=35)
        predicted_frame = kmodel.predict(self.frame_test)
        self._validate(kmodel, predicted_frame)

    def test_convergence_tolerance_assign(self):
        """Tests kmeans cluster with an arbitrary convergence tol. """
        kmodel = self.context.models.clustering.kmeans.train(
            self.frame_train,
            self.vectors,
            scalings=[1.0, 1.0, 1.0, 1.0, 1.0],
            k=5,
            convergence_tolerance=.000000000001)
        predicted_frame = kmodel.predict(self.frame_test)
        self._validate(kmodel, predicted_frame)

    def test_publish(self):
        """Tests kmeans cluster publish."""
        kmodel = self.context.models.clustering.kmeans.train(
            self.frame_train,
            self.vectors,
            scalings=[1.0, 1.0, 1.0, 1.0, 1.0],
            k=5)
        path = kmodel.export_to_mar(self.get_export_file(self.get_name("kmeans")))
        self.assertIn("hdfs", path)
        self.assertIn("kmeans", path)

    def test_max_iterations_negative(self):
        """Check error on negative number of iterations."""
        with self.assertRaisesRegexp(Exception, "maxIterations must be a positive value"):
            self.context.models.clustering.kmeans.train(
                self.frame_train,
                self.vectors,
                scalings=[0.01, 0.01, 0.01, 0.01, 0.01],
                k=5,
                max_iterations=-3)

    def test_max_iterations_bad_type(self):
        """Check error on invalid number of iterations."""
        with self.assertRaisesRegexp(Exception, "does not exist"):
            self.context.models.clustering.kmeans.train(
                self.frame_train,
                self.vectors,
                scalings=[0.01, 0.01, 0.01, 0.01, 0.01],
                k=5,
                max_iterations=[])

    def test_k_negative(self):
        """Check error on negative number of clusters."""
        with self.assertRaisesRegexp(Exception, "k must be at least 1"):
            self.context.models.clustering.kmeans.train(
                self.frame_train,
                self.vectors,
                scalings=[0.01, 0.01, 0.01, 0.01, 0.01],
                k=-5)

    def test_k_bad_type(self):
        """Check error on invalid number of clusters."""
        with self.assertRaisesRegexp(Exception, "does not exist"):
            self.context.models.clustering.kmeans.train(
                self.frame_train,
                self.vectors,
                scalings=[0.01, 0.01, 0.01, 0.01, 0.01],
                k=[])

    def test_convergence_tol_negative(self):
        """Check error on negative convergence_tol value."""
        with self.assertRaisesRegexp(Exception, "convergence tolerance must be a positive value"):
            self.context.models.clustering.kmeans.train(
                self.frame_train,
                self.vectors,
                scalings=[0.01, 0.01, 0.01, 0.01, 0.01],
                k=5,
                convergence_tolerance=-0.05)

    def test_convergence_tol_bad_type(self):
        """Check error on bad convergence_tol type."""
        with self.assertRaisesRegexp(Exception, "does not exist"):
            self.context.models.clustering.kmeans.train(
                self.frame_train,
                self.vectors,
                scalings=[0.01, 0.01, 0.01, 0.01, 0.01],
                k=5,
                convergence_tolerance=[])

    def test_invalid_columns_predict(self):
        """Check error with invalid columns"""
        with self.assertRaisesRegexp(Exception, "Invalid column name"):
            kmodel = self.context.models.clustering.kmeans.train(
                self.frame_train,
                self.vectors,
                scalings=[1.0, 1.0, 1.0, 1.0, 1.0],
                k=5)
            self.frame_test.rename_columns(
                {"Vec1": 'Dim1', "Vec2": 'Dim2', "Vec3": "Dim3",
                 "Vec4": "Dim4", "Vec5": 'Dim5'})
            predicted_frame = kmodel.predict(self.frame_test)
            # NOTE: converted from a Python 2 print statement; the call form
            # works identically on Python 2 and 3.
            print(predicted_frame.inspect())

    def test_too_few_columns(self):
        """Check error on invalid num of columns"""
        with self.assertRaisesRegexp(Exception, "Number of columns for train and predict should be same"):
            kmodel = self.context.models.clustering.kmeans.train(
                self.frame_train,
                self.vectors,
                scalings=[1.0, 1.0, 1.0, 1.0, 1.0],
                k=5)
            # the predict call itself is expected to raise
            kmodel.predict(self.frame_test, columns=["Vec1", "Vec2"])

    def test_null_frame(self):
        """Check error on null frame."""
        with self.assertRaisesRegexp(Exception, "frame cannot be None"):
            self.context.models.clustering.kmeans.train(
                None,
                self.vectors,
                scalings=[0.01, 0.01, 0.01, 0.01, 0.01],
                k=5)

    def _validate(self, kmodel, frame, val=83379.0):
        """ensure that clusters are correct

        NOTE(review): 'kmodel' and 'val' are currently unused; kept for
        call-site compatibility.
        """
        # group the result by cluster and term; term is the expected result
        grouped = frame.group_by(['cluster', 'term'])
        groups = grouped.take(grouped.count())
        # Assert the expected group ("term") maps 1:1 with the resulting
        # cluster: actual cluster labels may differ, but the partition of
        # rows must agree, so each (cluster, term) group has exactly the
        # two key fields.
        for group in groups:
            # assertEquals is a deprecated alias; use assertEqual
            self.assertEqual(len(group), 2)
if __name__ == '__main__':
    # Run the suite when this file is executed directly.
    unittest.main()
|
apache-2.0
|
DirectXMan12/nova-hacking
|
nova/db/sqlalchemy/types.py
|
12
|
1853
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Custom SQLAlchemy types."""
from sqlalchemy.dialects import postgresql
from sqlalchemy import types
from nova import utils
class IPAddress(types.TypeDecorator):
    """An SQLAlchemy type representing an IP-address."""

    impl = types.String(39).with_variant(postgresql.INET(), 'postgresql')

    def process_bind_param(self, value, dialect):
        """Process/Formats the value before insert it into the db."""
        # NOTE(maurosr): The purpose here is to convert ipv6 to the shortened
        # form, not validate it.  PostgreSQL's INET handles this natively,
        # so only shorten for other dialects.
        if dialect.name != 'postgresql' and utils.is_valid_ipv6(value):
            return utils.get_shortened_ipv6(value)
        return value
class CIDR(types.TypeDecorator):
    """An SQLAlchemy type representing a CIDR definition."""

    impl = types.String(43).with_variant(postgresql.INET(), 'postgresql')

    def process_bind_param(self, value, dialect):
        """Process/Formats the value before insert it into the db."""
        # NOTE(sdague): normalize all the inserts
        is_v6 = utils.is_valid_ipv6_cidr(value)
        return utils.get_shortened_ipv6_cidr(value) if is_v6 else value
|
apache-2.0
|
coderb0t/CouchPotatoServer
|
libs/rsa/varblock.py
|
216
|
4360
|
# -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''VARBLOCK file support
The VARBLOCK file format is as follows, where || denotes byte concatenation:
FILE := VERSION || BLOCK || BLOCK ...
BLOCK := LENGTH || DATA
LENGTH := varint-encoded length of the subsequent data. Varint comes from
Google Protobuf, and encodes an integer into a variable number of bytes.
Each byte uses the 7 lowest bits to encode the value. The highest bit set
to 1 indicates the next byte is also part of the varint. The last byte will
have this bit set to 0.
This file format is called the VARBLOCK format, in line with the varint format
used to denote the block sizes.
'''
from rsa._compat import byte, b
ZERO_BYTE = b('\x00')
VARBLOCK_VERSION = 1
def read_varint(infile):
    '''Reads a varint from the file.

    When the first byte to be read indicates EOF, (0, 0) is returned. When an
    EOF occurs when at least one byte has been read, an EOFError exception is
    raised.

    @param infile: the file-like object to read from. It should have a read()
        method.
    @returns (varint, length), the read varint and the number of read bytes.
    '''

    varint = 0
    read_bytes = 0

    while True:
        char = infile.read(1)
        if len(char) == 0:
            if read_bytes == 0:
                return (0, 0)
            raise EOFError('EOF while reading varint, value is %i so far' %
                           varint)

        # NOTE: renamed from 'byte' -- the old local shadowed the byte()
        # helper imported from rsa._compat at module level.
        byte_value = ord(char)
        # each byte contributes its 7 low bits, little-endian
        varint += (byte_value & 0x7F) << (7 * read_bytes)
        read_bytes += 1

        # high bit clear means this was the final byte of the varint
        if not byte_value & 0x80:
            return (varint, read_bytes)
def write_varint(outfile, value):
    '''Writes a varint to a file.

    @param outfile: the file-like object to write to. It should have a write()
        method.
    @returns the number of written bytes.
    '''

    # there is a big difference between 'write the value 0' (this case) and
    # 'there is nothing left to write' (the false-case of the while loop)
    if value == 0:
        outfile.write(ZERO_BYTE)
        return 1

    written_bytes = 0
    remaining = value
    while remaining > 0:
        septet = remaining & 0x7F
        remaining >>= 7
        if remaining:
            # more bytes follow: set the continuation bit
            septet |= 0x80
        outfile.write(byte(septet))
        written_bytes += 1

    return written_bytes
def yield_varblocks(infile):
    '''Generator, yields each block in the input file.

    @param infile: file to read, is expected to have the VARBLOCK format as
        described in the module's docstring.
    @yields the contents of each block.
    '''

    # The first byte carries the format version; refuse anything unknown.
    version_byte = infile.read(1)
    if not version_byte:
        raise EOFError('Unable to read VARBLOCK version number')

    version = ord(version_byte)
    if version != VARBLOCK_VERSION:
        raise ValueError('VARBLOCK version %i not supported' % version)

    while True:
        (length, consumed) = read_varint(infile)

        # EOF exactly at a block boundary is a clean end of file.
        if consumed == 0 and length == 0:
            break

        payload = infile.read(length)
        if len(payload) != length:
            raise EOFError('Block size is %i, but could read only %i bytes' %
                           (length, len(payload)))

        yield payload
def yield_fixedblocks(infile, blocksize):
    '''Generator, yields each block of ``blocksize`` bytes in the input file.

    :param infile: file to read and separate in blocks.
    :returns: a generator that yields the contents of each block
    '''

    while True:
        chunk = infile.read(blocksize)
        if not chunk:
            break

        yield chunk

        if len(chunk) < blocksize:
            # short read: that was the final, partial block
            break
|
gpl-3.0
|
monokal/qb
|
qb.py
|
1
|
8376
|
#!/usr/bin/env python3
""" A command-line client to manage qb environments. """
import argparse
import logging.handlers
import sys
from client.config import Config
from container.container import Container
from machine.machine import Machine
__author__ = "Daniel Middleton"
__email__ = "d@monokal.io"
__status__ = "Prototype"
__version__ = "1.0.0"
__all__ = []
# Path to the qb client config file.
config_path = "config.yaml"
class Client(object):
    """ Class to wrap qb client functionality. """
    def __init__(self):
        """ Load config, configure logging, parse command-line arguments,
        provide usage and invoke functions. """
        # Have our own logger capture warnings so we can format them.
        logging.captureWarnings(True)
        self.p = logging.getLogger('qb')
        self.p.setLevel(logging.INFO)
        # We just print to STDOUT for now as the qb client is primarily
        # intended to be used interactively.
        out = logging.StreamHandler(sys.stdout)
        out.setLevel(logging.DEBUG)
        formatter = logging.Formatter("(qb) %(message)s")
        out.setFormatter(formatter)
        self.p.addHandler(out)
        # Create the top-level parser.
        parser = argparse.ArgumentParser(
            prog="qb",
            description="A command-line client to manage qb environments.",
        )
        # Define top-level options.
        parser.add_argument('-d', '--debug',
                            action='store_true',
                            help='Output in debug verbosity.')
        # Create a sub-parser for qb sub-commands.
        subparsers = parser.add_subparsers()
        #
        # Start qb machine commands.
        #
        parser_machine = subparsers.add_parser('machine',
                                               aliases=['m'],
                                               help="Manage a qb machine.")
        # Exactly one machine action must be given per invocation.
        group_machine = parser_machine.add_mutually_exclusive_group(
            required=True)
        group_machine.add_argument('--create',
                                   nargs=1,
                                   metavar='NAME',
                                   help="Create a qb machine.")
        group_machine.add_argument('--start',
                                   nargs=1,
                                   metavar='NAME',
                                   help="Start a qb machine.")
        group_machine.add_argument('--stop',
                                   nargs=1,
                                   metavar='NAME',
                                   help="Stop a qb machine.")
        group_machine.add_argument('--remove',
                                   nargs=1,
                                   metavar='NAME',
                                   help="Remove a qb machine.")
        # args.func is the handler class itself; calling it below
        # instantiates _Machine, which performs the dispatch.
        parser_machine.set_defaults(func=_Machine)
        #
        # End qb machine commands.
        #
        #
        # Start qb container commands.
        #
        parser_container = subparsers.add_parser('container',
                                                 aliases=['c'],
                                                 help="Manage a qb container.")
        group_container = parser_container.add_mutually_exclusive_group(
            required=True)
        group_container.add_argument('--list',
                                     action='store_true',
                                     help="List qb containers.")
        group_container.add_argument('--create',
                                     nargs=2,
                                     metavar=('NAME', 'IMAGE'),
                                     help="Create a qb container.")
        group_container.add_argument('--start',
                                     nargs=1,
                                     metavar='NAME',
                                     help="Start a qb container.")
        group_container.add_argument('--stop',
                                     nargs=1,
                                     metavar='NAME',
                                     help="Stop a qb container.")
        group_container.add_argument('--remove',
                                     nargs=1,
                                     metavar='NAME',
                                     help="Remove a qb container.")
        parser_container.set_defaults(func=_Container)
        #
        # End qb container commands.
        #
        # Print help if no arg was provided, otherwise parse args and call the
        # relevant function.
        if len(sys.argv) <= 1:
            parser.print_help()
            sys.exit(1)
        args = parser.parse_args()
        if args.debug:
            self.p.setLevel(logging.DEBUG)
            self.p.debug("Debug mode is on.")
        # Create a Config instance and load configuration from file.
        # config_path is the module-level "config.yaml" default.
        config = Config()
        self.config = config.load(config_path)
        # Invoke the required function, passing it the parsed arguments
        # and qb config.
        args.func(args, self.config)
        return
class _Machine(object):
    """ Class to wrap qb machine functionality. """

    def __init__(self, args, config):
        """ Dispatch a qb machine operation based on parsed arguments. """

        # Use the logger object created by Client.
        self.p = logging.getLogger('qb')

        # Pull out the relevant values from config.
        self.vagrantfile = config['machine']['vagrantfile']

        # Create a Machine instance.
        self.machine = Machine(self.vagrantfile)

        # Map each mutually-exclusive CLI option to its handler and fire
        # the first one that was supplied.
        for option, handler in (('create', self.create),
                                ('start', self.start),
                                ('stop', self.stop),
                                ('remove', self.remove)):
            value = getattr(args, option)
            if value is not None:
                handler(value[0])
                return

        self.p.error("Failed to invoke function.")
        sys.exit(1)

    def create(self, name):
        """ Create a qb machine. """
        self.machine.create(name)

    def start(self, name):
        """ Start a qb machine. """
        self.machine.start(name)

    def stop(self, name):
        """ Stop a qb machine. """
        self.machine.stop(name)

    def remove(self, name):
        """ Remove a qb machine. """
        self.machine.remove(name)
class _Container(object):
    """ Class to wrap qb container functionality. """

    def __init__(self, args, config):
        """ Dispatch a qb container operation based on parsed arguments. """

        # Use the logger object created by Client.
        self.p = logging.getLogger('qb')

        # Pull out the relevant values from config.
        self.url = config['container']['lxd_api']['url']
        self.cert = config['container']['lxd_api']['cert']
        self.key = config['container']['lxd_api']['key']

        # Create a Container instance.
        self.container = Container(self.url, self.cert, self.key)

        # Invoke the required function based on the provided args.
        if args.list:
            self.list()
            return

        if args.create is not None:
            name, image = args.create
            self.create(name, image)
            return

        for option, handler in (('start', self.start),
                                ('stop', self.stop),
                                ('remove', self.remove)):
            value = getattr(args, option)
            if value is not None:
                handler(value[0])
                return

        self.p.error("Error invoking function.")
        sys.exit(1)

    def list(self):
        """ List qb containers. """
        self.container.list()

    def create(self, name, image):
        """ Create a qb container. """
        self.container.create(name, image)

    def start(self, name):
        """ Start a qb container. """
        self.container.start(name)

    def stop(self, name):
        """ Stop a qb container. """
        self.container.stop(name)

    def remove(self, name):
        """ Remove a qb container. """
        self.container.remove(name)
if __name__ == "__main__":
    # Instantiating Client parses sys.argv and dispatches the command.
    Client()
|
gpl-3.0
|
hcsturix74/django
|
tests/template_tests/templatetags/inclusion.py
|
174
|
8479
|
import operator
from django.template import Engine, Library
from django.utils import six
engine = Engine(app_dirs=True)
register = Library()
# Basic inclusion tags (no context): zero-arg and one-arg variants, each
# registered both from a template name and from a Template object.  The
# 'anything' attribute lets tests verify __dict__ survives registration.
@register.inclusion_tag('inclusion.html')
def inclusion_no_params():
    """Expected inclusion_no_params __doc__"""
    return {"result": "inclusion_no_params - Expected result"}
inclusion_no_params.anything = "Expected inclusion_no_params __dict__"
@register.inclusion_tag(engine.get_template('inclusion.html'))
def inclusion_no_params_from_template():
    """Expected inclusion_no_params_from_template __doc__"""
    return {"result": "inclusion_no_params_from_template - Expected result"}
inclusion_no_params_from_template.anything = "Expected inclusion_no_params_from_template __dict__"
@register.inclusion_tag('inclusion.html')
def inclusion_one_param(arg):
    """Expected inclusion_one_param __doc__"""
    return {"result": "inclusion_one_param - Expected result: %s" % arg}
inclusion_one_param.anything = "Expected inclusion_one_param __dict__"
@register.inclusion_tag(engine.get_template('inclusion.html'))
def inclusion_one_param_from_template(arg):
    """Expected inclusion_one_param_from_template __doc__"""
    return {"result": "inclusion_one_param_from_template - Expected result: %s" % arg}
inclusion_one_param_from_template.anything = "Expected inclusion_one_param_from_template __dict__"
# Context-handling variants: explicit takes_context=False, and
# takes_context=True with and without extra positional parameters.
@register.inclusion_tag('inclusion.html', takes_context=False)
def inclusion_explicit_no_context(arg):
    """Expected inclusion_explicit_no_context __doc__"""
    return {"result": "inclusion_explicit_no_context - Expected result: %s" % arg}
inclusion_explicit_no_context.anything = "Expected inclusion_explicit_no_context __dict__"
@register.inclusion_tag(engine.get_template('inclusion.html'), takes_context=False)
def inclusion_explicit_no_context_from_template(arg):
    """Expected inclusion_explicit_no_context_from_template __doc__"""
    return {"result": "inclusion_explicit_no_context_from_template - Expected result: %s" % arg}
inclusion_explicit_no_context_from_template.anything = "Expected inclusion_explicit_no_context_from_template __dict__"
@register.inclusion_tag('inclusion.html', takes_context=True)
def inclusion_no_params_with_context(context):
    """Expected inclusion_no_params_with_context __doc__"""
    return {"result": "inclusion_no_params_with_context - Expected result (context value: %s)" % context['value']}
inclusion_no_params_with_context.anything = "Expected inclusion_no_params_with_context __dict__"
@register.inclusion_tag(engine.get_template('inclusion.html'), takes_context=True)
def inclusion_no_params_with_context_from_template(context):
    """Expected inclusion_no_params_with_context_from_template __doc__"""
    return {"result": "inclusion_no_params_with_context_from_template - Expected result (context value: %s)" % context['value']}
inclusion_no_params_with_context_from_template.anything = "Expected inclusion_no_params_with_context_from_template __dict__"
@register.inclusion_tag('inclusion.html', takes_context=True)
def inclusion_params_and_context(context, arg):
    """Expected inclusion_params_and_context __doc__"""
    return {"result": "inclusion_params_and_context - Expected result (context value: %s): %s" % (context['value'], arg)}
inclusion_params_and_context.anything = "Expected inclusion_params_and_context __dict__"
@register.inclusion_tag(engine.get_template('inclusion.html'), takes_context=True)
def inclusion_params_and_context_from_template(context, arg):
    """Expected inclusion_params_and_context_from_template __doc__"""
    return {"result": "inclusion_params_and_context_from_template - Expected result (context value: %s): %s" % (context['value'], arg)}
inclusion_params_and_context_from_template.anything = "Expected inclusion_params_and_context_from_template __dict__"
# Multi-parameter variants: two params, one default, and *args varargs.
@register.inclusion_tag('inclusion.html')
def inclusion_two_params(one, two):
    """Expected inclusion_two_params __doc__"""
    return {"result": "inclusion_two_params - Expected result: %s, %s" % (one, two)}
inclusion_two_params.anything = "Expected inclusion_two_params __dict__"
@register.inclusion_tag(engine.get_template('inclusion.html'))
def inclusion_two_params_from_template(one, two):
    """Expected inclusion_two_params_from_template __doc__"""
    return {"result": "inclusion_two_params_from_template - Expected result: %s, %s" % (one, two)}
inclusion_two_params_from_template.anything = "Expected inclusion_two_params_from_template __dict__"
@register.inclusion_tag('inclusion.html')
def inclusion_one_default(one, two='hi'):
    """Expected inclusion_one_default __doc__"""
    return {"result": "inclusion_one_default - Expected result: %s, %s" % (one, two)}
inclusion_one_default.anything = "Expected inclusion_one_default __dict__"
@register.inclusion_tag(engine.get_template('inclusion.html'))
def inclusion_one_default_from_template(one, two='hi'):
    """Expected inclusion_one_default_from_template __doc__"""
    return {"result": "inclusion_one_default_from_template - Expected result: %s, %s" % (one, two)}
inclusion_one_default_from_template.anything = "Expected inclusion_one_default_from_template __dict__"
@register.inclusion_tag('inclusion.html')
def inclusion_unlimited_args(one, two='hi', *args):
    """Expected inclusion_unlimited_args __doc__"""
    return {"result": "inclusion_unlimited_args - Expected result: %s" % (', '.join(six.text_type(arg) for arg in [one, two] + list(args)))}
inclusion_unlimited_args.anything = "Expected inclusion_unlimited_args __dict__"
@register.inclusion_tag(engine.get_template('inclusion.html'))
def inclusion_unlimited_args_from_template(one, two='hi', *args):
    """Expected inclusion_unlimited_args_from_template __doc__"""
    return {"result": "inclusion_unlimited_args_from_template - Expected result: %s" % (', '.join(six.text_type(arg) for arg in [one, two] + list(args)))}
inclusion_unlimited_args_from_template.anything = "Expected inclusion_unlimited_args_from_template __dict__"
# Pure-varargs variants, template-context feature tags, and **kwargs.
@register.inclusion_tag('inclusion.html')
def inclusion_only_unlimited_args(*args):
    """Expected inclusion_only_unlimited_args __doc__"""
    return {"result": "inclusion_only_unlimited_args - Expected result: %s" % (', '.join(six.text_type(arg) for arg in args))}
inclusion_only_unlimited_args.anything = "Expected inclusion_only_unlimited_args __dict__"
@register.inclusion_tag(engine.get_template('inclusion.html'))
def inclusion_only_unlimited_args_from_template(*args):
    """Expected inclusion_only_unlimited_args_from_template __doc__"""
    return {"result": "inclusion_only_unlimited_args_from_template - Expected result: %s" % (', '.join(six.text_type(arg) for arg in args))}
inclusion_only_unlimited_args_from_template.anything = "Expected inclusion_only_unlimited_args_from_template __dict__"
@register.inclusion_tag('test_incl_tag_current_app.html', takes_context=True)
def inclusion_tag_current_app(context):
    """Expected inclusion_tag_current_app __doc__"""
    return {}
inclusion_tag_current_app.anything = "Expected inclusion_tag_current_app __dict__"
@register.inclusion_tag('test_incl_tag_use_l10n.html', takes_context=True)
def inclusion_tag_use_l10n(context):
    """Expected inclusion_tag_use_l10n __doc__"""
    return {}
inclusion_tag_use_l10n.anything = "Expected inclusion_tag_use_l10n __dict__"
@register.inclusion_tag('inclusion.html')
def inclusion_unlimited_args_kwargs(one, two='hi', *args, **kwargs):
    """Expected inclusion_unlimited_args_kwargs __doc__"""
    # Sort the dictionary by key to guarantee the order for testing.
    sorted_kwarg = sorted(six.iteritems(kwargs), key=operator.itemgetter(0))
    return {"result": "inclusion_unlimited_args_kwargs - Expected result: %s / %s" % (
        ', '.join(six.text_type(arg) for arg in [one, two] + list(args)),
        ', '.join('%s=%s' % (k, v) for (k, v) in sorted_kwarg)
    )}
inclusion_unlimited_args_kwargs.anything = "Expected inclusion_unlimited_args_kwargs __dict__"
# Deliberately broken tag (takes_context=True but no 'context' parameter)
# used to test the registration-time error, plus tags whose included
# templates use {% extends %}.
@register.inclusion_tag('inclusion.html', takes_context=True)
def inclusion_tag_without_context_parameter(arg):
    """Expected inclusion_tag_without_context_parameter __doc__"""
    return {}
inclusion_tag_without_context_parameter.anything = "Expected inclusion_tag_without_context_parameter __dict__"
@register.inclusion_tag('inclusion_extends1.html')
def inclusion_extends1():
    return {}
@register.inclusion_tag('inclusion_extends2.html')
def inclusion_extends2():
    return {}
|
bsd-3-clause
|
rodrigoasmacedo/PySPED
|
tests/testutils.py
|
9
|
2472
|
# -*- coding: utf-8 -*-
import fnmatch
import os
import pysped
def list_recursively(directory, pattern):
    """Returns files recursively from directory matching pattern

    :param directory: directory to list
    :param pattern: glob pattern to match
    """
    def _is_backup(name):
        # editor backup/lock files are not interesting
        return name.startswith('.#') or name.endswith('~')

    return [os.path.join(root, name)
            for root, dirnames, filenames in os.walk(directory)
            for name in fnmatch.filter(filenames, pattern)
            if not _is_backup(name)]
def get_sources(root):
    """Yield every checkable .py file under the project's source trees."""
    for subdir in ('pysped', 'tests'):
        base = os.path.join(root, subdir)
        for source in list_recursively(base, '*.py'):
            # package markers carry no checkable code
            if not source.endswith('__init__.py'):
                yield source
class ClassInittableMetaType(type):
    """Metaclass that calls ``__class_init__(namespace)`` on each new class."""
    # pylint fails to understand this is a metaclass

    def __init__(cls, name, bases, namespace):
        super(ClassInittableMetaType, cls).__init__(name, bases, namespace)
        cls.__class_init__(namespace)
class SourceTest(object):
    """Base class that auto-generates one ``test_<path>`` method per source
    file found under the project root.

    NOTE(review): ``__metaclass__`` is Python 2 metaclass syntax; under
    Python 3 this attribute is ignored, so no test methods would be
    generated — confirm the intended interpreter.
    """
    __metaclass__ = ClassInittableMetaType
    @classmethod
    def __class_init__(cls, namespace):
        # Invoked by the metaclass at class-creation time.
        root = os.path.dirname(os.path.dirname(pysped.__file__))
        cls.root = root
        for filename in get_sources(root):
            # Derive a test method name from the path relative to root.
            testname = filename[len(root):]
            if not cls.filename_filter(testname):
                continue
            testname = testname[:-3].replace('/', '_')
            name = 'test_%s' % (testname, )
            # root/filename are bound as defaults so each generated test
            # captures its own values (avoids the late-binding closure pitfall).
            func = lambda self, r=root, f=filename: self.check_filename(r, f)
            func.__name__ = name
            setattr(cls, name, func)
    def check_filename(self, root, filename):
        # Hook for subclasses: check a single source file.
        pass
    @classmethod
    def filename_filter(cls, filename):
        # The abstract base generates no tests; subclasses accept all files
        # unless they override this filter.
        if cls.__name__ == 'SourceTest':
            return False
        else:
            return True
def indent(elem, level=0):
    """Pretty-print helper: mutate *elem* in place, inserting newline plus
    per-level padding into ``text``/``tail`` so the serialized XML comes out
    indented.

    NOTE(review): the ``for elem in elem`` loop deliberately shadows the
    parameter; after recursing it normalizes each child's whitespace-only
    tail to the parent's indent string.
    """
    i = "\n" + level * " "
    if len(elem):
        # Parent opens a deeper level: text before first child, tail after.
        if not elem.text or not elem.text.strip():
            elem.text = i + " "
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
        for elem in elem:
            indent(elem, level + 1)
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
    else:
        # Leaf: only adjust the tail, and never for the root (level 0).
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i
|
lgpl-2.1
|
Pablo126/SSBW
|
Tarea3/tarea3/lib/python3.5/site-packages/setuptools/command/build_clib.py
|
314
|
4484
|
import distutils.command.build_clib as orig
from distutils.errors import DistutilsSetupError
from distutils import log
from setuptools.dep_util import newer_pairwise_group
class build_clib(orig.build_clib):
    """
    Override the default build_clib behaviour to do the following:

    1. Implement a rudimentary timestamp-based dependency system
       so 'compile()' doesn't run every time.
    2. Add more keys to the 'build_info' dictionary:
        * obj_deps - specify dependencies for each object compiled.
                     this should be a dictionary mapping a key
                     with the source filename to a list of
                     dependencies. Use an empty string for global
                     dependencies.
        * cflags - specify a list of additional flags to pass to
                   the compiler.
    """

    def build_libraries(self, libraries):
        """Build each (lib_name, build_info) pair, recompiling only when a
        source or one of its declared dependencies is newer than the
        corresponding object file."""
        for lib_name, build_info in libraries:
            sources = build_info.get('sources')
            if sources is None or not isinstance(sources, (list, tuple)):
                raise DistutilsSetupError(
                    "in 'libraries' option (library '%s'), "
                    "'sources' must be present and must be "
                    "a list of source filenames" % lib_name)
            sources = list(sources)

            log.info("building '%s' library", lib_name)

            # 'obj_deps' maps each source file to its extra dependencies;
            # the '' key holds dependencies shared by every source.
            obj_deps = build_info.get('obj_deps', dict())
            if not isinstance(obj_deps, dict):
                raise DistutilsSetupError(
                    "in 'libraries' option (library '%s'), "
                    "'obj_deps' must be a dictionary of "
                    "type 'source: list'" % lib_name)

            global_deps = obj_deps.get('', list())
            if not isinstance(global_deps, (list, tuple)):
                raise DistutilsSetupError(
                    "in 'libraries' option (library '%s'), "
                    "'obj_deps' must be a dictionary of "
                    "type 'source: list'" % lib_name)

            # Build the per-source dependency groups consumed by
            # newer_pairwise_group; each source depends on itself too.
            dependencies = []
            for source in sources:
                extra_deps = obj_deps.get(source, list())
                if not isinstance(extra_deps, (list, tuple)):
                    raise DistutilsSetupError(
                        "in 'libraries' option (library '%s'), "
                        "'obj_deps' must be a dictionary of "
                        "type 'source: list'" % lib_name)
                group = [source]
                group.extend(global_deps)
                group.extend(extra_deps)
                dependencies.append(group)

            expected_objects = self.compiler.object_filenames(
                sources,
                output_dir=self.build_temp,
            )

            # Only compile when at least one object is out of date with
            # respect to its dependency group.
            if newer_pairwise_group(dependencies, expected_objects) != ([], []):
                macros = build_info.get('macros')
                include_dirs = build_info.get('include_dirs')
                cflags = build_info.get('cflags')
                self.compiler.compile(
                    sources,
                    output_dir=self.build_temp,
                    macros=macros,
                    include_dirs=include_dirs,
                    extra_postargs=cflags,
                    debug=self.debug,
                )

            # "Link" the object files into a static library (on Unix this
            # just builds an archive).
            self.compiler.create_static_lib(
                expected_objects,
                lib_name,
                output_dir=self.build_clib,
                debug=self.debug,
            )
|
gpl-3.0
|
stackforge/watcher
|
watcher/decision_engine/goal/goals.py
|
2
|
6789
|
# -*- encoding: utf-8 -*-
# Copyright (c) 2016 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from watcher._i18n import _
from watcher.decision_engine.goal import base
from watcher.decision_engine.goal.efficacy import specs
class Dummy(base.Goal):
    """Dummy

    Reserved goal that is used for testing purposes.
    """

    @classmethod
    def get_name(cls):
        """Unique internal identifier of this goal."""
        return "dummy"

    @classmethod
    def get_display_name(cls):
        """Translated, user-facing goal name."""
        return _("Dummy goal")

    @classmethod
    def get_translatable_display_name(cls):
        """Untranslated message id matching get_display_name()."""
        return "Dummy goal"

    @classmethod
    def get_efficacy_specification(cls):
        """The efficacy spec for the current goal"""
        return specs.Unclassified()
class Unclassified(base.Goal):
    """Unclassified

    This goal is used to ease the development process of a strategy. Containing
    no actual indicator specification, this goal can be used whenever a
    strategy has yet to be formally associated with an existing goal. If the
    goal achieve has been identified but there is no available implementation,
    this Goal can also be used as a transitional stage.
    """

    @classmethod
    def get_name(cls):
        """Unique internal identifier of this goal."""
        return "unclassified"

    @classmethod
    def get_display_name(cls):
        """Translated, user-facing goal name."""
        return _("Unclassified")

    @classmethod
    def get_translatable_display_name(cls):
        """Untranslated message id matching get_display_name()."""
        return "Unclassified"

    @classmethod
    def get_efficacy_specification(cls):
        """The efficacy spec for the current goal"""
        return specs.Unclassified()
class ServerConsolidation(base.Goal):
    """ServerConsolidation

    This goal is for efficient usage of compute server resources in order to
    reduce the total number of servers.
    """

    @classmethod
    def get_name(cls):
        """Unique internal identifier of this goal."""
        return "server_consolidation"

    @classmethod
    def get_display_name(cls):
        """Translated, user-facing goal name."""
        return _("Server Consolidation")

    @classmethod
    def get_translatable_display_name(cls):
        """Untranslated message id matching get_display_name()."""
        return "Server Consolidation"

    @classmethod
    def get_efficacy_specification(cls):
        """The efficacy spec for the current goal"""
        return specs.ServerConsolidation()
class ThermalOptimization(base.Goal):
    """ThermalOptimization

    This goal is used to balance the temperature across different servers.
    """

    @classmethod
    def get_name(cls):
        """Unique internal identifier of this goal."""
        return "thermal_optimization"

    @classmethod
    def get_display_name(cls):
        """Translated, user-facing goal name."""
        return _("Thermal Optimization")

    @classmethod
    def get_translatable_display_name(cls):
        """Untranslated message id matching get_display_name()."""
        return "Thermal Optimization"

    @classmethod
    def get_efficacy_specification(cls):
        """The efficacy spec for the current goal"""
        return specs.Unclassified()
class WorkloadBalancing(base.Goal):
    """WorkloadBalancing

    This goal is used to evenly distribute workloads across different servers.
    """

    @classmethod
    def get_name(cls):
        """Unique internal identifier of this goal."""
        return "workload_balancing"

    @classmethod
    def get_display_name(cls):
        """Translated, user-facing goal name."""
        return _("Workload Balancing")

    @classmethod
    def get_translatable_display_name(cls):
        """Untranslated message id matching get_display_name()."""
        return "Workload Balancing"

    @classmethod
    def get_efficacy_specification(cls):
        """The efficacy spec for the current goal"""
        return specs.WorkloadBalancing()
class AirflowOptimization(base.Goal):
    """AirflowOptimization

    This goal is used to optimize the airflow within a cloud infrastructure.
    """

    @classmethod
    def get_name(cls):
        """Unique internal identifier of this goal."""
        return "airflow_optimization"

    @classmethod
    def get_display_name(cls):
        """Translated, user-facing goal name."""
        return _("Airflow Optimization")

    @classmethod
    def get_translatable_display_name(cls):
        """Untranslated message id matching get_display_name()."""
        return "Airflow Optimization"

    @classmethod
    def get_efficacy_specification(cls):
        """The efficacy spec for the current goal"""
        return specs.Unclassified()
class NoisyNeighborOptimization(base.Goal):
    """NoisyNeighborOptimization

    This goal is used to identify and migrate a Noisy Neighbor -
    a low priority VM that negatively affects performance of a high priority VM
    in terms of IPC by over utilizing Last Level Cache.
    """

    @classmethod
    def get_name(cls):
        """Unique internal identifier of this goal."""
        return "noisy_neighbor"

    @classmethod
    def get_display_name(cls):
        """Translated, user-facing goal name."""
        return _("Noisy Neighbor")

    @classmethod
    def get_translatable_display_name(cls):
        """Untranslated message id matching get_display_name()."""
        return "Noisy Neighbor"

    @classmethod
    def get_efficacy_specification(cls):
        """The efficacy spec for the current goal"""
        return specs.Unclassified()
class SavingEnergy(base.Goal):
    """SavingEnergy

    This goal is used to reduce power consumption within a data center.
    """

    @classmethod
    def get_name(cls):
        """Unique internal identifier of this goal."""
        return "saving_energy"

    @classmethod
    def get_display_name(cls):
        """Translated, user-facing goal name."""
        return _("Saving Energy")

    @classmethod
    def get_translatable_display_name(cls):
        """Untranslated message id matching get_display_name()."""
        return "Saving Energy"

    @classmethod
    def get_efficacy_specification(cls):
        """The efficacy spec for the current goal"""
        return specs.Unclassified()
class HardwareMaintenance(base.Goal):
    """HardwareMaintenance

    This goal is to migrate instances and volumes on a set of compute nodes
    and storage from nodes under maintenance
    """

    @classmethod
    def get_name(cls):
        """Unique internal identifier of this goal."""
        return "hardware_maintenance"

    @classmethod
    def get_display_name(cls):
        """Translated, user-facing goal name."""
        return _("Hardware Maintenance")

    @classmethod
    def get_translatable_display_name(cls):
        """Untranslated message id matching get_display_name()."""
        return "Hardware Maintenance"

    @classmethod
    def get_efficacy_specification(cls):
        """The efficacy spec for the current goal"""
        return specs.HardwareMaintenance()
class ClusterMaintaining(base.Goal):
    """ClusterMaintaining

    This goal is used to maintain compute nodes
    without having the user's application being interrupted.
    """

    @classmethod
    def get_name(cls):
        """Unique internal identifier of this goal."""
        return "cluster_maintaining"

    @classmethod
    def get_display_name(cls):
        """Translated, user-facing goal name."""
        return _("Cluster Maintaining")

    @classmethod
    def get_translatable_display_name(cls):
        """Untranslated message id matching get_display_name()."""
        return "Cluster Maintaining"

    @classmethod
    def get_efficacy_specification(cls):
        """The efficacy spec for the current goal"""
        return specs.Unclassified()
|
apache-2.0
|
matrixise/odoo
|
addons/base_gengo/__init__.py
|
377
|
1122
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Openerp sa (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import res_company
import ir_translation
import wizard
import controller
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
apaku/pygmailarchive
|
setup.py
|
1
|
1214
|
import os
from setuptools import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
    """Return the contents of *fname*, resolved relative to this file's
    directory.

    Used to embed the README as setup()'s long_description without
    duplicating its text here.
    """
    # Use a context manager so the handle is closed deterministically; the
    # original relied on garbage collection to close the file.
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path) as handle:
        return handle.read()
# Module-level packaging entry point: runs on `python setup.py ...`.
setup(
    name = "pygmailarchive",
    version = "0.3.0",
    author = "Andreas Pakulat",
    author_email = "apaku@gmx.de",
    description = ("An utility to archive Mails from GMail accounts."),
    license = "BSD",
    download_url = "https://github.com/downloads/apaku/pygmailarchive/pygmailarchive-0.3.0.tar.gz",
    keywords = "gmail imap archive",
    url = "https://github.com/apaku/pygmailarchive",
    # Runtime dependency: IMAPClient performs the actual GMail/IMAP access.
    install_requires = ["IMAPClient"],
    scripts = ["pygmailarchive.py"],
    data_files = [('share/doc/pygmailarchive', ['README','LICENSE'])],
    # Reuse the README as the PyPI long description (see read() above).
    long_description = read('README'),
    classifiers = [
        "Development Status :: 3 - Alpha",
        "Topic :: Utilities",
        "Environment :: Console",
        "License :: OSI Approved :: BSD License",
    ],
)
|
bsd-2-clause
|
arm-hpc/allinea_json_analysis
|
PR_JSON_Scripts/pr_json_common.py
|
1
|
3809
|
#!/usr/bin/env python
# Copyright 2015-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
sys.path.append('../JSON_Common')
import json_dict_common as jdc
def get_overview_data(jsonDict):
    """
    Gets the overview element from the given dictionary of items

    Args:
        jsonDict (dict): A dictionary which we assume to be of a JSON format

    Returns:
        Value of the overview element of the given dictionary
    """
    overview_path = ["data", "overview"]
    return jdc.get_dict_field_val(jsonDict, overview_path)
#### End of function get_overview_data
def get_num_processes(jsonDict):
    """
    Gets the number of processes used in the program run described by the
    given JSON representation of a Performance Report.

    Args:
        jsonDict (dict): Dictionary of JSON values representing a Performance
            Report

    Returns:
        The number of processes used, as an int
    """
    assert isinstance(jsonDict, dict)
    field_path = ["data", "applicationDetails", "processes", "plain"]
    return int(jdc.get_dict_field_val(jsonDict, field_path))
#### End of function get_num_processes
def get_num_threads(jsonDict):
    """
    Gets the number of OpenMP threads used in the program run described by
    the given JSON representation of a Performance Report.

    Args:
        jsonDict (dict): Dictionary of JSON values representing a Performance
            Report

    Returns:
        The number of threads used, as an int
    """
    assert isinstance(jsonDict, dict)
    field_path = ["data", "applicationDetails", "ompNumThreads"]
    return int(jdc.get_dict_field_val(jsonDict, field_path))
#### End of function get_num_threads
def get_mem_per_node(jsonDict):
    """
    Gets the memory available per node from the JSON dictionary passed in.

    Args:
        jsonDict (dict): Dictionary of JSON values representing a Performance
            Report

    Returns:
        Memory per node reported in the dictionary, as a float
    """
    assert isinstance(jsonDict, dict)
    field_path = ["data", "applicationDetails", "hostMemory", "plain", "value"]
    return float(jdc.get_dict_field_val(jsonDict, field_path))
#### End of fucntion get_mem_per_node
def get_num_nodes(jsonDict):
    """
    Gets the number of nodes used from the JSON dictionary passed in.

    Args:
        jsonDict (dict): Dictionary of JSON values representing a Performance
            Report

    Returns:
        Number of nodes reported in the dictionary (raw field value)
    """
    assert isinstance(jsonDict, dict)
    field_path = ["data", "applicationDetails", "nodes", "plain"]
    return jdc.get_dict_field_val(jsonDict, field_path)
### End of function get_num_nodes
def get_runtime(jsonDict):
    """
    Gets the run time of the profiled run from the JSON dictionary passed in.

    Args:
        jsonDict (dict): Dictionary of JSON values representing a Performance
            Report

    Returns:
        The run time reported in the dictionary (raw field value)
    """
    assert isinstance(jsonDict, dict)
    field_path = ["data", "applicationDetails", "time", "plain"]
    return jdc.get_dict_field_val(jsonDict, field_path)
#### End of function get_runtime
|
apache-2.0
|
miptliot/edx-platform
|
common/lib/calc/calc/functions.py
|
279
|
1521
|
"""
Provide the mathematical functions that numpy doesn't.
Specifically, the secant/cosecant/cotangents and their inverses and
hyperbolic counterparts
"""
import numpy
# Normal Trig
def sec(arg):
    """
    Secant: sec(x) = 1 / cos(x)
    """
    cos_val = numpy.cos(arg)
    return 1 / cos_val
def csc(arg):
    """
    Cosecant: csc(x) = 1 / sin(x)
    """
    sin_val = numpy.sin(arg)
    return 1 / sin_val
def cot(arg):
    """
    Cotangent: cot(x) = 1 / tan(x)
    """
    tan_val = numpy.tan(arg)
    return 1 / tan_val
# Inverse Trig
# http://en.wikipedia.org/wiki/Inverse_trigonometric_functions#Relationships_among_the_inverse_trigonometric_functions
def arcsec(val):
    """
    Inverse secant: arcsec(x) = arccos(1/x)
    """
    reciprocal = 1. / val
    return numpy.arccos(reciprocal)
def arccsc(val):
    """
    Inverse cosecant: arccsc(x) = arcsin(1/x)
    """
    reciprocal = 1. / val
    return numpy.arcsin(reciprocal)
def arccot(val):
    """
    Inverse cotangent, using the arctan-based convention with range
    (-pi/2, 0) for negative real inputs and (0, pi/2] otherwise.
    """
    base = numpy.arctan(val)
    if numpy.real(val) < 0:
        return -numpy.pi / 2 - base
    return numpy.pi / 2 - base
# Hyperbolic Trig
def sech(arg):
    """
    Hyperbolic secant: sech(x) = 1 / cosh(x)
    """
    cosh_val = numpy.cosh(arg)
    return 1 / cosh_val
def csch(arg):
    """
    Hyperbolic cosecant: csch(x) = 1 / sinh(x)
    """
    sinh_val = numpy.sinh(arg)
    return 1 / sinh_val
def coth(arg):
    """
    Hyperbolic cotangent: coth(x) = 1 / tanh(x)
    """
    tanh_val = numpy.tanh(arg)
    return 1 / tanh_val
# And their inverses
def arcsech(val):
    """
    Inverse hyperbolic secant: arcsech(x) = arccosh(1/x)
    """
    reciprocal = 1. / val
    return numpy.arccosh(reciprocal)
def arccsch(val):
    """
    Inverse hyperbolic cosecant: arccsch(x) = arcsinh(1/x)
    """
    reciprocal = 1. / val
    return numpy.arcsinh(reciprocal)
def arccoth(val):
    """
    Inverse hyperbolic cotangent: arccoth(x) = arctanh(1/x)
    """
    reciprocal = 1. / val
    return numpy.arctanh(reciprocal)
|
agpl-3.0
|
raildo/python-keystoneclient
|
python-keystoneclient-0.4.1.7.gdca1d42/keystoneclient/v2_0/roles.py
|
13
|
3136
|
# Copyright 2011 OpenStack Foundation
# Copyright 2011 Nebula, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import base
class Role(base.Resource):
    """Represents a Keystone role."""

    def __repr__(self):
        # Expose the raw info payload for easy debugging.
        return "<Role {0}>".format(self._info)

    def delete(self):
        # Delegate deletion to the manager owning this resource.
        return self.manager.delete(self)
class RoleManager(base.ManagerWithFind):
    """Manager class for manipulating Keystone roles."""
    resource_class = Role

    def get(self, role):
        """Fetch a single role by id or Role object."""
        return self._get("/OS-KSADM/roles/%s" % base.getid(role), "role")

    def create(self, name):
        """Create a role."""
        body = {"role": {"name": name}}
        return self._create('/OS-KSADM/roles', body, "role")

    def delete(self, role):
        """Delete a role."""
        return self._delete("/OS-KSADM/roles/%s" % base.getid(role))

    def list(self):
        """List all available roles."""
        return self._list("/OS-KSADM/roles", "roles")

    def roles_for_user(self, user, tenant=None):
        """List roles granted to *user*, optionally scoped to *tenant*."""
        user_id = base.getid(user)
        if not tenant:
            return self._list("/users/%s/roles" % user_id, "roles")
        tenant_id = base.getid(tenant)
        scoped_route = "/tenants/%s/users/%s/roles" % (tenant_id, user_id)
        return self._list(scoped_route, "roles")

    def add_user_role(self, user, role, tenant=None):
        """Adds a role to a user.

        If tenant is specified, the role is added just for that tenant,
        otherwise the role is added globally.
        """
        user_id = base.getid(user)
        role_id = base.getid(role)
        if not tenant:
            global_route = "/users/%s/roles/OS-KSADM/%s" % (user_id, role_id)
            return self._update(global_route, None, "roles")
        scoped_route = "/tenants/%s/users/%s/roles/OS-KSADM/%s" % (
            base.getid(tenant), user_id, role_id)
        return self._update(scoped_route, None, "role")

    def remove_user_role(self, user, role, tenant=None):
        """Removes a role from a user.

        If tenant is specified, the role is removed just for that tenant,
        otherwise the role is removed from the user's global roles.
        """
        user_id = base.getid(user)
        role_id = base.getid(role)
        if not tenant:
            global_route = "/users/%s/roles/OS-KSADM/%s" % (user_id, role_id)
            return self._delete(global_route)
        scoped_route = "/tenants/%s/users/%s/roles/OS-KSADM/%s" % (
            base.getid(tenant), user_id, role_id)
        return self._delete(scoped_route)
|
apache-2.0
|
DARKPOP/external_chromium_org_third_party_skia
|
platform_tools/android/gyp_gen/tool_makefile_writer.py
|
17
|
3228
|
#!/usr/bin/python
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Code for generating Android.mk for a tool."""
import android_framework_gyp
import gypd_parser
import makefile_writer
import os
import vars_dict_lib
def write_tool_android_mk(target_dir, var_dict):
    """Write Android.mk for a Skia tool.

    Args:
      target_dir: Destination for the makefile. Must not be None.
      var_dict: VarsDict containing variables for the makefile.
    """
    makefile_path = os.path.join(target_dir, 'Android.mk')
    with open(makefile_path, 'w') as out:
        out.write(makefile_writer.AUTOGEN_WARNING)
        makefile_writer.write_local_path(out)
        makefile_writer.write_clear_vars(out)
        makefile_writer.write_local_vars(out, var_dict, False, None)
        makefile_writer.write_include_stlport(out)
        out.write('include $(BUILD_NATIVE_TEST)\n')
def generate_tool(gyp_dir, target_file, skia_trunk, dest_dir,
                  skia_lib_var_dict, local_module_name, local_module_tags,
                  desired_targets):
    """Common steps for building one of the skia tools.

    Parse a gyp file and create an Android.mk for this tool.

    Args:
      gyp_dir: Directory containing gyp files.
      target_file: gyp file for the project to be built, contained in gyp_dir.
      skia_trunk: Trunk of Skia, used for determining the destination to write
        'Android.mk'.
      dest_dir: Destination for 'Android.mk', relative to skia_trunk. Used for
        both writing relative paths in the makefile and for determining the
        destination to write the it.
      skia_lib_var_dict: VarsDict representing libskia. Used as a reference to
        ensure we do not duplicate anything in this Android.mk.
      local_module_name: Name for this tool, to set as LOCAL_MODULE.
      local_module_tags: Tags to pass to LOCAL_MODULE_TAG.
      desired_targets: List of targets to parse.
    """
    gypd_file = android_framework_gyp.main(target_dir=gyp_dir,
                                           target_file=target_file,
                                           skia_arch_type='other',
                                           have_neon=False)

    var_dict = vars_dict_lib.VarsDict()
    # Seed with targets already covered by skia_lib so they are not reparsed.
    var_dict.KNOWN_TARGETS.set(skia_lib_var_dict.KNOWN_TARGETS)
    gypd_parser.parse_gypd(var_dict, gypd_file, dest_dir, desired_targets)
    android_framework_gyp.clean_gypd_files(gyp_dir)

    var_dict.LOCAL_MODULE.add(local_module_name)
    for tag in local_module_tags:
        var_dict.LOCAL_MODULE_TAGS.add(tag)

    # Defines already provided by skia_lib would be redundant here.
    for define in skia_lib_var_dict.DEFINES:
        try:
            var_dict.DEFINES.remove(define)
        except ValueError:
            # Okay if the define was not part of the parse for our tool.
            pass

    full_dest = os.path.join(skia_trunk, dest_dir) if skia_trunk else dest_dir

    # During testing there is no per-tool subdirectory (just a temporary
    # folder), so create the destination on demand.
    if not os.path.exists(full_dest):
        os.mkdir(full_dest)

    write_tool_android_mk(target_dir=full_dest, var_dict=var_dict)
|
bsd-3-clause
|
koolspin/rosetta
|
filters/logger_sink.py
|
1
|
3694
|
import binascii
from graph.filter_base import FilterBase, FilterState, FilterType
from graph.pad_template import PadTemplate
from graph.pad_capabilities import PadCapabilities
from graph.input_pin import InputPin
from graph.output_pin import OutputPin
class LoggerSink(FilterBase):
    """
    A logger sink filter.

    Input Pins:
    input - Accepts any mime type. Whatever is sent here gets logged.
    """
    #########################################################################
    # Note - these static methods MUST be implemented by all filters.
    print('######## Executing static variable init on LoggerSink')
    filter_meta = {}
    filter_meta[FilterBase.FILTER_META_NAME] = "LoggerSink"
    filter_meta[FilterBase.FILTER_META_DESC] = "Logs whatever data arrives on the sink pad."
    filter_meta[FilterBase.FILTER_META_VER] = "0.9.0"
    filter_meta[FilterBase.FILTER_META_RANK] = FilterBase.FILTER_RANK_SECONDARY
    filter_meta[FilterBase.FILTER_META_ORIGIN_URL] = "https://github.com/koolspin"
    filter_meta[FilterBase.FILTER_META_KLASS] = "Sink/Logger"
    # Pad templates for this filter
    # Note this dictionary is keyed by the actual pad name and not the name template
    filter_pad_templates = {}
    sink_pad_cap = PadCapabilities.create_caps_any()
    sink_pad_template = PadTemplate.create_pad_always_sink([sink_pad_cap])
    filter_pad_templates[FilterBase.DEFAULT_SINK_PAD_NAME] = sink_pad_template
    # End of filter metadata
    #########################################################################
    def __init__(self, name, config_dict, graph_manager):
        super().__init__(name, config_dict, graph_manager, FilterType.sink)
        # BUGFIX: the original created and registered the 'output' pin twice
        # (the same two lines were repeated after the input-pin setup),
        # leaving a stale, unused OutputPin registered. Create it once.
        self._output_pin = OutputPin('output', True)
        self._add_output_pin(self._output_pin)
        # Route every mime type ('*') arriving on the input pin to recv().
        mime_type_map = {}
        mime_type_map['*'] = self.recv
        ipin = InputPin('input', mime_type_map, self)
        self._add_input_pin(ipin)
        # Make sure to create the pads that are defined for this filter's template
        self._create_always_pads_from_template(LoggerSink.filter_pad_templates)
    def run(self):
        # Transition to the running state; recv() only forwards while running.
        super().run()
        self._set_filter_state(FilterState.running)
    def stop(self):
        # Transition to the stopped state.
        super().stop()
        self._set_filter_state(FilterState.stopped)
    def recv(self, mime_type, payload, metadata_dict):
        """Log the incoming payload and pass it through on the output pin."""
        print('Mime type: {0}'.format(mime_type))
        print('meta-dict: {0}'.format(metadata_dict))
        if isinstance(payload, str):
            # String format, print directly
            print('Payload: {0}'.format(payload))
        else:
            # Must be a binary format, convert to hex first
            print('Payload: {0}'.format(self._stringify_payload(mime_type, payload)))
        if self.filter_state == FilterState.running:
            self._output_pin.send(mime_type, payload, metadata_dict)
        else:
            raise RuntimeError('{0} tried to process input while filter state is {1}'.format(self.filter_name, self.filter_state))
    def _stringify_payload(self, mime_type, payload):
        # Render binary payloads as hex; anything else is decoded as UTF-8.
        ret_string = ''
        if mime_type == 'application/octet-stream':
            ret_string = binascii.hexlify(payload)
        else:
            ret_string = payload.decode("utf-8")
        return ret_string
    # Note - these static methods MUST be implemented by all filters.
    # TODO: Is there a better way to do this?
    @staticmethod
    def get_filter_metadata():
        return LoggerSink.filter_meta
    @staticmethod
    def get_filter_pad_templates():
        return LoggerSink.filter_pad_templates
|
mit
|
execuc/LCInterlocking
|
lasercut/hingesproperties.py
|
1
|
6521
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ***************************************************************************
# * *
# * Copyright (c) 2016 execuc *
# * *
# * This file is part of LCInterlocking module. *
# * LCInterlocking module is free software; you can redistribute it and/or*
# * modify it under the terms of the GNU Lesser General Public *
# * License as published by the Free Software Foundation; either *
# * version 2.1 of the License, or (at your option) any later version. *
# * *
# * This module is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
# * Lesser General Public License for more details. *
# * *
# * You should have received a copy of the GNU Lesser General Public *
# * License along with this library; if not, write to the Free Software *
# * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, *
# * MA 02110-1301 USA *
# * *
# ***************************************************************************
import FreeCAD
from lasercut import helper
from lasercut.material import retrieve_thickness_from_biggest_face
from lasercut.makehinges import complete_hinges_properties, create_solid_corner, estimate_min_link
import copy
class HingesProperties(helper.ObjectProperties):
    """Geometric and configuration properties of a living hinge connecting
    two FreeCAD faces."""

    # BUGFIX: a comma was missing after 'rotation_vector', so Python's
    # implicit string concatenation produced the bogus single entry
    # 'rotation_vectorarc_length' — neither attribute name was allowed.
    _allowed = ('freecad_face_1_name', 'freecad_object_1_name', 'freecad_face_2_name', 'freecad_object_2_name', 'name',
                'seg_face_1', 'seg_face_2', 'extrustion_vector', 'rad_angle', 'deg_angle', 'rotation_vector',
                'arc_length', 'arc_inner_radius', 'arc_outer_radius',
                'min_links_nb', 'nb_link', 'thickness', 'reversed_angle')

    def __init__(self, **kwargs):
        super(HingesProperties, self).__init__(**kwargs)
        # Live FreeCAD objects are kept as transient attributes; only their
        # names are persisted (see *_name fields above).
        self.freecad_object_1 = None
        self.freecad_face_1 = None
        self.freecad_object_2 = None
        self.freecad_face_2 = None
        if not kwargs['freecad_object_1'] or not kwargs['freecad_face_1']:
            raise ValueError("Must defined freecad face/object")
        if not kwargs['freecad_object_2'] or not kwargs['freecad_face_2']:
            raise ValueError("Must defined freecad face/object")
        self.freecad_object_1_name = kwargs['freecad_object_1'].Name
        self.freecad_object_2_name = kwargs['freecad_object_2'].Name
        if not hasattr(self, 'name'):
            self.name = kwargs['freecad_object_1'].Label + " -> " + kwargs['freecad_object_2'].Label
        # Geometry fields are filled in by complete_hinges_properties().
        self.arc_length = None
        self.extrustion_vector = None
        self.solid = None
        self.seg_face_1 = None
        self.seg_face_2 = None
        self.arc_middle_segment = None
        self.deg_angle = None
        self.rad_angle = None
        self.rotation_vector = None
        self.thickness = None
        self.min_links_nb = None
        self.arc_inner_radius = None
        self.arc_outer_radius = None
        self.nb_link = 5
        complete_hinges_properties(self, kwargs['freecad_face_1'], kwargs['freecad_face_2'], False)
        #create_solid_corner(self)
        self.compute_min_link(0.20)

    def compute_min_link(self, clearance_width):
        # Derive the minimum number of links from the bend angle, material
        # thickness and requested clearance; default to one more than that.
        self.min_links_nb = estimate_min_link(self.rad_angle, self.thickness, clearance_width)
        self.nb_link = self.min_links_nb + 1

    def recomputeInit(self, freecad_object_1, freecad_face_1, freecad_object_2, freecad_face_2):
        # Re-attach live FreeCAD objects (they are not persisted) and
        # recompute the hinge geometry, this time building the solid corner.
        self.freecad_object_1 = freecad_object_1
        self.freecad_face_1 = freecad_face_1
        self.freecad_object_2 = freecad_object_2
        self.freecad_face_2 = freecad_face_2
        complete_hinges_properties(self, freecad_face_1, freecad_face_2, True)
        create_solid_corner(self)
class GlobalLivingMaterialProperties(helper.ObjectProperties):
    """Material/laser parameters applied to a whole living-hinge part."""

    # BUGFIX: a comma was missing after 'freecad_object_label', so implicit
    # string concatenation produced the bogus single entry
    # 'freecad_object_labelgenerate_solid' — neither name was allowed.
    _allowed = ('new_name', 'thickness', 'laser_beam_diameter', 'freecad_object_name',
                'freecad_object_label',
                'generate_solid', 'dog_bone', 'link_clearance', 'solid_name',
                'hinge_type', "alternate_nb_hinge", "occupancy_ratio")
    HINGE_TYPE_ALTERNATE_DOUBLE = "Alternate"

    def __init__(self, **kwargs):
        super(GlobalLivingMaterialProperties, self).__init__(**kwargs)
        #if not hasattr(self, 'freecad_object'):
        #    raise ValueError("Must defined freecad object")
        # Default the thickness, then try to measure it from the object's
        # biggest face; fall back to the default when measurement fails.
        if not hasattr(self, 'thickness'):
            self.thickness = 5.0
            try:
                self.thickness = retrieve_thickness_from_biggest_face(kwargs['freecad_object'])
                # FreeCAD.Console.PrintError("found : %f\n" % self.thickness)
            except ValueError as e:
                FreeCAD.Console.PrintError(e)
        if not hasattr(self, 'freecad_object_name'):
            self.freecad_object_name = kwargs['freecad_object'].Name
        if not hasattr(self, 'name'):
            self.name = kwargs['freecad_object'].Name
        if not hasattr(self, 'label'):
            self.label = kwargs['freecad_object'].Label
        # Heuristic defaults derived from the material thickness.
        if not hasattr(self, 'laser_beam_diameter'):
            self.laser_beam_diameter = self.thickness / 15.0
        if not hasattr(self, 'link_clearance'):
            self.link_clearance = self.laser_beam_diameter * 3.0
        if not hasattr(self, 'new_name'):
            self.new_name = "%s_flat" % kwargs['freecad_object'].Label
        if not hasattr(self, 'solid_name'): #
            self.solid_name = "%s_solid" % kwargs['freecad_object'].Label
        if not hasattr(self, 'dog_bone'):
            self.dog_bone = False
        if not hasattr(self, 'generate_solid'):
            self.generate_solid = True
        if not hasattr(self, 'hinge_type'):
            self.hinge_type = self.HINGE_TYPE_ALTERNATE_DOUBLE
        if not hasattr(self, 'alternate_nb_hinge'):
            self.alternate_nb_hinge = int(2)
        if not hasattr(self, 'occupancy_ratio'):
            self.occupancy_ratio = 0.8
|
lgpl-2.1
|
Juniper/nova
|
nova/tests/unit/objects/test_migration_context.py
|
6
|
4720
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_serialization import jsonutils
from nova import exception
from nova import objects
from nova.tests.unit.objects import test_instance_numa_topology
from nova.tests.unit.objects import test_objects
from nova.tests import uuidsentinel as uuids
# Canned MigrationContext fixture shared by every test case below.
fake_instance_uuid = uuids.fake
fake_migration_context_obj = objects.MigrationContext()
fake_migration_context_obj.instance_uuid = fake_instance_uuid
fake_migration_context_obj.migration_id = 42
# The "new" side of the migration carries real NUMA/PCI payloads; the "old"
# side is deliberately None so the nullable fields are exercised.
fake_migration_context_obj.new_numa_topology = (
    test_instance_numa_topology.fake_obj_numa_topology.obj_clone())
fake_migration_context_obj.old_numa_topology = None
fake_migration_context_obj.new_pci_devices = objects.PciDeviceList()
fake_migration_context_obj.old_pci_devices = None
fake_migration_context_obj.new_pci_requests = (
    objects.InstancePCIRequests(requests=[
        objects.InstancePCIRequest(count=123, spec=[])]))
fake_migration_context_obj.old_pci_requests = None
# Fake instance_extra DB row: the migration context is stored as a JSON
# serialization of the object's primitive form.
fake_db_context = {
    'created_at': None,
    'updated_at': None,
    'deleted_at': None,
    'deleted': 0,
    'instance_uuid': fake_instance_uuid,
    'migration_context': jsonutils.dumps(
        fake_migration_context_obj.obj_to_primitive()),
}
def get_fake_migration_context_obj(ctxt):
    """Return a fresh copy of the canned MigrationContext bound to *ctxt*."""
    clone = fake_migration_context_obj.obj_clone()
    clone._context = ctxt
    return clone
class _TestMigrationContext(object):
    """Shared test logic, mixed into the local and remote cases below."""
    def _test_get_by_instance_uuid(self, db_data):
        # Load via the public API and compare against the object expected
        # from *db_data* (a fake instance_extra row whose
        # 'migration_context' value may be None).
        mig_context = objects.MigrationContext.get_by_instance_uuid(
            self.context, fake_db_context['instance_uuid'])
        if mig_context:
            self.assertEqual(fake_db_context['instance_uuid'],
                             mig_context.instance_uuid)
            # Guard with `and`: db_data itself may be falsy.
            expected_mig_context = db_data and db_data.get('migration_context')
            expected_mig_context = objects.MigrationContext.obj_from_db_obj(
                expected_mig_context)
            self.assertEqual(expected_mig_context.instance_uuid,
                             mig_context.instance_uuid)
            self.assertEqual(expected_mig_context.migration_id,
                             mig_context.migration_id)
            # NOTE(review): these compare types, not values — presumably
            # because a remote round-trip need not preserve object identity
            # or full equality; confirm against the object backends.
            self.assertIsInstance(expected_mig_context.new_numa_topology,
                                  mig_context.new_numa_topology.__class__)
            self.assertIsInstance(expected_mig_context.old_numa_topology,
                                  mig_context.old_numa_topology.__class__)
            self.assertIsInstance(expected_mig_context.new_pci_devices,
                                  mig_context.new_pci_devices.__class__)
            self.assertIsInstance(expected_mig_context.old_pci_devices,
                                  mig_context.old_pci_devices.__class__)
            self.assertIsInstance(expected_mig_context.new_pci_requests,
                                  mig_context.new_pci_requests.__class__)
            self.assertIsInstance(expected_mig_context.old_pci_requests,
                                  mig_context.old_pci_requests.__class__)
        else:
            self.assertIsNone(mig_context)
    @mock.patch('nova.db.instance_extra_get_by_instance_uuid')
    def test_get_by_instance_uuid(self, mock_get):
        # Row present with a serialized migration context.
        mock_get.return_value = fake_db_context
        self._test_get_by_instance_uuid(fake_db_context)
    @mock.patch('nova.db.instance_extra_get_by_instance_uuid')
    def test_get_by_instance_uuid_none(self, mock_get):
        # Row present but its migration context is NULL.
        db_context = fake_db_context.copy()
        db_context['migration_context'] = None
        mock_get.return_value = db_context
        self._test_get_by_instance_uuid(db_context)
    @mock.patch('nova.db.instance_extra_get_by_instance_uuid')
    def test_get_by_instance_uuid_missing(self, mock_get):
        # No instance_extra row at all -> MigrationContextNotFound raised.
        mock_get.return_value = None
        self.assertRaises(
            exception.MigrationContextNotFound,
            objects.MigrationContext.get_by_instance_uuid,
            self.context, 'fake_uuid')
class TestMigrationContext(test_objects._LocalTest, _TestMigrationContext):
    # Runs the shared tests against the in-process (local) object backend.
    pass
class TestMigrationContextRemote(test_objects._RemoteTest,
                                 _TestMigrationContext):
    # Runs the shared tests with objects serialized over (fake) RPC.
    pass
|
apache-2.0
|
k3nnyfr/s2a_fr-nsis
|
s2a/Python/Lib/gzip.py
|
57
|
18694
|
"""Functions that read and write gzipped files.
The user of the file doesn't have to worry about the compression,
but random access is not allowed."""
# based on Andrew Kuchling's minigzip.py distributed with the zlib module
import struct, sys, time, os
import zlib
import io
import __builtin__
__all__ = ["GzipFile","open"]
# Gzip header flag bits (RFC 1952): text hint, header CRC, extra field,
# original filename, and comment, respectively.
FTEXT, FHCRC, FEXTRA, FNAME, FCOMMENT = 1, 2, 4, 8, 16
# Internal markers for GzipFile.mode.
READ, WRITE = 1, 2
def write32u(output, value):
    """Serialize *value* as four little-endian bytes and write to *output*.

    The "<L" format emits the same bit pattern whether the integer is
    conceptually signed or unsigned, so callers may pass either.
    """
    packed = struct.pack("<L", value)
    output.write(packed)
def read32(input):
    """Read four bytes from *input* and decode them as an unsigned
    little-endian 32-bit integer."""
    raw = input.read(4)
    (value,) = struct.unpack("<I", raw)
    return value
def open(filename, mode="rb", compresslevel=9):
    """Shorthand for GzipFile(filename, mode, compresslevel).

    The filename argument is required; mode defaults to 'rb'
    and compresslevel defaults to 9.
    """
    return GzipFile(filename=filename, mode=mode, compresslevel=compresslevel)
class GzipFile(io.BufferedIOBase):
    """The GzipFile class simulates most of the methods of a file object with
    the exception of the readinto() and truncate() methods.
    """
    # File object opened by us (from a filename); we are responsible for
    # closing it. Stays None when the caller supplied fileobj.
    myfileobj = None
    # Upper bound on a single raw read from the underlying file.
    max_read_chunk = 10 * 1024 * 1024 # 10Mb
    def __init__(self, filename=None, mode=None,
                 compresslevel=9, fileobj=None, mtime=None):
        """Constructor for the GzipFile class.

        At least one of fileobj and filename must be given a
        non-trivial value.

        The new class instance is based on fileobj, which can be a regular
        file, a StringIO object, or any other object which simulates a file.
        It defaults to None, in which case filename is opened to provide
        a file object.

        When fileobj is not None, the filename argument is only used to be
        included in the gzip file header, which may includes the original
        filename of the uncompressed file. It defaults to the filename of
        fileobj, if discernible; otherwise, it defaults to the empty string,
        and in this case the original filename is not included in the header.

        The mode argument can be any of 'r', 'rb', 'a', 'ab', 'w', or 'wb',
        depending on whether the file will be read or written. The default
        is the mode of fileobj if discernible; otherwise, the default is 'rb'.
        Be aware that only the 'rb', 'ab', and 'wb' values should be used
        for cross-platform portability.

        The compresslevel argument is an integer from 0 to 9 controlling the
        level of compression; 1 is fastest and produces the least compression,
        and 9 is slowest and produces the most compression. 0 is no compression
        at all. The default is 9.

        The mtime argument is an optional numeric timestamp to be written
        to the stream when compressing. All gzip compressed streams
        are required to contain a timestamp. If omitted or None, the
        current time is used. This module ignores the timestamp when
        decompressing; however, some programs, such as gunzip, make use
        of it. The format of the timestamp is the same as that of the
        return value of time.time() and of the st_mtime member of the
        object returned by os.stat().
        """
        # Make sure we don't inadvertently enable universal newlines on the
        # underlying file object - in read mode, this causes data corruption.
        if mode:
            mode = mode.replace('U', '')
        # guarantee the file is opened in binary mode on platforms
        # that care about that sort of thing
        if mode and 'b' not in mode:
            mode += 'b'
        if fileobj is None:
            fileobj = self.myfileobj = __builtin__.open(filename, mode or 'rb')
        if filename is None:
            # Issue #13781: os.fdopen() creates a fileobj with a bogus name
            # attribute. Avoid saving this in the gzip header's filename field.
            if hasattr(fileobj, 'name') and fileobj.name != '<fdopen>':
                filename = fileobj.name
            else:
                filename = ''
        if mode is None:
            if hasattr(fileobj, 'mode'): mode = fileobj.mode
            else: mode = 'rb'
        if mode[0:1] == 'r':
            self.mode = READ
            # Set flag indicating start of a new member
            self._new_member = True
            # Buffer data read from gzip file. extrastart is offset in
            # stream where buffer starts. extrasize is number of
            # bytes remaining in buffer from current stream position.
            self.extrabuf = ""
            self.extrasize = 0
            self.extrastart = 0
            self.name = filename
            # Starts small, scales exponentially
            self.min_readsize = 100
        elif mode[0:1] == 'w' or mode[0:1] == 'a':
            self.mode = WRITE
            self._init_write(filename)
            # Negative wbits -> raw deflate stream: we write the gzip
            # header/trailer ourselves.
            self.compress = zlib.compressobj(compresslevel,
                                             zlib.DEFLATED,
                                             -zlib.MAX_WBITS,
                                             zlib.DEF_MEM_LEVEL,
                                             0)
        else:
            raise IOError, "Mode " + mode + " not supported"
        self.fileobj = fileobj
        # Position in the *uncompressed* stream.
        self.offset = 0
        self.mtime = mtime
        if self.mode == WRITE:
            self._write_gzip_header()
    @property
    def filename(self):
        # Deprecated alias for the .name attribute.
        import warnings
        warnings.warn("use the name attribute", DeprecationWarning, 2)
        if self.mode == WRITE and self.name[-3:] != ".gz":
            return self.name + ".gz"
        return self.name
    def __repr__(self):
        s = repr(self.fileobj)
        return '<gzip ' + s[1:-1] + ' ' + hex(id(self)) + '>'
    def _check_closed(self):
        """Raises a ValueError if the underlying file object has been closed.
        """
        if self.closed:
            raise ValueError('I/O operation on closed file.')
    def _init_write(self, filename):
        """Reset CRC/size bookkeeping before writing a new stream."""
        self.name = filename
        self.crc = zlib.crc32("") & 0xffffffffL
        self.size = 0
        self.writebuf = []
        self.bufsize = 0
    def _write_gzip_header(self):
        """Emit the RFC 1952 header (magic, method, flags, mtime, name)."""
        self.fileobj.write('\037\213')             # magic header
        self.fileobj.write('\010')                 # compression method
        fname = os.path.basename(self.name)
        if fname.endswith(".gz"):
            fname = fname[:-3]
        flags = 0
        if fname:
            flags = FNAME
        self.fileobj.write(chr(flags))
        mtime = self.mtime
        if mtime is None:
            mtime = time.time()
        write32u(self.fileobj, long(mtime))
        # Extra flag ('\002' = max compression) and OS byte ('\377' = unknown).
        self.fileobj.write('\002')
        self.fileobj.write('\377')
        if fname:
            self.fileobj.write(fname + '\000')
    def _init_read(self):
        """Reset CRC/size bookkeeping before reading a new member."""
        self.crc = zlib.crc32("") & 0xffffffffL
        self.size = 0
    def _read_gzip_header(self):
        """Parse and validate the gzip member header; raises IOError on a
        bad magic number or unknown compression method."""
        magic = self.fileobj.read(2)
        if magic != '\037\213':
            raise IOError, 'Not a gzipped file'
        method = ord( self.fileobj.read(1) )
        if method != 8:
            raise IOError, 'Unknown compression method'
        flag = ord( self.fileobj.read(1) )
        self.mtime = read32(self.fileobj)
        # extraflag = self.fileobj.read(1)
        # os = self.fileobj.read(1)
        self.fileobj.read(2)
        if flag & FEXTRA:
            # Read & discard the extra field, if present
            xlen = ord(self.fileobj.read(1))
            xlen = xlen + 256*ord(self.fileobj.read(1))
            self.fileobj.read(xlen)
        if flag & FNAME:
            # Read and discard a null-terminated string containing the filename
            while True:
                s = self.fileobj.read(1)
                if not s or s=='\000':
                    break
        if flag & FCOMMENT:
            # Read and discard a null-terminated string containing a comment
            while True:
                s = self.fileobj.read(1)
                if not s or s=='\000':
                    break
        if flag & FHCRC:
            self.fileobj.read(2)     # Read & discard the 16-bit header CRC
    def write(self,data):
        """Compress *data* and write it; returns the number of bytes
        consumed. Raises IOError on a read-only or closed file."""
        self._check_closed()
        if self.mode != WRITE:
            import errno
            raise IOError(errno.EBADF, "write() on read-only GzipFile object")
        if self.fileobj is None:
            raise ValueError, "write() on closed GzipFile object"
        # Convert data type if called by io.BufferedWriter.
        if isinstance(data, memoryview):
            data = data.tobytes()
        if len(data) > 0:
            self.size = self.size + len(data)
            self.crc = zlib.crc32(data, self.crc) & 0xffffffffL
            self.fileobj.write( self.compress.compress(data) )
            self.offset += len(data)
        return len(data)
    def read(self, size=-1):
        """Return up to *size* uncompressed bytes (all remaining data when
        size < 0). Raises IOError on a write-only file."""
        self._check_closed()
        if self.mode != READ:
            import errno
            raise IOError(errno.EBADF, "read() on write-only GzipFile object")
        if self.extrasize <= 0 and self.fileobj is None:
            return ''
        readsize = 1024
        if size < 0:        # get the whole thing
            try:
                # Decompress until EOF, doubling the chunk size each pass.
                while True:
                    self._read(readsize)
                    readsize = min(self.max_read_chunk, readsize * 2)
            except EOFError:
                size = self.extrasize
        else:               # just get some more of it
            try:
                while size > self.extrasize:
                    self._read(readsize)
                    readsize = min(self.max_read_chunk, readsize * 2)
            except EOFError:
                if size > self.extrasize:
                    size = self.extrasize
        offset = self.offset - self.extrastart
        chunk = self.extrabuf[offset: offset + size]
        self.extrasize = self.extrasize - size
        self.offset += size
        return chunk
    def _unread(self, buf):
        """Push *buf* back so the next read returns it again (logical
        rewind within the internal buffer)."""
        self.extrasize = len(buf) + self.extrasize
        self.offset -= len(buf)
    def _read(self, size=1024):
        """Decompress one chunk of *size* raw bytes into the internal
        buffer; raises EOFError when the stream is exhausted."""
        if self.fileobj is None:
            raise EOFError, "Reached EOF"
        if self._new_member:
            # If the _new_member flag is set, we have to
            # jump to the next member, if there is one.
            #
            # First, check if we're at the end of the file;
            # if so, it's time to stop; no more members to read.
            pos = self.fileobj.tell()   # Save current position
            self.fileobj.seek(0, 2)     # Seek to end of file
            if pos == self.fileobj.tell():
                raise EOFError, "Reached EOF"
            else:
                self.fileobj.seek( pos ) # Return to original position
            self._init_read()
            self._read_gzip_header()
            self.decompress = zlib.decompressobj(-zlib.MAX_WBITS)
            self._new_member = False
        # Read a chunk of data from the file
        buf = self.fileobj.read(size)
        # If the EOF has been reached, flush the decompression object
        # and mark this object as finished.
        if buf == "":
            uncompress = self.decompress.flush()
            self._read_eof()
            self._add_read_data( uncompress )
            raise EOFError, 'Reached EOF'
        uncompress = self.decompress.decompress(buf)
        self._add_read_data( uncompress )
        if self.decompress.unused_data != "":
            # Ending case: we've come to the end of a member in the file,
            # so seek back to the start of the unused data, finish up
            # this member, and read a new gzip header.
            # (The number of bytes to seek back is the length of the unused
            # data, minus 8 because _read_eof() will rewind a further 8 bytes)
            self.fileobj.seek( -len(self.decompress.unused_data)+8, 1)
            # Check the CRC and file size, and set the flag so we read
            # a new member on the next call
            self._read_eof()
            self._new_member = True
    def _add_read_data(self, data):
        """Append decompressed *data* to the buffer and update CRC/size."""
        self.crc = zlib.crc32(data, self.crc) & 0xffffffffL
        offset = self.offset - self.extrastart
        # Drop already-consumed bytes before appending the new data.
        self.extrabuf = self.extrabuf[offset:] + data
        self.extrasize = self.extrasize + len(data)
        self.extrastart = self.offset
        self.size = self.size + len(data)
    def _read_eof(self):
        # We've read to the end of the file, so we have to rewind in order
        # to reread the 8 bytes containing the CRC and the file size.
        # We check the that the computed CRC and size of the
        # uncompressed data matches the stored values. Note that the size
        # stored is the true file size mod 2**32.
        self.fileobj.seek(-8, 1)
        crc32 = read32(self.fileobj)
        isize = read32(self.fileobj)  # may exceed 2GB
        if crc32 != self.crc:
            raise IOError("CRC check failed %s != %s" % (hex(crc32),
                                                         hex(self.crc)))
        elif isize != (self.size & 0xffffffffL):
            raise IOError, "Incorrect length of data produced"
        # Gzip files can be padded with zeroes and still have archives.
        # Consume all zero bytes and set the file position to the first
        # non-zero byte. See http://www.gzip.org/#faq8
        c = "\x00"
        while c == "\x00":
            c = self.fileobj.read(1)
        if c:
            self.fileobj.seek(-1, 1)
    @property
    def closed(self):
        # True once the underlying file reference has been dropped.
        return self.fileobj is None
    def close(self):
        """Finish the stream (writing the CRC/size trailer in write mode)
        and close any file this object opened itself."""
        if self.fileobj is None:
            return
        if self.mode == WRITE:
            self.fileobj.write(self.compress.flush())
            write32u(self.fileobj, self.crc)
            # self.size may exceed 2GB, or even 4GB
            write32u(self.fileobj, self.size & 0xffffffffL)
            self.fileobj = None
        elif self.mode == READ:
            self.fileobj = None
        if self.myfileobj:
            self.myfileobj.close()
            self.myfileobj = None
    def flush(self,zlib_mode=zlib.Z_SYNC_FLUSH):
        """Flush pending compressed data to the underlying file."""
        self._check_closed()
        if self.mode == WRITE:
            # Ensure the compressor's buffer is flushed
            self.fileobj.write(self.compress.flush(zlib_mode))
            self.fileobj.flush()
    def fileno(self):
        """Invoke the underlying file object's fileno() method.

        This will raise AttributeError if the underlying file object
        doesn't support fileno().
        """
        return self.fileobj.fileno()
    def rewind(self):
        '''Return the uncompressed stream file position indicator to the
        beginning of the file'''
        if self.mode != READ:
            raise IOError("Can't rewind in write mode")
        self.fileobj.seek(0)
        self._new_member = True
        self.extrabuf = ""
        self.extrasize = 0
        self.extrastart = 0
        self.offset = 0
    def readable(self):
        return self.mode == READ
    def writable(self):
        return self.mode == WRITE
    def seekable(self):
        return True
    def seek(self, offset, whence=0):
        """Seek within the uncompressed stream. Only SEEK_SET and SEEK_CUR
        are supported; forward seeks are emulated by reading (or, in write
        mode, by writing zero padding)."""
        if whence:
            if whence == 1:
                offset = self.offset + offset
            else:
                raise ValueError('Seek from end not supported')
        if self.mode == WRITE:
            if offset < self.offset:
                raise IOError('Negative seek in write mode')
            count = offset - self.offset
            for i in xrange(count // 1024):
                self.write(1024 * '\0')
            self.write((count % 1024) * '\0')
        elif self.mode == READ:
            if offset < self.offset:
                # for negative seek, rewind and do positive seek
                self.rewind()
            count = offset - self.offset
            for i in xrange(count // 1024):
                self.read(1024)
            self.read(count % 1024)
        return self.offset
    def readline(self, size=-1):
        """Read and return one line (up to *size* bytes when size >= 0)."""
        if size < 0:
            # Shortcut common case - newline found in buffer.
            offset = self.offset - self.extrastart
            i = self.extrabuf.find('\n', offset) + 1
            if i > 0:
                self.extrasize -= i - offset
                self.offset += i - offset
                return self.extrabuf[offset: i]
            size = sys.maxint
            readsize = self.min_readsize
        else:
            readsize = size
        bufs = []
        while size != 0:
            c = self.read(readsize)
            i = c.find('\n')
            # We set i=size to break out of the loop under two
            # conditions: 1) there's no newline, and the chunk is
            # larger than size, or 2) there is a newline, but the
            # resulting line would be longer than 'size'.
            if (size <= i) or (i == -1 and len(c) > size):
                i = size - 1
            if i >= 0 or c == '':
                bufs.append(c[:i + 1])    # Add portion of last chunk
                self._unread(c[i + 1:])   # Push back rest of chunk
                break
            # Append chunk to list, decrease 'size',
            bufs.append(c)
            size = size - len(c)
            readsize = min(size, readsize * 2)
        if readsize > self.min_readsize:
            self.min_readsize = min(readsize, self.min_readsize * 2, 512)
        return ''.join(bufs) # Return resulting line
def _test():
    # Act like gzip; with -d, act like gunzip.
    # The input file is not deleted, however, nor are any other gzip
    # options or features supported.
    args = sys.argv[1:]
    decompress = args and args[0] == "-d"
    if decompress:
        args = args[1:]
    if not args:
        # No filenames: filter stdin to stdout.
        args = ["-"]
    for arg in args:
        if decompress:
            if arg == "-":
                f = GzipFile(filename="", mode="rb", fileobj=sys.stdin)
                g = sys.stdout
            else:
                if arg[-3:] != ".gz":
                    print "filename doesn't end in .gz:", repr(arg)
                    continue
                f = open(arg, "rb")
                g = __builtin__.open(arg[:-3], "wb")
        else:
            if arg == "-":
                f = sys.stdin
                g = GzipFile(filename="", mode="wb", fileobj=sys.stdout)
            else:
                f = __builtin__.open(arg, "rb")
                g = open(arg + ".gz", "wb")
        # Copy in 1 KiB chunks until the source is exhausted.
        while True:
            chunk = f.read(1024)
            if not chunk:
                break
            g.write(chunk)
        # Close whichever side we opened ourselves (never std streams).
        if g is not sys.stdout:
            g.close()
        if f is not sys.stdin:
            f.close()
if __name__ == '__main__':
    _test()
|
gpl-3.0
|
noironetworks/nova
|
nova/tests/unit/fake_policy.py
|
30
|
17674
|
# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
policy_data = """
{
"admin_api": "is_admin:True",
"cells_scheduler_filter:TargetCellFilter": "is_admin:True",
"context_is_admin": "role:admin or role:administrator",
"compute:create": "",
"compute:create:attach_network": "",
"compute:create:attach_volume": "",
"compute:get": "",
"compute:get_all": "",
"compute:get_all_tenants": "",
"compute:update": "",
"compute:get_instance_metadata": "",
"compute:get_all_instance_metadata": "",
"compute:get_all_instance_system_metadata": "",
"compute:update_instance_metadata": "",
"compute:delete_instance_metadata": "",
"compute:get_instance_faults": "",
"compute:get_diagnostics": "",
"compute:get_instance_diagnostics": "",
"compute:get_lock": "",
"compute:lock": "",
"compute:unlock": "",
"compute:unlock_override": "is_admin:True",
"compute:get_vnc_console": "",
"compute:get_spice_console": "",
"compute:get_rdp_console": "",
"compute:get_serial_console": "",
"compute:get_mks_console": "",
"compute:get_console_output": "",
"compute:reset_network": "",
"compute:inject_network_info": "",
"compute:add_fixed_ip": "",
"compute:remove_fixed_ip": "",
"compute:attach_volume": "",
"compute:detach_volume": "",
"compute:attach_interface": "",
"compute:detach_interface": "",
"compute:set_admin_password": "",
"compute:rescue": "",
"compute:unrescue": "",
"compute:suspend": "",
"compute:resume": "",
"compute:pause": "",
"compute:unpause": "",
"compute:start": "",
"compute:stop": "",
"compute:resize": "",
"compute:confirm_resize": "",
"compute:revert_resize": "",
"compute:rebuild": "",
"compute:reboot": "",
"compute:snapshot": "",
"compute:snapshot_volume_backed": "",
"compute:backup": "",
"compute:shelve": "",
"compute:shelve_offload": "",
"compute:unshelve": "",
"compute:security_groups:add_to_instance": "",
"compute:security_groups:remove_from_instance": "",
"compute:delete": "",
"compute:soft_delete": "",
"compute:force_delete": "",
"compute:restore": "",
"compute:swap_volume": "",
"compute:volume_snapshot_create": "",
"compute:volume_snapshot_delete": "",
"os_compute_api:servers:confirm_resize": "",
"os_compute_api:servers:create": "",
"os_compute_api:servers:create:attach_network": "",
"os_compute_api:servers:create:attach_volume": "",
"os_compute_api:servers:create:forced_host": "",
"os_compute_api:servers:delete": "",
"os_compute_api:servers:detail": "",
"os_compute_api:servers:detail:get_all_tenants": "",
"os_compute_api:servers:index": "",
"os_compute_api:servers:index:get_all_tenants": "",
"os_compute_api:servers:reboot": "",
"os_compute_api:servers:rebuild": "",
"os_compute_api:servers:resize": "",
"os_compute_api:servers:revert_resize": "",
"os_compute_api:servers:show": "",
"os_compute_api:servers:create_image": "",
"os_compute_api:servers:create_image:allow_volume_backed": "",
"os_compute_api:servers:update": "",
"os_compute_api:servers:start": "",
"os_compute_api:servers:stop": "",
"os_compute_api:os-access-ips": "",
"compute_extension:accounts": "",
"compute_extension:admin_actions:pause": "",
"compute_extension:admin_actions:unpause": "",
"compute_extension:admin_actions:suspend": "",
"compute_extension:admin_actions:resume": "",
"compute_extension:admin_actions:lock": "",
"compute_extension:admin_actions:unlock": "",
"compute_extension:admin_actions:resetNetwork": "",
"compute_extension:admin_actions:injectNetworkInfo": "",
"compute_extension:admin_actions:createBackup": "",
"compute_extension:admin_actions:migrateLive": "",
"compute_extension:admin_actions:resetState": "",
"compute_extension:admin_actions:migrate": "",
"os_compute_api:os-admin-actions:reset_network": "",
"os_compute_api:os-admin-actions:inject_network_info": "",
"os_compute_api:os-admin-actions:reset_state": "",
"os_compute_api:os-admin-password": "",
"compute_extension:aggregates": "rule:admin_api",
"os_compute_api:os-aggregates:index": "rule:admin_api",
"os_compute_api:os-aggregates:create": "rule:admin_api",
"os_compute_api:os-aggregates:show": "rule:admin_api",
"os_compute_api:os-aggregates:update": "rule:admin_api",
"os_compute_api:os-aggregates:delete": "rule:admin_api",
"os_compute_api:os-aggregates:add_host": "rule:admin_api",
"os_compute_api:os-aggregates:remove_host": "rule:admin_api",
"os_compute_api:os-aggregates:set_metadata": "rule:admin_api",
"compute_extension:agents": "",
"os_compute_api:os-agents": "",
"compute_extension:attach_interfaces": "",
"os_compute_api:os-attach-interfaces": "",
"compute_extension:baremetal_nodes": "",
"os_compute_api:os-baremetal-nodes": "",
"compute_extension:cells": "",
"compute_extension:cells:create": "rule:admin_api",
"compute_extension:cells:delete": "rule:admin_api",
"compute_extension:cells:update": "rule:admin_api",
"compute_extension:cells:sync_instances": "rule:admin_api",
"os_compute_api:os-cells": "",
"os_compute_api:os-cells:create": "rule:admin_api",
"os_compute_api:os-cells:delete": "rule:admin_api",
"os_compute_api:os-cells:update": "rule:admin_api",
"os_compute_api:os-cells:sync_instances": "rule:admin_api",
"compute_extension:certificates": "",
"os_compute_api:os-certificates:create": "",
"os_compute_api:os-certificates:show": "",
"compute_extension:cloudpipe": "",
"os_compute_api:os-cloudpipe": "",
"compute_extension:cloudpipe_update": "",
"compute_extension:config_drive": "",
"os_compute_api:os-config-drive": "",
"compute_extension:console_output": "",
"os_compute_api:os-console-output": "",
"compute_extension:consoles": "",
"os_compute_api:os-remote-consoles": "",
"os_compute_api:os-consoles:create": "",
"os_compute_api:os-consoles:delete": "",
"os_compute_api:os-consoles:index": "",
"os_compute_api:os-consoles:show": "",
"compute_extension:createserverext": "",
"os_compute_api:os-create-backup": "",
"compute_extension:deferred_delete": "",
"os_compute_api:os-deferred-delete": "",
"compute_extension:disk_config": "",
"os_compute_api:os-disk-config": "",
"compute_extension:evacuate": "is_admin:True",
"os_compute_api:os-evacuate": "is_admin:True",
"compute_extension:extended_server_attributes": "",
"os_compute_api:os-extended-server-attributes": "",
"compute_extension:extended_status": "",
"os_compute_api:os-extended-status": "",
"compute_extension:extended_availability_zone": "",
"os_compute_api:os-extended-availability-zone": "",
"compute_extension:extended_ips": "",
"compute_extension:extended_ips_mac": "",
"compute_extension:extended_vif_net": "",
"compute_extension:extended_volumes": "",
"os_compute_api:ips:index": "",
"os_compute_api:ips:show": "",
"os_compute_api:os-extended-volumes": "",
"os_compute_api:extensions": "",
"compute_extension:fixed_ips": "",
"os_compute_api:os-fixed-ips": "",
"compute_extension:flavor_access": "",
"compute_extension:flavor_access:addTenantAccess": "",
"compute_extension:flavor_access:removeTenantAccess": "",
"os_compute_api:os-flavor-access": "",
"os_compute_api:os-flavor-access:remove_tenant_access": "",
"os_compute_api:os-flavor-access:add_tenant_access": "",
"compute_extension:flavor_disabled": "",
"compute_extension:flavor_rxtx": "",
"os_compute_api:os-flavor-rxtx": "",
"compute_extension:flavor_swap": "",
"compute_extension:flavorextradata": "",
"compute_extension:flavorextraspecs:index": "",
"compute_extension:flavorextraspecs:show": "",
"compute_extension:flavorextraspecs:create": "is_admin:True",
"compute_extension:flavorextraspecs:update": "is_admin:True",
"compute_extension:flavorextraspecs:delete": "is_admin:True",
"os_compute_api:os-flavor-extra-specs:index": "",
"os_compute_api:os-flavor-extra-specs:show": "",
"os_compute_api:os-flavor-extra-specs:create": "is_admin:True",
"os_compute_api:os-flavor-extra-specs:update": "is_admin:True",
"os_compute_api:os-flavor-extra-specs:delete": "is_admin:True",
"compute_extension:flavormanage": "",
"os_compute_api:os-flavor-manage": "",
"compute_extension:floating_ip_dns": "",
"os_compute_api:os-floating-ip-dns": "",
"os_compute_api:os-floating-ip-dns:domain:update": "",
"os_compute_api:os-floating-ip-dns:domain:delete": "",
"compute_extension:floating_ip_pools": "",
"os_compute_api:os-floating-ip-pools": "",
"compute_extension:floating_ips": "",
"os_compute_api:os-floating-ips": "",
"compute_extension:floating_ips_bulk": "",
"os_compute_api:os-floating-ips-bulk": "",
"compute_extension:fping": "",
"compute_extension:fping:all_tenants": "is_admin:True",
"os_compute_api:os-fping": "",
"os_compute_api:os-fping:all_tenants": "is_admin:True",
"compute_extension:hide_server_addresses": "",
"os_compute_api:os-hide-server-addresses": "",
"compute_extension:hosts": "",
"os_compute_api:os-hosts": "rule:admin_api",
"compute_extension:hypervisors": "rule:admin_api",
"os_compute_api:os-hypervisors": "rule:admin_api",
"compute_extension:image_size": "",
"os_compute_api:image-size": "",
"compute_extension:instance_actions": "",
"os_compute_api:os-instance-actions": "",
"compute_extension:instance_actions:events": "is_admin:True",
"os_compute_api:os-instance-actions:events": "is_admin:True",
"compute_extension:instance_usage_audit_log": "rule:admin_api",
"os_compute_api:os-instance-usage-audit-log": "",
"compute_extension:keypairs": "",
"compute_extension:keypairs:index": "",
"compute_extension:keypairs:show": "",
"compute_extension:keypairs:create": "",
"compute_extension:keypairs:delete": "",
"os_compute_api:os-keypairs": "",
"os_compute_api:os-keypairs:index":
"rule:admin_api or user_id:%(user_id)s",
"os_compute_api:os-keypairs:show":
"rule:admin_api or user_id:%(user_id)s",
"os_compute_api:os-keypairs:create":
"rule:admin_api or user_id:%(user_id)s",
"os_compute_api:os-keypairs:delete":
"rule:admin_api or user_id:%(user_id)s",
"os_compute_api:os-lock-server:lock": "",
"os_compute_api:os-lock-server:unlock": "",
"os_compute_api:os-lock-server:unlock:unlock_override": "",
"os_compute_api:os-migrate-server:migrate": "",
"os_compute_api:os-migrate-server:migrate_live": "",
"compute_extension:multinic": "",
"os_compute_api:os-multinic": "",
"compute_extension:networks": "",
"compute_extension:networks:view": "",
"os_compute_api:os-networks": "",
"os_compute_api:os-networks:view": "",
"compute_extension:networks_associate": "",
"os_compute_api:os-networks-associate": "",
"compute_extension:os-tenant-networks": "",
"os_compute_api:os-tenant-networks": "",
"os_compute_api:os-pause-server:pause": "",
"os_compute_api:os-pause-server:unpause": "",
"os_compute_api:os-pci:pci_servers": "",
"os_compute_api:os-pci:index": "",
"os_compute_api:os-pci:detail": "",
"os_compute_api:os-pci:show": "",
"compute_extension:quotas:show": "",
"compute_extension:quotas:update": "",
"compute_extension:quotas:delete": "",
"os_compute_api:os-quota-sets:show": "",
"os_compute_api:os-quota-sets:update": "",
"os_compute_api:os-quota-sets:delete": "",
"os_compute_api:os-quota-sets:detail": "",
"os_compute_api:os-quota-sets:defaults": "",
"compute_extension:quota_classes": "",
"os_compute_api:os-quota-class-sets:update": "",
"os_compute_api:os-quota-class-sets:show": "",
"compute_extension:rescue": "",
"os_compute_api:os-rescue": "",
"compute_extension:security_group_default_rules": "",
"os_compute_api:os-security-group-default-rules": "",
"compute_extension:security_groups": "",
"os_compute_api:os-security-groups": "",
"compute_extension:server_diagnostics": "",
"os_compute_api:os-server-diagnostics": "",
"compute_extension:server_groups": "",
"compute_extension:server_password": "",
"os_compute_api:os-server-password": "",
"compute_extension:server_usage": "",
"os_compute_api:os-server-usage": "",
"os_compute_api:os-server-groups": "",
"compute_extension:services": "",
"os_compute_api:os-services": "",
"compute_extension:shelve": "",
"compute_extension:shelveOffload": "",
"os_compute_api:os-shelve:shelve": "",
"os_compute_api:os-shelve:shelve_offload": "",
"compute_extension:simple_tenant_usage:show": "",
"compute_extension:simple_tenant_usage:list": "",
"os_compute_api:os-simple-tenant-usage:show": "",
"os_compute_api:os-simple-tenant-usage:list": "",
"compute_extension:unshelve": "",
"os_compute_api:os-shelve:unshelve": "",
"os_compute_api:os-suspend-server:suspend": "",
"os_compute_api:os-suspend-server:resume": "",
"compute_extension:users": "",
"compute_extension:virtual_interfaces": "",
"os_compute_api:os-virtual-interfaces": "",
"compute_extension:virtual_storage_arrays": "",
"compute_extension:volumes": "",
"compute_extension:volume_attachments:index": "",
"compute_extension:volume_attachments:show": "",
"compute_extension:volume_attachments:create": "",
"compute_extension:volume_attachments:update": "",
"compute_extension:volume_attachments:delete": "",
"os_compute_api:os-volumes": "",
"os_compute_api:os-volumes-attachments:index": "",
"os_compute_api:os-volumes-attachments:show": "",
"os_compute_api:os-volumes-attachments:create": "",
"os_compute_api:os-volumes-attachments:update": "",
"os_compute_api:os-volumes-attachments:delete": "",
"compute_extension:volumetypes": "",
"compute_extension:availability_zone:list": "",
"os_compute_api:os-availability-zone:list": "",
"compute_extension:availability_zone:detail": "",
"os_compute_api:os-availability-zone:detail": "",
"compute_extension:used_limits_for_admin": "is_admin:True",
"os_compute_api:os-used-limits": "is_admin:True",
"os_compute_api:limits": "",
"compute_extension:migrations:index": "is_admin:True",
"os_compute_api:os-migrations:index": "is_admin:True",
"compute_extension:os-assisted-volume-snapshots:create": "",
"compute_extension:os-assisted-volume-snapshots:delete": "",
"os_compute_api:os-assisted-volume-snapshots:create": "",
"os_compute_api:os-assisted-volume-snapshots:delete": "",
"compute_extension:console_auth_tokens": "is_admin:True",
"os_compute_api:os-console-auth-tokens": "is_admin:True",
"compute_extension:os-server-external-events:create": "rule:admin_api",
"os_compute_api:os-server-external-events:create": "rule:admin_api",
"os_compute_api:server-metadata:create": "",
"os_compute_api:server-metadata:update": "",
"os_compute_api:server-metadata:update_all": "",
"os_compute_api:server-metadata:delete": "",
"os_compute_api:server-metadata:show": "",
"os_compute_api:server-metadata:index": "",
"network:get_all": "",
"network:get": "",
"network:create": "",
"network:delete": "",
"network:associate": "",
"network:disassociate": "",
"network:get_vifs_by_instance": "",
"network:get_vif_by_mac_address": "",
"network:allocate_for_instance": "",
"network:deallocate_for_instance": "",
"network:validate_networks": "",
"network:get_instance_uuids_by_ip_filter": "",
"network:get_instance_id_by_floating_address": "",
"network:setup_networks_on_host": "",
"network:get_floating_ip": "",
"network:get_floating_ip_pools": "",
"network:get_floating_ip_by_address": "",
"network:get_floating_ips_by_project": "",
"network:get_floating_ips_by_fixed_address": "",
"network:allocate_floating_ip": "",
"network:associate_floating_ip": "",
"network:disassociate_floating_ip": "",
"network:release_floating_ip": "",
"network:migrate_instance_start": "",
"network:migrate_instance_finish": "",
"network:get_fixed_ip": "",
"network:get_fixed_ip_by_address": "",
"network:add_fixed_ip_to_instance": "",
"network:remove_fixed_ip_from_instance": "",
"network:add_network_to_project": "",
"network:get_instance_nw_info": "",
"network:get_dns_domains": "",
"network:add_dns_entry": "",
"network:modify_dns_entry": "",
"network:delete_dns_entry": "",
"network:get_dns_entries_by_address": "",
"network:get_dns_entries_by_name": "",
"network:create_private_dns_domain": "",
"network:create_public_dns_domain": "",
"network:delete_dns_domain": "",
"network:attach_external_network": "rule:admin_api"
}
"""
|
apache-2.0
|
gangadhar-kadam/nassimapp
|
patches/december_2012/stock_entry_cleanup.py
|
30
|
2060
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import webnotes
def execute():
	"""Patch entry point: reload the Stock Entry doctype, re-create the
	removed standard fields as Custom Fields where data still exists,
	fold the deprecated `process` values into `purpose`, and delete the
	obsolete "sandbox" doctype."""
	# removed following fields
	webnotes.reload_doc("stock", "doctype", "stock_entry")
	custom_fields()
	deprecate_process()
	webnotes.delete_doc("doctype", "sandbox")
def custom_fields():
	"""Re-create legacy excise/transport fields as Custom Fields on
	Stock Entry, but only when at least one not-cancelled Stock Entry
	still carries data in that column.

	The field definitions below mirror the standard fields that were
	removed from the Stock Entry doctype.
	"""
	fields = [
		{
			"label": "Is Excisable Goods",
			"fieldname": "is_excisable_goods",
			"fieldtype": "Select",
			"options": "\nYes\nNo",
			"insert_after": "Company"
		},
		{
			"label": "Excisable Goods",
			"fieldname": "excisable_goods",
			"fieldtype": "Select",
			# NOTE(review): the trailing ")" in "Non-Returnable)" looks like a
			# typo, but it is kept because existing documents may already store
			# that exact option value.
			"options": "\nReturnable\nNon-Returnable)",
			"insert_after": "Amended From"
		},
		{
			"label": "Under Rule",
			"fieldname": "under_rule",
			"fieldtype": "Select",
			"options": "\nOrdinary\n57 AC (5) a\n57 F (2) Non-Exc.",
			"insert_after": "Remarks"
		},
		{
			"label": "Transporter",
			"fieldname": "transporter",
			"fieldtype": "Data",
			"options": "",
			"insert_after": "Project Name"
		},
		{
			"label": "Transfer Date",
			"fieldname": "transfer_date",
			"fieldtype": "Date",
			"options": "",
			"insert_after": "Select Print Heading"
		},
	]
	for fld in fields:
		# The column name must be spliced into the SQL text itself.  Binding it
		# as a query parameter renders it as a quoted string literal, making
		# the condition ifnull('fieldname', '') != '' always true, so every
		# custom field would be created even when the column holds no data.
		# The names come from the hard-coded list above, so direct
		# interpolation is safe here.
		if webnotes.conn.sql("""select name from `tabStock Entry`
			where ifnull(`%s`, '') != '' and docstatus<2""" % fld['fieldname']):
			create_custom_field(fld)
def create_custom_field(fld):
	"""Insert *fld* as a print-hidden Custom Field on Stock Entry.

	The dict passed in is mutated in place before insertion.
	"""
	from webnotes.model.doclist import DocList
	fld["doctype"] = "Custom Field"
	fld["dt"] = "Stock Entry"
	fld["print_hide"] = 1
	fld["permlevel"] = 0
	webnotes.insert(DocList([fld]))
def deprecate_process():
	"""Fold the deprecated `process` field into `purpose` on Stock Entry."""
	# Production Order entries entered via "Material Transfer" become plain
	# material transfers.
	webnotes.conn.sql("""update `tabStock Entry`
		set `purpose`="Material Transfer"
		where process="Material Transfer" and purpose="Production Order" """)
	# Backflushed production entries and "Other" entries both map to
	# Manufacture/Repack.
	webnotes.conn.sql("""update `tabStock Entry`
		set `purpose`="Manufacture/Repack"
		where (process="Backflush" and purpose="Production Order") or purpose="Other" """)
	# Subcontracting entries get the dedicated Subcontract purpose.
	webnotes.conn.sql("""update `tabStock Entry`
		set `purpose`="Subcontract"
		where process="Subcontracting" """)
|
agpl-3.0
|
CxyYuan/luyoutec_lanyou5_2
|
node_modules/pangyp/gyp/pylib/gyp/win_tool.py
|
395
|
12634
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions for Windows builds.
These functions are executed via gyp-win-tool when using the ninja generator.
"""
import os
import re
import shutil
import subprocess
import stat
import string
import sys
# Directory containing this script; ExecClCompile runs ninja from here.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))

# A regex matching an argument corresponding to the output filename passed to
# link.exe.
_LINK_EXE_OUT_ARG = re.compile('/OUT:(?P<out>.+)$', re.IGNORECASE)
def main(args):
  """Dispatch the command-line arguments to a WinTool instance and exit
  with its status code when one is returned."""
  tool = WinTool()
  status = tool.Dispatch(args)
  if status is not None:
    sys.exit(status)
class WinTool(object):
  """This class performs all the Windows tooling steps. The methods can either
  be executed directly, or dispatched from an argument list.

  NOTE: this file is Python 2 (print statements, dict.iteritems)."""

  def _UseSeparateMspdbsrv(self, env, args):
    """Allows to use a unique instance of mspdbsrv.exe per linker instead of a
    shared one."""
    if len(args) < 1:
      raise Exception("Not enough arguments")

    # Only linker invocations need a dedicated mspdbsrv endpoint.
    if args[0] != 'link.exe':
      return

    # Use the output filename passed to the linker to generate an endpoint name
    # for mspdbsrv.exe.
    endpoint_name = None
    for arg in args:
      m = _LINK_EXE_OUT_ARG.match(arg)
      if m:
        # Strip non-word characters so the name is a valid endpoint; the pid
        # makes it unique per linker process.
        endpoint_name = re.sub(r'\W+', '',
                               '%s_%d' % (m.group('out'), os.getpid()))
        break

    if endpoint_name is None:
      return

    # Adds the appropriate environment variable. This will be read by link.exe
    # to know which instance of mspdbsrv.exe it should connect to (if it's
    # not set then the default endpoint is used).
    env['_MSPDBSRV_ENDPOINT_'] = endpoint_name

  def Dispatch(self, args):
    """Dispatches a string command to a method."""
    if len(args) < 1:
      raise Exception("Not enough arguments")

    # e.g. 'recursive-mirror' -> self.ExecRecursiveMirror(*rest).
    method = "Exec%s" % self._CommandifyName(args[0])
    return getattr(self, method)(*args[1:])

  def _CommandifyName(self, name_string):
    """Transforms a tool name like recursive-mirror to RecursiveMirror."""
    return name_string.title().replace('-', '')

  def _GetEnv(self, arch):
    """Gets the saved environment from a file for a given architecture."""
    # The environment is saved as an "environment block" (see CreateProcess
    # and msvs_emulation for details). We convert to a dict here.
    # Drop last 2 NULs, one for list terminator, one for trailing vs. separator.
    # `arch` is the path of the file holding the saved environment block.
    pairs = open(arch).read()[:-2].split('\0')
    kvs = [item.split('=', 1) for item in pairs]
    return dict(kvs)

  def ExecStamp(self, path):
    """Simple stamp command."""
    open(path, 'w').close()

  def ExecRecursiveMirror(self, source, dest):
    """Emulation of rm -rf out && cp -af in out."""
    if os.path.exists(dest):
      if os.path.isdir(dest):
        def _on_error(fn, path, excinfo):
          # The operation failed, possibly because the file is set to
          # read-only. If that's why, make it writable and try the op again.
          if not os.access(path, os.W_OK):
            os.chmod(path, stat.S_IWRITE)
          fn(path)
        shutil.rmtree(dest, onerror=_on_error)
      else:
        if not os.access(dest, os.W_OK):
          # Attempt to make the file writable before deleting it.
          os.chmod(dest, stat.S_IWRITE)
        os.unlink(dest)

    # Mirror either a whole tree or a single file, preserving metadata.
    if os.path.isdir(source):
      shutil.copytree(source, dest)
    else:
      shutil.copy2(source, dest)

  def ExecLinkWrapper(self, arch, use_separate_mspdbsrv, *args):
    """Filter diagnostic output from link that looks like:
    ' Creating library ui.dll.lib and object ui.dll.exp'
    This happens when there are exports from the dll or exe.
    """
    env = self._GetEnv(arch)
    if use_separate_mspdbsrv == 'True':
      self._UseSeparateMspdbsrv(env, args)
    # Normalize the linker path separators for cmd.exe (shell=True).
    link = subprocess.Popen([args[0].replace('/', '\\')] + list(args[1:]),
                            shell=True,
                            env=env,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    out, _ = link.communicate()
    for line in out.splitlines():
      # Pass everything through except the "Creating library" banner.
      if not line.startswith(' Creating library '):
        print line
    return link.returncode

  def ExecLinkWithManifests(self, arch, embed_manifest, out, ldcmd, resname,
                            mt, rc, intermediate_manifest, *manifests):
    """A wrapper for handling creating a manifest resource and then executing
    a link command."""
    # The 'normal' way to do manifests is to have link generate a manifest
    # based on gathering dependencies from the object files, then merge that
    # manifest with other manifests supplied as sources, convert the merged
    # manifest to a resource, and then *relink*, including the compiled
    # version of the manifest resource. This breaks incremental linking, and
    # is generally overly complicated. Instead, we merge all the manifests
    # provided (along with one that includes what would normally be in the
    # linker-generated one, see msvs_emulation.py), and include that into the
    # first and only link. We still tell link to generate a manifest, but we
    # only use that to assert that our simpler process did not miss anything.
    variables = {
      'python': sys.executable,
      'arch': arch,
      'out': out,
      'ldcmd': ldcmd,
      'resname': resname,
      'mt': mt,
      'rc': rc,
      'intermediate_manifest': intermediate_manifest,
      'manifests': ' '.join(manifests),
    }
    add_to_ld = ''
    if manifests:
      # Merge all supplied manifests into <out>.manifest.
      subprocess.check_call(
          '%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo '
          '-manifest %(manifests)s -out:%(out)s.manifest' % variables)
      if embed_manifest == 'True':
        # Compile the merged manifest to a resource and add it to the link.
        subprocess.check_call(
            '%(python)s gyp-win-tool manifest-to-rc %(arch)s %(out)s.manifest'
            ' %(out)s.manifest.rc %(resname)s' % variables)
        subprocess.check_call(
            '%(python)s gyp-win-tool rc-wrapper %(arch)s %(rc)s '
            '%(out)s.manifest.rc' % variables)
        add_to_ld = ' %(out)s.manifest.res' % variables
    subprocess.check_call(ldcmd + add_to_ld)

    # Run mt.exe on the theoretically complete manifest we generated, merging
    # it with the one the linker generated to confirm that the linker
    # generated one does not add anything. This is strictly unnecessary for
    # correctness, it's only to verify that e.g. /MANIFESTDEPENDENCY was not
    # used in a #pragma comment.
    if manifests:
      # Merge the intermediate one with ours to .assert.manifest, then check
      # that .assert.manifest is identical to ours.
      subprocess.check_call(
          '%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo '
          '-manifest %(out)s.manifest %(intermediate_manifest)s '
          '-out:%(out)s.assert.manifest' % variables)
      assert_manifest = '%(out)s.assert.manifest' % variables
      our_manifest = '%(out)s.manifest' % variables
      # Load and normalize the manifests. mt.exe sometimes removes whitespace,
      # and sometimes doesn't unfortunately.
      with open(our_manifest, 'rb') as our_f:
        with open(assert_manifest, 'rb') as assert_f:
          our_data = our_f.read().translate(None, string.whitespace)
          assert_data = assert_f.read().translate(None, string.whitespace)
      if our_data != assert_data:
        # Mismatch: remove the output (so the build is not left "successful")
        # and dump all three manifests for debugging before failing.
        os.unlink(out)
        def dump(filename):
          sys.stderr.write('%s\n-----\n' % filename)
          with open(filename, 'rb') as f:
            sys.stderr.write(f.read() + '\n-----\n')
        dump(intermediate_manifest)
        dump(our_manifest)
        dump(assert_manifest)
        sys.stderr.write(
            'Linker generated manifest "%s" added to final manifest "%s" '
            '(result in "%s"). '
            'Were /MANIFEST switches used in #pragma statements? ' % (
              intermediate_manifest, our_manifest, assert_manifest))
        return 1

  def ExecManifestWrapper(self, arch, *args):
    """Run manifest tool with environment set. Strip out undesirable warning
    (some XML blocks are recognized by the OS loader, but not the manifest
    tool)."""
    env = self._GetEnv(arch)
    popen = subprocess.Popen(args, shell=True, env=env,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = popen.communicate()
    for line in out.splitlines():
      if line and 'manifest authoring warning 81010002' not in line:
        print line
    return popen.returncode

  def ExecManifestToRc(self, arch, *args):
    """Creates a resource file pointing a SxS assembly manifest.
    |args| is tuple containing path to manifest file, path to resource file
    and resource name which can be "1" (for executables) or "2" (for DLLs)."""
    manifest_path, resource_path, resource_name = args
    with open(resource_path, 'wb') as output:
      # Forward slashes in the path keep the rc file free of escape issues.
      output.write('#include <windows.h>\n%s RT_MANIFEST "%s"' % (
        resource_name,
        os.path.abspath(manifest_path).replace('\\', '/')))

  def ExecMidlWrapper(self, arch, outdir, tlb, h, dlldata, iid, proxy, idl,
                      *flags):
    """Filter noisy filenames output from MIDL compile step that isn't
    quietable via command line flags.
    """
    args = ['midl', '/nologo'] + list(flags) + [
        '/out', outdir,
        '/tlb', tlb,
        '/h', h,
        '/dlldata', dlldata,
        '/iid', iid,
        '/proxy', proxy,
        idl]
    env = self._GetEnv(arch)
    popen = subprocess.Popen(args, shell=True, env=env,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = popen.communicate()
    # Filter junk out of stdout, and write filtered versions. Output we want
    # to filter is pairs of lines that look like this:
    # Processing C:\Program Files (x86)\Microsoft SDKs\...\include\objidl.idl
    # objidl.idl
    lines = out.splitlines()
    prefixes = ('Processing ', '64 bit Processing ')
    # Collect the bare filenames from "Processing <path>" lines so their
    # standalone echoes can be suppressed too.
    processing = set(os.path.basename(x)
                     for x in lines if x.startswith(prefixes))
    for line in lines:
      if not line.startswith(prefixes) and line not in processing:
        print line
    return popen.returncode

  def ExecAsmWrapper(self, arch, *args):
    """Filter logo banner from invocations of asm.exe."""
    env = self._GetEnv(arch)
    popen = subprocess.Popen(args, shell=True, env=env,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = popen.communicate()
    for line in out.splitlines():
      if (not line.startswith('Copyright (C) Microsoft Corporation') and
          not line.startswith('Microsoft (R) Macro Assembler') and
          not line.startswith(' Assembling: ') and
          line):
        print line
    return popen.returncode

  def ExecRcWrapper(self, arch, *args):
    """Filter logo banner from invocations of rc.exe. Older versions of RC
    don't support the /nologo flag."""
    env = self._GetEnv(arch)
    popen = subprocess.Popen(args, shell=True, env=env,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = popen.communicate()
    for line in out.splitlines():
      if (not line.startswith('Microsoft (R) Windows (R) Resource Compiler') and
          not line.startswith('Copyright (C) Microsoft Corporation') and
          line):
        print line
    return popen.returncode

  def ExecActionWrapper(self, arch, rspfile, *dir):
    """Runs an action command line from a response file using the environment
    for |arch|. If |dir| is supplied, use that as the working directory."""
    env = self._GetEnv(arch)
    # TODO(scottmg): This is a temporary hack to get some specific variables
    # through to actions that are set after gyp-time. http://crbug.com/333738.
    for k, v in os.environ.iteritems():
      if k not in env:
        env[k] = v
    args = open(rspfile).read()
    # |dir| is a varargs capture: empty tuple means "no working directory".
    dir = dir[0] if dir else None
    return subprocess.call(args, shell=True, env=env, cwd=dir)

  def ExecClCompile(self, project_dir, selected_files):
    """Executed by msvs-ninja projects when the 'ClCompile' target is used to
    build selected C/C++ files."""
    project_dir = os.path.relpath(project_dir, BASE_DIR)
    selected_files = selected_files.split(';')
    # '^^' is ninja syntax for "the target(s) producing this source file".
    ninja_targets = [os.path.join(project_dir, filename) + '^^'
                     for filename in selected_files]
    cmd = ['ninja.exe']
    cmd.extend(ninja_targets)
    return subprocess.call(cmd, shell=True, cwd=BASE_DIR)
# Invoked via gyp-win-tool: forward the command-line arguments to main().
if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
|
mit
|
alexmandujano/django
|
tests/i18n/tests.py
|
42
|
70928
|
# -*- encoding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import datetime
import decimal
import os
import pickle
from threading import local
from django.conf import settings
from django.core.management.utils import find_command
from django.template import Template, Context
from django.template.base import TemplateSyntaxError
from django.test import TestCase, RequestFactory
from django.test.utils import override_settings, TransRealMixin
from django.utils import translation
from django.utils.formats import (get_format, date_format, time_format,
localize, localize_input, iter_format_modules, get_format_modules,
number_format, reset_format_cache, sanitize_separators)
from django.utils.importlib import import_module
from django.utils.numberformat import format as nformat
from django.utils._os import upath
from django.utils.safestring import mark_safe, SafeBytes, SafeString, SafeText
from django.utils import six
from django.utils.six import PY3
from django.utils.translation import (activate, deactivate,
get_language, get_language_from_request, get_language_info,
to_locale, trans_real,
gettext, gettext_lazy,
ugettext, ugettext_lazy,
ngettext, ngettext_lazy,
ungettext, ungettext_lazy,
pgettext, pgettext_lazy,
npgettext, npgettext_lazy,
check_for_language)
from django.utils.unittest import skipUnless
if find_command('xgettext'):
from .commands.extraction import (ExtractorTests, BasicExtractorTests,
JavascriptExtractorTests, IgnoredExtractorTests, SymlinkExtractorTests,
CopyPluralFormsExtractorTests, NoWrapExtractorTests,
LocationCommentsTests, KeepPotFileExtractorTests,
MultipleLocaleExtractionTests)
if find_command('msgfmt'):
from .commands.compilation import (PoFileTests, PoFileContentsTests,
PercentRenderingTests, MultipleLocaleCompilationTests,
CompilationErrorHandling)
from .forms import I18nForm, SelectDateForm, SelectDateWidget, CompanyForm
from .models import Company, TestModel
# Directory containing this test module; used to build extra locale paths.
here = os.path.dirname(os.path.abspath(upath(__file__)))
# LOCALE_PATHS plus this app's own "other/locale" catalogs, for tests that
# need translations beyond the project defaults.
extended_locale_paths = settings.LOCALE_PATHS + (
    os.path.join(here, 'other', 'locale'),
)
class TranslationTests(TransRealMixin, TestCase):
    """Core translation behaviour: language overrides, lazy translation
    objects, message contexts, and the {% trans %}/{% blocktrans %} tags."""

    def test_override(self):
        """translation.override() switches the active language inside the
        block and restores the previous one (None -> LANGUAGE_CODE)."""
        activate('de')
        with translation.override('pl'):
            self.assertEqual(get_language(), 'pl')
        self.assertEqual(get_language(), 'de')
        with translation.override(None):
            self.assertEqual(get_language(), settings.LANGUAGE_CODE)
        self.assertEqual(get_language(), 'de')
        deactivate()

    def test_lazy_objects(self):
        """
        Format string interpolation should work with *_lazy objects.
        """
        s = ugettext_lazy('Add %(name)s')
        d = {'name': 'Ringo'}
        self.assertEqual('Add Ringo', s % d)
        with translation.override('de', deactivate=True):
            self.assertEqual('Ringo hinzuf\xfcgen', s % d)
            with translation.override('pl'):
                self.assertEqual('Dodaj Ringo', s % d)

        # It should be possible to compare *_lazy objects.
        s1 = ugettext_lazy('Add %(name)s')
        self.assertEqual(True, s == s1)
        s2 = gettext_lazy('Add %(name)s')
        s3 = gettext_lazy('Add %(name)s')
        self.assertEqual(True, s2 == s3)
        self.assertEqual(True, s == s2)
        s4 = ugettext_lazy('Some other string')
        self.assertEqual(False, s == s4)

    @skipUnless(six.PY2, "No more bytestring translations on PY3")
    def test_lazy_and_bytestrings(self):
        # On Python 2, (n)gettext_lazy should not transform a bytestring to unicode
        self.assertEqual(gettext_lazy(b"test").upper(), b"TEST")
        self.assertEqual((ngettext_lazy(b"%d test", b"%d tests") % 1).upper(), b"1 TEST")

        # Other versions of lazy functions always return unicode
        self.assertEqual(ugettext_lazy(b"test").upper(), "TEST")
        self.assertEqual((ungettext_lazy(b"%d test", b"%d tests") % 1).upper(), "1 TEST")
        self.assertEqual(pgettext_lazy(b"context", b"test").upper(), "TEST")
        self.assertEqual(
            (npgettext_lazy(b"context", b"%d test", b"%d tests") % 1).upper(),
            "1 TEST"
        )

    def test_lazy_pickle(self):
        """Lazy translation strings must survive a pickle round-trip."""
        s1 = ugettext_lazy("test")
        self.assertEqual(six.text_type(s1), "test")
        s2 = pickle.loads(pickle.dumps(s1))
        self.assertEqual(six.text_type(s2), "test")

    @override_settings(LOCALE_PATHS=extended_locale_paths)
    def test_ungettext_lazy(self):
        """Pluralized lazy strings: with/without %d, with str() variants,
        with contexts, and with a deferred number taken from a dict key."""
        simple_with_format = ungettext_lazy('%d good result', '%d good results')
        simple_str_with_format = ngettext_lazy(str('%d good result'), str('%d good results'))
        simple_context_with_format = npgettext_lazy('Exclamation', '%d good result', '%d good results')
        simple_without_format = ungettext_lazy('good result', 'good results')
        with translation.override('de'):
            self.assertEqual(simple_with_format % 1, '1 gutes Resultat')
            self.assertEqual(simple_with_format % 4, '4 guten Resultate')
            self.assertEqual(simple_str_with_format % 1, str('1 gutes Resultat'))
            self.assertEqual(simple_str_with_format % 4, str('4 guten Resultate'))
            self.assertEqual(simple_context_with_format % 1, '1 gutes Resultat!')
            self.assertEqual(simple_context_with_format % 4, '4 guten Resultate!')
            self.assertEqual(simple_without_format % 1, 'gutes Resultat')
            self.assertEqual(simple_without_format % 4, 'guten Resultate')

        # Third argument: either a concrete number or the name of the dict
        # key that will carry the number at interpolation time.
        complex_nonlazy = ungettext_lazy('Hi %(name)s, %(num)d good result', 'Hi %(name)s, %(num)d good results', 4)
        complex_deferred = ungettext_lazy('Hi %(name)s, %(num)d good result', 'Hi %(name)s, %(num)d good results', 'num')
        complex_str_nonlazy = ngettext_lazy(str('Hi %(name)s, %(num)d good result'), str('Hi %(name)s, %(num)d good results'), 4)
        complex_str_deferred = ngettext_lazy(str('Hi %(name)s, %(num)d good result'), str('Hi %(name)s, %(num)d good results'), 'num')
        complex_context_nonlazy = npgettext_lazy('Greeting', 'Hi %(name)s, %(num)d good result', 'Hi %(name)s, %(num)d good results', 4)
        complex_context_deferred = npgettext_lazy('Greeting', 'Hi %(name)s, %(num)d good result', 'Hi %(name)s, %(num)d good results', 'num')
        with translation.override('de'):
            self.assertEqual(complex_nonlazy % {'num': 4, 'name': 'Jim'}, 'Hallo Jim, 4 guten Resultate')
            self.assertEqual(complex_deferred % {'name': 'Jim', 'num': 1}, 'Hallo Jim, 1 gutes Resultat')
            self.assertEqual(complex_deferred % {'name': 'Jim', 'num': 5}, 'Hallo Jim, 5 guten Resultate')
            with six.assertRaisesRegex(self, KeyError, 'Your dictionary lacks key.*'):
                complex_deferred % {'name': 'Jim'}
            self.assertEqual(complex_str_nonlazy % {'num': 4, 'name': 'Jim'}, str('Hallo Jim, 4 guten Resultate'))
            self.assertEqual(complex_str_deferred % {'name': 'Jim', 'num': 1}, str('Hallo Jim, 1 gutes Resultat'))
            self.assertEqual(complex_str_deferred % {'name': 'Jim', 'num': 5}, str('Hallo Jim, 5 guten Resultate'))
            with six.assertRaisesRegex(self, KeyError, 'Your dictionary lacks key.*'):
                complex_str_deferred % {'name': 'Jim'}
            self.assertEqual(complex_context_nonlazy % {'num': 4, 'name': 'Jim'}, 'Willkommen Jim, 4 guten Resultate')
            self.assertEqual(complex_context_deferred % {'name': 'Jim', 'num': 1}, 'Willkommen Jim, 1 gutes Resultat')
            self.assertEqual(complex_context_deferred % {'name': 'Jim', 'num': 5}, 'Willkommen Jim, 5 guten Resultate')
            with six.assertRaisesRegex(self, KeyError, 'Your dictionary lacks key.*'):
                complex_context_deferred % {'name': 'Jim'}

    @override_settings(LOCALE_PATHS=extended_locale_paths)
    def test_pgettext(self):
        """Contextual markers select different translations for the same
        msgid; an unknown context falls back to the untranslated string."""
        # Reset cached translations so the extended locale paths are used.
        trans_real._active = local()
        trans_real._translations = {}
        with translation.override('de'):
            self.assertEqual(pgettext("unexisting", "May"), "May")
            self.assertEqual(pgettext("month name", "May"), "Mai")
            self.assertEqual(pgettext("verb", "May"), "Kann")
            self.assertEqual(npgettext("search", "%d result", "%d results", 4) % 4, "4 Resultate")

    @override_settings(LOCALE_PATHS=extended_locale_paths)
    def test_template_tags_pgettext(self):
        """
        Ensure that message contexts are taken into account the {% trans %} and
        {% blocktrans %} template tags.
        Refs #14806.
        """
        # Reset cached translations so the extended locale paths are used.
        trans_real._active = local()
        trans_real._translations = {}
        with translation.override('de'):

            # {% trans %} -----------------------------------
            # Inexisting context...
            t = Template('{% load i18n %}{% trans "May" context "unexisting" %}')
            rendered = t.render(Context())
            self.assertEqual(rendered, 'May')

            # Existing context...
            # Using a literal
            t = Template('{% load i18n %}{% trans "May" context "month name" %}')
            rendered = t.render(Context())
            self.assertEqual(rendered, 'Mai')
            t = Template('{% load i18n %}{% trans "May" context "verb" %}')
            rendered = t.render(Context())
            self.assertEqual(rendered, 'Kann')

            # Using a variable
            t = Template('{% load i18n %}{% trans "May" context message_context %}')
            rendered = t.render(Context({'message_context': 'month name'}))
            self.assertEqual(rendered, 'Mai')
            t = Template('{% load i18n %}{% trans "May" context message_context %}')
            rendered = t.render(Context({'message_context': 'verb'}))
            self.assertEqual(rendered, 'Kann')

            # Using a filter
            t = Template('{% load i18n %}{% trans "May" context message_context|lower %}')
            rendered = t.render(Context({'message_context': 'MONTH NAME'}))
            self.assertEqual(rendered, 'Mai')
            t = Template('{% load i18n %}{% trans "May" context message_context|lower %}')
            rendered = t.render(Context({'message_context': 'VERB'}))
            self.assertEqual(rendered, 'Kann')

            # Using 'as'
            t = Template('{% load i18n %}{% trans "May" context "month name" as var %}Value: {{ var }}')
            rendered = t.render(Context())
            self.assertEqual(rendered, 'Value: Mai')
            t = Template('{% load i18n %}{% trans "May" as var context "verb" %}Value: {{ var }}')
            rendered = t.render(Context())
            self.assertEqual(rendered, 'Value: Kann')

            # Mis-uses
            self.assertRaises(TemplateSyntaxError, Template, '{% load i18n %}{% trans "May" context as var %}{{ var }}')
            self.assertRaises(TemplateSyntaxError, Template, '{% load i18n %}{% trans "May" as var context %}{{ var }}')

            # {% blocktrans %} ------------------------------
            # Inexisting context...
            t = Template('{% load i18n %}{% blocktrans context "unexisting" %}May{% endblocktrans %}')
            rendered = t.render(Context())
            self.assertEqual(rendered, 'May')

            # Existing context...
            # Using a literal
            t = Template('{% load i18n %}{% blocktrans context "month name" %}May{% endblocktrans %}')
            rendered = t.render(Context())
            self.assertEqual(rendered, 'Mai')
            t = Template('{% load i18n %}{% blocktrans context "verb" %}May{% endblocktrans %}')
            rendered = t.render(Context())
            self.assertEqual(rendered, 'Kann')

            # Using a variable
            t = Template('{% load i18n %}{% blocktrans context message_context %}May{% endblocktrans %}')
            rendered = t.render(Context({'message_context': 'month name'}))
            self.assertEqual(rendered, 'Mai')
            t = Template('{% load i18n %}{% blocktrans context message_context %}May{% endblocktrans %}')
            rendered = t.render(Context({'message_context': 'verb'}))
            self.assertEqual(rendered, 'Kann')

            # Using a filter
            t = Template('{% load i18n %}{% blocktrans context message_context|lower %}May{% endblocktrans %}')
            rendered = t.render(Context({'message_context': 'MONTH NAME'}))
            self.assertEqual(rendered, 'Mai')
            t = Template('{% load i18n %}{% blocktrans context message_context|lower %}May{% endblocktrans %}')
            rendered = t.render(Context({'message_context': 'VERB'}))
            self.assertEqual(rendered, 'Kann')

            # Using 'count'
            t = Template('{% load i18n %}{% blocktrans count number=1 context "super search" %}{{ number }} super result{% plural %}{{ number }} super results{% endblocktrans %}')
            rendered = t.render(Context())
            self.assertEqual(rendered, '1 Super-Ergebnis')
            t = Template('{% load i18n %}{% blocktrans count number=2 context "super search" %}{{ number }} super result{% plural %}{{ number }} super results{% endblocktrans %}')
            rendered = t.render(Context())
            self.assertEqual(rendered, '2 Super-Ergebnisse')
            t = Template('{% load i18n %}{% blocktrans context "other super search" count number=1 %}{{ number }} super result{% plural %}{{ number }} super results{% endblocktrans %}')
            rendered = t.render(Context())
            self.assertEqual(rendered, '1 anderen Super-Ergebnis')
            t = Template('{% load i18n %}{% blocktrans context "other super search" count number=2 %}{{ number }} super result{% plural %}{{ number }} super results{% endblocktrans %}')
            rendered = t.render(Context())
            self.assertEqual(rendered, '2 andere Super-Ergebnisse')

            # Using 'with'
            t = Template('{% load i18n %}{% blocktrans with num_comments=5 context "comment count" %}There are {{ num_comments }} comments{% endblocktrans %}')
            rendered = t.render(Context())
            self.assertEqual(rendered, 'Es gibt 5 Kommentare')
            t = Template('{% load i18n %}{% blocktrans with num_comments=5 context "other comment count" %}There are {{ num_comments }} comments{% endblocktrans %}')
            rendered = t.render(Context())
            self.assertEqual(rendered, 'Andere: Es gibt 5 Kommentare')

            # Mis-uses
            self.assertRaises(TemplateSyntaxError, Template, '{% load i18n %}{% blocktrans context with month="May" %}{{ month }}{% endblocktrans %}')
            self.assertRaises(TemplateSyntaxError, Template, '{% load i18n %}{% blocktrans context %}{% endblocktrans %}')
            self.assertRaises(TemplateSyntaxError, Template, '{% load i18n %}{% blocktrans count number=2 context %}{{ number }} super result{% plural %}{{ number }} super results{% endblocktrans %}')

    def test_string_concat(self):
        """
        six.text_type(string_concat(...)) should not raise a TypeError - #4796
        """
        import django.utils.translation
        self.assertEqual('django', six.text_type(django.utils.translation.string_concat("dja", "ngo")))

    def test_safe_status(self):
        """
        Translating a string requiring no auto-escaping shouldn't change the "safe" status.
        """
        s = mark_safe(str('Password'))
        self.assertEqual(SafeString, type(s))
        with translation.override('de', deactivate=True):
            self.assertEqual(SafeText, type(ugettext(s)))
        self.assertEqual('aPassword', SafeText('a') + s)
        self.assertEqual('Passworda', s + SafeText('a'))
        self.assertEqual('Passworda', s + mark_safe('a'))
        self.assertEqual('aPassword', mark_safe('a') + s)
        self.assertEqual('as', mark_safe('a') + mark_safe('s'))

    def test_maclines(self):
        """
        Translations on files with mac or dos end of lines will be converted
        to unix eof in .po catalogs, and they have to match when retrieved
        """
        ca_translation = trans_real.translation('ca')
        ca_translation._catalog['Mac\nEOF\n'] = 'Catalan Mac\nEOF\n'
        ca_translation._catalog['Win\nEOF\n'] = 'Catalan Win\nEOF\n'
        with translation.override('ca', deactivate=True):
            self.assertEqual('Catalan Mac\nEOF\n', ugettext('Mac\rEOF\r'))
            self.assertEqual('Catalan Win\nEOF\n', ugettext('Win\r\nEOF\r\n'))

    def test_to_locale(self):
        """
        Tests the to_locale function and the special case of Serbian Latin
        (refs #12230 and r11299)
        """
        self.assertEqual(to_locale('en-us'), 'en_US')
        self.assertEqual(to_locale('sr-lat'), 'sr_Lat')

    def test_to_language(self):
        """
        Test the to_language function
        """
        self.assertEqual(trans_real.to_language('en_US'), 'en-us')
        self.assertEqual(trans_real.to_language('sr_Lat'), 'sr-lat')

    @override_settings(LOCALE_PATHS=(os.path.join(here, 'other', 'locale'),))
    def test_bad_placeholder_1(self):
        """
        Error in translation file should not crash template rendering
        (%(person)s is translated as %(personne)s in fr.po)
        Refs #16516.
        """
        with translation.override('fr'):
            t = Template('{% load i18n %}{% blocktrans %}My name is {{ person }}.{% endblocktrans %}')
            rendered = t.render(Context({'person': 'James'}))
            self.assertEqual(rendered, 'My name is James.')

    @override_settings(LOCALE_PATHS=(os.path.join(here, 'other', 'locale'),))
    def test_bad_placeholder_2(self):
        """
        Error in translation file should not crash template rendering
        (%(person) misses a 's' in fr.po, causing the string formatting to fail)
        Refs #18393.
        """
        with translation.override('fr'):
            t = Template('{% load i18n %}{% blocktrans %}My other name is {{ person }}.{% endblocktrans %}')
            rendered = t.render(Context({'person': 'James'}))
            self.assertEqual(rendered, 'My other name is James.')
class TranslationThreadSafetyTests(TestCase):
    """Specifically not using TransRealMixin here to test threading."""

    def setUp(self):
        self._old_language = get_language()
        self._translations = trans_real._translations

        # here we rely on .split() being called inside the _fetch()
        # in trans_real.translation()
        # A str subclass whose split() mutates the shared _translations dict,
        # simulating another thread inserting a catalog mid-activation.
        class sideeffect_str(str):
            def split(self, *args, **kwargs):
                res = str.split(self, *args, **kwargs)
                trans_real._translations['en-YY'] = None
                return res

        trans_real._translations = {sideeffect_str('en-XX'): None}

    def tearDown(self):
        # Restore the original catalogs and active language.
        trans_real._translations = self._translations
        activate(self._old_language)

    def test_bug14894_translation_activate_thread_safety(self):
        # activate() must not raise RuntimeError ("dictionary changed size
        # during iteration") when _translations is mutated concurrently.
        translation_count = len(trans_real._translations)
        try:
            translation.activate('pl')
        except RuntimeError:
            self.fail('translation.activate() is not thread-safe')
        # make sure sideeffect_str actually added a new translation
        self.assertLess(translation_count, len(trans_real._translations))
@override_settings(USE_L10N=True)
class FormattingTests(TransRealMixin, TestCase):
    """
    Tests for localized formatting of numbers, dates, times and form input
    under the USE_L10N / USE_THOUSAND_SEPARATOR settings, across several
    locales (Catalan, Russian, English, German/Austrian, Spanish/US).
    """
    def setUp(self):
        super(FormattingTests, self).setUp()
        # Sample values reused throughout the tests below.
        self.n = decimal.Decimal('66666.666')
        self.f = 99999.999
        self.d = datetime.date(2009, 12, 31)
        self.dt = datetime.datetime(2009, 12, 31, 20, 50)
        self.t = datetime.time(10, 15, 48)
        # A `long` on Python 2, a plain int on Python 3.
        self.l = 10000 if PY3 else long(10000)
        # Template context exposing all of the sample values.
        self.ctxt = Context({
            'n': self.n,
            't': self.t,
            'd': self.d,
            'dt': self.dt,
            'f': self.f,
            'l': self.l,
        })
    def test_locale_independent(self):
        """
        Localization of numbers
        """
        # nformat() honors its explicit separator/grouping arguments
        # regardless of the active locale.
        with self.settings(USE_THOUSAND_SEPARATOR=False):
            self.assertEqual('66666.66', nformat(self.n, decimal_sep='.', decimal_pos=2, grouping=3, thousand_sep=','))
            self.assertEqual('66666A6', nformat(self.n, decimal_sep='A', decimal_pos=1, grouping=1, thousand_sep='B'))
            self.assertEqual('66666', nformat(self.n, decimal_sep='X', decimal_pos=0, grouping=1, thousand_sep='Y'))
        with self.settings(USE_THOUSAND_SEPARATOR=True):
            self.assertEqual('66,666.66', nformat(self.n, decimal_sep='.', decimal_pos=2, grouping=3, thousand_sep=','))
            self.assertEqual('6B6B6B6B6A6', nformat(self.n, decimal_sep='A', decimal_pos=1, grouping=1, thousand_sep='B'))
            self.assertEqual('-66666.6', nformat(-66666.666, decimal_sep='.', decimal_pos=1))
            self.assertEqual('-66666.0', nformat(int('-66666'), decimal_sep='.', decimal_pos=1))
            self.assertEqual('10000.0', nformat(self.l, decimal_sep='.', decimal_pos=1))
            # This unusual grouping/force_grouping combination may be triggered by the intcomma filter (#17414)
            self.assertEqual('10000', nformat(self.l, decimal_sep='.', decimal_pos=0, grouping=0, force_grouping=True))
        # date filter
        self.assertEqual('31.12.2009 в 20:50', Template('{{ dt|date:"d.m.Y в H:i" }}').render(self.ctxt))
        self.assertEqual('⌚ 10:15', Template('{{ t|time:"⌚ H:i" }}').render(self.ctxt))
    @override_settings(USE_L10N=False)
    def test_l10n_disabled(self):
        """
        Catalan locale with format i18n disabled translations will be used,
        but not formats
        """
        with translation.override('ca', deactivate=True):
            # Format strings fall back to the global-settings defaults.
            self.assertEqual('N j, Y', get_format('DATE_FORMAT'))
            self.assertEqual(0, get_format('FIRST_DAY_OF_WEEK'))
            self.assertEqual('.', get_format('DECIMAL_SEPARATOR'))
            self.assertEqual('10:15 a.m.', time_format(self.t))
            # Month names are still translated even though formats aren't.
            self.assertEqual('des. 31, 2009', date_format(self.d))
            self.assertEqual('desembre 2009', date_format(self.d, 'YEAR_MONTH_FORMAT'))
            self.assertEqual('12/31/2009 8:50 p.m.', date_format(self.dt, 'SHORT_DATETIME_FORMAT'))
            self.assertEqual('No localizable', localize('No localizable'))
            self.assertEqual('66666.666', localize(self.n))
            self.assertEqual('99999.999', localize(self.f))
            self.assertEqual('10000', localize(self.l))
            self.assertEqual('des. 31, 2009', localize(self.d))
            self.assertEqual('des. 31, 2009, 8:50 p.m.', localize(self.dt))
            # Template rendering uses the unlocalized defaults as well.
            self.assertEqual('66666.666', Template('{{ n }}').render(self.ctxt))
            self.assertEqual('99999.999', Template('{{ f }}').render(self.ctxt))
            self.assertEqual('des. 31, 2009', Template('{{ d }}').render(self.ctxt))
            self.assertEqual('des. 31, 2009, 8:50 p.m.', Template('{{ dt }}').render(self.ctxt))
            self.assertEqual('66666.67', Template('{{ n|floatformat:2 }}').render(self.ctxt))
            self.assertEqual('100000.0', Template('{{ f|floatformat }}').render(self.ctxt))
            self.assertEqual('10:15 a.m.', Template('{{ t|time:"TIME_FORMAT" }}').render(self.ctxt))
            self.assertEqual('12/31/2009', Template('{{ d|date:"SHORT_DATE_FORMAT" }}').render(self.ctxt))
            self.assertEqual('12/31/2009 8:50 p.m.', Template('{{ dt|date:"SHORT_DATETIME_FORMAT" }}').render(self.ctxt))
            # Localized (Catalan-style) form input is rejected since the
            # fields expect the unlocalized default formats.
            form = I18nForm({
                'decimal_field': '66666,666',
                'float_field': '99999,999',
                'date_field': '31/12/2009',
                'datetime_field': '31/12/2009 20:50',
                'time_field': '20:50',
                'integer_field': '1.234',
            })
            self.assertEqual(False, form.is_valid())
            # Error messages are still translated to Catalan.
            self.assertEqual(['Introdu\xefu un n\xfamero.'], form.errors['float_field'])
            self.assertEqual(['Introdu\xefu un n\xfamero.'], form.errors['decimal_field'])
            self.assertEqual(['Introdu\xefu una data v\xe0lida.'], form.errors['date_field'])
            self.assertEqual(['Introdu\xefu una data/hora v\xe0lides.'], form.errors['datetime_field'])
            self.assertEqual(['Introdu\xefu un n\xfamero sencer.'], form.errors['integer_field'])
            form2 = SelectDateForm({
                'date_field_month': '12',
                'date_field_day': '31',
                'date_field_year': '2009'
            })
            self.assertEqual(True, form2.is_valid())
            self.assertEqual(datetime.date(2009, 12, 31), form2.cleaned_data['date_field'])
            # SelectDateWidget renders month/day/year order (the default,
            # non-localized order) with translated month names.
            self.assertHTMLEqual(
                '<select name="mydate_month" id="id_mydate_month">\n<option value="1">gener</option>\n<option value="2">febrer</option>\n<option value="3">mar\xe7</option>\n<option value="4">abril</option>\n<option value="5">maig</option>\n<option value="6">juny</option>\n<option value="7">juliol</option>\n<option value="8">agost</option>\n<option value="9">setembre</option>\n<option value="10">octubre</option>\n<option value="11">novembre</option>\n<option value="12" selected="selected">desembre</option>\n</select>\n<select name="mydate_day" id="id_mydate_day">\n<option value="1">1</option>\n<option value="2">2</option>\n<option value="3">3</option>\n<option value="4">4</option>\n<option value="5">5</option>\n<option value="6">6</option>\n<option value="7">7</option>\n<option value="8">8</option>\n<option value="9">9</option>\n<option value="10">10</option>\n<option value="11">11</option>\n<option value="12">12</option>\n<option value="13">13</option>\n<option value="14">14</option>\n<option value="15">15</option>\n<option value="16">16</option>\n<option value="17">17</option>\n<option value="18">18</option>\n<option value="19">19</option>\n<option value="20">20</option>\n<option value="21">21</option>\n<option value="22">22</option>\n<option value="23">23</option>\n<option value="24">24</option>\n<option value="25">25</option>\n<option value="26">26</option>\n<option value="27">27</option>\n<option value="28">28</option>\n<option value="29">29</option>\n<option value="30">30</option>\n<option value="31" selected="selected">31</option>\n</select>\n<select name="mydate_year" id="id_mydate_year">\n<option value="2009" selected="selected">2009</option>\n<option value="2010">2010</option>\n<option value="2011">2011</option>\n<option value="2012">2012</option>\n<option value="2013">2013</option>\n<option value="2014">2014</option>\n<option value="2015">2015</option>\n<option value="2016">2016</option>\n<option value="2017">2017</option>\n<option value="2018">2018</option>\n</select>',
                SelectDateWidget(years=range(2009, 2019)).render('mydate', datetime.date(2009, 12, 31))
            )
        # We shouldn't change the behavior of the floatformat filter re:
        # thousand separator and grouping when USE_L10N is False even
        # if the USE_THOUSAND_SEPARATOR, NUMBER_GROUPING and
        # THOUSAND_SEPARATOR settings are specified
        with self.settings(USE_THOUSAND_SEPARATOR=True,
                NUMBER_GROUPING=1, THOUSAND_SEPARATOR='!'):
            self.assertEqual('66666.67', Template('{{ n|floatformat:2 }}').render(self.ctxt))
            self.assertEqual('100000.0', Template('{{ f|floatformat }}').render(self.ctxt))
    def test_false_like_locale_formats(self):
        """
        Ensure that the active locale's formats take precedence over the
        default settings even if they would be interpreted as False in a
        conditional test (e.g. 0 or empty string).
        Refs #16938.
        """
        from django.conf.locale.fr import formats as fr_formats
        # Back up original formats
        backup_THOUSAND_SEPARATOR = fr_formats.THOUSAND_SEPARATOR
        backup_FIRST_DAY_OF_WEEK = fr_formats.FIRST_DAY_OF_WEEK
        # Set formats that would get interpreted as False in a conditional test
        fr_formats.THOUSAND_SEPARATOR = ''
        fr_formats.FIRST_DAY_OF_WEEK = 0
        reset_format_cache()
        with translation.override('fr'):
            with self.settings(USE_THOUSAND_SEPARATOR=True, THOUSAND_SEPARATOR='!'):
                self.assertEqual('', get_format('THOUSAND_SEPARATOR'))
                # Even a second time (after the format has been cached)...
                self.assertEqual('', get_format('THOUSAND_SEPARATOR'))
            with self.settings(FIRST_DAY_OF_WEEK=1):
                self.assertEqual(0, get_format('FIRST_DAY_OF_WEEK'))
                # Even a second time (after the format has been cached)...
                self.assertEqual(0, get_format('FIRST_DAY_OF_WEEK'))
        # Restore original formats
        fr_formats.THOUSAND_SEPARATOR = backup_THOUSAND_SEPARATOR
        fr_formats.FIRST_DAY_OF_WEEK = backup_FIRST_DAY_OF_WEEK
    def test_l10n_enabled(self):
        """
        Locale-specific formats are applied to output and accepted as form
        input when USE_L10N is active (checked for ca, ru and en locales).
        """
        # Catalan locale
        with translation.override('ca', deactivate=True):
            self.assertEqual('j \d\e F \d\e Y', get_format('DATE_FORMAT'))
            self.assertEqual(1, get_format('FIRST_DAY_OF_WEEK'))
            self.assertEqual(',', get_format('DECIMAL_SEPARATOR'))
            self.assertEqual('10:15:48', time_format(self.t))
            self.assertEqual('31 de desembre de 2009', date_format(self.d))
            self.assertEqual('desembre del 2009', date_format(self.d, 'YEAR_MONTH_FORMAT'))
            self.assertEqual('31/12/2009 20:50', date_format(self.dt, 'SHORT_DATETIME_FORMAT'))
            self.assertEqual('No localizable', localize('No localizable'))
            with self.settings(USE_THOUSAND_SEPARATOR=True):
                self.assertEqual('66.666,666', localize(self.n))
                self.assertEqual('99.999,999', localize(self.f))
                self.assertEqual('10.000', localize(self.l))
                self.assertEqual('True', localize(True))
            with self.settings(USE_THOUSAND_SEPARATOR=False):
                self.assertEqual('66666,666', localize(self.n))
                self.assertEqual('99999,999', localize(self.f))
                self.assertEqual('10000', localize(self.l))
                self.assertEqual('31 de desembre de 2009', localize(self.d))
                self.assertEqual('31 de desembre de 2009 a les 20:50', localize(self.dt))
            with self.settings(USE_THOUSAND_SEPARATOR=True):
                self.assertEqual('66.666,666', Template('{{ n }}').render(self.ctxt))
                self.assertEqual('99.999,999', Template('{{ f }}').render(self.ctxt))
                self.assertEqual('10.000', Template('{{ l }}').render(self.ctxt))
            # Localized form input (with thousand separators) validates.
            with self.settings(USE_THOUSAND_SEPARATOR=True):
                form3 = I18nForm({
                    'decimal_field': '66.666,666',
                    'float_field': '99.999,999',
                    'date_field': '31/12/2009',
                    'datetime_field': '31/12/2009 20:50',
                    'time_field': '20:50',
                    'integer_field': '1.234',
                })
                self.assertEqual(True, form3.is_valid())
                self.assertEqual(decimal.Decimal('66666.666'), form3.cleaned_data['decimal_field'])
                self.assertEqual(99999.999, form3.cleaned_data['float_field'])
                self.assertEqual(datetime.date(2009, 12, 31), form3.cleaned_data['date_field'])
                self.assertEqual(datetime.datetime(2009, 12, 31, 20, 50), form3.cleaned_data['datetime_field'])
                self.assertEqual(datetime.time(20, 50), form3.cleaned_data['time_field'])
                self.assertEqual(1234, form3.cleaned_data['integer_field'])
            with self.settings(USE_THOUSAND_SEPARATOR=False):
                self.assertEqual('66666,666', Template('{{ n }}').render(self.ctxt))
                self.assertEqual('99999,999', Template('{{ f }}').render(self.ctxt))
                self.assertEqual('31 de desembre de 2009', Template('{{ d }}').render(self.ctxt))
                self.assertEqual('31 de desembre de 2009 a les 20:50', Template('{{ dt }}').render(self.ctxt))
                self.assertEqual('66666,67', Template('{{ n|floatformat:2 }}').render(self.ctxt))
                self.assertEqual('100000,0', Template('{{ f|floatformat }}').render(self.ctxt))
                self.assertEqual('10:15:48', Template('{{ t|time:"TIME_FORMAT" }}').render(self.ctxt))
                self.assertEqual('31/12/2009', Template('{{ d|date:"SHORT_DATE_FORMAT" }}').render(self.ctxt))
                self.assertEqual('31/12/2009 20:50', Template('{{ dt|date:"SHORT_DATETIME_FORMAT" }}').render(self.ctxt))
                # {% now %} uses the same localized format as the date filter.
                self.assertEqual(date_format(datetime.datetime.now(), "DATE_FORMAT"),
                    Template('{% now "DATE_FORMAT" %}').render(self.ctxt))
            with self.settings(USE_THOUSAND_SEPARATOR=False):
                form4 = I18nForm({
                    'decimal_field': '66666,666',
                    'float_field': '99999,999',
                    'date_field': '31/12/2009',
                    'datetime_field': '31/12/2009 20:50',
                    'time_field': '20:50',
                    'integer_field': '1234',
                })
                self.assertEqual(True, form4.is_valid())
                self.assertEqual(decimal.Decimal('66666.666'), form4.cleaned_data['decimal_field'])
                self.assertEqual(99999.999, form4.cleaned_data['float_field'])
                self.assertEqual(datetime.date(2009, 12, 31), form4.cleaned_data['date_field'])
                self.assertEqual(datetime.datetime(2009, 12, 31, 20, 50), form4.cleaned_data['datetime_field'])
                self.assertEqual(datetime.time(20, 50), form4.cleaned_data['time_field'])
                self.assertEqual(1234, form4.cleaned_data['integer_field'])
            form5 = SelectDateForm({
                'date_field_month': '12',
                'date_field_day': '31',
                'date_field_year': '2009'
            })
            self.assertEqual(True, form5.is_valid())
            self.assertEqual(datetime.date(2009, 12, 31), form5.cleaned_data['date_field'])
            # SelectDateWidget uses the locale's field order (day first in ca).
            self.assertHTMLEqual(
                '<select name="mydate_day" id="id_mydate_day">\n<option value="1">1</option>\n<option value="2">2</option>\n<option value="3">3</option>\n<option value="4">4</option>\n<option value="5">5</option>\n<option value="6">6</option>\n<option value="7">7</option>\n<option value="8">8</option>\n<option value="9">9</option>\n<option value="10">10</option>\n<option value="11">11</option>\n<option value="12">12</option>\n<option value="13">13</option>\n<option value="14">14</option>\n<option value="15">15</option>\n<option value="16">16</option>\n<option value="17">17</option>\n<option value="18">18</option>\n<option value="19">19</option>\n<option value="20">20</option>\n<option value="21">21</option>\n<option value="22">22</option>\n<option value="23">23</option>\n<option value="24">24</option>\n<option value="25">25</option>\n<option value="26">26</option>\n<option value="27">27</option>\n<option value="28">28</option>\n<option value="29">29</option>\n<option value="30">30</option>\n<option value="31" selected="selected">31</option>\n</select>\n<select name="mydate_month" id="id_mydate_month">\n<option value="1">gener</option>\n<option value="2">febrer</option>\n<option value="3">mar\xe7</option>\n<option value="4">abril</option>\n<option value="5">maig</option>\n<option value="6">juny</option>\n<option value="7">juliol</option>\n<option value="8">agost</option>\n<option value="9">setembre</option>\n<option value="10">octubre</option>\n<option value="11">novembre</option>\n<option value="12" selected="selected">desembre</option>\n</select>\n<select name="mydate_year" id="id_mydate_year">\n<option value="2009" selected="selected">2009</option>\n<option value="2010">2010</option>\n<option value="2011">2011</option>\n<option value="2012">2012</option>\n<option value="2013">2013</option>\n<option value="2014">2014</option>\n<option value="2015">2015</option>\n<option value="2016">2016</option>\n<option value="2017">2017</option>\n<option value="2018">2018</option>\n</select>',
                SelectDateWidget(years=range(2009, 2019)).render('mydate', datetime.date(2009, 12, 31))
            )
        # Russian locale (with E as month)
        with translation.override('ru', deactivate=True):
            self.assertHTMLEqual(
                    '<select name="mydate_day" id="id_mydate_day">\n<option value="1">1</option>\n<option value="2">2</option>\n<option value="3">3</option>\n<option value="4">4</option>\n<option value="5">5</option>\n<option value="6">6</option>\n<option value="7">7</option>\n<option value="8">8</option>\n<option value="9">9</option>\n<option value="10">10</option>\n<option value="11">11</option>\n<option value="12">12</option>\n<option value="13">13</option>\n<option value="14">14</option>\n<option value="15">15</option>\n<option value="16">16</option>\n<option value="17">17</option>\n<option value="18">18</option>\n<option value="19">19</option>\n<option value="20">20</option>\n<option value="21">21</option>\n<option value="22">22</option>\n<option value="23">23</option>\n<option value="24">24</option>\n<option value="25">25</option>\n<option value="26">26</option>\n<option value="27">27</option>\n<option value="28">28</option>\n<option value="29">29</option>\n<option value="30">30</option>\n<option value="31" selected="selected">31</option>\n</select>\n<select name="mydate_month" id="id_mydate_month">\n<option value="1">\u042f\u043d\u0432\u0430\u0440\u044c</option>\n<option value="2">\u0424\u0435\u0432\u0440\u0430\u043b\u044c</option>\n<option value="3">\u041c\u0430\u0440\u0442</option>\n<option value="4">\u0410\u043f\u0440\u0435\u043b\u044c</option>\n<option value="5">\u041c\u0430\u0439</option>\n<option value="6">\u0418\u044e\u043d\u044c</option>\n<option value="7">\u0418\u044e\u043b\u044c</option>\n<option value="8">\u0410\u0432\u0433\u0443\u0441\u0442</option>\n<option value="9">\u0421\u0435\u043d\u0442\u044f\u0431\u0440\u044c</option>\n<option value="10">\u041e\u043a\u0442\u044f\u0431\u0440\u044c</option>\n<option value="11">\u041d\u043e\u044f\u0431\u0440\u044c</option>\n<option value="12" selected="selected">\u0414\u0435\u043a\u0430\u0431\u0440\u044c</option>\n</select>\n<select name="mydate_year" id="id_mydate_year">\n<option value="2009" 
selected="selected">2009</option>\n<option value="2010">2010</option>\n<option value="2011">2011</option>\n<option value="2012">2012</option>\n<option value="2013">2013</option>\n<option value="2014">2014</option>\n<option value="2015">2015</option>\n<option value="2016">2016</option>\n<option value="2017">2017</option>\n<option value="2018">2018</option>\n</select>',
                    SelectDateWidget(years=range(2009, 2019)).render('mydate', datetime.date(2009, 12, 31))
            )
        # English locale
        with translation.override('en', deactivate=True):
            self.assertEqual('N j, Y', get_format('DATE_FORMAT'))
            self.assertEqual(0, get_format('FIRST_DAY_OF_WEEK'))
            self.assertEqual('.', get_format('DECIMAL_SEPARATOR'))
            self.assertEqual('Dec. 31, 2009', date_format(self.d))
            self.assertEqual('December 2009', date_format(self.d, 'YEAR_MONTH_FORMAT'))
            self.assertEqual('12/31/2009 8:50 p.m.', date_format(self.dt, 'SHORT_DATETIME_FORMAT'))
            self.assertEqual('No localizable', localize('No localizable'))
            with self.settings(USE_THOUSAND_SEPARATOR=True):
                self.assertEqual('66,666.666', localize(self.n))
                self.assertEqual('99,999.999', localize(self.f))
                self.assertEqual('10,000', localize(self.l))
            with self.settings(USE_THOUSAND_SEPARATOR=False):
                self.assertEqual('66666.666', localize(self.n))
                self.assertEqual('99999.999', localize(self.f))
                self.assertEqual('10000', localize(self.l))
                self.assertEqual('Dec. 31, 2009', localize(self.d))
                self.assertEqual('Dec. 31, 2009, 8:50 p.m.', localize(self.dt))
            with self.settings(USE_THOUSAND_SEPARATOR=True):
                self.assertEqual('66,666.666', Template('{{ n }}').render(self.ctxt))
                self.assertEqual('99,999.999', Template('{{ f }}').render(self.ctxt))
                self.assertEqual('10,000', Template('{{ l }}').render(self.ctxt))
            with self.settings(USE_THOUSAND_SEPARATOR=False):
                self.assertEqual('66666.666', Template('{{ n }}').render(self.ctxt))
                self.assertEqual('99999.999', Template('{{ f }}').render(self.ctxt))
                self.assertEqual('Dec. 31, 2009', Template('{{ d }}').render(self.ctxt))
                self.assertEqual('Dec. 31, 2009, 8:50 p.m.', Template('{{ dt }}').render(self.ctxt))
                self.assertEqual('66666.67', Template('{{ n|floatformat:2 }}').render(self.ctxt))
                self.assertEqual('100000.0', Template('{{ f|floatformat }}').render(self.ctxt))
                self.assertEqual('12/31/2009', Template('{{ d|date:"SHORT_DATE_FORMAT" }}').render(self.ctxt))
                self.assertEqual('12/31/2009 8:50 p.m.', Template('{{ dt|date:"SHORT_DATETIME_FORMAT" }}').render(self.ctxt))
            form5 = I18nForm({
                'decimal_field': '66666.666',
                'float_field': '99999.999',
                'date_field': '12/31/2009',
                'datetime_field': '12/31/2009 20:50',
                'time_field': '20:50',
                'integer_field': '1234',
            })
            self.assertEqual(True, form5.is_valid())
            self.assertEqual(decimal.Decimal('66666.666'), form5.cleaned_data['decimal_field'])
            self.assertEqual(99999.999, form5.cleaned_data['float_field'])
            self.assertEqual(datetime.date(2009, 12, 31), form5.cleaned_data['date_field'])
            self.assertEqual(datetime.datetime(2009, 12, 31, 20, 50), form5.cleaned_data['datetime_field'])
            self.assertEqual(datetime.time(20, 50), form5.cleaned_data['time_field'])
            self.assertEqual(1234, form5.cleaned_data['integer_field'])
            form6 = SelectDateForm({
                'date_field_month': '12',
                'date_field_day': '31',
                'date_field_year': '2009'
            })
            self.assertEqual(True, form6.is_valid())
            self.assertEqual(datetime.date(2009, 12, 31), form6.cleaned_data['date_field'])
            # English renders month first, with English month names.
            self.assertHTMLEqual(
                '<select name="mydate_month" id="id_mydate_month">\n<option value="1">January</option>\n<option value="2">February</option>\n<option value="3">March</option>\n<option value="4">April</option>\n<option value="5">May</option>\n<option value="6">June</option>\n<option value="7">July</option>\n<option value="8">August</option>\n<option value="9">September</option>\n<option value="10">October</option>\n<option value="11">November</option>\n<option value="12" selected="selected">December</option>\n</select>\n<select name="mydate_day" id="id_mydate_day">\n<option value="1">1</option>\n<option value="2">2</option>\n<option value="3">3</option>\n<option value="4">4</option>\n<option value="5">5</option>\n<option value="6">6</option>\n<option value="7">7</option>\n<option value="8">8</option>\n<option value="9">9</option>\n<option value="10">10</option>\n<option value="11">11</option>\n<option value="12">12</option>\n<option value="13">13</option>\n<option value="14">14</option>\n<option value="15">15</option>\n<option value="16">16</option>\n<option value="17">17</option>\n<option value="18">18</option>\n<option value="19">19</option>\n<option value="20">20</option>\n<option value="21">21</option>\n<option value="22">22</option>\n<option value="23">23</option>\n<option value="24">24</option>\n<option value="25">25</option>\n<option value="26">26</option>\n<option value="27">27</option>\n<option value="28">28</option>\n<option value="29">29</option>\n<option value="30">30</option>\n<option value="31" selected="selected">31</option>\n</select>\n<select name="mydate_year" id="id_mydate_year">\n<option value="2009" selected="selected">2009</option>\n<option value="2010">2010</option>\n<option value="2011">2011</option>\n<option value="2012">2012</option>\n<option value="2013">2013</option>\n<option value="2014">2014</option>\n<option value="2015">2015</option>\n<option value="2016">2016</option>\n<option value="2017">2017</option>\n<option value="2018">2018</option>\n</select>',
                SelectDateWidget(years=range(2009, 2019)).render('mydate', datetime.date(2009, 12, 31))
            )
    def test_sub_locales(self):
        """
        Check if sublocales fall back to the main locale
        """
        with self.settings(USE_THOUSAND_SEPARATOR=True):
            with translation.override('de-at', deactivate=True):
                self.assertEqual('66.666,666', Template('{{ n }}').render(self.ctxt))
            with translation.override('es-us', deactivate=True):
                self.assertEqual('31 de Diciembre de 2009', date_format(self.d))
    def test_localized_input(self):
        """
        Tests if form input is correctly localized
        """
        self.maxDiff = 1200
        with translation.override('de-at', deactivate=True):
            form6 = CompanyForm({
                'name': 'acme',
                'date_added': datetime.datetime(2009, 12, 31, 6, 0, 0),
                'cents_paid': decimal.Decimal('59.47'),
                'products_delivered': 12000,
            })
            self.assertEqual(True, form6.is_valid())
            # Re-rendering the bound form shows values in the locale's format.
            self.assertHTMLEqual(
                form6.as_ul(),
                '<li><label for="id_name">Name:</label> <input id="id_name" type="text" name="name" value="acme" maxlength="50" /></li>\n<li><label for="id_date_added">Date added:</label> <input type="text" name="date_added" value="31.12.2009 06:00:00" id="id_date_added" /></li>\n<li><label for="id_cents_paid">Cents paid:</label> <input type="text" name="cents_paid" value="59,47" id="id_cents_paid" /></li>\n<li><label for="id_products_delivered">Products delivered:</label> <input type="text" name="products_delivered" value="12000" id="id_products_delivered" /></li>'
            )
            self.assertEqual(localize_input(datetime.datetime(2009, 12, 31, 6, 0, 0)), '31.12.2009 06:00:00')
            self.assertEqual(datetime.datetime(2009, 12, 31, 6, 0, 0), form6.cleaned_data['date_added'])
            with self.settings(USE_THOUSAND_SEPARATOR=True):
                # Checking for the localized "products_delivered" field
                self.assertInHTML('<input type="text" name="products_delivered" value="12.000" id="id_products_delivered" />', form6.as_ul())
    def test_sanitize_separators(self):
        """
        Tests django.utils.formats.sanitize_separators.
        """
        # Non-strings are untouched
        self.assertEqual(sanitize_separators(123), 123)
        with translation.override('ru', deactivate=True):
            # Russian locale has non-breaking space (\xa0) as thousand separator
            # Check that usual space is accepted too when sanitizing inputs
            with self.settings(USE_THOUSAND_SEPARATOR=True):
                self.assertEqual(sanitize_separators('1\xa0234\xa0567'), '1234567')
                self.assertEqual(sanitize_separators('77\xa0777,777'), '77777.777')
                self.assertEqual(sanitize_separators('12 345'), '12345')
                self.assertEqual(sanitize_separators('77 777,777'), '77777.777')
            # With formats not localized, the separator is left alone.
            with self.settings(USE_THOUSAND_SEPARATOR=True, USE_L10N=False):
                self.assertEqual(sanitize_separators('12\xa0345'), '12\xa0345')
    def test_iter_format_modules(self):
        """
        Tests the iter_format_modules function.
        """
        with translation.override('de-at', deactivate=True):
            de_format_mod = import_module('django.conf.locale.de.formats')
            self.assertEqual(list(iter_format_modules('de')), [de_format_mod])
            # A custom FORMAT_MODULE_PATH module is yielded first, before the
            # built-in locale module.
            with self.settings(FORMAT_MODULE_PATH='i18n.other.locale'):
                test_de_format_mod = import_module('i18n.other.locale.de.formats')
                self.assertEqual(list(iter_format_modules('de')), [test_de_format_mod, de_format_mod])
    def test_iter_format_modules_stability(self):
        """
        Tests the iter_format_modules function always yields format modules in
        a stable and correct order in presence of both base ll and ll_CC formats.
        """
        en_format_mod = import_module('django.conf.locale.en.formats')
        en_gb_format_mod = import_module('django.conf.locale.en_GB.formats')
        self.assertEqual(list(iter_format_modules('en-gb')), [en_gb_format_mod, en_format_mod])
    def test_get_format_modules_lang(self):
        # An explicit lang argument overrides the active translation.
        with translation.override('de', deactivate=True):
            self.assertEqual('.', get_format('DECIMAL_SEPARATOR', lang='en'))
    def test_get_format_modules_stability(self):
        # Repeated calls must return the same (cached) module list.
        with self.settings(FORMAT_MODULE_PATH='i18n.other.locale'):
            with translation.override('de', deactivate=True):
                old = str("%r") % get_format_modules(reverse=True)
                new = str("%r") % get_format_modules(reverse=True)  # second try
                self.assertEqual(new, old, 'Value returned by get_formats_modules() must be preserved between calls.')
    def test_localize_templatetag_and_filter(self):
        """
        Tests the {% localize %} templatetag
        """
        context = Context({'value': 3.14 })
        template1 = Template("{% load l10n %}{% localize %}{{ value }}{% endlocalize %};{% localize on %}{{ value }}{% endlocalize %}")
        template2 = Template("{% load l10n %}{{ value }};{% localize off %}{{ value }};{% endlocalize %}{{ value }}")
        template3 = Template('{% load l10n %}{{ value }};{{ value|unlocalize }}')
        template4 = Template('{% load l10n %}{{ value }};{{ value|localize }}')
        # German uses ',' as the decimal separator, so localized output is 3,14.
        output1 = '3,14;3,14'
        output2 = '3,14;3.14;3,14'
        output3 = '3,14;3.14'
        output4 = '3.14;3,14'
        with translation.override('de', deactivate=True):
            # {% localize %} / |localize force localization even with USE_L10N off.
            with self.settings(USE_L10N=False):
                self.assertEqual(template1.render(context), output1)
                self.assertEqual(template4.render(context), output4)
            with self.settings(USE_L10N=True):
                self.assertEqual(template1.render(context), output1)
                self.assertEqual(template2.render(context), output2)
                self.assertEqual(template3.render(context), output3)
    def test_localized_as_text_as_hidden_input(self):
        """
        Tests if form input with 'as_hidden' or 'as_text' is correctly localized. Ticket #18777
        """
        self.maxDiff = 1200
        with translation.override('de-at', deactivate=True):
            template = Template('{% load l10n %}{{ form.date_added }}; {{ form.cents_paid }}')
            template_as_text = Template('{% load l10n %}{{ form.date_added.as_text }}; {{ form.cents_paid.as_text }}')
            template_as_hidden = Template('{% load l10n %}{{ form.date_added.as_hidden }}; {{ form.cents_paid.as_hidden }}')
            form = CompanyForm({
                'name': 'acme',
                'date_added': datetime.datetime(2009, 12, 31, 6, 0, 0),
                'cents_paid': decimal.Decimal('59.47'),
                'products_delivered': 12000,
            })
            context = Context({'form': form })
            self.assertTrue(form.is_valid())
            # Default widget, as_text and as_hidden must all localize values.
            self.assertHTMLEqual(
                template.render(context),
                '<input id="id_date_added" name="date_added" type="text" value="31.12.2009 06:00:00" />; <input id="id_cents_paid" name="cents_paid" type="text" value="59,47" />'
            )
            self.assertHTMLEqual(
                template_as_text.render(context),
                '<input id="id_date_added" name="date_added" type="text" value="31.12.2009 06:00:00" />; <input id="id_cents_paid" name="cents_paid" type="text" value="59,47" />'
            )
            self.assertHTMLEqual(
                template_as_hidden.render(context),
                '<input id="id_date_added" name="date_added" type="hidden" value="31.12.2009 06:00:00" />; <input id="id_cents_paid" name="cents_paid" type="hidden" value="59,47" />'
            )
class MiscTests(TransRealMixin, TestCase):
    """
    Miscellaneous i18n tests: Accept-Language header parsing, language
    negotiation from request headers/cookies/URL path, and blocktrans
    %-escaping behavior.
    """
    def setUp(self):
        super(MiscTests, self).setUp()
        # Factory for building fake requests to feed the negotiation helpers.
        self.rf = RequestFactory()
    def test_parse_spec_http_header(self):
        """
        Testing HTTP header parsing. First, we test that we can parse the
        values according to the spec (and that we extract all the pieces in
        the right order).
        """
        p = trans_real.parse_accept_lang_header
        # Good headers.
        self.assertEqual([('de', 1.0)], p('de'))
        self.assertEqual([('en-AU', 1.0)], p('en-AU'))
        self.assertEqual([('es-419', 1.0)], p('es-419'))
        self.assertEqual([('*', 1.0)], p('*;q=1.00'))
        self.assertEqual([('en-AU', 0.123)], p('en-AU;q=0.123'))
        self.assertEqual([('en-au', 0.5)], p('en-au;q=0.5'))
        self.assertEqual([('en-au', 1.0)], p('en-au;q=1.0'))
        # Results come back sorted by descending quality.
        self.assertEqual([('da', 1.0), ('en', 0.5), ('en-gb', 0.25)], p('da, en-gb;q=0.25, en;q=0.5'))
        self.assertEqual([('en-au-xx', 1.0)], p('en-au-xx'))
        self.assertEqual([('de', 1.0), ('en-au', 0.75), ('en-us', 0.5), ('en', 0.25), ('es', 0.125), ('fa', 0.125)], p('de,en-au;q=0.75,en-us;q=0.5,en;q=0.25,es;q=0.125,fa;q=0.125'))
        self.assertEqual([('*', 1.0)], p('*'))
        self.assertEqual([('de', 1.0)], p('de;q=0.'))
        self.assertEqual([('en', 1.0), ('*', 0.5)], p('en; q=1.0, * ; q=0.5'))
        self.assertEqual([], p(''))
        # Bad headers; should always return [].
        self.assertEqual([], p('en-gb;q=1.0000'))
        self.assertEqual([], p('en;q=0.1234'))
        self.assertEqual([], p('en;q=.2'))
        self.assertEqual([], p('abcdefghi-au'))
        self.assertEqual([], p('**'))
        self.assertEqual([], p('en,,gb'))
        self.assertEqual([], p('en-au;q=0.1.0'))
        self.assertEqual([], p('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXZ,en'))
        self.assertEqual([], p('da, en-gb;q=0.8, en;q=0.7,#'))
        self.assertEqual([], p('de;q=2.0'))
        self.assertEqual([], p('de;q=0.a'))
        self.assertEqual([], p('12-345'))
        self.assertEqual([], p(''))
    def test_parse_literal_http_header(self):
        """
        Now test that we parse a literal HTTP header correctly.
        """
        g = get_language_from_request
        r = self.rf.get('/')
        r.COOKIES = {}
        r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt-br'}
        self.assertEqual('pt-br', g(r))
        r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt'}
        self.assertEqual('pt', g(r))
        r.META = {'HTTP_ACCEPT_LANGUAGE': 'es,de'}
        self.assertEqual('es', g(r))
        r.META = {'HTTP_ACCEPT_LANGUAGE': 'es-ar,de'}
        self.assertEqual('es-ar', g(r))
        # This test assumes there won't be a Django translation to a US
        # variation of the Spanish language, a safe assumption. When the
        # user sets it as the preferred language, the main 'es'
        # translation should be selected instead.
        r.META = {'HTTP_ACCEPT_LANGUAGE': 'es-us'}
        self.assertEqual(g(r), 'es')
        # This tests the following scenario: there isn't a main language (zh)
        # translation of Django but there is a translation to variation (zh_CN)
        # the user sets zh-cn as the preferred language, it should be selected
        # by Django without falling back nor ignoring it.
        r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-cn,de'}
        self.assertEqual(g(r), 'zh-cn')
    def test_parse_language_cookie(self):
        """
        Now test that we parse language preferences stored in a cookie correctly.
        """
        g = get_language_from_request
        r = self.rf.get('/')
        r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'pt-br'}
        r.META = {}
        self.assertEqual('pt-br', g(r))
        r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'pt'}
        r.META = {}
        self.assertEqual('pt', g(r))
        # The cookie takes precedence over the Accept-Language header.
        r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'es'}
        r.META = {'HTTP_ACCEPT_LANGUAGE': 'de'}
        self.assertEqual('es', g(r))
        # This test assumes there won't be a Django translation to a US
        # variation of the Spanish language, a safe assumption. When the
        # user sets it as the preferred language, the main 'es'
        # translation should be selected instead.
        r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'es-us'}
        r.META = {}
        self.assertEqual(g(r), 'es')
        # This tests the following scenario: there isn't a main language (zh)
        # translation of Django but there is a translation to variation (zh_CN)
        # the user sets zh-cn as the preferred language, it should be selected
        # by Django without falling back nor ignoring it.
        r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'zh-cn'}
        r.META = {'HTTP_ACCEPT_LANGUAGE': 'de'}
        self.assertEqual(g(r), 'zh-cn')
    def test_get_language_from_path_real(self):
        # trans_real extracts the language prefix from the URL path,
        # with or without a trailing slash; unknown prefixes give None.
        g = trans_real.get_language_from_path
        self.assertEqual(g('/pl/'), 'pl')
        self.assertEqual(g('/pl'), 'pl')
        self.assertEqual(g('/xyz/'), None)
    def test_get_language_from_path_null(self):
        # The trans_null (i18n disabled) implementation always returns None.
        from django.utils.translation.trans_null import get_language_from_path as g
        self.assertEqual(g('/pl/'), None)
        self.assertEqual(g('/pl'), None)
        self.assertEqual(g('/xyz/'), None)
    @override_settings(LOCALE_PATHS=extended_locale_paths)
    def test_percent_in_translatable_block(self):
        # A literal % inside {% blocktrans %} must survive translation.
        t_sing = Template("{% load i18n %}{% blocktrans %}The result was {{ percent }}%{% endblocktrans %}")
        t_plur = Template("{% load i18n %}{% blocktrans count num as number %}{{ percent }}% represents {{ num }} object{% plural %}{{ percent }}% represents {{ num }} objects{% endblocktrans %}")
        with translation.override('de'):
            self.assertEqual(t_sing.render(Context({'percent': 42})), 'Das Ergebnis war 42%')
            self.assertEqual(t_plur.render(Context({'percent': 42, 'num': 1})), '42% stellt 1 Objekt dar')
            self.assertEqual(t_plur.render(Context({'percent': 42, 'num': 4})), '42% stellt 4 Objekte dar')
    @override_settings(LOCALE_PATHS=extended_locale_paths)
    def test_percent_formatting_in_blocktrans(self):
        """
        Test that using Python's %-formatting is properly escaped in blocktrans,
        singular or plural
        """
        t_sing = Template("{% load i18n %}{% blocktrans %}There are %(num_comments)s comments{% endblocktrans %}")
        t_plur = Template("{% load i18n %}{% blocktrans count num as number %}%(percent)s% represents {{ num }} object{% plural %}%(percent)s% represents {{ num }} objects{% endblocktrans %}")
        with translation.override('de'):
            # Strings won't get translated as they don't match after escaping %
            self.assertEqual(t_sing.render(Context({'num_comments': 42})), 'There are %(num_comments)s comments')
            self.assertEqual(t_plur.render(Context({'percent': 42, 'num': 1})), '%(percent)s% represents 1 object')
            self.assertEqual(t_plur.render(Context({'percent': 42, 'num': 4})), '%(percent)s% represents 4 objects')
class ResolutionOrderI18NTests(TransRealMixin, TestCase):
    """Base class for tests checking which translation catalog wins for 'de'."""

    def setUp(self):
        super(ResolutionOrderI18NTests, self).setUp()
        activate('de')

    def tearDown(self):
        deactivate()
        super(ResolutionOrderI18NTests, self).tearDown()

    def assertUgettext(self, msgid, msgstr):
        """Assert that translating *msgid* yields a result containing *msgstr*."""
        result = ugettext(msgid)
        failure_message = ("The string '%s' isn't in the "
            "translation of '%s'; the actual result is '%s'." % (msgstr, msgid, result))
        self.assertTrue(msgstr in result, failure_message)
class AppResolutionOrderI18NTests(ResolutionOrderI18NTests):
    """An application catalog takes precedence over Django's bundled one."""

    def setUp(self):
        # Prepend the test app so its catalog is found first; restore the
        # original INSTALLED_APPS in tearDown.
        self.old_installed_apps = settings.INSTALLED_APPS
        settings.INSTALLED_APPS = ['i18n.resolution'] + list(settings.INSTALLED_APPS)
        super(AppResolutionOrderI18NTests, self).setUp()

    def tearDown(self):
        settings.INSTALLED_APPS = self.old_installed_apps
        super(AppResolutionOrderI18NTests, self).tearDown()

    def test_app_translation(self):
        self.assertUgettext('Date/time', 'APP')
@override_settings(LOCALE_PATHS=extended_locale_paths)
class LocalePathsResolutionOrderI18NTests(ResolutionOrderI18NTests):
    """LOCALE_PATHS catalogs beat both application and Django catalogs."""

    def test_locale_paths_translation(self):
        self.assertUgettext('Time', 'LOCALE_PATHS')

    def test_locale_paths_override_app_translation(self):
        extended_apps = list(settings.INSTALLED_APPS) + ['i18n.resolution']
        with self.settings(INSTALLED_APPS=extended_apps):
            self.assertUgettext('Time', 'LOCALE_PATHS')
class DjangoFallbackResolutionOrderI18NTests(ResolutionOrderI18NTests):
    """With no overrides, Django's own catalog supplies the translation."""

    def test_django_fallback(self):
        self.assertEqual('Datum/Zeit', ugettext('Date/time'))
class TestModels(TestCase):
    """Saving models whose fields hold lazy or "safe" i18n string values."""

    def test_lazy(self):
        # Saving must force any lazily-translated values on the instance.
        TestModel().save()

    def test_safestr(self):
        company = Company(cents_paid=12, products_delivered=1)
        company.name = SafeText('Iñtërnâtiônàlizætiøn1')
        company.save()
        company.name = SafeBytes('Iñtërnâtiônàlizætiøn1'.encode('utf-8'))
        company.save()
class TestLanguageInfo(TestCase):
    """get_language_info() lookups: known codes, fallbacks and failures."""

    def _check_german(self, info):
        # Expectations shared by the 'de' and 'de-xx' lookups.
        self.assertEqual(info['code'], 'de')
        self.assertEqual(info['name_local'], 'Deutsch')
        self.assertEqual(info['name'], 'German')
        self.assertEqual(info['bidi'], False)

    def test_localized_language_info(self):
        self._check_german(get_language_info('de'))

    def test_unknown_language_code(self):
        six.assertRaisesRegex(self, KeyError, r"Unknown language code xx\.", get_language_info, 'xx')

    def test_unknown_only_country_code(self):
        # An unknown country variant falls back to the base language.
        self._check_german(get_language_info('de-xx'))

    def test_unknown_language_code_and_country_code(self):
        six.assertRaisesRegex(self, KeyError, r"Unknown language code xx-xx and xx\.", get_language_info, 'xx-xx')
class MultipleLocaleActivationTests(TransRealMixin, TestCase):
    """
    Tests for template rendering behavior when multiple locales are activated
    during the lifetime of the same process.

    Each construct (filter _(), literal _(), trans tag, blocktrans tag) is
    exercised in three variants: compile under 'de' then render after
    restoring the original language and overriding to 'nl'; compile with
    deactivate=True; and switch directly from 'de' to 'nl'.  In every case
    the template compiled under 'de' must render with the 'nl' catalog.
    """
    def setUp(self):
        super(MultipleLocaleActivationTests, self).setUp()
        # Remember the active language so tearDown can restore it.
        self._old_language = get_language()
    def tearDown(self):
        super(MultipleLocaleActivationTests, self).tearDown()
        activate(self._old_language)
    def test_single_locale_activation(self):
        """
        Simple baseline behavior with one locale for all the supported i18n constructs.
        """
        with translation.override('fr'):
            self.assertEqual(Template("{{ _('Yes') }}").render(Context({})), 'Oui')
            self.assertEqual(Template("{% load i18n %}{% trans 'Yes' %}").render(Context({})), 'Oui')
            self.assertEqual(Template("{% load i18n %}{% blocktrans %}Yes{% endblocktrans %}").render(Context({})), 'Oui')
    # Literal marked up with _() in a filter expression
    def test_multiple_locale_filter(self):
        """Filter argument translated at render time, not compile time."""
        with translation.override('de'):
            t = Template("{% load i18n %}{{ 0|yesno:_('yes,no,maybe') }}")
        with translation.override(self._old_language):
            with translation.override('nl'):
                self.assertEqual(t.render(Context({})), 'nee')
    def test_multiple_locale_filter_deactivate(self):
        with translation.override('de', deactivate=True):
            t = Template("{% load i18n %}{{ 0|yesno:_('yes,no,maybe') }}")
        with translation.override('nl'):
            self.assertEqual(t.render(Context({})), 'nee')
    def test_multiple_locale_filter_direct_switch(self):
        with translation.override('de'):
            t = Template("{% load i18n %}{{ 0|yesno:_('yes,no,maybe') }}")
        with translation.override('nl'):
            self.assertEqual(t.render(Context({})), 'nee')
    # Literal marked up with _()
    def test_multiple_locale(self):
        with translation.override('de'):
            t = Template("{{ _('No') }}")
        with translation.override(self._old_language):
            with translation.override('nl'):
                self.assertEqual(t.render(Context({})), 'Nee')
    def test_multiple_locale_deactivate(self):
        with translation.override('de', deactivate=True):
            t = Template("{{ _('No') }}")
        with translation.override('nl'):
            self.assertEqual(t.render(Context({})), 'Nee')
    def test_multiple_locale_direct_switch(self):
        with translation.override('de'):
            t = Template("{{ _('No') }}")
        with translation.override('nl'):
            self.assertEqual(t.render(Context({})), 'Nee')
    # Literal marked up with _(), loading the i18n template tag library
    def test_multiple_locale_loadi18n(self):
        with translation.override('de'):
            t = Template("{% load i18n %}{{ _('No') }}")
        with translation.override(self._old_language):
            with translation.override('nl'):
                self.assertEqual(t.render(Context({})), 'Nee')
    def test_multiple_locale_loadi18n_deactivate(self):
        with translation.override('de', deactivate=True):
            t = Template("{% load i18n %}{{ _('No') }}")
        with translation.override('nl'):
            self.assertEqual(t.render(Context({})), 'Nee')
    def test_multiple_locale_loadi18n_direct_switch(self):
        with translation.override('de'):
            t = Template("{% load i18n %}{{ _('No') }}")
        with translation.override('nl'):
            self.assertEqual(t.render(Context({})), 'Nee')
    # trans i18n tag
    def test_multiple_locale_trans(self):
        with translation.override('de'):
            t = Template("{% load i18n %}{% trans 'No' %}")
        with translation.override(self._old_language):
            with translation.override('nl'):
                self.assertEqual(t.render(Context({})), 'Nee')
    def test_multiple_locale_deactivate_trans(self):
        with translation.override('de', deactivate=True):
            t = Template("{% load i18n %}{% trans 'No' %}")
        with translation.override('nl'):
            self.assertEqual(t.render(Context({})), 'Nee')
    def test_multiple_locale_direct_switch_trans(self):
        with translation.override('de'):
            t = Template("{% load i18n %}{% trans 'No' %}")
        with translation.override('nl'):
            self.assertEqual(t.render(Context({})), 'Nee')
    # blocktrans i18n tag
    def test_multiple_locale_btrans(self):
        with translation.override('de'):
            t = Template("{% load i18n %}{% blocktrans %}No{% endblocktrans %}")
        with translation.override(self._old_language):
            with translation.override('nl'):
                self.assertEqual(t.render(Context({})), 'Nee')
    def test_multiple_locale_deactivate_btrans(self):
        with translation.override('de', deactivate=True):
            t = Template("{% load i18n %}{% blocktrans %}No{% endblocktrans %}")
        with translation.override('nl'):
            self.assertEqual(t.render(Context({})), 'Nee')
    def test_multiple_locale_direct_switch_btrans(self):
        with translation.override('de'):
            t = Template("{% load i18n %}{% blocktrans %}No{% endblocktrans %}")
        with translation.override('nl'):
            self.assertEqual(t.render(Context({})), 'Nee')
@override_settings(
    USE_I18N=True,
    LANGUAGES=(
        ('en', 'English'),
        ('fr', 'French'),
    ),
    MIDDLEWARE_CLASSES=(
        'django.middleware.locale.LocaleMiddleware',
        'django.middleware.common.CommonMiddleware',
    ),
)
class LocaleMiddlewareTests(TransRealMixin, TestCase):
    """LocaleMiddleware behavior over full request/response cycles."""
    urls = 'i18n.urls'
    def test_streaming_response(self):
        # Regression test for #5241
        # Streaming responses must still be translated per the URL prefix.
        response = self.client.get('/fr/streaming/')
        self.assertContains(response, "Oui/Non")
        response = self.client.get('/en/streaming/')
        self.assertContains(response, "Yes/No")
    @override_settings(
        MIDDLEWARE_CLASSES=(
            'django.contrib.sessions.middleware.SessionMiddleware',
            'django.middleware.locale.LocaleMiddleware',
            'django.middleware.common.CommonMiddleware',
        ),
    )
    def test_language_not_saved_to_session(self):
        """Checks that current language is not automatically saved to
        session on every request."""
        # Regression test for #21473
        self.client.get('/fr/simple/')
        self.assertNotIn('django_language', self.client.session)
@override_settings(
    USE_I18N=True,
    LANGUAGES=(
        ('bg', 'Bulgarian'),
        ('en-us', 'English'),
        ('pt-br', 'Portugese (Brazil)'),
    ),
    MIDDLEWARE_CLASSES=(
        'django.middleware.locale.LocaleMiddleware',
        'django.middleware.common.CommonMiddleware',
    ),
)
class CountrySpecificLanguageTests(TransRealMixin, TestCase):
    """Language negotiation when only country-specific codes are configured."""

    urls = 'i18n.urls'

    def setUp(self):
        super(CountrySpecificLanguageTests, self).setUp()
        self.rf = RequestFactory()

    def _negotiate(self, accept_language):
        # Build a bare request carrying only the Accept-Language header and
        # run it through the language-negotiation machinery.
        request = self.rf.get('/')
        request.COOKIES = {}
        request.META = {'HTTP_ACCEPT_LANGUAGE': accept_language}
        return get_language_from_request(request)

    def test_check_for_language(self):
        for code in ('en', 'en-us', 'en-US'):
            self.assertTrue(check_for_language(code))

    def test_get_language_from_request(self):
        # issue 19919
        self.assertEqual('en-us', self._negotiate('en-US,en;q=0.8,bg;q=0.6,ru;q=0.4'))
        self.assertEqual('bg', self._negotiate('bg-bg,en-US;q=0.8,en;q=0.6,ru;q=0.4'))

    def test_specific_language_codes(self):
        # issue 11915
        self.assertEqual('pt-br', self._negotiate('pt,en-US;q=0.8,en;q=0.6,ru;q=0.4'))
        self.assertEqual('pt-br', self._negotiate('pt-pt,en-US;q=0.8,en;q=0.6,ru;q=0.4'))
|
bsd-3-clause
|
trunca/enigma2
|
lib/python/Components/Converter/genre.py
|
72
|
4163
|
#
# Genre types taken from DVB standards documentation
#
# some broadcaster do define other types so this list
# may grow or be replaced..
#
# maintype[n] is the human-readable name for DVB content nibble 1 == n.
# Index 0 is the DVB "reserved" value; index 15 ("user defined") is handled
# separately by the lookup functions below.
maintype = [ _("Reserved"),
    _("Movie/Drama"),
    _("News Current Affairs"),
    _("Show Games show"),
    _("Sports"),
    _("Children/Youth"),
    _("Music/Ballet/Dance"),
    _("Arts/Culture"),
    _("Social/Political/Economics"),
    _("Education/Science/..."),
    _("Leisure hobbies"),
    _("Other")]
# subtype[main][sub] names the level-2 genre for content nibble 2 == sub.
# NOTE(review): several strings below contain typos (e.g. "melodram",
# "childrens's") but they are live translation msgids - changing them would
# orphan existing translations, so they are left as-is.
subtype = {}
# Movie/Drama
subtype[1] = [
    _("movie/drama (general)"),
    _("detective/thriller"),
    _("adventure/western/war"),
    _("science fiction/fantasy/horror"),
    _("comedy"),
    _("soap/melodram/folkloric"),
    _("romance"),
    _("serious/classical/religious/historical movie/drama"),
    _("adult movie/drama")]
# News Current Affairs
subtype[2] = [
    _("news/current affairs (general)"),
    _("news/weather report"),
    _("news magazine"),
    _("documentary"),
    _("discussion/interview/debate")]
# Show Games show
subtype[3] = [
    _("show/game show (general)"),
    _("game show/quiz/contest"),
    _("variety show"),
    _("talk show")]
# Sports
subtype[4] = [
    _("sports (general)"),
    _("special events"),
    _("sports magazine"),
    _("football/soccer"),
    _("tennis/squash"),
    _("team sports"),
    _("athletics"),
    _("motor sport"),
    _("water sport"),
    _("winter sport"),
    _("equestrian"),
    _("martial sports")]
# Children/Youth
subtype[5] = [
    _("childrens's/youth program (general)"),
    _("pre-school children's program"),
    _("entertainment (6-14 year old)"),
    _("entertainment (10-16 year old)"),
    _("information/education/school program"),
    _("cartoon/puppets")]
# Music/Ballet/Dance
subtype[6] = [
    _("music/ballet/dance (general)"),
    _("rock/pop"),
    _("serious music/classic music"),
    _("folk/traditional music"),
    _("jazz"),
    _("musical/opera"),
    _("ballet")]
# Arts/Culture
subtype[7] = [
    _("arts/culture (without music, general)"),
    _("performing arts"),
    _("fine arts"),
    _("religion"),
    _("popular culture/traditional arts"),
    _("literature"),
    _("film/cinema"),
    _("experimental film/video"),
    _("broadcasting/press"),
    _("new media"),
    _("arts/culture magazine"),
    _("fashion")]
# Social/Political/Economics
subtype[8] = [
    _("social/political issues/economics (general)"),
    _("magazines/reports/documentary"),
    _("economics/social advisory"),
    _("remarkable people")]
# Education/Science/...
subtype[9] = [
    _("education/science/factual topics (general)"),
    _("nature/animals/environment"),
    _("technology/natural science"),
    _("medicine/physiology/psychology"),
    _("foreign countries/expeditions"),
    _("social/spiritual science"),
    _("further education"),
    _("languages")]
# Leisure hobbies
subtype[10] = [
    _("leisure hobbies (general)"),
    _("tourism/travel"),
    _("handicraft"),
    _("motoring"),
    _("fitness & health"),
    _("cooking"),
    _("advertisement/shopping"),
    _("gardening")]
# Other
subtype[11] = [
    _("original language"),
    _("black & white"),
    _("unpublished"),
    _("live broadcast")]
def getGenreStringMain(hn, ln):
    """Return the main (level-1) genre name for DVB content nibble *hn*.

    *ln* (the level-2 nibble) is accepted for signature parity with the
    other lookup helpers but is unused here.  Reserved/unknown nibbles
    yield an empty string.
    """
    if hn == 15:
        return _("User defined")
    if not 0 < hn < len(maintype):
        return ""
    return maintype[hn]
def getGenreStringSub(hn, ln):
    """Return the sub (level-2) genre name for DVB nibbles *hn*/*ln*.

    Falls back to an empty string when either nibble is outside the
    known tables.
    """
    if hn == 15:
        return _("User defined") + " " + str(ln)
    if not 0 < hn < len(maintype):
        return ""
    if ln == 15:
        return _("User defined")
    names = subtype[hn]
    if ln < len(names):
        return names[ln]
    return ""
def getGenreStringLong(hn, ln):
    """Return "Main: sub" combining both genre levels for *hn*/*ln*."""
    if hn == 15:
        return _("User defined") + " " + str(ln)
    if not 0 < hn < len(maintype):
        return ""
    return maintype[hn] + ": " + getGenreStringSub(hn, ln)
#
# The End
#
|
gpl-2.0
|
dwaynebailey/translate
|
translate/storage/factory.py
|
2
|
8230
|
# -*- coding: utf-8 -*-
#
# Copyright 2006-2010 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""factory methods to build real storage objects that conform to base.py"""
import os
import six
#TODO: Monolingual formats (with template?)
decompressclass = {
'gz': ("gzip", "GzipFile"),
'bz2': ("bz2", "BZ2File"),
}
classes_str = {
"csv": ("csvl10n", "csvfile"),
"tab": ("omegat", "OmegaTFileTab"), "utf8": ("omegat", "OmegaTFile"),
"po": ("po", "pofile"), "pot": ("po", "pofile"),
"mo": ("mo", "mofile"), "gmo": ("mo", "mofile"),
"qm": ("qm", "qmfile"),
"lang": ("mozilla_lang", "LangStore"),
"utx": ("utx", "UtxFile"),
"_wftm": ("wordfast", "WordfastTMFile"),
"_trados_txt_tm": ("trados", "TradosTxtTmFile"),
"catkeys": ("catkeys", "CatkeysFile"),
"qph": ("qph", "QphFile"),
"tbx": ("tbx", "tbxfile"),
"tmx": ("tmx", "tmxfile"),
"ts": ("ts2", "tsfile"),
"xliff": ("xliff", "xlifffile"), "xlf": ("xliff", "xlifffile"),
"sdlxliff": ("xliff", "xlifffile"),
# Monolingual formats
"ftl": ("l20n", "l20nfile"),
}
### XXX: if you add anything here, you must also add it to translate.storage.
"""Dictionary of file extensions and the names of their associated class.
Used for dynamic lazy loading of modules.
_ext is a pseudo extension, that is their is no real extension by that name.
"""
def _examine_txt(storefile):
    """Determine the true filetype for a .txt file.

    Accepts either a filename or a readable (binary) file object and
    returns the pseudo extension ('_wftm' or '_trados_txt_tm').

    Raises ValueError when *storefile* cannot be read or the content
    matches no known format.

    Fix: when given a filename we open the file ourselves; the original
    never closed that handle, leaking a file descriptor per call.  We now
    close it in a ``finally`` (only the handle we opened - a caller-owned
    file object is left open and rewound as before).
    """
    opened_here = False
    if isinstance(storefile, six.string_types) and os.path.exists(storefile):
        storefile = open(storefile, 'rb')
        opened_here = True
    try:
        try:
            start = storefile.read(600).strip()
        except AttributeError:
            raise ValueError("Need to read object to determine type")
        # Some encoding magic for Wordfast
        from translate.storage import wordfast
        if wordfast.TAB_UTF16 in start.split(b"\n")[0]:
            encoding = 'utf-16'
        else:
            encoding = 'iso-8859-1'
        start = start.decode(encoding)
        if '%Wordfast TM' in start:
            pseudo_extension = '_wftm'
        elif '<RTF Preamble>' in start:
            pseudo_extension = '_trados_txt_tm'
        else:
            raise ValueError("Failed to guess file type.")
        # Rewind so a caller-supplied file object can be parsed from the top.
        storefile.seek(0)
        return pseudo_extension
    finally:
        if opened_here:
            storefile.close()
# Extensions whose real format must be sniffed from content; maps the
# ambiguous extension to a guesser function returning a pseudo extension.
hiddenclasses = {"txt": _examine_txt}
def _guessextention(storefile):
    """Guesses the type of a file object by looking at the first few
    characters. The return value is a file extention.

    The file object is rewound to the start on success; on failure a
    ValueError is raised.
    """
    # Ordered signature table: first marker found in the leading bytes wins.
    signatures = (
        (b'<xliff ', 'xlf'),
        (b'msgid "', 'po'),
        (b'%Wordfast TM', 'txt'),
        (b'<!DOCTYPE TS>', 'ts'),
        (b'<tmx ', 'tmx'),
        (b'#UTX', 'utx'),
    )
    start = storefile.read(300).strip()
    for marker, extention in signatures:
        if marker in start:
            break
    else:
        raise ValueError("Failed to guess file type.")
    storefile.seek(0)
    return extention
def _getdummyname(storefile):
    """Return a fabricated filename ('dummy.<ext>') for a file object that
    has no name attribute, picking the extension by content sniffing."""
    return "dummy.%s" % _guessextention(storefile)
def _getname(storefile):
    """Return a filename for *storefile* (a path string or a file object).

    File objects without a ``name`` attribute get a sniffed dummy name;
    ``None`` is rejected with a ValueError.
    """
    if storefile is None:
        raise ValueError("This method cannot magically produce a filename when given None as input.")
    if isinstance(storefile, six.string_types):
        return storefile
    if hasattr(storefile, "name"):
        return storefile.name
    return _getdummyname(storefile)
def getclass(storefile, localfiletype=None, ignore=None, classes=None,
             classes_str=classes_str, hiddenclasses=hiddenclasses):
    """Factory that returns the applicable class for the type of file
    presented. Specify ignore to ignore some part at the back of the name
    (like .gz).

    :param storefile: file object or file name to classify
    :param localfiletype: explicit extension, overriding name-based detection
    :param classes: optional ext->class mapping (older Pootle API), preferred
        over the lazy ``classes_str`` module mapping when given
    :raises ValueError: when no class is registered for the extension
    """
    storefilename = _getname(storefile)
    if ignore and storefilename.endswith(ignore):
        storefilename = storefilename[:-len(ignore)]
    ext = localfiletype
    if ext is None:
        root, ext = os.path.splitext(storefilename)
        ext = ext[len(os.path.extsep):].lower()
    decomp = None
    if ext in decompressclass:
        # Compressed file: remember the wrapper and look at the inner
        # extension instead (e.g. "foo.po.gz" -> "po").
        decomp = ext
        root, ext = os.path.splitext(root)
        ext = ext[len(os.path.extsep):].lower()
    if ext in hiddenclasses:
        # Ambiguous extension (e.g. .txt): sniff the content to map it to
        # a pseudo extension identifying the real format.
        guesserfn = hiddenclasses[ext]
        if decomp:
            _module, _class = decompressclass[decomp]
            module = __import__(_module, globals(), {}, [])
            _file = getattr(module, _class)
            ext = guesserfn(_file(storefile))
        else:
            ext = guesserfn(storefile)
    try:
        # we prefer classes (if given) since that is the older API that Pootle uses
        if classes:
            storeclass = classes[ext]
        else:
            # Lazy import of the storage module on first use.
            _module, _class = classes_str[ext]
            module = __import__("translate.storage.%s" % _module, globals(), {}, _module)
            storeclass = getattr(module, _class)
    except KeyError:
        raise ValueError("Unknown filetype (%s)" % storefilename)
    return storeclass
def getobject(storefile, localfiletype=None, ignore=None, classes=None,
              classes_str=classes_str, hiddenclasses=hiddenclasses):
    """Factory that returns a usable object for the type of file presented.

    :type storefile: file or str
    :param storefile: File object or file name.

    Specify ignore to ignore some part at the back of the name (like .gz).

    A directory path returns a ``directory.Directory`` wrapper.  An existing
    file (or an open file object) is parsed; otherwise an empty store of the
    detected class is returned with its ``filename`` set.
    """
    if isinstance(storefile, six.string_types):
        if os.path.isdir(storefile) or storefile.endswith(os.path.sep):
            from translate.storage import directory
            return directory.Directory(storefile)
    storefilename = _getname(storefile)
    storeclass = getclass(storefile, localfiletype, ignore, classes=classes,
                          classes_str=classes_str, hiddenclasses=hiddenclasses)
    if os.path.exists(storefilename) or not getattr(storefile, "closed", True):
        name, ext = os.path.splitext(storefilename)
        ext = ext[len(os.path.extsep):].lower()
        if ext in decompressclass:
            # Transparently unwrap gz/bz2 before handing off to the parser.
            _module, _class = decompressclass[ext]
            module = __import__(_module, globals(), {}, [])
            _file = getattr(module, _class)
            storefile = _file(storefilename)
        store = storeclass.parsefile(storefile)
    else:
        # Nothing to parse: hand back an empty store bound to the name.
        store = storeclass()
        store.filename = storefilename
    return store
# (human-readable name, extensions, mimetypes) for each format this factory
# can open; exposed to callers through supported_files().
supported = [
    ('Gettext PO file', ['po', 'pot'], ["text/x-gettext-catalog", "text/x-gettext-translation", "text/x-po", "text/x-pot"]),
    ('XLIFF Translation File', ['xlf', 'xliff', 'sdlxliff'], ["application/x-xliff", "application/x-xliff+xml"]),
    ('Gettext MO file', ['mo', 'gmo'], ["application/x-gettext-catalog", "application/x-mo"]),
    ('Qt .qm file', ['qm'], ["application/x-qm"]),
    ('TBX Glossary', ['tbx'], ['application/x-tbx']),
    ('TMX Translation Memory', ['tmx'], ["application/x-tmx"]),
    ('Qt Linguist Translation File', ['ts'], ["application/x-linguist"]),
    ('Qt Phrase Book', ['qph'], ["application/x-qph"]),
    ('OmegaT Glossary', ['utf8', 'tab'], ["application/x-omegat-glossary"]),
    ('UTX Dictionary', ['utx'], ["text/x-utx"]),
    ('Haiku catkeys file', ['catkeys'], ["application/x-catkeys"]),
]
def supported_files():
    """Returns data about all supported files

    :return: list of type that include (name, extensions, mimetypes)
    :rtype: list
    """
    # Hand out a shallow copy so callers cannot mutate the module table.
    return list(supported)
|
gpl-2.0
|
spartonia/django-oscar
|
src/oscar/apps/dashboard/pages/app.py
|
49
|
1113
|
from django.conf.urls import url
from oscar.core.application import Application
from oscar.core.loading import get_class
class FlatPageManagementApplication(Application):
    """Dashboard application wiring up the flatpage CRUD views."""

    name = None
    default_permissions = ['is_staff', ]
    list_view = get_class('dashboard.pages.views', 'PageListView')
    create_view = get_class('dashboard.pages.views', 'PageCreateView')
    update_view = get_class('dashboard.pages.views', 'PageUpdateView')
    delete_view = get_class('dashboard.pages.views', 'PageDeleteView')

    def get_urls(self):
        """
        Get URL patterns defined for flatpage management application.
        """
        urlpatterns = [
            url(r'^$', self.list_view.as_view(), name='page-list'),
            url(r'^create/$', self.create_view.as_view(), name='page-create'),
            url(r'^update/(?P<pk>[-\w]+)/$', self.update_view.as_view(), name='page-update'),
            url(r'^delete/(?P<pk>\d+)/$', self.delete_view.as_view(), name='page-delete'),
        ]
        return self.post_process_urls(urlpatterns)
# Singleton instance imported by the dashboard URL configuration.
application = FlatPageManagementApplication()
|
bsd-3-clause
|
scattering/ipeek
|
server/sans/data.py
|
1
|
5336
|
#!/usr/bin/env python
import struct
import sys
import vaxutils
import numpy
import math
def readNCNRData(inputfile):
    """Read a raw NCNR SANS data file.

    Returns a ``(detdata, metadata)`` tuple: ``detdata`` is the 128x128
    detector counts array (decompressed from the on-disk I*2 encoding) and
    ``metadata`` is a dict of header fields, with VAX 4-byte reals already
    converted to Python floats.

    The fixed header layout (bytes 23:514) mirrors the VAX Fortran record
    structure; see the per-section comments on the format string.
    """
    f = open(inputfile, 'rb')
    data = f.read()
    f.close()
    #filename
    dat = struct.unpack('<21s',data[2:23])
    filename = dat[0].replace(' ','')
    #metadata
    metadata = {}
    # ``reals`` collects raw 4-byte VAX floats; converted below.
    reals = {}
    formatstring = '<4i4s4s4s4s20s3s11s1s8s' #run
    formatstring += '60s4s4s4s4s3i4s4s2i6s6s' #sample
    formatstring += '6s4s4s4s4s4s4s2i4s4s4s4s4s4s4s' #det
    formatstring += '4s4s4s4s4s4s' #resolution
    formatstring += 'L2i' #tslice
    formatstring += 'L4s4s4s2i' #temp
    formatstring += '2L4s4s4s4s4s' #magnet
    formatstring += '4s4s' #bmstp
    formatstring += '3i4s4s4s4s42s' #params
    formatstring += 'L4s4si' #voltage
    formatstring += '2L4s4s' #polarization
    formatstring += '4i4s4s4s4s4s' #analysis
    #print formatstring
    #print struct.calcsize(formatstring)
    # Field order below must match the format string exactly.
    (metadata['run.npre'],
     metadata['run.ctime'],
     metadata['run.rtime'],
     metadata['run.numruns'],
     reals['run.moncnt'],
     reals['run.savmon'],
     reals['run.detcnt'],
     reals['run.atten'],
     metadata['run.datetime'],
     metadata['run.type'],
     metadata['run.defdir'],
     metadata['run.mode'],
     metadata['run.reserve'],
     metadata['sample.labl'],
     reals['sample.trns'],
     reals['sample.thk'],
     reals['sample.position'],
     reals['sample.rotang'],
     metadata['sample.table'],
     metadata['sample.holder'],
     metadata['sample.blank'],
     reals['sample.temp'],
     reals['sample.field'],
     metadata['sample.tctrlr'],
     metadata['sample.magnet'],
     metadata['sample.tunits'],
     metadata['sample.funits'],
     metadata['det.typ'],
     reals['det.calx1'],
     reals['det.calx2'],
     reals['det.calx3'],
     reals['det.caly1'],
     reals['det.caly2'],
     reals['det.caly3'],
     metadata['det.num'],
     metadata['det.spacer'],
     reals['det.beamx'],
     reals['det.beamy'],
     reals['det.dis'],
     reals['det.ang'],
     reals['det.siz'],
     reals['det.bstop'],
     reals['det.blank'],
     reals['resolution.ap1'],
     reals['resolution.ap2'],
     reals['resolution.ap12dis'],
     reals['resolution.lmda'],
     reals['resolution.dlmda'],
     reals['resolution.save'],
     metadata['tslice.slicing'],
     metadata['tslice.multfact'],
     metadata['tslice.ltslice'],
     metadata['temp.printemp'],
     reals['temp.hold'],
     reals['temp.err'],
     reals['temp.blank'],
     metadata['temp.extra'],
     metadata['temp.reserve'],
     metadata['magnet.printmag'],
     metadata['magnet.sensor'],
     reals['magnet.current'],
     reals['magnet.conv'],
     reals['magnet.fieldlast'],
     reals['magnet.blank'],
     reals['magnet.spacer'],
     reals['bmstp.xpos'],
     reals['bmstp.ypos'],
     metadata['params.blank1'],
     metadata['params.blank2'],
     metadata['params.blank3'],
     reals['params.trsncnt'],
     reals['params.extra1'],
     reals['params.extra2'],
     reals['params.extra3'],
     metadata['params.reserve'],
     metadata['voltage.printvolt'],
     reals['voltage.volts'],
     reals['voltage.blank'],
     metadata['voltage.spacer'],
     metadata['polarization.printpol'],
     metadata['polarization.flipper'],
     reals['polarization.horiz'],
     reals['polarization.vert'],
     metadata['analysis.rows1'],
     metadata['analysis.rows2'],
     metadata['analysis.cols1'],
     metadata['analysis.cols2'],
     reals['analysis.factor'],
     reals['analysis.qmin'],
     reals['analysis.qmax'],
     reals['analysis.imin'],
     reals['analysis.imax']) = struct.unpack(formatstring,data[23:514])
    #Process reals into metadata
    for k,v in reals.items():
        #print k,type(v)
        metadata[k] = vaxutils.R4toFloat(v)
    #print len(data[514:])
    #print struct.calcsize('<16401h')
    #dataformatstring = '<\
    #1023h2x1023h2x1023h2x1023h2x\
    #1023h2x1023h2x1023h2x1023h2x\
    #1023h2x1023h2x1023h2x1023h2x\
    #1023h2x1023h2x1023h2x1023h2x16h2x'
    #print struct.calcsize(dataformatstring)
    dataformatstring = '<16401h'
    rawdata = numpy.array(struct.unpack(dataformatstring,data[514:]))
    detdata = numpy.empty(16384)
    ii=0
    skip=0
    while(ii < 16384):
        # The raw stream carries one extra word every 1022 values
        # (record padding, per the commented-out format above) - skip it.
        if(((ii+skip) %1022)==0):
            skip+=1
        detdata[ii] = I2Decompress(rawdata[ii+skip])
        ii+=1
    #print numpy.shape(detdata)
    detdata.resize(128,128)
    #print type(detdata)
    #print numpy.shape(detdata)
    #print detdata
    return (detdata,metadata)
def I2Decompress(val):
    """Take a 'compressed' I*2 value and convert to I*4.

    Port of IGOR Pro macros by SRK; the VAX Fortran code is the ultimate
    source (RW_DATAFILE.FOR).  Values above the compression threshold are
    stored verbatim; values at or below it encode a base-10
    mantissa/exponent pair.
    """
    base = 10
    ndigits = 4
    threshold = math.pow(base, ndigits)
    if val > -threshold:
        # Not compressed: stored as-is.
        return val
    exponent = trunc(-val / threshold)
    return (-val % threshold) * math.pow(base, exponent)
def arrayI2Decompress(datarray):
    """Apply the I2-to-I4 decompression routine to a whole array, in place.

    Returns the (modified) array for convenience.

    Bug fix: the original looped ``for element in datarray.flat`` and
    rebound the loop variable, which never wrote anything back into the
    array - the function was a no-op.  Writing through the element index
    actually updates the array.
    """
    for index, value in numpy.ndenumerate(datarray):
        datarray[index] = I2Decompress(value)
    return datarray
def trunc(val):
    """Return the integer closest to the supplied value in the direction
    of zero (floor for positives, ceil for negatives)."""
    if val == 0:
        return val
    return math.floor(val) if val > 0 else math.ceil(val)
|
unlicense
|
jjmiranda/edx-platform
|
lms/djangoapps/instructor/services.py
|
15
|
4813
|
"""
Implementation of "Instructor" service
"""
import logging
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey, UsageKey
from commerce.signals import create_zendesk_ticket
from courseware.models import StudentModule
from instructor.views.tools import get_student_from_identifier
from django.core.exceptions import ObjectDoesNotExist
import instructor.enrollment as enrollment
from django.utils.translation import ugettext as _
from xmodule.modulestore.django import modulestore
from student.roles import CourseStaffRole
from student import auth
log = logging.getLogger(__name__)
class InstructorService(object):
    """
    Instructor service for deleting the students attempt(s) of an exam. This service has been created
    for the edx_proctoring's dependency injection to cater for a requirement where edx_proctoring
    needs to call into edx-platform's functions to delete the students' existing answers, grades
    and attempt counts if there had been an earlier attempt.
    """
    def delete_student_attempt(self, student_identifier, course_id, content_id, requesting_user):
        """
        Deletes student state for a problem. requesting_user may be kept as an audit trail.

        Takes some of the following query parameters
            - student_identifier is an email or username
            - content_id is a url-name of a problem
            - course_id is the id for the course

        All failures are logged and swallowed (the method returns None
        either way) so the proctoring caller is never interrupted.
        """
        course_id = CourseKey.from_string(course_id)
        try:
            student = get_student_from_identifier(student_identifier)
        except ObjectDoesNotExist:
            err_msg = (
                'Error occurred while attempting to reset student attempts for user '
                '{student_identifier} for content_id {content_id}. '
                'User does not exist!'.format(
                    student_identifier=student_identifier,
                    content_id=content_id
                )
            )
            log.error(err_msg)
            return
        try:
            module_state_key = UsageKey.from_string(content_id)
        except InvalidKeyError:
            err_msg = (
                'Invalid content_id {content_id}!'.format(content_id=content_id)
            )
            log.error(err_msg)
            return
        if student:
            try:
                # delete_module=True removes the student state entirely
                # rather than just zeroing the attempt count.
                enrollment.reset_student_attempts(
                    course_id,
                    student,
                    module_state_key,
                    requesting_user=requesting_user,
                    delete_module=True,
                )
            except (StudentModule.DoesNotExist, enrollment.sub_api.SubmissionError):
                err_msg = (
                    'Error occurred while attempting to reset student attempts for user '
                    '{student_identifier} for content_id {content_id}.'.format(
                        student_identifier=student_identifier,
                        content_id=content_id
                    )
                )
                log.error(err_msg)
    def is_course_staff(self, user, course_id):
        """
        Returns True if the user is the course staff
        else Returns False
        """
        return auth.user_has_role(user, CourseStaffRole(CourseKey.from_string(course_id)))
    def send_support_notification(self, course_id, exam_name, student_username, review_status):
        """
        Creates a Zendesk ticket for an exam attempt review from the proctoring system.

        Currently, it sends notifications for 'Suspicious" status, but additional statuses can be supported
        by adding to the notify_support_for_status list in edx_proctoring/backends/software_secure.py

        The notifications can be disabled by disabling the
        "Create Zendesk Tickets For Suspicious Proctored Exam Attempts" setting in the course's Advanced settings.
        """
        course_key = CourseKey.from_string(course_id)
        course = modulestore().get_course(course_key)
        if course.create_zendesk_tickets:
            requester_name = "edx-proctoring"
            email = "edx-proctoring@edx.org"
            subject = _("Proctored Exam Review: {review_status}").format(review_status=review_status)
            body = _(
                "A proctored exam attempt for {exam_name} in {course_name} by username: {student_username} "
                "was reviewed as {review_status} by the proctored exam review provider."
            ).format(
                exam_name=exam_name,
                course_name=course.display_name,
                student_username=student_username,
                review_status=review_status
            )
            tags = ["proctoring"]
            create_zendesk_ticket(requester_name, email, subject, body, tags)
|
agpl-3.0
|
vmax-feihu/hue
|
desktop/core/ext-py/pysaml2-2.4.0/src/xmldsig/__init__.py
|
32
|
63554
|
#!/usr/bin/env python
#
# Generated Mon May 2 14:23:33 2011 by parse_xsd.py version 0.4.
#
import saml2
from saml2 import SamlBase
# XML Digital Signature namespace and algorithm identifier URIs.
NAMESPACE = 'http://www.w3.org/2000/09/xmldsig#'
ENCODING_BASE64 = 'http://www.w3.org/2000/09/xmldsig#base64'
# digest and signature algorithms (not implemented = commented out)
DIGEST_MD5 = 'http://www.w3.org/2001/04/xmldsig-more#md5'  # test framework only!
DIGEST_SHA1 = 'http://www.w3.org/2000/09/xmldsig#sha1'
DIGEST_SHA224 = 'http://www.w3.org/2001/04/xmldsig-more#sha224'
DIGEST_SHA256 = 'http://www.w3.org/2001/04/xmlenc#sha256'
DIGEST_SHA384 = 'http://www.w3.org/2001/04/xmldsig-more#sha384'
DIGEST_SHA512 = 'http://www.w3.org/2001/04/xmlenc#sha512'
DIGEST_RIPEMD160 = 'http://www.w3.org/2001/04/xmlenc#ripemd160'
digest_default = DIGEST_SHA1
# ALLOWED = accepted for verification; AVAIL additionally includes MD5,
# which is kept only for the test framework.
DIGEST_ALLOWED_ALG = (('DIGEST_SHA1', DIGEST_SHA1),
                      ('DIGEST_SHA224', DIGEST_SHA224),
                      ('DIGEST_SHA256', DIGEST_SHA256),
                      ('DIGEST_SHA384', DIGEST_SHA384),
                      ('DIGEST_SHA512', DIGEST_SHA512),
                      ('DIGEST_RIPEMD160', DIGEST_RIPEMD160))
DIGEST_AVAIL_ALG = DIGEST_ALLOWED_ALG + (('DIGEST_MD5', DIGEST_MD5), )
#SIG_DSA_SHA1 = 'http,//www.w3.org/2000/09/xmldsig#dsa-sha1'
#SIG_DSA_SHA256 = 'http://www.w3.org/2009/xmldsig11#dsa-sha256'
#SIG_ECDSA_SHA1 = 'http://www.w3.org/2001/04/xmldsig-more#ECDSA_sha1'
#SIG_ECDSA_SHA224 = 'http://www.w3.org/2001/04/xmldsig-more#ECDSA_sha224'
#SIG_ECDSA_SHA256 = 'http://www.w3.org/2001/04/xmldsig-more#ECDSA_sha256'
#SIG_ECDSA_SHA384 = 'http://www.w3.org/2001/04/xmldsig-more#ECDSA_sha384'
#SIG_ECDSA_SHA512 = 'http://www.w3.org/2001/04/xmldsig-more#ECDSA_sha512'
SIG_RSA_MD5 = 'http://www.w3.org/2001/04/xmldsig-more#rsa-md5'  # test framework
SIG_RSA_SHA1 = 'http://www.w3.org/2000/09/xmldsig#rsa-sha1'
SIG_RSA_SHA224 = 'http://www.w3.org/2001/04/xmldsig-more#rsa-sha224'
SIG_RSA_SHA256 = 'http://www.w3.org/2001/04/xmldsig-more#rsa-sha256'
SIG_RSA_SHA384 = 'http://www.w3.org/2001/04/xmldsig-more#rsa-sha384'
SIG_RSA_SHA512 = 'http://www.w3.org/2001/04/xmldsig-more#rsa-sha512'
#SIG_RSA_RIPEMD160 = 'http://www.w3.org/2001/04/xmldsig-more#rsa-ripemd160'
sig_default = SIG_RSA_SHA1
SIG_ALLOWED_ALG = (('SIG_RSA_SHA1', SIG_RSA_SHA1),
                   ('SIG_RSA_SHA224', SIG_RSA_SHA224),
                   ('SIG_RSA_SHA256', SIG_RSA_SHA256),
                   ('SIG_RSA_SHA384', SIG_RSA_SHA384),
                   ('SIG_RSA_SHA512', SIG_RSA_SHA512))
SIG_AVAIL_ALG = SIG_ALLOWED_ALG + (('SIG_RSA_MD5', SIG_RSA_MD5), )
MAC_SHA1 = 'http://www.w3.org/2000/09/xmldsig#hmac-sha1'
# Canonicalization and transform algorithm URIs.
C14N = 'http://www.w3.org/TR/2001/REC-xml-c14n-20010315'
C14N_WITH_C = 'http://www.w3.org/TR/2001/REC-xml-c14n-20010315#WithComments'
ALG_EXC_C14N = 'http://www.w3.org/2001/10/xml-exc-c14n#'
TRANSFORM_XSLT = 'http://www.w3.org/TR/1999/REC-xslt-19991116'
TRANSFORM_XPATH = 'http://www.w3.org/TR/1999/REC-xpath-19991116'
TRANSFORM_ENVELOPED = 'http://www.w3.org/2000/09/xmldsig#enveloped-signature'
# Base64-encoded big-integer value type; base for all binary key-material
# elements below (P, Q, G, Modulus, Exponent, ...).
class CryptoBinary_(SamlBase):
    """The http://www.w3.org/2000/09/xmldsig#:CryptoBinary element """
    c_tag = 'CryptoBinary'
    c_namespace = NAMESPACE
    c_value_type = {'base': 'base64Binary'}
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()

def crypto_binary__from_string(xml_string):
    # Factory: deserialize a CryptoBinary_ element from an XML string.
    return saml2.create_class_from_xml_string(CryptoBinary_, xml_string)
class SignatureValueType_(SamlBase):
    """The http://www.w3.org/2000/09/xmldsig#:SignatureValueType element """
    c_tag = 'SignatureValueType'
    c_namespace = NAMESPACE
    c_value_type = {'base': 'base64Binary'}
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    # Optional XML 'Id' attribute, mapped to the 'id' instance attribute.
    c_attributes['Id'] = ('id', 'ID', False)

    def __init__(self,
                 id=None,
                 text=None,
                 extension_elements=None,
                 extension_attributes=None,
                 ):
        SamlBase.__init__(self,
                          text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes,
                          )
        self.id = id

def signature_value_type__from_string(xml_string):
    # Factory: deserialize a SignatureValueType_ element from an XML string.
    return saml2.create_class_from_xml_string(SignatureValueType_, xml_string)
class CanonicalizationMethodType_(SamlBase):
    """The http://www.w3.org/2000/09/xmldsig#:CanonicalizationMethodType element """
    c_tag = 'CanonicalizationMethodType'
    c_namespace = NAMESPACE
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    # Required 'Algorithm' attribute (anyURI): the canonicalization algorithm.
    c_attributes['Algorithm'] = ('algorithm', 'anyURI', True)

    def __init__(self,
                 algorithm=None,
                 text=None,
                 extension_elements=None,
                 extension_attributes=None,
                 ):
        SamlBase.__init__(self,
                          text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes,
                          )
        self.algorithm = algorithm

def canonicalization_method_type__from_string(xml_string):
    # Factory: deserialize a CanonicalizationMethodType_ from an XML string.
    return saml2.create_class_from_xml_string(CanonicalizationMethodType_,
                                              xml_string)
# String-valued XPath child element of a Transform.
class TransformType_XPath(SamlBase):
    c_tag = 'XPath'
    c_namespace = NAMESPACE
    c_value_type = {'base': 'string'}
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()

def transform_type__x_path_from_string(xml_string):
    # Factory: deserialize a TransformType_XPath element from an XML string.
    return saml2.create_class_from_xml_string(TransformType_XPath, xml_string)
class TransformType_(SamlBase):
    """The http://www.w3.org/2000/09/xmldsig#:TransformType element """
    c_tag = 'TransformType'
    c_namespace = NAMESPACE
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    # Zero or more XPath children (list-valued); required Algorithm attribute.
    c_children['{http://www.w3.org/2000/09/xmldsig#}XPath'] = ('x_path',
                                                               [TransformType_XPath])
    c_cardinality['x_path'] = {"min":0}
    c_attributes['Algorithm'] = ('algorithm', 'anyURI', True)
    c_child_order.extend(['x_path'])

    def __init__(self,
                 x_path=None,
                 algorithm=None,
                 text=None,
                 extension_elements=None,
                 extension_attributes=None,
                 ):
        SamlBase.__init__(self,
                          text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes,
                          )
        # x_path is list-valued; default to a fresh empty list per instance.
        self.x_path = x_path or []
        self.algorithm = algorithm
def transform_type__from_string(xml_string):
    """Build a TransformType_ instance from its XML serialization."""
    target_class = TransformType_
    return saml2.create_class_from_xml_string(target_class, xml_string)
class DigestMethodType_(SamlBase):
    """The http://www.w3.org/2000/09/xmldsig#:DigestMethodType element """
    c_tag = 'DigestMethodType'
    c_namespace = NAMESPACE
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    # Required 'Algorithm' attribute (anyURI): the digest algorithm URI.
    c_attributes['Algorithm'] = ('algorithm', 'anyURI', True)

    def __init__(self,
                 algorithm=None,
                 text=None,
                 extension_elements=None,
                 extension_attributes=None,
                 ):
        SamlBase.__init__(self,
                          text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes,
                          )
        self.algorithm = algorithm
def digest_method_type__from_string(xml_string):
    """Build a DigestMethodType_ instance from its XML serialization."""
    target_class = DigestMethodType_
    return saml2.create_class_from_xml_string(target_class, xml_string)
# Base64-encoded digest value.
class DigestValueType_(SamlBase):
    """The http://www.w3.org/2000/09/xmldsig#:DigestValueType element """
    c_tag = 'DigestValueType'
    c_namespace = NAMESPACE
    c_value_type = {'base': 'base64Binary'}
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()

def digest_value_type__from_string(xml_string):
    # Factory: deserialize a DigestValueType_ element from an XML string.
    return saml2.create_class_from_xml_string(DigestValueType_, xml_string)
class KeyName(SamlBase):
    """The http://www.w3.org/2000/09/xmldsig#:KeyName element """
    c_tag = 'KeyName'
    c_namespace = NAMESPACE
    c_value_type = {'base': 'string'}
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()

def key_name_from_string(xml_string):
    # Factory: deserialize a KeyName element from an XML string.
    return saml2.create_class_from_xml_string(KeyName, xml_string)
class MgmtData(SamlBase):
    """The http://www.w3.org/2000/09/xmldsig#:MgmtData element """
    c_tag = 'MgmtData'
    c_namespace = NAMESPACE
    c_value_type = {'base': 'string'}
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()

def mgmt_data_from_string(xml_string):
    # Factory: deserialize a MgmtData element from an XML string.
    return saml2.create_class_from_xml_string(MgmtData, xml_string)
# Children of X509IssuerSerial: issuer distinguished name and serial number.
class X509IssuerName(SamlBase):
    c_tag = 'X509IssuerName'
    c_namespace = NAMESPACE
    c_value_type = {'base': 'string'}
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()

def x509_issuer_name_from_string(xml_string):
    # Factory: deserialize an X509IssuerName element from an XML string.
    return saml2.create_class_from_xml_string(X509IssuerName, xml_string)

class X509SerialNumber(SamlBase):
    c_tag = 'X509SerialNumber'
    c_namespace = NAMESPACE
    c_value_type = {'base': 'integer'}
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()

def x509_serial_number_from_string(xml_string):
    # Factory: deserialize an X509SerialNumber element from an XML string.
    return saml2.create_class_from_xml_string(X509SerialNumber, xml_string)
class X509IssuerSerialType_(SamlBase):
    """The http://www.w3.org/2000/09/xmldsig#:X509IssuerSerialType element """
    c_tag = 'X509IssuerSerialType'
    c_namespace = NAMESPACE
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    c_children['{http://www.w3.org/2000/09/xmldsig#}X509IssuerName'] = ('x509_issuer_name', X509IssuerName)
    c_children['{http://www.w3.org/2000/09/xmldsig#}X509SerialNumber'] = ('x509_serial_number', X509SerialNumber)
    c_child_order.extend(['x509_issuer_name', 'x509_serial_number'])

    def __init__(self,
                 x509_issuer_name=None,
                 x509_serial_number=None,
                 text=None,
                 extension_elements=None,
                 extension_attributes=None,
                 ):
        SamlBase.__init__(self,
                          text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes,
                          )
        self.x509_issuer_name = x509_issuer_name
        self.x509_serial_number = x509_serial_number

def x509_issuer_serial_type__from_string(xml_string):
    # Factory: deserialize an X509IssuerSerialType_ from an XML string.
    return saml2.create_class_from_xml_string(X509IssuerSerialType_, xml_string)
# Children of PGPData: base64-encoded PGP key id and key packet.
class PGPKeyID(SamlBase):
    c_tag = 'PGPKeyID'
    c_namespace = NAMESPACE
    c_value_type = {'base': 'base64Binary'}
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()

def pgp_key_id_from_string(xml_string):
    # Factory: deserialize a PGPKeyID element from an XML string.
    return saml2.create_class_from_xml_string(PGPKeyID, xml_string)

class PGPKeyPacket(SamlBase):
    c_tag = 'PGPKeyPacket'
    c_namespace = NAMESPACE
    c_value_type = {'base': 'base64Binary'}
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()

def pgp_key_packet_from_string(xml_string):
    # Factory: deserialize a PGPKeyPacket element from an XML string.
    return saml2.create_class_from_xml_string(PGPKeyPacket, xml_string)
class PGPDataType_(SamlBase):
    """The http://www.w3.org/2000/09/xmldsig#:PGPDataType element """
    c_tag = 'PGPDataType'
    c_namespace = NAMESPACE
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    c_children['{http://www.w3.org/2000/09/xmldsig#}PGPKeyID'] = ('pgp_key_id', PGPKeyID)
    c_children['{http://www.w3.org/2000/09/xmldsig#}PGPKeyPacket'] = ('pgp_key_packet', PGPKeyPacket)
    # Key packet is optional (0..1); key id uses the default cardinality.
    c_cardinality['pgp_key_packet'] = {"min":0, "max":1}
    c_child_order.extend(['pgp_key_id', 'pgp_key_packet'])

    def __init__(self,
                 pgp_key_id=None,
                 pgp_key_packet=None,
                 text=None,
                 extension_elements=None,
                 extension_attributes=None,
                 ):
        SamlBase.__init__(self,
                          text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes,
                          )
        self.pgp_key_id = pgp_key_id
        self.pgp_key_packet = pgp_key_packet

def pgp_data_type__from_string(xml_string):
    # Factory: deserialize a PGPDataType_ element from an XML string.
    return saml2.create_class_from_xml_string(PGPDataType_, xml_string)
# SPKI S-expression value and its container type.
class SPKISexp(SamlBase):
    c_tag = 'SPKISexp'
    c_namespace = NAMESPACE
    c_value_type = {'base': 'base64Binary'}
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()

def spki_sexp_from_string(xml_string):
    # Factory: deserialize an SPKISexp element from an XML string.
    return saml2.create_class_from_xml_string(SPKISexp, xml_string)

class SPKIDataType_(SamlBase):
    """The http://www.w3.org/2000/09/xmldsig#:SPKIDataType element """
    c_tag = 'SPKIDataType'
    c_namespace = NAMESPACE
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    # One or more SPKISexp children (list-valued).
    c_children['{http://www.w3.org/2000/09/xmldsig#}SPKISexp'] = ('spki_sexp',
                                                                  [SPKISexp])
    c_cardinality['spki_sexp'] = {"min":1}
    c_child_order.extend(['spki_sexp'])

    def __init__(self,
                 spki_sexp=None,
                 text=None,
                 extension_elements=None,
                 extension_attributes=None,
                 ):
        SamlBase.__init__(self,
                          text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes,
                          )
        # List-valued child; default to a fresh empty list per instance.
        self.spki_sexp = spki_sexp or []

def spki_data_type__from_string(xml_string):
    # Factory: deserialize an SPKIDataType_ element from an XML string.
    return saml2.create_class_from_xml_string(SPKIDataType_, xml_string)
class ObjectType_(SamlBase):
    """The http://www.w3.org/2000/09/xmldsig#:ObjectType element """
    c_tag = 'ObjectType'
    c_namespace = NAMESPACE
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    # All three attributes are optional.
    c_attributes['Id'] = ('id', 'ID', False)
    c_attributes['MimeType'] = ('mime_type', 'string', False)
    c_attributes['Encoding'] = ('encoding', 'anyURI', False)

    def __init__(self,
                 id=None,
                 mime_type=None,
                 encoding=None,
                 text=None,
                 extension_elements=None,
                 extension_attributes=None,
                 ):
        SamlBase.__init__(self,
                          text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes,
                          )
        self.id = id
        self.mime_type = mime_type
        self.encoding = encoding

def object_type__from_string(xml_string):
    # Factory: deserialize an ObjectType_ element from an XML string.
    return saml2.create_class_from_xml_string(ObjectType_, xml_string)
class SignaturePropertyType_(SamlBase):
    """The http://www.w3.org/2000/09/xmldsig#:SignaturePropertyType element """
    c_tag = 'SignaturePropertyType'
    c_namespace = NAMESPACE
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    # 'Target' (the signed item this property applies to) is required.
    c_attributes['Target'] = ('target', 'anyURI', True)
    c_attributes['Id'] = ('id', 'ID', False)

    def __init__(self,
                 target=None,
                 id=None,
                 text=None,
                 extension_elements=None,
                 extension_attributes=None,
                 ):
        SamlBase.__init__(self,
                          text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes,
                          )
        self.target = target
        self.id = id

def signature_property_type__from_string(xml_string):
    # Factory: deserialize a SignaturePropertyType_ from an XML string.
    return saml2.create_class_from_xml_string(SignaturePropertyType_, xml_string)
# Integer-valued HMAC truncation length.
class HMACOutputLengthType_(SamlBase):
    """The http://www.w3.org/2000/09/xmldsig#:HMACOutputLengthType element """
    c_tag = 'HMACOutputLengthType'
    c_namespace = NAMESPACE
    c_value_type = {'base': 'integer'}
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()

def hmac_output_length_type__from_string(xml_string):
    # Factory: deserialize an HMACOutputLengthType_ from an XML string.
    return saml2.create_class_from_xml_string(HMACOutputLengthType_, xml_string)
# DSA key parameter elements (P, Q, G, Y, J, Seed, PgenCounter).  Each is a
# plain CryptoBinary_ (base64Binary) value with no extra children/attributes.
class P(CryptoBinary_):
    c_tag = 'P'
    c_namespace = NAMESPACE
    c_children = CryptoBinary_.c_children.copy()
    c_attributes = CryptoBinary_.c_attributes.copy()
    c_child_order = CryptoBinary_.c_child_order[:]
    c_cardinality = CryptoBinary_.c_cardinality.copy()

def p_from_string(xml_string):
    return saml2.create_class_from_xml_string(P, xml_string)

class Q(CryptoBinary_):
    c_tag = 'Q'
    c_namespace = NAMESPACE
    c_children = CryptoBinary_.c_children.copy()
    c_attributes = CryptoBinary_.c_attributes.copy()
    c_child_order = CryptoBinary_.c_child_order[:]
    c_cardinality = CryptoBinary_.c_cardinality.copy()

def q_from_string(xml_string):
    return saml2.create_class_from_xml_string(Q, xml_string)

class G(CryptoBinary_):
    c_tag = 'G'
    c_namespace = NAMESPACE
    c_children = CryptoBinary_.c_children.copy()
    c_attributes = CryptoBinary_.c_attributes.copy()
    c_child_order = CryptoBinary_.c_child_order[:]
    c_cardinality = CryptoBinary_.c_cardinality.copy()

def g_from_string(xml_string):
    return saml2.create_class_from_xml_string(G, xml_string)

class Y(CryptoBinary_):
    c_tag = 'Y'
    c_namespace = NAMESPACE
    c_children = CryptoBinary_.c_children.copy()
    c_attributes = CryptoBinary_.c_attributes.copy()
    c_child_order = CryptoBinary_.c_child_order[:]
    c_cardinality = CryptoBinary_.c_cardinality.copy()

def y_from_string(xml_string):
    return saml2.create_class_from_xml_string(Y, xml_string)

class J(CryptoBinary_):
    c_tag = 'J'
    c_namespace = NAMESPACE
    c_children = CryptoBinary_.c_children.copy()
    c_attributes = CryptoBinary_.c_attributes.copy()
    c_child_order = CryptoBinary_.c_child_order[:]
    c_cardinality = CryptoBinary_.c_cardinality.copy()

def j_from_string(xml_string):
    return saml2.create_class_from_xml_string(J, xml_string)

class Seed(CryptoBinary_):
    c_tag = 'Seed'
    c_namespace = NAMESPACE
    c_children = CryptoBinary_.c_children.copy()
    c_attributes = CryptoBinary_.c_attributes.copy()
    c_child_order = CryptoBinary_.c_child_order[:]
    c_cardinality = CryptoBinary_.c_cardinality.copy()

def seed_from_string(xml_string):
    return saml2.create_class_from_xml_string(Seed, xml_string)

class PgenCounter(CryptoBinary_):
    c_tag = 'PgenCounter'
    c_namespace = NAMESPACE
    c_children = CryptoBinary_.c_children.copy()
    c_attributes = CryptoBinary_.c_attributes.copy()
    c_child_order = CryptoBinary_.c_child_order[:]
    c_cardinality = CryptoBinary_.c_cardinality.copy()

def pgen_counter_from_string(xml_string):
    return saml2.create_class_from_xml_string(PgenCounter, xml_string)
class DSAKeyValueType_(SamlBase):
    """The http://www.w3.org/2000/09/xmldsig#:DSAKeyValueType element """
    c_tag = 'DSAKeyValueType'
    c_namespace = NAMESPACE
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    # All parameters except Y are optional (0..1) in the schema.
    c_children['{http://www.w3.org/2000/09/xmldsig#}P'] = ('p', P)
    c_cardinality['p'] = {"min":0, "max":1}
    c_children['{http://www.w3.org/2000/09/xmldsig#}Q'] = ('q', Q)
    c_cardinality['q'] = {"min":0, "max":1}
    c_children['{http://www.w3.org/2000/09/xmldsig#}G'] = ('g', G)
    c_cardinality['g'] = {"min":0, "max":1}
    c_children['{http://www.w3.org/2000/09/xmldsig#}Y'] = ('y', Y)
    c_children['{http://www.w3.org/2000/09/xmldsig#}J'] = ('j', J)
    c_cardinality['j'] = {"min":0, "max":1}
    c_children['{http://www.w3.org/2000/09/xmldsig#}Seed'] = ('seed', Seed)
    c_cardinality['seed'] = {"min":0, "max":1}
    c_children['{http://www.w3.org/2000/09/xmldsig#}PgenCounter'] = ('pgen_counter',
                                                                     PgenCounter)
    c_cardinality['pgen_counter'] = {"min":0, "max":1}
    c_child_order.extend(['p', 'q', 'g', 'y', 'j', 'seed', 'pgen_counter'])

    def __init__(self,
                 p=None,
                 q=None,
                 g=None,
                 y=None,
                 j=None,
                 seed=None,
                 pgen_counter=None,
                 text=None,
                 extension_elements=None,
                 extension_attributes=None,
                 ):
        SamlBase.__init__(self,
                          text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes,
                          )
        self.p = p
        self.q = q
        self.g = g
        self.y = y
        self.j = j
        self.seed = seed
        self.pgen_counter = pgen_counter

def dsa_key_value_type__from_string(xml_string):
    # Factory: deserialize a DSAKeyValueType_ element from an XML string.
    return saml2.create_class_from_xml_string(DSAKeyValueType_, xml_string)
# RSA key components: modulus and public exponent (base64Binary values).
class Modulus(CryptoBinary_):
    c_tag = 'Modulus'
    c_namespace = NAMESPACE
    c_children = CryptoBinary_.c_children.copy()
    c_attributes = CryptoBinary_.c_attributes.copy()
    c_child_order = CryptoBinary_.c_child_order[:]
    c_cardinality = CryptoBinary_.c_cardinality.copy()

def modulus_from_string(xml_string):
    # Factory: deserialize a Modulus element from an XML string.
    return saml2.create_class_from_xml_string(Modulus, xml_string)

class Exponent(CryptoBinary_):
    c_tag = 'Exponent'
    c_namespace = NAMESPACE
    c_children = CryptoBinary_.c_children.copy()
    c_attributes = CryptoBinary_.c_attributes.copy()
    c_child_order = CryptoBinary_.c_child_order[:]
    c_cardinality = CryptoBinary_.c_cardinality.copy()

def exponent_from_string(xml_string):
    # Factory: deserialize an Exponent element from an XML string.
    return saml2.create_class_from_xml_string(Exponent, xml_string)
class RSAKeyValueType_(SamlBase):
    """The http://www.w3.org/2000/09/xmldsig#:RSAKeyValueType element """
    c_tag = 'RSAKeyValueType'
    c_namespace = NAMESPACE
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    c_children['{http://www.w3.org/2000/09/xmldsig#}Modulus'] = ('modulus',
                                                                 Modulus)
    c_children['{http://www.w3.org/2000/09/xmldsig#}Exponent'] = ('exponent',
                                                                  Exponent)
    c_child_order.extend(['modulus', 'exponent'])

    def __init__(self,
                 modulus=None,
                 exponent=None,
                 text=None,
                 extension_elements=None,
                 extension_attributes=None,
                 ):
        SamlBase.__init__(self,
                          text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes,
                          )
        self.modulus = modulus
        self.exponent = exponent

def rsa_key_value_type__from_string(xml_string):
    # Factory: deserialize an RSAKeyValueType_ element from an XML string.
    return saml2.create_class_from_xml_string(RSAKeyValueType_, xml_string)
# Concrete element classes: thin subclasses binding a schema type to its
# element tag name.
class SignatureValue(SignatureValueType_):
    """The http://www.w3.org/2000/09/xmldsig#:SignatureValue element """
    c_tag = 'SignatureValue'
    c_namespace = NAMESPACE
    c_children = SignatureValueType_.c_children.copy()
    c_attributes = SignatureValueType_.c_attributes.copy()
    c_child_order = SignatureValueType_.c_child_order[:]
    c_cardinality = SignatureValueType_.c_cardinality.copy()

def signature_value_from_string(xml_string):
    return saml2.create_class_from_xml_string(SignatureValue, xml_string)

class CanonicalizationMethod(CanonicalizationMethodType_):
    """The http://www.w3.org/2000/09/xmldsig#:CanonicalizationMethod element """
    c_tag = 'CanonicalizationMethod'
    c_namespace = NAMESPACE
    c_children = CanonicalizationMethodType_.c_children.copy()
    c_attributes = CanonicalizationMethodType_.c_attributes.copy()
    c_child_order = CanonicalizationMethodType_.c_child_order[:]
    c_cardinality = CanonicalizationMethodType_.c_cardinality.copy()

def canonicalization_method_from_string(xml_string):
    return saml2.create_class_from_xml_string(CanonicalizationMethod,
                                              xml_string)

class HMACOutputLength(HMACOutputLengthType_):
    c_tag = 'HMACOutputLength'
    c_namespace = NAMESPACE
    c_children = HMACOutputLengthType_.c_children.copy()
    c_attributes = HMACOutputLengthType_.c_attributes.copy()
    c_child_order = HMACOutputLengthType_.c_child_order[:]
    c_cardinality = HMACOutputLengthType_.c_cardinality.copy()

def hmac_output_length_from_string(xml_string):
    return saml2.create_class_from_xml_string(HMACOutputLength, xml_string)
class SignatureMethodType_(SamlBase):
    """The http://www.w3.org/2000/09/xmldsig#:SignatureMethodType element """
    c_tag = 'SignatureMethodType'
    c_namespace = NAMESPACE
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    # Optional HMACOutputLength child; required Algorithm attribute.
    c_children['{http://www.w3.org/2000/09/xmldsig#}HMACOutputLength'] = ('hmac_output_length', HMACOutputLength)
    c_cardinality['hmac_output_length'] = {"min":0, "max":1}
    c_attributes['Algorithm'] = ('algorithm', 'anyURI', True)
    c_child_order.extend(['hmac_output_length'])

    def __init__(self,
                 hmac_output_length=None,
                 algorithm=None,
                 text=None,
                 extension_elements=None,
                 extension_attributes=None,
                 ):
        SamlBase.__init__(self,
                          text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes,
                          )
        self.hmac_output_length = hmac_output_length
        self.algorithm = algorithm

def signature_method_type__from_string(xml_string):
    # Factory: deserialize a SignatureMethodType_ element from an XML string.
    return saml2.create_class_from_xml_string(SignatureMethodType_, xml_string)
# Concrete element classes for Transform/Digest plus the X.509 leaf values
# (SKI, subject name, certificate, CRL) used inside X509Data.
class Transform(TransformType_):
    """The http://www.w3.org/2000/09/xmldsig#:Transform element """
    c_tag = 'Transform'
    c_namespace = NAMESPACE
    c_children = TransformType_.c_children.copy()
    c_attributes = TransformType_.c_attributes.copy()
    c_child_order = TransformType_.c_child_order[:]
    c_cardinality = TransformType_.c_cardinality.copy()

def transform_from_string(xml_string):
    return saml2.create_class_from_xml_string(Transform, xml_string)

class DigestMethod(DigestMethodType_):
    """The http://www.w3.org/2000/09/xmldsig#:DigestMethod element """
    c_tag = 'DigestMethod'
    c_namespace = NAMESPACE
    c_children = DigestMethodType_.c_children.copy()
    c_attributes = DigestMethodType_.c_attributes.copy()
    c_child_order = DigestMethodType_.c_child_order[:]
    c_cardinality = DigestMethodType_.c_cardinality.copy()

def digest_method_from_string(xml_string):
    return saml2.create_class_from_xml_string(DigestMethod, xml_string)

class DigestValue(DigestValueType_):
    """The http://www.w3.org/2000/09/xmldsig#:DigestValue element """
    c_tag = 'DigestValue'
    c_namespace = NAMESPACE
    c_children = DigestValueType_.c_children.copy()
    c_attributes = DigestValueType_.c_attributes.copy()
    c_child_order = DigestValueType_.c_child_order[:]
    c_cardinality = DigestValueType_.c_cardinality.copy()

def digest_value_from_string(xml_string):
    return saml2.create_class_from_xml_string(DigestValue, xml_string)

class X509IssuerSerial(X509IssuerSerialType_):
    c_tag = 'X509IssuerSerial'
    c_namespace = NAMESPACE
    c_children = X509IssuerSerialType_.c_children.copy()
    c_attributes = X509IssuerSerialType_.c_attributes.copy()
    c_child_order = X509IssuerSerialType_.c_child_order[:]
    c_cardinality = X509IssuerSerialType_.c_cardinality.copy()

def x509_issuer_serial_from_string(xml_string):
    return saml2.create_class_from_xml_string(X509IssuerSerial, xml_string)

class X509SKI(SamlBase):
    c_tag = 'X509SKI'
    c_namespace = NAMESPACE
    c_value_type = {'base': 'base64Binary'}
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()

def x509_ski_from_string(xml_string):
    return saml2.create_class_from_xml_string(X509SKI, xml_string)

class X509SubjectName(SamlBase):
    c_tag = 'X509SubjectName'
    c_namespace = NAMESPACE
    c_value_type = {'base': 'string'}
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()

def x509_subject_name_from_string(xml_string):
    return saml2.create_class_from_xml_string(X509SubjectName, xml_string)

class X509Certificate(SamlBase):
    c_tag = 'X509Certificate'
    c_namespace = NAMESPACE
    c_value_type = {'base': 'base64Binary'}
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()

def x509_certificate_from_string(xml_string):
    return saml2.create_class_from_xml_string(X509Certificate, xml_string)

class X509CRL(SamlBase):
    c_tag = 'X509CRL'
    c_namespace = NAMESPACE
    c_value_type = {'base': 'base64Binary'}
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()

def x509_crl_from_string(xml_string):
    return saml2.create_class_from_xml_string(X509CRL, xml_string)
class X509DataType_(SamlBase):
    """The http://www.w3.org/2000/09/xmldsig#:X509DataType element """
    c_tag = 'X509DataType'
    c_namespace = NAMESPACE
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    # Every child is optional (0..1); an X509Data element carries any mix
    # of certificate-identifying values.
    c_children['{http://www.w3.org/2000/09/xmldsig#}X509IssuerSerial'] = ('x509_issuer_serial',
                                                                          X509IssuerSerial)
    c_cardinality['x509_issuer_serial'] = {"min":0, "max":1}
    c_children['{http://www.w3.org/2000/09/xmldsig#}X509SKI'] = ('x509_ski',
                                                                 X509SKI)
    c_cardinality['x509_ski'] = {"min":0, "max":1}
    c_children['{http://www.w3.org/2000/09/xmldsig#}X509SubjectName'] = ('x509_subject_name',
                                                                         X509SubjectName)
    c_cardinality['x509_subject_name'] = {"min":0, "max":1}
    c_children['{http://www.w3.org/2000/09/xmldsig#}X509Certificate'] = ('x509_certificate',
                                                                         X509Certificate)
    c_cardinality['x509_certificate'] = {"min":0, "max":1}
    c_children['{http://www.w3.org/2000/09/xmldsig#}X509CRL'] = ('x509_crl',
                                                                 X509CRL)
    c_cardinality['x509_crl'] = {"min":0, "max":1}
    c_child_order.extend(['x509_issuer_serial', 'x509_ski', 'x509_subject_name',
                          'x509_certificate', 'x509_crl'])

    def __init__(self,
                 x509_issuer_serial=None,
                 x509_ski=None,
                 x509_subject_name=None,
                 x509_certificate=None,
                 x509_crl=None,
                 text=None,
                 extension_elements=None,
                 extension_attributes=None,
                 ):
        SamlBase.__init__(self,
                          text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes,
                          )
        self.x509_issuer_serial = x509_issuer_serial
        self.x509_ski = x509_ski
        self.x509_subject_name = x509_subject_name
        self.x509_certificate = x509_certificate
        self.x509_crl = x509_crl
def x509_data_type__from_string(xml_string):
    """Build an X509DataType_ instance from its XML serialization."""
    target_class = X509DataType_
    return saml2.create_class_from_xml_string(target_class, xml_string)
# Concrete element classes wrapping the *Type_ classes defined above.
class PGPData(PGPDataType_):
    """The http://www.w3.org/2000/09/xmldsig#:PGPData element """
    c_tag = 'PGPData'
    c_namespace = NAMESPACE
    c_children = PGPDataType_.c_children.copy()
    c_attributes = PGPDataType_.c_attributes.copy()
    c_child_order = PGPDataType_.c_child_order[:]
    c_cardinality = PGPDataType_.c_cardinality.copy()

def pgp_data_from_string(xml_string):
    return saml2.create_class_from_xml_string(PGPData, xml_string)

class SPKIData(SPKIDataType_):
    """The http://www.w3.org/2000/09/xmldsig#:SPKIData element """
    c_tag = 'SPKIData'
    c_namespace = NAMESPACE
    c_children = SPKIDataType_.c_children.copy()
    c_attributes = SPKIDataType_.c_attributes.copy()
    c_child_order = SPKIDataType_.c_child_order[:]
    c_cardinality = SPKIDataType_.c_cardinality.copy()

def spki_data_from_string(xml_string):
    return saml2.create_class_from_xml_string(SPKIData, xml_string)

class Object(ObjectType_):
    """The http://www.w3.org/2000/09/xmldsig#:Object element """
    c_tag = 'Object'
    c_namespace = NAMESPACE
    c_children = ObjectType_.c_children.copy()
    c_attributes = ObjectType_.c_attributes.copy()
    c_child_order = ObjectType_.c_child_order[:]
    c_cardinality = ObjectType_.c_cardinality.copy()

def object_from_string(xml_string):
    return saml2.create_class_from_xml_string(Object, xml_string)

class SignatureProperty(SignaturePropertyType_):
    """The http://www.w3.org/2000/09/xmldsig#:SignatureProperty element """
    c_tag = 'SignatureProperty'
    c_namespace = NAMESPACE
    c_children = SignaturePropertyType_.c_children.copy()
    c_attributes = SignaturePropertyType_.c_attributes.copy()
    c_child_order = SignaturePropertyType_.c_child_order[:]
    c_cardinality = SignaturePropertyType_.c_cardinality.copy()

def signature_property_from_string(xml_string):
    return saml2.create_class_from_xml_string(SignatureProperty, xml_string)

class DSAKeyValue(DSAKeyValueType_):
    """The http://www.w3.org/2000/09/xmldsig#:DSAKeyValue element """
    c_tag = 'DSAKeyValue'
    c_namespace = NAMESPACE
    c_children = DSAKeyValueType_.c_children.copy()
    c_attributes = DSAKeyValueType_.c_attributes.copy()
    c_child_order = DSAKeyValueType_.c_child_order[:]
    c_cardinality = DSAKeyValueType_.c_cardinality.copy()

def dsa_key_value_from_string(xml_string):
    return saml2.create_class_from_xml_string(DSAKeyValue, xml_string)

class RSAKeyValue(RSAKeyValueType_):
    """The http://www.w3.org/2000/09/xmldsig#:RSAKeyValue element """
    c_tag = 'RSAKeyValue'
    c_namespace = NAMESPACE
    c_children = RSAKeyValueType_.c_children.copy()
    c_attributes = RSAKeyValueType_.c_attributes.copy()
    c_child_order = RSAKeyValueType_.c_child_order[:]
    c_cardinality = RSAKeyValueType_.c_cardinality.copy()

def rsa_key_value_from_string(xml_string):
    return saml2.create_class_from_xml_string(RSAKeyValue, xml_string)

class SignatureMethod(SignatureMethodType_):
    """The http://www.w3.org/2000/09/xmldsig#:SignatureMethod element """
    c_tag = 'SignatureMethod'
    c_namespace = NAMESPACE
    c_children = SignatureMethodType_.c_children.copy()
    c_attributes = SignatureMethodType_.c_attributes.copy()
    c_child_order = SignatureMethodType_.c_child_order[:]
    c_cardinality = SignatureMethodType_.c_cardinality.copy()

def signature_method_from_string(xml_string):
    return saml2.create_class_from_xml_string(SignatureMethod, xml_string)
class TransformsType_(SamlBase):
    """The http://www.w3.org/2000/09/xmldsig#:TransformsType element """
    c_tag = 'TransformsType'
    c_namespace = NAMESPACE
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    # One or more Transform children (list-valued).
    c_children['{http://www.w3.org/2000/09/xmldsig#}Transform'] = ('transform',
                                                                   [Transform])
    c_cardinality['transform'] = {"min":1}
    c_child_order.extend(['transform'])

    def __init__(self,
                 transform=None,
                 text=None,
                 extension_elements=None,
                 extension_attributes=None,
                 ):
        SamlBase.__init__(self,
                          text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes,
                          )
        # List-valued child; default to a fresh empty list per instance.
        self.transform = transform or []

def transforms_type__from_string(xml_string):
    # Factory: deserialize a TransformsType_ element from an XML string.
    return saml2.create_class_from_xml_string(TransformsType_, xml_string)
class KeyValueType_(SamlBase):
    """ds:KeyValueType: holds at most one DSAKeyValue and at most one
    RSAKeyValue child."""
    c_tag = 'KeyValueType'
    c_namespace = NAMESPACE
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    c_children['{http://www.w3.org/2000/09/xmldsig#}DSAKeyValue'] = (
        'dsa_key_value', DSAKeyValue)
    c_cardinality['dsa_key_value'] = {"min": 0, "max": 1}
    c_children['{http://www.w3.org/2000/09/xmldsig#}RSAKeyValue'] = (
        'rsa_key_value', RSAKeyValue)
    c_cardinality['rsa_key_value'] = {"min": 0, "max": 1}
    c_child_order.extend(['dsa_key_value', 'rsa_key_value'])

    def __init__(self, dsa_key_value=None, rsa_key_value=None, text=None,
                 extension_elements=None, extension_attributes=None):
        SamlBase.__init__(self, text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes)
        self.dsa_key_value = dsa_key_value
        self.rsa_key_value = rsa_key_value
def key_value_type__from_string(xml_string):
    """Deserialize a ds:KeyValueType element from *xml_string*."""
    return saml2.create_class_from_xml_string(KeyValueType_, xml_string)
class X509Data(X509DataType_):
    """Concrete ds:X509Data element; schema metadata is copied from
    X509DataType_."""
    c_tag = 'X509Data'
    c_namespace = NAMESPACE
    c_cardinality = X509DataType_.c_cardinality.copy()
    c_child_order = X509DataType_.c_child_order[:]
    c_attributes = X509DataType_.c_attributes.copy()
    c_children = X509DataType_.c_children.copy()
def x509_data_from_string(xml_string):
    """Deserialize a ds:X509Data element from *xml_string*."""
    return saml2.create_class_from_xml_string(X509Data, xml_string)
class SignaturePropertiesType_(SamlBase):
    """ds:SignaturePropertiesType: one or more SignatureProperty children
    and an optional Id attribute."""
    c_tag = 'SignaturePropertiesType'
    c_namespace = NAMESPACE
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    c_children['{http://www.w3.org/2000/09/xmldsig#}SignatureProperty'] = (
        'signature_property', [SignatureProperty])
    c_cardinality['signature_property'] = {"min": 1}
    c_attributes['Id'] = ('id', 'ID', False)
    c_child_order.extend(['signature_property'])

    def __init__(self, signature_property=None, id=None, text=None,
                 extension_elements=None, extension_attributes=None):
        SamlBase.__init__(self, text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes)
        # Repeatable child defaults to a fresh empty list.
        self.signature_property = signature_property or []
        self.id = id
def signature_properties_type__from_string(xml_string):
    """Deserialize a ds:SignaturePropertiesType element from *xml_string*."""
    return saml2.create_class_from_xml_string(SignaturePropertiesType_,
                                              xml_string)
class Transforms(TransformsType_):
    """Concrete ds:Transforms element; schema metadata is copied from
    TransformsType_."""
    c_tag = 'Transforms'
    c_namespace = NAMESPACE
    c_cardinality = TransformsType_.c_cardinality.copy()
    c_child_order = TransformsType_.c_child_order[:]
    c_attributes = TransformsType_.c_attributes.copy()
    c_children = TransformsType_.c_children.copy()
def transforms_from_string(xml_string):
    """Deserialize a ds:Transforms element from *xml_string*."""
    return saml2.create_class_from_xml_string(Transforms, xml_string)
class KeyValue(KeyValueType_):
    """Concrete ds:KeyValue element; schema metadata is copied from
    KeyValueType_."""
    c_tag = 'KeyValue'
    c_namespace = NAMESPACE
    c_cardinality = KeyValueType_.c_cardinality.copy()
    c_child_order = KeyValueType_.c_child_order[:]
    c_attributes = KeyValueType_.c_attributes.copy()
    c_children = KeyValueType_.c_children.copy()
def key_value_from_string(xml_string):
    """Deserialize a ds:KeyValue element from *xml_string*."""
    return saml2.create_class_from_xml_string(KeyValue, xml_string)
class RetrievalMethodType_(SamlBase):
    """ds:RetrievalMethodType: an optional Transforms child plus URI/Type
    attributes locating key data stored elsewhere."""
    c_tag = 'RetrievalMethodType'
    c_namespace = NAMESPACE
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    c_children['{http://www.w3.org/2000/09/xmldsig#}Transforms'] = (
        'transforms', Transforms)
    c_cardinality['transforms'] = {"min": 0, "max": 1}
    c_attributes['URI'] = ('uri', 'anyURI', False)
    c_attributes['Type'] = ('type', 'anyURI', False)
    c_child_order.extend(['transforms'])

    def __init__(self, transforms=None, uri=None, type=None, text=None,
                 extension_elements=None, extension_attributes=None):
        SamlBase.__init__(self, text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes)
        self.transforms = transforms
        self.uri = uri
        self.type = type
def retrieval_method_type__from_string(xml_string):
    """Deserialize a ds:RetrievalMethodType element from *xml_string*."""
    return saml2.create_class_from_xml_string(RetrievalMethodType_,
                                              xml_string)
class SignatureProperties(SignaturePropertiesType_):
    """Concrete ds:SignatureProperties element; schema metadata is copied
    from SignaturePropertiesType_."""
    c_tag = 'SignatureProperties'
    c_namespace = NAMESPACE
    c_cardinality = SignaturePropertiesType_.c_cardinality.copy()
    c_child_order = SignaturePropertiesType_.c_child_order[:]
    c_attributes = SignaturePropertiesType_.c_attributes.copy()
    c_children = SignaturePropertiesType_.c_children.copy()
def signature_properties_from_string(xml_string):
    """Deserialize a ds:SignatureProperties element from *xml_string*."""
    return saml2.create_class_from_xml_string(SignatureProperties, xml_string)
class ReferenceType_(SamlBase):
    """ds:ReferenceType: a digest (DigestMethod + DigestValue) over a
    possibly transformed target identified by URI/Type, with optional Id."""
    c_tag = 'ReferenceType'
    c_namespace = NAMESPACE
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    c_children['{http://www.w3.org/2000/09/xmldsig#}Transforms'] = (
        'transforms', Transforms)
    c_cardinality['transforms'] = {"min": 0, "max": 1}
    c_children['{http://www.w3.org/2000/09/xmldsig#}DigestMethod'] = (
        'digest_method', DigestMethod)
    c_children['{http://www.w3.org/2000/09/xmldsig#}DigestValue'] = (
        'digest_value', DigestValue)
    c_attributes['Id'] = ('id', 'ID', False)
    c_attributes['URI'] = ('uri', 'anyURI', False)
    c_attributes['Type'] = ('type', 'anyURI', False)
    c_child_order.extend(['transforms', 'digest_method', 'digest_value'])

    def __init__(self, transforms=None, digest_method=None, digest_value=None,
                 id=None, uri=None, type=None, text=None,
                 extension_elements=None, extension_attributes=None):
        SamlBase.__init__(self, text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes)
        self.transforms = transforms
        self.digest_method = digest_method
        self.digest_value = digest_value
        self.id = id
        self.uri = uri
        self.type = type
def reference_type__from_string(xml_string):
    """Deserialize a ds:ReferenceType element from *xml_string*."""
    return saml2.create_class_from_xml_string(ReferenceType_, xml_string)
class RetrievalMethod(RetrievalMethodType_):
    """Concrete ds:RetrievalMethod element; schema metadata is copied from
    RetrievalMethodType_."""
    c_tag = 'RetrievalMethod'
    c_namespace = NAMESPACE
    c_cardinality = RetrievalMethodType_.c_cardinality.copy()
    c_child_order = RetrievalMethodType_.c_child_order[:]
    c_attributes = RetrievalMethodType_.c_attributes.copy()
    c_children = RetrievalMethodType_.c_children.copy()
def retrieval_method_from_string(xml_string):
    """Deserialize a ds:RetrievalMethod element from *xml_string*."""
    return saml2.create_class_from_xml_string(RetrievalMethod, xml_string)
class Reference(ReferenceType_):
    """Concrete ds:Reference element; schema metadata is copied from
    ReferenceType_."""
    c_tag = 'Reference'
    c_namespace = NAMESPACE
    c_cardinality = ReferenceType_.c_cardinality.copy()
    c_child_order = ReferenceType_.c_child_order[:]
    c_attributes = ReferenceType_.c_attributes.copy()
    c_children = ReferenceType_.c_children.copy()
def reference_from_string(xml_string):
    """Deserialize a ds:Reference element from *xml_string*."""
    return saml2.create_class_from_xml_string(Reference, xml_string)
#import xmlenc as enc
class KeyInfoType_(SamlBase):
    """The http://www.w3.org/2000/09/xmldsig#:KeyInfoType element.

    Aggregates key material: repeatable KeyName, KeyValue, RetrievalMethod,
    X509Data, PGPData, SPKIData and MgmtData children, at most one
    EncryptedKey (from the xmlenc namespace), and an optional Id attribute.
    """
    c_tag = 'KeyInfoType'
    c_namespace = NAMESPACE
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    c_children['{http://www.w3.org/2000/09/xmldsig#}KeyName'] = ('key_name',
        [KeyName])
    c_cardinality['key_name'] = {"min": 0}
    c_children['{http://www.w3.org/2000/09/xmldsig#}KeyValue'] = ('key_value',
        [KeyValue])
    c_cardinality['key_value'] = {"min": 0}
    c_children['{http://www.w3.org/2000/09/xmldsig#}RetrievalMethod'] = (
        'retrieval_method', [RetrievalMethod])
    c_cardinality['retrieval_method'] = {"min": 0}
    c_children['{http://www.w3.org/2000/09/xmldsig#}X509Data'] = ('x509_data',
        [X509Data])
    c_cardinality['x509_data'] = {"min": 0}
    c_children['{http://www.w3.org/2000/09/xmldsig#}PGPData'] = ('pgp_data',
        [PGPData])
    c_cardinality['pgp_data'] = {"min": 0}
    c_children['{http://www.w3.org/2000/09/xmldsig#}SPKIData'] = ('spki_data',
        [SPKIData])
    c_cardinality['spki_data'] = {"min": 0}
    c_children['{http://www.w3.org/2000/09/xmldsig#}MgmtData'] = ('mgmt_data',
        [MgmtData])
    c_cardinality['mgmt_data'] = {"min": 0}
    # EncryptedKey lives in the xmlenc namespace; its class is resolved
    # elsewhere, hence the None placeholder.
    c_children['{http://www.w3.org/2000/09/xmlenc#}EncryptedKey'] = (
        'encrypted_key', None)
    # BUG FIX: the generated code registered this cardinality under the key
    # 'key_info', which is not a child of KeyInfoType_; every other child
    # registration above pairs with its own cardinality key, so this entry
    # belongs to 'encrypted_key'.
    c_cardinality['encrypted_key'] = {"min": 0, "max": 1}
    c_attributes['Id'] = ('id', 'ID', False)
    c_child_order.extend(['key_name', 'key_value', 'retrieval_method',
                          'x509_data', 'pgp_data', 'spki_data', 'mgmt_data',
                          'encrypted_key'])

    def __init__(self,
                 key_name=None,
                 key_value=None,
                 retrieval_method=None,
                 x509_data=None,
                 pgp_data=None,
                 spki_data=None,
                 mgmt_data=None,
                 encrypted_key=None,
                 id=None,
                 text=None,
                 extension_elements=None,
                 extension_attributes=None):
        SamlBase.__init__(self,
                          text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes)
        # Repeatable children default to fresh empty lists (never shared
        # mutable defaults).
        self.key_name = key_name or []
        self.key_value = key_value or []
        self.retrieval_method = retrieval_method or []
        self.x509_data = x509_data or []
        self.pgp_data = pgp_data or []
        self.spki_data = spki_data or []
        self.mgmt_data = mgmt_data or []
        self.encrypted_key = encrypted_key
        self.id = id
def key_info_type__from_string(xml_string):
    """Deserialize a ds:KeyInfoType element from *xml_string*."""
    return saml2.create_class_from_xml_string(KeyInfoType_, xml_string)
class ManifestType_(SamlBase):
    """ds:ManifestType: one or more Reference children and an optional Id
    attribute."""
    c_tag = 'ManifestType'
    c_namespace = NAMESPACE
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    c_children['{http://www.w3.org/2000/09/xmldsig#}Reference'] = (
        'reference', [Reference])
    c_cardinality['reference'] = {"min": 1}
    c_attributes['Id'] = ('id', 'ID', False)
    c_child_order.extend(['reference'])

    def __init__(self, reference=None, id=None, text=None,
                 extension_elements=None, extension_attributes=None):
        SamlBase.__init__(self, text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes)
        # Repeatable child defaults to a fresh empty list.
        self.reference = reference or []
        self.id = id
def manifest_type__from_string(xml_string):
    """Deserialize a ds:ManifestType element from *xml_string*."""
    return saml2.create_class_from_xml_string(ManifestType_, xml_string)
class SignedInfoType_(SamlBase):
    """ds:SignedInfoType: CanonicalizationMethod and SignatureMethod plus
    one or more Reference children, with an optional Id attribute."""
    c_tag = 'SignedInfoType'
    c_namespace = NAMESPACE
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    c_children['{http://www.w3.org/2000/09/xmldsig#}CanonicalizationMethod'] = (
        'canonicalization_method', CanonicalizationMethod)
    c_children['{http://www.w3.org/2000/09/xmldsig#}SignatureMethod'] = (
        'signature_method', SignatureMethod)
    c_children['{http://www.w3.org/2000/09/xmldsig#}Reference'] = (
        'reference', [Reference])
    c_cardinality['reference'] = {"min": 1}
    c_attributes['Id'] = ('id', 'ID', False)
    c_child_order.extend(['canonicalization_method', 'signature_method',
                          'reference'])

    def __init__(self, canonicalization_method=None, signature_method=None,
                 reference=None, id=None, text=None,
                 extension_elements=None, extension_attributes=None):
        SamlBase.__init__(self, text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes)
        self.canonicalization_method = canonicalization_method
        self.signature_method = signature_method
        # Repeatable child defaults to a fresh empty list.
        self.reference = reference or []
        self.id = id
def signed_info_type__from_string(xml_string):
    """Deserialize a ds:SignedInfoType element from *xml_string*."""
    return saml2.create_class_from_xml_string(SignedInfoType_, xml_string)
class KeyInfo(KeyInfoType_):
    """Concrete ds:KeyInfo element; schema metadata is copied from
    KeyInfoType_."""
    c_tag = 'KeyInfo'
    c_namespace = NAMESPACE
    c_cardinality = KeyInfoType_.c_cardinality.copy()
    c_child_order = KeyInfoType_.c_child_order[:]
    c_attributes = KeyInfoType_.c_attributes.copy()
    c_children = KeyInfoType_.c_children.copy()
def key_info_from_string(xml_string):
    """Deserialize a ds:KeyInfo element from *xml_string*."""
    return saml2.create_class_from_xml_string(KeyInfo, xml_string)
class Manifest(ManifestType_):
    """Concrete ds:Manifest element; schema metadata is copied from
    ManifestType_."""
    c_tag = 'Manifest'
    c_namespace = NAMESPACE
    c_cardinality = ManifestType_.c_cardinality.copy()
    c_child_order = ManifestType_.c_child_order[:]
    c_attributes = ManifestType_.c_attributes.copy()
    c_children = ManifestType_.c_children.copy()
def manifest_from_string(xml_string):
    """Deserialize a ds:Manifest element from *xml_string*."""
    return saml2.create_class_from_xml_string(Manifest, xml_string)
class SignedInfo(SignedInfoType_):
    """Concrete ds:SignedInfo element; schema metadata is copied from
    SignedInfoType_."""
    c_tag = 'SignedInfo'
    c_namespace = NAMESPACE
    c_cardinality = SignedInfoType_.c_cardinality.copy()
    c_child_order = SignedInfoType_.c_child_order[:]
    c_attributes = SignedInfoType_.c_attributes.copy()
    c_children = SignedInfoType_.c_children.copy()
def signed_info_from_string(xml_string):
    """Deserialize a ds:SignedInfo element from *xml_string*."""
    return saml2.create_class_from_xml_string(SignedInfo, xml_string)
class SignatureType_(SamlBase):
    """ds:SignatureType: SignedInfo and SignatureValue, an optional KeyInfo,
    any number of Object children, and an optional Id attribute."""
    c_tag = 'SignatureType'
    c_namespace = NAMESPACE
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    c_children['{http://www.w3.org/2000/09/xmldsig#}SignedInfo'] = (
        'signed_info', SignedInfo)
    c_children['{http://www.w3.org/2000/09/xmldsig#}SignatureValue'] = (
        'signature_value', SignatureValue)
    c_children['{http://www.w3.org/2000/09/xmldsig#}KeyInfo'] = (
        'key_info', KeyInfo)
    c_cardinality['key_info'] = {"min": 0, "max": 1}
    c_children['{http://www.w3.org/2000/09/xmldsig#}Object'] = (
        'object', [Object])
    c_cardinality['object'] = {"min": 0}
    c_attributes['Id'] = ('id', 'ID', False)
    c_child_order.extend(['signed_info', 'signature_value', 'key_info',
                          'object'])

    def __init__(self, signed_info=None, signature_value=None, key_info=None,
                 object=None, id=None, text=None,
                 extension_elements=None, extension_attributes=None):
        SamlBase.__init__(self, text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes)
        self.signed_info = signed_info
        self.signature_value = signature_value
        self.key_info = key_info
        # Repeatable child defaults to a fresh empty list.
        self.object = object or []
        self.id = id
def signature_type__from_string(xml_string):
    """Deserialize a ds:SignatureType element from *xml_string*."""
    return saml2.create_class_from_xml_string(SignatureType_, xml_string)
class Signature(SignatureType_):
    """Concrete ds:Signature element; schema metadata is copied from
    SignatureType_."""
    c_tag = 'Signature'
    c_namespace = NAMESPACE
    c_cardinality = SignatureType_.c_cardinality.copy()
    c_child_order = SignatureType_.c_child_order[:]
    c_attributes = SignatureType_.c_attributes.copy()
    c_children = SignatureType_.c_children.copy()
def signature_from_string(xml_string):
    """Deserialize a ds:Signature element from *xml_string*."""
    return saml2.create_class_from_xml_string(Signature, xml_string)
# Maps an element's c_tag to its module-level deserializer function.
ELEMENT_FROM_STRING = {
    CryptoBinary_.c_tag: crypto_binary__from_string,
    Signature.c_tag: signature_from_string,
    SignatureType_.c_tag: signature_type__from_string,
    SignatureValue.c_tag: signature_value_from_string,
    SignatureValueType_.c_tag: signature_value_type__from_string,
    SignedInfo.c_tag: signed_info_from_string,
    SignedInfoType_.c_tag: signed_info_type__from_string,
    CanonicalizationMethod.c_tag: canonicalization_method_from_string,
    CanonicalizationMethodType_.c_tag: canonicalization_method_type__from_string,
    SignatureMethod.c_tag: signature_method_from_string,
    SignatureMethodType_.c_tag: signature_method_type__from_string,
    Reference.c_tag: reference_from_string,
    ReferenceType_.c_tag: reference_type__from_string,
    Transforms.c_tag: transforms_from_string,
    TransformsType_.c_tag: transforms_type__from_string,
    Transform.c_tag: transform_from_string,
    TransformType_.c_tag: transform_type__from_string,
    DigestMethod.c_tag: digest_method_from_string,
    DigestMethodType_.c_tag: digest_method_type__from_string,
    DigestValue.c_tag: digest_value_from_string,
    DigestValueType_.c_tag: digest_value_type__from_string,
    KeyInfo.c_tag: key_info_from_string,
    KeyInfoType_.c_tag: key_info_type__from_string,
    KeyName.c_tag: key_name_from_string,
    MgmtData.c_tag: mgmt_data_from_string,
    KeyValue.c_tag: key_value_from_string,
    KeyValueType_.c_tag: key_value_type__from_string,
    RetrievalMethod.c_tag: retrieval_method_from_string,
    RetrievalMethodType_.c_tag: retrieval_method_type__from_string,
    X509Data.c_tag: x509_data_from_string,
    X509DataType_.c_tag: x509_data_type__from_string,
    X509IssuerSerialType_.c_tag: x509_issuer_serial_type__from_string,
    PGPData.c_tag: pgp_data_from_string,
    PGPDataType_.c_tag: pgp_data_type__from_string,
    SPKIData.c_tag: spki_data_from_string,
    SPKIDataType_.c_tag: spki_data_type__from_string,
    Object.c_tag: object_from_string,
    ObjectType_.c_tag: object_type__from_string,
    Manifest.c_tag: manifest_from_string,
    ManifestType_.c_tag: manifest_type__from_string,
    SignatureProperties.c_tag: signature_properties_from_string,
    SignaturePropertiesType_.c_tag: signature_properties_type__from_string,
    SignatureProperty.c_tag: signature_property_from_string,
    SignaturePropertyType_.c_tag: signature_property_type__from_string,
    HMACOutputLengthType_.c_tag: hmac_output_length_type__from_string,
    DSAKeyValue.c_tag: dsa_key_value_from_string,
    DSAKeyValueType_.c_tag: dsa_key_value_type__from_string,
    RSAKeyValue.c_tag: rsa_key_value_from_string,
    RSAKeyValueType_.c_tag: rsa_key_value_type__from_string,
    TransformType_XPath.c_tag: transform_type__x_path_from_string,
    X509IssuerName.c_tag: x509_issuer_name_from_string,
    X509SerialNumber.c_tag: x509_serial_number_from_string,
    PGPKeyID.c_tag: pgp_key_id_from_string,
    PGPKeyPacket.c_tag: pgp_key_packet_from_string,
    SPKISexp.c_tag: spki_sexp_from_string,
    P.c_tag: p_from_string,
    Q.c_tag: q_from_string,
    G.c_tag: g_from_string,
    Y.c_tag: y_from_string,
    J.c_tag: j_from_string,
    Seed.c_tag: seed_from_string,
    PgenCounter.c_tag: pgen_counter_from_string,
    Modulus.c_tag: modulus_from_string,
    Exponent.c_tag: exponent_from_string,
    HMACOutputLength.c_tag: hmac_output_length_from_string,
    X509IssuerSerial.c_tag: x509_issuer_serial_from_string,
    X509SKI.c_tag: x509_ski_from_string,
    X509SubjectName.c_tag: x509_subject_name_from_string,
    X509Certificate.c_tag: x509_certificate_from_string,
    X509CRL.c_tag: x509_crl_from_string,
}
# Maps an element tag name to its implementing class (used by factory()).
ELEMENT_BY_TAG = {
    'CryptoBinary': CryptoBinary_,
    'Signature': Signature,
    'SignatureType': SignatureType_,
    'SignatureValue': SignatureValue,
    'SignatureValueType': SignatureValueType_,
    'SignedInfo': SignedInfo,
    'SignedInfoType': SignedInfoType_,
    'CanonicalizationMethod': CanonicalizationMethod,
    'CanonicalizationMethodType': CanonicalizationMethodType_,
    'SignatureMethod': SignatureMethod,
    'SignatureMethodType': SignatureMethodType_,
    'Reference': Reference,
    'ReferenceType': ReferenceType_,
    'Transforms': Transforms,
    'TransformsType': TransformsType_,
    'Transform': Transform,
    'TransformType': TransformType_,
    'DigestMethod': DigestMethod,
    'DigestMethodType': DigestMethodType_,
    'DigestValue': DigestValue,
    'DigestValueType': DigestValueType_,
    'KeyInfo': KeyInfo,
    'KeyInfoType': KeyInfoType_,
    'KeyName': KeyName,
    'MgmtData': MgmtData,
    'KeyValue': KeyValue,
    'KeyValueType': KeyValueType_,
    'RetrievalMethod': RetrievalMethod,
    'RetrievalMethodType': RetrievalMethodType_,
    'X509Data': X509Data,
    'X509DataType': X509DataType_,
    'X509IssuerSerialType': X509IssuerSerialType_,
    'PGPData': PGPData,
    'PGPDataType': PGPDataType_,
    'SPKIData': SPKIData,
    'SPKIDataType': SPKIDataType_,
    'Object': Object,
    'ObjectType': ObjectType_,
    'Manifest': Manifest,
    'ManifestType': ManifestType_,
    'SignatureProperties': SignatureProperties,
    'SignaturePropertiesType': SignaturePropertiesType_,
    'SignatureProperty': SignatureProperty,
    'SignaturePropertyType': SignaturePropertyType_,
    'HMACOutputLengthType': HMACOutputLengthType_,
    'DSAKeyValue': DSAKeyValue,
    'DSAKeyValueType': DSAKeyValueType_,
    'RSAKeyValue': RSAKeyValue,
    'RSAKeyValueType': RSAKeyValueType_,
    'XPath': TransformType_XPath,
    'X509IssuerName': X509IssuerName,
    'X509SerialNumber': X509SerialNumber,
    'PGPKeyID': PGPKeyID,
    'PGPKeyPacket': PGPKeyPacket,
    'SPKISexp': SPKISexp,
    'P': P,
    'Q': Q,
    'G': G,
    'Y': Y,
    'J': J,
    'Seed': Seed,
    'PgenCounter': PgenCounter,
    'Modulus': Modulus,
    'Exponent': Exponent,
    'HMACOutputLength': HMACOutputLength,
    'X509IssuerSerial': X509IssuerSerial,
    'X509SKI': X509SKI,
    'X509SubjectName': X509SubjectName,
    'X509Certificate': X509Certificate,
    'X509CRL': X509CRL,
}
def factory(tag, **kwargs):
    """Instantiate the element class registered under *tag*.

    Raises KeyError for an unknown tag.
    """
    element_class = ELEMENT_BY_TAG[tag]
    return element_class(**kwargs)
|
apache-2.0
|
chauhanmohit/phantomjs
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla.py
|
117
|
39195
|
# Copyright (c) 2011 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
# Copyright (c) 2010 Research In Motion Limited. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# WebKit's Python module for interacting with Bugzilla
import logging
import mimetypes
import re
import StringIO
import socket
import urllib
from datetime import datetime # used in timestamp()
from .attachment import Attachment
from .bug import Bug
from webkitpy.common.config import committers
import webkitpy.common.config.urls as config_urls
from webkitpy.common.net.credentials import Credentials
from webkitpy.common.system.user import User
from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup, BeautifulStoneSoup, SoupStrainer
_log = logging.getLogger(__name__)
class EditUsersParser(object):
    """Screen-scrapes Bugzilla's editusers.cgi pages (user lists and the
    per-user edit form) into plain Python data structures.

    NOTE(review): Python 2 code relying on unicode() and a list-returning
    filter(), and on webkitpy's bundled BeautifulSoup 3 API.
    """
    def __init__(self):
        # Cache of group display-name -> form control name ("group_NUMBER"),
        # filled lazily by group_string_from_name().
        self._group_name_to_group_string_cache = {}
    def _login_and_uid_from_row(self, row):
        """Return (login, user_id) for one results-table <tr>, or None for
        the header row and the placeholder "<none>" row."""
        first_cell = row.find("td")
        # The first row is just headers, we skip it.
        if not first_cell:
            return None
        # When there were no results, we have a fake "<none>" entry in the table.
        if first_cell.find(text="<none>"):
            return None
        # Otherwise the <td> contains a single <a> which contains the login name or a single <i> with the string "<none>".
        anchor_tag = first_cell.find("a")
        login = unicode(anchor_tag.string).strip()
        # The numeric user id is only exposed via the link's query string.
        user_id = int(re.search(r"userid=(\d+)", str(anchor_tag['href'])).group(1))
        return (login, user_id)
    def login_userid_pairs_from_edit_user_results(self, results_page):
        """Return [(login, user_id), ...] scraped from an editusers.cgi
        action=list results page."""
        soup = BeautifulSoup(results_page, convertEntities=BeautifulStoneSoup.HTML_ENTITIES)
        results_table = soup.find(id="admin_table")
        login_userid_pairs = [self._login_and_uid_from_row(row) for row in results_table('tr')]
        # Filter out None from the logins.
        return filter(lambda pair: bool(pair), login_userid_pairs)
    def _group_name_and_string_from_row(self, row):
        # A group row holds <label for="group_NUMBER"><strong>Name:</strong>.
        label_element = row.find('label')
        group_string = unicode(label_element['for'])
        group_name = unicode(label_element.find('strong').string).rstrip(':')
        return (group_name, group_string)
    def user_dict_from_edit_user_page(self, page):
        """Return {field_name: value} for one user's edit page, collapsing
        all group checkbox rows into a single 'groups' set."""
        soup = BeautifulSoup(page, convertEntities=BeautifulStoneSoup.HTML_ENTITIES)
        user_table = soup.find("table", {'class': 'main'})
        user_dict = {}
        for row in user_table('tr'):
            label_element = row.find('label')
            if not label_element:
                continue  # This must not be a row we know how to parse.
            if row.find('table'):
                continue  # Skip the <tr> holding the groups table.
            key = label_element['for']
            if "group" in key:
                key = "groups"
                value = user_dict.get('groups', set())
                # We must be parsing a "tr" inside the inner group table.
                (group_name, _) = self._group_name_and_string_from_row(row)
                # Only checked boxes mean membership.
                if row.find('input', {'type': 'checkbox', 'checked': 'checked'}):
                    value.add(group_name)
            else:
                value = unicode(row.find('td').string).strip()
            user_dict[key] = value
        return user_dict
    def _group_rows_from_edit_user_page(self, edit_user_page):
        # NOTE(review): uses BeautifulSoup.HTML_ENTITIES where the other
        # methods spell it BeautifulStoneSoup.HTML_ENTITIES; same constant
        # via inheritance, but inconsistent — worth unifying.
        soup = BeautifulSoup(edit_user_page, convertEntities=BeautifulSoup.HTML_ENTITIES)
        return soup('td', {'class': 'groupname'})
    def group_string_from_name(self, edit_user_page, group_name):
        """Map a group display name to its "group_NUMBER" form-control name,
        scraping (and caching) the full mapping on first use."""
        # Bugzilla uses "group_NUMBER" strings, which may be different per install
        # so we just look them up once and cache them.
        if not self._group_name_to_group_string_cache:
            rows = self._group_rows_from_edit_user_page(edit_user_page)
            name_string_pairs = map(self._group_name_and_string_from_row, rows)
            self._group_name_to_group_string_cache = dict(name_string_pairs)
        return self._group_name_to_group_string_cache[group_name]
def timestamp():
    """Return the current local time formatted as YYYYMMDDHHMMSS."""
    return "{0:%Y%m%d%H%M%S}".format(datetime.now())
# A container for all of the logic for making and parsing bugzilla queries.
class BugzillaQueries(object):
    """Builds, issues and parses Bugzilla searches (buglist.cgi, request.cgi,
    quips.cgi, editusers.cgi) on behalf of a Bugzilla instance.

    NOTE(review): Python 2 code; relies on list-returning filter()/map()
    and the bundled mechanize browser / BeautifulSoup 3 APIs.
    """
    def __init__(self, bugzilla):
        # The owning Bugzilla object supplies authentication and the
        # mechanize browser.
        self._bugzilla = bugzilla
    def _is_xml_bugs_form(self, form):
        """Predicate used to pick the "XML" export form on a results page."""
        # ClientForm.HTMLForm.find_control throws if the control is not found,
        # so we do a manual search instead:
        return "xml" in [control.id for control in form.controls]
    # This is kinda a hack. There is probably a better way to get this information from bugzilla.
    def _parse_result_count(self, results_page):
        """Return the bug count from a results page; Bugzilla spells zero
        as "Zarro" and one as "One"."""
        result_count_text = BeautifulSoup(results_page).find(attrs={'class': 'bz_result_count'}).string
        result_count_parts = result_count_text.strip().split(" ")
        if result_count_parts[0] == "Zarro":
            return 0
        if result_count_parts[0] == "One":
            return 1
        return int(result_count_parts[0])
    # Note: _load_query, _fetch_bug and _fetch_bugs_from_advanced_query
    # are the only methods which access self._bugzilla.
    def _load_query(self, query):
        """Authenticate and GET *query* (a path relative to the bug server),
        returning the mechanize response object."""
        self._bugzilla.authenticate()
        full_url = "%s%s" % (config_urls.bug_server_url, query)
        return self._bugzilla.browser.open(full_url)
    def _fetch_bugs_from_advanced_query(self, query):
        """Run an advanced query and return the matching Bug objects."""
        results_page = self._load_query(query)
        # Some simple searches can return a single result.
        results_url = results_page.geturl()
        if results_url.find("/show_bug.cgi?id=") != -1:
            # Bugzilla redirected straight to the one matching bug.
            bug_id = int(results_url.split("=")[-1])
            return [self._fetch_bug(bug_id)]
        if not self._parse_result_count(results_page):
            return []
        # Bugzilla results pages have an "XML" submit button at the bottom
        # which can be used to get an XML page containing all of the <bug> elements.
        # This is slighty lame that this assumes that _load_query used
        # self._bugzilla.browser and that it's in an acceptable state.
        self._bugzilla.browser.select_form(predicate=self._is_xml_bugs_form)
        bugs_xml = self._bugzilla.browser.submit()
        return self._bugzilla._parse_bugs_from_xml(bugs_xml)
    def _fetch_bug(self, bug_id):
        """Delegate single-bug fetching to the owning Bugzilla object."""
        return self._bugzilla.fetch_bug(bug_id)
    def _fetch_bug_ids_advanced_query(self, query):
        """Return the list of bug ids (ints) on a buglist results page."""
        soup = BeautifulSoup(self._load_query(query))
        # The contents of the <a> inside the cells in the first column happen
        # to be the bug id.
        return [int(bug_link_cell.find("a").string)
            for bug_link_cell in soup('td', "first-child")]
    def _parse_attachment_ids_request_query(self, page):
        """Return attachment ids from a request.cgi review-queue page.
        NOTE(review): non-raw regex literals ("\d+"); they work, but raw
        strings would be preferable."""
        digits = re.compile("\d+")
        attachment_href = re.compile("attachment.cgi\?id=\d+&action=review")
        attachment_links = SoupStrainer("a", href=attachment_href)
        return [int(digits.search(tag["href"]).group(0))
            for tag in BeautifulSoup(page, parseOnlyThese=attachment_links)]
    def _fetch_attachment_ids_request_query(self, query):
        return self._parse_attachment_ids_request_query(self._load_query(query))
    def _parse_quips(self, page):
        """Return the list of quip strings scraped from quips.cgi."""
        soup = BeautifulSoup(page, convertEntities=BeautifulSoup.HTML_ENTITIES)
        quips = soup.find(text=re.compile(r"Existing quips:")).findNext("ul").findAll("li")
        return [unicode(quip_entry.string) for quip_entry in quips]
    def fetch_quips(self):
        return self._parse_quips(self._load_query("/quips.cgi?action=show"))
    # List of all r+'d bugs.
    def fetch_bug_ids_from_pending_commit_list(self):
        """Return ids of open bugs carrying a review+ flag."""
        needs_commit_query_url = "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=review%2B"
        return self._fetch_bug_ids_advanced_query(needs_commit_query_url)
    def fetch_bugs_matching_quicksearch(self, search_string):
        # We may want to use a more explicit query than "quicksearch".
        # If quicksearch changes we should probably change to use
        # a normal buglist.cgi?query_format=advanced query.
        quicksearch_url = "buglist.cgi?quicksearch=%s" % urllib.quote(search_string)
        return self._fetch_bugs_from_advanced_query(quicksearch_url)
    # Currently this returns all bugs across all components.
    # In the future we may wish to extend this API to construct more restricted searches.
    def fetch_bugs_matching_search(self, search_string):
        """Return Bugs whose summary contains all words of *search_string*
        (or all bugs when the string is empty)."""
        query = "buglist.cgi?query_format=advanced"
        if search_string:
            query += "&short_desc_type=allwordssubstr&short_desc=%s" % urllib.quote(search_string)
        return self._fetch_bugs_from_advanced_query(query)
    def fetch_patches_from_pending_commit_list(self):
        # sum(..., []) flattens the per-bug patch lists into one list.
        return sum([self._fetch_bug(bug_id).reviewed_patches()
            for bug_id in self.fetch_bug_ids_from_pending_commit_list()], [])
    def fetch_bugs_from_review_queue(self, cc_email=None):
        """Return open bugs with a review? flag, optionally restricted to
        those CC'ing *cc_email*."""
        query = "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=review?"
        if cc_email:
            query += "&emailcc1=1&emailtype1=substring&email1=%s" % urllib.quote(cc_email)
        return self._fetch_bugs_from_advanced_query(query)
    def fetch_bug_ids_from_commit_queue(self):
        """Return ids of open bugs with a commit-queue+ flag, oldest-change
        first."""
        commit_queue_url = "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=commit-queue%2B&order=Last+Changed"
        return self._fetch_bug_ids_advanced_query(commit_queue_url)
    def fetch_patches_from_commit_queue(self):
        # This function will only return patches which have valid committers
        # set. It won't reject patches with invalid committers/reviewers.
        return sum([self._fetch_bug(bug_id).commit_queued_patches()
            for bug_id in self.fetch_bug_ids_from_commit_queue()], [])
    def fetch_bug_ids_from_review_queue(self):
        review_queue_url = "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=review?"
        return self._fetch_bug_ids_advanced_query(review_queue_url)
    # This method will make several requests to bugzilla.
    def fetch_patches_from_review_queue(self, limit=None):
        """Return unreviewed patches from up to *limit* review-queue bugs
        (all of them when limit is None)."""
        # [:None] returns the whole array.
        return sum([self._fetch_bug(bug_id).unreviewed_patches()
            for bug_id in self.fetch_bug_ids_from_review_queue()[:limit]], [])
    # NOTE: This is the only client of _fetch_attachment_ids_request_query
    # This method only makes one request to bugzilla.
    def fetch_attachment_ids_from_review_queue(self):
        review_queue_url = "request.cgi?action=queue&type=review&group=type"
        return self._fetch_attachment_ids_request_query(review_queue_url)
    # This only works if your account has edituser privileges.
    # We could easily parse https://bugs.webkit.org/userprefs.cgi?tab=permissions to
    # check permissions, but bugzilla will just return an error if we don't have them.
    def fetch_login_userid_pairs_matching_substring(self, search_string):
        """Return [(login, user_id), ...] for accounts whose login contains
        *search_string* (requires edituser privileges)."""
        review_queue_url = "editusers.cgi?action=list&matchvalue=login_name&matchstr=%s&matchtype=substr" % urllib.quote(search_string)
        results_page = self._load_query(review_queue_url)
        # We could pull the EditUsersParser off Bugzilla if needed.
        return EditUsersParser().login_userid_pairs_from_edit_user_results(results_page)
    # FIXME: We should consider adding a BugzillaUser class.
    def fetch_logins_matching_substring(self, search_string):
        """Return just the login names from the matching (login, id) pairs."""
        pairs = self.fetch_login_userid_pairs_matching_substring(search_string)
        return map(lambda pair: pair[0], pairs)
class Bugzilla(object):
    """Web-scraping client for a Bugzilla instance, driven through mechanize."""
    # NOTE(review): the default CommitterList() is evaluated once, when the
    # class is defined, so every Bugzilla() constructed without an explicit
    # committers argument shares the same CommitterList object -- confirm
    # that sharing is intended before changing it.
    def __init__(self, committers=committers.CommitterList()):
        self.authenticated = False          # set True by authenticate()
        self.queries = BugzillaQueries(self)
        self.committers = committers
        self.cached_quips = []              # lazily filled by quips()
        self.edit_user_parser = EditUsersParser()
        self._browser = None                # lazily created by _get_browser()
    def _get_browser(self):
        """Lazily create and cache the mechanize Browser, with a 600s socket timeout."""
        if not self._browser:
            self.setdefaulttimeout(600)
            from webkitpy.thirdparty.autoinstalled.mechanize import Browser
            self._browser = Browser()
            # Ignore bugs.webkit.org/robots.txt until we fix it to allow this script.
            self._browser.set_handle_robots(False)
        return self._browser
    def _set_browser(self, value):
        """Replace the cached browser instance (e.g. to inject a substitute)."""
        self._browser = value
    # Property wrapping lazy creation (_get_browser) and injection (_set_browser).
    browser = property(_get_browser, _set_browser)
    def setdefaulttimeout(self, value):
        # Process-wide: socket.setdefaulttimeout affects all newly created sockets.
        socket.setdefaulttimeout(value)
    def fetch_user(self, user_id):
        """Fetch the edit-user page for user_id and return its parsed user dict.

        Requires authentication (the edit-user pages need privileges).
        """
        self.authenticate()
        edit_user_page = self.browser.open(self.edit_user_url_for_id(user_id))
        return self.edit_user_parser.user_dict_from_edit_user_page(edit_user_page)
    def add_user_to_groups(self, user_id, group_names):
        """Tick the checkbox for each named group on user_id's edit page and submit."""
        self.authenticate()
        user_edit_page = self.browser.open(self.edit_user_url_for_id(user_id))
        # nr=1 selects the user-edit form on the page.
        self.browser.select_form(nr=1)
        for group_name in group_names:
            group_string = self.edit_user_parser.group_string_from_name(user_edit_page, group_name)
            self.browser.find_control(group_string).items[0].selected = True
        self.browser.submit()
    def quips(self):
        """Return the quip list, fetching it at most once per instance."""
        # We only fetch and parse the list of quips once per instantiation
        # so that we do not burden bugs.webkit.org.
        if not self.cached_quips:
            self.cached_quips = self.queries.fetch_quips()
        return self.cached_quips
def bug_url_for_bug_id(self, bug_id, xml=False):
if not bug_id:
return None
content_type = "&ctype=xml&excludefield=attachmentdata" if xml else ""
return "%sshow_bug.cgi?id=%s%s" % (config_urls.bug_server_url, bug_id, content_type)
def short_bug_url_for_bug_id(self, bug_id):
if not bug_id:
return None
return "http://webkit.org/b/%s" % bug_id
    def add_attachment_url(self, bug_id):
        """Return the attachment.cgi URL for adding an attachment to bug_id."""
        return "%sattachment.cgi?action=enter&bugid=%s" % (config_urls.bug_server_url, bug_id)
def attachment_url_for_id(self, attachment_id, action="view"):
if not attachment_id:
return None
action_param = ""
if action and action != "view":
action_param = "&action=%s" % action
return "%sattachment.cgi?id=%s%s" % (config_urls.bug_server_url,
attachment_id,
action_param)
    def edit_user_url_for_id(self, user_id):
        """Return the editusers.cgi URL for editing user_id."""
        return "%seditusers.cgi?action=edit&userid=%s" % (config_urls.bug_server_url, user_id)
    def _parse_attachment_flag(self,
                               element,
                               flag_name,
                               attachment,
                               result_key):
        """Copy a flag's status from an attachment XML element into `attachment`.

        Stores the flag's status under flag_name; when the status is '+',
        also stores the flag setter's identity under result_key.
        """
        flag = element.find('flag', attrs={'name': flag_name})
        if flag:
            attachment[flag_name] = flag['status']
            if flag['status'] == '+':
                attachment[result_key] = flag['setter']
        # Sadly show_bug.cgi?ctype=xml does not expose the flag modification date.
    def _string_contents(self, soup):
        """Return the tag's text as a plain unicode object."""
        # WebKit's bugzilla instance uses UTF-8.
        # BeautifulStoneSoup always returns Unicode strings, however
        # the .string method returns a (unicode) NavigableString.
        # NavigableString can confuse other parts of the code, so we
        # convert from NavigableString to a real unicode() object using unicode().
        return unicode(soup.string)
# Example: 2010-01-20 14:31 PST
# FIXME: Some bugzilla dates seem to have seconds in them?
# Python does not support timezones out of the box.
# Assume that bugzilla always uses PST (which is true for bugs.webkit.org)
_bugzilla_date_format = "%Y-%m-%d %H:%M:%S"
@classmethod
def _parse_date(cls, date_string):
(date, time, time_zone) = date_string.split(" ")
if time.count(':') == 1:
# Add seconds into the time.
time += ':0'
# Ignore the timezone because python doesn't understand timezones out of the box.
date_string = "%s %s" % (date, time)
return datetime.strptime(date_string, cls._bugzilla_date_format)
    def _date_contents(self, soup):
        """Return the tag's text parsed as a datetime (see _parse_date)."""
        return self._parse_date(self._string_contents(soup))
    def _parse_attachment_element(self, element, bug_id):
        """Build an attachment dict from one <attachment> XML element of bug bug_id."""
        attachment = {}
        attachment['bug_id'] = bug_id
        # has_key here is BeautifulSoup Tag attribute lookup, not a dict method.
        attachment['is_obsolete'] = (element.has_key('isobsolete') and element['isobsolete'] == "1")
        attachment['is_patch'] = (element.has_key('ispatch') and element['ispatch'] == "1")
        attachment['id'] = int(element.find('attachid').string)
        # FIXME: No need to parse out the url here.
        attachment['url'] = self.attachment_url_for_id(attachment['id'])
        attachment["attach_date"] = self._date_contents(element.find("date"))
        attachment['name'] = self._string_contents(element.find('desc'))
        attachment['attacher_email'] = self._string_contents(element.find('attacher'))
        attachment['type'] = self._string_contents(element.find('type'))
        self._parse_attachment_flag(
            element, 'review', attachment, 'reviewer_email')
        self._parse_attachment_flag(
            element, 'commit-queue', attachment, 'committer_email')
        return attachment
    def _parse_log_descr_element(self, element):
        """Build a comment dict (author, date, text) from a <long_desc> XML element."""
        comment = {}
        comment['comment_email'] = self._string_contents(element.find('who'))
        comment['comment_date'] = self._date_contents(element.find('bug_when'))
        comment['text'] = self._string_contents(element.find('thetext'))
        return comment
    def _parse_bugs_from_xml(self, page):
        """Return a list of Bug objects parsed from a page containing <bug> elements."""
        soup = BeautifulSoup(page)
        # Without the unicode() call, BeautifulSoup occasionally complains of being
        # passed None for no apparent reason.
        return [Bug(self._parse_bug_dictionary_from_xml(unicode(bug_xml)), self) for bug_xml in soup('bug')]
    def _parse_bug_dictionary_from_xml(self, page):
        """Parse one bug's XML into a dict of scalar fields plus attachment/comment lists.

        Keys: id, title, bug_status, dup_id (only when present), reporter_email,
        assigned_to_email, cc_emails, attachments, comments.
        """
        soup = BeautifulStoneSoup(page, convertEntities=BeautifulStoneSoup.XML_ENTITIES)
        bug = {}
        bug["id"] = int(soup.find("bug_id").string)
        bug["title"] = self._string_contents(soup.find("short_desc"))
        bug["bug_status"] = self._string_contents(soup.find("bug_status"))
        dup_id = soup.find("dup_id")
        if dup_id:
            bug["dup_id"] = self._string_contents(dup_id)
        bug["reporter_email"] = self._string_contents(soup.find("reporter"))
        bug["assigned_to_email"] = self._string_contents(soup.find("assigned_to"))
        bug["cc_emails"] = [self._string_contents(element) for element in soup.findAll('cc')]
        bug["attachments"] = [self._parse_attachment_element(element, bug["id"]) for element in soup.findAll('attachment')]
        bug["comments"] = [self._parse_log_descr_element(element) for element in soup.findAll('long_desc')]
        return bug
    # Makes testing fetch_*_from_bug() possible until we have a better
    # BugzillaNetwork abstration.
    def _fetch_bug_page(self, bug_id):
        """Open and return the XML page for bug_id."""
        bug_url = self.bug_url_for_bug_id(bug_id, xml=True)
        _log.info("Fetching: %s" % bug_url)
        return self.browser.open(bug_url)
def fetch_bug_dictionary(self, bug_id):
try:
return self._parse_bug_dictionary_from_xml(self._fetch_bug_page(bug_id))
except KeyboardInterrupt:
raise
except:
self.authenticate()
return self._parse_bug_dictionary_from_xml(self._fetch_bug_page(bug_id))
    # FIXME: A BugzillaCache object should provide all these fetch_ methods.
    def fetch_bug(self, bug_id):
        """Return a Bug object for bug_id (one network round-trip)."""
        return Bug(self.fetch_bug_dictionary(bug_id), self)
    def fetch_attachment_contents(self, attachment_id):
        """Return the raw bytes of the attachment with attachment_id."""
        attachment_url = self.attachment_url_for_id(attachment_id)
        # We need to authenticate to download patches from security bugs.
        self.authenticate()
        return self.browser.open(attachment_url).read()
def _parse_bug_id_from_attachment_page(self, page):
# The "Up" relation happens to point to the bug.
up_link = BeautifulSoup(page).find('link', rel='Up')
if not up_link:
# This attachment does not exist (or you don't have permissions to
# view it).
return None
match = re.search("show_bug.cgi\?id=(?P<bug_id>\d+)", up_link['href'])
return int(match.group('bug_id'))
    def bug_id_for_attachment_id(self, attachment_id):
        """Return the id of the bug owning attachment_id (or None if unparseable)."""
        self.authenticate()
        attachment_url = self.attachment_url_for_id(attachment_id, 'edit')
        _log.info("Fetching: %s" % attachment_url)
        page = self.browser.open(attachment_url)
        return self._parse_bug_id_from_attachment_page(page)
    # FIXME: This should just return Attachment(id), which should be able to
    # lazily fetch needed data.
    def fetch_attachment(self, attachment_id):
        """Return the Attachment for attachment_id, or None when the bug can't be found."""
        # We could grab all the attachment details off of the attachment edit
        # page but we already have working code to do so off of the bugs page,
        # so re-use that.
        bug_id = self.bug_id_for_attachment_id(attachment_id)
        if not bug_id:
            return None
        attachments = self.fetch_bug(bug_id).attachments(include_obsolete=True)
        for attachment in attachments:
            if attachment.id() == int(attachment_id):
                return attachment
        return None # This should never be hit.
    def authenticate(self):
        """Log into bugzilla, prompting for credentials and retrying up to 5 times.

        No-op when already authenticated. On success sets self.authenticated and
        self.username; raises after the fifth failed login attempt.
        """
        if self.authenticated:
            return
        credentials = Credentials(config_urls.bug_server_host, git_prefix="bugzilla")
        attempts = 0
        while not self.authenticated:
            attempts += 1
            username, password = credentials.read_credentials()
            _log.info("Logging in as %s..." % username)
            self.browser.open(config_urls.bug_server_url +
                              "index.cgi?GoAheadAndLogIn=1")
            self.browser.select_form(name="login")
            self.browser['Bugzilla_login'] = username
            self.browser['Bugzilla_password'] = password
            # Don't restrict the session to this IP.
            self.browser.find_control("Bugzilla_restrictlogin").items[0].selected = False
            response = self.browser.submit()
            match = re.search("<title>(.+?)</title>", response.read())
            # If the resulting page has a title, and it contains the word
            # "invalid" assume it's the login failure page.
            if match and re.search("Invalid", match.group(1), re.IGNORECASE):
                errorMessage = "Bugzilla login failed: %s" % match.group(1)
                # raise an exception only if this was the last attempt
                if attempts < 5:
                    _log.error(errorMessage)
                else:
                    raise Exception(errorMessage)
            else:
                self.authenticated = True
                self.username = username
# FIXME: Use enum instead of two booleans
def _commit_queue_flag(self, mark_for_landing, mark_for_commit_queue):
if mark_for_landing:
user = self.committers.contributor_by_email(self.username)
mark_for_commit_queue = True
if not user:
_log.warning("Your Bugzilla login is not listed in committers.py. Uploading with cq? instead of cq+")
mark_for_landing = False
elif not user.can_commit:
_log.warning("You're not a committer yet or haven't updated committers.py yet. Uploading with cq? instead of cq+")
mark_for_landing = False
if mark_for_landing:
return '+'
if mark_for_commit_queue:
return '?'
return 'X'
    # FIXME: mark_for_commit_queue and mark_for_landing should be joined into a single commit_flag argument.
    def _fill_attachment_form(self,
                              description,
                              file_object,
                              mark_for_review=False,
                              mark_for_commit_queue=False,
                              mark_for_landing=False,
                              is_patch=False,
                              filename=None,
                              mimetype=None):
        """Populate the currently selected attachment form and attach file_object.

        Assumes the caller has already select_form()-ed the attachment entry
        form. flag_type-1 is the review flag, flag_type-3 the commit-queue flag.
        """
        self.browser['description'] = description
        if is_patch:
            self.browser['ispatch'] = ("1",)
        # FIXME: Should this use self._find_select_element_for_flag?
        self.browser['flag_type-1'] = ('?',) if mark_for_review else ('X',)
        self.browser['flag_type-3'] = (self._commit_queue_flag(mark_for_landing, mark_for_commit_queue),)
        filename = filename or "%s.patch" % timestamp()
        if not mimetype:
            mimetypes.add_type('text/plain', '.patch')  # Make sure mimetypes knows about .patch
            mimetype, _ = mimetypes.guess_type(filename)
        if not mimetype:
            mimetype = "text/plain"  # Bugzilla might auto-guess for us and we might not need this?
        self.browser.add_file(file_object, mimetype, filename, 'data')
def _file_object_for_upload(self, file_or_string):
if hasattr(file_or_string, 'read'):
return file_or_string
# Only if file_or_string is not already encoded do we want to encode it.
if isinstance(file_or_string, unicode):
file_or_string = file_or_string.encode('utf-8')
return StringIO.StringIO(file_or_string)
# timestamp argument is just for unittests.
def _filename_for_upload(self, file_object, bug_id, extension="txt", timestamp=timestamp):
if hasattr(file_object, "name"):
return file_object.name
return "bug-%s-%s.%s" % (bug_id, timestamp(), extension)
    def add_attachment_to_bug(self, bug_id, file_or_string, description, filename=None, comment_text=None, mimetype=None):
        """Upload file_or_string as a (non-patch) attachment on bug_id."""
        self.authenticate()
        _log.info('Adding attachment "%s" to %s' % (description, self.bug_url_for_bug_id(bug_id)))
        self.browser.open(self.add_attachment_url(bug_id))
        self.browser.select_form(name="entryform")
        file_object = self._file_object_for_upload(file_or_string)
        filename = filename or self._filename_for_upload(file_object, bug_id)
        self._fill_attachment_form(description, file_object, filename=filename, mimetype=mimetype)
        if comment_text:
            _log.info(comment_text)
            self.browser['comment'] = comment_text
        self.browser.submit()
    # FIXME: The arguments to this function should be simplified and then
    # this should be merged into add_attachment_to_bug
    def add_patch_to_bug(self,
                         bug_id,
                         file_or_string,
                         description,
                         comment_text=None,
                         mark_for_review=False,
                         mark_for_commit_queue=False,
                         mark_for_landing=False):
        """Upload file_or_string as a patch attachment on bug_id, setting review/cq flags."""
        self.authenticate()
        _log.info('Adding patch "%s" to %s' % (description, self.bug_url_for_bug_id(bug_id)))
        self.browser.open(self.add_attachment_url(bug_id))
        self.browser.select_form(name="entryform")
        file_object = self._file_object_for_upload(file_or_string)
        filename = self._filename_for_upload(file_object, bug_id, extension="patch")
        self._fill_attachment_form(description,
                                   file_object,
                                   mark_for_review=mark_for_review,
                                   mark_for_commit_queue=mark_for_commit_queue,
                                   mark_for_landing=mark_for_landing,
                                   is_patch=True,
                                   filename=filename)
        if comment_text:
            _log.info(comment_text)
            self.browser['comment'] = comment_text
        self.browser.submit()
    # FIXME: There has to be a more concise way to write this method.
    def _check_create_bug_response(self, response_html):
        """Return the new bug id from a post_bug response, or raise with the error text.

        Raises Exception with the page's error message (scraped from the
        bugzilla-body div) when no "Bug N Submitted" title is found.
        """
        match = re.search("<title>Bug (?P<bug_id>\d+) Submitted[^<]*</title>",
                          response_html)
        if match:
            return match.group('bug_id')
        match = re.search(
            '<div id="bugzilla-body">(?P<error_message>.+)<div id="footer">',
            response_html,
            re.DOTALL)
        error_message = "FAIL"
        if match:
            text_lines = BeautifulSoup(
                match.group('error_message')).findAll(text=True)
            error_message = "\n" + '\n'.join(
                ["  " + line.strip()
                 for line in text_lines if line.strip()])
        raise Exception("Bug not created: %s" % error_message)
    def create_bug(self,
                   bug_title,
                   bug_description,
                   component=None,
                   diff=None,
                   patch_description=None,
                   cc=None,
                   blocked=None,
                   assignee=None,
                   mark_for_review=False,
                   mark_for_commit_queue=False):
        """File a new WebKit bug, optionally attaching `diff` as a patch.

        Falls back to the "New Bugs" component (prompting the user when that
        name is unknown to the server). Returns the new bug's id; raises via
        _check_create_bug_response when creation fails.
        """
        self.authenticate()
        _log.info('Creating bug with title "%s"' % bug_title)
        self.browser.open(config_urls.bug_server_url + "enter_bug.cgi?product=WebKit")
        self.browser.select_form(name="Create")
        component_items = self.browser.find_control('component').items
        component_names = map(lambda item: item.name, component_items)
        if not component:
            component = "New Bugs"
        if component not in component_names:
            component = User.prompt_with_list("Please pick a component:", component_names)
        self.browser["component"] = [component]
        if cc:
            self.browser["cc"] = cc
        if blocked:
            self.browser["blocked"] = unicode(blocked)
        if not assignee:
            assignee = self.username
        if assignee and not self.browser.find_control("assigned_to").disabled:
            self.browser["assigned_to"] = assignee
        self.browser["short_desc"] = bug_title
        self.browser["comment"] = bug_description
        if diff:
            # _fill_attachment_form expects a file-like object
            # Patch files are already binary, so no encoding needed.
            assert(isinstance(diff, str))
            patch_file_object = StringIO.StringIO(diff)
            self._fill_attachment_form(
                patch_description,
                patch_file_object,
                mark_for_review=mark_for_review,
                mark_for_commit_queue=mark_for_commit_queue,
                is_patch=True)
        response = self.browser.submit()
        bug_id = self._check_create_bug_response(response.read())
        _log.info("Bug %s created." % bug_id)
        _log.info("%sshow_bug.cgi?id=%s" % (config_urls.bug_server_url, bug_id))
        return bug_id
def _find_select_element_for_flag(self, flag_name):
# FIXME: This will break if we ever re-order attachment flags
if flag_name == "review":
return self.browser.find_control(type='select', nr=0)
elif flag_name == "commit-queue":
return self.browser.find_control(type='select', nr=1)
raise Exception("Don't know how to find flag named \"%s\"" % flag_name)
    def clear_attachment_flags(self,
                               attachment_id,
                               additional_comment_text=None):
        """Reset the review and commit-queue flags on attachment_id to 'X'."""
        self.authenticate()
        comment_text = "Clearing flags on attachment: %s" % attachment_id
        if additional_comment_text:
            comment_text += "\n\n%s" % additional_comment_text
        _log.info(comment_text)
        self.browser.open(self.attachment_url_for_id(attachment_id, 'edit'))
        self.browser.select_form(nr=1)
        self.browser.set_value(comment_text, name='comment', nr=0)
        self._find_select_element_for_flag('review').value = ("X",)
        self._find_select_element_for_flag('commit-queue').value = ("X",)
        self.browser.submit()
def set_flag_on_attachment(self,
attachment_id,
flag_name,
flag_value,
comment_text=None):
# FIXME: We need a way to test this function on a live bugzilla
# instance.
self.authenticate()
_log.info(comment_text)
self.browser.open(self.attachment_url_for_id(attachment_id, 'edit'))
self.browser.select_form(nr=1)
if comment_text:
self.browser.set_value(comment_text, name='comment', nr=0)
self._find_select_element_for_flag(flag_name).value = (flag_value,)
self.browser.submit()
    # FIXME: All of these bug editing methods have a ridiculous amount of
    # copy/paste code.
    def obsolete_attachment(self, attachment_id, comment_text=None):
        """Mark attachment_id obsolete and clear its review/commit-queue flags."""
        self.authenticate()
        _log.info("Obsoleting attachment: %s" % attachment_id)
        self.browser.open(self.attachment_url_for_id(attachment_id, 'edit'))
        self.browser.select_form(nr=1)
        self.browser.find_control('isobsolete').items[0].selected = True
        # Also clear any review flag (to remove it from review/commit queues)
        self._find_select_element_for_flag('review').value = ("X",)
        self._find_select_element_for_flag('commit-queue').value = ("X",)
        if comment_text:
            _log.info(comment_text)
            # Bugzilla has two textareas named 'comment', one is somehow
            # hidden. We want the first.
            self.browser.set_value(comment_text, name='comment', nr=0)
        self.browser.submit()
    def add_cc_to_bug(self, bug_id, email_address_list):
        """Add every address in email_address_list to bug_id's CC list."""
        self.authenticate()
        _log.info("Adding %s to the CC list for bug %s" % (email_address_list, bug_id))
        self.browser.open(self.bug_url_for_bug_id(bug_id))
        self.browser.select_form(name="changeform")
        self.browser["newcc"] = ", ".join(email_address_list)
        self.browser.submit()
    def post_comment_to_bug(self, bug_id, comment_text, cc=None):
        """Add comment_text to bug_id, optionally CC-ing the addresses in cc."""
        self.authenticate()
        _log.info("Adding comment to bug %s" % bug_id)
        self.browser.open(self.bug_url_for_bug_id(bug_id))
        self.browser.select_form(name="changeform")
        self.browser["comment"] = comment_text
        if cc:
            self.browser["newcc"] = ", ".join(cc)
        self.browser.submit()
    def close_bug_as_fixed(self, bug_id, comment_text=None):
        """Resolve bug_id as RESOLVED/FIXED, optionally adding a comment."""
        self.authenticate()
        _log.info("Closing bug %s as fixed" % bug_id)
        self.browser.open(self.bug_url_for_bug_id(bug_id))
        self.browser.select_form(name="changeform")
        if comment_text:
            self.browser['comment'] = comment_text
        self.browser['bug_status'] = ['RESOLVED']
        self.browser['resolution'] = ['FIXED']
        self.browser.submit()
def _has_control(self, form, id):
return id in [control.id for control in form.controls]
    def reassign_bug(self, bug_id, assignee=None, comment_text=None):
        """Assign bug_id to assignee (defaults to the logged-in user).

        Silently gives up (with a warning) when the changeform has no
        assigned_to control, which happens without EditBugs privileges.
        """
        self.authenticate()
        if not assignee:
            assignee = self.username
        _log.info("Assigning bug %s to %s" % (bug_id, assignee))
        self.browser.open(self.bug_url_for_bug_id(bug_id))
        self.browser.select_form(name="changeform")
        if not self._has_control(self.browser, "assigned_to"):
            _log.warning("""Failed to assign bug to you (can't find assigned_to) control.
Ignore this message if you don't have EditBugs privileges (https://bugs.webkit.org/userprefs.cgi?tab=permissions)""")
            return
        if comment_text:
            _log.info(comment_text)
            self.browser["comment"] = comment_text
        self.browser["assigned_to"] = assignee
        self.browser.submit()
    def reopen_bug(self, bug_id, comment_text):
        """Reopen bug_id (to REOPENED, or UNCONFIRMED for never-confirmed bugs).

        comment_text is required: bugzilla insists on a comment when reopening.
        """
        self.authenticate()
        _log.info("Re-opening bug %s" % bug_id)
        # Bugzilla requires a comment when re-opening a bug, so we know it will
        # never be None.
        _log.info(comment_text)
        self.browser.open(self.bug_url_for_bug_id(bug_id))
        self.browser.select_form(name="changeform")
        bug_status = self.browser.find_control("bug_status", type="select")
        # This is a hack around the fact that ClientForm.ListControl seems to
        # have no simpler way to ask if a control has an item named "REOPENED"
        # without using exceptions for control flow.
        possible_bug_statuses = map(lambda item: item.name, bug_status.items)
        if "REOPENED" in possible_bug_statuses:
            bug_status.value = ["REOPENED"]
        # If the bug was never confirmed it will not have a "REOPENED"
        # state, but only an "UNCONFIRMED" state.
        elif "UNCONFIRMED" in possible_bug_statuses:
            bug_status.value = ["UNCONFIRMED"]
        else:
            # FIXME: This logic is slightly backwards. We won't print this
            # message if the bug is already open with state "UNCONFIRMED".
            _log.info("Did not reopen bug %s, it appears to already be open with status %s." % (bug_id, bug_status.value))
        self.browser['comment'] = comment_text
        self.browser.submit()
|
bsd-3-clause
|
palaniyappanBala/sulley
|
sulley/__init__.py
|
6
|
29146
|
import sulley.blocks
import sulley.instrumentation
import sulley.legos
import sulley.pedrpc
import sulley.primitives
import sulley.sex
import sulley.sessions
import sulley.utils
BIG_ENDIAN = ">"
LITTLE_ENDIAN = "<"
########################################################################################################################
### REQUEST MANAGEMENT
########################################################################################################################
def s_get (name=None):
    '''
    Return the request with the specified name or the current request if name is not specified. Use this to switch from
    global function style request manipulation to direct object manipulation. Example::
        req = s_get("HTTP BASIC")
        print req.num_mutations()
    The selected request is also set as the default current. (ie: s_switch(name) is implied).
    @type name: String
    @param name: (Optional, def=None) Name of request to return or current request if name is None.
    @rtype: blocks.request
    @return: The requested request.
    '''
    if not name:
        return blocks.CURRENT
    # ensure this gotten request is the new current.
    s_switch(name)
    # dict.has_key() is Python 2-only; "in" is equivalent and forward-compatible.
    if name not in blocks.REQUESTS:
        raise sex.error("blocks.REQUESTS NOT FOUND: %s" % name)
    return blocks.REQUESTS[name]
def s_initialize (name):
    '''
    Initialize a new block request. All blocks / primitives generated after this call apply to the named request.
    Use s_switch() to jump between factories.
    @type name: String
    @param name: Name of request
    '''
    # dict.has_key() is Python 2-only; "in" is equivalent and forward-compatible.
    if name in blocks.REQUESTS:
        raise sex.error("blocks.REQUESTS ALREADY EXISTS: %s" % name)
    blocks.REQUESTS[name] = blocks.request(name)
    blocks.CURRENT = blocks.REQUESTS[name]
def s_mutate ():
    '''
    Mutate the current request and return False if mutations are exhausted, in which case the request has been reverted
    back to its normal form.
    @rtype: Boolean
    @return: True on mutation success, False if mutations exhausted.
    '''
    # Delegates to the currently selected request (see s_switch/s_initialize).
    return blocks.CURRENT.mutate()
def s_num_mutations ():
    '''
    Determine the number of repetitions we will be making.
    @rtype: Integer
    @return: Number of mutated forms this primitive can take.
    '''
    # Delegates to the currently selected request (see s_switch/s_initialize).
    return blocks.CURRENT.num_mutations()
def s_render ():
    '''
    Render out and return the entire contents of the current request.
    @rtype: Raw
    @return: Rendered contents
    '''
    # Delegates to the currently selected request (see s_switch/s_initialize).
    return blocks.CURRENT.render()
def s_switch (name):
    '''
    Change the currect request to the one specified by "name".
    @type name: String
    @param name: Name of request
    '''
    # dict.has_key() is Python 2-only; "in" is equivalent and forward-compatible.
    if name not in blocks.REQUESTS:
        raise sex.error("blocks.REQUESTS NOT FOUND: %s" % name)
    blocks.CURRENT = blocks.REQUESTS[name]
########################################################################################################################
### BLOCK MANAGEMENT
########################################################################################################################
def s_block_start (name, group=None, encoder=None, dep=None, dep_value=None, dep_values=None, dep_compare="=="):
    '''
    Open a new block under the current request. This routine always returns True so you can make your fuzzer pretty
    with indenting::
        if s_block_start("header"):
            s_static("\\x00\\x01")
            if s_block_start("body"):
                ...
    @type name: String
    @param name: Name of block being opened
    @type group: String
    @param group: (Optional, def=None) Name of group to associate this block with
    @type encoder: Function Pointer
    @param encoder: (Optional, def=None) Optional pointer to a function to pass rendered data to prior to return
    @type dep: String
    @param dep: (Optional, def=None) Optional primitive whose specific value this block is dependant on
    @type dep_value: Mixed
    @param dep_value: (Optional, def=None) Value that field "dep" must contain for block to be rendered
    @type dep_values: List of Mixed Types
    @param dep_values: (Optional, def=[]) Values that field "dep" may contain for block to be rendered
    @type dep_compare: String
    @param dep_compare: (Optional, def="==") Comparison method to use on dependency (==, !=, >, >=, <, <=)
    '''
    # The default used to be a shared mutable list (dep_values=[]); build a
    # fresh list per call instead so blocks can never alias one another's list.
    if dep_values is None:
        dep_values = []
    block = blocks.block(name, blocks.CURRENT, group, encoder, dep, dep_value, dep_values, dep_compare)
    blocks.CURRENT.push(block)
    return True
def s_block_end (name=None):
    '''
    Close the last opened block. Optionally specify the name of the block being closed (purely for aesthetic purposes).
    @type name: String
    @param name: (Optional, def=None) Name of block to closed.
    '''
    # The name argument is cosmetic only; pop() always closes the most recent block.
    blocks.CURRENT.pop()
def s_checksum (block_name, algorithm="crc32", length=0, endian="<", name=None):
    '''
    Create a checksum block bound to the block with the specified name. You *can not* create a checksum for any
    currently open blocks.
    @type block_name: String
    @param block_name: Name of block to apply checksum to
    @type algorithm: String
    @param algorithm: (Optional, def=crc32) Checksum algorithm to use. (crc32, adler32, md5, sha1)
    @type length: Integer
    @param length: (Optional, def=0) Length of checksum, specify 0 to auto-calculate
    @type endian: Character
    @param endian: (Optional, def=LITTLE_ENDIAN) Endianess of the bit field (LITTLE_ENDIAN: <, BIG_ENDIAN: >)
    @type name: String
    @param name: Name of this checksum field
    '''
    # you can't add a checksum for a block currently in the stack.
    if block_name in blocks.CURRENT.block_stack:
        # Error message typo fixed ("N0T" -> "NOT") to match s_size's wording.
        raise sex.error("CAN NOT ADD A CHECKSUM FOR A BLOCK CURRENTLY IN THE STACK")
    checksum = blocks.checksum(block_name, blocks.CURRENT, algorithm, length, endian, name)
    blocks.CURRENT.push(checksum)
def s_repeat (block_name, min_reps=0, max_reps=None, step=1, variable=None, fuzzable=True, name=None):
    '''
    Repeat the rendered contents of the specified block cycling from min_reps to max_reps counting by step. By
    default renders to nothing. This block modifier is useful for fuzzing overflows in table entries. This block
    modifier MUST come after the block it is being applied to.
    @see: Aliases: s_repeater()
    @type block_name: String
    @param block_name: Name of block to repeat
    @type min_reps: Integer
    @param min_reps: (Optional, def=0) Minimum number of block repetitions
    @type max_reps: Integer
    @param max_reps: (Optional, def=None) Maximum number of block repetitions
    @type step: Integer
    @param step: (Optional, def=1) Step count between min and max reps
    @type variable: Sulley Integer Primitive
    @param variable: (Optional, def=None) An integer primitive which will specify the number of repitions
    @type fuzzable: Boolean
    @param fuzzable: (Optional, def=True) Enable/disable fuzzing of this primitive
    @type name: String
    @param name: (Optional, def=None) Specifying a name gives you direct access to a primitive
    '''
    repeat = blocks.repeat(block_name, blocks.CURRENT, min_reps, max_reps, step, variable, fuzzable, name)
    blocks.CURRENT.push(repeat)
def s_size (block_name, length=4, endian="<", format="binary", inclusive=False, signed=False, math=None, fuzzable=False, name=None):
    '''
    Create a sizer block bound to the block with the specified name. You *can not* create a sizer for any
    currently open blocks.
    @see: Aliases: s_sizer()
    @type block_name: String
    @param block_name: Name of block to apply sizer to
    @type length: Integer
    @param length: (Optional, def=4) Length of sizer
    @type endian: Character
    @param endian: (Optional, def=LITTLE_ENDIAN) Endianess of the bit field (LITTLE_ENDIAN: <, BIG_ENDIAN: >)
    @type format: String
    @param format: (Optional, def=binary) Output format, "binary" or "ascii"
    @type inclusive: Boolean
    @param inclusive: (Optional, def=False) Should the sizer count its own length?
    @type signed: Boolean
    @param signed: (Optional, def=False) Make size signed vs. unsigned (applicable only with format="ascii")
    @type math: Function
    @param math: (Optional, def=None) Apply the mathematical operations defined in this function to the size
    @type fuzzable: Boolean
    @param fuzzable: (Optional, def=False) Enable/disable fuzzing of this sizer
    @type name: String
    @param name: Name of this sizer field
    '''
    # you can't add a size for a block currently in the stack.
    if block_name in blocks.CURRENT.block_stack:
        raise sex.error("CAN NOT ADD A SIZE FOR A BLOCK CURRENTLY IN THE STACK")
    size = blocks.size(block_name, blocks.CURRENT, length, endian, format, inclusive, signed, math, fuzzable, name)
    blocks.CURRENT.push(size)
def s_update (name, value):
    '''
    Update the value of the named primitive in the currently open request.
    @type name: String
    @param name: Name of object whose value we wish to update
    @type value: Mixed
    @param value: Updated value
    '''
    # dict.has_key() is Python 2-only; "in" is equivalent and forward-compatible.
    if name not in blocks.CURRENT.names:
        raise sex.error("NO OBJECT WITH NAME '%s' FOUND IN CURRENT REQUEST" % name)
    blocks.CURRENT.names[name].value = value
########################################################################################################################
### PRIMITIVES
########################################################################################################################
def s_binary (value, name=None):
    '''
    Parse a variable format binary string into a static value and push it onto the current block stack.
    @type value: String
    @param value: Variable format binary string
    @type name: String
    @param name: (Optional, def=None) Specifying a name gives you direct access to a primitive
    '''
    # Strip whitespace, commas and hex prefixes, leaving only hex digit pairs.
    stripped = value
    for token in (" ", "\t", "\r", "\n", ",", "0x", "\\x"):
        stripped = stripped.replace(token, "")
    # Decode consecutive hex pairs into raw bytes.
    decoded = ""
    for offset in xrange(0, len(stripped), 2):
        decoded += chr(int(stripped[offset:offset + 2], 16))
    blocks.CURRENT.push(primitives.static(decoded, name))
def s_delim (value, fuzzable=True, name=None):
    '''
    Push a delimiter onto the current block stack.
    @type value: Character
    @param value: Original value
    @type fuzzable: Boolean
    @param fuzzable: (Optional, def=True) Enable/disable fuzzing of this primitive
    @type name: String
    @param name: (Optional, def=None) Specifying a name gives you direct access to a primitive
    '''
    delim = primitives.delim(value, fuzzable, name)
    blocks.CURRENT.push(delim)
def s_group (name, values):
    '''
    This primitive represents a list of static values, stepping through each one on mutation. You can tie a block
    to a group primitive to specify that the block should cycle through all possible mutations for *each* value
    within the group. The group primitive is useful for example for representing a list of valid opcodes.
    @type name: String
    @param name: Name of group
    @type values: List or raw data
    @param values: List of possible raw values this group can take.
    '''
    group = primitives.group(name, values)
    blocks.CURRENT.push(group)
def s_lego (lego_type, value=None, options=None):
    '''
    Legos are pre-built blocks... XXX finish this doc
    @type lego_type: String
    @param lego_type: Key into the legos.BIN registry of pre-built blocks
    @type value: Mixed
    @param value: (Optional, def=None) Value handed to the lego constructor
    @type options: Dictionary
    @param options: (Optional, def={}) Options handed to the lego constructor
    '''
    # The default used to be a shared mutable dict (options={}); build a fresh
    # dict per call so legos can never alias one another's options.
    if options is None:
        options = {}
    # as legos are blocks they must have a name.
    # generate a unique name for this lego.
    name = "LEGO_%08x" % len(blocks.CURRENT.names)
    # dict.has_key() is Python 2-only; "in" is equivalent and forward-compatible.
    if lego_type not in legos.BIN:
        raise sex.error("INVALID LEGO TYPE SPECIFIED: %s" % lego_type)
    lego = legos.BIN[lego_type](name, blocks.CURRENT, value, options)
    # push the lego onto the stack and immediately pop to close the block.
    blocks.CURRENT.push(lego)
    blocks.CURRENT.pop()
def s_random (value, min_length, max_length, num_mutations=25, fuzzable=True, step=None, name=None):
    """
    Push a random-data primitive onto the current block stack. A copy of the
    original value is kept, and a random length range can be given; set
    min_length == max_length for a static length.

    @type  value:         Raw
    @param value:         Original value
    @type  min_length:    Integer
    @param min_length:    Minimum length of random block
    @type  max_length:    Integer
    @param max_length:    Maximum length of random block
    @type  num_mutations: Integer
    @param num_mutations: (Optional, def=25) Number of mutations to make before reverting to default
    @type  fuzzable:      Boolean
    @param fuzzable:      (Optional, def=True) Enable/disable fuzzing of this primitive
    @type  step:          Integer
    @param step:          (Optional, def=None) If not null, step count between min and max reps, otherwise random
    @type  name:          String
    @param name:          (Optional, def=None) Specifying a name gives you direct access to a primitive
    """
    blocks.CURRENT.push(
        primitives.random_data(value, min_length, max_length, num_mutations, fuzzable, step, name)
    )
def s_static (value, name=None):
    """
    Push a static (non-mutated) value onto the current block stack.

    @see: Aliases: s_dunno(), s_raw(), s_unknown()
    @type  value: Raw
    @param value: Raw static data
    @type  name:  String
    @param name:  (Optional, def=None) Specifying a name gives you direct access to a primitive
    """
    blocks.CURRENT.push(primitives.static(value, name))
def s_string (value, size=-1, padding="\x00", encoding="ascii", fuzzable=True, max_len=0, name=None):
    """
    Push a string primitive onto the current block stack.

    @type  value:    String
    @param value:    Default string value
    @type  size:     Integer
    @param size:     (Optional, def=-1) Static size of this field, leave -1 for dynamic.
    @type  padding:  Character
    @param padding:  (Optional, def="\\x00") Value to use as padding to fill static field size.
    @type  encoding: String
    @param encoding: (Optional, def="ascii") String encoding, ex: utf_16_le for Microsoft Unicode.
    @type  fuzzable: Boolean
    @param fuzzable: (Optional, def=True) Enable/disable fuzzing of this primitive
    @type  max_len:  Integer
    @param max_len:  (Optional, def=0) Maximum string length
    @type  name:     String
    @param name:     (Optional, def=None) Specifying a name gives you direct access to a primitive
    """
    blocks.CURRENT.push(
        primitives.string(value, size, padding, encoding, fuzzable, max_len, name)
    )
def s_bit_field (value, width, endian="<", format="binary", signed=False, full_range=False, fuzzable=True, name=None):
    """
    Push a variable length bit field onto the current block stack.

    @see: Aliases: s_bit(), s_bits()
    @type  value:      Integer
    @param value:      Default integer value
    @type  width:      Integer
    @param width:      Width of bit fields
    @type  endian:     Character
    @param endian:     (Optional, def=LITTLE_ENDIAN) Endianess of the bit field (LITTLE_ENDIAN: <, BIG_ENDIAN: >)
    @type  format:     String
    @param format:     (Optional, def=binary) Output format, "binary" or "ascii"
    @type  signed:     Boolean
    @param signed:     (Optional, def=False) Make size signed vs. unsigned (applicable only with format="ascii")
    @type  full_range: Boolean
    @param full_range: (Optional, def=False) If enabled the field mutates through *all* possible values.
    @type  fuzzable:   Boolean
    @param fuzzable:   (Optional, def=True) Enable/disable fuzzing of this primitive
    @type  name:       String
    @param name:       (Optional, def=None) Specifying a name gives you direct access to a primitive
    """
    # the third positional argument (max_num) is deliberately left unset.
    field = primitives.bit_field(value, width, None, endian, format, signed, full_range, fuzzable, name)
    blocks.CURRENT.push(field)
def s_byte (value, endian="<", format="binary", signed=False, full_range=False, fuzzable=True, name=None):
    """
    Push a one-byte integer primitive onto the current block stack.

    @see: Aliases: s_char()
    @type  value:      Integer
    @param value:      Default integer value
    @type  endian:     Character
    @param endian:     (Optional, def=LITTLE_ENDIAN) Endianess of the bit field (LITTLE_ENDIAN: <, BIG_ENDIAN: >)
    @type  format:     String
    @param format:     (Optional, def=binary) Output format, "binary" or "ascii"
    @type  signed:     Boolean
    @param signed:     (Optional, def=False) Make size signed vs. unsigned (applicable only with format="ascii")
    @type  full_range: Boolean
    @param full_range: (Optional, def=False) If enabled the field mutates through *all* possible values.
    @type  fuzzable:   Boolean
    @param fuzzable:   (Optional, def=True) Enable/disable fuzzing of this primitive
    @type  name:       String
    @param name:       (Optional, def=None) Specifying a name gives you direct access to a primitive
    """
    blocks.CURRENT.push(
        primitives.byte(value, endian, format, signed, full_range, fuzzable, name)
    )
def s_word (value, endian="<", format="binary", signed=False, full_range=False, fuzzable=True, name=None):
    """
    Push a two-byte word primitive onto the current block stack.

    @see: Aliases: s_short()
    @type  value:      Integer
    @param value:      Default integer value
    @type  endian:     Character
    @param endian:     (Optional, def=LITTLE_ENDIAN) Endianess of the bit field (LITTLE_ENDIAN: <, BIG_ENDIAN: >)
    @type  format:     String
    @param format:     (Optional, def=binary) Output format, "binary" or "ascii"
    @type  signed:     Boolean
    @param signed:     (Optional, def=False) Make size signed vs. unsigned (applicable only with format="ascii")
    @type  full_range: Boolean
    @param full_range: (Optional, def=False) If enabled the field mutates through *all* possible values.
    @type  fuzzable:   Boolean
    @param fuzzable:   (Optional, def=True) Enable/disable fuzzing of this primitive
    @type  name:       String
    @param name:       (Optional, def=None) Specifying a name gives you direct access to a primitive
    """
    blocks.CURRENT.push(
        primitives.word(value, endian, format, signed, full_range, fuzzable, name)
    )
def s_dword (value, endian="<", format="binary", signed=False, full_range=False, fuzzable=True, name=None):
    """
    Push a four-byte double word primitive onto the current block stack.

    @see: Aliases: s_long(), s_int()
    @type  value:      Integer
    @param value:      Default integer value
    @type  endian:     Character
    @param endian:     (Optional, def=LITTLE_ENDIAN) Endianess of the bit field (LITTLE_ENDIAN: <, BIG_ENDIAN: >)
    @type  format:     String
    @param format:     (Optional, def=binary) Output format, "binary" or "ascii"
    @type  signed:     Boolean
    @param signed:     (Optional, def=False) Make size signed vs. unsigned (applicable only with format="ascii")
    @type  full_range: Boolean
    @param full_range: (Optional, def=False) If enabled the field mutates through *all* possible values.
    @type  fuzzable:   Boolean
    @param fuzzable:   (Optional, def=True) Enable/disable fuzzing of this primitive
    @type  name:       String
    @param name:       (Optional, def=None) Specifying a name gives you direct access to a primitive
    """
    blocks.CURRENT.push(
        primitives.dword(value, endian, format, signed, full_range, fuzzable, name)
    )
def s_qword (value, endian="<", format="binary", signed=False, full_range=False, fuzzable=True, name=None):
    """
    Push an eight-byte quad word primitive onto the current block stack.

    @see: Aliases: s_double()
    @type  value:      Integer
    @param value:      Default integer value
    @type  endian:     Character
    @param endian:     (Optional, def=LITTLE_ENDIAN) Endianess of the bit field (LITTLE_ENDIAN: <, BIG_ENDIAN: >)
    @type  format:     String
    @param format:     (Optional, def=binary) Output format, "binary" or "ascii"
    @type  signed:     Boolean
    @param signed:     (Optional, def=False) Make size signed vs. unsigned (applicable only with format="ascii")
    @type  full_range: Boolean
    @param full_range: (Optional, def=False) If enabled the field mutates through *all* possible values.
    @type  fuzzable:   Boolean
    @param fuzzable:   (Optional, def=True) Enable/disable fuzzing of this primitive
    @type  name:       String
    @param name:       (Optional, def=None) Specifying a name gives you direct access to a primitive
    """
    blocks.CURRENT.push(
        primitives.qword(value, endian, format, signed, full_range, fuzzable, name)
    )
########################################################################################################################
### ALIASES
########################################################################################################################
# Convenience aliases mapping alternate primitive names onto the canonical
# s_* routines above (s_size and s_repeat are defined elsewhere in this
# module).
s_dunno = s_raw = s_unknown = s_static
s_sizer = s_size
s_bit = s_bits = s_bit_field
s_char = s_byte
s_short = s_word
s_long = s_int = s_dword
s_double = s_qword
s_repeater = s_repeat
### SPIKE Aliases
def custom_raise (argument, msg):
    '''
    Build a callable that raises *argument* (an exception class) with *msg*
    and the received call argument included in the error text. Used to stub
    out the unimplemented SPIKE-compatibility aliases below.

    @type  argument: Exception class
    @param argument: Exception type to raise when the stub is invoked
    @type  msg:      String
    @param msg:      Message prefix describing why the stub raises
    '''
    # NOTE(fix): the original executed `raise msg, argument(x)`, which raises
    # a plain string -- string exceptions are invalid in modern Python 2 and a
    # syntax error in Python 3. Raise the exception class with a combined
    # message instead (callers still catch the same exception type).
    def _(x):
        raise argument("%s %r" % (msg, x))
    return _
# SPIKE compatibility aliases. The s_intel*/s_big* helpers delegate to the
# integer primitives with an explicit endianness (LITTLE_ENDIAN / BIG_ENDIAN
# are module-level constants defined elsewhere in this file); the remaining
# names are unimplemented in Sulley and raise via custom_raise() when called.
s_intelword = lambda x: s_long(x, endian=LITTLE_ENDIAN)
s_intelhalfword = lambda x: s_short(x, endian=LITTLE_ENDIAN)
s_bigword = lambda x: s_long(x, endian=BIG_ENDIAN)
s_string_lf = custom_raise(ValueError, "NotImplementedError: s_string_lf is not currently implemented, arguments were")
s_string_or_env = custom_raise(ValueError, "NotImplementedError: s_string_or_env is not currently implemented, arguments were")
s_string_repeat = custom_raise(ValueError, "NotImplementedError: s_string_repeat is not currently implemented, arguments were")
s_string_variable = custom_raise(ValueError, "NotImplementedError: s_string_variable is not currently implemented, arguments were")
s_string_variables = custom_raise(ValueError, "NotImplementedError: s_string_variables is not currently implemented, arguments were")
s_binary_repeat = custom_raise(ValueError, "NotImplementedError: s_string_variables is not currently implemented, arguments were")
# utf_16_le encoding matches Microsoft "Unicode" strings
s_unistring = lambda x: s_string(x, encoding="utf_16_le")
s_unistring_variable = custom_raise(ValueError, "NotImplementedError: s_unistring_variable is not currently implemented, arguments were")
s_xdr_string = custom_raise(ValueError, "LegoNotUtilizedError: XDR strings are available in the XDR lego, arguments were")
def s_cstring (x):
    '''
    Push a C-style string: the fuzzable string primitive followed by a static
    NUL terminator.

    @param x: String value to push
    '''
    s_string(x)
    s_static("\x00")
# SPIKE sizer-compatibility aliases. Sulley handles block sizes through the
# single s_size primitive, so every legacy SPIKE sizer name below raises a
# descriptive error (via custom_raise) pointing callers at s_size.
s_binary_block_size_intel_halfword_plus_variable = custom_raise(ValueError, "SizerNotUtilizedError: Use the s_size primitive for including sizes, arguments were")
s_binary_block_size_halfword_bigendian_variable = custom_raise(ValueError, "SizerNotUtilizedError: Use the s_size primitive for including sizes, arguments were")
s_binary_block_size_word_bigendian_plussome = custom_raise(ValueError, "SizerNotUtilizedError: Use the s_size primitive for including sizes, arguments were")
s_binary_block_size_word_bigendian_variable = custom_raise(ValueError, "SizerNotUtilizedError: Use the s_size primitive for including sizes, arguments were")
s_binary_block_size_halfword_bigendian_mult = custom_raise(ValueError, "SizerNotUtilizedError: Use the s_size primitive for including sizes, arguments were")
s_binary_block_size_intel_halfword_variable = custom_raise(ValueError, "SizerNotUtilizedError: Use the s_size primitive for including sizes, arguments were")
s_binary_block_size_intel_halfword_mult = custom_raise(ValueError, "SizerNotUtilizedError: Use the s_size primitive for including sizes, arguments were")
s_binary_block_size_intel_halfword_plus = custom_raise(ValueError, "SizerNotUtilizedError: Use the s_size primitive for including sizes, arguments were")
s_binary_block_size_halfword_bigendian = custom_raise(ValueError, "SizerNotUtilizedError: Use the s_size primitive for including sizes, arguments were")
s_binary_block_size_word_intel_mult_plus = custom_raise(ValueError, "SizerNotUtilizedError: Use the s_size primitive for including sizes, arguments were")
s_binary_block_size_intel_word_variable = custom_raise(ValueError, "SizerNotUtilizedError: Use the s_size primitive for including sizes, arguments were")
s_binary_block_size_word_bigendian_mult = custom_raise(ValueError, "SizerNotUtilizedError: Use the s_size primitive for including sizes, arguments were")
s_blocksize_unsigned_string_variable = custom_raise(ValueError, "SizerNotUtilizedError: Use the s_size primitive for including sizes, arguments were")
s_binary_block_size_intel_word_plus = custom_raise(ValueError, "SizerNotUtilizedError: Use the s_size primitive for including sizes, arguments were")
s_binary_block_size_intel_halfword = custom_raise(ValueError, "SizerNotUtilizedError: Use the s_size primitive for including sizes, arguments were")
s_binary_block_size_word_bigendian = custom_raise(ValueError, "SizerNotUtilizedError: Use the s_size primitive for including sizes, arguments were")
s_blocksize_signed_string_variable = custom_raise(ValueError, "SizerNotUtilizedError: Use the s_size primitive for including sizes, arguments were")
s_binary_block_size_byte_variable = custom_raise(ValueError, "SizerNotUtilizedError: Use the s_size primitive for including sizes, arguments were")
s_binary_block_size_intel_word = custom_raise(ValueError, "SizerNotUtilizedError: Use the s_size primitive for including sizes, arguments were")
s_binary_block_size_byte_plus = custom_raise(ValueError, "SizerNotUtilizedError: Use the s_size primitive for including sizes, arguments were")
s_binary_block_size_byte_mult = custom_raise(ValueError, "SizerNotUtilizedError: Use the s_size primitive for including sizes, arguments were")
s_blocksize_asciihex_variable = custom_raise(ValueError, "SizerNotUtilizedError: Use the s_size primitive for including sizes, arguments were")
s_binary_block_size_byte = custom_raise(ValueError, "SizerNotUtilizedError: Use the s_size primitive for including sizes, arguments were")
s_blocksize_asciihex = custom_raise(ValueError, "SizerNotUtilizedError: Use the s_size primitive for including sizes, arguments were")
s_blocksize_string = custom_raise(ValueError, "SizerNotUtilizedError: Use the s_size primitive for including sizes, arguments were")
########################################################################################################################
### MISC
########################################################################################################################
def s_hex_dump (data, addr=0):
    '''
    Return the hex dump of the supplied data starting at the offset address
    specified.

    @type  data: Raw
    @param data: Data to show hex dump of
    @type  addr: Integer
    @param addr: (Optional, def=0) Offset to start displaying hex dump addresses from

    @rtype:  String
    @return: Hex dump of raw data
    '''
    # NOTE(fix): the ASCII-column accumulator was previously named `slice`,
    # shadowing the builtin of the same name; renamed to ascii_chunk.
    dump = ""
    ascii_chunk = ""
    for byte in data:
        if addr % 16 == 0:
            # flush the printable-ASCII column for the completed row.
            dump += "  "
            for char in ascii_chunk:
                if 32 <= ord(char) <= 126:
                    dump += char
                else:
                    dump += "."
            dump += "\n%04x: " % addr
            ascii_chunk = ""
        dump += "%02x " % ord(byte)
        ascii_chunk += byte
        addr += 1
    remainder = addr % 16
    if remainder != 0:
        # pad the final partial row so the ASCII column lines up.
        dump += "   " * (16 - remainder) + "  "
    for char in ascii_chunk:
        if 32 <= ord(char) <= 126:
            dump += char
        else:
            dump += "."
    return dump + "\n"
|
gpl-2.0
|
PersonalGenomesOrg/open-humans-data-processing
|
sources/go_viral.py
|
2
|
2529
|
"""
Create data files from a user's GoViral data.
Copyright (C) 2016 PersonalGenomes.org
This software is shared under the "MIT License" license (aka "Expat License"),
see LICENSE.TXT for full license text.
"""
import json
import os
import requests
from base_source import BaseSource
GO_VIRAL_DATA_URL = 'https://www.goviralstudy.com/participants/{}/data'
class GoViralSource(BaseSource):
    """
    Create a GoViral dataset for the given ID.

    Required arguments:
        access_token: the management access token for GoViral
        go_viral_id: the user's GoViral ID
    """

    source = 'go_viral'

    def get_go_viral_data(self):
        """
        Retrieve GoViral data from the API for a given user.

        Returns the parsed response with empty top-level items removed, or
        None when access is denied or no non-empty data exists.
        """
        request = requests.get(GO_VIRAL_DATA_URL.format(self.go_viral_id),
                               params={'access_token': self.access_token})
        if request.status_code != 200:
            self.sentry_log('GoViral website not permitting any access! Bad '
                            'access token?')
            return None
        request_data = request.json()
        if ('code' in request_data and
                request_data['code'] == 'PERMISSION_DENIED'):
            self.sentry_log('Data access denied by GoViral. User: {}'
                            .format(self.go_viral_id))
            return None
        # NOTE(fix): reuse the already-parsed response instead of calling
        # request.json() a second time, and iterate over a snapshot of the
        # keys -- popping from a dict while iterating its live keys() raises
        # RuntimeError on Python 3.
        data = request_data
        for item in list(data.keys()):
            if not data[item]:
                data.pop(item)
        if not data:
            return None
        return data

    def handle_go_viral_data(self, data):
        """
        Write `data` as formatted JSON to a temp file and return the file
        record (temp filename, directory, and Open Humans metadata).
        """
        json_filename = 'GoViral-sickness-data.json'
        json_filepath = os.path.join(self.temp_directory, json_filename)
        with open(json_filepath, 'w') as f:
            json.dump(data, f, indent=2, sort_keys=True)
        return {
            'temp_filename': json_filename,
            'tempdir': self.temp_directory,
            'metadata': {
                'description': ('GoViral annual surveys, and sickness reports '
                                'with viral lab test results (if available)'),
                'tags': ['viral', 'survey', 'GoViral', 'json'],
            }
        }

    def create_files(self):
        """Fetch GoViral data and queue a temp file for it, if any exists."""
        data_go_viral = self.get_go_viral_data()
        # Don't create a file if there's no data from GoViral
        if not data_go_viral:
            return
        self.temp_files.append(self.handle_go_viral_data(data=data_go_viral))
# Allow this source to be run stand-alone from the command line.
if __name__ == '__main__':
    GoViralSource.cli()
|
mit
|
quantopian/serializable-traitlets
|
straitlets/ext/tests/test_click.py
|
1
|
4991
|
import re
from textwrap import dedent
import click
from click.testing import CliRunner
import pytest
from straitlets import (
Serializable,
StrictSerializable,
Bool,
Unicode,
Integer,
)
from straitlets.ext.click import (
JsonConfigFile,
YamlConfigFile,
)
from straitlets.test_utils import assert_serializables_equal
@pytest.fixture
def runner():
    """Pytest fixture providing a fresh click CLI test runner per test."""
    return CliRunner()
class Config(Serializable):
    """Test schema with one trait per exercised type.

    The attribute names deliberately mirror builtin type names; they are
    trait declarations on the class, not shadowed builtins in executable
    scope.
    """
    bool = Bool()
    unicode = Unicode()
    int = Integer()
class MissingAttr(Serializable):
    """Like Config but without the `int` trait; used to trigger exactly one
    missing-default validation error against the Config/StrictConfig schema.
    """
    bool = Bool()
    unicode = Unicode()
class StrictConfig(Config, StrictSerializable):
    """The Config schema with strict validation semantics."""
    pass
@pytest.fixture
def expected_instance():
    """A fully-populated Config instance used as the round-trip target."""
    return Config(
        bool=True,
        unicode='ayy',
        int=1,
    )
@pytest.fixture
def missing_attr_instance():
    """A MissingAttr instance; lacks `int` relative to the Config schema."""
    return MissingAttr(
        bool=True,
        unicode='ayy',
    )
multi_error_output = re.compile(
dedent(
"""\
Failed to validate the schema:
bool:
No default value found for bool trait of <.+?>
int:
No default value found for int trait of <.+?>
unicode:
No default value found for unicode trait of <.+?>
""",
),
)
single_error_output = re.compile(
dedent(
"""\
Failed to validate the schema:
No default value found for int trait of <.+?>
""",
),
)
def test_json_file(runner, expected_instance):
    """A valid JSON config file deserializes into the expected instance."""
    captured = []  # closed over by main(); a list emulates `nonlocal`

    @click.command()
    @click.option('--config', type=JsonConfigFile(Config))
    def main(config):
        captured.append(config)

    with runner.isolated_filesystem():
        with open('f.json', 'w') as fh:
            fh.write(expected_instance.to_json())
        result = runner.invoke(
            main, ['--config', 'f.json'],
            input='not-json', catch_exceptions=False,
        )
        assert result.output == ''
        assert result.exit_code == 0
        assert_serializables_equal(captured[0], expected_instance)
def test_json_multiple_errors(runner):
    """An empty JSON document reports every missing trait at once."""
    @click.command()
    @click.option('--config', type=JsonConfigFile(StrictConfig))
    def main(config):  # pragma: no cover
        pass

    with runner.isolated_filesystem():
        with open('f.json', 'w') as fh:
            fh.write('{}')
        result = runner.invoke(
            main, ['--config', 'f.json'],
            input='not-json', catch_exceptions=False,
        )
        assert result.exit_code
        assert multi_error_output.search(result.output)
def test_json_single_error(runner, missing_attr_instance):
    """A JSON document missing one trait reports that single error."""
    @click.command()
    @click.option('--config', type=JsonConfigFile(StrictConfig))
    def main(config):  # pragma: no cover
        pass

    with runner.isolated_filesystem():
        with open('f.json', 'w') as fh:
            fh.write(missing_attr_instance.to_json())
        result = runner.invoke(
            main, ['--config', 'f.json'],
            input='not-json', catch_exceptions=False,
        )
        assert result.exit_code
        assert single_error_output.search(result.output)
def test_yaml_file(runner, expected_instance):
    """A valid YAML config file deserializes into the expected instance."""
    captured = []  # closed over by main(); a list emulates `nonlocal`

    @click.command()
    @click.option('--config', type=YamlConfigFile(Config))
    def main(config):
        captured.append(config)

    with runner.isolated_filesystem():
        with open('f.yml', 'w') as fh:
            fh.write(expected_instance.to_yaml())
        result = runner.invoke(
            main, ['--config', 'f.yml'],
            input='not-yaml', catch_exceptions=False,
        )
        assert result.output == ''
        assert result.exit_code == 0
        assert_serializables_equal(captured[0], expected_instance)
def test_yaml_multiple_errors(runner):
    """An empty YAML document reports every missing trait at once."""
    @click.command()
    @click.option('--config', type=YamlConfigFile(StrictConfig))
    def main(config):  # pragma: no cover
        pass

    with runner.isolated_filesystem():
        with open('f.yml', 'w') as fh:
            fh.write('{}')
        result = runner.invoke(
            main, ['--config', 'f.yml'],
            input='not-yaml', catch_exceptions=False,
        )
        assert result.exit_code
        assert multi_error_output.search(result.output)
def test_yaml_single_error(runner, missing_attr_instance):
    """A YAML document missing one trait reports that single error."""
    @click.command()
    @click.option('--config', type=YamlConfigFile(StrictConfig))
    def main(config):  # pragma: no cover
        pass

    with runner.isolated_filesystem():
        with open('f.yml', 'w') as fh:
            fh.write(missing_attr_instance.to_yaml())
        result = runner.invoke(
            main, ['--config', 'f.yml'],
            input='not-yaml', catch_exceptions=False,
        )
        assert result.exit_code
        assert single_error_output.search(result.output)
|
apache-2.0
|
kailIII/geraldo
|
site/newsite/django_1_0/django/utils/text.py
|
12
|
8731
|
import re
from django.conf import settings
from django.utils.encoding import force_unicode
from django.utils.functional import allow_lazy
from django.utils.translation import ugettext_lazy
from htmlentitydefs import name2codepoint
# Capitalizes the first letter of a string.
# force_unicode() coerces bytestring/lazy input before indexing; allow_lazy
# makes the function itself safe to call on lazy translation objects.
capfirst = lambda x: x and force_unicode(x)[0].upper() + force_unicode(x)[1:]
capfirst = allow_lazy(capfirst, unicode)
def wrap(text, width):
    """
    A word-wrap function that preserves existing line breaks and most spaces in
    the text. Expects that existing line breaks are posix newlines.
    """
    text = force_unicode(text)
    def _generator():
        it = iter(text.split(' '))
        word = it.next()  # Python 2 iterator protocol
        yield word
        # pos tracks the length of the current output line so far; a word may
        # itself contain newlines, so count only the part after the last one.
        pos = len(word) - word.rfind('\n') - 1
        for word in it:
            if "\n" in word:
                lines = word.split('\n')
            else:
                lines = (word,)
            # line length if this word (up to its first newline) is appended,
            # plus one for the joining space.
            pos += len(lines[0]) + 1
            if pos > width:
                yield '\n'
                pos = len(lines[-1])
            else:
                yield ' '
                if len(lines) > 1:
                    # word carried its own newline(s); restart the count from
                    # the text after its last newline.
                    pos = len(lines[-1])
            yield word
    return u''.join(_generator())
wrap = allow_lazy(wrap, unicode)
def truncate_words(s, num):
    "Truncates a string after a certain number of words."
    # Note: the result is always re-joined with single spaces, so runs of
    # whitespace are normalized even when no truncation happens.
    limit = int(num)
    words = force_unicode(s).split()
    if len(words) > limit:
        words = words[:limit]
        if not words[-1].endswith('...'):
            words.append('...')
    return u' '.join(words)
truncate_words = allow_lazy(truncate_words, unicode)
def truncate_html_words(s, num):
    """
    Truncates html to a certain number of words (not counting tags and
    comments). Closes opened tags if they were correctly closed in the given
    html.
    """
    s = force_unicode(s)
    length = int(num)
    if length <= 0:
        return u''
    # HTML4 elements that never take a closing tag.
    html4_singlets = ('br', 'col', 'link', 'base', 'img', 'param', 'area', 'hr', 'input')
    # Set up regular expressions: entity | tag | word (only words captured).
    re_words = re.compile(r'&.*?;|<.*?>|(\w[\w-]*)', re.U)
    re_tag = re.compile(r'<(/)?([^ ]+?)(?: (/)| .*?)?>')
    # Count non-HTML words and keep note of open tags
    pos = 0
    # ellipsis_pos: string index just past the length-th word (0 = not reached)
    ellipsis_pos = 0
    words = 0
    open_tags = []
    while words <= length:
        m = re_words.search(s, pos)
        if not m:
            # Checked through whole string
            break
        pos = m.end(0)
        if m.group(1):
            # It's an actual non-HTML word
            words += 1
            if words == length:
                ellipsis_pos = pos
            continue
        # Check for tag
        tag = re_tag.match(m.group(0))
        if not tag or ellipsis_pos:
            # Don't worry about non tags or tags after our truncate point
            continue
        closing_tag, tagname, self_closing = tag.groups()
        tagname = tagname.lower()  # Element names are always case-insensitive
        if self_closing or tagname in html4_singlets:
            pass
        elif closing_tag:
            # Check for match in open tags list
            try:
                i = open_tags.index(tagname)
            except ValueError:
                pass
            else:
                # SGML: An end tag closes, back to the matching start tag, all unclosed intervening start tags with omitted end tags
                open_tags = open_tags[i+1:]
        else:
            # Add it to the start of the open tags list
            open_tags.insert(0, tagname)
    if words <= length:
        # Don't try to close tags if we don't need to truncate
        return s
    out = s[:ellipsis_pos] + ' ...'
    # Close any tags still open
    for tag in open_tags:
        out += '</%s>' % tag
    # Return string
    return out
truncate_html_words = allow_lazy(truncate_html_words, unicode)
def get_valid_filename(s):
    """
    Returns the given string converted to a string that can be used for a clean
    filename. Specifically, leading and trailing spaces are removed; other
    spaces are converted to underscores; and all non-filename-safe characters
    are removed.
    >>> get_valid_filename("john's portrait in 2004.jpg")
    u'johns_portrait_in_2004.jpg'
    """
    cleaned = force_unicode(s).strip().replace(' ', '_')
    # keep only letters, digits, dash, underscore and dot
    return re.sub(r'[^-A-Za-z0-9_.]', '', cleaned)
get_valid_filename = allow_lazy(get_valid_filename, unicode)
def get_text_list(list_, last_word=ugettext_lazy(u'or')):
    """
    Join a list into natural-language form, using `last_word` before the
    final item.
    >>> get_text_list(['a', 'b', 'c', 'd'])
    u'a, b, c or d'
    >>> get_text_list(['a', 'b', 'c'], 'and')
    u'a, b and c'
    >>> get_text_list(['a', 'b'], 'and')
    u'a and b'
    >>> get_text_list(['a'])
    u'a'
    >>> get_text_list([])
    u''
    """
    if not list_:
        return u''
    if len(list_) == 1:
        return force_unicode(list_[0])
    head = u', '.join([force_unicode(i) for i in list_[:-1]])
    return u'%s %s %s' % (head, force_unicode(last_word), force_unicode(list_[-1]))
get_text_list = allow_lazy(get_text_list, unicode)
def normalize_newlines(text):
    """Convert Windows (\\r\\n) and Mac (\\r) line endings to posix (\\n)."""
    normalized = re.sub(r'\r\n|\r|\n', '\n', text)
    return force_unicode(normalized)
normalize_newlines = allow_lazy(normalize_newlines, unicode)
# Matches a lowercase letter at the start of the text or immediately after
# sentence-ending punctuation followed by a space.
_recapitalize_re = re.compile(r'(?:^|(?<=[\.\?\!] ))([a-z])')
def recapitalize(text):
    "Recapitalizes text, placing caps after end-of-sentence punctuation."
    # NOTE(fix): the pattern was previously recompiled on every call; it is
    # now compiled once at import time.
    text = force_unicode(text).lower()
    return _recapitalize_re.sub(lambda x: x.group(1).upper(), text)
recapitalize = allow_lazy(recapitalize)
def phone2numeric(phone):
    "Converts a phone number with letters into its numeric equivalent."
    # standard telephone keypad mapping (Q and Z have no key)
    keypad = {
        'a': '2', 'b': '2', 'c': '2',
        'd': '3', 'e': '3', 'f': '3',
        'g': '4', 'h': '4', 'i': '4',
        'j': '5', 'k': '5', 'l': '5',
        'm': '6', 'n': '6', 'o': '6',
        'p': '7', 'r': '7', 's': '7',
        't': '8', 'u': '8', 'v': '8',
        'w': '9', 'x': '9', 'y': '9',
    }
    letters = re.compile(r'[A-PR-Y]', re.I)
    return letters.sub(lambda m: keypad.get(m.group(0).lower()), phone)
phone2numeric = allow_lazy(phone2numeric)
# From http://www.xhaus.com/alan/python/httpcomp.html#gzip
# Used with permission.
def compress_string(s):
    """gzip-compress the bytestring `s` and return the compressed bytes."""
    import cStringIO, gzip  # Python 2: cStringIO is the C-accelerated StringIO
    zbuf = cStringIO.StringIO()
    # compresslevel=6 trades a little compression ratio for speed
    # (the GzipFile default is 9).
    zfile = gzip.GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)
    zfile.write(s)
    zfile.close()
    return zbuf.getvalue()
# Matches any non-ASCII character, for escaping to \uXXXX form.
ustring_re = re.compile(u"([\u0080-\uffff])")
def javascript_quote(s, quote_double_quotes=False):
    """
    Escape `s` for safe embedding in a JavaScript string literal:
    backslashes, CR/LF/TAB and single quotes are backslash-escaped and
    non-ASCII characters become \\uXXXX escapes. When quote_double_quotes is
    True, double quotes are replaced with the &quot; HTML entity.

    Accepts utf-8 bytestrings or unicode; raises TypeError otherwise.
    """
    def fix(match):
        return r"\u%04x" % ord(match.group(1))
    if type(s) == str:
        s = s.decode('utf-8')
    elif type(s) != unicode:
        # NOTE(fix): call form of raise (valid on both Python 2 and 3) in
        # place of the py2-only `raise TypeError, s` statement form.
        raise TypeError(s)
    s = s.replace('\\', '\\\\')
    s = s.replace('\r', '\\r')
    s = s.replace('\n', '\\n')
    s = s.replace('\t', '\\t')
    s = s.replace("'", "\\'")
    if quote_double_quotes:
        # NOTE(fix): this previously replaced '"' with itself (a no-op,
        # apparently an unescaped HTML entity); restore the &quot;
        # replacement so the quote_double_quotes option has an effect.
        s = s.replace('"', '&quot;')
    return str(ustring_re.sub(fix, s))
javascript_quote = allow_lazy(javascript_quote, unicode)
# A token is a fully double-quoted chunk, a fully single-quoted chunk (both
# allowing backslash escapes), or a run of non-whitespace.
smart_split_re = re.compile('("(?:[^"\\\\]*(?:\\\\.[^"\\\\]*)*)"|\'(?:[^\'\\\\]*(?:\\\\.[^\'\\\\]*)*)\'|[^\\s]+)')
def smart_split(text):
    r"""
    Generator that splits a string by spaces, leaving quoted phrases together.
    Supports both single and double quotes, and supports escaping quotes with
    backslashes. In the output, strings will keep their initial and trailing
    quote marks.
    >>> list(smart_split(r'This is "a person\'s" test.'))
    [u'This', u'is', u'"a person\\\'s"', u'test.']
    >>> list(smart_split(r"Another 'person\'s' test."))
    [u'Another', u"'person's'", u'test.']
    >>> list(smart_split(r'A "\"funky\" style" test.'))
    [u'A', u'""funky" style"', u'test.']
    """
    text = force_unicode(text)
    for bit in smart_split_re.finditer(text):
        bit = bit.group(0)
        # For chunks enclosed in matching quotes, unescape the quote
        # character and backslashes but keep the surrounding quote marks.
        if bit[0] == '"' and bit[-1] == '"':
            yield '"' + bit[1:-1].replace('\\"', '"').replace('\\\\', '\\') + '"'
        elif bit[0] == "'" and bit[-1] == "'":
            yield "'" + bit[1:-1].replace("\\'", "'").replace("\\\\", "\\") + "'"
        else:
            yield bit
smart_split = allow_lazy(smart_split, unicode)
def _replace_entity(match):
    """Regex callback: convert one HTML entity match to its character, or
    return the original text unchanged if the entity is unknown/invalid."""
    text = match.group(1)
    if text[0] == u'#':
        # numeric character reference: decimal (#123) or hex (#x7B)
        text = text[1:]
        try:
            if text[0] in u'xX':
                c = int(text[1:], 16)
            else:
                c = int(text)
            return unichr(c)  # Python 2 unicode code point constructor
        except ValueError:
            return match.group(0)
    else:
        # named entity, e.g. &amp;
        try:
            return unichr(name2codepoint[text])
        except (ValueError, KeyError):
            return match.group(0)
# Matches decimal (&#931;), hex (&#x3A3;) and named (&Sigma;) entities.
_entity_re = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
def unescape_entities(text):
    """Replace HTML entities in `text` with the characters they denote."""
    return _entity_re.sub(_replace_entity, text)
unescape_entities = allow_lazy(unescape_entities, unicode)
|
lgpl-3.0
|
hackers-terabit/portage
|
pym/portage/manifest.py
|
1
|
24994
|
# Copyright 1999-2016 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import unicode_literals
import errno
import io
import logging
import re
import stat
import sys
import warnings
import portage
portage.proxy.lazyimport.lazyimport(globals(),
'portage.checksum:hashfunc_map,perform_multiple_checksums,' + \
'verify_all,_apply_hash_filter,_filter_unaccelarated_hashes',
'portage.repository.config:_find_invalid_path_char',
'portage.util:write_atomic,writemsg_level',
)
from portage import os
from portage import _encodings
from portage import _unicode_decode
from portage import _unicode_encode
from portage.exception import DigestException, FileNotFound, \
InvalidDataType, MissingParameter, PermissionDenied, \
PortageException, PortagePackageException
from portage.const import (MANIFEST1_HASH_FUNCTIONS, MANIFEST2_HASH_DEFAULTS,
MANIFEST2_HASH_FUNCTIONS, MANIFEST2_IDENTIFIERS, MANIFEST2_REQUIRED_HASH)
from portage.localization import _
# Matches one Manifest2 line: "<TYPE> <filename> <size> (<hash> <value>)+".
# The filename group is greedy, so the size/hash pairs anchor from the right.
_manifest_re = re.compile(
    r'^(' + '|'.join(MANIFEST2_IDENTIFIERS) + r') (.*)( \d+( \S+ \S+)+)$',
    re.UNICODE)
if sys.hexversion >= 0x3000000:
    # pylint: disable=W0622
    # On Python 3, alias the removed py2 names so the rest of the module can
    # use `_unicode` and `basestring` unconditionally.
    _unicode = str
    basestring = str
else:
    _unicode = unicode
class FileNotInManifestException(PortageException):
    """Raised when a requested file has no entry in the Manifest."""
    pass
def manifest2AuxfileFilter(filename):
    """Return True if *filename* should be recorded as an AUX entry:
    excludes CVS directories, hidden path components and legacy
    digest- files."""
    filename = filename.strip(os.sep)
    parts = filename.split(os.path.sep)
    if "CVS" in parts:
        return False
    if any(part[:1] == '.' for part in parts):
        return False
    return not filename[:7] == 'digest-'
def manifest2MiscfileFilter(filename):
    """Return True if *filename* should be treated as a MISC entry
    (anything except the Manifest itself and ebuilds)."""
    if filename == "Manifest":
        return False
    return not filename.endswith(".ebuild")
def guessManifestFileType(filename):
    """ Perform a best effort guess of which type the given filename is, avoid using this if possible """
    aux_prefix = "files" + os.sep
    if filename.startswith(aux_prefix + "digest-"):
        # legacy digest files carry no manifest entry type
        return None
    if filename.startswith(aux_prefix):
        return "AUX"
    if filename.endswith(".ebuild"):
        return "EBUILD"
    if filename in ("ChangeLog", "metadata.xml"):
        return "MISC"
    return "DIST"
def guessThinManifestFileType(filename):
    """Like guessManifestFileType(), but for thin manifests: only DIST
    entries are tracked, so everything else maps to None."""
    # NOTE(fix): the local was previously named `type`, shadowing the builtin.
    guessed = guessManifestFileType(filename)
    if guessed != "DIST":
        return None
    return "DIST"
def parseManifest2(line):
    """Parse a single Manifest2 line (a string or a pre-split token
    sequence) into a Manifest2Entry, or return None if it does not match
    the Manifest2 format."""
    if not isinstance(line, basestring):
        # accept a token list/tuple as well as a raw line
        line = ' '.join(line)
    myentry = None
    match = _manifest_re.match(line)
    if match is not None:
        # group(3) is " <size> <hash-name> <hash-value>..."; pair up the
        # alternating name/value tokens after the leading size.
        tokens = match.group(3).split()
        hashes = dict(zip(tokens[1::2], tokens[2::2]))
        hashes["size"] = int(tokens[0])
        myentry = Manifest2Entry(type=match.group(1),
            name=match.group(2), hashes=hashes)
    return myentry
class ManifestEntry(object):
    """Base class for a single Manifest entry: a type (DIST, EBUILD, ...),
    a file name, and a dict of hash-name -> value (plus "size")."""
    __slots__ = ("type", "name", "hashes")
    def __init__(self, **kwargs):
        for attr, value in kwargs.items():
            setattr(self, attr, value)
class Manifest2Entry(ManifestEntry):
    """A single "TYPE name size HASH value ..." line of a Manifest2 file."""
    def __str__(self):
        # Render hashes in sorted name order for reproducible output; the
        # size always comes first, directly after the file name.
        myline = " ".join([self.type, self.name, str(self.hashes["size"])])
        myhashkeys = list(self.hashes)
        myhashkeys.remove("size")
        myhashkeys.sort()
        for h in myhashkeys:
            myline += " " + h + " " + str(self.hashes[h])
        return myline
    def __eq__(self, other):
        # Entries are equal iff type, name and the full hash dict all match.
        if not isinstance(other, Manifest2Entry) or \
            self.type != other.type or \
            self.name != other.name or \
            self.hashes != other.hashes:
            return False
        return True
    def __ne__(self, other):
        return not self.__eq__(other)
    if sys.hexversion < 0x3000000:
        # On Python 2, __str__ must return bytes: keep the text rendering as
        # __unicode__ and have __str__ encode it with the repo's encoding.
        __unicode__ = __str__
        def __str__(self):
            return _unicode_encode(self.__unicode__(),
                encoding=_encodings['repo.content'], errors='strict')
class Manifest(object):
 """In-memory model of one package directory's Manifest file.

 Hash data lives in self.fhashdict, keyed first by manifest type
 (AUX, EBUILD, MISC, DIST) and then by file name.
 """
 # Line parsers tried in order against each Manifest line.
 parsers = (parseManifest2,)
 def __init__(self, pkgdir, distdir=None, fetchlist_dict=None,
  manifest1_compat=DeprecationWarning, from_scratch=False, thin=False,
  allow_missing=False, allow_create=True, hashes=None,
  find_invalid_path_char=None):
  """ Create new Manifest instance for package in pkgdir.
  Do not parse Manifest file if from_scratch == True (only for internal use)
  The fetchlist_dict parameter is required only for generation of
  a Manifest (not needed for parsing and checking sums).
  If thin is specified, then the manifest carries only info for
  distfiles."""
  # manifest1 support was removed; passing the old argument only warns.
  if manifest1_compat is not DeprecationWarning:
   warnings.warn("The manifest1_compat parameter of the "
    "portage.manifest.Manifest constructor is deprecated.",
    DeprecationWarning, stacklevel=2)
  if find_invalid_path_char is None:
   find_invalid_path_char = _find_invalid_path_char
  self._find_invalid_path_char = find_invalid_path_char
  self.pkgdir = _unicode_decode(pkgdir).rstrip(os.sep) + os.sep
  self.fhashdict = {}
  self.hashes = set()
  if hashes is None:
   hashes = MANIFEST2_HASH_DEFAULTS
  # Keep only hash names that are both valid manifest2 hashes and
  # actually computable (present in hashfunc_map).
  self.hashes.update(hashes.intersection(MANIFEST2_HASH_FUNCTIONS))
  self.hashes.difference_update(hashname for hashname in \
   list(self.hashes) if hashname not in hashfunc_map)
  self.hashes.add("size")
  self.hashes.add(MANIFEST2_REQUIRED_HASH)
  for t in MANIFEST2_IDENTIFIERS:
   self.fhashdict[t] = {}
  if not from_scratch:
   self._read()
  if fetchlist_dict != None:
   self.fetchlist_dict = fetchlist_dict
  else:
   self.fetchlist_dict = {}
  self.distdir = distdir
  self.thin = thin
  if thin:
   self.guessType = guessThinManifestFileType
  else:
   self.guessType = guessManifestFileType
  self.allow_missing = allow_missing
  self.allow_create = allow_create
 def getFullname(self):
  """ Returns the absolute path to the Manifest file for this instance """
  return os.path.join(self.pkgdir, "Manifest")
 def getDigests(self):
  """ Compatibility function for old digest/manifest code, returns dict of filename:{hashfunction:hashvalue} """
  rval = {}
  for t in MANIFEST2_IDENTIFIERS:
   rval.update(self.fhashdict[t])
  return rval
 def getTypeDigests(self, ftype):
  """ Similar to getDigests(), but restricted to files of the given type. """
  return self.fhashdict[ftype]
 def _readManifest(self, file_path, myhashdict=None, **kwargs):
  """Parse a manifest. If myhashdict is given then data will be added to it.
  Otherwise, a new dict will be created and returned."""
  try:
   with io.open(_unicode_encode(file_path,
    encoding=_encodings['fs'], errors='strict'), mode='r',
    encoding=_encodings['repo.content'], errors='replace') as f:
    if myhashdict is None:
     myhashdict = {}
    self._parseDigests(f, myhashdict=myhashdict, **kwargs)
   return myhashdict
  except (OSError, IOError) as e:
   if e.errno == errno.ENOENT:
    raise FileNotFound(file_path)
   else:
    raise
 def _read(self):
  """ Parse Manifest file for this instance """
  try:
   self._readManifest(self.getFullname(), myhashdict=self.fhashdict)
  except FileNotFound:
   # A missing Manifest simply leaves the hash dictionaries empty.
   pass
 def _parseManifestLines(self, mylines):
  """Parse manifest lines and return a list of manifest entries."""
  for myline in mylines:
   myentry = None
   for parser in self.parsers:
    myentry = parser(myline)
    if myentry is not None:
     yield myentry
     break # go to the next line
 def _parseDigests(self, mylines, myhashdict=None, mytype=None):
  """Parse manifest entries and store the data in myhashdict. If mytype
  is specified, it will override the type for all parsed entries."""
  if myhashdict is None:
   myhashdict = {}
  for myentry in self._parseManifestLines(mylines):
   if mytype is None:
    myentry_type = myentry.type
   else:
    myentry_type = mytype
   myhashdict.setdefault(myentry_type, {})
   myhashdict[myentry_type].setdefault(myentry.name, {})
   myhashdict[myentry_type][myentry.name].update(myentry.hashes)
  return myhashdict
 def _getDigestData(self, distlist):
  """create a hash dict for a specific list of files"""
  myhashdict = {}
  for myname in distlist:
   for mytype in self.fhashdict:
    if myname in self.fhashdict[mytype]:
     myhashdict.setdefault(mytype, {})
     myhashdict[mytype].setdefault(myname, {})
     myhashdict[mytype][myname].update(self.fhashdict[mytype][myname])
  return myhashdict
 def _createManifestEntries(self):
  """Yield Manifest2Entry objects for every tracked file, ordered by
  type then file name, dropping hash names that are not valid."""
  valid_hashes = set(MANIFEST2_HASH_FUNCTIONS)
  valid_hashes.add('size')
  mytypes = list(self.fhashdict)
  mytypes.sort()
  for t in mytypes:
   myfiles = list(self.fhashdict[t])
   myfiles.sort()
   for f in myfiles:
    myentry = Manifest2Entry(
     type=t, name=f, hashes=self.fhashdict[t][f].copy())
    for h in list(myentry.hashes):
     if h not in valid_hashes:
      del myentry.hashes[h]
    yield myentry
 def checkIntegrity(self):
  """Raise MissingParameter if any tracked file lacks the required hash."""
  for t in self.fhashdict:
   for f in self.fhashdict[t]:
    if MANIFEST2_REQUIRED_HASH not in self.fhashdict[t][f]:
     raise MissingParameter(_("Missing %s checksum: %s %s") %
      (MANIFEST2_REQUIRED_HASH, t, f))
 def write(self, sign=False, force=False):
  """ Write Manifest instance to disk, optionally signing it. Returns
  True if the Manifest is actually written, and False if the write
  is skipped due to existing Manifest being identical."""
  rval = False
  if not self.allow_create:
   return rval
  self.checkIntegrity()
  try:
   myentries = list(self._createManifestEntries())
   update_manifest = True
   preserved_stats = {}
   preserved_stats[self.pkgdir.rstrip(os.sep)] = os.stat(self.pkgdir)
   if myentries and not force:
    # Compare against the existing Manifest; skip rewriting when the
    # entry lists are identical (keeps mtimes stable for rsync).
    try:
     f = io.open(_unicode_encode(self.getFullname(),
      encoding=_encodings['fs'], errors='strict'),
      mode='r', encoding=_encodings['repo.content'],
      errors='replace')
     oldentries = list(self._parseManifestLines(f))
     preserved_stats[self.getFullname()] = os.fstat(f.fileno())
     f.close()
     if len(oldentries) == len(myentries):
      update_manifest = False
      for i in range(len(oldentries)):
       if oldentries[i] != myentries[i]:
        update_manifest = True
        break
    except (IOError, OSError) as e:
     if e.errno == errno.ENOENT:
      pass
     else:
      raise
   if update_manifest:
    if myentries or not (self.thin or self.allow_missing):
     # If myentries is empty, don't write an empty manifest
     # when thin or allow_missing is enabled. Except for
     # thin manifests with no DIST entries, myentries is
     # non-empty for all currently known use cases.
     write_atomic(self.getFullname(), "".join("%s\n" %
      _unicode(myentry) for myentry in myentries))
     self._apply_max_mtime(preserved_stats, myentries)
     rval = True
    else:
     # With thin manifest, there's no need to have
     # a Manifest file if there are no DIST entries.
     try:
      os.unlink(self.getFullname())
     except OSError as e:
      if e.errno != errno.ENOENT:
       raise
      rval = True
   if sign:
    self.sign()
  except (IOError, OSError) as e:
   if e.errno == errno.EACCES:
    raise PermissionDenied(str(e))
   raise
  return rval
 def _apply_max_mtime(self, preserved_stats, entries):
  """
  Set the Manifest mtime to the max mtime of all relevant files
  and directories. Directory mtimes account for file renames and
  removals. The existing Manifest mtime accounts for eclass
  modifications that change DIST entries. This results in a
  stable/predictable mtime, which is useful when converting thin
  manifests to thick manifests for distribution via rsync. For
  portability, the mtime is set with 1 second resolution.
  @param preserved_stats: maps paths to preserved stat results
   that should be used instead of os.stat() calls
  @type preserved_stats: dict
  @param entries: list of current Manifest2Entry instances
  @type entries: list
  """
  # Use stat_result[stat.ST_MTIME] for 1 second resolution, since
  # it always rounds down. Note that stat_result.st_mtime will round
  # up from 0.999999999 to 1.0 when precision is lost during conversion
  # from nanosecond resolution to float.
  max_mtime = None
  _update_max = (lambda st: max_mtime if max_mtime is not None
   and max_mtime > st[stat.ST_MTIME] else st[stat.ST_MTIME])
  _stat = (lambda path: preserved_stats[path] if path in preserved_stats
   else os.stat(path))
  for stat_result in preserved_stats.values():
   max_mtime = _update_max(stat_result)
  for entry in entries:
   if entry.type == 'DIST':
    continue
   abs_path = (os.path.join(self.pkgdir, 'files', entry.name) if
    entry.type == 'AUX' else os.path.join(self.pkgdir, entry.name))
   max_mtime = _update_max(_stat(abs_path))
  if not self.thin:
   # Account for changes to all relevant nested directories.
   # This is not necessary for thin manifests because
   # self.pkgdir is already included via preserved_stats.
   for parent_dir, dirs, files in os.walk(self.pkgdir.rstrip(os.sep)):
    try:
     parent_dir = _unicode_decode(parent_dir,
      encoding=_encodings['fs'], errors='strict')
    except UnicodeDecodeError:
     # If an absolute path cannot be decoded, then it is
     # always excluded from the manifest (repoman will
     # report such problems).
     pass
    else:
     max_mtime = _update_max(_stat(parent_dir))
  if max_mtime is not None:
   for path in preserved_stats:
    try:
     os.utime(path, (max_mtime, max_mtime))
    except OSError as e:
     # Even though we have write permission, utime fails
     # with EPERM if path is owned by a different user.
     # Only warn in this case, since it's not a problem
     # unless this repo is being prepared for distribution
     # via rsync.
     writemsg_level('!!! utime(\'%s\', (%s, %s)): %s\n' %
      (path, max_mtime, max_mtime, e),
      level=logging.WARNING, noiselevel=-1)
 def sign(self):
  """ Sign the Manifest """
  raise NotImplementedError()
 def validateSignature(self):
  """ Validate signature on Manifest """
  raise NotImplementedError()
 def addFile(self, ftype, fname, hashdict=None, ignoreMissing=False):
  """ Add entry to Manifest optionally using hashdict to avoid recalculation of hashes """
  if ftype == "AUX" and not fname.startswith("files/"):
   fname = os.path.join("files", fname)
  if not os.path.exists(self.pkgdir+fname) and not ignoreMissing:
   raise FileNotFound(fname)
  if not ftype in MANIFEST2_IDENTIFIERS:
   raise InvalidDataType(ftype)
  if ftype == "AUX" and fname.startswith("files"):
   # AUX entry names are stored relative to the files/ subdirectory.
   fname = fname[6:]
  self.fhashdict[ftype][fname] = {}
  if hashdict != None:
   self.fhashdict[ftype][fname].update(hashdict)
  if not MANIFEST2_REQUIRED_HASH in self.fhashdict[ftype][fname]:
   self.updateFileHashes(ftype, fname, checkExisting=False, ignoreMissing=ignoreMissing)
 def removeFile(self, ftype, fname):
  """ Remove given entry from Manifest """
  del self.fhashdict[ftype][fname]
 def hasFile(self, ftype, fname):
  """ Return whether the Manifest contains an entry for the given type,filename pair """
  return (fname in self.fhashdict[ftype])
 def findFile(self, fname):
  """ Return entrytype of the given file if present in Manifest or None if not present """
  for t in MANIFEST2_IDENTIFIERS:
   if fname in self.fhashdict[t]:
    return t
  return None
 def create(self, checkExisting=False, assumeDistHashesSometimes=False,
  assumeDistHashesAlways=False, requiredDistfiles=[]):
  """ Recreate this Manifest from scratch. This will not use any
  existing checksums unless assumeDistHashesSometimes or
  assumeDistHashesAlways is true (assumeDistHashesSometimes will only
  cause DIST checksums to be reused if the file doesn't exist in
  DISTDIR). The requiredDistfiles parameter specifies a list of
  distfiles to raise a FileNotFound exception for (if no file or existing
  checksums are available), and defaults to all distfiles when not
  specified."""
  # NOTE(review): requiredDistfiles=[] is a shared mutable default; it
  # is only rebound (never mutated) below, but None would be safer.
  if not self.allow_create:
   return
  if checkExisting:
   self.checkAllHashes()
  if assumeDistHashesSometimes or assumeDistHashesAlways:
   distfilehashes = self.fhashdict["DIST"]
  else:
   distfilehashes = {}
  # Reset all state via __init__ while preserving configuration.
  self.__init__(self.pkgdir, distdir=self.distdir,
   fetchlist_dict=self.fetchlist_dict, from_scratch=True,
   thin=self.thin, allow_missing=self.allow_missing,
   allow_create=self.allow_create, hashes=self.hashes,
   find_invalid_path_char=self._find_invalid_path_char)
  pn = os.path.basename(self.pkgdir.rstrip(os.path.sep))
  cat = self._pkgdir_category()
  pkgdir = self.pkgdir
  if self.thin:
   cpvlist = self._update_thin_pkgdir(cat, pn, pkgdir)
  else:
   cpvlist = self._update_thick_pkgdir(cat, pn, pkgdir)
  distlist = set()
  for cpv in cpvlist:
   distlist.update(self._getCpvDistfiles(cpv))
  if requiredDistfiles is None:
   # This allows us to force removal of stale digests for the
   # ebuild --force digest option (no distfiles are required).
   requiredDistfiles = set()
  elif len(requiredDistfiles) == 0:
   # repoman passes in an empty list, which implies that all distfiles
   # are required.
   requiredDistfiles = distlist.copy()
  required_hash_types = set()
  required_hash_types.add("size")
  required_hash_types.add(MANIFEST2_REQUIRED_HASH)
  for f in distlist:
   fname = os.path.join(self.distdir, f)
   mystat = None
   try:
    mystat = os.stat(fname)
   except OSError:
    pass
   if f in distfilehashes and \
    not required_hash_types.difference(distfilehashes[f]) and \
    ((assumeDistHashesSometimes and mystat is None) or \
    (assumeDistHashesAlways and mystat is None) or \
    (assumeDistHashesAlways and mystat is not None and \
    set(distfilehashes[f]) == set(self.hashes) and \
    distfilehashes[f]["size"] == mystat.st_size)):
    self.fhashdict["DIST"][f] = distfilehashes[f]
   else:
    try:
     self.fhashdict["DIST"][f] = perform_multiple_checksums(fname, self.hashes)
    except FileNotFound:
     if f in requiredDistfiles:
      raise
 def _is_cpv(self, cat, pn, filename):
  """Return "cat/pf" for an ebuild filename, or None for non-ebuilds.
  Raises PortagePackageException for invalid or mismatched names."""
  if not filename.endswith(".ebuild"):
   return None
  pf = filename[:-7]
  ps = portage.versions._pkgsplit(pf)
  cpv = "%s/%s" % (cat, pf)
  if not ps:
   raise PortagePackageException(
    _("Invalid package name: '%s'") % cpv)
  if ps[0] != pn:
   raise PortagePackageException(
    _("Package name does not "
    "match directory name: '%s'") % cpv)
  return cpv
 def _update_thin_pkgdir(self, cat, pn, pkgdir):
  """Scan only the top level of pkgdir and return the cpv list; thin
  manifests record no local file hashes."""
  for pkgdir, pkgdir_dirs, pkgdir_files in os.walk(pkgdir):
   break
  cpvlist = []
  for f in pkgdir_files:
   try:
    f = _unicode_decode(f,
     encoding=_encodings['fs'], errors='strict')
   except UnicodeDecodeError:
    continue
   if f[:1] == '.':
    continue
   pf = self._is_cpv(cat, pn, f)
   if pf is not None:
    cpvlist.append(pf)
  return cpvlist
 def _update_thick_pkgdir(self, cat, pn, pkgdir):
  """Hash ebuilds and misc files at the top of pkgdir plus everything
  under files/ (AUX entries); return the cpv list found."""
  cpvlist = []
  for pkgdir, pkgdir_dirs, pkgdir_files in os.walk(pkgdir):
   break
  for f in pkgdir_files:
   try:
    f = _unicode_decode(f,
     encoding=_encodings['fs'], errors='strict')
   except UnicodeDecodeError:
    continue
   if f[:1] == ".":
    continue
   pf = self._is_cpv(cat, pn, f)
   if pf is not None:
    mytype = "EBUILD"
    cpvlist.append(pf)
   elif self._find_invalid_path_char(f) == -1 and \
    manifest2MiscfileFilter(f):
    mytype = "MISC"
   else:
    continue
   self.fhashdict[mytype][f] = perform_multiple_checksums(self.pkgdir+f, self.hashes)
  recursive_files = []
  pkgdir = self.pkgdir
  cut_len = len(os.path.join(pkgdir, "files") + os.sep)
  for parentdir, dirs, files in os.walk(os.path.join(pkgdir, "files")):
   for f in files:
    try:
     f = _unicode_decode(f,
      encoding=_encodings['fs'], errors='strict')
    except UnicodeDecodeError:
     continue
    full_path = os.path.join(parentdir, f)
    recursive_files.append(full_path[cut_len:])
  for f in recursive_files:
   if self._find_invalid_path_char(f) != -1 or \
    not manifest2AuxfileFilter(f):
    continue
   self.fhashdict["AUX"][f] = perform_multiple_checksums(
    os.path.join(self.pkgdir, "files", f.lstrip(os.sep)), self.hashes)
  return cpvlist
 def _pkgdir_category(self):
  """Derive the category name from pkgdir's parent directory name."""
  return self.pkgdir.rstrip(os.sep).split(os.sep)[-2]
 def _getAbsname(self, ftype, fname):
  """Return the absolute on-disk path for a manifest entry."""
  if ftype == "DIST":
   absname = os.path.join(self.distdir, fname)
  elif ftype == "AUX":
   absname = os.path.join(self.pkgdir, "files", fname)
  else:
   absname = os.path.join(self.pkgdir, fname)
  return absname
 def checkAllHashes(self, ignoreMissingFiles=False):
  """Verify checksums for every tracked file of every type."""
  for t in MANIFEST2_IDENTIFIERS:
   self.checkTypeHashes(t, ignoreMissingFiles=ignoreMissingFiles)
 def checkTypeHashes(self, idtype, ignoreMissingFiles=False, hash_filter=None):
  """Verify checksums for every tracked file of the given type."""
  for f in self.fhashdict[idtype]:
   self.checkFileHashes(idtype, f, ignoreMissing=ignoreMissingFiles,
    hash_filter=hash_filter)
 def checkFileHashes(self, ftype, fname, ignoreMissing=False, hash_filter=None):
  """Verify one file's stored checksums; raises DigestException on a
  mismatch and FileNotFound unless ignoreMissing is True."""
  digests = _filter_unaccelarated_hashes(self.fhashdict[ftype][fname])
  if hash_filter is not None:
   digests = _apply_hash_filter(digests, hash_filter)
  try:
   ok, reason = verify_all(self._getAbsname(ftype, fname), digests)
   if not ok:
    raise DigestException(tuple([self._getAbsname(ftype, fname)]+list(reason)))
   return ok, reason
  except FileNotFound as e:
   if not ignoreMissing:
    raise
   return False, _("File Not Found: '%s'") % str(e)
 def checkCpvHashes(self, cpv, checkDistfiles=True, onlyDistfiles=False, checkMiscfiles=False):
  """ check the hashes for all files associated to the given cpv, include all
  AUX files and optionally all MISC files. """
  if not onlyDistfiles:
   self.checkTypeHashes("AUX", ignoreMissingFiles=False)
   if checkMiscfiles:
    self.checkTypeHashes("MISC", ignoreMissingFiles=False)
   ebuildname = "%s.ebuild" % self._catsplit(cpv)[1]
   self.checkFileHashes("EBUILD", ebuildname, ignoreMissing=False)
  if checkDistfiles or onlyDistfiles:
   for f in self._getCpvDistfiles(cpv):
    self.checkFileHashes("DIST", f, ignoreMissing=False)
 def _getCpvDistfiles(self, cpv):
  """ Get a list of all DIST files associated to the given cpv """
  return self.fetchlist_dict[cpv]
 def getDistfilesSize(self, fetchlist):
  """Return the total size in bytes of the given distfiles."""
  total_bytes = 0
  for f in fetchlist:
   total_bytes += int(self.fhashdict["DIST"][f]["size"])
  return total_bytes
 def updateFileHashes(self, ftype, fname, checkExisting=True, ignoreMissing=True, reuseExisting=False):
  """ Regenerate hashes for the given file """
  if checkExisting:
   self.checkFileHashes(ftype, fname, ignoreMissing=ignoreMissing)
  if not ignoreMissing and fname not in self.fhashdict[ftype]:
   raise FileNotInManifestException(fname)
  if fname not in self.fhashdict[ftype]:
   self.fhashdict[ftype][fname] = {}
  myhashkeys = list(self.hashes)
  if reuseExisting:
   # Skip recomputing hashes we already have stored values for.
   for k in [h for h in self.fhashdict[ftype][fname] if h in myhashkeys]:
    myhashkeys.remove(k)
  myhashes = perform_multiple_checksums(self._getAbsname(ftype, fname), myhashkeys)
  self.fhashdict[ftype][fname].update(myhashes)
 def updateTypeHashes(self, idtype, checkExisting=False, ignoreMissingFiles=True):
  """ Regenerate all hashes for all files of the given type """
  # NOTE(review): ignoreMissingFiles is accepted but never forwarded
  # to updateFileHashes — confirm whether this is intentional.
  for fname in self.fhashdict[idtype]:
   self.updateFileHashes(idtype, fname, checkExisting)
 def updateAllHashes(self, checkExisting=False, ignoreMissingFiles=True):
  """ Regenerate all hashes for all files in this Manifest. """
  for idtype in MANIFEST2_IDENTIFIERS:
   self.updateTypeHashes(idtype, checkExisting=checkExisting,
    ignoreMissingFiles=ignoreMissingFiles)
 def updateCpvHashes(self, cpv, ignoreMissingFiles=True):
  """ Regenerate all hashes associated to the given cpv (includes all AUX and MISC
  files)."""
  # NOTE(review): updateFileHashes has no ignoreMissingFiles parameter;
  # the two calls below look like they would raise TypeError — confirm.
  self.updateTypeHashes("AUX", ignoreMissingFiles=ignoreMissingFiles)
  self.updateTypeHashes("MISC", ignoreMissingFiles=ignoreMissingFiles)
  ebuildname = "%s.ebuild" % self._catsplit(cpv)[1]
  self.updateFileHashes("EBUILD", ebuildname, ignoreMissingFiles=ignoreMissingFiles)
  for f in self._getCpvDistfiles(cpv):
   self.updateFileHashes("DIST", f, ignoreMissingFiles=ignoreMissingFiles)
 def updateHashesGuessType(self, fname, *args, **kwargs):
  """ Regenerate hashes for the given file (guesses the type and then
  calls updateFileHashes)."""
  mytype = self.guessType(fname)
  if mytype == "AUX":
   fname = fname[len("files" + os.sep):]
  elif mytype is None:
   return
  myrealtype = self.findFile(fname)
  if myrealtype is not None:
   mytype = myrealtype
  return self.updateFileHashes(mytype, fname, *args, **kwargs)
 def getFileData(self, ftype, fname, key):
  """ Return the value of a specific (type,filename,key) triple, mainly useful
  to get the size for distfiles."""
  return self.fhashdict[ftype][fname][key]
 def getVersions(self):
  """ Returns a list of manifest versions present in the manifest file. """
  rVal = []
  mfname = self.getFullname()
  if not os.path.exists(mfname):
   return rVal
  myfile = io.open(_unicode_encode(mfname,
   encoding=_encodings['fs'], errors='strict'),
   mode='r', encoding=_encodings['repo.content'], errors='replace')
  lines = myfile.readlines()
  myfile.close()
  for l in lines:
   mysplit = l.split()
   # manifest1: 4 tokens beginning with a known hash function name;
   # manifest2: type token followed by name, size, and hash pairs.
   if len(mysplit) == 4 and mysplit[0] in MANIFEST1_HASH_FUNCTIONS \
    and 1 not in rVal:
    rVal.append(1)
   elif len(mysplit) > 4 and mysplit[0] in MANIFEST2_IDENTIFIERS \
    and ((len(mysplit) - 3) % 2) == 0 and not 2 in rVal:
    rVal.append(2)
  return rVal
 def _catsplit(self, pkg_key):
  """Split a category and package, returning a list of [cat, pkg].
  This is compatible with portage.catsplit()"""
  return pkg_key.split("/", 1)
|
gpl-2.0
|
PythonNut/servo
|
tests/wpt/web-platform-tests/tools/html5lib/html5lib/tests/performance/concatenation.py
|
451
|
1145
|
from __future__ import absolute_import, division, unicode_literals
def f1():
    """Concatenation benchmark case: augmented assignment (+=)."""
    x = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    y = x
    z = x
    x += y + z
def f2():
    """Concatenation benchmark case: plain + operator."""
    x = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    y = x
    z = x
    x = x + y + z
def f3():
    """Concatenation benchmark case: str.join of a tuple."""
    x = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    y = x
    z = x
    x = "".join((x, y, z))
def f4():
    """Concatenation benchmark case: %-formatting."""
    x = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    y = x
    z = x
    x = "%s%s%s" % (x, y, z)
import timeit

# Fixed: the original timed the bare expression "fN" — a plain name
# lookup — so none of the four functions was ever called and all cases
# measured the same no-op. Time an actual call of each function instead.
for case in range(4):
    name = "f%s" % (case + 1)
    timer = timeit.Timer("%s()" % name, "from __main__ import " + name)
    results = timer.repeat(3, 1000000)
    print(results, min(results))
|
mpl-2.0
|
indictranstech/erpnext
|
erpnext/healthcare/setup.py
|
6
|
16695
|
from __future__ import unicode_literals
import frappe
from frappe import _
from erpnext.setup.utils import insert_record
def setup_healthcare():
    """Create all default healthcare master records (idempotent)."""
    # The presence of the Cardiology department marks a completed setup.
    if frappe.db.exists('Medical Department', 'Cardiology'):
        return
    fixture_creators = (
        create_medical_departments,
        create_antibiotics,
        create_test_uom,
        create_duration,
        create_dosage,
        create_healthcare_item_groups,
        create_lab_test_items,
        create_lab_test_template,
        create_sensitivity,
    )
    for create_fixture in fixture_creators:
        create_fixture()
def create_medical_departments():
    """Insert the default set of Medical Department records."""
    department_names = (
        "Accident And Emergency Care", "Anaesthetics", "Biochemistry",
        "Cardiology", "Dermatology", "Diagnostic Imaging", "ENT",
        "Gastroenterology", "General Surgery", "Gynaecology", "Haematology",
        "Maternity", "Microbiology", "Nephrology", "Neurology", "Oncology",
        "Orthopaedics", "Pathology", "Physiotherapy", "Rheumatology",
        "Serology", "Urology",
    )
    for department_name in department_names:
        doc = frappe.new_doc("Medical Department")
        doc.department = _(department_name)
        try:
            doc.save()
        except frappe.DuplicateEntryError:
            # Record already exists; creation is idempotent.
            pass
def create_antibiotics():
 """Insert the default catalogue of Antibiotic records.

 Duplicates are silently skipped so repeated setup runs are safe.
 """
 abt = [
  "Amoxicillin", "Ampicillin", "Bacampicillin", "Carbenicillin", "Cloxacillin", "Dicloxacillin",
  "Flucloxacillin", "Mezlocillin", "Nafcillin", "Oxacillin", "Penicillin G", "Penicillin V",
  "Piperacillin", "Pivampicillin", "Pivmecillinam", "Ticarcillin", "Cefacetrile (cephacetrile)",
  "Cefadroxil (cefadroxyl)", "Cefalexin (cephalexin)", "Cefaloglycin (cephaloglycin)",
  "Cefalonium (cephalonium)", "Cefaloridine (cephaloradine)", "Cefalotin (cephalothin)",
  "Cefapirin (cephapirin)", "Cefatrizine", "Cefazaflur", "Cefazedone", "Cefazolin (cephazolin)",
  "Cefradine (cephradine)", "Cefroxadine", "Ceftezole", "Cefaclor", "Cefamandole", "Cefmetazole",
  "Cefonicid", "Cefotetan", "Cefoxitin", "Cefprozil (cefproxil)", "Cefuroxime", "Cefuzonam",
  "Cefcapene", "Cefdaloxime", "Cefdinir", "Cefditoren", "Cefetamet", "Cefixime", "Cefmenoxime",
  "Cefodizime", "Cefotaxime", "Cefpimizole", "Cefpodoxime", "Cefteram", "Ceftibuten", "Ceftiofur",
  "Ceftiolene", "Ceftizoxime", "Ceftriaxone", "Cefoperazone", "Ceftazidime", "Cefclidine", "Cefepime",
  "Cefluprenam", "Cefoselis", "Cefozopran", "Cefpirome", "Cefquinome", "Ceftobiprole", "Ceftaroline",
  "Cefaclomezine","Cefaloram", "Cefaparole", "Cefcanel", "Cefedrolor", "Cefempidone", "Cefetrizole",
  "Cefivitril", "Cefmatilen", "Cefmepidium", "Cefovecin", "Cefoxazole", "Cefrotil", "Cefsumide",
  "Cefuracetime", "Ceftioxide", "Ceftazidime/Avibactam", "Ceftolozane/Tazobactam", "Aztreonam",
  "Imipenem", "Imipenem/cilastatin", "Doripenem", "Meropenem", "Ertapenem", "Azithromycin",
  "Erythromycin", "Clarithromycin", "Dirithromycin", "Roxithromycin", "Telithromycin", "Clindamycin",
  "Lincomycin", "Pristinamycin", "Quinupristin/dalfopristin", "Amikacin", "Gentamicin", "Kanamycin",
  "Neomycin", "Netilmicin", "Paromomycin", "Streptomycin", "Tobramycin", "Flumequine", "Nalidixic acid",
  "Oxolinic acid", "Piromidic acid", "Pipemidic acid", "Rosoxacin", "Ciprofloxacin", "Enoxacin",
  "Lomefloxacin", "Nadifloxacin", "Norfloxacin", "Ofloxacin", "Pefloxacin", "Rufloxacin", "Balofloxacin",
  "Gatifloxacin", "Grepafloxacin", "Levofloxacin", "Moxifloxacin", "Pazufloxacin", "Sparfloxacin",
  "Temafloxacin", "Tosufloxacin", "Besifloxacin", "Clinafloxacin", "Gemifloxacin",
  "Sitafloxacin", "Trovafloxacin", "Prulifloxacin", "Sulfamethizole", "Sulfamethoxazole",
  "Sulfisoxazole", "Trimethoprim-Sulfamethoxazole", "Demeclocycline", "Doxycycline", "Minocycline",
  "Oxytetracycline", "Tetracycline", "Tigecycline", "Chloramphenicol", "Metronidazole",
  "Tinidazole", "Nitrofurantoin", "Vancomycin", "Teicoplanin", "Telavancin", "Linezolid",
  "Cycloserine 2", "Rifampin", "Rifabutin", "Rifapentine", "Rifalazil", "Bacitracin", "Polymyxin B",
  "Viomycin", "Capreomycin"
 ]
 for a in abt:
  antibiotic = frappe.new_doc("Antibiotic")
  antibiotic.antibiotic_name = a
  try:
   antibiotic.save()
  except frappe.DuplicateEntryError:
   # Record already exists; setup runs are idempotent.
   pass
def create_test_uom():
    """Insert the default Lab Test UOM records."""
    # Every record has the same shape; only the unit string varies.
    uoms = (
        "umol/L", "mg/L", "mg / dl", "pg / ml", "U/ml", "/HPF",
        "Million Cells / cumm", "Lakhs Cells / cumm", "U / L", "g / L",
        "IU / ml", "gm %", "Microgram", "Micron", "Cells / cumm", "%",
        "mm / dl", "mm / hr", "ulU / ml", "ng / ml", "ng / dl", "ug / dl",
    )
    insert_record([
        {"doctype": "Lab Test UOM", "name": uom, "test_uom": uom,
            "uom_description": None}
        for uom in uoms
    ])
def create_duration():
    """Insert the default Prescription Duration records."""
    # (period, highest count) pairs; records count down from the highest,
    # reproducing the original fixture order exactly.
    periods = (("Month", 3), ("Hour", 12), ("Week", 5), ("Day", 6))
    records = [
        {
            "doctype": "Prescription Duration",
            "name": "%d %s" % (count, period),
            "number": str(count),
            "period": period,
        }
        for period, highest in periods
        for count in range(highest, 0, -1)
    ]
    insert_record(records)
def create_dosage():
    """Insert the default Prescription Dosage records."""
    # Each dosage maps to the times of day at which one unit is taken;
    # every strength entry is "1.0".
    schedules = (
        ("1-1-1-1", ("9:00:00", "13:00:00", "17:00:00", "21:00:00")),
        ("0-0-1", ("21:00:00",)),
        ("1-0-0", ("9:00:00",)),
        ("0-1-0", ("14:00:00",)),
        ("1-1-1", ("9:00:00", "14:00:00", "21:00:00")),
        ("1-0-1", ("9:00:00", "21:00:00")),
        ("Once Bedtime", ("21:00:00",)),
        ("5 times a day",
            ("5:00:00", "9:00:00", "13:00:00", "17:00:00", "21:00:00")),
        ("QID", ("9:00:00", "13:00:00", "17:00:00", "21:00:00")),
        ("TID", ("9:00:00", "14:00:00", "21:00:00")),
        ("BID", ("9:00:00", "21:00:00")),
        ("Once Daily", ("9:00:00",)),
    )
    records = [
        {
            "doctype": "Prescription Dosage",
            "name": dosage,
            "dosage": dosage,
            "dosage_strength": [
                {"strength": "1.0", "strength_time": dose_time}
                for dose_time in times
            ],
        }
        for dosage, times in schedules
    ]
    insert_record(records)
def create_healthcare_item_groups():
    """Insert the default healthcare Item Groups (Laboratory and Drug).

    Both groups are leaf nodes under 'All Item Groups'.
    """
    group_names = [_('Laboratory'), _('Drug')]
    records = [
        {
            'doctype': 'Item Group',
            'item_group_name': group_name,
            'is_group': 0,
            'parent_item_group': _('All Item Groups'),
        }
        for group_name in group_names
    ]
    insert_record(records)
def create_lab_test_items():
    """Insert the default lab test Items.

    Every item is a non-stock, sales-only service item in the
    Laboratory item group; item_code and item_name are identical.
    """
    item_codes = ["MCH", "LDL", "GTT", "HDL", "BILT", "BILD", "BP", "BS"]
    records = [
        {
            "doctype": "Item",
            "item_code": code,
            "item_name": code,
            "item_group": _("Laboratory"),
            "stock_uom": _("Unit"),
            "is_stock_item": 0,
            "is_purchase_item": 0,
            "is_sales_item": 1,
        }
        for code in item_codes
    ]
    insert_record(records)
def create_lab_test_template():
    """Insert the default single-result Lab Test Templates.

    Each template shares the same fixed fields (billable, Single type,
    zero rate, Laboratory group); only the per-test fields vary, so the
    varying fields are kept in a compact table below.
    """
    # (code, test_name, department, uom, normal_range, description)
    # Note: code is reused for the template name, test_code and item.
    # NOTE(review): the LDL normal-range string looks garbled (description
    # text fused onto the range) — preserved verbatim; verify upstream.
    test_rows = [
        ("MCH", "MCH", "Haematology", "Microgram",
         "27 - 32 Microgram", "Mean Corpuscular Hemoglobin"),
        ("LDL", "LDL (Serum)", "Biochemistry", "mg / dl",
         "70 - 160 mg/dlLow-density Lipoprotein (LDL)",
         "Low-density Lipoprotein (LDL)"),
        ("GTT", "GTT", "Haematology", "mg / dl",
         "Less than 85 mg/dl", "Glucose Tolerance Test"),
        ("HDL", "HDL (Serum)", "Biochemistry", "mg / dl",
         "35 - 65 mg/dl", "High-density Lipoprotein (HDL)"),
        ("BILT", "Bilirubin Total", "Biochemistry", "mg / dl",
         "0.2 - 1.2 mg / dl", "Bilirubin Total"),
        ("BILD", "Bilirubin Direct", "Biochemistry", "mg / dl",
         "0.4 mg / dl", "Bilirubin Direct"),
        ("BP", "Bile Pigment", "Pathology", "", "", "Bile Pigment"),
        ("BS", "Bile Salt", "Pathology", "", "", "Bile Salt"),
    ]
    records = [
        {
            "doctype": "Lab Test Template",
            "name": code,
            "test_name": test_name,
            "test_code": code,
            "test_group": _("Laboratory"),
            "department": _(department),
            "item": code,
            "test_template_type": "Single",
            "is_billable": 1,
            "test_rate": 0.0,
            "test_uom": uom,
            "test_normal_range": normal_range,
            "sensitivity": 0,
            "test_description": description,
        }
        for code, test_name, department, uom, normal_range, description in test_rows
    ]
    insert_record(records)
def create_sensitivity():
    """Insert the default antibiotic Sensitivity classifications."""
    labels = [
        "Low Sensitivity",
        "High Sensitivity",
        "Moderate Sensitivity",
        "Susceptible",
        "Resistant",
        "Intermediate",
    ]
    records = [
        {"doctype": "Sensitivity", "sensitivity": _(label)}
        for label in labels
    ]
    insert_record(records)
|
agpl-3.0
|
SlimRemix/android_external_chromium_org
|
chrome/common/extensions/docs/server2/api_list_data_source_test.py
|
78
|
8013
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import json
from api_list_data_source import APIListDataSource
from api_models import ContentScriptAPI
from extensions_paths import CHROME_EXTENSIONS
from server_instance import ServerInstance
from test_file_system import TestFileSystem
def _ToTestData(obj):
'''Transforms |obj| into test data by turning a list of files into an object
mapping that file to its contents (derived from its name).
'''
return dict((name, name) for name in obj)
def _ToTestFeatures(names):
'''Transforms a list of strings into a minimal JSON features object.
'''
def platforms_to_extension_types(platforms):
return ['platform_app' if platform == 'apps' else 'extension'
for platform in platforms]
features = dict((name, {
'name': name,
'extension_types': platforms_to_extension_types(platforms),
'contexts': context
}) for name, platforms, context in names)
features['sockets.udp']['channel'] = 'dev'
return features
def _ToTestAPIData(names):
api_data = dict((name, [{'namespace': name, 'description': description}])
for name, description in names)
return api_data
def _ToTestAPISchema(names, apis):
    '''Fills apis['api'] with the serialized schema for every
    (name, json_file) pair in |names|, pulling the schema data from the
    module-level _TEST_API_DATA table. Returns the mutated |apis| dict.
    '''
    api_dir = apis['api']
    for name, json_file in names:
        api_dir[json_file] = json.dumps(_TEST_API_DATA[name])
    return apis
# Feature declarations for the fake server: (api name, platforms, contexts).
# _ToTestFeatures additionally marks 'sockets.udp' as dev-channel only.
_TEST_API_FEATURES = _ToTestFeatures([
    ('alarms', ['apps', 'extensions'], ['content_script']),
    ('app.window', ['apps'], []),
    ('browserAction', ['extensions'], []),
    ('experimental.bluetooth', ['apps'], []),
    ('experimental.history', ['extensions'], []),
    ('experimental.power', ['apps', 'extensions'], []),
    ('extension', ['extensions'], ['content_script']),
    ('extension.onRequest', ['extensions'], ['content_script']),
    ('extension.sendNativeMessage', ['extensions'], []),
    ('extension.sendRequest', ['extensions'], ['content_script']),
    ('infobars', ['extensions'], []),
    ('something_internal', ['apps'], []),
    ('something_else_internal', ['extensions'], []),
    ('storage', ['apps', 'extensions'], []),
    ('sockets.udp', ['apps', 'extensions'], [])
])

# Minimal per-API schema data: name -> [{'namespace', 'description'}].
_TEST_API_DATA = _ToTestAPIData([
    ('alarms', u'<code>alarms</code>'),
    ('app.window', u'<code>app.window</code>'),
    ('browserAction', u'<code>browserAction</code>'),
    ('experimental.bluetooth', u'<code>experimental.bluetooth</code>'),
    ('experimental.history', u'<code>experimental.history</code>'),
    ('experimental.power', u'<code>experimental.power</code>'),
    ('extension', u'<code>extension</code>'),
    ('infobars', u'<code>infobars</code>'),
    ('something_internal', u'<code>something_internal</code>'),
    ('something_else_internal', u'<code>something_else_internal</code>'),
    ('storage', u'<code>storage</code>'),
    ('sockets.udp', u'<code>sockets.udp</code>')
])

# (api name, schema file name) pairs used to populate the fake 'api' dir.
_TEST_API_SCHEMA = [
    ('alarms', 'alarms.json'),
    ('app.window', 'app_window.json'),
    ('browserAction', 'browser_action.json'),
    ('experimental.bluetooth', 'experimental_bluetooth.json'),
    ('experimental.history', 'experimental_history.json'),
    ('experimental.power', 'experimental_power.json'),
    ('extension', 'extension.json'),
    ('infobars', 'infobars.json'),
    ('something_internal', 'something_internal.json'),
    ('something_else_internal', 'something_else_internal.json'),
    ('storage', 'storage.json'),
    ('sockets.udp', 'sockets_udp.json')
]

# In-memory file-system layout handed to the test ServerInstance; the
# 'public' subtrees decide which APIs appear documented per platform.
_TEST_DATA = _ToTestAPISchema(_TEST_API_SCHEMA, {
    'api': {
        '_api_features.json': json.dumps(_TEST_API_FEATURES),
        '_manifest_features.json': '{}',
        '_permission_features.json': '{}',
    },
    'docs': {
        'templates': {
            'json': {
                'api_availabilities.json': '{}',
                'manifest.json': '{}',
                'permissions.json': '{}',
            },
            'public': {
                'apps': _ToTestData([
                    'alarms.html',
                    'app_window.html',
                    'experimental_bluetooth.html',
                    'experimental_power.html',
                    'storage.html',
                    'sockets_udp.html'
                ]),
                'extensions': _ToTestData([
                    'alarms.html',
                    'browserAction.html',
                    'experimental_history.html',
                    'experimental_power.html',
                    'extension.html',
                    'infobars.html',
                    'storage.html',
                    'sockets_udp.html'
                ]),
            },
        },
    },
})
class APIListDataSourceTest(unittest.TestCase):
    # Exercises APIListDataSource against the in-memory fixture above,
    # checking the per-platform ('apps'/'extensions'), per-channel
    # ('stable'/'dev'/'beta'/'master') API listings it produces.

    def setUp(self):
        server_instance = ServerInstance.ForTest(
            TestFileSystem(_TEST_DATA, relative_to=CHROME_EXTENSIONS))
        # APIListDataSource takes a request but doesn't use it,
        # so put None
        self._api_list = APIListDataSource(server_instance, None)
        # Show full diffs on mismatched dicts.
        self.maxDiff = None

    def testApps(self):
        self.assertEqual({
            'stable': [
                {
                    'name': 'alarms',
                    'version': 5,
                    'description': u'<code>alarms</code>'
                },
                {
                    'name': 'app.window',
                    # Availability logic will look for a camelCase format filename
                    # (i.e. 'app.window.html') at version 20 and below, but the
                    # unix_name format above won't be found at these versions.
                    'version': 21,
                    'description': u'<code>app.window</code>'
                },
                {
                    'name': 'storage',
                    'last': True,
                    'version': 5,
                    'description': u'<code>storage</code>'
                }],
            'dev': [
                {
                    'name': 'sockets.udp',
                    'last': True,
                    'description': u'<code>sockets.udp</code>'
                }],
            'beta': [],
            'master': []
        }, self._api_list.get('apps').get('chrome'))

    def testExperimentalApps(self):
        self.assertEqual([
            {
                'name': 'experimental.bluetooth',
                'description': u'<code>experimental.bluetooth</code>'
            },
            {
                'name': 'experimental.power',
                'last': True,
                'description': u'<code>experimental.power</code>'
            }], self._api_list.get('apps').get('experimental'))

    def testExtensions(self):
        self.assertEqual({
            'stable': [
                {
                    'name': 'alarms',
                    'version': 5,
                    'description': u'<code>alarms</code>'
                },
                {
                    'name': 'browserAction',
                    # See comment above for 'app.window'.
                    'version': 21,
                    'description': u'<code>browserAction</code>'
                },
                {
                    'name': 'extension',
                    'version': 5,
                    'description': u'<code>extension</code>'
                },
                {
                    'name': 'infobars',
                    'version': 5,
                    'description': u'<code>infobars</code>'
                },
                {
                    'name': 'storage',
                    'last': True,
                    'version': 5,
                    'description': u'<code>storage</code>'
                }],
            'dev': [
                {
                    'name': 'sockets.udp',
                    'last': True,
                    'description': u'<code>sockets.udp</code>'
                }],
            'beta': [],
            'master': []
        }, self._api_list.get('extensions').get('chrome'))

    def testExperimentalExtensions(self):
        self.assertEqual([
            {
                'name': 'experimental.history',
                'description': u'<code>experimental.history</code>'
            },
            {
                'name': 'experimental.power',
                'description': u'<code>experimental.power</code>',
                'last': True
            }], self._api_list.get('extensions').get('experimental'))

    def testContentScripts(self):
        # 'extension' is only partially available to content scripts, so it
        # carries a 'restrictedTo' node list; 'alarms' is fully available.
        self.assertEqual([{
            'name': 'alarms',
        },
        {
            'name': 'extension',
            'restrictedTo': [{
                'node': 'onRequest',
                'first': True
            },
            {
                'node': 'sendRequest',
                'last': True
            }]
        }], self._api_list.get('contentScripts'))
# Allow running this test file directly.
if __name__ == '__main__':
    unittest.main()
|
bsd-3-clause
|
arjoly/scikit-learn
|
doc/sphinxext/github_link.py
|
314
|
2661
|
from operator import attrgetter
import inspect
import subprocess
import os
import sys
from functools import partial
REVISION_CMD = 'git rev-parse --short HEAD'
def _get_git_revision():
try:
revision = subprocess.check_output(REVISION_CMD.split()).strip()
except subprocess.CalledProcessError:
print('Failed to execute git to get revision')
return None
return revision.decode('utf-8')
def _linkcode_resolve(domain, info, package, url_fmt, revision):
"""Determine a link to online source for a class/method/function
This is called by sphinx.ext.linkcode
An example with a long-untouched module that everyone has
>>> _linkcode_resolve('py', {'module': 'tty',
... 'fullname': 'setraw'},
... package='tty',
... url_fmt='http://hg.python.org/cpython/file/'
... '{revision}/Lib/{package}/{path}#L{lineno}',
... revision='xxxx')
'http://hg.python.org/cpython/file/xxxx/Lib/tty/tty.py#L18'
"""
if revision is None:
return
if domain not in ('py', 'pyx'):
return
if not info.get('module') or not info.get('fullname'):
return
class_name = info['fullname'].split('.')[0]
if type(class_name) != str:
# Python 2 only
class_name = class_name.encode('utf-8')
module = __import__(info['module'], fromlist=[class_name])
obj = attrgetter(info['fullname'])(module)
try:
fn = inspect.getsourcefile(obj)
except Exception:
fn = None
if not fn:
try:
fn = inspect.getsourcefile(sys.modules[obj.__module__])
except Exception:
fn = None
if not fn:
return
fn = os.path.relpath(fn,
start=os.path.dirname(__import__(package).__file__))
try:
lineno = inspect.getsourcelines(obj)[1]
except Exception:
lineno = ''
return url_fmt.format(revision=revision, package=package,
path=fn, lineno=lineno)
def make_linkcode_resolve(package, url_fmt):
    """Return a linkcode_resolve callable for sphinx.ext.linkcode.

    The git revision is captured once, at the time this factory runs.
    *package* is the name of the root module of the package; *url_fmt*
    is along the lines of ('https://github.com/USER/PROJECT/'
    'blob/{revision}/{package}/{path}#L{lineno}').
    """
    return partial(_linkcode_resolve,
                   revision=_get_git_revision(),
                   package=package,
                   url_fmt=url_fmt)
|
bsd-3-clause
|
akretion/odoo
|
odoo/cli/deploy.py
|
26
|
3620
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import requests
import sys
import tempfile
import zipfile
from . import Command
class Deploy(Command):
    """Deploy a module on an Odoo instance"""

    def __init__(self):
        super(Deploy, self).__init__()
        # One HTTP session so the login cookie persists across requests.
        self.session = requests.session()

    def deploy_module(self, module_path, url, login, password, db='', force=False):
        """Zip the module at *module_path* and upload it to the server at *url*.

        Returns the server's response text. The temporary zip file is always
        removed, even if the upload fails.
        """
        url = url.rstrip('/')
        module_file = self.zip_module(module_path)
        try:
            return self.login_upload_module(module_file, url, login, password, db, force=force)
        finally:
            os.remove(module_file)

    def login_upload_module(self, module_file, url, login, password, db, force=False):
        """POST the zipped module to the server's login_upload endpoint.

        Raises Exception if the endpoint is missing (base_import_module not
        installed server-side) and requests.HTTPError on other HTTP errors.
        """
        print("Uploading module file...")
        endpoint = url + '/base_import_module/login_upload'
        post_data = {
            'login': login,
            'password': password,
            'db': db,
            'force': '1' if force else '',
        }
        with open(module_file, 'rb') as f:
            res = self.session.post(endpoint, files={'mod_file': f}, data=post_data)
        if res.status_code == 404:
            raise Exception(
                "The server '%s' does not have the 'base_import_module' installed or is not up-to-date." % url)
        res.raise_for_status()
        return res.text

    def zip_module(self, path):
        """Zip the module directory at *path* and return the archive's path.

        Entries are stored relative to the module's parent directory so they
        extract as <module_name>/... on the server. The caller is responsible
        for deleting the returned file.
        """
        path = os.path.abspath(path)
        if not os.path.isdir(path):
            raise Exception("Could not find module directory '%s'" % path)
        container, module_name = os.path.split(path)
        # mkstemp instead of the deprecated, race-prone mktemp: the file is
        # created atomically. We only need the name (ZipFile reopens it), so
        # close the descriptor immediately.
        fd, temp = tempfile.mkstemp(suffix='.zip')
        os.close(fd)
        try:
            print("Zipping module directory...")
            with zipfile.ZipFile(temp, 'w') as zfile:
                for root, dirs, files in os.walk(path):
                    for file in files:
                        file_path = os.path.join(root, file)
                        zfile.write(file_path, file_path.split(container).pop())
            return temp
        except Exception:
            os.remove(temp)
            raise

    def run(self, cmdargs):
        """Entry point for `odoo deploy`: parse CLI args and deploy."""
        parser = argparse.ArgumentParser(
            prog="%s deploy" % sys.argv[0].split(os.path.sep)[-1],
            description=self.__doc__
        )
        parser.add_argument('path', help="Path of the module to deploy")
        parser.add_argument('url', nargs='?', help='Url of the server (default=http://localhost:8069)', default="http://localhost:8069")
        parser.add_argument('--db', dest='db', help='Database to use if server does not use db-filter.')
        parser.add_argument('--login', dest='login', default="admin", help='Login (default=admin)')
        parser.add_argument('--password', dest='password', default="admin", help='Password (default=admin)')
        parser.add_argument('--verify-ssl', action='store_true', help='Verify SSL certificate')
        parser.add_argument('--force', action='store_true', help='Force init even if module is already installed. (will update `noupdate="1"` records)')
        if not cmdargs:
            sys.exit(parser.print_help())
        args = parser.parse_args(args=cmdargs)
        if not args.verify_ssl:
            self.session.verify = False
        try:
            if not args.url.startswith(('http://', 'https://')):
                args.url = 'https://%s' % args.url
            result = self.deploy_module(args.path, args.url, args.login, args.password, args.db, force=args.force)
            print(result)
        except Exception as e:
            sys.exit("ERROR: %s" % e)
|
agpl-3.0
|
dbbhattacharya/kitsune
|
kitsune/wiki/migrations/0003_add_afrikaans.py
|
4
|
15018
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
    # South data migration: seeds the wiki app with the Afrikaans ('af')
    # locale row.

    def forwards(self, orm):
        """Create the Afrikaans Locale."""
        orm.Locale.objects.create(locale='af')

    def backwards(self, orm):
        """Remove the Afrikaans Locale."""
        orm.Locale.objects.filter(locale='af').delete()

    # Frozen ORM model definitions captured by South when this migration was
    # generated. They describe the schema as of this migration only — do not
    # edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'products.product': {
            'Meta': {'ordering': "['display_order']", 'object_name': 'Product'},
            'description': ('django.db.models.fields.TextField', [], {}),
            'display_order': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'products.topic': {
            'Meta': {'ordering': "['product', 'display_order']", 'unique_together': "(('slug', 'product'),)", 'object_name': 'Topic'},
            'description': ('django.db.models.fields.TextField', [], {}),
            'display_order': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'subtopics'", 'null': 'True', 'to': "orm['products.Topic']"}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'topics'", 'to': "orm['products.Product']"}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'taggit.tag': {
            'Meta': {'object_name': 'Tag'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
        },
        'taggit.taggeditem': {
            'Meta': {'object_name': 'TaggedItem'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
        },
        'tidings.watch': {
            'Meta': {'object_name': 'Watch'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'event_type': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
            'secret': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'wiki.document': {
            'Meta': {'unique_together': "(('parent', 'locale'), ('title', 'locale'), ('slug', 'locale'))", 'object_name': 'Document'},
            'allow_discussion': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'category': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'contributors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False'}),
            'current_revision': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'current_for+'", 'null': 'True', 'to': "orm['wiki.Revision']"}),
            'html': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'is_localizable': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'is_template': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'latest_localizable_revision': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'localizable_for+'", 'null': 'True', 'to': "orm['wiki.Revision']"}),
            'locale': ('kitsune.sumo.models.LocaleField', [], {'default': "'en-US'", 'max_length': '7', 'db_index': 'True'}),
            'needs_change': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'needs_change_comment': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'translations'", 'null': 'True', 'to': "orm['wiki.Document']"}),
            'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['products.Product']", 'symmetrical': 'False'}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'topics': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['products.Topic']", 'symmetrical': 'False'})
        },
        'wiki.documentlink': {
            'Meta': {'unique_together': "(('linked_from', 'linked_to'),)", 'object_name': 'DocumentLink'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'kind': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
            'linked_from': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'documentlink_to_set'", 'to': "orm['wiki.Document']"}),
            'linked_to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'documentlink_from_set'", 'to': "orm['wiki.Document']"})
        },
        'wiki.helpfulvote': {
            'Meta': {'object_name': 'HelpfulVote'},
            'anonymous_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'poll_votes'", 'null': 'True', 'to': "orm['auth.User']"}),
            'helpful': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'revision': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'poll_votes'", 'to': "orm['wiki.Revision']"}),
            'user_agent': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
        },
        'wiki.helpfulvotemetadata': {
            'Meta': {'object_name': 'HelpfulVoteMetadata'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
            'vote': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'metadata'", 'to': "orm['wiki.HelpfulVote']"})
        },
        'wiki.importantdate': {
            'Meta': {'object_name': 'ImportantDate'},
            'date': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'text': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'wiki.locale': {
            'Meta': {'ordering': "['locale']", 'object_name': 'Locale'},
            'editors': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'locales_editor'", 'blank': 'True', 'to': "orm['auth.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'leaders': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'locales_leader'", 'blank': 'True', 'to': "orm['auth.User']"}),
            'locale': ('django.db.models.fields.CharField', [], {'max_length': '7', 'db_index': 'True'}),
            'reviewers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'locales_reviewer'", 'blank': 'True', 'to': "orm['auth.User']"})
        },
        'wiki.revision': {
            'Meta': {'object_name': 'Revision'},
            'based_on': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Revision']", 'null': 'True', 'blank': 'True'}),
            'comment': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'content': ('django.db.models.fields.TextField', [], {}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_revisions'", 'to': "orm['auth.User']"}),
            'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['wiki.Document']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'is_ready_for_localization': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'keywords': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'readied_for_localization': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'readied_for_localization_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'readied_for_l10n_revisions'", 'null': 'True', 'to': "orm['auth.User']"}),
            'reviewed': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'reviewer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reviewed_revisions'", 'null': 'True', 'to': "orm['auth.User']"}),
            'significance': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'summary': ('django.db.models.fields.TextField', [], {})
        }
    }

    complete_apps = ['wiki']
    symmetrical = True
|
bsd-3-clause
|
AnotherIvan/calibre
|
src/calibre/utils/ipc/server.py
|
11
|
12195
|
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import sys, os, cPickle, time, tempfile, errno
from math import ceil
from threading import Thread, RLock
from Queue import Queue, Empty
from multiprocessing.connection import Listener, arbitrary_address
from collections import deque
from binascii import hexlify
from calibre.utils.ipc import eintr_retry_call
from calibre.utils.ipc.launch import Worker
from calibre.utils.ipc.worker import PARALLEL_FUNCS
from calibre import detect_ncpus as cpu_count
from calibre.constants import iswindows, DEBUG, islinux
from calibre.ptempfile import base_dir
# NOTE(review): module-level counter; no reference to it is visible in this
# part of the file — its users (if any) are elsewhere in the module.
_counter = 0
class ConnectedWorker(Thread):
    """Daemon thread pairing a launched worker process with its connection.

    Once a job is started, the thread drains notification messages sent by
    the worker over ``conn`` into the ``notifications`` queue.
    """

    def __init__(self, worker, conn, rfile):
        Thread.__init__(self)
        self.daemon = True

        self.conn = conn
        self.worker = worker
        self.notifications = Queue()
        # Sentinel meaning "not yet determined"; see the returncode property.
        self._returncode = 'dummy'
        self.killed = False
        self.log_path = worker.log_path
        self.rfile = rfile
        self.close_log_file = getattr(worker, 'close_log_file', None)

    def start_job(self, job):
        # The last element of the PARALLEL_FUNCS entry indicates whether this
        # job type emits progress notifications.
        notification = PARALLEL_FUNCS[job.name][-1] is not None
        # Send the job description first; only then decide whether to keep
        # the connection open for notifications.
        eintr_retry_call(self.conn.send, (job.name, job.args, job.kwargs, job.description))
        if notification:
            self.start()
        else:
            # No notifications expected, so the connection is not needed.
            self.conn.close()
        self.job = job

    def run(self):
        # Drain notifications until the other end closes the connection
        # (recv then raises, ending the loop).
        while True:
            try:
                x = eintr_retry_call(self.conn.recv)
                self.notifications.put(x)
            except BaseException:
                break
        try:
            self.conn.close()
        except BaseException:
            pass

    def kill(self):
        # Best effort: mark as killed even if terminating the process fails.
        self.killed = True
        try:
            self.worker.kill()
        except BaseException:
            pass

    @property
    def is_alive(self):
        return not self.killed and self.worker.is_alive

    @property
    def returncode(self):
        # Cache the first definitive return code so later reads stay stable
        # even after the worker process has been reaped.
        if self._returncode != 'dummy':
            return self._returncode
        r = self.worker.returncode
        if self.killed and r is None:
            # Killed before exiting cleanly: report failure.
            self._returncode = 1
            return 1
        if r is not None:
            self._returncode = r
        return r
class CriticalError(Exception):
    """Raised when a worker process cannot be launched at all."""
_name_counter = 0
if islinux:
    import fcntl

    class LinuxListener(Listener):
        """Listener over a Linux abstract-namespace socket, with the
        close-on-exec flag set on the listening socket and on every
        accepted connection."""

        def __init__(self, *args, **kwargs):
            Listener.__init__(self, *args, **kwargs)
            # multiprocessing tries to call unlink even on abstract
            # named sockets, prevent it from doing so.
            self._listener._unlink.cancel()
            # Prevent child processes from inheriting this socket
            # If we dont do this child processes not created by calibre, will
            # inherit this socket, preventing the calibre GUI from being restarted.
            # Examples of such processes are external viewers launched by Qt
            # using openUrl().
            fd = self._listener._socket.fileno()
            old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
            fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)

        def close(self):
            # To ensure that the socket is released, we have to call
            # shutdown() not close(). This is needed to allow calibre to
            # restart using the same socket address.
            import socket
            self._listener._socket.shutdown(socket.SHUT_RDWR)
            self._listener._socket.close()

        def accept(self, *args, **kwargs):
            # Accepted connections must not leak into children either.
            ans = Listener.accept(self, *args, **kwargs)
            fd = ans.fileno()
            old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
            fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
            return ans

    def create_listener(authkey, backlog=4):
        # Use abstract named sockets on linux to avoid creating unnecessary temp files
        global _name_counter
        prefix = u'\0calibre-ipc-listener-%d-%%d' % os.getpid()
        while True:
            _name_counter += 1
            address = (prefix % _name_counter).encode('ascii')
            try:
                l = LinuxListener(address=address, authkey=authkey, backlog=backlog)
                return address, l
            except EnvironmentError as err:
                # Another process owns this name; try the next counter value.
                if err.errno == errno.EADDRINUSE:
                    continue
                raise
else:
    def create_listener(authkey, backlog=4):
        # Named pipe on windows, filesystem unix socket elsewhere.
        address = arbitrary_address('AF_PIPE' if iswindows else 'AF_UNIX')
        if iswindows and address[1] == ':':
            # Strip the drive letter prefix; pipe names cannot contain ':'.
            address = address[2:]
        listener = Listener(address=address, authkey=authkey, backlog=backlog)
        return address, listener
class Server(Thread):
    """Runs a pool of worker processes and schedules jobs onto them.

    The server is itself a daemon thread: jobs are submitted with
    add_job(), progress/state changes are published on
    changed_jobs_queue, and kill requests go through kill_queue so that
    all worker bookkeeping happens on the scheduler thread.
    """

    def __init__(self, notify_on_job_done=lambda x: x, pool_size=None,
            limit=sys.maxint, enforce_cpu_limit=True):
        Thread.__init__(self)
        self.daemon = True
        global _counter
        self.id = _counter+1
        _counter += 1

        # Cap concurrency at the CPU count unless explicitly overridden.
        if enforce_cpu_limit:
            limit = min(limit, cpu_count())
        self.pool_size = limit if pool_size is None else pool_size
        self.notify_on_job_done = notify_on_job_done
        # Workers authenticate against this key when connecting back.
        self.auth_key = os.urandom(32)
        self.address, self.listener = create_listener(self.auth_key, backlog=4)
        self.add_jobs_queue, self.changed_jobs_queue = Queue(), Queue()
        self.kill_queue = Queue()
        self.waiting_jobs = []
        self.workers = deque()
        self.launched_worker_count = 0
        self._worker_launch_lock = RLock()

        self.start()

    def launch_worker(self, gui=False, redirect_output=None, job_name=None):
        """Spawn a new worker process and return its ConnectedWorker.

        Raises CriticalError if the worker could not be launched.
        """
        start = time.time()
        with self._worker_launch_lock:
            self.launched_worker_count += 1
            id = self.launched_worker_count
        # Temp file into which the worker pickles its result; we only need
        # the name, so close the descriptor immediately.
        fd, rfile = tempfile.mkstemp(prefix=u'ipc_result_%d_%d_'%(self.id, id),
                dir=base_dir(), suffix=u'.pickle')
        os.close(fd)
        if redirect_output is None:
            redirect_output = not gui

        # The worker finds the server and its result file via environment
        # variables, hex-encoded to survive any shell/OS quoting.
        env = {
            'CALIBRE_WORKER_ADDRESS' : hexlify(cPickle.dumps(self.listener.address, -1)),
            'CALIBRE_WORKER_KEY' : hexlify(self.auth_key),
            'CALIBRE_WORKER_RESULT' : hexlify(rfile.encode('utf-8')),
        }
        cw = self.do_launch(env, gui, redirect_output, rfile, job_name=job_name)
        if isinstance(cw, basestring):
            # do_launch() returns a traceback string on failure.
            raise CriticalError('Failed to launch worker process:\n'+cw)
        if DEBUG:
            print 'Worker Launch took:', time.time() - start
        return cw

    def do_launch(self, env, gui, redirect_output, rfile, job_name=None):
        """Start the worker process and wait for it to connect back.

        Returns a ConnectedWorker on success or a formatted traceback
        string on failure (turned into CriticalError by launch_worker).
        """
        w = Worker(env, gui=gui, job_name=job_name)

        try:
            w(redirect_output=redirect_output)
            conn = eintr_retry_call(self.listener.accept)
            if conn is None:
                raise Exception('Failed to launch worker process')
        except BaseException:
            try:
                w.kill()
            except:
                pass
            import traceback
            return traceback.format_exc()
        return ConnectedWorker(w, conn, rfile)

    def add_job(self, job):
        """Queue a job for execution; the scheduler thread picks it up."""
        job.done2 = self.notify_on_job_done
        self.add_jobs_queue.put(job)

    def run_job(self, job, gui=True, redirect_output=False):
        """Run a job immediately on a dedicated worker, bypassing the queue."""
        w = self.launch_worker(gui=gui, redirect_output=redirect_output, job_name=getattr(job, 'name', None))
        w.start_job(job)

    def run(self):
        # Scheduler loop: each iteration pumps new jobs, notifications,
        # finished workers, at most one job start, and pending kills.
        while True:
            try:
                job = self.add_jobs_queue.get(True, 0.2)
                if job is None:
                    # None is the shutdown sentinel posted by close().
                    break
                self.waiting_jobs.insert(0, job)
            except Empty:
                pass

            # Get notifications from worker process
            for worker in self.workers:
                while True:
                    try:
                        n = worker.notifications.get_nowait()
                        worker.job.notifications.put(n)
                        self.changed_jobs_queue.put(worker.job)
                    except Empty:
                        break

            # Remove finished jobs
            for worker in [w for w in self.workers if not w.is_alive]:
                try:
                    worker.close_log_file()
                except:
                    # close_log_file may be None (see ConnectedWorker);
                    # treat that the same as a failed close.
                    pass
                self.workers.remove(worker)
                job = worker.job
                if worker.returncode != 0:
                    job.failed = True
                    job.returncode = worker.returncode
                elif os.path.exists(worker.rfile):
                    try:
                        job.result = cPickle.load(open(worker.rfile, 'rb'))
                        os.remove(worker.rfile)
                    except:
                        pass
                job.duration = time.time() - job.start_time
                self.changed_jobs_queue.put(job)

            # Start waiting jobs
            sj = self.suitable_waiting_job()
            if sj is not None:
                job = self.waiting_jobs.pop(sj)
                job.start_time = time.time()
                if job.kill_on_start:
                    # Job was killed while still queued: mark it failed
                    # without ever launching a worker.
                    job.duration = 0.0
                    job.returncode = 1
                    job.killed = job.failed = True
                    job.result = None
                else:
                    worker = self.launch_worker()
                    worker.start_job(job)
                    self.workers.append(worker)
                    job.log_path = worker.log_path
                self.changed_jobs_queue.put(job)

            # Process any pending kill requests on this thread.
            while True:
                try:
                    j = self.kill_queue.get_nowait()
                    self._kill_job(j)
                except Empty:
                    break

    def suitable_waiting_job(self):
        """Return the index into waiting_jobs of a job that fits into the
        currently free worker capacity, or None if nothing fits."""
        available_workers = self.pool_size - len(self.workers)
        for worker in self.workers:
            job = worker.job
            if job.core_usage == -1:
                # A running job that claims all cores blocks everything.
                available_workers = 0
            elif job.core_usage > 1:
                available_workers -= job.core_usage - 1
        if available_workers < 1:
            return None

        for i, job in enumerate(self.waiting_jobs):
            if job.core_usage == -1:
                # An all-cores job may only start on an idle pool.
                if available_workers >= self.pool_size:
                    return i
            elif job.core_usage <= available_workers:
                return i

    def kill_job(self, job):
        """Request that a job be killed (handled on the scheduler thread)."""
        self.kill_queue.put(job)

    def killall(self):
        """Request that every currently running job be killed."""
        for worker in self.workers:
            self.kill_queue.put(worker.job)

    def _kill_job(self, job):
        # Runs on the scheduler thread only.
        if job.start_time is None:
            # Not started yet: flag it so run() fails it instead of starting.
            job.kill_on_start = True
            return

        for worker in self.workers:
            if job is worker.job:
                worker.kill()
                job.killed = True
                break

    def split(self, tasks):
        '''
        Split a list into a list of sub lists, with the number of sub lists being
        no more than the number of workers this server supports. Each sublist contains
        2-tuples of the form (i, x) where x is an element from the original list
        and i is the index of the element x in the original list.
        '''
        ans, count, pos = [], 0, 0
        delta = int(ceil(len(tasks)/float(self.pool_size)))
        while count < len(tasks):
            section = []
            for t in tasks[pos:pos+delta]:
                section.append((count, t))
                count += 1
            ans.append(section)
            pos += delta
        return ans

    def close(self):
        """Shut the server down: stop the scheduler thread, close the
        listener and kill any still-running workers. Best effort; never
        raises."""
        try:
            self.add_jobs_queue.put(None)
        except:
            pass
        try:
            self.listener.close()
        except:
            pass
        time.sleep(0.2)
        for worker in list(self.workers):
            try:
                worker.kill()
            except:
                pass

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()
|
gpl-3.0
|
ol-loginov/intellij-community
|
python/lib/Lib/site-packages/django/contrib/gis/utils/ogrinfo.py
|
389
|
1973
|
"""
This module includes some utility functions for inspecting the layout
of a GDAL data source -- the functionality is analogous to the output
produced by the `ogrinfo` utility.
"""
from django.contrib.gis.gdal import DataSource
from django.contrib.gis.gdal.geometries import GEO_CLASSES
def ogrinfo(data_source, num_features=10):
    """
    Walks the available layers in the supplied `data_source`, displaying
    the fields for the first `num_features` features.

    `data_source` may be a path string or an existing DataSource
    instance; anything else raises Exception. Output mirrors the
    `ogrinfo` command-line utility.
    """
    # Checking the parameters.
    if isinstance(data_source, str):
        data_source = DataSource(data_source)
    elif isinstance(data_source, DataSource):
        pass
    else:
        raise Exception('Data source parameter must be a string or a DataSource object.')

    for i, layer in enumerate(data_source):
        # Layer header: geometry type, feature count, SRS and extent.
        # print(expr) is equivalent to the old print statement on Python 2
        # for a single argument, and also parses on Python 3.
        print("data source : %s" % data_source.name)
        print("==== layer %s" % i)
        print(" shape type: %s" % GEO_CLASSES[layer.geom_type.num].__name__)
        print(" # features: %s" % len(layer))
        print(" srs: %s" % layer.srs)
        extent_tup = layer.extent.tuple
        print(" extent: %s - %s" % (extent_tup[0:2], extent_tup[2:4]))
        print("Displaying the first %s features ====" % num_features)

        # BUGFIX: the original used max(*map(len, layer.fields)), which
        # raises TypeError for a layer with exactly one field (max of a
        # single int). Passing the iterable directly is correct, and the
        # conditional covers field-less layers as well.
        width = max(map(len, layer.fields)) if layer.fields else 0
        fmt = " %%%ss: %%s" % width
        for j, feature in enumerate(layer[:num_features]):
            print("=== Feature %s" % j)
            for fld_name in layer.fields:
                type_name = feature[fld_name].type_name
                output = fmt % (fld_name, type_name)
                val = feature.get(fld_name)
                if val:
                    if isinstance(val, str):
                        val_fmt = ' ("%s")'
                    else:
                        val_fmt = ' (%s)'
                    output += val_fmt % val
                else:
                    output += ' (None)'
                print(output)

# For backwards compatibility.
sample = ogrinfo
|
apache-2.0
|
sharbison3/python-docs-samples
|
bigquery/api/streaming.py
|
5
|
3152
|
#!/usr/bin/env python
# Copyright 2015, Google, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line application that streams data into BigQuery.
This sample is used on this page:
https://cloud.google.com/bigquery/streaming-data-into-bigquery
For more information, see the README.md under /bigquery.
"""
import argparse
import ast
import json
import uuid
import googleapiclient.discovery
from six.moves import input
# [START stream_row_to_bigquery]
def stream_row_to_bigquery(bigquery, project_id, dataset_id, table_name, row,
                           num_retries=5):
    """Insert one row into the given BigQuery table via tabledata.insertAll.

    A fresh UUID is attached as the insertId so retried requests do not
    create duplicate rows. Returns the API response dict.
    """
    payload = {
        'rows': [{
            'json': row,
            # Generate a unique id for each row so retries don't accidentally
            # duplicate insert
            'insertId': str(uuid.uuid4()),
        }]
    }
    request = bigquery.tabledata().insertAll(
        projectId=project_id,
        datasetId=dataset_id,
        tableId=table_name,
        body=payload)
    return request.execute(num_retries=num_retries)
# [END stream_row_to_bigquery]
# [START run]
def main(project_id, dataset_id, table_name, num_retries):
    """Read rows interactively (via get_rows) and stream each one into
    the given BigQuery table, printing every API response."""
    # [START build_service]
    # Construct the service object for interacting with the BigQuery API.
    bigquery = googleapiclient.discovery.build('bigquery', 'v2')
    # [END build_service]

    for row in get_rows():
        response = stream_row_to_bigquery(
            bigquery, project_id, dataset_id, table_name, row, num_retries)
        # Echo the raw response so the user can see insert errors, if any.
        print(json.dumps(response))
def get_rows():
    """Yield dicts parsed from interactive input, stopping on a blank line."""
    prompt = "Enter a row (python dict) into the table: "
    while True:
        entered = input(prompt)
        if not entered:
            break
        yield ast.literal_eval(entered)
        prompt = ("Enter another row into the table \n"
                  "[hit enter to stop]: ")
# [END run]
# [START main]
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('project_id', help='Your Google Cloud project ID.')
    parser.add_argument('dataset_id', help='A BigQuery dataset ID.')
    parser.add_argument(
        'table_name', help='Name of the table to load data into.')
    # NOTE(review): --poll_interval is parsed but never passed to main();
    # it appears to be a leftover from a sibling sample -- confirm before
    # removing, since dropping it would change the CLI.
    parser.add_argument(
        '-p', '--poll_interval',
        help='How often to poll the query for completion (seconds).',
        type=int,
        default=1)
    parser.add_argument(
        '-r', '--num_retries',
        help='Number of times to retry in case of 500 error.',
        type=int,
        default=5)

    args = parser.parse_args()

    main(
        args.project_id,
        args.dataset_id,
        args.table_name,
        args.num_retries)
# [END main]
|
apache-2.0
|
pmquang/python-anyconfig
|
anyconfig/backend/tests/json.py
|
1
|
1856
|
#
# Copyright (C) 2012 - 2015 Satoru SATOH <ssato @ redhat.com>
# License: MIT
#
# pylint: disable=missing-docstring
import os.path
import unittest
import anyconfig.backend.json as TT
import anyconfig.tests.common
from anyconfig.tests.common import dicts_equal
CNF_0_S = """{
"a": 0,
"b": "bbb",
"sect0": {
"c": ["x", "y", "z"]
}
}
"""
CNF_0 = {'a': 0, 'b': 'bbb', 'sect0': {'c': ['x', 'y', 'z']}}
class Test10(unittest.TestCase):
    """In-memory (string based) round-trip checks for the JSON parser."""

    cnf = CNF_0
    cnf_s = CNF_0_S

    def test_10_loads(self):
        # Parsing the reference JSON text must reproduce the reference dict.
        parsed = TT.Parser().loads(self.cnf_s)
        self.assertTrue(dicts_equal(parsed, self.cnf), str(parsed))

    def test_30_dumps(self):
        # Dump then re-parse must round-trip the reference dict.
        dumped = TT.Parser().dumps(self.cnf)
        parsed = TT.Parser().loads(dumped)
        self.assertTrue(dicts_equal(parsed, self.cnf), str(parsed))
class Test20(unittest.TestCase):
    """File-based load/dump tests for the JSON backend parser."""

    cnf = CNF_0
    cnf_s = CNF_0_S

    def setUp(self):
        # A throwaway work directory holding the config file under test.
        self.workdir = anyconfig.tests.common.setup_workdir()
        self.cpath = os.path.join(self.workdir, "test0.json")
        # BUGFIX: the original open(self.cpath, 'w').write(...) never closed
        # the handle, relying on CPython refcounting; use a context manager
        # so the file is flushed and closed deterministically.
        with open(self.cpath, 'w') as out:
            out.write(self.cnf_s)

    def tearDown(self):
        anyconfig.tests.common.cleanup_workdir(self.workdir)

    def test_20_load(self):
        cnf = TT.Parser().load(self.cpath)
        self.assertTrue(dicts_equal(cnf, self.cnf), str(cnf))

    def test_22_load__optional_kwargs(self):
        cnf = TT.Parser().load(self.cpath, parse_int=None)
        self.assertTrue(dicts_equal(cnf, self.cnf), str(cnf))

    def test_40_dump(self):
        TT.Parser().dump(self.cnf, self.cpath)
        cnf = TT.Parser().load(self.cpath)
        self.assertTrue(dicts_equal(cnf, self.cnf), str(cnf))

    def test_42_dump_w_special_option(self):
        TT.Parser().dump(self.cnf, self.cpath, parse_int=None, indent=3)
        cnf = TT.Parser().load(self.cpath)
        self.assertTrue(dicts_equal(cnf, self.cnf), str(cnf))
# vim:sw=4:ts=4:et:
|
mit
|
damaggu/SAMRI
|
samri/plotting/aggregate.py
|
1
|
4664
|
import matplotlib.pyplot as plt
import pandas as pd
import seaborn.apionly as sns
from os import path
from matplotlib import rcParams
EXTRA_COLORSET = ["#797979","#000000","#505050","#FFFFFF","#B0B0B0",]
def registration_qc(df,
        cmap="Set3",
        extra=False,
        extra_cmap=EXTRA_COLORSET,
        group={"sub":"Subject"},
        repeat={"ses":"Session"},
        samri_style=True,
        save_as=False,
        show=True,
        value={"similarity":"Similarity"},
        values_rename={},
        ):
    """Aggregate plot of similarity metrics for registration quality control

    Parameters
    ----------

    df : pandas.DataFrame or str
        Pandas Dataframe or CSV file containing similarity scores.
    cmap : str or list, optional
        If a string, the variable specifies the matplotlib colormap [2]_ (qualitative colormaps are recommended) to use for `repeat` highlighting. If a List, the variable should be a list of colors (e.g. `["#00FF00","#2222FF"]`).
    extra : str or dict or bool, optional
        Column of `df` used for an additional swarm drawn with `extra_cmap`
        underneath the `repeat`-colored swarm; `False` disables it. A
        dictionary is treated like the other column selectors below.
    extra_cmap : str or list, optional
        If a string, the variable specifies the matplotlib colormap [2]_ (qualitative colormaps are recommended) to use for `extra` highlighting, which is applied as a contour to the `repeat`-colored pacthes. If a List, the variable should be a list of colors (e.g. `["#00FF00","#2222FF"]`).
    group : str or dict, optional
        Column of `df` to use as the group factor (values of this factor will represent the x-axis). If a dictionary is passed, the column named for the key of the dictionary is renamed to the value, and the value name is then used as the group factor. This is useful for the input of longer but clearer names for plotting.
    samri_style : bool, optional
        Whether to apply a generic SAMRI style to the plot.
    save_as : str, optional
        Path under which to save the generated plot (format is interpreted from provided extension).
    show : bool, optional
        Whether to show the plot in an interactive window.
    repeat : str or dict, optional
        Column of `df` to use as the repeat factor (values of this factor will be represent via different hues, according to `cmap`). If a dictionary is passed, the column named for the key of the dictionary is renamed to the value, and the value name is then used as the group factor. This is useful for the input of longer but clearer names for plotting.
    value : str or dict, optional
        Column of `df` to use as the value (this variable will be represented on the y-axis). If a dictionary is passed, the column named for the key of the dictionary is renamed to the value, and the value name is then used as the group factor. This is useful for the input of longer but clearer names for plotting.
    values_rename : dict, optional
        Dictionary used to rename values in `df`. This is useful for the input of longer but clearer names for plotting (this parameter will not rename column names, for renaming those, see parameters `extra`, `group`, `repeat`, and `value`).

    Returns
    -------
    None
        NOTE(review): a previous docstring promised an ANOVA summary
        DataFrame, but the function body contains no return statement; it
        only draws (and optionally saves/shows) the plot.

    Reference
    ----------
    .. [1] http://goanna.cs.rmit.edu.au/~fscholer/anova.php
    .. [2] https://matplotlib.org/examples/color/colormaps_reference.html
    .. [3] http://www.statsmodels.org/dev/example_formulas.html
    """
    if samri_style:
        this_path = path.dirname(path.realpath(__file__))
        plt.style.use(path.join(this_path,"samri.conf"))

    # Accept either a DataFrame or a path to a CSV file. The try/except on
    # NameError keeps this working on Python 2 (basestring) and 3 (str).
    try:
        if isinstance(df, basestring):
            df = path.abspath(path.expanduser(df))
            df = pd.read_csv(df)
    except NameError:
        if isinstance(df, str):
            df = path.abspath(path.expanduser(df))
            df = pd.read_csv(df)

    # Rename cell values (not columns) before any plotting.
    for key in values_rename:
        df.replace(to_replace=key, value=values_rename[key], inplace=True)

    # Each of value/group/repeat/extra may be given as {column: label};
    # rename the column to the label and use the label from here on.
    column_renames={}
    if isinstance(value, dict):
        column_renames.update(value)
        value = list(value.values())[0]
    if isinstance(group, dict):
        column_renames.update(group)
        group = list(group.values())[0]
    if isinstance(repeat, dict):
        column_renames.update(repeat)
        repeat = list(repeat.values())[0]
    if isinstance(extra, dict):
        column_renames.update(extra)
        extra = list(extra.values())[0]
    df = df.rename(columns=column_renames)

    if extra:
        # Draw the `extra` factor first with larger markers, so the
        # repeat-colored swarm drawn next reads as a contour on top of it.
        myplot = sns.swarmplot(x=group, y=value, hue=extra, data=df,
            size=rcParams["lines.markersize"]*1.4,
            palette=sns.color_palette(extra_cmap),
            )
        myplot = sns.swarmplot(x=group, y=value, hue=repeat, data=df,
            edgecolor=(1, 1, 1, 0.0),
            linewidth=rcParams["lines.markersize"]*.4,
            palette=sns.color_palette(cmap),
            )
    else:
        myplot = sns.swarmplot(x=group, y=value, hue=repeat, data=df,
            palette=sns.color_palette(cmap),
            size=rcParams["lines.markersize"]*2,
            )

    plt.legend(loc=rcParams["legend.loc"])
    if show:
        # NOTE(review): sns.plt only exists in older seaborn releases;
        # newer versions need plt.show() here -- confirm pinned version.
        sns.plt.show()
    if save_as:
        plt.savefig(path.abspath(path.expanduser(save_as)), bbox_inches='tight')
|
gpl-3.0
|
sport-monkey/GYP
|
test/generator-output/gyptest-rules.py
|
198
|
1768
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies --generator-output= behavior when using rules.
"""
import TestGyp
# Android doesn't support --generator-output.
test = TestGyp.TestGyp(formats=['!android'])

# Generate with the source tree read-only to prove that --generator-output
# keeps every generated file out of the source directory.
test.writable(test.workpath('rules'), False)

test.run_gyp('rules.gyp',
             '--generator-output=' + test.workpath('gypfiles'),
             chdir='rules')

test.writable(test.workpath('rules'), True)

# Move both trees to verify the generated projects use relative paths.
test.relocate('rules', 'relocate/rules')
test.relocate('gypfiles', 'relocate/gypfiles')

# During the build, only the build/rules-out directories may be written to.
test.writable(test.workpath('relocate/rules'), False)
test.writable(test.workpath('relocate/rules/build'), True)
test.writable(test.workpath('relocate/rules/subdir1/build'), True)
test.writable(test.workpath('relocate/rules/subdir2/build'), True)
test.writable(test.workpath('relocate/rules/subdir2/rules-out'), True)

test.build('rules.gyp', test.ALL, chdir='relocate/gypfiles')

# Expected stdout of the built program.
expect = """\
Hello from program.c
Hello from function1.in1
Hello from function2.in1
Hello from define3.in0
Hello from define4.in0
"""

# xcode places the built executable relative to the project, not gypfiles.
if test.format == 'xcode':
    chdir = 'relocate/rules/subdir1'
else:
    chdir = 'relocate/gypfiles'
test.run_built_executable('program', chdir=chdir, stdout=expect)

# The rules must also have produced these generated output files.
test.must_match('relocate/rules/subdir2/rules-out/file1.out',
                "Hello from file1.in0\n")
test.must_match('relocate/rules/subdir2/rules-out/file2.out',
                "Hello from file2.in0\n")
test.must_match('relocate/rules/subdir2/rules-out/file3.out',
                "Hello from file3.in1\n")
test.must_match('relocate/rules/subdir2/rules-out/file4.out',
                "Hello from file4.in1\n")

test.pass_test()
|
bsd-3-clause
|
evgchz/scikit-learn
|
examples/ensemble/plot_gradient_boosting_regularization.py
|
355
|
2843
|
"""
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)

# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn import ensemble
from sklearn import datasets

# Synthetic binary classification problem from Hastie et al. 2009.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)

# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)

# Small train set, large test set, as in the original illustration.
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]

# Settings shared by every regularization variant plotted below.
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
                   'min_samples_split': 5}

plt.figure()

# One curve per (label, color, overridden-params) regularization strategy.
for label, color, setting in [('No shrinkage', 'orange',
                               {'learning_rate': 1.0, 'subsample': 1.0}),
                              ('learning_rate=0.1', 'turquoise',
                               {'learning_rate': 0.1, 'subsample': 1.0}),
                              ('subsample=0.5', 'blue',
                               {'learning_rate': 1.0, 'subsample': 0.5}),
                              ('learning_rate=0.1, subsample=0.5', 'gray',
                               {'learning_rate': 0.1, 'subsample': 0.5}),
                              ('learning_rate=0.1, max_features=2', 'magenta',
                               {'learning_rate': 0.1, 'max_features': 2})]:
    params = dict(original_params)
    params.update(setting)

    clf = ensemble.GradientBoostingClassifier(**params)
    clf.fit(X_train, y_train)

    # compute test set deviance
    test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)

    for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
        # clf.loss_ assumes that y_test[i] in {0, 1}
        test_deviance[i] = clf.loss_(y_test, y_pred)

    # Plot every 5th staged deviance to keep the figure light.
    plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
             '-', color=color, label=label)

plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
|
bsd-3-clause
|
qwertyjune/BethSaidaBible
|
venv/lib/python2.7/site-packages/django/contrib/gis/geos/geometry.py
|
54
|
25297
|
"""
This module contains the 'base' GEOSGeometry object -- all GEOS Geometries
inherit from this object.
"""
from __future__ import unicode_literals
# Python, ctypes and types dependencies.
from ctypes import addressof, byref, c_double
from django.contrib.gis import memoryview
# super-class for mutable list behavior
from django.contrib.gis.geos.mutable_list import ListMixin
from django.contrib.gis.gdal.error import SRSException
# GEOS-related dependencies.
from django.contrib.gis.geos.base import GEOSBase, gdal
from django.contrib.gis.geos.coordseq import GEOSCoordSeq
from django.contrib.gis.geos.error import GEOSException, GEOSIndexError
from django.contrib.gis.geos.libgeos import GEOM_PTR
# All other functions in this module come from the ctypes
# prototypes module -- which handles all interaction with
# the underlying GEOS library.
from django.contrib.gis.geos import prototypes as capi
# These functions provide access to a thread-local instance
# of their corresponding GEOS I/O class.
from django.contrib.gis.geos.prototypes.io import wkt_r, wkt_w, wkb_r, wkb_w, ewkb_w
# For recognizing geometry input.
from django.contrib.gis.geometry.regex import hex_regex, wkt_regex, json_regex
from django.utils import six
from django.utils.encoding import force_bytes, force_text
class GEOSGeometry(GEOSBase, ListMixin):
"A class that, generally, encapsulates a GEOS geometry."
# Raise GEOSIndexError instead of plain IndexError
# (see ticket #4740 and GEOSIndexError docstring)
_IndexError = GEOSIndexError
ptr_type = GEOM_PTR
#### Python 'magic' routines ####
def __init__(self, geo_input, srid=None):
"""
The base constructor for GEOS geometry objects, and may take the
following inputs:
* strings:
- WKT
- HEXEWKB (a PostGIS-specific canonical form)
- GeoJSON (requires GDAL)
* buffer:
- WKB
The `srid` keyword is used to specify the Source Reference Identifier
(SRID) number for this Geometry. If not set, the SRID will be None.
"""
if isinstance(geo_input, bytes):
geo_input = force_text(geo_input)
if isinstance(geo_input, six.string_types):
wkt_m = wkt_regex.match(geo_input)
if wkt_m:
# Handling WKT input.
if wkt_m.group('srid'):
srid = int(wkt_m.group('srid'))
g = wkt_r().read(force_bytes(wkt_m.group('wkt')))
elif hex_regex.match(geo_input):
# Handling HEXEWKB input.
g = wkb_r().read(force_bytes(geo_input))
elif gdal.HAS_GDAL and json_regex.match(geo_input):
# Handling GeoJSON input.
g = wkb_r().read(gdal.OGRGeometry(geo_input).wkb)
else:
raise ValueError('String or unicode input unrecognized as WKT EWKT, and HEXEWKB.')
elif isinstance(geo_input, GEOM_PTR):
# When the input is a pointer to a geometry (GEOM_PTR).
g = geo_input
elif isinstance(geo_input, memoryview):
# When the input is a buffer (WKB).
g = wkb_r().read(geo_input)
elif isinstance(geo_input, GEOSGeometry):
g = capi.geom_clone(geo_input.ptr)
else:
# Invalid geometry type.
raise TypeError('Improper geometry input type: %s' % str(type(geo_input)))
if g:
# Setting the pointer object with a valid pointer.
self.ptr = g
else:
raise GEOSException('Could not initialize GEOS Geometry with given input.')
# Post-initialization setup.
self._post_init(srid)
def _post_init(self, srid):
"Helper routine for performing post-initialization setup."
# Setting the SRID, if given.
if srid and isinstance(srid, int):
self.srid = srid
# Setting the class type (e.g., Point, Polygon, etc.)
self.__class__ = GEOS_CLASSES[self.geom_typeid]
# Setting the coordinate sequence for the geometry (will be None on
# geometries that do not have coordinate sequences)
self._set_cs()
def __del__(self):
"""
Destroys this Geometry; in other words, frees the memory used by the
GEOS C++ object.
"""
if self._ptr:
capi.destroy_geom(self._ptr)
def __copy__(self):
"""
Returns a clone because the copy of a GEOSGeometry may contain an
invalid pointer location if the original is garbage collected.
"""
return self.clone()
def __deepcopy__(self, memodict):
"""
The `deepcopy` routine is used by the `Node` class of django.utils.tree;
thus, the protocol routine needs to be implemented to return correct
copies (clones) of these GEOS objects, which use C pointers.
"""
return self.clone()
def __str__(self):
"WKT is used for the string representation."
return self.wkt
def __repr__(self):
"Short-hand representation because WKT may be very large."
return '<%s object at %s>' % (self.geom_type, hex(addressof(self.ptr)))
# Pickling support
def __getstate__(self):
# The pickled state is simply a tuple of the WKB (in string form)
# and the SRID.
return bytes(self.wkb), self.srid
def __setstate__(self, state):
# Instantiating from the tuple state that was pickled.
wkb, srid = state
ptr = wkb_r().read(memoryview(wkb))
if not ptr:
raise GEOSException('Invalid Geometry loaded from pickled state.')
self.ptr = ptr
self._post_init(srid)
# Comparison operators
def __eq__(self, other):
"""
Equivalence testing, a Geometry may be compared with another Geometry
or a WKT representation.
"""
if isinstance(other, six.string_types):
return self.wkt == other
elif isinstance(other, GEOSGeometry):
return self.equals_exact(other)
else:
return False
def __ne__(self, other):
"The not equals operator."
return not (self == other)
### Geometry set-like operations ###
# Thanks to Sean Gillies for inspiration:
# http://lists.gispython.org/pipermail/community/2007-July/001034.html
# g = g1 | g2
def __or__(self, other):
"Returns the union of this Geometry and the other."
return self.union(other)
# g = g1 & g2
def __and__(self, other):
"Returns the intersection of this Geometry and the other."
return self.intersection(other)
# g = g1 - g2
def __sub__(self, other):
"Return the difference this Geometry and the other."
return self.difference(other)
# g = g1 ^ g2
def __xor__(self, other):
"Return the symmetric difference of this Geometry and the other."
return self.sym_difference(other)
#### Coordinate Sequence Routines ####
@property
def has_cs(self):
"Returns True if this Geometry has a coordinate sequence, False if not."
# Only these geometries are allowed to have coordinate sequences.
if isinstance(self, (Point, LineString, LinearRing)):
return True
else:
return False
def _set_cs(self):
"Sets the coordinate sequence for this Geometry."
if self.has_cs:
self._cs = GEOSCoordSeq(capi.get_cs(self.ptr), self.hasz)
else:
self._cs = None
@property
def coord_seq(self):
"Returns a clone of the coordinate sequence for this Geometry."
if self.has_cs:
return self._cs.clone()
#### Geometry Info ####
@property
def geom_type(self):
"Returns a string representing the Geometry type, e.g. 'Polygon'"
return capi.geos_type(self.ptr).decode()
@property
def geom_typeid(self):
"Returns an integer representing the Geometry type."
return capi.geos_typeid(self.ptr)
@property
def num_geom(self):
"Returns the number of geometries in the Geometry."
return capi.get_num_geoms(self.ptr)
@property
def num_coords(self):
"Returns the number of coordinates in the Geometry."
return capi.get_num_coords(self.ptr)
@property
def num_points(self):
"Returns the number points, or coordinates, in the Geometry."
return self.num_coords
@property
def dims(self):
"Returns the dimension of this Geometry (0=point, 1=line, 2=surface)."
return capi.get_dims(self.ptr)
def normalize(self):
"Converts this Geometry to normal form (or canonical form)."
return capi.geos_normalize(self.ptr)
#### Unary predicates ####
@property
def empty(self):
"""
Returns a boolean indicating whether the set of points in this Geometry
are empty.
"""
return capi.geos_isempty(self.ptr)
@property
def hasz(self):
"Returns whether the geometry has a 3D dimension."
return capi.geos_hasz(self.ptr)
@property
def ring(self):
"Returns whether or not the geometry is a ring."
return capi.geos_isring(self.ptr)
@property
def simple(self):
"Returns false if the Geometry not simple."
return capi.geos_issimple(self.ptr)
@property
def valid(self):
"This property tests the validity of this Geometry."
return capi.geos_isvalid(self.ptr)
@property
def valid_reason(self):
"""
Returns a string containing the reason for any invalidity.
"""
return capi.geos_isvalidreason(self.ptr).decode()
#### Binary predicates. ####
# Each predicate delegates to the corresponding GEOS C API routine and
# returns a boolean; `other` must be another GEOS geometry wrapper.
def contains(self, other):
    "Returns true if other.within(this) returns true."
    return capi.geos_contains(self.ptr, other.ptr)

def crosses(self, other):
    """
    Returns true if the DE-9IM intersection matrix for the two Geometries
    is T*T****** (for a point and a curve,a point and an area or a line and
    an area) 0******** (for two curves).
    """
    return capi.geos_crosses(self.ptr, other.ptr)

def disjoint(self, other):
    """
    Returns true if the DE-9IM intersection matrix for the two Geometries
    is FF*FF****.
    """
    return capi.geos_disjoint(self.ptr, other.ptr)

def equals(self, other):
    """
    Returns true if the DE-9IM intersection matrix for the two Geometries
    is T*F**FFF*.
    """
    return capi.geos_equals(self.ptr, other.ptr)

def equals_exact(self, other, tolerance=0):
    """
    Returns true if the two Geometries are exactly equal, up to a
    specified tolerance.
    """
    return capi.geos_equalsexact(self.ptr, other.ptr, float(tolerance))

def intersects(self, other):
    "Returns true if disjoint returns false."
    return capi.geos_intersects(self.ptr, other.ptr)

def overlaps(self, other):
    """
    Returns true if the DE-9IM intersection matrix for the two Geometries
    is T*T***T** (for two points or two surfaces) 1*T***T** (for two curves).
    """
    return capi.geos_overlaps(self.ptr, other.ptr)

def relate_pattern(self, other, pattern):
    """
    Returns true if the elements in the DE-9IM intersection matrix for the
    two Geometries match the elements in pattern.
    """
    # A valid DE-9IM pattern is a 9-character string; only over-long
    # patterns are rejected here, GEOS validates the rest.
    if not isinstance(pattern, six.string_types) or len(pattern) > 9:
        raise GEOSException('invalid intersection matrix pattern')
    return capi.geos_relatepattern(self.ptr, other.ptr, force_bytes(pattern))

def touches(self, other):
    """
    Returns true if the DE-9IM intersection matrix for the two Geometries
    is FT*******, F**T***** or F***T****.
    """
    return capi.geos_touches(self.ptr, other.ptr)

def within(self, other):
    """
    Returns true if the DE-9IM intersection matrix for the two Geometries
    is T*F**F***.
    """
    return capi.geos_within(self.ptr, other.ptr)
#### SRID Routines ####
def get_srid(self):
    "Gets the SRID for the geometry, returns None if no SRID is set."
    srid = capi.geos_get_srid(self.ptr)
    # GEOS uses 0 to signal "no SRID assigned".
    return None if srid == 0 else srid

def set_srid(self, srid):
    "Sets the SRID for the geometry."
    capi.geos_set_srid(self.ptr, srid)

srid = property(get_srid, set_srid)
#### Output Routines ####
@property
def ewkt(self):
    """
    Returns the EWKT (WKT + SRID) of the Geometry. Note that Z values
    are *not* included in this representation because GEOS does not yet
    support serializing them.
    """
    if self.get_srid():
        return 'SRID=%s;%s' % (self.srid, self.wkt)
    else:
        return self.wkt

@property
def wkt(self):
    "Returns the WKT (Well-Known Text) representation of this Geometry."
    # The writer dimension (2 or 3) follows the geometry's Z support.
    return wkt_w(3 if self.hasz else 2).write(self).decode()

@property
def hex(self):
    """
    Returns the WKB of this Geometry in hexadecimal form. Please note
    that the SRID is not included in this representation because it is not
    a part of the OGC specification (use the `hexewkb` property instead).
    """
    # A possible faster, all-python, implementation:
    # str(self.wkb).encode('hex')
    return wkb_w(3 if self.hasz else 2).write_hex(self)

@property
def hexewkb(self):
    """
    Returns the EWKB of this Geometry in hexadecimal form. This is an
    extension of the WKB specification that includes SRID value that are
    a part of this geometry.
    """
    return ewkb_w(3 if self.hasz else 2).write_hex(self)

@property
def json(self):
    """
    Returns GeoJSON representation of this Geometry if GDAL is installed.
    """
    if gdal.HAS_GDAL:
        return self.ogr.json
    else:
        raise GEOSException('GeoJSON output only supported when GDAL is installed.')
# Alias so callers may use either `json` or `geojson`.
geojson = json

@property
def wkb(self):
    """
    Returns the WKB (Well-Known Binary) representation of this Geometry
    as a Python buffer. SRID and Z values are not included, use the
    `ewkb` property instead.
    """
    return wkb_w(3 if self.hasz else 2).write(self)

@property
def ewkb(self):
    """
    Return the EWKB representation of this Geometry as a Python buffer.
    This is an extension of the WKB specification that includes any SRID
    value that are a part of this geometry.
    """
    return ewkb_w(3 if self.hasz else 2).write(self)

@property
def kml(self):
    "Returns the KML representation of this Geometry."
    gtype = self.geom_type
    return '<%s>%s</%s>' % (gtype, self.coord_seq.kml, gtype)

@property
def prepared(self):
    """
    Returns a PreparedGeometry corresponding to this geometry -- it is
    optimized for the contains, intersects, and covers operations.
    """
    return PreparedGeometry(self)
#### GDAL-specific output routines ####
@property
def ogr(self):
    "Returns the OGR Geometry for this Geometry."
    if not gdal.HAS_GDAL:
        raise GEOSException('GDAL required to convert to an OGRGeometry.')
    if self.srid:
        try:
            return gdal.OGRGeometry(self.wkb, self.srid)
        except SRSException:
            # Unknown SRID -- fall through and build without a
            # spatial reference instead of failing.
            pass
    return gdal.OGRGeometry(self.wkb)

@property
def srs(self):
    "Returns the OSR SpatialReference for SRID of this Geometry."
    if not gdal.HAS_GDAL:
        raise GEOSException('GDAL required to return a SpatialReference object.')
    if self.srid:
        try:
            return gdal.SpatialReference(self.srid)
        except SRSException:
            pass
    # No SRID set, or the SRID was not recognized.
    return None

@property
def crs(self):
    "Alias for `srs` property."
    return self.srs
def transform(self, ct, clone=False):
    """
    Requires GDAL. Transforms the geometry according to the given
    transformation object, which may be an integer SRID, and WKT or
    PROJ.4 string. By default, the geometry is transformed in-place and
    nothing is returned. However if the `clone` keyword is set, then this
    geometry will not be modified and a transformed clone will be returned
    instead.
    """
    srid = self.srid

    if ct == srid:
        # short-circuit where source & dest SRIDs match
        if clone:
            return self.clone()
        else:
            return

    if (srid is None) or (srid < 0):
        raise GEOSException("Calling transform() with no SRID set is not supported")

    if not gdal.HAS_GDAL:
        raise GEOSException("GDAL library is not available to transform() geometry.")

    # Creating an OGR Geometry, which is then transformed.
    g = self.ogr
    g.transform(ct)
    # Getting a new GEOS pointer
    ptr = wkb_r().read(g.wkb)
    if clone:
        # User wants a cloned transformed geometry returned.
        return GEOSGeometry(ptr, srid=g.srid)
    if ptr:
        # Reassigning pointer, and performing post-initialization setup
        # again due to the reassignment.  The old native geometry must be
        # destroyed first to avoid leaking it.
        capi.destroy_geom(self.ptr)
        self.ptr = ptr
        self._post_init(g.srid)
    else:
        raise GEOSException('Transformed WKB was invalid.')
#### Topology Routines ####
def _topology(self, gptr):
    "Helper routine to return Geometry from the given pointer."
    # The resulting geometry inherits this geometry's SRID.
    return GEOSGeometry(gptr, srid=self.srid)

@property
def boundary(self):
    "Returns the boundary as a newly allocated Geometry object."
    return self._topology(capi.geos_boundary(self.ptr))

def buffer(self, width, quadsegs=8):
    """
    Returns a geometry that represents all points whose distance from this
    Geometry is less than or equal to distance. Calculations are in the
    Spatial Reference System of this Geometry. The optional third parameter sets
    the number of segment used to approximate a quarter circle (defaults to 8).
    (Text from PostGIS documentation at ch. 6.1.3)
    """
    return self._topology(capi.geos_buffer(self.ptr, width, quadsegs))

@property
def centroid(self):
    """
    The centroid is equal to the centroid of the set of component Geometries
    of highest dimension (since the lower-dimension geometries contribute zero
    "weight" to the centroid).
    """
    return self._topology(capi.geos_centroid(self.ptr))

@property
def convex_hull(self):
    """
    Returns the smallest convex Polygon that contains all the points
    in the Geometry.
    """
    return self._topology(capi.geos_convexhull(self.ptr))

def difference(self, other):
    """
    Returns a Geometry representing the points making up this Geometry
    that do not make up other.
    """
    return self._topology(capi.geos_difference(self.ptr, other.ptr))

@property
def envelope(self):
    "Return the envelope for this geometry (a polygon)."
    return self._topology(capi.geos_envelope(self.ptr))

def interpolate(self, distance):
    "Returns the Point at the given distance along this linear geometry."
    if not isinstance(self, (LineString, MultiLineString)):
        raise TypeError('interpolate only works on LineString and MultiLineString geometries')
    if not hasattr(capi, 'geos_interpolate'):
        raise NotImplementedError('interpolate requires GEOS 3.2+')
    return self._topology(capi.geos_interpolate(self.ptr, distance))

def interpolate_normalized(self, distance):
    "As `interpolate`, but `distance` is a fraction of the total length."
    if not isinstance(self, (LineString, MultiLineString)):
        raise TypeError('interpolate only works on LineString and MultiLineString geometries')
    if not hasattr(capi, 'geos_interpolate_normalized'):
        raise NotImplementedError('interpolate_normalized requires GEOS 3.2+')
    return self._topology(capi.geos_interpolate_normalized(self.ptr, distance))

def intersection(self, other):
    "Returns a Geometry representing the points shared by this Geometry and other."
    return self._topology(capi.geos_intersection(self.ptr, other.ptr))

@property
def point_on_surface(self):
    "Computes an interior point of this Geometry."
    return self._topology(capi.geos_pointonsurface(self.ptr))

def project(self, point):
    "Returns the distance along this linear geometry to the given Point."
    if not isinstance(point, Point):
        raise TypeError('locate_point argument must be a Point')
    if not isinstance(self, (LineString, MultiLineString)):
        raise TypeError('locate_point only works on LineString and MultiLineString geometries')
    if not hasattr(capi, 'geos_project'):
        raise NotImplementedError('geos_project requires GEOS 3.2+')
    # Note: returns a float, not a geometry.
    return capi.geos_project(self.ptr, point.ptr)

def project_normalized(self, point):
    "As `project`, but the result is a fraction of the total length."
    if not isinstance(point, Point):
        raise TypeError('locate_point argument must be a Point')
    if not isinstance(self, (LineString, MultiLineString)):
        raise TypeError('locate_point only works on LineString and MultiLineString geometries')
    if not hasattr(capi, 'geos_project_normalized'):
        raise NotImplementedError('project_normalized requires GEOS 3.2+')
    return capi.geos_project_normalized(self.ptr, point.ptr)

def relate(self, other):
    "Returns the DE-9IM intersection matrix for this Geometry and the other."
    return capi.geos_relate(self.ptr, other.ptr).decode()

def simplify(self, tolerance=0.0, preserve_topology=False):
    """
    Returns the Geometry, simplified using the Douglas-Peucker algorithm
    to the specified tolerance (higher tolerance => less points). If no
    tolerance provided, defaults to 0.

    By default, this function does not preserve topology - e.g. polygons can
    be split, collapse to lines, or disappear; holes can be created or
    disappear; and lines can cross. By specifying preserve_topology=True,
    the result will have the same dimension and number of components as the
    input. This is significantly slower.
    """
    if preserve_topology:
        return self._topology(capi.geos_preservesimplify(self.ptr, tolerance))
    else:
        return self._topology(capi.geos_simplify(self.ptr, tolerance))

def sym_difference(self, other):
    """
    Returns a set combining the points in this Geometry not in other,
    and the points in other not in this Geometry.
    """
    return self._topology(capi.geos_symdifference(self.ptr, other.ptr))

def union(self, other):
    "Returns a Geometry representing all the points in this Geometry and other."
    return self._topology(capi.geos_union(self.ptr, other.ptr))
#### Other Routines ####
@property
def area(self):
    "Returns the area of the Geometry."
    # The c_double out-parameter receives the value; the ctypes errcheck
    # on the prototype presumably converts it to the Python return value
    # -- confirm against the capi prototypes if modifying.
    return capi.geos_area(self.ptr, byref(c_double()))

def distance(self, other):
    """
    Returns the distance between the closest points on this Geometry
    and the other. Units will be in those of the coordinate system of
    the Geometry.
    """
    if not isinstance(other, GEOSGeometry):
        raise TypeError('distance() works only on other GEOS Geometries.')
    return capi.geos_distance(self.ptr, other.ptr, byref(c_double()))

@property
def extent(self):
    """
    Returns the extent of this geometry as a 4-tuple, consisting of
    (xmin, ymin, xmax, ymax).
    """
    env = self.envelope
    if isinstance(env, Point):
        # A point's envelope degenerates to the point itself.
        xmin, ymin = env.tuple
        xmax, ymax = xmin, ymin
    else:
        # Envelope polygon ring: corners 0 and 2 are opposite.
        xmin, ymin = env[0][0]
        xmax, ymax = env[0][2]
    return (xmin, ymin, xmax, ymax)

@property
def length(self):
    """
    Returns the length of this Geometry (e.g., 0 for point, or the
    circumference of a Polygon).
    """
    return capi.geos_length(self.ptr, byref(c_double()))

def clone(self):
    "Clones this Geometry."
    return GEOSGeometry(capi.geom_clone(self.ptr), srid=self.srid)
# Class mapping dictionary. Has to be at the end to avoid import
# conflicts with GEOSGeometry.
from django.contrib.gis.geos.linestring import LineString, LinearRing
from django.contrib.gis.geos.point import Point
from django.contrib.gis.geos.polygon import Polygon
from django.contrib.gis.geos.collections import GeometryCollection, MultiPoint, MultiLineString, MultiPolygon
from django.contrib.gis.geos.prepared import PreparedGeometry
# Maps GEOS geometry type ids (as returned by the C API) to the
# corresponding Python wrapper classes.
GEOS_CLASSES = {
    0: Point,
    1: LineString,
    2: LinearRing,
    3: Polygon,
    4: MultiPoint,
    5: MultiLineString,
    6: MultiPolygon,
    7: GeometryCollection,
}
|
gpl-3.0
|
SmartPeople/zulip
|
zerver/management/commands/delete_old_unclaimed_attachments.py
|
46
|
1892
|
from __future__ import absolute_import
from __future__ import print_function
from typing import Any
from argparse import ArgumentParser
from django.core.management.base import BaseCommand
from zerver.lib.actions import do_delete_old_unclaimed_attachments
from zerver.models import Attachment, get_old_unclaimed_attachments
class Command(BaseCommand):
    """Management command that deletes old unclaimed attachments.

    Prints the attachments that would be removed; actual deletion only
    happens when -f/--for-real is passed (dry run by default).
    """
    help = """Remove unclaimed attachments from storage older than a supplied
numerical value indicating the limit of how old the attachment can be.
One week is taken as the default value."""

    def add_arguments(self, parser):
        # type: (ArgumentParser) -> None
        parser.add_argument('-w', '--weeks',
                            dest='delta_weeks',
                            # Fix: without type=int, a CLI-supplied value
                            # arrives as str while the default is int.
                            type=int,
                            default=1,
                            help="Limiting value of how old the file can be.")
        parser.add_argument('-f', '--for-real',
                            dest='for_real',
                            action='store_true',
                            default=False,
                            help="Actually remove the files from the storage.")

    def handle(self, *args, **options):
        # type: (*Any, **Any) -> None
        delta_weeks = options['delta_weeks']
        print("Deleting unclaimed attached files older than %s" % (delta_weeks,))
        print("")

        # Print the list of files that are going to be removed before
        # doing anything destructive.
        old_attachments = get_old_unclaimed_attachments(delta_weeks)
        for old_attachment in old_attachments:
            print("%s created at %s" % (old_attachment.file_name, old_attachment.create_time))

        print("")
        if not options["for_real"]:
            print("This was a dry run. Pass -f to actually delete.")
            exit(1)

        do_delete_old_unclaimed_attachments(delta_weeks)
        print("")
        print("Unclaimed Files deleted.")
|
apache-2.0
|
svagionitis/youtube-dl
|
youtube_dl/extractor/mpora.py
|
12
|
2006
|
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..utils import int_or_none
class MporaIE(InfoExtractor):
    """Extractor for MPORA (mpora.com / mpora.de) video pages."""

    _VALID_URL = r'^https?://(www\.)?mpora\.(?:com|de)/videos/(?P<id>[^?#/]+)'
    IE_NAME = 'MPORA'

    _TEST = {
        'url': 'http://mpora.de/videos/AAdo8okx4wiz/embed?locale=de',
        'file': 'AAdo8okx4wiz.mp4',
        'md5': 'a7a228473eedd3be741397cf452932eb',
        'info_dict': {
            'title': 'Katy Curd - Winter in the Forest',
            'duration': 416,
            'uploader': 'Peter Newman Media',
        },
    }

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        video_id = m.group('id')
        webpage = self._download_webpage(url, video_id)

        # The page embeds the player configuration as a JSON blob.
        data_json = self._search_regex(
            r"new FM\.Player\('[^']+',\s*(\{.*?)\).player;", webpage, 'json')
        data = json.loads(data_json)

        uploader = data['info_overlay'].get('username')
        duration = data['video']['duration'] // 1000
        thumbnail = data['video']['encodings']['sd']['poster']
        title = data['info_overlay']['title']

        formats = []
        for encoding_id, edata in data['video']['encodings'].items():
            for src in edata['sources']:
                # Fix: the third positional argument of _search_regex is
                # the field *name* used in error messages; `False` was
                # being passed where a descriptive string belongs.
                width_str = self._search_regex(
                    r'_([0-9]+)\.[a-zA-Z0-9]+$', src['src'],
                    'width', default=None)
                vcodec = src['type'].partition('/')[2]
                formats.append({
                    'format_id': encoding_id + '-' + vcodec,
                    'url': src['src'],
                    'vcodec': vcodec,
                    'width': int_or_none(width_str),
                })

        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'uploader': uploader,
            'duration': duration,
            'thumbnail': thumbnail,
        }
|
unlicense
|
dzan/xenOnArm
|
tools/xm-test/lib/XmTestLib/Console.py
|
37
|
9322
|
#!/usr/bin/python
"""
XmConsole.py - Interact with a xen console, getting return codes and
output from commands executed there.
Copyright (C) International Business Machines Corp., 2005
Author: Dan Smith <danms@us.ibm.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; under version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
NB: This requires the domU's prompt to be set to
a _very_ specific value, set in the PROMPT
variable of this script
"""
import sys
import os
import pty
import tty
import termios
import fcntl
import select
import arch
from Test import *
# Failure reasons carried by ConsoleError.
TIMEDOUT = 1
RUNAWAY = 2

class ConsoleError(Exception):
    """Raised when console interaction fails.

    `reason` is one of TIMEDOUT (default) or RUNAWAY."""

    def __init__(self, msg, reason=TIMEDOUT):
        self.errMsg = msg
        self.reason = reason

    def __str__(self):
        return str(self.errMsg)
class XmConsole:
def __init__(self, domain, historyLimit=256, historySaveAll=True, historySaveCmds=False, cLimit=131072):
"""
Parameters:
historyLimit: specifies how many lines of history are maintained
historySaveAll: determines whether or not extra messages in
between commands are saved
historySaveCmds : determines whether or not the command echos
are included in the history buffer
"""
self.TIMEOUT = 30
self.PROMPT = "@%@%> "
self.domain = domain
self.historyBuffer = []
self.historyLines = 0
self.historyLimit = historyLimit
self.historySaveAll = historySaveAll
self.historySaveCmds = historySaveCmds
self.debugMe = False
self.limit = cLimit
consoleCmd = ["/usr/sbin/xm", "xm", "console", domain]
if verbose:
print "Console executing: %s" % str(consoleCmd)
pid, fd = pty.fork()
if pid == 0:
os.execvp("/usr/sbin/xm", consoleCmd[1:])
self.consolePid = pid
self.consoleFd = fd
tty.setraw(self.consoleFd, termios.TCSANOW)
def __addToHistory(self, line):
self.historyBuffer.append(line)
self.historyLines += 1
if self.historyLines > self.historyLimit:
self.historyBuffer = self.historyBuffer[1:]
self.historyLines -= 1
def clearHistory(self):
"""Clear the history buffer"""
self.historyBuffer = []
self.historyLines = 0
def getHistory(self):
"""Returns a string containing the entire history buffer"""
output = ""
for line in self.historyBuffer:
output += line + "\n"
return output
def setTimeout(self, timeout):
"""Sets the timeout used to determine if a remote command
has blocked"""
self.TIMEOUT = timeout
def setPrompt(self, prompt):
"""Sets the string key used to delimit the end of command
output"""
self.PROMPT = prompt
def __getprompt(self, fd):
timeout = 0
bytes = 0
buffer = ""
while timeout < 180:
# eat anything while total bytes less than limit else raise RUNAWAY
while (not self.limit) or (bytes < self.limit):
i, o, e = select.select([fd], [], [], 1)
if fd in i:
try:
foo = os.read(fd, 1)
if self.debugMe:
sys.stdout.write(foo)
bytes += 1
buffer += foo
except Exception, exn:
raise ConsoleError(str(exn))
else:
break
else:
raise ConsoleError("Console run-away (exceeded %i bytes)"
% self.limit, RUNAWAY)
# Check to see if the buffer contains anything interetsing
arch.checkBuffer(buffer)
# press enter
os.write(self.consoleFd, "\n")
# look for prompt
for prompt_char in "\r\n" + self.PROMPT:
i, o, e = select.select([fd], [], [], 1)
if fd in i:
try:
foo = os.read(fd, 1)
if self.debugMe:
sys.stdout.write(foo)
if foo != prompt_char:
break
except Exception, exn:
raise ConsoleError(str(exn))
else:
timeout += 1
break
else:
break
else:
raise ConsoleError("Timed out waiting for console prompt")
def __runCmd(self, command, saveHistory=True):
output = ""
line = ""
lines = 0
bytes = 0
self.__getprompt(self.consoleFd)
if verbose:
print "[%s] Sending `%s'" % (self.domain, command)
os.write(self.consoleFd, "%s\n" % command)
while True:
i, o, e = select.select([self.consoleFd], [], [], self.TIMEOUT)
if self.consoleFd in i:
try:
str = os.read(self.consoleFd, 1)
if self.debugMe:
sys.stdout.write(str)
bytes += 1
except Exception, exn:
raise ConsoleError(
"Failed to read from console (fd=%i): %s" %
(self.consoleFd, exn))
else:
raise ConsoleError("Timed out waiting for console command")
if self.limit and bytes >= self.limit:
raise ConsoleError("Console run-away (exceeded %i bytes)"
% self.limit, RUNAWAY)
if str == "\n":
if lines > 0:
output += line + "\n"
if saveHistory:
self.__addToHistory(line)
elif self.historySaveCmds and saveHistory:
self.__addToHistory("*" + line)
lines += 1
line = ""
elif str == "\r":
pass # ignore \r's
else:
line += str
if line == self.PROMPT:
break
return output
def runCmd(self, command):
"""Runs a command on the remote terminal and returns the output
as well as the return code. For example:
ret = c.runCmd("ls")
print ret["output"]
sys.exit(run["return"])
"""
# Allow exceptions to bubble up
realOutput = self.__runCmd(command)
retOutput = self.__runCmd("echo $?", saveHistory=False)
try:
retCode = int(retOutput)
except:
retCode = 255
return {
"output": realOutput,
"return": retCode,
}
def sendInput(self, input):
"""Sends input to the remote terminal, but doesn't check
for a return code"""
realOutput = self.__runCmd(input)
return {
"output": realOutput,
"return": 0,
}
def __closeConsole(self):
"""Closes the console connection and ensures that the console
process is killed. This should only be called by the domain.
Tests should call domain.closeConsole()"""
if self.consolePid != 0:
os.close(self.consoleFd)
os.kill(self.consolePid, 2)
self.consolePid = 0
def setLimit(self, limit):
"""Sets a limit on the number of bytes that can be
read in an attempt to run a command. We need this when
running something that can run away"""
try:
self.limit = int(limit)
except Exception, e:
self.limit = None
def setHistorySaveCmds(self, value):
# True or False
self.historySaveCmds = value
if __name__ == "__main__":
"""This is both an example of using the XmConsole class, as
well as a utility for command-line execution of single commands
on a domU console. Prints output to stdout. Exits with the same
code as the domU command.
"""
verbose = True
try:
t = XmConsole(sys.argv[1])
except ConsoleError, e:
print "Failed to attach to console (%s)" % str(e)
sys.exit(255)
try:
run = t.runCmd(sys.argv[2])
except ConsoleError, e:
print "Console failed (%)" % str(e)
sys.exit(255)
t._XmConsole__closeConsole()
print run["output"],
sys.exit(run["return"])
|
gpl-2.0
|
rhinstaller/anaconda
|
pyanaconda/modules/security/__main__.py
|
5
|
1150
|
#
# Security DBus service launcher.
#
# Copyright (C) 2020 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from pyanaconda.modules.common import init
# NOTE(review): init() appears to perform common module setup and is
# deliberately called *before* importing SecurityService -- confirm
# before reordering these statements.
init()

from pyanaconda.modules.security.security import SecurityService

# Instantiate and run the Security DBus service (blocks until shutdown).
service = SecurityService()
service.run()
|
gpl-2.0
|
neraliu/tainted-phantomjs
|
src/breakpad/src/tools/gyp/test/generator-output/gyptest-actions.py
|
151
|
1953
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies --generator-output= behavior when using actions.
"""
import TestGyp

test = TestGyp.TestGyp()

# All the generated files should go under 'gypfiles'. The source directory
# ('actions') should be untouched.
test.writable(test.workpath('actions'), False)
test.run_gyp('actions.gyp',
             '--generator-output=' + test.workpath('gypfiles'),
             chdir='actions')
test.writable(test.workpath('actions'), True)

test.relocate('actions', 'relocate/actions')
test.relocate('gypfiles', 'relocate/gypfiles')

# Mark the sources read-only again so the build below proves nothing is
# written back into them (except the directories exempted next).
test.writable(test.workpath('relocate/actions'), False)

# Some of the action outputs use "pure" relative paths (i.e. without prefixes
# like <(INTERMEDIATE_DIR) or <(PROGRAM_DIR)). Even though we are building under
# 'gypfiles', such outputs will still be created relative to the original .gyp
# sources. Projects probably wouldn't normally do this, since it kind of defeats
# the purpose of '--generator-output', but it is supported behaviour.
test.writable(test.workpath('relocate/actions/build'), True)
test.writable(test.workpath('relocate/actions/subdir1/build'), True)
test.writable(test.workpath('relocate/actions/subdir1/actions-out'), True)
test.writable(test.workpath('relocate/actions/subdir2/build'), True)
test.writable(test.workpath('relocate/actions/subdir2/actions-out'), True)

test.build('actions.gyp', test.ALL, chdir='relocate/gypfiles')

expect = """\
Hello from program.c
Hello from make-prog1.py
Hello from make-prog2.py
"""

if test.format == 'xcode':
    chdir = 'relocate/actions/subdir1'
else:
    chdir = 'relocate/gypfiles'
test.run_built_executable('program', chdir=chdir, stdout=expect)

test.must_match('relocate/actions/subdir2/actions-out/file.out',
                "Hello from make-file.py\n")

test.pass_test()
|
bsd-3-clause
|
zasdfgbnm/tensorflow
|
tensorflow/contrib/boosted_trees/estimator_batch/custom_export_strategy_test.py
|
47
|
9513
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the conversion code and for feature importances export.
Tests that cover conversion from TFBT format to a tensorflow.contrib.
decision_tree generic_tree_model format and feature importances export.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf import text_format
from tensorflow.contrib.boosted_trees.estimator_batch import custom_export_strategy
from tensorflow.contrib.boosted_trees.proto import tree_config_pb2
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class ConvertModelTest(test_util.TensorFlowTestCase):
    """Tests conversion of TFBT ensembles to the generic_tree_model
    format and the extraction of per-feature importances."""

    def _make_trees(self):
        """Build a two-tree DecisionTreeEnsembleConfig fixture.

        Tree 0 is a single leaf; tree 1 (weight 0.1) exercises dense,
        sparse (default-left and default-right) and categorical splits.
        Returns (ensemble_config, feature_column_names).
        """
        dtec_str = """
trees {
nodes {
leaf {
vector {
value: -1
}
}
}
}
trees {
nodes {
dense_float_binary_split {
feature_column: 0
threshold: 1740.0
left_id: 1
right_id: 2
}
node_metadata {
gain: 500
}
}
nodes {
leaf {
vector {
value: 0.6
}
}
}
nodes {
sparse_float_binary_split_default_left {
split {
feature_column: 0
threshold: 1500.0
left_id: 3
right_id: 4
}
}
node_metadata {
gain: 500
}
}
nodes {
categorical_id_binary_split {
feature_column: 0
feature_id: 5
left_id: 5
right_id: 6
}
node_metadata {
gain: 500
}
}
nodes {
leaf {
vector {
value: 0.8
}
}
}
nodes {
leaf {
vector {
value: 0.5
}
}
}
nodes {
sparse_float_binary_split_default_right {
split {
feature_column: 1
dimension_id:3
threshold: -0.4
left_id: 7
right_id: 8
}
}
node_metadata {
gain: 3600
}
}
nodes {
leaf {
vector {
value: 0.36
}
}
}
nodes {
leaf {
vector {
value: 18
}
}
}
}
tree_weights: 1.0
tree_weights: 0.1
"""
        dtec = tree_config_pb2.DecisionTreeEnsembleConfig()
        text_format.Merge(dtec_str, dtec)
        feature_columns = [
            "feature_b",
            "feature_a",
            "feature_a_m",
            "feature_d",
        ]
        return dtec, feature_columns

    def testConvertModel(self):
        """Converted ensemble must match the expected universal format."""
        dtec, feature_columns = self._make_trees()
        # Assume 2 sparse float columns, one with 1 dimension, the second one with
        # 5 dimensions.
        # The feature columns in the order they were added.
        out = custom_export_strategy.convert_to_universal_format(
            dtec, feature_columns, 1, 2, 1)
        # Features a and a_m are sparse float features, a_m is multidimensional.
        expected_tree = """
features { key: "feature_a_0" }
features { key: "feature_a_m_3" }
features { key: "feature_b" }
features { key: "feature_d" }
model {
ensemble {
summation_combination_technique {
}
members {
submodel {
decision_tree {
nodes {
node_id {
}
leaf {
vector {
value {
float_value: -1.0
}
}
}
}
}
}
submodel_id {
}
}
members {
submodel {
decision_tree {
nodes {
node_id {
}
binary_node {
left_child_id {
value: 1
}
right_child_id {
value: 2
}
inequality_left_child_test {
feature_id {
id {
value: "feature_b"
}
}
threshold {
float_value: 1740.0
}
}
}
}
nodes {
node_id {
value: 1
}
leaf {
vector {
value {
float_value: 0.06
}
}
}
}
nodes {
node_id {
value: 2
}
binary_node {
left_child_id {
value: 3
}
right_child_id {
value: 4
}
inequality_left_child_test {
feature_id {
id {
value: "feature_a_0"
}
}
threshold {
float_value: 1500.0
}
}
}
}
nodes {
node_id {
value: 3
}
binary_node {
left_child_id {
value: 5
}
right_child_id {
value: 6
}
default_direction: RIGHT
custom_left_child_test {
[type.googleapis.com/tensorflow.decision_trees.MatchingValuesTest] {
feature_id {
id {
value: "feature_d"
}
}
value {
int64_value: 5
}
}
}
}
}
nodes {
node_id {
value: 4
}
leaf {
vector {
value {
float_value: 0.08
}
}
}
}
nodes {
node_id {
value: 5
}
leaf {
vector {
value {
float_value: 0.05
}
}
}
}
nodes {
node_id {
value: 6
}
binary_node {
left_child_id {
value: 7
}
right_child_id {
value: 8
}
default_direction: RIGHT
inequality_left_child_test {
feature_id {
id {
value: "feature_a_m_3"
}
}
threshold {
float_value: -0.4
}
}
}
}
nodes {
node_id {
value: 7
}
leaf {
vector {
value {
float_value: 0.036
}
}
}
}
nodes {
node_id {
value: 8
}
leaf {
vector {
value {
float_value: 1.8
}
}
}
}
}
}
submodel_id {
value: 1
}
}
}
}"""
        self.assertProtoEquals(expected_tree, out)

    def testFeatureImportance(self):
        """Importances: per-feature gain scaled by the tree weight (0.1)."""
        dtec, feature_columns = self._make_trees()
        feature_importances = custom_export_strategy._get_feature_importances(
            dtec, feature_columns, 1, 2, 1)
        self.assertItemsEqual(
            ["feature_b", "feature_a_0", "feature_a_m_3", "feature_d"],
            feature_importances.keys())
        self.assertAlmostEqual(50.0, feature_importances["feature_b"], places=4)
        self.assertAlmostEqual(50.0, feature_importances["feature_a_0"], places=4)
        self.assertAlmostEqual(50.0, feature_importances["feature_d"], places=4)
        self.assertAlmostEqual(
            360.0, feature_importances["feature_a_m_3"], places=4)
if __name__ == "__main__":
googletest.main()
|
apache-2.0
|
MrNuggles/HeyBoet-Telegram-Bot
|
temboo/Library/InfluenceExplorer/EntityOverview.py
|
5
|
3664
|
# -*- coding: utf-8 -*-
###############################################################################
#
# EntityOverview
# Returns general information about a particular politician, individual, or organization with a given entity id.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class EntityOverview(Choreography):
    """Choreo that returns general information about a particular
    politician, individual, or organization with a given entity id."""

    def __init__(self, temboo_session):
        """
        Create a new instance of the EntityOverview Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(EntityOverview, self).__init__(temboo_session, '/Library/InfluenceExplorer/EntityOverview')

    def new_input_set(self):
        # Factory for the input container consumed by execution.
        return EntityOverviewInputSet()

    def _make_result_set(self, result, path):
        return EntityOverviewResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        return EntityOverviewChoreographyExecution(session, exec_id, path)
class EntityOverviewInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the EntityOverview
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    def set_APIKey(self, value):
        """
        Set the value of the APIKey input for this Choreo. ((required, string) The API key provided by Sunlight Data Services.)
        """
        super(EntityOverviewInputSet, self)._set_input('APIKey', value)

    def set_Cycle(self, value):
        """
        Set the value of the Cycle input for this Choreo. ((optional, date) Specify a yyyy-formatted election cycle. Example: 2012, or 2008|2012 to limit results between 2008 and 2012.)
        """
        super(EntityOverviewInputSet, self)._set_input('Cycle', value)

    def set_EntityID(self, value):
        """
        Set the value of the EntityID input for this Choreo. ((required, string) The ID for the Entity that you want to return information for. This ID can be retrieved by running the SearchByName Choreo.)
        """
        super(EntityOverviewInputSet, self)._set_input('EntityID', value)
class EntityOverviewResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the EntityOverview Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        # NOTE(review): the parameter name shadows the builtin `str`; it is
        # kept unchanged for compatibility with the generated Temboo API.
        return json.loads(str)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Influence Explorer.)
        """
        # Returns None when the execution produced no "Response" output.
        return self._output.get('Response', None)
class EntityOverviewChoreographyExecution(ChoreographyExecution):
    """Tracks an asynchronous execution of the EntityOverview Choreo."""
    def _make_result_set(self, response, path):
        # Wrap the raw response in this Choreo's typed ResultSet.
        return EntityOverviewResultSet(response, path)
|
gpl-3.0
|
nathania/data-science-from-scratch
|
code/gradient_descent.py
|
53
|
5895
|
from __future__ import division
from collections import Counter
from linear_algebra import distance, vector_subtract, scalar_multiply
import math, random
def sum_of_squares(v):
    """computes the sum of squared elements in v"""
    total = 0
    for component in v:
        total += component * component
    return total
def difference_quotient(f, x, h):
    """Forward-difference estimate of f'(x): (f(x+h) - f(x)) / h."""
    numerator = f(x + h) - f(x)
    return numerator / h
def plot_estimated_derivative():
    """Plot the exact derivative of x**2 against its difference-quotient
    estimate, to show the two are visually indistinguishable."""
    def square(x):
        return x * x
    def derivative(x):
        return 2 * x
    derivative_estimate = lambda x: difference_quotient(square, x, h=0.00001)
    # plot to show they're basically the same
    import matplotlib.pyplot as plt
    x = range(-10, 10)
    # Materialize the mapped values: under Python 3, map() returns a one-shot
    # iterator, which matplotlib cannot reliably plot.
    plt.plot(x, [derivative(x_i) for x_i in x], 'rx')           # red x
    plt.plot(x, [derivative_estimate(x_i) for x_i in x], 'b+')  # blue +
    plt.show()  # purple *, hopefully
def partial_difference_quotient(f, v, i, h):
    """Estimate the i-th partial derivative of f at v using step h."""
    # copy v, then nudge only the i-th coordinate by h
    bumped = list(v)
    bumped[i] = bumped[i] + h
    return (f(bumped) - f(v)) / h
def estimate_gradient(f, v, h=0.00001):
    """Estimate the full gradient of f at v, one partial derivative at a time."""
    gradient = []
    for i, _unused in enumerate(v):
        gradient.append(partial_difference_quotient(f, v, i, h))
    return gradient
def step(v, direction, step_size):
    """move step_size in the direction from v"""
    moved = []
    for component, delta in zip(v, direction):
        moved.append(component + step_size * delta)
    return moved
def sum_of_squares_gradient(v):
    """Gradient of sum_of_squares: d/dv_i of v_i**2 is 2*v_i."""
    return [component * 2 for component in v]
def safe(f):
    """Return a wrapped version of f that maps any failure to infinity.

    Useful for optimizers that probe wild parameter values: instead of
    crashing, a failing evaluation just looks like a terrible objective.
    """
    def safe_f(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        # `except Exception` rather than a bare `except:` so that
        # KeyboardInterrupt / SystemExit still propagate and the process
        # stays interruptible.
        except Exception:
            return float('inf')  # this means "infinity" in Python
    return safe_f
#
#
# minimize / maximize batch
#
#
def minimize_batch(target_fn, gradient_fn, theta_0, tolerance=0.000001):
    """use gradient descent to find theta that minimizes target function"""
    step_sizes = [100, 10, 1, 0.1, 0.01, 0.001, 0.0001, 0.00001]
    target_fn = safe(target_fn)          # never let an evaluation blow up
    theta, value = theta_0, target_fn(theta_0)
    while True:
        gradient = gradient_fn(theta)
        # probe every step size and keep the candidate with the lowest error
        candidates = (step(theta, gradient, -size) for size in step_sizes)
        best = min(candidates, key=target_fn)
        best_value = target_fn(best)
        # stop once an iteration no longer improves by at least `tolerance`
        if abs(value - best_value) < tolerance:
            return theta
        theta, value = best, best_value
def negate(f):
    """return a function that for any input x returns -f(x)"""
    def negated(*args, **kwargs):
        return -f(*args, **kwargs)
    return negated
def negate_all(f):
    """the same when f returns a list of numbers"""
    def negated(*args, **kwargs):
        return [-value for value in f(*args, **kwargs)]
    return negated
def maximize_batch(target_fn, gradient_fn, theta_0, tolerance=0.000001):
    """Maximize target_fn by minimizing its negation (and the negated gradient)."""
    return minimize_batch(negate(target_fn),
                          negate_all(gradient_fn),
                          theta_0,
                          tolerance)
#
# minimize / maximize stochastic
#
def in_random_order(data):
    """generator that returns the elements of data in random order"""
    order = list(range(len(data)))   # one index per element
    random.shuffle(order)            # random permutation of the indexes
    for idx in order:                # yield the data in that order
        yield data[idx]
def minimize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):
    """Stochastic gradient descent: update theta per data point, shrinking the
    step size while progress stalls, and return the best theta seen.
    """
    # list() is essential: under Python 3, zip() returns a one-shot iterator,
    # so without it `data` would be empty on every sweep after the first.
    data = list(zip(x, y))
    theta = theta_0                             # initial guess
    alpha = alpha_0                             # initial step size
    min_theta, min_value = None, float("inf")   # the minimum so far
    iterations_with_no_improvement = 0
    # if we ever go 100 iterations with no improvement, stop
    while iterations_with_no_improvement < 100:
        value = sum(target_fn(x_i, y_i, theta) for x_i, y_i in data)
        if value < min_value:
            # if we've found a new minimum, remember it
            # and go back to the original step size
            min_theta, min_value = theta, value
            iterations_with_no_improvement = 0
            alpha = alpha_0
        else:
            # otherwise we're not improving, so try shrinking the step size
            iterations_with_no_improvement += 1
            alpha *= 0.9
        # and take a gradient step for each of the data points
        for x_i, y_i in in_random_order(data):
            gradient_i = gradient_fn(x_i, y_i, theta)
            theta = vector_subtract(theta, scalar_multiply(alpha, gradient_i))
    return min_theta
def maximize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):
    """Maximize by running minimize_stochastic on the negated objective/gradient."""
    return minimize_stochastic(negate(target_fn),
                               negate_all(gradient_fn),
                               x, y, theta_0, alpha_0)
if __name__ == "__main__":
    # Demo 1: hand-rolled gradient descent on sum_of_squares.
    # NOTE(review): Python 2 print statements throughout -- this script does
    # not run under Python 3 as written.
    print "using the gradient"
    v = [random.randint(-10,10) for i in range(3)]
    tolerance = 0.0000001
    while True:
        #print v, sum_of_squares(v)
        gradient = sum_of_squares_gradient(v)   # compute the gradient at v
        next_v = step(v, gradient, -0.01)       # take a negative gradient step
        if distance(next_v, v) < tolerance:     # stop if we're converging
            break
        v = next_v                              # continue if we're not
    print "minimum v", v
    print "minimum value", sum_of_squares(v)
    print
    # Demo 2: the same problem solved via minimize_batch.
    print "using minimize_batch"
    v = [random.randint(-10,10) for i in range(3)]
    v = minimize_batch(sum_of_squares, sum_of_squares_gradient, v)
    print "minimum v", v
    print "minimum value", sum_of_squares(v)
|
unlicense
|
deanhiller/databus
|
webapp/play1.3.x/python/Lib/encodings/cp1258.py
|
93
|
13927
|
""" Python Character Mapping Codec cp1258 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1258.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp1258 codec built on the module-level charmap tables."""
    def encode(self,input,errors='strict'):
        # Map each character through encoding_table; unmapped characters are
        # handled per *errors* ('strict' raises UnicodeEncodeError).
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        # Map each byte through decoding_table (one entry per 0x00-0xFF).
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # charmap encoding is stateless, so *final* needs no special handling;
        # [0] drops the consumed-length count from the (output, length) pair.
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # charmap decoding is stateless; [0] drops the consumed-length count.
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream variant: all behavior comes from Codec.encode via the mixin.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream variant: all behavior comes from Codec.decode via the mixin.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record the `encodings` package registry expects."""
    return codecs.CodecInfo(
        name='cp1258',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u20ac' # 0x80 -> EURO SIGN
u'\ufffe' # 0x81 -> UNDEFINED
u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
u'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\u2020' # 0x86 -> DAGGER
u'\u2021' # 0x87 -> DOUBLE DAGGER
u'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u2030' # 0x89 -> PER MILLE SIGN
u'\ufffe' # 0x8A -> UNDEFINED
u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
u'\ufffe' # 0x8D -> UNDEFINED
u'\ufffe' # 0x8E -> UNDEFINED
u'\ufffe' # 0x8F -> UNDEFINED
u'\ufffe' # 0x90 -> UNDEFINED
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\u02dc' # 0x98 -> SMALL TILDE
u'\u2122' # 0x99 -> TRADE MARK SIGN
u'\ufffe' # 0x9A -> UNDEFINED
u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
u'\ufffe' # 0x9D -> UNDEFINED
u'\ufffe' # 0x9E -> UNDEFINED
u'\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\xa5' # 0xA5 -> YEN SIGN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\xaf' # 0xAF -> MACRON
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xb8' # 0xB8 -> CEDILLA
u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
u'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
u'\xbf' # 0xBF -> INVERTED QUESTION MARK
u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\u0300' # 0xCC -> COMBINING GRAVE ACCENT
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
u'\u0309' # 0xD2 -> COMBINING HOOK ABOVE
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u01a0' # 0xD5 -> LATIN CAPITAL LETTER O WITH HORN
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u01af' # 0xDD -> LATIN CAPITAL LETTER U WITH HORN
u'\u0303' # 0xDE -> COMBINING TILDE
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\u0301' # 0xEC -> COMBINING ACUTE ACCENT
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
u'\u0323' # 0xF2 -> COMBINING DOT BELOW
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\u01a1' # 0xF5 -> LATIN SMALL LETTER O WITH HORN
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u01b0' # 0xFD -> LATIN SMALL LETTER U WITH HORN
u'\u20ab' # 0xFE -> DONG SIGN
u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
mpl-2.0
|
K-Constantine/Amaraki
|
core/deps/gyp/test/msvs/filters/gyptest-filters-2008.py
|
166
|
1599
|
#!/usr/bin/env python
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that extra filters are pruned correctly for Visual Studio 2008.
"""
import re
import TestGyp
def strip_ws(str):
    """Collapse an indented multi-line literal: remove the leading spaces on
    every line, then join all lines into one string."""
    # (parameter name `str` kept as-is: it is part of the public signature)
    dedented = re.sub('^ +', '', str, flags=re.M)
    return dedented.replace('\n', '')
# Generate VS2008 projects from filters.gyp and assert on the exact <Files>
# layout each generated .vcproj should contain.
test = TestGyp.TestGyp(formats=['msvs'])
test.run_gyp('filters.gyp', '-G', 'standalone', '-G', 'msvs_version=2008')
# A target with no sources collapses to a self-closing <Files/> element.
test.must_contain('no_source_files.vcproj', '<Files/>')
test.must_contain('one_source_file.vcproj', strip_ws('''\
<Files>
<File RelativePath="..\\folder\\a.c"/>
</Files>
'''))
test.must_contain('two_source_files.vcproj', strip_ws('''\
<Files>
<File RelativePath="..\\folder\\a.c"/>
<File RelativePath="..\\folder\\b.c"/>
</Files>
'''))
# Files in different folders get one <Filter> per folder.
test.must_contain('three_files_in_two_folders.vcproj', strip_ws('''\
<Files>
<Filter Name="folder1">
<File RelativePath="..\\folder1\\a.c"/>
<File RelativePath="..\\folder1\\b.c"/>
</Filter>
<Filter Name="folder2">
<File RelativePath="..\\folder2\\c.c"/>
</Filter>
</Files>
'''))
# Nested source folders nest their <Filter> elements correspondingly.
test.must_contain('nested_folders.vcproj', strip_ws('''\
<Files>
<Filter Name="folder1">
<Filter Name="nested">
<File RelativePath="..\\folder1\\nested\\a.c"/>
<File RelativePath="..\\folder1\\nested\\b.c"/>
</Filter>
<Filter Name="other">
<File RelativePath="..\\folder1\\other\\c.c"/>
</Filter>
</Filter>
<Filter Name="folder2">
<File RelativePath="..\\folder2\\d.c"/>
</Filter>
</Files>
'''))
test.pass_test()
|
mit
|
abusse/cinder
|
cinder/tests/test_fusionio_ioControl.py
|
3
|
41319
|
# Copyright (c) 2014 Fusion-io, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
import mock
from oslo_utils import timeutils
from oslo_utils import units
import requests
from cinder import context
from cinder.db.sqlalchemy.models import VolumeMetadata
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.fusionio.ioControl import FIOconnection
from cinder.volume.drivers.fusionio.ioControl import FIOioControlDriver
from cinder.volume import qos_specs
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
basic_net_response = [{"IsManagementPort": True,
"NetworkAddress": "10.10.1.82",
"IsReplicationPort": True, "OperationalState": "up",
"ControllerUID": "FakeControl1_UID",
"IfIndex": 2},
{"IsManagementPort": True,
"NetworkAddress": "10.10.1.83",
"IsReplicationPort": True, "OperationalState": "up",
"ControllerUID": "FakeControl1_UID",
"IfIndex": 3},
{"IsManagementPort": False,
"NetworkAddress": "",
"IsReplicationPort": False, "OperationalState": "down",
"ControllerUID": "FakeControl1_UID",
"IfIndex": 4},
{"IsManagementPort": True,
"NetworkAddress": "10.10.2.88",
"IsReplicationPort": True, "OperationalState": "up",
"ControllerUID": "FakeControl2_UID",
"IfIndex": 2},
{"IsManagementPort": False,
"NetworkAddress": "10.10.2.84",
"IsReplicationPort": False, "OperationalState": "up",
"ControllerUID": "FakeControl2_UID",
"IfIndex": 3},
{"IsManagementPort": False,
"NetworkAddress": "",
"IsReplicationPort": False, "OperationalState": "down",
"ControllerUID": "FakeControl2_UID",
"IfIndex": 4}]
basic_pools_response = [{"TotalMB": 5079, "Name": "PoolOwnerA",
"ExportedVolumeMB": 2049,
"basetype": "StoragePool", "UsedVolumeMB": 3,
"ObjectPath": "", "UsedMetaMB": 4, "UsedMB": 4,
"SizeMB": 68677278, "UsedSnapMB": 0,
"PagingUsedMB": 4,
"CurrentOwnerUUID": "FakeControl1_UID",
"TaskId": "", "PagingTotalMB": 5079, "Ready": True,
"id": "FakePoolA_id",
"Size": 72013345456128},
{"TotalMB": 5079, "Name": "PoolOwnerB",
"ExportedVolumeMB": 2049,
"basetype": "StoragePool", "UsedVolumeMB": 193,
"ObjectPath": "", "UsedMetaMB": 3, "UsedMB": 211,
"SizeMB": 68677278, "UsedSnapMB": 0,
"PagingUsedMB": 211,
"CurrentOwnerUUID": "FakeControl2_UID",
"TaskId": "", "PagingTotalMB": 5079, "Ready": True,
"id": "FakePoolB_id",
"Size": 72013345456128}
]
basic_vol_response = [{"basetype": "Volume", "ObjectPath": "", "TaskId": "",
"id": "FakeBasicVolID",
"Name": "cinderVolumeID",
"IQN": "iqn.2010-11.com.ngs:Volume:FakeBasicVolID",
"Size": 1074266112, "SizeMB": 1024, "HighWaterMark": 0,
"HighWaterMarkMB": 0, "MetadataSize": 262144,
"MetadataSizeMB": 0, "DupedSize": 1074266112,
"DupedSizeMB": 1024, "FaultTolerance": 0,
"PathTolerance": 0,
"AllowedTierMask": 18446744073709551615,
"RequiredTierMask": 0, "NumberOfPagesPerChapter": 0,
"CreateDateTime": 1390837136,
"LayerId": "407115424bb9539c",
"ParentLayerId": "0", "Protocol": "iscsi",
"PoolUUID": "FakePoolB_id",
"PolicyUUID": "00000000-00000000-0000-000000000000",
"CurrentOwnerUUID": "FakeControl2_UID",
"AclGroupList": ["1"], "ReplicaPeerList": [],
"SnapshotRetention": 0}
]
basic_policy_response = [{"id": "00000000-00000000-0000-000000000000",
"Name": "Policy 5", },
{"id": "00000000-00000000-0000-000000000002",
"Name": "Policy 4", },
{"id": "00000000-00000000-0000-000000000004",
"Name": "Policy 3", },
{"id": "00000000-00000000-0000-000000000008",
"Name": "Policy 2", },
{"id": "00000000-00000000-0000-000000000010",
"Name": "Policy 1", },
]
basic_snapshot_response = [{"basetype": "Snapshot", "ObjectPath": "",
"TaskId": "", "id": "407115424bb9539c",
"Name": "cinderSnapshotID",
"VolumeUUID": "FakeBasicVolID",
"PoolUUID": "FakePoolB_id",
"ParentUUID": "0", "Size": 1074266112,
"SizeMB": 1024, "SizeUsed": 0, "SizeUsedMB": 0,
"SizeReclaimable": 0, "SizeReclaimableMB": 0,
"CreateDateTime": 1390952554, "ChildCount": 1,
"IsMounted": False, "IsHostConsistent": False,
"ReplicaInfoList": []}
]
basic_acl_group_response = [{"id": 1,
"GroupName": "Deny Access",
"InitiatorList": [], },
{"id": 2,
"GroupName": "Allow Access",
"InitiatorList": ["iqn*"], },
{"id": 3,
"GroupName": "fake:01", "Description": "",
"InitiatorList": ["fake:01"], },
{"id": 4,
"GroupName": "iqn.1994-05.com.redhat:fake1",
"InitiatorList": ["iqn.1994-05.com.rhel:fake1"],
},
{"id": 5,
"GroupName": "MyGroup", "Description": "",
"InitiatorList": "iqn.1994-05.com.rhel:fake2", }
]
def create_configuration():
    """Build a conf.Configuration carrying fake SAN credentials and
    test-friendly driver settings."""
    configuration = conf.Configuration(None)
    settings = {
        'san_ip': "10.123.10.123",
        'san_login': "fioTestUser",
        'san_password': "fioTestUserPassword",
        # we can set targetdelay to 0 for testing
        'fusionio_iocontrol_targetdelay': 0,
        'fusionio_iocontrol_retry': 3,
        'fusionio_iocontrol_verify_cert': True,
    }
    for attr, value in settings.items():
        setattr(configuration, attr, value)
    return configuration
class FIOFakeResponse(object):
    """Fake response to requests."""

    def __init__(self, code=None, text=None):
        # Mirror the two attributes the driver reads from a real
        # requests.Response object.
        self.status_code = code
        self.text = text

    def json(self):
        """Decode self.text the same way requests.Response.json() would."""
        return json.loads(self.text)

    def raise_for_status(self):
        """Raise HTTPError for any status code above 300; otherwise a no-op."""
        if not self.status_code > 300:
            return
        raise requests.exceptions.HTTPError
class FIOioControlConnectionTests(test.TestCase):
VERSION = '1.0.0'
fakeSessionID = '12345678'
def setUp(self):
super(FIOioControlConnectionTests, self).setUp()
self.configuration = create_configuration()
self.ctxt = context.get_admin_context()
return_text = json.dumps({"Version": FIOconnection.APIVERSION})
get_return = FIOFakeResponse(code=200,
text=return_text)
requests.get = mock.Mock(return_value=get_return)
self.conn = FIOconnection(self.configuration.san_ip,
self.configuration.san_login,
self.configuration.san_password,
self.configuration.fusionio_iocontrol_retry,
(self.configuration.
fusionio_iocontrol_verify_cert),)
def test_conn_init_sucess(self):
expected = [mock.call(url=("https://" +
self.configuration.san_ip +
"/AUTH/Version"),
headers=self.conn.defhdrs,
verify=True)]
requests.get.assert_has_calls(expected)
def test_wrong_version(self):
expected = json.dumps({"Version": (FIOconnection.APIVERSION + ".1")})
get_return = FIOFakeResponse(code=200,
text=expected)
requests.get = mock.Mock(return_value=get_return)
self.assertRaises(exception.VolumeDriverException,
FIOconnection,
self.configuration.san_ip,
self.configuration.san_login,
self.configuration.san_password,
self.configuration.fusionio_iocontrol_retry,
self.configuration.fusionio_iocontrol_verify_cert,)
def test_create_session_sucess(self):
expected_text = json.dumps({"id": self.fakeSessionID})
post_return = FIOFakeResponse(code=201,
text=expected_text)
put_return = FIOFakeResponse(code=201,
text=json.dumps({"Status": 1}))
requests.post = mock.Mock(return_value=post_return)
requests.put = mock.Mock(return_value=put_return)
result = self.conn._create_session()
expectedhdr = copy.deepcopy(self.conn.defhdrs)
expectedhdr["Cookie"] = 'session=' + self.fakeSessionID
assert result == expectedhdr
def test_create_session_auth_fail(self):
expected_text = json.dumps({"id": self.fakeSessionID})
post_return = FIOFakeResponse(code=201,
text=expected_text)
put_return = FIOFakeResponse(code=201,
text=json.dumps({"Status": (-1)}))
requests.post = mock.Mock(return_value=post_return)
requests.put = mock.Mock(return_value=put_return)
requests.delete = mock.Mock()
self.assertRaises(exception.VolumeDriverException,
self.conn._create_session,)
def test_delete_session_sucess(self):
requests.delete = mock.Mock(return_value=True)
hdrs = copy.deepcopy(self.conn.defhdrs)
hdrs["Cookie"] = 'session=' + self.fakeSessionID
self.conn._delete_session(hdrs)
expected = [mock.call(url=("https://" +
self.configuration.san_ip +
"/AUTH/SESSION/" + self.fakeSessionID),
headers=self.conn.defhdrs,
verify=True), ]
requests.delete.assert_has_calls(expected)
def test_put_sucess(self):
put_return = FIOFakeResponse(code=201,
text=json.dumps({"Status": 1}))
requests.put = mock.Mock(return_value=put_return)
expectedhdr = copy.deepcopy(self.conn.defhdrs)
expectedhdr["Cookie"] = 'session=' + self.fakeSessionID
self.conn._create_session = mock.Mock(return_value=expectedhdr)
self.conn._delete_session = mock.Mock()
testurl = '/test/url/'
testcontent = {'testdict': 'testvalue'}
self.conn.put(testurl, testcontent)
self.conn.post(testurl, testcontent)
expected = [mock.call(), ]
self.conn._create_session.assert_has_calls(expected)
expected = [mock.call(expectedhdr), ]
self.conn._delete_session.assert_has_calls(expected)
expected = [mock.call(url=self.conn._complete_uri(testurl),
data=json.dumps(testcontent, sort_keys=True),
headers=expectedhdr, verify=True), ]
requests.put.assert_has_calls(expected)
def test_post_sucess(self):
expected_text = json.dumps({"id": self.fakeSessionID})
post_return = FIOFakeResponse(code=201,
text=expected_text)
requests.post = mock.Mock(return_value=post_return)
expectedhdr = copy.deepcopy(self.conn.defhdrs)
expectedhdr["Cookie"] = 'session=' + self.fakeSessionID
self.conn._create_session = mock.Mock(return_value=expectedhdr)
self.conn._delete_session = mock.Mock()
testurl = '/test/url/'
testcontent = {'testdict': 'testvalue'}
self.conn.post(testurl, testcontent)
expected = [mock.call(), ]
self.conn._create_session.assert_has_calls(expected)
expected = [mock.call(expectedhdr), ]
self.conn._delete_session.assert_has_calls(expected)
expected = [mock.call(url=self.conn._complete_uri(testurl),
data=json.dumps(testcontent, sort_keys=True),
headers=expectedhdr, verify=True), ]
requests.post.assert_has_calls(expected)
def test_delete_sucess(self):
del_return = FIOFakeResponse(code=201, text=json.dumps({}))
requests.delete = mock.Mock(return_value=del_return)
expectedhdr = copy.deepcopy(self.conn.defhdrs)
expectedhdr["Cookie"] = 'session=' + self.fakeSessionID
self.conn._create_session = mock.Mock(return_value=expectedhdr)
self.conn._delete_session = mock.Mock()
testurl = '/test/url/'
self.conn.delete(testurl,)
expected = [mock.call(), ]
self.conn._create_session.assert_has_calls(expected)
expected = [mock.call(expectedhdr), ]
self.conn._delete_session.assert_has_calls(expected)
expected = [mock.call(url=self.conn._complete_uri(testurl),
headers=expectedhdr, verify=True), ]
requests.delete.assert_has_calls(expected)
def test_get_sucess(self):
get_return = FIOFakeResponse(code=200,
text=json.dumps(basic_acl_group_response))
expectedhdr = copy.deepcopy(self.conn.defhdrs)
expectedhdr["Cookie"] = 'session=' + self.fakeSessionID
self.conn._create_session = mock.Mock(return_value=expectedhdr)
self.conn._delete_session = mock.Mock()
requests.get = mock.Mock(return_value=get_return)
testurl = '/test/url/'
result = self.conn.get(testurl,)
expected = [mock.call(), ]
self.conn._create_session.assert_has_calls(expected)
expected = [mock.call(expectedhdr), ]
self.conn._delete_session.assert_has_calls(expected)
expected = [mock.call(url=self.conn._complete_uri(testurl),
headers=expectedhdr, verify=True), ]
requests.get.assert_has_calls(expected)
assert result == basic_acl_group_response
# Tests for FIOconnection.get() retry/error behaviour. These are methods of
# the connection test case whose class statement appears earlier in the file.

def test_get_bad_json_once(self):
    """get() retries and succeeds when the first response body is bad JSON."""
    expectedhdr = copy.deepcopy(self.conn.defhdrs)
    expectedhdr["Cookie"] = 'session=' + self.fakeSessionID
    self.conn._create_session = mock.Mock(return_value=expectedhdr)
    self.conn._delete_session = mock.Mock()
    expected_text = json.dumps(basic_acl_group_response)
    # First response is malformed JSON, second one is valid.
    jsonErrEffect = [FIOFakeResponse(code=200,
                                     text='{"badjson":"bad",,}'),
                     FIOFakeResponse(code=200,
                                     text=expected_text)]
    requests.get = mock.Mock(side_effect=jsonErrEffect)
    testurl = '/test/url/'
    result = self.conn.get(testurl,)
    expected = [mock.call(), ]
    self.conn._create_session.assert_has_calls(expected)
    expected = [mock.call(expectedhdr), ]
    self.conn._delete_session.assert_has_calls(expected)
    expected = [mock.call(url=self.conn._complete_uri(testurl),
                          headers=expectedhdr, verify=True), ]
    requests.get.assert_has_calls(expected)
    assert result == basic_acl_group_response

def test_get_bad_json_retry_expire(self):
    """get() raises VolumeDriverException when every retry returns bad JSON."""
    get_return = FIOFakeResponse(code=200, text='{"badjson":"bad",,}')
    expectedhdr = copy.deepcopy(self.conn.defhdrs)
    expectedhdr["Cookie"] = 'session=' + self.fakeSessionID
    self.conn._create_session = mock.Mock(return_value=expectedhdr)
    self.conn._delete_session = mock.Mock()
    requests.get = mock.Mock(return_value=get_return)
    testurl = '/test/url/'
    self.assertRaises(exception.VolumeDriverException,
                      self.conn.get, testurl)
    expected = [mock.call(), ]
    self.conn._create_session.assert_has_calls(expected)
    expected = [mock.call(expectedhdr), ]
    self.conn._delete_session.assert_has_calls(expected)
    # Three GET attempts are expected before the exception is raised.
    expected = [mock.call(url=self.conn._complete_uri(testurl),
                          headers=expectedhdr, verify=True),
                mock.call(url=self.conn._complete_uri(testurl),
                          headers=expectedhdr, verify=True),
                mock.call(url=self.conn._complete_uri(testurl),
                          headers=expectedhdr, verify=True), ]
    requests.get.assert_has_calls(expected)

def test_get_failed_http_response(self):
    """get() propagates HTTPError on a non-success HTTP status code."""
    get_return = FIOFakeResponse(code=404,
                                 text=json.dumps(basic_acl_group_response))
    expectedhdr = copy.deepcopy(self.conn.defhdrs)
    expectedhdr["Cookie"] = 'session=' + self.fakeSessionID
    self.conn._create_session = mock.Mock(return_value=expectedhdr)
    self.conn._delete_session = mock.Mock()
    requests.get = mock.Mock(return_value=get_return)
    testurl = '/test/url/'
    self.assertRaises(requests.exceptions.HTTPError,
                      self.conn.get, testurl)
    expected = [mock.call(), ]
    self.conn._create_session.assert_has_calls(expected)
    expected = [mock.call(expectedhdr), ]
    self.conn._delete_session.assert_has_calls(expected)
    expected = [mock.call(url=self.conn._complete_uri(testurl),
                          headers=expectedhdr, verify=True), ]
    requests.get.assert_has_calls(expected)
@mock.patch('cinder.volume.drivers.fusionio.ioControl.FIOconnection',
            autospec=True)
class FIOioControlTestCases(test.TestCase):
    """Unit tests for the Fusion-io ioControl volume driver.

    FIOconnection is patched for the whole class, so every test method
    receives the patched class as its ``connmock`` argument.
    """

    VERSION = '1.0.0'
    # QoS policy name -> UUID mapping; mirrors what do_setup builds from
    # the basic_policy_response fixture.
    policyTable = {'Policy 4': '00000000-00000000-0000-000000000002',
                   'Policy 5': '00000000-00000000-0000-000000000000',
                   'Policy 2': '00000000-00000000-0000-000000000008',
                   'Policy 3': '00000000-00000000-0000-000000000004',
                   'Policy 1': '00000000-00000000-0000-000000000010'}

    def setUp(self):
        """Create a driver instance with a pre-populated QoS policy table."""
        super(FIOioControlTestCases, self).setUp()
        self.configuration = create_configuration()
        self.ctxt = context.get_admin_context()
        self.drv = FIOioControlDriver(configuration=self.configuration)
        self.drv.fio_qos_dict = self.policyTable

    def test_do_setup_sucess(self, connmock):
        """do_setup rebuilds the QoS policy table from the array."""
        # erase policy table, then make sure drv.do_setup builds it
        self.drv.fio_qos_dict = {}
        instance = connmock.return_value
        instance.get.return_value = basic_policy_response
        self.drv.do_setup(context="")
        self.assertEqual(self.policyTable, self.drv.fio_qos_dict,
                         "wrong policy table built")

    def test_create_volume_simple_success_poolA(self, connmock):
        """Raising pool B's exported MB makes the driver create in pool A."""
        self.drv.conn = connmock.return_value
        bPoolResponse = copy.deepcopy(basic_pools_response)
        bPoolResponse[1]['ExportedVolumeMB'] = 5009
        self.drv.conn.get.return_value = bPoolResponse
        testvol = {'project_id': 'testproject',
                   'name': 'cinderVolumeName',
                   'size': 1,
                   'id': 'cinderVolumeID',
                   'volume_type_id': None,
                   'created_at': timeutils.utcnow()}
        self.drv.create_volume(testvol)
        cmd = {"Size": int(testvol['size']) * units.Gi,
               "PolicyUUID": '00000000-00000000-0000-000000000000',
               "PoolUUID": "FakePoolA_id",
               "Name": testvol['id'], }
        expected = [mock.call.get('TierStore/Pools/by-id/'),
                    mock.call.post('TierStore/Volumes/by-id/', cmd)]
        self.drv.conn.assert_has_calls(expected)

    def test_create_volume_simple_success_poolB(self, connmock):
        """Raising pool A's exported MB makes the driver create in pool B."""
        self.drv.conn = connmock.return_value
        bPoolResponse = copy.deepcopy(basic_pools_response)
        bPoolResponse[0]['ExportedVolumeMB'] = 5009
        self.drv.conn.get.return_value = bPoolResponse
        testvol = {'project_id': 'testproject',
                   'name': 'cinderVolumeName',
                   'size': 1,
                   'id': 'cinderVolumeID',
                   'volume_type_id': None,
                   'created_at': timeutils.utcnow()}
        self.drv.create_volume(testvol)
        cmd = {"Size": int(testvol['size']) * units.Gi,
               "PolicyUUID": '00000000-00000000-0000-000000000000',
               "PoolUUID": "FakePoolB_id",
               "Name": testvol['id'], }
        expected = [mock.call.get('TierStore/Pools/by-id/'),
                    mock.call.post('TierStore/Volumes/by-id/', cmd)]
        self.drv.conn.assert_has_calls(expected)

    def test_delete_volume_sucess(self, connmock):
        """delete_volume looks the volume up, then deletes it by UUID."""
        self.drv.conn = connmock.return_value
        testvol = {'project_id': 'testproject',
                   'name': 'cinderVolumeName',
                   'size': 1,
                   'id': 'cinderVolumeID',
                   'volume_type_id': None,
                   'created_at': timeutils.utcnow()}
        self.drv.conn.get.return_value = basic_vol_response
        self.drv.delete_volume(testvol)
        expected = [mock.call.get('TierStore/Volumes/by-id/'),
                    mock.call.delete('TierStore/Volumes/by-id/FakeBasicVolID')]
        self.drv.conn.assert_has_calls(expected)

    def test_create_snapshot_sucess(self, connmock):
        """create_snapshot posts a snapshot for the looked-up volume."""
        self.drv.conn = connmock.return_value
        snapshot = {'volume_id': 'cinderVolumeID',
                    'id': 'a720b3c0-d1f0-11e1-9b23-1234500cab39', }
        self.drv.conn.get.return_value = basic_vol_response
        cmd = {"VolumeUUID": "FakeBasicVolID",
               "Name": snapshot['id'], }
        self.drv.create_snapshot(snapshot)
        expected = [mock.call.get('TierStore/Volumes/by-id/'),
                    mock.call.post('TierStore/Snapshots/by-id/', cmd), ]
        self.drv.conn.assert_has_calls(expected)

    def test_delete_snapshot_sucess(self, connmock):
        """delete_snapshot deletes the snapshot found by its id."""
        self.drv.conn = connmock.return_value
        snapshot = {'volume_id': '1dead3c0-d1f0-beef-9b23-1274500cab58',
                    'id': 'cinderSnapshotID'}
        self.drv.conn.get.return_value = basic_snapshot_response
        self.drv.delete_snapshot(snapshot)
        expected = [mock.call.get('TierStore/Snapshots/by-id/'),
                    mock.call.delete(
                        ('TierStore/Snapshots/by-id/' +
                         '407115424bb9539c')), ]
        self.drv.conn.assert_has_calls(expected)

    def test_create_volume_from_snapshot_simple_sucess(self, connmock):
        """create_volume_from_snapshot clones the snapshot layer."""
        self.drv.conn = connmock.return_value
        testvol = {'project_id': 'testproject',
                   'name': 'cinderVolumeName',
                   'size': 1,
                   'id': 'cinderVolumeID',
                   'volume_type_id': None,
                   'created_at': timeutils.utcnow()}
        snapshot = {'volume_id': testvol['id'],
                    'id': 'cinderSnapshotID'}
        self.drv.conn.get.return_value = basic_snapshot_response
        cmd = {"ParentLayerId": "407115424bb9539c",
               "Name": testvol['id'],
               "PolicyUUID": '00000000-00000000-0000-000000000000'}
        self.drv.create_volume_from_snapshot(testvol, snapshot)
        expected = [mock.call.get('TierStore/Snapshots/by-id/'),
                    mock.call.put(
                        'TierStore/Snapshots/functions/CloneSnapshot', cmd), ]
        self.drv.conn.assert_has_calls(expected)

    def test_initialize_connection_no_usable_Networks_fail(self, connmock):
        """initialize_connection raises when no iSCSI interface is up."""
        self.drv.conn = connmock.return_value
        connector = {'initiator': 'fake:01'}
        testvol = {'project_id': 'testproject',
                   'name': 'cinderVolumeName',
                   'size': 1,
                   'id': 'cinderVolumeID',
                   'volume_type_id': None,
                   'created_at': timeutils.utcnow(),
                   'provider_auth': {}}
        cmd = {"GroupName": "fake:01",
               "InitiatorList": ["fake:01"]}
        cmd2 = {"AclGroupList": ["3"], }
        # Take interface index 4 down; with this fixture that leaves no
        # usable network for the target portal.
        netResponse = copy.deepcopy(basic_net_response)
        netResponse[4]['OperationalState'] = "down"
        get_effect = [basic_vol_response,
                      basic_acl_group_response,
                      basic_vol_response,
                      netResponse, ]
        self.drv.conn.get.side_effect = get_effect
        self.assertRaises(exception.VolumeDriverException,
                          self.drv.initialize_connection, testvol,
                          connector)
        expected = [mock.call.get('TierStore/Volumes/by-id/'),
                    mock.call.post('TierStore/ACLGroup/by-id/', cmd),
                    mock.call.get('TierStore/ACLGroup/by-id/'),
                    mock.call.put('TierStore/Volumes/by-id/FakeBasicVolID',
                                  cmd2),
                    mock.call.get('TierStore/Volumes/by-id/'),
                    mock.call.get('System/Network/by-id/'), ]
        self.drv.conn.assert_has_calls(expected)

    def test_initialize_connection_simple_sucess(self, connmock):
        """initialize_connection returns iSCSI target info when a NIC is up."""
        self.drv.conn = connmock.return_value
        connector = {'initiator': 'fake:01'}
        testvol = {'project_id': 'testproject',
                   'name': 'cinderVolumeName',
                   'size': 1,
                   'id': 'cinderVolumeID',
                   'volume_type_id': None,
                   'created_at': timeutils.utcnow(),
                   'provider_auth': {}}
        cmd = {"GroupName": "fake:01",
               "InitiatorList": ["fake:01"]}
        cmd2 = {"AclGroupList": ["3"], }
        # Bring interface index 2 up so a usable portal exists.
        netResponse = copy.deepcopy(basic_net_response)
        netResponse[2]['OperationalState'] = "up"
        get_effect = [basic_vol_response,
                      basic_acl_group_response,
                      basic_vol_response,
                      netResponse, ]
        self.drv.conn.get.side_effect = get_effect
        result = self.drv.initialize_connection(testvol, connector)
        expected = {'driver_volume_type': 'iscsi',
                    'data': {'target_lun': 0,
                             'target_portal': u'10.10.2.84:3260',
                             'target_iqn': (
                                 'iqn.2010-11.com.ngs:Volume:FakeBasicVolID'),
                             'target_discovered': False,
                             'volume_id': 'cinderVolumeID'}}
        self.assertEqual(result, expected, "wrong result from init connection")
        expected = [mock.call.get('TierStore/Volumes/by-id/'),
                    mock.call.post('TierStore/ACLGroup/by-id/', cmd),
                    mock.call.get('TierStore/ACLGroup/by-id/'),
                    mock.call.put('TierStore/Volumes/by-id/FakeBasicVolID',
                                  cmd2),
                    mock.call.get('TierStore/Volumes/by-id/'),
                    mock.call.get('System/Network/by-id/'), ]
        self.drv.conn.assert_has_calls(expected)

    def test_terminate_connection_single_delete_sucess(self, connmock):
        """terminate_connection deletes the ACL group when it is unused."""
        self.drv.conn = connmock.return_value
        connector = {'initiator': 'fake:01'}
        testvol = {'project_id': 'testproject',
                   'name': 'cinderVolumeName',
                   'size': 1,
                   'id': 'cinderVolumeID',
                   'volume_type_id': None,
                   'created_at': timeutils.utcnow(),
                   'provider_auth': {}}
        cmd = {"AclGroupList": ["1"], }
        get_effect = [basic_vol_response,
                      basic_acl_group_response,
                      basic_acl_group_response,
                      basic_vol_response, ]
        self.drv.conn.get.side_effect = get_effect
        self.drv.terminate_connection(testvol, connector)
        expected = [mock.call.get('TierStore/Volumes/by-id/'),
                    mock.call.get('TierStore/ACLGroup/by-id/'),
                    mock.call.put('TierStore/Volumes/by-id/FakeBasicVolID',
                                  cmd),
                    mock.call.get('TierStore/ACLGroup/by-id/'),
                    mock.call.get('TierStore/Volumes/by-id/'),
                    mock.call.delete('TierStore/ACLGroup/by-id/3')]
        self.drv.conn.assert_has_calls(expected)

    def test_terminate_connection_multiple_no_delete(self, connmock):
        """The ACL group is kept while another volume still references it."""
        self.drv.conn = connmock.return_value
        connector = {'initiator': 'fake:01'}
        testvol = {'project_id': 'testproject',
                   'name': 'cinderVolumeName',
                   'size': 1,
                   'id': 'cinderVolumeID',
                   'volume_type_id': None,
                   'created_at': timeutils.utcnow(),
                   'provider_auth': {}}
        cmd = {"AclGroupList": ["1"], }
        # A second volume still references ACL group 3, so no delete call.
        return2vol = copy.deepcopy(basic_vol_response)
        return2vol.append(copy.deepcopy(basic_vol_response[0]))
        return2vol[1]['AclGroupList'] = ["3"]
        get_effect = [basic_vol_response,
                      basic_acl_group_response,
                      basic_acl_group_response,
                      return2vol, ]
        self.drv.conn.get.side_effect = get_effect
        self.drv.terminate_connection(testvol, connector)
        expected = [mock.call.get('TierStore/Volumes/by-id/'),
                    mock.call.get('TierStore/ACLGroup/by-id/'),
                    mock.call.put('TierStore/Volumes/by-id/FakeBasicVolID',
                                  cmd),
                    mock.call.get('TierStore/ACLGroup/by-id/'),
                    mock.call.get('TierStore/Volumes/by-id/')]
        self.drv.conn.assert_has_calls(expected)

    def test_terminate_connection_multiple_delete(self, connmock):
        """The ACL group is deleted once no volume references it anymore."""
        self.drv.conn = connmock.return_value
        connector = {'initiator': 'fake:01'}
        testvol = {'project_id': 'testproject',
                   'name': 'cinderVolumeName',
                   'size': 1,
                   'id': 'cinderVolumeID',
                   'volume_type_id': None,
                   'created_at': timeutils.utcnow(),
                   'provider_auth': {}}
        cmd = {"AclGroupList": ["1"], }
        return2vol = copy.deepcopy(basic_vol_response)
        return2vol.append(copy.deepcopy(basic_vol_response[0]))
        get_effect = [basic_vol_response,
                      basic_acl_group_response,
                      basic_acl_group_response,
                      return2vol, ]
        self.drv.conn.get.side_effect = get_effect
        self.drv.terminate_connection(testvol, connector)
        expected = [mock.call.get('TierStore/Volumes/by-id/'),
                    mock.call.get('TierStore/ACLGroup/by-id/'),
                    mock.call.put('TierStore/Volumes/by-id/FakeBasicVolID',
                                  cmd),
                    mock.call.get('TierStore/ACLGroup/by-id/'),
                    mock.call.get('TierStore/Volumes/by-id/'),
                    mock.call.delete('TierStore/ACLGroup/by-id/3')]
        self.drv.conn.assert_has_calls(expected)

    def test_create_cloned_volume_simple_sucess(self, connmock):
        """create_cloned_volume snapshots the source, then clones it."""
        self.drv.conn = connmock.return_value
        srcvol = {'id': 'cinderVolumeID'}
        dstvol = {'project_id': 'testproject',
                  'name': 'cinderVolumeName',
                  'size': 1,
                  'id': 'cinderVolumeID-dst',
                  'volume_type_id': None,
                  'created_at': timeutils.utcnow()}
        cmd = {'VolumeUUID': 'FakeBasicVolID',
               'Name': 'mockedFakeUUID'}
        # also mock _getSnapshotByName because of the random snapshotname.
        self.drv._get_snapshot_by_name = mock.MagicMock()
        self.drv._get_snapshot_by_name.return_value = \
            basic_snapshot_response[0]
        cmd2 = {"ParentLayerId": "407115424bb9539c",
                "Name": "cinderVolumeID-dst",
                "PolicyUUID": "00000000-00000000-0000-000000000000"}
        get_effect = [basic_vol_response, ]
        self.drv.conn.get.side_effect = get_effect
        with mock.patch('cinder.volume.drivers.fusionio.ioControl.uuid',
                        autospec=True) as uuidmock:
            uuidmock.uuid4.return_value = cmd['Name']
            self.drv.create_cloned_volume(dstvol, srcvol)
        expected = [mock.call.get('TierStore/Volumes/by-id/'),
                    mock.call.post('TierStore/Snapshots/by-id/', cmd),
                    mock.call.put(('TierStore/Snapshots/functions/' +
                                   'CloneSnapshot'), cmd2), ]
        self.drv.conn.assert_has_calls(expected)

    def test_create_cloned_volume_snapfails(self, connmock):
        """Clone fails fast when the initial snapshot POST errors."""
        self.drv.conn = connmock.return_value
        # this operation is a 2 part process, snap, then clone.
        # This tests for the snap failing
        srcvol = {'id': 'cinderVolumeID'}
        dstvol = {'project_id': 'testproject',
                  'name': 'cinderVolumeName',
                  'size': 1,
                  'id': 'cinderVolumeID-dst',
                  'volume_type_id': None,
                  'created_at': timeutils.utcnow()}
        cmd = {'VolumeUUID': 'FakeBasicVolID',
               'Name': 'mockedFakeUUID'}
        get_effect = [basic_vol_response, ]
        self.drv.conn.get.side_effect = get_effect
        self.drv.conn.post.side_effect = requests.exceptions.HTTPError
        with mock.patch('cinder.volume.drivers.fusionio.ioControl.uuid',
                        autospec=True) as uuidmock:
            uuidmock.uuid4.return_value = cmd['Name']
            self.assertRaises(requests.exceptions.HTTPError,
                              self.drv.create_cloned_volume,
                              dstvol, srcvol)
        expected = [mock.call.get('TierStore/Volumes/by-id/'),
                    mock.call.post('TierStore/Snapshots/by-id/', cmd), ]
        self.drv.conn.assert_has_calls(expected)

    def test_create_cloned_volume_clonefails(self, connmock):
        """The temporary snapshot is cleaned up when the clone PUT errors."""
        self.drv.conn = connmock.return_value
        srcvol = {'id': 'cinderVolumeID'}
        dstvol = {'project_id': 'testproject',
                  'name': 'cinderVolumeName',
                  'size': 1,
                  'id': 'cinderVolumeID-dst',
                  'volume_type_id': None,
                  'created_at': timeutils.utcnow()}
        get_effect = [basic_vol_response,
                      basic_snapshot_response[0], ]
        self.drv.conn.get.side_effect = get_effect
        # also mock _getSnapshotByName because of the random snapshotname.
        self.drv._get_snapshot_by_name = mock.MagicMock()
        self.drv._get_snapshot_by_name.return_value = \
            basic_snapshot_response[0]
        cmd = {'VolumeUUID': 'FakeBasicVolID',
               'Name': 'mockedFakeUUID'}
        cmd2 = {"ParentLayerId": "407115424bb9539c",
                "Name": "cinderVolumeID-dst",
                "PolicyUUID": "00000000-00000000-0000-000000000000"}
        self.drv.conn.put.side_effect = requests.exceptions.HTTPError
        with mock.patch('cinder.volume.drivers.fusionio.ioControl.uuid',
                        autospec=True) as uuidmock:
            uuidmock.uuid4.return_value = cmd['Name']
            self.assertRaises(requests.exceptions.HTTPError,
                              self.drv.create_cloned_volume,
                              dstvol, srcvol)
        expected = [mock.call.get('TierStore/Volumes/by-id/'),
                    mock.call.post('TierStore/Snapshots/by-id/', cmd),
                    mock.call.put(('TierStore/Snapshots/functions/' +
                                   'CloneSnapshot'), cmd2),
                    mock.call.delete(('TierStore/Snapshots/by-id/' +
                                      cmd2['ParentLayerId'])), ]
        self.drv.conn.assert_has_calls(expected)

    def test_get_volume_stats_simple_sucess(self, connmock):
        """Reported capacity is the sum of PagingTotalMB over both pools."""
        self.drv.conn = connmock.return_value
        self.drv.conn.get.side_effect = [basic_pools_response, ]
        result = self.drv.get_volume_stats(refresh=True)
        self.assertEqual(basic_pools_response[0]['PagingTotalMB'] +
                         basic_pools_response[1]['PagingTotalMB'],
                         result['total_capacity_gb'],
                         "capacity calc wrong")
        self.assertEqual(self.VERSION, result['driver_version'],
                         "Driver/Test version Mismatch")

    def test_create_volume_QoS_by_presets(self, connmock):
        """The QoS policy is resolved from the volume's fio-qos metadata."""
        preset_qos = VolumeMetadata(key='fio-qos', value='Policy 2')
        testvol = {'project_id': 'testprjid',
                   'name': 'testvol',
                   'size': 1,
                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
                   'volume_metadata': [preset_qos],
                   'volume_type_id': None,
                   'created_at': timeutils.utcnow()}
        expected_qos_result = '00000000-00000000-0000-000000000008'  # Policy 2
        qos = self.drv._set_qos_presets(testvol)
        self.assertEqual(qos, expected_qos_result)

    def test_create_volume_Qos_by_volumeType_QoSspec(self, connmock):
        """An associated QoS spec wins over the type's qos: extra spec."""
        qos_ref = qos_specs.create(self.ctxt,
                                   'qos-specs-1', {'fio-qos': 'Policy 2'})
        type_ref = volume_types.create(self.ctxt,
                                       "type1",
                                       {"volume_backend_name": "fio-ioControl",
                                        "qos:fio-qos": "Policy 4"}
                                       )
        qos_specs.associate_qos_with_type(self.ctxt,
                                          qos_ref['id'],
                                          type_ref['id'])
        expected_qos_result = '00000000-00000000-0000-000000000008'  # Policy 2
        qos = self.drv._set_qos_by_volume_type(type_ref['id'])
        self.assertEqual(qos, expected_qos_result)

    def test_create_volume_Qos_by_volumeType_extraSpec(self, connmock):
        """Without a QoS spec, the type's qos: extra spec is used."""
        type_ref = volume_types.create(self.ctxt,
                                       "type1",
                                       {"volume_backend_name": "fio-ioControl",
                                        "qos:fio-qos": "Policy 4"}
                                       )
        expected_qos_result = '00000000-00000000-0000-000000000002'  # Policy 4
        qos = self.drv._set_qos_by_volume_type(type_ref['id'])
        self.assertEqual(qos, expected_qos_result)

    def test_extend_volume_simple_success(self, connmock):
        """extend_volume PUTs the new size (in bytes) for the volume."""
        self.drv.conn = connmock.return_value
        testvol = {'project_id': 'testproject',
                   'name': 'cinderVolumeName',
                   'size': 1,
                   'id': 'cinderVolumeID',
                   'volume_type_id': None,
                   'created_at': timeutils.utcnow()}
        new_size = 10
        cmd = {"Size": int(new_size) * units.Gi}
        self.drv.conn.get.side_effect = [basic_vol_response, ]
        self.drv.extend_volume(testvol, new_size)
        expected = [mock.call.get('TierStore/Volumes/by-id/'),
                    mock.call.put('TierStore/Volumes/by-id/FakeBasicVolID',
                                  cmd)]
        self.drv.conn.assert_has_calls(expected)
|
apache-2.0
|
phaethon/scapy
|
kamene/contrib/eigrp.py
|
1
|
16003
|
#!/usr/bin/env python
# kamene.contrib.description = EIGRP
# kamene.contrib.status = loads
"""
EIGRP Scapy Extension
~~~~~~~~~~~~~~~~~~~~~
:version: 2009-08-13
:copyright: 2009 by Jochen Bartl
:e-mail: lobo@c3a.de / jochen.bartl@gmail.com
:license: GPL v2
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
:TODO
- Replace TLV code with a more generic solution
* http://trac.secdev.org/scapy/ticket/90
- Write function for calculating authentication data
:Known bugs:
-
:Thanks:
- TLV code derived from the CDP implementation of scapy. (Thanks to Nicolas Bareil and Arnaud Ebalard)
http://trac.secdev.org/scapy/ticket/18
- IOS / EIGRP Version Representation FIX by Dirk Loss
"""
from kamene.packet import *
from kamene.fields import *
from kamene.layers.inet import IP
from kamene.layers.inet6 import *
class EigrpIPField(StrField, IPField):
    """
    Field for the destination network address in internal and external
    route updates.

    EIGRP strips trailing zero bytes from the host portion of the address
    when the prefix length is 8, 16 or 24 bits; this field encodes and
    decodes that variable-length wire format.
    """
    def __init__(self, name, default, length=None, length_from=None):
        StrField.__init__(self, name, default)
        self.length_from = length_from
        if length is not None:
            self.length_from = lambda pkt, length=length: length

    def h2i(self, pkt, x):
        return IPField.h2i(self, pkt, x)

    def i2m(self, pkt, x):
        # Keep only the address bytes covered by the prefix length.
        x = inet_aton(x)
        prefixlen = self.length_from(pkt)
        return x[:self.prefixlen_to_bytelen(prefixlen)]

    def m2i(self, pkt, x):
        # Restore the stripped zero bytes before decoding.
        # BUG FIX: the padding must be bytes, not str, under Python 3 --
        # x comes off the wire as bytes and inet_ntoa() expects bytes.
        prefixlen = self.length_from(pkt)
        x += b"\x00" * (4 - self.prefixlen_to_bytelen(prefixlen))
        return inet_ntoa(x)

    def prefixlen_to_bytelen(self, l):
        """Return the number of address bytes kept on the wire for prefix l."""
        if l <= 8:
            return 1
        elif l <= 16:
            return 2
        elif l <= 24:
            return 3
        return 4

    def i2len(self, pkt, x):
        return self.prefixlen_to_bytelen(self.length_from(pkt))

    def getfield(self, pkt, s):
        bytelen = self.prefixlen_to_bytelen(self.length_from(pkt))
        return s[bytelen:], self.m2i(pkt, s[:bytelen])

    def randval(self):
        return IPField.randval(self)
class EigrpIP6Field(StrField, IP6Field, EigrpIPField):
    """
    Field for the destination network address in internal and external
    IPv6 route updates; as in EigrpIPField, the address is truncated on
    the wire according to the prefix length.
    """
    def __init__(self, name, default, length=None, length_from=None):
        StrField.__init__(self, name, default)
        self.length_from = length_from
        if length is not None:
            self.length_from = lambda pkt, length=length: length

    def any2i(self, pkt, x):
        return IP6Field.any2i(self, pkt, x)

    def i2repr(self, pkt, x):
        return IP6Field.i2repr(self, pkt, x)

    def h2i(self, pkt, x):
        return IP6Field.h2i(self, pkt, x)

    def i2m(self, pkt, x):
        # Keep only the address bytes covered by the prefix length.
        x = inet_pton(socket.AF_INET6, x)
        prefixlen = self.length_from(pkt)
        return x[:self.prefixlen_to_bytelen(prefixlen)]

    def m2i(self, pkt, x):
        l = self.length_from(pkt)
        bytelen = self.prefixlen_to_bytelen(l)
        if l > 128:
            warning("EigrpIP6Field: Prefix length is > 128. Dissection of this packet will fail")
        else:
            # BUG FIX: pad with bytes, not str, under Python 3 -- x comes
            # off the wire as bytes and inet_ntop() expects bytes.
            x += b"\x00" * (16 - bytelen)
        return inet_ntop(socket.AF_INET6, x)

    def prefixlen_to_bytelen(self, l):
        # BUG FIX: "/" is true division under Python 3, which would make l
        # a float (and break slicing); floor-divide to keep the original
        # Python 2 integer semantics.
        l = l // 8
        if l < 16:
            l += 1
        return l

    def i2len(self, pkt, x):
        return EigrpIPField.i2len(self, pkt, x)

    def getfield(self, pkt, s):
        return EigrpIPField.getfield(self, pkt, s)
class ThreeBytesField(X3BytesField, ByteField):
    """3-byte field whose repr shows a plain decimal number (ByteField
    style) instead of X3BytesField's hexadecimal representation."""
    def i2repr(self, pkt, x):
        return ByteField.i2repr(self, pkt, x)
class EIGRPGeneric(Packet):
    """Base/catch-all TLV: 2-byte type, 2-byte total length, raw value."""
    name = "EIGRP Generic TLV"
    fields_desc = [ XShortField("type", 0x0000),
                    FieldLenField("len", None, "value", "!H", adjust=lambda pkt,x: x + 4),
                    StrLenField("value", "\x00", length_from=lambda pkt: pkt.len - 4)]

    def guess_payload_class(self, p):
        # Trailing bytes are further TLVs handled by RepeatedTlvListField,
        # not a payload of this TLV, so expose them as padding.
        return conf.padding_layer
class EIGRPParam(EIGRPGeneric):
    """K-value metric weights and hold time TLV (type 0x0001)."""
    name = "EIGRP Parameters"
    fields_desc = [ XShortField("type", 0x0001),
                    ShortField("len", 12),
                    # Bandwidth
                    ByteField("k1", 1),
                    # Load
                    ByteField("k2", 0),
                    # Delay
                    ByteField("k3", 1),
                    # Reliability
                    ByteField("k4", 0),
                    # MTU
                    ByteField("k5", 0),
                    ByteField("reserved", 0),
                    ShortField("holdtime", 15)
                    ]
class EIGRPAuthData(EIGRPGeneric):
    """Authentication TLV (type 0x0002) carrying an MD5 digest."""
    name = "EIGRP Authentication Data"
    fields_desc = [ XShortField("type", 0x0002),
                    FieldLenField("len", None, "authdata", "!H", adjust=lambda pkt,x: x + 24),
                    ShortEnumField("authtype", 2, {2 : "MD5"}),
                    ShortField("keysize", None),
                    IntField("keyid", 1),
                    StrFixedLenField("nullpad", "\x00" * 12, 12),
                    StrLenField("authdata", RandString(16), length_from=lambda pkt: pkt.keysize)
                    ]

    def post_build(self, p, pay):
        """Fill in the keysize field (bytes 6-7) once authdata is built."""
        p += pay
        if self.keysize is None:
            keysize = len(self.authdata)
            # BUG FIX: chr() cannot be concatenated with bytes on Python 3;
            # pack the 16-bit keysize with struct instead.
            p = p[:6] + struct.pack("!H", keysize) + p[8:]
        return p
class EIGRPSeq(EIGRPGeneric):
    """Sequence TLV (type 0x0003) listing an IPv4 or IPv6 neighbor address."""
    name = "EIGRP Sequence"
    fields_desc = [ XShortField("type", 0x0003),
                    ShortField("len", None),
                    ByteField("addrlen", 4),
                    ConditionalField(IPField("ipaddr", "192.168.0.1"),
                                     lambda pkt:pkt.addrlen == 4),
                    ConditionalField(IP6Field("ip6addr", "2001::"),
                                     lambda pkt:pkt.addrlen == 16)
                    ]

    def post_build(self, p, pay):
        """Fill in the TLV length field (bytes 2-3) once the TLV is built."""
        p += pay
        if self.len is None:
            l = len(p)
            # BUG FIX: chr() cannot be concatenated with bytes on Python 3;
            # pack the 16-bit length with struct instead.
            p = p[:2] + struct.pack("!H", l) + p[4:]
        return p
class ShortVersionField(ShortField):
    """16-bit field rendered as an IOS-style version string, e.g. "v12.1"
    (major in the high byte, minor in the low byte)."""
    def i2repr(self, pkt, x):
        try:
            minor = x & 0xff
            major = (x >> 8) & 0xff
        except TypeError:
            return "unknown"
        else:
            # We print a leading 'v' so that these values don't look like floats
            return "v%s.%s" % (major, minor)

    def h2i(self, pkt, x):
        """The field accepts string values like v12.1, v1.1 or integer values.
        String values have to start with a "v" folled by a floating point number.
        Valid numbers are between 0 and 255.
        """
        # Idiom fix: use isinstance() instead of exact type() comparison,
        # and "is not None" for the None check.
        if isinstance(x, str) and x.startswith("v") and len(x) <= 8:
            major = int(x.split(".")[0][1:])
            minor = int(x.split(".")[1])
            return (major << 8) | minor
        elif isinstance(x, int) and 0 <= x <= 65535:
            return x
        else:
            if self.default is not None:
                warning("set value to default. Format of %r is invalid" % x)
                return self.default
            else:
                raise Kamene_Exception("Format of value is invalid")

    def randval(self):
        return RandShort()
class EIGRPSwVer(EIGRPGeneric):
    """Software version TLV (type 0x0004): IOS and EIGRP TLV versions."""
    name = "EIGRP Software Version"
    fields_desc = [ XShortField("type", 0x0004),
                    ShortField("len", 8),
                    ShortVersionField("ios", "v12.0"),
                    ShortVersionField("eigrp", "v1.2")
                    ]
class EIGRPNms(EIGRPGeneric):
    """Next multicast sequence TLV (type 0x0005)."""
    name = "EIGRP Next Multicast Sequence"
    fields_desc = [ XShortField("type", 0x0005),
                    ShortField("len", 8),
                    IntField("nms", 2)
                    ]
# Don't get confused by the term "receive-only". This flag is always set, when you configure
# one of the stub options. It's also the only flag set, when you configure "eigrp stub receive-only".
_EIGRP_STUB_FLAGS = ["connected", "static", "summary", "receive-only", "redistributed", "leak-map"]
class EIGRPStub(EIGRPGeneric):
    """Stub router TLV (type 0x0006) advertising the stub flag set."""
    name = "EIGRP Stub Router"
    fields_desc = [ XShortField("type", 0x0006),
                    ShortField("len", 6),
                    FlagsField("flags", 0x000d, 16, _EIGRP_STUB_FLAGS)]
# Delay 0xffffffff == Destination Unreachable
class EIGRPIntRoute(EIGRPGeneric):
    """Internal (IPv4) route TLV (type 0x0102); dst is truncated on the
    wire according to prefixlen (see EigrpIPField)."""
    name = "EIGRP Internal Route"
    fields_desc = [ XShortField("type", 0x0102),
                    FieldLenField("len", None, "dst", "!H", adjust=lambda pkt,x: x + 25),
                    IPField("nexthop", "192.168.0.0"),
                    IntField("delay", 128000),
                    IntField("bandwidth", 256),
                    ThreeBytesField("mtu", 1500),
                    ByteField("hopcount", 0),
                    ByteField("reliability", 255),
                    ByteField("load", 0),
                    XShortField("reserved", 0),
                    ByteField("prefixlen", 24),
                    EigrpIPField("dst", "192.168.1.0", length_from=lambda pkt: pkt.prefixlen),
                    ]
# Origin protocol of an external (redistributed) route.
_EIGRP_EXTERNAL_PROTOCOL_ID = {
    0x01 : "IGRP",
    0x02 : "EIGRP",
    0x03 : "Static Route",
    0x04 : "RIP",
    0x05 : "Hello",
    0x06 : "OSPF",
    0x07 : "IS-IS",
    0x08 : "EGP",
    0x09 : "BGP",
    0x0A : "IDRP",
    0x0B : "Connected Link"
    }

_EIGRP_EXTROUTE_FLAGS = ["external", "candidate-default"]
class EIGRPExtRoute(EIGRPGeneric):
    """External (IPv4) route TLV (type 0x0103) with redistribution origin
    data; dst is truncated on the wire according to prefixlen."""
    name = "EIGRP External Route"
    fields_desc = [ XShortField("type", 0x0103),
                    FieldLenField("len", None, "dst", "!H", adjust=lambda pkt,x: x + 45),
                    IPField("nexthop", "192.168.0.0"),
                    IPField("originrouter", "192.168.0.1"),
                    IntField("originasn", 0),
                    IntField("tag", 0),
                    IntField("externalmetric", 0),
                    ShortField("reserved", 0),
                    ByteEnumField("extprotocolid", 3, _EIGRP_EXTERNAL_PROTOCOL_ID),
                    FlagsField("flags", 0, 8, _EIGRP_EXTROUTE_FLAGS),
                    IntField("delay", 0),
                    IntField("bandwidth", 256),
                    ThreeBytesField("mtu", 1500),
                    ByteField("hopcount", 0),
                    ByteField("reliability", 255),
                    ByteField("load", 0),
                    XShortField("reserved2", 0),
                    ByteField("prefixlen", 24),
                    EigrpIPField("dst", "192.168.1.0", length_from=lambda pkt: pkt.prefixlen)
                    ]
class EIGRPv6IntRoute(EIGRPGeneric):
    """Internal IPv6 route TLV (type 0x0402); dst is truncated on the
    wire according to prefixlen (see EigrpIP6Field)."""
    name = "EIGRP for IPv6 Internal Route"
    fields_desc = [ XShortField("type", 0x0402),
                    FieldLenField("len", None, "dst", "!H", adjust=lambda pkt,x: x + 37),
                    IP6Field("nexthop", "::"),
                    IntField("delay", 128000),
                    IntField("bandwidth", 256000),
                    ThreeBytesField("mtu", 1500),
                    ByteField("hopcount", 1),
                    ByteField("reliability", 255),
                    ByteField("load", 0),
                    XShortField("reserved", 0),
                    ByteField("prefixlen", 16),
                    EigrpIP6Field("dst", "2001::", length_from=lambda pkt: pkt.prefixlen)
                    ]
class EIGRPv6ExtRoute(EIGRPGeneric):
    """External IPv6 route TLV (type 0x0403) with redistribution origin
    data; dst is truncated on the wire according to prefixlen."""
    name = "EIGRP for IPv6 External Route"
    fields_desc = [ XShortField("type", 0x0403),
                    FieldLenField("len", None, "dst", "!H", adjust=lambda pkt,x: x + 57),
                    IP6Field("nexthop", "::"),
                    IPField("originrouter", "192.168.0.1"),
                    IntField("originasn", 0),
                    IntField("tag", 0),
                    IntField("externalmetric", 0),
                    ShortField("reserved", 0),
                    ByteEnumField("extprotocolid", 3, _EIGRP_EXTERNAL_PROTOCOL_ID),
                    FlagsField("flags", 0, 8, _EIGRP_EXTROUTE_FLAGS),
                    IntField("delay", 0),
                    IntField("bandwidth", 256000),
                    ThreeBytesField("mtu", 1500),
                    ByteField("hopcount", 1),
                    ByteField("reliability", 0),
                    ByteField("load", 1),
                    XShortField("reserved2", 0),
                    ByteField("prefixlen", 8),
                    EigrpIP6Field("dst", "::", length_from=lambda pkt: pkt.prefixlen)
                    ]
# TLV type code -> class name, used by _EIGRPGuessPayloadClass to pick the
# dissector for each TLV; unknown codes fall back to EIGRPGeneric.
_eigrp_tlv_cls = {
    0x0001: "EIGRPParam",
    0x0002: "EIGRPAuthData",
    0x0003: "EIGRPSeq",
    0x0004: "EIGRPSwVer",
    0x0005: "EIGRPNms",
    0x0006: "EIGRPStub",
    0x0102: "EIGRPIntRoute",
    0x0103: "EIGRPExtRoute",
    0x0402: "EIGRPv6IntRoute",
    0x0403: "EIGRPv6ExtRoute"
    }
class RepeatedTlvListField(PacketListField):
    """PacketListField that dissects consecutive TLVs until the input is
    exhausted, using each TLV's padding layer as the remaining input."""
    def __init__(self, name, default, cls):
        PacketField.__init__(self, name, default, cls)

    def getfield(self, pkt, s):
        lst = []
        remain = s
        while len(remain) > 0:
            p = self.m2i(pkt, remain)
            if conf.padding_layer in p:
                # Bytes beyond this TLV surface as padding; continue there.
                pad = p[conf.padding_layer]
                remain = pad.load
                del(pad.underlayer.payload)
            else:
                # BUG FIX: use a bytes sentinel -- the dissected input is
                # bytes under Python 3, not str.
                remain = b""
            lst.append(p)
        return remain, lst

    def addfield(self, pkt, s, val):
        # BUG FIX: reduce() is not a builtin in Python 3; concatenate the
        # serialized TLVs with a bytes join instead.
        return s + b"".join(bytes(v) for v in val)
def _EIGRPGuessPayloadClass(p, **kargs):
    """Instantiate the TLV class matching the 2-byte type code at the
    start of p; unknown codes map to EIGRPGeneric, short input to raw."""
    if len(p) < 2:
        return conf.raw_layer(p, **kargs)
    tlv_type = struct.unpack("!H", p[:2])[0]
    cls = globals()[_eigrp_tlv_cls.get(tlv_type, "EIGRPGeneric")]
    return cls(p, **kargs)
# EIGRP header opcode values.
# NOTE(review): EIGRP documentation calls opcode 4 "Reply"; "Replay" below
# looks like a typo, but the displayed string is left unchanged -- confirm
# before renaming.
_EIGRP_OPCODES = { 1 : "Update",
                   2 : "Request",
                   3 : "Query",
                   4 : "Replay",
                   5 : "Hello",
                   6 : "IPX SAP",
                   10 : "SIA Query",
                   11 : "SIA Reply" }

# The Conditional Receive bit is used for reliable multicast communication.
# Update-Flag: Not sure if Cisco calls it that way, but it's set when neighbors
# are exchanging routing information
_EIGRP_FLAGS = ["init", "cond-recv", "unknown", "update"]
class EIGRP(Packet):
    """EIGRP header: version, opcode, checksum, flags, sequence/ack and
    AS number, followed by a list of TLVs."""
    name = "EIGRP"
    fields_desc = [ ByteField("ver", 2),
                    ByteEnumField("opcode", 5, _EIGRP_OPCODES),
                    XShortField("chksum", None),
                    FlagsField("flags", 0, 32, _EIGRP_FLAGS),
                    IntField("seq", 0),
                    IntField("ack", 0),
                    IntField("asn", 100),
                    RepeatedTlvListField("tlvlist", [], _EIGRPGuessPayloadClass)
                    ]

    def post_build(self, p, pay):
        """Fill in the 16-bit checksum (bytes 2-3) once the packet is built."""
        p += pay
        if self.chksum is None:
            c = checksum(p)
            # BUG FIX: chr() cannot be concatenated with bytes on Python 3;
            # pack the checksum with struct instead.
            p = p[:2] + struct.pack("!H", c) + p[4:]
        return p

    def mysummary(self):
        summarystr = "EIGRP (AS=%EIGRP.asn% Opcode=%EIGRP.opcode%"
        if self.opcode == 5 and self.ack != 0:
            summarystr += " (ACK)"
        if self.flags != 0:
            summarystr += " Flags=%EIGRP.flags%"
        return self.sprintf(summarystr + ")")
# EIGRP rides directly over IP/IPv6 as protocol number 88.
bind_layers(IP, EIGRP, proto=88)
bind_layers(IPv6, EIGRP, nh=88)
if __name__ == "__main__":
from kamene.main import interact
interact(mydict=globals(), mybanner="EIGRP")
|
gpl-2.0
|
Charlotte-Morgan/inasafe
|
safe/datastore/test/test_geopackage.py
|
6
|
5376
|
# coding=utf-8
"""
InaSAFE Disaster risk assessment tool by AusAid - **Clipper test suite.**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import unittest
import sys
from tempfile import mktemp
from qgis.core import QgsVectorLayer, QgsRasterLayer
from qgis.PyQt.QtCore import QFileInfo
from osgeo import gdal
from safe.definitions.constants import INASAFE_TEST
from safe.test.utilities import (
get_qgis_app,
load_test_vector_layer,
standard_data_path)
QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app(qsetting=INASAFE_TEST)
from safe.datastore.geopackage import GeoPackage
# Decorator for expecting fails in windows but not other OS's
# Probably we should move this somewhere in utils for easy re-use...TS
def expect_failure_in_windows(exception):
    """Mark a test as expected to raise ``exception`` on Windows.

    On Windows the wrapped test must raise ``exception`` (checked via
    ``assertRaises``); on every other platform the test runs normally.

    BUG FIX: the original decorator silently skipped the test body on
    non-Windows platforms instead of executing it, so the decorated test
    always "passed" without running anywhere except Windows.

    ..versionadded:: 4.0.0
    """
    def test_decorator(fn):
        def test_decorated(self, *args, **kwargs):
            if sys.platform.startswith('win'):
                self.assertRaises(exception, fn, self, *args, **kwargs)
            else:
                # Run the test normally everywhere else.
                fn(self, *args, **kwargs)
        return test_decorated
    return test_decorator
class TestGeoPackage(unittest.TestCase):
    """Test the GeoPackage datastore."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    @unittest.skipIf(
        int(gdal.VersionInfo('VERSION_NUM')) < 2000000,
        'GDAL 2.0 is required for geopackage.')
    def test_create_geopackage(self):
        """Test if we can store geopackage."""
        # Create a geopackage from an empty file.
        path = QFileInfo(mktemp() + '.gpkg')
        self.assertFalse(path.exists())
        data_store = GeoPackage(path)
        # QFileInfo caches file state; refresh so exists() sees the new file.
        path.refresh()
        self.assertTrue(path.exists())
        # Let's add a vector layer.
        layer_name = 'flood_test'
        layer = standard_data_path('hazard', 'flood_multipart_polygons.shp')
        vector_layer = QgsVectorLayer(layer, 'Flood', 'ogr')
        result = data_store.add_layer(vector_layer, layer_name)
        self.assertTrue(result[0])
        # We should have one layer.
        layers = data_store.layers()
        self.assertEqual(len(layers), 1)
        self.assertIn(layer_name, layers)
        # Add the same layer with another name.
        layer_name = 'another_vector_flood'
        result = data_store.add_layer(vector_layer, layer_name)
        self.assertTrue(result[0])
        # We should have two layers.
        layers = data_store.layers()
        self.assertEqual(len(layers), 2)
        self.assertIn(layer_name, layers)
        # Test the URI of the new layer.
        expected = path.absoluteFilePath() + '|layername=' + layer_name
        self.assertEqual(data_store.layer_uri(layer_name), expected)
        # Test a fake layer.
        self.assertIsNone(data_store.layer_uri('fake_layer'))
        # Test to add a raster
        layer_name = 'raster_flood'
        layer = standard_data_path('hazard', 'classified_hazard.tif')
        raster_layer = QgsRasterLayer(layer, layer_name)
        result = data_store.add_layer(raster_layer, layer_name)
        self.assertTrue(result[0])
        # We should have 3 layers inside.
        layers = data_store.layers()
        self.assertEqual(len(layers), 3)
        # Check the URI for the raster layer.
        expected = 'GPKG:' + path.absoluteFilePath() + ':' + layer_name
        self.assertEqual(data_store.layer_uri(layer_name), expected)
        # Add a second raster.
        layer_name = 'big raster flood'
        self.assertTrue(data_store.add_layer(raster_layer, layer_name))
        self.assertEqual(len(data_store.layers()), 4)
        # Test layer without geometry
        layer = load_test_vector_layer(
            'gisv4', 'impacts', 'exposure_summary_table.csv')
        tabular_layer_name = 'breakdown'
        result = data_store.add_layer(layer, tabular_layer_name)
        self.assertTrue(result[0])

    @unittest.skipIf(
        int(gdal.VersionInfo('VERSION_NUM')) < 2000000,
        'GDAL 2.0 is required for geopackage.')
    @expect_failure_in_windows(AssertionError)
    def test_read_existing_geopackage(self):
        """Test we can read an existing geopackage."""
        path = standard_data_path('other', 'jakarta.gpkg')
        # Normalise the path so the comparison works across platforms.
        import os
        path = os.path.normpath(os.path.normcase(os.path.abspath(path)))
        geopackage = QFileInfo(path)
        data_store = GeoPackage(geopackage)
        # We should have 3 layers in this geopackage.
        self.assertEqual(len(data_store.layers()), 3)
        # Test we can load a vector layer.
        roads = QgsVectorLayer(
            data_store.layer_uri('roads'),
            'Test',
            'ogr'
        )
        self.assertTrue(roads.isValid())
        # Test we can load a raster layers.
        # This currently fails on windows...
        # So we have decorated it with expected fail on windows
        # Should pass on other platforms.
        path = data_store.layer_uri('flood')
        flood = QgsRasterLayer(path, 'flood')
        self.assertTrue(flood.isValid())
if __name__ == '__main__':
    # Allow running this test module directly with the stdlib runner.
    unittest.main()
|
gpl-3.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.